audio_processing: Replaced macro WEBRTC_SPL_RSHIFT_W16 with >>

The implementation of WEBRTC_SPL_RSHIFT_W16 is simply >>. This CL removes the macro usage in audio_processing and signal_processing.

Affected components:
* aecm
* agc
* nsx

Indirectly affected components (through signal_processing changes):
* codecs/cng
* codecs/isac/fix
* codecs/isac/main

BUG=3348,3353
TESTED=locally on Linux and trybots
R=kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/28699005

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@7432 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/common_audio/signal_processing/refl_coef_to_lpc.c b/common_audio/signal_processing/refl_coef_to_lpc.c
index 3d81778..17055c9 100644
--- a/common_audio/signal_processing/refl_coef_to_lpc.c
+++ b/common_audio/signal_processing/refl_coef_to_lpc.c
@@ -27,7 +27,7 @@
     kptr = k;
     *a = 4096; // i.e., (Word16_MAX >> 3)+1.
     *any = *a;
-    a[1] = WEBRTC_SPL_RSHIFT_W16((*k), 3);
+    a[1] = *k >> 3;
 
     for (m = 1; m < use_order; m++)
     {
@@ -38,7 +38,7 @@
         anyptr = any;
         anyptr++;
 
-        any[m + 1] = WEBRTC_SPL_RSHIFT_W16((*kptr), 3);
+        any[m + 1] = *kptr >> 3;
         for (i = 0; i < m; i++)
         {
             *anyptr = (*aptr)
diff --git a/common_audio/signal_processing/spl_sqrt.c b/common_audio/signal_processing/spl_sqrt.c
index d4f808c..fff73c0 100644
--- a/common_audio/signal_processing/spl_sqrt.c
+++ b/common_audio/signal_processing/spl_sqrt.c
@@ -17,6 +17,8 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
+#include <assert.h>
+
 int32_t WebRtcSpl_SqrtLocal(int32_t in);
 
 int32_t WebRtcSpl_SqrtLocal(int32_t in)
@@ -154,15 +156,15 @@
 
     x_norm = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16); // x_norm = AH
 
-    nshift = WEBRTC_SPL_RSHIFT_W16(sh, 1); // nshift = sh>>1
-    nshift = -nshift; // Negate the power for later de-normalization
+    nshift = (sh / 2);
+    assert(nshift >= 0);
 
     A = (int32_t)WEBRTC_SPL_LSHIFT_W32((int32_t)x_norm, 16);
     A = WEBRTC_SPL_ABS_W32(A); // A = abs(x_norm<<16)
     A = WebRtcSpl_SqrtLocal(A); // A = sqrt(A)
 
-    if ((-2 * nshift) == sh)
-    { // Even shift value case
+    if (2 * nshift == sh) {
+        // Even shift value case
 
         t16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16); // t16 = AH
 
@@ -178,7 +180,7 @@
     }
 
     A = A & ((int32_t)0x0000ffff);
-    A = (int32_t)WEBRTC_SPL_SHIFT_W32(A, nshift); // De-normalize the result
+    A >>= nshift;  // De-normalize the result.
 
     return A;
 }
diff --git a/modules/audio_processing/aecm/aecm_core.c b/modules/audio_processing/aecm/aecm_core.c
index 61d4c0b..c489731 100644
--- a/modules/audio_processing/aecm/aecm_core.c
+++ b/modules/audio_processing/aecm/aecm_core.c
@@ -691,10 +691,10 @@
     retVal = filtOld;
     if (filtOld > inVal)
     {
-        retVal -= WEBRTC_SPL_RSHIFT_W16(filtOld - inVal, stepSizeNeg);
+        retVal -= (filtOld - inVal) >> stepSizeNeg;
     } else
     {
-        retVal += WEBRTC_SPL_RSHIFT_W16(inVal - filtOld, stepSizePos);
+        retVal += (inVal - filtOld) >> stepSizePos;
     }
 
     return retVal;
diff --git a/modules/audio_processing/aecm/aecm_core_c.c b/modules/audio_processing/aecm/aecm_core_c.c
index f608a28..d656f9b 100644
--- a/modules/audio_processing/aecm/aecm_core_c.c
+++ b/modules/audio_processing/aecm/aecm_core_c.c
@@ -483,10 +483,7 @@
       if (zeros32 > tmp16no1)
       {
         echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i],
-                                                (uint16_t)WEBRTC_SPL_RSHIFT_W16(
-                                                  supGain,
-                                                  tmp16no1)
-                                                );
+                                                supGain >> tmp16no1);
       } else
       {
         // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
diff --git a/modules/audio_processing/aecm/aecm_core_mips.c b/modules/audio_processing/aecm/aecm_core_mips.c
index 4a07042..fdd5b2c 100644
--- a/modules/audio_processing/aecm/aecm_core_mips.c
+++ b/modules/audio_processing/aecm/aecm_core_mips.c
@@ -988,7 +988,7 @@
       if (zeros32 > tmp16no1) {
         echoEst32Gained = WEBRTC_SPL_UMUL_32_16(
                             (uint32_t)aecm->echoFilt[i],
-                            (uint16_t)WEBRTC_SPL_RSHIFT_W16(supGain, tmp16no1));
+                            supGain >> tmp16no1);
       } else {
         // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
         echoEst32Gained = WEBRTC_SPL_UMUL_32_16(
diff --git a/modules/audio_processing/agc/analog_agc.c b/modules/audio_processing/agc/analog_agc.c
index 357aff0..93fe987 100644
--- a/modules/audio_processing/agc/analog_agc.c
+++ b/modules/audio_processing/agc/analog_agc.c
@@ -296,7 +296,7 @@
         ptr = stt->Rxx16w32_array[0];
     }
 
-    for (i = 0; i < WEBRTC_SPL_RSHIFT_W16(M, 1); i++)
+    for (i = 0; i < M / 2; i++)
     {
         if (stt->fs == 16000)
         {
@@ -546,7 +546,7 @@
     {
         /* Lower the analog target level since we have reached its maximum */
         zeros = WebRtcSpl_NormW32(stt->Rxx160_LPw32);
-        stt->targetIdxOffset = WEBRTC_SPL_RSHIFT_W16((3 * zeros) - stt->targetIdx - 2, 2);
+        stt->targetIdxOffset = (3 * zeros - stt->targetIdx - 2) / 4;
     }
 #endif
 
@@ -696,7 +696,7 @@
         if (stt->vadMic.stdLongTerm < 4500)
         {
             /* Scale between min and max threshold */
-            vadThresh += WEBRTC_SPL_RSHIFT_W16(4500 - stt->vadMic.stdLongTerm, 1);
+            vadThresh += (4500 - stt->vadMic.stdLongTerm) / 2;
         }
 
         /* stt->vadThreshold = (31 * stt->vadThreshold + vadThresh) / 32; */
diff --git a/modules/audio_processing/agc/digital_agc.c b/modules/audio_processing/agc/digital_agc.c
index 82a4619..c99a78c 100644
--- a/modules/audio_processing/agc/digital_agc.c
+++ b/modules/audio_processing/agc/digital_agc.c
@@ -507,7 +507,7 @@
     {
         if (gate < 2500)
         {
-            gain_adj = WEBRTC_SPL_RSHIFT_W16(2500 - gate, 5);
+            gain_adj = (2500 - gate) >> 5;
         } else
         {
             gain_adj = 0;
diff --git a/modules/audio_processing/ns/nsx_core.c b/modules/audio_processing/ns/nsx_core.c
index ff72883..d021f68 100644
--- a/modules/audio_processing/ns/nsx_core.c
+++ b/modules/audio_processing/ns/nsx_core.c
@@ -407,13 +407,11 @@
         // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
         // CounterDiv=1/(inst->counter[s]+1) in Q15
         tmp16 += 2;
-        tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 2);
-        inst->noiseEstLogQuantile[offset + i] += tmp16no1;
+        inst->noiseEstLogQuantile[offset + i] += tmp16 / 4;
       } else {
         tmp16 += 1;
-        tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 1);
         // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
-        tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1);
+        tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16 / 2, 3, 1);
         inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
         if (inst->noiseEstLogQuantile[offset + i] < logval) {
           // This is the smallest fixed point representation we can
@@ -611,7 +609,7 @@
     // Piecewise linear approximation of 'b' in
     // 2^(int_part+frac_part) = 2^int_part * (1 + b)
     // 'b' is given in Q11 and below stored in frac_part.
-    if (WEBRTC_SPL_RSHIFT_W16(frac_part, 10)) {
+    if (frac_part >> 10) {
       // Upper fractional part
       tmp32no2 = WEBRTC_SPL_MUL_16_16(2048 - frac_part, 1244); // Q21
       tmp32no2 = 2048 - WEBRTC_SPL_RSHIFT_W32(tmp32no2, 10);
@@ -669,7 +667,7 @@
     inst->maxLrt = 0x0080000;
     inst->minLrt = 104858;
   }
-  inst->anaLen2 = WEBRTC_SPL_RSHIFT_W16(inst->anaLen, 1);
+  inst->anaLen2 = inst->anaLen / 2;
   inst->magnLen = inst->anaLen2 + 1;
 
   if (inst->real_fft != NULL) {
@@ -1410,7 +1408,7 @@
       tmpU32no1 >>= zeros;
     }
     tmp_2_w32 -= (int32_t)WEBRTC_SPL_UMUL_32_16(tmpU32no1, tmp_u16); // Q(11-zeros)
-    matrix_determinant = WEBRTC_SPL_RSHIFT_W16(matrix_determinant, zeros); // Q(-zeros)
+    matrix_determinant >>= zeros;  // Q(-zeros)
     tmp_2_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q11
     tmp_2_w32 += (int32_t)net_norm << 11;  // Q11
     if (tmp_2_w32 < 0) {
diff --git a/modules/audio_processing/ns/nsx_core_neon.c b/modules/audio_processing/ns/nsx_core_neon.c
index 0a72a08..f88a59d 100644
--- a/modules/audio_processing/ns/nsx_core_neon.c
+++ b/modules/audio_processing/ns/nsx_core_neon.c
@@ -313,13 +313,11 @@
       // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
       // CounterDiv=1/(inst->counter[s]+1) in Q15
       tmp16 += 2;
-      tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 2);
-      inst->noiseEstLogQuantile[offset + i] += tmp16no1;
+      inst->noiseEstLogQuantile[offset + i] += tmp16 / 4;
     } else {
       tmp16 += 1;
-      tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 1);
       // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
-      tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1);
+      tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16 / 2, 3, 1);
       inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
       if (inst->noiseEstLogQuantile[offset + i] < logval) {
         // logval is the smallest fixed point representation we can have.