Optimize ComputeFrequencyResponse().

Reduce pointer following by hoisting repeatedly used array elements into local references. This lets the compiler optimize the inner loops more efficiently when building with the "-fno-strict-aliasing" flag, since it no longer has to reload through the container pointers on every iteration.
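For illustration, below is a minimal scalar sketch of the pattern, with simplified types and a hypothetical function name (the actual change applies the same hoisting to the NEON, SSE2 and AVX2 loops in the diff): the repeatedly used subscript expressions (*H2)[p] and H[p][ch] are pulled out into local references, so the inner loop indexes plain locals instead of re-following the vector/array pointers each time around.

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <vector>

    // Simplified stand-ins for the real AEC3 types (illustrative only).
    constexpr size_t kFftLengthBy2Plus1 = 65;

    struct FftData {
      std::array<float, kFftLengthBy2Plus1> re;
      std::array<float, kFftLengthBy2Plus1> im;
    };

    // Scalar sketch of the optimized pattern: (*H2)[p] and H[p][ch] are
    // hoisted into local references once per loop level, so the inner loop
    // works on locals instead of re-dereferencing the containers on every
    // iteration (which the compiler cannot elide when it must assume the
    // pointers may alias).
    void ComputeFrequencyResponseSketch(
        const std::vector<std::vector<FftData>>& H,
        std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
      for (auto& H2_p : *H2) {
        H2_p.fill(0.f);
      }
      for (size_t p = 0; p < H.size(); ++p) {
        auto& H2_p = (*H2)[p];               // One dereference per partition.
        for (size_t ch = 0; ch < H[p].size(); ++ch) {
          const FftData& H_p_ch = H[p][ch];  // One dereference per channel.
          for (size_t j = 0; j < kFftLengthBy2Plus1; ++j) {
            const float H2_new =
                H_p_ch.re[j] * H_p_ch.re[j] + H_p_ch.im[j] * H_p_ch.im[j];
            H2_p[j] = std::max(H2_p[j], H2_new);
          }
        }
      }
    }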

Bug: None
Change-Id: Ib1fd3a1cf3f89471b0ec87404650a6061eec5e2d
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/237782
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Commit-Queue: Christian Schuldt <cschuldt@google.com>
Cr-Commit-Position: refs/heads/main@{#35374}
diff --git a/modules/audio_processing/aec3/adaptive_fir_filter.cc b/modules/audio_processing/aec3/adaptive_fir_filter.cc
index bf3a780..917aa95 100644
--- a/modules/audio_processing/aec3/adaptive_fir_filter.cc
+++ b/modules/audio_processing/aec3/adaptive_fir_filter.cc
@@ -68,19 +68,21 @@
   RTC_DCHECK_EQ(H.size(), H2->capacity());
   for (size_t p = 0; p < num_partitions; ++p) {
     RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+    auto& H2_p = (*H2)[p];
     for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      const FftData& H_p_ch = H[p][ch];
       for (size_t j = 0; j < kFftLengthBy2; j += 4) {
-        const float32x4_t re = vld1q_f32(&H[p][ch].re[j]);
-        const float32x4_t im = vld1q_f32(&H[p][ch].im[j]);
+        const float32x4_t re = vld1q_f32(&H_p_ch.re[j]);
+        const float32x4_t im = vld1q_f32(&H_p_ch.im[j]);
         float32x4_t H2_new = vmulq_f32(re, re);
         H2_new = vmlaq_f32(H2_new, im, im);
-        float32x4_t H2_p_j = vld1q_f32(&(*H2)[p][j]);
+        float32x4_t H2_p_j = vld1q_f32(&H2_p[j]);
         H2_p_j = vmaxq_f32(H2_p_j, H2_new);
-        vst1q_f32(&(*H2)[p][j], H2_p_j);
+        vst1q_f32(&H2_p[j], H2_p_j);
       }
-      float H2_new = H[p][ch].re[kFftLengthBy2] * H[p][ch].re[kFftLengthBy2] +
-                     H[p][ch].im[kFftLengthBy2] * H[p][ch].im[kFftLengthBy2];
-      (*H2)[p][kFftLengthBy2] = std::max((*H2)[p][kFftLengthBy2], H2_new);
+      float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] +
+                     H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+      H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new);
     }
   }
 }
@@ -101,20 +103,22 @@
   // constexpr __mmmask8 kMaxMask = static_cast<__mmmask8>(256u);
   for (size_t p = 0; p < num_partitions; ++p) {
     RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+    auto& H2_p = (*H2)[p];
     for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      const FftData& H_p_ch = H[p][ch];
       for (size_t j = 0; j < kFftLengthBy2; j += 4) {
-        const __m128 re = _mm_loadu_ps(&H[p][ch].re[j]);
+        const __m128 re = _mm_loadu_ps(&H_p_ch.re[j]);
         const __m128 re2 = _mm_mul_ps(re, re);
-        const __m128 im = _mm_loadu_ps(&H[p][ch].im[j]);
+        const __m128 im = _mm_loadu_ps(&H_p_ch.im[j]);
         const __m128 im2 = _mm_mul_ps(im, im);
         const __m128 H2_new = _mm_add_ps(re2, im2);
-        __m128 H2_k_j = _mm_loadu_ps(&(*H2)[p][j]);
+        __m128 H2_k_j = _mm_loadu_ps(&H2_p[j]);
         H2_k_j = _mm_max_ps(H2_k_j, H2_new);
-        _mm_storeu_ps(&(*H2)[p][j], H2_k_j);
+        _mm_storeu_ps(&H2_p[j], H2_k_j);
       }
-      float H2_new = H[p][ch].re[kFftLengthBy2] * H[p][ch].re[kFftLengthBy2] +
-                     H[p][ch].im[kFftLengthBy2] * H[p][ch].im[kFftLengthBy2];
-      (*H2)[p][kFftLengthBy2] = std::max((*H2)[p][kFftLengthBy2], H2_new);
+      float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] +
+                     H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+      H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new);
     }
   }
 }
diff --git a/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc b/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc
index 245b45a..6c8c948 100644
--- a/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc
+++ b/modules/audio_processing/aec3/adaptive_fir_filter_avx2.cc
@@ -31,19 +31,21 @@
   RTC_DCHECK_EQ(H.size(), H2->capacity());
   for (size_t p = 0; p < num_partitions; ++p) {
     RTC_DCHECK_EQ(kFftLengthBy2Plus1, (*H2)[p].size());
+    auto& H2_p = (*H2)[p];
     for (size_t ch = 0; ch < num_render_channels; ++ch) {
+      const FftData& H_p_ch = H[p][ch];
       for (size_t j = 0; j < kFftLengthBy2; j += 8) {
-        __m256 re = _mm256_loadu_ps(&H[p][ch].re[j]);
+        __m256 re = _mm256_loadu_ps(&H_p_ch.re[j]);
         __m256 re2 = _mm256_mul_ps(re, re);
-        __m256 im = _mm256_loadu_ps(&H[p][ch].im[j]);
+        __m256 im = _mm256_loadu_ps(&H_p_ch.im[j]);
         re2 = _mm256_fmadd_ps(im, im, re2);
-        __m256 H2_k_j = _mm256_loadu_ps(&(*H2)[p][j]);
+        __m256 H2_k_j = _mm256_loadu_ps(&H2_p[j]);
         H2_k_j = _mm256_max_ps(H2_k_j, re2);
-        _mm256_storeu_ps(&(*H2)[p][j], H2_k_j);
+        _mm256_storeu_ps(&H2_p[j], H2_k_j);
       }
-      float H2_new = H[p][ch].re[kFftLengthBy2] * H[p][ch].re[kFftLengthBy2] +
-                     H[p][ch].im[kFftLengthBy2] * H[p][ch].im[kFftLengthBy2];
-      (*H2)[p][kFftLengthBy2] = std::max((*H2)[p][kFftLengthBy2], H2_new);
+      float H2_new = H_p_ch.re[kFftLengthBy2] * H_p_ch.re[kFftLengthBy2] +
+                     H_p_ch.im[kFftLengthBy2] * H_p_ch.im[kFftLengthBy2];
+      H2_p[kFftLengthBy2] = std::max(H2_p[kFftLengthBy2], H2_new);
     }
   }
 }