path: root/third_party/aom/aom_dsp/x86
author     trav90 <travawine@palemoon.org>   2018-10-17 05:59:08 -0500
committer  trav90 <travawine@palemoon.org>   2018-10-17 05:59:08 -0500
commit     df9477dfa60ebb5d31bc142e58ce46535c17abce (patch)
tree       c4fdd5d1b09d08c0514f208246260fc87372cb56 /third_party/aom/aom_dsp/x86
parent     0cc51bc106250988cc3b89cb5d743a5af52cd35a (diff)
Update aom to slightly newer commit ID
Diffstat (limited to 'third_party/aom/aom_dsp/x86')
-rw-r--r--  third_party/aom/aom_dsp/x86/aom_convolve_hip_sse2.c            195
-rw-r--r--  third_party/aom/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c    203
-rw-r--r--  third_party/aom/aom_dsp/x86/avg_intrin_sse2.c                   46
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c             656
-rw-r--r--  third_party/aom/aom_dsp/x86/inv_txfm_avx2.c                   1238
-rw-r--r--  third_party/aom/aom_dsp/x86/inv_txfm_common_avx2.h              80
-rw-r--r--  third_party/aom/aom_dsp/x86/inv_txfm_sse2.c                    103
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c          498
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c    2799
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_intrinsic_ssse3.h              45
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_sad_sse4.c                      1
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_variance_sse4.c                 3
-rw-r--r--  third_party/aom/aom_dsp/x86/synonyms.h                          28
-rw-r--r--  third_party/aom/aom_dsp/x86/txfm_common_avx2.h                  44
14 files changed, 3413 insertions, 2526 deletions
diff --git a/third_party/aom/aom_dsp/x86/aom_convolve_hip_sse2.c b/third_party/aom/aom_dsp/x86/aom_convolve_hip_sse2.c
new file mode 100644
index 000000000..14352895d
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/aom_convolve_hip_sse2.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <emmintrin.h>
+#include <assert.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
+
+void aom_convolve8_add_src_hip_sse2(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4,
+ int w, int h) {
+ const int bd = 8;
+ assert(x_step_q4 == 16 && y_step_q4 == 16);
+ assert(!(w & 7));
+ (void)x_step_q4;
+ (void)y_step_q4;
+
+ uint16_t temp[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE];
+ int intermediate_height = h + SUBPEL_TAPS - 1;
+ int i, j;
+ const int center_tap = ((SUBPEL_TAPS - 1) / 2);
+ const uint8_t *const src_ptr = src - center_tap * src_stride - center_tap;
+
+ const __m128i zero = _mm_setzero_si128();
+ // Add an offset to account for the "add_src" part of the convolve function.
+ const __m128i offset = _mm_insert_epi16(zero, 1 << FILTER_BITS, 3);
+
+ /* Horizontal filter */
+ {
+ const __m128i coeffs_x =
+ _mm_add_epi16(_mm_loadu_si128((__m128i *)filter_x), offset);
+
+ // coeffs 0 1 0 1 2 3 2 3
+ const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_x, coeffs_x);
+ // coeffs 4 5 4 5 6 7 6 7
+ const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_x, coeffs_x);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
+ // coeffs 2 3 2 3 2 3 2 3
+ const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
+ // coeffs 4 5 4 5 4 5 4 5
+ const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
+ // coeffs 6 7 6 7 6 7 6 7
+ const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
+
+ const __m128i round_const =
+ _mm_set1_epi32((1 << (FILTER_BITS - EXTRAPREC_BITS - 1)) +
+ (1 << (bd + FILTER_BITS - 1)));
+
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const __m128i data =
+ _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j]);
+
+ // Filter even-index pixels
+ const __m128i src_0 = _mm_unpacklo_epi8(data, zero);
+ const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);
+ const __m128i src_2 = _mm_unpacklo_epi8(_mm_srli_si128(data, 2), zero);
+ const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
+ const __m128i src_4 = _mm_unpacklo_epi8(_mm_srli_si128(data, 4), zero);
+ const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
+ const __m128i src_6 = _mm_unpacklo_epi8(_mm_srli_si128(data, 6), zero);
+ const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
+
+ __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
+ _mm_add_epi32(res_2, res_6));
+ res_even = _mm_srai_epi32(_mm_add_epi32(res_even, round_const),
+ FILTER_BITS - EXTRAPREC_BITS);
+
+ // Filter odd-index pixels
+ const __m128i src_1 = _mm_unpacklo_epi8(_mm_srli_si128(data, 1), zero);
+ const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
+ const __m128i src_3 = _mm_unpacklo_epi8(_mm_srli_si128(data, 3), zero);
+ const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
+ const __m128i src_5 = _mm_unpacklo_epi8(_mm_srli_si128(data, 5), zero);
+ const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
+ const __m128i src_7 = _mm_unpacklo_epi8(_mm_srli_si128(data, 7), zero);
+ const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
+
+ __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
+ _mm_add_epi32(res_3, res_7));
+ res_odd = _mm_srai_epi32(_mm_add_epi32(res_odd, round_const),
+ FILTER_BITS - EXTRAPREC_BITS);
+
+ // Pack in the column order 0, 2, 4, 6, 1, 3, 5, 7
+ __m128i res = _mm_packs_epi32(res_even, res_odd);
+ res = _mm_min_epi16(_mm_max_epi16(res, zero),
+ _mm_set1_epi16(EXTRAPREC_CLAMP_LIMIT(bd) - 1));
+ _mm_storeu_si128((__m128i *)&temp[i * MAX_SB_SIZE + j], res);
+ }
+ }
+ }
+
+ /* Vertical filter */
+ {
+ const __m128i coeffs_y =
+ _mm_add_epi16(_mm_loadu_si128((__m128i *)filter_y), offset);
+
+ // coeffs 0 1 0 1 2 3 2 3
+ const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_y, coeffs_y);
+ // coeffs 4 5 4 5 6 7 6 7
+ const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_y, coeffs_y);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
+ // coeffs 2 3 2 3 2 3 2 3
+ const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
+ // coeffs 4 5 4 5 4 5 4 5
+ const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
+ // coeffs 6 7 6 7 6 7 6 7
+ const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
+
+ const __m128i round_const =
+ _mm_set1_epi32((1 << (FILTER_BITS + EXTRAPREC_BITS - 1)) -
+ (1 << (bd + FILTER_BITS + EXTRAPREC_BITS - 1)));
+
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ // Filter even-index pixels
+ const uint16_t *data = &temp[i * MAX_SB_SIZE + j];
+ const __m128i src_0 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 0 * MAX_SB_SIZE),
+ *(__m128i *)(data + 1 * MAX_SB_SIZE));
+ const __m128i src_2 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 2 * MAX_SB_SIZE),
+ *(__m128i *)(data + 3 * MAX_SB_SIZE));
+ const __m128i src_4 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 4 * MAX_SB_SIZE),
+ *(__m128i *)(data + 5 * MAX_SB_SIZE));
+ const __m128i src_6 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 6 * MAX_SB_SIZE),
+ *(__m128i *)(data + 7 * MAX_SB_SIZE));
+
+ const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);
+ const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
+ const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
+ const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
+
+ const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
+ _mm_add_epi32(res_4, res_6));
+
+ // Filter odd-index pixels
+ const __m128i src_1 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 0 * MAX_SB_SIZE),
+ *(__m128i *)(data + 1 * MAX_SB_SIZE));
+ const __m128i src_3 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 2 * MAX_SB_SIZE),
+ *(__m128i *)(data + 3 * MAX_SB_SIZE));
+ const __m128i src_5 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 4 * MAX_SB_SIZE),
+ *(__m128i *)(data + 5 * MAX_SB_SIZE));
+ const __m128i src_7 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 6 * MAX_SB_SIZE),
+ *(__m128i *)(data + 7 * MAX_SB_SIZE));
+
+ const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
+ const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
+ const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
+ const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
+
+ const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
+ _mm_add_epi32(res_5, res_7));
+
+ // Rearrange pixels back into the order 0 ... 7
+ const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
+ const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
+
+ const __m128i res_lo_round = _mm_srai_epi32(
+ _mm_add_epi32(res_lo, round_const), FILTER_BITS + EXTRAPREC_BITS);
+ const __m128i res_hi_round = _mm_srai_epi32(
+ _mm_add_epi32(res_hi, round_const), FILTER_BITS + EXTRAPREC_BITS);
+
+ const __m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
+ __m128i res_8bit = _mm_packus_epi16(res_16bit, res_16bit);
+
+ __m128i *const p = (__m128i *)&dst[i * dst_stride + j];
+ _mm_storel_epi64(p, res_8bit);
+ }
+ }
+ }
+}
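
The rounding in the new filter above is easier to follow in scalar form. Below is a minimal sketch of the two-pass rounding, using the FILTER_BITS, EXTRAPREC_BITS and EXTRAPREC_CLAMP_LIMIT macros the file already pulls in through its headers; the helper names are hypothetical and not part of the diff.

#include <stdint.h>
#include "aom_dsp/aom_filter.h"

// Horizontal pass: same round_const and FILTER_BITS - EXTRAPREC_BITS shift as
// above; the (1 << (bd + FILTER_BITS - 1)) term keeps the clamped result
// non-negative so it fits the uint16_t temp[] buffer.
static uint16_t hip_round_horiz(int32_t sum, int bd) {
  const int32_t round_const = (1 << (FILTER_BITS - EXTRAPREC_BITS - 1)) +
                              (1 << (bd + FILTER_BITS - 1));
  int32_t v = (sum + round_const) >> (FILTER_BITS - EXTRAPREC_BITS);
  if (v < 0) v = 0;
  if (v > EXTRAPREC_CLAMP_LIMIT(bd) - 1) v = EXTRAPREC_CLAMP_LIMIT(bd) - 1;
  return (uint16_t)v;
}

// Vertical pass: the subtracted term compensates for the offset carried
// through from the first pass, and the larger shift drops the extra
// EXTRAPREC_BITS of intermediate precision along with FILTER_BITS.
static int32_t hip_round_vert(int32_t sum, int bd) {
  const int32_t round_const = (1 << (FILTER_BITS + EXTRAPREC_BITS - 1)) -
                              (1 << (bd + FILTER_BITS + EXTRAPREC_BITS - 1));
  return (sum + round_const) >> (FILTER_BITS + EXTRAPREC_BITS);
}
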
diff --git a/third_party/aom/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c b/third_party/aom/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c
new file mode 100644
index 000000000..74ce80e50
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/aom_highbd_convolve_hip_ssse3.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <tmmintrin.h>
+#include <assert.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/aom_convolve.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/aom_filter.h"
+
+#if EXTRAPREC_BITS > 2
+#error "Highbd high-prec convolve filter only supports EXTRAPREC_BITS <= 2"
+#error "(need to use 32-bit intermediates for EXTRAPREC_BITS > 2)"
+#endif
+
+void aom_highbd_convolve8_add_src_hip_ssse3(
+ const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8,
+ ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4, int w, int h, int bd) {
+ assert(x_step_q4 == 16 && y_step_q4 == 16);
+ assert(!(w & 7));
+ (void)x_step_q4;
+ (void)y_step_q4;
+
+ const uint16_t *const src = CONVERT_TO_SHORTPTR(src8);
+ uint16_t *const dst = CONVERT_TO_SHORTPTR(dst8);
+
+ uint16_t temp[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE];
+ int intermediate_height = h + SUBPEL_TAPS - 1;
+ int i, j;
+ const int center_tap = ((SUBPEL_TAPS - 1) / 2);
+ const uint16_t *const src_ptr = src - center_tap * src_stride - center_tap;
+
+ const __m128i zero = _mm_setzero_si128();
+ // Add an offset to account for the "add_src" part of the convolve function.
+ const __m128i offset = _mm_insert_epi16(zero, 1 << FILTER_BITS, 3);
+
+ /* Horizontal filter */
+ {
+ const __m128i coeffs_x =
+ _mm_add_epi16(_mm_loadu_si128((__m128i *)filter_x), offset);
+
+ // coeffs 0 1 0 1 2 3 2 3
+ const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_x, coeffs_x);
+ // coeffs 4 5 4 5 6 7 6 7
+ const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_x, coeffs_x);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
+ // coeffs 2 3 2 3 2 3 2 3
+ const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
+ // coeffs 4 5 4 5 4 5 4 5
+ const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
+ // coeffs 6 7 6 7 6 7 6 7
+ const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
+
+ const __m128i round_const =
+ _mm_set1_epi32((1 << (FILTER_BITS - EXTRAPREC_BITS - 1)) +
+ (1 << (bd + FILTER_BITS - 1)));
+
+ for (i = 0; i < intermediate_height; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const __m128i data =
+ _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j]);
+ const __m128i data2 =
+ _mm_loadu_si128((__m128i *)&src_ptr[i * src_stride + j + 8]);
+
+ // Filter even-index pixels
+ const __m128i res_0 = _mm_madd_epi16(data, coeff_01);
+ const __m128i res_2 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 4), coeff_23);
+ const __m128i res_4 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 8), coeff_45);
+ const __m128i res_6 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 12), coeff_67);
+
+ __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
+ _mm_add_epi32(res_2, res_6));
+ res_even = _mm_srai_epi32(_mm_add_epi32(res_even, round_const),
+ FILTER_BITS - EXTRAPREC_BITS);
+
+ // Filter odd-index pixels
+ const __m128i res_1 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 2), coeff_01);
+ const __m128i res_3 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 6), coeff_23);
+ const __m128i res_5 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 10), coeff_45);
+ const __m128i res_7 =
+ _mm_madd_epi16(_mm_alignr_epi8(data2, data, 14), coeff_67);
+
+ __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
+ _mm_add_epi32(res_3, res_7));
+ res_odd = _mm_srai_epi32(_mm_add_epi32(res_odd, round_const),
+ FILTER_BITS - EXTRAPREC_BITS);
+
+ // Pack in the column order 0, 2, 4, 6, 1, 3, 5, 7
+ const __m128i maxval = _mm_set1_epi16((EXTRAPREC_CLAMP_LIMIT(bd)) - 1);
+ __m128i res = _mm_packs_epi32(res_even, res_odd);
+ res = _mm_min_epi16(_mm_max_epi16(res, zero), maxval);
+ _mm_storeu_si128((__m128i *)&temp[i * MAX_SB_SIZE + j], res);
+ }
+ }
+ }
+
+ /* Vertical filter */
+ {
+ const __m128i coeffs_y =
+ _mm_add_epi16(_mm_loadu_si128((__m128i *)filter_y), offset);
+
+ // coeffs 0 1 0 1 2 3 2 3
+ const __m128i tmp_0 = _mm_unpacklo_epi32(coeffs_y, coeffs_y);
+ // coeffs 4 5 4 5 6 7 6 7
+ const __m128i tmp_1 = _mm_unpackhi_epi32(coeffs_y, coeffs_y);
+
+ // coeffs 0 1 0 1 0 1 0 1
+ const __m128i coeff_01 = _mm_unpacklo_epi64(tmp_0, tmp_0);
+ // coeffs 2 3 2 3 2 3 2 3
+ const __m128i coeff_23 = _mm_unpackhi_epi64(tmp_0, tmp_0);
+ // coeffs 4 5 4 5 4 5 4 5
+ const __m128i coeff_45 = _mm_unpacklo_epi64(tmp_1, tmp_1);
+ // coeffs 6 7 6 7 6 7 6 7
+ const __m128i coeff_67 = _mm_unpackhi_epi64(tmp_1, tmp_1);
+
+ const __m128i round_const =
+ _mm_set1_epi32((1 << (FILTER_BITS + EXTRAPREC_BITS - 1)) -
+ (1 << (bd + FILTER_BITS + EXTRAPREC_BITS - 1)));
+
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ // Filter even-index pixels
+ const uint16_t *data = &temp[i * MAX_SB_SIZE + j];
+ const __m128i src_0 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 0 * MAX_SB_SIZE),
+ *(__m128i *)(data + 1 * MAX_SB_SIZE));
+ const __m128i src_2 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 2 * MAX_SB_SIZE),
+ *(__m128i *)(data + 3 * MAX_SB_SIZE));
+ const __m128i src_4 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 4 * MAX_SB_SIZE),
+ *(__m128i *)(data + 5 * MAX_SB_SIZE));
+ const __m128i src_6 =
+ _mm_unpacklo_epi16(*(__m128i *)(data + 6 * MAX_SB_SIZE),
+ *(__m128i *)(data + 7 * MAX_SB_SIZE));
+
+ const __m128i res_0 = _mm_madd_epi16(src_0, coeff_01);
+ const __m128i res_2 = _mm_madd_epi16(src_2, coeff_23);
+ const __m128i res_4 = _mm_madd_epi16(src_4, coeff_45);
+ const __m128i res_6 = _mm_madd_epi16(src_6, coeff_67);
+
+ const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
+ _mm_add_epi32(res_4, res_6));
+
+ // Filter odd-index pixels
+ const __m128i src_1 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 0 * MAX_SB_SIZE),
+ *(__m128i *)(data + 1 * MAX_SB_SIZE));
+ const __m128i src_3 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 2 * MAX_SB_SIZE),
+ *(__m128i *)(data + 3 * MAX_SB_SIZE));
+ const __m128i src_5 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 4 * MAX_SB_SIZE),
+ *(__m128i *)(data + 5 * MAX_SB_SIZE));
+ const __m128i src_7 =
+ _mm_unpackhi_epi16(*(__m128i *)(data + 6 * MAX_SB_SIZE),
+ *(__m128i *)(data + 7 * MAX_SB_SIZE));
+
+ const __m128i res_1 = _mm_madd_epi16(src_1, coeff_01);
+ const __m128i res_3 = _mm_madd_epi16(src_3, coeff_23);
+ const __m128i res_5 = _mm_madd_epi16(src_5, coeff_45);
+ const __m128i res_7 = _mm_madd_epi16(src_7, coeff_67);
+
+ const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
+ _mm_add_epi32(res_5, res_7));
+
+ // Rearrange pixels back into the order 0 ... 7
+ const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
+ const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
+
+ const __m128i res_lo_round = _mm_srai_epi32(
+ _mm_add_epi32(res_lo, round_const), FILTER_BITS + EXTRAPREC_BITS);
+ const __m128i res_hi_round = _mm_srai_epi32(
+ _mm_add_epi32(res_hi, round_const), FILTER_BITS + EXTRAPREC_BITS);
+
+ const __m128i maxval = _mm_set1_epi16((1 << bd) - 1);
+ __m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
+ res_16bit = _mm_min_epi16(_mm_max_epi16(res_16bit, zero), maxval);
+
+ __m128i *const p = (__m128i *)&dst[i * dst_stride + j];
+ _mm_storeu_si128(p, res_16bit);
+ }
+ }
+ }
+}
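
Both new files add an offset of 1 << FILTER_BITS to one filter tap via _mm_insert_epi16(zero, 1 << FILTER_BITS, 3) to realise the "add_src" behaviour mentioned in their comments. A scalar sketch of that trick for the 8-bit case follows; the helper name is hypothetical and not part of the diff.

#include <stdint.h>
#include "aom_dsp/aom_filter.h"

// Bumping the tap that lines up with the unfiltered source pixel by
// (1 << FILTER_BITS) means the normalized output equals filter(src) + src,
// which is exactly what the inserted offset above achieves.
static int32_t add_src_filter_px(const int16_t *filter, const uint8_t *src) {
  const int center = (SUBPEL_TAPS - 1) / 2;  // same center_tap as above
  int32_t sum = 0;
  int k;
  for (k = 0; k < SUBPEL_TAPS; ++k) sum += filter[k] * src[k];
  sum += (1 << FILTER_BITS) * src[center];  // the inserted offset
  return sum;  // the passes above then round/shift this as sketched earlier
}
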
diff --git a/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c b/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
index bcdc20f63..1a6457402 100644
--- a/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
+++ b/third_party/aom/aom_dsp/x86/avg_intrin_sse2.c
@@ -94,52 +94,6 @@ void aom_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
*min = _mm_extract_epi16(minabsdiff, 0);
}
-unsigned int aom_avg_8x8_sse2(const uint8_t *s, int p) {
- __m128i s0, s1, u0;
- unsigned int avg = 0;
- u0 = _mm_setzero_si128();
- s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 5 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 6 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 7 * p)), u0);
- s0 = _mm_adds_epu16(s0, s1);
-
- s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 8));
- s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 32));
- s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
- avg = _mm_extract_epi16(s0, 0);
- return (avg + 32) >> 6;
-}
-
-unsigned int aom_avg_4x4_sse2(const uint8_t *s, int p) {
- __m128i s0, s1, u0;
- unsigned int avg = 0;
-
- u0 = _mm_setzero_si128();
- s0 = _mm_unpacklo_epi8(xx_loadl_32(s), u0);
- s1 = _mm_unpacklo_epi8(xx_loadl_32(s + p), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(xx_loadl_32(s + 2 * p), u0);
- s0 = _mm_adds_epu16(s0, s1);
- s1 = _mm_unpacklo_epi8(xx_loadl_32(s + 3 * p), u0);
- s0 = _mm_adds_epu16(s0, s1);
-
- s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 4));
- s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
- avg = _mm_extract_epi16(s0, 0);
- return (avg + 8) >> 4;
-}
-
static void hadamard_col8_sse2(__m128i *in, int iter) {
__m128i a0 = in[0];
__m128i a1 = in[1];
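
The two functions deleted above computed plain block averages. For reference, a scalar sketch equivalent to the removed aom_avg_8x8_sse2 (not part of the diff):

#include <stdint.h>

// Sum the 8x8 block at stride p and round-shift by 6 (divide by 64). The
// removed aom_avg_4x4_sse2 did the same over a 4x4 block with (sum + 8) >> 4.
static unsigned int avg_8x8_c(const uint8_t *s, int p) {
  unsigned int sum = 0;
  int r, c;
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) sum += s[r * p + c];
  return (sum + 32) >> 6;
}
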
diff --git a/third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c b/third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c
index 7d96e26ae..133640eb7 100644
--- a/third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c
+++ b/third_party/aom/aom_dsp/x86/highbd_convolve_avx2.c
@@ -14,30 +14,6 @@
#include "./aom_dsp_rtcd.h"
#include "aom_dsp/x86/convolve.h"
-#define CONV8_ROUNDING_BITS (7)
-
-static const uint8_t signal_pattern_0[32] = { 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6,
- 7, 6, 7, 8, 9, 0, 1, 2, 3, 2, 3,
- 4, 5, 4, 5, 6, 7, 6, 7, 8, 9 };
-
-static const uint8_t signal_pattern_1[32] = { 4, 5, 6, 7, 6, 7, 8, 9,
- 8, 9, 10, 11, 10, 11, 12, 13,
- 4, 5, 6, 7, 6, 7, 8, 9,
- 8, 9, 10, 11, 10, 11, 12, 13 };
-
-static const uint8_t signal_pattern_2[32] = { 6, 7, 8, 9, 8, 9, 10, 11,
- 10, 11, 12, 13, 12, 13, 14, 15,
- 6, 7, 8, 9, 8, 9, 10, 11,
- 10, 11, 12, 13, 12, 13, 14, 15 };
-
-static const uint32_t signal_index[8] = { 2, 3, 4, 5, 2, 3, 4, 5 };
-
-typedef enum { PACK_8x1, PACK_8x2, PACK_16x1 } PixelPackFormat;
-
-typedef void (*WritePixels)(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch);
-
// -----------------------------------------------------------------------------
// Copy and average
@@ -217,6 +193,27 @@ void aom_highbd_convolve_avg_avx2(const uint8_t *src8, ptrdiff_t src_stride,
}
// -----------------------------------------------------------------------------
+// Horizontal and vertical filtering
+
+#define CONV8_ROUNDING_BITS (7)
+
+static const uint8_t signal_pattern_0[32] = { 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6,
+ 7, 6, 7, 8, 9, 0, 1, 2, 3, 2, 3,
+ 4, 5, 4, 5, 6, 7, 6, 7, 8, 9 };
+
+static const uint8_t signal_pattern_1[32] = { 4, 5, 6, 7, 6, 7, 8, 9,
+ 8, 9, 10, 11, 10, 11, 12, 13,
+ 4, 5, 6, 7, 6, 7, 8, 9,
+ 8, 9, 10, 11, 10, 11, 12, 13 };
+
+static const uint8_t signal_pattern_2[32] = { 6, 7, 8, 9, 8, 9, 10, 11,
+ 10, 11, 12, 13, 12, 13, 14, 15,
+ 6, 7, 8, 9, 8, 9, 10, 11,
+ 10, 11, 12, 13, 12, 13, 14, 15 };
+
+static const uint32_t signal_index[8] = { 2, 3, 4, 5, 2, 3, 4, 5 };
+
+// -----------------------------------------------------------------------------
// Horizontal Filtering
static INLINE void pack_pixels(const __m256i *s, __m256i *p /*p[4]*/) {
@@ -248,52 +245,30 @@ static INLINE void pack_16_pixels(const __m256i *s0, const __m256i *s1,
x[7] = _mm256_permute2x128_si256(pp[1], pp[5], 0x31);
}
-static INLINE void pack_pixels_with_format(const uint16_t *src,
- PixelPackFormat fmt,
- ptrdiff_t stride, __m256i *x) {
- switch (fmt) {
- case PACK_8x1: {
- __m256i pp[8];
- __m256i s0;
- s0 = _mm256_loadu_si256((const __m256i *)src);
- pack_pixels(&s0, pp);
- x[0] = _mm256_permute2x128_si256(pp[0], pp[2], 0x30);
- x[1] = _mm256_permute2x128_si256(pp[1], pp[3], 0x30);
- x[2] = _mm256_permute2x128_si256(pp[2], pp[0], 0x30);
- x[3] = _mm256_permute2x128_si256(pp[3], pp[1], 0x30);
- break;
- }
- case PACK_8x2: {
- __m256i s0, s1;
- s0 = _mm256_loadu_si256((const __m256i *)src);
- s1 = _mm256_loadu_si256((const __m256i *)(src + stride));
- pack_16_pixels(&s0, &s1, x);
- break;
- }
- case PACK_16x1: {
- __m256i s0, s1;
- s0 = _mm256_loadu_si256((const __m256i *)src);
- s1 = _mm256_loadu_si256((const __m256i *)(src + 8));
- pack_16_pixels(&s0, &s1, x);
- break;
- }
- default: { assert(0); }
- }
-}
-
-static INLINE void pack_8x1_pixels(const uint16_t *src, const ptrdiff_t pitch,
- __m256i *x /*x[4]*/) {
- pack_pixels_with_format(src, PACK_8x1, pitch, x);
+static INLINE void pack_8x1_pixels(const uint16_t *src, __m256i *x) {
+ __m256i pp[8];
+ __m256i s0;
+ s0 = _mm256_loadu_si256((const __m256i *)src);
+ pack_pixels(&s0, pp);
+ x[0] = _mm256_permute2x128_si256(pp[0], pp[2], 0x30);
+ x[1] = _mm256_permute2x128_si256(pp[1], pp[3], 0x30);
+ x[2] = _mm256_permute2x128_si256(pp[2], pp[0], 0x30);
+ x[3] = _mm256_permute2x128_si256(pp[3], pp[1], 0x30);
}
-static INLINE void pack_8x2_pixels(const uint16_t *src, const ptrdiff_t pitch,
- __m256i *x /*x[8]*/) {
- pack_pixels_with_format(src, PACK_8x2, pitch, x);
+static INLINE void pack_8x2_pixels(const uint16_t *src, ptrdiff_t stride,
+ __m256i *x) {
+ __m256i s0, s1;
+ s0 = _mm256_loadu_si256((const __m256i *)src);
+ s1 = _mm256_loadu_si256((const __m256i *)(src + stride));
+ pack_16_pixels(&s0, &s1, x);
}
-static INLINE void pack_16x1_pixels(const uint16_t *src, const ptrdiff_t pitch,
- __m256i *x /*x[8]*/) {
- pack_pixels_with_format(src, PACK_16x1, pitch, x);
+static INLINE void pack_16x1_pixels(const uint16_t *src, __m256i *x) {
+ __m256i s0, s1;
+ s0 = _mm256_loadu_si256((const __m256i *)src);
+ s1 = _mm256_loadu_si256((const __m256i *)(src + 8));
+ pack_16_pixels(&s0, &s1, x);
}
// Note:
@@ -323,51 +298,49 @@ static INLINE void filter_8x1_pixels(const __m256i *sig /*sig[4]*/,
a0 = _mm256_madd_epi16(fil[1], sig[1]);
a1 = _mm256_madd_epi16(fil[2], sig[2]);
- const __m256i min = _mm256_min_epi32(a0, a1);
- a = _mm256_add_epi32(a, min);
-
- const __m256i max = _mm256_max_epi32(a0, a1);
- a = _mm256_add_epi32(a, max);
-
- const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
- a = _mm256_add_epi32(a, rounding);
- *y = _mm256_srai_epi32(a, CONV8_ROUNDING_BITS);
+ {
+ const __m256i min = _mm256_min_epi32(a0, a1);
+ a = _mm256_add_epi32(a, min);
+ }
+ {
+ const __m256i max = _mm256_max_epi32(a0, a1);
+ a = _mm256_add_epi32(a, max);
+ }
+ {
+ const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
+ a = _mm256_add_epi32(a, rounding);
+ *y = _mm256_srai_epi32(a, CONV8_ROUNDING_BITS);
+ }
}
-static void write_8x1_pixels(const __m256i *y, const __m256i *z,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
+static INLINE void store_8x1_pixels(const __m256i *y, const __m256i *mask,
+ uint16_t *dst) {
const __m128i a0 = _mm256_castsi256_si128(*y);
const __m128i a1 = _mm256_extractf128_si256(*y, 1);
__m128i res = _mm_packus_epi32(a0, a1);
- (void)z;
- (void)pitch;
res = _mm_min_epi16(res, _mm256_castsi256_si128(*mask));
_mm_storeu_si128((__m128i *)dst, res);
}
-static void write_8x2_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
+static INLINE void store_8x2_pixels(const __m256i *y0, const __m256i *y1,
+ const __m256i *mask, uint16_t *dst,
+ ptrdiff_t pitch) {
__m256i a = _mm256_packus_epi32(*y0, *y1);
a = _mm256_min_epi16(a, *mask);
_mm_storeu_si128((__m128i *)dst, _mm256_castsi256_si128(a));
_mm_storeu_si128((__m128i *)(dst + pitch), _mm256_extractf128_si256(a, 1));
}
-static void write_16x1_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t dst_pitch) {
- (void)dst_pitch;
+static INLINE void store_16x1_pixels(const __m256i *y0, const __m256i *y1,
+ const __m256i *mask, uint16_t *dst) {
__m256i a = _mm256_packus_epi32(*y0, *y1);
a = _mm256_min_epi16(a, *mask);
_mm256_storeu_si256((__m256i *)dst, a);
}
-static void filter_block_width8_horiz(
- const uint16_t *src_ptr, ptrdiff_t src_pitch, const WritePixels write_8x1,
- const WritePixels write_8x2, uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter, int bd) {
+static void aom_highbd_filter_block1d8_h8_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[8], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -379,32 +352,22 @@ static void filter_block_width8_horiz(
pack_8x2_pixels(src_ptr, src_pitch, signal);
filter_8x1_pixels(signal, ff, &res0);
filter_8x1_pixels(&signal[4], ff, &res1);
- write_8x2(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
height -= 2;
src_ptr += src_pitch << 1;
dst_ptr += dst_pitch << 1;
} while (height > 1);
if (height > 0) {
- pack_8x1_pixels(src_ptr, src_pitch, signal);
+ pack_8x1_pixels(src_ptr, signal);
filter_8x1_pixels(signal, ff, &res0);
- write_8x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_8x1_pixels(&res0, &max, dst_ptr);
}
}
-static void aom_highbd_filter_block1d8_h8_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+static void aom_highbd_filter_block1d16_h8_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_horiz(src, src_pitch, write_8x1_pixels, write_8x2_pixels,
- dst, dst_pitch, height, filter, bd);
-}
-
-static void filter_block_width16_horiz(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- const WritePixels write_16x1,
- uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter,
- int bd) {
__m256i signal[8], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -413,23 +376,17 @@ static void filter_block_width16_horiz(const uint16_t *src_ptr,
src_ptr -= 3;
do {
- pack_16x1_pixels(src_ptr, src_pitch, signal);
+ pack_16x1_pixels(src_ptr, signal);
filter_8x1_pixels(signal, ff, &res0);
filter_8x1_pixels(&signal[4], ff, &res1);
- write_16x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_16x1_pixels(&res0, &res1, &max, dst_ptr);
height -= 1;
src_ptr += src_pitch;
dst_ptr += dst_pitch;
} while (height > 0);
}
-static void aom_highbd_filter_block1d16_h8_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_horiz(src, src_pitch, write_16x1_pixels, dst, dst_pitch,
- height, filter, bd);
-}
-
+// -----------------------------------------------------------------------------
// 2-tap horizontal filtering
static INLINE void pack_2t_filter(const int16_t *filter, __m256i *f) {
@@ -493,16 +450,6 @@ static INLINE void filter_16_2t_pixels(const __m256i *sig, const __m256i *f,
*y1 = _mm256_srai_epi32(x1, CONV8_ROUNDING_BITS);
}
-static INLINE void filter_8x2_2t_pixels(const __m256i *sig, const __m256i *f,
- __m256i *y0, __m256i *y1) {
- filter_16_2t_pixels(sig, f, y0, y1);
-}
-
-static INLINE void filter_16x1_2t_pixels(const __m256i *sig, const __m256i *f,
- __m256i *y0, __m256i *y1) {
- filter_16_2t_pixels(sig, f, y0, y1);
-}
-
static INLINE void filter_8x1_2t_pixels(const __m256i *sig, const __m256i *f,
__m256i *y0) {
const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
@@ -511,10 +458,9 @@ static INLINE void filter_8x1_2t_pixels(const __m256i *sig, const __m256i *f,
*y0 = _mm256_srai_epi32(x0, CONV8_ROUNDING_BITS);
}
-static void filter_block_width8_2t_horiz(
- const uint16_t *src_ptr, ptrdiff_t src_pitch, const WritePixels write_8x1,
- const WritePixels write_8x2, uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter, int bd) {
+static void aom_highbd_filter_block1d8_h2_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[2], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -524,8 +470,8 @@ static void filter_block_width8_2t_horiz(
src_ptr -= 3;
do {
pack_8x2_2t_pixels(src_ptr, src_pitch, signal);
- filter_8x2_2t_pixels(signal, &ff, &res0, &res1);
- write_8x2(&res0, &res1, &max, dst_ptr, dst_pitch);
+ filter_16_2t_pixels(signal, &ff, &res0, &res1);
+ store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
height -= 2;
src_ptr += src_pitch << 1;
dst_ptr += dst_pitch << 1;
@@ -534,24 +480,13 @@ static void filter_block_width8_2t_horiz(
if (height > 0) {
pack_8x1_2t_pixels(src_ptr, signal);
filter_8x1_2t_pixels(signal, &ff, &res0);
- write_8x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_8x1_pixels(&res0, &max, dst_ptr);
}
}
-static void aom_highbd_filter_block1d8_h2_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+static void aom_highbd_filter_block1d16_h2_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_2t_horiz(src, src_pitch, write_8x1_pixels,
- write_8x2_pixels, dst, dst_pitch, height, filter,
- bd);
-}
-
-static void filter_block_width16_2t_horiz(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- const WritePixels write_16x1,
- uint16_t *dst_ptr,
- ptrdiff_t dst_pitch, uint32_t height,
- const int16_t *filter, int bd) {
__m256i signal[2], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -561,21 +496,15 @@ static void filter_block_width16_2t_horiz(const uint16_t *src_ptr,
src_ptr -= 3;
do {
pack_16x1_2t_pixels(src_ptr, signal);
- filter_16x1_2t_pixels(signal, &ff, &res0, &res1);
- write_16x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ filter_16_2t_pixels(signal, &ff, &res0, &res1);
+ store_16x1_pixels(&res0, &res1, &max, dst_ptr);
height -= 1;
src_ptr += src_pitch;
dst_ptr += dst_pitch;
} while (height > 0);
}
-static void aom_highbd_filter_block1d16_h2_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_2t_horiz(src, src_pitch, write_16x1_pixels, dst,
- dst_pitch, height, filter, bd);
-}
-
+// -----------------------------------------------------------------------------
// Vertical Filtering
static void pack_8x9_init(const uint16_t *src, ptrdiff_t pitch, __m256i *sig) {
@@ -638,22 +567,9 @@ static INLINE void update_pixels(__m256i *sig) {
}
}
-static INLINE void write_8x1_pixels_ver(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)pitch;
- const __m128i v0 = _mm256_castsi256_si128(*y0);
- const __m128i v1 = _mm256_castsi256_si128(*y1);
- __m128i p = _mm_packus_epi32(v0, v1);
- p = _mm_min_epi16(p, _mm256_castsi256_si128(*mask));
- _mm_storeu_si128((__m128i *)dst, p);
-}
-
-static void filter_block_width8_vert(const uint16_t *src_ptr,
- ptrdiff_t src_pitch, WritePixels write_8x1,
- WritePixels write_8x2, uint16_t *dst_ptr,
- ptrdiff_t dst_pitch, uint32_t height,
- const int16_t *filter, int bd) {
+static void aom_highbd_filter_block1d8_v8_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[9], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -666,27 +582,13 @@ static void filter_block_width8_vert(const uint16_t *src_ptr,
pack_8x9_pixels(src_ptr, src_pitch, signal);
filter_8x9_pixels(signal, ff, &res0, &res1);
- write_8x2(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
update_pixels(signal);
src_ptr += src_pitch << 1;
dst_ptr += dst_pitch << 1;
height -= 2;
- } while (height > 1);
-
- if (height > 0) {
- pack_8x9_pixels(src_ptr, src_pitch, signal);
- filter_8x9_pixels(signal, ff, &res0, &res1);
- write_8x1(&res0, &res1, &max, dst_ptr, dst_pitch);
- }
-}
-
-static void aom_highbd_filter_block1d8_v8_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_vert(src, src_pitch, write_8x1_pixels_ver,
- write_8x2_pixels, dst, dst_pitch, height, filter,
- bd);
+ } while (height > 0);
}
static void pack_16x9_init(const uint16_t *src, ptrdiff_t pitch, __m256i *sig) {
@@ -770,13 +672,15 @@ static INLINE void filter_16x9_pixels(const __m256i *sig, const __m256i *f,
filter_8x1_pixels(&sig[i << 2], f, &res[i]);
}
- const __m256i l0l1 = _mm256_packus_epi32(res[0], res[1]);
- const __m256i h0h1 = _mm256_packus_epi32(res[2], res[3]);
- *y0 = _mm256_permute2x128_si256(l0l1, h0h1, 0x20);
- *y1 = _mm256_permute2x128_si256(l0l1, h0h1, 0x31);
+ {
+ const __m256i l0l1 = _mm256_packus_epi32(res[0], res[1]);
+ const __m256i h0h1 = _mm256_packus_epi32(res[2], res[3]);
+ *y0 = _mm256_permute2x128_si256(l0l1, h0h1, 0x20);
+ *y1 = _mm256_permute2x128_si256(l0l1, h0h1, 0x31);
+ }
}
-static INLINE void write_16x2_pixels(const __m256i *y0, const __m256i *y1,
+static INLINE void store_16x2_pixels(const __m256i *y0, const __m256i *y1,
const __m256i *mask, uint16_t *dst,
ptrdiff_t pitch) {
__m256i p = _mm256_min_epi16(*y0, *mask);
@@ -785,26 +689,14 @@ static INLINE void write_16x2_pixels(const __m256i *y0, const __m256i *y1,
_mm256_storeu_si256((__m256i *)(dst + pitch), p);
}
-static INLINE void write_16x1_pixels_ver(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)y1;
- (void)pitch;
- const __m256i p = _mm256_min_epi16(*y0, *mask);
- _mm256_storeu_si256((__m256i *)dst, p);
-}
-
static void update_16x9_pixels(__m256i *sig) {
update_pixels(&sig[0]);
update_pixels(&sig[8]);
}
-static void filter_block_width16_vert(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- WritePixels write_16x1,
- WritePixels write_16x2, uint16_t *dst_ptr,
- ptrdiff_t dst_pitch, uint32_t height,
- const int16_t *filter, int bd) {
+static void aom_highbd_filter_block1d16_v8_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[17], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
@@ -816,29 +708,16 @@ static void filter_block_width16_vert(const uint16_t *src_ptr,
do {
pack_16x9_pixels(src_ptr, src_pitch, signal);
filter_16x9_pixels(signal, ff, &res0, &res1);
- write_16x2(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_16x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
update_16x9_pixels(signal);
src_ptr += src_pitch << 1;
dst_ptr += dst_pitch << 1;
height -= 2;
- } while (height > 1);
-
- if (height > 0) {
- pack_16x9_pixels(src_ptr, src_pitch, signal);
- filter_16x9_pixels(signal, ff, &res0, &res1);
- write_16x1(&res0, &res1, &max, dst_ptr, dst_pitch);
- }
-}
-
-static void aom_highbd_filter_block1d16_v8_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_vert(src, src_pitch, write_16x1_pixels_ver,
- write_16x2_pixels, dst, dst_pitch, height, filter,
- bd);
+ } while (height > 0);
}
+// -----------------------------------------------------------------------------
// 2-tap vertical filtering
static void pack_16x2_init(const uint16_t *src, __m256i *sig) {
@@ -859,12 +738,9 @@ static INLINE void filter_16x2_2t_pixels(const __m256i *sig, const __m256i *f,
filter_16_2t_pixels(sig, f, y0, y1);
}
-static void filter_block_width16_2t_vert(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- WritePixels write_16x1,
- uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter,
- int bd) {
+static void aom_highbd_filter_block1d16_v2_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m256i signal[3], res0, res1;
const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
__m256i ff;
@@ -875,7 +751,7 @@ static void filter_block_width16_2t_vert(const uint16_t *src_ptr,
do {
pack_16x2_2t_pixels(src_ptr, src_pitch, signal);
filter_16x2_2t_pixels(signal, &ff, &res0, &res1);
- write_16x1(&res0, &res1, &max, dst_ptr, dst_pitch);
+ store_16x1_pixels(&res0, &res1, &max, dst_ptr);
src_ptr += src_pitch;
dst_ptr += dst_pitch;
@@ -883,13 +759,6 @@ static void filter_block_width16_2t_vert(const uint16_t *src_ptr,
} while (height > 0);
}
-static void aom_highbd_filter_block1d16_v2_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_2t_vert(src, src_pitch, write_16x1_pixels, dst,
- dst_pitch, height, filter, bd);
-}
-
static INLINE void pack_8x1_2t_filter(const int16_t *filter, __m128i *f) {
const __m128i h = _mm_loadu_si128((const __m128i *)filter);
const __m128i p = _mm_set1_epi32(0x09080706);
@@ -920,22 +789,16 @@ static INLINE void filter_8_2t_pixels(const __m128i *sig, const __m128i *f,
*y1 = _mm_srai_epi32(x1, CONV8_ROUNDING_BITS);
}
-static void write_8x1_2t_pixels_ver(const __m128i *y0, const __m128i *y1,
- const __m128i *mask, uint16_t *dst) {
+static INLINE void store_8x1_2t_pixels_ver(const __m128i *y0, const __m128i *y1,
+ const __m128i *mask, uint16_t *dst) {
__m128i res = _mm_packus_epi32(*y0, *y1);
res = _mm_min_epi16(res, *mask);
_mm_storeu_si128((__m128i *)dst, res);
}
-typedef void (*Write8Pixels)(const __m128i *y0, const __m128i *y1,
- const __m128i *mask, uint16_t *dst);
-
-static void filter_block_width8_2t_vert(const uint16_t *src_ptr,
- ptrdiff_t src_pitch,
- Write8Pixels write_8x1,
- uint16_t *dst_ptr, ptrdiff_t dst_pitch,
- uint32_t height, const int16_t *filter,
- int bd) {
+static void aom_highbd_filter_block1d8_v2_avx2(
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+ ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
__m128i signal[3], res0, res1;
const __m128i max = _mm_set1_epi16((1 << bd) - 1);
__m128i ff;
@@ -946,7 +809,7 @@ static void filter_block_width8_2t_vert(const uint16_t *src_ptr,
do {
pack_8x2_2t_pixels_ver(src_ptr, src_pitch, signal);
filter_8_2t_pixels(signal, &ff, &res0, &res1);
- write_8x1(&res0, &res1, &max, dst_ptr);
+ store_8x1_2t_pixels_ver(&res0, &res1, &max, dst_ptr);
src_ptr += src_pitch;
dst_ptr += dst_pitch;
@@ -954,20 +817,10 @@ static void filter_block_width8_2t_vert(const uint16_t *src_ptr,
} while (height > 0);
}
-static void aom_highbd_filter_block1d8_v2_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
- ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_2t_vert(src, src_pitch, write_8x1_2t_pixels_ver, dst,
- dst_pitch, height, filter, bd);
-}
-
// Calculation with averaging the input pixels
-static void write_8x1_avg_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)y1;
- (void)pitch;
+static INLINE void store_8x1_avg_pixels(const __m256i *y0, const __m256i *mask,
+ uint16_t *dst) {
const __m128i a0 = _mm256_castsi256_si128(*y0);
const __m128i a1 = _mm256_extractf128_si256(*y0, 1);
__m128i res = _mm_packus_epi32(a0, a1);
@@ -977,9 +830,9 @@ static void write_8x1_avg_pixels(const __m256i *y0, const __m256i *y1,
_mm_storeu_si128((__m128i *)dst, res);
}
-static void write_8x2_avg_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
+static INLINE void store_8x2_avg_pixels(const __m256i *y0, const __m256i *y1,
+ const __m256i *mask, uint16_t *dst,
+ ptrdiff_t pitch) {
__m256i a = _mm256_packus_epi32(*y0, *y1);
const __m128i pix0 = _mm_loadu_si128((const __m128i *)dst);
const __m128i pix1 = _mm_loadu_si128((const __m128i *)(dst + pitch));
@@ -991,10 +844,8 @@ static void write_8x2_avg_pixels(const __m256i *y0, const __m256i *y1,
_mm_storeu_si128((__m128i *)(dst + pitch), _mm256_extractf128_si256(a, 1));
}
-static void write_16x1_avg_pixels(const __m256i *y0, const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)pitch;
+static INLINE void store_16x1_avg_pixels(const __m256i *y0, const __m256i *y1,
+ const __m256i *mask, uint16_t *dst) {
__m256i a = _mm256_packus_epi32(*y0, *y1);
const __m256i pix = _mm256_loadu_si256((const __m256i *)dst);
a = _mm256_min_epi16(a, *mask);
@@ -1002,21 +853,7 @@ static void write_16x1_avg_pixels(const __m256i *y0, const __m256i *y1,
_mm256_storeu_si256((__m256i *)dst, a);
}
-static INLINE void write_8x1_avg_pixels_ver(const __m256i *y0,
- const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)pitch;
- const __m128i v0 = _mm256_castsi256_si128(*y0);
- const __m128i v1 = _mm256_castsi256_si128(*y1);
- __m128i p = _mm_packus_epi32(v0, v1);
- const __m128i pix = _mm_loadu_si128((const __m128i *)dst);
- p = _mm_min_epi16(p, _mm256_castsi256_si128(*mask));
- p = _mm_avg_epu16(p, pix);
- _mm_storeu_si128((__m128i *)dst, p);
-}
-
-static INLINE void write_16x2_avg_pixels(const __m256i *y0, const __m256i *y1,
+static INLINE void store_16x2_avg_pixels(const __m256i *y0, const __m256i *y1,
const __m256i *mask, uint16_t *dst,
ptrdiff_t pitch) {
const __m256i pix0 = _mm256_loadu_si256((const __m256i *)dst);
@@ -1030,20 +867,10 @@ static INLINE void write_16x2_avg_pixels(const __m256i *y0, const __m256i *y1,
_mm256_storeu_si256((__m256i *)(dst + pitch), p);
}
-static INLINE void write_16x1_avg_pixels_ver(const __m256i *y0,
- const __m256i *y1,
- const __m256i *mask, uint16_t *dst,
- ptrdiff_t pitch) {
- (void)y1;
- (void)pitch;
- __m256i p = _mm256_min_epi16(*y0, *mask);
- const __m256i pix = _mm256_loadu_si256((const __m256i *)dst);
- p = _mm256_avg_epu16(p, pix);
- _mm256_storeu_si256((__m256i *)dst, p);
-}
-
-static void write_8x1_2t_avg_pixels_ver(const __m128i *y0, const __m128i *y1,
- const __m128i *mask, uint16_t *dst) {
+static INLINE void store_8x1_2t_avg_pixels_ver(const __m128i *y0,
+ const __m128i *y1,
+ const __m128i *mask,
+ uint16_t *dst) {
__m128i res = _mm_packus_epi32(*y0, *y1);
const __m128i pix = _mm_loadu_si128((const __m128i *)dst);
res = _mm_min_epi16(res, *mask);
@@ -1052,96 +879,229 @@ static void write_8x1_2t_avg_pixels_ver(const __m128i *y0, const __m128i *y1,
}
static void aom_highbd_filter_block1d8_h8_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_horiz(src, src_pitch, write_8x1_avg_pixels,
- write_8x2_avg_pixels, dst, dst_pitch, height,
- filter, bd);
+ __m256i signal[8], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff[4];
+ pack_filters(filter, ff);
+
+ src_ptr -= 3;
+ do {
+ pack_8x2_pixels(src_ptr, src_pitch, signal);
+ filter_8x1_pixels(signal, ff, &res0);
+ filter_8x1_pixels(&signal[4], ff, &res1);
+ store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
+ height -= 2;
+ src_ptr += src_pitch << 1;
+ dst_ptr += dst_pitch << 1;
+ } while (height > 1);
+
+ if (height > 0) {
+ pack_8x1_pixels(src_ptr, signal);
+ filter_8x1_pixels(signal, ff, &res0);
+ store_8x1_avg_pixels(&res0, &max, dst_ptr);
+ }
}
static void aom_highbd_filter_block1d16_h8_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_horiz(src, src_pitch, write_16x1_avg_pixels, dst,
- dst_pitch, height, filter, bd);
+ __m256i signal[8], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff[4];
+ pack_filters(filter, ff);
+
+ src_ptr -= 3;
+ do {
+ pack_16x1_pixels(src_ptr, signal);
+ filter_8x1_pixels(signal, ff, &res0);
+ filter_8x1_pixels(&signal[4], ff, &res1);
+ store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
+ height -= 1;
+ src_ptr += src_pitch;
+ dst_ptr += dst_pitch;
+ } while (height > 0);
}
static void aom_highbd_filter_block1d8_v8_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_vert(src, src_pitch, write_8x1_avg_pixels_ver,
- write_8x2_avg_pixels, dst, dst_pitch, height, filter,
- bd);
+ __m256i signal[9], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff[4];
+ pack_filters(filter, ff);
+
+ pack_8x9_init(src_ptr, src_pitch, signal);
+
+ do {
+ pack_8x9_pixels(src_ptr, src_pitch, signal);
+
+ filter_8x9_pixels(signal, ff, &res0, &res1);
+ store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
+ update_pixels(signal);
+
+ src_ptr += src_pitch << 1;
+ dst_ptr += dst_pitch << 1;
+ height -= 2;
+ } while (height > 0);
}
static void aom_highbd_filter_block1d16_v8_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_vert(src, src_pitch, write_16x1_avg_pixels_ver,
- write_16x2_avg_pixels, dst, dst_pitch, height,
- filter, bd);
-}
+ __m256i signal[17], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff[4];
+ pack_filters(filter, ff);
+
+ pack_16x9_init(src_ptr, src_pitch, signal);
+
+ do {
+ pack_16x9_pixels(src_ptr, src_pitch, signal);
+ filter_16x9_pixels(signal, ff, &res0, &res1);
+ store_16x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
+ update_16x9_pixels(signal);
-// 2-tap averaging
+ src_ptr += src_pitch << 1;
+ dst_ptr += dst_pitch << 1;
+ height -= 2;
+ } while (height > 0);
+}
static void aom_highbd_filter_block1d8_h2_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_2t_horiz(src, src_pitch, write_8x1_avg_pixels,
- write_8x2_avg_pixels, dst, dst_pitch, height,
- filter, bd);
+ __m256i signal[2], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff;
+ pack_2t_filter(filter, &ff);
+
+ src_ptr -= 3;
+ do {
+ pack_8x2_2t_pixels(src_ptr, src_pitch, signal);
+ filter_16_2t_pixels(signal, &ff, &res0, &res1);
+ store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
+ height -= 2;
+ src_ptr += src_pitch << 1;
+ dst_ptr += dst_pitch << 1;
+ } while (height > 1);
+
+ if (height > 0) {
+ pack_8x1_2t_pixels(src_ptr, signal);
+ filter_8x1_2t_pixels(signal, &ff, &res0);
+ store_8x1_avg_pixels(&res0, &max, dst_ptr);
+ }
}
static void aom_highbd_filter_block1d16_h2_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_2t_horiz(src, src_pitch, write_16x1_avg_pixels, dst,
- dst_pitch, height, filter, bd);
+ __m256i signal[2], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+
+ __m256i ff;
+ pack_2t_filter(filter, &ff);
+
+ src_ptr -= 3;
+ do {
+ pack_16x1_2t_pixels(src_ptr, signal);
+ filter_16_2t_pixels(signal, &ff, &res0, &res1);
+ store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
+ height -= 1;
+ src_ptr += src_pitch;
+ dst_ptr += dst_pitch;
+ } while (height > 0);
}
static void aom_highbd_filter_block1d16_v2_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width16_2t_vert(src, src_pitch, write_16x1_avg_pixels, dst,
- dst_pitch, height, filter, bd);
+ __m256i signal[3], res0, res1;
+ const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
+ __m256i ff;
+
+ pack_2t_filter(filter, &ff);
+ pack_16x2_init(src_ptr, signal);
+
+ do {
+ pack_16x2_2t_pixels(src_ptr, src_pitch, signal);
+ filter_16x2_2t_pixels(signal, &ff, &res0, &res1);
+ store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
+
+ src_ptr += src_pitch;
+ dst_ptr += dst_pitch;
+ height -= 1;
+ } while (height > 0);
}
static void aom_highbd_filter_block1d8_v2_avg_avx2(
- const uint16_t *src, ptrdiff_t src_pitch, uint16_t *dst,
+ const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
- filter_block_width8_2t_vert(src, src_pitch, write_8x1_2t_avg_pixels_ver, dst,
- dst_pitch, height, filter, bd);
-}
+ __m128i signal[3], res0, res1;
+ const __m128i max = _mm_set1_epi16((1 << bd) - 1);
+ __m128i ff;
-typedef void HbdFilter1dFunc(const uint16_t *, ptrdiff_t, uint16_t *, ptrdiff_t,
- uint32_t, const int16_t *, int);
+ pack_8x1_2t_filter(filter, &ff);
+ pack_8x2_init(src_ptr, signal);
-#define HIGHBD_FUNC(width, dir, avg, opt) \
- aom_highbd_filter_block1d##width##_##dir##_##avg##opt
+ do {
+ pack_8x2_2t_pixels_ver(src_ptr, src_pitch, signal);
+ filter_8_2t_pixels(signal, &ff, &res0, &res1);
+ store_8x1_2t_avg_pixels_ver(&res0, &res1, &max, dst_ptr);
-HbdFilter1dFunc HIGHBD_FUNC(4, h8, , sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, h2, , sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, v8, , sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, v2, , sse2);
+ src_ptr += src_pitch;
+ dst_ptr += dst_pitch;
+ height -= 1;
+ } while (height > 0);
+}
-#define aom_highbd_filter_block1d4_h8_avx2 HIGHBD_FUNC(4, h8, , sse2)
-#define aom_highbd_filter_block1d4_h2_avx2 HIGHBD_FUNC(4, h2, , sse2)
-#define aom_highbd_filter_block1d4_v8_avx2 HIGHBD_FUNC(4, v8, , sse2)
-#define aom_highbd_filter_block1d4_v2_avx2 HIGHBD_FUNC(4, v2, , sse2)
+void aom_highbd_filter_block1d4_h8_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
+ ptrdiff_t, uint32_t, const int16_t *,
+ int);
+void aom_highbd_filter_block1d4_h2_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
+ ptrdiff_t, uint32_t, const int16_t *,
+ int);
+void aom_highbd_filter_block1d4_v8_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
+ ptrdiff_t, uint32_t, const int16_t *,
+ int);
+void aom_highbd_filter_block1d4_v2_sse2(const uint16_t *, ptrdiff_t, uint16_t *,
+ ptrdiff_t, uint32_t, const int16_t *,
+ int);
+#define aom_highbd_filter_block1d4_h8_avx2 aom_highbd_filter_block1d4_h8_sse2
+#define aom_highbd_filter_block1d4_h2_avx2 aom_highbd_filter_block1d4_h2_sse2
+#define aom_highbd_filter_block1d4_v8_avx2 aom_highbd_filter_block1d4_v8_sse2
+#define aom_highbd_filter_block1d4_v2_avx2 aom_highbd_filter_block1d4_v2_sse2
HIGH_FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , avx2);
HIGH_FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2);
HIGH_FUN_CONV_2D(, avx2);
-HbdFilter1dFunc HIGHBD_FUNC(4, h8, avg_, sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, h2, avg_, sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, v8, avg_, sse2);
-HbdFilter1dFunc HIGHBD_FUNC(4, v2, avg_, sse2);
-
-#define aom_highbd_filter_block1d4_h8_avg_avx2 HIGHBD_FUNC(4, h8, avg_, sse2)
-#define aom_highbd_filter_block1d4_h2_avg_avx2 HIGHBD_FUNC(4, h2, avg_, sse2)
-#define aom_highbd_filter_block1d4_v8_avg_avx2 HIGHBD_FUNC(4, v8, avg_, sse2)
-#define aom_highbd_filter_block1d4_v2_avg_avx2 HIGHBD_FUNC(4, v2, avg_, sse2)
+void aom_highbd_filter_block1d4_h8_avg_sse2(const uint16_t *, ptrdiff_t,
+ uint16_t *, ptrdiff_t, uint32_t,
+ const int16_t *, int);
+void aom_highbd_filter_block1d4_h2_avg_sse2(const uint16_t *, ptrdiff_t,
+ uint16_t *, ptrdiff_t, uint32_t,
+ const int16_t *, int);
+void aom_highbd_filter_block1d4_v8_avg_sse2(const uint16_t *, ptrdiff_t,
+ uint16_t *, ptrdiff_t, uint32_t,
+ const int16_t *, int);
+void aom_highbd_filter_block1d4_v2_avg_sse2(const uint16_t *, ptrdiff_t,
+ uint16_t *, ptrdiff_t, uint32_t,
+ const int16_t *, int);
+#define aom_highbd_filter_block1d4_h8_avg_avx2 \
+ aom_highbd_filter_block1d4_h8_avg_sse2
+#define aom_highbd_filter_block1d4_h2_avg_avx2 \
+ aom_highbd_filter_block1d4_h2_avg_sse2
+#define aom_highbd_filter_block1d4_v8_avg_avx2 \
+ aom_highbd_filter_block1d4_v8_avg_sse2
+#define aom_highbd_filter_block1d4_v2_avg_avx2 \
+ aom_highbd_filter_block1d4_v2_avg_sse2
HIGH_FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, avx2);
HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
                 avx2);
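
The store helpers introduced throughout this file share one pattern: pack the 32-bit filter results with unsigned saturation, then take the minimum against (1 << bd) - 1 so 10- and 12-bit outputs stay in range. A per-pixel scalar sketch of that clamp follows, under the assumption stated in the comment; the helper name is hypothetical.

#include <stdint.h>

// Per-lane model of _mm256_packus_epi32 followed by _mm256_min_epi16(a, max)
// with max = (1 << bd) - 1, assuming the rounded filter output stays below
// 2^15 so the signed 16-bit min behaves as an unsigned clamp.
static uint16_t clamp_pixel(int32_t v, int bd) {
  const int32_t max = (1 << bd) - 1;
  if (v < 0) return 0;                    // packus saturates negatives to zero
  return (uint16_t)(v > max ? max : v);   // min against the bit-depth limit
}
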
diff --git a/third_party/aom/aom_dsp/x86/inv_txfm_avx2.c b/third_party/aom/aom_dsp/x86/inv_txfm_avx2.c
new file mode 100644
index 000000000..a9d6a127c
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/inv_txfm_avx2.c
@@ -0,0 +1,1238 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/inv_txfm.h"
+#include "aom_dsp/x86/inv_txfm_common_avx2.h"
+#include "aom_dsp/x86/txfm_common_avx2.h"
+
+void aom_idct16x16_256_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[16];
+ load_buffer_16x16(input, in);
+ mm256_transpose_16x16(in, in);
+ av1_idct16_avx2(in);
+ mm256_transpose_16x16(in, in);
+ av1_idct16_avx2(in);
+ store_buffer_16xN(in, stride, dest, 16);
+}
+
+static INLINE void transpose_col_to_row_nz4x4(__m256i *in /*in[4]*/) {
+ const __m256i u0 = _mm256_unpacklo_epi16(in[0], in[1]);
+ const __m256i u1 = _mm256_unpacklo_epi16(in[2], in[3]);
+ const __m256i v0 = _mm256_unpacklo_epi32(u0, u1);
+ const __m256i v1 = _mm256_unpackhi_epi32(u0, u1);
+ in[0] = _mm256_permute4x64_epi64(v0, 0xA8);
+ in[1] = _mm256_permute4x64_epi64(v0, 0xA9);
+ in[2] = _mm256_permute4x64_epi64(v1, 0xA8);
+ in[3] = _mm256_permute4x64_epi64(v1, 0xA9);
+}
+
+#define MM256_SHUFFLE_EPI64(x0, x1, imm8) \
+ _mm256_castpd_si256(_mm256_shuffle_pd(_mm256_castsi256_pd(x0), \
+ _mm256_castsi256_pd(x1), imm8))
+
+static INLINE void transpose_col_to_row_nz4x16(__m256i *in /*in[16]*/) {
+ int i;
+ for (i = 0; i < 16; i += 4) {
+ transpose_col_to_row_nz4x4(&in[i]);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ in[i] = MM256_SHUFFLE_EPI64(in[i], in[i + 4], 0);
+ in[i + 8] = MM256_SHUFFLE_EPI64(in[i + 8], in[i + 12], 0);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ in[i] = _mm256_permute2x128_si256(in[i], in[i + 8], 0x20);
+ }
+}
+
+// Coefficients 0-7 before the final butterfly
+static INLINE void idct16_10_first_half(const __m256i *in, __m256i *out) {
+ const __m256i c2p28 = pair256_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m256i c2p04 = pair256_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+ const __m256i v4 = _mm256_mulhrs_epi16(in[2], c2p28);
+ const __m256i v7 = _mm256_mulhrs_epi16(in[2], c2p04);
+
+ const __m256i c2p16 = pair256_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m256i v0 = _mm256_mulhrs_epi16(in[0], c2p16);
+ const __m256i v1 = v0;
+
+ const __m256i cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+ const __m256i cospi_p16_m16 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ __m256i v5, v6;
+ unpack_butter_fly(&v7, &v4, &cospi_p16_m16, &cospi_p16_p16, &v5, &v6);
+
+ out[0] = _mm256_add_epi16(v0, v7);
+ out[1] = _mm256_add_epi16(v1, v6);
+ out[2] = _mm256_add_epi16(v1, v5);
+ out[3] = _mm256_add_epi16(v0, v4);
+ out[4] = _mm256_sub_epi16(v0, v4);
+ out[5] = _mm256_sub_epi16(v1, v5);
+ out[6] = _mm256_sub_epi16(v1, v6);
+ out[7] = _mm256_sub_epi16(v0, v7);
+}
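A multiplication by a single cosine constant is done here (and throughout this file) with _mm256_mulhrs_epi16 and a pre-doubled constant, the c2pXX / c2mXX values above. A minimal scalar sketch of why this matches the C reference's dct_const_round_shift, assuming the cospi_*_64 constants are Q14 fixed-point values and DCT_CONST_BITS == 14 as in aom_dsp/txfm_common.h (the helper name below is only for illustration):

#include <stdint.h>

// _mm256_mulhrs_epi16(x, 2 * c) computes ((x * 2 * c) + (1 << 14)) >> 15 per
// lane, which equals (x * c + 8192) >> 14, i.e. dct_const_round_shift(x * c)
// when c is a Q14 cospi constant (assumption: c fits in 14 bits, so 2 * c
// still fits in an int16_t).
static int16_t mulhrs_by_cospi(int16_t x, int16_t cospi_q14) {
  const int32_t p = (int32_t)x * (2 * cospi_q14);
  return (int16_t)((p + (1 << 14)) >> 15);
}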
+
+// Coefficients 8-15 before the final butterfly
+static INLINE void idct16_10_second_half(const __m256i *in, __m256i *out) {
+ const __m256i c2p30 = pair256_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m256i c2p02 = pair256_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ const __m256i t0 = _mm256_mulhrs_epi16(in[1], c2p30);
+ const __m256i t7 = _mm256_mulhrs_epi16(in[1], c2p02);
+
+ const __m256i c2m26 = pair256_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m256i c2p06 = pair256_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+ const __m256i t3 = _mm256_mulhrs_epi16(in[3], c2m26);
+ const __m256i t4 = _mm256_mulhrs_epi16(in[3], c2p06);
+
+ const __m256i cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i cospi_p24_p08 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ __m256i t1, t2, t5, t6;
+ unpack_butter_fly(&t0, &t7, &cospi_m08_p24, &cospi_p24_p08, &t1, &t6);
+ unpack_butter_fly(&t3, &t4, &cospi_m24_m08, &cospi_m08_p24, &t2, &t5);
+
+ out[0] = _mm256_add_epi16(t0, t3);
+ out[1] = _mm256_add_epi16(t1, t2);
+ out[6] = _mm256_add_epi16(t6, t5);
+ out[7] = _mm256_add_epi16(t7, t4);
+
+ const __m256i v2 = _mm256_sub_epi16(t1, t2);
+ const __m256i v3 = _mm256_sub_epi16(t0, t3);
+ const __m256i v4 = _mm256_sub_epi16(t7, t4);
+ const __m256i v5 = _mm256_sub_epi16(t6, t5);
+ const __m256i cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+ const __m256i cospi_p16_m16 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ unpack_butter_fly(&v5, &v2, &cospi_p16_m16, &cospi_p16_p16, &out[2], &out[5]);
+ unpack_butter_fly(&v4, &v3, &cospi_p16_m16, &cospi_p16_p16, &out[3], &out[4]);
+}
+
+static INLINE void add_sub_butterfly(const __m256i *in, __m256i *out,
+ int size) {
+ int i = 0;
+ const int num = size >> 1;
+ const int bound = size - 1;
+ while (i < num) {
+ out[i] = _mm256_add_epi16(in[i], in[bound - i]);
+ out[bound - i] = _mm256_sub_epi16(in[i], in[bound - i]);
+ i++;
+ }
+}
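add_sub_butterfly is the mirrored add/subtract used as the final stage of each IDCT pass. A scalar sketch of the same data flow (the helper below is hypothetical, shown only to spell out the indexing):

#include <stdint.h>

// out[i]            = in[i] + in[size - 1 - i]
// out[size - 1 - i] = in[i] - in[size - 1 - i]
static void add_sub_butterfly_scalar(const int16_t *in, int16_t *out,
                                     int size) {
  for (int i = 0; i < size / 2; ++i) {
    out[i] = (int16_t)(in[i] + in[size - 1 - i]);
    out[size - 1 - i] = (int16_t)(in[i] - in[size - 1 - i]);
  }
}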
+
+static INLINE void idct16_10(__m256i *in /*in[16]*/) {
+ __m256i out[16];
+ idct16_10_first_half(in, out);
+ idct16_10_second_half(in, &out[8]);
+ add_sub_butterfly(out, in, 16);
+}
+
+void aom_idct16x16_10_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[16];
+
+ load_coeff(input, &in[0]);
+ load_coeff(input + 16, &in[1]);
+ load_coeff(input + 32, &in[2]);
+ load_coeff(input + 48, &in[3]);
+
+ transpose_col_to_row_nz4x4(in);
+ idct16_10(in);
+
+ transpose_col_to_row_nz4x16(in);
+ idct16_10(in);
+
+ store_buffer_16xN(in, stride, dest, 16);
+}
+
+// Note:
+// For a 16x16 int16_t matrix, transpose the first 8 columns into the first
+// 8 rows. Since only the upper-left 8x8 block is non-zero, the inputs are
+// the first 8 rows (in[8]). After transposing, the 8 row vectors are in
+// in[8].
+void transpose_col_to_row_nz8x8(__m256i *in /*in[8]*/) {
+ __m256i u0 = _mm256_unpacklo_epi16(in[0], in[1]);
+ __m256i u1 = _mm256_unpackhi_epi16(in[0], in[1]);
+ __m256i u2 = _mm256_unpacklo_epi16(in[2], in[3]);
+ __m256i u3 = _mm256_unpackhi_epi16(in[2], in[3]);
+
+ const __m256i v0 = _mm256_unpacklo_epi32(u0, u2);
+ const __m256i v1 = _mm256_unpackhi_epi32(u0, u2);
+ const __m256i v2 = _mm256_unpacklo_epi32(u1, u3);
+ const __m256i v3 = _mm256_unpackhi_epi32(u1, u3);
+
+ u0 = _mm256_unpacklo_epi16(in[4], in[5]);
+ u1 = _mm256_unpackhi_epi16(in[4], in[5]);
+ u2 = _mm256_unpacklo_epi16(in[6], in[7]);
+ u3 = _mm256_unpackhi_epi16(in[6], in[7]);
+
+ const __m256i v4 = _mm256_unpacklo_epi32(u0, u2);
+ const __m256i v5 = _mm256_unpackhi_epi32(u0, u2);
+ const __m256i v6 = _mm256_unpacklo_epi32(u1, u3);
+ const __m256i v7 = _mm256_unpackhi_epi32(u1, u3);
+
+ in[0] = MM256_SHUFFLE_EPI64(v0, v4, 0);
+ in[1] = MM256_SHUFFLE_EPI64(v0, v4, 3);
+ in[2] = MM256_SHUFFLE_EPI64(v1, v5, 0);
+ in[3] = MM256_SHUFFLE_EPI64(v1, v5, 3);
+ in[4] = MM256_SHUFFLE_EPI64(v2, v6, 0);
+ in[5] = MM256_SHUFFLE_EPI64(v2, v6, 3);
+ in[6] = MM256_SHUFFLE_EPI64(v3, v7, 0);
+ in[7] = MM256_SHUFFLE_EPI64(v3, v7, 3);
+}
+
+// Note:
+// For a 16x16 int16_t matrix, transpose the first 8 columns into the first
+// 8 rows. Since only the left 8x16 half of the matrix is non-zero, the
+// inputs are all 16 rows (in[16]). After transposing, the 8 row vectors are
+// in in[8]; everything else is zero.
+static INLINE void transpose_col_to_row_nz8x16(__m256i *in /*in[16]*/) {
+ transpose_col_to_row_nz8x8(in);
+ transpose_col_to_row_nz8x8(&in[8]);
+
+ int i;
+ for (i = 0; i < 8; ++i) {
+ in[i] = _mm256_permute2x128_si256(in[i], in[i + 8], 0x20);
+ }
+}
+
+static INLINE void idct16_38_first_half(const __m256i *in, __m256i *out) {
+ const __m256i c2p28 = pair256_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m256i c2p04 = pair256_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+ __m256i t4 = _mm256_mulhrs_epi16(in[2], c2p28);
+ __m256i t7 = _mm256_mulhrs_epi16(in[2], c2p04);
+
+ const __m256i c2m20 = pair256_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
+ const __m256i c2p12 = pair256_set_epi16(2 * cospi_12_64, 2 * cospi_12_64);
+ __m256i t5 = _mm256_mulhrs_epi16(in[6], c2m20);
+ __m256i t6 = _mm256_mulhrs_epi16(in[6], c2p12);
+
+ const __m256i c2p16 = pair256_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m256i c2p24 = pair256_set_epi16(2 * cospi_24_64, 2 * cospi_24_64);
+ const __m256i c2p08 = pair256_set_epi16(2 * cospi_8_64, 2 * cospi_8_64);
+ const __m256i u0 = _mm256_mulhrs_epi16(in[0], c2p16);
+ const __m256i u1 = _mm256_mulhrs_epi16(in[0], c2p16);
+ const __m256i u2 = _mm256_mulhrs_epi16(in[4], c2p24);
+ const __m256i u3 = _mm256_mulhrs_epi16(in[4], c2p08);
+
+ const __m256i u4 = _mm256_add_epi16(t4, t5);
+ const __m256i u5 = _mm256_sub_epi16(t4, t5);
+ const __m256i u6 = _mm256_sub_epi16(t7, t6);
+ const __m256i u7 = _mm256_add_epi16(t7, t6);
+
+ const __m256i t0 = _mm256_add_epi16(u0, u3);
+ const __m256i t1 = _mm256_add_epi16(u1, u2);
+ const __m256i t2 = _mm256_sub_epi16(u1, u2);
+ const __m256i t3 = _mm256_sub_epi16(u0, u3);
+
+ t4 = u4;
+ t7 = u7;
+
+ const __m256i cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+ const __m256i cospi_p16_m16 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ unpack_butter_fly(&u6, &u5, &cospi_p16_m16, &cospi_p16_p16, &t5, &t6);
+
+ out[0] = _mm256_add_epi16(t0, t7);
+ out[1] = _mm256_add_epi16(t1, t6);
+ out[2] = _mm256_add_epi16(t2, t5);
+ out[3] = _mm256_add_epi16(t3, t4);
+ out[4] = _mm256_sub_epi16(t3, t4);
+ out[5] = _mm256_sub_epi16(t2, t5);
+ out[6] = _mm256_sub_epi16(t1, t6);
+ out[7] = _mm256_sub_epi16(t0, t7);
+}
+
+static INLINE void idct16_38_second_half(const __m256i *in, __m256i *out) {
+ const __m256i c2p30 = pair256_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m256i c2p02 = pair256_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ __m256i t0 = _mm256_mulhrs_epi16(in[1], c2p30);
+ __m256i t7 = _mm256_mulhrs_epi16(in[1], c2p02);
+
+ const __m256i c2m18 = pair256_set_epi16(-2 * cospi_18_64, -2 * cospi_18_64);
+ const __m256i c2p14 = pair256_set_epi16(2 * cospi_14_64, 2 * cospi_14_64);
+ __m256i t1 = _mm256_mulhrs_epi16(in[7], c2m18);
+ __m256i t6 = _mm256_mulhrs_epi16(in[7], c2p14);
+
+ const __m256i c2p22 = pair256_set_epi16(2 * cospi_22_64, 2 * cospi_22_64);
+ const __m256i c2p10 = pair256_set_epi16(2 * cospi_10_64, 2 * cospi_10_64);
+ __m256i t2 = _mm256_mulhrs_epi16(in[5], c2p22);
+ __m256i t5 = _mm256_mulhrs_epi16(in[5], c2p10);
+
+ const __m256i c2m26 = pair256_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m256i c2p06 = pair256_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+ __m256i t3 = _mm256_mulhrs_epi16(in[3], c2m26);
+ __m256i t4 = _mm256_mulhrs_epi16(in[3], c2p06);
+
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7;
+ v0 = _mm256_add_epi16(t0, t1);
+ v1 = _mm256_sub_epi16(t0, t1);
+ v2 = _mm256_sub_epi16(t3, t2);
+ v3 = _mm256_add_epi16(t2, t3);
+ v4 = _mm256_add_epi16(t4, t5);
+ v5 = _mm256_sub_epi16(t4, t5);
+ v6 = _mm256_sub_epi16(t7, t6);
+ v7 = _mm256_add_epi16(t6, t7);
+
+ t0 = v0;
+ t7 = v7;
+ t3 = v3;
+ t4 = v4;
+ const __m256i cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i cospi_p24_p08 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ unpack_butter_fly(&v1, &v6, &cospi_m08_p24, &cospi_p24_p08, &t1, &t6);
+ unpack_butter_fly(&v2, &v5, &cospi_m24_m08, &cospi_m08_p24, &t2, &t5);
+
+ v0 = _mm256_add_epi16(t0, t3);
+ v1 = _mm256_add_epi16(t1, t2);
+ v2 = _mm256_sub_epi16(t1, t2);
+ v3 = _mm256_sub_epi16(t0, t3);
+ v4 = _mm256_sub_epi16(t7, t4);
+ v5 = _mm256_sub_epi16(t6, t5);
+ v6 = _mm256_add_epi16(t6, t5);
+ v7 = _mm256_add_epi16(t7, t4);
+
+ // stage 6, (8-15)
+ out[0] = v0;
+ out[1] = v1;
+ out[6] = v6;
+ out[7] = v7;
+ const __m256i cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+ const __m256i cospi_p16_m16 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ unpack_butter_fly(&v5, &v2, &cospi_p16_m16, &cospi_p16_p16, &out[2], &out[5]);
+ unpack_butter_fly(&v4, &v3, &cospi_p16_m16, &cospi_p16_p16, &out[3], &out[4]);
+}
+
+static INLINE void idct16_38(__m256i *in /*in[16]*/) {
+ __m256i out[16];
+ idct16_38_first_half(in, out);
+ idct16_38_second_half(in, &out[8]);
+ add_sub_butterfly(out, in, 16);
+}
+
+void aom_idct16x16_38_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[16];
+
+ int i;
+ for (i = 0; i < 8; ++i) {
+ load_coeff(input + (i << 4), &in[i]);
+ }
+
+ transpose_col_to_row_nz8x8(in);
+ idct16_38(in);
+
+ transpose_col_to_row_nz8x16(in);
+ idct16_38(in);
+
+ store_buffer_16xN(in, stride, dest, 16);
+}
+
+static INLINE int calculate_dc(const tran_low_t *input) {
+ int dc = (int)dct_const_round_shift(input[0] * cospi_16_64);
+ dc = (int)dct_const_round_shift(dc * cospi_16_64);
+ dc = ROUND_POWER_OF_TWO(dc, IDCT_ROUNDING_POS);
+ return dc;
+}
+
+void aom_idct16x16_1_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ const int dc = calculate_dc(input);
+ if (dc == 0) return;
+
+ const __m256i dc_value = _mm256_set1_epi16(dc);
+
+ int i;
+ for (i = 0; i < 16; ++i) {
+ recon_and_store(&dc_value, dest);
+ dest += stride;
+ }
+}
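The DC-only path reduces the whole 2-D transform to a single constant: calculate_dc applies the row and column cospi_16_64 scalings plus the final rounding shift to input[0], and that constant is added to every destination pixel. A scalar sketch of the same computation, assuming the dct_const_round_shift / ROUND_POWER_OF_TWO / clip_pixel helpers from aom_dsp; clip_pixel stands in for the packus-style saturation inside recon_and_store, and the function name is illustrative only:

static void idct_dc_only_add(const tran_low_t *input, uint8_t *dest,
                             int stride, int size) {
  int dc = (int)dct_const_round_shift(input[0] * cospi_16_64);  // row pass
  dc = (int)dct_const_round_shift(dc * cospi_16_64);            // column pass
  dc = ROUND_POWER_OF_TWO(dc, IDCT_ROUNDING_POS);               // final >> 6
  for (int r = 0; r < size; ++r)
    for (int c = 0; c < size; ++c)
      dest[r * stride + c] = clip_pixel(dest[r * stride + c] + dc);
}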
+
+// -----------------------------------------------------------------------------
+// 32x32 partial IDCT
+
+void aom_idct32x32_1_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ const int dc = calculate_dc(input);
+ if (dc == 0) return;
+
+ const __m256i dc_value = _mm256_set1_epi16(dc);
+
+ int i;
+ for (i = 0; i < 32; ++i) {
+ recon_and_store(&dc_value, dest);
+ recon_and_store(&dc_value, dest + 16);
+ dest += stride;
+ }
+}
+
+static void load_buffer_32x16(const tran_low_t *input, __m256i *in /*in[32]*/) {
+ int i;
+ for (i = 0; i < 16; ++i) {
+ load_coeff(input, &in[i]);
+ load_coeff(input + 16, &in[i + 16]);
+ input += 32;
+ }
+}
+
+// Note:
+// We extend the SSSE3 operations to AVX2. Instead of operating on __m128i,
+// we operate on coefficients in __m256i registers, so each instruction
+// processes twice as many values.
+#define BUTTERFLY_PAIR(x0, x1, co0, co1) \
+ do { \
+ tmp0 = _mm256_madd_epi16(x0, co0); \
+ tmp1 = _mm256_madd_epi16(x1, co0); \
+ tmp2 = _mm256_madd_epi16(x0, co1); \
+ tmp3 = _mm256_madd_epi16(x1, co1); \
+ tmp0 = _mm256_add_epi32(tmp0, rounding); \
+ tmp1 = _mm256_add_epi32(tmp1, rounding); \
+ tmp2 = _mm256_add_epi32(tmp2, rounding); \
+ tmp3 = _mm256_add_epi32(tmp3, rounding); \
+ tmp0 = _mm256_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm256_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm256_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm256_srai_epi32(tmp3, DCT_CONST_BITS); \
+ } while (0)
+
+static INLINE void butterfly(const __m256i *x0, const __m256i *x1,
+ const __m256i *c0, const __m256i *c1, __m256i *y0,
+ __m256i *y1) {
+ __m256i tmp0, tmp1, tmp2, tmp3, u0, u1;
+ const __m256i rounding = _mm256_set1_epi32(DCT_CONST_ROUNDING);
+
+ u0 = _mm256_unpacklo_epi16(*x0, *x1);
+ u1 = _mm256_unpackhi_epi16(*x0, *x1);
+ BUTTERFLY_PAIR(u0, u1, *c0, *c1);
+ *y0 = _mm256_packs_epi32(tmp0, tmp1);
+ *y1 = _mm256_packs_epi32(tmp2, tmp3);
+}
+
+static INLINE void butterfly_self(__m256i *x0, __m256i *x1, const __m256i *c0,
+ const __m256i *c1) {
+ __m256i tmp0, tmp1, tmp2, tmp3, u0, u1;
+ const __m256i rounding = _mm256_set1_epi32(DCT_CONST_ROUNDING);
+
+ u0 = _mm256_unpacklo_epi16(*x0, *x1);
+ u1 = _mm256_unpackhi_epi16(*x0, *x1);
+ BUTTERFLY_PAIR(u0, u1, *c0, *c1);
+ *x0 = _mm256_packs_epi32(tmp0, tmp1);
+ *x1 = _mm256_packs_epi32(tmp2, tmp3);
+}
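butterfly() and butterfly_self() implement a rotation by a pair of cosine constants via _mm256_madd_epi16 on interleaved inputs, followed by rounding and a saturating pack. A scalar sketch of one lane, assuming pair256_set_epi16(a, b) interleaves its arguments as (a, b, a, b, ...) like the SSE2 pair_set_epi16 helper; the literal 14 / 8192 mirror DCT_CONST_BITS / DCT_CONST_ROUNDING, and the function name is illustrative:

#include <stdint.h>

// With c0 = pair(a, b) and c1 = pair(c, d), lane i of the SIMD version forms
//   y0[i] = (x0[i] * a + x1[i] * b + 8192) >> 14
//   y1[i] = (x0[i] * c + x1[i] * d + 8192) >> 14
// before packing back to 16 bits with saturation.
static void butterfly_scalar(int16_t x0, int16_t x1, int16_t a, int16_t b,
                             int16_t c, int16_t d, int16_t *y0, int16_t *y1) {
  *y0 = (int16_t)(((int32_t)x0 * a + (int32_t)x1 * b + 8192) >> 14);
  *y1 = (int16_t)(((int32_t)x0 * c + (int32_t)x1 * d + 8192) >> 14);
}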
+
+// For each 16x32 block __m256i in[32],
+// the inputs are the entries with index 2, 6, 10, 14, 18, 22, 26, 30;
+// the outputs are pixels 8-15 in __m256i in[32].
+static void idct32_full_16x32_quarter_2(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[16]*/) {
+ __m256i u8, u9, u10, u11, u12, u13, u14, u15; // stp2_
+ __m256i v8, v9, v10, v11, v12, v13, v14, v15; // stp1_
+
+ {
+ const __m256i stg2_0 = pair256_set_epi16(cospi_30_64, -cospi_2_64);
+ const __m256i stg2_1 = pair256_set_epi16(cospi_2_64, cospi_30_64);
+ const __m256i stg2_2 = pair256_set_epi16(cospi_14_64, -cospi_18_64);
+ const __m256i stg2_3 = pair256_set_epi16(cospi_18_64, cospi_14_64);
+ butterfly(&in[2], &in[30], &stg2_0, &stg2_1, &u8, &u15);
+ butterfly(&in[18], &in[14], &stg2_2, &stg2_3, &u9, &u14);
+ }
+
+ v8 = _mm256_add_epi16(u8, u9);
+ v9 = _mm256_sub_epi16(u8, u9);
+ v14 = _mm256_sub_epi16(u15, u14);
+ v15 = _mm256_add_epi16(u15, u14);
+
+ {
+ const __m256i stg2_4 = pair256_set_epi16(cospi_22_64, -cospi_10_64);
+ const __m256i stg2_5 = pair256_set_epi16(cospi_10_64, cospi_22_64);
+ const __m256i stg2_6 = pair256_set_epi16(cospi_6_64, -cospi_26_64);
+ const __m256i stg2_7 = pair256_set_epi16(cospi_26_64, cospi_6_64);
+ butterfly(&in[10], &in[22], &stg2_4, &stg2_5, &u10, &u13);
+ butterfly(&in[26], &in[6], &stg2_6, &stg2_7, &u11, &u12);
+ }
+
+ v10 = _mm256_sub_epi16(u11, u10);
+ v11 = _mm256_add_epi16(u11, u10);
+ v12 = _mm256_add_epi16(u12, u13);
+ v13 = _mm256_sub_epi16(u12, u13);
+
+ {
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ butterfly_self(&v9, &v14, &stg4_4, &stg4_5);
+ butterfly_self(&v10, &v13, &stg4_6, &stg4_4);
+ }
+
+ out[0] = _mm256_add_epi16(v8, v11);
+ out[1] = _mm256_add_epi16(v9, v10);
+ out[6] = _mm256_add_epi16(v14, v13);
+ out[7] = _mm256_add_epi16(v15, v12);
+
+ out[2] = _mm256_sub_epi16(v9, v10);
+ out[3] = _mm256_sub_epi16(v8, v11);
+ out[4] = _mm256_sub_epi16(v15, v12);
+ out[5] = _mm256_sub_epi16(v14, v13);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ butterfly_self(&out[2], &out[5], &stg6_0, &stg4_0);
+ butterfly_self(&out[3], &out[4], &stg6_0, &stg4_0);
+ }
+}
+
+// For each 8x32 block __m256i in[32],
+// the inputs are the entries with index 0, 4, 8, 12, 16, 20, 24, 28;
+// the outputs are pixels 0-7 in __m256i in[32].
+static void idct32_full_16x32_quarter_1(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[8]*/) {
+ __m256i u0, u1, u2, u3, u4, u5, u6, u7; // stp1_
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7; // stp2_
+
+ {
+ const __m256i stg3_0 = pair256_set_epi16(cospi_28_64, -cospi_4_64);
+ const __m256i stg3_1 = pair256_set_epi16(cospi_4_64, cospi_28_64);
+ const __m256i stg3_2 = pair256_set_epi16(cospi_12_64, -cospi_20_64);
+ const __m256i stg3_3 = pair256_set_epi16(cospi_20_64, cospi_12_64);
+ butterfly(&in[4], &in[28], &stg3_0, &stg3_1, &u4, &u7);
+ butterfly(&in[20], &in[12], &stg3_2, &stg3_3, &u5, &u6);
+ }
+
+ v4 = _mm256_add_epi16(u4, u5);
+ v5 = _mm256_sub_epi16(u4, u5);
+ v6 = _mm256_sub_epi16(u7, u6);
+ v7 = _mm256_add_epi16(u7, u6);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg4_1 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m256i stg4_2 = pair256_set_epi16(cospi_24_64, -cospi_8_64);
+ const __m256i stg4_3 = pair256_set_epi16(cospi_8_64, cospi_24_64);
+ butterfly(&v6, &v5, &stg4_1, &stg4_0, &v5, &v6);
+
+ butterfly(&in[0], &in[16], &stg4_0, &stg4_1, &u0, &u1);
+ butterfly(&in[8], &in[24], &stg4_2, &stg4_3, &u2, &u3);
+ }
+
+ v0 = _mm256_add_epi16(u0, u3);
+ v1 = _mm256_add_epi16(u1, u2);
+ v2 = _mm256_sub_epi16(u1, u2);
+ v3 = _mm256_sub_epi16(u0, u3);
+
+ out[0] = _mm256_add_epi16(v0, v7);
+ out[1] = _mm256_add_epi16(v1, v6);
+ out[2] = _mm256_add_epi16(v2, v5);
+ out[3] = _mm256_add_epi16(v3, v4);
+ out[4] = _mm256_sub_epi16(v3, v4);
+ out[5] = _mm256_sub_epi16(v2, v5);
+ out[6] = _mm256_sub_epi16(v1, v6);
+ out[7] = _mm256_sub_epi16(v0, v7);
+}
+
+// For each 8x32 block __m256i in[32],
+// the inputs are the odd-indexed entries
+// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31;
+// the outputs are pixels 16-23 and 24-31 in __m256i in[32].
+// To avoid hiding an offset of 16 inside this function, the results are
+// written as entries 0-15 of the array out[16].
+static void idct32_full_16x32_quarter_3_4(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[16]*/) {
+ __m256i v16, v17, v18, v19, v20, v21, v22, v23;
+ __m256i v24, v25, v26, v27, v28, v29, v30, v31;
+ __m256i u16, u17, u18, u19, u20, u21, u22, u23;
+ __m256i u24, u25, u26, u27, u28, u29, u30, u31;
+
+ {
+ const __m256i stg1_0 = pair256_set_epi16(cospi_31_64, -cospi_1_64);
+ const __m256i stg1_1 = pair256_set_epi16(cospi_1_64, cospi_31_64);
+ const __m256i stg1_2 = pair256_set_epi16(cospi_15_64, -cospi_17_64);
+ const __m256i stg1_3 = pair256_set_epi16(cospi_17_64, cospi_15_64);
+ const __m256i stg1_4 = pair256_set_epi16(cospi_23_64, -cospi_9_64);
+ const __m256i stg1_5 = pair256_set_epi16(cospi_9_64, cospi_23_64);
+ const __m256i stg1_6 = pair256_set_epi16(cospi_7_64, -cospi_25_64);
+ const __m256i stg1_7 = pair256_set_epi16(cospi_25_64, cospi_7_64);
+ const __m256i stg1_8 = pair256_set_epi16(cospi_27_64, -cospi_5_64);
+ const __m256i stg1_9 = pair256_set_epi16(cospi_5_64, cospi_27_64);
+ const __m256i stg1_10 = pair256_set_epi16(cospi_11_64, -cospi_21_64);
+ const __m256i stg1_11 = pair256_set_epi16(cospi_21_64, cospi_11_64);
+ const __m256i stg1_12 = pair256_set_epi16(cospi_19_64, -cospi_13_64);
+ const __m256i stg1_13 = pair256_set_epi16(cospi_13_64, cospi_19_64);
+ const __m256i stg1_14 = pair256_set_epi16(cospi_3_64, -cospi_29_64);
+ const __m256i stg1_15 = pair256_set_epi16(cospi_29_64, cospi_3_64);
+ butterfly(&in[1], &in[31], &stg1_0, &stg1_1, &u16, &u31);
+ butterfly(&in[17], &in[15], &stg1_2, &stg1_3, &u17, &u30);
+ butterfly(&in[9], &in[23], &stg1_4, &stg1_5, &u18, &u29);
+ butterfly(&in[25], &in[7], &stg1_6, &stg1_7, &u19, &u28);
+
+ butterfly(&in[5], &in[27], &stg1_8, &stg1_9, &u20, &u27);
+ butterfly(&in[21], &in[11], &stg1_10, &stg1_11, &u21, &u26);
+
+ butterfly(&in[13], &in[19], &stg1_12, &stg1_13, &u22, &u25);
+ butterfly(&in[29], &in[3], &stg1_14, &stg1_15, &u23, &u24);
+ }
+
+ v16 = _mm256_add_epi16(u16, u17);
+ v17 = _mm256_sub_epi16(u16, u17);
+ v18 = _mm256_sub_epi16(u19, u18);
+ v19 = _mm256_add_epi16(u19, u18);
+
+ v20 = _mm256_add_epi16(u20, u21);
+ v21 = _mm256_sub_epi16(u20, u21);
+ v22 = _mm256_sub_epi16(u23, u22);
+ v23 = _mm256_add_epi16(u23, u22);
+
+ v24 = _mm256_add_epi16(u24, u25);
+ v25 = _mm256_sub_epi16(u24, u25);
+ v26 = _mm256_sub_epi16(u27, u26);
+ v27 = _mm256_add_epi16(u27, u26);
+
+ v28 = _mm256_add_epi16(u28, u29);
+ v29 = _mm256_sub_epi16(u28, u29);
+ v30 = _mm256_sub_epi16(u31, u30);
+ v31 = _mm256_add_epi16(u31, u30);
+
+ {
+ const __m256i stg3_4 = pair256_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m256i stg3_5 = pair256_set_epi16(cospi_28_64, cospi_4_64);
+ const __m256i stg3_6 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m256i stg3_8 = pair256_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m256i stg3_9 = pair256_set_epi16(cospi_12_64, cospi_20_64);
+ const __m256i stg3_10 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
+ butterfly_self(&v17, &v30, &stg3_4, &stg3_5);
+ butterfly_self(&v18, &v29, &stg3_6, &stg3_4);
+ butterfly_self(&v21, &v26, &stg3_8, &stg3_9);
+ butterfly_self(&v22, &v25, &stg3_10, &stg3_8);
+ }
+
+ u16 = _mm256_add_epi16(v16, v19);
+ u17 = _mm256_add_epi16(v17, v18);
+ u18 = _mm256_sub_epi16(v17, v18);
+ u19 = _mm256_sub_epi16(v16, v19);
+ u20 = _mm256_sub_epi16(v23, v20);
+ u21 = _mm256_sub_epi16(v22, v21);
+ u22 = _mm256_add_epi16(v22, v21);
+ u23 = _mm256_add_epi16(v23, v20);
+
+ u24 = _mm256_add_epi16(v24, v27);
+ u25 = _mm256_add_epi16(v25, v26);
+ u26 = _mm256_sub_epi16(v25, v26);
+ u27 = _mm256_sub_epi16(v24, v27);
+
+ u28 = _mm256_sub_epi16(v31, v28);
+ u29 = _mm256_sub_epi16(v30, v29);
+ u30 = _mm256_add_epi16(v29, v30);
+ u31 = _mm256_add_epi16(v28, v31);
+
+ {
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ butterfly_self(&u18, &u29, &stg4_4, &stg4_5);
+ butterfly_self(&u19, &u28, &stg4_4, &stg4_5);
+ butterfly_self(&u20, &u27, &stg4_6, &stg4_4);
+ butterfly_self(&u21, &u26, &stg4_6, &stg4_4);
+ }
+
+ out[0] = _mm256_add_epi16(u16, u23);
+ out[1] = _mm256_add_epi16(u17, u22);
+ out[2] = _mm256_add_epi16(u18, u21);
+ out[3] = _mm256_add_epi16(u19, u20);
+ out[4] = _mm256_sub_epi16(u19, u20);
+ out[5] = _mm256_sub_epi16(u18, u21);
+ out[6] = _mm256_sub_epi16(u17, u22);
+ out[7] = _mm256_sub_epi16(u16, u23);
+
+ out[8] = _mm256_sub_epi16(u31, u24);
+ out[9] = _mm256_sub_epi16(u30, u25);
+ out[10] = _mm256_sub_epi16(u29, u26);
+ out[11] = _mm256_sub_epi16(u28, u27);
+ out[12] = _mm256_add_epi16(u27, u28);
+ out[13] = _mm256_add_epi16(u26, u29);
+ out[14] = _mm256_add_epi16(u25, u30);
+ out[15] = _mm256_add_epi16(u24, u31);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ butterfly_self(&out[4], &out[11], &stg6_0, &stg4_0);
+ butterfly_self(&out[5], &out[10], &stg6_0, &stg4_0);
+ butterfly_self(&out[6], &out[9], &stg6_0, &stg4_0);
+ butterfly_self(&out[7], &out[8], &stg6_0, &stg4_0);
+ }
+}
+
+static void idct32_full_16x32_quarter_1_2(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[32]*/) {
+ __m256i temp[16];
+ idct32_full_16x32_quarter_1(in, temp);
+ idct32_full_16x32_quarter_2(in, &temp[8]);
+ add_sub_butterfly(temp, out, 16);
+}
+
+static void idct32_16x32(const __m256i *in /*in[32]*/,
+ __m256i *out /*out[32]*/) {
+ __m256i temp[32];
+ idct32_full_16x32_quarter_1_2(in, temp);
+ idct32_full_16x32_quarter_3_4(in, &temp[16]);
+ add_sub_butterfly(temp, out, 32);
+}
+
+void aom_idct32x32_1024_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i col[64], in[32];
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ load_buffer_32x16(input, in);
+ input += 32 << 4;
+
+ mm256_transpose_16x16(in, in);
+ mm256_transpose_16x16(&in[16], &in[16]);
+ idct32_16x32(in, col + (i << 5));
+ }
+
+ for (i = 0; i < 2; ++i) {
+ int j = i << 4;
+ mm256_transpose_16x16(col + j, in);
+ mm256_transpose_16x16(col + j + 32, &in[16]);
+ idct32_16x32(in, in);
+ store_buffer_16xN(in, stride, dest, 32);
+ dest += 16;
+ }
+}
+
+// Group the coefficient calculation into smaller functions
+// to prevent stack spillover:
+// quarter_1: 0-7
+// quarter_2: 8-15
+// quarter_3_4: 16-23, 24-31
+static void idct32_16x32_135_quarter_1(const __m256i *in /*in[16]*/,
+ __m256i *out /*out[8]*/) {
+ __m256i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7;
+
+ {
+ const __m256i stk4_0 = pair256_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m256i stk4_2 = pair256_set_epi16(2 * cospi_24_64, 2 * cospi_24_64);
+ const __m256i stk4_3 = pair256_set_epi16(2 * cospi_8_64, 2 * cospi_8_64);
+ u0 = _mm256_mulhrs_epi16(in[0], stk4_0);
+ u2 = _mm256_mulhrs_epi16(in[8], stk4_2);
+ u3 = _mm256_mulhrs_epi16(in[8], stk4_3);
+ u1 = u0;
+ }
+
+ v0 = _mm256_add_epi16(u0, u3);
+ v1 = _mm256_add_epi16(u1, u2);
+ v2 = _mm256_sub_epi16(u1, u2);
+ v3 = _mm256_sub_epi16(u0, u3);
+
+ {
+ const __m256i stk3_0 = pair256_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m256i stk3_1 = pair256_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+ const __m256i stk3_2 =
+ pair256_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
+ const __m256i stk3_3 = pair256_set_epi16(2 * cospi_12_64, 2 * cospi_12_64);
+ u4 = _mm256_mulhrs_epi16(in[4], stk3_0);
+ u7 = _mm256_mulhrs_epi16(in[4], stk3_1);
+ u5 = _mm256_mulhrs_epi16(in[12], stk3_2);
+ u6 = _mm256_mulhrs_epi16(in[12], stk3_3);
+ }
+
+ v4 = _mm256_add_epi16(u4, u5);
+ v5 = _mm256_sub_epi16(u4, u5);
+ v6 = _mm256_sub_epi16(u7, u6);
+ v7 = _mm256_add_epi16(u7, u6);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg4_1 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ butterfly(&v6, &v5, &stg4_1, &stg4_0, &v5, &v6);
+ }
+
+ out[0] = _mm256_add_epi16(v0, v7);
+ out[1] = _mm256_add_epi16(v1, v6);
+ out[2] = _mm256_add_epi16(v2, v5);
+ out[3] = _mm256_add_epi16(v3, v4);
+ out[4] = _mm256_sub_epi16(v3, v4);
+ out[5] = _mm256_sub_epi16(v2, v5);
+ out[6] = _mm256_sub_epi16(v1, v6);
+ out[7] = _mm256_sub_epi16(v0, v7);
+}
+
+static void idct32_16x32_135_quarter_2(const __m256i *in /*in[16]*/,
+ __m256i *out /*out[8]*/) {
+ __m256i u8, u9, u10, u11, u12, u13, u14, u15;
+ __m256i v8, v9, v10, v11, v12, v13, v14, v15;
+
+ {
+ const __m256i stk2_0 = pair256_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m256i stk2_1 = pair256_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ const __m256i stk2_2 =
+ pair256_set_epi16(-2 * cospi_18_64, -2 * cospi_18_64);
+ const __m256i stk2_3 = pair256_set_epi16(2 * cospi_14_64, 2 * cospi_14_64);
+ const __m256i stk2_4 = pair256_set_epi16(2 * cospi_22_64, 2 * cospi_22_64);
+ const __m256i stk2_5 = pair256_set_epi16(2 * cospi_10_64, 2 * cospi_10_64);
+ const __m256i stk2_6 =
+ pair256_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m256i stk2_7 = pair256_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+ u8 = _mm256_mulhrs_epi16(in[2], stk2_0);
+ u15 = _mm256_mulhrs_epi16(in[2], stk2_1);
+ u9 = _mm256_mulhrs_epi16(in[14], stk2_2);
+ u14 = _mm256_mulhrs_epi16(in[14], stk2_3);
+ u10 = _mm256_mulhrs_epi16(in[10], stk2_4);
+ u13 = _mm256_mulhrs_epi16(in[10], stk2_5);
+ u11 = _mm256_mulhrs_epi16(in[6], stk2_6);
+ u12 = _mm256_mulhrs_epi16(in[6], stk2_7);
+ }
+
+ v8 = _mm256_add_epi16(u8, u9);
+ v9 = _mm256_sub_epi16(u8, u9);
+ v10 = _mm256_sub_epi16(u11, u10);
+ v11 = _mm256_add_epi16(u11, u10);
+ v12 = _mm256_add_epi16(u12, u13);
+ v13 = _mm256_sub_epi16(u12, u13);
+ v14 = _mm256_sub_epi16(u15, u14);
+ v15 = _mm256_add_epi16(u15, u14);
+
+ {
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ butterfly_self(&v9, &v14, &stg4_4, &stg4_5);
+ butterfly_self(&v10, &v13, &stg4_6, &stg4_4);
+ }
+
+ out[0] = _mm256_add_epi16(v8, v11);
+ out[1] = _mm256_add_epi16(v9, v10);
+ out[2] = _mm256_sub_epi16(v9, v10);
+ out[3] = _mm256_sub_epi16(v8, v11);
+ out[4] = _mm256_sub_epi16(v15, v12);
+ out[5] = _mm256_sub_epi16(v14, v13);
+ out[6] = _mm256_add_epi16(v14, v13);
+ out[7] = _mm256_add_epi16(v15, v12);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ butterfly_self(&out[2], &out[5], &stg6_0, &stg4_0);
+ butterfly_self(&out[3], &out[4], &stg6_0, &stg4_0);
+ }
+}
+
+// 8x32 block: the 8 even-indexed inputs of in[16];
+// outputs the first half (16 entries) of out[32].
+static void idct32_16x32_quarter_1_2(const __m256i *in /*in[16]*/,
+ __m256i *out /*out[32]*/) {
+ __m256i temp[16];
+ idct32_16x32_135_quarter_1(in, temp);
+ idct32_16x32_135_quarter_2(in, &temp[8]);
+ add_sub_butterfly(temp, out, 16);
+}
+
+// 8x32 block: the 8 odd-indexed inputs of in[16];
+// outputs the second half (16 entries) of out[32].
+static void idct32_16x32_quarter_3_4(const __m256i *in /*in[16]*/,
+ __m256i *out /*out[32]*/) {
+ __m256i v16, v17, v18, v19, v20, v21, v22, v23;
+ __m256i v24, v25, v26, v27, v28, v29, v30, v31;
+ __m256i u16, u17, u18, u19, u20, u21, u22, u23;
+ __m256i u24, u25, u26, u27, u28, u29, u30, u31;
+
+ {
+ const __m256i stk1_0 = pair256_set_epi16(2 * cospi_31_64, 2 * cospi_31_64);
+ const __m256i stk1_1 = pair256_set_epi16(2 * cospi_1_64, 2 * cospi_1_64);
+ const __m256i stk1_2 =
+ pair256_set_epi16(-2 * cospi_17_64, -2 * cospi_17_64);
+ const __m256i stk1_3 = pair256_set_epi16(2 * cospi_15_64, 2 * cospi_15_64);
+
+ const __m256i stk1_4 = pair256_set_epi16(2 * cospi_23_64, 2 * cospi_23_64);
+ const __m256i stk1_5 = pair256_set_epi16(2 * cospi_9_64, 2 * cospi_9_64);
+ const __m256i stk1_6 =
+ pair256_set_epi16(-2 * cospi_25_64, -2 * cospi_25_64);
+ const __m256i stk1_7 = pair256_set_epi16(2 * cospi_7_64, 2 * cospi_7_64);
+ const __m256i stk1_8 = pair256_set_epi16(2 * cospi_27_64, 2 * cospi_27_64);
+ const __m256i stk1_9 = pair256_set_epi16(2 * cospi_5_64, 2 * cospi_5_64);
+ const __m256i stk1_10 =
+ pair256_set_epi16(-2 * cospi_21_64, -2 * cospi_21_64);
+ const __m256i stk1_11 = pair256_set_epi16(2 * cospi_11_64, 2 * cospi_11_64);
+
+ const __m256i stk1_12 = pair256_set_epi16(2 * cospi_19_64, 2 * cospi_19_64);
+ const __m256i stk1_13 = pair256_set_epi16(2 * cospi_13_64, 2 * cospi_13_64);
+ const __m256i stk1_14 =
+ pair256_set_epi16(-2 * cospi_29_64, -2 * cospi_29_64);
+ const __m256i stk1_15 = pair256_set_epi16(2 * cospi_3_64, 2 * cospi_3_64);
+ u16 = _mm256_mulhrs_epi16(in[1], stk1_0);
+ u31 = _mm256_mulhrs_epi16(in[1], stk1_1);
+ u17 = _mm256_mulhrs_epi16(in[15], stk1_2);
+ u30 = _mm256_mulhrs_epi16(in[15], stk1_3);
+
+ u18 = _mm256_mulhrs_epi16(in[9], stk1_4);
+ u29 = _mm256_mulhrs_epi16(in[9], stk1_5);
+ u19 = _mm256_mulhrs_epi16(in[7], stk1_6);
+ u28 = _mm256_mulhrs_epi16(in[7], stk1_7);
+
+ u20 = _mm256_mulhrs_epi16(in[5], stk1_8);
+ u27 = _mm256_mulhrs_epi16(in[5], stk1_9);
+ u21 = _mm256_mulhrs_epi16(in[11], stk1_10);
+ u26 = _mm256_mulhrs_epi16(in[11], stk1_11);
+
+ u22 = _mm256_mulhrs_epi16(in[13], stk1_12);
+ u25 = _mm256_mulhrs_epi16(in[13], stk1_13);
+ u23 = _mm256_mulhrs_epi16(in[3], stk1_14);
+ u24 = _mm256_mulhrs_epi16(in[3], stk1_15);
+ }
+
+ v16 = _mm256_add_epi16(u16, u17);
+ v17 = _mm256_sub_epi16(u16, u17);
+ v18 = _mm256_sub_epi16(u19, u18);
+ v19 = _mm256_add_epi16(u19, u18);
+
+ v20 = _mm256_add_epi16(u20, u21);
+ v21 = _mm256_sub_epi16(u20, u21);
+ v22 = _mm256_sub_epi16(u23, u22);
+ v23 = _mm256_add_epi16(u23, u22);
+
+ v24 = _mm256_add_epi16(u24, u25);
+ v25 = _mm256_sub_epi16(u24, u25);
+ v26 = _mm256_sub_epi16(u27, u26);
+ v27 = _mm256_add_epi16(u27, u26);
+
+ v28 = _mm256_add_epi16(u28, u29);
+ v29 = _mm256_sub_epi16(u28, u29);
+ v30 = _mm256_sub_epi16(u31, u30);
+ v31 = _mm256_add_epi16(u31, u30);
+
+ {
+ const __m256i stg3_4 = pair256_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m256i stg3_5 = pair256_set_epi16(cospi_28_64, cospi_4_64);
+ const __m256i stg3_6 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m256i stg3_8 = pair256_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m256i stg3_9 = pair256_set_epi16(cospi_12_64, cospi_20_64);
+ const __m256i stg3_10 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
+
+ butterfly_self(&v17, &v30, &stg3_4, &stg3_5);
+ butterfly_self(&v18, &v29, &stg3_6, &stg3_4);
+ butterfly_self(&v21, &v26, &stg3_8, &stg3_9);
+ butterfly_self(&v22, &v25, &stg3_10, &stg3_8);
+ }
+
+ u16 = _mm256_add_epi16(v16, v19);
+ u17 = _mm256_add_epi16(v17, v18);
+ u18 = _mm256_sub_epi16(v17, v18);
+ u19 = _mm256_sub_epi16(v16, v19);
+ u20 = _mm256_sub_epi16(v23, v20);
+ u21 = _mm256_sub_epi16(v22, v21);
+ u22 = _mm256_add_epi16(v22, v21);
+ u23 = _mm256_add_epi16(v23, v20);
+
+ u24 = _mm256_add_epi16(v24, v27);
+ u25 = _mm256_add_epi16(v25, v26);
+ u26 = _mm256_sub_epi16(v25, v26);
+ u27 = _mm256_sub_epi16(v24, v27);
+ u28 = _mm256_sub_epi16(v31, v28);
+ u29 = _mm256_sub_epi16(v30, v29);
+ u30 = _mm256_add_epi16(v29, v30);
+ u31 = _mm256_add_epi16(v28, v31);
+
+ {
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+ butterfly_self(&u18, &u29, &stg4_4, &stg4_5);
+ butterfly_self(&u19, &u28, &stg4_4, &stg4_5);
+ butterfly_self(&u20, &u27, &stg4_6, &stg4_4);
+ butterfly_self(&u21, &u26, &stg4_6, &stg4_4);
+ }
+
+ out[0] = _mm256_add_epi16(u16, u23);
+ out[1] = _mm256_add_epi16(u17, u22);
+ out[2] = _mm256_add_epi16(u18, u21);
+ out[3] = _mm256_add_epi16(u19, u20);
+ v20 = _mm256_sub_epi16(u19, u20);
+ v21 = _mm256_sub_epi16(u18, u21);
+ v22 = _mm256_sub_epi16(u17, u22);
+ v23 = _mm256_sub_epi16(u16, u23);
+
+ v24 = _mm256_sub_epi16(u31, u24);
+ v25 = _mm256_sub_epi16(u30, u25);
+ v26 = _mm256_sub_epi16(u29, u26);
+ v27 = _mm256_sub_epi16(u28, u27);
+ out[12] = _mm256_add_epi16(u27, u28);
+ out[13] = _mm256_add_epi16(u26, u29);
+ out[14] = _mm256_add_epi16(u25, u30);
+ out[15] = _mm256_add_epi16(u24, u31);
+
+ {
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ butterfly(&v20, &v27, &stg6_0, &stg4_0, &out[4], &out[11]);
+ butterfly(&v21, &v26, &stg6_0, &stg4_0, &out[5], &out[10]);
+ butterfly(&v22, &v25, &stg6_0, &stg4_0, &out[6], &out[9]);
+ butterfly(&v23, &v24, &stg6_0, &stg4_0, &out[7], &out[8]);
+ }
+}
+
+// 16x16 block input __m256i in[32], output 16x32 __m256i in[32]
+static void idct32_16x32_135(__m256i *in /*in[32]*/) {
+ __m256i out[32];
+ idct32_16x32_quarter_1_2(in, out);
+ idct32_16x32_quarter_3_4(in, &out[16]);
+ add_sub_butterfly(out, in, 32);
+}
+
+static INLINE void load_buffer_from_32x32(const tran_low_t *coeff, __m256i *in,
+ int size) {
+ int i = 0;
+ while (i < size) {
+ load_coeff(coeff + (i << 5), &in[i]);
+ i += 1;
+ }
+}
+
+static INLINE void zero_buffer(__m256i *in, int num) {
+ int i;
+ for (i = 0; i < num; ++i) {
+ in[i] = _mm256_setzero_si256();
+ }
+}
+
+// Only the upper-left 16x16 block has non-zero coeffs
+void aom_idct32x32_135_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[32];
+ zero_buffer(in, 32);
+ load_buffer_from_32x32(input, in, 16);
+ mm256_transpose_16x16(in, in);
+ idct32_16x32_135(in);
+
+ __m256i out[32];
+ mm256_transpose_16x16(in, out);
+ idct32_16x32_135(out);
+ store_buffer_16xN(out, stride, dest, 32);
+ mm256_transpose_16x16(&in[16], in);
+ idct32_16x32_135(in);
+ store_buffer_16xN(in, stride, dest + 16, 32);
+}
+
+static void idct32_34_first_half(const __m256i *in, __m256i *stp1) {
+ const __m256i stk2_0 = pair256_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m256i stk2_1 = pair256_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ const __m256i stk2_6 = pair256_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m256i stk2_7 = pair256_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+
+ const __m256i stk3_0 = pair256_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m256i stk3_1 = pair256_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stk4_0 = pair256_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m256i stg4_1 = pair256_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ __m256i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m256i x0, x1, x4, x5, x6, x7;
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;
+
+ // phase 1
+
+ // 0, 15
+ u2 = _mm256_mulhrs_epi16(in[2], stk2_1); // stp2_15
+ u3 = _mm256_mulhrs_epi16(in[6], stk2_7); // stp2_12
+ v15 = _mm256_add_epi16(u2, u3);
+ // in[0], in[4]
+ x0 = _mm256_mulhrs_epi16(in[0], stk4_0); // stp1[0]
+ x7 = _mm256_mulhrs_epi16(in[4], stk3_1); // stp1[7]
+ v0 = _mm256_add_epi16(x0, x7); // stp2_0
+ stp1[0] = _mm256_add_epi16(v0, v15);
+ stp1[15] = _mm256_sub_epi16(v0, v15);
+
+ // in[2], in[6]
+ u0 = _mm256_mulhrs_epi16(in[2], stk2_0); // stp2_8
+ u1 = _mm256_mulhrs_epi16(in[6], stk2_6); // stp2_11
+ butterfly(&u0, &u2, &stg4_4, &stg4_5, &u4, &u5); // stp2_9, stp2_14
+ butterfly(&u1, &u3, &stg4_6, &stg4_4, &u6, &u7); // stp2_10, stp2_13
+
+ v8 = _mm256_add_epi16(u0, u1);
+ v9 = _mm256_add_epi16(u4, u6);
+ v10 = _mm256_sub_epi16(u4, u6);
+ v11 = _mm256_sub_epi16(u0, u1);
+ v12 = _mm256_sub_epi16(u2, u3);
+ v13 = _mm256_sub_epi16(u5, u7);
+ v14 = _mm256_add_epi16(u5, u7);
+
+ butterfly_self(&v10, &v13, &stg6_0, &stg4_0);
+ butterfly_self(&v11, &v12, &stg6_0, &stg4_0);
+
+ // 1, 14
+ x1 = _mm256_mulhrs_epi16(in[0], stk4_0); // stp1[1], stk4_1 = stk4_0
+ // stp1[2] = stp1[0], stp1[3] = stp1[1]
+ x4 = _mm256_mulhrs_epi16(in[4], stk3_0); // stp1[4]
+ butterfly(&x7, &x4, &stg4_1, &stg4_0, &x5, &x6);
+ v1 = _mm256_add_epi16(x1, x6); // stp2_1
+ v2 = _mm256_add_epi16(x0, x5); // stp2_2
+ stp1[1] = _mm256_add_epi16(v1, v14);
+ stp1[14] = _mm256_sub_epi16(v1, v14);
+
+ stp1[2] = _mm256_add_epi16(v2, v13);
+ stp1[13] = _mm256_sub_epi16(v2, v13);
+
+ v3 = _mm256_add_epi16(x1, x4); // stp2_3
+ v4 = _mm256_sub_epi16(x1, x4); // stp2_4
+
+ v5 = _mm256_sub_epi16(x0, x5); // stp2_5
+
+ v6 = _mm256_sub_epi16(x1, x6); // stp2_6
+ v7 = _mm256_sub_epi16(x0, x7); // stp2_7
+ stp1[3] = _mm256_add_epi16(v3, v12);
+ stp1[12] = _mm256_sub_epi16(v3, v12);
+
+ stp1[6] = _mm256_add_epi16(v6, v9);
+ stp1[9] = _mm256_sub_epi16(v6, v9);
+
+ stp1[7] = _mm256_add_epi16(v7, v8);
+ stp1[8] = _mm256_sub_epi16(v7, v8);
+
+ stp1[4] = _mm256_add_epi16(v4, v11);
+ stp1[11] = _mm256_sub_epi16(v4, v11);
+
+ stp1[5] = _mm256_add_epi16(v5, v10);
+ stp1[10] = _mm256_sub_epi16(v5, v10);
+}
+
+static void idct32_34_second_half(const __m256i *in, __m256i *stp1) {
+ const __m256i stk1_0 = pair256_set_epi16(2 * cospi_31_64, 2 * cospi_31_64);
+ const __m256i stk1_1 = pair256_set_epi16(2 * cospi_1_64, 2 * cospi_1_64);
+ const __m256i stk1_6 = pair256_set_epi16(-2 * cospi_25_64, -2 * cospi_25_64);
+ const __m256i stk1_7 = pair256_set_epi16(2 * cospi_7_64, 2 * cospi_7_64);
+ const __m256i stk1_8 = pair256_set_epi16(2 * cospi_27_64, 2 * cospi_27_64);
+ const __m256i stk1_9 = pair256_set_epi16(2 * cospi_5_64, 2 * cospi_5_64);
+ const __m256i stk1_14 = pair256_set_epi16(-2 * cospi_29_64, -2 * cospi_29_64);
+ const __m256i stk1_15 = pair256_set_epi16(2 * cospi_3_64, 2 * cospi_3_64);
+ const __m256i stg3_4 = pair256_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m256i stg3_5 = pair256_set_epi16(cospi_28_64, cospi_4_64);
+ const __m256i stg3_6 = pair256_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m256i stg3_8 = pair256_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m256i stg3_9 = pair256_set_epi16(cospi_12_64, cospi_20_64);
+ const __m256i stg3_10 = pair256_set_epi16(-cospi_12_64, -cospi_20_64);
+
+ const __m256i stg4_0 = pair256_set_epi16(cospi_16_64, cospi_16_64);
+ const __m256i stg4_4 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m256i stg4_5 = pair256_set_epi16(cospi_24_64, cospi_8_64);
+ const __m256i stg4_6 = pair256_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ const __m256i stg6_0 = pair256_set_epi16(-cospi_16_64, cospi_16_64);
+ __m256i v16, v17, v18, v19, v20, v21, v22, v23;
+ __m256i v24, v25, v26, v27, v28, v29, v30, v31;
+ __m256i u16, u17, u18, u19, u20, u21, u22, u23;
+ __m256i u24, u25, u26, u27, u28, u29, u30, u31;
+
+ v16 = _mm256_mulhrs_epi16(in[1], stk1_0);
+ v31 = _mm256_mulhrs_epi16(in[1], stk1_1);
+
+ v19 = _mm256_mulhrs_epi16(in[7], stk1_6);
+ v28 = _mm256_mulhrs_epi16(in[7], stk1_7);
+
+ v20 = _mm256_mulhrs_epi16(in[5], stk1_8);
+ v27 = _mm256_mulhrs_epi16(in[5], stk1_9);
+
+ v23 = _mm256_mulhrs_epi16(in[3], stk1_14);
+ v24 = _mm256_mulhrs_epi16(in[3], stk1_15);
+
+ butterfly(&v16, &v31, &stg3_4, &stg3_5, &v17, &v30);
+ butterfly(&v19, &v28, &stg3_6, &stg3_4, &v18, &v29);
+ butterfly(&v20, &v27, &stg3_8, &stg3_9, &v21, &v26);
+ butterfly(&v23, &v24, &stg3_10, &stg3_8, &v22, &v25);
+
+ u16 = _mm256_add_epi16(v16, v19);
+ u17 = _mm256_add_epi16(v17, v18);
+ u18 = _mm256_sub_epi16(v17, v18);
+ u19 = _mm256_sub_epi16(v16, v19);
+ u20 = _mm256_sub_epi16(v23, v20);
+ u21 = _mm256_sub_epi16(v22, v21);
+ u22 = _mm256_add_epi16(v22, v21);
+ u23 = _mm256_add_epi16(v23, v20);
+ u24 = _mm256_add_epi16(v24, v27);
+ u27 = _mm256_sub_epi16(v24, v27);
+ u25 = _mm256_add_epi16(v25, v26);
+ u26 = _mm256_sub_epi16(v25, v26);
+ u28 = _mm256_sub_epi16(v31, v28);
+ u31 = _mm256_add_epi16(v28, v31);
+ u29 = _mm256_sub_epi16(v30, v29);
+ u30 = _mm256_add_epi16(v29, v30);
+
+ butterfly_self(&u18, &u29, &stg4_4, &stg4_5);
+ butterfly_self(&u19, &u28, &stg4_4, &stg4_5);
+ butterfly_self(&u20, &u27, &stg4_6, &stg4_4);
+ butterfly_self(&u21, &u26, &stg4_6, &stg4_4);
+
+ stp1[0] = _mm256_add_epi16(u16, u23);
+ stp1[7] = _mm256_sub_epi16(u16, u23);
+
+ stp1[1] = _mm256_add_epi16(u17, u22);
+ stp1[6] = _mm256_sub_epi16(u17, u22);
+
+ stp1[2] = _mm256_add_epi16(u18, u21);
+ stp1[5] = _mm256_sub_epi16(u18, u21);
+
+ stp1[3] = _mm256_add_epi16(u19, u20);
+ stp1[4] = _mm256_sub_epi16(u19, u20);
+
+ stp1[8] = _mm256_sub_epi16(u31, u24);
+ stp1[15] = _mm256_add_epi16(u24, u31);
+
+ stp1[9] = _mm256_sub_epi16(u30, u25);
+ stp1[14] = _mm256_add_epi16(u25, u30);
+
+ stp1[10] = _mm256_sub_epi16(u29, u26);
+ stp1[13] = _mm256_add_epi16(u26, u29);
+
+ stp1[11] = _mm256_sub_epi16(u28, u27);
+ stp1[12] = _mm256_add_epi16(u27, u28);
+
+ butterfly_self(&stp1[4], &stp1[11], &stg6_0, &stg4_0);
+ butterfly_self(&stp1[5], &stp1[10], &stg6_0, &stg4_0);
+ butterfly_self(&stp1[6], &stp1[9], &stg6_0, &stg4_0);
+ butterfly_self(&stp1[7], &stp1[8], &stg6_0, &stg4_0);
+}
+
+// 16x16 block input __m256i in[32], output 16x32 __m256i in[32]
+static void idct32_16x32_34(__m256i *in /*in[32]*/) {
+ __m256i out[32];
+ idct32_34_first_half(in, out);
+ idct32_34_second_half(in, &out[16]);
+ add_sub_butterfly(out, in, 32);
+}
+
+// Only the upper-left 8x8 block has non-zero coeffs
+void aom_idct32x32_34_add_avx2(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ __m256i in[32];
+ zero_buffer(in, 32);
+ load_buffer_from_32x32(input, in, 8);
+ mm256_transpose_16x16(in, in);
+ idct32_16x32_34(in);
+
+ __m256i out[32];
+ mm256_transpose_16x16(in, out);
+ idct32_16x32_34(out);
+ store_buffer_16xN(out, stride, dest, 32);
+ mm256_transpose_16x16(&in[16], in);
+ idct32_16x32_34(in);
+ store_buffer_16xN(in, stride, dest + 16, 32);
+}
diff --git a/third_party/aom/aom_dsp/x86/inv_txfm_common_avx2.h b/third_party/aom/aom_dsp/x86/inv_txfm_common_avx2.h
new file mode 100644
index 000000000..4238e651b
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/inv_txfm_common_avx2.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_DSP_X86_INV_TXFM_COMMON_AVX2_H
+#define AOM_DSP_X86_INV_TXFM_COMMON_AVX2_H
+
+#include <immintrin.h>
+
+#include "aom_dsp/txfm_common.h"
+#include "aom_dsp/x86/txfm_common_avx2.h"
+
+static INLINE void load_coeff(const tran_low_t *coeff, __m256i *in) {
+#if CONFIG_HIGHBITDEPTH
+ *in = _mm256_setr_epi16(
+ (int16_t)coeff[0], (int16_t)coeff[1], (int16_t)coeff[2],
+ (int16_t)coeff[3], (int16_t)coeff[4], (int16_t)coeff[5],
+ (int16_t)coeff[6], (int16_t)coeff[7], (int16_t)coeff[8],
+ (int16_t)coeff[9], (int16_t)coeff[10], (int16_t)coeff[11],
+ (int16_t)coeff[12], (int16_t)coeff[13], (int16_t)coeff[14],
+ (int16_t)coeff[15]);
+#else
+ *in = _mm256_loadu_si256((const __m256i *)coeff);
+#endif
+}
+
+static INLINE void load_buffer_16x16(const tran_low_t *coeff, __m256i *in) {
+ int i = 0;
+ while (i < 16) {
+ load_coeff(coeff + (i << 4), &in[i]);
+ i += 1;
+ }
+}
+
+static INLINE void recon_and_store(const __m256i *res, uint8_t *output) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i x = _mm_loadu_si128((__m128i const *)output);
+ __m128i p0 = _mm_unpacklo_epi8(x, zero);
+ __m128i p1 = _mm_unpackhi_epi8(x, zero);
+
+ p0 = _mm_add_epi16(p0, _mm256_castsi256_si128(*res));
+ p1 = _mm_add_epi16(p1, _mm256_extractf128_si256(*res, 1));
+ x = _mm_packus_epi16(p0, p1);
+ _mm_storeu_si128((__m128i *)output, x);
+}
+
+#define IDCT_ROUNDING_POS (6)
+static INLINE void store_buffer_16xN(__m256i *in, const int stride,
+ uint8_t *output, int num) {
+ const __m256i rounding = _mm256_set1_epi16(1 << (IDCT_ROUNDING_POS - 1));
+ int i = 0;
+
+ while (i < num) {
+ in[i] = _mm256_adds_epi16(in[i], rounding);
+ in[i] = _mm256_srai_epi16(in[i], IDCT_ROUNDING_POS);
+ recon_and_store(&in[i], output + i * stride);
+ i += 1;
+ }
+}
+
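store_buffer_16xN and recon_and_store perform the final 1/64 rounding of the IDCT output and add it to the existing destination pixels with unsigned saturation. A per-pixel scalar sketch (the clamp mirrors what _mm_packus_epi16 does; the helper is hypothetical):

#include <stdint.h>

// rounded = (idct_out + 32) >> 6 with IDCT_ROUNDING_POS == 6, then
// dest = clamp(dest + rounded, 0, 255).
static uint8_t recon_pixel(int16_t idct_out, uint8_t dst) {
  const int rounded = (idct_out + (1 << 5)) >> 6;
  const int sum = dst + rounded;
  return (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
}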
+static INLINE void unpack_butter_fly(const __m256i *a0, const __m256i *a1,
+ const __m256i *c0, const __m256i *c1,
+ __m256i *b0, __m256i *b1) {
+ __m256i x0, x1;
+ x0 = _mm256_unpacklo_epi16(*a0, *a1);
+ x1 = _mm256_unpackhi_epi16(*a0, *a1);
+ *b0 = butter_fly(&x0, &x1, c0);
+ *b1 = butter_fly(&x0, &x1, c1);
+}
+
+void av1_idct16_avx2(__m256i *in);
+
+#endif // AOM_DSP_X86_INV_TXFM_COMMON_AVX2_H
diff --git a/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c b/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
index 5795a1845..be200df4c 100644
--- a/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
+++ b/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
@@ -3628,4 +3628,107 @@ void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
+void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+ int stride, int bd) {
+ tran_low_t out[8 * 8] = { 0 };
+ tran_low_t *outptr = out;
+ int i, j, test;
+ __m128i inptr[8];
+ __m128i min_input, max_input, temp1, temp2, sign_bits;
+ uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
+ const __m128i zero = _mm_set1_epi16(0);
+ const __m128i sixteen = _mm_set1_epi16(16);
+ const __m128i max = _mm_set1_epi16(6201);
+ const __m128i min = _mm_set1_epi16(-6201);
+ int optimised_cols = 0;
+
+ // Load input into __m128i & pack to 16 bits
+ for (i = 0; i < 8; i++) {
+ temp1 = _mm_loadu_si128((const __m128i *)(input + 8 * i));
+ temp2 = _mm_loadu_si128((const __m128i *)(input + 8 * i + 4));
+ inptr[i] = _mm_packs_epi32(temp1, temp2);
+ }
+
+ // Find the min & max for the row transform
+  // only the first 4 rows have non-zero coeffs
+ max_input = _mm_max_epi16(inptr[0], inptr[1]);
+ min_input = _mm_min_epi16(inptr[0], inptr[1]);
+ for (i = 2; i < 4; i++) {
+ max_input = _mm_max_epi16(max_input, inptr[i]);
+ min_input = _mm_min_epi16(min_input, inptr[i]);
+ }
+ max_input = _mm_cmpgt_epi16(max_input, max);
+ min_input = _mm_cmplt_epi16(min_input, min);
+ temp1 = _mm_or_si128(max_input, min_input);
+ test = _mm_movemask_epi8(temp1);
+
+ if (!test) {
+ // Do the row transform
+ aom_idct8_sse2(inptr);
+
+ // Find the min & max for the column transform
+    // N.B. Only the first 4 columns contain non-zero coeffs
+ max_input = _mm_max_epi16(inptr[0], inptr[1]);
+ min_input = _mm_min_epi16(inptr[0], inptr[1]);
+ for (i = 2; i < 8; i++) {
+ max_input = _mm_max_epi16(max_input, inptr[i]);
+ min_input = _mm_min_epi16(min_input, inptr[i]);
+ }
+ max_input = _mm_cmpgt_epi16(max_input, max);
+ min_input = _mm_cmplt_epi16(min_input, min);
+ temp1 = _mm_or_si128(max_input, min_input);
+ test = _mm_movemask_epi8(temp1);
+
+ if (test) {
+      // Use the fact that only the first 4 rows contain non-zero coeffs
+ array_transpose_4X8(inptr, inptr);
+ for (i = 0; i < 4; i++) {
+ sign_bits = _mm_cmplt_epi16(inptr[i], zero);
+ temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits);
+ temp2 = _mm_unpacklo_epi16(inptr[i], sign_bits);
+ _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i + 1)), temp1);
+ _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i)), temp2);
+ }
+ } else {
+ // Set to use the optimised transform for the column
+ optimised_cols = 1;
+ }
+ } else {
+ // Run the un-optimised row transform
+ for (i = 0; i < 4; ++i) {
+ aom_highbd_idct8_c(input, outptr, bd);
+ input += 8;
+ outptr += 8;
+ }
+ }
+
+ if (optimised_cols) {
+ aom_idct8_sse2(inptr);
+
+    // Final round & shift, then reconstruct and store
+ {
+ __m128i d[8];
+ for (i = 0; i < 8; i++) {
+ inptr[i] = _mm_add_epi16(inptr[i], sixteen);
+ d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
+ inptr[i] = _mm_srai_epi16(inptr[i], 5);
+ d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
+ // Store
+ _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
+ }
+ }
+ } else {
+ // Run the un-optimised column transform
+ tran_low_t temp_in[8], temp_out[8];
+ for (i = 0; i < 8; ++i) {
+ for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
+ aom_highbd_idct8_c(temp_in, temp_out, bd);
+ for (j = 0; j < 8; ++j) {
+ dest[j * stride + i] = highbd_clip_pixel_add(
+ dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
+ }
+ }
+ }
+}
+
#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c b/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
index 5166e9e0a..9d16a3e84 100644
--- a/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
@@ -9,49 +9,70 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#include <stdlib.h>
-#include <emmintrin.h>
+#include <stdio.h>
#include <tmmintrin.h>
-#include "aom_ports/mem.h"
#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/blend.h"
#include "aom/aom_integer.h"
+#include "aom_dsp/x86/synonyms.h"
-static INLINE __m128i width8_load_2rows(const uint8_t *ptr, int stride) {
- __m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
- __m128i temp2 = _mm_loadl_epi64((const __m128i *)(ptr + stride));
- return _mm_unpacklo_epi64(temp1, temp2);
-}
-
-static INLINE __m128i width4_load_4rows(const uint8_t *ptr, int stride) {
- __m128i temp1 = _mm_cvtsi32_si128(*(const uint32_t *)ptr);
- __m128i temp2 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride));
- __m128i temp3 = _mm_unpacklo_epi32(temp1, temp2);
- temp1 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride * 2));
- temp2 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride * 3));
- temp1 = _mm_unpacklo_epi32(temp1, temp2);
- return _mm_unpacklo_epi64(temp3, temp1);
-}
-
-static INLINE unsigned int masked_sad_ssse3(const uint8_t *a_ptr, int a_stride,
+// For width a multiple of 16
+static INLINE unsigned int masked_sad_ssse3(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *a_ptr, int a_stride,
const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride,
int width, int height);
static INLINE unsigned int masked_sad8xh_ssse3(
- const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height);
+ const uint8_t *src_ptr, int src_stride, const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height);
static INLINE unsigned int masked_sad4xh_ssse3(
- const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height);
-
-#define MASKSADMXN_SSSE3(m, n) \
- unsigned int aom_masked_sad##m##x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, msk_stride, \
- m, n); \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height);
+
+#define MASKSADMXN_SSSE3(m, n) \
+ unsigned int aom_masked_sad##m##x##n##_ssse3( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+ int invert_mask) { \
+ if (!invert_mask) \
+ return masked_sad_ssse3(src, src_stride, ref, ref_stride, second_pred, \
+ m, msk, msk_stride, m, n); \
+ else \
+ return masked_sad_ssse3(src, src_stride, second_pred, m, ref, \
+ ref_stride, msk, msk_stride, m, n); \
+ }
+
+#define MASKSAD8XN_SSSE3(n) \
+ unsigned int aom_masked_sad8x##n##_ssse3( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+ int invert_mask) { \
+ if (!invert_mask) \
+ return masked_sad8xh_ssse3(src, src_stride, ref, ref_stride, \
+ second_pred, 8, msk, msk_stride, n); \
+ else \
+ return masked_sad8xh_ssse3(src, src_stride, second_pred, 8, ref, \
+ ref_stride, msk, msk_stride, n); \
+ }
+
+#define MASKSAD4XN_SSSE3(n) \
+ unsigned int aom_masked_sad4x##n##_ssse3( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+ int invert_mask) { \
+ if (!invert_mask) \
+ return masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, \
+ second_pred, 4, msk, msk_stride, n); \
+ else \
+ return masked_sad4xh_ssse3(src, src_stride, second_pred, 4, ref, \
+ ref_stride, msk, msk_stride, n); \
}
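Each wrapper now takes a contiguous second predictor and an invert_mask flag: when the mask is inverted, the reference and the second predictor simply swap roles, and the second predictor's stride is the block width. For illustration, MASKSADMXN_SSSE3(16, 16) expands to roughly the following (a sketch of the macro expansion only):

unsigned int aom_masked_sad16x16_ssse3(const uint8_t *src, int src_stride,
                                       const uint8_t *ref, int ref_stride,
                                       const uint8_t *second_pred,
                                       const uint8_t *msk, int msk_stride,
                                       int invert_mask) {
  if (!invert_mask)
    return masked_sad_ssse3(src, src_stride, ref, ref_stride, second_pred, 16,
                            msk, msk_stride, 16, 16);
  else
    return masked_sad_ssse3(src, src_stride, second_pred, 16, ref, ref_stride,
                            msk, msk_stride, 16, 16);
}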
#if CONFIG_EXT_PARTITION
@@ -67,165 +88,181 @@ MASKSADMXN_SSSE3(32, 16)
MASKSADMXN_SSSE3(16, 32)
MASKSADMXN_SSSE3(16, 16)
MASKSADMXN_SSSE3(16, 8)
-
-#define MASKSAD8XN_SSSE3(n) \
- unsigned int aom_masked_sad8x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return masked_sad8xh_ssse3(src, src_stride, ref, ref_stride, msk, \
- msk_stride, n); \
- }
-
MASKSAD8XN_SSSE3(16)
MASKSAD8XN_SSSE3(8)
MASKSAD8XN_SSSE3(4)
-
-#define MASKSAD4XN_SSSE3(n) \
- unsigned int aom_masked_sad4x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
- msk_stride, n); \
- }
-
MASKSAD4XN_SSSE3(8)
MASKSAD4XN_SSSE3(4)
-// For width a multiple of 16
-// Assumes values in m are <=64
-static INLINE unsigned int masked_sad_ssse3(const uint8_t *a_ptr, int a_stride,
+static INLINE unsigned int masked_sad_ssse3(const uint8_t *src_ptr,
+ int src_stride,
+ const uint8_t *a_ptr, int a_stride,
const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride,
int width, int height) {
- int y, x;
- __m128i a, b, m, temp1, temp2;
+ int x, y;
__m128i res = _mm_setzero_si128();
- __m128i one = _mm_set1_epi16(1);
- // For each row
+ const __m128i mask_max = _mm_set1_epi8((1 << AOM_BLEND_A64_ROUND_BITS));
+
for (y = 0; y < height; y++) {
- // Covering the full width
for (x = 0; x < width; x += 16) {
- // Load a, b, m in xmm registers
- a = _mm_loadu_si128((const __m128i *)(a_ptr + x));
- b = _mm_loadu_si128((const __m128i *)(b_ptr + x));
- m = _mm_loadu_si128((const __m128i *)(m_ptr + x));
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu8(a, b);
- temp2 = _mm_subs_epu8(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Multiply by m and add together
- temp2 = _mm_maddubs_epi16(temp1, m);
- // Pad out row result to 32 bit integers & add to running total
- res = _mm_add_epi32(res, _mm_madd_epi16(temp2, one));
+ const __m128i src = _mm_loadu_si128((const __m128i *)&src_ptr[x]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)&a_ptr[x]);
+ const __m128i b = _mm_loadu_si128((const __m128i *)&b_ptr[x]);
+ const __m128i m = _mm_loadu_si128((const __m128i *)&m_ptr[x]);
+ const __m128i m_inv = _mm_sub_epi8(mask_max, m);
+
+ // Calculate 16 predicted pixels.
+ // Note that the maximum value of any entry of 'pred_l' or 'pred_r'
+ // is 64 * 255, so we have plenty of space to add rounding constants.
+ const __m128i data_l = _mm_unpacklo_epi8(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi8(m, m_inv);
+ __m128i pred_l = _mm_maddubs_epi16(data_l, mask_l);
+ pred_l = xx_roundn_epu16(pred_l, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi8(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi8(m, m_inv);
+ __m128i pred_r = _mm_maddubs_epi16(data_r, mask_r);
+ pred_r = xx_roundn_epu16(pred_r, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i pred = _mm_packus_epi16(pred_l, pred_r);
+ res = _mm_add_epi32(res, _mm_sad_epu8(pred, src));
}
- // Move onto the next row
+
+ src_ptr += src_stride;
a_ptr += a_stride;
b_ptr += b_stride;
m_ptr += m_stride;
}
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ // At this point, we have two 32-bit partial SADs in lanes 0 and 2 of 'res'.
+ int32_t sad =
+ _mm_cvtsi128_si32(res) + _mm_cvtsi128_si32(_mm_srli_si128(res, 8));
+ return (sad + 31) >> 6;
}
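
For reference, here is a minimal scalar sketch of what the blend-then-SAD loop above computes, assuming AOM_BLEND_A64_ROUND_BITS is 6 so the mask and its complement sum to 64; the helper name is illustrative, not part of the patch:

#include <stdint.h>

// Scalar model of the masked SAD above (sketch only).
static unsigned int masked_sad_c_sketch(const uint8_t *src, int src_stride,
                                        const uint8_t *a, int a_stride,
                                        const uint8_t *b, int b_stride,
                                        const uint8_t *m, int m_stride,
                                        int width, int height) {
  unsigned int sad = 0;
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      // Blend with rounding: pred = (m*a + (64 - m)*b + 32) >> 6.
      const int pred = (m[x] * a[x] + (64 - m[x]) * b[x] + 32) >> 6;
      sad += (pred > src[x]) ? (pred - src[x]) : (src[x] - pred);
    }
    src += src_stride;
    a += a_stride;
    b += b_stride;
    m += m_stride;
  }
  // Same final rounding as the SIMD version.
  return (sad + 31) >> 6;
}

The SIMD version reaches the same result by interleaving a/b and m/(64 - m) so that _mm_maddubs_epi16 forms m*a + (64 - m)*b directly, then accumulating _mm_sad_epu8 against the source pixels.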
static INLINE unsigned int masked_sad8xh_ssse3(
- const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height) {
+ const uint8_t *src_ptr, int src_stride, const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height) {
int y;
- __m128i a, b, m, temp1, temp2, row_res;
__m128i res = _mm_setzero_si128();
- __m128i one = _mm_set1_epi16(1);
- // Add the masked SAD for 2 rows at a time
- for (y = 0; y < height; y += 2) {
- // Load a, b, m in xmm registers
- a = width8_load_2rows(a_ptr, a_stride);
- b = width8_load_2rows(b_ptr, b_stride);
- m = width8_load_2rows(m_ptr, m_stride);
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu8(a, b);
- temp2 = _mm_subs_epu8(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Multiply by m and add together
- row_res = _mm_maddubs_epi16(temp1, m);
-
- // Pad out row result to 32 bit integers & add to running total
- res = _mm_add_epi32(res, _mm_madd_epi16(row_res, one));
+ const __m128i mask_max = _mm_set1_epi8((1 << AOM_BLEND_A64_ROUND_BITS));
- // Move onto the next rows
+ for (y = 0; y < height; y += 2) {
+ const __m128i src = _mm_unpacklo_epi64(
+ _mm_loadl_epi64((const __m128i *)src_ptr),
+ _mm_loadl_epi64((const __m128i *)&src_ptr[src_stride]));
+ const __m128i a0 = _mm_loadl_epi64((const __m128i *)a_ptr);
+ const __m128i a1 = _mm_loadl_epi64((const __m128i *)&a_ptr[a_stride]);
+ const __m128i b0 = _mm_loadl_epi64((const __m128i *)b_ptr);
+ const __m128i b1 = _mm_loadl_epi64((const __m128i *)&b_ptr[b_stride]);
+ const __m128i m =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)m_ptr),
+ _mm_loadl_epi64((const __m128i *)&m_ptr[m_stride]));
+ const __m128i m_inv = _mm_sub_epi8(mask_max, m);
+
+ const __m128i data_l = _mm_unpacklo_epi8(a0, b0);
+ const __m128i mask_l = _mm_unpacklo_epi8(m, m_inv);
+ __m128i pred_l = _mm_maddubs_epi16(data_l, mask_l);
+ pred_l = xx_roundn_epu16(pred_l, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpacklo_epi8(a1, b1);
+ const __m128i mask_r = _mm_unpackhi_epi8(m, m_inv);
+ __m128i pred_r = _mm_maddubs_epi16(data_r, mask_r);
+ pred_r = xx_roundn_epu16(pred_r, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i pred = _mm_packus_epi16(pred_l, pred_r);
+ res = _mm_add_epi32(res, _mm_sad_epu8(pred, src));
+
+ src_ptr += src_stride * 2;
a_ptr += a_stride * 2;
b_ptr += b_stride * 2;
m_ptr += m_stride * 2;
}
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ int32_t sad =
+ _mm_cvtsi128_si32(res) + _mm_cvtsi128_si32(_mm_srli_si128(res, 8));
+ return (sad + 31) >> 6;
}
static INLINE unsigned int masked_sad4xh_ssse3(
- const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height) {
+ const uint8_t *src_ptr, int src_stride, const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height) {
int y;
- __m128i a, b, m, temp1, temp2, row_res;
__m128i res = _mm_setzero_si128();
- __m128i one = _mm_set1_epi16(1);
- // Add the masked SAD for 4 rows at a time
- for (y = 0; y < height; y += 4) {
- // Load a, b, m in xmm registers
- a = width4_load_4rows(a_ptr, a_stride);
- b = width4_load_4rows(b_ptr, b_stride);
- m = width4_load_4rows(m_ptr, m_stride);
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu8(a, b);
- temp2 = _mm_subs_epu8(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Multiply by m and add together
- row_res = _mm_maddubs_epi16(temp1, m);
-
- // Pad out row result to 32 bit integers & add to running total
- res = _mm_add_epi32(res, _mm_madd_epi16(row_res, one));
-
- // Move onto the next rows
- a_ptr += a_stride * 4;
- b_ptr += b_stride * 4;
- m_ptr += m_stride * 4;
+ const __m128i mask_max = _mm_set1_epi8((1 << AOM_BLEND_A64_ROUND_BITS));
+
+ for (y = 0; y < height; y += 2) {
+    // Load two rows at a time; this seems to be a bit faster
+    // than loading four rows at a time in this case.
+ const __m128i src = _mm_unpacklo_epi32(
+ _mm_cvtsi32_si128(*(uint32_t *)src_ptr),
+ _mm_cvtsi32_si128(*(uint32_t *)&src_ptr[src_stride]));
+ const __m128i a =
+ _mm_unpacklo_epi32(_mm_cvtsi32_si128(*(uint32_t *)a_ptr),
+ _mm_cvtsi32_si128(*(uint32_t *)&a_ptr[a_stride]));
+ const __m128i b =
+ _mm_unpacklo_epi32(_mm_cvtsi32_si128(*(uint32_t *)b_ptr),
+ _mm_cvtsi32_si128(*(uint32_t *)&b_ptr[b_stride]));
+ const __m128i m =
+ _mm_unpacklo_epi32(_mm_cvtsi32_si128(*(uint32_t *)m_ptr),
+ _mm_cvtsi32_si128(*(uint32_t *)&m_ptr[m_stride]));
+ const __m128i m_inv = _mm_sub_epi8(mask_max, m);
+
+ const __m128i data = _mm_unpacklo_epi8(a, b);
+ const __m128i mask = _mm_unpacklo_epi8(m, m_inv);
+ __m128i pred_16bit = _mm_maddubs_epi16(data, mask);
+ pred_16bit = xx_roundn_epu16(pred_16bit, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i pred = _mm_packus_epi16(pred_16bit, _mm_setzero_si128());
+ res = _mm_add_epi32(res, _mm_sad_epu8(pred, src));
+
+ src_ptr += src_stride * 2;
+ a_ptr += a_stride * 2;
+ b_ptr += b_stride * 2;
+ m_ptr += m_stride * 2;
}
- // Pad out row result to 32 bit integers & add to running total
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+  // At this point, the SAD is stored in lane 0 of 'res'.
+ int32_t sad = _mm_cvtsi128_si32(res);
+ return (sad + 31) >> 6;
}
#if CONFIG_HIGHBITDEPTH
-static INLINE __m128i highbd_width4_load_2rows(const uint16_t *ptr,
- int stride) {
- __m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
- __m128i temp2 = _mm_loadl_epi64((const __m128i *)(ptr + stride));
- return _mm_unpacklo_epi64(temp1, temp2);
-}
-
+// For width a multiple of 8
static INLINE unsigned int highbd_masked_sad_ssse3(
- const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int width, int height);
+ const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int width, int height);
static INLINE unsigned int highbd_masked_sad4xh_ssse3(
- const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height);
+ const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height);
#define HIGHBD_MASKSADMXN_SSSE3(m, n) \
unsigned int aom_highbd_masked_sad##m##x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return highbd_masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, \
- msk_stride, m, n); \
+ const uint8_t *src8, int src_stride, const uint8_t *ref8, \
+ int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
+ int msk_stride, int invert_mask) { \
+ if (!invert_mask) \
+ return highbd_masked_sad_ssse3(src8, src_stride, ref8, ref_stride, \
+ second_pred8, m, msk, msk_stride, m, n); \
+ else \
+ return highbd_masked_sad_ssse3(src8, src_stride, second_pred8, m, ref8, \
+ ref_stride, msk, msk_stride, m, n); \
+ }
+
+#define HIGHBD_MASKSAD4XN_SSSE3(n) \
+ unsigned int aom_highbd_masked_sad4x##n##_ssse3( \
+ const uint8_t *src8, int src_stride, const uint8_t *ref8, \
+ int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
+ int msk_stride, int invert_mask) { \
+ if (!invert_mask) \
+ return highbd_masked_sad4xh_ssse3(src8, src_stride, ref8, ref_stride, \
+ second_pred8, 4, msk, msk_stride, n); \
+ else \
+ return highbd_masked_sad4xh_ssse3(src8, src_stride, second_pred8, 4, \
+ ref8, ref_stride, msk, msk_stride, n); \
}
#if CONFIG_EXT_PARTITION
@@ -244,91 +281,124 @@ HIGHBD_MASKSADMXN_SSSE3(16, 8)
HIGHBD_MASKSADMXN_SSSE3(8, 16)
HIGHBD_MASKSADMXN_SSSE3(8, 8)
HIGHBD_MASKSADMXN_SSSE3(8, 4)
-
-#define HIGHBD_MASKSAD4XN_SSSE3(n) \
- unsigned int aom_highbd_masked_sad4x##n##_ssse3( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *msk, int msk_stride) { \
- return highbd_masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
- msk_stride, n); \
- }
-
HIGHBD_MASKSAD4XN_SSSE3(8)
HIGHBD_MASKSAD4XN_SSSE3(4)
-// For width a multiple of 8
-// Assumes values in m are <=64
static INLINE unsigned int highbd_masked_sad_ssse3(
- const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int width, int height) {
- int y, x;
- __m128i a, b, m, temp1, temp2;
- const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8_ptr);
- const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8_ptr);
+ const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int width, int height) {
+ const uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8);
+ const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8);
+ const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8);
+ int x, y;
__m128i res = _mm_setzero_si128();
- // For each row
+ const __m128i mask_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i round_const =
+ _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
+ const __m128i one = _mm_set1_epi16(1);
+
for (y = 0; y < height; y++) {
- // Covering the full width
for (x = 0; x < width; x += 8) {
- // Load a, b, m in xmm registers
- a = _mm_loadu_si128((const __m128i *)(a_ptr + x));
- b = _mm_loadu_si128((const __m128i *)(b_ptr + x));
- m = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(m_ptr + x)),
- _mm_setzero_si128());
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu16(a, b);
- temp2 = _mm_subs_epu16(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Add result of multiplying by m and add pairs together to running total
- res = _mm_add_epi32(res, _mm_madd_epi16(temp1, m));
+ const __m128i src = _mm_loadu_si128((const __m128i *)&src_ptr[x]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)&a_ptr[x]);
+ const __m128i b = _mm_loadu_si128((const __m128i *)&b_ptr[x]);
+ // Zero-extend mask to 16 bits
+ const __m128i m = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i *)&m_ptr[x]), _mm_setzero_si128());
+ const __m128i m_inv = _mm_sub_epi16(mask_max, m);
+
+ const __m128i data_l = _mm_unpacklo_epi16(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi16(m, m_inv);
+ __m128i pred_l = _mm_madd_epi16(data_l, mask_l);
+ pred_l = _mm_srai_epi32(_mm_add_epi32(pred_l, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi16(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi16(m, m_inv);
+ __m128i pred_r = _mm_madd_epi16(data_r, mask_r);
+ pred_r = _mm_srai_epi32(_mm_add_epi32(pred_r, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ // Note: the maximum value in pred_l/r is (2^bd)-1 < 2^15,
+ // so it is safe to do signed saturation here.
+ const __m128i pred = _mm_packs_epi32(pred_l, pred_r);
+      // There is no 16-bit SAD instruction, so we have to synthesize an
+      // 8-element SAD. We do this by keeping four 32-bit partial SADs and
+      // accumulating them at the end.
+ const __m128i diff = _mm_abs_epi16(_mm_sub_epi16(pred, src));
+ res = _mm_add_epi32(res, _mm_madd_epi16(diff, one));
}
- // Move onto the next row
+
+ src_ptr += src_stride;
a_ptr += a_stride;
b_ptr += b_stride;
m_ptr += m_stride;
}
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ // At this point, we have four 32-bit partial SADs stored in 'res'.
+ res = _mm_hadd_epi32(res, res);
+ res = _mm_hadd_epi32(res, res);
+ int sad = _mm_cvtsi128_si32(res);
+ return (sad + 31) >> 6;
}
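
The high-bit-depth path computes the same per-pixel blend, (m*a + (64 - m)*b + 32) >> 6, but with 16-bit samples via _mm_madd_epi16, and because there is no 16-bit SAD instruction it keeps four 32-bit partial sums in 'res' and folds them with two _mm_hadd_epi32 calls. An SSE2-only reduction that would give the same scalar (a sketch, with an illustrative name) is:

#include <emmintrin.h>

// Fold the four 32-bit lanes of 'v' into one scalar sum.
static int hsum_epi32_sketch(__m128i v) {
  v = _mm_add_epi32(v, _mm_srli_si128(v, 8));  // lanes {0+2, 1+3, x, x}
  v = _mm_add_epi32(v, _mm_srli_si128(v, 4));  // lane 0 = 0+1+2+3
  return _mm_cvtsi128_si32(v);
}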
static INLINE unsigned int highbd_masked_sad4xh_ssse3(
- const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
- const uint8_t *m_ptr, int m_stride, int height) {
+ const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, const uint8_t *m_ptr, int m_stride,
+ int height) {
+ const uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8);
+ const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8);
+ const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8);
int y;
- __m128i a, b, m, temp1, temp2;
- const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8_ptr);
- const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8_ptr);
__m128i res = _mm_setzero_si128();
- // Add the masked SAD for 2 rows at a time
+ const __m128i mask_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i round_const =
+ _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
+ const __m128i one = _mm_set1_epi16(1);
+
for (y = 0; y < height; y += 2) {
- // Load a, b, m in xmm registers
- a = highbd_width4_load_2rows(a_ptr, a_stride);
- b = highbd_width4_load_2rows(b_ptr, b_stride);
- temp1 = _mm_loadl_epi64((const __m128i *)m_ptr);
- temp2 = _mm_loadl_epi64((const __m128i *)(m_ptr + m_stride));
- m = _mm_unpacklo_epi8(_mm_unpacklo_epi32(temp1, temp2),
- _mm_setzero_si128());
-
- // Calculate the difference between a & b
- temp1 = _mm_subs_epu16(a, b);
- temp2 = _mm_subs_epu16(b, a);
- temp1 = _mm_or_si128(temp1, temp2);
-
- // Multiply by m and add together
- res = _mm_add_epi32(res, _mm_madd_epi16(temp1, m));
-
- // Move onto the next rows
+ const __m128i src = _mm_unpacklo_epi64(
+ _mm_loadl_epi64((const __m128i *)src_ptr),
+ _mm_loadl_epi64((const __m128i *)&src_ptr[src_stride]));
+ const __m128i a =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)a_ptr),
+ _mm_loadl_epi64((const __m128i *)&a_ptr[a_stride]));
+ const __m128i b =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)b_ptr),
+ _mm_loadl_epi64((const __m128i *)&b_ptr[b_stride]));
+ // Zero-extend mask to 16 bits
+ const __m128i m = _mm_unpacklo_epi8(
+ _mm_unpacklo_epi32(
+ _mm_cvtsi32_si128(*(const uint32_t *)m_ptr),
+ _mm_cvtsi32_si128(*(const uint32_t *)&m_ptr[m_stride])),
+ _mm_setzero_si128());
+ const __m128i m_inv = _mm_sub_epi16(mask_max, m);
+
+ const __m128i data_l = _mm_unpacklo_epi16(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi16(m, m_inv);
+ __m128i pred_l = _mm_madd_epi16(data_l, mask_l);
+ pred_l = _mm_srai_epi32(_mm_add_epi32(pred_l, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi16(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi16(m, m_inv);
+ __m128i pred_r = _mm_madd_epi16(data_r, mask_r);
+ pred_r = _mm_srai_epi32(_mm_add_epi32(pred_r, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i pred = _mm_packs_epi32(pred_l, pred_r);
+ const __m128i diff = _mm_abs_epi16(_mm_sub_epi16(pred, src));
+ res = _mm_add_epi32(res, _mm_madd_epi16(diff, one));
+
+ src_ptr += src_stride * 2;
a_ptr += a_stride * 2;
b_ptr += b_stride * 2;
m_ptr += m_stride * 2;
}
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- res = _mm_hadd_epi32(res, _mm_setzero_si128());
- // sad = (sad + 31) >> 6;
- return (_mm_cvtsi128_si32(res) + 31) >> 6;
+ res = _mm_hadd_epi32(res, res);
+ res = _mm_hadd_epi32(res, res);
+ int sad = _mm_cvtsi128_si32(res);
+ return (sad + 31) >> 6;
}
-#endif // CONFIG_HIGHBITDEPTH
+
+#endif
diff --git a/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c b/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
index fe14597f6..be9d437d2 100644
--- a/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
@@ -9,1940 +9,1003 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
-#include <assert.h>
#include <stdlib.h>
-#include <emmintrin.h>
+#include <string.h>
#include <tmmintrin.h>
#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/blend.h"
#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
#include "aom_dsp/aom_filter.h"
-
-// Half pixel shift
-#define HALF_PIXEL_OFFSET (BIL_SUBPEL_SHIFTS / 2)
-
-/*****************************************************************************
- * Horizontal additions
- *****************************************************************************/
-
-static INLINE int32_t hsum_epi32_si32(__m128i v_d) {
- v_d = _mm_hadd_epi32(v_d, v_d);
- v_d = _mm_hadd_epi32(v_d, v_d);
- return _mm_cvtsi128_si32(v_d);
-}
-
-static INLINE int64_t hsum_epi64_si64(__m128i v_q) {
- v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));
-#if ARCH_X86_64
- return _mm_cvtsi128_si64(v_q);
-#else
- {
- int64_t tmp;
- _mm_storel_epi64((__m128i *)&tmp, v_q);
- return tmp;
- }
-#endif
-}
-
-#if CONFIG_HIGHBITDEPTH
-static INLINE int64_t hsum_epi32_si64(__m128i v_d) {
- const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
- const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
- const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
- return hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
-}
-#endif // CONFIG_HIGHBITDEPTH
-
-static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
- uint32_t *sse, int w, int h) {
- int64_t sum64;
- uint64_t sse64;
-
- // Horizontal sum
- sum64 = hsum_epi32_si32(v_sum_d);
- sse64 = hsum_epi64_si64(v_sse_q);
-
- sum64 = (sum64 >= 0) ? sum64 : -sum64;
-
- // Round
- sum64 = ROUND_POWER_OF_TWO(sum64, 6);
- sse64 = ROUND_POWER_OF_TWO(sse64, 12);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute the variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-
-/*****************************************************************************
- * n*16 Wide versions
- *****************************************************************************/
-
-static INLINE unsigned int masked_variancewxh_ssse3(
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, unsigned int *sse) {
- int ii, jj;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- assert((w % 16) == 0);
-
- for (ii = 0; ii < h; ii++) {
- for (jj = 0; jj < w; jj += 16) {
- // Load inputs - 8 bits
- const __m128i v_a_b = _mm_loadu_si128((const __m128i *)(a + jj));
- const __m128i v_b_b = _mm_loadu_si128((const __m128i *)(b + jj));
- const __m128i v_m_b = _mm_loadu_si128((const __m128i *)(m + jj));
-
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_a0_w = _mm_unpacklo_epi8(v_a_b, v_zero);
- const __m128i v_b0_w = _mm_unpacklo_epi8(v_b_b, v_zero);
- const __m128i v_m0_w = _mm_unpacklo_epi8(v_m_b, v_zero);
- const __m128i v_a1_w = _mm_unpackhi_epi8(v_a_b, v_zero);
- const __m128i v_b1_w = _mm_unpackhi_epi8(v_b_b, v_zero);
- const __m128i v_m1_w = _mm_unpackhi_epi8(v_m_b, v_zero);
-
- // Difference: [-255, 255]
- const __m128i v_d0_w = _mm_sub_epi16(v_a0_w, v_b0_w);
- const __m128i v_d1_w = _mm_sub_epi16(v_a1_w, v_b1_w);
-
- // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
- const __m128i v_e0_w = _mm_mullo_epi16(v_d0_w, v_m0_w);
- const __m128i v_e0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
- const __m128i v_e1_w = _mm_mullo_epi16(v_d1_w, v_m1_w);
- const __m128i v_e1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
-
- // Squared error - using madd it's max (15 bits * 15 bits) * 2 = 31 bits
- const __m128i v_se0_d = _mm_madd_epi16(v_e0_w, v_e0_w);
- const __m128i v_se1_d = _mm_madd_epi16(v_e1_w, v_e1_w);
-
- // Sum of v_se{0,1}_d - 31 bits + 31 bits = 32 bits
- const __m128i v_se_d = _mm_add_epi32(v_se0_d, v_se1_d);
-
- // Unpack Squared error to 64 bits
- const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
- const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e0_d);
- v_sum_d = _mm_add_epi32(v_sum_d, v_e1_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
- }
-
- // Move on to next row
- a += a_stride;
- b += b_stride;
- m += m_stride;
- }
-
- return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
-}
-
-#define MASKED_VARWXH(W, H) \
- unsigned int aom_masked_variance##W##x##H##_ssse3( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- return masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, m_stride, W, \
- H, sse); \
+#include "aom_dsp/x86/synonyms.h"
+
+// For width a multiple of 16
+static void bilinear_filter(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int w, int h);
+
+static void bilinear_filter8xh(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int h);
+
+static void bilinear_filter4xh(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int h);
+
+// For width a multiple of 16
+static void masked_variance(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride, int width,
+ int height, unsigned int *sse, int *sum_);
+
+static void masked_variance8xh(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, const uint8_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride, int height,
+ unsigned int *sse, int *sum_);
+
+static void masked_variance4xh(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, const uint8_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride, int height,
+ unsigned int *sse, int *sum_);
+
+#define MASK_SUBPIX_VAR_SSSE3(W, H) \
+ unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ int sum; \
+ uint8_t temp[(H + 1) * W]; \
+ \
+ bilinear_filter(src, src_stride, xoffset, yoffset, temp, W, H); \
+ \
+ if (!invert_mask) \
+ masked_variance(ref, ref_stride, temp, W, second_pred, W, msk, \
+ msk_stride, W, H, sse, &sum); \
+ else \
+ masked_variance(ref, ref_stride, second_pred, W, temp, W, msk, \
+ msk_stride, W, H, sse, &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ }
+
+#define MASK_SUBPIX_VAR8XH_SSSE3(H) \
+ unsigned int aom_masked_sub_pixel_variance8x##H##_ssse3( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ int sum; \
+ uint8_t temp[(H + 1) * 8]; \
+ \
+ bilinear_filter8xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ masked_variance8xh(ref, ref_stride, temp, second_pred, msk, msk_stride, \
+ H, sse, &sum); \
+ else \
+ masked_variance8xh(ref, ref_stride, second_pred, temp, msk, msk_stride, \
+ H, sse, &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (8 * H)); \
+ }
+
+#define MASK_SUBPIX_VAR4XH_SSSE3(H) \
+ unsigned int aom_masked_sub_pixel_variance4x##H##_ssse3( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ int sum; \
+ uint8_t temp[(H + 1) * 4]; \
+ \
+ bilinear_filter4xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ masked_variance4xh(ref, ref_stride, temp, second_pred, msk, msk_stride, \
+ H, sse, &sum); \
+ else \
+ masked_variance4xh(ref, ref_stride, second_pred, temp, msk, msk_stride, \
+ H, sse, &sum); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
}
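
Each of these wrappers ends with *sse - sum*sum / (W*H). The (int64_t) cast on the square is needed because, for example, at 64x64 the sum of differences can approach 64 * 64 * 255 (about 1.04 million), whose square is roughly 1.09e12 and overflows 32 bits. A sketch of that final step, with a hypothetical helper name:

#include <stdint.h>

// Final variance computation used by the wrappers above (sketch only).
static unsigned int variance_from_sums(unsigned int sse, int sum, int w, int h) {
  return sse - (unsigned int)(((int64_t)sum * sum) / (w * h));
}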
-MASKED_VARWXH(16, 8)
-MASKED_VARWXH(16, 16)
-MASKED_VARWXH(16, 32)
-MASKED_VARWXH(32, 16)
-MASKED_VARWXH(32, 32)
-MASKED_VARWXH(32, 64)
-MASKED_VARWXH(64, 32)
-MASKED_VARWXH(64, 64)
#if CONFIG_EXT_PARTITION
-MASKED_VARWXH(64, 128)
-MASKED_VARWXH(128, 64)
-MASKED_VARWXH(128, 128)
-#endif // CONFIG_EXT_PARTITION
-
-/*****************************************************************************
- * 8 Wide versions
- *****************************************************************************/
-
-static INLINE unsigned int masked_variance8xh_ssse3(
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,
- const uint8_t *m, int m_stride, int h, unsigned int *sse) {
- int ii;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- for (ii = 0; ii < h; ii++) {
- // Load inputs - 8 bits
- const __m128i v_a_b = _mm_loadl_epi64((const __m128i *)a);
- const __m128i v_b_b = _mm_loadl_epi64((const __m128i *)b);
- const __m128i v_m_b = _mm_loadl_epi64((const __m128i *)m);
-
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_a_w = _mm_unpacklo_epi8(v_a_b, v_zero);
- const __m128i v_b_w = _mm_unpacklo_epi8(v_b_b, v_zero);
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-255, 255]
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
- const __m128i v_e_w = _mm_mullo_epi16(v_d_w, v_m_w);
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - using madd it's max (15 bits * 15 bits) * 2 = 31 bits
- const __m128i v_se_d = _mm_madd_epi16(v_e_w, v_e_w);
-
- // Unpack Squared error to 64 bits
- const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
- const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
-
- // Move on to next row
- a += a_stride;
- b += b_stride;
- m += m_stride;
- }
-
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
-}
-
-#define MASKED_VAR8XH(H) \
- unsigned int aom_masked_variance8x##H##_ssse3( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- return masked_variance8xh_ssse3(a, a_stride, b, b_stride, m, m_stride, H, \
- sse); \
- }
-
-MASKED_VAR8XH(4)
-MASKED_VAR8XH(8)
-MASKED_VAR8XH(16)
-
-/*****************************************************************************
- * 4 Wide versions
- *****************************************************************************/
-
-static INLINE unsigned int masked_variance4xh_ssse3(
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride,
- const uint8_t *m, int m_stride, int h, unsigned int *sse) {
- int ii;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- assert((h % 2) == 0);
-
- for (ii = 0; ii < h / 2; ii++) {
- // Load 2 input rows - 8 bits
- const __m128i v_a0_b = _mm_cvtsi32_si128(*(const uint32_t *)a);
- const __m128i v_b0_b = _mm_cvtsi32_si128(*(const uint32_t *)b);
- const __m128i v_m0_b = _mm_cvtsi32_si128(*(const uint32_t *)m);
- const __m128i v_a1_b = _mm_cvtsi32_si128(*(const uint32_t *)(a + a_stride));
- const __m128i v_b1_b = _mm_cvtsi32_si128(*(const uint32_t *)(b + b_stride));
- const __m128i v_m1_b = _mm_cvtsi32_si128(*(const uint32_t *)(m + m_stride));
-
- // Interleave 2 rows into a single register
- const __m128i v_a_b = _mm_unpacklo_epi32(v_a0_b, v_a1_b);
- const __m128i v_b_b = _mm_unpacklo_epi32(v_b0_b, v_b1_b);
- const __m128i v_m_b = _mm_unpacklo_epi32(v_m0_b, v_m1_b);
-
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_a_w = _mm_unpacklo_epi8(v_a_b, v_zero);
- const __m128i v_b_w = _mm_unpacklo_epi8(v_b_b, v_zero);
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-255, 255]
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
- const __m128i v_e_w = _mm_mullo_epi16(v_d_w, v_m_w);
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - using madd it's max (15 bits * 15 bits) * 2 = 31 bits
- const __m128i v_se_d = _mm_madd_epi16(v_e_w, v_e_w);
-
- // Unpack Squared error to 64 bits
- const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
- const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
-
- // Move on to next 2 row
- a += a_stride * 2;
- b += b_stride * 2;
- m += m_stride * 2;
- }
-
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
-}
-
-#define MASKED_VAR4XH(H) \
- unsigned int aom_masked_variance4x##H##_ssse3( \
- const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- return masked_variance4xh_ssse3(a, a_stride, b, b_stride, m, m_stride, H, \
- sse); \
- }
-
-MASKED_VAR4XH(4)
-MASKED_VAR4XH(8)
-
-#if CONFIG_HIGHBITDEPTH
-
-// Main calculation for n*8 wide blocks
-static INLINE void highbd_masked_variance64_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, int64_t *sum, uint64_t *sse) {
- int ii, jj;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- assert((w % 8) == 0);
-
- for (ii = 0; ii < h; ii++) {
- for (jj = 0; jj < w; jj += 8) {
- // Load inputs - 8 bits
- const __m128i v_a_w = _mm_loadu_si128((const __m128i *)(a + jj));
- const __m128i v_b_w = _mm_loadu_si128((const __m128i *)(b + jj));
- const __m128i v_m_b = _mm_loadl_epi64((const __m128i *)(m + jj));
-
- // Unpack m to 16 bits - still containing max 8 bits
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-4095, 4095]
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-4095, 4095] * [0, 64] => sum of 2 of these fits in 19 bits
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
- const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
- const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
- const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
- const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
- const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
- const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
- const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
- // Square and sum the errors -> 36bits * 4 = 38bits
- __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
- v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
- v_elo1_d = _mm_srli_si128(v_elo_d, 4);
- v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
- v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
- v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
- v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
- v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
- v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
- v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_q);
- }
-
- // Move on to next row
- a += a_stride;
- b += b_stride;
- m += m_stride;
- }
-
- // Horizontal sum
- *sum = hsum_epi32_si64(v_sum_d);
- *sse = hsum_epi64_si64(v_sse_q);
-
- // Round
- *sum = (*sum >= 0) ? *sum : -*sum;
- *sum = ROUND_POWER_OF_TWO(*sum, 6);
- *sse = ROUND_POWER_OF_TWO(*sse, 12);
-}
-
-// Main calculation for 4 wide blocks
-static INLINE void highbd_masked_variance64_4wide_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int h, int64_t *sum, uint64_t *sse) {
- int ii;
-
- const __m128i v_zero = _mm_setzero_si128();
-
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
-
- assert((h % 2) == 0);
-
- for (ii = 0; ii < h / 2; ii++) {
- // Load 2 input rows - 8 bits
- const __m128i v_a0_w = _mm_loadl_epi64((const __m128i *)a);
- const __m128i v_b0_w = _mm_loadl_epi64((const __m128i *)b);
- const __m128i v_m0_b = _mm_cvtsi32_si128(*(const uint32_t *)m);
- const __m128i v_a1_w = _mm_loadl_epi64((const __m128i *)(a + a_stride));
- const __m128i v_b1_w = _mm_loadl_epi64((const __m128i *)(b + b_stride));
- const __m128i v_m1_b = _mm_cvtsi32_si128(*(const uint32_t *)(m + m_stride));
-
- // Interleave 2 rows into a single register
- const __m128i v_a_w = _mm_unpacklo_epi64(v_a0_w, v_a1_w);
- const __m128i v_b_w = _mm_unpacklo_epi64(v_b0_w, v_b1_w);
- const __m128i v_m_b = _mm_unpacklo_epi32(v_m0_b, v_m1_b);
-
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-4095, 4095]
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-4095, 4095] * [0, 64] => fits in 19 bits (incld sign bit)
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
- const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
- const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
- const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
- const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
- const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
- const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
- const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
- // Square and sum the errors -> 36bits * 4 = 38bits
- __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
- v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
- v_elo1_d = _mm_srli_si128(v_elo_d, 4);
- v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
- v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
- v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
- v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
- v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
- v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
- v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
-
- // Accumulate
- v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
- v_sse_q = _mm_add_epi64(v_sse_q, v_se_q);
-
- // Move on to next row
- a += a_stride * 2;
- b += b_stride * 2;
- m += m_stride * 2;
- }
-
- // Horizontal sum
- *sum = hsum_epi32_si32(v_sum_d);
- *sse = hsum_epi64_si64(v_sse_q);
-
- // Round
- *sum = (*sum >= 0) ? *sum : -*sum;
- *sum = ROUND_POWER_OF_TWO(*sum, 6);
- *sse = ROUND_POWER_OF_TWO(*sse, 12);
-}
-
-static INLINE unsigned int highbd_masked_variancewxh_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, unsigned int *sse) {
- uint64_t sse64;
- int64_t sum64;
-
- if (w == 4)
- highbd_masked_variance64_4wide_ssse3(a, a_stride, b, b_stride, m, m_stride,
- h, &sum64, &sse64);
- else
- highbd_masked_variance64_ssse3(a, a_stride, b, b_stride, m, m_stride, w, h,
- &sum64, &sse64);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute and return variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-
-static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, unsigned int *sse) {
- uint64_t sse64;
- int64_t sum64;
-
- if (w == 4)
- highbd_masked_variance64_4wide_ssse3(a, a_stride, b, b_stride, m, m_stride,
- h, &sum64, &sse64);
- else
- highbd_masked_variance64_ssse3(a, a_stride, b, b_stride, m, m_stride, w, h,
- &sum64, &sse64);
-
- // Normalise
- sum64 = ROUND_POWER_OF_TWO(sum64, 2);
- sse64 = ROUND_POWER_OF_TWO(sse64, 4);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute and return variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-
-static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
- const uint16_t *a, int a_stride, const uint16_t *b, int b_stride,
- const uint8_t *m, int m_stride, int w, int h, unsigned int *sse) {
- uint64_t sse64;
- int64_t sum64;
-
- if (w == 4)
- highbd_masked_variance64_4wide_ssse3(a, a_stride, b, b_stride, m, m_stride,
- h, &sum64, &sse64);
- else
- highbd_masked_variance64_ssse3(a, a_stride, b, b_stride, m, m_stride, w, h,
- &sum64, &sse64);
-
- sum64 = ROUND_POWER_OF_TWO(sum64, 4);
- sse64 = ROUND_POWER_OF_TWO(sse64, 8);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute and return variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-
-#define HIGHBD_MASKED_VARWXH(W, H) \
- unsigned int aom_highbd_masked_variance##W##x##H##_ssse3( \
- const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
- uint16_t *b = CONVERT_TO_SHORTPTR(b8); \
- return highbd_masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, \
- m_stride, W, H, sse); \
- } \
- \
- unsigned int aom_highbd_10_masked_variance##W##x##H##_ssse3( \
- const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
- uint16_t *b = CONVERT_TO_SHORTPTR(b8); \
- return highbd_10_masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, \
- m_stride, W, H, sse); \
- } \
- \
- unsigned int aom_highbd_12_masked_variance##W##x##H##_ssse3( \
- const uint8_t *a8, int a_stride, const uint8_t *b8, int b_stride, \
- const uint8_t *m, int m_stride, unsigned int *sse) { \
- uint16_t *a = CONVERT_TO_SHORTPTR(a8); \
- uint16_t *b = CONVERT_TO_SHORTPTR(b8); \
- return highbd_12_masked_variancewxh_ssse3(a, a_stride, b, b_stride, m, \
- m_stride, W, H, sse); \
- }
-
-HIGHBD_MASKED_VARWXH(4, 4)
-HIGHBD_MASKED_VARWXH(4, 8)
-HIGHBD_MASKED_VARWXH(8, 4)
-HIGHBD_MASKED_VARWXH(8, 8)
-HIGHBD_MASKED_VARWXH(8, 16)
-HIGHBD_MASKED_VARWXH(16, 8)
-HIGHBD_MASKED_VARWXH(16, 16)
-HIGHBD_MASKED_VARWXH(16, 32)
-HIGHBD_MASKED_VARWXH(32, 16)
-HIGHBD_MASKED_VARWXH(32, 32)
-HIGHBD_MASKED_VARWXH(32, 64)
-HIGHBD_MASKED_VARWXH(64, 32)
-HIGHBD_MASKED_VARWXH(64, 64)
-#if CONFIG_EXT_PARTITION
-HIGHBD_MASKED_VARWXH(64, 128)
-HIGHBD_MASKED_VARWXH(128, 64)
-HIGHBD_MASKED_VARWXH(128, 128)
-#endif // CONFIG_EXT_PARTITION
-
+MASK_SUBPIX_VAR_SSSE3(128, 128)
+MASK_SUBPIX_VAR_SSSE3(128, 64)
+MASK_SUBPIX_VAR_SSSE3(64, 128)
#endif
-
-//////////////////////////////////////////////////////////////////////////////
-// Sub pixel versions
-//////////////////////////////////////////////////////////////////////////////
-
-typedef __m128i (*filter_fn_t)(__m128i v_a_b, __m128i v_b_b,
- __m128i v_filter_b);
-
-static INLINE __m128i apply_filter_avg(const __m128i v_a_b, const __m128i v_b_b,
- const __m128i v_filter_b) {
- (void)v_filter_b;
- return _mm_avg_epu8(v_a_b, v_b_b);
-}
-
-static INLINE __m128i apply_filter(const __m128i v_a_b, const __m128i v_b_b,
- const __m128i v_filter_b) {
- const __m128i v_rounding_w = _mm_set1_epi16(1 << (FILTER_BITS - 1));
- __m128i v_input_lo_b = _mm_unpacklo_epi8(v_a_b, v_b_b);
- __m128i v_input_hi_b = _mm_unpackhi_epi8(v_a_b, v_b_b);
- __m128i v_temp0_w = _mm_maddubs_epi16(v_input_lo_b, v_filter_b);
- __m128i v_temp1_w = _mm_maddubs_epi16(v_input_hi_b, v_filter_b);
- __m128i v_res_lo_w =
- _mm_srai_epi16(_mm_add_epi16(v_temp0_w, v_rounding_w), FILTER_BITS);
- __m128i v_res_hi_w =
- _mm_srai_epi16(_mm_add_epi16(v_temp1_w, v_rounding_w), FILTER_BITS);
- return _mm_packus_epi16(v_res_lo_w, v_res_hi_w);
-}
-
-// Apply the filter to the contents of the lower half of a and b
-static INLINE void apply_filter_lo(const __m128i v_a_lo_b,
- const __m128i v_b_lo_b,
- const __m128i v_filter_b, __m128i *v_res_w) {
- const __m128i v_rounding_w = _mm_set1_epi16(1 << (FILTER_BITS - 1));
- __m128i v_input_b = _mm_unpacklo_epi8(v_a_lo_b, v_b_lo_b);
- __m128i v_temp0_w = _mm_maddubs_epi16(v_input_b, v_filter_b);
- *v_res_w =
- _mm_srai_epi16(_mm_add_epi16(v_temp0_w, v_rounding_w), FILTER_BITS);
-}
-
-static void sum_and_sse(const __m128i v_a_b, const __m128i v_b_b,
- const __m128i v_m_b, __m128i *v_sum_d,
- __m128i *v_sse_q) {
- const __m128i v_zero = _mm_setzero_si128();
- // Unpack to 16 bits - still containing max 8 bits
- const __m128i v_a0_w = _mm_unpacklo_epi8(v_a_b, v_zero);
- const __m128i v_b0_w = _mm_unpacklo_epi8(v_b_b, v_zero);
- const __m128i v_m0_w = _mm_unpacklo_epi8(v_m_b, v_zero);
- const __m128i v_a1_w = _mm_unpackhi_epi8(v_a_b, v_zero);
- const __m128i v_b1_w = _mm_unpackhi_epi8(v_b_b, v_zero);
- const __m128i v_m1_w = _mm_unpackhi_epi8(v_m_b, v_zero);
-
- // Difference: [-255, 255]
- const __m128i v_d0_w = _mm_sub_epi16(v_a0_w, v_b0_w);
- const __m128i v_d1_w = _mm_sub_epi16(v_a1_w, v_b1_w);
-
- // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
- const __m128i v_e0_w = _mm_mullo_epi16(v_d0_w, v_m0_w);
- const __m128i v_e0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
- const __m128i v_e1_w = _mm_mullo_epi16(v_d1_w, v_m1_w);
- const __m128i v_e1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
-
- // Squared error - using madd it's max (15 bits * 15 bits) * 2 = 31 bits
- const __m128i v_se0_d = _mm_madd_epi16(v_e0_w, v_e0_w);
- const __m128i v_se1_d = _mm_madd_epi16(v_e1_w, v_e1_w);
-
- // Sum of v_se{0,1}_d - 31 bits + 31 bits = 32 bits
- const __m128i v_se_d = _mm_add_epi32(v_se0_d, v_se1_d);
-
- // Unpack Squared error to 64 bits
- const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
- const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
-
- // Accumulate
- *v_sum_d = _mm_add_epi32(*v_sum_d, v_e0_d);
- *v_sum_d = _mm_add_epi32(*v_sum_d, v_e1_d);
- *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_lo_q);
- *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_hi_q);
-}
-
-// Functions for width (W) >= 16
-unsigned int aom_masked_subpel_varWxH_xzero(const uint8_t *src, int src_stride,
- int yoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int w, int h,
- filter_fn_t filter_fn) {
+MASK_SUBPIX_VAR_SSSE3(64, 64)
+MASK_SUBPIX_VAR_SSSE3(64, 32)
+MASK_SUBPIX_VAR_SSSE3(32, 64)
+MASK_SUBPIX_VAR_SSSE3(32, 32)
+MASK_SUBPIX_VAR_SSSE3(32, 16)
+MASK_SUBPIX_VAR_SSSE3(16, 32)
+MASK_SUBPIX_VAR_SSSE3(16, 16)
+MASK_SUBPIX_VAR_SSSE3(16, 8)
+MASK_SUBPIX_VAR8XH_SSSE3(16)
+MASK_SUBPIX_VAR8XH_SSSE3(8)
+MASK_SUBPIX_VAR8XH_SSSE3(4)
+MASK_SUBPIX_VAR4XH_SSSE3(8)
+MASK_SUBPIX_VAR4XH_SSSE3(4)
+
+static INLINE __m128i filter_block(const __m128i a, const __m128i b,
+ const __m128i filter) {
+ __m128i v0 = _mm_unpacklo_epi8(a, b);
+ v0 = _mm_maddubs_epi16(v0, filter);
+ v0 = xx_roundn_epu16(v0, FILTER_BITS);
+
+ __m128i v1 = _mm_unpackhi_epi8(a, b);
+ v1 = _mm_maddubs_epi16(v1, filter);
+ v1 = xx_roundn_epu16(v1, FILTER_BITS);
+
+ return _mm_packus_epi16(v0, v1);
+}
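
Per pixel, filter_block applies a 2-tap filter and rounds by FILTER_BITS (declared in aom_dsp/aom_filter.h, which this file includes); assuming the two taps sum to 1 << FILTER_BITS, as the bilinear_filters_2t entries do, a scalar equivalent of one output pixel is:

#include <stdint.h>

// Scalar model of one filter_block output pixel (sketch only).
static uint8_t filter_pixel_sketch(uint8_t a, uint8_t b, const uint8_t f[2]) {
  // _mm_maddubs_epi16 forms f[0]*a + f[1]*b; xx_roundn_epu16 adds the
  // rounding constant and shifts right by FILTER_BITS.
  return (uint8_t)((f[0] * a + f[1] * b + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
}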
+
+static void bilinear_filter(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int w, int h) {
int i, j;
- __m128i v_src0_b, v_src1_b, v_res_b, v_dst_b, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filter_b = _mm_set1_epi16(
- (bilinear_filters_2t[yoffset][1] << 8) + bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- for (j = 0; j < w; j += 16) {
- // Load the first row ready
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + j));
- // Process 2 rows at a time
- for (i = 0; i < h; i += 2) {
- // Load the next row apply the filter
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + j + src_stride));
- v_res_b = filter_fn(v_src0_b, v_src1_b, v_filter_b);
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + j));
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next row apply the filter
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + j + src_stride * 2));
- v_res_b = filter_fn(v_src1_b, v_src0_b, v_filter_b);
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + j + dst_stride));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + j + msk_stride));
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next block of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 16) {
+ __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ _mm_storeu_si128((__m128i *)&b[j], x);
+ }
+ src += src_stride;
+ b += w;
}
- // Reset to the top of the block
- src -= src_stride * h;
- dst -= dst_stride * h;
- msk -= msk_stride * h;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
-}
-unsigned int aom_masked_subpel_varWxH_yzero(const uint8_t *src, int src_stride,
- int xoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int w, int h,
- filter_fn_t filter_fn) {
- int i, j;
- __m128i v_src0_b, v_src1_b, v_res_b, v_dst_b, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filter_b = _mm_set1_epi16(
- (bilinear_filters_2t[xoffset][1] << 8) + bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i++) {
- for (j = 0; j < w; j += 16) {
- // Load this row and one below & apply the filter to them
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + j));
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + j + 1));
- v_res_b = filter_fn(v_src0_b, v_src1_b, v_filter_b);
-
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + j));
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+ } else if (xoffset == 4) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 16) {
+ __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ __m128i y = _mm_loadu_si128((__m128i *)&src[j + 16]);
+ __m128i z = _mm_alignr_epi8(y, x, 1);
+ _mm_storeu_si128((__m128i *)&b[j], _mm_avg_epu8(x, z));
+ }
+ src += src_stride;
+ b += w;
}
- src += src_stride;
- dst += dst_stride;
- msk += msk_stride;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
-}
-unsigned int aom_masked_subpel_varWxH_xnonzero_ynonzero(
- const uint8_t *src, int src_stride, int xoffset, int yoffset,
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int w, int h, filter_fn_t xfilter_fn,
- filter_fn_t yfilter_fn) {
- int i, j;
- __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b;
- __m128i v_filtered0_b, v_filtered1_b, v_res_b, v_dst_b, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filterx_b = _mm_set1_epi16(
- (bilinear_filters_2t[xoffset][1] << 8) + bilinear_filters_2t[xoffset][0]);
- const __m128i v_filtery_b = _mm_set1_epi16(
- (bilinear_filters_2t[yoffset][1] << 8) + bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (j = 0; j < w; j += 16) {
- // Load the first row ready
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + j));
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + j + 1));
- v_filtered0_b = xfilter_fn(v_src0_b, v_src1_b, v_filterx_b);
- // Process 2 rows at a time
- for (i = 0; i < h; i += 2) {
- // Load the next row & apply the filter
- v_src2_b = _mm_loadu_si128((const __m128i *)(src + src_stride + j));
- v_src3_b = _mm_loadu_si128((const __m128i *)(src + src_stride + j + 1));
- v_filtered1_b = xfilter_fn(v_src2_b, v_src3_b, v_filterx_b);
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + j));
- // Complete the calculation for this row and add it to the running total
- v_res_b = yfilter_fn(v_filtered0_b, v_filtered1_b, v_filtery_b);
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next row & apply the filter
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 2 + j));
- v_src1_b =
- _mm_loadu_si128((const __m128i *)(src + src_stride * 2 + j + 1));
- v_filtered0_b = xfilter_fn(v_src0_b, v_src1_b, v_filterx_b);
- // Load the dst and msk for the variance calculation
- v_dst_b = _mm_loadu_si128((const __m128i *)(dst + dst_stride + j));
- v_msk_b = _mm_loadu_si128((const __m128i *)(msk + msk_stride + j));
- // Complete the calculation for this row and add it to the running total
- v_res_b = yfilter_fn(v_filtered1_b, v_filtered0_b, v_filtery_b);
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next block of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
+ } else {
+ uint8_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi16(hfilter[0] | (hfilter[1] << 8));
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 16) {
+ const __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ const __m128i y = _mm_loadu_si128((__m128i *)&src[j + 16]);
+ const __m128i z = _mm_alignr_epi8(y, x, 1);
+ const __m128i res = filter_block(x, z, hfilter_vec);
+ _mm_storeu_si128((__m128i *)&b[j], res);
+ }
+
+ src += src_stride;
+ b += w;
}
- // Reset to the top of the block
- src -= src_stride * h;
- dst -= dst_stride * h;
- msk -= msk_stride * h;
}
- return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
-}
-// Note order in which rows loaded xmm[127:96] = row 1, xmm[95:64] = row 2,
-// xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int aom_masked_subpel_var4xH_xzero(const uint8_t *src, int src_stride,
- int yoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int h) {
- int i;
- __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered1_w, v_filtered2_w;
- __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b;
- __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_res_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_b = _mm_set1_epi16((bilinear_filters_2t[yoffset][1] << 8) +
- bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first row of src data ready
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- for (i = 0; i < h; i += 4) {
- // Load the rest of the source data for these rows
- v_src1_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- v_src1_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
- v_src2_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- v_src3_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 3));
- v_src3_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
- v_src0_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 4));
- // Load the dst data
- v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 0));
- v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 1));
- v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
- v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 2));
- v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 3));
- v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
- v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
- // Load the mask data
- v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 0));
- v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 1));
- v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
- v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 2));
- v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 3));
- v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
- v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
- // Apply the y filter
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src3_b, v_src1_b);
- v_src2_b =
- _mm_or_si128(_mm_slli_si128(v_src1_b, 4),
- _mm_and_si128(v_src0_b, _mm_setr_epi32(-1, 0, 0, 0)));
- v_res_b = _mm_avg_epu8(v_src1_b, v_src2_b);
- } else {
- v_src2_b =
- _mm_or_si128(_mm_slli_si128(v_src1_b, 4),
- _mm_and_si128(v_src2_b, _mm_setr_epi32(-1, 0, 0, 0)));
- apply_filter_lo(v_src1_b, v_src2_b, v_filter_b, &v_filtered1_w);
- v_src2_b =
- _mm_or_si128(_mm_slli_si128(v_src3_b, 4),
- _mm_and_si128(v_src0_b, _mm_setr_epi32(-1, 0, 0, 0)));
- apply_filter_lo(v_src3_b, v_src2_b, v_filter_b, &v_filtered2_w);
- v_res_b = _mm_packus_epi16(v_filtered2_w, v_filtered1_w);
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 16) {
+ __m128i x = _mm_loadu_si128((__m128i *)&dst[j]);
+ __m128i y = _mm_loadu_si128((__m128i *)&dst[j + w]);
+ _mm_storeu_si128((__m128i *)&dst[j], _mm_avg_epu8(x, y));
+ }
+ dst += w;
}
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 4;
- dst += dst_stride * 4;
- msk += msk_stride * 4;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
-}
-
-// Note order in which rows loaded xmm[127:64] = row 1, xmm[63:0] = row 2
-unsigned int aom_masked_subpel_var8xH_xzero(const uint8_t *src, int src_stride,
- int yoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int h) {
- int i;
- __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w, v_res_b;
- __m128i v_dst_b = _mm_setzero_si128();
- __m128i v_msk_b = _mm_setzero_si128();
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_b = _mm_set1_epi16((bilinear_filters_2t[yoffset][1] << 8) +
- bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first row of src data ready
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- for (i = 0; i < h; i += 2) {
- if (yoffset == HALF_PIXEL_OFFSET) {
- // Load the rest of the source data for these rows
- v_src1_b = _mm_or_si128(
- _mm_slli_si128(v_src0_b, 8),
- _mm_loadl_epi64((const __m128i *)(src + src_stride * 1)));
- v_src0_b = _mm_or_si128(
- _mm_slli_si128(v_src1_b, 8),
- _mm_loadl_epi64((const __m128i *)(src + src_stride * 2)));
- // Apply the y filter
- v_res_b = _mm_avg_epu8(v_src1_b, v_src0_b);
- } else {
- // Load the data and apply the y filter
- v_src1_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- apply_filter_lo(v_src0_b, v_src1_b, v_filter_b, &v_filtered0_w);
- v_src0_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- apply_filter_lo(v_src1_b, v_src0_b, v_filter_b, &v_filtered1_w);
- v_res_b = _mm_packus_epi16(v_filtered1_w, v_filtered0_w);
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi16(vfilter[0] | (vfilter[1] << 8));
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 16) {
+ const __m128i x = _mm_loadu_si128((__m128i *)&dst[j]);
+ const __m128i y = _mm_loadu_si128((__m128i *)&dst[j + w]);
+ const __m128i res = filter_block(x, y, vfilter_vec);
+ _mm_storeu_si128((__m128i *)&dst[j], res);
+ }
+
+ dst += w;
}
- // Load the dst data
- v_dst_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)));
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
}
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
}
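+// Note on the vertical pass above: it runs in place on the horizontally
+// filtered intermediate data, building output row i from intermediate rows
+// i and i + 1 (dst[j] and dst[j + w]), which is why the horizontal pass has
+// to leave h + 1 rows of intermediate data in 'dst'.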
-// Note order in which rows loaded xmm[127:96] = row 1, xmm[95:64] = row 2,
-// xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int aom_masked_subpel_var4xH_yzero(const uint8_t *src, int src_stride,
- int xoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int h) {
- int i;
- __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered0_w, v_filtered2_w;
- __m128i v_src0_shift_b, v_src1_shift_b, v_src2_shift_b, v_src3_shift_b;
- __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b;
- __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_res_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_b = _mm_set1_epi16((bilinear_filters_2t[xoffset][1] << 8) +
- bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i += 4) {
- // Load the src data
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- v_src0_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- v_src2_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- v_src0_shift_b = _mm_unpacklo_epi32(v_src1_shift_b, v_src0_shift_b);
- v_src2_shift_b = _mm_srli_si128(v_src2_b, 1);
- v_src3_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 3));
- v_src2_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
- v_src3_shift_b = _mm_srli_si128(v_src3_b, 1);
- v_src2_shift_b = _mm_unpacklo_epi32(v_src3_shift_b, v_src2_shift_b);
- // Load the dst data
- v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 0));
- v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 1));
- v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
- v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 2));
- v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 3));
- v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
- v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
- // Load the mask data
- v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 0));
- v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 1));
- v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
- v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 2));
- v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 3));
- v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
- v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src0_b = _mm_unpacklo_epi64(v_src2_b, v_src0_b);
- v_src0_shift_b = _mm_unpacklo_epi64(v_src2_shift_b, v_src0_shift_b);
- v_res_b = _mm_avg_epu8(v_src0_b, v_src0_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filter_b, &v_filtered0_w);
- apply_filter_lo(v_src2_b, v_src2_shift_b, v_filter_b, &v_filtered2_w);
- v_res_b = _mm_packus_epi16(v_filtered2_w, v_filtered0_w);
- }
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 4;
- dst += dst_stride * 4;
- msk += msk_stride * 4;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
-}
+static INLINE __m128i filter_block_2rows(const __m128i a0, const __m128i b0,
+ const __m128i a1, const __m128i b1,
+ const __m128i filter) {
+ __m128i v0 = _mm_unpacklo_epi8(a0, b0);
+ v0 = _mm_maddubs_epi16(v0, filter);
+ v0 = xx_roundn_epu16(v0, FILTER_BITS);
-unsigned int aom_masked_subpel_var8xH_yzero(const uint8_t *src, int src_stride,
- int xoffset, const uint8_t *dst,
- int dst_stride, const uint8_t *msk,
- int msk_stride, unsigned int *sse,
- int h) {
- int i;
- __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w;
- __m128i v_src0_shift_b, v_src1_shift_b, v_res_b, v_dst_b, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_b = _mm_set1_epi16((bilinear_filters_2t[xoffset][1] << 8) +
- bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i += 2) {
- // Load the src data
- v_src0_b = _mm_loadu_si128((const __m128i *)(src));
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + src_stride));
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
- v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
- v_res_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filter_b, &v_filtered0_w);
- apply_filter_lo(v_src1_b, v_src1_shift_b, v_filter_b, &v_filtered1_w);
- v_res_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
- }
- // Load the dst data
- v_dst_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
+ __m128i v1 = _mm_unpacklo_epi8(a1, b1);
+ v1 = _mm_maddubs_epi16(v1, filter);
+ v1 = xx_roundn_epu16(v1, FILTER_BITS);
+
+ return _mm_packus_epi16(v0, v1);
}
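+// filter_block_2rows() expects the two taps from bilinear_filters_2t[offset]
+// to be packed with _mm_set1_epi16(f0 | (f1 << 8)), i.e. f0 in the even
+// bytes and f1 in the odd bytes of the register. _mm_unpacklo_epi8(a, b)
+// interleaves the two tap inputs so that _mm_maddubs_epi16 yields
+// a[i] * f0 + b[i] * f1 per pixel, and xx_roundn_epu16 applies the rounding
+// shift by FILTER_BITS before the two rows are packed back down to 8 bits.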
-// Note order in which rows loaded xmm[127:96] = row 1, xmm[95:64] = row 2,
-// xmm[63:32] = row 3, xmm[31:0] = row 4
-unsigned int aom_masked_subpel_var4xH_xnonzero_ynonzero(
- const uint8_t *src, int src_stride, int xoffset, int yoffset,
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int h) {
+static void bilinear_filter8xh(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int h) {
int i;
- __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered0_w, v_filtered2_w;
- __m128i v_src0_shift_b, v_src1_shift_b, v_src2_shift_b, v_src3_shift_b;
- __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b, v_temp_b;
- __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_extra_row_b, v_res_b;
- __m128i v_xres_b[2];
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filterx_b = _mm_set1_epi16((bilinear_filters_2t[xoffset][1] << 8) +
- bilinear_filters_2t[xoffset][0]);
- __m128i v_filtery_b = _mm_set1_epi16((bilinear_filters_2t[yoffset][1] << 8) +
- bilinear_filters_2t[yoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i += 4) {
- // Load the src data
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- v_src0_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- v_src2_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- v_src0_shift_b = _mm_unpacklo_epi32(v_src1_shift_b, v_src0_shift_b);
- v_src2_shift_b = _mm_srli_si128(v_src2_b, 1);
- v_src3_b = _mm_loadl_epi64((const __m128i *)(src + src_stride * 3));
- v_src2_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
- v_src3_shift_b = _mm_srli_si128(v_src3_b, 1);
- v_src2_shift_b = _mm_unpacklo_epi32(v_src3_shift_b, v_src2_shift_b);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src0_b = _mm_unpacklo_epi64(v_src2_b, v_src0_b);
- v_src0_shift_b = _mm_unpacklo_epi64(v_src2_shift_b, v_src0_shift_b);
- v_xres_b[i == 0 ? 0 : 1] = _mm_avg_epu8(v_src0_b, v_src0_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- apply_filter_lo(v_src2_b, v_src2_shift_b, v_filterx_b, &v_filtered2_w);
- v_xres_b[i == 0 ? 0 : 1] = _mm_packus_epi16(v_filtered2_w, v_filtered0_w);
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)src);
+ _mm_storel_epi64((__m128i *)b, x);
+ src += src_stride;
+ b += 8;
+ }
+ } else if (xoffset == 4) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadu_si128((__m128i *)src);
+ __m128i z = _mm_srli_si128(x, 1);
+ _mm_storel_epi64((__m128i *)b, _mm_avg_epu8(x, z));
+ src += src_stride;
+ b += 8;
}
- // Move onto the next set of rows
- src += src_stride * 4;
- }
- // Load one more row to be used in the y filter
- v_src0_b = _mm_loadl_epi64((const __m128i *)src);
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_extra_row_b = _mm_and_si128(_mm_avg_epu8(v_src0_b, v_src0_shift_b),
- _mm_setr_epi32(-1, 0, 0, 0));
} else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- v_extra_row_b =
- _mm_and_si128(_mm_packus_epi16(v_filtered0_w, _mm_setzero_si128()),
- _mm_setr_epi32(-1, 0, 0, 0));
- }
+ uint8_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi16(hfilter[0] | (hfilter[1] << 8));
+ for (i = 0; i < h; i += 2) {
+ const __m128i x0 = _mm_loadu_si128((__m128i *)src);
+ const __m128i z0 = _mm_srli_si128(x0, 1);
+ const __m128i x1 = _mm_loadu_si128((__m128i *)&src[src_stride]);
+ const __m128i z1 = _mm_srli_si128(x1, 1);
+ const __m128i res = filter_block_2rows(x0, z0, x1, z1, hfilter_vec);
+ _mm_storeu_si128((__m128i *)b, res);
- for (i = 0; i < h; i += 4) {
- if (h == 8 && i == 0) {
- v_temp_b = _mm_or_si128(_mm_slli_si128(v_xres_b[0], 4),
- _mm_srli_si128(v_xres_b[1], 12));
- } else {
- v_temp_b = _mm_or_si128(_mm_slli_si128(v_xres_b[i == 0 ? 0 : 1], 4),
- v_extra_row_b);
+ src += src_stride * 2;
+ b += 16;
}
- // Apply the y filter
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_b = _mm_avg_epu8(v_xres_b[i == 0 ? 0 : 1], v_temp_b);
- } else {
- v_res_b = apply_filter(v_xres_b[i == 0 ? 0 : 1], v_temp_b, v_filtery_b);
+ // Handle i = h separately
+ const __m128i x0 = _mm_loadu_si128((__m128i *)src);
+ const __m128i z0 = _mm_srli_si128(x0, 1);
+
+ __m128i v0 = _mm_unpacklo_epi8(x0, z0);
+ v0 = _mm_maddubs_epi16(v0, hfilter_vec);
+ v0 = xx_roundn_epu16(v0, FILTER_BITS);
+
+ _mm_storel_epi64((__m128i *)b, _mm_packus_epi16(v0, v0));
+ }
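+  // Note: the pairwise loop above covers rows 0..h-1, so row h is filtered
+  // on its own; like the other two branches this leaves h + 1 rows in 'dst'
+  // for the vertical pass below to consume.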
+
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)dst);
+ __m128i y = _mm_loadl_epi64((__m128i *)&dst[8]);
+ _mm_storel_epi64((__m128i *)dst, _mm_avg_epu8(x, y));
+ dst += 8;
}
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi16(vfilter[0] | (vfilter[1] << 8));
+ for (i = 0; i < h; i += 2) {
+ const __m128i x = _mm_loadl_epi64((__m128i *)dst);
+ const __m128i y = _mm_loadl_epi64((__m128i *)&dst[8]);
+ const __m128i z = _mm_loadl_epi64((__m128i *)&dst[16]);
+ const __m128i res = filter_block_2rows(x, y, y, z, vfilter_vec);
+ _mm_storeu_si128((__m128i *)dst, res);
- // Load the dst data
- v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 0));
- v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 1));
- v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
- v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 2));
- v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t *)(dst + dst_stride * 3));
- v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
- v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
- // Load the mask data
- v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 0));
- v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 1));
- v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
- v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 2));
- v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t *)(msk + msk_stride * 3));
- v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
- v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- dst += dst_stride * 4;
- msk += msk_stride * 4;
+ dst += 16;
+ }
}
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
}
-unsigned int aom_masked_subpel_var8xH_xnonzero_ynonzero(
- const uint8_t *src, int src_stride, int xoffset, int yoffset,
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int h) {
+static void bilinear_filter4xh(const uint8_t *src, int src_stride, int xoffset,
+ int yoffset, uint8_t *dst, int h) {
int i;
- __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w, v_dst_b, v_msk_b;
- __m128i v_src0_shift_b, v_src1_shift_b;
- __m128i v_xres0_b, v_xres1_b, v_res_b, v_temp_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filterx_b = _mm_set1_epi16((bilinear_filters_2t[xoffset][1] << 8) +
- bilinear_filters_2t[xoffset][0]);
- __m128i v_filtery_b = _mm_set1_epi16((bilinear_filters_2t[yoffset][1] << 8) +
- bilinear_filters_2t[yoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first block of src data
- v_src0_b = _mm_loadu_si128((const __m128i *)(src));
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + src_stride));
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
- v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
- v_xres0_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
- v_xres0_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
- }
- for (i = 0; i < h; i += 4) {
- // Load the next block of src data
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 2));
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 3));
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
- v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
- v_xres1_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
- v_xres1_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = xx_loadl_32((__m128i *)src);
+ xx_storel_32((__m128i *)b, x);
+ src += src_stride;
+ b += 4;
}
- // Apply the y filter to the previous block
- v_temp_b = _mm_or_si128(_mm_srli_si128(v_xres0_b, 8),
- _mm_slli_si128(v_xres1_b, 8));
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_b = _mm_avg_epu8(v_xres0_b, v_temp_b);
- } else {
- v_res_b = apply_filter(v_xres0_b, v_temp_b, v_filtery_b);
+ } else if (xoffset == 4) {
+ uint8_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)src);
+ __m128i z = _mm_srli_si128(x, 1);
+ xx_storel_32((__m128i *)b, _mm_avg_epu8(x, z));
+ src += src_stride;
+ b += 4;
}
- // Load the dst data
- v_dst_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next block of src data
- v_src0_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 4));
- v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
- v_src1_b = _mm_loadu_si128((const __m128i *)(src + src_stride * 5));
- v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
- v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
- v_xres0_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
- } else {
- apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
- apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
- v_xres0_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
+ } else {
+ uint8_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi16(hfilter[0] | (hfilter[1] << 8));
+ for (i = 0; i < h; i += 4) {
+ const __m128i x0 = _mm_loadl_epi64((__m128i *)src);
+ const __m128i z0 = _mm_srli_si128(x0, 1);
+ const __m128i x1 = _mm_loadl_epi64((__m128i *)&src[src_stride]);
+ const __m128i z1 = _mm_srli_si128(x1, 1);
+ const __m128i x2 = _mm_loadl_epi64((__m128i *)&src[src_stride * 2]);
+ const __m128i z2 = _mm_srli_si128(x2, 1);
+ const __m128i x3 = _mm_loadl_epi64((__m128i *)&src[src_stride * 3]);
+ const __m128i z3 = _mm_srli_si128(x3, 1);
+
+ const __m128i a0 = _mm_unpacklo_epi32(x0, x1);
+ const __m128i b0 = _mm_unpacklo_epi32(z0, z1);
+ const __m128i a1 = _mm_unpacklo_epi32(x2, x3);
+ const __m128i b1 = _mm_unpacklo_epi32(z2, z3);
+ const __m128i res = filter_block_2rows(a0, b0, a1, b1, hfilter_vec);
+ _mm_storeu_si128((__m128i *)b, res);
+
+ src += src_stride * 4;
+ b += 16;
}
- // Apply the y filter to the previous block
- v_temp_b = _mm_or_si128(_mm_srli_si128(v_xres1_b, 8),
- _mm_slli_si128(v_xres0_b, 8));
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_b = _mm_avg_epu8(v_xres1_b, v_temp_b);
- } else {
- v_res_b = apply_filter(v_xres1_b, v_temp_b, v_filtery_b);
+ // Handle i = h separately
+ const __m128i x = _mm_loadl_epi64((__m128i *)src);
+ const __m128i z = _mm_srli_si128(x, 1);
+
+ __m128i v0 = _mm_unpacklo_epi8(x, z);
+ v0 = _mm_maddubs_epi16(v0, hfilter_vec);
+ v0 = xx_roundn_epu16(v0, FILTER_BITS);
+
+ xx_storel_32((__m128i *)b, _mm_packus_epi16(v0, v0));
+ }
+
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ __m128i x = xx_loadl_32((__m128i *)dst);
+ __m128i y = xx_loadl_32((__m128i *)&dst[4]);
+ xx_storel_32((__m128i *)dst, _mm_avg_epu8(x, y));
+ dst += 4;
+ }
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi16(vfilter[0] | (vfilter[1] << 8));
+ for (i = 0; i < h; i += 4) {
+ const __m128i a = xx_loadl_32((__m128i *)dst);
+ const __m128i b = xx_loadl_32((__m128i *)&dst[4]);
+ const __m128i c = xx_loadl_32((__m128i *)&dst[8]);
+ const __m128i d = xx_loadl_32((__m128i *)&dst[12]);
+ const __m128i e = xx_loadl_32((__m128i *)&dst[16]);
+
+ const __m128i a0 = _mm_unpacklo_epi32(a, b);
+ const __m128i b0 = _mm_unpacklo_epi32(b, c);
+ const __m128i a1 = _mm_unpacklo_epi32(c, d);
+ const __m128i b1 = _mm_unpacklo_epi32(d, e);
+ const __m128i res = filter_block_2rows(a0, b0, a1, b1, vfilter_vec);
+ _mm_storeu_si128((__m128i *)dst, res);
+
+ dst += 16;
}
- // Load the dst data
- v_dst_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 2)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 3)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 2)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 3)));
- // Compute the sum and SSE
- sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 4;
- dst += dst_stride * 4;
- msk += msk_stride * 4;
- }
- return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
-}
-
-// For W >=16
-#define MASK_SUBPIX_VAR_LARGE(W, H) \
- unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- assert(W % 16 == 0); \
- if (xoffset == 0) { \
- if (yoffset == 0) \
- return aom_masked_variance##W##x##H##_ssse3( \
- src, src_stride, dst, dst_stride, msk, msk_stride, sse); \
- else if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_masked_subpel_varWxH_xzero( \
- src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter_avg); \
- else \
- return aom_masked_subpel_varWxH_xzero(src, src_stride, yoffset, dst, \
- dst_stride, msk, msk_stride, \
- sse, W, H, apply_filter); \
- } else if (yoffset == 0) { \
- if (xoffset == HALF_PIXEL_OFFSET) \
- return aom_masked_subpel_varWxH_yzero( \
- src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter_avg); \
- else \
- return aom_masked_subpel_varWxH_yzero(src, src_stride, xoffset, dst, \
- dst_stride, msk, msk_stride, \
- sse, W, H, apply_filter); \
- } else if (xoffset == HALF_PIXEL_OFFSET) { \
- if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, HALF_PIXEL_OFFSET, HALF_PIXEL_OFFSET, dst, \
- dst_stride, msk, msk_stride, sse, W, H, apply_filter_avg, \
- apply_filter_avg); \
- else \
- return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, HALF_PIXEL_OFFSET, yoffset, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter_avg, apply_filter); \
- } else { \
- if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, xoffset, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter, apply_filter_avg); \
- else \
- return aom_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, xoffset, yoffset, dst, dst_stride, msk, \
- msk_stride, sse, W, H, apply_filter, apply_filter); \
- } \
- }
-
-// For W < 16
-#define MASK_SUBPIX_VAR_SMALL(W, H) \
- unsigned int aom_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- assert(W == 4 || W == 8); \
- if (xoffset == 0 && yoffset == 0) \
- return aom_masked_variance##W##x##H##_ssse3( \
- src, src_stride, dst, dst_stride, msk, msk_stride, sse); \
- else if (xoffset == 0) \
- return aom_masked_subpel_var##W##xH_xzero( \
- src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, H); \
- else if (yoffset == 0) \
- return aom_masked_subpel_var##W##xH_yzero( \
- src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, H); \
- else \
- return aom_masked_subpel_var##W##xH_xnonzero_ynonzero( \
- src, src_stride, xoffset, yoffset, dst, dst_stride, msk, msk_stride, \
- sse, H); \
}
-
-MASK_SUBPIX_VAR_SMALL(4, 4)
-MASK_SUBPIX_VAR_SMALL(4, 8)
-MASK_SUBPIX_VAR_SMALL(8, 4)
-MASK_SUBPIX_VAR_SMALL(8, 8)
-MASK_SUBPIX_VAR_SMALL(8, 16)
-MASK_SUBPIX_VAR_LARGE(16, 8)
-MASK_SUBPIX_VAR_LARGE(16, 16)
-MASK_SUBPIX_VAR_LARGE(16, 32)
-MASK_SUBPIX_VAR_LARGE(32, 16)
-MASK_SUBPIX_VAR_LARGE(32, 32)
-MASK_SUBPIX_VAR_LARGE(32, 64)
-MASK_SUBPIX_VAR_LARGE(64, 32)
-MASK_SUBPIX_VAR_LARGE(64, 64)
-#if CONFIG_EXT_PARTITION
-MASK_SUBPIX_VAR_LARGE(64, 128)
-MASK_SUBPIX_VAR_LARGE(128, 64)
-MASK_SUBPIX_VAR_LARGE(128, 128)
-#endif // CONFIG_EXT_PARTITION
-
-#if CONFIG_HIGHBITDEPTH
-typedef uint32_t (*highbd_calc_masked_var_t)(__m128i v_sum_d, __m128i v_sse_q,
- uint32_t *sse, int w, int h);
-typedef unsigned int (*highbd_variance_fn_t)(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- const uint8_t *m, int m_stride,
- unsigned int *sse);
-typedef __m128i (*highbd_filter_fn_t)(__m128i v_a_w, __m128i v_b_w,
- __m128i v_filter_w);
-
-static INLINE __m128i highbd_apply_filter_avg(const __m128i v_a_w,
- const __m128i v_b_w,
- const __m128i v_filter_w) {
- (void)v_filter_w;
- return _mm_avg_epu16(v_a_w, v_b_w);
}
-static INLINE __m128i highbd_apply_filter(const __m128i v_a_w,
- const __m128i v_b_w,
- const __m128i v_filter_w) {
- const __m128i v_rounding_d = _mm_set1_epi32(1 << (FILTER_BITS - 1));
- __m128i v_input_lo_w = _mm_unpacklo_epi16(v_a_w, v_b_w);
- __m128i v_input_hi_w = _mm_unpackhi_epi16(v_a_w, v_b_w);
- __m128i v_temp0_d = _mm_madd_epi16(v_input_lo_w, v_filter_w);
- __m128i v_temp1_d = _mm_madd_epi16(v_input_hi_w, v_filter_w);
- __m128i v_res_lo_d =
- _mm_srai_epi32(_mm_add_epi32(v_temp0_d, v_rounding_d), FILTER_BITS);
- __m128i v_res_hi_d =
- _mm_srai_epi32(_mm_add_epi32(v_temp1_d, v_rounding_d), FILTER_BITS);
- return _mm_packs_epi32(v_res_lo_d, v_res_hi_d);
-}
-// Apply the filter to the contents of the lower half of a and b
-static INLINE void highbd_apply_filter_lo(const __m128i v_a_lo_w,
- const __m128i v_b_lo_w,
- const __m128i v_filter_w,
- __m128i *v_res_d) {
- const __m128i v_rounding_d = _mm_set1_epi32(1 << (FILTER_BITS - 1));
- __m128i v_input_w = _mm_unpacklo_epi16(v_a_lo_w, v_b_lo_w);
- __m128i v_temp0_d = _mm_madd_epi16(v_input_w, v_filter_w);
- *v_res_d =
- _mm_srai_epi32(_mm_add_epi32(v_temp0_d, v_rounding_d), FILTER_BITS);
-}
+static INLINE void accumulate_block(const __m128i src, const __m128i a,
+ const __m128i b, const __m128i m,
+ __m128i *sum, __m128i *sum_sq) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i one = _mm_set1_epi16(1);
+ const __m128i mask_max = _mm_set1_epi8((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i m_inv = _mm_sub_epi8(mask_max, m);
+
+ // Calculate 16 predicted pixels.
+ // Note that the maximum value of any entry of 'pred_l' or 'pred_r'
+ // is 64 * 255, so we have plenty of space to add rounding constants.
+ const __m128i data_l = _mm_unpacklo_epi8(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi8(m, m_inv);
+ __m128i pred_l = _mm_maddubs_epi16(data_l, mask_l);
+ pred_l = xx_roundn_epu16(pred_l, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi8(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi8(m, m_inv);
+ __m128i pred_r = _mm_maddubs_epi16(data_r, mask_r);
+ pred_r = xx_roundn_epu16(pred_r, AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i src_l = _mm_unpacklo_epi8(src, zero);
+ const __m128i src_r = _mm_unpackhi_epi8(src, zero);
+ const __m128i diff_l = _mm_sub_epi16(pred_l, src_l);
+ const __m128i diff_r = _mm_sub_epi16(pred_r, src_r);
+
+ // Update partial sums and partial sums of squares
+ *sum =
+ _mm_add_epi32(*sum, _mm_madd_epi16(_mm_add_epi16(diff_l, diff_r), one));
+ *sum_sq =
+ _mm_add_epi32(*sum_sq, _mm_add_epi32(_mm_madd_epi16(diff_l, diff_l),
+ _mm_madd_epi16(diff_r, diff_r)));
+}
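+// accumulate_block() forms the masked prediction
+//   pred[i] = ROUND_POWER_OF_TWO(a[i] * m[i] + b[i] * (mask_max - m[i]),
+//                                AOM_BLEND_A64_ROUND_BITS)
+// where mask_max = 1 << AOM_BLEND_A64_ROUND_BITS is the maximum mask value,
+// then adds pred[i] - src[i] into the running *sum and
+// (pred[i] - src[i])^2 into *sum_sq for all 16 pixels in the register.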
+
+static void masked_variance(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, int a_stride,
+ const uint8_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride, int width,
+ int height, unsigned int *sse, int *sum_) {
+ int x, y;
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 16) {
+ const __m128i src = _mm_loadu_si128((const __m128i *)&src_ptr[x]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)&a_ptr[x]);
+ const __m128i b = _mm_loadu_si128((const __m128i *)&b_ptr[x]);
+ const __m128i m = _mm_loadu_si128((const __m128i *)&m_ptr[x]);
+ accumulate_block(src, a, b, m, &sum, &sum_sq);
+ }
-static void highbd_sum_and_sse(const __m128i v_a_w, const __m128i v_b_w,
- const __m128i v_m_b, __m128i *v_sum_d,
- __m128i *v_sse_q) {
- const __m128i v_zero = _mm_setzero_si128();
- const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
-
- // Difference: [-2^12, 2^12] => 13 bits (incld sign bit)
- const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
-
- // Error - [-4095, 4095] * [0, 64] & sum pairs => fits in 19 + 1 bits
- const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
-
- // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
- const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
- const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
- const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
- const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
- const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
- const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
- const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
- // Square and sum the errors -> 36bits * 4 = 38bits
- __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
- v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
- v_elo1_d = _mm_srli_si128(v_elo_d, 4);
- v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
- v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
- v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
- v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
- v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
- v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
- v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
-
- // Accumulate
- *v_sum_d = _mm_add_epi32(*v_sum_d, v_e_d);
- *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
+ src_ptr += src_stride;
+ a_ptr += a_stride;
+ b_ptr += b_stride;
+ m_ptr += m_stride;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, sum_sq);
+ sum = _mm_hadd_epi32(sum, sum);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ *sse = _mm_cvtsi128_si32(_mm_srli_si128(sum, 4));
+}
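+// After the two _mm_hadd_epi32 steps above, lane 0 of 'sum' holds the total
+// sum of differences and lane 1 holds the total sum of squared differences,
+// which is why *sum_ is taken from lane 0 and *sse from lane 1. The calling
+// macros then form the variance as *sse - sum^2 / (width * height).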
+
+static void masked_variance8xh(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, const uint8_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride, int height,
+ unsigned int *sse, int *sum_) {
+ int y;
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+
+ for (y = 0; y < height; y += 2) {
+ __m128i src = _mm_unpacklo_epi64(
+ _mm_loadl_epi64((const __m128i *)src_ptr),
+ _mm_loadl_epi64((const __m128i *)&src_ptr[src_stride]));
+ const __m128i a = _mm_loadu_si128((const __m128i *)a_ptr);
+ const __m128i b = _mm_loadu_si128((const __m128i *)b_ptr);
+ const __m128i m =
+ _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)m_ptr),
+ _mm_loadl_epi64((const __m128i *)&m_ptr[m_stride]));
+ accumulate_block(src, a, b, m, &sum, &sum_sq);
+
+ src_ptr += src_stride * 2;
+ a_ptr += 16;
+ b_ptr += 16;
+ m_ptr += m_stride * 2;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, sum_sq);
+ sum = _mm_hadd_epi32(sum, sum);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ *sse = _mm_cvtsi128_si32(_mm_srli_si128(sum, 4));
+}
+
+static void masked_variance4xh(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *a_ptr, const uint8_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride, int height,
+ unsigned int *sse, int *sum_) {
+ int y;
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+
+ for (y = 0; y < height; y += 4) {
+ // Load four rows at a time
+ __m128i src =
+ _mm_setr_epi32(*(uint32_t *)src_ptr, *(uint32_t *)&src_ptr[src_stride],
+ *(uint32_t *)&src_ptr[src_stride * 2],
+ *(uint32_t *)&src_ptr[src_stride * 3]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)a_ptr);
+ const __m128i b = _mm_loadu_si128((const __m128i *)b_ptr);
+ const __m128i m = _mm_setr_epi32(
+ *(uint32_t *)m_ptr, *(uint32_t *)&m_ptr[m_stride],
+ *(uint32_t *)&m_ptr[m_stride * 2], *(uint32_t *)&m_ptr[m_stride * 3]);
+ accumulate_block(src, a, b, m, &sum, &sum_sq);
+
+ src_ptr += src_stride * 4;
+ a_ptr += 16;
+ b_ptr += 16;
+ m_ptr += m_stride * 4;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, sum_sq);
+ sum = _mm_hadd_epi32(sum, sum);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ *sse = _mm_cvtsi128_si32(_mm_srli_si128(sum, 4));
}
-static INLINE uint32_t highbd_10_calc_masked_variance(__m128i v_sum_d,
- __m128i v_sse_q,
- uint32_t *sse, int w,
- int h) {
- int64_t sum64;
- uint64_t sse64;
-
- // Horizontal sum
- sum64 = hsum_epi32_si32(v_sum_d);
- sse64 = hsum_epi64_si64(v_sse_q);
-
- sum64 = (sum64 >= 0) ? sum64 : -sum64;
-
- // Round
- sum64 = ROUND_POWER_OF_TWO(sum64, 6);
- sse64 = ROUND_POWER_OF_TWO(sse64, 12);
-
- // Normalise
- sum64 = ROUND_POWER_OF_TWO(sum64, 2);
- sse64 = ROUND_POWER_OF_TWO(sse64, 4);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute the variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
-static INLINE uint32_t highbd_12_calc_masked_variance(__m128i v_sum_d,
- __m128i v_sse_q,
- uint32_t *sse, int w,
- int h) {
- int64_t sum64;
- uint64_t sse64;
-
- // Horizontal sum
- sum64 = hsum_epi32_si64(v_sum_d);
- sse64 = hsum_epi64_si64(v_sse_q);
-
- sum64 = (sum64 >= 0) ? sum64 : -sum64;
-
- // Round
- sum64 = ROUND_POWER_OF_TWO(sum64, 6);
- sse64 = ROUND_POWER_OF_TWO(sse64, 12);
-
- // Normalise
- sum64 = ROUND_POWER_OF_TWO(sum64, 4);
- sse64 = ROUND_POWER_OF_TWO(sse64, 8);
-
- // Store the SSE
- *sse = (uint32_t)sse64;
- // Compute the variance
- return *sse - (uint32_t)((sum64 * sum64) / (w * h));
-}
+#if CONFIG_HIGHBITDEPTH
+// For width a multiple of 8
+static void highbd_bilinear_filter(const uint16_t *src, int src_stride,
+ int xoffset, int yoffset, uint16_t *dst,
+ int w, int h);
+
+static void highbd_bilinear_filter4xh(const uint16_t *src, int src_stride,
+ int xoffset, int yoffset, uint16_t *dst,
+ int h);
+
+// For width a multiple of 8
+static void highbd_masked_variance(const uint16_t *src_ptr, int src_stride,
+ const uint16_t *a_ptr, int a_stride,
+ const uint16_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride,
+ int width, int height, uint64_t *sse,
+ int *sum_);
+
+static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
+ const uint16_t *a_ptr,
+ const uint16_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride,
+ int height, int *sse, int *sum_);
+
+#define HIGHBD_MASK_SUBPIX_VAR_SSSE3(W, H) \
+ unsigned int aom_highbd_8_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ uint64_t sse64; \
+ int sum; \
+ uint16_t temp[(H + 1) * W]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter(src, src_stride, xoffset, yoffset, temp, W, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance(ref, ref_stride, temp, W, second_pred, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ else \
+ highbd_masked_variance(ref, ref_stride, second_pred, W, temp, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ *sse = (uint32_t)sse64; \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ } \
+ unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ uint64_t sse64; \
+ int sum; \
+ uint16_t temp[(H + 1) * W]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter(src, src_stride, xoffset, yoffset, temp, W, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance(ref, ref_stride, temp, W, second_pred, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ else \
+ highbd_masked_variance(ref, ref_stride, second_pred, W, temp, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 4); \
+ sum = ROUND_POWER_OF_TWO(sum, 2); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ } \
+ unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ uint64_t sse64; \
+ int sum; \
+ uint16_t temp[(H + 1) * W]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter(src, src_stride, xoffset, yoffset, temp, W, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance(ref, ref_stride, temp, W, second_pred, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ else \
+ highbd_masked_variance(ref, ref_stride, second_pred, W, temp, W, msk, \
+ msk_stride, W, H, &sse64, &sum); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 8); \
+ sum = ROUND_POWER_OF_TWO(sum, 4); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ }
+
+#define HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(H) \
+ unsigned int aom_highbd_8_masked_sub_pixel_variance4x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ int sse_; \
+ int sum; \
+ uint16_t temp[(H + 1) * 4]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter4xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance4xh(ref, ref_stride, temp, second_pred, msk, \
+ msk_stride, H, &sse_, &sum); \
+ else \
+ highbd_masked_variance4xh(ref, ref_stride, second_pred, temp, msk, \
+ msk_stride, H, &sse_, &sum); \
+ *sse = (uint32_t)sse_; \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ } \
+ unsigned int aom_highbd_10_masked_sub_pixel_variance4x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ int sse_; \
+ int sum; \
+ uint16_t temp[(H + 1) * 4]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter4xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance4xh(ref, ref_stride, temp, second_pred, msk, \
+ msk_stride, H, &sse_, &sum); \
+ else \
+ highbd_masked_variance4xh(ref, ref_stride, second_pred, temp, msk, \
+ msk_stride, H, &sse_, &sum); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_, 4); \
+ sum = ROUND_POWER_OF_TWO(sum, 2); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ } \
+ unsigned int aom_highbd_12_masked_sub_pixel_variance4x##H##_ssse3( \
+ const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, \
+ const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
+ int sse_; \
+ int sum; \
+ uint16_t temp[(H + 1) * 4]; \
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
+ const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
+ const uint16_t *second_pred = CONVERT_TO_SHORTPTR(second_pred8); \
+ \
+ highbd_bilinear_filter4xh(src, src_stride, xoffset, yoffset, temp, H); \
+ \
+ if (!invert_mask) \
+ highbd_masked_variance4xh(ref, ref_stride, temp, second_pred, msk, \
+ msk_stride, H, &sse_, &sum); \
+ else \
+ highbd_masked_variance4xh(ref, ref_stride, second_pred, temp, msk, \
+ msk_stride, H, &sse_, &sum); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_, 8); \
+ sum = ROUND_POWER_OF_TWO(sum, 4); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ }
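+// In the 10- and 12-bit variants generated by the two macros above, the sum
+// and SSE are accumulated at full sample precision, so the sum is rounded
+// down by 2 (10-bit) or 4 (12-bit) bits and the SSE by 4 or 8 bits before
+// the variance is formed, putting the result on the same scale as the 8-bit
+// functions.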
-// High bit depth functions for width (W) >= 8
-unsigned int aom_highbd_masked_subpel_varWxH_xzero(
- const uint16_t *src, int src_stride, int yoffset, const uint16_t *dst,
- int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
- int w, int h, highbd_filter_fn_t filter_fn,
- highbd_calc_masked_var_t calc_var) {
+#if CONFIG_EXT_PARTITION
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(128, 128)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(128, 64)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(64, 128)
+#endif
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(64, 64)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(64, 32)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(32, 64)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(32, 32)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(32, 16)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(16, 32)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(16, 16)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(16, 8)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 16)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 8)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 4)
+HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(8)
+HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(4)
+
+static INLINE __m128i highbd_filter_block(const __m128i a, const __m128i b,
+ const __m128i filter) {
+ __m128i v0 = _mm_unpacklo_epi16(a, b);
+ v0 = _mm_madd_epi16(v0, filter);
+ v0 = xx_roundn_epu32(v0, FILTER_BITS);
+
+ __m128i v1 = _mm_unpackhi_epi16(a, b);
+ v1 = _mm_madd_epi16(v1, filter);
+ v1 = xx_roundn_epu32(v1, FILTER_BITS);
+
+ return _mm_packs_epi32(v0, v1);
+}
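+// highbd_filter_block() is the 16-bit-sample counterpart of filter_block():
+// the taps are packed into each 32-bit lane with
+// _mm_set1_epi32(f0 | (f1 << 16)), _mm_unpacklo/unpackhi_epi16 interleave
+// the tap inputs, and _mm_madd_epi16 produces a[i] * f0 + b[i] * f1 as a
+// 32-bit value before the rounding shift and the pack back down to 16 bits.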
+
+static void highbd_bilinear_filter(const uint16_t *src, int src_stride,
+ int xoffset, int yoffset, uint16_t *dst,
+ int w, int h) {
int i, j;
- __m128i v_src0_w, v_src1_w, v_res_w, v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filter_w =
- _mm_set1_epi32((bilinear_filters_2t[yoffset][1] << 16) +
- bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- for (j = 0; j < w; j += 8) {
- // Load the first row ready
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + j));
- // Process 2 rows at a time
- for (i = 0; i < h; i += 2) {
- // Load the next row apply the filter
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + j + src_stride));
- v_res_w = filter_fn(v_src0_w, v_src1_w, v_filter_w);
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + j));
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next row apply the filter
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + j + src_stride * 2));
- v_res_w = filter_fn(v_src1_w, v_src0_w, v_filter_w);
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + j + dst_stride));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + j + msk_stride));
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next block of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint16_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 8) {
+ __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ _mm_storeu_si128((__m128i *)&b[j], x);
+ }
+ src += src_stride;
+ b += w;
}
- // Reset to the top of the block
- src -= src_stride * h;
- dst -= dst_stride * h;
- msk -= msk_stride * h;
- }
- return calc_var(v_sum_d, v_sse_q, sse, w, h);
-}
-unsigned int aom_highbd_masked_subpel_varWxH_yzero(
- const uint16_t *src, int src_stride, int xoffset, const uint16_t *dst,
- int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
- int w, int h, highbd_filter_fn_t filter_fn,
- highbd_calc_masked_var_t calc_var) {
- int i, j;
- __m128i v_src0_w, v_src1_w, v_res_w, v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filter_w =
- _mm_set1_epi32((bilinear_filters_2t[xoffset][1] << 16) +
- bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i++) {
- for (j = 0; j < w; j += 8) {
- // Load this row & apply the filter to them
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + j));
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + j + 1));
- v_res_w = filter_fn(v_src0_w, v_src1_w, v_filter_w);
-
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + j));
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+ } else if (xoffset == 4) {
+ uint16_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 8) {
+ __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ __m128i y = _mm_loadu_si128((__m128i *)&src[j + 8]);
+ __m128i z = _mm_alignr_epi8(y, x, 2);
+ _mm_storeu_si128((__m128i *)&b[j], _mm_avg_epu16(x, z));
+ }
+ src += src_stride;
+ b += w;
}
- src += src_stride;
- dst += dst_stride;
- msk += msk_stride;
- }
- return calc_var(v_sum_d, v_sse_q, sse, w, h);
-}
-
-unsigned int aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero(
- const uint16_t *src, int src_stride, int xoffset, int yoffset,
- const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int w, int h, highbd_filter_fn_t xfilter_fn,
- highbd_filter_fn_t yfilter_fn, highbd_calc_masked_var_t calc_var) {
- int i, j;
- __m128i v_src0_w, v_src1_w, v_src2_w, v_src3_w;
- __m128i v_filtered0_w, v_filtered1_w, v_res_w, v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- const __m128i v_filterx_w =
- _mm_set1_epi32((bilinear_filters_2t[xoffset][1] << 16) +
- bilinear_filters_2t[xoffset][0]);
- const __m128i v_filtery_w =
- _mm_set1_epi32((bilinear_filters_2t[yoffset][1] << 16) +
- bilinear_filters_2t[yoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- for (j = 0; j < w; j += 8) {
- // Load the first row ready
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + j));
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + j + 1));
- v_filtered0_w = xfilter_fn(v_src0_w, v_src1_w, v_filterx_w);
- // Process 2 rows at a time
- for (i = 0; i < h; i += 2) {
- // Load the next row & apply the filter
- v_src2_w = _mm_loadu_si128((const __m128i *)(src + src_stride + j));
- v_src3_w = _mm_loadu_si128((const __m128i *)(src + src_stride + j + 1));
- v_filtered1_w = xfilter_fn(v_src2_w, v_src3_w, v_filterx_w);
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + j));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + j));
- // Complete the calculation for this row and add it to the running total
- v_res_w = yfilter_fn(v_filtered0_w, v_filtered1_w, v_filtery_w);
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next row & apply the filter
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 2 + j));
- v_src1_w =
- _mm_loadu_si128((const __m128i *)(src + src_stride * 2 + j + 1));
- v_filtered0_w = xfilter_fn(v_src0_w, v_src1_w, v_filterx_w);
- // Load the dst and msk for the variance calculation
- v_dst_w = _mm_loadu_si128((const __m128i *)(dst + dst_stride + j));
- v_msk_b = _mm_loadl_epi64((const __m128i *)(msk + msk_stride + j));
- // Complete the calculation for this row and add it to the running total
- v_res_w = yfilter_fn(v_filtered1_w, v_filtered0_w, v_filtery_w);
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next block of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
+ } else {
+ uint16_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi32(hfilter[0] | (hfilter[1] << 16));
+ for (i = 0; i < h + 1; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const __m128i x = _mm_loadu_si128((__m128i *)&src[j]);
+ const __m128i y = _mm_loadu_si128((__m128i *)&src[j + 8]);
+ const __m128i z = _mm_alignr_epi8(y, x, 2);
+ const __m128i res = highbd_filter_block(x, z, hfilter_vec);
+ _mm_storeu_si128((__m128i *)&b[j], res);
+ }
+
+ src += src_stride;
+ b += w;
}
- // Reset to the top of the block
- src -= src_stride * h;
- dst -= dst_stride * h;
- msk -= msk_stride * h;
}
- return calc_var(v_sum_d, v_sse_q, sse, w, h);
-}
-// Note order in which rows loaded xmm[127:64] = row 1, xmm[63:0] = row 2
-unsigned int aom_highbd_masked_subpel_var4xH_xzero(
- const uint16_t *src, int src_stride, int yoffset, const uint16_t *dst,
- int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
- int h, highbd_calc_masked_var_t calc_var) {
- int i;
- __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d, v_res_w;
- __m128i v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_w = _mm_set1_epi32((bilinear_filters_2t[yoffset][1] << 16) +
- bilinear_filters_2t[yoffset][0]);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first row of src data ready
- v_src0_w = _mm_loadl_epi64((const __m128i *)src);
- for (i = 0; i < h; i += 2) {
- if (yoffset == HALF_PIXEL_OFFSET) {
- // Load the rest of the source data for these rows
- v_src1_w = _mm_or_si128(
- _mm_slli_si128(v_src0_w, 8),
- _mm_loadl_epi64((const __m128i *)(src + src_stride * 1)));
- v_src0_w = _mm_or_si128(
- _mm_slli_si128(v_src1_w, 8),
- _mm_loadl_epi64((const __m128i *)(src + src_stride * 2)));
- // Apply the y filter
- v_res_w = _mm_avg_epu16(v_src1_w, v_src0_w);
- } else {
- // Load the data and apply the y filter
- v_src1_w = _mm_loadl_epi64((const __m128i *)(src + src_stride * 1));
- highbd_apply_filter_lo(v_src0_w, v_src1_w, v_filter_w, &v_filtered0_d);
- v_src0_w = _mm_loadl_epi64((const __m128i *)(src + src_stride * 2));
- highbd_apply_filter_lo(v_src1_w, v_src0_w, v_filter_w, &v_filtered1_d);
- v_res_w = _mm_packs_epi32(v_filtered1_d, v_filtered0_d);
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ __m128i x = _mm_loadu_si128((__m128i *)&dst[j]);
+ __m128i y = _mm_loadu_si128((__m128i *)&dst[j + w]);
+ _mm_storeu_si128((__m128i *)&dst[j], _mm_avg_epu16(x, y));
+ }
+ dst += w;
+ }
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi32(vfilter[0] | (vfilter[1] << 16));
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const __m128i x = _mm_loadu_si128((__m128i *)&dst[j]);
+ const __m128i y = _mm_loadu_si128((__m128i *)&dst[j + w]);
+ const __m128i res = highbd_filter_block(x, y, vfilter_vec);
+ _mm_storeu_si128((__m128i *)&dst[j], res);
+ }
+
+ dst += w;
}
- // Load the dst data
- v_dst_w = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi32(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)));
- // Compute the sum and SSE
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
}
- return calc_var(v_sum_d, v_sse_q, sse, 4, h);
}
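+// highbd_bilinear_filter() mirrors the 8-bit path but keeps the samples as
+// 16-bit values. Since only eight pixels fit in a register, the "pixel to
+// the right" operand of the horizontal pass is assembled with
+// _mm_alignr_epi8(y, x, 2) from two adjacent loads rather than a byte shift
+// within a single register.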
-unsigned int aom_highbd_masked_subpel_var4xH_yzero(
- const uint16_t *src, int src_stride, int xoffset, const uint16_t *dst,
- int dst_stride, const uint8_t *msk, int msk_stride, unsigned int *sse,
- int h, highbd_calc_masked_var_t calc_var) {
- int i;
- __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d;
- __m128i v_src0_shift_w, v_src1_shift_w, v_res_w, v_dst_w, v_msk_b;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filter_w = _mm_set1_epi32((bilinear_filters_2t[xoffset][1] << 16) +
- bilinear_filters_2t[xoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- for (i = 0; i < h; i += 2) {
- // Load the src data
- v_src0_w = _mm_loadu_si128((const __m128i *)(src));
- v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + src_stride));
- v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
- v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
- v_res_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
- } else {
- highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filter_w,
- &v_filtered0_d);
- highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filter_w,
- &v_filtered1_d);
- v_res_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
- }
- // Load the dst data
- v_dst_w = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi32(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
- // Compute the sum and SSE
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 2;
- dst += dst_stride * 2;
- msk += msk_stride * 2;
- }
- return calc_var(v_sum_d, v_sse_q, sse, 4, h);
+static INLINE __m128i highbd_filter_block_2rows(const __m128i a0,
+ const __m128i b0,
+ const __m128i a1,
+ const __m128i b1,
+ const __m128i filter) {
+ __m128i v0 = _mm_unpacklo_epi16(a0, b0);
+ v0 = _mm_madd_epi16(v0, filter);
+ v0 = xx_roundn_epu32(v0, FILTER_BITS);
+
+ __m128i v1 = _mm_unpacklo_epi16(a1, b1);
+ v1 = _mm_madd_epi16(v1, filter);
+ v1 = xx_roundn_epu32(v1, FILTER_BITS);
+
+ return _mm_packs_epi32(v0, v1);
}
-unsigned int aom_highbd_masked_subpel_var4xH_xnonzero_ynonzero(
- const uint16_t *src, int src_stride, int xoffset, int yoffset,
- const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
- unsigned int *sse, int h, highbd_calc_masked_var_t calc_var) {
+static void highbd_bilinear_filter4xh(const uint16_t *src, int src_stride,
+ int xoffset, int yoffset, uint16_t *dst,
+ int h) {
int i;
- __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d, v_dst_w, v_msk_b;
- __m128i v_src0_shift_w, v_src1_shift_w;
- __m128i v_xres0_w, v_xres1_w, v_res_w, v_temp_w;
- __m128i v_sum_d = _mm_setzero_si128();
- __m128i v_sse_q = _mm_setzero_si128();
- __m128i v_filterx_w = _mm_set1_epi32((bilinear_filters_2t[xoffset][1] << 16) +
- bilinear_filters_2t[xoffset][0]);
- __m128i v_filtery_w = _mm_set1_epi32((bilinear_filters_2t[yoffset][1] << 16) +
- bilinear_filters_2t[yoffset][0]);
- assert(xoffset < BIL_SUBPEL_SHIFTS);
- assert(yoffset < BIL_SUBPEL_SHIFTS);
- // Load the first block of src data
- v_src0_w = _mm_loadu_si128((const __m128i *)(src));
- v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + src_stride));
- v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
- v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
- v_xres0_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
- } else {
- highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
- &v_filtered0_d);
- highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
- &v_filtered1_d);
- v_xres0_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
- }
- for (i = 0; i < h; i += 4) {
- // Load the next block of src data
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 2));
- v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 3));
- v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
- v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
- v_xres1_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
- } else {
- highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
- &v_filtered0_d);
- highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
- &v_filtered1_d);
- v_xres1_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
+ // Horizontal filter
+ if (xoffset == 0) {
+ uint16_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)src);
+ _mm_storel_epi64((__m128i *)b, x);
+ src += src_stride;
+ b += 4;
}
- // Apply the y filter to the previous block
- v_temp_w = _mm_or_si128(_mm_srli_si128(v_xres0_w, 8),
- _mm_slli_si128(v_xres1_w, 8));
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_w = _mm_avg_epu16(v_xres0_w, v_temp_w);
- } else {
- v_res_w = highbd_apply_filter(v_xres0_w, v_temp_w, v_filtery_w);
+ } else if (xoffset == 4) {
+ uint16_t *b = dst;
+ for (i = 0; i < h + 1; ++i) {
+ __m128i x = _mm_loadu_si128((__m128i *)src);
+ __m128i z = _mm_srli_si128(x, 2);
+ _mm_storel_epi64((__m128i *)b, _mm_avg_epu16(x, z));
+ src += src_stride;
+ b += 4;
}
- // Load the dst data
- v_dst_w = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi32(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
- // Compute the sum and SSE
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
-
- // Load the next block of src data
- v_src0_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 4));
- v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
- v_src1_w = _mm_loadu_si128((const __m128i *)(src + src_stride * 5));
- v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
- // Apply the x filter
- if (xoffset == HALF_PIXEL_OFFSET) {
- v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
- v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
- v_xres0_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
- } else {
- highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
- &v_filtered0_d);
- highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
- &v_filtered1_d);
- v_xres0_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
+ } else {
+ uint16_t *b = dst;
+ const uint8_t *hfilter = bilinear_filters_2t[xoffset];
+ const __m128i hfilter_vec = _mm_set1_epi32(hfilter[0] | (hfilter[1] << 16));
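+    // Each 32-bit lane of hfilter_vec packs the tap pair (tap 0 in the low
+    // 16 bits), matching the pixel interleaving fed to _mm_madd_epi16.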
+ for (i = 0; i < h; i += 2) {
+ const __m128i x0 = _mm_loadu_si128((__m128i *)src);
+ const __m128i z0 = _mm_srli_si128(x0, 2);
+ const __m128i x1 = _mm_loadu_si128((__m128i *)&src[src_stride]);
+ const __m128i z1 = _mm_srli_si128(x1, 2);
+ const __m128i res =
+ highbd_filter_block_2rows(x0, z0, x1, z1, hfilter_vec);
+ _mm_storeu_si128((__m128i *)b, res);
+
+ src += src_stride * 2;
+ b += 8;
}
- // Apply the y filter to the previous block
- v_temp_w = _mm_or_si128(_mm_srli_si128(v_xres1_w, 8),
- _mm_slli_si128(v_xres0_w, 8));
- if (yoffset == HALF_PIXEL_OFFSET) {
- v_res_w = _mm_avg_epu16(v_xres1_w, v_temp_w);
- } else {
- v_res_w = highbd_apply_filter(v_xres1_w, v_temp_w, v_filtery_w);
+ // Process i = h separately
+ __m128i x = _mm_loadu_si128((__m128i *)src);
+ __m128i z = _mm_srli_si128(x, 2);
+
+ __m128i v0 = _mm_unpacklo_epi16(x, z);
+ v0 = _mm_madd_epi16(v0, hfilter_vec);
+ v0 = xx_roundn_epu32(v0, FILTER_BITS);
+
+ _mm_storel_epi64((__m128i *)b, _mm_packs_epi32(v0, v0));
+ }
+
+ // Vertical filter
+ if (yoffset == 0) {
+ // The data is already in 'dst', so no need to filter
+ } else if (yoffset == 4) {
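+    // The horizontal pass stored rows 4 pixels apart, so row i + 1 of the
+    // intermediate buffer begins at dst + 4.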
+ for (i = 0; i < h; ++i) {
+ __m128i x = _mm_loadl_epi64((__m128i *)dst);
+ __m128i y = _mm_loadl_epi64((__m128i *)&dst[4]);
+ _mm_storel_epi64((__m128i *)dst, _mm_avg_epu16(x, y));
+ dst += 4;
}
- // Load the dst data
- v_dst_w = _mm_unpacklo_epi64(
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 2)),
- _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 3)));
- // Load the mask data
- v_msk_b = _mm_unpacklo_epi32(
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 2)),
- _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 3)));
- // Compute the sum and SSE
- highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
- // Move onto the next set of rows
- src += src_stride * 4;
- dst += dst_stride * 4;
- msk += msk_stride * 4;
- }
- return calc_var(v_sum_d, v_sse_q, sse, 4, h);
-}
+ } else {
+ const uint8_t *vfilter = bilinear_filters_2t[yoffset];
+ const __m128i vfilter_vec = _mm_set1_epi32(vfilter[0] | (vfilter[1] << 16));
+ for (i = 0; i < h; i += 2) {
+ const __m128i x = _mm_loadl_epi64((__m128i *)dst);
+ const __m128i y = _mm_loadl_epi64((__m128i *)&dst[4]);
+ const __m128i z = _mm_loadl_epi64((__m128i *)&dst[8]);
+ const __m128i res = highbd_filter_block_2rows(x, y, y, z, vfilter_vec);
+ _mm_storeu_si128((__m128i *)dst, res);
-// For W >=8
-#define HIGHBD_MASK_SUBPIX_VAR_LARGE(W, H) \
- unsigned int highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse, highbd_calc_masked_var_t calc_var, \
- highbd_variance_fn_t full_variance_function) { \
- uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
- assert(W % 8 == 0); \
- if (xoffset == 0) { \
- if (yoffset == 0) \
- return full_variance_function(src8, src_stride, dst8, dst_stride, msk, \
- msk_stride, sse); \
- else if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_highbd_masked_subpel_varWxH_xzero( \
- src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter_avg, calc_var); \
- else \
- return aom_highbd_masked_subpel_varWxH_xzero( \
- src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, \
- W, H, highbd_apply_filter, calc_var); \
- } else if (yoffset == 0) { \
- if (xoffset == HALF_PIXEL_OFFSET) \
- return aom_highbd_masked_subpel_varWxH_yzero( \
- src, src_stride, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter_avg, calc_var); \
- else \
- return aom_highbd_masked_subpel_varWxH_yzero( \
- src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, \
- W, H, highbd_apply_filter, calc_var); \
- } else if (xoffset == HALF_PIXEL_OFFSET) { \
- if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, HALF_PIXEL_OFFSET, HALF_PIXEL_OFFSET, dst, \
- dst_stride, msk, msk_stride, sse, W, H, highbd_apply_filter_avg, \
- highbd_apply_filter_avg, calc_var); \
- else \
- return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, HALF_PIXEL_OFFSET, yoffset, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter_avg, \
- highbd_apply_filter, calc_var); \
- } else { \
- if (yoffset == HALF_PIXEL_OFFSET) \
- return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, xoffset, HALF_PIXEL_OFFSET, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter, \
- highbd_apply_filter_avg, calc_var); \
- else \
- return aom_highbd_masked_subpel_varWxH_xnonzero_ynonzero( \
- src, src_stride, xoffset, yoffset, dst, dst_stride, msk, \
- msk_stride, sse, W, H, highbd_apply_filter, highbd_apply_filter, \
- calc_var); \
- } \
+ dst += 8;
+ }
}
+}
-// For W < 8
-#define HIGHBD_MASK_SUBPIX_VAR_SMALL(W, H) \
- unsigned int highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse, highbd_calc_masked_var_t calc_var, \
- highbd_variance_fn_t full_variance_function) { \
- uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
- assert(W == 4); \
- if (xoffset == 0 && yoffset == 0) \
- return full_variance_function(src8, src_stride, dst8, dst_stride, msk, \
- msk_stride, sse); \
- else if (xoffset == 0) \
- return aom_highbd_masked_subpel_var4xH_xzero( \
- src, src_stride, yoffset, dst, dst_stride, msk, msk_stride, sse, H, \
- calc_var); \
- else if (yoffset == 0) \
- return aom_highbd_masked_subpel_var4xH_yzero( \
- src, src_stride, xoffset, dst, dst_stride, msk, msk_stride, sse, H, \
- calc_var); \
- else \
- return aom_highbd_masked_subpel_var4xH_xnonzero_ynonzero( \
- src, src_stride, xoffset, yoffset, dst, dst_stride, msk, msk_stride, \
- sse, H, calc_var); \
- }
+static void highbd_masked_variance(const uint16_t *src_ptr, int src_stride,
+ const uint16_t *a_ptr, int a_stride,
+ const uint16_t *b_ptr, int b_stride,
+ const uint8_t *m_ptr, int m_stride,
+ int width, int height, uint64_t *sse,
+ int *sum_) {
+ int x, y;
+ // Note on bit widths:
+ // The maximum value of 'sum' is (2^12 - 1) * 128 * 128 =~ 2^26,
+ // so this can be kept as four 32-bit values.
+ // But the maximum value of 'sum_sq' is (2^12 - 1)^2 * 128 * 128 =~ 2^38,
+ // so this must be stored as two 64-bit values.
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+ const __m128i mask_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i round_const =
+ _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
+ const __m128i zero = _mm_setzero_si128();
+
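+  // Each predicted pixel is the A64 blend
+  // p = (a * m + b * (mask_max - m) + round_const) >> AOM_BLEND_A64_ROUND_BITS.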
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += 8) {
+ const __m128i src = _mm_loadu_si128((const __m128i *)&src_ptr[x]);
+ const __m128i a = _mm_loadu_si128((const __m128i *)&a_ptr[x]);
+ const __m128i b = _mm_loadu_si128((const __m128i *)&b_ptr[x]);
+ const __m128i m =
+ _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)&m_ptr[x]), zero);
+ const __m128i m_inv = _mm_sub_epi16(mask_max, m);
+
+ // Calculate 8 predicted pixels.
+ const __m128i data_l = _mm_unpacklo_epi16(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi16(m, m_inv);
+ __m128i pred_l = _mm_madd_epi16(data_l, mask_l);
+ pred_l = _mm_srai_epi32(_mm_add_epi32(pred_l, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi16(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi16(m, m_inv);
+ __m128i pred_r = _mm_madd_epi16(data_r, mask_r);
+ pred_r = _mm_srai_epi32(_mm_add_epi32(pred_r, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i src_l = _mm_unpacklo_epi16(src, zero);
+ const __m128i src_r = _mm_unpackhi_epi16(src, zero);
+ __m128i diff_l = _mm_sub_epi32(pred_l, src_l);
+ __m128i diff_r = _mm_sub_epi32(pred_r, src_r);
+
+ // Update partial sums and partial sums of squares
+ sum = _mm_add_epi32(sum, _mm_add_epi32(diff_l, diff_r));
+      // A trick: each entry of diff_l and diff_r occupies a 32-bit field,
+      // but its value lies in [-(2^12 - 1), 2^12 - 1]. So we can re-pack
+      // the differences into 16-bit fields and use _mm_madd_epi16 to
+      // square and partially sum them in a single instruction.
+ const __m128i tmp = _mm_packs_epi32(diff_l, diff_r);
+ const __m128i prod = _mm_madd_epi16(tmp, tmp);
+      // Sign-extend the 32-bit partial sums to 64 bits and accumulate
+ const __m128i sign = _mm_srai_epi32(prod, 31);
+ const __m128i tmp_0 = _mm_unpacklo_epi32(prod, sign);
+ const __m128i tmp_1 = _mm_unpackhi_epi32(prod, sign);
+ sum_sq = _mm_add_epi64(sum_sq, _mm_add_epi64(tmp_0, tmp_1));
+ }
-#define HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(W, H) \
- unsigned int aom_highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
- sse, calc_masked_variance, \
- aom_highbd_masked_variance##W##x##H##_ssse3); \
- } \
- unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
- sse, highbd_10_calc_masked_variance, \
- aom_highbd_10_masked_variance##W##x##H##_ssse3); \
- } \
- unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3( \
- const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
- const uint8_t *dst8, int dst_stride, const uint8_t *msk, int msk_stride, \
- unsigned int *sse) { \
- return highbd_masked_sub_pixel_variance##W##x##H##_ssse3( \
- src8, src_stride, xoffset, yoffset, dst8, dst_stride, msk, msk_stride, \
- sse, highbd_12_calc_masked_variance, \
- aom_highbd_12_masked_variance##W##x##H##_ssse3); \
- }
+ src_ptr += src_stride;
+ a_ptr += a_stride;
+ b_ptr += b_stride;
+ m_ptr += m_stride;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, zero);
+ sum = _mm_hadd_epi32(sum, zero);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ sum_sq = _mm_add_epi64(sum_sq, _mm_srli_si128(sum_sq, 8));
+ _mm_storel_epi64((__m128i *)sse, sum_sq);
+}
+
+static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
+ const uint16_t *a_ptr,
+ const uint16_t *b_ptr,
+ const uint8_t *m_ptr, int m_stride,
+ int height, int *sse, int *sum_) {
+ int y;
+ // Note: For this function, h <= 8 (or maybe 16 if we add 4:1 partitions).
+ // So the maximum value of sum is (2^12 - 1) * 4 * 16 =~ 2^18
+ // and the maximum value of sum_sq is (2^12 - 1)^2 * 4 * 16 =~ 2^30.
+ // So we can safely pack sum_sq into 32-bit fields, which is slightly more
+ // convenient.
+ __m128i sum = _mm_setzero_si128(), sum_sq = _mm_setzero_si128();
+ const __m128i mask_max = _mm_set1_epi16((1 << AOM_BLEND_A64_ROUND_BITS));
+ const __m128i round_const =
+ _mm_set1_epi32((1 << AOM_BLEND_A64_ROUND_BITS) >> 1);
+ const __m128i zero = _mm_setzero_si128();
+
+ for (y = 0; y < height; y += 2) {
+ __m128i src = _mm_unpacklo_epi64(
+ _mm_loadl_epi64((const __m128i *)src_ptr),
+ _mm_loadl_epi64((const __m128i *)&src_ptr[src_stride]));
+ const __m128i a = _mm_loadu_si128((const __m128i *)a_ptr);
+ const __m128i b = _mm_loadu_si128((const __m128i *)b_ptr);
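+    // Gather two 4-wide mask rows into one register and zero-extend the
+    // 8-bit mask values to 16 bits.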
+ const __m128i m = _mm_unpacklo_epi8(
+ _mm_unpacklo_epi32(
+ _mm_cvtsi32_si128(*(const uint32_t *)m_ptr),
+ _mm_cvtsi32_si128(*(const uint32_t *)&m_ptr[m_stride])),
+ zero);
+ const __m128i m_inv = _mm_sub_epi16(mask_max, m);
+
+ const __m128i data_l = _mm_unpacklo_epi16(a, b);
+ const __m128i mask_l = _mm_unpacklo_epi16(m, m_inv);
+ __m128i pred_l = _mm_madd_epi16(data_l, mask_l);
+ pred_l = _mm_srai_epi32(_mm_add_epi32(pred_l, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i data_r = _mm_unpackhi_epi16(a, b);
+ const __m128i mask_r = _mm_unpackhi_epi16(m, m_inv);
+ __m128i pred_r = _mm_madd_epi16(data_r, mask_r);
+ pred_r = _mm_srai_epi32(_mm_add_epi32(pred_r, round_const),
+ AOM_BLEND_A64_ROUND_BITS);
+
+ const __m128i src_l = _mm_unpacklo_epi16(src, zero);
+ const __m128i src_r = _mm_unpackhi_epi16(src, zero);
+ __m128i diff_l = _mm_sub_epi32(pred_l, src_l);
+ __m128i diff_r = _mm_sub_epi32(pred_r, src_r);
+
+ // Update partial sums and partial sums of squares
+ sum = _mm_add_epi32(sum, _mm_add_epi32(diff_l, diff_r));
+ const __m128i tmp = _mm_packs_epi32(diff_l, diff_r);
+ const __m128i prod = _mm_madd_epi16(tmp, tmp);
+ sum_sq = _mm_add_epi32(sum_sq, prod);
+
+ src_ptr += src_stride * 2;
+ a_ptr += 8;
+ b_ptr += 8;
+ m_ptr += m_stride * 2;
+ }
+ // Reduce down to a single sum and sum of squares
+ sum = _mm_hadd_epi32(sum, sum_sq);
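+  // sum now holds [s0 + s1, s2 + s3, q0 + q1, q2 + q3]; the second hadd
+  // leaves the total sum in lane 0 and the total SSE in lane 1.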
+ sum = _mm_hadd_epi32(sum, zero);
+ *sum_ = _mm_cvtsi128_si32(sum);
+ *sse = _mm_cvtsi128_si32(_mm_srli_si128(sum, 4));
+}
-HIGHBD_MASK_SUBPIX_VAR_SMALL(4, 4)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(4, 4)
-HIGHBD_MASK_SUBPIX_VAR_SMALL(4, 8)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(4, 8)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 4)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 4)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 8)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 8)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 16)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 16)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 8)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 8)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 16)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 16)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 32)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 32)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 16)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 16)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 32)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 32)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 64)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 64)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(64, 32)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(64, 32)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(64, 64)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(64, 64)
-#if CONFIG_EXT_PARTITION
-HIGHBD_MASK_SUBPIX_VAR_LARGE(64, 128)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(64, 128)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(128, 64)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(128, 64)
-HIGHBD_MASK_SUBPIX_VAR_LARGE(128, 128)
-HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(128, 128)
-#endif // CONFIG_EXT_PARTITION
#endif
diff --git a/third_party/aom/aom_dsp/x86/obmc_intrinsic_ssse3.h b/third_party/aom/aom_dsp/x86/obmc_intrinsic_ssse3.h
new file mode 100644
index 000000000..73589a32a
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/obmc_intrinsic_ssse3.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_DSP_X86_OBMC_INTRINSIC_SSSE3_H_
+#define AOM_DSP_X86_OBMC_INTRINSIC_SSSE3_H_
+
+#include <immintrin.h>
+
+#include "./aom_config.h"
+
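+// Horizontal sum of the four 32-bit lanes, returned as a 32-bit scalar.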
+static INLINE int32_t xx_hsum_epi32_si32(__m128i v_d) {
+ v_d = _mm_hadd_epi32(v_d, v_d);
+ v_d = _mm_hadd_epi32(v_d, v_d);
+ return _mm_cvtsi128_si32(v_d);
+}
+
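+// Horizontal sum of the two 64-bit lanes. 32-bit targets have no
+// _mm_cvtsi128_si64, so the result is spilled through memory there.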
+static INLINE int64_t xx_hsum_epi64_si64(__m128i v_q) {
+ v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));
+#if ARCH_X86_64
+ return _mm_cvtsi128_si64(v_q);
+#else
+ {
+ int64_t tmp;
+ _mm_storel_epi64((__m128i *)&tmp, v_q);
+ return tmp;
+ }
+#endif
+}
+
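+// Sum four 32-bit lanes into a 64-bit scalar: build a sign mask with
+// _mm_cmplt_epi32, widen each lane to 64 bits, then do a 64-bit sum.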
+static INLINE int64_t xx_hsum_epi32_si64(__m128i v_d) {
+ const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
+ const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
+ const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
+ return xx_hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
+}
+
+#endif // AOM_DSP_X86_OBMC_INTRINSIC_SSSE3_H_
diff --git a/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c b/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
index ad77f974c..21632644f 100644
--- a/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
+++ b/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
@@ -17,6 +17,7 @@
#include "aom/aom_integer.h"
#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/x86/obmc_intrinsic_ssse3.h"
#include "aom_dsp/x86/synonyms.h"
////////////////////////////////////////////////////////////////////////////////
diff --git a/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c b/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
index efb3659cf..1797ded80 100644
--- a/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
+++ b/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
@@ -17,8 +17,9 @@
#include "aom/aom_integer.h"
#include "aom_dsp/aom_dsp_common.h"
-#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/aom_filter.h"
+#include "aom_dsp/x86/obmc_intrinsic_ssse3.h"
+#include "aom_dsp/x86/synonyms.h"
////////////////////////////////////////////////////////////////////////////////
// 8 bit
diff --git a/third_party/aom/aom_dsp/x86/synonyms.h b/third_party/aom/aom_dsp/x86/synonyms.h
index bef606dae..cd049a454 100644
--- a/third_party/aom/aom_dsp/x86/synonyms.h
+++ b/third_party/aom/aom_dsp/x86/synonyms.h
@@ -89,32 +89,4 @@ static INLINE __m128i xx_roundn_epi32(__m128i v_val_d, int bits) {
return _mm_srai_epi32(v_tmp_d, bits);
}
-#ifdef __SSSE3__
-static INLINE int32_t xx_hsum_epi32_si32(__m128i v_d) {
- v_d = _mm_hadd_epi32(v_d, v_d);
- v_d = _mm_hadd_epi32(v_d, v_d);
- return _mm_cvtsi128_si32(v_d);
-}
-
-static INLINE int64_t xx_hsum_epi64_si64(__m128i v_q) {
- v_q = _mm_add_epi64(v_q, _mm_srli_si128(v_q, 8));
-#if ARCH_X86_64
- return _mm_cvtsi128_si64(v_q);
-#else
- {
- int64_t tmp;
- _mm_storel_epi64((__m128i *)&tmp, v_q);
- return tmp;
- }
-#endif
-}
-
-static INLINE int64_t xx_hsum_epi32_si64(__m128i v_d) {
- const __m128i v_sign_d = _mm_cmplt_epi32(v_d, _mm_setzero_si128());
- const __m128i v_0_q = _mm_unpacklo_epi32(v_d, v_sign_d);
- const __m128i v_1_q = _mm_unpackhi_epi32(v_d, v_sign_d);
- return xx_hsum_epi64_si64(_mm_add_epi64(v_0_q, v_1_q));
-}
-#endif // __SSSE3__
-
#endif // AOM_DSP_X86_SYNONYMS_H_
diff --git a/third_party/aom/aom_dsp/x86/txfm_common_avx2.h b/third_party/aom/aom_dsp/x86/txfm_common_avx2.h
index 39e9b8e2a..4f7a60c22 100644
--- a/third_party/aom/aom_dsp/x86/txfm_common_avx2.h
+++ b/third_party/aom/aom_dsp/x86/txfm_common_avx2.h
@@ -34,7 +34,8 @@ static INLINE void mm256_reverse_epi16(__m256i *u) {
*u = _mm256_permute2x128_si256(v, v, 1);
}
-static INLINE void mm256_transpose_16x16(__m256i *in) {
+// Note: 'in' and 'out' may point to the same array (in-place transpose)
+static INLINE void mm256_transpose_16x16(const __m256i *in, __m256i *out) {
__m256i tr0_0 = _mm256_unpacklo_epi16(in[0], in[1]);
__m256i tr0_1 = _mm256_unpackhi_epi16(in[0], in[1]);
__m256i tr0_2 = _mm256_unpacklo_epi16(in[2], in[3]);
@@ -143,29 +144,30 @@ static INLINE void mm256_transpose_16x16(__m256i *in) {
// 86 96 a6 b6 c6 d6 e6 f6 8e ae 9e be ce de ee fe
// 87 97 a7 b7 c7 d7 e7 f7 8f 9f af bf cf df ef ff
- in[0] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x20); // 0010 0000
- in[8] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x31); // 0011 0001
- in[1] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x20);
- in[9] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x31);
- in[2] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x20);
- in[10] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x31);
- in[3] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x20);
- in[11] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x31);
-
- in[4] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x20);
- in[12] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x31);
- in[5] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x20);
- in[13] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x31);
- in[6] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x20);
- in[14] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x31);
- in[7] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x20);
- in[15] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x31);
+ out[0] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x20); // 0010 0000
+ out[8] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x31); // 0011 0001
+ out[1] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x20);
+ out[9] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x31);
+ out[2] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x20);
+ out[10] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x31);
+ out[3] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x20);
+ out[11] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x31);
+
+ out[4] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x20);
+ out[12] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x31);
+ out[5] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x20);
+ out[13] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x31);
+ out[6] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x20);
+ out[14] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x31);
+ out[7] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x20);
+ out[15] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x31);
}
-static INLINE __m256i butter_fly(__m256i a0, __m256i a1, const __m256i cospi) {
+static INLINE __m256i butter_fly(const __m256i *a0, const __m256i *a1,
+ const __m256i *cospi) {
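+  // Multiply both inputs by the shared cosine constants and apply the DCT
+  // rounding constant before the final shift.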
const __m256i dct_rounding = _mm256_set1_epi32(DCT_CONST_ROUNDING);
- __m256i y0 = _mm256_madd_epi16(a0, cospi);
- __m256i y1 = _mm256_madd_epi16(a1, cospi);
+ __m256i y0 = _mm256_madd_epi16(*a0, *cospi);
+ __m256i y1 = _mm256_madd_epi16(*a1, *cospi);
y0 = _mm256_add_epi32(y0, dct_rounding);
y1 = _mm256_add_epi32(y1, dct_rounding);