author     trav90 <travawine@palemoon.org>    2018-10-18 06:04:57 -0500
committer  trav90 <travawine@palemoon.org>    2018-10-18 06:04:57 -0500
commit     7369c7d7a5eed32963d8af37658286617919f91c (patch)
tree       5397ce7ee9bca1641118fdc3187bd9e2b24fdc9c /third_party/aom/aom_dsp/x86
parent     77887af9c4ad1420bbdb33984af4f74b55ca59db (diff)
Update aom to commit id f5bdeac22930ff4c6b219be49c843db35970b918
Diffstat (limited to 'third_party/aom/aom_dsp/x86')
-rw-r--r--  third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c                       | 141
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_quantize_intrin_avx2.c        | 171
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c        |   2
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm                 |   6
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm                   |  13
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm |   2
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_variance_sse2.c               | 270
-rw-r--r--  third_party/aom/aom_dsp/x86/inv_txfm_sse2.c                      | 234
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c            |  12
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c       |  28
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_sad_sse4.c                      |  12
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_variance_sse4.c                 |  12
-rw-r--r--  third_party/aom/aom_dsp/x86/sad4d_sse2.asm                        |   6
-rw-r--r--  third_party/aom/aom_dsp/x86/sad_highbd_avx2.c                    |   5
-rw-r--r--  third_party/aom/aom_dsp/x86/sad_sse2.asm                          |  16
-rw-r--r--  third_party/aom/aom_dsp/x86/variance_sse2.c                      | 469
16 files changed, 642 insertions(+), 757 deletions(-)
diff --git a/third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c b/third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c
index a337e618d..657dcfa22 100644
--- a/third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c
+++ b/third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c
@@ -85,147 +85,6 @@ void aom_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
}
-void aom_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
- int stride) {
- __m128i in0, in1, in2, in3;
- __m128i u0, u1;
- __m128i sum = _mm_setzero_si128();
- int i;
-
- for (i = 0; i < 2; ++i) {
- in0 = _mm_load_si128((const __m128i *)(input + 0 * stride + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 0 * stride + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 1 * stride + 0));
- in3 = _mm_load_si128((const __m128i *)(input + 1 * stride + 8));
-
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 2 * stride + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 2 * stride + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 3 * stride + 0));
- in3 = _mm_load_si128((const __m128i *)(input + 3 * stride + 8));
-
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 4 * stride + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 4 * stride + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 5 * stride + 0));
- in3 = _mm_load_si128((const __m128i *)(input + 5 * stride + 8));
-
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 6 * stride + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 6 * stride + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 7 * stride + 0));
- in3 = _mm_load_si128((const __m128i *)(input + 7 * stride + 8));
-
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- sum = _mm_add_epi16(sum, u1);
- input += 8 * stride;
- }
-
- u0 = _mm_setzero_si128();
- in0 = _mm_unpacklo_epi16(u0, sum);
- in1 = _mm_unpackhi_epi16(u0, sum);
- in0 = _mm_srai_epi32(in0, 16);
- in1 = _mm_srai_epi32(in1, 16);
-
- sum = _mm_add_epi32(in0, in1);
- in0 = _mm_unpacklo_epi32(sum, u0);
- in1 = _mm_unpackhi_epi32(sum, u0);
-
- sum = _mm_add_epi32(in0, in1);
- in0 = _mm_srli_si128(sum, 8);
-
- in1 = _mm_add_epi32(sum, in0);
- in1 = _mm_srai_epi32(in1, 1);
- output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
-}
-
-void aom_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
- int stride) {
- __m128i in0, in1, in2, in3;
- __m128i u0, u1;
- __m128i sum = _mm_setzero_si128();
- int i;
-
- for (i = 0; i < 8; ++i) {
- in0 = _mm_load_si128((const __m128i *)(input + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 16));
- in3 = _mm_load_si128((const __m128i *)(input + 24));
-
- input += stride;
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 16));
- in3 = _mm_load_si128((const __m128i *)(input + 24));
-
- input += stride;
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 16));
- in3 = _mm_load_si128((const __m128i *)(input + 24));
-
- input += stride;
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 16));
- in3 = _mm_load_si128((const __m128i *)(input + 24));
-
- input += stride;
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- sum = _mm_add_epi16(sum, u1);
- }
-
- u0 = _mm_setzero_si128();
- in0 = _mm_unpacklo_epi16(u0, sum);
- in1 = _mm_unpackhi_epi16(u0, sum);
- in0 = _mm_srai_epi32(in0, 16);
- in1 = _mm_srai_epi32(in1, 16);
-
- sum = _mm_add_epi32(in0, in1);
- in0 = _mm_unpacklo_epi32(sum, u0);
- in1 = _mm_unpackhi_epi32(sum, u0);
-
- sum = _mm_add_epi32(in0, in1);
- in0 = _mm_srli_si128(sum, 8);
-
- in1 = _mm_add_epi32(sum, in0);
- in1 = _mm_srai_epi32(in1, 3);
- output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
-}
-
#define DCT_HIGH_BIT_DEPTH 0
#define FDCT4x4_2D aom_fdct4x4_sse2
#define FDCT8x8_2D aom_fdct8x8_sse2
diff --git a/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_avx2.c b/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_avx2.c
new file mode 100644
index 000000000..2bbf15ef2
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_avx2.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+
+static INLINE void init_one_qp(const __m128i *p, __m256i *qp) {
+ const __m128i sign = _mm_srai_epi16(*p, 15);
+ const __m128i dc = _mm_unpacklo_epi16(*p, sign);
+ const __m128i ac = _mm_unpackhi_epi16(*p, sign);
+ *qp = _mm256_insertf128_si256(_mm256_castsi128_si256(dc), ac, 1);
+}
+
+static INLINE void update_qp(__m256i *qp) {
+ int i;
+ for (i = 0; i < 5; ++i) {
+ qp[i] = _mm256_permute2x128_si256(qp[i], qp[i], 0x11);
+ }
+}
+
+static INLINE void init_qp(const int16_t *zbin_ptr, const int16_t *round_ptr,
+ const int16_t *quant_ptr, const int16_t *dequant_ptr,
+ const int16_t *quant_shift_ptr, __m256i *qp) {
+ const __m128i zbin = _mm_loadu_si128((const __m128i *)zbin_ptr);
+ const __m128i round = _mm_loadu_si128((const __m128i *)round_ptr);
+ const __m128i quant = _mm_loadu_si128((const __m128i *)quant_ptr);
+ const __m128i dequant = _mm_loadu_si128((const __m128i *)dequant_ptr);
+ const __m128i quant_shift = _mm_loadu_si128((const __m128i *)quant_shift_ptr);
+ init_one_qp(&zbin, &qp[0]);
+ init_one_qp(&round, &qp[1]);
+ init_one_qp(&quant, &qp[2]);
+ init_one_qp(&dequant, &qp[3]);
+ init_one_qp(&quant_shift, &qp[4]);
+}
+
+// Note:
+// Each 32-bit lane of *x is multiplied by the matching lane of *y
+// (8 parallel int32_t multiplies) and each 64-bit product is shifted
+// right by 16. The eight 32-bit results are saved in *p.
+static INLINE void mm256_mul_shift_epi32(const __m256i *x, const __m256i *y,
+ __m256i *p) {
+ __m256i prod_lo = _mm256_mul_epi32(*x, *y);
+ __m256i prod_hi = _mm256_srli_epi64(*x, 32);
+ const __m256i mult_hi = _mm256_srli_epi64(*y, 32);
+ prod_hi = _mm256_mul_epi32(prod_hi, mult_hi);
+
+ prod_lo = _mm256_srli_epi64(prod_lo, 16);
+ const __m256i mask = _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1);
+ prod_lo = _mm256_and_si256(prod_lo, mask);
+ prod_hi = _mm256_srli_epi64(prod_hi, 16);
+
+ prod_hi = _mm256_slli_epi64(prod_hi, 32);
+ *p = _mm256_or_si256(prod_lo, prod_hi);
+}
+
+static INLINE void quantize(const __m256i *qp, __m256i *c,
+ const int16_t *iscan_ptr, tran_low_t *qcoeff,
+ tran_low_t *dqcoeff, __m256i *eob) {
+ const __m256i abs = _mm256_abs_epi32(*c);
+ const __m256i flag1 = _mm256_cmpgt_epi32(abs, qp[0]);
+ __m256i flag2 = _mm256_cmpeq_epi32(abs, qp[0]);
+ flag2 = _mm256_or_si256(flag1, flag2);
+ const int32_t nzflag = _mm256_movemask_epi8(flag2);
+
+ if (LIKELY(nzflag)) {
+ __m256i q = _mm256_add_epi32(abs, qp[1]);
+ __m256i tmp;
+ mm256_mul_shift_epi32(&q, &qp[2], &tmp);
+ q = _mm256_add_epi32(tmp, q);
+
+ mm256_mul_shift_epi32(&q, &qp[4], &q);
+ __m256i dq = _mm256_mullo_epi32(q, qp[3]);
+
+ q = _mm256_sign_epi32(q, *c);
+ dq = _mm256_sign_epi32(dq, *c);
+ q = _mm256_and_si256(q, flag2);
+ dq = _mm256_and_si256(dq, flag2);
+
+ _mm256_storeu_si256((__m256i *)qcoeff, q);
+ _mm256_storeu_si256((__m256i *)dqcoeff, dq);
+
+ const __m128i isc = _mm_loadu_si128((const __m128i *)iscan_ptr);
+ const __m128i zr = _mm_setzero_si128();
+ const __m128i lo = _mm_unpacklo_epi16(isc, zr);
+ const __m128i hi = _mm_unpackhi_epi16(isc, zr);
+ const __m256i iscan =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
+
+ const __m256i zero = _mm256_setzero_si256();
+ const __m256i zc = _mm256_cmpeq_epi32(dq, zero);
+ const __m256i nz = _mm256_cmpeq_epi32(zc, zero);
+ __m256i cur_eob = _mm256_sub_epi32(iscan, nz);
+ cur_eob = _mm256_and_si256(cur_eob, nz);
+ *eob = _mm256_max_epi32(cur_eob, *eob);
+ } else {
+ const __m256i zero = _mm256_setzero_si256();
+ _mm256_storeu_si256((__m256i *)qcoeff, zero);
+ _mm256_storeu_si256((__m256i *)dqcoeff, zero);
+ }
+}
+
+void aom_highbd_quantize_b_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
+ (void)scan;
+ const unsigned int step = 8;
+
+ if (LIKELY(!skip_block)) {
+ __m256i qp[5], coeff;
+ init_qp(zbin_ptr, round_ptr, quant_ptr, dequant_ptr, quant_shift_ptr, qp);
+ coeff = _mm256_loadu_si256((const __m256i *)coeff_ptr);
+
+ __m256i eob = _mm256_setzero_si256();
+ quantize(qp, &coeff, iscan, qcoeff_ptr, dqcoeff_ptr, &eob);
+
+ coeff_ptr += step;
+ qcoeff_ptr += step;
+ dqcoeff_ptr += step;
+ iscan += step;
+ n_coeffs -= step;
+
+ update_qp(qp);
+
+ while (n_coeffs > 0) {
+ coeff = _mm256_loadu_si256((const __m256i *)coeff_ptr);
+ quantize(qp, &coeff, iscan, qcoeff_ptr, dqcoeff_ptr, &eob);
+
+ coeff_ptr += step;
+ qcoeff_ptr += step;
+ dqcoeff_ptr += step;
+ iscan += step;
+ n_coeffs -= step;
+ }
+ {
+ __m256i eob_s;
+ eob_s = _mm256_shuffle_epi32(eob, 0xe);
+ eob = _mm256_max_epi16(eob, eob_s);
+ eob_s = _mm256_shufflelo_epi16(eob, 0xe);
+ eob = _mm256_max_epi16(eob, eob_s);
+ eob_s = _mm256_shufflelo_epi16(eob, 1);
+ eob = _mm256_max_epi16(eob, eob_s);
+ const __m128i final_eob = _mm_max_epi16(_mm256_castsi256_si128(eob),
+ _mm256_extractf128_si256(eob, 1));
+ *eob_ptr = _mm_extract_epi16(final_eob, 0);
+ }
+ } else {
+ do {
+ const __m256i zero = _mm256_setzero_si256();
+ _mm256_storeu_si256((__m256i *)qcoeff_ptr, zero);
+ _mm256_storeu_si256((__m256i *)dqcoeff_ptr, zero);
+ qcoeff_ptr += step;
+ dqcoeff_ptr += step;
+ n_coeffs -= step;
+ } while (n_coeffs > 0);
+ *eob_ptr = 0;
+ }
+}
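
For reference, the arithmetic that quantize() and mm256_mul_shift_epi32() vectorize above is the quantize_b recurrence visible in the new file: add the rounding term, take the high 16 bits of the product with quant, fold that back in, scale by quant_shift, then reconstruct with dequant. A minimal scalar sketch of one coefficient's path follows; the helper name and standalone form are illustrative only, the commit itself contains only the AVX2 version.

#include <stdint.h>
/* Scalar sketch of one coefficient's path through the AVX2 quantizer
 * above; zbin/round/quant/quant_shift/dequant mirror qp[0]..qp[4]. */
static int32_t quantize_one_sketch(int32_t coeff, int32_t zbin, int32_t round,
                                   int32_t quant, int32_t quant_shift,
                                   int32_t dequant, int32_t *dqcoeff) {
  const int32_t abs_coeff = coeff < 0 ? -coeff : coeff;
  if (abs_coeff < zbin) {  /* below the zero bin: quantizes to zero */
    *dqcoeff = 0;
    return 0;
  }
  int64_t tmp = (int64_t)abs_coeff + round;
  tmp += (tmp * quant) >> 16;              /* q = x + ((x * quant) >> 16) */
  int32_t q = (int32_t)((tmp * quant_shift) >> 16);
  if (coeff < 0) q = -q;                   /* the _mm256_sign_epi32 step */
  *dqcoeff = q * dequant;                  /* the _mm256_mullo_epi32 step */
  return q;
}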
diff --git a/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c b/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c
index 3ee24ab16..5570ca5b7 100644
--- a/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c
+++ b/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c
@@ -15,7 +15,6 @@
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
-#if CONFIG_HIGHBITDEPTH
void aom_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr,
@@ -152,4 +151,3 @@ void aom_highbd_quantize_b_32x32_sse2(
}
*eob_ptr = eob + 1;
}
-#endif
diff --git a/third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm b/third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm
index 0c7cb3998..9c3bbdd69 100644
--- a/third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm
@@ -288,3 +288,9 @@ HIGH_SADNXN4D 8, 8
HIGH_SADNXN4D 8, 4
HIGH_SADNXN4D 4, 8
HIGH_SADNXN4D 4, 4
+%if CONFIG_EXT_PARTITION_TYPES
+HIGH_SADNXN4D 4, 16
+HIGH_SADNXN4D 16, 4
+HIGH_SADNXN4D 8, 32
+HIGH_SADNXN4D 32, 8
+%endif
diff --git a/third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm b/third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm
index 8427b891c..248b98ef5 100644
--- a/third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm
@@ -227,6 +227,10 @@ HIGH_SAD32XN 16 ; highbd_sad32x16_sse2
HIGH_SAD32XN 64, 1 ; highbd_sad32x64_avg_sse2
HIGH_SAD32XN 32, 1 ; highbd_sad32x32_avg_sse2
HIGH_SAD32XN 16, 1 ; highbd_sad32x16_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+HIGH_SAD32XN 8 ; highbd_sad_32x8_sse2
+HIGH_SAD32XN 8, 1 ; highbd_sad_32x8_avg_sse2
+%endif
; unsigned int aom_highbd_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -295,7 +299,10 @@ HIGH_SAD16XN 8 ; highbd_sad16x8_sse2
HIGH_SAD16XN 32, 1 ; highbd_sad16x32_avg_sse2
HIGH_SAD16XN 16, 1 ; highbd_sad16x16_avg_sse2
HIGH_SAD16XN 8, 1 ; highbd_sad16x8_avg_sse2
-
+%if CONFIG_EXT_PARTITION_TYPES
+HIGH_SAD16XN 4 ; highbd_sad_16x4_sse2
+HIGH_SAD16XN 4, 1 ; highbd_sad_16x4_avg_sse2
+%endif
; unsigned int aom_highbd_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -364,3 +371,7 @@ HIGH_SAD8XN 4 ; highbd_sad8x4_sse2
HIGH_SAD8XN 16, 1 ; highbd_sad8x16_avg_sse2
HIGH_SAD8XN 8, 1 ; highbd_sad8x8_avg_sse2
HIGH_SAD8XN 4, 1 ; highbd_sad8x4_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+HIGH_SAD8XN 32 ; highbd_sad_8x32_sse2
+HIGH_SAD8XN 32, 1 ; highbd_sad_8x32_avg_sse2
+%endif
diff --git a/third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm b/third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
index 797e9c1d4..ee19796e3 100644
--- a/third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
@@ -75,7 +75,7 @@ SECTION .text
paddd m6, m4
mov r1, ssem ; r1 = unsigned int *sse
movd [r1], m7 ; store sse
- movd rax, m6 ; store sum as return value
+ movd eax, m6 ; store sum as return value
%endif
RET
%endmacro
diff --git a/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c b/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
index 29f96ce24..93923ffb0 100644
--- a/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
+++ b/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
@@ -9,6 +9,7 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+#include <assert.h>
#include <emmintrin.h> // SSE2
#include "./aom_config.h"
@@ -16,6 +17,9 @@
#include "aom_ports/mem.h"
+#include "./av1_rtcd.h"
+#include "av1/common/filter.h"
+
typedef uint32_t (*high_variance_fn_t)(const uint16_t *src, int src_stride,
const uint16_t *ref, int ref_stride,
uint32_t *sse, int *sum);
@@ -181,6 +185,11 @@ VAR_FN(16, 16, 16, 8);
VAR_FN(16, 8, 8, 7);
VAR_FN(8, 16, 8, 7);
VAR_FN(8, 8, 8, 6);
+#if CONFIG_EXT_PARTITION_TYPES
+VAR_FN(16, 4, 16, 6);
+VAR_FN(8, 32, 8, 8);
+VAR_FN(32, 8, 16, 8);
+#endif
#undef VAR_FN
@@ -387,6 +396,7 @@ DECLS(sse2);
return (var >= 0) ? (uint32_t)var : 0; \
}
+#if CONFIG_EXT_PARTITION_TYPES
#define FNS(opt) \
FN(64, 64, 16, 6, 6, opt, (int64_t)); \
FN(64, 32, 16, 6, 5, opt, (int64_t)); \
@@ -398,7 +408,24 @@ DECLS(sse2);
FN(16, 8, 16, 4, 3, opt, (int64_t)); \
FN(8, 16, 8, 3, 4, opt, (int64_t)); \
FN(8, 8, 8, 3, 3, opt, (int64_t)); \
- FN(8, 4, 8, 3, 2, opt, (int64_t));
+ FN(8, 4, 8, 3, 2, opt, (int64_t)); \
+ FN(16, 4, 16, 4, 2, opt, (int64_t)); \
+ FN(8, 32, 8, 3, 5, opt, (int64_t)); \
+ FN(32, 8, 16, 5, 3, opt, (int64_t))
+#else
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int64_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int64_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int64_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int64_t))
+#endif
FNS(sse2);
@@ -412,9 +439,9 @@ FNS(sse2);
const uint16_t *dst, ptrdiff_t dst_stride, const uint16_t *sec, \
ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \
void *unused);
-#define DECLS(opt1) \
- DECL(16, opt1) \
- DECL(8, opt1)
+#define DECLS(opt) \
+ DECL(16, opt) \
+ DECL(8, opt)
DECLS(sse2);
#undef DECL
@@ -546,18 +573,36 @@ DECLS(sse2);
return (var >= 0) ? (uint32_t)var : 0; \
}
-#define FNS(opt1) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (int64_t)); \
- FN(8, 16, 8, 4, 3, opt1, (int64_t)); \
- FN(8, 8, 8, 3, 3, opt1, (int64_t)); \
- FN(8, 4, 8, 3, 2, opt1, (int64_t));
+#if CONFIG_EXT_PARTITION_TYPES
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int64_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int64_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int64_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int64_t)); \
+ FN(16, 4, 16, 4, 2, opt, (int64_t)); \
+ FN(8, 32, 8, 3, 5, opt, (int64_t)); \
+ FN(32, 8, 16, 5, 3, opt, (int64_t));
+#else
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int64_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int64_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int64_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int64_t));
+#endif
FNS(sse2);
@@ -565,131 +610,94 @@ FNS(sse2);
#undef FN
void aom_highbd_upsampled_pred_sse2(uint16_t *comp_pred, int width, int height,
- const uint8_t *ref8, int ref_stride) {
- int i, j;
- int stride = ref_stride << 3;
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-
- if (width >= 8) {
- // read 8 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
- __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
- __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
- __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
- __m128i s4 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 32));
- __m128i s5 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 40));
- __m128i s6 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 48));
- __m128i s7 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 56));
- __m128i t0, t1, t2, t3;
-
- t0 = _mm_unpacklo_epi16(s0, s1);
- t1 = _mm_unpacklo_epi16(s2, s3);
- t2 = _mm_unpacklo_epi16(s4, s5);
- t3 = _mm_unpacklo_epi16(s6, s7);
- t0 = _mm_unpacklo_epi32(t0, t1);
- t2 = _mm_unpacklo_epi32(t2, t3);
- t0 = _mm_unpacklo_epi64(t0, t2);
-
- _mm_storeu_si128((__m128i *)(comp_pred), t0);
+ int subpel_x_q3, int subpel_y_q3,
+ const uint8_t *ref8, int ref_stride,
+ int bd) {
+ if (!subpel_x_q3 && !subpel_y_q3) {
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ if (width >= 8) {
+ int i;
+ assert(!(width & 7));
+ /*Read 8 pixels one row at a time.*/
+ for (i = 0; i < height; i++) {
+ int j;
+ for (j = 0; j < width; j += 8) {
+ __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
+ _mm_storeu_si128((__m128i *)comp_pred, s0);
+ comp_pred += 8;
+ ref += 8;
+ }
+ ref += ref_stride - width;
+ }
+ } else {
+ int i;
+ assert(!(width & 3));
+ /*Read 4 pixels two rows at a time.*/
+ for (i = 0; i < height; i += 2) {
+ __m128i s0 = _mm_loadl_epi64((const __m128i *)ref);
+ __m128i s1 = _mm_loadl_epi64((const __m128i *)(ref + ref_stride));
+ __m128i t0 = _mm_unpacklo_epi64(s0, s1);
+ _mm_storeu_si128((__m128i *)comp_pred, t0);
comp_pred += 8;
- ref += 64; // 8 * 8;
+ ref += 2 * ref_stride;
}
- ref += stride - (width << 3);
}
} else {
- // read 4 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
- __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
- __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
- __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
- __m128i t0, t1;
-
- t0 = _mm_unpacklo_epi16(s0, s1);
- t1 = _mm_unpacklo_epi16(s2, s3);
- t0 = _mm_unpacklo_epi32(t0, t1);
-
- _mm_storel_epi64((__m128i *)(comp_pred), t0);
- comp_pred += 4;
- ref += 4 * 8;
- }
- ref += stride - (width << 3);
+ InterpFilterParams filter;
+ filter = av1_get_interp_filter_params(EIGHTTAP_REGULAR);
+ if (!subpel_y_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ aom_highbd_convolve8_horiz(ref8, ref_stride,
+ CONVERT_TO_BYTEPTR(comp_pred), width, kernel,
+ 16, NULL, -1, width, height, bd);
+ } else if (!subpel_x_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ aom_highbd_convolve8_vert(ref8, ref_stride, CONVERT_TO_BYTEPTR(comp_pred),
+ width, NULL, -1, kernel, 16, width, height, bd);
+ } else {
+ DECLARE_ALIGNED(16, uint16_t,
+ temp[((MAX_SB_SIZE + 16) + 16) * MAX_SB_SIZE]);
+ const int16_t *kernel_x;
+ const int16_t *kernel_y;
+ int intermediate_height;
+ kernel_x = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ kernel_y = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ intermediate_height =
+ (((height - 1) * 8 + subpel_y_q3) >> 3) + filter.taps;
+ assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
+ aom_highbd_convolve8_horiz(ref8 - ref_stride * ((filter.taps >> 1) - 1),
+ ref_stride, CONVERT_TO_BYTEPTR(temp),
+ MAX_SB_SIZE, kernel_x, 16, NULL, -1, width,
+ intermediate_height, bd);
+ aom_highbd_convolve8_vert(
+ CONVERT_TO_BYTEPTR(temp + MAX_SB_SIZE * ((filter.taps >> 1) - 1)),
+ MAX_SB_SIZE, CONVERT_TO_BYTEPTR(comp_pred), width, NULL, -1, kernel_y,
+ 16, width, height, bd);
}
}
}
void aom_highbd_comp_avg_upsampled_pred_sse2(uint16_t *comp_pred,
const uint8_t *pred8, int width,
- int height, const uint8_t *ref8,
- int ref_stride) {
- const __m128i one = _mm_set1_epi16(1);
- int i, j;
- int stride = ref_stride << 3;
+ int height, int subpel_x_q3,
+ int subpel_y_q3,
+ const uint8_t *ref8,
+ int ref_stride, int bd) {
uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-
- if (width >= 8) {
- // read 8 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
- __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
- __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
- __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
- __m128i s4 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 32));
- __m128i s5 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 40));
- __m128i s6 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 48));
- __m128i s7 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 56));
- __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
- __m128i t0, t1, t2, t3;
-
- t0 = _mm_unpacklo_epi16(s0, s1);
- t1 = _mm_unpacklo_epi16(s2, s3);
- t2 = _mm_unpacklo_epi16(s4, s5);
- t3 = _mm_unpacklo_epi16(s6, s7);
- t0 = _mm_unpacklo_epi32(t0, t1);
- t2 = _mm_unpacklo_epi32(t2, t3);
- t0 = _mm_unpacklo_epi64(t0, t2);
-
- p0 = _mm_adds_epu16(t0, p0);
- p0 = _mm_adds_epu16(p0, one);
- p0 = _mm_srli_epi16(p0, 1);
-
- _mm_storeu_si128((__m128i *)(comp_pred), p0);
- comp_pred += 8;
- pred += 8;
- ref += 8 * 8;
- }
- ref += stride - (width << 3);
- }
- } else {
- // read 4 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
- __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
- __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
- __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
- __m128i p0 = _mm_loadl_epi64((const __m128i *)pred);
- __m128i t0, t1;
-
- t0 = _mm_unpacklo_epi16(s0, s1);
- t1 = _mm_unpacklo_epi16(s2, s3);
- t0 = _mm_unpacklo_epi32(t0, t1);
-
- p0 = _mm_adds_epu16(t0, p0);
- p0 = _mm_adds_epu16(p0, one);
- p0 = _mm_srli_epi16(p0, 1);
-
- _mm_storel_epi64((__m128i *)(comp_pred), p0);
- comp_pred += 4;
- pred += 4;
- ref += 4 * 8;
- }
- ref += stride - (width << 3);
- }
+ int n;
+ int i;
+ aom_highbd_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3,
+ ref8, ref_stride, bd);
+ /*The total number of pixels must be a multiple of 8 (e.g., 4x4).*/
+ assert(!(width * height & 7));
+ n = width * height >> 3;
+ for (i = 0; i < n; i++) {
+ __m128i s0 = _mm_loadu_si128((const __m128i *)comp_pred);
+ __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
+ _mm_storeu_si128((__m128i *)comp_pred, _mm_avg_epu16(s0, p0));
+ comp_pred += 8;
+ pred += 8;
}
}
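
One detail worth noting in the rewritten aom_highbd_comp_avg_upsampled_pred_sse2(): _mm_avg_epu16 computes a rounded average per unsigned 16-bit lane, (a + b + 1) >> 1, without intermediate overflow. That is exactly what the deleted adds_epu16 / add-one / srli_epi16 sequence produced (the old saturating adds never saturated for 10/12-bit samples). A one-lane scalar model, illustrative only:

#include <stdint.h>
/* Illustrative scalar model of one _mm_avg_epu16 lane. */
static uint16_t avg_round_u16(uint16_t a, uint16_t b) {
  return (uint16_t)(((uint32_t)a + b + 1) >> 1);
}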
diff --git a/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c b/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
index be200df4c..86ce928b7 100644
--- a/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
+++ b/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
@@ -3498,237 +3498,3 @@ void idct32_8col(__m128i *in0, __m128i *in1) {
in1[14] = _mm_sub_epi16(stp1_1, stp1_30);
in1[15] = _mm_sub_epi16(stp1_0, stp1_31);
}
-
-#if CONFIG_HIGHBITDEPTH
-static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
- __m128i ubounded, retval;
- const __m128i zero = _mm_set1_epi16(0);
- const __m128i one = _mm_set1_epi16(1);
- const __m128i max = _mm_subs_epi16(_mm_slli_epi16(one, bd), one);
- ubounded = _mm_cmpgt_epi16(value, max);
- retval = _mm_andnot_si128(ubounded, value);
- ubounded = _mm_and_si128(ubounded, max);
- retval = _mm_or_si128(retval, ubounded);
- retval = _mm_and_si128(retval, _mm_cmpgt_epi16(retval, zero));
- return retval;
-}
-
-void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
- tran_low_t out[4 * 4];
- tran_low_t *outptr = out;
- int i, j;
- __m128i inptr[4];
- __m128i sign_bits[2];
- __m128i temp_mm, min_input, max_input;
- int test;
- uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- int optimised_cols = 0;
- const __m128i zero = _mm_set1_epi16(0);
- const __m128i eight = _mm_set1_epi16(8);
- const __m128i max = _mm_set1_epi16(12043);
- const __m128i min = _mm_set1_epi16(-12043);
- // Load input into __m128i
- inptr[0] = _mm_loadu_si128((const __m128i *)input);
- inptr[1] = _mm_loadu_si128((const __m128i *)(input + 4));
- inptr[2] = _mm_loadu_si128((const __m128i *)(input + 8));
- inptr[3] = _mm_loadu_si128((const __m128i *)(input + 12));
-
- // Pack to 16 bits
- inptr[0] = _mm_packs_epi32(inptr[0], inptr[1]);
- inptr[1] = _mm_packs_epi32(inptr[2], inptr[3]);
-
- max_input = _mm_max_epi16(inptr[0], inptr[1]);
- min_input = _mm_min_epi16(inptr[0], inptr[1]);
- max_input = _mm_cmpgt_epi16(max_input, max);
- min_input = _mm_cmplt_epi16(min_input, min);
- temp_mm = _mm_or_si128(max_input, min_input);
- test = _mm_movemask_epi8(temp_mm);
-
- if (!test) {
- // Do the row transform
- aom_idct4_sse2(inptr);
-
- // Check the min & max values
- max_input = _mm_max_epi16(inptr[0], inptr[1]);
- min_input = _mm_min_epi16(inptr[0], inptr[1]);
- max_input = _mm_cmpgt_epi16(max_input, max);
- min_input = _mm_cmplt_epi16(min_input, min);
- temp_mm = _mm_or_si128(max_input, min_input);
- test = _mm_movemask_epi8(temp_mm);
-
- if (test) {
- array_transpose_4x4(inptr);
- sign_bits[0] = _mm_cmplt_epi16(inptr[0], zero);
- sign_bits[1] = _mm_cmplt_epi16(inptr[1], zero);
- inptr[3] = _mm_unpackhi_epi16(inptr[1], sign_bits[1]);
- inptr[2] = _mm_unpacklo_epi16(inptr[1], sign_bits[1]);
- inptr[1] = _mm_unpackhi_epi16(inptr[0], sign_bits[0]);
- inptr[0] = _mm_unpacklo_epi16(inptr[0], sign_bits[0]);
- _mm_storeu_si128((__m128i *)outptr, inptr[0]);
- _mm_storeu_si128((__m128i *)(outptr + 4), inptr[1]);
- _mm_storeu_si128((__m128i *)(outptr + 8), inptr[2]);
- _mm_storeu_si128((__m128i *)(outptr + 12), inptr[3]);
- } else {
- // Set to use the optimised transform for the column
- optimised_cols = 1;
- }
- } else {
- // Run the un-optimised row transform
- for (i = 0; i < 4; ++i) {
- aom_highbd_idct4_c(input, outptr, bd);
- input += 4;
- outptr += 4;
- }
- }
-
- if (optimised_cols) {
- aom_idct4_sse2(inptr);
-
- // Final round and shift
- inptr[0] = _mm_add_epi16(inptr[0], eight);
- inptr[1] = _mm_add_epi16(inptr[1], eight);
-
- inptr[0] = _mm_srai_epi16(inptr[0], 4);
- inptr[1] = _mm_srai_epi16(inptr[1], 4);
-
- // Reconstruction and Store
- {
- __m128i d0 = _mm_loadl_epi64((const __m128i *)dest);
- __m128i d2 = _mm_loadl_epi64((const __m128i *)(dest + stride * 2));
- d0 = _mm_unpacklo_epi64(
- d0, _mm_loadl_epi64((const __m128i *)(dest + stride)));
- d2 = _mm_unpacklo_epi64(
- d2, _mm_loadl_epi64((const __m128i *)(dest + stride * 3)));
- d0 = clamp_high_sse2(_mm_adds_epi16(d0, inptr[0]), bd);
- d2 = clamp_high_sse2(_mm_adds_epi16(d2, inptr[1]), bd);
- // store input0
- _mm_storel_epi64((__m128i *)dest, d0);
- // store input1
- d0 = _mm_srli_si128(d0, 8);
- _mm_storel_epi64((__m128i *)(dest + stride), d0);
- // store input2
- _mm_storel_epi64((__m128i *)(dest + stride * 2), d2);
- // store input3
- d2 = _mm_srli_si128(d2, 8);
- _mm_storel_epi64((__m128i *)(dest + stride * 3), d2);
- }
- } else {
- // Run the un-optimised column transform
- tran_low_t temp_in[4], temp_out[4];
- // Columns
- for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- aom_highbd_idct4_c(temp_in, temp_out, bd);
- for (j = 0; j < 4; ++j) {
- dest[j * stride + i] = highbd_clip_pixel_add(
- dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
- }
- }
- }
-}
-
-void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
- tran_low_t out[8 * 8] = { 0 };
- tran_low_t *outptr = out;
- int i, j, test;
- __m128i inptr[8];
- __m128i min_input, max_input, temp1, temp2, sign_bits;
- uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- const __m128i zero = _mm_set1_epi16(0);
- const __m128i sixteen = _mm_set1_epi16(16);
- const __m128i max = _mm_set1_epi16(6201);
- const __m128i min = _mm_set1_epi16(-6201);
- int optimised_cols = 0;
-
- // Load input into __m128i & pack to 16 bits
- for (i = 0; i < 8; i++) {
- temp1 = _mm_loadu_si128((const __m128i *)(input + 8 * i));
- temp2 = _mm_loadu_si128((const __m128i *)(input + 8 * i + 4));
- inptr[i] = _mm_packs_epi32(temp1, temp2);
- }
-
- // Find the min & max for the row transform
- // only first 4 row has non-zero coefs
- max_input = _mm_max_epi16(inptr[0], inptr[1]);
- min_input = _mm_min_epi16(inptr[0], inptr[1]);
- for (i = 2; i < 4; i++) {
- max_input = _mm_max_epi16(max_input, inptr[i]);
- min_input = _mm_min_epi16(min_input, inptr[i]);
- }
- max_input = _mm_cmpgt_epi16(max_input, max);
- min_input = _mm_cmplt_epi16(min_input, min);
- temp1 = _mm_or_si128(max_input, min_input);
- test = _mm_movemask_epi8(temp1);
-
- if (!test) {
- // Do the row transform
- aom_idct8_sse2(inptr);
-
- // Find the min & max for the column transform
- // N.B. Only first 4 cols contain non-zero coeffs
- max_input = _mm_max_epi16(inptr[0], inptr[1]);
- min_input = _mm_min_epi16(inptr[0], inptr[1]);
- for (i = 2; i < 8; i++) {
- max_input = _mm_max_epi16(max_input, inptr[i]);
- min_input = _mm_min_epi16(min_input, inptr[i]);
- }
- max_input = _mm_cmpgt_epi16(max_input, max);
- min_input = _mm_cmplt_epi16(min_input, min);
- temp1 = _mm_or_si128(max_input, min_input);
- test = _mm_movemask_epi8(temp1);
-
- if (test) {
- // Use fact only first 4 rows contain non-zero coeffs
- array_transpose_4X8(inptr, inptr);
- for (i = 0; i < 4; i++) {
- sign_bits = _mm_cmplt_epi16(inptr[i], zero);
- temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits);
- temp2 = _mm_unpacklo_epi16(inptr[i], sign_bits);
- _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i + 1)), temp1);
- _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i)), temp2);
- }
- } else {
- // Set to use the optimised transform for the column
- optimised_cols = 1;
- }
- } else {
- // Run the un-optimised row transform
- for (i = 0; i < 4; ++i) {
- aom_highbd_idct8_c(input, outptr, bd);
- input += 8;
- outptr += 8;
- }
- }
-
- if (optimised_cols) {
- aom_idct8_sse2(inptr);
-
- // Final round & shift and Reconstruction and Store
- {
- __m128i d[8];
- for (i = 0; i < 8; i++) {
- inptr[i] = _mm_add_epi16(inptr[i], sixteen);
- d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
- inptr[i] = _mm_srai_epi16(inptr[i], 5);
- d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
- // Store
- _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
- }
- }
- } else {
- // Run the un-optimised column transform
- tran_low_t temp_in[8], temp_out[8];
- for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- aom_highbd_idct8_c(temp_in, temp_out, bd);
- for (j = 0; j < 8; ++j) {
- dest[j * stride + i] = highbd_clip_pixel_add(
- dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
- }
- }
- }
-}
-
-#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c b/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
index 9d16a3e84..6a73ac460 100644
--- a/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
@@ -93,6 +93,12 @@ MASKSAD8XN_SSSE3(8)
MASKSAD8XN_SSSE3(4)
MASKSAD4XN_SSSE3(8)
MASKSAD4XN_SSSE3(4)
+#if CONFIG_EXT_PARTITION_TYPES
+MASKSAD4XN_SSSE3(16)
+MASKSADMXN_SSSE3(16, 4)
+MASKSAD8XN_SSSE3(32)
+MASKSADMXN_SSSE3(32, 8)
+#endif
static INLINE unsigned int masked_sad_ssse3(const uint8_t *src_ptr,
int src_stride,
@@ -283,6 +289,12 @@ HIGHBD_MASKSADMXN_SSSE3(8, 8)
HIGHBD_MASKSADMXN_SSSE3(8, 4)
HIGHBD_MASKSAD4XN_SSSE3(8)
HIGHBD_MASKSAD4XN_SSSE3(4)
+#if CONFIG_EXT_PARTITION_TYPES
+HIGHBD_MASKSAD4XN_SSSE3(16)
+HIGHBD_MASKSADMXN_SSSE3(16, 4)
+HIGHBD_MASKSADMXN_SSSE3(8, 32)
+HIGHBD_MASKSADMXN_SSSE3(32, 8)
+#endif
static INLINE unsigned int highbd_masked_sad_ssse3(
const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
diff --git a/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c b/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
index be9d437d2..24e7ed1c6 100644
--- a/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
@@ -126,6 +126,12 @@ MASK_SUBPIX_VAR8XH_SSSE3(8)
MASK_SUBPIX_VAR8XH_SSSE3(4)
MASK_SUBPIX_VAR4XH_SSSE3(8)
MASK_SUBPIX_VAR4XH_SSSE3(4)
+#if CONFIG_EXT_PARTITION_TYPES
+MASK_SUBPIX_VAR4XH_SSSE3(16)
+MASK_SUBPIX_VAR_SSSE3(16, 4)
+MASK_SUBPIX_VAR8XH_SSSE3(32)
+MASK_SUBPIX_VAR_SSSE3(32, 8)
+#endif
static INLINE __m128i filter_block(const __m128i a, const __m128i b,
const __m128i filter) {
@@ -564,6 +570,7 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
uint64_t sse64; \
int sum; \
+ int64_t var; \
uint16_t temp[(H + 1) * W]; \
const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
@@ -579,7 +586,8 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
msk_stride, W, H, &sse64, &sum); \
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 4); \
sum = ROUND_POWER_OF_TWO(sum, 2); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
} \
unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3( \
const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
@@ -587,6 +595,7 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
uint64_t sse64; \
int sum; \
+ int64_t var; \
uint16_t temp[(H + 1) * W]; \
const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
@@ -602,7 +611,8 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
msk_stride, W, H, &sse64, &sum); \
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 8); \
sum = ROUND_POWER_OF_TWO(sum, 4); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
}
#define HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(H) \
@@ -634,6 +644,7 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
int sse_; \
int sum; \
+ int64_t var; \
uint16_t temp[(H + 1) * 4]; \
const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
@@ -649,7 +660,8 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
msk_stride, H, &sse_, &sum); \
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse_, 4); \
sum = ROUND_POWER_OF_TWO(sum, 2); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (4 * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
} \
unsigned int aom_highbd_12_masked_sub_pixel_variance4x##H##_ssse3( \
const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
@@ -657,6 +669,7 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
int sse_; \
int sum; \
+ int64_t var; \
uint16_t temp[(H + 1) * 4]; \
const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
@@ -672,7 +685,8 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
msk_stride, H, &sse_, &sum); \
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse_, 8); \
sum = ROUND_POWER_OF_TWO(sum, 4); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (4 * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
}
#if CONFIG_EXT_PARTITION
@@ -693,6 +707,12 @@ HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 8)
HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 4)
HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(8)
HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(4)
+#if CONFIG_EXT_PARTITION_TYPES
+HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(16)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(16, 4)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 32)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(32, 8)
+#endif
static INLINE __m128i highbd_filter_block(const __m128i a, const __m128i b,
const __m128i filter) {
diff --git a/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c b/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
index 21632644f..3fd6f71e5 100644
--- a/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
+++ b/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
@@ -137,6 +137,12 @@ OBMCSADWXH(8, 8)
OBMCSADWXH(8, 4)
OBMCSADWXH(4, 8)
OBMCSADWXH(4, 4)
+#if CONFIG_EXT_PARTITION_TYPES
+OBMCSADWXH(4, 16)
+OBMCSADWXH(16, 4)
+OBMCSADWXH(8, 32)
+OBMCSADWXH(32, 8)
+#endif
////////////////////////////////////////////////////////////////////////////////
// High bit-depth
@@ -260,4 +266,10 @@ HBD_OBMCSADWXH(8, 8)
HBD_OBMCSADWXH(8, 4)
HBD_OBMCSADWXH(4, 8)
HBD_OBMCSADWXH(4, 4)
+#if CONFIG_EXT_PARTITION_TYPES
+HBD_OBMCSADWXH(4, 16)
+HBD_OBMCSADWXH(16, 4)
+HBD_OBMCSADWXH(8, 32)
+HBD_OBMCSADWXH(32, 8)
+#endif
#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c b/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
index 1797ded80..44cfa8e28 100644
--- a/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
+++ b/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
@@ -146,6 +146,12 @@ OBMCVARWXH(8, 8)
OBMCVARWXH(8, 4)
OBMCVARWXH(4, 8)
OBMCVARWXH(4, 4)
+#if CONFIG_EXT_PARTITION_TYPES
+OBMCVARWXH(4, 16)
+OBMCVARWXH(16, 4)
+OBMCVARWXH(8, 32)
+OBMCVARWXH(32, 8)
+#endif
////////////////////////////////////////////////////////////////////////////////
// High bit-depth
@@ -353,4 +359,10 @@ HBD_OBMCVARWXH(8, 8)
HBD_OBMCVARWXH(8, 4)
HBD_OBMCVARWXH(4, 8)
HBD_OBMCVARWXH(4, 4)
+#if CONFIG_EXT_PARTITION_TYPES
+HBD_OBMCVARWXH(4, 16)
+HBD_OBMCVARWXH(16, 4)
+HBD_OBMCVARWXH(8, 32)
+HBD_OBMCVARWXH(32, 8)
+#endif
#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/x86/sad4d_sse2.asm b/third_party/aom/aom_dsp/x86/sad4d_sse2.asm
index 8f04ef2f3..4570e2ce6 100644
--- a/third_party/aom/aom_dsp/x86/sad4d_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/sad4d_sse2.asm
@@ -251,3 +251,9 @@ SADNXN4D 8, 8
SADNXN4D 8, 4
SADNXN4D 4, 8
SADNXN4D 4, 4
+%if CONFIG_EXT_PARTITION_TYPES
+SADNXN4D 4, 16
+SADNXN4D 16, 4
+SADNXN4D 8, 32
+SADNXN4D 32, 8
+%endif
diff --git a/third_party/aom/aom_dsp/x86/sad_highbd_avx2.c b/third_party/aom/aom_dsp/x86/sad_highbd_avx2.c
index 196394379..e8dd87a26 100644
--- a/third_party/aom/aom_dsp/x86/sad_highbd_avx2.c
+++ b/third_party/aom/aom_dsp/x86/sad_highbd_avx2.c
@@ -704,7 +704,12 @@ unsigned int aom_highbd_sad128x128_avg_avx2(const uint8_t *src, int src_stride,
static INLINE void get_4d_sad_from_mm256_epi32(const __m256i *v,
uint32_t *res) {
__m256i u0, u1, u2, u3;
+#if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+ const __m256i mask = _mm256_setr_epi32(UINT32_MAX, 0, UINT32_MAX, 0,
+ UINT32_MAX, 0, UINT32_MAX, 0);
+#else
const __m256i mask = _mm256_set1_epi64x(UINT32_MAX);
+#endif
__m128i sad;
// 8 32-bit summation
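
The guarded constant in the hunk above exists because 32-bit MSVC releases before VS2015 (_MSC_VER < 1900) do not handle _mm256_set1_epi64x; the _mm256_setr_epi32 form builds the identical 256-bit mask, since setr fills 32-bit elements from the lowest lane upward and each 64-bit lane ends up holding 0x00000000FFFFFFFF. A small host-side check of the lane layout, illustrative and not part of the commit:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
int main(void) {
  /* One 64-bit lane of _mm256_set1_epi64x(UINT32_MAX): */
  const uint64_t a = (uint64_t)UINT32_MAX;
  /* The same lane as two 32-bit elements, low element first, which is
   * how _mm256_setr_epi32(UINT32_MAX, 0, ...) lays them out: */
  const uint32_t halves[2] = { UINT32_MAX, 0 };
  uint64_t b;
  memcpy(&b, halves, sizeof(b));
  printf("%d\n", a == b); /* prints 1 on little-endian x86 */
  return 0;
}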
diff --git a/third_party/aom/aom_dsp/x86/sad_sse2.asm b/third_party/aom/aom_dsp/x86/sad_sse2.asm
index e45457a57..88d427077 100644
--- a/third_party/aom/aom_dsp/x86/sad_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/sad_sse2.asm
@@ -208,6 +208,10 @@ SAD32XN 16 ; sad32x16_sse2
SAD32XN 64, 1 ; sad32x64_avg_sse2
SAD32XN 32, 1 ; sad32x32_avg_sse2
SAD32XN 16, 1 ; sad32x16_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+SAD32XN 8 ; sad_32x8_sse2
+SAD32XN 8, 1 ; sad_32x8_avg_sse2
+%endif
; unsigned int aom_sad16x{8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -254,6 +258,10 @@ SAD16XN 8 ; sad16x8_sse2
SAD16XN 32, 1 ; sad16x32_avg_sse2
SAD16XN 16, 1 ; sad16x16_avg_sse2
SAD16XN 8, 1 ; sad16x8_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+SAD16XN 4 ; sad_16x4_sse2
+SAD16XN 4, 1 ; sad_16x4_avg_sse2
+%endif
; unsigned int aom_sad8x{8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -298,6 +306,10 @@ SAD8XN 4 ; sad8x4_sse2
SAD8XN 16, 1 ; sad8x16_avg_sse2
SAD8XN 8, 1 ; sad8x8_avg_sse2
SAD8XN 4, 1 ; sad8x4_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+SAD8XN 32 ; sad_8x32_sse2
+SAD8XN 32, 1 ; sad_8x32_avg_sse2
+%endif
; unsigned int aom_sad4x{4, 8}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -343,3 +355,7 @@ SAD4XN 8 ; sad4x8_sse
SAD4XN 4 ; sad4x4_sse
SAD4XN 8, 1 ; sad4x8_avg_sse
SAD4XN 4, 1 ; sad4x4_avg_sse
+%if CONFIG_EXT_PARTITION_TYPES
+SAD4XN 16 ; sad_4x16_sse2
+SAD4XN 16, 1 ; sad_4x16_avg_sse2
+%endif
diff --git a/third_party/aom/aom_dsp/x86/variance_sse2.c b/third_party/aom/aom_dsp/x86/variance_sse2.c
index d9563aa7f..918844185 100644
--- a/third_party/aom/aom_dsp/x86/variance_sse2.c
+++ b/third_party/aom/aom_dsp/x86/variance_sse2.c
@@ -17,6 +17,9 @@
#include "aom_ports/mem.h"
+#include "./av1_rtcd.h"
+#include "av1/common/filter.h"
+
typedef void (*getNxMvar_fn_t)(const unsigned char *src, int src_stride,
const unsigned char *ref, int ref_stride,
unsigned int *sse, int *sum);
@@ -335,6 +338,52 @@ unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
return *sse;
}
+#if CONFIG_EXT_PARTITION_TYPES
+unsigned int aom_variance4x16_sse2(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_sse2(src, src_stride, ref, ref_stride, 4, 16, sse, &sum,
+ get4x4var_sse2, 4);
+ assert(sum <= 255 * 4 * 16);
+ assert(sum >= -255 * 4 * 16);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 6);
+}
+
+unsigned int aom_variance16x4_sse2(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_sse2(src, src_stride, ref, ref_stride, 16, 4, sse, &sum,
+ get4x4var_sse2, 4);
+ assert(sum <= 255 * 16 * 4);
+ assert(sum >= -255 * 16 * 4);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 6);
+}
+
+unsigned int aom_variance8x32_sse2(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_sse2(src, src_stride, ref, ref_stride, 8, 32, sse, &sum,
+ aom_get8x8var_sse2, 8);
+ assert(sum <= 255 * 8 * 32);
+ assert(sum >= -255 * 8 * 32);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 8);
+}
+
+unsigned int aom_variance32x8_sse2(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_sse2(src, src_stride, ref, ref_stride, 32, 8, sse, &sum,
+ aom_get8x8var_sse2, 8);
+ assert(sum <= 255 * 32 * 8);
+ assert(sum >= -255 * 32 * 8);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 8);
+}
+#endif
+
// The 2 unused parameters are placeholders for PIC enabled build.
// These definitions are for functions defined in subpel_variance.asm
#define DECL(w, opt) \
@@ -342,13 +391,13 @@ unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
const uint8_t *dst, ptrdiff_t dst_stride, int height, unsigned int *sse, \
void *unused0, void *unused)
-#define DECLS(opt1, opt2) \
- DECL(4, opt1); \
- DECL(8, opt1); \
- DECL(16, opt1)
+#define DECLS(opt) \
+ DECL(4, opt); \
+ DECL(8, opt); \
+ DECL(16, opt)
-DECLS(sse2, sse2);
-DECLS(ssse3, ssse3);
+DECLS(sse2);
+DECLS(ssse3);
#undef DECLS
#undef DECL
@@ -384,23 +433,44 @@ DECLS(ssse3, ssse3);
return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
}
-#define FNS(opt1, opt2) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (int32_t), (int32_t)); \
- FN(8, 16, 8, 3, 4, opt1, (int32_t), (int32_t)); \
- FN(8, 8, 8, 3, 3, opt1, (int32_t), (int32_t)); \
- FN(8, 4, 8, 3, 2, opt1, (int32_t), (int32_t)); \
- FN(4, 8, 4, 2, 3, opt1, (int32_t), (int32_t)); \
- FN(4, 4, 4, 2, 2, opt1, (int32_t), (int32_t))
-
-FNS(sse2, sse2);
-FNS(ssse3, ssse3);
+#if CONFIG_EXT_PARTITION_TYPES
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int32_t), (int32_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int32_t), (int32_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int32_t), (int32_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int32_t), (int32_t)); \
+ FN(4, 8, 4, 2, 3, opt, (int32_t), (int32_t)); \
+ FN(4, 4, 4, 2, 2, opt, (int32_t), (int32_t)); \
+ FN(4, 16, 4, 2, 4, opt, (int32_t), (int32_t)); \
+ FN(16, 4, 16, 4, 2, opt, (int32_t), (int32_t)); \
+ FN(8, 32, 8, 3, 5, opt, (int32_t), (int32_t)); \
+ FN(32, 8, 16, 5, 3, opt, (int32_t), (int32_t))
+#else
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int32_t), (int32_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int32_t), (int32_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int32_t), (int32_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int32_t), (int32_t)); \
+ FN(4, 8, 4, 2, 3, opt, (int32_t), (int32_t)); \
+ FN(4, 4, 4, 2, 2, opt, (int32_t), (int32_t))
+#endif
+
+FNS(sse2);
+FNS(ssse3);
#undef FNS
#undef FN
@@ -412,13 +482,13 @@ FNS(ssse3, ssse3);
const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec, \
ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \
void *unused)
-#define DECLS(opt1, opt2) \
- DECL(4, opt1); \
- DECL(8, opt1); \
- DECL(16, opt1)
+#define DECLS(opt) \
+ DECL(4, opt); \
+ DECL(8, opt); \
+ DECL(16, opt)
-DECLS(sse2, sse2);
-DECLS(ssse3, ssse3);
+DECLS(sse2);
+DECLS(ssse3);
#undef DECL
#undef DECLS
@@ -455,236 +525,149 @@ DECLS(ssse3, ssse3);
return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
}
-#define FNS(opt1, opt2) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (uint32_t), (int32_t)); \
- FN(8, 16, 8, 3, 4, opt1, (uint32_t), (int32_t)); \
- FN(8, 8, 8, 3, 3, opt1, (uint32_t), (int32_t)); \
- FN(8, 4, 8, 3, 2, opt1, (uint32_t), (int32_t)); \
- FN(4, 8, 4, 2, 3, opt1, (uint32_t), (int32_t)); \
- FN(4, 4, 4, 2, 2, opt1, (uint32_t), (int32_t))
-
-FNS(sse2, sse);
-FNS(ssse3, ssse3);
+#if CONFIG_EXT_PARTITION_TYPES
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (uint32_t), (int32_t)); \
+ FN(8, 16, 8, 3, 4, opt, (uint32_t), (int32_t)); \
+ FN(8, 8, 8, 3, 3, opt, (uint32_t), (int32_t)); \
+ FN(8, 4, 8, 3, 2, opt, (uint32_t), (int32_t)); \
+ FN(4, 8, 4, 2, 3, opt, (uint32_t), (int32_t)); \
+ FN(4, 4, 4, 2, 2, opt, (uint32_t), (int32_t)); \
+ FN(4, 16, 4, 2, 4, opt, (int32_t), (int32_t)); \
+ FN(16, 4, 16, 4, 2, opt, (int32_t), (int32_t)); \
+ FN(8, 32, 8, 3, 5, opt, (int32_t), (int32_t)); \
+ FN(32, 8, 16, 5, 3, opt, (int32_t), (int32_t))
+#else
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (uint32_t), (int32_t)); \
+ FN(8, 16, 8, 3, 4, opt, (uint32_t), (int32_t)); \
+ FN(8, 8, 8, 3, 3, opt, (uint32_t), (int32_t)); \
+ FN(8, 4, 8, 3, 2, opt, (uint32_t), (int32_t)); \
+ FN(4, 8, 4, 2, 3, opt, (uint32_t), (int32_t)); \
+ FN(4, 4, 4, 2, 2, opt, (uint32_t), (int32_t))
+#endif
+
+FNS(sse2);
+FNS(ssse3);
#undef FNS
#undef FN
void aom_upsampled_pred_sse2(uint8_t *comp_pred, int width, int height,
+ int subpel_x_q3, int subpel_y_q3,
const uint8_t *ref, int ref_stride) {
- int i, j;
- int stride = ref_stride << 3;
-
- if (width >= 16) {
- // read 16 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 16) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i s2 = _mm_loadu_si128((const __m128i *)(ref + 32));
- __m128i s3 = _mm_loadu_si128((const __m128i *)(ref + 48));
- __m128i s4 = _mm_loadu_si128((const __m128i *)(ref + 64));
- __m128i s5 = _mm_loadu_si128((const __m128i *)(ref + 80));
- __m128i s6 = _mm_loadu_si128((const __m128i *)(ref + 96));
- __m128i s7 = _mm_loadu_si128((const __m128i *)(ref + 112));
- __m128i t0, t1, t2, t3;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- t1 = _mm_unpacklo_epi8(s2, s3);
- s3 = _mm_unpackhi_epi8(s2, s3);
- t2 = _mm_unpacklo_epi8(s4, s5);
- s5 = _mm_unpackhi_epi8(s4, s5);
- t3 = _mm_unpacklo_epi8(s6, s7);
- s7 = _mm_unpackhi_epi8(s6, s7);
-
- s0 = _mm_unpacklo_epi8(t0, s1);
- s2 = _mm_unpacklo_epi8(t1, s3);
- s4 = _mm_unpacklo_epi8(t2, s5);
- s6 = _mm_unpacklo_epi8(t3, s7);
- s0 = _mm_unpacklo_epi32(s0, s2);
- s4 = _mm_unpacklo_epi32(s4, s6);
- s0 = _mm_unpacklo_epi64(s0, s4);
-
- _mm_storeu_si128((__m128i *)(comp_pred), s0);
+ if (!subpel_x_q3 && !subpel_y_q3) {
+ if (width >= 16) {
+ int i;
+ assert(!(width & 15));
+ /*Read 16 pixels one row at a time.*/
+ for (i = 0; i < height; i++) {
+ int j;
+ for (j = 0; j < width; j += 16) {
+ __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
+ _mm_storeu_si128((__m128i *)comp_pred, s0);
+ comp_pred += 16;
+ ref += 16;
+ }
+ ref += ref_stride - width;
+ }
+ } else if (width >= 8) {
+ int i;
+ assert(!(width & 7));
+ assert(!(height & 1));
+ /*Read 8 pixels two rows at a time.*/
+ for (i = 0; i < height; i += 2) {
+ __m128i s0 = _mm_loadl_epi64((const __m128i *)ref);
+ __m128i s1 = _mm_loadl_epi64((const __m128i *)(ref + ref_stride));
+ __m128i t0 = _mm_unpacklo_epi64(s0, s1);
+ _mm_storeu_si128((__m128i *)comp_pred, t0);
comp_pred += 16;
- ref += 16 * 8;
+ ref += 2 * ref_stride;
}
- ref += stride - (width << 3);
- }
- } else if (width >= 8) {
- // read 8 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i s2 = _mm_loadu_si128((const __m128i *)(ref + 32));
- __m128i s3 = _mm_loadu_si128((const __m128i *)(ref + 48));
- __m128i t0, t1;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- t1 = _mm_unpacklo_epi8(s2, s3);
- s3 = _mm_unpackhi_epi8(s2, s3);
-
- s0 = _mm_unpacklo_epi8(t0, s1);
- s2 = _mm_unpacklo_epi8(t1, s3);
- s0 = _mm_unpacklo_epi32(s0, s2);
-
- _mm_storel_epi64((__m128i *)(comp_pred), s0);
- comp_pred += 8;
- ref += 8 * 8;
+ } else {
+ int i;
+ assert(!(width & 3));
+ assert(!(height & 3));
+ /*Read 4 pixels four rows at a time.*/
+      for (i = 0; i < height; i += 4) {
+ __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
+ __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + ref_stride));
+ __m128i s2 =
+ _mm_cvtsi32_si128(*(const uint32_t *)(ref + 2 * ref_stride));
+ __m128i s3 =
+ _mm_cvtsi32_si128(*(const uint32_t *)(ref + 3 * ref_stride));
+ __m128i t0 = _mm_unpacklo_epi32(s0, s1);
+ __m128i t1 = _mm_unpacklo_epi32(s2, s3);
+ __m128i u0 = _mm_unpacklo_epi64(t0, t1);
+ _mm_storeu_si128((__m128i *)comp_pred, u0);
+ comp_pred += 16;
+ ref += 4 * ref_stride;
}
- ref += stride - (width << 3);
}
} else {
- // read 4 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i t0;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- s0 = _mm_unpacklo_epi8(t0, s1);
-
- *(int *)comp_pred = _mm_cvtsi128_si32(s0);
- comp_pred += 4;
- ref += 4 * 8;
- }
- ref += stride - (width << 3);
+ InterpFilterParams filter;
+ filter = av1_get_interp_filter_params(EIGHTTAP_REGULAR);
+ if (!subpel_y_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ aom_convolve8_horiz(ref, ref_stride, comp_pred, width, kernel, 16, NULL,
+ -1, width, height);
+ } else if (!subpel_x_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ aom_convolve8_vert(ref, ref_stride, comp_pred, width, NULL, -1, kernel,
+ 16, width, height);
+ } else {
+ DECLARE_ALIGNED(16, uint8_t,
+ temp[((MAX_SB_SIZE * 2 + 16) + 16) * MAX_SB_SIZE]);
+ const int16_t *kernel_x;
+ const int16_t *kernel_y;
+ int intermediate_height;
+ kernel_x = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ kernel_y = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ intermediate_height =
+ (((height - 1) * 8 + subpel_y_q3) >> 3) + filter.taps;
+ assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
+ aom_convolve8_horiz(ref - ref_stride * ((filter.taps >> 1) - 1),
+ ref_stride, temp, MAX_SB_SIZE, kernel_x, 16, NULL, -1,
+ width, intermediate_height);
+ aom_convolve8_vert(temp + MAX_SB_SIZE * ((filter.taps >> 1) - 1),
+ MAX_SB_SIZE, comp_pred, width, NULL, -1, kernel_y, 16,
+ width, height);
}
}
}
void aom_comp_avg_upsampled_pred_sse2(uint8_t *comp_pred, const uint8_t *pred,
- int width, int height, const uint8_t *ref,
+ int width, int height, int subpel_x_q3,
+ int subpel_y_q3, const uint8_t *ref,
int ref_stride) {
- const __m128i zero = _mm_set1_epi16(0);
- const __m128i one = _mm_set1_epi16(1);
- int i, j;
- int stride = ref_stride << 3;
-
- if (width >= 16) {
- // read 16 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 16) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i s2 = _mm_loadu_si128((const __m128i *)(ref + 32));
- __m128i s3 = _mm_loadu_si128((const __m128i *)(ref + 48));
- __m128i s4 = _mm_loadu_si128((const __m128i *)(ref + 64));
- __m128i s5 = _mm_loadu_si128((const __m128i *)(ref + 80));
- __m128i s6 = _mm_loadu_si128((const __m128i *)(ref + 96));
- __m128i s7 = _mm_loadu_si128((const __m128i *)(ref + 112));
- __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
- __m128i p1;
- __m128i t0, t1, t2, t3;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- t1 = _mm_unpacklo_epi8(s2, s3);
- s3 = _mm_unpackhi_epi8(s2, s3);
- t2 = _mm_unpacklo_epi8(s4, s5);
- s5 = _mm_unpackhi_epi8(s4, s5);
- t3 = _mm_unpacklo_epi8(s6, s7);
- s7 = _mm_unpackhi_epi8(s6, s7);
-
- s0 = _mm_unpacklo_epi8(t0, s1);
- s2 = _mm_unpacklo_epi8(t1, s3);
- s4 = _mm_unpacklo_epi8(t2, s5);
- s6 = _mm_unpacklo_epi8(t3, s7);
-
- s0 = _mm_unpacklo_epi32(s0, s2);
- s4 = _mm_unpacklo_epi32(s4, s6);
- s0 = _mm_unpacklo_epi8(s0, zero);
- s4 = _mm_unpacklo_epi8(s4, zero);
-
- p1 = _mm_unpackhi_epi8(p0, zero);
- p0 = _mm_unpacklo_epi8(p0, zero);
- p0 = _mm_adds_epu16(s0, p0);
- p1 = _mm_adds_epu16(s4, p1);
- p0 = _mm_adds_epu16(p0, one);
- p1 = _mm_adds_epu16(p1, one);
-
- p0 = _mm_srli_epi16(p0, 1);
- p1 = _mm_srli_epi16(p1, 1);
- p0 = _mm_packus_epi16(p0, p1);
-
- _mm_storeu_si128((__m128i *)(comp_pred), p0);
- comp_pred += 16;
- pred += 16;
- ref += 16 * 8;
- }
- ref += stride - (width << 3);
- }
- } else if (width >= 8) {
- // read 8 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i s2 = _mm_loadu_si128((const __m128i *)(ref + 32));
- __m128i s3 = _mm_loadu_si128((const __m128i *)(ref + 48));
- __m128i p0 = _mm_loadl_epi64((const __m128i *)pred);
- __m128i t0, t1;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- t1 = _mm_unpacklo_epi8(s2, s3);
- s3 = _mm_unpackhi_epi8(s2, s3);
-
- s0 = _mm_unpacklo_epi8(t0, s1);
- s2 = _mm_unpacklo_epi8(t1, s3);
- s0 = _mm_unpacklo_epi32(s0, s2);
- s0 = _mm_unpacklo_epi8(s0, zero);
-
- p0 = _mm_unpacklo_epi8(p0, zero);
- p0 = _mm_adds_epu16(s0, p0);
- p0 = _mm_adds_epu16(p0, one);
- p0 = _mm_srli_epi16(p0, 1);
- p0 = _mm_packus_epi16(p0, zero);
-
- _mm_storel_epi64((__m128i *)(comp_pred), p0);
- comp_pred += 8;
- pred += 8;
- ref += 8 * 8;
- }
- ref += stride - (width << 3);
- }
- } else {
- // read 4 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i p0 = _mm_cvtsi32_si128(*(const uint32_t *)pred);
- __m128i t0;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- s0 = _mm_unpacklo_epi8(t0, s1);
- s0 = _mm_unpacklo_epi8(s0, zero);
-
- p0 = _mm_unpacklo_epi8(p0, zero);
- p0 = _mm_adds_epu16(s0, p0);
- p0 = _mm_adds_epu16(p0, one);
- p0 = _mm_srli_epi16(p0, 1);
- p0 = _mm_packus_epi16(p0, zero);
-
- *(int *)comp_pred = _mm_cvtsi128_si32(p0);
- comp_pred += 4;
- pred += 4;
- ref += 4 * 8;
- }
- ref += stride - (width << 3);
- }
+ int n;
+ int i;
+ aom_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3, ref,
+ ref_stride);
+ /*The total number of pixels must be a multiple of 16 (e.g., 4x4).*/
+ assert(!(width * height & 15));
+ n = width * height >> 4;
+ for (i = 0; i < n; i++) {
+ __m128i s0 = _mm_loadu_si128((const __m128i *)comp_pred);
+ __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
+ _mm_storeu_si128((__m128i *)comp_pred, _mm_avg_epu8(s0, p0));
+ comp_pred += 16;
+ pred += 16;
}
}
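
Taken together, the new aom_variance{4x16,16x4,8x32,32x8}_sse2 functions earlier in this file all apply the usual variance identity var = sse - sum^2 / (w*h), where the division is a right shift because w*h is a power of two: 4x16 and 16x4 cover 64 pixels, hence >> 6; 8x32 and 32x8 cover 256, hence >> 8. A plain-C sketch of the same computation, for reference only (the helper name is illustrative):

#include <stdint.h>
/* Reference-only scalar variance: var = sse - sum^2 / (w*h). */
static unsigned int variance_sketch(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    int w, int h, unsigned int *sse) {
  int64_t sum = 0;
  uint64_t sse64 = 0;
  int i, j;
  for (i = 0; i < h; i++) {
    for (j = 0; j < w; j++) {
      const int d = src[i * src_stride + j] - ref[i * ref_stride + j];
      sum += d;
      sse64 += (uint64_t)(d * d);
    }
  }
  *sse = (unsigned int)sse64;
  return *sse - (unsigned int)((sum * sum) / (w * h));
}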