path: root/third_party/aom/aom_dsp
author     trav90 <travawine@palemoon.org>  2018-10-18 06:04:57 -0500
committer  trav90 <travawine@palemoon.org>  2018-10-18 06:04:57 -0500
commit     7369c7d7a5eed32963d8af37658286617919f91c (patch)
tree       5397ce7ee9bca1641118fdc3187bd9e2b24fdc9c /third_party/aom/aom_dsp
parent     77887af9c4ad1420bbdb33984af4f74b55ca59db (diff)
Update aom to commit id f5bdeac22930ff4c6b219be49c843db35970b918
Diffstat (limited to 'third_party/aom/aom_dsp')
-rw-r--r--  third_party/aom/aom_dsp/aom_convolve.c | 227
-rw-r--r--  third_party/aom/aom_dsp/aom_dsp.cmake | 56
-rw-r--r--  third_party/aom/aom_dsp/aom_dsp.mk | 5
-rw-r--r--  third_party/aom/aom_dsp/aom_dsp_common.h | 17
-rwxr-xr-x  third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl | 149
-rw-r--r--  third_party/aom/aom_dsp/aom_filter.h | 6
-rw-r--r--  third_party/aom/aom_dsp/bitreader.h | 2
-rw-r--r--  third_party/aom/aom_dsp/bitwriter.h | 2
-rw-r--r--  third_party/aom/aom_dsp/blend_a64_mask.c | 63
-rw-r--r--  third_party/aom/aom_dsp/fwd_txfm.c | 42
-rw-r--r--  third_party/aom/aom_dsp/fwd_txfm.h | 5
-rw-r--r--  third_party/aom/aom_dsp/intrapred.c | 569
-rw-r--r--  third_party/aom/aom_dsp/inv_txfm.c | 1080
-rw-r--r--  third_party/aom/aom_dsp/inv_txfm.h | 19
-rw-r--r--  third_party/aom/aom_dsp/mips/fwd_dct32x32_msa.c | 20
-rw-r--r--  third_party/aom/aom_dsp/mips/fwd_txfm_msa.c | 8
-rw-r--r--  third_party/aom/aom_dsp/prob.c | 4
-rw-r--r--  third_party/aom/aom_dsp/prob.h | 2
-rw-r--r--  third_party/aom/aom_dsp/quantize.c | 13
-rw-r--r--  third_party/aom/aom_dsp/sad.c | 58
-rw-r--r--  third_party/aom/aom_dsp/simd/v256_intrinsics_v128.h | 22
-rw-r--r--  third_party/aom/aom_dsp/txfm_common.h | 79
-rw-r--r--  third_party/aom/aom_dsp/variance.c | 218
-rw-r--r--  third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c | 141
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_quantize_intrin_avx2.c | 171
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c | 2
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm | 6
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm | 13
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm | 2
-rw-r--r--  third_party/aom/aom_dsp/x86/highbd_variance_sse2.c | 270
-rw-r--r--  third_party/aom/aom_dsp/x86/inv_txfm_sse2.c | 234
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c | 12
-rw-r--r--  third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c | 28
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_sad_sse4.c | 12
-rw-r--r--  third_party/aom/aom_dsp/x86/obmc_variance_sse4.c | 12
-rw-r--r--  third_party/aom/aom_dsp/x86/sad4d_sse2.asm | 6
-rw-r--r--  third_party/aom/aom_dsp/x86/sad_highbd_avx2.c | 5
-rw-r--r--  third_party/aom/aom_dsp/x86/sad_sse2.asm | 16
-rw-r--r--  third_party/aom/aom_dsp/x86/variance_sse2.c | 469
39 files changed, 1850 insertions, 2215 deletions
diff --git a/third_party/aom/aom_dsp/aom_convolve.c b/third_party/aom/aom_dsp/aom_convolve.c
index 4dac6aacc..c903ea52d 100644
--- a/third_party/aom/aom_dsp/aom_convolve.c
+++ b/third_party/aom/aom_dsp/aom_convolve.c
@@ -41,6 +41,29 @@ static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
}
}
+static void convolve_horiz_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *x_filters, int x0_qn,
+ int x_step_qn, int w, int h) {
+ int x, y;
+ src -= SUBPEL_TAPS / 2 - 1;
+ for (y = 0; y < h; ++y) {
+ int x_qn = x0_qn;
+ for (x = 0; x < w; ++x) {
+ const uint8_t *const src_x = &src[x_qn >> SCALE_SUBPEL_BITS]; // q8
+ const int x_filter_idx = (x_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS;
+ assert(x_filter_idx < SUBPEL_SHIFTS);
+ const int16_t *const x_filter = x_filters[x_filter_idx];
+ int k, sum = 0;
+ for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
+ dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+ x_qn += x_step_qn;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
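[Editor's note: the following is a minimal, self-contained sketch, not part of the diff. It shows the index arithmetic the new *_scale_c loops use: each Q10 position splits into an integer source offset and one of the 16 sub-pixel filter phases. The constants mirror the aom_filter.h additions later in this commit; the value 2600 is an arbitrary illustrative position.]

#include <stdio.h>

#define SUBPEL_BITS 4                                /* 16 phases, as in aom_filter.h */
#define SCALE_SUBPEL_BITS 10                         /* new Q10 step resolution */
#define SCALE_SUBPEL_MASK ((1 << SCALE_SUBPEL_BITS) - 1)
#define SCALE_EXTRA_BITS (SCALE_SUBPEL_BITS - SUBPEL_BITS)

int main(void) {
  const int x_qn = 2600;  /* hypothetical running position, in 1/1024-pel units */
  const int src_offset = x_qn >> SCALE_SUBPEL_BITS;                      /* 2 */
  const int filter_idx = (x_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS; /* 552 >> 6 = 8 */
  printf("source pixel %d, filter phase %d of %d\n", src_offset, filter_idx,
         1 << SUBPEL_BITS);
  return 0;
}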
static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const InterpKernel *x_filters, int x0_q4,
@@ -63,6 +86,30 @@ static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride,
}
}
+static void convolve_avg_horiz_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *x_filters, int x0_qn,
+ int x_step_qn, int w, int h) {
+ int x, y;
+ src -= SUBPEL_TAPS / 2 - 1;
+ for (y = 0; y < h; ++y) {
+ int x_qn = x0_qn;
+ for (x = 0; x < w; ++x) {
+ const uint8_t *const src_x = &src[x_qn >> SCALE_SUBPEL_BITS];
+ const int x_filter_idx = (x_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS;
+ assert(x_filter_idx < SUBPEL_SHIFTS);
+ const int16_t *const x_filter = x_filters[x_filter_idx];
+ int k, sum = 0;
+ for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k];
+ dst[x] = ROUND_POWER_OF_TWO(
+ dst[x] + clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1);
+ x_qn += x_step_qn;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
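[Editor's note: the *_avg_* variants above merge the freshly filtered pixel with the value already in dst using an unbiased rounded average. A tiny check of that identity, assuming the usual aom_dsp_common.h definition of ROUND_POWER_OF_TWO; not part of the diff.]

#include <assert.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

int main(void) {
  const int existing = 100;  /* value already stored in dst[x] */
  const int filtered = 103;  /* clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)) */
  assert(ROUND_POWER_OF_TWO(existing + filtered, 1) == 102);  /* (203 + 1) >> 1 */
  return 0;
}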
static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const InterpKernel *y_filters, int y0_q4,
@@ -86,6 +133,31 @@ static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride,
}
}
+static void convolve_vert_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *y_filters, int y0_qn,
+ int y_step_qn, int w, int h) {
+ int x, y;
+ src -= src_stride * (SUBPEL_TAPS / 2 - 1);
+
+ for (x = 0; x < w; ++x) {
+ int y_qn = y0_qn;
+ for (y = 0; y < h; ++y) {
+ const unsigned char *src_y =
+ &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+ const int16_t *const y_filter =
+ y_filters[(y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS];
+ int k, sum = 0;
+ for (k = 0; k < SUBPEL_TAPS; ++k)
+ sum += src_y[k * src_stride] * y_filter[k];
+ dst[y * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+ y_qn += y_step_qn;
+ }
+ ++src;
+ ++dst;
+ }
+}
+
static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const InterpKernel *y_filters, int y0_q4,
@@ -112,6 +184,34 @@ static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride,
}
}
+static void convolve_avg_vert_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *y_filters, int y0_qn,
+ int y_step_qn, int w, int h) {
+ int x, y;
+ src -= src_stride * (SUBPEL_TAPS / 2 - 1);
+
+ for (x = 0; x < w; ++x) {
+ int y_qn = y0_qn;
+ for (y = 0; y < h; ++y) {
+ const unsigned char *src_y =
+ &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+ const int16_t *const y_filter =
+ y_filters[(y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS];
+ int k, sum = 0;
+ for (k = 0; k < SUBPEL_TAPS; ++k)
+ sum += src_y[k * src_stride] * y_filter[k];
+ dst[y * dst_stride] = ROUND_POWER_OF_TWO(
+ dst[y * dst_stride] +
+ clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)),
+ 1);
+ y_qn += y_step_qn;
+ }
+ ++src;
+ ++dst;
+ }
+}
+
static void convolve(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const InterpKernel *const x_filters,
int x0_q4, int x_step_q4,
@@ -146,6 +246,41 @@ static void convolve(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
dst_stride, y_filters, y0_q4, y_step_q4, w, h);
}
+static void convolve_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *const x_filters, int x0_qn,
+ int x_step_qn, const InterpKernel *const y_filters,
+ int y0_qn, int y_step_qn, int w, int h) {
+ // TODO(afergs): Update comment here
+ // Note: Fixed size intermediate buffer, temp, places limits on parameters.
+ // 2d filtering proceeds in 2 steps:
+ // (1) Interpolate horizontally into an intermediate buffer, temp.
+ // (2) Interpolate temp vertically to derive the sub-pixel result.
+ // Deriving the maximum number of rows in the temp buffer (135):
+ // --Smallest scaling factor is x1/2 ==> y_step_qn = 32 (Normative).
+ // --Largest block size is 64x64 pixels.
+ // --64 rows in the downscaled frame span a distance of (64 - 1) * 32 in the
+ // original frame (in 1/16th pixel units).
+ // --Must round-up because block may be located at sub-pixel position.
+ // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails.
+ // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
+ uint8_t temp[MAX_EXT_SIZE * MAX_SB_SIZE];
+ int intermediate_height =
+ (((h - 1) * y_step_qn + y0_qn) >> SCALE_SUBPEL_BITS) + SUBPEL_TAPS;
+
+ assert(w <= MAX_SB_SIZE);
+ assert(h <= MAX_SB_SIZE);
+
+ assert(y_step_qn <= SCALE_SUBPEL_BITS * 2);
+ assert(x_step_qn <= SCALE_SUBPEL_BITS * 2);
+
+ convolve_horiz_scale_c(src - src_stride * (SUBPEL_TAPS / 2 - 1), src_stride,
+ temp, MAX_SB_SIZE, x_filters, x0_qn, x_step_qn, w,
+ intermediate_height);
+ convolve_vert_scale_c(temp + MAX_SB_SIZE * (SUBPEL_TAPS / 2 - 1), MAX_SB_SIZE,
+ dst, dst_stride, y_filters, y0_qn, y_step_qn, w, h);
+}
+
static const InterpKernel *get_filter_base(const int16_t *filter) {
// NOTE: This assumes that the filter table is 256-byte aligned.
// TODO(agrange) Modify to make independent of table alignment.
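[Editor's note: the buffer-sizing comment in convolve_scale_c above is still marked TODO and describes the old 1/16-pel scheme; the code itself sizes the intermediate region from the Q10 step. A quick evaluation of that expression under an assumed 2:1 vertical downscale (y_step_qn = 2048) and zero starting phase; illustrative only, not part of the diff.]

#include <assert.h>

#define SCALE_SUBPEL_BITS 10
#define SUBPEL_TAPS 8

int main(void) {
  const int h = 64;                              /* block height from the comment's worst case */
  const int y_step_qn = 2 << SCALE_SUBPEL_BITS;  /* assumed 2:1 downscale step in Q10 */
  const int y0_qn = 0;
  const int intermediate_height =
      (((h - 1) * y_step_qn + y0_qn) >> SCALE_SUBPEL_BITS) + SUBPEL_TAPS;
  assert(intermediate_height == 134);  /* 126 filtered rows + 8 taps of context */
  return 0;
}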
@@ -171,6 +306,21 @@ void aom_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
w, h);
}
+void aom_convolve8_horiz_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int subpel_x,
+ int x_step_qn, const int16_t *filter_y,
+ int subpel_y, int y_step_qn, int w, int h) {
+ const InterpKernel *const filters_x = get_filter_base(filter_x);
+
+ (void)subpel_y;
+ (void)filter_y;
+ (void)y_step_qn;
+
+ convolve_horiz_scale_c(src, src_stride, dst, dst_stride, filters_x, subpel_x,
+ x_step_qn, w, h);
+}
+
void aom_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -186,6 +336,22 @@ void aom_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
x_step_q4, w, h);
}
+void aom_convolve8_avg_horiz_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int subpel_x,
+ int x_step_qn, const int16_t *filter_y,
+ int subpel_y, int y_step_qn, int w,
+ int h) {
+ const InterpKernel *const filters_x = get_filter_base(filter_x);
+
+ (void)subpel_y;
+ (void)filter_y;
+ (void)y_step_qn;
+
+ convolve_avg_horiz_scale_c(src, src_stride, dst, dst_stride, filters_x,
+ subpel_x, x_step_qn, w, h);
+}
+
void aom_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -201,6 +367,21 @@ void aom_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
w, h);
}
+void aom_convolve8_vert_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int subpel_x,
+ int x_step_qn, const int16_t *filter_y,
+ int subpel_y, int y_step_qn, int w, int h) {
+ const InterpKernel *const filters_y = get_filter_base(filter_y);
+
+ (void)subpel_x;
+ (void)filter_x;
+ (void)x_step_qn;
+
+ convolve_vert_scale_c(src, src_stride, dst, dst_stride, filters_y, subpel_y,
+ y_step_qn, w, h);
+}
+
void aom_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -216,6 +397,21 @@ void aom_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
y_step_q4, w, h);
}
+void aom_convolve8_avg_vert_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int subpel_x,
+ int x_step_qn, const int16_t *filter_y,
+ int subpel_y, int y_step_qn, int w, int h) {
+ const InterpKernel *const filters_y = get_filter_base(filter_y);
+
+ (void)subpel_x;
+ (void)filter_x;
+ (void)x_step_qn;
+
+ convolve_avg_vert_scale_c(src, src_stride, dst, dst_stride, filters_y,
+ subpel_y, y_step_qn, w, h);
+}
+
void aom_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
@@ -230,6 +426,19 @@ void aom_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
filters_y, y0_q4, y_step_q4, w, h);
}
+void aom_convolve8_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int subpel_x, int x_step_qn,
+ const int16_t *filter_y, int subpel_y, int y_step_qn,
+ int w, int h) {
+ const InterpKernel *const filters_x = get_filter_base(filter_x);
+
+ const InterpKernel *const filters_y = get_filter_base(filter_y);
+
+ convolve_scale_c(src, src_stride, dst, dst_stride, filters_x, subpel_x,
+ x_step_qn, filters_y, subpel_y, y_step_qn, w, h);
+}
+
void aom_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int x_step_q4, const int16_t *filter_y, int y_step_q4,
@@ -245,6 +454,22 @@ void aom_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
h);
}
+void aom_convolve8_avg_scale_c(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int subpel_x,
+ int x_step_qn, const int16_t *filter_y,
+ int subpel_y, int y_step_qn, int w, int h) {
+ /* Fixed size intermediate buffer places limits on parameters. */
+ DECLARE_ALIGNED(16, uint8_t, temp[MAX_SB_SIZE * MAX_SB_SIZE]);
+ assert(w <= MAX_SB_SIZE);
+ assert(h <= MAX_SB_SIZE);
+
+ aom_convolve8_scale_c(src, src_stride, temp, MAX_SB_SIZE, filter_x, subpel_x,
+ x_step_qn, filter_y, subpel_y, y_step_qn, w, h);
+ aom_convolve_avg_c(temp, MAX_SB_SIZE, dst, dst_stride, NULL, 0, NULL, 0, w,
+ h);
+}
+
void aom_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
ptrdiff_t dst_stride, const int16_t *filter_x,
int filter_x_stride, const int16_t *filter_y,
@@ -332,6 +557,7 @@ void aom_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
filter_y, y_step_q4, w, h);
}
+// TODO(afergs): Make sure this works too
#if CONFIG_LOOP_RESTORATION
static void convolve_add_src_horiz(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
@@ -569,6 +795,7 @@ void aom_convolve8_add_src_hip_c(const uint8_t *src, ptrdiff_t src_stride,
}
#endif // CONFIG_LOOP_RESTORATION
+// TODO(afergs): Make sure this works too
#if CONFIG_HIGHBITDEPTH
static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
uint8_t *dst8, ptrdiff_t dst_stride,
diff --git a/third_party/aom/aom_dsp/aom_dsp.cmake b/third_party/aom/aom_dsp/aom_dsp.cmake
index 5a49ae817..3ce6761ca 100644
--- a/third_party/aom/aom_dsp/aom_dsp.cmake
+++ b/third_party/aom/aom_dsp/aom_dsp.cmake
@@ -287,6 +287,7 @@ if (CONFIG_AV1_ENCODER)
"${AOM_ROOT}/aom_dsp/x86/fwd_dct32x32_impl_avx2.h"
"${AOM_ROOT}/aom_dsp/x86/fwd_txfm_avx2.c"
"${AOM_ROOT}/aom_dsp/x86/fwd_txfm_avx2.h"
+ "${AOM_ROOT}/aom_dsp/x86/highbd_quantize_intrin_avx2.c"
"${AOM_ROOT}/aom_dsp/x86/sad4d_avx2.c"
"${AOM_ROOT}/aom_dsp/x86/sad_avx2.c"
"${AOM_ROOT}/aom_dsp/x86/sad_impl_avx2.c"
@@ -312,6 +313,7 @@ if (CONFIG_AV1_ENCODER)
"${AOM_ROOT}/aom_dsp/x86/fwd_txfm_sse2.c"
"${AOM_ROOT}/aom_dsp/x86/fwd_txfm_sse2.h"
"${AOM_ROOT}/aom_dsp/x86/halfpix_variance_sse2.c"
+ "${AOM_ROOT}/aom_dsp/x86/highbd_quantize_intrin_sse2.c"
"${AOM_ROOT}/aom_dsp/x86/variance_sse2.c"
"${AOM_ROOT}/aom_dsp/x86/sum_squares_sse2.c")
@@ -330,10 +332,16 @@ if (CONFIG_AV1_ENCODER)
"${AOM_ROOT}/aom_dsp/mips/variance_msa.c"
"${AOM_ROOT}/aom_dsp/mips/sub_pixel_variance_msa.c")
+ if (CONFIG_EXT_INTER)
+ set(AOM_DSP_ENCODER_INTRIN_SSSE3
+ ${AOM_DSP_ENCODER_INTRIN_SSSE3}
+ "${AOM_ROOT}/aom_dsp/x86/masked_sad_intrin_ssse3.c"
+ "${AOM_ROOT}/aom_dsp/x86/masked_variance_intrin_ssse3.c")
+ endif ()
+
if (CONFIG_HIGHBITDEPTH)
set(AOM_DSP_ENCODER_INTRIN_SSE2
${AOM_DSP_ENCODER_INTRIN_SSE2}
- "${AOM_ROOT}/aom_dsp/x86/highbd_quantize_intrin_sse2.c"
"${AOM_ROOT}/aom_dsp/x86/highbd_subtract_sse2.c")
endif ()
endif ()
@@ -407,29 +415,38 @@ endif ()
# has been created.
function (setup_aom_dsp_targets)
add_library(aom_dsp_common OBJECT ${AOM_DSP_COMMON_SOURCES})
- set(AOM_LIB_TARGETS ${AOM_LIB_TARGETS} aom_dsp_common)
- target_sources(aom PUBLIC $<TARGET_OBJECTS:aom_dsp_common>)
+ list(APPEND AOM_LIB_TARGETS aom_dsp_common)
+ create_dummy_source_file("aom_av1" "c" "dummy_source_file")
+ add_library(aom_dsp OBJECT "${dummy_source_file}")
+ target_sources(aom PRIVATE $<TARGET_OBJECTS:aom_dsp_common>)
+ list(APPEND AOM_LIB_TARGETS aom_dsp)
+
+ # Not all generators support libraries consisting only of object files. Add a
+ # dummy source file to the aom_dsp target.
+ add_dummy_source_file_to_target("aom_dsp" "c")
if (CONFIG_AV1_DECODER)
add_library(aom_dsp_decoder OBJECT ${AOM_DSP_DECODER_SOURCES})
set(AOM_LIB_TARGETS ${AOM_LIB_TARGETS} aom_dsp_decoder)
- target_sources(aom PUBLIC $<TARGET_OBJECTS:aom_dsp_decoder>)
+ target_sources(aom PRIVATE $<TARGET_OBJECTS:aom_dsp_decoder>)
endif ()
if (CONFIG_AV1_ENCODER)
add_library(aom_dsp_encoder OBJECT ${AOM_DSP_ENCODER_SOURCES})
set(AOM_LIB_TARGETS ${AOM_LIB_TARGETS} aom_dsp_encoder)
- target_sources(aom PUBLIC $<TARGET_OBJECTS:aom_dsp_encoder>)
+ target_sources(aom PRIVATE $<TARGET_OBJECTS:aom_dsp_encoder>)
endif ()
if (HAVE_SSE2)
add_asm_library("aom_dsp_common_sse2" "AOM_DSP_COMMON_ASM_SSE2" "aom")
add_intrinsics_object_library("-msse2" "sse2" "aom_dsp_common"
- "AOM_DSP_COMMON_INTRIN_SSE2")
+ "AOM_DSP_COMMON_INTRIN_SSE2" "aom")
+
if (CONFIG_AV1_ENCODER)
- add_asm_library("aom_dsp_encoder_sse2" "AOM_DSP_ENCODER_ASM_SSE2" "aom")
+ add_asm_library("aom_dsp_encoder_sse2" "AOM_DSP_ENCODER_ASM_SSE2"
+ "aom")
add_intrinsics_object_library("-msse2" "sse2" "aom_dsp_encoder"
- "AOM_DSP_ENCODER_INTRIN_SSE2")
+ "AOM_DSP_ENCODER_INTRIN_SSE2" "aom")
endif()
endif ()
@@ -440,7 +457,7 @@ function (setup_aom_dsp_targets)
if (HAVE_SSSE3)
add_asm_library("aom_dsp_common_ssse3" "AOM_DSP_COMMON_ASM_SSSE3" "aom")
add_intrinsics_object_library("-mssse3" "ssse3" "aom_dsp_common"
- "AOM_DSP_COMMON_INTRIN_SSSE3")
+ "AOM_DSP_COMMON_INTRIN_SSSE3" "aom")
if (CONFIG_AV1_ENCODER)
if ("${AOM_TARGET_CPU}" STREQUAL "x86_64")
@@ -448,16 +465,20 @@ function (setup_aom_dsp_targets)
${AOM_DSP_ENCODER_ASM_SSSE3_X86_64})
endif ()
add_asm_library("aom_dsp_encoder_ssse3" "AOM_DSP_ENCODER_ASM_SSSE3" "aom")
+ if (AOM_DSP_ENCODER_INTRIN_SSSE3)
+ add_intrinsics_object_library("-mssse3" "ssse3" "aom_dsp_encoder"
+ "AOM_DSP_ENCODER_INTRIN_SSSE3" "aom")
+ endif ()
endif ()
endif ()
if (HAVE_SSE4_1)
add_intrinsics_object_library("-msse4.1" "sse4_1" "aom_dsp_common"
- "AOM_DSP_COMMON_INTRIN_SSE4_1")
+ "AOM_DSP_COMMON_INTRIN_SSE4_1" "aom")
if (CONFIG_AV1_ENCODER)
if (AOM_DSP_ENCODER_INTRIN_SSE4_1)
add_intrinsics_object_library("-msse4.1" "sse4_1" "aom_dsp_encoder"
- "AOM_DSP_ENCODER_INTRIN_SSE4_1")
+ "AOM_DSP_ENCODER_INTRIN_SSE4_1" "aom")
endif ()
add_asm_library("aom_dsp_encoder_sse4_1" "AOM_DSP_ENCODER_ASM_SSE4_1"
"aom")
@@ -473,10 +494,10 @@ function (setup_aom_dsp_targets)
if (HAVE_AVX2)
add_intrinsics_object_library("-mavx2" "avx2" "aom_dsp_common"
- "AOM_DSP_COMMON_INTRIN_AVX2")
+ "AOM_DSP_COMMON_INTRIN_AVX2" "aom")
if (CONFIG_AV1_ENCODER)
add_intrinsics_object_library("-mavx2" "avx2" "aom_dsp_encoder"
- "AOM_DSP_ENCODER_INTRIN_AVX2")
+ "AOM_DSP_ENCODER_INTRIN_AVX2" "aom")
endif ()
endif ()
@@ -490,20 +511,21 @@ function (setup_aom_dsp_targets)
if (HAVE_NEON)
add_intrinsics_object_library("${AOM_NEON_INTRIN_FLAG}" "neon"
- "aom_dsp_common" "AOM_DSP_COMMON_INTRIN_NEON")
+ "aom_dsp_common" "AOM_DSP_COMMON_INTRIN_NEON"
+ "aom")
endif ()
if (HAVE_DSPR2)
add_intrinsics_object_library("" "dspr2" "aom_dsp_common"
- "AOM_DSP_COMMON_INTRIN_DSPR2")
+ "AOM_DSP_COMMON_INTRIN_DSPR2" "aom")
endif ()
if (HAVE_MSA)
add_intrinsics_object_library("" "msa" "aom_dsp_common"
- "AOM_DSP_COMMON_INTRIN_MSA")
+ "AOM_DSP_COMMON_INTRIN_MSA" "aom")
if (CONFIG_AV1_ENCODER)
add_intrinsics_object_library("" "msa" "aom_dsp_encoder"
- "AOM_DSP_ENCODER_INTRIN_MSA")
+ "AOM_DSP_ENCODER_INTRIN_MSA" "aom")
endif ()
endif ()
diff --git a/third_party/aom/aom_dsp/aom_dsp.mk b/third_party/aom/aom_dsp/aom_dsp.mk
index 6e2d5630e..f9d675ac0 100644
--- a/third_party/aom/aom_dsp/aom_dsp.mk
+++ b/third_party/aom/aom_dsp/aom_dsp.mk
@@ -290,9 +290,10 @@ DSP_SRCS-yes += quantize.c
DSP_SRCS-yes += quantize.h
DSP_SRCS-$(HAVE_SSE2) += x86/quantize_sse2.c
-ifeq ($(CONFIG_HIGHBITDEPTH),yes)
+
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_quantize_intrin_sse2.c
-endif
+DSP_SRCS-$(HAVE_AVX2) += x86/highbd_quantize_intrin_avx2.c
+
ifeq ($(ARCH_X86_64),yes)
DSP_SRCS-$(HAVE_SSSE3) += x86/quantize_ssse3_x86_64.asm
DSP_SRCS-$(HAVE_AVX) += x86/quantize_avx_x86_64.asm
diff --git a/third_party/aom/aom_dsp/aom_dsp_common.h b/third_party/aom/aom_dsp/aom_dsp_common.h
index 82f9a95e9..5b104321b 100644
--- a/third_party/aom/aom_dsp/aom_dsp_common.h
+++ b/third_party/aom/aom_dsp/aom_dsp_common.h
@@ -31,8 +31,6 @@ extern "C" {
#define AOMMIN(x, y) (((x) < (y)) ? (x) : (y))
#define AOMMAX(x, y) (((x) > (y)) ? (x) : (y))
-#define NELEMENTS(x) (sizeof((x)) / sizeof((x)[0]))
-
#define IMPLIES(a, b) (!(a) || (b)) // Logical 'a implies b' (or 'a -> b')
#define IS_POWER_OF_TWO(x) (((x) & ((x)-1)) == 0)
@@ -54,16 +52,9 @@ extern "C" {
#define UNLIKELY(v) (v)
#endif
-#define AOM_SWAP(type, a, b) \
- do { \
- type c = (b); \
- b = a; \
- a = c; \
- } while (0)
-
#if CONFIG_AOM_QM
typedef uint16_t qm_val_t;
-#define AOM_QM_BITS 6
+#define AOM_QM_BITS 5
#endif
#if CONFIG_HIGHBITDEPTH
// Note:
@@ -87,11 +78,14 @@ static INLINE int clamp(int value, int low, int high) {
return value < low ? low : (value > high ? high : value);
}
+static INLINE int64_t clamp64(int64_t value, int64_t low, int64_t high) {
+ return value < low ? low : (value > high ? high : value);
+}
+
static INLINE double fclamp(double value, double low, double high) {
return value < low ? low : (value > high ? high : value);
}
-#if CONFIG_HIGHBITDEPTH
static INLINE uint16_t clip_pixel_highbd(int val, int bd) {
switch (bd) {
case 8:
@@ -100,7 +94,6 @@ static INLINE uint16_t clip_pixel_highbd(int val, int bd) {
case 12: return (uint16_t)clamp(val, 0, 4095);
}
}
-#endif // CONFIG_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
diff --git a/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl b/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
index 8047cbc09..0c0356870 100755
--- a/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/third_party/aom/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -40,12 +40,26 @@ foreach $w (@block_widths) {
push @block_sizes, [$w, $h] if ($w <= 2*$h && $h <= 2*$w) ;
}
}
+if (aom_config("CONFIG_EXT_PARTITION_TYPES")) {
+ push @block_sizes, [4, 16];
+ push @block_sizes, [16, 4];
+ push @block_sizes, [8, 32];
+ push @block_sizes, [32, 8];
+}
@tx_dims = (2, 4, 8, 16, 32);
if (aom_config("CONFIG_TX64X64") eq "yes") {
push @tx_dims, '64';
}
+@tx_sizes = ();
+foreach $w (@tx_dims) {
+ push @tx_sizes, [$w, $w];
+ foreach $h (@tx_dims) {
+ push @tx_sizes, [$w, $h] if ($w >=4 && $h >=4 && ($w == 2*$h || $h == 2*$w));
+ }
+}
+
@pred_names = qw/dc dc_top dc_left dc_128 v h d207e d63e d45e d117 d135 d153/;
if (aom_config("CONFIG_ALT_INTRA") eq "yes") {
push @pred_names, qw/paeth smooth/;
@@ -60,9 +74,8 @@ if (aom_config("CONFIG_ALT_INTRA") eq "yes") {
# Intra prediction
#
-foreach $dim (@tx_dims) {
- $w = ${dim};
- $h = ${dim};
+foreach (@tx_sizes) {
+ ($w, $h) = @$_;
foreach $pred_name (@pred_names) {
add_proto "void", "aom_${pred_name}_predictor_${w}x${h}",
"uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
@@ -142,20 +155,27 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
#
# Sub Pixel Filters
#
-add_proto qw/void aom_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_scaled_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_scaled_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_scaled_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_scaled_avg_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_scaled_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-add_proto qw/void aom_scaled_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_2d/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_scaled_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
+
+add_proto qw/void aom_convolve8_horiz_scale/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int subpel_x, int x_step_q4, const int16_t *filter_y, int subpel_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_vert_scale/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int subpel_x, int x_step_q4, const int16_t *filter_y, int subpel_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_horiz_scale/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int subpel_x, int x_step_q4, const int16_t *filter_y, int subpel_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_vert_scale/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int subpel_x, int x_step_q4, const int16_t *filter_y, int subpel_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_scale/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int subpel_x, int x_step_q4, const int16_t *filter_y, int subpel_y, int y_step_q4, int w, int h";
+add_proto qw/void aom_convolve8_avg_scale/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int subpel_x, int x_step_q4, const int16_t *filter_y, int subpel_y, int y_step_q4, int w, int h";
specialize qw/aom_convolve_copy sse2 /;
specialize qw/aom_convolve_avg sse2 /;
@@ -334,24 +354,15 @@ if ((aom_config("CONFIG_AV1_ENCODER") eq "yes") || (aom_config("CONFIG_PVQ") eq
add_proto qw/void aom_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct8x8 sse2/, "$ssse3_x86_64";
- add_proto qw/void aom_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/aom_fdct8x8_1 sse2/;
-
add_proto qw/void aom_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct16x16 sse2/;
- add_proto qw/void aom_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/aom_fdct16x16_1 sse2 avx2/;
-
add_proto qw/void aom_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct32x32 sse2 avx2/;
add_proto qw/void aom_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct32x32_rd sse2 avx2/;
- add_proto qw/void aom_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/aom_fdct32x32_1 sse2 avx2/;
-
# High bit depth
add_proto qw/void aom_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_highbd_fdct4x4 sse2/;
@@ -359,20 +370,15 @@ if ((aom_config("CONFIG_AV1_ENCODER") eq "yes") || (aom_config("CONFIG_PVQ") eq
add_proto qw/void aom_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_highbd_fdct8x8 sse2/;
- add_proto qw/void aom_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-
add_proto qw/void aom_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_highbd_fdct16x16 sse2/;
- add_proto qw/void aom_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-
add_proto qw/void aom_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_highbd_fdct32x32 sse2/;
add_proto qw/void aom_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_highbd_fdct32x32_rd sse2/;
- add_proto qw/void aom_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
} else {
add_proto qw/void aom_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct4x4 sse2 msa/;
@@ -383,47 +389,25 @@ if ((aom_config("CONFIG_AV1_ENCODER") eq "yes") || (aom_config("CONFIG_PVQ") eq
add_proto qw/void aom_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct8x8 sse2 neon msa/, "$ssse3_x86_64";
- add_proto qw/void aom_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/aom_fdct8x8_1 sse2 neon msa/;
-
add_proto qw/void aom_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct16x16 sse2 msa/;
- add_proto qw/void aom_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/aom_fdct16x16_1 sse2 avx2 msa/;
-
add_proto qw/void aom_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct32x32 sse2 avx2 msa/;
add_proto qw/void aom_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
specialize qw/aom_fdct32x32_rd sse2 avx2 msa/;
-
- add_proto qw/void aom_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
- specialize qw/aom_fdct32x32_1 sse2 avx2 msa/;
} # CONFIG_HIGHBITDEPTH
} # CONFIG_AV1_ENCODER
#
# Inverse transform
if (aom_config("CONFIG_AV1") eq "yes") {
-if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
specialize qw/aom_iwht4x4_16_add sse2/;
- add_proto qw/void aom_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-
- add_proto qw/void aom_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-
- add_proto qw/void aom_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-
- add_proto qw/void aom_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-
- add_proto qw/void aom_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-
- add_proto qw/void aom_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-
add_proto qw/void aom_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
add_proto qw/void aom_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
@@ -469,10 +453,8 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
specialize qw/aom_idct32x32_1_add sse2 avx2/;
-
- add_proto qw/void aom_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
- specialize qw/aom_highbd_idct4x4_16_add sse2/;
}
+if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
} else {
{
add_proto qw/void aom_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
@@ -541,13 +523,12 @@ if (aom_config("CONFIG_AOM_QM") eq "yes") {
add_proto qw/void aom_quantize_b_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+ add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+ add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+
+ add_proto qw/void aom_highbd_quantize_b_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- add_proto qw/void aom_highbd_quantize_b_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
- } # CONFIG_HIGHBITDEPTH
} # CONFIG_AV1_ENCODER
} else {
if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
@@ -559,21 +540,23 @@ if (aom_config("CONFIG_AOM_QM") eq "yes") {
add_proto qw/void aom_quantize_b_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/aom_highbd_quantize_b sse2/;
+ add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/aom_highbd_quantize_b sse2 avx2/;
- add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/aom_highbd_quantize_b_32x32 sse2/;
+ add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/aom_highbd_quantize_b_32x32 sse2/;
+
+ add_proto qw/void aom_highbd_quantize_b_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- add_proto qw/void aom_highbd_quantize_b_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- } # CONFIG_HIGHBITDEPTH
} # CONFIG_AV1_ENCODER
} # CONFIG_AOM_QM
if (aom_config("CONFIG_AV1") eq "yes") {
#
# Alpha blending with mask
#
+ if (aom_config("CONFIG_CONVOLVE_ROUND") eq "yes") {
+ add_proto qw/void aom_blend_a64_d32_mask/, "int32_t *dst, uint32_t dst_stride, const int32_t *src0, uint32_t src0_stride, const int32_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx";
+ }
add_proto qw/void aom_blend_a64_mask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, uint32_t mask_stride, int h, int w, int suby, int subx";
add_proto qw/void aom_blend_a64_hmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
add_proto qw/void aom_blend_a64_vmask/, "uint8_t *dst, uint32_t dst_stride, const uint8_t *src0, uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride, const uint8_t *mask, int h, int w";
@@ -927,15 +910,15 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
#
# ...
#
-add_proto qw/void aom_upsampled_pred/, "uint8_t *comp_pred, int width, int height, const uint8_t *ref, int ref_stride";
+add_proto qw/void aom_upsampled_pred/, "uint8_t *comp_pred, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref, int ref_stride";
specialize qw/aom_upsampled_pred sse2/;
-add_proto qw/void aom_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
+add_proto qw/void aom_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref, int ref_stride";
specialize qw/aom_comp_avg_upsampled_pred sse2/;
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
- add_proto qw/void aom_highbd_upsampled_pred/, "uint16_t *comp_pred, int width, int height, const uint8_t *ref8, int ref_stride";
+ add_proto qw/void aom_highbd_upsampled_pred/, "uint16_t *comp_pred, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref8, int ref_stride, int bd";
specialize qw/aom_highbd_upsampled_pred sse2/;
- add_proto qw/void aom_highbd_comp_avg_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
+ add_proto qw/void aom_highbd_comp_avg_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref8, int ref_stride, int bd";
specialize qw/aom_highbd_comp_avg_upsampled_pred sse2/;
}
@@ -1005,6 +988,22 @@ specialize qw/aom_sub_pixel_avg_variance8x8 msa sse2 ssse3/;
specialize qw/aom_sub_pixel_avg_variance8x4 msa sse2 ssse3/;
specialize qw/aom_sub_pixel_avg_variance4x8 msa sse2 ssse3/;
specialize qw/aom_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
+
+if (aom_config("CONFIG_EXT_PARTITION_TYPES")) {
+ specialize qw/aom_variance4x16 sse2/;
+ specialize qw/aom_variance16x4 sse2/;
+ specialize qw/aom_variance8x32 sse2/;
+ specialize qw/aom_variance32x8 sse2/;
+ specialize qw/aom_sub_pixel_variance4x16 sse2 ssse3/;
+ specialize qw/aom_sub_pixel_variance16x4 sse2 ssse3/;
+ specialize qw/aom_sub_pixel_variance8x32 sse2 ssse3/;
+ specialize qw/aom_sub_pixel_variance32x8 sse2 ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance4x16 sse2 ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance16x4 sse2 ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance8x32 sse2 ssse3/;
+ specialize qw/aom_sub_pixel_avg_variance32x8 sse2 ssse3/;
+}
+
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
foreach $bd (8, 10, 12) {
add_proto qw/unsigned int/, "aom_highbd_${bd}_variance2x2", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
@@ -1021,6 +1020,8 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
if ($w != 128 && $h != 128 && $w != 4 && $h != 4) {
specialize "aom_highbd_${bd}_variance${w}x${h}", "sse2";
}
+ # TODO(david.barker): When ext-partition-types is enabled, we currently
+ # don't have vectorized 4x16 highbd variance functions
if ($w == 4 && $h == 4) {
specialize "aom_highbd_${bd}_variance${w}x${h}", "sse4_1";
}
@@ -1496,10 +1497,10 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
if (aom_config("CONFIG_EXT_INTER") eq "yes") {
add_proto qw/void aom_comp_mask_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
- add_proto qw/void aom_comp_mask_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
+ add_proto qw/void aom_comp_mask_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_highbd_comp_mask_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
- add_proto qw/void aom_highbd_comp_mask_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
+ add_proto qw/void aom_highbd_comp_mask_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref8, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask, int bd";
}
}
diff --git a/third_party/aom/aom_dsp/aom_filter.h b/third_party/aom/aom_dsp/aom_filter.h
index 04d113dd3..58e8bb284 100644
--- a/third_party/aom/aom_dsp/aom_filter.h
+++ b/third_party/aom/aom_dsp/aom_filter.h
@@ -25,6 +25,12 @@ extern "C" {
#define SUBPEL_SHIFTS (1 << SUBPEL_BITS)
#define SUBPEL_TAPS 8
+#define SCALE_SUBPEL_BITS 10
+#define SCALE_SUBPEL_SHIFTS (1 << SCALE_SUBPEL_BITS)
+#define SCALE_SUBPEL_MASK (SCALE_SUBPEL_SHIFTS - 1)
+#define SCALE_EXTRA_BITS (SCALE_SUBPEL_BITS - SUBPEL_BITS)
+#define SCALE_EXTRA_OFF ((1 << SCALE_EXTRA_BITS) / 2)
+
typedef int16_t InterpKernel[SUBPEL_TAPS];
#define BIL_SUBPEL_BITS 3
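[Editor's note: the numeric values implied by the new scale defines above, assuming SUBPEL_BITS is 4, its usual value in this header. Pure arithmetic, no library calls; not part of the diff.]

#include <assert.h>

#define SUBPEL_BITS 4
#define SCALE_SUBPEL_BITS 10
#define SCALE_SUBPEL_SHIFTS (1 << SCALE_SUBPEL_BITS)
#define SCALE_SUBPEL_MASK (SCALE_SUBPEL_SHIFTS - 1)
#define SCALE_EXTRA_BITS (SCALE_SUBPEL_BITS - SUBPEL_BITS)
#define SCALE_EXTRA_OFF ((1 << SCALE_EXTRA_BITS) / 2)

int main(void) {
  assert(SCALE_SUBPEL_SHIFTS == 1024);  /* sub-pixel positions per pixel */
  assert(SCALE_SUBPEL_MASK == 1023);
  assert(SCALE_EXTRA_BITS == 6);        /* extra precision over the 1/16-pel phase */
  assert(SCALE_EXTRA_OFF == 32);        /* half of one 1/16-pel step in Q10 units */
  return 0;
}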
diff --git a/third_party/aom/aom_dsp/bitreader.h b/third_party/aom/aom_dsp/bitreader.h
index 5bad70cb3..88bedccc2 100644
--- a/third_party/aom/aom_dsp/bitreader.h
+++ b/third_party/aom/aom_dsp/bitreader.h
@@ -194,9 +194,7 @@ static INLINE int aom_read_symbol_(aom_reader *r, aom_cdf_prob *cdf,
int nsymbs ACCT_STR_PARAM) {
int ret;
ret = aom_read_cdf(r, cdf, nsymbs, ACCT_STR_NAME);
-#if CONFIG_EC_ADAPT
update_cdf(cdf, ret, nsymbs);
-#endif
return ret;
}
diff --git a/third_party/aom/aom_dsp/bitwriter.h b/third_party/aom/aom_dsp/bitwriter.h
index 588e47bf3..68bc1c8f8 100644
--- a/third_party/aom/aom_dsp/bitwriter.h
+++ b/third_party/aom/aom_dsp/bitwriter.h
@@ -140,9 +140,7 @@ static INLINE void aom_write_cdf(aom_writer *w, int symb,
static INLINE void aom_write_symbol(aom_writer *w, int symb, aom_cdf_prob *cdf,
int nsymbs) {
aom_write_cdf(w, symb, cdf, nsymbs);
-#if CONFIG_EC_ADAPT
update_cdf(cdf, symb, nsymbs);
-#endif
}
static INLINE void aom_write_tree_as_cdf(aom_writer *w,
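[Editor's note: with the CONFIG_EC_ADAPT guards removed, aom_read_symbol and aom_write_symbol always adapt the context CDF after coding a symbol. The sketch below is a generic stand-in for that idea, not the library's update_cdf(): it nudges a plain ascending CDF toward the coded symbol at a fixed hypothetical rate.]

#include <stdint.h>
#include <stdio.h>

typedef uint16_t aom_cdf_prob;

/* Toy adaptation: cdf[i] approximates P(symbol <= i) scaled to 1 << 15. */
static void toy_update_cdf(aom_cdf_prob *cdf, int symb, int nsymbs) {
  const int rate = 5;  /* hypothetical; real coders derive this from a counter */
  for (int i = 0; i < nsymbs - 1; ++i) {
    if (i >= symb)
      cdf[i] += (32768 - cdf[i]) >> rate;  /* mass at or below symb grows */
    else
      cdf[i] -= cdf[i] >> rate;            /* mass strictly below symb shrinks */
  }
}

int main(void) {
  aom_cdf_prob cdf[3] = { 8192, 16384, 24576 };  /* uniform over 4 symbols */
  toy_update_cdf(cdf, 1, 4);                     /* pretend symbol 1 was coded */
  printf("%u %u %u\n", cdf[0], cdf[1], cdf[2]);  /* 7936 16896 24832 */
  return 0;
}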
diff --git a/third_party/aom/aom_dsp/blend_a64_mask.c b/third_party/aom/aom_dsp/blend_a64_mask.c
index 3e15542c9..c35fa19f8 100644
--- a/third_party/aom/aom_dsp/blend_a64_mask.c
+++ b/third_party/aom/aom_dsp/blend_a64_mask.c
@@ -18,6 +18,69 @@
#include "./aom_dsp_rtcd.h"
+#if CONFIG_CONVOLVE_ROUND
+// Blending with alpha mask. Mask values come from the range [0, 64],
+// as described for AOM_BLEND_A64 in aom_dsp/blend.h. src0 or src1 can
+// be the same as dst, or dst can be different from both sources.
+
+void aom_blend_a64_d32_mask_c(int32_t *dst, uint32_t dst_stride,
+ const int32_t *src0, uint32_t src0_stride,
+ const int32_t *src1, uint32_t src1_stride,
+ const uint8_t *mask, uint32_t mask_stride, int h,
+ int w, int subh, int subw) {
+ int i, j;
+
+ assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
+ assert(IMPLIES(src1 == dst, src1_stride == dst_stride));
+
+ assert(h >= 1);
+ assert(w >= 1);
+ assert(IS_POWER_OF_TWO(h));
+ assert(IS_POWER_OF_TWO(w));
+
+ if (subw == 0 && subh == 0) {
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; ++j) {
+ const int m = mask[i * mask_stride + j];
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
+ src1[i * src1_stride + j]);
+ }
+ }
+ } else if (subw == 1 && subh == 1) {
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; ++j) {
+ const int m = ROUND_POWER_OF_TWO(
+ mask[(2 * i) * mask_stride + (2 * j)] +
+ mask[(2 * i + 1) * mask_stride + (2 * j)] +
+ mask[(2 * i) * mask_stride + (2 * j + 1)] +
+ mask[(2 * i + 1) * mask_stride + (2 * j + 1)],
+ 2);
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
+ src1[i * src1_stride + j]);
+ }
+ }
+ } else if (subw == 1 && subh == 0) {
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; ++j) {
+ const int m = AOM_BLEND_AVG(mask[i * mask_stride + (2 * j)],
+ mask[i * mask_stride + (2 * j + 1)]);
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
+ src1[i * src1_stride + j]);
+ }
+ }
+ } else {
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; ++j) {
+ const int m = AOM_BLEND_AVG(mask[(2 * i) * mask_stride + j],
+ mask[(2 * i + 1) * mask_stride + j]);
+ dst[i * dst_stride + j] = AOM_BLEND_A64(m, src0[i * src0_stride + j],
+ src1[i * src1_stride + j]);
+ }
+ }
+ }
+}
+#endif // CONFIG_CONVOLVE_ROUND
+
// Blending with alpha mask. Mask values come from the range [0, 64],
// as described for AOM_BLEND_A64 in aom_dsp/blend.h. src0 or src1 can
// be the same as dst, or dst can be different from both sources.
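[Editor's note: a small numeric illustration of the blend described above, assuming AOM_BLEND_A64 from aom_dsp/blend.h amounts to a 6-bit alpha blend, (m*src0 + (64-m)*src1 + 32) >> 6. This is an illustrative restatement with a locally named macro, not code copied from the commit.]

#include <assert.h>

#define AOM_BLEND_A64_MAX_ALPHA 64
/* Assumed shape of the blend used by the functions above. */
#define TOY_BLEND_A64(m, a, b) \
  (((m) * (a) + (AOM_BLEND_A64_MAX_ALPHA - (m)) * (b) + 32) >> 6)

int main(void) {
  assert(TOY_BLEND_A64(64, 200, 10) == 200);  /* full mask selects src0 */
  assert(TOY_BLEND_A64(0, 200, 10) == 10);    /* zero mask selects src1 */
  assert(TOY_BLEND_A64(32, 200, 10) == 105);  /* halfway: (6400 + 320 + 32) >> 6 */
  return 0;
}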
diff --git a/third_party/aom/aom_dsp/fwd_txfm.c b/third_party/aom/aom_dsp/fwd_txfm.c
index 12ee02ba1..1ceef7782 100644
--- a/third_party/aom/aom_dsp/fwd_txfm.c
+++ b/third_party/aom/aom_dsp/fwd_txfm.c
@@ -172,15 +172,6 @@ void aom_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
}
}
-void aom_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
- int r, c;
- tran_low_t sum = 0;
- for (r = 0; r < 8; ++r)
- for (c = 0; c < 8; ++c) sum += input[r * stride + c];
-
- output[0] = sum;
-}
-
void aom_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
// The 2D transform is done with two passes which are actually pretty
// similar. In the first one, we transform the columns and transpose
@@ -361,15 +352,6 @@ void aom_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
}
}
-void aom_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
- int r, c;
- int sum = 0;
- for (r = 0; r < 16; ++r)
- for (c = 0; c < 16; ++c) sum += input[r * stride + c];
-
- output[0] = (tran_low_t)(sum >> 1);
-}
-
static INLINE tran_high_t dct_32_round(tran_high_t input) {
tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
// TODO(debargha, peter.derivaz): Find new bounds for this assert,
@@ -758,15 +740,6 @@ void aom_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
}
}
-void aom_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
- int r, c;
- int sum = 0;
- for (r = 0; r < 32; ++r)
- for (c = 0; c < 32; ++c) sum += input[r * stride + c];
-
- output[0] = (tran_low_t)(sum >> 3);
-}
-
#if CONFIG_HIGHBITDEPTH
void aom_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
int stride) {
@@ -778,32 +751,17 @@ void aom_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
aom_fdct8x8_c(input, final_output, stride);
}
-void aom_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
- int stride) {
- aom_fdct8x8_1_c(input, final_output, stride);
-}
-
void aom_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
int stride) {
aom_fdct16x16_c(input, output, stride);
}
-void aom_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
- int stride) {
- aom_fdct16x16_1_c(input, output, stride);
-}
-
void aom_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
aom_fdct32x32_c(input, out, stride);
}
-
void aom_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
int stride) {
aom_fdct32x32_rd_c(input, out, stride);
}
-void aom_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
- int stride) {
- aom_fdct32x32_1_c(input, out, stride);
-}
#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/fwd_txfm.h b/third_party/aom/aom_dsp/fwd_txfm.h
index 579dbd06e..f4dc04ab4 100644
--- a/third_party/aom/aom_dsp/fwd_txfm.h
+++ b/third_party/aom/aom_dsp/fwd_txfm.h
@@ -20,10 +20,5 @@ static INLINE tran_high_t saturate_int16(tran_high_t value) {
return result < INT16_MIN ? INT16_MIN : result;
}
-static INLINE tran_high_t fdct_round_shift(tran_high_t input) {
- tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
- return rv;
-}
-
void aom_fdct32(const tran_high_t *input, tran_high_t *output, int round);
#endif // AOM_DSP_FWD_TXFM_H_
diff --git a/third_party/aom/aom_dsp/intrapred.c b/third_party/aom/aom_dsp/intrapred.c
index 370d0374b..b4d47ae89 100644
--- a/third_party/aom/aom_dsp/intrapred.c
+++ b/third_party/aom/aom_dsp/intrapred.c
@@ -23,13 +23,14 @@
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
#define AVG2(a, b) (((a) + (b) + 1) >> 1)
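[Editor's note: quick numeric check of the two averaging macros the directional predictors below are built from; arbitrary sample values, not part of the diff.]

#include <assert.h>

#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
#define AVG2(a, b) (((a) + (b) + 1) >> 1)

int main(void) {
  assert(AVG3(10, 20, 30) == 20);  /* (10 + 40 + 30 + 2) >> 2 */
  assert(AVG2(10, 21) == 16);      /* (10 + 21 + 1) >> 1 */
  return 0;
}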
-static INLINE void d207e_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void d207e_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int r, c;
(void)above;
- for (r = 0; r < bs; ++r) {
- for (c = 0; c < bs; ++c) {
+ for (r = 0; r < bh; ++r) {
+ for (c = 0; c < bw; ++c) {
dst[c] = c & 1 ? AVG3(left[(c >> 1) + r], left[(c >> 1) + r + 1],
left[(c >> 1) + r + 2])
: AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]);
@@ -38,12 +39,13 @@ static INLINE void d207e_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
}
}
-static INLINE void d63e_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void d63e_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int r, c;
(void)left;
- for (r = 0; r < bs; ++r) {
- for (c = 0; c < bs; ++c) {
+ for (r = 0; r < bh; ++r) {
+ for (c = 0; c < bw; ++c) {
dst[c] = r & 1 ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1],
above[(r >> 1) + c + 2])
: AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
@@ -52,46 +54,49 @@ static INLINE void d63e_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
}
}
-static INLINE void d45e_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void d45e_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int r, c;
(void)left;
- for (r = 0; r < bs; ++r) {
- for (c = 0; c < bs; ++c) {
+ for (r = 0; r < bh; ++r) {
+ for (c = 0; c < bw; ++c) {
dst[c] = AVG3(above[r + c], above[r + c + 1],
- above[r + c + 1 + (r + c + 2 < bs * 2)]);
+ above[r + c + 1 + (r + c + 2 < bw + bh)]);
}
dst += stride;
}
}
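
Editor's note: the clamp in d45e above changes from 2 * bs to bw + bh, so for rectangular blocks the last usable above sample is expressed in terms of the combined dimensions rather than twice a single block size. A hedged sketch of just that index selection (standalone, names illustrative):

/* Illustrative: pick the third tap index for the D45E average at (r, c).
   Use above[r + c + 2] while it stays below bw + bh, otherwise repeat
   above[r + c + 1], matching the expression in the diff. */
static int d45e_third_tap_index(int r, int c, int bw, int bh) {
  return r + c + 1 + (r + c + 2 < bw + bh);
}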
-static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int r, c;
// first row
- for (c = 0; c < bs; c++) dst[c] = AVG2(above[c - 1], above[c]);
+ for (c = 0; c < bw; c++) dst[c] = AVG2(above[c - 1], above[c]);
dst += stride;
// second row
dst[0] = AVG3(left[0], above[-1], above[0]);
- for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
+ for (c = 1; c < bw; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
dst += stride;
// the rest of first col
dst[0] = AVG3(above[-1], left[0], left[1]);
- for (r = 3; r < bs; ++r)
+ for (r = 3; r < bh; ++r)
dst[(r - 2) * stride] = AVG3(left[r - 3], left[r - 2], left[r - 1]);
// the rest of the block
- for (r = 2; r < bs; ++r) {
- for (c = 1; c < bs; c++) dst[c] = dst[-2 * stride + c - 1];
+ for (r = 2; r < bh; ++r) {
+ for (c = 1; c < bw; c++) dst[c] = dst[-2 * stride + c - 1];
dst += stride;
}
}
-static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int i;
#if CONFIG_TX64X64
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ > 7
@@ -111,64 +116,65 @@ static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
#endif
#endif // CONFIG_TX64X64
- // dst(bs, bs - 2)[0], i.e., border starting at bottom-left
- for (i = 0; i < bs - 2; ++i) {
- border[i] = AVG3(left[bs - 3 - i], left[bs - 2 - i], left[bs - 1 - i]);
+ // dst(bh, bh - 2)[0], i.e., border starting at bottom-left
+ for (i = 0; i < bh - 2; ++i) {
+ border[i] = AVG3(left[bh - 3 - i], left[bh - 2 - i], left[bh - 1 - i]);
}
- border[bs - 2] = AVG3(above[-1], left[0], left[1]);
- border[bs - 1] = AVG3(left[0], above[-1], above[0]);
- border[bs - 0] = AVG3(above[-1], above[0], above[1]);
+ border[bh - 2] = AVG3(above[-1], left[0], left[1]);
+ border[bh - 1] = AVG3(left[0], above[-1], above[0]);
+ border[bh - 0] = AVG3(above[-1], above[0], above[1]);
// dst[0][2, size), i.e., remaining top border ascending
- for (i = 0; i < bs - 2; ++i) {
- border[bs + 1 + i] = AVG3(above[i], above[i + 1], above[i + 2]);
+ for (i = 0; i < bw - 2; ++i) {
+ border[bh + 1 + i] = AVG3(above[i], above[i + 1], above[i + 2]);
}
- for (i = 0; i < bs; ++i) {
- memcpy(dst + i * stride, border + bs - 1 - i, bs);
+ for (i = 0; i < bh; ++i) {
+ memcpy(dst + i * stride, border + bh - 1 - i, bw);
}
}
-static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int r, c;
dst[0] = AVG2(above[-1], left[0]);
- for (r = 1; r < bs; r++) dst[r * stride] = AVG2(left[r - 1], left[r]);
+ for (r = 1; r < bh; r++) dst[r * stride] = AVG2(left[r - 1], left[r]);
dst++;
dst[0] = AVG3(left[0], above[-1], above[0]);
dst[stride] = AVG3(above[-1], left[0], left[1]);
- for (r = 2; r < bs; r++)
+ for (r = 2; r < bh; r++)
dst[r * stride] = AVG3(left[r - 2], left[r - 1], left[r]);
dst++;
- for (c = 0; c < bs - 2; c++)
+ for (c = 0; c < bw - 2; c++)
dst[c] = AVG3(above[c - 1], above[c], above[c + 1]);
dst += stride;
- for (r = 1; r < bs; ++r) {
- for (c = 0; c < bs - 2; c++) dst[c] = dst[-stride + c - 2];
+ for (r = 1; r < bh; ++r) {
+ for (c = 0; c < bw - 2; c++) dst[c] = dst[-stride + c - 2];
dst += stride;
}
}
-static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
const uint8_t *above, const uint8_t *left) {
int r;
(void)left;
- for (r = 0; r < bs; r++) {
- memcpy(dst, above, bs);
+ for (r = 0; r < bh; r++) {
+ memcpy(dst, above, bw);
dst += stride;
}
}
-static INLINE void h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+static INLINE void h_predictor(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
const uint8_t *above, const uint8_t *left) {
int r;
(void)above;
- for (r = 0; r < bs; r++) {
- memset(dst, left[r], bs);
+ for (r = 0; r < bh; r++) {
+ memset(dst, left[r], bw);
dst += stride;
}
}
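
Editor's note: the predictors in this file all follow the same refactor: the single bs argument becomes a bw/bh pair, with rows iterated over bh and row widths (memcpy/memset lengths) taken from bw, which is what enables the rectangular wrapper sizes defined further down. A minimal standalone sketch of the vertical and horizontal cases, mirroring the loops in the diff:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative: vertical prediction copies the above row into each of bh rows. */
static void v_pred_sketch(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
                          const uint8_t *above) {
  for (int r = 0; r < bh; r++) {
    memcpy(dst, above, bw);
    dst += stride;
  }
}

/* Illustrative: horizontal prediction fills each row with its left sample. */
static void h_pred_sketch(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
                          const uint8_t *left) {
  for (int r = 0; r < bh; r++) {
    memset(dst, left[r], bw);
    dst += stride;
  }
}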
@@ -189,13 +195,14 @@ static INLINE uint16_t paeth_predictor_single(uint16_t left, uint16_t top,
: (p_top <= p_top_left) ? top : top_left;
}
-static INLINE void paeth_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void paeth_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int r, c;
const uint8_t ytop_left = above[-1];
- for (r = 0; r < bs; r++) {
- for (c = 0; c < bs; c++)
+ for (r = 0; r < bh; r++) {
+ for (c = 0; c < bw; c++)
dst[c] = (uint8_t)paeth_predictor_single(left[r], above[c], ytop_left);
dst += stride;
}
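
Editor's note: for context on the Paeth loop above, paeth_predictor_single returns whichever of left, top, or top-left lies closest to the base value left + top - top_left; only the tail of that selection is visible at the start of this hunk. A hedged sketch of the whole helper, reconstructed from that logic rather than copied from the source:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative reconstruction of the Paeth selection used by paeth_predictor.
   Ties go to left, then top, then top-left, matching the ordering shown above. */
static uint16_t paeth_single_sketch(uint16_t left, uint16_t top,
                                    uint16_t top_left) {
  const int base = left + top - top_left;
  const int p_left = abs(base - (int)left);
  const int p_top = abs(base - (int)top);
  const int p_top_left = abs(base - (int)top_left);

  return (p_left <= p_top && p_left <= p_top_left)
             ? left
             : (p_top <= p_top_left) ? top : top_left;
}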
@@ -236,32 +243,38 @@ static const uint8_t sm_weight_arrays[2 * MAX_BLOCK_DIM] = {
};
// Some basic checks on weights for smooth predictor.
-#define sm_weights_sanity_checks(weights, weights_scale, pred_scale) \
- assert(weights[0] < weights_scale); \
- assert(weights_scale - weights[bs - 1] < weights_scale); \
+#define sm_weights_sanity_checks(weights_w, weights_h, weights_scale, \
+ pred_scale) \
+ assert(weights_w[0] < weights_scale); \
+ assert(weights_h[0] < weights_scale); \
+ assert(weights_scale - weights_w[bw - 1] < weights_scale); \
+ assert(weights_scale - weights_h[bh - 1] < weights_scale); \
assert(pred_scale < 31) // ensures no overflow when calculating predictor.
#define divide_round(value, bits) (((value) + (1 << ((bits)-1))) >> (bits))
-static INLINE void smooth_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
- const uint8_t below_pred = left[bs - 1]; // estimated by bottom-left pixel
- const uint8_t right_pred = above[bs - 1]; // estimated by top-right pixel
- const uint8_t *const sm_weights = sm_weight_arrays + bs;
+static INLINE void smooth_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
+ const uint8_t below_pred = left[bh - 1]; // estimated by bottom-left pixel
+ const uint8_t right_pred = above[bw - 1]; // estimated by top-right pixel
+ const uint8_t *const sm_weights_w = sm_weight_arrays + bw;
+ const uint8_t *const sm_weights_h = sm_weight_arrays + bh;
// scale = 2 * 2^sm_weight_log2_scale
const int log2_scale = 1 + sm_weight_log2_scale;
const uint16_t scale = (1 << sm_weight_log2_scale);
- sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+ sm_weights_sanity_checks(sm_weights_w, sm_weights_h, scale,
+ log2_scale + sizeof(*dst));
int r;
- for (r = 0; r < bs; ++r) {
+ for (r = 0; r < bh; ++r) {
int c;
- for (c = 0; c < bs; ++c) {
+ for (c = 0; c < bw; ++c) {
const uint8_t pixels[] = { above[c], below_pred, left[r], right_pred };
- const uint8_t weights[] = { sm_weights[r], scale - sm_weights[r],
- sm_weights[c], scale - sm_weights[c] };
+ const uint8_t weights[] = { sm_weights_h[r], scale - sm_weights_h[r],
+ sm_weights_w[c], scale - sm_weights_w[c] };
uint32_t this_pred = 0;
int i;
- assert(scale >= sm_weights[r] && scale >= sm_weights[c]);
+ assert(scale >= sm_weights_h[r] && scale >= sm_weights_w[c]);
for (i = 0; i < 4; ++i) {
this_pred += weights[i] * pixels[i];
}
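
Editor's note: between these two smooth_predictor hunks the per-pixel math is unchanged; each output is a weighted blend of the above sample, the estimated below value, the left sample, and the estimated right value, with the row weight taken from the height table and the column weight from the width table, then rounded back down by divide_round. A minimal sketch of that blend for one pixel (the weight and scale arguments stand in for sm_weight_arrays and sm_weight_log2_scale):

#include <stdint.h>

#define DIVIDE_ROUND(value, bits) (((value) + (1 << ((bits)-1))) >> (bits))

/* Illustrative: one smooth-predictor output pixel.  final_shift is
   sm_weight_log2_scale + 1, because the four weights sum to 2 * scale. */
static uint8_t smooth_pixel_sketch(uint8_t above_c, uint8_t below,
                                   uint8_t left_r, uint8_t right,
                                   uint8_t w_row, uint8_t w_col,
                                   uint16_t scale, int final_shift) {
  uint32_t pred = (uint32_t)w_row * above_c + (uint32_t)(scale - w_row) * below +
                  (uint32_t)w_col * left_r + (uint32_t)(scale - w_col) * right;
  return (uint8_t)DIVIDE_ROUND(pred, final_shift);
}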
@@ -272,20 +285,21 @@ static INLINE void smooth_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
}
#if CONFIG_SMOOTH_HV
-static INLINE void smooth_v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
+static INLINE void smooth_v_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
const uint8_t *left) {
- const uint8_t below_pred = left[bs - 1]; // estimated by bottom-left pixel
- const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ const uint8_t below_pred = left[bh - 1]; // estimated by bottom-left pixel
+ const uint8_t *const sm_weights = sm_weight_arrays + bh;
// scale = 2^sm_weight_log2_scale
const int log2_scale = sm_weight_log2_scale;
const uint16_t scale = (1 << sm_weight_log2_scale);
- sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+ sm_weights_sanity_checks(sm_weights, sm_weights, scale,
+ log2_scale + sizeof(*dst));
int r;
- for (r = 0; r < bs; r++) {
+ for (r = 0; r < bh; r++) {
int c;
- for (c = 0; c < bs; ++c) {
+ for (c = 0; c < bw; ++c) {
const uint8_t pixels[] = { above[c], below_pred };
const uint8_t weights[] = { sm_weights[r], scale - sm_weights[r] };
uint32_t this_pred = 0;
@@ -300,20 +314,21 @@ static INLINE void smooth_v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
}
}
-static INLINE void smooth_h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
+static INLINE void smooth_h_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
const uint8_t *left) {
- const uint8_t right_pred = above[bs - 1]; // estimated by top-right pixel
- const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ const uint8_t right_pred = above[bw - 1]; // estimated by top-right pixel
+ const uint8_t *const sm_weights = sm_weight_arrays + bw;
// scale = 2^sm_weight_log2_scale
const int log2_scale = sm_weight_log2_scale;
const uint16_t scale = (1 << sm_weight_log2_scale);
- sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+ sm_weights_sanity_checks(sm_weights, sm_weights, scale,
+ log2_scale + sizeof(*dst));
int r;
- for (r = 0; r < bs; r++) {
+ for (r = 0; r < bh; r++) {
int c;
- for (c = 0; c < bs; ++c) {
+ for (c = 0; c < bw; ++c) {
const uint8_t pixels[] = { left[r], right_pred };
const uint8_t weights[] = { sm_weights[c], scale - sm_weights[c] };
uint32_t this_pred = 0;
@@ -331,74 +346,78 @@ static INLINE void smooth_h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
#else
-static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
const uint8_t *above, const uint8_t *left) {
int r, c;
int ytop_left = above[-1];
- for (r = 0; r < bs; r++) {
- for (c = 0; c < bs; c++)
+ for (r = 0; r < bh; r++) {
+ for (c = 0; c < bw; c++)
dst[c] = clip_pixel(left[r] + above[c] - ytop_left);
dst += stride;
}
}
#endif // CONFIG_ALT_INTRA
-static INLINE void dc_128_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void dc_128_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int r;
(void)above;
(void)left;
- for (r = 0; r < bs; r++) {
- memset(dst, 128, bs);
+ for (r = 0; r < bh; r++) {
+ memset(dst, 128, bw);
dst += stride;
}
}
-static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above,
+static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
const uint8_t *left) {
int i, r, expected_dc, sum = 0;
(void)above;
- for (i = 0; i < bs; i++) sum += left[i];
- expected_dc = (sum + (bs >> 1)) / bs;
+ for (i = 0; i < bh; i++) sum += left[i];
+ expected_dc = (sum + (bh >> 1)) / bh;
- for (r = 0; r < bs; r++) {
- memset(dst, expected_dc, bs);
+ for (r = 0; r < bh; r++) {
+ memset(dst, expected_dc, bw);
dst += stride;
}
}
-static INLINE void dc_top_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
- const uint8_t *above, const uint8_t *left) {
+static INLINE void dc_top_predictor(uint8_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint8_t *above,
+ const uint8_t *left) {
int i, r, expected_dc, sum = 0;
(void)left;
- for (i = 0; i < bs; i++) sum += above[i];
- expected_dc = (sum + (bs >> 1)) / bs;
+ for (i = 0; i < bw; i++) sum += above[i];
+ expected_dc = (sum + (bw >> 1)) / bw;
- for (r = 0; r < bs; r++) {
- memset(dst, expected_dc, bs);
+ for (r = 0; r < bh; r++) {
+ memset(dst, expected_dc, bw);
dst += stride;
}
}
-static INLINE void dc_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
+static INLINE void dc_predictor(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
const uint8_t *above, const uint8_t *left) {
int i, r, expected_dc, sum = 0;
- const int count = 2 * bs;
+ const int count = bw + bh;
- for (i = 0; i < bs; i++) {
+ for (i = 0; i < bw; i++) {
sum += above[i];
+ }
+ for (i = 0; i < bh; i++) {
sum += left[i];
}
expected_dc = (sum + (count >> 1)) / count;
- for (r = 0; r < bs; r++) {
- memset(dst, expected_dc, bs);
+ for (r = 0; r < bh; r++) {
+ memset(dst, expected_dc, bw);
dst += stride;
}
}
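
Editor's note: with rectangular blocks the DC predictor above averages bw above samples plus bh left samples, so the divisor becomes bw + bh instead of 2 * bs, and the (count >> 1) term keeps the division round-to-nearest. A compact standalone sketch of that flow (illustrative names, mirroring the diff):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative: DC prediction for a bw x bh block.  For bw = 4, bh = 8 with
   all neighbors equal to 100, expected_dc comes out as 100. */
static void dc_pred_sketch(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
                           const uint8_t *above, const uint8_t *left) {
  int sum = 0;
  const int count = bw + bh;
  for (int i = 0; i < bw; i++) sum += above[i];
  for (int i = 0; i < bh; i++) sum += left[i];
  const int expected_dc = (sum + (count >> 1)) / count; /* rounded average */
  for (int r = 0; r < bh; r++) {
    memset(dst, expected_dc, bw);
    dst += stride;
  }
}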
@@ -546,14 +565,14 @@ void aom_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
#if CONFIG_HIGHBITDEPTH
static INLINE void highbd_d207e_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r, c;
(void)above;
(void)bd;
- for (r = 0; r < bs; ++r) {
- for (c = 0; c < bs; ++c) {
+ for (r = 0; r < bh; ++r) {
+ for (c = 0; c < bw; ++c) {
dst[c] = c & 1 ? AVG3(left[(c >> 1) + r], left[(c >> 1) + r + 1],
left[(c >> 1) + r + 2])
: AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]);
@@ -563,13 +582,13 @@ static INLINE void highbd_d207e_predictor(uint16_t *dst, ptrdiff_t stride,
}
static INLINE void highbd_d63e_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r, c;
(void)left;
(void)bd;
- for (r = 0; r < bs; ++r) {
- for (c = 0; c < bs; ++c) {
+ for (r = 0; r < bh; ++r) {
+ for (c = 0; c < bw; ++c) {
dst[c] = r & 1 ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1],
above[(r >> 1) + c + 2])
: AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
@@ -579,111 +598,111 @@ static INLINE void highbd_d63e_predictor(uint16_t *dst, ptrdiff_t stride,
}
static INLINE void highbd_d45e_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r, c;
(void)left;
(void)bd;
- for (r = 0; r < bs; ++r) {
- for (c = 0; c < bs; ++c) {
+ for (r = 0; r < bh; ++r) {
+ for (c = 0; c < bw; ++c) {
dst[c] = AVG3(above[r + c], above[r + c + 1],
- above[r + c + 1 + (r + c + 2 < bs * 2)]);
+ above[r + c + 1 + (r + c + 2 < bw + bh)]);
}
dst += stride;
}
}
static INLINE void highbd_d117_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r, c;
(void)bd;
// first row
- for (c = 0; c < bs; c++) dst[c] = AVG2(above[c - 1], above[c]);
+ for (c = 0; c < bw; c++) dst[c] = AVG2(above[c - 1], above[c]);
dst += stride;
// second row
dst[0] = AVG3(left[0], above[-1], above[0]);
- for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
+ for (c = 1; c < bw; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
dst += stride;
// the rest of first col
dst[0] = AVG3(above[-1], left[0], left[1]);
- for (r = 3; r < bs; ++r)
+ for (r = 3; r < bh; ++r)
dst[(r - 2) * stride] = AVG3(left[r - 3], left[r - 2], left[r - 1]);
// the rest of the block
- for (r = 2; r < bs; ++r) {
- for (c = 1; c < bs; c++) dst[c] = dst[-2 * stride + c - 1];
+ for (r = 2; r < bh; ++r) {
+ for (c = 1; c < bw; c++) dst[c] = dst[-2 * stride + c - 1];
dst += stride;
}
}
static INLINE void highbd_d135_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r, c;
(void)bd;
dst[0] = AVG3(left[0], above[-1], above[0]);
- for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
+ for (c = 1; c < bw; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]);
dst[stride] = AVG3(above[-1], left[0], left[1]);
- for (r = 2; r < bs; ++r)
+ for (r = 2; r < bh; ++r)
dst[r * stride] = AVG3(left[r - 2], left[r - 1], left[r]);
dst += stride;
- for (r = 1; r < bs; ++r) {
- for (c = 1; c < bs; c++) dst[c] = dst[-stride + c - 1];
+ for (r = 1; r < bh; ++r) {
+ for (c = 1; c < bw; c++) dst[c] = dst[-stride + c - 1];
dst += stride;
}
}
static INLINE void highbd_d153_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r, c;
(void)bd;
dst[0] = AVG2(above[-1], left[0]);
- for (r = 1; r < bs; r++) dst[r * stride] = AVG2(left[r - 1], left[r]);
+ for (r = 1; r < bh; r++) dst[r * stride] = AVG2(left[r - 1], left[r]);
dst++;
dst[0] = AVG3(left[0], above[-1], above[0]);
dst[stride] = AVG3(above[-1], left[0], left[1]);
- for (r = 2; r < bs; r++)
+ for (r = 2; r < bh; r++)
dst[r * stride] = AVG3(left[r - 2], left[r - 1], left[r]);
dst++;
- for (c = 0; c < bs - 2; c++)
+ for (c = 0; c < bw - 2; c++)
dst[c] = AVG3(above[c - 1], above[c], above[c + 1]);
dst += stride;
- for (r = 1; r < bs; ++r) {
- for (c = 0; c < bs - 2; c++) dst[c] = dst[-stride + c - 2];
+ for (r = 1; r < bh; ++r) {
+ for (c = 0; c < bw - 2; c++) dst[c] = dst[-stride + c - 2];
dst += stride;
}
}
-static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
+static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r;
(void)left;
(void)bd;
- for (r = 0; r < bs; r++) {
- memcpy(dst, above, bs * sizeof(uint16_t));
+ for (r = 0; r < bh; r++) {
+ memcpy(dst, above, bw * sizeof(uint16_t));
dst += stride;
}
}
-static INLINE void highbd_h_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
+static INLINE void highbd_h_predictor(uint16_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r;
(void)above;
(void)bd;
- for (r = 0; r < bs; r++) {
- aom_memset16(dst, left[r], bs);
+ for (r = 0; r < bh; r++) {
+ aom_memset16(dst, left[r], bw);
dst += stride;
}
}
@@ -777,39 +796,42 @@ void aom_highbd_d153_predictor_2x2_c(uint16_t *dst, ptrdiff_t stride,
#if CONFIG_ALT_INTRA
static INLINE void highbd_paeth_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r, c;
const uint16_t ytop_left = above[-1];
(void)bd;
- for (r = 0; r < bs; r++) {
- for (c = 0; c < bs; c++)
+ for (r = 0; r < bh; r++) {
+ for (c = 0; c < bw; c++)
dst[c] = paeth_predictor_single(left[r], above[c], ytop_left);
dst += stride;
}
}
static INLINE void highbd_smooth_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh,
+ const uint16_t *above,
const uint16_t *left, int bd) {
- const uint16_t below_pred = left[bs - 1]; // estimated by bottom-left pixel
- const uint16_t right_pred = above[bs - 1]; // estimated by top-right pixel
- const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ const uint16_t below_pred = left[bh - 1]; // estimated by bottom-left pixel
+ const uint16_t right_pred = above[bw - 1]; // estimated by top-right pixel
+ const uint8_t *const sm_weights_w = sm_weight_arrays + bw;
+ const uint8_t *const sm_weights_h = sm_weight_arrays + bh;
// scale = 2 * 2^sm_weight_log2_scale
const int log2_scale = 1 + sm_weight_log2_scale;
const uint16_t scale = (1 << sm_weight_log2_scale);
- sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+ sm_weights_sanity_checks(sm_weights_w, sm_weights_h, scale,
+ log2_scale + sizeof(*dst));
int r;
- for (r = 0; r < bs; ++r) {
+ for (r = 0; r < bh; ++r) {
int c;
- for (c = 0; c < bs; ++c) {
+ for (c = 0; c < bw; ++c) {
const uint16_t pixels[] = { above[c], below_pred, left[r], right_pred };
- const uint8_t weights[] = { sm_weights[r], scale - sm_weights[r],
- sm_weights[c], scale - sm_weights[c] };
+ const uint8_t weights[] = { sm_weights_h[r], scale - sm_weights_h[r],
+ sm_weights_w[c], scale - sm_weights_w[c] };
uint32_t this_pred = 0;
int i;
- assert(scale >= sm_weights[r] && scale >= sm_weights[c]);
+ assert(scale >= sm_weights_h[r] && scale >= sm_weights_w[c]);
for (i = 0; i < 4; ++i) {
this_pred += weights[i] * pixels[i];
}
@@ -821,19 +843,21 @@ static INLINE void highbd_smooth_predictor(uint16_t *dst, ptrdiff_t stride,
#if CONFIG_SMOOTH_HV
static INLINE void highbd_smooth_v_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh,
+ const uint16_t *above,
const uint16_t *left, int bd) {
- const uint16_t below_pred = left[bs - 1]; // estimated by bottom-left pixel
- const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ const uint16_t below_pred = left[bh - 1]; // estimated by bottom-left pixel
+ const uint8_t *const sm_weights = sm_weight_arrays + bh;
// scale = 2^sm_weight_log2_scale
const int log2_scale = sm_weight_log2_scale;
const uint16_t scale = (1 << sm_weight_log2_scale);
- sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+ sm_weights_sanity_checks(sm_weights, sm_weights, scale,
+ log2_scale + sizeof(*dst));
int r;
- for (r = 0; r < bs; r++) {
+ for (r = 0; r < bh; r++) {
int c;
- for (c = 0; c < bs; ++c) {
+ for (c = 0; c < bw; ++c) {
const uint16_t pixels[] = { above[c], below_pred };
const uint8_t weights[] = { sm_weights[r], scale - sm_weights[r] };
uint32_t this_pred = 0;
@@ -849,19 +873,21 @@ static INLINE void highbd_smooth_v_predictor(uint16_t *dst, ptrdiff_t stride,
}
static INLINE void highbd_smooth_h_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh,
+ const uint16_t *above,
const uint16_t *left, int bd) {
- const uint16_t right_pred = above[bs - 1]; // estimated by top-right pixel
- const uint8_t *const sm_weights = sm_weight_arrays + bs;
+ const uint16_t right_pred = above[bw - 1]; // estimated by top-right pixel
+ const uint8_t *const sm_weights = sm_weight_arrays + bw;
// scale = 2^sm_weight_log2_scale
const int log2_scale = sm_weight_log2_scale;
const uint16_t scale = (1 << sm_weight_log2_scale);
- sm_weights_sanity_checks(sm_weights, scale, log2_scale + sizeof(*dst));
+ sm_weights_sanity_checks(sm_weights, sm_weights, scale,
+ log2_scale + sizeof(*dst));
int r;
- for (r = 0; r < bs; r++) {
+ for (r = 0; r < bh; r++) {
int c;
- for (c = 0; c < bs; ++c) {
+ for (c = 0; c < bw; ++c) {
const uint16_t pixels[] = { left[r], right_pred };
const uint8_t weights[] = { sm_weights[c], scale - sm_weights[c] };
uint32_t this_pred = 0;
@@ -878,15 +904,15 @@ static INLINE void highbd_smooth_h_predictor(uint16_t *dst, ptrdiff_t stride,
#endif
#else
-static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
+static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int r, c;
int ytop_left = above[-1];
(void)bd;
- for (r = 0; r < bs; r++) {
- for (c = 0; c < bs; c++)
+ for (r = 0; r < bh; r++) {
+ for (c = 0; c < bw; c++)
dst[c] = clip_pixel_highbd(left[r] + above[c] - ytop_left, bd);
dst += stride;
}
@@ -894,66 +920,71 @@ static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
#endif // CONFIG_ALT_INTRA
static INLINE void highbd_dc_128_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh,
+ const uint16_t *above,
const uint16_t *left, int bd) {
int r;
(void)above;
(void)left;
- for (r = 0; r < bs; r++) {
- aom_memset16(dst, 128 << (bd - 8), bs);
+ for (r = 0; r < bh; r++) {
+ aom_memset16(dst, 128 << (bd - 8), bw);
dst += stride;
}
}
static INLINE void highbd_dc_left_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh,
+ const uint16_t *above,
const uint16_t *left, int bd) {
int i, r, expected_dc, sum = 0;
(void)above;
(void)bd;
- for (i = 0; i < bs; i++) sum += left[i];
- expected_dc = (sum + (bs >> 1)) / bs;
+ for (i = 0; i < bh; i++) sum += left[i];
+ expected_dc = (sum + (bh >> 1)) / bh;
- for (r = 0; r < bs; r++) {
- aom_memset16(dst, expected_dc, bs);
+ for (r = 0; r < bh; r++) {
+ aom_memset16(dst, expected_dc, bw);
dst += stride;
}
}
static INLINE void highbd_dc_top_predictor(uint16_t *dst, ptrdiff_t stride,
- int bs, const uint16_t *above,
+ int bw, int bh,
+ const uint16_t *above,
const uint16_t *left, int bd) {
int i, r, expected_dc, sum = 0;
(void)left;
(void)bd;
- for (i = 0; i < bs; i++) sum += above[i];
- expected_dc = (sum + (bs >> 1)) / bs;
+ for (i = 0; i < bw; i++) sum += above[i];
+ expected_dc = (sum + (bw >> 1)) / bw;
- for (r = 0; r < bs; r++) {
- aom_memset16(dst, expected_dc, bs);
+ for (r = 0; r < bh; r++) {
+ aom_memset16(dst, expected_dc, bw);
dst += stride;
}
}
-static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
- const uint16_t *above,
+static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bw,
+ int bh, const uint16_t *above,
const uint16_t *left, int bd) {
int i, r, expected_dc, sum = 0;
- const int count = 2 * bs;
+ const int count = bw + bh;
(void)bd;
- for (i = 0; i < bs; i++) {
+ for (i = 0; i < bw; i++) {
sum += above[i];
+ }
+ for (i = 0; i < bh; i++) {
sum += left[i];
}
expected_dc = (sum + (count >> 1)) / count;
- for (r = 0; r < bs; r++) {
- aom_memset16(dst, expected_dc, bs);
+ for (r = 0; r < bh; r++) {
+ aom_memset16(dst, expected_dc, bw);
dst += stride;
}
}
@@ -962,99 +993,121 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
// This serves as a wrapper function, so that all the prediction functions
// can be unified and accessed as a pointer array. Note that the boundary
// above and left are not necessarily used all the time.
-#define intra_pred_sized(type, size) \
- void aom_##type##_predictor_##size##x##size##_c( \
- uint8_t *dst, ptrdiff_t stride, const uint8_t *above, \
- const uint8_t *left) { \
- type##_predictor(dst, stride, size, above, left); \
+#define intra_pred_sized(type, width, height) \
+ void aom_##type##_predictor_##width##x##height##_c( \
+ uint8_t *dst, ptrdiff_t stride, const uint8_t *above, \
+ const uint8_t *left) { \
+ type##_predictor(dst, stride, width, height, above, left); \
}
#if CONFIG_HIGHBITDEPTH
-#define intra_pred_highbd_sized(type, size) \
- void aom_highbd_##type##_predictor_##size##x##size##_c( \
- uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \
- const uint16_t *left, int bd) { \
- highbd_##type##_predictor(dst, stride, size, above, left, bd); \
+#define intra_pred_highbd_sized(type, width, height) \
+ void aom_highbd_##type##_predictor_##width##x##height##_c( \
+ uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \
+ const uint16_t *left, int bd) { \
+ highbd_##type##_predictor(dst, stride, width, height, above, left, bd); \
}
/* clang-format off */
#if CONFIG_TX64X64
-#define intra_pred_allsizes(type) \
- intra_pred_sized(type, 2) \
- intra_pred_sized(type, 4) \
- intra_pred_sized(type, 8) \
- intra_pred_sized(type, 16) \
- intra_pred_sized(type, 32) \
- intra_pred_sized(type, 64) \
- intra_pred_highbd_sized(type, 2) \
- intra_pred_highbd_sized(type, 4) \
- intra_pred_highbd_sized(type, 8) \
- intra_pred_highbd_sized(type, 16) \
- intra_pred_highbd_sized(type, 32) \
- intra_pred_highbd_sized(type, 64)
-
+#define intra_pred_rectangular(type) \
+ intra_pred_sized(type, 4, 8) \
+ intra_pred_sized(type, 8, 4) \
+ intra_pred_sized(type, 8, 16) \
+ intra_pred_sized(type, 16, 8) \
+ intra_pred_sized(type, 16, 32) \
+ intra_pred_sized(type, 32, 16) \
+ intra_pred_highbd_sized(type, 4, 8) \
+ intra_pred_highbd_sized(type, 8, 4) \
+ intra_pred_highbd_sized(type, 8, 16) \
+ intra_pred_highbd_sized(type, 16, 8) \
+ intra_pred_highbd_sized(type, 16, 32) \
+ intra_pred_highbd_sized(type, 32, 16)
#define intra_pred_above_4x4(type) \
- intra_pred_sized(type, 8) \
- intra_pred_sized(type, 16) \
- intra_pred_sized(type, 32) \
- intra_pred_sized(type, 64) \
- intra_pred_highbd_sized(type, 4) \
- intra_pred_highbd_sized(type, 8) \
- intra_pred_highbd_sized(type, 16) \
- intra_pred_highbd_sized(type, 32) \
- intra_pred_highbd_sized(type, 64)
-#else // CONFIG_TX64X64
+ intra_pred_sized(type, 8, 8) \
+ intra_pred_sized(type, 16, 16) \
+ intra_pred_sized(type, 32, 32) \
+ intra_pred_sized(type, 64, 64) \
+ intra_pred_highbd_sized(type, 4, 4) \
+ intra_pred_highbd_sized(type, 8, 8) \
+ intra_pred_highbd_sized(type, 16, 16) \
+ intra_pred_highbd_sized(type, 32, 32) \
+ intra_pred_highbd_sized(type, 64, 64) \
+ intra_pred_rectangular(type)
#define intra_pred_allsizes(type) \
- intra_pred_sized(type, 2) \
- intra_pred_sized(type, 4) \
- intra_pred_sized(type, 8) \
- intra_pred_sized(type, 16) \
- intra_pred_sized(type, 32) \
- intra_pred_highbd_sized(type, 2) \
- intra_pred_highbd_sized(type, 4) \
- intra_pred_highbd_sized(type, 8) \
- intra_pred_highbd_sized(type, 16) \
- intra_pred_highbd_sized(type, 32)
-
+ intra_pred_sized(type, 2, 2) \
+ intra_pred_sized(type, 4, 4) \
+ intra_pred_highbd_sized(type, 2, 2) \
+ intra_pred_above_4x4(type)
+#else // CONFIG_TX64X64
+#define intra_pred_rectangular(type) \
+ intra_pred_sized(type, 4, 8) \
+ intra_pred_sized(type, 8, 4) \
+ intra_pred_sized(type, 8, 16) \
+ intra_pred_sized(type, 16, 8) \
+ intra_pred_sized(type, 16, 32) \
+ intra_pred_sized(type, 32, 16) \
+ intra_pred_highbd_sized(type, 4, 8) \
+ intra_pred_highbd_sized(type, 8, 4) \
+ intra_pred_highbd_sized(type, 8, 16) \
+ intra_pred_highbd_sized(type, 16, 8) \
+ intra_pred_highbd_sized(type, 16, 32) \
+ intra_pred_highbd_sized(type, 32, 16)
#define intra_pred_above_4x4(type) \
- intra_pred_sized(type, 8) \
- intra_pred_sized(type, 16) \
- intra_pred_sized(type, 32) \
- intra_pred_highbd_sized(type, 4) \
- intra_pred_highbd_sized(type, 8) \
- intra_pred_highbd_sized(type, 16) \
- intra_pred_highbd_sized(type, 32)
+ intra_pred_sized(type, 8, 8) \
+ intra_pred_sized(type, 16, 16) \
+ intra_pred_sized(type, 32, 32) \
+ intra_pred_highbd_sized(type, 4, 4) \
+ intra_pred_highbd_sized(type, 8, 8) \
+ intra_pred_highbd_sized(type, 16, 16) \
+ intra_pred_highbd_sized(type, 32, 32) \
+ intra_pred_rectangular(type)
+#define intra_pred_allsizes(type) \
+ intra_pred_sized(type, 2, 2) \
+ intra_pred_sized(type, 4, 4) \
+ intra_pred_highbd_sized(type, 2, 2) \
+ intra_pred_above_4x4(type)
#endif // CONFIG_TX64X64
#else
#if CONFIG_TX64X64
-#define intra_pred_allsizes(type) \
- intra_pred_sized(type, 2) \
- intra_pred_sized(type, 4) \
- intra_pred_sized(type, 8) \
- intra_pred_sized(type, 16) \
- intra_pred_sized(type, 32) \
- intra_pred_sized(type, 64)
-
+#define intra_pred_rectangular(type) \
+ intra_pred_sized(type, 4, 8) \
+ intra_pred_sized(type, 8, 4) \
+ intra_pred_sized(type, 8, 16) \
+ intra_pred_sized(type, 16, 8) \
+ intra_pred_sized(type, 16, 32) \
+ intra_pred_sized(type, 32, 16)
#define intra_pred_above_4x4(type) \
- intra_pred_sized(type, 8) \
- intra_pred_sized(type, 16) \
- intra_pred_sized(type, 32) \
- intra_pred_sized(type, 64)
-#else // CONFIG_TX64X64
+ intra_pred_sized(type, 8, 8) \
+ intra_pred_sized(type, 16, 16) \
+ intra_pred_sized(type, 32, 32) \
+ intra_pred_sized(type, 64, 64) \
+ intra_pred_rectangular(type)
#define intra_pred_allsizes(type) \
- intra_pred_sized(type, 2) \
- intra_pred_sized(type, 4) \
- intra_pred_sized(type, 8) \
- intra_pred_sized(type, 16) \
- intra_pred_sized(type, 32)
-
+ intra_pred_sized(type, 2, 2) \
+ intra_pred_sized(type, 4, 4) \
+ intra_pred_above_4x4(type)
+#else // CONFIG_TX64X64
+#define intra_pred_rectangular(type) \
+ intra_pred_sized(type, 4, 8) \
+ intra_pred_sized(type, 8, 4) \
+ intra_pred_sized(type, 8, 16) \
+ intra_pred_sized(type, 16, 8) \
+ intra_pred_sized(type, 16, 32) \
+ intra_pred_sized(type, 32, 16)
#define intra_pred_above_4x4(type) \
- intra_pred_sized(type, 8) \
- intra_pred_sized(type, 16) \
- intra_pred_sized(type, 32)
+ intra_pred_sized(type, 8, 8) \
+ intra_pred_sized(type, 16, 16) \
+ intra_pred_sized(type, 32, 32) \
+ intra_pred_rectangular(type)
+#define intra_pred_allsizes(type) \
+ intra_pred_sized(type, 2, 2) \
+ intra_pred_sized(type, 4, 4) \
+ intra_pred_above_4x4(type)
#endif // CONFIG_TX64X64
+
#endif // CONFIG_HIGHBITDEPTH
intra_pred_allsizes(d207e)
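
Editor's note: to make the new wrapper scheme concrete, intra_pred_sized(type, width, height) now stamps out one aom_<type>_predictor_<w>x<h>_c function per size, and intra_pred_rectangular adds the non-square sizes to every family. Roughly what a single expansion looks like (hand-expanded here for illustration; dc at 4x8 is just an example type and size):

/* Hand expansion of intra_pred_sized(dc, 4, 8), for illustration only. */
void aom_dc_predictor_4x8_c(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *above, const uint8_t *left) {
  dc_predictor(dst, stride, 4, 8, above, left);
}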
diff --git a/third_party/aom/aom_dsp/inv_txfm.c b/third_party/aom/aom_dsp/inv_txfm.c
index 6e7d8c928..398eb0a12 100644
--- a/third_party/aom/aom_dsp/inv_txfm.c
+++ b/third_party/aom/aom_dsp/inv_txfm.c
@@ -14,6 +14,9 @@
#include "./aom_dsp_rtcd.h"
#include "aom_dsp/inv_txfm.h"
+#if CONFIG_DAALA_DCT4 || CONFIG_DAALA_DCT8
+#include "av1/common/daala_tx.h"
+#endif
void aom_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
@@ -93,6 +96,18 @@ void aom_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
}
}
+#if CONFIG_DAALA_DCT4
+void aom_idct4_c(const tran_low_t *input, tran_low_t *output) {
+ int i;
+ od_coeff x[4];
+ od_coeff y[4];
+ for (i = 0; i < 4; i++) y[i] = input[i];
+ od_bin_idct4(x, 1, y);
+ for (i = 0; i < 4; i++) output[i] = (tran_low_t)x[i];
+}
+
+#else
+
void aom_idct4_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step[4];
tran_high_t temp1, temp2;
@@ -112,6 +127,7 @@ void aom_idct4_c(const tran_low_t *input, tran_low_t *output) {
output[2] = WRAPLOW(step[1] - step[2]);
output[3] = WRAPLOW(step[0] - step[3]);
}
+#endif
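
Editor's note: both the Daala variant and the original aom_idct4_c above are 1-D kernels; the surrounding aom_idct4x4_16_add_c applies them once per row and once per column, then adds the rounded result back into the prediction. The same rows-then-columns shape appears in the 32x32 MRC functions added below and in the removed high-bitdepth 4x4 path. A compact sketch of that structure, with the 1-D kernel passed in so the example stays self-contained (the final shift of 4 matches the 4x4 case; larger transforms use larger shifts):

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

typedef int32_t tran_low_t; /* assumed coefficient width for this sketch */
typedef void (*idct_1d_fn)(const tran_low_t *in, tran_low_t *out);

static uint8_t clip_pixel_sketch(int v) {
  return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Illustrative 4x4 inverse transform + reconstruction, rows then columns. */
static void idct4x4_add_sketch(idct_1d_fn idct4, const tran_low_t *input,
                               uint8_t *dest, int stride) {
  tran_low_t out[4 * 4], tmp_in[4], tmp_out[4];
  tran_low_t *outptr = out;
  for (int i = 0; i < 4; ++i) { /* rows */
    idct4(input, outptr);
    input += 4;
    outptr += 4;
  }
  for (int i = 0; i < 4; ++i) { /* columns */
    for (int j = 0; j < 4; ++j) tmp_in[j] = out[j * 4 + i];
    idct4(tmp_in, tmp_out);
    for (int j = 0; j < 4; ++j)
      dest[j * stride + i] = clip_pixel_sketch(
          dest[j * stride + i] + ROUND_POWER_OF_TWO(tmp_out[j], 4));
  }
}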
void aom_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[4 * 4];
@@ -156,6 +172,18 @@ void aom_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
}
}
+#if CONFIG_DAALA_DCT8
+void aom_idct8_c(const tran_low_t *input, tran_low_t *output) {
+ int i;
+ od_coeff x[8];
+ od_coeff y[8];
+ for (i = 0; i < 8; i++) y[i] = (od_coeff)input[i];
+ od_bin_idct8(x, 1, y);
+ for (i = 0; i < 8; i++) output[i] = (tran_low_t)x[i];
+}
+
+#else
+
void aom_idct8_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[8], step2[8];
tran_high_t temp1, temp2;
@@ -209,6 +237,7 @@ void aom_idct8_c(const tran_low_t *input, tran_low_t *output) {
output[6] = WRAPLOW(step1[1] - step1[6]);
output[7] = WRAPLOW(step1[0] - step1[7]);
}
+#endif
void aom_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[8 * 8];
@@ -284,6 +313,18 @@ void aom_iadst4_c(const tran_low_t *input, tran_low_t *output) {
output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3));
}
+#if CONFIG_DAALA_DCT8
+void aom_iadst8_c(const tran_low_t *input, tran_low_t *output) {
+ int i;
+ od_coeff x[8];
+ od_coeff y[8];
+ for (i = 0; i < 8; i++) y[i] = (od_coeff)input[i];
+ od_bin_idst8(x, 1, y);
+ for (i = 0; i < 8; i++) output[i] = (tran_low_t)x[i];
+}
+
+#else
+
void aom_iadst8_c(const tran_low_t *input, tran_low_t *output) {
int s0, s1, s2, s3, s4, s5, s6, s7;
@@ -361,6 +402,8 @@ void aom_iadst8_c(const tran_low_t *input, tran_low_t *output) {
output[7] = WRAPLOW(-x1);
}
+#endif
+
void aom_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
@@ -1179,6 +1222,109 @@ void aom_idct32_c(const tran_low_t *input, tran_low_t *output) {
output[31] = WRAPLOW(step1[0] - step1[31]);
}
+#if CONFIG_MRC_TX
+void aom_imrc32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+ int stride, int *mask) {
+ tran_low_t out[32 * 32];
+ tran_low_t *outptr = out;
+ int i, j;
+ tran_low_t temp_in[32], temp_out[32];
+
+ // Rows
+ for (i = 0; i < 32; ++i) {
+ int16_t zero_coeff[16];
+ for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1];
+ for (j = 0; j < 8; ++j)
+ zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
+ for (j = 0; j < 4; ++j)
+ zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
+ for (j = 0; j < 2; ++j)
+ zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
+
+ if (zero_coeff[0] | zero_coeff[1])
+ aom_idct32_c(input, outptr);
+ else
+ memset(outptr, 0, sizeof(tran_low_t) * 32);
+ input += 32;
+ outptr += 32;
+ }
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
+ aom_idct32_c(temp_in, temp_out);
+ for (j = 0; j < 32; ++j) {
+ // Only add the coefficient if the mask value is 1
+ int mask_val = mask[j * 32 + i];
+ dest[j * stride + i] =
+ mask_val ? clip_pixel_add(dest[j * stride + i],
+ ROUND_POWER_OF_TWO(temp_out[j], 6))
+ : dest[j * stride + i];
+ }
+ }
+}
+
+void aom_imrc32x32_135_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int *mask) {
+ tran_low_t out[32 * 32] = { 0 };
+ tran_low_t *outptr = out;
+ int i, j;
+ tran_low_t temp_in[32], temp_out[32];
+
+ // Rows
+ // only upper-left 16x16 has non-zero coeff
+ for (i = 0; i < 16; ++i) {
+ aom_idct32_c(input, outptr);
+ input += 32;
+ outptr += 32;
+ }
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
+ aom_idct32_c(temp_in, temp_out);
+ for (j = 0; j < 32; ++j) {
+ // Only add the coefficient if the mask value is 1
+ int mask_val = mask[j * 32 + i];
+ dest[j * stride + i] =
+ mask_val ? clip_pixel_add(dest[j * stride + i],
+ ROUND_POWER_OF_TWO(temp_out[j], 6))
+ : dest[j * stride + i];
+ }
+ }
+}
+
+void aom_imrc32x32_34_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int *mask) {
+ tran_low_t out[32 * 32] = { 0 };
+ tran_low_t *outptr = out;
+ int i, j;
+ tran_low_t temp_in[32], temp_out[32];
+
+ // Rows
+ // only upper-left 8x8 has non-zero coeff
+ for (i = 0; i < 8; ++i) {
+ aom_idct32_c(input, outptr);
+ input += 32;
+ outptr += 32;
+ }
+
+ // Columns
+ for (i = 0; i < 32; ++i) {
+ for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
+ aom_idct32_c(temp_in, temp_out);
+ for (j = 0; j < 32; ++j) {
+ // Only add the coefficient if the mask value is 1
+ int mask_val = mask[j * 32 + i];
+ dest[j * stride + i] =
+ mask_val ? clip_pixel_add(dest[j * stride + i],
+ ROUND_POWER_OF_TWO(temp_out[j], 6))
+ : dest[j * stride + i];
+ }
+ }
+}
+#endif // CONFIG_MRC_TX
+
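
Editor's note: the new aom_imrc32x32_*_add_c functions above run the ordinary 32x32 inverse DCT and differ only in the final step: each reconstructed pixel is written back only where the mask is 1, otherwise the destination sample is left untouched. A minimal sketch of just that masked write-back (names illustrative):

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

static uint8_t clip_pixel_sketch(int v) {
  return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Illustrative: add one column of inverse-transform output under a mask,
   mirroring the column loop in aom_imrc32x32_1024_add_c. */
static void masked_col_add_sketch(uint8_t *dest, int stride, int col,
                                  const int32_t *temp_out, const int *mask) {
  for (int j = 0; j < 32; ++j) {
    if (mask[j * 32 + col])
      dest[j * stride + col] = clip_pixel_sketch(
          dest[j * stride + col] + ROUND_POWER_OF_TWO(temp_out[j], 6));
  }
}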
void aom_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
int stride) {
tran_low_t out[32 * 32];
@@ -1283,7 +1429,6 @@ void aom_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
}
}
-#if CONFIG_HIGHBITDEPTH
void aom_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
@@ -1374,936 +1519,3 @@ void aom_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
dest++;
}
}
-
-void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
- tran_low_t step[4];
- tran_high_t temp1, temp2;
- (void)bd;
- // stage 1
- temp1 = (input[0] + input[2]) * cospi_16_64;
- temp2 = (input[0] - input[2]) * cospi_16_64;
- step[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
- temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
- step[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- // stage 2
- output[0] = HIGHBD_WRAPLOW(step[0] + step[3], bd);
- output[1] = HIGHBD_WRAPLOW(step[1] + step[2], bd);
- output[2] = HIGHBD_WRAPLOW(step[1] - step[2], bd);
- output[3] = HIGHBD_WRAPLOW(step[0] - step[3], bd);
-}
-
-void aom_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
- tran_low_t out[4 * 4];
- tran_low_t *outptr = out;
- int i, j;
- tran_low_t temp_in[4], temp_out[4];
- uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
-
- // Rows
- for (i = 0; i < 4; ++i) {
- aom_highbd_idct4_c(input, outptr, bd);
- input += 4;
- outptr += 4;
- }
-
- // Columns
- for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- aom_highbd_idct4_c(temp_in, temp_out, bd);
- for (j = 0; j < 4; ++j) {
- dest[j * stride + i] = highbd_clip_pixel_add(
- dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
- }
- }
-}
-
-void aom_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
- int dest_stride, int bd) {
- int i;
- tran_high_t a1;
- tran_low_t out =
- HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
- uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
-
- out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
- a1 = ROUND_POWER_OF_TWO(out, 4);
-
- for (i = 0; i < 4; i++) {
- dest[0] = highbd_clip_pixel_add(dest[0], a1, bd);
- dest[1] = highbd_clip_pixel_add(dest[1], a1, bd);
- dest[2] = highbd_clip_pixel_add(dest[2], a1, bd);
- dest[3] = highbd_clip_pixel_add(dest[3], a1, bd);
- dest += dest_stride;
- }
-}
-
-void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
- tran_low_t step1[8], step2[8];
- tran_high_t temp1, temp2;
- // stage 1
- step1[0] = input[0];
- step1[2] = input[4];
- step1[1] = input[2];
- step1[3] = input[6];
- temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
- temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
- step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
- temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
- step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- // stage 2 & stage 3 - even half
- aom_highbd_idct4_c(step1, step1, bd);
-
- // stage 2 - odd half
- step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
- step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
- step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
- step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
-
- // stage 3 - odd half
- step1[4] = step2[4];
- temp1 = (step2[6] - step2[5]) * cospi_16_64;
- temp2 = (step2[5] + step2[6]) * cospi_16_64;
- step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step1[7] = step2[7];
-
- // stage 4
- output[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
- output[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
- output[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
- output[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
- output[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
- output[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
- output[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
- output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
-}
-
-void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
- tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
-
- tran_low_t x0 = input[0];
- tran_low_t x1 = input[1];
- tran_low_t x2 = input[2];
- tran_low_t x3 = input[3];
- (void)bd;
-
- if (!(x0 | x1 | x2 | x3)) {
- memset(output, 0, 4 * sizeof(*output));
- return;
- }
-
- s0 = sinpi_1_9 * x0;
- s1 = sinpi_2_9 * x0;
- s2 = sinpi_3_9 * x1;
- s3 = sinpi_4_9 * x2;
- s4 = sinpi_1_9 * x2;
- s5 = sinpi_2_9 * x3;
- s6 = sinpi_4_9 * x3;
- s7 = (tran_high_t)HIGHBD_WRAPLOW(x0 - x2 + x3, bd);
-
- s0 = s0 + s3 + s5;
- s1 = s1 - s4 - s6;
- s3 = s2;
- s2 = sinpi_3_9 * s7;
-
- // 1-D transform scaling factor is sqrt(2).
- // The overall dynamic range is 14b (input) + 14b (multiplication scaling)
- // + 1b (addition) = 29b.
- // Hence the output bit depth is 15b.
- output[0] = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s3), bd);
- output[1] = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s3), bd);
- output[2] = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
- output[3] = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s1 - s3), bd);
-}
-
-void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
- tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
-
- tran_low_t x0 = input[7];
- tran_low_t x1 = input[0];
- tran_low_t x2 = input[5];
- tran_low_t x3 = input[2];
- tran_low_t x4 = input[3];
- tran_low_t x5 = input[4];
- tran_low_t x6 = input[1];
- tran_low_t x7 = input[6];
- (void)bd;
-
- if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
- memset(output, 0, 8 * sizeof(*output));
- return;
- }
-
- // stage 1
- s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
- s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
- s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
- s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
- s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
- s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
- s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
- s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
-
- x0 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s4), bd);
- x1 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s5), bd);
- x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 + s6), bd);
- x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 + s7), bd);
- x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 - s4), bd);
- x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 - s5), bd);
- x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 - s6), bd);
- x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 - s7), bd);
-
- // stage 2
- s0 = x0;
- s1 = x1;
- s2 = x2;
- s3 = x3;
- s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
- s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
- s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
- s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
-
- x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
- x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
- x2 = HIGHBD_WRAPLOW(s0 - s2, bd);
- x3 = HIGHBD_WRAPLOW(s1 - s3, bd);
- x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s6), bd);
- x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s7), bd);
- x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s6), bd);
- x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s7), bd);
-
- // stage 3
- s2 = cospi_16_64 * (x2 + x3);
- s3 = cospi_16_64 * (x2 - x3);
- s6 = cospi_16_64 * (x6 + x7);
- s7 = cospi_16_64 * (x6 - x7);
-
- x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
- x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3), bd);
- x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6), bd);
- x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7), bd);
-
- output[0] = HIGHBD_WRAPLOW(x0, bd);
- output[1] = HIGHBD_WRAPLOW(-x4, bd);
- output[2] = HIGHBD_WRAPLOW(x6, bd);
- output[3] = HIGHBD_WRAPLOW(-x2, bd);
- output[4] = HIGHBD_WRAPLOW(x3, bd);
- output[5] = HIGHBD_WRAPLOW(-x7, bd);
- output[6] = HIGHBD_WRAPLOW(x5, bd);
- output[7] = HIGHBD_WRAPLOW(-x1, bd);
-}
-
-void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
- tran_low_t step1[16], step2[16];
- tran_high_t temp1, temp2;
- (void)bd;
-
- // stage 1
- step1[0] = input[0 / 2];
- step1[1] = input[16 / 2];
- step1[2] = input[8 / 2];
- step1[3] = input[24 / 2];
- step1[4] = input[4 / 2];
- step1[5] = input[20 / 2];
- step1[6] = input[12 / 2];
- step1[7] = input[28 / 2];
- step1[8] = input[2 / 2];
- step1[9] = input[18 / 2];
- step1[10] = input[10 / 2];
- step1[11] = input[26 / 2];
- step1[12] = input[6 / 2];
- step1[13] = input[22 / 2];
- step1[14] = input[14 / 2];
- step1[15] = input[30 / 2];
-
- // stage 2
- step2[0] = step1[0];
- step2[1] = step1[1];
- step2[2] = step1[2];
- step2[3] = step1[3];
- step2[4] = step1[4];
- step2[5] = step1[5];
- step2[6] = step1[6];
- step2[7] = step1[7];
-
- temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
- temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
- step2[8] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[15] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
- temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
- step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
- temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
- step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
- temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
- step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- // stage 3
- step1[0] = step2[0];
- step1[1] = step2[1];
- step1[2] = step2[2];
- step1[3] = step2[3];
-
- temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
- temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
- step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
- temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
- step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[9], bd);
- step1[9] = HIGHBD_WRAPLOW(step2[8] - step2[9], bd);
- step1[10] = HIGHBD_WRAPLOW(-step2[10] + step2[11], bd);
- step1[11] = HIGHBD_WRAPLOW(step2[10] + step2[11], bd);
- step1[12] = HIGHBD_WRAPLOW(step2[12] + step2[13], bd);
- step1[13] = HIGHBD_WRAPLOW(step2[12] - step2[13], bd);
- step1[14] = HIGHBD_WRAPLOW(-step2[14] + step2[15], bd);
- step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], bd);
-
- // stage 4
- temp1 = (step1[0] + step1[1]) * cospi_16_64;
- temp2 = (step1[0] - step1[1]) * cospi_16_64;
- step2[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
- temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
- step2[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
- step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
- step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
- step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
-
- step2[8] = step1[8];
- step2[15] = step1[15];
- temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
- temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
- step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
- temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
- step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step2[11] = step1[11];
- step2[12] = step1[12];
-
- // stage 5
- step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[3], bd);
- step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[2], bd);
- step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd);
- step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd);
- step1[4] = step2[4];
- temp1 = (step2[6] - step2[5]) * cospi_16_64;
- temp2 = (step2[5] + step2[6]) * cospi_16_64;
- step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step1[7] = step2[7];
-
- step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[11], bd);
- step1[9] = HIGHBD_WRAPLOW(step2[9] + step2[10], bd);
- step1[10] = HIGHBD_WRAPLOW(step2[9] - step2[10], bd);
- step1[11] = HIGHBD_WRAPLOW(step2[8] - step2[11], bd);
- step1[12] = HIGHBD_WRAPLOW(-step2[12] + step2[15], bd);
- step1[13] = HIGHBD_WRAPLOW(-step2[13] + step2[14], bd);
- step1[14] = HIGHBD_WRAPLOW(step2[13] + step2[14], bd);
- step1[15] = HIGHBD_WRAPLOW(step2[12] + step2[15], bd);
-
- // stage 6
- step2[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
- step2[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
- step2[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
- step2[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
- step2[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
- step2[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
- step2[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
- step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
- step2[8] = step1[8];
- step2[9] = step1[9];
- temp1 = (-step1[10] + step1[13]) * cospi_16_64;
- temp2 = (step1[10] + step1[13]) * cospi_16_64;
- step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = (-step1[11] + step1[12]) * cospi_16_64;
- temp2 = (step1[11] + step1[12]) * cospi_16_64;
- step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step2[14] = step1[14];
- step2[15] = step1[15];
-
- // stage 7
- output[0] = HIGHBD_WRAPLOW(step2[0] + step2[15], bd);
- output[1] = HIGHBD_WRAPLOW(step2[1] + step2[14], bd);
- output[2] = HIGHBD_WRAPLOW(step2[2] + step2[13], bd);
- output[3] = HIGHBD_WRAPLOW(step2[3] + step2[12], bd);
- output[4] = HIGHBD_WRAPLOW(step2[4] + step2[11], bd);
- output[5] = HIGHBD_WRAPLOW(step2[5] + step2[10], bd);
- output[6] = HIGHBD_WRAPLOW(step2[6] + step2[9], bd);
- output[7] = HIGHBD_WRAPLOW(step2[7] + step2[8], bd);
- output[8] = HIGHBD_WRAPLOW(step2[7] - step2[8], bd);
- output[9] = HIGHBD_WRAPLOW(step2[6] - step2[9], bd);
- output[10] = HIGHBD_WRAPLOW(step2[5] - step2[10], bd);
- output[11] = HIGHBD_WRAPLOW(step2[4] - step2[11], bd);
- output[12] = HIGHBD_WRAPLOW(step2[3] - step2[12], bd);
- output[13] = HIGHBD_WRAPLOW(step2[2] - step2[13], bd);
- output[14] = HIGHBD_WRAPLOW(step2[1] - step2[14], bd);
- output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
-}
-
-void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
- tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
- tran_high_t s9, s10, s11, s12, s13, s14, s15;
-
- tran_low_t x0 = input[15];
- tran_low_t x1 = input[0];
- tran_low_t x2 = input[13];
- tran_low_t x3 = input[2];
- tran_low_t x4 = input[11];
- tran_low_t x5 = input[4];
- tran_low_t x6 = input[9];
- tran_low_t x7 = input[6];
- tran_low_t x8 = input[7];
- tran_low_t x9 = input[8];
- tran_low_t x10 = input[5];
- tran_low_t x11 = input[10];
- tran_low_t x12 = input[3];
- tran_low_t x13 = input[12];
- tran_low_t x14 = input[1];
- tran_low_t x15 = input[14];
- (void)bd;
-
- if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
- x13 | x14 | x15)) {
- memset(output, 0, 16 * sizeof(*output));
- return;
- }
-
- // stage 1
- s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
- s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
- s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
- s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
- s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
- s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
- s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
- s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
- s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
- s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
- s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
- s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
- s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
- s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
- s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
- s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
-
- x0 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s8), bd);
- x1 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s9), bd);
- x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 + s10), bd);
- x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 + s11), bd);
- x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s12), bd);
- x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s13), bd);
- x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6 + s14), bd);
- x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7 + s15), bd);
- x8 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 - s8), bd);
- x9 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 - s9), bd);
- x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 - s10), bd);
- x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 - s11), bd);
- x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s12), bd);
- x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s13), bd);
- x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s6 - s14), bd);
- x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s7 - s15), bd);
-
- // stage 2
- s0 = x0;
- s1 = x1;
- s2 = x2;
- s3 = x3;
- s4 = x4;
- s5 = x5;
- s6 = x6;
- s7 = x7;
- s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
- s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
- s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
- s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
- s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
- s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
- s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
- s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
-
- x0 = HIGHBD_WRAPLOW(s0 + s4, bd);
- x1 = HIGHBD_WRAPLOW(s1 + s5, bd);
- x2 = HIGHBD_WRAPLOW(s2 + s6, bd);
- x3 = HIGHBD_WRAPLOW(s3 + s7, bd);
- x4 = HIGHBD_WRAPLOW(s0 - s4, bd);
- x5 = HIGHBD_WRAPLOW(s1 - s5, bd);
- x6 = HIGHBD_WRAPLOW(s2 - s6, bd);
- x7 = HIGHBD_WRAPLOW(s3 - s7, bd);
- x8 = HIGHBD_WRAPLOW(dct_const_round_shift(s8 + s12), bd);
- x9 = HIGHBD_WRAPLOW(dct_const_round_shift(s9 + s13), bd);
- x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s10 + s14), bd);
- x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s11 + s15), bd);
- x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s8 - s12), bd);
- x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s9 - s13), bd);
- x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s10 - s14), bd);
- x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s11 - s15), bd);
-
- // stage 3
- s0 = x0;
- s1 = x1;
- s2 = x2;
- s3 = x3;
- s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
- s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
- s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
- s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
- s8 = x8;
- s9 = x9;
- s10 = x10;
- s11 = x11;
- s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
- s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
- s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
- s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
-
- x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
- x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
- x2 = HIGHBD_WRAPLOW(s0 - s2, bd);
- x3 = HIGHBD_WRAPLOW(s1 - s3, bd);
- x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s6), bd);
- x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s7), bd);
- x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s6), bd);
- x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s7), bd);
- x8 = HIGHBD_WRAPLOW(s8 + s10, bd);
- x9 = HIGHBD_WRAPLOW(s9 + s11, bd);
- x10 = HIGHBD_WRAPLOW(s8 - s10, bd);
- x11 = HIGHBD_WRAPLOW(s9 - s11, bd);
- x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s12 + s14), bd);
- x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s13 + s15), bd);
- x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s12 - s14), bd);
- x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s13 - s15), bd);
-
- // stage 4
- s2 = (-cospi_16_64) * (x2 + x3);
- s3 = cospi_16_64 * (x2 - x3);
- s6 = cospi_16_64 * (x6 + x7);
- s7 = cospi_16_64 * (-x6 + x7);
- s10 = cospi_16_64 * (x10 + x11);
- s11 = cospi_16_64 * (-x10 + x11);
- s14 = (-cospi_16_64) * (x14 + x15);
- s15 = cospi_16_64 * (x14 - x15);
-
- x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
- x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3), bd);
- x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6), bd);
- x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7), bd);
- x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s10), bd);
- x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s11), bd);
- x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s14), bd);
- x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s15), bd);
-
- output[0] = HIGHBD_WRAPLOW(x0, bd);
- output[1] = HIGHBD_WRAPLOW(-x8, bd);
- output[2] = HIGHBD_WRAPLOW(x12, bd);
- output[3] = HIGHBD_WRAPLOW(-x4, bd);
- output[4] = HIGHBD_WRAPLOW(x6, bd);
- output[5] = HIGHBD_WRAPLOW(x14, bd);
- output[6] = HIGHBD_WRAPLOW(x10, bd);
- output[7] = HIGHBD_WRAPLOW(x2, bd);
- output[8] = HIGHBD_WRAPLOW(x3, bd);
- output[9] = HIGHBD_WRAPLOW(x11, bd);
- output[10] = HIGHBD_WRAPLOW(x15, bd);
- output[11] = HIGHBD_WRAPLOW(x7, bd);
- output[12] = HIGHBD_WRAPLOW(x5, bd);
- output[13] = HIGHBD_WRAPLOW(-x13, bd);
- output[14] = HIGHBD_WRAPLOW(x9, bd);
- output[15] = HIGHBD_WRAPLOW(-x1, bd);
-}
-
-void aom_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd) {
- tran_low_t step1[32], step2[32];
- tran_high_t temp1, temp2;
- (void)bd;
-
- // stage 1
- step1[0] = input[0];
- step1[1] = input[16];
- step1[2] = input[8];
- step1[3] = input[24];
- step1[4] = input[4];
- step1[5] = input[20];
- step1[6] = input[12];
- step1[7] = input[28];
- step1[8] = input[2];
- step1[9] = input[18];
- step1[10] = input[10];
- step1[11] = input[26];
- step1[12] = input[6];
- step1[13] = input[22];
- step1[14] = input[14];
- step1[15] = input[30];
-
- temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64;
- temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64;
- step1[16] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[31] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64;
- temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64;
- step1[17] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[30] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64;
- temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64;
- step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64;
- temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64;
- step1[19] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[28] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64;
- temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64;
- step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64;
- temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64;
- step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64;
- temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64;
- step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64;
- temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64;
- step1[23] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[24] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- // stage 2
- step2[0] = step1[0];
- step2[1] = step1[1];
- step2[2] = step1[2];
- step2[3] = step1[3];
- step2[4] = step1[4];
- step2[5] = step1[5];
- step2[6] = step1[6];
- step2[7] = step1[7];
-
- temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
- temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
- step2[8] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[15] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
- temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
- step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
- temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
- step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
- temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
- step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[17], bd);
- step2[17] = HIGHBD_WRAPLOW(step1[16] - step1[17], bd);
- step2[18] = HIGHBD_WRAPLOW(-step1[18] + step1[19], bd);
- step2[19] = HIGHBD_WRAPLOW(step1[18] + step1[19], bd);
- step2[20] = HIGHBD_WRAPLOW(step1[20] + step1[21], bd);
- step2[21] = HIGHBD_WRAPLOW(step1[20] - step1[21], bd);
- step2[22] = HIGHBD_WRAPLOW(-step1[22] + step1[23], bd);
- step2[23] = HIGHBD_WRAPLOW(step1[22] + step1[23], bd);
- step2[24] = HIGHBD_WRAPLOW(step1[24] + step1[25], bd);
- step2[25] = HIGHBD_WRAPLOW(step1[24] - step1[25], bd);
- step2[26] = HIGHBD_WRAPLOW(-step1[26] + step1[27], bd);
- step2[27] = HIGHBD_WRAPLOW(step1[26] + step1[27], bd);
- step2[28] = HIGHBD_WRAPLOW(step1[28] + step1[29], bd);
- step2[29] = HIGHBD_WRAPLOW(step1[28] - step1[29], bd);
- step2[30] = HIGHBD_WRAPLOW(-step1[30] + step1[31], bd);
- step2[31] = HIGHBD_WRAPLOW(step1[30] + step1[31], bd);
-
- // stage 3
- step1[0] = step2[0];
- step1[1] = step2[1];
- step1[2] = step2[2];
- step1[3] = step2[3];
-
- temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
- temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
- step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
- temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
- step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-
- step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[9], bd);
- step1[9] = HIGHBD_WRAPLOW(step2[8] - step2[9], bd);
- step1[10] = HIGHBD_WRAPLOW(-step2[10] + step2[11], bd);
- step1[11] = HIGHBD_WRAPLOW(step2[10] + step2[11], bd);
- step1[12] = HIGHBD_WRAPLOW(step2[12] + step2[13], bd);
- step1[13] = HIGHBD_WRAPLOW(step2[12] - step2[13], bd);
- step1[14] = HIGHBD_WRAPLOW(-step2[14] + step2[15], bd);
- step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], bd);
-
- step1[16] = step2[16];
- step1[31] = step2[31];
- temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64;
- temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64;
- step1[17] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[30] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64;
- temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64;
- step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step1[19] = step2[19];
- step1[20] = step2[20];
- temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64;
- temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64;
- step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64;
- temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64;
- step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step1[23] = step2[23];
- step1[24] = step2[24];
- step1[27] = step2[27];
- step1[28] = step2[28];
-
- // stage 4
- temp1 = (step1[0] + step1[1]) * cospi_16_64;
- temp2 = (step1[0] - step1[1]) * cospi_16_64;
- step2[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
- temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
- step2[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
- step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
- step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
- step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
-
- step2[8] = step1[8];
- step2[15] = step1[15];
- temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
- temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
- step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
- temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
- step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step2[11] = step1[11];
- step2[12] = step1[12];
-
- step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[19], bd);
- step2[17] = HIGHBD_WRAPLOW(step1[17] + step1[18], bd);
- step2[18] = HIGHBD_WRAPLOW(step1[17] - step1[18], bd);
- step2[19] = HIGHBD_WRAPLOW(step1[16] - step1[19], bd);
- step2[20] = HIGHBD_WRAPLOW(-step1[20] + step1[23], bd);
- step2[21] = HIGHBD_WRAPLOW(-step1[21] + step1[22], bd);
- step2[22] = HIGHBD_WRAPLOW(step1[21] + step1[22], bd);
- step2[23] = HIGHBD_WRAPLOW(step1[20] + step1[23], bd);
-
- step2[24] = HIGHBD_WRAPLOW(step1[24] + step1[27], bd);
- step2[25] = HIGHBD_WRAPLOW(step1[25] + step1[26], bd);
- step2[26] = HIGHBD_WRAPLOW(step1[25] - step1[26], bd);
- step2[27] = HIGHBD_WRAPLOW(step1[24] - step1[27], bd);
- step2[28] = HIGHBD_WRAPLOW(-step1[28] + step1[31], bd);
- step2[29] = HIGHBD_WRAPLOW(-step1[29] + step1[30], bd);
- step2[30] = HIGHBD_WRAPLOW(step1[29] + step1[30], bd);
- step2[31] = HIGHBD_WRAPLOW(step1[28] + step1[31], bd);
-
- // stage 5
- step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[3], bd);
- step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[2], bd);
- step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd);
- step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd);
- step1[4] = step2[4];
- temp1 = (step2[6] - step2[5]) * cospi_16_64;
- temp2 = (step2[5] + step2[6]) * cospi_16_64;
- step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step1[7] = step2[7];
-
- step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[11], bd);
- step1[9] = HIGHBD_WRAPLOW(step2[9] + step2[10], bd);
- step1[10] = HIGHBD_WRAPLOW(step2[9] - step2[10], bd);
- step1[11] = HIGHBD_WRAPLOW(step2[8] - step2[11], bd);
- step1[12] = HIGHBD_WRAPLOW(-step2[12] + step2[15], bd);
- step1[13] = HIGHBD_WRAPLOW(-step2[13] + step2[14], bd);
- step1[14] = HIGHBD_WRAPLOW(step2[13] + step2[14], bd);
- step1[15] = HIGHBD_WRAPLOW(step2[12] + step2[15], bd);
-
- step1[16] = step2[16];
- step1[17] = step2[17];
- temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64;
- temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64;
- step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64;
- temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64;
- step1[19] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[28] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64;
- temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64;
- step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64;
- temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64;
- step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step1[22] = step2[22];
- step1[23] = step2[23];
- step1[24] = step2[24];
- step1[25] = step2[25];
- step1[30] = step2[30];
- step1[31] = step2[31];
-
- // stage 6
- step2[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
- step2[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
- step2[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
- step2[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
- step2[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
- step2[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
- step2[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
- step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
- step2[8] = step1[8];
- step2[9] = step1[9];
- temp1 = (-step1[10] + step1[13]) * cospi_16_64;
- temp2 = (step1[10] + step1[13]) * cospi_16_64;
- step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = (-step1[11] + step1[12]) * cospi_16_64;
- temp2 = (step1[11] + step1[12]) * cospi_16_64;
- step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step2[14] = step1[14];
- step2[15] = step1[15];
-
- step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[23], bd);
- step2[17] = HIGHBD_WRAPLOW(step1[17] + step1[22], bd);
- step2[18] = HIGHBD_WRAPLOW(step1[18] + step1[21], bd);
- step2[19] = HIGHBD_WRAPLOW(step1[19] + step1[20], bd);
- step2[20] = HIGHBD_WRAPLOW(step1[19] - step1[20], bd);
- step2[21] = HIGHBD_WRAPLOW(step1[18] - step1[21], bd);
- step2[22] = HIGHBD_WRAPLOW(step1[17] - step1[22], bd);
- step2[23] = HIGHBD_WRAPLOW(step1[16] - step1[23], bd);
-
- step2[24] = HIGHBD_WRAPLOW(-step1[24] + step1[31], bd);
- step2[25] = HIGHBD_WRAPLOW(-step1[25] + step1[30], bd);
- step2[26] = HIGHBD_WRAPLOW(-step1[26] + step1[29], bd);
- step2[27] = HIGHBD_WRAPLOW(-step1[27] + step1[28], bd);
- step2[28] = HIGHBD_WRAPLOW(step1[27] + step1[28], bd);
- step2[29] = HIGHBD_WRAPLOW(step1[26] + step1[29], bd);
- step2[30] = HIGHBD_WRAPLOW(step1[25] + step1[30], bd);
- step2[31] = HIGHBD_WRAPLOW(step1[24] + step1[31], bd);
-
- // stage 7
- step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[15], bd);
- step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[14], bd);
- step1[2] = HIGHBD_WRAPLOW(step2[2] + step2[13], bd);
- step1[3] = HIGHBD_WRAPLOW(step2[3] + step2[12], bd);
- step1[4] = HIGHBD_WRAPLOW(step2[4] + step2[11], bd);
- step1[5] = HIGHBD_WRAPLOW(step2[5] + step2[10], bd);
- step1[6] = HIGHBD_WRAPLOW(step2[6] + step2[9], bd);
- step1[7] = HIGHBD_WRAPLOW(step2[7] + step2[8], bd);
- step1[8] = HIGHBD_WRAPLOW(step2[7] - step2[8], bd);
- step1[9] = HIGHBD_WRAPLOW(step2[6] - step2[9], bd);
- step1[10] = HIGHBD_WRAPLOW(step2[5] - step2[10], bd);
- step1[11] = HIGHBD_WRAPLOW(step2[4] - step2[11], bd);
- step1[12] = HIGHBD_WRAPLOW(step2[3] - step2[12], bd);
- step1[13] = HIGHBD_WRAPLOW(step2[2] - step2[13], bd);
- step1[14] = HIGHBD_WRAPLOW(step2[1] - step2[14], bd);
- step1[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
-
- step1[16] = step2[16];
- step1[17] = step2[17];
- step1[18] = step2[18];
- step1[19] = step2[19];
- temp1 = (-step2[20] + step2[27]) * cospi_16_64;
- temp2 = (step2[20] + step2[27]) * cospi_16_64;
- step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = (-step2[21] + step2[26]) * cospi_16_64;
- temp2 = (step2[21] + step2[26]) * cospi_16_64;
- step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = (-step2[22] + step2[25]) * cospi_16_64;
- temp2 = (step2[22] + step2[25]) * cospi_16_64;
- step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- temp1 = (-step2[23] + step2[24]) * cospi_16_64;
- temp2 = (step2[23] + step2[24]) * cospi_16_64;
- step1[23] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[24] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
- step1[28] = step2[28];
- step1[29] = step2[29];
- step1[30] = step2[30];
- step1[31] = step2[31];
-
- // final stage
- output[0] = HIGHBD_WRAPLOW(step1[0] + step1[31], bd);
- output[1] = HIGHBD_WRAPLOW(step1[1] + step1[30], bd);
- output[2] = HIGHBD_WRAPLOW(step1[2] + step1[29], bd);
- output[3] = HIGHBD_WRAPLOW(step1[3] + step1[28], bd);
- output[4] = HIGHBD_WRAPLOW(step1[4] + step1[27], bd);
- output[5] = HIGHBD_WRAPLOW(step1[5] + step1[26], bd);
- output[6] = HIGHBD_WRAPLOW(step1[6] + step1[25], bd);
- output[7] = HIGHBD_WRAPLOW(step1[7] + step1[24], bd);
- output[8] = HIGHBD_WRAPLOW(step1[8] + step1[23], bd);
- output[9] = HIGHBD_WRAPLOW(step1[9] + step1[22], bd);
- output[10] = HIGHBD_WRAPLOW(step1[10] + step1[21], bd);
- output[11] = HIGHBD_WRAPLOW(step1[11] + step1[20], bd);
- output[12] = HIGHBD_WRAPLOW(step1[12] + step1[19], bd);
- output[13] = HIGHBD_WRAPLOW(step1[13] + step1[18], bd);
- output[14] = HIGHBD_WRAPLOW(step1[14] + step1[17], bd);
- output[15] = HIGHBD_WRAPLOW(step1[15] + step1[16], bd);
- output[16] = HIGHBD_WRAPLOW(step1[15] - step1[16], bd);
- output[17] = HIGHBD_WRAPLOW(step1[14] - step1[17], bd);
- output[18] = HIGHBD_WRAPLOW(step1[13] - step1[18], bd);
- output[19] = HIGHBD_WRAPLOW(step1[12] - step1[19], bd);
- output[20] = HIGHBD_WRAPLOW(step1[11] - step1[20], bd);
- output[21] = HIGHBD_WRAPLOW(step1[10] - step1[21], bd);
- output[22] = HIGHBD_WRAPLOW(step1[9] - step1[22], bd);
- output[23] = HIGHBD_WRAPLOW(step1[8] - step1[23], bd);
- output[24] = HIGHBD_WRAPLOW(step1[7] - step1[24], bd);
- output[25] = HIGHBD_WRAPLOW(step1[6] - step1[25], bd);
- output[26] = HIGHBD_WRAPLOW(step1[5] - step1[26], bd);
- output[27] = HIGHBD_WRAPLOW(step1[4] - step1[27], bd);
- output[28] = HIGHBD_WRAPLOW(step1[3] - step1[28], bd);
- output[29] = HIGHBD_WRAPLOW(step1[2] - step1[29], bd);
- output[30] = HIGHBD_WRAPLOW(step1[1] - step1[30], bd);
- output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
-}
-
-#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/inv_txfm.h b/third_party/aom/aom_dsp/inv_txfm.h
index e64d463ea..a9c485e74 100644
--- a/third_party/aom/aom_dsp/inv_txfm.h
+++ b/third_party/aom/aom_dsp/inv_txfm.h
@@ -23,8 +23,7 @@ extern "C" {
#endif
static INLINE tran_high_t dct_const_round_shift(tran_high_t input) {
- tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
- return rv;
+ return ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
}
static INLINE tran_high_t check_range(tran_high_t input, int bd) {
@@ -51,9 +50,19 @@ static INLINE tran_high_t check_range(tran_high_t input, int bd) {
}
#define WRAPLOW(x) ((int32_t)check_range(x, 8))
-#if CONFIG_HIGHBITDEPTH
#define HIGHBD_WRAPLOW(x, bd) ((int32_t)check_range((x), bd))
-#endif // CONFIG_HIGHBITDEPTH
+
+#if CONFIG_MRC_TX
+// These each perform an inverse DCT, but add coefficients based on a mask
+void aom_imrc32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+ int stride, int *mask);
+
+void aom_imrc32x32_135_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int *mask);
+
+void aom_imrc32x32_34_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+ int *mask);
+#endif // CONFIG_MRC_TX
void aom_idct4_c(const tran_low_t *input, tran_low_t *output);
void aom_idct8_c(const tran_low_t *input, tran_low_t *output);
@@ -63,7 +72,6 @@ void aom_iadst4_c(const tran_low_t *input, tran_low_t *output);
void aom_iadst8_c(const tran_low_t *input, tran_low_t *output);
void aom_iadst16_c(const tran_low_t *input, tran_low_t *output);
-#if CONFIG_HIGHBITDEPTH
void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
@@ -78,7 +86,6 @@ static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
trans = HIGHBD_WRAPLOW(trans, bd);
return clip_pixel_highbd(dest + (int)trans, bd);
}
-#endif
static INLINE uint8_t clip_pixel_add(uint8_t dest, tran_high_t trans) {
trans = WRAPLOW(trans);
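
As a quick illustration of the simplified dct_const_round_shift() above: it is the Q14 rounding step applied after every cospi butterfly in the inverse transforms shown earlier in this patch. The standalone sketch below assumes DCT_CONST_BITS == 14 and cospi_16_64 == 11585 (the 16384-scaled constants from txfm_common.h); it is illustrative only, not library code.

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t tran_high_t;

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    static tran_high_t dct_const_round_shift(tran_high_t input) {
      return ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
    }

    int main(void) {
      const tran_high_t cospi_16_64 = 11585; /* 16384 * cos(pi/4) */
      const tran_high_t a = 100, b = 60;     /* arbitrary coefficients */
      /* One pi/4 butterfly as used in the idct stages: multiply in Q14,
         then round back to integer precision. */
      printf("%lld\n", (long long)dct_const_round_shift((a + b) * cospi_16_64)); /* 113 */
      printf("%lld\n", (long long)dct_const_round_shift((a - b) * cospi_16_64)); /* 28 */
      return 0;
    }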
diff --git a/third_party/aom/aom_dsp/mips/fwd_dct32x32_msa.c b/third_party/aom/aom_dsp/mips/fwd_dct32x32_msa.c
index dc9c63226..43dce8ba6 100644
--- a/third_party/aom/aom_dsp/mips/fwd_dct32x32_msa.c
+++ b/third_party/aom/aom_dsp/mips/fwd_dct32x32_msa.c
@@ -926,23 +926,3 @@ void aom_fdct32x32_rd_msa(const int16_t *input, int16_t *out,
out + (8 * i * 32));
}
}
-
-void aom_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
- int sum = LD_HADD(input, stride);
- sum += LD_HADD(input + 8, stride);
- sum += LD_HADD(input + 16, stride);
- sum += LD_HADD(input + 24, stride);
- sum += LD_HADD(input + 32 * 8, stride);
- sum += LD_HADD(input + 32 * 8 + 8, stride);
- sum += LD_HADD(input + 32 * 8 + 16, stride);
- sum += LD_HADD(input + 32 * 8 + 24, stride);
- sum += LD_HADD(input + 32 * 16, stride);
- sum += LD_HADD(input + 32 * 16 + 8, stride);
- sum += LD_HADD(input + 32 * 16 + 16, stride);
- sum += LD_HADD(input + 32 * 16 + 24, stride);
- sum += LD_HADD(input + 32 * 24, stride);
- sum += LD_HADD(input + 32 * 24 + 8, stride);
- sum += LD_HADD(input + 32 * 24 + 16, stride);
- sum += LD_HADD(input + 32 * 24 + 24, stride);
- out[0] = (int16_t)(sum >> 3);
-}
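
For reference, the removed aom_fdct32x32_1_msa computed only the DC term of the 32x32 forward transform: the sum of every sample in the block, scaled down by 8. A plain-C sketch of that behaviour (an illustration, not the library's reference implementation) is:

    #include <stdint.h>

    /* DC-only 32x32 forward transform: out[0] = (sum of the 32x32 block) >> 3. */
    static void fdct32x32_1_ref(const int16_t *input, int16_t *out, int32_t stride) {
      int64_t sum = 0;
      for (int r = 0; r < 32; ++r)
        for (int c = 0; c < 32; ++c) sum += input[r * stride + c];
      out[0] = (int16_t)(sum >> 3);
    }

    int main(void) {
      int16_t block[32 * 32], dc;
      for (int i = 0; i < 32 * 32; ++i) block[i] = 1; /* all-ones block */
      fdct32x32_1_ref(block, &dc, 32);
      return dc == 128 ? 0 : 1;                       /* 1024 samples, >> 3 */
    }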
diff --git a/third_party/aom/aom_dsp/mips/fwd_txfm_msa.c b/third_party/aom/aom_dsp/mips/fwd_txfm_msa.c
index f16d290c8..7a285b7b8 100644
--- a/third_party/aom/aom_dsp/mips/fwd_txfm_msa.c
+++ b/third_party/aom/aom_dsp/mips/fwd_txfm_msa.c
@@ -236,11 +236,3 @@ void aom_fdct16x16_msa(const int16_t *input, int16_t *output,
fdct16x8_1d_row((&tmp_buf[0] + (128 * i)), (output + (128 * i)));
}
}
-
-void aom_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
- int sum = LD_HADD(input, stride);
- sum += LD_HADD(input + 8, stride);
- sum += LD_HADD(input + 16 * 8, stride);
- sum += LD_HADD(input + 16 * 8 + 8, stride);
- out[0] = (int16_t)(sum >> 1);
-}
diff --git a/third_party/aom/aom_dsp/prob.c b/third_party/aom/aom_dsp/prob.c
index eefe7521f..a42fb806b 100644
--- a/third_party/aom/aom_dsp/prob.c
+++ b/third_party/aom/aom_dsp/prob.c
@@ -186,10 +186,8 @@ int tree_to_cdf(const aom_tree_index *tree, const aom_prob *probs,
for (i = 1; i < nsymbs; i++) {
cdf[i] = AOM_ICDF(AOM_ICDF(cdf[i - 1]) + cdf[i]);
}
-// Store symbol count at the end of the CDF
-#if CONFIG_EC_ADAPT
+ // Store symbol count at the end of the CDF
cdf[nsymbs] = 0;
-#endif
return nsymbs;
}
diff --git a/third_party/aom/aom_dsp/prob.h b/third_party/aom/aom_dsp/prob.h
index ec6654ab7..35db134e5 100644
--- a/third_party/aom/aom_dsp/prob.h
+++ b/third_party/aom/aom_dsp/prob.h
@@ -148,7 +148,6 @@ static INLINE void av1_tree_to_cdf(const aom_tree_index *tree,
void av1_indices_from_tree(int *ind, int *inv, const aom_tree_index *tree);
-#if CONFIG_EC_ADAPT
static INLINE void update_cdf(aom_cdf_prob *cdf, int val, int nsymbs) {
const int rate = 4 + (cdf[nsymbs] > 31) + get_msb(nsymbs);
const int rate2 = 5;
@@ -183,7 +182,6 @@ static INLINE void update_cdf(aom_cdf_prob *cdf, int val, int nsymbs) {
#endif
cdf[nsymbs] += (cdf[nsymbs] < 32);
}
-#endif
#ifdef __cplusplus
} // extern "C"
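
The now-unconditional update_cdf() above keeps a symbol counter in the extra slot cdf[nsymbs]: it saturates at 32 and feeds the adaptation rate, rate = 4 + (count > 31) + get_msb(nsymbs), so adaptation slows once enough symbols have been coded. A small worked check of that arithmetic (illustrative values; get_msb() here is a local stand-in returning the index of the highest set bit):

    #include <assert.h>

    static int get_msb(unsigned int n) { /* index of the highest set bit */
      int msb = 0;
      while (n >>= 1) ++msb;
      return msb;
    }

    int main(void) {
      const int nsymbs = 8;  /* e.g. an 8-ary symbol alphabet */
      int count = 0;         /* value stored in cdf[nsymbs] */
      assert(4 + (count > 31) + get_msb(nsymbs) == 7); /* early: fast adaptation */
      count = 32;            /* saturated after 32 updates */
      assert(4 + (count > 31) + get_msb(nsymbs) == 8); /* later: slower adaptation */
      return 0;
    }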
diff --git a/third_party/aom/aom_dsp/quantize.c b/third_party/aom/aom_dsp/quantize.c
index 0759c22e3..fe98b6028 100644
--- a/third_party/aom/aom_dsp/quantize.c
+++ b/third_party/aom/aom_dsp/quantize.c
@@ -256,7 +256,6 @@ void aom_quantize_dc_64x64(const tran_low_t *coeff_ptr, int skip_block,
}
#endif // CONFIG_TX64X64
-#if CONFIG_HIGHBITDEPTH
void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant, tran_low_t *qcoeff_ptr,
@@ -523,7 +522,6 @@ void aom_highbd_quantize_b_64x64_c(
*eob_ptr = eob + 1;
}
#endif // CONFIG_TX64X64
-#endif // CONFIG_HIGHBITDEPTH
#else // CONFIG_AOM_QM
@@ -602,7 +600,6 @@ void aom_quantize_dc_64x64(const tran_low_t *coeff_ptr, int skip_block,
}
#endif // CONFIG_TX64X64
-#if CONFIG_HIGHBITDEPTH
void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
int skip_block, const int16_t *round_ptr,
const int16_t quant, tran_low_t *qcoeff_ptr,
@@ -716,8 +713,7 @@ void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
if (abs_coeff >= zbins[rc != 0]) {
const int64_t tmp1 = abs_coeff + round_ptr[rc != 0];
const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1;
- const uint32_t abs_qcoeff =
- (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 16);
+ const int abs_qcoeff = (int)((tmp2 * quant_shift_ptr[rc != 0]) >> 16);
qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
if (abs_qcoeff) eob = i;
@@ -767,8 +763,7 @@ void aom_highbd_quantize_b_32x32_c(
const int64_t tmp1 =
abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1;
- const uint32_t abs_qcoeff =
- (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 15);
+ const int abs_qcoeff = (int)((tmp2 * quant_shift_ptr[rc != 0]) >> 15);
qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
if (abs_qcoeff) eob = idx_arr[i];
@@ -818,8 +813,7 @@ void aom_highbd_quantize_b_64x64_c(
const int64_t tmp1 =
abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 2);
const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1;
- const uint32_t abs_qcoeff =
- (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 14);
+ const int abs_qcoeff = (int)((tmp2 * quant_shift_ptr[rc != 0]) >> 14);
qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 4;
if (abs_qcoeff) eob = idx_arr[i];
@@ -828,5 +822,4 @@ void aom_highbd_quantize_b_64x64_c(
*eob_ptr = eob + 1;
}
#endif // CONFIG_TX64X64
-#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_AOM_QM
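
The aom_highbd_quantize_b hunks above only narrow abs_qcoeff from uint32_t to int; the two-multiply quantizer itself is unchanged: tmp2 = ((tmp1 * quant) >> 16) + tmp1, then abs_qcoeff = (tmp2 * quant_shift) >> 16, with the sign folded back in. A standalone sketch with made-up quantizer values (not taken from any real quant table):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const int64_t coeff = -153; /* hypothetical transform coefficient */
      const int64_t round = 8, quant = 21845, quant_shift = 32768, dequant = 24;
      const int coeff_sign = coeff < 0 ? -1 : 0;
      const int64_t abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
      const int64_t tmp1 = abs_coeff + round;
      const int64_t tmp2 = ((tmp1 * quant) >> 16) + tmp1;
      const int abs_qcoeff = (int)((tmp2 * quant_shift) >> 16);
      const int qcoeff = (abs_qcoeff ^ coeff_sign) - coeff_sign;
      const int dqcoeff = qcoeff * (int)dequant;
      printf("qcoeff=%d dqcoeff=%d\n", qcoeff, dqcoeff); /* -107 and -2568 */
      return 0;
    }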
diff --git a/third_party/aom/aom_dsp/sad.c b/third_party/aom/aom_dsp/sad.c
index 2cc172ba5..b9c789ce5 100644
--- a/third_party/aom/aom_dsp/sad.c
+++ b/third_party/aom/aom_dsp/sad.c
@@ -153,10 +153,21 @@ sadMxN(4, 4)
sadMxNxK(4, 4, 3)
sadMxNxK(4, 4, 8)
sadMxNx4D(4, 4)
+
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION_TYPES
+sadMxN(4, 16)
+sadMxNx4D(4, 16)
+sadMxN(16, 4)
+sadMxNx4D(16, 4)
+sadMxN(8, 32)
+sadMxNx4D(8, 32)
+sadMxN(32, 8)
+sadMxNx4D(32, 8)
+#endif
/* clang-format on */
#if CONFIG_HIGHBITDEPTH
- static INLINE
+ static INLINE
unsigned int highbd_sad(const uint8_t *a8, int a_stride, const uint8_t *b8,
int b_stride, int width, int height) {
int y, x;
@@ -307,11 +318,22 @@ highbd_sadMxN(4, 4)
highbd_sadMxNxK(4, 4, 3)
highbd_sadMxNxK(4, 4, 8)
highbd_sadMxNx4D(4, 4)
+
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION_TYPES
+highbd_sadMxN(4, 16)
+highbd_sadMxNx4D(4, 16)
+highbd_sadMxN(16, 4)
+highbd_sadMxNx4D(16, 4)
+highbd_sadMxN(8, 32)
+highbd_sadMxNx4D(8, 32)
+highbd_sadMxN(32, 8)
+highbd_sadMxNx4D(32, 8)
+#endif
/* clang-format on */
#endif // CONFIG_HIGHBITDEPTH
#if CONFIG_AV1 && CONFIG_EXT_INTER
- static INLINE
+ static INLINE
unsigned int masked_sad(const uint8_t *src, int src_stride,
const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, const uint8_t *m, int m_stride,
@@ -367,10 +389,17 @@ MASKSADMxN(8, 8)
MASKSADMxN(8, 4)
MASKSADMxN(4, 8)
MASKSADMxN(4, 4)
+
+#if CONFIG_EXT_PARTITION_TYPES
+MASKSADMxN(4, 16)
+MASKSADMxN(16, 4)
+MASKSADMxN(8, 32)
+MASKSADMxN(32, 8)
+#endif
/* clang-format on */
#if CONFIG_HIGHBITDEPTH
- static INLINE
+ static INLINE
unsigned int highbd_masked_sad(const uint8_t *src8, int src_stride,
const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride,
@@ -429,6 +458,13 @@ HIGHBD_MASKSADMXN(8, 8)
HIGHBD_MASKSADMXN(8, 4)
HIGHBD_MASKSADMXN(4, 8)
HIGHBD_MASKSADMXN(4, 4)
+
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION_TYPES
+HIGHBD_MASKSADMXN(4, 16)
+HIGHBD_MASKSADMXN(16, 4)
+HIGHBD_MASKSADMXN(8, 32)
+HIGHBD_MASKSADMXN(32, 8)
+#endif
#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_AV1 && CONFIG_EXT_INTER
@@ -480,10 +516,17 @@ OBMCSADMxN(8, 8)
OBMCSADMxN(8, 4)
OBMCSADMxN(4, 8)
OBMCSADMxN(4, 4)
+
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION_TYPES
+OBMCSADMxN(4, 16)
+OBMCSADMxN(16, 4)
+OBMCSADMxN(8, 32)
+OBMCSADMxN(32, 8)
+#endif
/* clang-format on */
#if CONFIG_HIGHBITDEPTH
- static INLINE
+ static INLINE
unsigned int highbd_obmc_sad(const uint8_t *pre8, int pre_stride,
const int32_t *wsrc, const int32_t *mask,
int width, int height) {
@@ -529,6 +572,13 @@ HIGHBD_OBMCSADMXN(8, 8)
HIGHBD_OBMCSADMXN(8, 4)
HIGHBD_OBMCSADMXN(4, 8)
HIGHBD_OBMCSADMXN(4, 4)
+
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION_TYPES
+HIGHBD_OBMCSADMXN(4, 16)
+HIGHBD_OBMCSADMXN(16, 4)
+HIGHBD_OBMCSADMXN(8, 32)
+HIGHBD_OBMCSADMXN(32, 8)
+#endif
/* clang-format on */
#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_AV1 && CONFIG_MOTION_VAR
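
The sad.c additions above merely instantiate the existing sadMxN, highbd, masked and OBMC macros for the new 4x16, 16x4, 8x32 and 32x8 partition sizes; the metric is unchanged. For orientation, a plain (non-macro) sum of absolute differences looks like this sketch:

    #include <stdint.h>
    #include <stdlib.h>

    /* Sum of absolute differences over a width x height block. */
    static unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b,
                            int b_stride, int width, int height) {
      unsigned int sum = 0;
      for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) sum += abs(a[x] - b[x]);
        a += a_stride;
        b += b_stride;
      }
      return sum;
    }

    int main(void) {
      uint8_t a[16 * 4], b[16 * 4];
      for (int i = 0; i < 16 * 4; ++i) { a[i] = 100; b[i] = 97; }
      return sad(a, 16, b, 16, 16, 4) == 192 ? 0 : 1; /* 64 samples * |3| */
    }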
diff --git a/third_party/aom/aom_dsp/simd/v256_intrinsics_v128.h b/third_party/aom/aom_dsp/simd/v256_intrinsics_v128.h
index a4b334ea6..cbea55ca1 100644
--- a/third_party/aom/aom_dsp/simd/v256_intrinsics_v128.h
+++ b/third_party/aom/aom_dsp/simd/v256_intrinsics_v128.h
@@ -508,17 +508,19 @@ SIMD_INLINE v256 v256_shr_s32(v256 a, unsigned int c) {
to enforce that. */
#define v256_shl_n_byte(a, n) \
((n) < 16 ? v256_from_v128(v128_or(v128_shl_n_byte(a.hi, n), \
- v128_shr_n_byte(a.lo, 16 - (n))), \
+ v128_shr_n_byte(a.lo, (16 - (n)) & 31)), \
v128_shl_n_byte(a.lo, (n))) \
- : v256_from_v128((n) > 16 ? v128_shl_n_byte(a.lo, (n)-16) : a.lo, \
- v128_zero()))
-
-#define v256_shr_n_byte(a, n) \
- ((n) < 16 ? v256_from_v128(v128_shr_n_byte(a.hi, n), \
- v128_or(v128_shr_n_byte(a.lo, n), \
- v128_shl_n_byte(a.hi, 16 - (n)))) \
- : v256_from_v128(v128_zero(), \
- (n) > 16 ? v128_shr_n_byte(a.hi, (n)-16) : a.hi))
+ : v256_from_v128( \
+ (n) > 16 ? v128_shl_n_byte(a.lo, ((n)-16) & 31) : a.lo, \
+ v128_zero()))
+
+#define v256_shr_n_byte(a, n) \
+ ((n) < 16 ? v256_from_v128(v128_shr_n_byte(a.hi, n), \
+ v128_or(v128_shr_n_byte(a.lo, n), \
+ v128_shl_n_byte(a.hi, (16 - (n)) & 31))) \
+ : v256_from_v128( \
+ v128_zero(), \
+ (n) > 16 ? v128_shr_n_byte(a.hi, ((n)-16) & 31) : a.hi))
#define v256_align(a, b, c) \
((c) ? v256_or(v256_shr_n_byte(b, c), v256_shl_n_byte(a, 32 - (c))) : b)
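
The & 31 masks added to v256_shl_n_byte / v256_shr_n_byte do not change the count used by the branch that is actually selected; presumably they only keep the count of the other, never-taken branch in range, since these helpers take a compile-time constant shift and both arms of the ?: are expanded. A trivial check of that arithmetic:

    #include <assert.h>

    int main(void) {
      /* Live branch (n < 16): masking is a no-op. */
      for (int n = 1; n < 16; ++n) assert(((16 - n) & 31) == 16 - n);
      /* Dead branch (n >= 16): the masked count stays in 0..31 instead of
         going negative, so it is still a legal constant shift amount. */
      for (int n = 16; n <= 31; ++n)
        assert((((16 - n) & 31) >= 0) && (((16 - n) & 31) < 32));
      return 0;
    }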
diff --git a/third_party/aom/aom_dsp/txfm_common.h b/third_party/aom/aom_dsp/txfm_common.h
index a5e964aad..01732ae64 100644
--- a/third_party/aom/aom_dsp/txfm_common.h
+++ b/third_party/aom/aom_dsp/txfm_common.h
@@ -21,6 +21,27 @@
#define UNIT_QUANT_SHIFT 2
#define UNIT_QUANT_FACTOR (1 << UNIT_QUANT_SHIFT)
+typedef struct txfm_param {
+ // for both forward and inverse transforms
+ int tx_type;
+ int tx_size;
+ int lossless;
+ int bd;
+#if CONFIG_MRC_TX || CONFIG_LGT
+ int stride;
+ uint8_t *dst;
+#endif // CONFIG_MRC_TX || CONFIG_LGT
+#if CONFIG_LGT
+ int is_inter;
+ int mode;
+#endif
+// for inverse transforms only
+#if CONFIG_ADAPT_SCAN
+ const int16_t *eob_threshold;
+#endif
+ int eob;
+} TxfmParam;
+
// Constants:
// for (int i = 1; i< 32; ++i)
// printf("static const int cospi_%d_64 = %.0f;\n", i,
@@ -67,4 +88,62 @@ static const tran_high_t sinpi_4_9 = 15212;
// 16384 * sqrt(2)
static const tran_high_t Sqrt2 = 23170;
+static INLINE tran_high_t fdct_round_shift(tran_high_t input) {
+ tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
+ return rv;
+}
+
+#if CONFIG_LGT
+// The Line Graph Transforms (LGTs) matrices are written as follows.
+// Each 2D array is 16384 times an LGT matrix, which is the matrix of
+// eigenvectors of the graph Laplacian matrices for the line graph.
+
+// LGT4 name: lgt4_140
+// Self loops: 1.400, 0.000, 0.000, 0.000
+// Edges: 1.000, 1.000, 1.000
+static const tran_high_t lgt4_140[4][4] = {
+ { 4206, 9518, 13524, 15674 },
+ { 11552, 14833, 1560, -13453 },
+ { 15391, -1906, -14393, 9445 },
+ { 12201, -14921, 12016, -4581 },
+};
+
+// LGT4 name: lgt4_170
+// Self loops: 1.700, 0.000, 0.000, 0.000
+// Edges: 1.000, 1.000, 1.000
+static const tran_high_t lgt4_170[4][4] = {
+ { 3636, 9287, 13584, 15902 },
+ { 10255, 15563, 2470, -13543 },
+ { 14786, 711, -15249, 9231 },
+ { 14138, -14420, 10663, -3920 },
+};
+
+// LGT8 name: lgt8_150
+// Self loops: 1.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000
+// Edges: 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000
+static const tran_high_t lgt8_150[8][8] = {
+ { 2075, 5110, 7958, 10511, 12677, 14376, 15544, 16140 },
+ { 6114, 13307, 16196, 13845, 7015, -2084, -10509, -15534 },
+ { 9816, 16163, 8717, -6168, -15790, -11936, 2104, 14348 },
+ { 12928, 12326, -7340, -15653, 242, 15763, 6905, -12632 },
+ { 15124, 3038, -16033, 1758, 15507, -6397, -13593, 10463 },
+ { 15895, -7947, -7947, 15895, -7947, -7947, 15895, -7947 },
+ { 14325, -15057, 9030, 1050, -10659, 15483, -13358, 5236 },
+ { 9054, -12580, 14714, -15220, 14043, -11312, 7330, -2537 },
+};
+
+// LGT8 name: lgt8_170
+// Self loops: 1.700, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000
+// Edges: 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000
+static const tran_high_t lgt8_170[8][8] = {
+ { 1858, 4947, 7850, 10458, 12672, 14411, 15607, 16217 },
+ { 5494, 13022, 16256, 14129, 7343, -1864, -10456, -15601 },
+ { 8887, 16266, 9500, -5529, -15749, -12273, 1876, 14394 },
+ { 11870, 13351, -6199, -15984, -590, 15733, 7273, -12644 },
+ { 14248, 5137, -15991, 291, 15893, -5685, -13963, 10425 },
+ { 15716, -5450, -10010, 15929, -6665, -8952, 16036, -7835 },
+ { 15533, -13869, 6559, 3421, -12009, 15707, -13011, 5018 },
+ { 11357, -13726, 14841, -14600, 13025, -10259, 6556, -2254 },
+};
+#endif // CONFIG_LGT
#endif // AOM_DSP_TXFM_COMMON_H_
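
The comment above states that each LGT table is 16384 times a basis matrix, i.e. the same Q14 scale as the cospi constants, so a row can be applied with the fdct_round_shift() rounding defined earlier in this header. The snippet below is a sketch under that assumption, with arbitrary input values; it is not the av1 LGT transform code.

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t tran_high_t;
    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    static const tran_high_t lgt4_140[4][4] = {
      { 4206, 9518, 13524, 15674 },
      { 11552, 14833, 1560, -13453 },
      { 15391, -1906, -14393, 9445 },
      { 12201, -14921, 12016, -4581 },
    };

    int main(void) {
      const int input[4] = { 10, -3, 7, 2 }; /* arbitrary residual samples */
      for (int i = 0; i < 4; ++i) {
        tran_high_t sum = 0;
        for (int j = 0; j < 4; ++j) sum += lgt4_140[i][j] * input[j];
        printf("%lld ", (long long)ROUND_POWER_OF_TWO(sum, DCT_CONST_BITS));
      }
      printf("\n");
      return 0;
    }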
diff --git a/third_party/aom/aom_dsp/variance.c b/third_party/aom/aom_dsp/variance.c
index 79677c92f..a4c3616e7 100644
--- a/third_party/aom/aom_dsp/variance.c
+++ b/third_party/aom/aom_dsp/variance.c
@@ -9,6 +9,8 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
#include "./aom_config.h"
#include "./aom_dsp_rtcd.h"
@@ -20,6 +22,9 @@
#include "aom_dsp/aom_filter.h"
#include "aom_dsp/blend.h"
+#include "./av1_rtcd.h"
+#include "av1/common/filter.h"
+
uint32_t aom_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride) {
int distortion = 0;
@@ -246,6 +251,13 @@ VARIANCES(4, 2)
VARIANCES(2, 4)
VARIANCES(2, 2)
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION_TYPES
+VARIANCES(4, 16)
+VARIANCES(16, 4)
+VARIANCES(8, 32)
+VARIANCES(32, 8)
+#endif
+
GET_VAR(16, 16)
GET_VAR(8, 8)
@@ -271,33 +283,66 @@ void aom_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
// Get pred block from up-sampled reference.
void aom_upsampled_pred_c(uint8_t *comp_pred, int width, int height,
- const uint8_t *ref, int ref_stride) {
- int i, j, k;
- int stride = ref_stride << 3;
-
- for (i = 0; i < height; i++) {
- for (j = 0, k = 0; j < width; j++, k += 8) {
- comp_pred[j] = ref[k];
+ int subpel_x_q3, int subpel_y_q3, const uint8_t *ref,
+ int ref_stride) {
+ if (!subpel_x_q3 && !subpel_y_q3) {
+ int i;
+ for (i = 0; i < height; i++) {
+ memcpy(comp_pred, ref, width * sizeof(*comp_pred));
+ comp_pred += width;
+ ref += ref_stride;
+ }
+ } else {
+ InterpFilterParams filter;
+ filter = av1_get_interp_filter_params(EIGHTTAP_REGULAR);
+ if (!subpel_y_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ /*Directly call C version to allow this to work for small (2x2) sizes.*/
+ aom_convolve8_horiz_c(ref, ref_stride, comp_pred, width, kernel, 16, NULL,
+ -1, width, height);
+ } else if (!subpel_x_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ /*Directly call C version to allow this to work for small (2x2) sizes.*/
+ aom_convolve8_vert_c(ref, ref_stride, comp_pred, width, NULL, -1, kernel,
+ 16, width, height);
+ } else {
+ DECLARE_ALIGNED(16, uint8_t,
+ temp[((MAX_SB_SIZE * 2 + 16) + 16) * MAX_SB_SIZE]);
+ const int16_t *kernel_x;
+ const int16_t *kernel_y;
+ int intermediate_height;
+ kernel_x = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ kernel_y = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ intermediate_height =
+ (((height - 1) * 8 + subpel_y_q3) >> 3) + filter.taps;
+ assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
+ /*Directly call C versions to allow this to work for small (2x2) sizes.*/
+ aom_convolve8_horiz_c(ref - ref_stride * ((filter.taps >> 1) - 1),
+ ref_stride, temp, MAX_SB_SIZE, kernel_x, 16, NULL,
+ -1, width, intermediate_height);
+ aom_convolve8_vert_c(temp + MAX_SB_SIZE * ((filter.taps >> 1) - 1),
+ MAX_SB_SIZE, comp_pred, width, NULL, -1, kernel_y,
+ 16, width, height);
}
- comp_pred += width;
- ref += stride;
}
}
void aom_comp_avg_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
- int width, int height, const uint8_t *ref,
+ int width, int height, int subpel_x_q3,
+ int subpel_y_q3, const uint8_t *ref,
int ref_stride) {
int i, j;
- int stride = ref_stride << 3;
+ aom_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3, ref,
+ ref_stride);
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
- const int tmp = ref[(j << 3)] + pred[j];
- comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);
+ comp_pred[j] = ROUND_POWER_OF_TWO(comp_pred[j] + pred[j], 1);
}
comp_pred += width;
pred += width;
- ref += stride;
}
}
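
Two bits of arithmetic in the new sub-pel path of aom_upsampled_pred_c are easy to miss: subpel_*_q3 << 1 converts an eighth-pel offset into an index into the sixteenth-pel kernel table, and intermediate_height adds filter.taps rows so the vertical pass has enough filtered rows around the block. A worked check with assumed values (height 16, subpel_y_q3 5, an 8-tap filter):

    #include <stdio.h>

    int main(void) {
      const int height = 16, subpel_y_q3 = 5, taps = 8; /* assumed inputs */
      const int kernel_index = subpel_y_q3 << 1;         /* 1/8 pel -> 1/16 pel */
      const int intermediate_height =
          (((height - 1) * 8 + subpel_y_q3) >> 3) + taps;
      printf("kernel_index=%d intermediate_height=%d\n", kernel_index,
             intermediate_height);                       /* 10 and 23 */
      return 0;
    }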
@@ -611,6 +656,13 @@ HIGHBD_VARIANCES(4, 2)
HIGHBD_VARIANCES(2, 4)
HIGHBD_VARIANCES(2, 2)
+#if CONFIG_AV1 && CONFIG_EXT_PARTITION_TYPES
+HIGHBD_VARIANCES(4, 16)
+HIGHBD_VARIANCES(16, 4)
+HIGHBD_VARIANCES(8, 32)
+HIGHBD_VARIANCES(32, 8)
+#endif
+
HIGHBD_GET_VAR(8)
HIGHBD_GET_VAR(16)
@@ -637,37 +689,74 @@ void aom_highbd_comp_avg_pred_c(uint16_t *comp_pred, const uint8_t *pred8,
}
void aom_highbd_upsampled_pred_c(uint16_t *comp_pred, int width, int height,
- const uint8_t *ref8, int ref_stride) {
- int i, j;
- int stride = ref_stride << 3;
-
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
- for (i = 0; i < height; ++i) {
- for (j = 0; j < width; ++j) {
- comp_pred[j] = ref[(j << 3)];
+ int subpel_x_q3, int subpel_y_q3,
+ const uint8_t *ref8, int ref_stride, int bd) {
+ if (!subpel_x_q3 && !subpel_y_q3) {
+ const uint16_t *ref;
+ int i;
+ ref = CONVERT_TO_SHORTPTR(ref8);
+ for (i = 0; i < height; i++) {
+ memcpy(comp_pred, ref, width * sizeof(*comp_pred));
+ comp_pred += width;
+ ref += ref_stride;
+ }
+ } else {
+ InterpFilterParams filter;
+ filter = av1_get_interp_filter_params(EIGHTTAP_REGULAR);
+ if (!subpel_y_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ /*Directly call C version to allow this to work for small (2x2) sizes.*/
+ aom_highbd_convolve8_horiz_c(ref8, ref_stride,
+ CONVERT_TO_BYTEPTR(comp_pred), width, kernel,
+ 16, NULL, -1, width, height, bd);
+ } else if (!subpel_x_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ /*Directly call C version to allow this to work for small (2x2) sizes.*/
+ aom_highbd_convolve8_vert_c(ref8, ref_stride,
+ CONVERT_TO_BYTEPTR(comp_pred), width, NULL,
+ -1, kernel, 16, width, height, bd);
+ } else {
+ DECLARE_ALIGNED(16, uint16_t,
+ temp[((MAX_SB_SIZE + 16) + 16) * MAX_SB_SIZE]);
+ const int16_t *kernel_x;
+ const int16_t *kernel_y;
+ int intermediate_height;
+ kernel_x = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ kernel_y = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ intermediate_height =
+ (((height - 1) * 8 + subpel_y_q3) >> 3) + filter.taps;
+ assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
+ /*Directly call C versions to allow this to work for small (2x2) sizes.*/
+ aom_highbd_convolve8_horiz_c(ref8 - ref_stride * ((filter.taps >> 1) - 1),
+ ref_stride, CONVERT_TO_BYTEPTR(temp),
+ MAX_SB_SIZE, kernel_x, 16, NULL, -1, width,
+ intermediate_height, bd);
+ aom_highbd_convolve8_vert_c(
+ CONVERT_TO_BYTEPTR(temp + MAX_SB_SIZE * ((filter.taps >> 1) - 1)),
+ MAX_SB_SIZE, CONVERT_TO_BYTEPTR(comp_pred), width, NULL, -1, kernel_y,
+ 16, width, height, bd);
}
- comp_pred += width;
- ref += stride;
}
}
void aom_highbd_comp_avg_upsampled_pred_c(uint16_t *comp_pred,
const uint8_t *pred8, int width,
- int height, const uint8_t *ref8,
- int ref_stride) {
+ int height, int subpel_x_q3,
+ int subpel_y_q3, const uint8_t *ref8,
+ int ref_stride, int bd) {
int i, j;
- int stride = ref_stride << 3;
- uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ const uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
+ aom_highbd_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3,
+ ref8, ref_stride, bd);
for (i = 0; i < height; ++i) {
for (j = 0; j < width; ++j) {
- const int tmp = pred[j] + ref[(j << 3)];
- comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);
+ comp_pred[j] = ROUND_POWER_OF_TWO(pred[j] + comp_pred[j], 1);
}
comp_pred += width;
pred += width;
- ref += stride;
}
}
#endif // CONFIG_HIGHBITDEPTH
@@ -694,22 +783,23 @@ void aom_comp_mask_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
}
void aom_comp_mask_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
- int width, int height, const uint8_t *ref,
+ int width, int height, int subpel_x_q3,
+ int subpel_y_q3, const uint8_t *ref,
int ref_stride, const uint8_t *mask,
int mask_stride, int invert_mask) {
int i, j;
- int stride = ref_stride << 3;
+ aom_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3, ref,
+ ref_stride);
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
if (!invert_mask)
- comp_pred[j] = AOM_BLEND_A64(mask[j], ref[(j << 3)], pred[j]);
+ comp_pred[j] = AOM_BLEND_A64(mask[j], comp_pred[j], pred[j]);
else
- comp_pred[j] = AOM_BLEND_A64(mask[j], pred[j], ref[(j << 3)]);
+ comp_pred[j] = AOM_BLEND_A64(mask[j], pred[j], comp_pred[j]);
}
comp_pred += width;
pred += width;
- ref += stride;
mask += mask_stride;
}
}
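
In the rewritten aom_comp_mask_upsampled_pred_c, the reference sample formerly read as ref[j << 3] is now the already-upsampled value in comp_pred[j]; the blend itself is untouched. Assuming the usual definition of AOM_BLEND_A64, a 6-bit alpha blend (m * v0 + (64 - m) * v1 + 32) >> 6, a quick worked example:

    #include <stdio.h>

    #define AOM_BLEND_A64_MAX_ALPHA 64
    #define AOM_BLEND_A64(m, v0, v1) \
      (((m) * (v0) + (AOM_BLEND_A64_MAX_ALPHA - (m)) * (v1) + 32) >> 6)

    int main(void) {
      const int mask = 48, upsampled_ref = 200, pred = 100; /* assumed values */
      printf("%d\n", AOM_BLEND_A64(mask, upsampled_ref, pred)); /* 175 */
      printf("%d\n", AOM_BLEND_A64(mask, pred, upsampled_ref)); /* inverted: 125 */
      return 0;
    }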
@@ -753,6 +843,13 @@ MASK_SUBPIX_VAR(128, 64)
MASK_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
+#if CONFIG_EXT_PARTITION_TYPES
+MASK_SUBPIX_VAR(4, 16)
+MASK_SUBPIX_VAR(16, 4)
+MASK_SUBPIX_VAR(8, 32)
+MASK_SUBPIX_VAR(32, 8)
+#endif
+
#if CONFIG_HIGHBITDEPTH
void aom_highbd_comp_mask_pred_c(uint16_t *comp_pred, const uint8_t *pred8,
int width, int height, const uint8_t *ref8,
@@ -775,26 +872,24 @@ void aom_highbd_comp_mask_pred_c(uint16_t *comp_pred, const uint8_t *pred8,
}
}
-void aom_highbd_comp_mask_upsampled_pred_c(uint16_t *comp_pred,
- const uint8_t *pred8, int width,
- int height, const uint8_t *ref8,
- int ref_stride, const uint8_t *mask,
- int mask_stride, int invert_mask) {
+void aom_highbd_comp_mask_upsampled_pred_c(
+ uint16_t *comp_pred, const uint8_t *pred8, int width, int height,
+ int subpel_x_q3, int subpel_y_q3, const uint8_t *ref8, int ref_stride,
+ const uint8_t *mask, int mask_stride, int invert_mask, int bd) {
int i, j;
- int stride = ref_stride << 3;
uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ aom_highbd_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3,
+ ref8, ref_stride, bd);
for (i = 0; i < height; ++i) {
for (j = 0; j < width; ++j) {
if (!invert_mask)
- comp_pred[j] = AOM_BLEND_A64(mask[j], ref[j << 3], pred[j]);
+ comp_pred[j] = AOM_BLEND_A64(mask[j], comp_pred[j], pred[j]);
else
- comp_pred[j] = AOM_BLEND_A64(mask[j], pred[j], ref[j << 3]);
+ comp_pred[j] = AOM_BLEND_A64(mask[j], pred[j], comp_pred[j]);
}
comp_pred += width;
pred += width;
- ref += stride;
mask += mask_stride;
}
}
@@ -884,6 +979,13 @@ HIGHBD_MASK_SUBPIX_VAR(64, 128)
HIGHBD_MASK_SUBPIX_VAR(128, 64)
HIGHBD_MASK_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
+
+#if CONFIG_EXT_PARTITION_TYPES
+HIGHBD_MASK_SUBPIX_VAR(4, 16)
+HIGHBD_MASK_SUBPIX_VAR(16, 4)
+HIGHBD_MASK_SUBPIX_VAR(8, 32)
+HIGHBD_MASK_SUBPIX_VAR(32, 8)
+#endif
#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_AV1 && CONFIG_EXT_INTER
@@ -983,6 +1085,17 @@ OBMC_VAR(128, 128)
OBMC_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
+#if CONFIG_EXT_PARTITION_TYPES
+OBMC_VAR(4, 16)
+OBMC_SUBPIX_VAR(4, 16)
+OBMC_VAR(16, 4)
+OBMC_SUBPIX_VAR(16, 4)
+OBMC_VAR(8, 32)
+OBMC_SUBPIX_VAR(8, 32)
+OBMC_VAR(32, 8)
+OBMC_SUBPIX_VAR(32, 8)
+#endif
+
#if CONFIG_HIGHBITDEPTH
static INLINE void highbd_obmc_variance64(const uint8_t *pre8, int pre_stride,
const int32_t *wsrc,
@@ -1164,5 +1277,16 @@ HIGHBD_OBMC_SUBPIX_VAR(128, 64)
HIGHBD_OBMC_VAR(128, 128)
HIGHBD_OBMC_SUBPIX_VAR(128, 128)
#endif // CONFIG_EXT_PARTITION
+
+#if CONFIG_EXT_PARTITION_TYPES
+HIGHBD_OBMC_VAR(4, 16)
+HIGHBD_OBMC_SUBPIX_VAR(4, 16)
+HIGHBD_OBMC_VAR(16, 4)
+HIGHBD_OBMC_SUBPIX_VAR(16, 4)
+HIGHBD_OBMC_VAR(8, 32)
+HIGHBD_OBMC_SUBPIX_VAR(8, 32)
+HIGHBD_OBMC_VAR(32, 8)
+HIGHBD_OBMC_SUBPIX_VAR(32, 8)
+#endif
#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_AV1 && CONFIG_MOTION_VAR
diff --git a/third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c b/third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c
index a337e618d..657dcfa22 100644
--- a/third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c
+++ b/third_party/aom/aom_dsp/x86/fwd_txfm_sse2.c
@@ -85,147 +85,6 @@ void aom_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
}
-void aom_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
- int stride) {
- __m128i in0, in1, in2, in3;
- __m128i u0, u1;
- __m128i sum = _mm_setzero_si128();
- int i;
-
- for (i = 0; i < 2; ++i) {
- in0 = _mm_load_si128((const __m128i *)(input + 0 * stride + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 0 * stride + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 1 * stride + 0));
- in3 = _mm_load_si128((const __m128i *)(input + 1 * stride + 8));
-
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 2 * stride + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 2 * stride + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 3 * stride + 0));
- in3 = _mm_load_si128((const __m128i *)(input + 3 * stride + 8));
-
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 4 * stride + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 4 * stride + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 5 * stride + 0));
- in3 = _mm_load_si128((const __m128i *)(input + 5 * stride + 8));
-
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 6 * stride + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 6 * stride + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 7 * stride + 0));
- in3 = _mm_load_si128((const __m128i *)(input + 7 * stride + 8));
-
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- sum = _mm_add_epi16(sum, u1);
- input += 8 * stride;
- }
-
- u0 = _mm_setzero_si128();
- in0 = _mm_unpacklo_epi16(u0, sum);
- in1 = _mm_unpackhi_epi16(u0, sum);
- in0 = _mm_srai_epi32(in0, 16);
- in1 = _mm_srai_epi32(in1, 16);
-
- sum = _mm_add_epi32(in0, in1);
- in0 = _mm_unpacklo_epi32(sum, u0);
- in1 = _mm_unpackhi_epi32(sum, u0);
-
- sum = _mm_add_epi32(in0, in1);
- in0 = _mm_srli_si128(sum, 8);
-
- in1 = _mm_add_epi32(sum, in0);
- in1 = _mm_srai_epi32(in1, 1);
- output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
-}
-
-void aom_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
- int stride) {
- __m128i in0, in1, in2, in3;
- __m128i u0, u1;
- __m128i sum = _mm_setzero_si128();
- int i;
-
- for (i = 0; i < 8; ++i) {
- in0 = _mm_load_si128((const __m128i *)(input + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 16));
- in3 = _mm_load_si128((const __m128i *)(input + 24));
-
- input += stride;
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 16));
- in3 = _mm_load_si128((const __m128i *)(input + 24));
-
- input += stride;
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 16));
- in3 = _mm_load_si128((const __m128i *)(input + 24));
-
- input += stride;
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- in0 = _mm_load_si128((const __m128i *)(input + 0));
- in1 = _mm_load_si128((const __m128i *)(input + 8));
- in2 = _mm_load_si128((const __m128i *)(input + 16));
- in3 = _mm_load_si128((const __m128i *)(input + 24));
-
- input += stride;
- sum = _mm_add_epi16(sum, u1);
- u0 = _mm_add_epi16(in0, in1);
- u1 = _mm_add_epi16(in2, in3);
- sum = _mm_add_epi16(sum, u0);
-
- sum = _mm_add_epi16(sum, u1);
- }
-
- u0 = _mm_setzero_si128();
- in0 = _mm_unpacklo_epi16(u0, sum);
- in1 = _mm_unpackhi_epi16(u0, sum);
- in0 = _mm_srai_epi32(in0, 16);
- in1 = _mm_srai_epi32(in1, 16);
-
- sum = _mm_add_epi32(in0, in1);
- in0 = _mm_unpacklo_epi32(sum, u0);
- in1 = _mm_unpackhi_epi32(sum, u0);
-
- sum = _mm_add_epi32(in0, in1);
- in0 = _mm_srli_si128(sum, 8);
-
- in1 = _mm_add_epi32(sum, in0);
- in1 = _mm_srai_epi32(in1, 3);
- output[0] = (tran_low_t)_mm_cvtsi128_si32(in1);
-}
-
#define DCT_HIGH_BIT_DEPTH 0
#define FDCT4x4_2D aom_fdct4x4_sse2
#define FDCT8x8_2D aom_fdct8x8_sse2
diff --git a/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_avx2.c b/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_avx2.c
new file mode 100644
index 000000000..2bbf15ef2
--- /dev/null
+++ b/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_avx2.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
+
+static INLINE void init_one_qp(const __m128i *p, __m256i *qp) {
+ const __m128i sign = _mm_srai_epi16(*p, 15);
+ const __m128i dc = _mm_unpacklo_epi16(*p, sign);
+ const __m128i ac = _mm_unpackhi_epi16(*p, sign);
+ *qp = _mm256_insertf128_si256(_mm256_castsi128_si256(dc), ac, 1);
+}
+
+static INLINE void update_qp(__m256i *qp) {
+ int i;
+ for (i = 0; i < 5; ++i) {
+ qp[i] = _mm256_permute2x128_si256(qp[i], qp[i], 0x11);
+ }
+}
+
+static INLINE void init_qp(const int16_t *zbin_ptr, const int16_t *round_ptr,
+ const int16_t *quant_ptr, const int16_t *dequant_ptr,
+ const int16_t *quant_shift_ptr, __m256i *qp) {
+ const __m128i zbin = _mm_loadu_si128((const __m128i *)zbin_ptr);
+ const __m128i round = _mm_loadu_si128((const __m128i *)round_ptr);
+ const __m128i quant = _mm_loadu_si128((const __m128i *)quant_ptr);
+ const __m128i dequant = _mm_loadu_si128((const __m128i *)dequant_ptr);
+ const __m128i quant_shift = _mm_loadu_si128((const __m128i *)quant_shift_ptr);
+ init_one_qp(&zbin, &qp[0]);
+ init_one_qp(&round, &qp[1]);
+ init_one_qp(&quant, &qp[2]);
+ init_one_qp(&dequant, &qp[3]);
+ init_one_qp(&quant_shift, &qp[4]);
+}
+
+// Note:
+// Each of the 8 int32_t lanes of *x is multiplied by the corresponding lane
+// of *y, the 64-bit products are right-shifted by 16, and the low 32 bits of
+// each result are saved in *p.
+static INLINE void mm256_mul_shift_epi32(const __m256i *x, const __m256i *y,
+ __m256i *p) {
+ __m256i prod_lo = _mm256_mul_epi32(*x, *y);
+ __m256i prod_hi = _mm256_srli_epi64(*x, 32);
+ const __m256i mult_hi = _mm256_srli_epi64(*y, 32);
+ prod_hi = _mm256_mul_epi32(prod_hi, mult_hi);
+
+ prod_lo = _mm256_srli_epi64(prod_lo, 16);
+ const __m256i mask = _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1);
+ prod_lo = _mm256_and_si256(prod_lo, mask);
+ prod_hi = _mm256_srli_epi64(prod_hi, 16);
+
+ prod_hi = _mm256_slli_epi64(prod_hi, 32);
+ *p = _mm256_or_si256(prod_lo, prod_hi);
+}
+
+static INLINE void quantize(const __m256i *qp, __m256i *c,
+ const int16_t *iscan_ptr, tran_low_t *qcoeff,
+ tran_low_t *dqcoeff, __m256i *eob) {
+ const __m256i abs = _mm256_abs_epi32(*c);
+ const __m256i flag1 = _mm256_cmpgt_epi32(abs, qp[0]);
+ __m256i flag2 = _mm256_cmpeq_epi32(abs, qp[0]);
+ flag2 = _mm256_or_si256(flag1, flag2);
+ const int32_t nzflag = _mm256_movemask_epi8(flag2);
+
+ if (LIKELY(nzflag)) {
+ __m256i q = _mm256_add_epi32(abs, qp[1]);
+ __m256i tmp;
+ mm256_mul_shift_epi32(&q, &qp[2], &tmp);
+ q = _mm256_add_epi32(tmp, q);
+
+ mm256_mul_shift_epi32(&q, &qp[4], &q);
+ __m256i dq = _mm256_mullo_epi32(q, qp[3]);
+
+ q = _mm256_sign_epi32(q, *c);
+ dq = _mm256_sign_epi32(dq, *c);
+ q = _mm256_and_si256(q, flag2);
+ dq = _mm256_and_si256(dq, flag2);
+
+ _mm256_storeu_si256((__m256i *)qcoeff, q);
+ _mm256_storeu_si256((__m256i *)dqcoeff, dq);
+
+ const __m128i isc = _mm_loadu_si128((const __m128i *)iscan_ptr);
+ const __m128i zr = _mm_setzero_si128();
+ const __m128i lo = _mm_unpacklo_epi16(isc, zr);
+ const __m128i hi = _mm_unpackhi_epi16(isc, zr);
+ const __m256i iscan =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
+
+ const __m256i zero = _mm256_setzero_si256();
+ const __m256i zc = _mm256_cmpeq_epi32(dq, zero);
+ const __m256i nz = _mm256_cmpeq_epi32(zc, zero);
+ __m256i cur_eob = _mm256_sub_epi32(iscan, nz);
+ cur_eob = _mm256_and_si256(cur_eob, nz);
+ *eob = _mm256_max_epi32(cur_eob, *eob);
+ } else {
+ const __m256i zero = _mm256_setzero_si256();
+ _mm256_storeu_si256((__m256i *)qcoeff, zero);
+ _mm256_storeu_si256((__m256i *)dqcoeff, zero);
+ }
+}
+
+void aom_highbd_quantize_b_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
+ (void)scan;
+ const unsigned int step = 8;
+
+ if (LIKELY(!skip_block)) {
+ __m256i qp[5], coeff;
+ init_qp(zbin_ptr, round_ptr, quant_ptr, dequant_ptr, quant_shift_ptr, qp);
+ coeff = _mm256_loadu_si256((const __m256i *)coeff_ptr);
+
+ __m256i eob = _mm256_setzero_si256();
+ quantize(qp, &coeff, iscan, qcoeff_ptr, dqcoeff_ptr, &eob);
+
+ coeff_ptr += step;
+ qcoeff_ptr += step;
+ dqcoeff_ptr += step;
+ iscan += step;
+ n_coeffs -= step;
+
+ update_qp(qp);
+
+ while (n_coeffs > 0) {
+ coeff = _mm256_loadu_si256((const __m256i *)coeff_ptr);
+ quantize(qp, &coeff, iscan, qcoeff_ptr, dqcoeff_ptr, &eob);
+
+ coeff_ptr += step;
+ qcoeff_ptr += step;
+ dqcoeff_ptr += step;
+ iscan += step;
+ n_coeffs -= step;
+ }
+ {
+ __m256i eob_s;
+ eob_s = _mm256_shuffle_epi32(eob, 0xe);
+ eob = _mm256_max_epi16(eob, eob_s);
+ eob_s = _mm256_shufflelo_epi16(eob, 0xe);
+ eob = _mm256_max_epi16(eob, eob_s);
+ eob_s = _mm256_shufflelo_epi16(eob, 1);
+ eob = _mm256_max_epi16(eob, eob_s);
+ const __m128i final_eob = _mm_max_epi16(_mm256_castsi256_si128(eob),
+ _mm256_extractf128_si256(eob, 1));
+ *eob_ptr = _mm_extract_epi16(final_eob, 0);
+ }
+ } else {
+ do {
+ const __m256i zero = _mm256_setzero_si256();
+ _mm256_storeu_si256((__m256i *)qcoeff_ptr, zero);
+ _mm256_storeu_si256((__m256i *)dqcoeff_ptr, zero);
+ qcoeff_ptr += step;
+ dqcoeff_ptr += step;
+ n_coeffs -= step;
+ } while (n_coeffs > 0);
+ *eob_ptr = 0;
+ }
+}
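
For reference, a minimal scalar sketch of the per-coefficient arithmetic that the AVX2 kernel above vectorizes eight coefficients at a time: zbin gate, rounding, two multiply-and-shift-by-16 steps (quant, then quant_shift), dequantization, and end-of-block tracking via the inverse scan. The standalone function and its signature below are illustrative only (group-of-eight handling, update_qp and the skip_block path are omitted), not part of the patch:

#include <stdint.h>
#include <stdlib.h>

/* Scalar model of one quantize_b pass over n coefficients. */
static void quantize_b_scalar(const int32_t *coeff, int n, const int16_t *zbin,
                              const int16_t *round, const int16_t *quant,
                              const int16_t *quant_shift,
                              const int16_t *dequant, const int16_t *iscan,
                              int32_t *qcoeff, int32_t *dqcoeff,
                              uint16_t *eob) {
  int i;
  *eob = 0;
  for (i = 0; i < n; i++) {
    const int rc = (i == 0) ? 0 : 1; /* DC uses entry 0, AC entries use 1. */
    const int32_t abs_c = abs(coeff[i]);
    qcoeff[i] = 0;
    dqcoeff[i] = 0;
    if (abs_c >= zbin[rc]) {
      const int64_t tmp1 = abs_c + round[rc];
      const int64_t tmp2 = ((tmp1 * quant[rc]) >> 16) + tmp1;
      const int32_t abs_q = (int32_t)((tmp2 * quant_shift[rc]) >> 16);
      qcoeff[i] = (coeff[i] < 0) ? -abs_q : abs_q;
      dqcoeff[i] = qcoeff[i] * dequant[rc];
      /* eob is 1 + the inverse-scan position of the last nonzero coeff. */
      if (abs_q && iscan[i] + 1 > *eob) *eob = (uint16_t)(iscan[i] + 1);
    }
  }
}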
diff --git a/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c b/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c
index 3ee24ab16..5570ca5b7 100644
--- a/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c
+++ b/third_party/aom/aom_dsp/x86/highbd_quantize_intrin_sse2.c
@@ -15,7 +15,6 @@
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
-#if CONFIG_HIGHBITDEPTH
void aom_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr,
@@ -152,4 +151,3 @@ void aom_highbd_quantize_b_32x32_sse2(
}
*eob_ptr = eob + 1;
}
-#endif
diff --git a/third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm b/third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm
index 0c7cb3998..9c3bbdd69 100644
--- a/third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/highbd_sad4d_sse2.asm
@@ -288,3 +288,9 @@ HIGH_SADNXN4D 8, 8
HIGH_SADNXN4D 8, 4
HIGH_SADNXN4D 4, 8
HIGH_SADNXN4D 4, 4
+%if CONFIG_EXT_PARTITION_TYPES
+HIGH_SADNXN4D 4, 16
+HIGH_SADNXN4D 16, 4
+HIGH_SADNXN4D 8, 32
+HIGH_SADNXN4D 32, 8
+%endif
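
Each HIGH_SADNXN4D expansion emits a kernel that scores one source block against four reference blocks in a single call; a scalar sketch of that contract for high-bit-depth samples follows (the function name and the explicit w/h parameters are illustrative, not the macro-generated signatures):

#include <stdint.h>
#include <stdlib.h>

/* Sum of absolute differences of one w x h block of 16-bit samples against
 * each of four reference blocks, one result per reference. */
static void highbd_sad_wxh_x4d(const uint16_t *src, int src_stride,
                               const uint16_t *const ref[4], int ref_stride,
                               int w, int h, uint32_t res[4]) {
  int k, i, j;
  for (k = 0; k < 4; k++) {
    uint32_t sad = 0;
    for (i = 0; i < h; i++)
      for (j = 0; j < w; j++)
        sad += (uint32_t)abs(src[i * src_stride + j] -
                             ref[k][i * ref_stride + j]);
    res[k] = sad;
  }
}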
diff --git a/third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm b/third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm
index 8427b891c..248b98ef5 100644
--- a/third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/highbd_sad_sse2.asm
@@ -227,6 +227,10 @@ HIGH_SAD32XN 16 ; highbd_sad32x16_sse2
HIGH_SAD32XN 64, 1 ; highbd_sad32x64_avg_sse2
HIGH_SAD32XN 32, 1 ; highbd_sad32x32_avg_sse2
HIGH_SAD32XN 16, 1 ; highbd_sad32x16_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+HIGH_SAD32XN 8 ; highbd_sad_32x8_sse2
+HIGH_SAD32XN 8, 1 ; highbd_sad_32x8_avg_sse2
+%endif
; unsigned int aom_highbd_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -295,7 +299,10 @@ HIGH_SAD16XN 8 ; highbd_sad16x8_sse2
HIGH_SAD16XN 32, 1 ; highbd_sad16x32_avg_sse2
HIGH_SAD16XN 16, 1 ; highbd_sad16x16_avg_sse2
HIGH_SAD16XN 8, 1 ; highbd_sad16x8_avg_sse2
-
+%if CONFIG_EXT_PARTITION_TYPES
+HIGH_SAD16XN 4 ; highbd_sad_16x4_sse2
+HIGH_SAD16XN 4, 1 ; highbd_sad_16x4_avg_sse2
+%endif
; unsigned int aom_highbd_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -364,3 +371,7 @@ HIGH_SAD8XN 4 ; highbd_sad8x4_sse2
HIGH_SAD8XN 16, 1 ; highbd_sad8x16_avg_sse2
HIGH_SAD8XN 8, 1 ; highbd_sad8x8_avg_sse2
HIGH_SAD8XN 4, 1 ; highbd_sad8x4_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+HIGH_SAD8XN 32 ; highbd_sad_8x32_sse2
+HIGH_SAD8XN 32, 1 ; highbd_sad_8x32_avg_sse2
+%endif
diff --git a/third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm b/third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
index 797e9c1d4..ee19796e3 100644
--- a/third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm
@@ -75,7 +75,7 @@ SECTION .text
paddd m6, m4
mov r1, ssem ; r1 = unsigned int *sse
movd [r1], m7 ; store sse
- movd rax, m6 ; store sum as return value
+ movd eax, m6 ; store sum as return value
%endif
RET
%endmacro
diff --git a/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c b/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
index 29f96ce24..93923ffb0 100644
--- a/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
+++ b/third_party/aom/aom_dsp/x86/highbd_variance_sse2.c
@@ -9,6 +9,7 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+#include <assert.h>
#include <emmintrin.h> // SSE2
#include "./aom_config.h"
@@ -16,6 +17,9 @@
#include "aom_ports/mem.h"
+#include "./av1_rtcd.h"
+#include "av1/common/filter.h"
+
typedef uint32_t (*high_variance_fn_t)(const uint16_t *src, int src_stride,
const uint16_t *ref, int ref_stride,
uint32_t *sse, int *sum);
@@ -181,6 +185,11 @@ VAR_FN(16, 16, 16, 8);
VAR_FN(16, 8, 8, 7);
VAR_FN(8, 16, 8, 7);
VAR_FN(8, 8, 8, 6);
+#if CONFIG_EXT_PARTITION_TYPES
+VAR_FN(16, 4, 16, 6);
+VAR_FN(8, 32, 8, 8);
+VAR_FN(32, 8, 16, 8);
+#endif
#undef VAR_FN
@@ -387,6 +396,7 @@ DECLS(sse2);
return (var >= 0) ? (uint32_t)var : 0; \
}
+#if CONFIG_EXT_PARTITION_TYPES
#define FNS(opt) \
FN(64, 64, 16, 6, 6, opt, (int64_t)); \
FN(64, 32, 16, 6, 5, opt, (int64_t)); \
@@ -398,7 +408,24 @@ DECLS(sse2);
FN(16, 8, 16, 4, 3, opt, (int64_t)); \
FN(8, 16, 8, 3, 4, opt, (int64_t)); \
FN(8, 8, 8, 3, 3, opt, (int64_t)); \
- FN(8, 4, 8, 3, 2, opt, (int64_t));
+ FN(8, 4, 8, 3, 2, opt, (int64_t)); \
+ FN(16, 4, 16, 4, 2, opt, (int64_t)); \
+ FN(8, 32, 8, 3, 5, opt, (int64_t)); \
+ FN(32, 8, 16, 5, 3, opt, (int64_t))
+#else
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int64_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int64_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int64_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int64_t))
+#endif
FNS(sse2);
@@ -412,9 +439,9 @@ FNS(sse2);
const uint16_t *dst, ptrdiff_t dst_stride, const uint16_t *sec, \
ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \
void *unused);
-#define DECLS(opt1) \
- DECL(16, opt1) \
- DECL(8, opt1)
+#define DECLS(opt) \
+ DECL(16, opt) \
+ DECL(8, opt)
DECLS(sse2);
#undef DECL
@@ -546,18 +573,36 @@ DECLS(sse2);
return (var >= 0) ? (uint32_t)var : 0; \
}
-#define FNS(opt1) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (int64_t)); \
- FN(8, 16, 8, 4, 3, opt1, (int64_t)); \
- FN(8, 8, 8, 3, 3, opt1, (int64_t)); \
- FN(8, 4, 8, 3, 2, opt1, (int64_t));
+#if CONFIG_EXT_PARTITION_TYPES
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int64_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int64_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int64_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int64_t)); \
+ FN(16, 4, 16, 4, 2, opt, (int64_t)); \
+ FN(8, 32, 8, 3, 5, opt, (int64_t)); \
+ FN(32, 8, 16, 5, 3, opt, (int64_t));
+#else
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int64_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int64_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int64_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int64_t));
+#endif
FNS(sse2);
@@ -565,131 +610,94 @@ FNS(sse2);
#undef FN
void aom_highbd_upsampled_pred_sse2(uint16_t *comp_pred, int width, int height,
- const uint8_t *ref8, int ref_stride) {
- int i, j;
- int stride = ref_stride << 3;
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-
- if (width >= 8) {
- // read 8 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
- __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
- __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
- __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
- __m128i s4 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 32));
- __m128i s5 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 40));
- __m128i s6 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 48));
- __m128i s7 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 56));
- __m128i t0, t1, t2, t3;
-
- t0 = _mm_unpacklo_epi16(s0, s1);
- t1 = _mm_unpacklo_epi16(s2, s3);
- t2 = _mm_unpacklo_epi16(s4, s5);
- t3 = _mm_unpacklo_epi16(s6, s7);
- t0 = _mm_unpacklo_epi32(t0, t1);
- t2 = _mm_unpacklo_epi32(t2, t3);
- t0 = _mm_unpacklo_epi64(t0, t2);
-
- _mm_storeu_si128((__m128i *)(comp_pred), t0);
+ int subpel_x_q3, int subpel_y_q3,
+ const uint8_t *ref8, int ref_stride,
+ int bd) {
+ if (!subpel_x_q3 && !subpel_y_q3) {
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+ if (width >= 8) {
+ int i;
+ assert(!(width & 7));
+ /*Read 8 pixels one row at a time.*/
+ for (i = 0; i < height; i++) {
+ int j;
+ for (j = 0; j < width; j += 8) {
+ __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
+ _mm_storeu_si128((__m128i *)comp_pred, s0);
+ comp_pred += 8;
+ ref += 8;
+ }
+ ref += ref_stride - width;
+ }
+ } else {
+ int i;
+ assert(!(width & 3));
+ /*Read 4 pixels two rows at a time.*/
+ for (i = 0; i < height; i += 2) {
+ __m128i s0 = _mm_loadl_epi64((const __m128i *)ref);
+ __m128i s1 = _mm_loadl_epi64((const __m128i *)(ref + ref_stride));
+ __m128i t0 = _mm_unpacklo_epi64(s0, s1);
+ _mm_storeu_si128((__m128i *)comp_pred, t0);
comp_pred += 8;
- ref += 64; // 8 * 8;
+ ref += 2 * ref_stride;
}
- ref += stride - (width << 3);
}
} else {
- // read 4 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
- __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
- __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
- __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
- __m128i t0, t1;
-
- t0 = _mm_unpacklo_epi16(s0, s1);
- t1 = _mm_unpacklo_epi16(s2, s3);
- t0 = _mm_unpacklo_epi32(t0, t1);
-
- _mm_storel_epi64((__m128i *)(comp_pred), t0);
- comp_pred += 4;
- ref += 4 * 8;
- }
- ref += stride - (width << 3);
+ InterpFilterParams filter;
+ filter = av1_get_interp_filter_params(EIGHTTAP_REGULAR);
+ if (!subpel_y_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ aom_highbd_convolve8_horiz(ref8, ref_stride,
+ CONVERT_TO_BYTEPTR(comp_pred), width, kernel,
+ 16, NULL, -1, width, height, bd);
+ } else if (!subpel_x_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ aom_highbd_convolve8_vert(ref8, ref_stride, CONVERT_TO_BYTEPTR(comp_pred),
+ width, NULL, -1, kernel, 16, width, height, bd);
+ } else {
+ DECLARE_ALIGNED(16, uint16_t,
+ temp[((MAX_SB_SIZE + 16) + 16) * MAX_SB_SIZE]);
+ const int16_t *kernel_x;
+ const int16_t *kernel_y;
+ int intermediate_height;
+ kernel_x = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ kernel_y = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ intermediate_height =
+ (((height - 1) * 8 + subpel_y_q3) >> 3) + filter.taps;
+ assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
+ aom_highbd_convolve8_horiz(ref8 - ref_stride * ((filter.taps >> 1) - 1),
+ ref_stride, CONVERT_TO_BYTEPTR(temp),
+ MAX_SB_SIZE, kernel_x, 16, NULL, -1, width,
+ intermediate_height, bd);
+ aom_highbd_convolve8_vert(
+ CONVERT_TO_BYTEPTR(temp + MAX_SB_SIZE * ((filter.taps >> 1) - 1)),
+ MAX_SB_SIZE, CONVERT_TO_BYTEPTR(comp_pred), width, NULL, -1, kernel_y,
+ 16, width, height, bd);
}
}
}
void aom_highbd_comp_avg_upsampled_pred_sse2(uint16_t *comp_pred,
const uint8_t *pred8, int width,
- int height, const uint8_t *ref8,
- int ref_stride) {
- const __m128i one = _mm_set1_epi16(1);
- int i, j;
- int stride = ref_stride << 3;
+ int height, int subpel_x_q3,
+ int subpel_y_q3,
+ const uint8_t *ref8,
+ int ref_stride, int bd) {
uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
-
- if (width >= 8) {
- // read 8 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
- __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
- __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
- __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
- __m128i s4 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 32));
- __m128i s5 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 40));
- __m128i s6 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 48));
- __m128i s7 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 56));
- __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
- __m128i t0, t1, t2, t3;
-
- t0 = _mm_unpacklo_epi16(s0, s1);
- t1 = _mm_unpacklo_epi16(s2, s3);
- t2 = _mm_unpacklo_epi16(s4, s5);
- t3 = _mm_unpacklo_epi16(s6, s7);
- t0 = _mm_unpacklo_epi32(t0, t1);
- t2 = _mm_unpacklo_epi32(t2, t3);
- t0 = _mm_unpacklo_epi64(t0, t2);
-
- p0 = _mm_adds_epu16(t0, p0);
- p0 = _mm_adds_epu16(p0, one);
- p0 = _mm_srli_epi16(p0, 1);
-
- _mm_storeu_si128((__m128i *)(comp_pred), p0);
- comp_pred += 8;
- pred += 8;
- ref += 8 * 8;
- }
- ref += stride - (width << 3);
- }
- } else {
- // read 4 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
- __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 8));
- __m128i s2 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 16));
- __m128i s3 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + 24));
- __m128i p0 = _mm_loadl_epi64((const __m128i *)pred);
- __m128i t0, t1;
-
- t0 = _mm_unpacklo_epi16(s0, s1);
- t1 = _mm_unpacklo_epi16(s2, s3);
- t0 = _mm_unpacklo_epi32(t0, t1);
-
- p0 = _mm_adds_epu16(t0, p0);
- p0 = _mm_adds_epu16(p0, one);
- p0 = _mm_srli_epi16(p0, 1);
-
- _mm_storel_epi64((__m128i *)(comp_pred), p0);
- comp_pred += 4;
- pred += 4;
- ref += 4 * 8;
- }
- ref += stride - (width << 3);
- }
+ int n;
+ int i;
+ aom_highbd_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3,
+ ref8, ref_stride, bd);
+ /*The total number of pixels must be a multiple of 8 (e.g., 4x4).*/
+ assert(!(width * height & 7));
+ n = width * height >> 3;
+ for (i = 0; i < n; i++) {
+ __m128i s0 = _mm_loadu_si128((const __m128i *)comp_pred);
+ __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
+ _mm_storeu_si128((__m128i *)comp_pred, _mm_avg_epu16(s0, p0));
+ comp_pred += 8;
+ pred += 8;
}
}
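
The rewritten comp-avg path above blends with _mm_avg_epu16, which averages with round-to-nearest-up; a scalar sketch of the same per-sample step, assuming only that width * height gives the number of samples to blend (the helper name is illustrative):

#include <stdint.h>

/* Round-to-nearest average of the upsampled prediction and the existing
 * prediction, one 16-bit sample at a time: (a + b + 1) >> 1, matching the
 * behaviour of _mm_avg_epu16 in the vectorized loop above. */
static void highbd_comp_avg_scalar(uint16_t *comp_pred, const uint16_t *pred,
                                   int width, int height) {
  int i;
  for (i = 0; i < width * height; i++)
    comp_pred[i] = (uint16_t)((comp_pred[i] + pred[i] + 1) >> 1);
}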
diff --git a/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c b/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
index be200df4c..86ce928b7 100644
--- a/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
+++ b/third_party/aom/aom_dsp/x86/inv_txfm_sse2.c
@@ -3498,237 +3498,3 @@ void idct32_8col(__m128i *in0, __m128i *in1) {
in1[14] = _mm_sub_epi16(stp1_1, stp1_30);
in1[15] = _mm_sub_epi16(stp1_0, stp1_31);
}
-
-#if CONFIG_HIGHBITDEPTH
-static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
- __m128i ubounded, retval;
- const __m128i zero = _mm_set1_epi16(0);
- const __m128i one = _mm_set1_epi16(1);
- const __m128i max = _mm_subs_epi16(_mm_slli_epi16(one, bd), one);
- ubounded = _mm_cmpgt_epi16(value, max);
- retval = _mm_andnot_si128(ubounded, value);
- ubounded = _mm_and_si128(ubounded, max);
- retval = _mm_or_si128(retval, ubounded);
- retval = _mm_and_si128(retval, _mm_cmpgt_epi16(retval, zero));
- return retval;
-}
-
-void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
- tran_low_t out[4 * 4];
- tran_low_t *outptr = out;
- int i, j;
- __m128i inptr[4];
- __m128i sign_bits[2];
- __m128i temp_mm, min_input, max_input;
- int test;
- uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- int optimised_cols = 0;
- const __m128i zero = _mm_set1_epi16(0);
- const __m128i eight = _mm_set1_epi16(8);
- const __m128i max = _mm_set1_epi16(12043);
- const __m128i min = _mm_set1_epi16(-12043);
- // Load input into __m128i
- inptr[0] = _mm_loadu_si128((const __m128i *)input);
- inptr[1] = _mm_loadu_si128((const __m128i *)(input + 4));
- inptr[2] = _mm_loadu_si128((const __m128i *)(input + 8));
- inptr[3] = _mm_loadu_si128((const __m128i *)(input + 12));
-
- // Pack to 16 bits
- inptr[0] = _mm_packs_epi32(inptr[0], inptr[1]);
- inptr[1] = _mm_packs_epi32(inptr[2], inptr[3]);
-
- max_input = _mm_max_epi16(inptr[0], inptr[1]);
- min_input = _mm_min_epi16(inptr[0], inptr[1]);
- max_input = _mm_cmpgt_epi16(max_input, max);
- min_input = _mm_cmplt_epi16(min_input, min);
- temp_mm = _mm_or_si128(max_input, min_input);
- test = _mm_movemask_epi8(temp_mm);
-
- if (!test) {
- // Do the row transform
- aom_idct4_sse2(inptr);
-
- // Check the min & max values
- max_input = _mm_max_epi16(inptr[0], inptr[1]);
- min_input = _mm_min_epi16(inptr[0], inptr[1]);
- max_input = _mm_cmpgt_epi16(max_input, max);
- min_input = _mm_cmplt_epi16(min_input, min);
- temp_mm = _mm_or_si128(max_input, min_input);
- test = _mm_movemask_epi8(temp_mm);
-
- if (test) {
- array_transpose_4x4(inptr);
- sign_bits[0] = _mm_cmplt_epi16(inptr[0], zero);
- sign_bits[1] = _mm_cmplt_epi16(inptr[1], zero);
- inptr[3] = _mm_unpackhi_epi16(inptr[1], sign_bits[1]);
- inptr[2] = _mm_unpacklo_epi16(inptr[1], sign_bits[1]);
- inptr[1] = _mm_unpackhi_epi16(inptr[0], sign_bits[0]);
- inptr[0] = _mm_unpacklo_epi16(inptr[0], sign_bits[0]);
- _mm_storeu_si128((__m128i *)outptr, inptr[0]);
- _mm_storeu_si128((__m128i *)(outptr + 4), inptr[1]);
- _mm_storeu_si128((__m128i *)(outptr + 8), inptr[2]);
- _mm_storeu_si128((__m128i *)(outptr + 12), inptr[3]);
- } else {
- // Set to use the optimised transform for the column
- optimised_cols = 1;
- }
- } else {
- // Run the un-optimised row transform
- for (i = 0; i < 4; ++i) {
- aom_highbd_idct4_c(input, outptr, bd);
- input += 4;
- outptr += 4;
- }
- }
-
- if (optimised_cols) {
- aom_idct4_sse2(inptr);
-
- // Final round and shift
- inptr[0] = _mm_add_epi16(inptr[0], eight);
- inptr[1] = _mm_add_epi16(inptr[1], eight);
-
- inptr[0] = _mm_srai_epi16(inptr[0], 4);
- inptr[1] = _mm_srai_epi16(inptr[1], 4);
-
- // Reconstruction and Store
- {
- __m128i d0 = _mm_loadl_epi64((const __m128i *)dest);
- __m128i d2 = _mm_loadl_epi64((const __m128i *)(dest + stride * 2));
- d0 = _mm_unpacklo_epi64(
- d0, _mm_loadl_epi64((const __m128i *)(dest + stride)));
- d2 = _mm_unpacklo_epi64(
- d2, _mm_loadl_epi64((const __m128i *)(dest + stride * 3)));
- d0 = clamp_high_sse2(_mm_adds_epi16(d0, inptr[0]), bd);
- d2 = clamp_high_sse2(_mm_adds_epi16(d2, inptr[1]), bd);
- // store input0
- _mm_storel_epi64((__m128i *)dest, d0);
- // store input1
- d0 = _mm_srli_si128(d0, 8);
- _mm_storel_epi64((__m128i *)(dest + stride), d0);
- // store input2
- _mm_storel_epi64((__m128i *)(dest + stride * 2), d2);
- // store input3
- d2 = _mm_srli_si128(d2, 8);
- _mm_storel_epi64((__m128i *)(dest + stride * 3), d2);
- }
- } else {
- // Run the un-optimised column transform
- tran_low_t temp_in[4], temp_out[4];
- // Columns
- for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
- aom_highbd_idct4_c(temp_in, temp_out, bd);
- for (j = 0; j < 4; ++j) {
- dest[j * stride + i] = highbd_clip_pixel_add(
- dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
- }
- }
- }
-}
-
-void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
- int stride, int bd) {
- tran_low_t out[8 * 8] = { 0 };
- tran_low_t *outptr = out;
- int i, j, test;
- __m128i inptr[8];
- __m128i min_input, max_input, temp1, temp2, sign_bits;
- uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- const __m128i zero = _mm_set1_epi16(0);
- const __m128i sixteen = _mm_set1_epi16(16);
- const __m128i max = _mm_set1_epi16(6201);
- const __m128i min = _mm_set1_epi16(-6201);
- int optimised_cols = 0;
-
- // Load input into __m128i & pack to 16 bits
- for (i = 0; i < 8; i++) {
- temp1 = _mm_loadu_si128((const __m128i *)(input + 8 * i));
- temp2 = _mm_loadu_si128((const __m128i *)(input + 8 * i + 4));
- inptr[i] = _mm_packs_epi32(temp1, temp2);
- }
-
- // Find the min & max for the row transform
- // only first 4 row has non-zero coefs
- max_input = _mm_max_epi16(inptr[0], inptr[1]);
- min_input = _mm_min_epi16(inptr[0], inptr[1]);
- for (i = 2; i < 4; i++) {
- max_input = _mm_max_epi16(max_input, inptr[i]);
- min_input = _mm_min_epi16(min_input, inptr[i]);
- }
- max_input = _mm_cmpgt_epi16(max_input, max);
- min_input = _mm_cmplt_epi16(min_input, min);
- temp1 = _mm_or_si128(max_input, min_input);
- test = _mm_movemask_epi8(temp1);
-
- if (!test) {
- // Do the row transform
- aom_idct8_sse2(inptr);
-
- // Find the min & max for the column transform
- // N.B. Only first 4 cols contain non-zero coeffs
- max_input = _mm_max_epi16(inptr[0], inptr[1]);
- min_input = _mm_min_epi16(inptr[0], inptr[1]);
- for (i = 2; i < 8; i++) {
- max_input = _mm_max_epi16(max_input, inptr[i]);
- min_input = _mm_min_epi16(min_input, inptr[i]);
- }
- max_input = _mm_cmpgt_epi16(max_input, max);
- min_input = _mm_cmplt_epi16(min_input, min);
- temp1 = _mm_or_si128(max_input, min_input);
- test = _mm_movemask_epi8(temp1);
-
- if (test) {
- // Use fact only first 4 rows contain non-zero coeffs
- array_transpose_4X8(inptr, inptr);
- for (i = 0; i < 4; i++) {
- sign_bits = _mm_cmplt_epi16(inptr[i], zero);
- temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits);
- temp2 = _mm_unpacklo_epi16(inptr[i], sign_bits);
- _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i + 1)), temp1);
- _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i)), temp2);
- }
- } else {
- // Set to use the optimised transform for the column
- optimised_cols = 1;
- }
- } else {
- // Run the un-optimised row transform
- for (i = 0; i < 4; ++i) {
- aom_highbd_idct8_c(input, outptr, bd);
- input += 8;
- outptr += 8;
- }
- }
-
- if (optimised_cols) {
- aom_idct8_sse2(inptr);
-
- // Final round & shift and Reconstruction and Store
- {
- __m128i d[8];
- for (i = 0; i < 8; i++) {
- inptr[i] = _mm_add_epi16(inptr[i], sixteen);
- d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
- inptr[i] = _mm_srai_epi16(inptr[i], 5);
- d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
- // Store
- _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
- }
- }
- } else {
- // Run the un-optimised column transform
- tran_low_t temp_in[8], temp_out[8];
- for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
- aom_highbd_idct8_c(temp_in, temp_out, bd);
- for (j = 0; j < 8; ++j) {
- dest[j * stride + i] = highbd_clip_pixel_add(
- dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
- }
- }
- }
-}
-
-#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c b/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
index 9d16a3e84..6a73ac460 100644
--- a/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_sad_intrin_ssse3.c
@@ -93,6 +93,12 @@ MASKSAD8XN_SSSE3(8)
MASKSAD8XN_SSSE3(4)
MASKSAD4XN_SSSE3(8)
MASKSAD4XN_SSSE3(4)
+#if CONFIG_EXT_PARTITION_TYPES
+MASKSAD4XN_SSSE3(16)
+MASKSADMXN_SSSE3(16, 4)
+MASKSAD8XN_SSSE3(32)
+MASKSADMXN_SSSE3(32, 8)
+#endif
static INLINE unsigned int masked_sad_ssse3(const uint8_t *src_ptr,
int src_stride,
@@ -283,6 +289,12 @@ HIGHBD_MASKSADMXN_SSSE3(8, 8)
HIGHBD_MASKSADMXN_SSSE3(8, 4)
HIGHBD_MASKSAD4XN_SSSE3(8)
HIGHBD_MASKSAD4XN_SSSE3(4)
+#if CONFIG_EXT_PARTITION_TYPES
+HIGHBD_MASKSAD4XN_SSSE3(16)
+HIGHBD_MASKSADMXN_SSSE3(16, 4)
+HIGHBD_MASKSADMXN_SSSE3(8, 32)
+HIGHBD_MASKSADMXN_SSSE3(32, 8)
+#endif
static INLINE unsigned int highbd_masked_sad_ssse3(
const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
diff --git a/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c b/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
index be9d437d2..24e7ed1c6 100644
--- a/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/third_party/aom/aom_dsp/x86/masked_variance_intrin_ssse3.c
@@ -126,6 +126,12 @@ MASK_SUBPIX_VAR8XH_SSSE3(8)
MASK_SUBPIX_VAR8XH_SSSE3(4)
MASK_SUBPIX_VAR4XH_SSSE3(8)
MASK_SUBPIX_VAR4XH_SSSE3(4)
+#if CONFIG_EXT_PARTITION_TYPES
+MASK_SUBPIX_VAR4XH_SSSE3(16)
+MASK_SUBPIX_VAR_SSSE3(16, 4)
+MASK_SUBPIX_VAR8XH_SSSE3(32)
+MASK_SUBPIX_VAR_SSSE3(32, 8)
+#endif
static INLINE __m128i filter_block(const __m128i a, const __m128i b,
const __m128i filter) {
@@ -564,6 +570,7 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
uint64_t sse64; \
int sum; \
+ int64_t var; \
uint16_t temp[(H + 1) * W]; \
const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
@@ -579,7 +586,8 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
msk_stride, W, H, &sse64, &sum); \
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 4); \
sum = ROUND_POWER_OF_TWO(sum, 2); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
} \
unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3( \
const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
@@ -587,6 +595,7 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
uint64_t sse64; \
int sum; \
+ int64_t var; \
uint16_t temp[(H + 1) * W]; \
const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
@@ -602,7 +611,8 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
msk_stride, W, H, &sse64, &sum); \
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 8); \
sum = ROUND_POWER_OF_TWO(sum, 4); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
}
#define HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(H) \
@@ -634,6 +644,7 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
int sse_; \
int sum; \
+ int64_t var; \
uint16_t temp[(H + 1) * 4]; \
const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
@@ -649,7 +660,8 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
msk_stride, H, &sse_, &sum); \
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse_, 4); \
sum = ROUND_POWER_OF_TWO(sum, 2); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (4 * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
} \
unsigned int aom_highbd_12_masked_sub_pixel_variance4x##H##_ssse3( \
const uint8_t *src8, int src_stride, int xoffset, int yoffset, \
@@ -657,6 +669,7 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
const uint8_t *msk, int msk_stride, int invert_mask, uint32_t *sse) { \
int sse_; \
int sum; \
+ int64_t var; \
uint16_t temp[(H + 1) * 4]; \
const uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
const uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
@@ -672,7 +685,8 @@ static void highbd_masked_variance4xh(const uint16_t *src_ptr, int src_stride,
msk_stride, H, &sse_, &sum); \
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse_, 8); \
sum = ROUND_POWER_OF_TWO(sum, 4); \
- return *sse - (uint32_t)(((int64_t)sum * sum) / (4 * H)); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (4 * H)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
}
#if CONFIG_EXT_PARTITION
@@ -693,6 +707,12 @@ HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 8)
HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 4)
HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(8)
HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(4)
+#if CONFIG_EXT_PARTITION_TYPES
+HIGHBD_MASK_SUBPIX_VAR4XH_SSSE3(16)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(16, 4)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(8, 32)
+HIGHBD_MASK_SUBPIX_VAR_SSSE3(32, 8)
+#endif
static INLINE __m128i highbd_filter_block(const __m128i a, const __m128i b,
const __m128i filter) {
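
The recurring change in this file widens the sum-squared term to 64 bits and clamps the result at zero before truncating to the 32-bit return type; a minimal sketch of that final variance step (the helper name is illustrative):

#include <stdint.h>

/* Final step once sse and sum have been accumulated over a w x h block:
 * variance = sse - sum^2 / N, computed in 64-bit arithmetic and clamped at
 * zero so rounding of sse and sum cannot make the unsigned result wrap. */
static uint32_t finalize_variance(uint64_t sse, int sum, int w, int h) {
  const int64_t var = (int64_t)sse - ((int64_t)sum * sum) / (w * h);
  return (var >= 0) ? (uint32_t)var : 0;
}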
diff --git a/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c b/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
index 21632644f..3fd6f71e5 100644
--- a/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
+++ b/third_party/aom/aom_dsp/x86/obmc_sad_sse4.c
@@ -137,6 +137,12 @@ OBMCSADWXH(8, 8)
OBMCSADWXH(8, 4)
OBMCSADWXH(4, 8)
OBMCSADWXH(4, 4)
+#if CONFIG_EXT_PARTITION_TYPES
+OBMCSADWXH(4, 16)
+OBMCSADWXH(16, 4)
+OBMCSADWXH(8, 32)
+OBMCSADWXH(32, 8)
+#endif
////////////////////////////////////////////////////////////////////////////////
// High bit-depth
@@ -260,4 +266,10 @@ HBD_OBMCSADWXH(8, 8)
HBD_OBMCSADWXH(8, 4)
HBD_OBMCSADWXH(4, 8)
HBD_OBMCSADWXH(4, 4)
+#if CONFIG_EXT_PARTITION_TYPES
+HBD_OBMCSADWXH(4, 16)
+HBD_OBMCSADWXH(16, 4)
+HBD_OBMCSADWXH(8, 32)
+HBD_OBMCSADWXH(32, 8)
+#endif
#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c b/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
index 1797ded80..44cfa8e28 100644
--- a/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
+++ b/third_party/aom/aom_dsp/x86/obmc_variance_sse4.c
@@ -146,6 +146,12 @@ OBMCVARWXH(8, 8)
OBMCVARWXH(8, 4)
OBMCVARWXH(4, 8)
OBMCVARWXH(4, 4)
+#if CONFIG_EXT_PARTITION_TYPES
+OBMCVARWXH(4, 16)
+OBMCVARWXH(16, 4)
+OBMCVARWXH(8, 32)
+OBMCVARWXH(32, 8)
+#endif
////////////////////////////////////////////////////////////////////////////////
// High bit-depth
@@ -353,4 +359,10 @@ HBD_OBMCVARWXH(8, 8)
HBD_OBMCVARWXH(8, 4)
HBD_OBMCVARWXH(4, 8)
HBD_OBMCVARWXH(4, 4)
+#if CONFIG_EXT_PARTITION_TYPES
+HBD_OBMCVARWXH(4, 16)
+HBD_OBMCVARWXH(16, 4)
+HBD_OBMCVARWXH(8, 32)
+HBD_OBMCVARWXH(32, 8)
+#endif
#endif // CONFIG_HIGHBITDEPTH
diff --git a/third_party/aom/aom_dsp/x86/sad4d_sse2.asm b/third_party/aom/aom_dsp/x86/sad4d_sse2.asm
index 8f04ef2f3..4570e2ce6 100644
--- a/third_party/aom/aom_dsp/x86/sad4d_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/sad4d_sse2.asm
@@ -251,3 +251,9 @@ SADNXN4D 8, 8
SADNXN4D 8, 4
SADNXN4D 4, 8
SADNXN4D 4, 4
+%if CONFIG_EXT_PARTITION_TYPES
+SADNXN4D 4, 16
+SADNXN4D 16, 4
+SADNXN4D 8, 32
+SADNXN4D 32, 8
+%endif
diff --git a/third_party/aom/aom_dsp/x86/sad_highbd_avx2.c b/third_party/aom/aom_dsp/x86/sad_highbd_avx2.c
index 196394379..e8dd87a26 100644
--- a/third_party/aom/aom_dsp/x86/sad_highbd_avx2.c
+++ b/third_party/aom/aom_dsp/x86/sad_highbd_avx2.c
@@ -704,7 +704,12 @@ unsigned int aom_highbd_sad128x128_avg_avx2(const uint8_t *src, int src_stride,
static INLINE void get_4d_sad_from_mm256_epi32(const __m256i *v,
uint32_t *res) {
__m256i u0, u1, u2, u3;
+#if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+ const __m256i mask = _mm256_setr_epi32(UINT32_MAX, 0, UINT32_MAX, 0,
+ UINT32_MAX, 0, UINT32_MAX, 0);
+#else
const __m256i mask = _mm256_set1_epi64x(UINT32_MAX);
+#endif
__m128i sad;
// 8 32-bit summation
diff --git a/third_party/aom/aom_dsp/x86/sad_sse2.asm b/third_party/aom/aom_dsp/x86/sad_sse2.asm
index e45457a57..88d427077 100644
--- a/third_party/aom/aom_dsp/x86/sad_sse2.asm
+++ b/third_party/aom/aom_dsp/x86/sad_sse2.asm
@@ -208,6 +208,10 @@ SAD32XN 16 ; sad32x16_sse2
SAD32XN 64, 1 ; sad32x64_avg_sse2
SAD32XN 32, 1 ; sad32x32_avg_sse2
SAD32XN 16, 1 ; sad32x16_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+SAD32XN 8 ; sad_32x8_sse2
+SAD32XN 8, 1 ; sad_32x8_avg_sse2
+%endif
; unsigned int aom_sad16x{8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -254,6 +258,10 @@ SAD16XN 8 ; sad16x8_sse2
SAD16XN 32, 1 ; sad16x32_avg_sse2
SAD16XN 16, 1 ; sad16x16_avg_sse2
SAD16XN 8, 1 ; sad16x8_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+SAD16XN 4 ; sad_16x4_sse2
+SAD16XN 4, 1 ; sad_16x4_avg_sse2
+%endif
; unsigned int aom_sad8x{8,16}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -298,6 +306,10 @@ SAD8XN 4 ; sad8x4_sse2
SAD8XN 16, 1 ; sad8x16_avg_sse2
SAD8XN 8, 1 ; sad8x8_avg_sse2
SAD8XN 4, 1 ; sad8x4_avg_sse2
+%if CONFIG_EXT_PARTITION_TYPES
+SAD8XN 32 ; sad_8x32_sse2
+SAD8XN 32, 1 ; sad_8x32_avg_sse2
+%endif
; unsigned int aom_sad4x{4, 8}_sse2(uint8_t *src, int src_stride,
; uint8_t *ref, int ref_stride);
@@ -343,3 +355,7 @@ SAD4XN 8 ; sad4x8_sse
SAD4XN 4 ; sad4x4_sse
SAD4XN 8, 1 ; sad4x8_avg_sse
SAD4XN 4, 1 ; sad4x4_avg_sse
+%if CONFIG_EXT_PARTITION_TYPES
+SAD4XN 16 ; sad_4x16_sse2
+SAD4XN 16, 1 ; sad_4x16_avg_sse2
+%endif
diff --git a/third_party/aom/aom_dsp/x86/variance_sse2.c b/third_party/aom/aom_dsp/x86/variance_sse2.c
index d9563aa7f..918844185 100644
--- a/third_party/aom/aom_dsp/x86/variance_sse2.c
+++ b/third_party/aom/aom_dsp/x86/variance_sse2.c
@@ -17,6 +17,9 @@
#include "aom_ports/mem.h"
+#include "./av1_rtcd.h"
+#include "av1/common/filter.h"
+
typedef void (*getNxMvar_fn_t)(const unsigned char *src, int src_stride,
const unsigned char *ref, int ref_stride,
unsigned int *sse, int *sum);
@@ -335,6 +338,52 @@ unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
return *sse;
}
+#if CONFIG_EXT_PARTITION_TYPES
+unsigned int aom_variance4x16_sse2(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_sse2(src, src_stride, ref, ref_stride, 4, 16, sse, &sum,
+ get4x4var_sse2, 4);
+ assert(sum <= 255 * 4 * 16);
+ assert(sum >= -255 * 4 * 16);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 6);
+}
+
+unsigned int aom_variance16x4_sse2(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_sse2(src, src_stride, ref, ref_stride, 16, 4, sse, &sum,
+ get4x4var_sse2, 4);
+ assert(sum <= 255 * 16 * 4);
+ assert(sum >= -255 * 16 * 4);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 6);
+}
+
+unsigned int aom_variance8x32_sse2(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_sse2(src, src_stride, ref, ref_stride, 8, 32, sse, &sum,
+ aom_get8x8var_sse2, 8);
+ assert(sum <= 255 * 8 * 32);
+ assert(sum >= -255 * 8 * 32);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 8);
+}
+
+unsigned int aom_variance32x8_sse2(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_sse2(src, src_stride, ref, ref_stride, 32, 8, sse, &sum,
+ aom_get8x8var_sse2, 8);
+ assert(sum <= 255 * 32 * 8);
+ assert(sum >= -255 * 32 * 8);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 8);
+}
+#endif
+
// The 2 unused parameters are place holders for PIC enabled build.
// These definitions are for functions defined in subpel_variance.asm
#define DECL(w, opt) \
@@ -342,13 +391,13 @@ unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride,
const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \
const uint8_t *dst, ptrdiff_t dst_stride, int height, unsigned int *sse, \
void *unused0, void *unused)
-#define DECLS(opt1, opt2) \
- DECL(4, opt1); \
- DECL(8, opt1); \
- DECL(16, opt1)
+#define DECLS(opt) \
+ DECL(4, opt); \
+ DECL(8, opt); \
+ DECL(16, opt)
-DECLS(sse2, sse2);
-DECLS(ssse3, ssse3);
+DECLS(sse2);
+DECLS(ssse3);
#undef DECLS
#undef DECL
@@ -384,23 +433,44 @@ DECLS(ssse3, ssse3);
return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
}
-#define FNS(opt1, opt2) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (int32_t), (int32_t)); \
- FN(8, 16, 8, 3, 4, opt1, (int32_t), (int32_t)); \
- FN(8, 8, 8, 3, 3, opt1, (int32_t), (int32_t)); \
- FN(8, 4, 8, 3, 2, opt1, (int32_t), (int32_t)); \
- FN(4, 8, 4, 2, 3, opt1, (int32_t), (int32_t)); \
- FN(4, 4, 4, 2, 2, opt1, (int32_t), (int32_t))
-
-FNS(sse2, sse2);
-FNS(ssse3, ssse3);
+#if CONFIG_EXT_PARTITION_TYPES
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int32_t), (int32_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int32_t), (int32_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int32_t), (int32_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int32_t), (int32_t)); \
+ FN(4, 8, 4, 2, 3, opt, (int32_t), (int32_t)); \
+ FN(4, 4, 4, 2, 2, opt, (int32_t), (int32_t)); \
+ FN(4, 16, 4, 2, 4, opt, (int32_t), (int32_t)); \
+ FN(16, 4, 16, 4, 2, opt, (int32_t), (int32_t)); \
+ FN(8, 32, 8, 3, 5, opt, (int32_t), (int32_t)); \
+ FN(32, 8, 16, 5, 3, opt, (int32_t), (int32_t))
+#else
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (int32_t), (int32_t)); \
+ FN(8, 16, 8, 3, 4, opt, (int32_t), (int32_t)); \
+ FN(8, 8, 8, 3, 3, opt, (int32_t), (int32_t)); \
+ FN(8, 4, 8, 3, 2, opt, (int32_t), (int32_t)); \
+ FN(4, 8, 4, 2, 3, opt, (int32_t), (int32_t)); \
+ FN(4, 4, 4, 2, 2, opt, (int32_t), (int32_t))
+#endif
+
+FNS(sse2);
+FNS(ssse3);
#undef FNS
#undef FN
@@ -412,13 +482,13 @@ FNS(ssse3, ssse3);
const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec, \
ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \
void *unused)
-#define DECLS(opt1, opt2) \
- DECL(4, opt1); \
- DECL(8, opt1); \
- DECL(16, opt1)
+#define DECLS(opt) \
+ DECL(4, opt); \
+ DECL(8, opt); \
+ DECL(16, opt)
-DECLS(sse2, sse2);
-DECLS(ssse3, ssse3);
+DECLS(sse2);
+DECLS(ssse3);
#undef DECL
#undef DECLS
@@ -455,236 +525,149 @@ DECLS(ssse3, ssse3);
return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \
}
-#define FNS(opt1, opt2) \
- FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \
- FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \
- FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \
- FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \
- FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \
- FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
- FN(16, 8, 16, 4, 3, opt1, (uint32_t), (int32_t)); \
- FN(8, 16, 8, 3, 4, opt1, (uint32_t), (int32_t)); \
- FN(8, 8, 8, 3, 3, opt1, (uint32_t), (int32_t)); \
- FN(8, 4, 8, 3, 2, opt1, (uint32_t), (int32_t)); \
- FN(4, 8, 4, 2, 3, opt1, (uint32_t), (int32_t)); \
- FN(4, 4, 4, 2, 2, opt1, (uint32_t), (int32_t))
-
-FNS(sse2, sse);
-FNS(ssse3, ssse3);
+#if CONFIG_EXT_PARTITION_TYPES
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (uint32_t), (int32_t)); \
+ FN(8, 16, 8, 3, 4, opt, (uint32_t), (int32_t)); \
+ FN(8, 8, 8, 3, 3, opt, (uint32_t), (int32_t)); \
+ FN(8, 4, 8, 3, 2, opt, (uint32_t), (int32_t)); \
+ FN(4, 8, 4, 2, 3, opt, (uint32_t), (int32_t)); \
+ FN(4, 4, 4, 2, 2, opt, (uint32_t), (int32_t)); \
+ FN(4, 16, 4, 2, 4, opt, (int32_t), (int32_t)); \
+ FN(16, 4, 16, 4, 2, opt, (int32_t), (int32_t)); \
+ FN(8, 32, 8, 3, 5, opt, (int32_t), (int32_t)); \
+ FN(32, 8, 16, 5, 3, opt, (int32_t), (int32_t))
+#else
+#define FNS(opt) \
+ FN(64, 64, 16, 6, 6, opt, (int64_t), (int64_t)); \
+ FN(64, 32, 16, 6, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 64, 16, 5, 6, opt, (int64_t), (int64_t)); \
+ FN(32, 32, 16, 5, 5, opt, (int64_t), (int64_t)); \
+ FN(32, 16, 16, 5, 4, opt, (int64_t), (int64_t)); \
+ FN(16, 32, 16, 4, 5, opt, (int64_t), (int64_t)); \
+ FN(16, 16, 16, 4, 4, opt, (uint32_t), (int64_t)); \
+ FN(16, 8, 16, 4, 3, opt, (uint32_t), (int32_t)); \
+ FN(8, 16, 8, 3, 4, opt, (uint32_t), (int32_t)); \
+ FN(8, 8, 8, 3, 3, opt, (uint32_t), (int32_t)); \
+ FN(8, 4, 8, 3, 2, opt, (uint32_t), (int32_t)); \
+ FN(4, 8, 4, 2, 3, opt, (uint32_t), (int32_t)); \
+ FN(4, 4, 4, 2, 2, opt, (uint32_t), (int32_t))
+#endif
+
+FNS(sse2);
+FNS(ssse3);
#undef FNS
#undef FN
void aom_upsampled_pred_sse2(uint8_t *comp_pred, int width, int height,
+ int subpel_x_q3, int subpel_y_q3,
const uint8_t *ref, int ref_stride) {
- int i, j;
- int stride = ref_stride << 3;
-
- if (width >= 16) {
- // read 16 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 16) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i s2 = _mm_loadu_si128((const __m128i *)(ref + 32));
- __m128i s3 = _mm_loadu_si128((const __m128i *)(ref + 48));
- __m128i s4 = _mm_loadu_si128((const __m128i *)(ref + 64));
- __m128i s5 = _mm_loadu_si128((const __m128i *)(ref + 80));
- __m128i s6 = _mm_loadu_si128((const __m128i *)(ref + 96));
- __m128i s7 = _mm_loadu_si128((const __m128i *)(ref + 112));
- __m128i t0, t1, t2, t3;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- t1 = _mm_unpacklo_epi8(s2, s3);
- s3 = _mm_unpackhi_epi8(s2, s3);
- t2 = _mm_unpacklo_epi8(s4, s5);
- s5 = _mm_unpackhi_epi8(s4, s5);
- t3 = _mm_unpacklo_epi8(s6, s7);
- s7 = _mm_unpackhi_epi8(s6, s7);
-
- s0 = _mm_unpacklo_epi8(t0, s1);
- s2 = _mm_unpacklo_epi8(t1, s3);
- s4 = _mm_unpacklo_epi8(t2, s5);
- s6 = _mm_unpacklo_epi8(t3, s7);
- s0 = _mm_unpacklo_epi32(s0, s2);
- s4 = _mm_unpacklo_epi32(s4, s6);
- s0 = _mm_unpacklo_epi64(s0, s4);
-
- _mm_storeu_si128((__m128i *)(comp_pred), s0);
+ if (!subpel_x_q3 && !subpel_y_q3) {
+ if (width >= 16) {
+ int i;
+ assert(!(width & 15));
+ /*Read 16 pixels one row at a time.*/
+ for (i = 0; i < height; i++) {
+ int j;
+ for (j = 0; j < width; j += 16) {
+ __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
+ _mm_storeu_si128((__m128i *)comp_pred, s0);
+ comp_pred += 16;
+ ref += 16;
+ }
+ ref += ref_stride - width;
+ }
+ } else if (width >= 8) {
+ int i;
+ assert(!(width & 7));
+ assert(!(height & 1));
+ /*Read 8 pixels two rows at a time.*/
+ for (i = 0; i < height; i += 2) {
+ __m128i s0 = _mm_loadl_epi64((const __m128i *)ref);
+ __m128i s1 = _mm_loadl_epi64((const __m128i *)(ref + ref_stride));
+ __m128i t0 = _mm_unpacklo_epi64(s0, s1);
+ _mm_storeu_si128((__m128i *)comp_pred, t0);
comp_pred += 16;
- ref += 16 * 8;
+ ref += 2 * ref_stride;
}
- ref += stride - (width << 3);
- }
- } else if (width >= 8) {
- // read 8 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i s2 = _mm_loadu_si128((const __m128i *)(ref + 32));
- __m128i s3 = _mm_loadu_si128((const __m128i *)(ref + 48));
- __m128i t0, t1;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- t1 = _mm_unpacklo_epi8(s2, s3);
- s3 = _mm_unpackhi_epi8(s2, s3);
-
- s0 = _mm_unpacklo_epi8(t0, s1);
- s2 = _mm_unpacklo_epi8(t1, s3);
- s0 = _mm_unpacklo_epi32(s0, s2);
-
- _mm_storel_epi64((__m128i *)(comp_pred), s0);
- comp_pred += 8;
- ref += 8 * 8;
+ } else {
+ int i;
+ assert(!(width & 3));
+ assert(!(height & 3));
+ /*Read 4 pixels four rows at a time.*/
+      for (i = 0; i < height; i += 4) {
+ __m128i s0 = _mm_cvtsi32_si128(*(const uint32_t *)ref);
+ __m128i s1 = _mm_cvtsi32_si128(*(const uint32_t *)(ref + ref_stride));
+ __m128i s2 =
+ _mm_cvtsi32_si128(*(const uint32_t *)(ref + 2 * ref_stride));
+ __m128i s3 =
+ _mm_cvtsi32_si128(*(const uint32_t *)(ref + 3 * ref_stride));
+ __m128i t0 = _mm_unpacklo_epi32(s0, s1);
+ __m128i t1 = _mm_unpacklo_epi32(s2, s3);
+ __m128i u0 = _mm_unpacklo_epi64(t0, t1);
+ _mm_storeu_si128((__m128i *)comp_pred, u0);
+ comp_pred += 16;
+ ref += 4 * ref_stride;
}
- ref += stride - (width << 3);
}
} else {
- // read 4 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i t0;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- s0 = _mm_unpacklo_epi8(t0, s1);
-
- *(int *)comp_pred = _mm_cvtsi128_si32(s0);
- comp_pred += 4;
- ref += 4 * 8;
- }
- ref += stride - (width << 3);
+ InterpFilterParams filter;
+ filter = av1_get_interp_filter_params(EIGHTTAP_REGULAR);
+ if (!subpel_y_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ aom_convolve8_horiz(ref, ref_stride, comp_pred, width, kernel, 16, NULL,
+ -1, width, height);
+ } else if (!subpel_x_q3) {
+ const int16_t *kernel;
+ kernel = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ aom_convolve8_vert(ref, ref_stride, comp_pred, width, NULL, -1, kernel,
+ 16, width, height);
+ } else {
+ DECLARE_ALIGNED(16, uint8_t,
+ temp[((MAX_SB_SIZE * 2 + 16) + 16) * MAX_SB_SIZE]);
+ const int16_t *kernel_x;
+ const int16_t *kernel_y;
+ int intermediate_height;
+ kernel_x = av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+ kernel_y = av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+ intermediate_height =
+ (((height - 1) * 8 + subpel_y_q3) >> 3) + filter.taps;
+ assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
+ aom_convolve8_horiz(ref - ref_stride * ((filter.taps >> 1) - 1),
+ ref_stride, temp, MAX_SB_SIZE, kernel_x, 16, NULL, -1,
+ width, intermediate_height);
+ aom_convolve8_vert(temp + MAX_SB_SIZE * ((filter.taps >> 1) - 1),
+ MAX_SB_SIZE, comp_pred, width, NULL, -1, kernel_y, 16,
+ width, height);
}
}
}
void aom_comp_avg_upsampled_pred_sse2(uint8_t *comp_pred, const uint8_t *pred,
- int width, int height, const uint8_t *ref,
+ int width, int height, int subpel_x_q3,
+ int subpel_y_q3, const uint8_t *ref,
int ref_stride) {
- const __m128i zero = _mm_set1_epi16(0);
- const __m128i one = _mm_set1_epi16(1);
- int i, j;
- int stride = ref_stride << 3;
-
- if (width >= 16) {
- // read 16 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 16) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i s2 = _mm_loadu_si128((const __m128i *)(ref + 32));
- __m128i s3 = _mm_loadu_si128((const __m128i *)(ref + 48));
- __m128i s4 = _mm_loadu_si128((const __m128i *)(ref + 64));
- __m128i s5 = _mm_loadu_si128((const __m128i *)(ref + 80));
- __m128i s6 = _mm_loadu_si128((const __m128i *)(ref + 96));
- __m128i s7 = _mm_loadu_si128((const __m128i *)(ref + 112));
- __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
- __m128i p1;
- __m128i t0, t1, t2, t3;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- t1 = _mm_unpacklo_epi8(s2, s3);
- s3 = _mm_unpackhi_epi8(s2, s3);
- t2 = _mm_unpacklo_epi8(s4, s5);
- s5 = _mm_unpackhi_epi8(s4, s5);
- t3 = _mm_unpacklo_epi8(s6, s7);
- s7 = _mm_unpackhi_epi8(s6, s7);
-
- s0 = _mm_unpacklo_epi8(t0, s1);
- s2 = _mm_unpacklo_epi8(t1, s3);
- s4 = _mm_unpacklo_epi8(t2, s5);
- s6 = _mm_unpacklo_epi8(t3, s7);
-
- s0 = _mm_unpacklo_epi32(s0, s2);
- s4 = _mm_unpacklo_epi32(s4, s6);
- s0 = _mm_unpacklo_epi8(s0, zero);
- s4 = _mm_unpacklo_epi8(s4, zero);
-
- p1 = _mm_unpackhi_epi8(p0, zero);
- p0 = _mm_unpacklo_epi8(p0, zero);
- p0 = _mm_adds_epu16(s0, p0);
- p1 = _mm_adds_epu16(s4, p1);
- p0 = _mm_adds_epu16(p0, one);
- p1 = _mm_adds_epu16(p1, one);
-
- p0 = _mm_srli_epi16(p0, 1);
- p1 = _mm_srli_epi16(p1, 1);
- p0 = _mm_packus_epi16(p0, p1);
-
- _mm_storeu_si128((__m128i *)(comp_pred), p0);
- comp_pred += 16;
- pred += 16;
- ref += 16 * 8;
- }
- ref += stride - (width << 3);
- }
- } else if (width >= 8) {
- // read 8 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 8) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i s2 = _mm_loadu_si128((const __m128i *)(ref + 32));
- __m128i s3 = _mm_loadu_si128((const __m128i *)(ref + 48));
- __m128i p0 = _mm_loadl_epi64((const __m128i *)pred);
- __m128i t0, t1;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- t1 = _mm_unpacklo_epi8(s2, s3);
- s3 = _mm_unpackhi_epi8(s2, s3);
-
- s0 = _mm_unpacklo_epi8(t0, s1);
- s2 = _mm_unpacklo_epi8(t1, s3);
- s0 = _mm_unpacklo_epi32(s0, s2);
- s0 = _mm_unpacklo_epi8(s0, zero);
-
- p0 = _mm_unpacklo_epi8(p0, zero);
- p0 = _mm_adds_epu16(s0, p0);
- p0 = _mm_adds_epu16(p0, one);
- p0 = _mm_srli_epi16(p0, 1);
- p0 = _mm_packus_epi16(p0, zero);
-
- _mm_storel_epi64((__m128i *)(comp_pred), p0);
- comp_pred += 8;
- pred += 8;
- ref += 8 * 8;
- }
- ref += stride - (width << 3);
- }
- } else {
- // read 4 points at one time
- for (i = 0; i < height; i++) {
- for (j = 0; j < width; j += 4) {
- __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
- __m128i s1 = _mm_loadu_si128((const __m128i *)(ref + 16));
- __m128i p0 = _mm_cvtsi32_si128(*(const uint32_t *)pred);
- __m128i t0;
-
- t0 = _mm_unpacklo_epi8(s0, s1);
- s1 = _mm_unpackhi_epi8(s0, s1);
- s0 = _mm_unpacklo_epi8(t0, s1);
- s0 = _mm_unpacklo_epi8(s0, zero);
-
- p0 = _mm_unpacklo_epi8(p0, zero);
- p0 = _mm_adds_epu16(s0, p0);
- p0 = _mm_adds_epu16(p0, one);
- p0 = _mm_srli_epi16(p0, 1);
- p0 = _mm_packus_epi16(p0, zero);
-
- *(int *)comp_pred = _mm_cvtsi128_si32(p0);
- comp_pred += 4;
- pred += 4;
- ref += 4 * 8;
- }
- ref += stride - (width << 3);
- }
+ int n;
+ int i;
+ aom_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3, ref,
+ ref_stride);
+ /*The total number of pixels must be a multiple of 16 (e.g., 4x4).*/
+ assert(!(width * height & 15));
+ n = width * height >> 4;
+ for (i = 0; i < n; i++) {
+ __m128i s0 = _mm_loadu_si128((const __m128i *)comp_pred);
+ __m128i p0 = _mm_loadu_si128((const __m128i *)pred);
+ _mm_storeu_si128((__m128i *)comp_pred, _mm_avg_epu8(s0, p0));
+ comp_pred += 16;
+ pred += 16;
}
}
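
Both upsampled_pred rewrites size their two-pass intermediate buffer with the same expression; a small worked check of that formula, using example values that are purely illustrative:

/* Rows the horizontal pass must produce so the vertical pass has enough
 * context: (((height - 1) * 8 + subpel_y_q3) >> 3) + taps.
 * Example: height = 16, subpel_y_q3 = 5, taps = 8 gives
 * ((15 * 8 + 5) >> 3) + 8 = (125 >> 3) + 8 = 15 + 8 = 23 rows, comfortably
 * inside the (MAX_SB_SIZE * 2 + 16) + 16 bound asserted above. */
static int intermediate_rows(int height, int subpel_y_q3, int taps) {
  return (((height - 1) * 8 + subpel_y_q3) >> 3) + taps;
}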