author     trav90 <travawine@openmailbox.org>        2018-02-04 13:19:22 -0600
committer  trav90 <travawine@openmailbox.org>        2018-02-04 13:19:22 -0600
commit     30bfbb3f97bd64b7838bcb55c98fa698b1bcc9d2 (patch)
tree       a6031d45b30f0b5b66e2b1774d75b0f953a2aedc /media/ffvpx/libavcodec/x86
parent     fc7d9fade54dfbe275c4808dabe30a19415082e0 (diff)
Update FFmpeg code to n3.2-65-gee56777
Diffstat (limited to 'media/ffvpx/libavcodec/x86')
-rw-r--r--  media/ffvpx/libavcodec/x86/h264_i386.h        12
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init.c      10
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9itxfm.asm      434
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9lpf.asm        238
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm    2
5 files changed, 593 insertions(+), 103 deletions(-)
diff --git a/media/ffvpx/libavcodec/x86/h264_i386.h b/media/ffvpx/libavcodec/x86/h264_i386.h
index 4dfbc3093..19cd12838 100644
--- a/media/ffvpx/libavcodec/x86/h264_i386.h
+++ b/media/ffvpx/libavcodec/x86/h264_i386.h
@@ -91,13 +91,13 @@ static int decode_significance_x86(CABACContext *c, int max_coeff,
"sub %10, %1 \n\t"
"mov %2, %0 \n\t"
"movl %7, %%ecx \n\t"
- "add %1, %%"REG_c" \n\t"
+ "add %1, %%"FF_REG_c" \n\t"
"movl %%ecx, (%0) \n\t"
"test $1, %4 \n\t"
" jnz 5f \n\t"
- "add"OPSIZE" $4, %2 \n\t"
+ "add"FF_OPSIZE" $4, %2 \n\t"
"4: \n\t"
"add $1, %1 \n\t"
@@ -105,7 +105,7 @@ static int decode_significance_x86(CABACContext *c, int max_coeff,
" jb 3b \n\t"
"mov %2, %0 \n\t"
"movl %7, %%ecx \n\t"
- "add %1, %%"REG_c" \n\t"
+ "add %1, %%"FF_REG_c" \n\t"
"movl %%ecx, (%0) \n\t"
"5: \n\t"
"add %9, %k0 \n\t"
@@ -116,7 +116,7 @@ static int decode_significance_x86(CABACContext *c, int max_coeff,
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end))
TABLES_ARG
- : "%"REG_c, "memory"
+ : "%"FF_REG_c, "memory"
);
return coeff_count;
}
@@ -183,7 +183,7 @@ static int decode_significance_8x8_x86(CABACContext *c,
"test $1, %4 \n\t"
" jnz 5f \n\t"
- "add"OPSIZE" $4, %2 \n\t"
+ "add"FF_OPSIZE" $4, %2 \n\t"
"4: \n\t"
"add $1, %6 \n\t"
@@ -202,7 +202,7 @@ static int decode_significance_8x8_x86(CABACContext *c,
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end)),
"i"(H264_LAST_COEFF_FLAG_OFFSET_8x8_OFFSET) TABLES_ARG
- : "%"REG_c, "memory"
+ : "%"FF_REG_c, "memory"
);
return coeff_count;
}
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init.c b/media/ffvpx/libavcodec/x86/vp9dsp_init.c
index 469a66171..cc781a009 100644
--- a/media/ffvpx/libavcodec/x86/vp9dsp_init.c
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init.c
@@ -114,6 +114,8 @@ itxfm_func(idct, idct, 32, sse2);
itxfm_func(idct, idct, 32, ssse3);
itxfm_func(idct, idct, 32, avx);
itxfm_func(iwht, iwht, 4, mmx);
+itxfm_func(idct, idct, 16, avx2);
+itxfm_func(idct, idct, 32, avx2);
#undef itxfm_func
#undef itxfm_funcs
@@ -124,6 +126,8 @@ void ff_vp9_loop_filter_v_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stri
void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \
int E, int I, int H)
+lpf_funcs(4, 8, mmxext);
+lpf_funcs(8, 8, mmxext);
lpf_funcs(16, 16, sse2);
lpf_funcs(16, 16, ssse3);
lpf_funcs(16, 16, avx);
@@ -279,6 +283,10 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact)
}
if (EXTERNAL_MMXEXT(cpu_flags)) {
+ dsp->loop_filter_8[0][0] = ff_vp9_loop_filter_h_4_8_mmxext;
+ dsp->loop_filter_8[0][1] = ff_vp9_loop_filter_v_4_8_mmxext;
+ dsp->loop_filter_8[1][0] = ff_vp9_loop_filter_h_8_8_mmxext;
+ dsp->loop_filter_8[1][1] = ff_vp9_loop_filter_v_8_8_mmxext;
init_subpel2(4, 0, 4, put, 8, mmxext);
init_subpel2(4, 1, 4, avg, 8, mmxext);
init_fpel_func(4, 1, 4, avg, _8, mmxext);
@@ -382,6 +390,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact)
init_fpel_func(0, 1, 64, avg, _8, avx2);
if (ARCH_X86_64) {
#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+ dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_avx2;
+ dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_avx2;
init_subpel3_32_64(0, put, 8, avx2);
init_subpel3_32_64(1, avg, 8, avx2);
#endif
diff --git a/media/ffvpx/libavcodec/x86/vp9itxfm.asm b/media/ffvpx/libavcodec/x86/vp9itxfm.asm
index 6d5008e33..57d6d353b 100644
--- a/media/ffvpx/libavcodec/x86/vp9itxfm.asm
+++ b/media/ffvpx/libavcodec/x86/vp9itxfm.asm
@@ -24,36 +24,36 @@
%include "libavutil/x86/x86util.asm"
%include "vp9itxfm_template.asm"
-SECTION_RODATA
+SECTION_RODATA 32
%macro VP9_IDCT_COEFFS 2-3 0
const pw_m%1_%2
-times 4 dw -%1, %2
+times 8 dw -%1, %2
const pw_%2_%1
-times 4 dw %2, %1
+times 8 dw %2, %1
%if %3 == 1
const pw_m%2_m%1
-times 4 dw -%2, -%1
+times 8 dw -%2, -%1
%if %1 != %2
const pw_m%2_%1
-times 4 dw -%2, %1
+times 8 dw -%2, %1
const pw_%1_%2
-times 4 dw %1, %2
+times 8 dw %1, %2
%endif
%endif
%if %1 < 11585
-pw_m%1x2: times 8 dw -%1*2
+pw_m%1x2: times 16 dw -%1*2
%elif %1 > 11585
-pw_%1x2: times 8 dw %1*2
+pw_%1x2: times 16 dw %1*2
%else
const pw_%1x2
-times 8 dw %1*2
+times 16 dw %1*2
%endif
%if %2 != %1
-pw_%2x2: times 8 dw %2*2
+pw_%2x2: times 16 dw %2*2
%endif
%endmacro
@@ -127,16 +127,33 @@ SECTION .text
%endmacro
%macro VP9_STORE_2X 5-6 dstq ; reg1, reg2, tmp1, tmp2, zero, dst
+%if mmsize == 32
+ pmovzxbw m%3, [%6]
+ pmovzxbw m%4, [%6+strideq]
+%else
movh m%3, [%6]
movh m%4, [%6+strideq]
punpcklbw m%3, m%5
punpcklbw m%4, m%5
+%endif
paddw m%3, m%1
paddw m%4, m%2
+%if mmsize == 32
+ packuswb m%3, m%4
+ ; Intel...
+ vpermq m%3, m%3, q3120
+ mova [%6], xm%3
+ vextracti128 [%6+strideq], m%3, 1
+%elif mmsize == 16
+ packuswb m%3, m%4
+ movh [%6], m%3
+ movhps [%6+strideq], m%3
+%else
packuswb m%3, m%5
packuswb m%4, m%5
movh [%6], m%3
movh [%6+strideq], m%4
+%endif
%endmacro
%macro ZERO_BLOCK 4 ; mem, stride, nnzcpl, zero_reg
@@ -1421,6 +1438,180 @@ VP9_IDCT_IDCT_16x16_ADD_XMM sse2
VP9_IDCT_IDCT_16x16_ADD_XMM ssse3
VP9_IDCT_IDCT_16x16_ADD_XMM avx
+%macro VP9_IDCT16_YMM_1D 0
+ VP9_UNPACK_MULSUB_2W_4X 1, 15, 16305, 1606, [pd_8192], 0, 4 ; t8, t15
+ VP9_UNPACK_MULSUB_2W_4X 9, 7, 10394, 12665, [pd_8192], 0, 4 ; t9, t14
+
+ SUMSUB_BA w, 9, 1, 0 ; t8, t9
+ SUMSUB_BA w, 7, 15, 0 ; t15, t14
+
+ VP9_UNPACK_MULSUB_2W_4X 15, 1, 15137, 6270, [pd_8192], 0, 4 ; t9, t14
+
+ VP9_UNPACK_MULSUB_2W_4X 5, 11, 14449, 7723, [pd_8192], 0, 4 ; t10, t13
+ VP9_UNPACK_MULSUB_2W_4X 13, 3, 4756, 15679, [pd_8192], 0, 4 ; t11, t12
+
+ SUMSUB_BA w, 5, 13, 0 ; t11, t10
+ SUMSUB_BA w, 11, 3, 0 ; t12, t13
+
+ VP9_UNPACK_MULSUB_2W_4X 3, 13, 6270, m15137, [pd_8192], 0, 4 ; t10, t13
+
+ SUMSUB_BA w, 5, 9, 0 ; t8, t11
+ SUMSUB_BA w, 3, 15, 0 ; t9, t10
+ SUMSUB_BA w, 11, 7, 0 ; t15, t12
+ SUMSUB_BA w, 13, 1, 0 ; t14, t13
+
+ SUMSUB_BA w, 15, 1, 0
+ SUMSUB_BA w, 9, 7, 0
+ pmulhrsw m1, [pw_11585x2] ; t10
+ pmulhrsw m7, [pw_11585x2] ; t11
+ pmulhrsw m9, [pw_11585x2] ; t12
+ pmulhrsw m15, [pw_11585x2] ; t13
+
+ ; even (tx8x8)
+ mova m4, [blockq+128]
+ mova [blockq+128], m5
+ VP9_UNPACK_MULSUB_2W_4X 4, 12, 15137, 6270, [pd_8192], 0, 5 ; t2, t3
+ VP9_UNPACK_MULSUB_2W_4X 2, 14, 16069, 3196, [pd_8192], 0, 5 ; t4, t7
+ VP9_UNPACK_MULSUB_2W_4X 10, 6, 9102, 13623, [pd_8192], 0, 5 ; t5, t6
+ mova m0, [blockq+ 0]
+ SUMSUB_BA w, 8, 0, 5
+ pmulhrsw m8, [pw_11585x2] ; t0
+ pmulhrsw m0, [pw_11585x2] ; t1
+
+ SUMSUB_BA w, 10, 2, 5 ; t4, t5
+ SUMSUB_BA w, 6, 14, 5 ; t7, t6
+ SUMSUB_BA w, 12, 8, 5 ; t0, t3
+ SUMSUB_BA w, 4, 0, 5 ; t1, t2
+
+ SUMSUB_BA w, 2, 14, 5
+ pmulhrsw m14, [pw_11585x2] ; t5
+ pmulhrsw m2, [pw_11585x2] ; t6
+
+ SUMSUB_BA w, 6, 12, 5 ; t0, t7
+ SUMSUB_BA w, 2, 4, 5 ; t1, t6
+ SUMSUB_BA w, 14, 0, 5 ; t2, t5
+ SUMSUB_BA w, 10, 8, 5 ; t3, t4
+
+ ; final stage
+ SUMSUB_BA w, 11, 6, 5 ; out0, out15
+ SUMSUB_BA w, 13, 2, 5 ; out1, out14
+ SUMSUB_BA w, 15, 14, 5 ; out2, out13
+ SUMSUB_BA w, 9, 10, 5 ; out3, out12
+ SUMSUB_BA w, 7, 8, 5 ; out4, out11
+ SUMSUB_BA w, 1, 0, 5 ; out5, out10
+ SUMSUB_BA w, 3, 4, 5 ; out6, out9
+ mova m5, [blockq+128]
+ mova [blockq+192], m3
+ SUMSUB_BA w, 5, 12, 3 ; out7, out8
+
+ SWAP 0, 11, 8, 12, 10
+ SWAP 1, 13, 14, 2, 15, 6, 3, 9, 4, 7, 5
+%endmacro
+
+; this is almost identical to VP9_STORE_2X, but it does two rows
+; for slightly improved interleaving, and it omits vpermq since the
+; input is DC so all values are identical
+%macro VP9_STORE_YMM_DC_4X 6 ; reg, tmp1, tmp2, tmp3, tmp4, zero
+ mova xm%2, [dstq]
+ mova xm%4, [dstq+strideq*2]
+ vinserti128 m%2, m%2, [dstq+strideq], 1
+ vinserti128 m%4, m%4, [dstq+stride3q], 1
+ punpckhbw m%3, m%2, m%6
+ punpcklbw m%2, m%6
+ punpckhbw m%5, m%4, m%6
+ punpcklbw m%4, m%6
+ paddw m%3, m%1
+ paddw m%2, m%1
+ paddw m%5, m%1
+ paddw m%4, m%1
+ packuswb m%2, m%3
+ packuswb m%4, m%5
+ mova [dstq], xm%2
+ mova [dstq+strideq*2], xm%4
+ vextracti128 [dstq+strideq], m%2, 1
+ vextracti128 [dstq+stride3q], m%4, 1
+%endmacro
+
+%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_idct_idct_16x16_add, 4, 4, 16, dst, stride, block, eob
+ cmp eobd, 1 ; faster path for when only DC is set
+ jg .idctfull
+
+ ; dc-only
+ mova m1, [pw_11585x2]
+ vpbroadcastw m0, [blockq]
+ pmulhrsw m0, m1
+ pmulhrsw m0, m1
+ pxor m5, m5
+ pmulhrsw m0, [pw_512]
+ movd [blockq], xm5
+
+ DEFINE_ARGS dst, stride, stride3, cnt
+ mov cntd, 4
+ lea stride3q, [strideq*3]
+.loop_dc:
+ VP9_STORE_YMM_DC_4X 0, 1, 2, 3, 4, 5
+ lea dstq, [dstq+4*strideq]
+ dec cntd
+ jg .loop_dc
+ RET
+
+ DEFINE_ARGS dst, stride, block, eob
+.idctfull:
+ mova m1, [blockq+ 32]
+ mova m2, [blockq+ 64]
+ mova m3, [blockq+ 96]
+ mova m5, [blockq+160]
+ mova m6, [blockq+192]
+ mova m7, [blockq+224]
+ mova m8, [blockq+256]
+ mova m9, [blockq+288]
+ mova m10, [blockq+320]
+ mova m11, [blockq+352]
+ mova m12, [blockq+384]
+ mova m13, [blockq+416]
+ mova m14, [blockq+448]
+ mova m15, [blockq+480]
+
+ VP9_IDCT16_YMM_1D
+ TRANSPOSE16x16W 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
+ [blockq+192], [blockq+128], 1
+ mova [blockq+ 0], m0
+ VP9_IDCT16_YMM_1D
+
+ mova [blockq+224], m7
+ mova [blockq+480], m15
+ pxor m15, m15
+
+ ; store
+ VP9_IDCT8_WRITEx2 0, 1, 6, 7, 15, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 2, 3, 6, 7, 15, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 4, 5, 6, 7, 15, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ mova m6, [blockq+192]
+ mova m7, [blockq+224]
+ SWAP 0, 15
+ mova m15, [blockq+480]
+ VP9_IDCT8_WRITEx2 6, 7, 1, 2, 0, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 8, 9, 1, 2, 0, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 10, 11, 1, 2, 0, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 12, 13, 1, 2, 0, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+ VP9_IDCT8_WRITEx2 14, 15, 1, 2, 0, [pw_512], 6
+ lea dstq, [dstq+2*strideq]
+
+ ; at the end of the loop, m0 should still be zero
+ ; use that to zero out block coefficients
+ ZERO_BLOCK blockq, 32, 16, m0
+ RET
+%endif
+
;---------------------------------------------------------------------------------------------
; void vp9_iadst_iadst_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------
@@ -1801,7 +1992,12 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
;---------------------------------------------------------------------------------------------
%macro VP9_IDCT32_1D 2-3 32 ; src, pass, nnzc
-%assign %%str 16*%2*%2
+%if %2 == 1
+%assign %%str mmsize
+%else
+%assign %%str 64
+%endif
+
; first do t0-15, this can be done identical to idct16x16
VP9_IDCT16_1D_START %1, %3/2, 64*2, tmpq, 2*%%str, 1
@@ -2096,17 +2292,125 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
mova m3, [tmpq+20*%%str] ; t5
mova m13, [tmpq+24*%%str] ; t6
- SUMSUB_BA w, 6, 8, 10
+ SUMSUB_BA w, 6, 8, 10
mova [tmpq+ 3*%%str], m8 ; t15
- mova m10, [tmpq+28*%%str] ; t7
SUMSUB_BA w, 0, 9, 8
SUMSUB_BA w, 15, 12, 8
SUMSUB_BA w, 14, 11, 8
SUMSUB_BA w, 1, 2, 8
SUMSUB_BA w, 7, 3, 8
SUMSUB_BA w, 5, 13, 8
+ mova m10, [tmpq+28*%%str] ; t7
SUMSUB_BA w, 4, 10, 8
+%if cpuflag(avx2)
+ ; the "shitty" about this idct is that the final pass does the outermost
+ ; interleave sumsubs (t0/31, t1/30, etc) but the tN for the 16x16 need
+ ; to be sequential, which means I need to load/store half of the sumsub
+ ; intermediates back to/from memory to get a 16x16 transpose going...
+ ; This would be easier if we had more (e.g. 32) YMM regs here.
+ mova [tmpq+ 7*%%str], m9
+ mova [tmpq+11*%%str], m12
+ mova [tmpq+15*%%str], m11
+ mova [tmpq+19*%%str], m2
+ mova [tmpq+23*%%str], m3
+ mova [tmpq+27*%%str], m13
+ mova [tmpq+31*%%str], m10
+ mova [tmpq+12*%%str], m5
+
+ mova m13, [tmpq+30*%%str] ; t8
+ mova m12, [tmpq+26*%%str] ; t9
+ mova m11, [tmpq+22*%%str] ; t10
+ mova m10, [tmpq+18*%%str] ; t11
+ mova m9, [tmpq+17*%%str] ; t20
+ mova m8, [tmpq+ 1*%%str] ; t21
+ mova m3, [tmpq+25*%%str] ; t22
+ mova m2, [tmpq+ 5*%%str] ; t23
+
+ SUMSUB_BA w, 9, 10, 5
+ SUMSUB_BA w, 8, 11, 5
+ SUMSUB_BA w, 3, 12, 5
+ SUMSUB_BA w, 2, 13, 5
+ mova [tmpq+ 1*%%str], m10
+ mova [tmpq+ 5*%%str], m11
+ mova [tmpq+17*%%str], m12
+ mova [tmpq+25*%%str], m13
+
+ mova m13, [tmpq+14*%%str] ; t12
+ mova m12, [tmpq+10*%%str] ; t13
+ mova m11, [tmpq+ 9*%%str] ; t18
+ mova m10, [tmpq+13*%%str] ; t19
+
+ SUMSUB_BA w, 11, 12, 5
+ SUMSUB_BA w, 10, 13, 5
+ mova [tmpq+ 9*%%str], m13
+ mova [tmpq+13*%%str], m12
+ mova [tmpq+10*%%str], m10
+ mova [tmpq+14*%%str], m11
+ mova m13, [tmpq+ 6*%%str] ; t14
+ mova m12, [tmpq+ 2*%%str] ; t15
+ mova m11, [tmpq+21*%%str] ; t16
+ mova m10, [tmpq+29*%%str] ; t17
+ SUMSUB_BA w, 11, 12, 5
+ SUMSUB_BA w, 10, 13, 5
+ mova [tmpq+21*%%str], m12
+ mova [tmpq+29*%%str], m13
+ mova m12, [tmpq+10*%%str]
+ mova m13, [tmpq+14*%%str]
+
+ TRANSPOSE16x16W 6, 0, 15, 14, 1, 7, 5, 4, \
+ 2, 3, 8, 9, 12, 13, 10, 11, \
+ [tmpq+12*%%str], [tmpq+ 8*%%str], 1
+ mova [tmpq+ 0*%%str], m6
+ mova [tmpq+ 2*%%str], m0
+ mova [tmpq+ 4*%%str], m15
+ mova [tmpq+ 6*%%str], m14
+ mova [tmpq+10*%%str], m7
+ mova [tmpq+12*%%str], m5
+ mova [tmpq+14*%%str], m4
+ mova [tmpq+16*%%str], m2
+ mova [tmpq+18*%%str], m3
+ mova [tmpq+20*%%str], m8
+ mova [tmpq+22*%%str], m9
+ mova [tmpq+24*%%str], m12
+ mova [tmpq+26*%%str], m13
+ mova [tmpq+28*%%str], m10
+ mova [tmpq+30*%%str], m11
+
+ mova m0, [tmpq+21*%%str]
+ mova m1, [tmpq+29*%%str]
+ mova m2, [tmpq+13*%%str]
+ mova m3, [tmpq+ 9*%%str]
+ mova m4, [tmpq+ 1*%%str]
+ mova m5, [tmpq+ 5*%%str]
+ mova m7, [tmpq+25*%%str]
+ mova m8, [tmpq+31*%%str]
+ mova m9, [tmpq+27*%%str]
+ mova m10, [tmpq+23*%%str]
+ mova m11, [tmpq+19*%%str]
+ mova m12, [tmpq+15*%%str]
+ mova m13, [tmpq+11*%%str]
+ mova m14, [tmpq+ 7*%%str]
+ mova m15, [tmpq+ 3*%%str]
+ TRANSPOSE16x16W 0, 1, 2, 3, 4, 5, 6, 7, \
+ 8, 9, 10, 11, 12, 13, 14, 15, \
+ [tmpq+17*%%str], [tmpq+ 9*%%str], 1
+ mova [tmpq+ 1*%%str], m0
+ mova [tmpq+ 3*%%str], m1
+ mova [tmpq+ 5*%%str], m2
+ mova [tmpq+ 7*%%str], m3
+ mova [tmpq+11*%%str], m5
+ mova [tmpq+13*%%str], m6
+ mova [tmpq+15*%%str], m7
+ mova [tmpq+17*%%str], m8
+ mova [tmpq+19*%%str], m9
+ mova [tmpq+21*%%str], m10
+ mova [tmpq+23*%%str], m11
+ mova [tmpq+25*%%str], m12
+ mova [tmpq+27*%%str], m13
+ mova [tmpq+29*%%str], m14
+ mova [tmpq+31*%%str], m15
+%else ; !avx2
TRANSPOSE8x8W 6, 0, 15, 14, 1, 7, 5, 4, 8
mova [tmpq+ 0*%%str], m6
mova [tmpq+ 4*%%str], m0
@@ -2175,6 +2479,7 @@ IADST16_FN iadst, IADST16, iadst, IADST16, avx
mova [tmpq+22*%%str], m13
mova [tmpq+26*%%str], m14
mova [tmpq+30*%%str], m15
+%endif ; avx2
%else
mova m2, [tmpq+24*%%str] ; t6
mova m3, [tmpq+28*%%str] ; t7
@@ -2623,3 +2928,106 @@ cglobal vp9_idct_idct_32x32_add, 0, 6 + ARCH_X86_64 * 3, 16, 2048, dst, stride,
VP9_IDCT_IDCT_32x32_ADD_XMM sse2
VP9_IDCT_IDCT_32x32_ADD_XMM ssse3
VP9_IDCT_IDCT_32x32_ADD_XMM avx
+
+; this is almost identical to VP9_STORE_2X, but it does two rows
+; for slightly improved interleaving, and it omits vpermq since the
+; input is DC so all values are identical
+%macro VP9_STORE_YMM_DC_2X2 6 ; reg, tmp1, tmp2, tmp3, tmp4, zero
+ mova m%2, [dstq]
+ mova m%4, [dstq+strideq]
+ punpckhbw m%3, m%2, m%6
+ punpcklbw m%2, m%6
+ punpckhbw m%5, m%4, m%6
+ punpcklbw m%4, m%6
+ paddw m%3, m%1
+ paddw m%2, m%1
+ paddw m%5, m%1
+ paddw m%4, m%1
+ packuswb m%2, m%3
+ packuswb m%4, m%5
+ mova [dstq+strideq*0], m%2
+ mova [dstq+strideq*1], m%4
+%endmacro
+
+%if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+cglobal vp9_idct_idct_32x32_add, 4, 9, 16, 2048, dst, stride, block, eob
+ cmp eobd, 135
+ jg .idctfull
+ cmp eobd, 1
+ jg .idct16x16
+
+ ; dc-only case
+ mova m1, [pw_11585x2]
+ vpbroadcastw m0, [blockq]
+ pmulhrsw m0, m1
+ pmulhrsw m0, m1
+ pxor m5, m5
+ pmulhrsw m0, [pw_512]
+ movd [blockq], xm5
+
+ DEFINE_ARGS dst, stride, cnt
+ mov cntd, 16
+.loop_dc:
+ VP9_STORE_YMM_DC_2X2 0, 1, 2, 3, 4, 5
+ lea dstq, [dstq+2*strideq]
+ dec cntd
+ jg .loop_dc
+ RET
+
+ DEFINE_ARGS dst_bak, stride, block, cnt, dst, stride30, dst_end, stride2, tmp
+.idct16x16:
+ mov tmpq, rsp
+ VP9_IDCT32_1D blockq, 1, 16
+
+ mov stride30q, strideq ; stride
+ lea stride2q, [strideq*2] ; stride*2
+ shl stride30q, 5 ; stride*32
+ mov cntd, 2
+ sub stride30q, stride2q ; stride*30
+.loop2_16x16:
+ mov dstq, dst_bakq
+ lea dst_endq, [dstq+stride30q]
+ VP9_IDCT32_1D tmpq, 2, 16
+ add dst_bakq, 16
+ add tmpq, 32
+ dec cntd
+ jg .loop2_16x16
+
+ ; at the end of the loop, m1 should still be zero
+ ; use that to zero out block coefficients
+ ZERO_BLOCK blockq, 64, 16, m1
+ RET
+
+.idctfull:
+ mov cntd, 2
+ mov tmpq, rsp
+.loop1_full:
+ VP9_IDCT32_1D blockq, 1
+ add blockq, 32
+ add tmpq, 1024
+ dec cntd
+ jg .loop1_full
+
+ sub blockq, 64
+
+ mov stride30q, strideq ; stride
+ lea stride2q, [strideq*2] ; stride*2
+ shl stride30q, 5 ; stride*32
+ mov cntd, 2
+ mov tmpq, rsp
+ sub stride30q, stride2q ; stride*30
+.loop2_full:
+ mov dstq, dst_bakq
+ lea dst_endq, [dstq+stride30q]
+ VP9_IDCT32_1D tmpq, 2
+ add dst_bakq, 16
+ add tmpq, 32
+ dec cntd
+ jg .loop2_full
+
+ ; at the end of the loop, m1 should still be zero
+ ; use that to zero out block coefficients
+ ZERO_BLOCK blockq, 64, 32, m1
+ RET
+%endif
diff --git a/media/ffvpx/libavcodec/x86/vp9lpf.asm b/media/ffvpx/libavcodec/x86/vp9lpf.asm
index 2c4fe214d..4e7ede223 100644
--- a/media/ffvpx/libavcodec/x86/vp9lpf.asm
+++ b/media/ffvpx/libavcodec/x86/vp9lpf.asm
@@ -52,7 +52,7 @@ mask_mix48: times 8 db 0x00
SECTION .text
%macro SCRATCH 3
-%if ARCH_X86_64
+%ifdef m8
SWAP %1, %2
%else
mova [%3], m%1
@@ -60,7 +60,7 @@ SECTION .text
%endmacro
%macro UNSCRATCH 3
-%if ARCH_X86_64
+%ifdef m8
SWAP %1, %2
%else
mova m%1, [%3]
@@ -69,7 +69,7 @@ SECTION .text
; %1 = abs(%2-%3)
%macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp
-%if ARCH_X86_64
+%ifdef m8
psubusb %1, %3, %2
psubusb %4, %2, %3
%else
@@ -102,7 +102,7 @@ SECTION .text
%endmacro
%macro UNPACK 4
-%if ARCH_X86_64
+%ifdef m8
punpck%1bw %2, %3, %4
%else
mova %2, %3
@@ -112,27 +112,27 @@ SECTION .text
%macro FILTER_SUBx2_ADDx2 11 ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1
; %8=add2 %9=rshift, [unpack], [unpack_is_mem_on_x86_32]
- psubw %3, [rsp+%4+%5*32]
- psubw %3, [rsp+%4+%6*32]
- paddw %3, [rsp+%4+%7*32]
+ psubw %3, [rsp+%4+%5*mmsize*2]
+ psubw %3, [rsp+%4+%6*mmsize*2]
+ paddw %3, [rsp+%4+%7*mmsize*2]
%ifnidn %10, ""
%if %11 == 0
punpck%2bw %1, %10, m0
%else
UNPACK %2, %1, %10, m0
%endif
- mova [rsp+%4+%8*32], %1
+ mova [rsp+%4+%8*mmsize*2], %1
paddw %3, %1
%else
- paddw %3, [rsp+%4+%8*32]
+ paddw %3, [rsp+%4+%8*mmsize*2]
%endif
psraw %1, %3, %9
%endmacro
; FIXME interleave l/h better (for instruction pairing)
%macro FILTER_INIT 9 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, filterid, mask, source
- FILTER%7_INIT %1, l, %3, %6 + 0
- FILTER%7_INIT %2, h, %4, %6 + 16
+ FILTER%7_INIT %1, l, %3, %6 + 0
+ FILTER%7_INIT %2, h, %4, %6 + mmsize
packuswb %1, %2
MASK_APPLY %1, %9, %8, %2
mova %5, %1
@@ -147,8 +147,8 @@ SECTION .text
mova %14, %15
%endif
%endif
- FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14, %16
- FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14, %16
+ FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14, %16
+ FILTER_SUBx2_ADDx2 %2, h, %4, %6 + mmsize, %7, %8, %9, %10, %11, %14, %16
packuswb %1, %2
%ifnidn %13, ""
MASK_APPLY %1, %13, %12, %2
@@ -195,21 +195,21 @@ SECTION .text
%macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
UNPACK %2, %1, rp3, m0 ; p3: B->W
- mova [rsp+%4+0*32], %1
+ mova [rsp+%4+0*mmsize*2], %1
paddw %3, %1, %1 ; p3*2
paddw %3, %1 ; p3*3
punpck%2bw %1, m1, m0 ; p2: B->W
- mova [rsp+%4+1*32], %1
+ mova [rsp+%4+1*mmsize*2], %1
paddw %3, %1 ; p3*3 + p2
paddw %3, %1 ; p3*3 + p2*2
UNPACK %2, %1, rp1, m0 ; p1: B->W
- mova [rsp+%4+2*32], %1
+ mova [rsp+%4+2*mmsize*2], %1
paddw %3, %1 ; p3*3 + p2*2 + p1
UNPACK %2, %1, rp0, m0 ; p0: B->W
- mova [rsp+%4+3*32], %1
+ mova [rsp+%4+3*mmsize*2], %1
paddw %3, %1 ; p3*3 + p2*2 + p1 + p0
UNPACK %2, %1, rq0, m0 ; q0: B->W
- mova [rsp+%4+4*32], %1
+ mova [rsp+%4+4*mmsize*2], %1
paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 + q0
paddw %3, [pw_4] ; p3*3 + p2*2 + p1 + p0 + q0 + 4
psraw %1, %3, 3 ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3
@@ -217,24 +217,24 @@ SECTION .text
%macro FILTER14_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off
punpck%2bw %1, m2, m0 ; p7: B->W
- mova [rsp+%4+ 8*32], %1
+ mova [rsp+%4+ 8*mmsize*2], %1
psllw %3, %1, 3 ; p7*8
psubw %3, %1 ; p7*7
punpck%2bw %1, m3, m0 ; p6: B->W
- mova [rsp+%4+ 9*32], %1
+ mova [rsp+%4+ 9*mmsize*2], %1
paddw %3, %1 ; p7*7 + p6
paddw %3, %1 ; p7*7 + p6*2
UNPACK %2, %1, rp5, m0 ; p5: B->W
- mova [rsp+%4+10*32], %1
+ mova [rsp+%4+10*mmsize*2], %1
paddw %3, %1 ; p7*7 + p6*2 + p5
UNPACK %2, %1, rp4, m0 ; p4: B->W
- mova [rsp+%4+11*32], %1
+ mova [rsp+%4+11*mmsize*2], %1
paddw %3, %1 ; p7*7 + p6*2 + p5 + p4
- paddw %3, [rsp+%4+ 0*32] ; p7*7 + p6*2 + p5 + p4 + p3
- paddw %3, [rsp+%4+ 1*32] ; p7*7 + p6*2 + p5 + .. + p2
- paddw %3, [rsp+%4+ 2*32] ; p7*7 + p6*2 + p5 + .. + p1
- paddw %3, [rsp+%4+ 3*32] ; p7*7 + p6*2 + p5 + .. + p0
- paddw %3, [rsp+%4+ 4*32] ; p7*7 + p6*2 + p5 + .. + p0 + q0
+ paddw %3, [rsp+%4+ 0*mmsize*2] ; p7*7 + p6*2 + p5 + p4 + p3
+ paddw %3, [rsp+%4+ 1*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p2
+ paddw %3, [rsp+%4+ 2*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p1
+ paddw %3, [rsp+%4+ 3*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p0
+ paddw %3, [rsp+%4+ 4*mmsize*2] ; p7*7 + p6*2 + p5 + .. + p0 + q0
paddw %3, [pw_8] ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8
psraw %1, %3, 4 ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4
%endmacro
@@ -334,22 +334,24 @@ SECTION .text
%endmacro
%macro DEFINE_TRANSPOSED_P7_TO_Q7 0-1 0
-%define P3 rsp + 0 + %1
-%define P2 rsp + 16 + %1
-%define P1 rsp + 32 + %1
-%define P0 rsp + 48 + %1
-%define Q0 rsp + 64 + %1
-%define Q1 rsp + 80 + %1
-%define Q2 rsp + 96 + %1
-%define Q3 rsp + 112 + %1
-%define P7 rsp + 128 + %1
-%define P6 rsp + 144 + %1
-%define P5 rsp + 160 + %1
-%define P4 rsp + 176 + %1
-%define Q4 rsp + 192 + %1
-%define Q5 rsp + 208 + %1
-%define Q6 rsp + 224 + %1
-%define Q7 rsp + 240 + %1
+%define P3 rsp + 0*mmsize + %1
+%define P2 rsp + 1*mmsize + %1
+%define P1 rsp + 2*mmsize + %1
+%define P0 rsp + 3*mmsize + %1
+%define Q0 rsp + 4*mmsize + %1
+%define Q1 rsp + 5*mmsize + %1
+%define Q2 rsp + 6*mmsize + %1
+%define Q3 rsp + 7*mmsize + %1
+%if mmsize == 16
+%define P7 rsp + 8*mmsize + %1
+%define P6 rsp + 9*mmsize + %1
+%define P5 rsp + 10*mmsize + %1
+%define P4 rsp + 11*mmsize + %1
+%define Q4 rsp + 12*mmsize + %1
+%define Q5 rsp + 13*mmsize + %1
+%define Q6 rsp + 14*mmsize + %1
+%define Q7 rsp + 15*mmsize + %1
+%endif
%endmacro
; ..............AB -> AAAAAAAABBBBBBBB
@@ -363,14 +365,19 @@ SECTION .text
%endif
%endmacro
-%macro LOOPFILTER 5 ; %1=v/h %2=size1 %3+%4=stack, %5=32bit stack only
+%macro LOOPFILTER 5 ; %1=v/h %2=size1 %3+%4=stack, %5=mmx/32bit stack only
+%assign %%ext 0
+%if ARCH_X86_32 || mmsize == 8
+%assign %%ext %5
+%endif
+
%if UNIX64
-cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3 + %4, dst, stride, E, I, H, mstride, dst2, stride3, mstride3
+cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 5, 9, 16, %3 + %4 + %%ext, dst, stride, E, I, H, mstride, dst2, stride3, mstride3
%else
%if WIN64
-cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3 + %4, dst, stride, E, I, mstride, dst2, stride3, mstride3
+cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 4, 8, 16, %3 + %4 + %%ext, dst, stride, E, I, mstride, dst2, stride3, mstride3
%else
-cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, dst2, stride3, mstride3
+cglobal vp9_loop_filter_%1_%2_ %+ mmsize, 2, 6, 16, %3 + %4 + %%ext, dst, stride, mstride, dst2, stride3, mstride3
%define Ed dword r2m
%define Id dword r3m
%endif
@@ -384,18 +391,22 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
lea mstride3q, [mstrideq*3]
%ifidn %1, h
-%if %2 > 16
+%if %2 != 16
+%if mmsize == 16
%define movx movh
+%else
+%define movx mova
+%endif
lea dstq, [dstq + 4*strideq - 4]
%else
%define movx movu
lea dstq, [dstq + 4*strideq - 8] ; go from top center (h pos) to center left (v pos)
%endif
- lea dst2q, [dstq + 8*strideq]
%else
lea dstq, [dstq + 4*mstrideq]
- lea dst2q, [dstq + 8*strideq]
%endif
+ ; FIXME we shouldn't need two dts registers if mmsize == 8
+ lea dst2q, [dstq + 8*strideq]
DEFINE_REAL_P7_TO_Q7
@@ -406,11 +417,11 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
movx m3, [P4]
movx m4, [P3]
movx m5, [P2]
-%if ARCH_X86_64 || %2 != 16
+%if (ARCH_X86_64 && mmsize == 16) || %2 > 16
movx m6, [P1]
%endif
movx m7, [P0]
-%if ARCH_X86_64
+%ifdef m8
movx m8, [Q0]
movx m9, [Q1]
movx m10, [Q2]
@@ -502,7 +513,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
movhps [Q5], m6
movhps [Q7], m7
DEFINE_TRANSPOSED_P7_TO_Q7
-%else ; %2 == 44/48/84/88
+%elif %2 > 16 ; %2 == 44/48/84/88
punpcklbw m0, m1
punpcklbw m2, m3
punpcklbw m4, m5
@@ -529,12 +540,31 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
mova [Q1], m5
mova [Q2], m7
mova [Q3], m3
+%else ; %2 == 4 || %2 == 8
+ SBUTTERFLY bw, 0, 1, 6
+ SBUTTERFLY bw, 2, 3, 6
+ SBUTTERFLY bw, 4, 5, 6
+ mova [rsp+4*mmsize], m5
+ mova m6, [P1]
+ SBUTTERFLY bw, 6, 7, 5
+ DEFINE_TRANSPOSED_P7_TO_Q7
+ TRANSPOSE4x4W 0, 2, 4, 6, 5
+ mova [P3], m0
+ mova [P2], m2
+ mova [P1], m4
+ mova [P0], m6
+ mova m5, [rsp+4*mmsize]
+ TRANSPOSE4x4W 1, 3, 5, 7, 0
+ mova [Q0], m1
+ mova [Q1], m3
+ mova [Q2], m5
+ mova [Q3], m7
%endif ; %2
%endif ; x86-32/64
%endif ; %1 == h
; calc fm mask
-%if %2 == 16
+%if %2 == 16 || mmsize == 8
%if cpuflag(ssse3)
pxor m0, m0
%endif
@@ -552,7 +582,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
mova m0, [pb_80]
pxor m2, m0
pxor m3, m0
-%if ARCH_X86_64
+%ifdef m8
%ifidn %1, v
mova m8, [P3]
mova m9, [P2]
@@ -613,10 +643,10 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3)
; calc flat8in (if not 44_16) and hev masks
-%if %2 != 44
+%if %2 != 44 && %2 != 4
mova m6, [pb_81] ; [1 1 1 1 ...] ^ 0x80
ABSSUB_GT m2, rp3, rp0, m6, m5 ; abs(p3 - p0) <= 1
-%if ARCH_X86_64
+%ifdef m8
mova m8, [pb_80]
%define rb80 m8
%else
@@ -625,7 +655,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
ABSSUB_GT m1, rp2, rp0, m6, m5, rb80 ; abs(p2 - p0) <= 1
por m2, m1
ABSSUB m4, rp1, rp0, m5 ; abs(p1 - p0)
-%if %2 == 16
+%if %2 <= 16
%if cpuflag(ssse3)
pxor m0, m0
%endif
@@ -655,8 +685,15 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
%endif
%else
mova m6, [pb_80]
+%if %2 == 44
movd m7, Hd
SPLATB_MIX m7
+%else
+%if cpuflag(ssse3)
+ pxor m0, m0
+%endif
+ SPLATB_REG m7, H, m0 ; H H H H ...
+%endif
pxor m7, m6
ABSSUB m4, rp1, rp0, m1 ; abs(p1 - p0)
pxor m4, m6
@@ -670,7 +707,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
%if %2 == 16
; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3)
; calc flat8out mask
-%if ARCH_X86_64
+%ifdef m8
mova m8, [P7]
mova m9, [P6]
%define rp7 m8
@@ -682,7 +719,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
ABSSUB_GT m1, rp7, rp0, m6, m5 ; abs(p7 - p0) <= 1
ABSSUB_GT m7, rp6, rp0, m6, m5 ; abs(p6 - p0) <= 1
por m1, m7
-%if ARCH_X86_64
+%ifdef m8
mova m8, [P5]
mova m9, [P4]
%define rp5 m8
@@ -695,7 +732,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
por m1, m7
ABSSUB_GT m7, rp4, rp0, m6, m5 ; abs(p4 - p0) <= 1
por m1, m7
-%if ARCH_X86_64
+%ifdef m8
mova m14, [Q4]
mova m15, [Q5]
%define rq4 m14
@@ -708,7 +745,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
por m1, m7
ABSSUB_GT m7, rq5, rq0, m6, m5 ; abs(q5 - q0) <= 1
por m1, m7
-%if ARCH_X86_64
+%ifdef m8
mova m14, [Q6]
mova m15, [Q7]
%define rq6 m14
@@ -738,7 +775,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7)
; filter2()
-%if %2 != 44
+%if %2 != 44 && %2 != 4
mova m6, [pb_80] ; already in m6 if 44_16
SCRATCH 2, 15, rsp+%3+%4
%if %2 == 16
@@ -756,7 +793,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
paddsb m4, m2 ; 3*(q0 - p0) + (p1 - q1)
paddsb m6, m4, [pb_4] ; m6: f1 = clip(f + 4, 127)
paddsb m4, [pb_3] ; m4: f2 = clip(f + 3, 127)
-%if ARCH_X86_64
+%ifdef m8
mova m14, [pb_10] ; will be reused in filter4()
%define rb10 m14
%else
@@ -765,8 +802,8 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
SRSHIFT3B_2X m6, m4, rb10, m7 ; f1 and f2 sign byte shift by 3
SIGN_SUB m7, rq0, m6, m5 ; m7 = q0 - f1
SIGN_ADD m1, rp0, m4, m5 ; m1 = p0 + f2
-%if %2 != 44
-%if ARCH_X86_64
+%if %2 != 44 && %2 != 4
+%ifdef m8
pandn m6, m15, m3 ; ~mask(in) & mask(fm)
%else
mova m6, [rsp+%3+%4]
@@ -787,8 +824,8 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
paddsb m6, m2, [pb_4] ; m6: f1 = clip(f + 4, 127)
paddsb m2, [pb_3] ; m2: f2 = clip(f + 3, 127)
SRSHIFT3B_2X m6, m2, rb10, m4 ; f1 and f2 sign byte shift by 3
-%if %2 != 44
-%if ARCH_X86_64
+%if %2 != 44 && %2 != 4
+%ifdef m8
pandn m5, m15, m3 ; ~mask(in) & mask(fm)
%else
mova m5, [rsp+%3+%4]
@@ -815,26 +852,26 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
mova [P1], m1
mova [Q1], m4
-%if %2 != 44
+%if %2 != 44 && %2 != 4
UNSCRATCH 2, 15, rsp+%3+%4
%endif
; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1)
; filter6()
-%if %2 != 44
+%if %2 != 44 && %2 != 4
pxor m0, m0
-%if %2 > 16
+%if %2 != 16
pand m3, m2
%else
pand m2, m3 ; mask(fm) & mask(in)
-%if ARCH_X86_64
+%ifdef m8
pandn m3, m8, m2 ; ~mask(out) & (mask(fm) & mask(in))
%else
mova m3, [rsp+%3+%4+16]
pandn m3, m2
%endif
%endif
-%if ARCH_X86_64
+%ifdef m8
mova m14, [P3]
mova m9, [Q3]
%define rp3 m14
@@ -882,7 +919,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
pand m1, m2 ; mask(out) & (mask(fm) & mask(in))
mova m2, [P7]
mova m3, [P6]
-%if ARCH_X86_64
+%ifdef m8
mova m8, [P5]
mova m9, [P4]
%define rp5 m8
@@ -1008,7 +1045,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
movhps [Q5], m6
movhps [Q7], m7
%endif
-%elif %2 == 44
+%elif %2 == 44 || %2 == 4
SWAP 0, 1 ; m0 = p1
SWAP 1, 7 ; m1 = p0
SWAP 2, 5 ; m2 = q0
@@ -1018,6 +1055,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
SBUTTERFLY bw, 2, 3, 4
SBUTTERFLY wd, 0, 2, 4
SBUTTERFLY wd, 1, 3, 4
+%if mmsize == 16
movd [P7], m0
movd [P3], m2
movd [Q0], m1
@@ -1047,6 +1085,20 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
movd [Q3], m1
movd [Q7], m3
%else
+ movd [P7], m0
+ movd [P5], m2
+ movd [P3], m1
+ movd [P1], m3
+ psrlq m0, 32
+ psrlq m2, 32
+ psrlq m1, 32
+ psrlq m3, 32
+ movd [P6], m0
+ movd [P4], m2
+ movd [P2], m1
+ movd [P0], m3
+%endif
+%else
; the following code do a transpose of 8 full lines to 16 half
; lines (high part). It is inlined to avoid the need of a staging area
mova m0, [P3]
@@ -1055,12 +1107,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
mova m3, [P0]
mova m4, [Q0]
mova m5, [Q1]
-%if ARCH_X86_64
+%ifdef m8
mova m6, [Q2]
%endif
mova m7, [Q3]
DEFINE_REAL_P7_TO_Q7
-%if ARCH_X86_64
+%ifdef m8
SBUTTERFLY bw, 0, 1, 8
SBUTTERFLY bw, 2, 3, 8
SBUTTERFLY bw, 4, 5, 8
@@ -1075,27 +1127,32 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
SBUTTERFLY dq, 3, 7, 8
%else
SBUTTERFLY bw, 0, 1, 6
- mova [rsp+64], m1
- mova m6, [rsp+96]
+ mova [rsp+mmsize*4], m1
+ mova m6, [rsp+mmsize*6]
SBUTTERFLY bw, 2, 3, 1
SBUTTERFLY bw, 4, 5, 1
SBUTTERFLY bw, 6, 7, 1
SBUTTERFLY wd, 0, 2, 1
- mova [rsp+96], m2
- mova m1, [rsp+64]
+ mova [rsp+mmsize*6], m2
+ mova m1, [rsp+mmsize*4]
SBUTTERFLY wd, 1, 3, 2
SBUTTERFLY wd, 4, 6, 2
SBUTTERFLY wd, 5, 7, 2
SBUTTERFLY dq, 0, 4, 2
SBUTTERFLY dq, 1, 5, 2
+%if mmsize == 16
movh [Q0], m1
movhps [Q1], m1
- mova m2, [rsp+96]
+%else
+ mova [P3], m1
+%endif
+ mova m2, [rsp+mmsize*6]
SBUTTERFLY dq, 2, 6, 1
SBUTTERFLY dq, 3, 7, 1
%endif
SWAP 3, 6
SWAP 1, 4
+%if mmsize == 16
movh [P7], m0
movhps [P6], m0
movh [P5], m1
@@ -1104,7 +1161,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
movhps [P2], m2
movh [P1], m3
movhps [P0], m3
-%if ARCH_X86_64
+%ifdef m8
movh [Q0], m4
movhps [Q1], m4
%endif
@@ -1114,6 +1171,15 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
movhps [Q5], m6
movh [Q6], m7
movhps [Q7], m7
+%else
+ mova [P7], m0
+ mova [P6], m1
+ mova [P5], m2
+ mova [P4], m3
+ mova [P2], m5
+ mova [P1], m6
+ mova [P0], m7
+%endif
%endif
%endif
@@ -1137,3 +1203,9 @@ LPF_16_VH_ALL_OPTS 44, 0, 128, 0
LPF_16_VH_ALL_OPTS 48, 256, 128, 16
LPF_16_VH_ALL_OPTS 84, 256, 128, 16
LPF_16_VH_ALL_OPTS 88, 256, 128, 16
+
+INIT_MMX mmxext
+LOOPFILTER v, 4, 0, 0, 0
+LOOPFILTER h, 4, 0, 64, 0
+LOOPFILTER v, 8, 128, 0, 8
+LOOPFILTER h, 8, 128, 64, 8
diff --git a/media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm b/media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm
index c15437b8b..c0888170c 100644
--- a/media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm
+++ b/media/ffvpx/libavcodec/x86/vp9lpf_16bpp.asm
@@ -78,7 +78,7 @@ SECTION .text
%endif
%endmacro
-; calulate p or q portion of flat8out
+; calculate p or q portion of flat8out
%macro FLAT8OUT_HALF 0
psubw m4, m0 ; q4-q0
psubw m5, m0 ; q5-q0