author    Moonchild <mcwerewolf@gmail.com>  2018-04-27 19:15:26 +0200
committer GitHub <noreply@github.com>       2018-04-27 19:15:26 +0200
commit    c75dae3ed21bfa5a8ae46cd83d18329af5bea05a (patch)
tree      ef657c38feb2368a3c86765778d1f068aa5eb614 /media/ffvpx/libavutil/x86/float_dsp.asm
parent    c82c6d960a7f19d6595171f9705c43514f20c1ec (diff)
parent    6ada4b14e4cfc91f5f1b2556623cab691f3ab813 (diff)
Merge pull request #275 from trav90/ffvpx-resync
Resync ffvpx code with 3.4.2-release from upstream
Diffstat (limited to 'media/ffvpx/libavutil/x86/float_dsp.asm')
-rw-r--r--  media/ffvpx/libavutil/x86/float_dsp.asm | 86
1 file changed, 81 insertions(+), 5 deletions(-)
diff --git a/media/ffvpx/libavutil/x86/float_dsp.asm b/media/ffvpx/libavutil/x86/float_dsp.asm
index 021ff03c8..06d2d2cfd 100644
--- a/media/ffvpx/libavutil/x86/float_dsp.asm
+++ b/media/ffvpx/libavutil/x86/float_dsp.asm
@@ -22,6 +22,9 @@
%include "x86util.asm"
+SECTION_RODATA 32
+pd_reverse: dd 7, 6, 5, 4, 3, 2, 1, 0
+
SECTION .text
;-----------------------------------------------------------------------------
@@ -149,6 +152,69 @@ INIT_XMM sse
VECTOR_FMUL_SCALAR
;------------------------------------------------------------------------------
+; void ff_vector_dmac_scalar(double *dst, const double *src, double mul,
+; int len)
+;------------------------------------------------------------------------------
+
+%macro VECTOR_DMAC_SCALAR 0
+%if ARCH_X86_32
+cglobal vector_dmac_scalar, 2,4,5, dst, src, mul, len, lenaddr
+ mov lenq, lenaddrm
+ VBROADCASTSD m0, mulm
+%else
+%if UNIX64
+cglobal vector_dmac_scalar, 3,3,5, dst, src, len
+%else
+cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
+ SWAP 0, 2
+%endif
+ movlhps xm0, xm0
+%if cpuflag(avx)
+ vinsertf128 m0, m0, xm0, 1
+%endif
+%endif
+ lea lenq, [lend*8-mmsize*4]
+.loop:
+%if cpuflag(fma3)
+ movaps m1, [dstq+lenq]
+ movaps m2, [dstq+lenq+1*mmsize]
+ movaps m3, [dstq+lenq+2*mmsize]
+ movaps m4, [dstq+lenq+3*mmsize]
+ fmaddpd m1, m0, [srcq+lenq], m1
+ fmaddpd m2, m0, [srcq+lenq+1*mmsize], m2
+ fmaddpd m3, m0, [srcq+lenq+2*mmsize], m3
+ fmaddpd m4, m0, [srcq+lenq+3*mmsize], m4
+%else ; cpuflag
+ mulpd m1, m0, [srcq+lenq]
+ mulpd m2, m0, [srcq+lenq+1*mmsize]
+ mulpd m3, m0, [srcq+lenq+2*mmsize]
+ mulpd m4, m0, [srcq+lenq+3*mmsize]
+ addpd m1, m1, [dstq+lenq]
+ addpd m2, m2, [dstq+lenq+1*mmsize]
+ addpd m3, m3, [dstq+lenq+2*mmsize]
+ addpd m4, m4, [dstq+lenq+3*mmsize]
+%endif ; cpuflag
+ movaps [dstq+lenq], m1
+ movaps [dstq+lenq+1*mmsize], m2
+ movaps [dstq+lenq+2*mmsize], m3
+ movaps [dstq+lenq+3*mmsize], m4
+ sub lenq, mmsize*4
+ jge .loop
+ REP_RET
+%endmacro
+
+INIT_XMM sse2
+VECTOR_DMAC_SCALAR
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+VECTOR_DMAC_SCALAR
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+VECTOR_DMAC_SCALAR
+%endif
+
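For orientation: the new ff_vector_dmac_scalar is a multiply-accumulate over doubles. A minimal C sketch of the scalar behaviour implied by the comment header above (the helper name is illustrative, not part of this diff):

    /* dst[i] += src[i] * mul -- scalar reference for the SIMD loop above. */
    static void vector_dmac_scalar_c(double *dst, const double *src,
                                     double mul, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] += src[i] * mul;
    }

The asm unrolls this across four vector registers per iteration and walks lenq down from the end of the buffers, so callers are expected to pass aligned buffers whose length is a multiple of the unroll width.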
+;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
; int len)
;------------------------------------------------------------------------------
@@ -177,8 +243,8 @@ cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
.loop:
mulpd m1, m0, [srcq+lenq ]
mulpd m2, m0, [srcq+lenq+mmsize]
- mova [dstq+lenq ], m1
- mova [dstq+lenq+mmsize], m2
+ movaps [dstq+lenq ], m1
+ movaps [dstq+lenq+mmsize], m2
sub lenq, 2*mmsize
jge .loop
REP_RET
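The only change in this hunk is the store mnemonic: x86inc's mova expands to the integer-domain movdqa here, while movaps stores the same aligned bits with a one-byte-shorter encoding and stays in the floating-point domain, which is presumably why upstream prefers it. For reference, the function's scalar semantics (illustrative helper, not part of this diff):

    /* dst[i] = src[i] * mul -- scalar reference for ff_vector_dmul_scalar. */
    static void vector_dmul_scalar_c(double *dst, const double *src,
                                     double mul, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] = src[i] * mul;
    }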
@@ -296,10 +362,16 @@ VECTOR_FMUL_ADD
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
+%if cpuflag(avx2)
+ movaps m2, [pd_reverse]
+%endif
lea lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
-%if cpuflag(avx)
+%if cpuflag(avx2)
+ vpermps m0, m2, [src1q]
+ vpermps m1, m2, [src1q+mmsize]
+%elif cpuflag(avx)
vmovaps xmm0, [src1q + 16]
vinsertf128 m0, m0, [src1q], 1
vshufps m0, m0, m0, q0123
@@ -314,8 +386,8 @@ ALIGN 16
%endif
mulps m0, m0, [src0q + lenq + mmsize]
mulps m1, m1, [src0q + lenq]
- mova [dstq + lenq + mmsize], m0
- mova [dstq + lenq], m1
+ movaps [dstq + lenq + mmsize], m0
+ movaps [dstq + lenq], m1
add src1q, 2*mmsize
sub lenq, 2*mmsize
jge .loop
@@ -328,6 +400,10 @@ VECTOR_FMUL_REVERSE
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+VECTOR_FMUL_REVERSE
+%endif
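In the new AVX2 path, pd_reverse (the dwords 7..0 added in the first hunk) serves as the index operand of vpermps, a lane-crossing permute that reverses all eight floats of a ymm load in one instruction, replacing the vmovaps/vinsertf128/vshufps sequence of the AVX path. ff_vector_fmul_reverse itself computes dst[i] = src0[i] * src1[len-1-i], i.e. src1 is read back-to-front. An illustrative AVX2 intrinsics sketch of the same permute (not part of this diff):

    #include <immintrin.h>

    /* Reverse eight packed floats with a single vpermps, using the same
     * 7..0 index pattern as the pd_reverse constant above. */
    static inline __m256 reverse8_ps(__m256 v)
    {
        const __m256i idx = _mm256_setr_epi32(7, 6, 5, 4, 3, 2, 1, 0);
        return _mm256_permutevar8x32_ps(v, idx);   /* emits vpermps */
    }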
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
INIT_XMM sse