author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /media/libopus/celt/arm/celt_ne10_mdct.c
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
download   UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar
           UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
           UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz
           UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz
           UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip

Add m-esr52 at 52.6.0
Diffstat (limited to 'media/libopus/celt/arm/celt_ne10_mdct.c')
-rw-r--r--  media/libopus/celt/arm/celt_ne10_mdct.c  258
1 file changed, 258 insertions, 0 deletions
diff --git a/media/libopus/celt/arm/celt_ne10_mdct.c b/media/libopus/celt/arm/celt_ne10_mdct.c
new file mode 100644
index 000000000..293c3efd7
--- /dev/null
+++ b/media/libopus/celt/arm/celt_ne10_mdct.c
@@ -0,0 +1,258 @@
+/* Copyright (c) 2015 Xiph.Org Foundation
+ Written by Viswanath Puttagunta */
+/**
+ @file celt_ne10_mdct.c
+ @brief ARM Neon optimizations for mdct using NE10 library
+ */
+
+/*
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef SKIP_CONFIG_H
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#endif
+
+#include "kiss_fft.h"
+#include "_kiss_fft_guts.h"
+#include "mdct.h"
+#include "stack_alloc.h"
+
+void clt_mdct_forward_neon(const mdct_lookup *l,
+ kiss_fft_scalar *in,
+ kiss_fft_scalar * OPUS_RESTRICT out,
+ const opus_val16 *window,
+ int overlap, int shift, int stride, int arch)
+{
+ int i;
+ int N, N2, N4;
+ VARDECL(kiss_fft_scalar, f);
+ VARDECL(kiss_fft_cpx, f2);
+ const kiss_fft_state *st = l->kfft[shift];
+ const kiss_twiddle_scalar *trig;
+
+ SAVE_STACK;
+
+ N = l->n;
+ trig = l->trig;
+ for (i=0;i<shift;i++)
+ {
+ N >>= 1;
+ trig += N;
+ }
+ N2 = N>>1;
+ N4 = N>>2;
+
+ ALLOC(f, N2, kiss_fft_scalar);
+ ALLOC(f2, N4, kiss_fft_cpx);
+
+ /* Consider the input to be composed of four blocks: [a, b, c, d] */
+ /* Window, shuffle, fold */
+ {
+ /* Temp pointers to make it really clear to the compiler what we're doing */
+ const kiss_fft_scalar * OPUS_RESTRICT xp1 = in+(overlap>>1);
+ const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+N2-1+(overlap>>1);
+ kiss_fft_scalar * OPUS_RESTRICT yp = f;
+ const opus_val16 * OPUS_RESTRICT wp1 = window+(overlap>>1);
+ const opus_val16 * OPUS_RESTRICT wp2 = window+(overlap>>1)-1;
+ for(i=0;i<((overlap+3)>>2);i++)
+ {
+ /* Real part arranged as -d-cR, Imag part arranged as -b+aR*/
+ *yp++ = MULT16_32_Q15(*wp2, xp1[N2]) + MULT16_32_Q15(*wp1,*xp2);
+ *yp++ = MULT16_32_Q15(*wp1, *xp1) - MULT16_32_Q15(*wp2, xp2[-N2]);
+ xp1+=2;
+ xp2-=2;
+ wp1+=2;
+ wp2-=2;
+ }
+ wp1 = window;
+ wp2 = window+overlap-1;
+ for(;i<N4-((overlap+3)>>2);i++)
+ {
+ /* Real part arranged as a-bR, Imag part arranged as -c-dR */
+ *yp++ = *xp2;
+ *yp++ = *xp1;
+ xp1+=2;
+ xp2-=2;
+ }
+ for(;i<N4;i++)
+ {
+ /* Real part arranged as a-bR, Imag part arranged as -c-dR */
+ *yp++ = -MULT16_32_Q15(*wp1, xp1[-N2]) + MULT16_32_Q15(*wp2, *xp2);
+ *yp++ = MULT16_32_Q15(*wp2, *xp1) + MULT16_32_Q15(*wp1, xp2[N2]);
+ xp1+=2;
+ xp2-=2;
+ wp1+=2;
+ wp2-=2;
+ }
+ }
+ /* Pre-rotation */
+ {
+ kiss_fft_scalar * OPUS_RESTRICT yp = f;
+ const kiss_twiddle_scalar *t = &trig[0];
+ for(i=0;i<N4;i++)
+ {
+ kiss_fft_cpx yc;
+ kiss_twiddle_scalar t0, t1;
+ kiss_fft_scalar re, im, yr, yi;
+ t0 = t[i];
+ t1 = t[N4+i];
+ re = *yp++;
+ im = *yp++;
+ yr = S_MUL(re,t0) - S_MUL(im,t1);
+ yi = S_MUL(im,t0) + S_MUL(re,t1);
+ yc.r = yr;
+ yc.i = yi;
+ f2[i] = yc;
+ }
+ }
+
+ opus_fft(st, f2, (kiss_fft_cpx *)f, arch);
+
+ /* Post-rotate */
+ {
+ /* Temp pointers to make it really clear to the compiler what we're doing */
+ const kiss_fft_cpx * OPUS_RESTRICT fp = (kiss_fft_cpx *)f;
+ kiss_fft_scalar * OPUS_RESTRICT yp1 = out;
+ kiss_fft_scalar * OPUS_RESTRICT yp2 = out+stride*(N2-1);
+ const kiss_twiddle_scalar *t = &trig[0];
+ /* Temp pointers to make it really clear to the compiler what we're doing */
+ for(i=0;i<N4;i++)
+ {
+ kiss_fft_scalar yr, yi;
+ yr = S_MUL(fp->i,t[N4+i]) - S_MUL(fp->r,t[i]);
+ yi = S_MUL(fp->r,t[N4+i]) + S_MUL(fp->i,t[i]);
+ *yp1 = yr;
+ *yp2 = yi;
+ fp++;
+ yp1 += 2*stride;
+ yp2 -= 2*stride;
+ }
+ }
+ RESTORE_STACK;
+}
+
+void clt_mdct_backward_neon(const mdct_lookup *l,
+ kiss_fft_scalar *in,
+ kiss_fft_scalar * OPUS_RESTRICT out,
+ const opus_val16 * OPUS_RESTRICT window,
+ int overlap, int shift, int stride, int arch)
+{
+ int i;
+ int N, N2, N4;
+ VARDECL(kiss_fft_scalar, f);
+ const kiss_twiddle_scalar *trig;
+ const kiss_fft_state *st = l->kfft[shift];
+
+ N = l->n;
+ trig = l->trig;
+ for (i=0;i<shift;i++)
+ {
+ N >>= 1;
+ trig += N;
+ }
+ N2 = N>>1;
+ N4 = N>>2;
+
+ ALLOC(f, N2, kiss_fft_scalar);
+
+ /* Pre-rotate */
+ {
+ /* Temp pointers to make it really clear to the compiler what we're doing */
+ const kiss_fft_scalar * OPUS_RESTRICT xp1 = in;
+ const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+stride*(N2-1);
+ kiss_fft_scalar * OPUS_RESTRICT yp = f;
+ const kiss_twiddle_scalar * OPUS_RESTRICT t = &trig[0];
+ for(i=0;i<N4;i++)
+ {
+ kiss_fft_scalar yr, yi;
+ yr = S_MUL(*xp2, t[i]) + S_MUL(*xp1, t[N4+i]);
+ yi = S_MUL(*xp1, t[i]) - S_MUL(*xp2, t[N4+i]);
+ yp[2*i] = yr;
+ yp[2*i+1] = yi;
+ xp1+=2*stride;
+ xp2-=2*stride;
+ }
+ }
+
+ opus_ifft(st, (kiss_fft_cpx *)f, (kiss_fft_cpx*)(out+(overlap>>1)), arch);
+
+ /* Post-rotate and de-shuffle from both ends of the buffer at once to make
+ it in-place. */
+ {
+ kiss_fft_scalar * yp0 = out+(overlap>>1);
+ kiss_fft_scalar * yp1 = out+(overlap>>1)+N2-2;
+ const kiss_twiddle_scalar *t = &trig[0];
+ /* Loop to (N4+1)>>1 to handle odd N4. When N4 is odd, the
+ middle pair will be computed twice. */
+ for(i=0;i<(N4+1)>>1;i++)
+ {
+ kiss_fft_scalar re, im, yr, yi;
+ kiss_twiddle_scalar t0, t1;
+ re = yp0[0];
+ im = yp0[1];
+ t0 = t[i];
+ t1 = t[N4+i];
+ /* We'd scale up by 2 here, but instead it's done when mixing the windows */
+ yr = S_MUL(re,t0) + S_MUL(im,t1);
+ yi = S_MUL(re,t1) - S_MUL(im,t0);
+ re = yp1[0];
+ im = yp1[1];
+ yp0[0] = yr;
+ yp1[1] = yi;
+
+ t0 = t[(N4-i-1)];
+ t1 = t[(N2-i-1)];
+ /* We'd scale up by 2 here, but instead it's done when mixing the windows */
+ yr = S_MUL(re,t0) + S_MUL(im,t1);
+ yi = S_MUL(re,t1) - S_MUL(im,t0);
+ yp1[0] = yr;
+ yp0[1] = yi;
+ yp0 += 2;
+ yp1 -= 2;
+ }
+ }
+
+ /* Mirror on both sides for TDAC */
+ {
+ kiss_fft_scalar * OPUS_RESTRICT xp1 = out+overlap-1;
+ kiss_fft_scalar * OPUS_RESTRICT yp1 = out;
+ const opus_val16 * OPUS_RESTRICT wp1 = window;
+ const opus_val16 * OPUS_RESTRICT wp2 = window+overlap-1;
+
+ for(i = 0; i < overlap/2; i++)
+ {
+ kiss_fft_scalar x1, x2;
+ x1 = *xp1;
+ x2 = *yp1;
+ *yp1++ = MULT16_32_Q15(*wp2, x2) - MULT16_32_Q15(*wp1, x1);
+ *xp1-- = MULT16_32_Q15(*wp1, x2) + MULT16_32_Q15(*wp2, x1);
+ wp1++;
+ wp2--;
+ }
+ }
+ RESTORE_STACK;
+}
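
For reference (an editorial note, not part of the patch): the forward path above uses the usual quarter-length-FFT MDCT construction. The 2*N2 windowed input samples are folded down to N4 complex values, pre-rotated by the twiddles in trig, passed through an N4-point complex FFT (opus_fft), then post-rotated and interleaved into the output. Up to CELT's own scaling and sign conventions, the transform being computed is the textbook MDCT of the windowed input,

    y_k = \sum_{n=0}^{2N-1} w_n \, x_n \, \cos\!\left[\frac{\pi}{N}\left(n + \tfrac{1}{2} + \tfrac{N}{2}\right)\left(k + \tfrac{1}{2}\right)\right], \qquad k = 0, \dots, N-1,

where the formula's N corresponds to N2 in the code (N time-domain samples in, N2 coefficients out). clt_mdct_backward_neon performs the matching inverse: pre-rotation, an N4-point inverse FFT (opus_ifft), post-rotation in place, and finally the TDAC mirroring of the overlap region.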