field | value | date
---|---|---
author | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500
committer | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500
commit | 5f8de423f190bbb79a62f804151bc24824fa32d8 (patch) |
tree | 10027f336435511475e392454359edea8e25895d /media/libopus/celt/mips/vq_mipsr1.h |
parent | 49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff) |
download | UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip |
Add m-esr52 at 52.6.0
Diffstat (limited to 'media/libopus/celt/mips/vq_mipsr1.h')
mode | file | insertions
---|---|---
-rw-r--r-- | media/libopus/celt/mips/vq_mipsr1.h | 125

1 file changed, 125 insertions, 0 deletions
diff --git a/media/libopus/celt/mips/vq_mipsr1.h b/media/libopus/celt/mips/vq_mipsr1.h
new file mode 100644
index 000000000..54cef8613
--- /dev/null
+++ b/media/libopus/celt/mips/vq_mipsr1.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2007-2008 CSIRO
+   Copyright (c) 2007-2009 Xiph.Org Foundation
+   Written by Jean-Marc Valin */
+/*
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   - Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+   - Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef __VQ_MIPSR1_H__
+#define __VQ_MIPSR1_H__
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "mathops.h"
+#include "arch.h"
+
+static unsigned extract_collapse_mask(int *iy, int N, int B);
+static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X, int N, opus_val32 Ryy, opus_val16 gain);
+static void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread);
+static void renormalise_vector_mips(celt_norm *X, int N, opus_val16 gain, int arch);
+
+#define OVERRIDE_vq_exp_rotation1
+static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
+{
+   int i;
+   opus_val16 ms;
+   celt_norm *Xptr;
+   Xptr = X;
+   ms = NEG16(s);
+   for (i=0;i<len-stride;i++)
+   {
+      celt_norm x1, x2;
+      x1 = Xptr[0];
+      x2 = Xptr[stride];
+      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
+      *Xptr++ = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
+   }
+   Xptr = &X[len-2*stride-1];
+   for (i=len-2*stride-1;i>=0;i--)
+   {
+      celt_norm x1, x2;
+      x1 = Xptr[0];
+      x2 = Xptr[stride];
+      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
+      *Xptr-- = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
+   }
+}
+
+#define OVERRIDE_renormalise_vector
+
+#define renormalise_vector(X, N, gain, arch) \
+   (renormalise_vector_mips(X, N, gain, arch))
+
+void renormalise_vector_mips(celt_norm *X, int N, opus_val16 gain, int arch)
+{
+   int i;
+#ifdef FIXED_POINT
+   int k;
+#endif
+   opus_val32 E = EPSILON;
+   opus_val16 g;
+   opus_val32 t;
+   celt_norm *xptr = X;
+   int X0, X1;
+
+   (void)arch;
+
+   asm volatile("mult $ac1, $0, $0");
+   asm volatile("MTLO %0, $ac1" : :"r" (E));
+   /*if(N %4)
+       printf("error");*/
+   for (i=0;i<N-2;i+=2)
+   {
+      X0 = (int)*xptr++;
+      asm volatile("MADD $ac1, %0, %1" : : "r" (X0), "r" (X0));
+
+      X1 = (int)*xptr++;
+      asm volatile("MADD $ac1, %0, %1" : : "r" (X1), "r" (X1));
+   }
+
+   for (;i<N;i++)
+   {
+      X0 = (int)*xptr++;
+      asm volatile("MADD $ac1, %0, %1" : : "r" (X0), "r" (X0));
+   }
+
+   asm volatile("MFLO %0, $ac1" : "=r" (E));
+#ifdef FIXED_POINT
+   k = celt_ilog2(E)>>1;
+#endif
+   t = VSHR32(E, 2*(k-7));
+   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);
+
+   xptr = X;
+   for (i=0;i<N;i++)
+   {
+      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
+      xptr++;
+   }
+   /*return celt_sqrt(E);*/
+}
+
+#endif /* __VQ_MIPSR1_H__ */
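For context on the inline assembly above: the `mult $ac1` / `MTLO` / `MADD` / `MFLO` sequence in `renormalise_vector_mips()` keeps a running sum of squares of the input vector in the MIPS `$ac1` accumulator (the energy E), which is then used to rescale every sample to the requested gain. The sketch below is a plain-C illustration of that same computation, not part of the patch; it substitutes floating-point arithmetic for the fixed-point macros used in the real code (`EPSILON`, `EXTRACT16`, `PSHR32`, `celt_rsqrt_norm`), and the names `renormalise_vector_ref` and `EPSILON_REF` are hypothetical.

```c
#include <math.h>

/* Hypothetical, plain-C illustration of what renormalise_vector_mips()
   computes: the MIPS "mult $ac1" / "MTLO" / "MADD" / "MFLO" sequence is
   just a running sum of squares kept in the $ac1 accumulator.  Float
   arithmetic replaces the fixed-point macros of the committed code. */
#define EPSILON_REF 1e-15f   /* stand-in for Opus's EPSILON guard value */

static void renormalise_vector_ref(float *X, int N, float gain)
{
   int i;
   double E = EPSILON_REF;   /* guards against division by zero */
   float g;

   /* Energy accumulation: the role of the MADD loops in the patch. */
   for (i = 0; i < N; i++)
      E += (double)X[i] * X[i];

   /* Scale factor g = gain / sqrt(E); in the committed code the
      fixed-point celt_rsqrt_norm() plays this role. */
   g = gain / (float)sqrt(E);

   /* Rescale in place so the vector's energy becomes gain^2. */
   for (i = 0; i < N; i++)
      X[i] *= g;
}
```

The two-sample unrolling in the committed loop simply issues a pair of MADD instructions per iteration; the arithmetic is the same sum of squares, and calling either version on an already unit-energy vector with gain 1.0 should leave it numerically unchanged.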