From bbcc64772580c8a979288791afa02d30bc476d2e Mon Sep 17 00:00:00 2001
From: trav90
Date: Fri, 19 Oct 2018 21:52:15 -0500
Subject: Update aom to v1.0.0

Update aom to commit id d14c5bb4f336ef1842046089849dee4a301fbbf0.
---
 third_party/aom/aom_dsp/simd/v128_intrinsics_arm.h | 350 +++++++++++++++++++--
 1 file changed, 318 insertions(+), 32 deletions(-)

diff --git a/third_party/aom/aom_dsp/simd/v128_intrinsics_arm.h b/third_party/aom/aom_dsp/simd/v128_intrinsics_arm.h
index 0377d4ce1..d4fec4237 100644
--- a/third_party/aom/aom_dsp/simd/v128_intrinsics_arm.h
+++ b/third_party/aom/aom_dsp/simd/v128_intrinsics_arm.h
@@ -13,7 +13,8 @@
 #define _V128_INTRINSICS_H
 
 #include <arm_neon.h>
-#include "./v64_intrinsics_arm.h"
+
+#include "aom_dsp/simd/v64_intrinsics_arm.h"
 
 typedef int64x2_t v128;
 
@@ -28,7 +29,7 @@ SIMD_INLINE v64 v128_high_v64(v128 a) { return vget_high_s64(a); }
 SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) { return vcombine_s64(b, a); }
 
 SIMD_INLINE v128 v128_from_64(uint64_t a, uint64_t b) {
-  return vcombine_s64((uint64x1_t)b, (uint64x1_t)a);
+  return vcombine_s64((int64x1_t)b, (int64x1_t)a);
 }
 
 SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
@@ -83,22 +84,57 @@ SIMD_INLINE v128 v128_dup_32(uint32_t x) {
   return vreinterpretq_s64_u32(vdupq_n_u32(x));
 }
 
+SIMD_INLINE v128 v128_dup_64(uint64_t x) {
+  return vreinterpretq_s64_u64(vdupq_n_u64(x));
+}
+
+SIMD_INLINE int64_t v128_dotp_su8(v128 a, v128 b) {
+  int16x8_t t1 = vmulq_s16(
+      vmovl_s8(vreinterpret_s8_s64(vget_low_s64(a))),
+      vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(vget_low_s64(b)))));
+  int16x8_t t2 = vmulq_s16(
+      vmovl_s8(vreinterpret_s8_s64(vget_high_s64(a))),
+      vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(vget_high_s64(b)))));
+#if defined(__aarch64__)
+  return vaddlvq_s16(t1) + vaddlvq_s16(t2);
+#else
+  int64x2_t t = vpaddlq_s32(vaddq_s32(vpaddlq_s16(t1), vpaddlq_s16(t2)));
+  return (int64_t)vget_high_s64(t) + (int64_t)vget_low_s64(t);
+#endif
+}
+
 SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
   return v64_dotp_s16(vget_high_s64(a), vget_high_s64(b)) +
          v64_dotp_s16(vget_low_s64(a), vget_low_s64(b));
 }
 
+SIMD_INLINE int64_t v128_dotp_s32(v128 a, v128 b) {
+  int64x2_t t = vpaddlq_s32(
+      vmulq_s32(vreinterpretq_s32_s64(a), vreinterpretq_s32_s64(b)));
+  return (int64_t)vget_high_s64(t) + (int64_t)vget_low_s64(t);
+}
+
 SIMD_INLINE uint64_t v128_hadd_u8(v128 x) {
+#if defined(__aarch64__)
+  return vaddlvq_u8(vreinterpretq_u8_s64(x));
+#else
   uint64x2_t t = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vreinterpretq_u8_s64(x))));
   return vget_lane_s32(
       vreinterpret_s32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0);
+#endif
 }
 
 SIMD_INLINE v128 v128_padd_s16(v128 a) {
   return vreinterpretq_s64_s32(vpaddlq_s16(vreinterpretq_s16_s64(a)));
 }
 
-typedef struct { sad64_internal hi, lo; } sad128_internal;
+SIMD_INLINE v128 v128_padd_u8(v128 a) {
+  return vreinterpretq_s64_u16(vpaddlq_u8(vreinterpretq_u8_s64(a)));
+}
+
+typedef struct {
+  sad64_internal hi, lo;
+} sad128_internal;
 
 SIMD_INLINE sad128_internal v128_sad_u8_init() {
   sad128_internal s;
@@ -117,14 +153,21 @@ SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {
 }
 
 SIMD_INLINE uint32_t v128_sad_u8_sum(sad128_internal s) {
-  return (uint32_t)(v64_sad_u8_sum(s.hi) + v64_sad_u8_sum(s.lo));
+#if defined(__aarch64__)
+  return vaddlvq_u16(s.hi) + vaddlvq_u16(s.lo);
+#else
+  uint64x2_t t = vpaddlq_u32(vpaddlq_u16(vaddq_u16(s.hi, s.lo)));
+  return (uint32_t)(uint64_t)(vget_high_u64(t) + vget_low_u64(t));
+#endif
 }
 
-typedef struct { ssd64_internal hi, lo; } ssd128_internal;
+typedef struct {
+  ssd64_internal hi, lo;
+} ssd128_internal;
 
 SIMD_INLINE ssd128_internal v128_ssd_u8_init() {
   ssd128_internal s;
-  s.hi = s.lo = (ssd64_internal)(uint64_t)0;
+  s.hi = s.lo = v64_ssd_u8_init();
   return s;
 }
 
@@ -154,6 +197,16 @@ SIMD_INLINE v128 v128_add_8(v128 x, v128 y) {
       vaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
 }
 
+SIMD_INLINE v128 v128_sadd_u8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vqaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_sadd_s8(v128 x, v128 y) {
+  return vreinterpretq_s64_s8(
+      vqaddq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
+}
+
 SIMD_INLINE v128 v128_add_16(v128 x, v128 y) {
   return vreinterpretq_s64_s16(
       vaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
@@ -169,6 +222,11 @@ SIMD_INLINE v128 v128_add_32(v128 x, v128 y) {
       vaddq_u32(vreinterpretq_u32_s64(x), vreinterpretq_u32_s64(y)));
 }
 
+SIMD_INLINE v128 v128_add_64(v128 x, v128 y) {
+  return vreinterpretq_s64_u64(
+      vaddq_u64(vreinterpretq_u64_s64(x), vreinterpretq_u64_s64(y)));
+}
+
 SIMD_INLINE v128 v128_sub_8(v128 x, v128 y) {
   return vreinterpretq_s64_u8(
       vsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
@@ -204,6 +262,8 @@ SIMD_INLINE v128 v128_sub_32(v128 x, v128 y) {
       vsubq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
 }
 
+SIMD_INLINE v128 v128_sub_64(v128 x, v128 y) { return vsubq_s64(x, y); }
+
 SIMD_INLINE v128 v128_abs_s16(v128 x) {
   return vreinterpretq_s64_s16(vabsq_s16(vreinterpretq_s16_s64(x)));
 }
@@ -223,8 +283,16 @@ SIMD_INLINE v128 v128_mullo_s16(v128 a, v128 b) {
 }
 
 SIMD_INLINE v128 v128_mulhi_s16(v128 a, v128 b) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_s16(vuzp2q_s16(
+      vreinterpretq_s16_s32(vmull_s16(vreinterpret_s16_s64(vget_low_s64(a)),
+                                      vreinterpret_s16_s64(vget_low_s64(b)))),
+      vreinterpretq_s16_s32(
+          vmull_high_s16(vreinterpretq_s16_s64(a), vreinterpretq_s16_s64(b)))));
+#else
   return v128_from_v64(v64_mulhi_s16(vget_high_s64(a), vget_high_s64(b)),
                        v64_mulhi_s16(vget_low_s64(a), vget_low_s64(b)));
+#endif
 }
 
 SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
@@ -233,13 +301,32 @@ SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
 }
 
 SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) {
+#if defined(__aarch64__)
+  int32x4_t t1 = vmull_s16(vreinterpret_s16_s64(vget_low_s64(a)),
+                           vreinterpret_s16_s64(vget_low_s64(b)));
+  int32x4_t t2 =
+      vmull_high_s16(vreinterpretq_s16_s64(a), vreinterpretq_s16_s64(b));
+  return vreinterpretq_s64_s32(vpaddq_s32(t1, t2));
+#else
   return v128_from_v64(v64_madd_s16(vget_high_s64(a), vget_high_s64(b)),
                        v64_madd_s16(vget_low_s64(a), vget_low_s64(b)));
+#endif
 }
 
 SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) {
+#if defined(__aarch64__)
+  int16x8_t t1 = vmulq_s16(
+      vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(vget_low_s64(a)))),
+      vmovl_s8(vreinterpret_s8_s64(vget_low_s64(b))));
+  int16x8_t t2 = vmulq_s16(
+      vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(vget_high_s64(a)))),
+      vmovl_s8(vreinterpret_s8_s64(vget_high_s64(b))));
+  return vreinterpretq_s64_s16(
+      vqaddq_s16(vuzp1q_s16(t1, t2), vuzp2q_s16(t1, t2)));
+#else
   return v128_from_v64(v64_madd_us8(vget_high_s64(a), vget_high_s64(b)),
                        v64_madd_us8(vget_low_s64(a), vget_low_s64(b)));
+#endif
 }
 
 SIMD_INLINE v128 v128_avg_u8(v128 x, v128 y) {
@@ -252,6 +339,11 @@ SIMD_INLINE v128 v128_rdavg_u8(v128 x, v128 y) {
       vhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
 }
 
+SIMD_INLINE v128 v128_rdavg_u16(v128 x, v128 y) {
+  return vreinterpretq_s64_u16(
+      vhaddq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
+}
+
 SIMD_INLINE v128 v128_avg_u16(v128 x, v128 y) {
   return vreinterpretq_s64_u16(
       vrhaddq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
@@ -272,6 +364,26 @@ SIMD_INLINE v128 v128_min_s8(v128 x, v128 y) {
       vminq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
 }
 
+SIMD_INLINE uint32_t v128_movemask_8(v128 a) {
+  a = vreinterpretq_s64_u8(vcltq_s8(vreinterpretq_s8_s64(a), vdupq_n_s8(0)));
+#if defined(__aarch64__)
+  uint8x16_t m =
+      vandq_u8(vreinterpretq_u8_s64(a),
+               vreinterpretq_u8_u64(vdupq_n_u64(0x8040201008040201ULL)));
+  return vaddv_u8(vget_low_u8(m)) + (vaddv_u8(vget_high_u8(m)) << 8);
+#else
+  uint64x2_t m = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(
+      vandq_u8(vreinterpretq_u8_s64(a),
+               vreinterpretq_u8_u64(vdupq_n_u64(0x8040201008040201ULL))))));
+  return v64_u64(v64_ziplo_8(v128_high_v64((v128)m), v128_low_v64((v128)m)));
+#endif
+}
+
+SIMD_INLINE v128 v128_blend_8(v128 a, v128 b, v128 c) {
+  c = vreinterpretq_s64_u8(vcltq_s8(vreinterpretq_s8_s64(c), vdupq_n_s8(0)));
+  return v128_or(v128_and(b, c), v128_andn(a, c));
+}
+
 SIMD_INLINE v128 v128_max_s8(v128 x, v128 y) {
   return vreinterpretq_s64_s8(
       vmaxq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
@@ -287,14 +399,34 @@ SIMD_INLINE v128 v128_max_s16(v128 x, v128 y) {
       vmaxq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
 }
 
+SIMD_INLINE v128 v128_min_s32(v128 x, v128 y) {
+  return vreinterpretq_s64_s32(
+      vminq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
+}
+
+SIMD_INLINE v128 v128_max_s32(v128 x, v128 y) {
+  return vreinterpretq_s64_s32(
+      vmaxq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
+}
+
 SIMD_INLINE v128 v128_ziplo_8(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u8(
+      vzip1q_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x)));
+#else
   uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
   return vreinterpretq_s64_u8(r.val[0]);
+#endif
 }
 
 SIMD_INLINE v128 v128_ziphi_8(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u8(
+      vzip2q_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x)));
+#else
   uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
   return vreinterpretq_s64_u8(r.val[1]);
+#endif
 }
 
 SIMD_INLINE v128 v128_zip_8(v64 x, v64 y) {
@@ -303,13 +435,23 @@ SIMD_INLINE v128 v128_zip_8(v64 x, v64 y) {
 }
 
 SIMD_INLINE v128 v128_ziplo_16(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u16(
+      vzip1q_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x)));
+#else
   int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
   return vreinterpretq_s64_s16(r.val[0]);
+#endif
 }
 
 SIMD_INLINE v128 v128_ziphi_16(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u16(
+      vzip2q_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x)));
+#else
   int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
   return vreinterpretq_s64_s16(r.val[1]);
+#endif
 }
 
 SIMD_INLINE v128 v128_zip_16(v64 x, v64 y) {
@@ -318,13 +460,23 @@ SIMD_INLINE v128 v128_zip_16(v64 x, v64 y) {
 }
 
 SIMD_INLINE v128 v128_ziplo_32(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u32(
+      vzip1q_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x)));
+#else
   int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
   return vreinterpretq_s64_s32(r.val[0]);
+#endif
 }
 
 SIMD_INLINE v128 v128_ziphi_32(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u32(
+      vzip2q_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x)));
+#else
   int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
   return vreinterpretq_s64_s32(r.val[1]);
+#endif
 }
 
 SIMD_INLINE v128 v128_zip_32(v64 x, v64 y) {
@@ -333,47 +485,76 @@ SIMD_INLINE v128 v128_zip_32(v64 x, v64 y) {
 }
 
 SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) {
-  return v128_from_v64(vget_low_u64((uint64x2_t)a),
-                       vget_low_u64((uint64x2_t)b));
+  return v128_from_v64(vget_low_s64((int64x2_t)a), vget_low_s64((int64x2_t)b));
 }
 
 SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) {
-  return v128_from_v64(vget_high_u64((uint64x2_t)a),
-                       vget_high_u64((uint64x2_t)b));
+  return v128_from_v64(vget_high_s64((int64x2_t)a),
+                       vget_high_s64((int64x2_t)b));
 }
 
 SIMD_INLINE v128 v128_unziplo_8(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u8(
+      vuzp1q_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x)));
+#else
   uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
   return vreinterpretq_s64_u8(r.val[0]);
+#endif
 }
 
 SIMD_INLINE v128 v128_unziphi_8(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u8(
+      vuzp2q_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x)));
+#else
   uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
   return vreinterpretq_s64_u8(r.val[1]);
+#endif
 }
 
 SIMD_INLINE v128 v128_unziplo_16(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u16(
+      vuzp1q_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x)));
+#else
   uint16x8x2_t r =
       vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
   return vreinterpretq_s64_u16(r.val[0]);
+#endif
 }
 
 SIMD_INLINE v128 v128_unziphi_16(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u16(
+      vuzp2q_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x)));
+#else
   uint16x8x2_t r =
       vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
   return vreinterpretq_s64_u16(r.val[1]);
+#endif
 }
 
 SIMD_INLINE v128 v128_unziplo_32(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u32(
+      vuzp1q_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x)));
+#else
   uint32x4x2_t r =
       vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
   return vreinterpretq_s64_u32(r.val[0]);
+#endif
 }
 
 SIMD_INLINE v128 v128_unziphi_32(v128 x, v128 y) {
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u32(
+      vuzp2q_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x)));
+#else
   uint32x4x2_t r =
       vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
   return vreinterpretq_s64_u32(r.val[1]);
+#endif
 }
 
 SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {
@@ -406,6 +587,12 @@ SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
       vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(b))));
 }
 
+SIMD_INLINE v128 v128_pack_s32_u16(v128 a, v128 b) {
+  return v128_from_v64(
+      vreinterpret_s64_u16(vqmovun_s32(vreinterpretq_s32_s64(a))),
+      vreinterpret_s64_u16(vqmovun_s32(vreinterpretq_s32_s64(b))));
+}
+
 SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
   return v128_from_v64(
       vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(a))),
@@ -447,15 +634,17 @@ SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
 }
 
 SIMD_INLINE v128 v128_shuffle_8(v128 x, v128 pattern) {
-  return v128_from_64(
-      (uint64_t)vreinterpret_s64_u8(
-          vtbl2_u8((uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_s64(x)),
-                                    vget_high_u8(vreinterpretq_u8_s64(x)) } },
-                   vreinterpret_u8_s64(vget_high_s64(pattern)))),
-      (uint64_t)vreinterpret_s64_u8(
-          vtbl2_u8((uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_s64(x)),
-                                    vget_high_u8(vreinterpretq_u8_s64(x)) } },
-                   vreinterpret_u8_s64(vget_low_s64(pattern)))));
+#if defined(__aarch64__)
+  return vreinterpretq_s64_u8(
+      vqtbl1q_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(pattern)));
+#else
+  uint8x8x2_t p = { { vget_low_u8(vreinterpretq_u8_s64(x)),
+                      vget_high_u8(vreinterpretq_u8_s64(x)) } };
+  return v128_from_64((uint64_t)vreinterpret_s64_u8(vtbl2_u8(
+                          p, vreinterpret_u8_s64(vget_high_s64(pattern)))),
+                      (uint64_t)vreinterpret_s64_u8(vtbl2_u8(
+                          p, vreinterpret_u8_s64(vget_low_s64(pattern)))));
+#endif
 }
 
 SIMD_INLINE v128 v128_cmpgt_s8(v128 x, v128 y) {
@@ -488,19 +677,37 @@ SIMD_INLINE v128 v128_cmpeq_16(v128 x, v128 y) {
       vceqq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
 }
 
+SIMD_INLINE v128 v128_cmpgt_s32(v128 x, v128 y) {
+  return vreinterpretq_s64_u32(
+      vcgtq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
+}
+
+SIMD_INLINE v128 v128_cmplt_s32(v128 x, v128 y) {
+  return vreinterpretq_s64_u32(
+      vcltq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
+}
+
+SIMD_INLINE v128 v128_cmpeq_32(v128 x, v128 y) {
+  return vreinterpretq_s64_u32(
+      vceqq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
+}
+
 SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) {
-  return (c > 7) ? v128_zero() : vreinterpretq_s64_u8(vshlq_u8(
-                                     vreinterpretq_u8_s64(a), vdupq_n_s8(c)));
+  return (c > 7) ? v128_zero()
+                 : vreinterpretq_s64_u8(
+                       vshlq_u8(vreinterpretq_u8_s64(a), vdupq_n_s8(c)));
 }
 
 SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) {
-  return (c > 7) ? v128_zero() : vreinterpretq_s64_u8(vshlq_u8(
-                                     vreinterpretq_u8_s64(a), vdupq_n_s8(-c)));
+  return (c > 7) ? v128_zero()
+                 : vreinterpretq_s64_u8(
+                       vshlq_u8(vreinterpretq_u8_s64(a), vdupq_n_s8(-c)));
 }
 
 SIMD_INLINE v128 v128_shr_s8(v128 a, unsigned int c) {
-  return (c > 7) ? v128_ones() : vreinterpretq_s64_s8(vshlq_s8(
-                                     vreinterpretq_s8_s64(a), vdupq_n_s8(-c)));
+  return (c > 7) ? v128_ones()
                 : vreinterpretq_s64_s8(
+                       vshlq_s8(vreinterpretq_s8_s64(a), vdupq_n_s8(-c)));
 }
 
 SIMD_INLINE v128 v128_shl_16(v128 a, unsigned int c) {
@@ -539,6 +746,22 @@ SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
       vshlq_s32(vreinterpretq_s32_s64(a), vdupq_n_s32(-c)));
 }
 
+SIMD_INLINE v128 v128_shl_64(v128 a, unsigned int c) {
+  return (c > 63) ? v128_zero()
+                  : vreinterpretq_s64_u64(
+                        vshlq_u64(vreinterpretq_u64_s64(a), vdupq_n_s64(c)));
+}
+
+SIMD_INLINE v128 v128_shr_u64(v128 a, unsigned int c) {
+  return (c > 63) ? v128_zero()
+                  : vreinterpretq_s64_u64(
+                        vshlq_u64(vreinterpretq_u64_s64(a), vdupq_n_s64(-c)));
+}
+
+SIMD_INLINE v128 v128_shr_s64(v128 a, unsigned int c) {
+  return (c > 63) ? v128_ones() : vshlq_s64(a, vdupq_n_s64(-c));
+}
+
 #if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
 
 SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
@@ -562,16 +785,18 @@ SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
 
 SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
   return n < 8
              ? v128_from_64(
-                   vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)), n * 8),
-                   vorr_u64(
+                   (uint64_t)vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
+                                        n * 8),
+                   (uint64_t)vorr_u64(
                        vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)), n * 8),
                        vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                   (8 - n) * 8)))
-             : (n == 8
-                    ? v128_from_64(0, vreinterpret_u64_s64(vget_high_s64(a)))
-                    : v128_from_64(
-                          0, vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
-                                        (n - 8) * 8)));
+             : (n == 8 ? v128_from_64(0, (uint64_t)vreinterpret_u64_s64(
+                                             vget_high_s64(a)))
+                       : v128_from_64(
+                             0, (uint64_t)vshr_n_u64(
+                                    vreinterpret_u64_s64(vget_high_s64(a)),
+                                    (n - 8) * 8)));
 }
 
 SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
@@ -610,6 +835,18 @@ SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
   return vreinterpretq_s64_s32(vshrq_n_s32(vreinterpretq_s32_s64(a), c));
 }
 
+SIMD_INLINE v128 v128_shl_n_64(v128 a, unsigned int c) {
+  return vreinterpretq_s64_u64(vshlq_n_u64(vreinterpretq_u64_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shr_n_u64(v128 a, unsigned int c) {
+  return vreinterpretq_s64_u64(vshrq_n_u64(vreinterpretq_u64_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shr_n_s64(v128 a, unsigned int c) {
+  return vshrq_n_s64(a, c);
+}
+
 #else
 
 SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
@@ -666,6 +903,55 @@ SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
   return v128_shr_s32(a, c);
 }
 
+SIMD_INLINE v128 v128_shl_n_64(v128 a, unsigned int c) {
+  return v128_shl_64(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_u64(v128 a, unsigned int c) {
+  return v128_shr_u64(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_s64(v128 a, unsigned int c) {
+  return v128_shr_s64(a, c);
+}
+
 #endif
 
+typedef uint32x4_t sad128_internal_u16;
+
+SIMD_INLINE sad128_internal_u16 v128_sad_u16_init() { return vdupq_n_u32(0); }
+
+/* Implementation dependent return value. Result must be finalised with
+ * v128_sad_u16_sum(). */
+SIMD_INLINE sad128_internal_u16 v128_sad_u16(sad128_internal_u16 s, v128 a,
+                                             v128 b) {
+  return vaddq_u32(
+      s, vpaddlq_u16(vsubq_u16(
+             vmaxq_u16(vreinterpretq_u16_s64(a), vreinterpretq_u16_s64(b)),
+             vminq_u16(vreinterpretq_u16_s64(a), vreinterpretq_u16_s64(b)))));
+}
+
+SIMD_INLINE uint32_t v128_sad_u16_sum(sad128_internal_u16 s) {
+  uint64x2_t t = vpaddlq_u32(s);
+  return (uint32_t)(uint64_t)vget_high_u64(t) +
+         (uint32_t)(uint64_t)vget_low_u64(t);
+}
+
+typedef v128 ssd128_internal_s16;
+SIMD_INLINE ssd128_internal_s16 v128_ssd_s16_init() { return v128_zero(); }
+
+/* Implementation dependent return value. Result must be finalised with
+ * v128_ssd_s16_sum(). */
+SIMD_INLINE ssd128_internal_s16 v128_ssd_s16(ssd128_internal_s16 s, v128 a,
+                                             v128 b) {
+  v128 d = v128_sub_16(a, b);
+  d = v128_madd_s16(d, d);
+  return v128_add_64(
+      s, vreinterpretq_s64_u64(vpaddlq_u32(vreinterpretq_u32_s64(d))));
+}
+
+SIMD_INLINE uint64_t v128_ssd_s16_sum(ssd128_internal_s16 s) {
+  return v64_u64(v128_low_v64(s)) + v64_u64(v128_high_v64(s));
+}
+
 #endif /* _V128_INTRINSICS_H */
--
cgit v1.2.3
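
Usage note (not part of the patch): the SAD helpers touched above form a
three-step accumulator API -- init, accumulate, finalise -- and the comments
in the added code stress that the intermediate value is implementation
dependent, so callers must always finish with the *_sum() call. A minimal
sketch of a caller, assuming the portable wrapper
aom_dsp/simd/v128_intrinsics.h (which resolves to this NEON header on ARM)
is on the include path; the sad_16x16() helper itself is hypothetical:

#include <stdint.h>

#include "aom_dsp/simd/v128_intrinsics.h"

/* Sum of absolute differences over a 16x16 block of 8-bit pixels. One v128
 * holds a full 16-pixel row, so each row costs a single v128_sad_u8() step. */
static uint32_t sad_16x16(const uint8_t *src, int src_stride,
                          const uint8_t *ref, int ref_stride) {
  sad128_internal s = v128_sad_u8_init();
  for (int i = 0; i < 16; i++)
    s = v128_sad_u8(s, v128_load_unaligned(src + i * src_stride),
                    v128_load_unaligned(ref + i * ref_stride));
  /* The accumulator layout is target specific (per this patch, NEON keeps two
   * uint16x8_t partial sums in s.hi/s.lo); only the _sum() result is portable. */
  return v128_sad_u8_sum(s);
}

Deferring the widening reduction to v128_sad_u8_sum() is what lets the
per-row step stay a single vabal-style accumulate on NEON instead of a
horizontal add per call.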