Diffstat (limited to 'third_party/aom/aom_dsp/simd/v256_intrinsics_c.h')
-rw-r--r--  third_party/aom/aom_dsp/simd/v256_intrinsics_c.h | 251
1 file changed, 240 insertions(+), 11 deletions(-)
diff --git a/third_party/aom/aom_dsp/simd/v256_intrinsics_c.h b/third_party/aom/aom_dsp/simd/v256_intrinsics_c.h
index f96ca7fa6..5b412df71 100644
--- a/third_party/aom/aom_dsp/simd/v256_intrinsics_c.h
+++ b/third_party/aom/aom_dsp/simd/v256_intrinsics_c.h
@@ -14,8 +14,10 @@
#include <stdio.h>
#include <stdlib.h>
-#include "./v128_intrinsics_c.h"
-#include "./aom_config.h"
+
+#include "config/aom_config.h"
+
+#include "aom_dsp/simd/v128_intrinsics_c.h"
typedef union {
uint8_t u8[32];
@@ -34,6 +36,8 @@ SIMD_INLINE uint32_t c_v256_low_u32(c_v256 a) { return a.u32[0]; }
SIMD_INLINE c_v64 c_v256_low_v64(c_v256 a) { return a.v64[0]; }
+SIMD_INLINE uint64_t c_v256_low_u64(c_v256 a) { return a.u64[0]; }
+
SIMD_INLINE c_v128 c_v256_low_v128(c_v256 a) { return a.v128[0]; }
SIMD_INLINE c_v128 c_v256_high_v128(c_v256 a) { return a.v128[1]; }
@@ -120,23 +124,39 @@ SIMD_INLINE c_v256 c_v256_dup_32(uint32_t x) {
return t;
}
+SIMD_INLINE c_v256 c_v256_dup_64(uint64_t x) {
+ c_v256 t;
+ t.u64[3] = t.u64[2] = t.u64[1] = t.u64[0] = x;
+ return t;
+}
+
+SIMD_INLINE int64_t c_v256_dotp_su8(c_v256 a, c_v256 b) {
+ return c_v128_dotp_su8(a.v128[1], b.v128[1]) +
+ c_v128_dotp_su8(a.v128[0], b.v128[0]);
+}
+
SIMD_INLINE int64_t c_v256_dotp_s16(c_v256 a, c_v256 b) {
return c_v128_dotp_s16(a.v128[1], b.v128[1]) +
c_v128_dotp_s16(a.v128[0], b.v128[0]);
}
+SIMD_INLINE int64_t c_v256_dotp_s32(c_v256 a, c_v256 b) {
+ return c_v128_dotp_s32(a.v128[1], b.v128[1]) +
+ c_v128_dotp_s32(a.v128[0], b.v128[0]);
+}
+
SIMD_INLINE uint64_t c_v256_hadd_u8(c_v256 a) {
return c_v128_hadd_u8(a.v128[1]) + c_v128_hadd_u8(a.v128[0]);
}
typedef uint32_t c_sad256_internal;
-SIMD_INLINE c_sad128_internal c_v256_sad_u8_init() { return 0; }
+SIMD_INLINE c_sad256_internal c_v256_sad_u8_init() { return 0; }
/* Implementation dependent return value. Result must be finalised with
v256_sad_u8_sum().
The result for more than 16 v256_sad_u8() calls is undefined. */
-SIMD_INLINE c_sad128_internal c_v256_sad_u8(c_sad256_internal s, c_v256 a,
+SIMD_INLINE c_sad256_internal c_v256_sad_u8(c_sad256_internal s, c_v256 a,
c_v256 b) {
int c;
for (c = 0; c < 32; c++)
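
For reference, the accumulate-then-finalise pattern described in the comment above looks like the following minimal sketch (assumes the aom source tree is on the include path; c_v256_load_unaligned and c_v256_sad_u8_sum are taken from elsewhere in this header, and the block count is illustrative):

#include "aom_dsp/simd/v256_intrinsics_c.h"

/* SAD over up to 16 consecutive 32-byte blocks; the accumulator is only
   defined for at most 16 calls and must be finalised with
   c_v256_sad_u8_sum(). */
static uint32_t sad_u8_blocks(const uint8_t *src, const uint8_t *ref,
                              int blocks) {
  c_sad256_internal s = c_v256_sad_u8_init();
  for (int i = 0; i < blocks && i < 16; i++)
    s = c_v256_sad_u8(s, c_v256_load_unaligned(src + 32 * i),
                      c_v256_load_unaligned(ref + 32 * i));
  return c_v256_sad_u8_sum(s);
}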
@@ -191,6 +211,16 @@ SIMD_INLINE c_v256 c_v256_add_16(c_v256 a, c_v256 b) {
c_v128_add_16(a.v128[0], b.v128[0]));
}
+SIMD_INLINE c_v256 c_v256_sadd_s8(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_sadd_s8(a.v128[1], b.v128[1]),
+ c_v128_sadd_s8(a.v128[0], b.v128[0]));
+}
+
+SIMD_INLINE c_v256 c_v256_sadd_u8(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_sadd_u8(a.v128[1], b.v128[1]),
+ c_v128_sadd_u8(a.v128[0], b.v128[0]));
+}
+
SIMD_INLINE c_v256 c_v256_sadd_s16(c_v256 a, c_v256 b) {
return c_v256_from_v128(c_v128_sadd_s16(a.v128[1], b.v128[1]),
c_v128_sadd_s16(a.v128[0], b.v128[0]));
@@ -201,6 +231,23 @@ SIMD_INLINE c_v256 c_v256_add_32(c_v256 a, c_v256 b) {
c_v128_add_32(a.v128[0], b.v128[0]));
}
+SIMD_INLINE c_v256 c_v256_add_64(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_add_64(a.v128[1], b.v128[1]),
+ c_v128_add_64(a.v128[0], b.v128[0]));
+}
+
+SIMD_INLINE c_v256 c_v256_sub_64(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_sub_64(a.v128[1], b.v128[1]),
+ c_v128_sub_64(a.v128[0], b.v128[0]));
+}
+
+SIMD_INLINE c_v256 c_v256_padd_u8(c_v256 a) {
+ c_v256 t;
+ for (int i = 0; i < 16; i++)
+ t.u16[i] = (uint16_t)a.u8[i * 2] + (uint16_t)a.u8[i * 2 + 1];
+ return t;
+}
+
SIMD_INLINE c_v256 c_v256_padd_s16(c_v256 a) {
c_v256 t;
t.s32[0] = (int32_t)a.s16[0] + (int32_t)a.s16[1];
@@ -299,6 +346,11 @@ SIMD_INLINE c_v256 c_v256_rdavg_u8(c_v256 a, c_v256 b) {
c_v128_rdavg_u8(a.v128[0], b.v128[0]));
}
+SIMD_INLINE c_v256 c_v256_rdavg_u16(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_rdavg_u16(a.v128[1], b.v128[1]),
+ c_v128_rdavg_u16(a.v128[0], b.v128[0]));
+}
+
SIMD_INLINE c_v256 c_v256_avg_u16(c_v256 a, c_v256 b) {
return c_v256_from_v128(c_v128_avg_u16(a.v128[1], b.v128[1]),
c_v128_avg_u16(a.v128[0], b.v128[0]));
@@ -319,6 +371,30 @@ SIMD_INLINE c_v256 c_v256_min_s8(c_v256 a, c_v256 b) {
c_v128_min_s8(a.v128[0], b.v128[0]));
}
+SIMD_INLINE uint32_t c_v256_movemask_8(c_v256 a) {
+ return ((a.s8[31] < 0) << 31) | ((a.s8[30] < 0) << 30) |
+ ((a.s8[29] < 0) << 29) | ((a.s8[28] < 0) << 28) |
+ ((a.s8[27] < 0) << 27) | ((a.s8[26] < 0) << 26) |
+ ((a.s8[25] < 0) << 25) | ((a.s8[24] < 0) << 24) |
+ ((a.s8[23] < 0) << 23) | ((a.s8[22] < 0) << 22) |
+ ((a.s8[21] < 0) << 21) | ((a.s8[20] < 0) << 20) |
+ ((a.s8[19] < 0) << 19) | ((a.s8[18] < 0) << 18) |
+ ((a.s8[17] < 0) << 17) | ((a.s8[16] < 0) << 16) |
+ ((a.s8[15] < 0) << 15) | ((a.s8[14] < 0) << 14) |
+ ((a.s8[13] < 0) << 13) | ((a.s8[12] < 0) << 12) |
+ ((a.s8[11] < 0) << 11) | ((a.s8[10] < 0) << 10) |
+ ((a.s8[9] < 0) << 9) | ((a.s8[8] < 0) << 8) | ((a.s8[7] < 0) << 7) |
+ ((a.s8[6] < 0) << 6) | ((a.s8[5] < 0) << 5) | ((a.s8[4] < 0) << 4) |
+ ((a.s8[3] < 0) << 3) | ((a.s8[2] < 0) << 2) | ((a.s8[1] < 0) << 1) |
+ ((a.s8[0] < 0) << 0);
+}
+
+SIMD_INLINE c_v256 c_v256_blend_8(c_v256 a, c_v256 b, c_v256 c) {
+ c_v256 t;
+ for (int i = 0; i < 32; i++) t.u8[i] = c.s8[i] < 0 ? b.u8[i] : a.u8[i];
+ return t;
+}
+
SIMD_INLINE c_v256 c_v256_max_s8(c_v256 a, c_v256 b) {
return c_v256_from_v128(c_v128_max_s8(a.v128[1], b.v128[1]),
c_v128_max_s8(a.v128[0], b.v128[0]));
@@ -334,6 +410,16 @@ SIMD_INLINE c_v256 c_v256_max_s16(c_v256 a, c_v256 b) {
c_v128_max_s16(a.v128[0], b.v128[0]));
}
+SIMD_INLINE c_v256 c_v256_min_s32(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_min_s32(a.v128[1], b.v128[1]),
+ c_v128_min_s32(a.v128[0], b.v128[0]));
+}
+
+SIMD_INLINE c_v256 c_v256_max_s32(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_max_s32(a.v128[1], b.v128[1]),
+ c_v128_max_s32(a.v128[0], b.v128[0]));
+}
+
SIMD_INLINE c_v256 c_v256_ziplo_8(c_v256 a, c_v256 b) {
return c_v256_from_v128(c_v128_ziphi_8(a.v128[0], b.v128[0]),
c_v128_ziplo_8(a.v128[0], b.v128[0]));
@@ -482,6 +568,32 @@ SIMD_INLINE c_v256 c_v256_unziphi_32(c_v256 a, c_v256 b) {
: _c_v256_unzip_32(b, a, 1);
}
+SIMD_INLINE c_v256 _c_v256_unzip_64(c_v256 a, c_v256 b, int mode) {
+ c_v256 t;
+ if (mode) {
+ t.u64[3] = b.u64[3];
+ t.u64[2] = b.u64[1];
+ t.u64[1] = a.u64[3];
+ t.u64[0] = a.u64[1];
+ } else {
+ t.u64[3] = a.u64[2];
+ t.u64[2] = a.u64[0];
+ t.u64[1] = b.u64[2];
+ t.u64[0] = b.u64[0];
+ }
+ return t;
+}
+
+SIMD_INLINE c_v256 c_v256_unziplo_64(c_v256 a, c_v256 b) {
+ return CONFIG_BIG_ENDIAN ? _c_v256_unzip_64(a, b, 1)
+ : _c_v256_unzip_64(a, b, 0);
+}
+
+SIMD_INLINE c_v256 c_v256_unziphi_64(c_v256 a, c_v256 b) {
+ return CONFIG_BIG_ENDIAN ? _c_v256_unzip_64(b, a, 0)
+ : _c_v256_unzip_64(b, a, 1);
+}
+
SIMD_INLINE c_v256 c_v256_unpack_u8_s16(c_v128 a) {
return c_v256_from_v128(c_v128_unpackhi_u8_s16(a), c_v128_unpacklo_u8_s16(a));
}
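
As a quick illustration of the new 64-bit unzips (a sketch using only the union members declared at the top of this header; lane values are made up): on a little-endian build, unziplo keeps the even 64-bit lanes and unziphi the odd ones, with b supplying the low half of the result.

c_v256 a, b, lo, hi;
for (int i = 0; i < 4; i++) {
  a.u64[i] = 10 + i; /* a lanes, low to high: 10 11 12 13 */
  b.u64[i] = 20 + i; /* b lanes, low to high: 20 21 22 23 */
}
lo = c_v256_unziplo_64(a, b); /* lanes, low to high: 20 22 10 12 */
hi = c_v256_unziphi_64(a, b); /* lanes, low to high: 21 23 11 13 */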
@@ -515,6 +627,11 @@ SIMD_INLINE c_v256 c_v256_pack_s32_s16(c_v256 a, c_v256 b) {
c_v128_pack_s32_s16(b.v128[1], b.v128[0]));
}
+SIMD_INLINE c_v256 c_v256_pack_s32_u16(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_pack_s32_u16(a.v128[1], a.v128[0]),
+ c_v128_pack_s32_u16(b.v128[1], b.v128[0]));
+}
+
SIMD_INLINE c_v256 c_v256_pack_s16_u8(c_v256 a, c_v256 b) {
return c_v256_from_v128(c_v128_pack_s16_u8(a.v128[1], a.v128[0]),
c_v128_pack_s16_u8(b.v128[1], b.v128[0]));
@@ -558,15 +675,21 @@ SIMD_INLINE c_v256 c_v256_unpackhi_s16_s32(c_v256 a) {
SIMD_INLINE c_v256 c_v256_shuffle_8(c_v256 a, c_v256 pattern) {
c_v256 t;
int c;
- for (c = 0; c < 32; c++) {
- if (pattern.u8[c] & ~31) {
- fprintf(stderr, "Undefined v256_shuffle_8 index %d/%d\n", pattern.u8[c],
- c);
- abort();
- }
+ for (c = 0; c < 32; c++)
t.u8[c] = a.u8[CONFIG_BIG_ENDIAN ? 31 - (pattern.u8[c] & 31)
: pattern.u8[c] & 31];
- }
+
+ return t;
+}
+
+SIMD_INLINE c_v256 c_v256_wideshuffle_8(c_v256 a, c_v256 b, c_v256 pattern) {
+ c_v256 t;
+ int c;
+ for (c = 0; c < 32; c++)
+ t.u8[c] = (pattern.u8[c] < 32
+ ? b.u8
+ : a.u8)[CONFIG_BIG_ENDIAN ? 31 - (pattern.u8[c] & 31)
+ : pattern.u8[c] & 31];
return t;
}
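
The wideshuffle added above indexes a conceptual 512-bit value in which b forms bytes 0-31 and a forms bytes 32-63 (on little-endian builds). A small sketch of that convention, with made-up lane values:

c_v256 a, b, pat, r;
for (int i = 0; i < 32; i++) {
  a.u8[i] = 100 + i;
  b.u8[i] = i;
  /* pattern indices 0..31 select from b, 32..63 select from a */
  pat.u8[i] = i < 16 ? i : 32 + (i - 16);
}
r = c_v256_wideshuffle_8(a, b, pat);
/* r.u8[0..15] == b.u8[0..15] and r.u8[16..31] == a.u8[0..15] */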
@@ -607,6 +730,21 @@ SIMD_INLINE c_v256 c_v256_cmpeq_16(c_v256 a, c_v256 b) {
c_v128_cmpeq_16(a.v128[0], b.v128[0]));
}
+SIMD_INLINE c_v256 c_v256_cmpgt_s32(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_cmpgt_s32(a.v128[1], b.v128[1]),
+ c_v128_cmpgt_s32(a.v128[0], b.v128[0]));
+}
+
+SIMD_INLINE c_v256 c_v256_cmplt_s32(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_cmplt_s32(a.v128[1], b.v128[1]),
+ c_v128_cmplt_s32(a.v128[0], b.v128[0]));
+}
+
+SIMD_INLINE c_v256 c_v256_cmpeq_32(c_v256 a, c_v256 b) {
+ return c_v256_from_v128(c_v128_cmpeq_32(a.v128[1], b.v128[1]),
+ c_v128_cmpeq_32(a.v128[0], b.v128[0]));
+}
+
SIMD_INLINE c_v256 c_v256_shl_n_byte(c_v256 a, unsigned int n) {
if (n < 16)
return c_v256_from_v128(c_v128_or(c_v128_shl_n_byte(a.v128[1], n),
@@ -685,6 +823,45 @@ SIMD_INLINE c_v256 c_v256_shr_s32(c_v256 a, unsigned int c) {
c_v128_shr_s32(a.v128[0], c));
}
+SIMD_INLINE c_v256 c_v256_shr_s64(c_v256 a, unsigned int n) {
+ c_v256 t;
+ if (SIMD_CHECK && n > 63) {
+ fprintf(stderr, "Error: undefined s64 shift right %d\n", n);
+ abort();
+ }
+ t.s64[3] = a.s64[3] >> n;
+ t.s64[2] = a.s64[2] >> n;
+ t.s64[1] = a.s64[1] >> n;
+ t.s64[0] = a.s64[0] >> n;
+ return t;
+}
+
+SIMD_INLINE c_v256 c_v256_shr_u64(c_v256 a, unsigned int n) {
+ c_v256 t;
+ if (SIMD_CHECK && n > 63) {
+ fprintf(stderr, "Error: undefined u64 shift right %d\n", n);
+ abort();
+ }
+ t.u64[3] = a.u64[3] >> n;
+ t.u64[2] = a.u64[2] >> n;
+ t.u64[1] = a.u64[1] >> n;
+ t.u64[0] = a.u64[0] >> n;
+ return t;
+}
+
+SIMD_INLINE c_v256 c_v256_shl_64(c_v256 a, unsigned int n) {
+ c_v256 t;
+ if (SIMD_CHECK && n > 63) {
+ fprintf(stderr, "Error: undefined u64 shift left %d\n", n);
+ abort();
+ }
+ t.u64[3] = a.u64[3] << n;
+ t.u64[2] = a.u64[2] << n;
+ t.u64[1] = a.u64[1] << n;
+ t.u64[0] = a.u64[0] << n;
+ return t;
+}
+
SIMD_INLINE c_v256 c_v256_shl_n_8(c_v256 a, unsigned int n) {
return c_v256_shl_8(a, n);
}
@@ -697,6 +874,10 @@ SIMD_INLINE c_v256 c_v256_shl_n_32(c_v256 a, unsigned int n) {
return c_v256_shl_32(a, n);
}
+SIMD_INLINE c_v256 c_v256_shl_n_64(c_v256 a, unsigned int n) {
+ return c_v256_shl_64(a, n);
+}
+
SIMD_INLINE c_v256 c_v256_shr_n_u8(c_v256 a, unsigned int n) {
return c_v256_shr_u8(a, n);
}
@@ -709,6 +890,10 @@ SIMD_INLINE c_v256 c_v256_shr_n_u32(c_v256 a, unsigned int n) {
return c_v256_shr_u32(a, n);
}
+SIMD_INLINE c_v256 c_v256_shr_n_u64(c_v256 a, unsigned int n) {
+ return c_v256_shr_u64(a, n);
+}
+
SIMD_INLINE c_v256 c_v256_shr_n_s8(c_v256 a, unsigned int n) {
return c_v256_shr_s8(a, n);
}
@@ -721,4 +906,48 @@ SIMD_INLINE c_v256 c_v256_shr_n_s32(c_v256 a, unsigned int n) {
return c_v256_shr_s32(a, n);
}
+SIMD_INLINE c_v256 c_v256_shr_n_s64(c_v256 a, unsigned int n) {
+ return c_v256_shr_s64(a, n);
+}
+
+SIMD_INLINE c_v256 c_v256_shr_n_word(c_v256 a, const unsigned int n) {
+ return c_v256_shr_n_byte(a, 2 * n);
+}
+SIMD_INLINE c_v256 c_v256_shl_n_word(c_v256 a, const unsigned int n) {
+ return c_v256_shl_n_byte(a, 2 * n);
+}
+
+typedef uint32_t c_sad256_internal_u16;
+
+SIMD_INLINE c_sad256_internal_u16 c_v256_sad_u16_init() { return 0; }
+
+/* Implementation dependent return value. Result must be finalised with
+ v256_sad_u16_sum(). */
+SIMD_INLINE c_sad256_internal_u16 c_v256_sad_u16(c_sad256_internal_u16 s,
+ c_v256 a, c_v256 b) {
+ int c;
+ for (c = 0; c < 16; c++)
+ s += a.u16[c] > b.u16[c] ? a.u16[c] - b.u16[c] : b.u16[c] - a.u16[c];
+ return s;
+}
+
+SIMD_INLINE uint32_t c_v256_sad_u16_sum(c_sad256_internal_u16 s) { return s; }
+
+typedef uint64_t c_ssd256_internal_s16;
+
+SIMD_INLINE c_ssd256_internal_s16 c_v256_ssd_s16_init() { return 0; }
+
+/* Implementation dependent return value. Result must be finalised with
+ * v256_ssd_s16_sum(). */
+SIMD_INLINE c_ssd256_internal_s16 c_v256_ssd_s16(c_ssd256_internal_s16 s,
+ c_v256 a, c_v256 b) {
+ int c;
+ for (c = 0; c < 16; c++)
+ s += (int32_t)(int16_t)(a.s16[c] - b.s16[c]) *
+ (int32_t)(int16_t)(a.s16[c] - b.s16[c]);
+ return s;
+}
+
+SIMD_INLINE uint64_t c_v256_ssd_s16_sum(c_ssd256_internal_s16 s) { return s; }
+
#endif /* _V256_INTRINSICS_C_H */
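
The new 16-bit SAD and SSD accumulators follow the same init/accumulate/sum shape as the 8-bit SAD earlier in the patch; note that c_v256_ssd_s16() squares the lane difference after wrapping it back to int16_t. A minimal usage sketch (the aom include path, c_v256_load_unaligned and the block count are assumptions):

#include "aom_dsp/simd/v256_intrinsics_c.h"

/* Sum of squared differences over consecutive 16-lane int16_t blocks. */
static uint64_t ssd_s16_blocks(const int16_t *x, const int16_t *y,
                               int blocks) {
  c_ssd256_internal_s16 s = c_v256_ssd_s16_init();
  for (int i = 0; i < blocks; i++)
    s = c_v256_ssd_s16(s, c_v256_load_unaligned(x + 16 * i),
                       c_v256_load_unaligned(y + 16 * i));
  return c_v256_ssd_s16_sum(s);
}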