author     trav90 <travawine@palemoon.org>    2018-10-19 23:00:02 -0500
committer  trav90 <travawine@palemoon.org>    2018-10-19 23:00:02 -0500
commit     b8df135c97a854c2ff9b4394b016649c601177fa (patch)
tree       802b7de5ad245f1a12adbcef835ab0d0687c1bf8 /third_party/aom/aom_dsp/arm
parent     a4d3c59dcac642f6b9557dc09b60eda40b517630 (diff)
Update libaom to rev b25610052a1398032320008d69b51d2da94f5928
Diffstat (limited to 'third_party/aom/aom_dsp/arm')
-rw-r--r--  third_party/aom/aom_dsp/arm/intrapred_neon.c    60
-rw-r--r--  third_party/aom/aom_dsp/arm/loopfilter_neon.c   228
2 files changed, 288 insertions, 0 deletions
diff --git a/third_party/aom/aom_dsp/arm/intrapred_neon.c b/third_party/aom/aom_dsp/arm/intrapred_neon.c
index 69470eeb0..c85b1e910 100644
--- a/third_party/aom/aom_dsp/arm/intrapred_neon.c
+++ b/third_party/aom/aom_dsp/arm/intrapred_neon.c
@@ -528,3 +528,63 @@ void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
     }
   }
 }
+
+static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bw,
+                                       const uint16_t *above,
+                                       const uint16_t *left) {
+  assert(bw >= 4);
+  assert(IS_POWER_OF_TWO(bw));
+  int expected_dc, sum = 0;
+  const int count = bw * 2;
+  uint32x4_t sum_q = vdupq_n_u32(0);
+  uint32x2_t sum_d;
+  uint16_t *dst_1;
+  if (bw >= 8) {
+    for (int i = 0; i < bw; i += 8) {
+      sum_q = vpadalq_u16(sum_q, vld1q_u16(above));
+      sum_q = vpadalq_u16(sum_q, vld1q_u16(left));
+      above += 8;
+      left += 8;
+    }
+    sum_d = vadd_u32(vget_low_u32(sum_q), vget_high_u32(sum_q));
+    sum = vget_lane_s32(vreinterpret_s32_u64(vpaddl_u32(sum_d)), 0);
+    expected_dc = (sum + (count >> 1)) / count;
+    const uint16x8_t dc = vdupq_n_u16((uint16_t)expected_dc);
+    for (int r = 0; r < bw; r++) {
+      dst_1 = dst;
+      for (int i = 0; i < bw; i += 8) {
+        vst1q_u16(dst_1, dc);
+        dst_1 += 8;
+      }
+      dst += stride;
+    }
+  } else {  // 4x4
+    sum_q = vaddl_u16(vld1_u16(above), vld1_u16(left));
+    sum_d = vadd_u32(vget_low_u32(sum_q), vget_high_u32(sum_q));
+    sum = vget_lane_s32(vreinterpret_s32_u64(vpaddl_u32(sum_d)), 0);
+    expected_dc = (sum + (count >> 1)) / count;
+    const uint16x4_t dc = vdup_n_u16((uint16_t)expected_dc);
+    for (int r = 0; r < bw; r++) {
+      vst1_u16(dst, dc);
+      dst += stride;
+    }
+  }
+}
+
+#define intra_pred_highbd_sized_neon(type, width)               \
+  void aom_highbd_##type##_predictor_##width##x##width##_neon(  \
+      uint16_t *dst, ptrdiff_t stride, const uint16_t *above,   \
+      const uint16_t *left, int bd) {                           \
+    (void)bd;                                                   \
+    highbd_##type##_predictor(dst, stride, width, above, left); \
+  }
+
+#define intra_pred_square(type)           \
+  intra_pred_highbd_sized_neon(type, 4);  \
+  intra_pred_highbd_sized_neon(type, 8);  \
+  intra_pred_highbd_sized_neon(type, 16); \
+  intra_pred_highbd_sized_neon(type, 32); \
+  intra_pred_highbd_sized_neon(type, 64);
+
+intra_pred_square(dc);
+#undef intra_pred_square
diff --git a/third_party/aom/aom_dsp/arm/loopfilter_neon.c b/third_party/aom/aom_dsp/arm/loopfilter_neon.c
index ee1a3c78f..bdc67626d 100644
--- a/third_party/aom/aom_dsp/arm/loopfilter_neon.c
+++ b/third_party/aom/aom_dsp/arm/loopfilter_neon.c
@@ -52,6 +52,36 @@ static INLINE uint8x8_t lpf_mask(uint8x8_t p3q3, uint8x8_t p2q2, uint8x8_t p1q1,
   return mask_8x8;
 }
 
+static INLINE uint8x8_t lpf_mask2(uint8x8_t p1q1, uint8x8_t p0q0,
+                                  const uint8_t blimit, const uint8_t limit) {
+  uint32x2x2_t p0q0_p1q1;
+  uint16x8_t temp_16x8;
+  uint16x4_t temp0_16x4, temp1_16x4;
+  const uint16x4_t blimit_16x4 = vdup_n_u16(blimit);
+  const uint8x8_t limit_8x8 = vdup_n_u8(limit);
+  uint8x8_t mask_8x8, temp_8x8;
+
+  mask_8x8 = vabd_u8(p1q1, p0q0);
+  mask_8x8 = vcle_u8(mask_8x8, limit_8x8);
+
+  temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8)));
+  mask_8x8 = vand_u8(mask_8x8, temp_8x8);
+
+  p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1));
+  temp_8x8 = vabd_u8(vreinterpret_u8_u32(p0q0_p1q1.val[0]),
+                     vreinterpret_u8_u32(p0q0_p1q1.val[1]));
+  temp_16x8 = vmovl_u8(temp_8x8);
+  temp0_16x4 = vshl_n_u16(vget_low_u16(temp_16x8), 1);
+  temp1_16x4 = vshr_n_u16(vget_high_u16(temp_16x8), 1);
+  temp0_16x4 = vadd_u16(temp0_16x4, temp1_16x4);
+  temp0_16x4 = vcle_u16(temp0_16x4, blimit_16x4);
+  temp_8x8 = vmovn_u16(vcombine_u16(temp0_16x4, temp0_16x4));
+
+  mask_8x8 = vand_u8(mask_8x8, temp_8x8);
+
+  return mask_8x8;
+}
+
 static INLINE uint8x8_t lpf_flat_mask4(uint8x8_t p3q3, uint8x8_t p2q2,
                                        uint8x8_t p1q1, uint8x8_t p0q0) {
   const uint8x8_t thresh_8x8 = vdup_n_u8(1);  // for bd==8 threshold is always 1
@@ -523,6 +553,68 @@ static void lpf_6_neon(uint8x8_t *p2q2, uint8x8_t *p1q1, uint8x8_t *p0q0,
   }
 }
 
+static void lpf_4_neon(uint8x8_t *p1q1, uint8x8_t *p0q0, const uint8_t blimit,
+                       const uint8_t limit, const uint8_t thresh) {
+  int32x2x2_t ps0_qs0, ps1_qs1;
+  int16x8_t filter_s16;
+  const uint8x8_t thresh_f4 = vdup_n_u8(thresh);
+  uint8x8_t mask_8x8, temp0_8x8, temp1_8x8;
+  int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;
+  int8x8_t op0, oq0, op1, oq1;
+  int8x8_t pq_s0, pq_s1;
+  int8x8_t filter_s8, filter1_s8, filter2_s8;
+  int8x8_t hev_8x8;
+  const int8x8_t sign_mask = vdup_n_s8(0x80);
+  const int8x8_t val_4 = vdup_n_s8(4);
+  const int8x8_t val_3 = vdup_n_s8(3);
+
+  // Calculate filter mask
+  mask_8x8 = lpf_mask2(*p1q1, *p0q0, blimit, limit);
+
+  pq_s0 = veor_s8(vreinterpret_s8_u8(*p0q0), sign_mask);
+  pq_s1 = veor_s8(vreinterpret_s8_u8(*p1q1), sign_mask);
+
+  ps0_qs0 = vtrn_s32(vreinterpret_s32_s8(pq_s0), vreinterpret_s32_s8(pq_s0));
+  ps1_qs1 = vtrn_s32(vreinterpret_s32_s8(pq_s1), vreinterpret_s32_s8(pq_s1));
+  ps0_s8 = vreinterpret_s8_s32(ps0_qs0.val[0]);
+  qs0_s8 = vreinterpret_s8_s32(ps0_qs0.val[1]);
+  ps1_s8 = vreinterpret_s8_s32(ps1_qs1.val[0]);
+  qs1_s8 = vreinterpret_s8_s32(ps1_qs1.val[1]);
+
+  // hev_mask
+  temp0_8x8 = vcgt_u8(vabd_u8(*p0q0, *p1q1), thresh_f4);
+  temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8)));
+  hev_8x8 = vreinterpret_s8_u8(vorr_u8(temp0_8x8, temp1_8x8));
+
+  // add outer taps if we have high edge variance
+  filter_s8 = vqsub_s8(ps1_s8, qs1_s8);
+  filter_s8 = vand_s8(filter_s8, hev_8x8);
+
+  // inner taps
+  temp_s8 = vqsub_s8(qs0_s8, ps0_s8);
+  filter_s16 = vmovl_s8(filter_s8);
+  filter_s16 = vmlal_s8(filter_s16, temp_s8, val_3);
+  filter_s8 = vqmovn_s16(filter_s16);
+  filter_s8 = vand_s8(filter_s8, vreinterpret_s8_u8(mask_8x8));
+
+  filter1_s8 = vqadd_s8(filter_s8, val_4);
+  filter2_s8 = vqadd_s8(filter_s8, val_3);
+  filter1_s8 = vshr_n_s8(filter1_s8, 3);
+  filter2_s8 = vshr_n_s8(filter2_s8, 3);
+
+  oq0 = veor_s8(vqsub_s8(qs0_s8, filter1_s8), sign_mask);
+  op0 = veor_s8(vqadd_s8(ps0_s8, filter2_s8), sign_mask);
+
+  filter_s8 = vrshr_n_s8(filter1_s8, 1);
+  filter_s8 = vbic_s8(filter_s8, hev_8x8);
+
+  oq1 = veor_s8(vqsub_s8(qs1_s8, filter_s8), sign_mask);
+  op1 = veor_s8(vqadd_s8(ps1_s8, filter_s8), sign_mask);
+
+  *p0q0 = vreinterpret_u8_s8(vext_s8(op0, oq0, 4));
+  *p1q1 = vreinterpret_u8_s8(vext_s8(op1, oq1, 4));
+}
+
 void aom_lpf_vertical_14_neon(uint8_t *src, int stride, const uint8_t *blimit,
                               const uint8_t *limit, const uint8_t *thresh) {
   uint8x16_t row0, row1, row2, row3;
@@ -646,6 +738,125 @@ void aom_lpf_vertical_8_neon(uint8_t *src, int stride, const uint8_t *blimit,
   store_u8_8x4(src - 4, stride, p3q0, p2q1, p1q2, p0q3);
 }
 
+void aom_lpf_vertical_6_neon(uint8_t *src, int stride, const uint8_t *blimit,
+                             const uint8_t *limit, const uint8_t *thresh) {
+  uint32x2x2_t p2q2_p1q1, pxqy_p0q0;
+  uint32x2_t pq_rev;
+  uint8x8_t pxq0, p2q1, p1q2, p0qy;
+  uint8x8_t p0q0, p1q1, p2q2, pxqy;
+
+  // row0: px p2 p1 p0 | q0 q1 q2 qy
+  // row1: px p2 p1 p0 | q0 q1 q2 qy
+  // row2: px p2 p1 p0 | q0 q1 q2 qy
+  // row3: px p2 p1 p0 | q0 q1 q2 qy
+  load_u8_8x4(src - 4, stride, &pxq0, &p2q1, &p1q2, &p0qy);
+
+  transpose_u8_8x4(&pxq0, &p2q1, &p1q2, &p0qy);
+
+  pq_rev = vrev64_u32(vreinterpret_u32_u8(p0qy));
+  pxqy_p0q0 = vtrn_u32(vreinterpret_u32_u8(pxq0), pq_rev);
+
+  pq_rev = vrev64_u32(vreinterpret_u32_u8(p1q2));
+  p2q2_p1q1 = vtrn_u32(vreinterpret_u32_u8(p2q1), pq_rev);
+
+  p0q0 = vreinterpret_u8_u32(vrev64_u32(pxqy_p0q0.val[1]));
+  p1q1 = vreinterpret_u8_u32(vrev64_u32(p2q2_p1q1.val[1]));
+  p2q2 = vreinterpret_u8_u32(p2q2_p1q1.val[0]);
+  pxqy = vreinterpret_u8_u32(pxqy_p0q0.val[0]);
+
+  lpf_6_neon(&p2q2, &p1q1, &p0q0, *blimit, *limit, *thresh);
+
+  pq_rev = vrev64_u32(vreinterpret_u32_u8(p0q0));
+  pxqy_p0q0 = vtrn_u32(vreinterpret_u32_u8(pxqy), pq_rev);
+
+  pq_rev = vrev64_u32(vreinterpret_u32_u8(p1q1));
+  p2q2_p1q1 = vtrn_u32(vreinterpret_u32_u8(p2q2), pq_rev);
+
+  p0qy = vreinterpret_u8_u32(vrev64_u32(pxqy_p0q0.val[1]));
+  p1q2 = vreinterpret_u8_u32(vrev64_u32(p2q2_p1q1.val[1]));
+  p2q1 = vreinterpret_u8_u32(p2q2_p1q1.val[0]);
+  pxq0 = vreinterpret_u8_u32(pxqy_p0q0.val[0]);
+  transpose_u8_8x4(&pxq0, &p2q1, &p1q2, &p0qy);
+
+  store_u8_8x4(src - 4, stride, pxq0, p2q1, p1q2, p0qy);
+}
+
+void aom_lpf_vertical_4_neon(uint8_t *src, int stride, const uint8_t *blimit,
+                             const uint8_t *limit, const uint8_t *thresh) {
+  uint32x2x2_t p1q0_p0q1, p1q1_p0q0, p1p0_q1q0;
+  uint32x2_t pq_rev;
+  uint8x8_t UNINITIALIZED_IS_SAFE(p1p0), q0q1, p0q0, p1q1;
+
+  // row0: p1 p0 | q0 q1
+  // row1: p1 p0 | q0 q1
+  // row2: p1 p0 | q0 q1
+  // row3: p1 p0 | q0 q1
+  load_u8_4x1(src - 2, &p1p0, 0);
+  load_u8_4x1((src - 2) + 1 * stride, &p1p0, 1);
+  load_u8_4x1((src - 2) + 2 * stride, &q0q1, 0);
+  load_u8_4x1((src - 2) + 3 * stride, &q0q1, 1);
+
+  transpose_u8_4x4(&p1p0, &q0q1);
+
+  p1q0_p0q1 = vtrn_u32(vreinterpret_u32_u8(p1p0), vreinterpret_u32_u8(q0q1));
+
+  pq_rev = vrev64_u32(p1q0_p0q1.val[1]);
+  p1q1_p0q0 = vtrn_u32(p1q0_p0q1.val[0], pq_rev);
+
+  p1q1 = vreinterpret_u8_u32(p1q1_p0q0.val[0]);
+  p0q0 = vreinterpret_u8_u32(p1q1_p0q0.val[1]);
+
+  lpf_4_neon(&p1q1, &p0q0, *blimit, *limit, *thresh);
+
+  p1p0_q1q0 = vtrn_u32(vreinterpret_u32_u8(p1q1), vreinterpret_u32_u8(p0q0));
+
+  p1p0 = vreinterpret_u8_u32(p1p0_q1q0.val[0]);
+  q0q1 = vreinterpret_u8_u32(vrev64_u32(p1p0_q1q0.val[1]));
+
+  transpose_u8_4x4(&p1p0, &q0q1);
+
+  store_u8_4x1(src - 2, p1p0, 0);
+  store_u8_4x1((src - 2) + 1 * stride, q0q1, 0);
+  store_u8_4x1((src - 2) + 2 * stride, p1p0, 1);
+  store_u8_4x1((src - 2) + 3 * stride, q0q1, 1);
+}
+
+void aom_lpf_horizontal_14_neon(uint8_t *src, int stride, const uint8_t *blimit,
+                                const uint8_t *limit, const uint8_t *thresh) {
+  uint8x8_t p0q0, p1q1, p2q2, p3q3, p4q4, p5q5, UNINITIALIZED_IS_SAFE(p6q6);
+
+  load_u8_4x1(src - 7 * stride, &p6q6, 0);
+  load_u8_4x1(src - 6 * stride, &p5q5, 0);
+  load_u8_4x1(src - 5 * stride, &p4q4, 0);
+  load_u8_4x1(src - 4 * stride, &p3q3, 0);
+  load_u8_4x1(src - 3 * stride, &p2q2, 0);
+  load_u8_4x1(src - 2 * stride, &p1q1, 0);
+  load_u8_4x1(src - 1 * stride, &p0q0, 0);
+  load_u8_4x1(src + 0 * stride, &p0q0, 1);
+  load_u8_4x1(src + 1 * stride, &p1q1, 1);
+  load_u8_4x1(src + 2 * stride, &p2q2, 1);
+  load_u8_4x1(src + 3 * stride, &p3q3, 1);
+  load_u8_4x1(src + 4 * stride, &p4q4, 1);
+  load_u8_4x1(src + 5 * stride, &p5q5, 1);
+  load_u8_4x1(src + 6 * stride, &p6q6, 1);
+
+  lpf_14_neon(&p6q6, &p5q5, &p4q4, &p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit,
+              *thresh);
+
+  store_u8_4x1(src - 6 * stride, p5q5, 0);
+  store_u8_4x1(src - 5 * stride, p4q4, 0);
+  store_u8_4x1(src - 4 * stride, p3q3, 0);
+  store_u8_4x1(src - 3 * stride, p2q2, 0);
+  store_u8_4x1(src - 2 * stride, p1q1, 0);
+  store_u8_4x1(src - 1 * stride, p0q0, 0);
+  store_u8_4x1(src + 0 * stride, p0q0, 1);
+  store_u8_4x1(src + 1 * stride, p1q1, 1);
+  store_u8_4x1(src + 2 * stride, p2q2, 1);
+  store_u8_4x1(src + 3 * stride, p3q3, 1);
+  store_u8_4x1(src + 4 * stride, p4q4, 1);
+  store_u8_4x1(src + 5 * stride, p5q5, 1);
+}
+
 void aom_lpf_horizontal_8_neon(uint8_t *src, int stride, const uint8_t *blimit,
                                const uint8_t *limit, const uint8_t *thresh) {
   uint8x8_t p0q0, p1q1, p2q2, p3q3;
@@ -698,3 +909,20 @@ void aom_lpf_horizontal_6_neon(uint8_t *src, int stride, const uint8_t *blimit,
   vst1_lane_u32((uint32_t *)(src + 1 * stride), vreinterpret_u32_u8(p1q1), 1);
   vst1_lane_u32((uint32_t *)(src + 2 * stride), vreinterpret_u32_u8(p2q2), 1);
 }
+
+void aom_lpf_horizontal_4_neon(uint8_t *src, int stride, const uint8_t *blimit,
+                               const uint8_t *limit, const uint8_t *thresh) {
+  uint8x8_t p0q0, UNINITIALIZED_IS_SAFE(p1q1);
+
+  load_u8_4x1(src - 2 * stride, &p1q1, 0);
+  load_u8_4x1(src - 1 * stride, &p0q0, 0);
+  load_u8_4x1(src + 0 * stride, &p0q0, 1);
+  load_u8_4x1(src + 1 * stride, &p1q1, 1);
+
+  lpf_4_neon(&p1q1, &p0q0, *blimit, *limit, *thresh);
+
+  store_u8_4x1(src - 2 * stride, p1q1, 0);
+  store_u8_4x1(src - 1 * stride, p0q0, 0);
+  store_u8_4x1(src + 0 * stride, p0q0, 1);
+  store_u8_4x1(src + 1 * stride, p1q1, 1);
+}