From 68569dee1416593955c1570d638b3d9250b33012 Mon Sep 17 00:00:00 2001
From: trav90
Date: Mon, 15 Oct 2018 21:45:30 -0500
Subject: Import aom library

This is the reference implementation for the Alliance for Open Media's av1
video codec. The commit used was 4d668d7feb1f8abd809d1bca0418570a7f142a36.
---
 third_party/aom/av1/common/x86/selfguided_sse4.c | 1805 ++++++++++++++++++++++
 1 file changed, 1805 insertions(+)
 create mode 100644 third_party/aom/av1/common/x86/selfguided_sse4.c

diff --git a/third_party/aom/av1/common/x86/selfguided_sse4.c b/third_party/aom/av1/common/x86/selfguided_sse4.c
new file mode 100644
index 000000000..260faa8c9
--- /dev/null
+++ b/third_party/aom/av1/common/x86/selfguided_sse4.c
@@ -0,0 +1,1805 @@
+#include <smmintrin.h>
+
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
+#include "av1/common/restoration.h"
+
+/* Calculate four consecutive entries of the intermediate A and B arrays
+   (corresponding to the first loop in the C version of
+   av1_selfguided_restoration)
+*/
+static void calc_block(__m128i sum, __m128i sum_sq, __m128i n,
+                       __m128i one_over_n, __m128i s, int bit_depth, int idx,
+                       int32_t *A, int32_t *B) {
+  __m128i a, b, p;
+#if CONFIG_HIGHBITDEPTH
+  if (bit_depth > 8) {
+    __m128i rounding_a = _mm_set1_epi32((1 << (2 * (bit_depth - 8))) >> 1);
+    __m128i rounding_b = _mm_set1_epi32((1 << (bit_depth - 8)) >> 1);
+    __m128i shift_a = _mm_set_epi64x(0, 2 * (bit_depth - 8));
+    __m128i shift_b = _mm_set_epi64x(0, bit_depth - 8);
+    a = _mm_srl_epi32(_mm_add_epi32(sum_sq, rounding_a), shift_a);
+    b = _mm_srl_epi32(_mm_add_epi32(sum, rounding_b), shift_b);
+    a = _mm_mullo_epi32(a, n);
+    b = _mm_mullo_epi32(b, b);
+    p = _mm_sub_epi32(_mm_max_epi32(a, b), b);
+  } else {
+#endif
+    (void)bit_depth;
+    a = _mm_mullo_epi32(sum_sq, n);
+    b = _mm_mullo_epi32(sum, sum);
+    p = _mm_sub_epi32(a, b);
+#if CONFIG_HIGHBITDEPTH
+  }
+#endif
+
+  __m128i rounding_z = _mm_set1_epi32((1 << SGRPROJ_MTABLE_BITS) >> 1);
+  __m128i z = _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rounding_z),
+                             SGRPROJ_MTABLE_BITS);
+  z = _mm_min_epi32(z, _mm_set1_epi32(255));
+
+  // 'Gather' type instructions are not available pre-AVX2, so synthesize a
+  // gather using scalar loads.
+  __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)],
+                                x_by_xplus1[_mm_extract_epi32(z, 2)],
+                                x_by_xplus1[_mm_extract_epi32(z, 1)],
+                                x_by_xplus1[_mm_extract_epi32(z, 0)]);
+
+  _mm_storeu_si128((__m128i *)&A[idx], a_res);
+
+  __m128i rounding_res = _mm_set1_epi32((1 << SGRPROJ_RECIP_BITS) >> 1);
+  __m128i a_complement = _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res);
+  __m128i b_int =
+      _mm_mullo_epi32(a_complement, _mm_mullo_epi32(sum, one_over_n));
+  __m128i b_res =
+      _mm_srli_epi32(_mm_add_epi32(b_int, rounding_res), SGRPROJ_RECIP_BITS);
+
+  _mm_storeu_si128((__m128i *)&B[idx], b_res);
+}
+
+static void selfguided_restoration_1_v(uint8_t *src, int width, int height,
+                                       int src_stride, int32_t *A, int32_t *B,
+                                       int buf_stride) {
+  int i, j;
+
+  // Vertical sum
+  // When the width is not a multiple of 4, we know that 'stride' is rounded up
+  // to a multiple of 4. So it is safe for this loop to calculate extra columns
+  // at the right-hand edge of the frame.
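+  //
+  // Roughly, in scalar form, the column pass below computes, for each
+  // column j, B[i][j] = sum of the three source rows centered on row i and
+  // A[i][j] = the corresponding sum of squares (sq(x) denoting x * x),
+  // both clamped at the top/bottom edges and maintained incrementally:
+  //
+  //   sum = src[0][j] + src[1][j];  sum_sq = sq(src[0][j]) + sq(src[1][j]);
+  //   for (i = 0; i < height; ++i) {
+  //     B[i][j] = sum;  A[i][j] = sum_sq;
+  //     if (i + 2 < height) { sum += src[i+2][j]; sum_sq += sq(src[i+2][j]); }
+  //     if (i >= 1)         { sum -= src[i-1][j]; sum_sq -= sq(src[i-1][j]); }
+  //   }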
+ int width_extend = (width + 3) & ~3; + for (j = 0; j < width_extend; j += 4) { + __m128i a, b, x, y, x2, y2; + __m128i sum, sum_sq, tmp; + + a = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[j])); + b = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[src_stride + j])); + + sum = _mm_cvtepi16_epi32(_mm_add_epi16(a, b)); + tmp = _mm_unpacklo_epi16(a, b); + sum_sq = _mm_madd_epi16(tmp, tmp); + + _mm_store_si128((__m128i *)&B[j], sum); + _mm_store_si128((__m128i *)&A[j], sum_sq); + + x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[2 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + for (i = 1; i < height - 2; ++i) { + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + y = _mm_cvtepu8_epi32( + _mm_loadl_epi64((__m128i *)&src[(i + 2) * src_stride + j])); + + sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); + + x2 = _mm_mullo_epi32(x, x); + y2 = _mm_mullo_epi32(y, y); + + sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + } + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); + } +} + +static void selfguided_restoration_1_h(int32_t *A, int32_t *B, int width, + int height, int buf_stride, int eps, + int bit_depth) { + int i, j; + + // Horizontal sum + int width_extend = (width + 3) & ~3; + for (i = 0; i < height; ++i) { + int h = AOMMIN(2, height - i) + AOMMIN(1, i); + + __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]); + __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]); + __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]); + __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]); + + // Note: The _mm_slli_si128 call sets up a register containing + // {0, A[i * buf_stride], ..., A[i * buf_stride + 2]}, + // so that the first element of 'sum' (which should only add two values + // together) ends up calculated correctly. 
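+    //
+    // Concretely, writing b1 = {B0, B1, B2, B3} and b2 = {B4, ...} (lane 0
+    // first), the three terms added below are
+    //   {0,  B0, B1, B2}   from _mm_slli_si128(b1, 4)
+    //   {B0, B1, B2, B3}   from b1
+    //   {B1, B2, B3, B4}   from _mm_alignr_epi8(b2, b1, 4)
+    // so lane j of sum_ holds the 3-tap horizontal sum centered on column j,
+    // with only two taps in lane 0 at the left edge. This is why n and
+    // one_over_n below use 2 * h for lane 0 and 3 * h elsewhere.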
+ __m128i sum_ = _mm_add_epi32(_mm_slli_si128(b1, 4), + _mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4))); + __m128i sum_sq_ = _mm_add_epi32( + _mm_slli_si128(a1, 4), _mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4))); + __m128i n = _mm_set_epi32(3 * h, 3 * h, 3 * h, 2 * h); + __m128i one_over_n = + _mm_set_epi32(one_by_x[3 * h - 1], one_by_x[3 * h - 1], + one_by_x[3 * h - 1], one_by_x[2 * h - 1]); + __m128i s = _mm_set_epi32( + sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1], + sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][2 * h - 1]); + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride, A, + B); + + n = _mm_set1_epi32(3 * h); + one_over_n = _mm_set1_epi32(one_by_x[3 * h - 1]); + s = _mm_set1_epi32(sgrproj_mtable[eps - 1][3 * h - 1]); + + // Re-align a1 and b1 so that they start at index i * buf_stride + 3 + a2 = _mm_alignr_epi8(a2, a1, 12); + b2 = _mm_alignr_epi8(b2, b1, 12); + + // Note: When the width is not a multiple of 4, this loop may end up + // writing to the last 4 columns of the frame, potentially with incorrect + // values (especially for r=2 and r=3). + // This is fine, since we fix up those values in the block after this + // loop, and in exchange we never have more than four values to + // write / fix up after this loop finishes. + for (j = 4; j < width_extend - 4; j += 4) { + a1 = a2; + b1 = b2; + a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 3]); + b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 3]); + /* Loop invariant: At this point, + a1 = original A[i * buf_stride + j - 1 : i * buf_stride + j + 3] + a2 = original A[i * buf_stride + j + 3 : i * buf_stride + j + 7] + and similar for b1,b2 and B + */ + sum_ = _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), + _mm_alignr_epi8(b2, b1, 8))); + sum_sq_ = _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), + _mm_alignr_epi8(a2, a1, 8))); + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + A, B); + } + __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 3]); + __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 3]); + + j = width - 4; + switch (width % 4) { + case 0: + a1 = a2; + b1 = b2; + a2 = a3; + b2 = b3; + break; + case 1: + a1 = _mm_alignr_epi8(a2, a1, 4); + b1 = _mm_alignr_epi8(b2, b1, 4); + a2 = _mm_alignr_epi8(a3, a2, 4); + b2 = _mm_alignr_epi8(b3, b2, 4); + break; + case 2: + a1 = _mm_alignr_epi8(a2, a1, 8); + b1 = _mm_alignr_epi8(b2, b1, 8); + a2 = _mm_alignr_epi8(a3, a2, 8); + b2 = _mm_alignr_epi8(b3, b2, 8); + break; + case 3: + a1 = _mm_alignr_epi8(a2, a1, 12); + b1 = _mm_alignr_epi8(b2, b1, 12); + a2 = _mm_alignr_epi8(a3, a2, 12); + b2 = _mm_alignr_epi8(b3, b2, 12); + break; + } + + // Zero out the data loaded from "off the edge" of the array + __m128i zero = _mm_setzero_si128(); + a2 = _mm_blend_epi16(a2, zero, 0xfc); + b2 = _mm_blend_epi16(b2, zero, 0xfc); + + sum_ = _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), + _mm_alignr_epi8(b2, b1, 8))); + sum_sq_ = _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), + _mm_alignr_epi8(a2, a1, 8))); + n = _mm_set_epi32(2 * h, 3 * h, 3 * h, 3 * h); + one_over_n = _mm_set_epi32(one_by_x[2 * h - 1], one_by_x[3 * h - 1], + one_by_x[3 * h - 1], one_by_x[3 * h - 1]); + s = _mm_set_epi32( + sgrproj_mtable[eps - 1][2 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1], + sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1]); + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + A, B); + 
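+    // (Note that _mm_set_epi32 takes its arguments highest lane first, so
+    // the 2 * h / one_by_x[2 * h - 1] terms above land in lane 3, i.e. the
+    // frame's last column, which has only two horizontal taps.)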
} +} + +static void selfguided_restoration_2_v(uint8_t *src, int width, int height, + int src_stride, int32_t *A, int32_t *B, + int buf_stride) { + int i, j; + + // Vertical sum + int width_extend = (width + 3) & ~3; + for (j = 0; j < width_extend; j += 4) { + __m128i a, b, c, c2, x, y, x2, y2; + __m128i sum, sum_sq, tmp; + + a = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[j])); + b = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[src_stride + j])); + c = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[2 * src_stride + j])); + + sum = _mm_cvtepi16_epi32(_mm_add_epi16(_mm_add_epi16(a, b), c)); + // Important: Since c may be up to 2^8, the result on squaring may + // be up to 2^16. So we need to zero-extend, not sign-extend. + c2 = _mm_cvtepu16_epi32(_mm_mullo_epi16(c, c)); + tmp = _mm_unpacklo_epi16(a, b); + sum_sq = _mm_add_epi32(_mm_madd_epi16(tmp, tmp), c2); + + _mm_store_si128((__m128i *)&B[j], sum); + _mm_store_si128((__m128i *)&A[j], sum_sq); + + x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[3 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[4 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + for (i = 2; i < height - 3; ++i) { + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_cvtsi32_si128(*((int *)&src[(i - 2) * src_stride + j]))); + y = _mm_cvtepu8_epi32( + _mm_cvtsi32_si128(*((int *)&src[(i + 3) * src_stride + j]))); + + sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); + + x2 = _mm_mullo_epi32(x, x); + y2 = _mm_mullo_epi32(y, y); + + sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + } + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq); + } +} + +static void selfguided_restoration_2_h(int32_t *A, int32_t *B, int width, + int height, int buf_stride, int eps, + int bit_depth) { + int i, j; + + // Horizontal sum + int width_extend = (width + 3) & ~3; + for (i = 0; i < height; ++i) { + int h = AOMMIN(3, height - i) + AOMMIN(2, i); + + __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]); + __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]); + __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]); + __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]); + + __m128i sum_ = _mm_add_epi32( + _mm_add_epi32( + _mm_add_epi32(_mm_slli_si128(b1, 8), _mm_slli_si128(b1, 4)), + _mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4))), + _mm_alignr_epi8(b2, b1, 8)); + __m128i sum_sq_ = _mm_add_epi32( + _mm_add_epi32( + 
_mm_add_epi32(_mm_slli_si128(a1, 8), _mm_slli_si128(a1, 4)), + _mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4))), + _mm_alignr_epi8(a2, a1, 8)); + + __m128i n = _mm_set_epi32(5 * h, 5 * h, 4 * h, 3 * h); + __m128i one_over_n = + _mm_set_epi32(one_by_x[5 * h - 1], one_by_x[5 * h - 1], + one_by_x[4 * h - 1], one_by_x[3 * h - 1]); + __m128i s = _mm_set_epi32( + sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1], + sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1]); + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride, A, + B); + + // Re-align a1 and b1 so that they start at index i * buf_stride + 2 + a2 = _mm_alignr_epi8(a2, a1, 8); + b2 = _mm_alignr_epi8(b2, b1, 8); + + n = _mm_set1_epi32(5 * h); + one_over_n = _mm_set1_epi32(one_by_x[5 * h - 1]); + s = _mm_set1_epi32(sgrproj_mtable[eps - 1][5 * h - 1]); + + for (j = 4; j < width_extend - 4; j += 4) { + a1 = a2; + a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 2]); + b1 = b2; + b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 2]); + /* Loop invariant: At this point, + a1 = original A[i * buf_stride + j - 2 : i * buf_stride + j + 2] + a2 = original A[i * buf_stride + j + 2 : i * buf_stride + j + 6] + and similar for b1,b2 and B + */ + sum_ = _mm_add_epi32( + _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), + _mm_alignr_epi8(b2, b1, 8))), + _mm_add_epi32(_mm_alignr_epi8(b2, b1, 12), b2)); + sum_sq_ = _mm_add_epi32( + _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), + _mm_alignr_epi8(a2, a1, 8))), + _mm_add_epi32(_mm_alignr_epi8(a2, a1, 12), a2)); + + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + A, B); + } + // If the width is not a multiple of 4, we need to reset j to width - 4 + // and adjust a1, a2, b1, b2 so that the loop invariant above is maintained + __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 2]); + __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 2]); + + j = width - 4; + switch (width % 4) { + case 0: + a1 = a2; + b1 = b2; + a2 = a3; + b2 = b3; + break; + case 1: + a1 = _mm_alignr_epi8(a2, a1, 4); + b1 = _mm_alignr_epi8(b2, b1, 4); + a2 = _mm_alignr_epi8(a3, a2, 4); + b2 = _mm_alignr_epi8(b3, b2, 4); + break; + case 2: + a1 = _mm_alignr_epi8(a2, a1, 8); + b1 = _mm_alignr_epi8(b2, b1, 8); + a2 = _mm_alignr_epi8(a3, a2, 8); + b2 = _mm_alignr_epi8(b3, b2, 8); + break; + case 3: + a1 = _mm_alignr_epi8(a2, a1, 12); + b1 = _mm_alignr_epi8(b2, b1, 12); + a2 = _mm_alignr_epi8(a3, a2, 12); + b2 = _mm_alignr_epi8(b3, b2, 12); + break; + } + + // Zero out the data loaded from "off the edge" of the array + __m128i zero = _mm_setzero_si128(); + a2 = _mm_blend_epi16(a2, zero, 0xf0); + b2 = _mm_blend_epi16(b2, zero, 0xf0); + + sum_ = _mm_add_epi32( + _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), + _mm_alignr_epi8(b2, b1, 8))), + _mm_add_epi32(_mm_alignr_epi8(b2, b1, 12), b2)); + sum_sq_ = _mm_add_epi32( + _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), + _mm_alignr_epi8(a2, a1, 8))), + _mm_add_epi32(_mm_alignr_epi8(a2, a1, 12), a2)); + + n = _mm_set_epi32(3 * h, 4 * h, 5 * h, 5 * h); + one_over_n = _mm_set_epi32(one_by_x[3 * h - 1], one_by_x[4 * h - 1], + one_by_x[5 * h - 1], one_by_x[5 * h - 1]); + s = _mm_set_epi32( + sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1], + sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1]); + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + A, B); + } +} + +static 
void selfguided_restoration_3_v(uint8_t *src, int width, int height, + int src_stride, int32_t *A, int32_t *B, + int buf_stride) { + int i, j; + + // Vertical sum over 7-pixel regions, 4 columns at a time + int width_extend = (width + 3) & ~3; + for (j = 0; j < width_extend; j += 4) { + __m128i a, b, c, d, x, y, x2, y2; + __m128i sum, sum_sq, tmp, tmp2; + + a = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[j])); + b = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[src_stride + j])); + c = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[2 * src_stride + j])); + d = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[3 * src_stride + j])); + + sum = _mm_cvtepi16_epi32( + _mm_add_epi16(_mm_add_epi16(a, b), _mm_add_epi16(c, d))); + tmp = _mm_unpacklo_epi16(a, b); + tmp2 = _mm_unpacklo_epi16(c, d); + sum_sq = + _mm_add_epi32(_mm_madd_epi16(tmp, tmp), _mm_madd_epi16(tmp2, tmp2)); + + _mm_store_si128((__m128i *)&B[j], sum); + _mm_store_si128((__m128i *)&A[j], sum_sq); + + x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[4 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[5 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[2 * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[2 * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[6 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + for (i = 3; i < height - 4; ++i) { + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_cvtsi32_si128(*((int *)&src[(i - 3) * src_stride + j]))); + y = _mm_cvtepu8_epi32( + _mm_cvtsi32_si128(*((int *)&src[(i + 4) * src_stride + j]))); + + sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); + + x2 = _mm_mullo_epi32(x, x); + y2 = _mm_mullo_epi32(y, y); + + sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + } + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 3) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq); + + x = _mm_cvtepu8_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 3) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 3) * buf_stride + j], sum_sq); + } +} + +static void selfguided_restoration_3_h(int32_t *A, int32_t *B, int width, + int height, int buf_stride, int eps, + int bit_depth) { + int i, j; + // Horizontal sum over 
7-pixel regions of dst + int width_extend = (width + 3) & ~3; + for (i = 0; i < height; ++i) { + int h = AOMMIN(4, height - i) + AOMMIN(3, i); + + __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]); + __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]); + __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]); + __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]); + + __m128i sum_ = _mm_add_epi32( + _mm_add_epi32( + _mm_add_epi32(_mm_slli_si128(b1, 12), _mm_slli_si128(b1, 8)), + _mm_add_epi32(_mm_slli_si128(b1, 4), b1)), + _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), + _mm_alignr_epi8(b2, b1, 8)), + _mm_alignr_epi8(b2, b1, 12))); + __m128i sum_sq_ = _mm_add_epi32( + _mm_add_epi32( + _mm_add_epi32(_mm_slli_si128(a1, 12), _mm_slli_si128(a1, 8)), + _mm_add_epi32(_mm_slli_si128(a1, 4), a1)), + _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), + _mm_alignr_epi8(a2, a1, 8)), + _mm_alignr_epi8(a2, a1, 12))); + + __m128i n = _mm_set_epi32(7 * h, 6 * h, 5 * h, 4 * h); + __m128i one_over_n = + _mm_set_epi32(one_by_x[7 * h - 1], one_by_x[6 * h - 1], + one_by_x[5 * h - 1], one_by_x[4 * h - 1]); + __m128i s = _mm_set_epi32( + sgrproj_mtable[eps - 1][7 * h - 1], sgrproj_mtable[eps - 1][6 * h - 1], + sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1]); + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride, A, + B); + + // Re-align a1 and b1 so that they start at index i * buf_stride + 1 + a2 = _mm_alignr_epi8(a2, a1, 4); + b2 = _mm_alignr_epi8(b2, b1, 4); + + n = _mm_set1_epi32(7 * h); + one_over_n = _mm_set1_epi32(one_by_x[7 * h - 1]); + s = _mm_set1_epi32(sgrproj_mtable[eps - 1][7 * h - 1]); + + for (j = 4; j < width_extend - 4; j += 4) { + a1 = a2; + a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 1]); + b1 = b2; + b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 1]); + __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 5]); + __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 5]); + /* Loop invariant: At this point, + a1 = original A[i * buf_stride + j - 3 : i * buf_stride + j + 1] + a2 = original A[i * buf_stride + j + 1 : i * buf_stride + j + 5] + a3 = original A[i * buf_stride + j + 5 : i * buf_stride + j + 9] + and similar for b1,b2,b3 and B + */ + sum_ = _mm_add_epi32( + _mm_add_epi32(_mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4)), + _mm_add_epi32(_mm_alignr_epi8(b2, b1, 8), + _mm_alignr_epi8(b2, b1, 12))), + _mm_add_epi32(_mm_add_epi32(b2, _mm_alignr_epi8(b3, b2, 4)), + _mm_alignr_epi8(b3, b2, 8))); + sum_sq_ = _mm_add_epi32( + _mm_add_epi32(_mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4)), + _mm_add_epi32(_mm_alignr_epi8(a2, a1, 8), + _mm_alignr_epi8(a2, a1, 12))), + _mm_add_epi32(_mm_add_epi32(a2, _mm_alignr_epi8(a3, a2, 4)), + _mm_alignr_epi8(a3, a2, 8))); + + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + A, B); + } + __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 1]); + __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 1]); + + j = width - 4; + switch (width % 4) { + case 0: + a1 = a2; + b1 = b2; + a2 = a3; + b2 = b3; + break; + case 1: + a1 = _mm_alignr_epi8(a2, a1, 4); + b1 = _mm_alignr_epi8(b2, b1, 4); + a2 = _mm_alignr_epi8(a3, a2, 4); + b2 = _mm_alignr_epi8(b3, b2, 4); + break; + case 2: + a1 = _mm_alignr_epi8(a2, a1, 8); + b1 = _mm_alignr_epi8(b2, b1, 8); + a2 = _mm_alignr_epi8(a3, a2, 8); + b2 = _mm_alignr_epi8(b3, b2, 8); + break; + case 3: + a1 = _mm_alignr_epi8(a2, a1, 12); + b1 = 
_mm_alignr_epi8(b2, b1, 12); + a2 = _mm_alignr_epi8(a3, a2, 12); + b2 = _mm_alignr_epi8(b3, b2, 12); + break; + } + + // Zero out the data loaded from "off the edge" of the array + __m128i zero = _mm_setzero_si128(); + a2 = _mm_blend_epi16(a2, zero, 0xc0); + b2 = _mm_blend_epi16(b2, zero, 0xc0); + + sum_ = _mm_add_epi32( + _mm_add_epi32(_mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4)), + _mm_add_epi32(_mm_alignr_epi8(b2, b1, 8), + _mm_alignr_epi8(b2, b1, 12))), + _mm_add_epi32(_mm_add_epi32(b2, _mm_alignr_epi8(zero, b2, 4)), + _mm_alignr_epi8(zero, b2, 8))); + sum_sq_ = _mm_add_epi32( + _mm_add_epi32(_mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4)), + _mm_add_epi32(_mm_alignr_epi8(a2, a1, 8), + _mm_alignr_epi8(a2, a1, 12))), + _mm_add_epi32(_mm_add_epi32(a2, _mm_alignr_epi8(zero, a2, 4)), + _mm_alignr_epi8(zero, a2, 8))); + + n = _mm_set_epi32(4 * h, 5 * h, 6 * h, 7 * h); + one_over_n = _mm_set_epi32(one_by_x[4 * h - 1], one_by_x[5 * h - 1], + one_by_x[6 * h - 1], one_by_x[7 * h - 1]); + s = _mm_set_epi32( + sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1], + sgrproj_mtable[eps - 1][6 * h - 1], sgrproj_mtable[eps - 1][7 * h - 1]); + calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + A, B); + } +} + +void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, + int stride, int32_t *dst, int dst_stride, + int r, int eps, int32_t *tmpbuf) { + int32_t *A = tmpbuf; + int32_t *B = A + SGRPROJ_OUTBUF_SIZE; + int i, j; + // Adjusting the stride of A and B here appears to avoid bad cache effects, + // leading to a significant speed improvement. + // We also align the stride to a multiple of 16 bytes for efficiency. + int buf_stride = ((width + 3) & ~3) + 16; + + // Don't filter tiles with dimensions < 5 on any axis + if ((width < 5) || (height < 5)) return; + + if (r == 1) { + selfguided_restoration_1_v(dgd, width, height, stride, A, B, buf_stride); + selfguided_restoration_1_h(A, B, width, height, buf_stride, eps, 8); + } else if (r == 2) { + selfguided_restoration_2_v(dgd, width, height, stride, A, B, buf_stride); + selfguided_restoration_2_h(A, B, width, height, buf_stride, eps, 8); + } else if (r == 3) { + selfguided_restoration_3_v(dgd, width, height, stride, A, B, buf_stride); + selfguided_restoration_3_h(A, B, width, height, buf_stride, eps, 8); + } else { + assert(0); + } + + { + i = 0; + j = 0; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k + buf_stride] + + A[k + buf_stride + 1]; + const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k + buf_stride] + + B[k + buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + for (j = 1; j < width - 1; ++j) { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k + buf_stride] + + A[k + buf_stride - 1] + A[k + buf_stride + 1]; + const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k + buf_stride] + + B[k + buf_stride - 1] + B[k + buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + j = width - 1; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k + 
buf_stride] + + A[k + buf_stride - 1]; + const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k + buf_stride] + + B[k + buf_stride - 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + } + for (i = 1; i < height - 1; ++i) { + j = 0; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + + A[k + 1] + A[k - buf_stride + 1] + + A[k + buf_stride + 1]; + const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) + + B[k + 1] + B[k - buf_stride + 1] + + B[k + buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + + // Vectorize the innermost loop + for (j = 1; j < width - 1; j += 4) { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 5; + + __m128i tmp0 = _mm_loadu_si128((__m128i *)&A[k - 1 - buf_stride]); + __m128i tmp1 = _mm_loadu_si128((__m128i *)&A[k + 3 - buf_stride]); + __m128i tmp2 = _mm_loadu_si128((__m128i *)&A[k - 1]); + __m128i tmp3 = _mm_loadu_si128((__m128i *)&A[k + 3]); + __m128i tmp4 = _mm_loadu_si128((__m128i *)&A[k - 1 + buf_stride]); + __m128i tmp5 = _mm_loadu_si128((__m128i *)&A[k + 3 + buf_stride]); + + __m128i a0 = _mm_add_epi32( + _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 4), tmp2), + _mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 8), + _mm_alignr_epi8(tmp5, tmp4, 4))), + _mm_alignr_epi8(tmp1, tmp0, 4)); + __m128i a1 = _mm_add_epi32(_mm_add_epi32(tmp0, tmp4), + _mm_add_epi32(_mm_alignr_epi8(tmp1, tmp0, 8), + _mm_alignr_epi8(tmp5, tmp4, 8))); + __m128i a = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(a0, a1), 2), a1); + + __m128i tmp6 = _mm_loadu_si128((__m128i *)&B[k - 1 - buf_stride]); + __m128i tmp7 = _mm_loadu_si128((__m128i *)&B[k + 3 - buf_stride]); + __m128i tmp8 = _mm_loadu_si128((__m128i *)&B[k - 1]); + __m128i tmp9 = _mm_loadu_si128((__m128i *)&B[k + 3]); + __m128i tmp10 = _mm_loadu_si128((__m128i *)&B[k - 1 + buf_stride]); + __m128i tmp11 = _mm_loadu_si128((__m128i *)&B[k + 3 + buf_stride]); + + __m128i b0 = _mm_add_epi32( + _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 4), tmp8), + _mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 8), + _mm_alignr_epi8(tmp11, tmp10, 4))), + _mm_alignr_epi8(tmp7, tmp6, 4)); + __m128i b1 = + _mm_add_epi32(_mm_add_epi32(tmp6, tmp10), + _mm_add_epi32(_mm_alignr_epi8(tmp7, tmp6, 8), + _mm_alignr_epi8(tmp11, tmp10, 8))); + __m128i b = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(b0, b1), 2), b1); + + __m128i src = _mm_cvtepu8_epi32(_mm_loadu_si128((__m128i *)&dgd[l])); + + __m128i rounding = _mm_set1_epi32( + (1 << (SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS)) >> 1); + __m128i v = _mm_add_epi32(_mm_mullo_epi32(a, src), b); + __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding), + SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + _mm_storeu_si128((__m128i *)&dst[m], w); + } + + // Deal with any extra pixels at the right-hand edge of the frame + // (typically have 2 such pixels, but may have anywhere between 0 and 3) + for (; j < width - 1; ++j) { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 5; + const int32_t a = + (A[k] + A[k - 1] + A[k + 1] + A[k - buf_stride] + A[k + buf_stride]) * + 4 + + (A[k - 1 - buf_stride] + A[k - 1 + buf_stride] + + A[k + 1 - buf_stride] + A[k + 1 + buf_stride]) * + 
3; + const int32_t b = + (B[k] + B[k - 1] + B[k + 1] + B[k - buf_stride] + B[k + buf_stride]) * + 4 + + (B[k - 1 - buf_stride] + B[k - 1 + buf_stride] + + B[k + 1 - buf_stride] + B[k + 1 + buf_stride]) * + 3; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + + j = width - 1; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + + A[k - 1] + A[k - buf_stride - 1] + + A[k + buf_stride - 1]; + const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) + + B[k - 1] + B[k - buf_stride - 1] + + B[k + buf_stride - 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + } + + { + i = height - 1; + j = 0; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k - buf_stride] + + A[k - buf_stride + 1]; + const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k - buf_stride] + + B[k - buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + for (j = 1; j < width - 1; ++j) { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k - buf_stride] + + A[k - buf_stride - 1] + A[k - buf_stride + 1]; + const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k - buf_stride] + + B[k - buf_stride - 1] + B[k - buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + j = width - 1; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k - buf_stride] + + A[k - buf_stride - 1]; + const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k - buf_stride] + + B[k - buf_stride - 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + } +} + +void av1_highpass_filter_sse4_1(uint8_t *dgd, int width, int height, int stride, + int32_t *dst, int dst_stride, int corner, + int edge) { + int i, j; + const int center = (1 << SGRPROJ_RST_BITS) - 4 * (corner + edge); + + { + i = 0; + j = 0; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + edge * (dgd[k + 1] + dgd[k + stride] + dgd[k] * 2) + + corner * + (dgd[k + stride + 1] + dgd[k + 1] + dgd[k + stride] + dgd[k]); + } + for (j = 1; j < width - 1; ++j) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = center * dgd[k] + + edge * (dgd[k - 1] + dgd[k + stride] + dgd[k + 1] + dgd[k]) + + corner * (dgd[k + stride - 1] + dgd[k + stride + 1] + + dgd[k - 1] + dgd[k + 1]); + } + j = width - 1; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + edge * (dgd[k - 1] + dgd[k + stride] + dgd[k] * 2) + + corner * + (dgd[k + stride - 1] + dgd[k - 1] + dgd[k + stride] + dgd[k]); + } + } + { + i = height - 1; + j = 0; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + edge * (dgd[k + 1] + dgd[k - stride] + dgd[k] * 2) + + corner * + (dgd[k - stride + 
1] + dgd[k + 1] + dgd[k - stride] + dgd[k]); + } + for (j = 1; j < width - 1; ++j) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = center * dgd[k] + + edge * (dgd[k - 1] + dgd[k - stride] + dgd[k + 1] + dgd[k]) + + corner * (dgd[k - stride - 1] + dgd[k - stride + 1] + + dgd[k - 1] + dgd[k + 1]); + } + j = width - 1; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + edge * (dgd[k - 1] + dgd[k - stride] + dgd[k] * 2) + + corner * + (dgd[k - stride - 1] + dgd[k - 1] + dgd[k - stride] + dgd[k]); + } + } + __m128i center_ = _mm_set1_epi16(center); + __m128i edge_ = _mm_set1_epi16(edge); + __m128i corner_ = _mm_set1_epi16(corner); + for (i = 1; i < height - 1; ++i) { + j = 0; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + + edge * (dgd[k - stride] + dgd[k + 1] + dgd[k + stride] + dgd[k]) + + corner * (dgd[k + stride + 1] + dgd[k - stride + 1] + + dgd[k - stride] + dgd[k + stride]); + } + // Process in units of 8 pixels at a time. + for (j = 1; j < width - 8; j += 8) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + + __m128i a = _mm_loadu_si128((__m128i *)&dgd[k - stride - 1]); + __m128i b = _mm_loadu_si128((__m128i *)&dgd[k - 1]); + __m128i c = _mm_loadu_si128((__m128i *)&dgd[k + stride - 1]); + + __m128i tl = _mm_cvtepu8_epi16(a); + __m128i tr = _mm_cvtepu8_epi16(_mm_srli_si128(a, 8)); + __m128i cl = _mm_cvtepu8_epi16(b); + __m128i cr = _mm_cvtepu8_epi16(_mm_srli_si128(b, 8)); + __m128i bl = _mm_cvtepu8_epi16(c); + __m128i br = _mm_cvtepu8_epi16(_mm_srli_si128(c, 8)); + + __m128i x = _mm_alignr_epi8(cr, cl, 2); + __m128i y = _mm_add_epi16(_mm_add_epi16(_mm_alignr_epi8(tr, tl, 2), cl), + _mm_add_epi16(_mm_alignr_epi8(br, bl, 2), + _mm_alignr_epi8(cr, cl, 4))); + __m128i z = _mm_add_epi16(_mm_add_epi16(tl, bl), + _mm_add_epi16(_mm_alignr_epi8(tr, tl, 4), + _mm_alignr_epi8(br, bl, 4))); + + __m128i res = _mm_add_epi16(_mm_mullo_epi16(x, center_), + _mm_add_epi16(_mm_mullo_epi16(y, edge_), + _mm_mullo_epi16(z, corner_))); + + _mm_storeu_si128((__m128i *)&dst[l], _mm_cvtepi16_epi32(res)); + _mm_storeu_si128((__m128i *)&dst[l + 4], + _mm_cvtepi16_epi32(_mm_srli_si128(res, 8))); + } + // If there are enough pixels left in this row, do another batch of 4 + // pixels. 
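+    // (For this 4-wide tail, output columns j .. j + 3 only need source
+    // columns j - 1 .. j + 4, so a single 8-byte load per row suffices and
+    // the shifted neighbour taps are extracted with _mm_srli_si128 within
+    // one register rather than _mm_alignr_epi8 across two; only the low
+    // four 16-bit results are widened and stored.)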
+ for (; j < width - 4; j += 4) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + + __m128i a = _mm_loadl_epi64((__m128i *)&dgd[k - stride - 1]); + __m128i b = _mm_loadl_epi64((__m128i *)&dgd[k - 1]); + __m128i c = _mm_loadl_epi64((__m128i *)&dgd[k + stride - 1]); + + __m128i tl = _mm_cvtepu8_epi16(a); + __m128i cl = _mm_cvtepu8_epi16(b); + __m128i bl = _mm_cvtepu8_epi16(c); + + __m128i x = _mm_srli_si128(cl, 2); + __m128i y = _mm_add_epi16( + _mm_add_epi16(_mm_srli_si128(tl, 2), cl), + _mm_add_epi16(_mm_srli_si128(bl, 2), _mm_srli_si128(cl, 4))); + __m128i z = _mm_add_epi16( + _mm_add_epi16(tl, bl), + _mm_add_epi16(_mm_srli_si128(tl, 4), _mm_srli_si128(bl, 4))); + + __m128i res = _mm_add_epi16(_mm_mullo_epi16(x, center_), + _mm_add_epi16(_mm_mullo_epi16(y, edge_), + _mm_mullo_epi16(z, corner_))); + + _mm_storeu_si128((__m128i *)&dst[l], _mm_cvtepi16_epi32(res)); + } + // Handle any leftover pixels + for (; j < width - 1; ++j) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + + edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k + 1]) + + corner * (dgd[k + stride - 1] + dgd[k - stride - 1] + + dgd[k - stride + 1] + dgd[k + stride + 1]); + } + j = width - 1; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + + edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k]) + + corner * (dgd[k + stride - 1] + dgd[k - stride - 1] + + dgd[k - stride] + dgd[k + stride]); + } + } +} + +void apply_selfguided_restoration_sse4_1(uint8_t *dat, int width, int height, + int stride, int eps, int *xqd, + uint8_t *dst, int dst_stride, + int32_t *tmpbuf) { + int xq[2]; + int32_t *flt1 = tmpbuf; + int32_t *flt2 = flt1 + RESTORATION_TILEPELS_MAX; + int32_t *tmpbuf2 = flt2 + RESTORATION_TILEPELS_MAX; + int i, j; + assert(width * height <= RESTORATION_TILEPELS_MAX); +#if USE_HIGHPASS_IN_SGRPROJ + av1_highpass_filter_sse4_1(dat, width, height, stride, flt1, width, + sgr_params[eps].corner, sgr_params[eps].edge); +#else + av1_selfguided_restoration_sse4_1(dat, width, height, stride, flt1, width, + sgr_params[eps].r1, sgr_params[eps].e1, + tmpbuf2); +#endif // USE_HIGHPASS_IN_SGRPROJ + av1_selfguided_restoration_sse4_1(dat, width, height, stride, flt2, width, + sgr_params[eps].r2, sgr_params[eps].e2, + tmpbuf2); + decode_xq(xqd, xq); + + __m128i xq0 = _mm_set1_epi32(xq[0]); + __m128i xq1 = _mm_set1_epi32(xq[1]); + for (i = 0; i < height; ++i) { + // Calculate output in batches of 8 pixels + for (j = 0; j < width; j += 8) { + const int k = i * width + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + __m128i src = + _mm_slli_epi16(_mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&dat[l])), + SGRPROJ_RST_BITS); + + const __m128i u_0 = _mm_cvtepu16_epi32(src); + const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(src, 8)); + + const __m128i f1_0 = + _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k]), u_0); + const __m128i f2_0 = + _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k]), u_0); + const __m128i f1_1 = + _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k + 4]), u_1); + const __m128i f2_1 = + _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k + 4]), u_1); + + const __m128i v_0 = _mm_add_epi32( + _mm_add_epi32(_mm_mullo_epi32(xq0, f1_0), _mm_mullo_epi32(xq1, f2_0)), + _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS)); + const __m128i v_1 = _mm_add_epi32( + _mm_add_epi32(_mm_mullo_epi32(xq0, f1_1), _mm_mullo_epi32(xq1, f2_1)), + _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS)); + 
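+      // Each lane of v_0 / v_1 now holds
+      //   xq[0] * (flt1 - u) + xq[1] * (flt2 - u) + (u << SGRPROJ_PRJ_BITS)
+      // with u = dat << SGRPROJ_RST_BITS, matching 'v' in the scalar
+      // leftover loop below; the next few lines round this back down by
+      // (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS) bits and pack to 8 bits.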
+ const __m128i rounding = + _mm_set1_epi32((1 << (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS)) >> 1); + const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding), + SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); + const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding), + SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); + + const __m128i tmp = _mm_packs_epi32(w_0, w_1); + const __m128i res = _mm_packus_epi16(tmp, tmp /* "don't care" value */); + _mm_storel_epi64((__m128i *)&dst[m], res); + } + // Process leftover pixels + for (; j < width; ++j) { + const int k = i * width + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int32_t u = ((int32_t)dat[l] << SGRPROJ_RST_BITS); + const int32_t f1 = (int32_t)flt1[k] - u; + const int32_t f2 = (int32_t)flt2[k] - u; + const int32_t v = xq[0] * f1 + xq[1] * f2 + (u << SGRPROJ_PRJ_BITS); + const int16_t w = + (int16_t)ROUND_POWER_OF_TWO(v, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); + dst[m] = (uint16_t)clip_pixel(w); + } + } +} + +#if CONFIG_HIGHBITDEPTH +// Only the vertical sums need to be adjusted for highbitdepth + +static void highbd_selfguided_restoration_1_v(uint16_t *src, int width, + int height, int src_stride, + int32_t *A, int32_t *B, + int buf_stride) { + int i, j; + + int width_extend = (width + 3) & ~3; + for (j = 0; j < width_extend; j += 4) { + __m128i a, b, x, y, x2, y2; + __m128i sum, sum_sq, tmp; + + a = _mm_loadl_epi64((__m128i *)&src[j]); + b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]); + + sum = _mm_cvtepi16_epi32(_mm_add_epi16(a, b)); + tmp = _mm_unpacklo_epi16(a, b); + sum_sq = _mm_madd_epi16(tmp, tmp); + + _mm_store_si128((__m128i *)&B[j], sum); + _mm_store_si128((__m128i *)&A[j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + for (i = 1; i < height - 2; ++i) { + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + y = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i + 2) * src_stride + j])); + + sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); + + x2 = _mm_mullo_epi32(x, x); + y2 = _mm_mullo_epi32(y, y); + + sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + } + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); + } +} + +static void highbd_selfguided_restoration_2_v(uint16_t *src, int width, + int height, int src_stride, + int32_t *A, int32_t *B, + int buf_stride) { + int i, j; + + int width_extend = (width + 3) & ~3; + for (j = 0; j < width_extend; j += 4) { + __m128i a, b, c, c2, x, y, x2, y2; + __m128i sum, sum_sq, tmp; + + a = _mm_loadl_epi64((__m128i *)&src[j]); + b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]); + c = _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j]); + + sum = _mm_cvtepi16_epi32(_mm_add_epi16(_mm_add_epi16(a, b), c)); + // Important: We need to widen *before* squaring here, since + // c^2 may be up to 2^24. 
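+    // (Contrast with the 8-bit path above, where c is at most 2^8 and so
+    // squaring in 16 bits and then zero-extending is sufficient.)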
+ c = _mm_cvtepu16_epi32(c); + c2 = _mm_mullo_epi32(c, c); + tmp = _mm_unpacklo_epi16(a, b); + sum_sq = _mm_add_epi32(_mm_madd_epi16(tmp, tmp), c2); + + _mm_store_si128((__m128i *)&B[j], sum); + _mm_store_si128((__m128i *)&A[j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[3 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[4 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + for (i = 2; i < height - 3; ++i) { + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j])); + y = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i + 3) * src_stride + j])); + + sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); + + x2 = _mm_mullo_epi32(x, x); + y2 = _mm_mullo_epi32(y, y); + + sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + } + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq); + } +} + +static void highbd_selfguided_restoration_3_v(uint16_t *src, int width, + int height, int src_stride, + int32_t *A, int32_t *B, + int buf_stride) { + int i, j; + + int width_extend = (width + 3) & ~3; + for (j = 0; j < width_extend; j += 4) { + __m128i a, b, c, d, x, y, x2, y2; + __m128i sum, sum_sq, tmp, tmp2; + + a = _mm_loadl_epi64((__m128i *)&src[j]); + b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]); + c = _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j]); + d = _mm_loadl_epi64((__m128i *)&src[3 * src_stride + j]); + + sum = _mm_cvtepi16_epi32( + _mm_add_epi16(_mm_add_epi16(a, b), _mm_add_epi16(c, d))); + tmp = _mm_unpacklo_epi16(a, b); + tmp2 = _mm_unpacklo_epi16(c, d); + sum_sq = + _mm_add_epi32(_mm_madd_epi16(tmp, tmp), _mm_madd_epi16(tmp2, tmp2)); + + _mm_store_si128((__m128i *)&B[j], sum); + _mm_store_si128((__m128i *)&A[j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[4 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[5 * src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[2 * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[2 * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[6 * 
src_stride + j])); + sum = _mm_add_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_add_epi32(sum_sq, x2); + + for (i = 3; i < height - 4; ++i) { + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 3) * src_stride + j])); + y = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i + 4) * src_stride + j])); + + sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); + + x2 = _mm_mullo_epi32(x, x); + y2 = _mm_mullo_epi32(y, y); + + sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + } + _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 3) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq); + + x = _mm_cvtepu16_epi32( + _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + sum = _mm_sub_epi32(sum, x); + x2 = _mm_mullo_epi32(x, x); + sum_sq = _mm_sub_epi32(sum_sq, x2); + + _mm_store_si128((__m128i *)&B[(i + 3) * buf_stride + j], sum); + _mm_store_si128((__m128i *)&A[(i + 3) * buf_stride + j], sum_sq); + } +} + +void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, + int height, int stride, + int32_t *dst, int dst_stride, + int bit_depth, int r, int eps, + int32_t *tmpbuf) { + int32_t *A = tmpbuf; + int32_t *B = A + SGRPROJ_OUTBUF_SIZE; + int i, j; + // Adjusting the stride of A and B here appears to avoid bad cache effects, + // leading to a significant speed improvement. + // We also align the stride to a multiple of 16 bytes for efficiency. 
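+  // For example, width = 70 gives buf_stride = ((70 + 3) & ~3) + 16 = 88
+  // int32 entries, i.e. 352 bytes, which is a multiple of 16.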
+ int buf_stride = ((width + 3) & ~3) + 16; + + // Don't filter tiles with dimensions < 5 on any axis + if ((width < 5) || (height < 5)) return; + + if (r == 1) { + highbd_selfguided_restoration_1_v(dgd, width, height, stride, A, B, + buf_stride); + selfguided_restoration_1_h(A, B, width, height, buf_stride, eps, bit_depth); + } else if (r == 2) { + highbd_selfguided_restoration_2_v(dgd, width, height, stride, A, B, + buf_stride); + selfguided_restoration_2_h(A, B, width, height, buf_stride, eps, bit_depth); + } else if (r == 3) { + highbd_selfguided_restoration_3_v(dgd, width, height, stride, A, B, + buf_stride); + selfguided_restoration_3_h(A, B, width, height, buf_stride, eps, bit_depth); + } else { + assert(0); + } + + { + i = 0; + j = 0; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k + buf_stride] + + A[k + buf_stride + 1]; + const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k + buf_stride] + + B[k + buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + for (j = 1; j < width - 1; ++j) { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k + buf_stride] + + A[k + buf_stride - 1] + A[k + buf_stride + 1]; + const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k + buf_stride] + + B[k + buf_stride - 1] + B[k + buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + j = width - 1; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k + buf_stride] + + A[k + buf_stride - 1]; + const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k + buf_stride] + + B[k + buf_stride - 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + } + for (i = 1; i < height - 1; ++i) { + j = 0; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + + A[k + 1] + A[k - buf_stride + 1] + + A[k + buf_stride + 1]; + const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) + + B[k + 1] + B[k - buf_stride + 1] + + B[k + buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + + // Vectorize the innermost loop + for (j = 1; j < width - 1; j += 4) { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 5; + + __m128i tmp0 = _mm_loadu_si128((__m128i *)&A[k - 1 - buf_stride]); + __m128i tmp1 = _mm_loadu_si128((__m128i *)&A[k + 3 - buf_stride]); + __m128i tmp2 = _mm_loadu_si128((__m128i *)&A[k - 1]); + __m128i tmp3 = _mm_loadu_si128((__m128i *)&A[k + 3]); + __m128i tmp4 = _mm_loadu_si128((__m128i *)&A[k - 1 + buf_stride]); + __m128i tmp5 = _mm_loadu_si128((__m128i *)&A[k + 3 + buf_stride]); + + __m128i a0 = _mm_add_epi32( + _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 4), tmp2), + _mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 8), + _mm_alignr_epi8(tmp5, tmp4, 4))), + _mm_alignr_epi8(tmp1, tmp0, 4)); + __m128i a1 = 
_mm_add_epi32(_mm_add_epi32(tmp0, tmp4), + _mm_add_epi32(_mm_alignr_epi8(tmp1, tmp0, 8), + _mm_alignr_epi8(tmp5, tmp4, 8))); + __m128i a = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(a0, a1), 2), a1); + + __m128i tmp6 = _mm_loadu_si128((__m128i *)&B[k - 1 - buf_stride]); + __m128i tmp7 = _mm_loadu_si128((__m128i *)&B[k + 3 - buf_stride]); + __m128i tmp8 = _mm_loadu_si128((__m128i *)&B[k - 1]); + __m128i tmp9 = _mm_loadu_si128((__m128i *)&B[k + 3]); + __m128i tmp10 = _mm_loadu_si128((__m128i *)&B[k - 1 + buf_stride]); + __m128i tmp11 = _mm_loadu_si128((__m128i *)&B[k + 3 + buf_stride]); + + __m128i b0 = _mm_add_epi32( + _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 4), tmp8), + _mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 8), + _mm_alignr_epi8(tmp11, tmp10, 4))), + _mm_alignr_epi8(tmp7, tmp6, 4)); + __m128i b1 = + _mm_add_epi32(_mm_add_epi32(tmp6, tmp10), + _mm_add_epi32(_mm_alignr_epi8(tmp7, tmp6, 8), + _mm_alignr_epi8(tmp11, tmp10, 8))); + __m128i b = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(b0, b1), 2), b1); + + __m128i src = _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i *)&dgd[l])); + + __m128i rounding = _mm_set1_epi32( + (1 << (SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS)) >> 1); + __m128i v = _mm_add_epi32(_mm_mullo_epi32(a, src), b); + __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding), + SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + _mm_storeu_si128((__m128i *)&dst[m], w); + } + + // Deal with any extra pixels at the right-hand edge of the frame + // (typically have 2 such pixels, but may have anywhere between 0 and 3) + for (; j < width - 1; ++j) { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 5; + const int32_t a = + (A[k] + A[k - 1] + A[k + 1] + A[k - buf_stride] + A[k + buf_stride]) * + 4 + + (A[k - 1 - buf_stride] + A[k - 1 + buf_stride] + + A[k + 1 - buf_stride] + A[k + 1 + buf_stride]) * + 3; + const int32_t b = + (B[k] + B[k - 1] + B[k + 1] + B[k - buf_stride] + B[k + buf_stride]) * + 4 + + (B[k - 1 - buf_stride] + B[k - 1 + buf_stride] + + B[k + 1 - buf_stride] + B[k + 1 + buf_stride]) * + 3; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + + j = width - 1; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + + A[k - 1] + A[k - buf_stride - 1] + + A[k + buf_stride - 1]; + const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) + + B[k - 1] + B[k - buf_stride - 1] + + B[k + buf_stride - 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + } + + { + i = height - 1; + j = 0; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k - buf_stride] + + A[k - buf_stride + 1]; + const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k - buf_stride] + + B[k - buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + for (j = 1; j < width - 1; ++j) { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k - buf_stride] + + A[k - buf_stride - 1] + A[k - buf_stride + 1]; + const int32_t b = B[k] 
+ 2 * (B[k - 1] + B[k + 1]) + B[k - buf_stride] + + B[k - buf_stride - 1] + B[k - buf_stride + 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + j = width - 1; + { + const int k = i * buf_stride + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int nb = 3; + const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k - buf_stride] + + A[k - buf_stride - 1]; + const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k - buf_stride] + + B[k - buf_stride - 1]; + const int32_t v = a * dgd[l] + b; + dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + } + } +} + +void av1_highpass_filter_highbd_sse4_1(uint16_t *dgd, int width, int height, + int stride, int32_t *dst, int dst_stride, + int corner, int edge) { + int i, j; + const int center = (1 << SGRPROJ_RST_BITS) - 4 * (corner + edge); + + { + i = 0; + j = 0; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + edge * (dgd[k + 1] + dgd[k + stride] + dgd[k] * 2) + + corner * + (dgd[k + stride + 1] + dgd[k + 1] + dgd[k + stride] + dgd[k]); + } + for (j = 1; j < width - 1; ++j) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = center * dgd[k] + + edge * (dgd[k - 1] + dgd[k + stride] + dgd[k + 1] + dgd[k]) + + corner * (dgd[k + stride - 1] + dgd[k + stride + 1] + + dgd[k - 1] + dgd[k + 1]); + } + j = width - 1; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + edge * (dgd[k - 1] + dgd[k + stride] + dgd[k] * 2) + + corner * + (dgd[k + stride - 1] + dgd[k - 1] + dgd[k + stride] + dgd[k]); + } + } + __m128i center_ = _mm_set1_epi32(center); + __m128i edge_ = _mm_set1_epi32(edge); + __m128i corner_ = _mm_set1_epi32(corner); + for (i = 1; i < height - 1; ++i) { + j = 0; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + + edge * (dgd[k - stride] + dgd[k + 1] + dgd[k + stride] + dgd[k]) + + corner * (dgd[k + stride + 1] + dgd[k - stride + 1] + + dgd[k - stride] + dgd[k + stride]); + } + // Process 4 pixels at a time + for (j = 1; j < width - 4; j += 4) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + + __m128i a = _mm_loadu_si128((__m128i *)&dgd[k - stride - 1]); + __m128i b = _mm_loadu_si128((__m128i *)&dgd[k - 1]); + __m128i c = _mm_loadu_si128((__m128i *)&dgd[k + stride - 1]); + + __m128i tl = _mm_cvtepu16_epi32(a); + __m128i tr = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8)); + __m128i cl = _mm_cvtepu16_epi32(b); + __m128i cr = _mm_cvtepu16_epi32(_mm_srli_si128(b, 8)); + __m128i bl = _mm_cvtepu16_epi32(c); + __m128i br = _mm_cvtepu16_epi32(_mm_srli_si128(c, 8)); + + __m128i x = _mm_alignr_epi8(cr, cl, 4); + __m128i y = _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tr, tl, 4), cl), + _mm_add_epi32(_mm_alignr_epi8(br, bl, 4), + _mm_alignr_epi8(cr, cl, 8))); + __m128i z = _mm_add_epi32(_mm_add_epi32(tl, bl), + _mm_add_epi32(_mm_alignr_epi8(tr, tl, 8), + _mm_alignr_epi8(br, bl, 8))); + + __m128i res = _mm_add_epi32(_mm_mullo_epi32(x, center_), + _mm_add_epi32(_mm_mullo_epi32(y, edge_), + _mm_mullo_epi32(z, corner_))); + + _mm_storeu_si128((__m128i *)&dst[l], res); + } + // Handle any leftover pixels + for (; j < width - 1; ++j) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + + edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k + 1]) + + corner * (dgd[k + stride - 1] + dgd[k - stride 
- 1] + + dgd[k - stride + 1] + dgd[k + stride + 1]); + } + j = width - 1; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + + edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k]) + + corner * (dgd[k + stride - 1] + dgd[k - stride - 1] + + dgd[k - stride] + dgd[k + stride]); + } + } + { + i = height - 1; + j = 0; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + edge * (dgd[k + 1] + dgd[k - stride] + dgd[k] * 2) + + corner * + (dgd[k - stride + 1] + dgd[k + 1] + dgd[k - stride] + dgd[k]); + } + for (j = 1; j < width - 1; ++j) { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = center * dgd[k] + + edge * (dgd[k - 1] + dgd[k - stride] + dgd[k + 1] + dgd[k]) + + corner * (dgd[k - stride - 1] + dgd[k - stride + 1] + + dgd[k - 1] + dgd[k + 1]); + } + j = width - 1; + { + const int k = i * stride + j; + const int l = i * dst_stride + j; + dst[l] = + center * dgd[k] + edge * (dgd[k - 1] + dgd[k - stride] + dgd[k] * 2) + + corner * + (dgd[k - stride - 1] + dgd[k - 1] + dgd[k - stride] + dgd[k]); + } + } +} + +void apply_selfguided_restoration_highbd_sse4_1( + uint16_t *dat, int width, int height, int stride, int bit_depth, int eps, + int *xqd, uint16_t *dst, int dst_stride, int32_t *tmpbuf) { + int xq[2]; + int32_t *flt1 = tmpbuf; + int32_t *flt2 = flt1 + RESTORATION_TILEPELS_MAX; + int32_t *tmpbuf2 = flt2 + RESTORATION_TILEPELS_MAX; + int i, j; + assert(width * height <= RESTORATION_TILEPELS_MAX); +#if USE_HIGHPASS_IN_SGRPROJ + av1_highpass_filter_highbd_sse4_1(dat, width, height, stride, flt1, width, + sgr_params[eps].corner, + sgr_params[eps].edge); +#else + av1_selfguided_restoration_highbd_sse4_1(dat, width, height, stride, flt1, + width, bit_depth, sgr_params[eps].r1, + sgr_params[eps].e1, tmpbuf2); +#endif // USE_HIGHPASS_IN_SGRPROJ + av1_selfguided_restoration_highbd_sse4_1(dat, width, height, stride, flt2, + width, bit_depth, sgr_params[eps].r2, + sgr_params[eps].e2, tmpbuf2); + decode_xq(xqd, xq); + + __m128i xq0 = _mm_set1_epi32(xq[0]); + __m128i xq1 = _mm_set1_epi32(xq[1]); + for (i = 0; i < height; ++i) { + // Calculate output in batches of 8 pixels + for (j = 0; j < width; j += 8) { + const int k = i * width + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + __m128i src = + _mm_slli_epi16(_mm_load_si128((__m128i *)&dat[l]), SGRPROJ_RST_BITS); + + const __m128i u_0 = _mm_cvtepu16_epi32(src); + const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(src, 8)); + + const __m128i f1_0 = + _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k]), u_0); + const __m128i f2_0 = + _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k]), u_0); + const __m128i f1_1 = + _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k + 4]), u_1); + const __m128i f2_1 = + _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k + 4]), u_1); + + const __m128i v_0 = _mm_add_epi32( + _mm_add_epi32(_mm_mullo_epi32(xq0, f1_0), _mm_mullo_epi32(xq1, f2_0)), + _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS)); + const __m128i v_1 = _mm_add_epi32( + _mm_add_epi32(_mm_mullo_epi32(xq0, f1_1), _mm_mullo_epi32(xq1, f2_1)), + _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS)); + + const __m128i rounding = + _mm_set1_epi32((1 << (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS)) >> 1); + const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding), + SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); + const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding), + SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); + + // Pack into 16 
bits and clamp to [0, 2^bit_depth) + const __m128i tmp = _mm_packus_epi32(w_0, w_1); + const __m128i max = _mm_set1_epi16((1 << bit_depth) - 1); + const __m128i res = _mm_min_epi16(tmp, max); + + _mm_store_si128((__m128i *)&dst[m], res); + } + // Process leftover pixels + for (; j < width; ++j) { + const int k = i * width + j; + const int l = i * stride + j; + const int m = i * dst_stride + j; + const int32_t u = ((int32_t)dat[l] << SGRPROJ_RST_BITS); + const int32_t f1 = (int32_t)flt1[k] - u; + const int32_t f2 = (int32_t)flt2[k] - u; + const int32_t v = xq[0] * f1 + xq[1] * f2 + (u << SGRPROJ_PRJ_BITS); + const int16_t w = + (int16_t)ROUND_POWER_OF_TWO(v, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); + dst[m] = (uint16_t)clip_pixel_highbd(w, bit_depth); + } + } +} + +#endif -- cgit v1.2.3 From 7369c7d7a5eed32963d8af37658286617919f91c Mon Sep 17 00:00:00 2001 From: trav90 Date: Thu, 18 Oct 2018 06:04:57 -0500 Subject: Update aom to commit id f5bdeac22930ff4c6b219be49c843db35970b918 --- third_party/aom/av1/common/x86/selfguided_sse4.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'third_party/aom/av1/common/x86/selfguided_sse4.c') diff --git a/third_party/aom/av1/common/x86/selfguided_sse4.c b/third_party/aom/av1/common/x86/selfguided_sse4.c index 260faa8c9..e2e4f51c3 100644 --- a/third_party/aom/av1/common/x86/selfguided_sse4.c +++ b/third_party/aom/av1/common/x86/selfguided_sse4.c @@ -16,8 +16,8 @@ static void calc_block(__m128i sum, __m128i sum_sq, __m128i n, if (bit_depth > 8) { __m128i rounding_a = _mm_set1_epi32((1 << (2 * (bit_depth - 8))) >> 1); __m128i rounding_b = _mm_set1_epi32((1 << (bit_depth - 8)) >> 1); - __m128i shift_a = _mm_set_epi64x(0, 2 * (bit_depth - 8)); - __m128i shift_b = _mm_set_epi64x(0, bit_depth - 8); + __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8)); + __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8); a = _mm_srl_epi32(_mm_add_epi32(sum_sq, rounding_a), shift_a); b = _mm_srl_epi32(_mm_add_epi32(sum, rounding_b), shift_b); a = _mm_mullo_epi32(a, n); -- cgit v1.2.3 From ec910d81405c736a4490383a250299a7837c2e64 Mon Sep 17 00:00:00 2001 From: trav90 Date: Thu, 18 Oct 2018 21:53:44 -0500 Subject: Update aom to commit id e87fb2378f01103d5d6e477a4ef6892dc714e614 --- third_party/aom/av1/common/x86/selfguided_sse4.c | 176 ++++++++++++----------- 1 file changed, 95 insertions(+), 81 deletions(-) (limited to 'third_party/aom/av1/common/x86/selfguided_sse4.c') diff --git a/third_party/aom/av1/common/x86/selfguided_sse4.c b/third_party/aom/av1/common/x86/selfguided_sse4.c index e2e4f51c3..4006b8518 100644 --- a/third_party/aom/av1/common/x86/selfguided_sse4.c +++ b/third_party/aom/av1/common/x86/selfguided_sse4.c @@ -3,6 +3,7 @@ #include "./aom_config.h" #include "./av1_rtcd.h" #include "av1/common/restoration.h" +#include "aom_dsp/x86/synonyms.h" /* Calculate four consecutive entries of the intermediate A and B arrays (corresponding to the first loop in the C version of @@ -71,8 +72,8 @@ static void selfguided_restoration_1_v(uint8_t *src, int width, int height, __m128i a, b, x, y, x2, y2; __m128i sum, sum_sq, tmp; - a = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[j])); - b = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[src_stride + j])); + a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j])); + b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j])); sum = _mm_cvtepi16_epi32(_mm_add_epi16(a, b)); tmp = _mm_unpacklo_epi16(a, b); @@ -81,7 +82,7 @@ static void selfguided_restoration_1_v(uint8_t *src, int width, int height, 
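The scalar tail loop of apply_selfguided_restoration_highbd_sse4_1 above spells out, one pixel at a time, the projection that the vectorised batch performs: the source sample is lifted to filter precision, the two self-guided filter outputs are taken as deviations from it, blended with the weights xq[0] and xq[1], then rounded back down and clamped. The sketch below restates that arithmetic as a standalone helper. It is an illustration only: project_pixel is a hypothetical name, and the two #defines merely assume the usual values of SGRPROJ_RST_BITS and SGRPROJ_PRJ_BITS so that the snippet compiles on its own.

#include <stdint.h>

/* Assumed stand-ins for SGRPROJ_RST_BITS and SGRPROJ_PRJ_BITS from
   restoration.h; defined locally so the sketch is self-contained. */
#define SKETCH_RST_BITS 4
#define SKETCH_PRJ_BITS 7

/* Round v to the nearest integer after dropping 'bits' fractional bits. */
static int32_t round_shift32(int32_t v, int bits) {
  return (v + (1 << (bits - 1))) >> bits;
}

/* Blend one source sample with the two self-guided filter outputs flt1/flt2
   using the projection weights xq0/xq1, then clamp to [0, 2^bit_depth). */
static uint16_t project_pixel(uint16_t src, int32_t flt1, int32_t flt2,
                              int xq0, int xq1, int bit_depth) {
  const int32_t u = (int32_t)src << SKETCH_RST_BITS;  /* filter precision */
  const int32_t f1 = flt1 - u;  /* deviation of the first filter output */
  const int32_t f2 = flt2 - u;  /* deviation of the second filter output */
  const int32_t v = xq0 * f1 + xq1 * f2 + (u << SKETCH_PRJ_BITS);
  const int32_t w = round_shift32(v, SKETCH_PRJ_BITS + SKETCH_RST_BITS);
  const int32_t max = (1 << bit_depth) - 1;
  return (uint16_t)(w < 0 ? 0 : (w > max ? max : w));
}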
_mm_store_si128((__m128i *)&B[j], sum); _mm_store_si128((__m128i *)&A[j], sum_sq); - x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[2 * src_stride + j])); + x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[2 * src_stride + j])); sum = _mm_add_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_add_epi32(sum_sq, x2); @@ -91,9 +92,9 @@ static void selfguided_restoration_1_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); x = _mm_cvtepu8_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j])); y = _mm_cvtepu8_epi32( - _mm_loadl_epi64((__m128i *)&src[(i + 2) * src_stride + j])); + xx_loadl_32((__m128i *)&src[(i + 2) * src_stride + j])); sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); @@ -106,7 +107,7 @@ static void selfguided_restoration_1_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); x = _mm_cvtepu8_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j])); sum = _mm_sub_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_sub_epi32(sum_sq, x2); @@ -242,9 +243,9 @@ static void selfguided_restoration_2_v(uint8_t *src, int width, int height, __m128i a, b, c, c2, x, y, x2, y2; __m128i sum, sum_sq, tmp; - a = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[j])); - b = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[src_stride + j])); - c = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[2 * src_stride + j])); + a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j])); + b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j])); + c = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[2 * src_stride + j])); sum = _mm_cvtepi16_epi32(_mm_add_epi16(_mm_add_epi16(a, b), c)); // Important: Since c may be up to 2^8, the result on squaring may @@ -256,7 +257,7 @@ static void selfguided_restoration_2_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&B[j], sum); _mm_store_si128((__m128i *)&A[j], sum_sq); - x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[3 * src_stride + j])); + x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[3 * src_stride + j])); sum = _mm_add_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_add_epi32(sum_sq, x2); @@ -264,7 +265,7 @@ static void selfguided_restoration_2_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&B[buf_stride + j], sum); _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); - x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[4 * src_stride + j])); + x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[4 * src_stride + j])); sum = _mm_add_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_add_epi32(sum_sq, x2); @@ -289,7 +290,7 @@ static void selfguided_restoration_2_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); x = _mm_cvtepu8_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j])); + xx_loadl_32((__m128i *)&src[(i - 2) * src_stride + j])); sum = _mm_sub_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_sub_epi32(sum_sq, x2); @@ -298,7 +299,7 @@ static void selfguided_restoration_2_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); x = _mm_cvtepu8_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j])); sum = _mm_sub_epi32(sum, x); x2 = 
_mm_mullo_epi32(x, x); sum_sq = _mm_sub_epi32(sum_sq, x2); @@ -443,10 +444,10 @@ static void selfguided_restoration_3_v(uint8_t *src, int width, int height, __m128i a, b, c, d, x, y, x2, y2; __m128i sum, sum_sq, tmp, tmp2; - a = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[j])); - b = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[src_stride + j])); - c = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[2 * src_stride + j])); - d = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&src[3 * src_stride + j])); + a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j])); + b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j])); + c = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[2 * src_stride + j])); + d = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[3 * src_stride + j])); sum = _mm_cvtepi16_epi32( _mm_add_epi16(_mm_add_epi16(a, b), _mm_add_epi16(c, d))); @@ -458,7 +459,7 @@ static void selfguided_restoration_3_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&B[j], sum); _mm_store_si128((__m128i *)&A[j], sum_sq); - x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[4 * src_stride + j])); + x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[4 * src_stride + j])); sum = _mm_add_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_add_epi32(sum_sq, x2); @@ -466,7 +467,7 @@ static void selfguided_restoration_3_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&B[buf_stride + j], sum); _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); - x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[5 * src_stride + j])); + x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[5 * src_stride + j])); sum = _mm_add_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_add_epi32(sum_sq, x2); @@ -474,7 +475,7 @@ static void selfguided_restoration_3_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&B[2 * buf_stride + j], sum); _mm_store_si128((__m128i *)&A[2 * buf_stride + j], sum_sq); - x = _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i *)&src[6 * src_stride + j])); + x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[6 * src_stride + j])); sum = _mm_add_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_add_epi32(sum_sq, x2); @@ -483,10 +484,8 @@ static void selfguided_restoration_3_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); - x = _mm_cvtepu8_epi32( - _mm_cvtsi32_si128(*((int *)&src[(i - 3) * src_stride + j]))); - y = _mm_cvtepu8_epi32( - _mm_cvtsi32_si128(*((int *)&src[(i + 4) * src_stride + j]))); + x = _mm_cvtepu8_epi32(xx_loadl_32(&src[(i - 3) * src_stride + j])); + y = _mm_cvtepu8_epi32(xx_loadl_32(&src[(i + 4) * src_stride + j])); sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); @@ -499,7 +498,7 @@ static void selfguided_restoration_3_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); x = _mm_cvtepu8_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 3) * src_stride + j])); + xx_loadl_32((__m128i *)&src[(i - 3) * src_stride + j])); sum = _mm_sub_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_sub_epi32(sum_sq, x2); @@ -508,7 +507,7 @@ static void selfguided_restoration_3_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); x = _mm_cvtepu8_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j])); + xx_loadl_32((__m128i *)&src[(i - 2) * src_stride + j])); sum = _mm_sub_epi32(sum, x); x2 = _mm_mullo_epi32(x, 
x); sum_sq = _mm_sub_epi32(sum_sq, x2); @@ -517,7 +516,7 @@ static void selfguided_restoration_3_v(uint8_t *src, int width, int height, _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq); x = _mm_cvtepu8_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); + xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j])); sum = _mm_sub_epi32(sum, x); x2 = _mm_mullo_epi32(x, x); sum_sq = _mm_sub_epi32(sum_sq, x2); @@ -664,38 +663,48 @@ static void selfguided_restoration_3_h(int32_t *A, int32_t *B, int width, } void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, - int stride, int32_t *dst, int dst_stride, - int r, int eps, int32_t *tmpbuf) { - int32_t *A = tmpbuf; - int32_t *B = A + SGRPROJ_OUTBUF_SIZE; + int dgd_stride, int32_t *dst, + int dst_stride, int r, int eps) { + const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; + const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; + int32_t A_[RESTORATION_PROC_UNIT_PELS]; + int32_t B_[RESTORATION_PROC_UNIT_PELS]; + int32_t *A = A_; + int32_t *B = B_; int i, j; // Adjusting the stride of A and B here appears to avoid bad cache effects, // leading to a significant speed improvement. // We also align the stride to a multiple of 16 bytes for efficiency. - int buf_stride = ((width + 3) & ~3) + 16; + int buf_stride = ((width_ext + 3) & ~3) + 16; // Don't filter tiles with dimensions < 5 on any axis if ((width < 5) || (height < 5)) return; + uint8_t *dgd0 = dgd - dgd_stride * SGRPROJ_BORDER_VERT - SGRPROJ_BORDER_HORZ; if (r == 1) { - selfguided_restoration_1_v(dgd, width, height, stride, A, B, buf_stride); - selfguided_restoration_1_h(A, B, width, height, buf_stride, eps, 8); + selfguided_restoration_1_v(dgd0, width_ext, height_ext, dgd_stride, A, B, + buf_stride); + selfguided_restoration_1_h(A, B, width_ext, height_ext, buf_stride, eps, 8); } else if (r == 2) { - selfguided_restoration_2_v(dgd, width, height, stride, A, B, buf_stride); - selfguided_restoration_2_h(A, B, width, height, buf_stride, eps, 8); + selfguided_restoration_2_v(dgd0, width_ext, height_ext, dgd_stride, A, B, + buf_stride); + selfguided_restoration_2_h(A, B, width_ext, height_ext, buf_stride, eps, 8); } else if (r == 3) { - selfguided_restoration_3_v(dgd, width, height, stride, A, B, buf_stride); - selfguided_restoration_3_h(A, B, width, height, buf_stride, eps, 8); + selfguided_restoration_3_v(dgd0, width_ext, height_ext, dgd_stride, A, B, + buf_stride); + selfguided_restoration_3_h(A, B, width_ext, height_ext, buf_stride, eps, 8); } else { assert(0); } + A += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; { i = 0; j = 0; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k + buf_stride] + @@ -707,7 +716,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, } for (j = 1; j < width - 1; ++j) { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k + buf_stride] + @@ -720,7 +729,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, j = width - 1; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; 
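The restructuring above moves the A and B intermediates onto the stack and computes them over a tile extended by SGRPROJ_BORDER_VERT / SGRPROJ_BORDER_HORZ on each side, rounds the row stride up to a multiple of 4 plus an extra pad, and then advances the A and B pointers so that index 0 lands on the first visible sample. The sketch below shows the same layout arithmetic in isolation, using a heap allocation instead of the fixed-size stack arrays; alloc_padded and the SKETCH_BORDER_* constants are illustrative stand-ins, not values taken from restoration.h.

#include <stdint.h>
#include <stdlib.h>

/* Stand-ins for SGRPROJ_BORDER_VERT / SGRPROJ_BORDER_HORZ; the real values
   live in restoration.h and are only assumed here. */
#define SKETCH_BORDER_VERT 3
#define SKETCH_BORDER_HORZ 3

/* Lay out an intermediate buffer the same way as A_/B_ above: the visible
   width is padded by the borders, the row stride is rounded up to a multiple
   of 4 (plus an extra pad, mirroring the "+ 16" used above for cache
   behaviour), and the returned pointer is advanced past the border so that
   p[0] is the top-left visible entry while p[-1], p[-stride], ... still fall
   inside the allocation.  The caller frees *base. */
static int32_t *alloc_padded(int width, int height, int *stride,
                             int32_t **base) {
  const int width_ext = width + 2 * SKETCH_BORDER_HORZ;
  const int height_ext = height + 2 * SKETCH_BORDER_VERT;
  *stride = ((width_ext + 3) & ~3) + 16;
  *base = (int32_t *)calloc((size_t)*stride * height_ext, sizeof(**base));
  if (!*base) return NULL;
  return *base + SKETCH_BORDER_VERT * *stride + SKETCH_BORDER_HORZ;
}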
const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k + buf_stride] + @@ -735,7 +744,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, j = 0; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + @@ -751,7 +760,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, // Vectorize the innermost loop for (j = 1; j < width - 1; j += 4) { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 5; @@ -804,7 +813,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, // (typically have 2 such pixels, but may have anywhere between 0 and 3) for (; j < width - 1; ++j) { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 5; const int32_t a = @@ -826,7 +835,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, j = width - 1; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + @@ -845,7 +854,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, j = 0; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k - buf_stride] + @@ -857,7 +866,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, } for (j = 1; j < width - 1; ++j) { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k - buf_stride] + @@ -870,7 +879,7 @@ void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, j = width - 1; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k - buf_stride] + @@ -1051,7 +1060,6 @@ void apply_selfguided_restoration_sse4_1(uint8_t *dat, int width, int height, int xq[2]; int32_t *flt1 = tmpbuf; int32_t *flt2 = flt1 + RESTORATION_TILEPELS_MAX; - int32_t *tmpbuf2 = flt2 + RESTORATION_TILEPELS_MAX; int i, j; assert(width * height <= RESTORATION_TILEPELS_MAX); #if USE_HIGHPASS_IN_SGRPROJ @@ -1059,12 +1067,10 @@ void apply_selfguided_restoration_sse4_1(uint8_t *dat, int width, int height, sgr_params[eps].corner, sgr_params[eps].edge); #else av1_selfguided_restoration_sse4_1(dat, width, height, stride, flt1, width, - sgr_params[eps].r1, sgr_params[eps].e1, - tmpbuf2); + sgr_params[eps].r1, sgr_params[eps].e1); #endif // USE_HIGHPASS_IN_SGRPROJ av1_selfguided_restoration_sse4_1(dat, width, height, stride, flt2, width, - sgr_params[eps].r2, sgr_params[eps].e2, - tmpbuf2); + sgr_params[eps].r2, sgr_params[eps].e2); decode_xq(xqd, xq); __m128i xq0 = _mm_set1_epi32(xq[0]); @@ -1364,43 +1370,52 @@ static void highbd_selfguided_restoration_3_v(uint16_t *src, int width, } void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, - int height, int 
stride, + int height, int dgd_stride, int32_t *dst, int dst_stride, - int bit_depth, int r, int eps, - int32_t *tmpbuf) { - int32_t *A = tmpbuf; - int32_t *B = A + SGRPROJ_OUTBUF_SIZE; + int bit_depth, int r, int eps) { + const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; + const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; + int32_t A_[RESTORATION_PROC_UNIT_PELS]; + int32_t B_[RESTORATION_PROC_UNIT_PELS]; + int32_t *A = A_; + int32_t *B = B_; int i, j; // Adjusting the stride of A and B here appears to avoid bad cache effects, // leading to a significant speed improvement. // We also align the stride to a multiple of 16 bytes for efficiency. - int buf_stride = ((width + 3) & ~3) + 16; + int buf_stride = ((width_ext + 3) & ~3) + 16; // Don't filter tiles with dimensions < 5 on any axis if ((width < 5) || (height < 5)) return; + uint16_t *dgd0 = dgd - dgd_stride * SGRPROJ_BORDER_VERT - SGRPROJ_BORDER_HORZ; if (r == 1) { - highbd_selfguided_restoration_1_v(dgd, width, height, stride, A, B, - buf_stride); - selfguided_restoration_1_h(A, B, width, height, buf_stride, eps, bit_depth); + highbd_selfguided_restoration_1_v(dgd0, width_ext, height_ext, dgd_stride, + A, B, buf_stride); + selfguided_restoration_1_h(A, B, width_ext, height_ext, buf_stride, eps, + bit_depth); } else if (r == 2) { - highbd_selfguided_restoration_2_v(dgd, width, height, stride, A, B, - buf_stride); - selfguided_restoration_2_h(A, B, width, height, buf_stride, eps, bit_depth); + highbd_selfguided_restoration_2_v(dgd0, width_ext, height_ext, dgd_stride, + A, B, buf_stride); + selfguided_restoration_2_h(A, B, width_ext, height_ext, buf_stride, eps, + bit_depth); } else if (r == 3) { - highbd_selfguided_restoration_3_v(dgd, width, height, stride, A, B, - buf_stride); - selfguided_restoration_3_h(A, B, width, height, buf_stride, eps, bit_depth); + highbd_selfguided_restoration_3_v(dgd0, width_ext, height_ext, dgd_stride, + A, B, buf_stride); + selfguided_restoration_3_h(A, B, width_ext, height_ext, buf_stride, eps, + bit_depth); } else { assert(0); } + A += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; + B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; { i = 0; j = 0; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k + buf_stride] + @@ -1412,7 +1427,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, } for (j = 1; j < width - 1; ++j) { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k + buf_stride] + @@ -1425,7 +1440,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, j = width - 1; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k + buf_stride] + @@ -1440,7 +1455,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, j = 0; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + @@ -1456,7 +1471,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, // Vectorize the 
innermost loop for (j = 1; j < width - 1; j += 4) { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 5; @@ -1509,7 +1524,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, // (typically have 2 such pixels, but may have anywhere between 0 and 3) for (; j < width - 1; ++j) { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 5; const int32_t a = @@ -1531,7 +1546,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, j = width - 1; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + @@ -1550,7 +1565,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, j = 0; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k - buf_stride] + @@ -1562,7 +1577,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, } for (j = 1; j < width - 1; ++j) { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k - buf_stride] + @@ -1575,7 +1590,7 @@ void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, j = width - 1; { const int k = i * buf_stride + j; - const int l = i * stride + j; + const int l = i * dgd_stride + j; const int m = i * dst_stride + j; const int nb = 3; const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k - buf_stride] + @@ -1725,7 +1740,6 @@ void apply_selfguided_restoration_highbd_sse4_1( int xq[2]; int32_t *flt1 = tmpbuf; int32_t *flt2 = flt1 + RESTORATION_TILEPELS_MAX; - int32_t *tmpbuf2 = flt2 + RESTORATION_TILEPELS_MAX; int i, j; assert(width * height <= RESTORATION_TILEPELS_MAX); #if USE_HIGHPASS_IN_SGRPROJ @@ -1735,11 +1749,11 @@ void apply_selfguided_restoration_highbd_sse4_1( #else av1_selfguided_restoration_highbd_sse4_1(dat, width, height, stride, flt1, width, bit_depth, sgr_params[eps].r1, - sgr_params[eps].e1, tmpbuf2); + sgr_params[eps].e1); #endif // USE_HIGHPASS_IN_SGRPROJ av1_selfguided_restoration_highbd_sse4_1(dat, width, height, stride, flt2, width, bit_depth, sgr_params[eps].r2, - sgr_params[eps].e2, tmpbuf2); + sgr_params[eps].e2); decode_xq(xqd, xq); __m128i xq0 = _mm_set1_epi32(xq[0]); -- cgit v1.2.3 From 125aff11b7587a55d5a94b1337e44cbc68655c0b Mon Sep 17 00:00:00 2001 From: trav90 Date: Thu, 18 Oct 2018 21:56:49 -0500 Subject: Fix aom compile errors with VS2015 Import BUG=aomedia:900 --- third_party/aom/av1/common/x86/selfguided_sse4.c | 30 +++++++++++++----------- 1 file changed, 16 insertions(+), 14 deletions(-) (limited to 'third_party/aom/av1/common/x86/selfguided_sse4.c') diff --git a/third_party/aom/av1/common/x86/selfguided_sse4.c b/third_party/aom/av1/common/x86/selfguided_sse4.c index 4006b8518..9de9177c1 100644 --- a/third_party/aom/av1/common/x86/selfguided_sse4.c +++ b/third_party/aom/av1/common/x86/selfguided_sse4.c @@ -10,9 +10,11 @@ av1_selfguided_restoration) */ static void calc_block(__m128i sum, __m128i sum_sq, __m128i n, - __m128i one_over_n, __m128i s, int bit_depth, int 
idx, - int32_t *A, int32_t *B) { + __m128i *one_over_n_, __m128i *s_, int bit_depth, + int idx, int32_t *A, int32_t *B) { __m128i a, b, p; + __m128i one_over_n = *one_over_n_; + __m128i s = *s_; #if CONFIG_HIGHBITDEPTH if (bit_depth > 8) { __m128i rounding_a = _mm_set1_epi32((1 << (2 * (bit_depth - 8))) >> 1); @@ -147,7 +149,7 @@ static void selfguided_restoration_1_h(int32_t *A, int32_t *B, int width, __m128i s = _mm_set_epi32( sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][2 * h - 1]); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride, A, + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride, A, B); n = _mm_set1_epi32(3 * h); @@ -178,8 +180,8 @@ static void selfguided_restoration_1_h(int32_t *A, int32_t *B, int width, _mm_alignr_epi8(b2, b1, 8))); sum_sq_ = _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), _mm_alignr_epi8(a2, a1, 8))); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, - A, B); + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, + i * buf_stride + j, A, B); } __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 3]); __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 3]); @@ -227,7 +229,7 @@ static void selfguided_restoration_1_h(int32_t *A, int32_t *B, int width, s = _mm_set_epi32( sgrproj_mtable[eps - 1][2 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1]); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j, A, B); } } @@ -342,7 +344,7 @@ static void selfguided_restoration_2_h(int32_t *A, int32_t *B, int width, __m128i s = _mm_set_epi32( sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1]); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride, A, + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride, A, B); // Re-align a1 and b1 so that they start at index i * buf_stride + 2 @@ -372,8 +374,8 @@ static void selfguided_restoration_2_h(int32_t *A, int32_t *B, int width, _mm_alignr_epi8(a2, a1, 8))), _mm_add_epi32(_mm_alignr_epi8(a2, a1, 12), a2)); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, - A, B); + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, + i * buf_stride + j, A, B); } // If the width is not a multiple of 4, we need to reset j to width - 4 // and adjust a1, a2, b1, b2 so that the loop invariant above is maintained @@ -428,7 +430,7 @@ static void selfguided_restoration_2_h(int32_t *A, int32_t *B, int width, s = _mm_set_epi32( sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1]); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j, A, B); } } @@ -562,7 +564,7 @@ static void selfguided_restoration_3_h(int32_t *A, int32_t *B, int width, __m128i s = _mm_set_epi32( sgrproj_mtable[eps - 1][7 * h - 1], sgrproj_mtable[eps - 1][6 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1]); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride, A, + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * 
buf_stride, A, B); // Re-align a1 and b1 so that they start at index i * buf_stride + 1 @@ -599,8 +601,8 @@ static void selfguided_restoration_3_h(int32_t *A, int32_t *B, int width, _mm_add_epi32(_mm_add_epi32(a2, _mm_alignr_epi8(a3, a2, 4)), _mm_alignr_epi8(a3, a2, 8))); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, - A, B); + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, + i * buf_stride + j, A, B); } __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 1]); __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 1]); @@ -657,7 +659,7 @@ static void selfguided_restoration_3_h(int32_t *A, int32_t *B, int width, s = _mm_set_epi32( sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][6 * h - 1], sgrproj_mtable[eps - 1][7 * h - 1]); - calc_block(sum_, sum_sq_, n, one_over_n, s, bit_depth, i * buf_stride + j, + calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j, A, B); } } -- cgit v1.2.3 From bbcc64772580c8a979288791afa02d30bc476d2e Mon Sep 17 00:00:00 2001 From: trav90 Date: Fri, 19 Oct 2018 21:52:15 -0500 Subject: Update aom to v1.0.0 Update aom to commit id d14c5bb4f336ef1842046089849dee4a301fbbf0. --- third_party/aom/av1/common/x86/selfguided_sse4.c | 2254 ++++++---------------- 1 file changed, 538 insertions(+), 1716 deletions(-) (limited to 'third_party/aom/av1/common/x86/selfguided_sse4.c') diff --git a/third_party/aom/av1/common/x86/selfguided_sse4.c b/third_party/aom/av1/common/x86/selfguided_sse4.c index 9de9177c1..a42c94028 100644 --- a/third_party/aom/av1/common/x86/selfguided_sse4.c +++ b/third_party/aom/av1/common/x86/selfguided_sse4.c @@ -1,1821 +1,643 @@ #include -#include "./aom_config.h" -#include "./av1_rtcd.h" +#include "config/aom_config.h" +#include "config/av1_rtcd.h" + #include "av1/common/restoration.h" #include "aom_dsp/x86/synonyms.h" -/* Calculate four consecutive entries of the intermediate A and B arrays - (corresponding to the first loop in the C version of - av1_selfguided_restoration) -*/ -static void calc_block(__m128i sum, __m128i sum_sq, __m128i n, - __m128i *one_over_n_, __m128i *s_, int bit_depth, - int idx, int32_t *A, int32_t *B) { - __m128i a, b, p; - __m128i one_over_n = *one_over_n_; - __m128i s = *s_; -#if CONFIG_HIGHBITDEPTH - if (bit_depth > 8) { - __m128i rounding_a = _mm_set1_epi32((1 << (2 * (bit_depth - 8))) >> 1); - __m128i rounding_b = _mm_set1_epi32((1 << (bit_depth - 8)) >> 1); - __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8)); - __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8); - a = _mm_srl_epi32(_mm_add_epi32(sum_sq, rounding_a), shift_a); - b = _mm_srl_epi32(_mm_add_epi32(sum, rounding_b), shift_b); - a = _mm_mullo_epi32(a, n); - b = _mm_mullo_epi32(b, b); - p = _mm_sub_epi32(_mm_max_epi32(a, b), b); - } else { -#endif - (void)bit_depth; - a = _mm_mullo_epi32(sum_sq, n); - b = _mm_mullo_epi32(sum, sum); - p = _mm_sub_epi32(a, b); -#if CONFIG_HIGHBITDEPTH - } -#endif - - __m128i rounding_z = _mm_set1_epi32((1 << SGRPROJ_MTABLE_BITS) >> 1); - __m128i z = _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rounding_z), - SGRPROJ_MTABLE_BITS); - z = _mm_min_epi32(z, _mm_set1_epi32(255)); - - // 'Gather' type instructions are not available pre-AVX2, so synthesize a - // gather using scalar loads. 
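The comment here (and the matching one kept in calc_ab further down) describes the standard workaround for the lack of a vector gather before AVX2: pull each lane index out with _mm_extract_epi32, perform four ordinary scalar table loads, and repack the results with _mm_set_epi32. In isolation the pattern looks like the sketch below; gather4_epi32 is a hypothetical helper name, and on AVX2 the same lookup could instead be done with _mm_i32gather_epi32.

#include <smmintrin.h>  /* SSE4.1: _mm_extract_epi32 */
#include <stdint.h>

/* Four-lane table lookup without a hardware gather: extract each 32-bit
   index, load the corresponding table entry with a scalar load, and pack
   the four results back into one vector.  Note that _mm_set_epi32 takes its
   arguments from the highest lane down to the lowest. */
static __m128i gather4_epi32(const int32_t *table, __m128i idx) {
  return _mm_set_epi32(table[_mm_extract_epi32(idx, 3)],
                       table[_mm_extract_epi32(idx, 2)],
                       table[_mm_extract_epi32(idx, 1)],
                       table[_mm_extract_epi32(idx, 0)]);
}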
- __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)], - x_by_xplus1[_mm_extract_epi32(z, 2)], - x_by_xplus1[_mm_extract_epi32(z, 1)], - x_by_xplus1[_mm_extract_epi32(z, 0)]); - - _mm_storeu_si128((__m128i *)&A[idx], a_res); - - __m128i rounding_res = _mm_set1_epi32((1 << SGRPROJ_RECIP_BITS) >> 1); - __m128i a_complement = _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res); - __m128i b_int = - _mm_mullo_epi32(a_complement, _mm_mullo_epi32(sum, one_over_n)); - __m128i b_res = - _mm_srli_epi32(_mm_add_epi32(b_int, rounding_res), SGRPROJ_RECIP_BITS); - - _mm_storeu_si128((__m128i *)&B[idx], b_res); +// Load 4 bytes from the possibly-misaligned pointer p, extend each byte to +// 32-bit precision and return them in an SSE register. +static __m128i xx_load_extend_8_32(const void *p) { + return _mm_cvtepu8_epi32(xx_loadl_32(p)); } -static void selfguided_restoration_1_v(uint8_t *src, int width, int height, - int src_stride, int32_t *A, int32_t *B, - int buf_stride) { - int i, j; - - // Vertical sum - // When the width is not a multiple of 4, we know that 'stride' is rounded up - // to a multiple of 4. So it is safe for this loop to calculate extra columns - // at the right-hand edge of the frame. - int width_extend = (width + 3) & ~3; - for (j = 0; j < width_extend; j += 4) { - __m128i a, b, x, y, x2, y2; - __m128i sum, sum_sq, tmp; - - a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j])); - b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j])); - - sum = _mm_cvtepi16_epi32(_mm_add_epi16(a, b)); - tmp = _mm_unpacklo_epi16(a, b); - sum_sq = _mm_madd_epi16(tmp, tmp); - - _mm_store_si128((__m128i *)&B[j], sum); - _mm_store_si128((__m128i *)&A[j], sum_sq); - - x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[2 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); - - for (i = 1; i < height - 2; ++i) { - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); - - x = _mm_cvtepu8_epi32( - xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j])); - y = _mm_cvtepu8_epi32( - xx_loadl_32((__m128i *)&src[(i + 2) * src_stride + j])); - - sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); - - x2 = _mm_mullo_epi32(x, x); - y2 = _mm_mullo_epi32(y, y); - - sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); - } - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); - - x = _mm_cvtepu8_epi32( - xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); +// Load 4 halfwords from the possibly-misaligned pointer p, extend each +// halfword to 32-bit precision and return them in an SSE register. +static __m128i xx_load_extend_16_32(const void *p) { + return _mm_cvtepu16_epi32(xx_loadl_64(p)); +} - _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); - } +// Compute the scan of an SSE register holding 4 32-bit integers. 
If the +// register holds x0..x3 then the scan will hold x0, x0+x1, x0+x1+x2, +// x0+x1+x2+x3 +static __m128i scan_32(__m128i x) { + const __m128i x01 = _mm_add_epi32(x, _mm_slli_si128(x, 4)); + return _mm_add_epi32(x01, _mm_slli_si128(x01, 8)); } -static void selfguided_restoration_1_h(int32_t *A, int32_t *B, int width, - int height, int buf_stride, int eps, - int bit_depth) { - int i, j; - - // Horizontal sum - int width_extend = (width + 3) & ~3; - for (i = 0; i < height; ++i) { - int h = AOMMIN(2, height - i) + AOMMIN(1, i); - - __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]); - __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]); - __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]); - __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]); - - // Note: The _mm_slli_si128 call sets up a register containing - // {0, A[i * buf_stride], ..., A[i * buf_stride + 2]}, - // so that the first element of 'sum' (which should only add two values - // together) ends up calculated correctly. - __m128i sum_ = _mm_add_epi32(_mm_slli_si128(b1, 4), - _mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4))); - __m128i sum_sq_ = _mm_add_epi32( - _mm_slli_si128(a1, 4), _mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4))); - __m128i n = _mm_set_epi32(3 * h, 3 * h, 3 * h, 2 * h); - __m128i one_over_n = - _mm_set_epi32(one_by_x[3 * h - 1], one_by_x[3 * h - 1], - one_by_x[3 * h - 1], one_by_x[2 * h - 1]); - __m128i s = _mm_set_epi32( - sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1], - sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][2 * h - 1]); - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride, A, - B); - - n = _mm_set1_epi32(3 * h); - one_over_n = _mm_set1_epi32(one_by_x[3 * h - 1]); - s = _mm_set1_epi32(sgrproj_mtable[eps - 1][3 * h - 1]); - - // Re-align a1 and b1 so that they start at index i * buf_stride + 3 - a2 = _mm_alignr_epi8(a2, a1, 12); - b2 = _mm_alignr_epi8(b2, b1, 12); - - // Note: When the width is not a multiple of 4, this loop may end up - // writing to the last 4 columns of the frame, potentially with incorrect - // values (especially for r=2 and r=3). - // This is fine, since we fix up those values in the block after this - // loop, and in exchange we never have more than four values to - // write / fix up after this loop finishes. 
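scan_32 above computes an in-register inclusive prefix sum with two shift-and-add steps: adding the vector shifted up by one lane gives each lane x[k] + x[k-1], and adding that partial result shifted up by two lanes gives the full running total. The self-contained check below repeats the same two steps (scan4 is just a local copy of the trick, since scan_32 is static to this file) and prints 1 3 6 10 for input lanes 1, 2, 3, 4.

#include <smmintrin.h>  /* only SSE2 operations are actually used here */
#include <stdint.h>
#include <stdio.h>

/* Same shift-and-add scan as scan_32 above: after the first add, lane k
   holds x[k] + x[k-1]; after the second, lane k holds x[0] + ... + x[k]. */
static __m128i scan4(__m128i x) {
  const __m128i x01 = _mm_add_epi32(x, _mm_slli_si128(x, 4));
  return _mm_add_epi32(x01, _mm_slli_si128(x01, 8));
}

int main(void) {
  const __m128i x = _mm_set_epi32(4, 3, 2, 1);  /* lanes 0..3 = 1, 2, 3, 4 */
  int32_t out[4];
  _mm_storeu_si128((__m128i *)out, scan4(x));
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 1 3 6 10 */
  return 0;
}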
- for (j = 4; j < width_extend - 4; j += 4) { - a1 = a2; - b1 = b2; - a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 3]); - b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 3]); - /* Loop invariant: At this point, - a1 = original A[i * buf_stride + j - 1 : i * buf_stride + j + 3] - a2 = original A[i * buf_stride + j + 3 : i * buf_stride + j + 7] - and similar for b1,b2 and B - */ - sum_ = _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), - _mm_alignr_epi8(b2, b1, 8))); - sum_sq_ = _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), - _mm_alignr_epi8(a2, a1, 8))); - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, - i * buf_stride + j, A, B); - } - __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 3]); - __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 3]); - - j = width - 4; - switch (width % 4) { - case 0: - a1 = a2; - b1 = b2; - a2 = a3; - b2 = b3; - break; - case 1: - a1 = _mm_alignr_epi8(a2, a1, 4); - b1 = _mm_alignr_epi8(b2, b1, 4); - a2 = _mm_alignr_epi8(a3, a2, 4); - b2 = _mm_alignr_epi8(b3, b2, 4); - break; - case 2: - a1 = _mm_alignr_epi8(a2, a1, 8); - b1 = _mm_alignr_epi8(b2, b1, 8); - a2 = _mm_alignr_epi8(a3, a2, 8); - b2 = _mm_alignr_epi8(b3, b2, 8); - break; - case 3: - a1 = _mm_alignr_epi8(a2, a1, 12); - b1 = _mm_alignr_epi8(b2, b1, 12); - a2 = _mm_alignr_epi8(a3, a2, 12); - b2 = _mm_alignr_epi8(b3, b2, 12); - break; +// Compute two integral images from src. B sums elements; A sums their +// squares. The images are offset by one pixel, so will have width and height +// equal to width + 1, height + 1 and the first row and column will be zero. +// +// A+1 and B+1 should be aligned to 16 bytes. buf_stride should be a multiple +// of 4. +static void integral_images(const uint8_t *src, int src_stride, int width, + int height, int32_t *A, int32_t *B, + int buf_stride) { + // Write out the zero top row + memset(A, 0, sizeof(*A) * (width + 1)); + memset(B, 0, sizeof(*B) * (width + 1)); + + const __m128i zero = _mm_setzero_si128(); + for (int i = 0; i < height; ++i) { + // Zero the left column. + A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0; + + // ldiff is the difference H - D where H is the output sample immediately + // to the left and D is the output sample above it. These are scalars, + // replicated across the four lanes. + __m128i ldiff1 = zero, ldiff2 = zero; + for (int j = 0; j < width; j += 4) { + const int ABj = 1 + j; + + const __m128i above1 = xx_load_128(B + ABj + i * buf_stride); + const __m128i above2 = xx_load_128(A + ABj + i * buf_stride); + + const __m128i x1 = xx_load_extend_8_32(src + j + i * src_stride); + const __m128i x2 = _mm_madd_epi16(x1, x1); + + const __m128i sc1 = scan_32(x1); + const __m128i sc2 = scan_32(x2); + + const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1); + const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2); + + xx_store_128(B + ABj + (i + 1) * buf_stride, row1); + xx_store_128(A + ABj + (i + 1) * buf_stride, row2); + + // Calculate the new H - D. 
+ ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff); + ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff); } - - // Zero out the data loaded from "off the edge" of the array - __m128i zero = _mm_setzero_si128(); - a2 = _mm_blend_epi16(a2, zero, 0xfc); - b2 = _mm_blend_epi16(b2, zero, 0xfc); - - sum_ = _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), - _mm_alignr_epi8(b2, b1, 8))); - sum_sq_ = _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), - _mm_alignr_epi8(a2, a1, 8))); - n = _mm_set_epi32(2 * h, 3 * h, 3 * h, 3 * h); - one_over_n = _mm_set_epi32(one_by_x[2 * h - 1], one_by_x[3 * h - 1], - one_by_x[3 * h - 1], one_by_x[3 * h - 1]); - s = _mm_set_epi32( - sgrproj_mtable[eps - 1][2 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1], - sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1]); - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j, - A, B); } } -static void selfguided_restoration_2_v(uint8_t *src, int width, int height, - int src_stride, int32_t *A, int32_t *B, - int buf_stride) { - int i, j; - - // Vertical sum - int width_extend = (width + 3) & ~3; - for (j = 0; j < width_extend; j += 4) { - __m128i a, b, c, c2, x, y, x2, y2; - __m128i sum, sum_sq, tmp; - - a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j])); - b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j])); - c = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[2 * src_stride + j])); +// Compute two integral images from src. B sums elements; A sums their squares +// +// A and B should be aligned to 16 bytes. buf_stride should be a multiple of 4. +static void integral_images_highbd(const uint16_t *src, int src_stride, + int width, int height, int32_t *A, + int32_t *B, int buf_stride) { + // Write out the zero top row + memset(A, 0, sizeof(*A) * (width + 1)); + memset(B, 0, sizeof(*B) * (width + 1)); - sum = _mm_cvtepi16_epi32(_mm_add_epi16(_mm_add_epi16(a, b), c)); - // Important: Since c may be up to 2^8, the result on squaring may - // be up to 2^16. So we need to zero-extend, not sign-extend. - c2 = _mm_cvtepu16_epi32(_mm_mullo_epi16(c, c)); - tmp = _mm_unpacklo_epi16(a, b); - sum_sq = _mm_add_epi32(_mm_madd_epi16(tmp, tmp), c2); + const __m128i zero = _mm_setzero_si128(); + for (int i = 0; i < height; ++i) { + // Zero the left column. + A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0; - _mm_store_si128((__m128i *)&B[j], sum); - _mm_store_si128((__m128i *)&A[j], sum_sq); + // ldiff is the difference H - D where H is the output sample immediately + // to the left and D is the output sample above it. These are scalars, + // replicated across the four lanes. 
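Both integral-image builders accumulate the same two running sums, one over the samples and one over their squares: within a row, the scan supplies the prefix sum of the current group of four, the row above is added from the already finished output, and the broadcast H - D difference is exactly the sum of the current input row to the left of the group. For a single scalar image this is the familiar summed-area recurrence, restated below for reference; integral_image_ref is an illustrative name, and applying it to x and to x*x produces the B and A planes respectively.

#include <stdint.h>

/* Scalar reference for the integral images built above: out has
   (width + 1) x (height + 1) entries with a zero first row and column, and
   out[(i + 1) * out_stride + (j + 1)] is the sum of src over rows 0..i and
   columns 0..j.  The vector version reaches the same values per 4-wide
   group as (in-row prefix sum) + (row above) + (running H - D correction). */
static void integral_image_ref(const uint8_t *src, int src_stride, int width,
                               int height, int32_t *out, int out_stride) {
  for (int j = 0; j <= width; ++j) out[j] = 0;
  for (int i = 0; i < height; ++i) {
    out[(i + 1) * out_stride] = 0;
    for (int j = 0; j < width; ++j) {
      const int32_t x = src[i * src_stride + j];
      out[(i + 1) * out_stride + (j + 1)] =
          x + out[i * out_stride + (j + 1)] + out[(i + 1) * out_stride + j] -
          out[i * out_stride + j];
    }
  }
}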
+ __m128i ldiff1 = zero, ldiff2 = zero; + for (int j = 0; j < width; j += 4) { + const int ABj = 1 + j; - x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[3 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); + const __m128i above1 = xx_load_128(B + ABj + i * buf_stride); + const __m128i above2 = xx_load_128(A + ABj + i * buf_stride); - _mm_store_si128((__m128i *)&B[buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); + const __m128i x1 = xx_load_extend_16_32(src + j + i * src_stride); + const __m128i x2 = _mm_madd_epi16(x1, x1); - x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[4 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); + const __m128i sc1 = scan_32(x1); + const __m128i sc2 = scan_32(x2); - for (i = 2; i < height - 3; ++i) { - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1); + const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2); - x = _mm_cvtepu8_epi32( - _mm_cvtsi32_si128(*((int *)&src[(i - 2) * src_stride + j]))); - y = _mm_cvtepu8_epi32( - _mm_cvtsi32_si128(*((int *)&src[(i + 3) * src_stride + j]))); + xx_store_128(B + ABj + (i + 1) * buf_stride, row1); + xx_store_128(A + ABj + (i + 1) * buf_stride, row2); - sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); - - x2 = _mm_mullo_epi32(x, x); - y2 = _mm_mullo_epi32(y, y); - - sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + // Calculate the new H - D. + ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff); + ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff); } - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); - - x = _mm_cvtepu8_epi32( - xx_loadl_32((__m128i *)&src[(i - 2) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); - - x = _mm_cvtepu8_epi32( - xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq); } } -static void selfguided_restoration_2_h(int32_t *A, int32_t *B, int width, - int height, int buf_stride, int eps, - int bit_depth) { - int i, j; - - // Horizontal sum - int width_extend = (width + 3) & ~3; - for (i = 0; i < height; ++i) { - int h = AOMMIN(3, height - i) + AOMMIN(2, i); - - __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]); - __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]); - __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]); - __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]); - - __m128i sum_ = _mm_add_epi32( - _mm_add_epi32( - _mm_add_epi32(_mm_slli_si128(b1, 8), _mm_slli_si128(b1, 4)), - _mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4))), - _mm_alignr_epi8(b2, b1, 8)); - __m128i sum_sq_ = _mm_add_epi32( - _mm_add_epi32( - _mm_add_epi32(_mm_slli_si128(a1, 8), _mm_slli_si128(a1, 4)), - _mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4))), - _mm_alignr_epi8(a2, a1, 8)); - - __m128i n = _mm_set_epi32(5 * h, 5 * h, 4 * 
h, 3 * h); - __m128i one_over_n = - _mm_set_epi32(one_by_x[5 * h - 1], one_by_x[5 * h - 1], - one_by_x[4 * h - 1], one_by_x[3 * h - 1]); - __m128i s = _mm_set_epi32( - sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1], - sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1]); - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride, A, - B); - - // Re-align a1 and b1 so that they start at index i * buf_stride + 2 - a2 = _mm_alignr_epi8(a2, a1, 8); - b2 = _mm_alignr_epi8(b2, b1, 8); - - n = _mm_set1_epi32(5 * h); - one_over_n = _mm_set1_epi32(one_by_x[5 * h - 1]); - s = _mm_set1_epi32(sgrproj_mtable[eps - 1][5 * h - 1]); - - for (j = 4; j < width_extend - 4; j += 4) { - a1 = a2; - a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 2]); - b1 = b2; - b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 2]); - /* Loop invariant: At this point, - a1 = original A[i * buf_stride + j - 2 : i * buf_stride + j + 2] - a2 = original A[i * buf_stride + j + 2 : i * buf_stride + j + 6] - and similar for b1,b2 and B - */ - sum_ = _mm_add_epi32( - _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), - _mm_alignr_epi8(b2, b1, 8))), - _mm_add_epi32(_mm_alignr_epi8(b2, b1, 12), b2)); - sum_sq_ = _mm_add_epi32( - _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), - _mm_alignr_epi8(a2, a1, 8))), - _mm_add_epi32(_mm_alignr_epi8(a2, a1, 12), a2)); - - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, - i * buf_stride + j, A, B); - } - // If the width is not a multiple of 4, we need to reset j to width - 4 - // and adjust a1, a2, b1, b2 so that the loop invariant above is maintained - __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 2]); - __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 2]); - - j = width - 4; - switch (width % 4) { - case 0: - a1 = a2; - b1 = b2; - a2 = a3; - b2 = b3; - break; - case 1: - a1 = _mm_alignr_epi8(a2, a1, 4); - b1 = _mm_alignr_epi8(b2, b1, 4); - a2 = _mm_alignr_epi8(a3, a2, 4); - b2 = _mm_alignr_epi8(b3, b2, 4); - break; - case 2: - a1 = _mm_alignr_epi8(a2, a1, 8); - b1 = _mm_alignr_epi8(b2, b1, 8); - a2 = _mm_alignr_epi8(a3, a2, 8); - b2 = _mm_alignr_epi8(b3, b2, 8); - break; - case 3: - a1 = _mm_alignr_epi8(a2, a1, 12); - b1 = _mm_alignr_epi8(b2, b1, 12); - a2 = _mm_alignr_epi8(a3, a2, 12); - b2 = _mm_alignr_epi8(b3, b2, 12); - break; - } - - // Zero out the data loaded from "off the edge" of the array - __m128i zero = _mm_setzero_si128(); - a2 = _mm_blend_epi16(a2, zero, 0xf0); - b2 = _mm_blend_epi16(b2, zero, 0xf0); - - sum_ = _mm_add_epi32( - _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), - _mm_alignr_epi8(b2, b1, 8))), - _mm_add_epi32(_mm_alignr_epi8(b2, b1, 12), b2)); - sum_sq_ = _mm_add_epi32( - _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), - _mm_alignr_epi8(a2, a1, 8))), - _mm_add_epi32(_mm_alignr_epi8(a2, a1, 12), a2)); - - n = _mm_set_epi32(3 * h, 4 * h, 5 * h, 5 * h); - one_over_n = _mm_set_epi32(one_by_x[3 * h - 1], one_by_x[4 * h - 1], - one_by_x[5 * h - 1], one_by_x[5 * h - 1]); - s = _mm_set_epi32( - sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1], - sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1]); - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j, - A, B); - } +// Compute 4 values of boxsum from the given integral image. ii should point +// at the middle of the box (for the first value). r is the box radius. 
+static INLINE __m128i boxsum_from_ii(const int32_t *ii, int stride, int r) { + const __m128i tl = xx_loadu_128(ii - (r + 1) - (r + 1) * stride); + const __m128i tr = xx_loadu_128(ii + (r + 0) - (r + 1) * stride); + const __m128i bl = xx_loadu_128(ii - (r + 1) + r * stride); + const __m128i br = xx_loadu_128(ii + (r + 0) + r * stride); + const __m128i u = _mm_sub_epi32(tr, tl); + const __m128i v = _mm_sub_epi32(br, bl); + return _mm_sub_epi32(v, u); } -static void selfguided_restoration_3_v(uint8_t *src, int width, int height, - int src_stride, int32_t *A, int32_t *B, - int buf_stride) { - int i, j; - - // Vertical sum over 7-pixel regions, 4 columns at a time - int width_extend = (width + 3) & ~3; - for (j = 0; j < width_extend; j += 4) { - __m128i a, b, c, d, x, y, x2, y2; - __m128i sum, sum_sq, tmp, tmp2; +static __m128i round_for_shift(unsigned shift) { + return _mm_set1_epi32((1 << shift) >> 1); +} - a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j])); - b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j])); - c = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[2 * src_stride + j])); - d = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[3 * src_stride + j])); +static __m128i compute_p(__m128i sum1, __m128i sum2, int bit_depth, int n) { + __m128i an, bb; + if (bit_depth > 8) { + const __m128i rounding_a = round_for_shift(2 * (bit_depth - 8)); + const __m128i rounding_b = round_for_shift(bit_depth - 8); + const __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8)); + const __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8); + const __m128i a = _mm_srl_epi32(_mm_add_epi32(sum2, rounding_a), shift_a); + const __m128i b = _mm_srl_epi32(_mm_add_epi32(sum1, rounding_b), shift_b); + // b < 2^14, so we can use a 16-bit madd rather than a 32-bit + // mullo to square it + bb = _mm_madd_epi16(b, b); + an = _mm_max_epi32(_mm_mullo_epi32(a, _mm_set1_epi32(n)), bb); + } else { + bb = _mm_madd_epi16(sum1, sum1); + an = _mm_mullo_epi32(sum2, _mm_set1_epi32(n)); + } + return _mm_sub_epi32(an, bb); +} - sum = _mm_cvtepi16_epi32( - _mm_add_epi16(_mm_add_epi16(a, b), _mm_add_epi16(c, d))); - tmp = _mm_unpacklo_epi16(a, b); - tmp2 = _mm_unpacklo_epi16(c, d); - sum_sq = - _mm_add_epi32(_mm_madd_epi16(tmp, tmp), _mm_madd_epi16(tmp2, tmp2)); +// Assumes that C, D are integral images for the original buffer which has been +// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels +// on the sides. A, B, C, D point at logical position (0, 0). 
+static void calc_ab(int32_t *A, int32_t *B, const int32_t *C, const int32_t *D, + int width, int height, int buf_stride, int bit_depth, + int sgr_params_idx, int radius_idx) { + const sgr_params_type *const params = &sgr_params[sgr_params_idx]; + const int r = params->r[radius_idx]; + const int n = (2 * r + 1) * (2 * r + 1); + const __m128i s = _mm_set1_epi32(params->s[radius_idx]); + // one_over_n[n-1] is 2^12/n, so easily fits in an int16 + const __m128i one_over_n = _mm_set1_epi32(one_by_x[n - 1]); + + const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS); + const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS); + + // Set up masks + const __m128i ones32 = _mm_set_epi32(0, 0, 0xffffffff, 0xffffffff); + __m128i mask[4]; + for (int idx = 0; idx < 4; idx++) { + const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx)); + mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift)); + } - _mm_store_si128((__m128i *)&B[j], sum); - _mm_store_si128((__m128i *)&A[j], sum_sq); + for (int i = -1; i < height + 1; ++i) { + for (int j = -1; j < width + 1; j += 4) { + const int32_t *Cij = C + i * buf_stride + j; + const int32_t *Dij = D + i * buf_stride + j; - x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[4 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); + __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r); + __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r); - _mm_store_si128((__m128i *)&B[buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); + // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain + // some uninitialised data in their upper words. We use a mask to + // ensure that these bits are set to 0. + int idx = AOMMIN(4, width + 1 - j); + assert(idx >= 1); - x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[5 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); + if (idx < 4) { + sum1 = _mm_and_si128(mask[idx], sum1); + sum2 = _mm_and_si128(mask[idx], sum2); + } - _mm_store_si128((__m128i *)&B[2 * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[2 * buf_stride + j], sum_sq); + const __m128i p = compute_p(sum1, sum2, bit_depth, n); - x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[6 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); + const __m128i z = _mm_min_epi32( + _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z), + SGRPROJ_MTABLE_BITS), + _mm_set1_epi32(255)); - for (i = 3; i < height - 4; ++i) { - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + // 'Gather' type instructions are not available pre-AVX2, so synthesize a + // gather using scalar loads. + const __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)], + x_by_xplus1[_mm_extract_epi32(z, 2)], + x_by_xplus1[_mm_extract_epi32(z, 1)], + x_by_xplus1[_mm_extract_epi32(z, 0)]); - x = _mm_cvtepu8_epi32(xx_loadl_32(&src[(i - 3) * src_stride + j])); - y = _mm_cvtepu8_epi32(xx_loadl_32(&src[(i + 4) * src_stride + j])); + xx_storeu_128(A + i * buf_stride + j, a_res); - sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); + const __m128i a_complement = + _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res); - x2 = _mm_mullo_epi32(x, x); - y2 = _mm_mullo_epi32(y, y); + // sum1 might have lanes greater than 2^15, so we can't use madd to do + // multiplication involving sum1. 
However, a_complement and one_over_n + // are both less than 256, so we can multiply them first. + const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n); + const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1); + const __m128i b_res = + _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS); - sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + xx_storeu_128(B + i * buf_stride + j, b_res); } - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); - - x = _mm_cvtepu8_epi32( - xx_loadl_32((__m128i *)&src[(i - 3) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); - - x = _mm_cvtepu8_epi32( - xx_loadl_32((__m128i *)&src[(i - 2) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq); - - x = _mm_cvtepu8_epi32( - xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 3) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 3) * buf_stride + j], sum_sq); } } -static void selfguided_restoration_3_h(int32_t *A, int32_t *B, int width, - int height, int buf_stride, int eps, - int bit_depth) { - int i, j; - // Horizontal sum over 7-pixel regions of dst - int width_extend = (width + 3) & ~3; - for (i = 0; i < height; ++i) { - int h = AOMMIN(4, height - i) + AOMMIN(3, i); - - __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]); - __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]); - __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]); - __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]); - - __m128i sum_ = _mm_add_epi32( - _mm_add_epi32( - _mm_add_epi32(_mm_slli_si128(b1, 12), _mm_slli_si128(b1, 8)), - _mm_add_epi32(_mm_slli_si128(b1, 4), b1)), - _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(b2, b1, 4), - _mm_alignr_epi8(b2, b1, 8)), - _mm_alignr_epi8(b2, b1, 12))); - __m128i sum_sq_ = _mm_add_epi32( - _mm_add_epi32( - _mm_add_epi32(_mm_slli_si128(a1, 12), _mm_slli_si128(a1, 8)), - _mm_add_epi32(_mm_slli_si128(a1, 4), a1)), - _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(a2, a1, 4), - _mm_alignr_epi8(a2, a1, 8)), - _mm_alignr_epi8(a2, a1, 12))); - - __m128i n = _mm_set_epi32(7 * h, 6 * h, 5 * h, 4 * h); - __m128i one_over_n = - _mm_set_epi32(one_by_x[7 * h - 1], one_by_x[6 * h - 1], - one_by_x[5 * h - 1], one_by_x[4 * h - 1]); - __m128i s = _mm_set_epi32( - sgrproj_mtable[eps - 1][7 * h - 1], sgrproj_mtable[eps - 1][6 * h - 1], - sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1]); - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride, A, - B); - - // Re-align a1 and b1 so that they start at index i * buf_stride + 1 - a2 = _mm_alignr_epi8(a2, a1, 4); - b2 = _mm_alignr_epi8(b2, b1, 4); - - n = _mm_set1_epi32(7 * h); - one_over_n = _mm_set1_epi32(one_by_x[7 * h - 1]); - s = _mm_set1_epi32(sgrproj_mtable[eps - 1][7 * h - 1]); - - for (j = 4; j < width_extend - 4; j += 4) { - a1 = a2; - a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 1]); - b1 = b2; - b2 = _mm_loadu_si128((__m128i 
*)&B[i * buf_stride + j + 1]); - __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 5]); - __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 5]); - /* Loop invariant: At this point, - a1 = original A[i * buf_stride + j - 3 : i * buf_stride + j + 1] - a2 = original A[i * buf_stride + j + 1 : i * buf_stride + j + 5] - a3 = original A[i * buf_stride + j + 5 : i * buf_stride + j + 9] - and similar for b1,b2,b3 and B - */ - sum_ = _mm_add_epi32( - _mm_add_epi32(_mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4)), - _mm_add_epi32(_mm_alignr_epi8(b2, b1, 8), - _mm_alignr_epi8(b2, b1, 12))), - _mm_add_epi32(_mm_add_epi32(b2, _mm_alignr_epi8(b3, b2, 4)), - _mm_alignr_epi8(b3, b2, 8))); - sum_sq_ = _mm_add_epi32( - _mm_add_epi32(_mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4)), - _mm_add_epi32(_mm_alignr_epi8(a2, a1, 8), - _mm_alignr_epi8(a2, a1, 12))), - _mm_add_epi32(_mm_add_epi32(a2, _mm_alignr_epi8(a3, a2, 4)), - _mm_alignr_epi8(a3, a2, 8))); - - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, - i * buf_stride + j, A, B); - } - __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 1]); - __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 1]); - - j = width - 4; - switch (width % 4) { - case 0: - a1 = a2; - b1 = b2; - a2 = a3; - b2 = b3; - break; - case 1: - a1 = _mm_alignr_epi8(a2, a1, 4); - b1 = _mm_alignr_epi8(b2, b1, 4); - a2 = _mm_alignr_epi8(a3, a2, 4); - b2 = _mm_alignr_epi8(b3, b2, 4); - break; - case 2: - a1 = _mm_alignr_epi8(a2, a1, 8); - b1 = _mm_alignr_epi8(b2, b1, 8); - a2 = _mm_alignr_epi8(a3, a2, 8); - b2 = _mm_alignr_epi8(b3, b2, 8); - break; - case 3: - a1 = _mm_alignr_epi8(a2, a1, 12); - b1 = _mm_alignr_epi8(b2, b1, 12); - a2 = _mm_alignr_epi8(a3, a2, 12); - b2 = _mm_alignr_epi8(b3, b2, 12); - break; - } - - // Zero out the data loaded from "off the edge" of the array - __m128i zero = _mm_setzero_si128(); - a2 = _mm_blend_epi16(a2, zero, 0xc0); - b2 = _mm_blend_epi16(b2, zero, 0xc0); - - sum_ = _mm_add_epi32( - _mm_add_epi32(_mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4)), - _mm_add_epi32(_mm_alignr_epi8(b2, b1, 8), - _mm_alignr_epi8(b2, b1, 12))), - _mm_add_epi32(_mm_add_epi32(b2, _mm_alignr_epi8(zero, b2, 4)), - _mm_alignr_epi8(zero, b2, 8))); - sum_sq_ = _mm_add_epi32( - _mm_add_epi32(_mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4)), - _mm_add_epi32(_mm_alignr_epi8(a2, a1, 8), - _mm_alignr_epi8(a2, a1, 12))), - _mm_add_epi32(_mm_add_epi32(a2, _mm_alignr_epi8(zero, a2, 4)), - _mm_alignr_epi8(zero, a2, 8))); - - n = _mm_set_epi32(4 * h, 5 * h, 6 * h, 7 * h); - one_over_n = _mm_set_epi32(one_by_x[4 * h - 1], one_by_x[5 * h - 1], - one_by_x[6 * h - 1], one_by_x[7 * h - 1]); - s = _mm_set_epi32( - sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1], - sgrproj_mtable[eps - 1][6 * h - 1], sgrproj_mtable[eps - 1][7 * h - 1]); - calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j, - A, B); - } +// Calculate 4 values of the "cross sum" starting at buf. This is a 3x3 filter +// where the outer four corners have weight 3 and all other pixels have weight +// 4. 
+// +// Pixels are indexed like this: +// xtl xt xtr +// xl x xr +// xbl xb xbr +// +// buf points to x +// +// fours = xl + xt + xr + xb + x +// threes = xtl + xtr + xbr + xbl +// cross_sum = 4 * fours + 3 * threes +// = 4 * (fours + threes) - threes +// = (fours + threes) << 2 - threes +static INLINE __m128i cross_sum(const int32_t *buf, int stride) { + const __m128i xtl = xx_loadu_128(buf - 1 - stride); + const __m128i xt = xx_loadu_128(buf - stride); + const __m128i xtr = xx_loadu_128(buf + 1 - stride); + const __m128i xl = xx_loadu_128(buf - 1); + const __m128i x = xx_loadu_128(buf); + const __m128i xr = xx_loadu_128(buf + 1); + const __m128i xbl = xx_loadu_128(buf - 1 + stride); + const __m128i xb = xx_loadu_128(buf + stride); + const __m128i xbr = xx_loadu_128(buf + 1 + stride); + + const __m128i fours = _mm_add_epi32( + xl, _mm_add_epi32(xt, _mm_add_epi32(xr, _mm_add_epi32(xb, x)))); + const __m128i threes = + _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl))); + + return _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(fours, threes), 2), threes); } -void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height, - int dgd_stride, int32_t *dst, - int dst_stride, int r, int eps) { - const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; - const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; - int32_t A_[RESTORATION_PROC_UNIT_PELS]; - int32_t B_[RESTORATION_PROC_UNIT_PELS]; - int32_t *A = A_; - int32_t *B = B_; - int i, j; - // Adjusting the stride of A and B here appears to avoid bad cache effects, - // leading to a significant speed improvement. - // We also align the stride to a multiple of 16 bytes for efficiency. - int buf_stride = ((width_ext + 3) & ~3) + 16; - - // Don't filter tiles with dimensions < 5 on any axis - if ((width < 5) || (height < 5)) return; - - uint8_t *dgd0 = dgd - dgd_stride * SGRPROJ_BORDER_VERT - SGRPROJ_BORDER_HORZ; - if (r == 1) { - selfguided_restoration_1_v(dgd0, width_ext, height_ext, dgd_stride, A, B, - buf_stride); - selfguided_restoration_1_h(A, B, width_ext, height_ext, buf_stride, eps, 8); - } else if (r == 2) { - selfguided_restoration_2_v(dgd0, width_ext, height_ext, dgd_stride, A, B, - buf_stride); - selfguided_restoration_2_h(A, B, width_ext, height_ext, buf_stride, eps, 8); - } else if (r == 3) { - selfguided_restoration_3_v(dgd0, width_ext, height_ext, dgd_stride, A, B, - buf_stride); - selfguided_restoration_3_h(A, B, width_ext, height_ext, buf_stride, eps, 8); - } else { - assert(0); - } - A += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; - B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; - - { - i = 0; - j = 0; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k + buf_stride] + - A[k + buf_stride + 1]; - const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k + buf_stride] + - B[k + buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - for (j = 1; j < width - 1; ++j) { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k + buf_stride] + - A[k + buf_stride - 1] + A[k + buf_stride + 1]; - const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k + buf_stride] + - B[k + buf_stride - 1] + B[k + buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = 
ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - j = width - 1; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k + buf_stride] + - A[k + buf_stride - 1]; - const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k + buf_stride] + - B[k + buf_stride - 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - } - for (i = 1; i < height - 1; ++i) { - j = 0; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + - A[k + 1] + A[k - buf_stride + 1] + - A[k + buf_stride + 1]; - const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) + - B[k + 1] + B[k - buf_stride + 1] + - B[k + buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - - // Vectorize the innermost loop - for (j = 1; j < width - 1; j += 4) { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 5; - - __m128i tmp0 = _mm_loadu_si128((__m128i *)&A[k - 1 - buf_stride]); - __m128i tmp1 = _mm_loadu_si128((__m128i *)&A[k + 3 - buf_stride]); - __m128i tmp2 = _mm_loadu_si128((__m128i *)&A[k - 1]); - __m128i tmp3 = _mm_loadu_si128((__m128i *)&A[k + 3]); - __m128i tmp4 = _mm_loadu_si128((__m128i *)&A[k - 1 + buf_stride]); - __m128i tmp5 = _mm_loadu_si128((__m128i *)&A[k + 3 + buf_stride]); - - __m128i a0 = _mm_add_epi32( - _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 4), tmp2), - _mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 8), - _mm_alignr_epi8(tmp5, tmp4, 4))), - _mm_alignr_epi8(tmp1, tmp0, 4)); - __m128i a1 = _mm_add_epi32(_mm_add_epi32(tmp0, tmp4), - _mm_add_epi32(_mm_alignr_epi8(tmp1, tmp0, 8), - _mm_alignr_epi8(tmp5, tmp4, 8))); - __m128i a = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(a0, a1), 2), a1); - - __m128i tmp6 = _mm_loadu_si128((__m128i *)&B[k - 1 - buf_stride]); - __m128i tmp7 = _mm_loadu_si128((__m128i *)&B[k + 3 - buf_stride]); - __m128i tmp8 = _mm_loadu_si128((__m128i *)&B[k - 1]); - __m128i tmp9 = _mm_loadu_si128((__m128i *)&B[k + 3]); - __m128i tmp10 = _mm_loadu_si128((__m128i *)&B[k - 1 + buf_stride]); - __m128i tmp11 = _mm_loadu_si128((__m128i *)&B[k + 3 + buf_stride]); - - __m128i b0 = _mm_add_epi32( - _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 4), tmp8), - _mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 8), - _mm_alignr_epi8(tmp11, tmp10, 4))), - _mm_alignr_epi8(tmp7, tmp6, 4)); - __m128i b1 = - _mm_add_epi32(_mm_add_epi32(tmp6, tmp10), - _mm_add_epi32(_mm_alignr_epi8(tmp7, tmp6, 8), - _mm_alignr_epi8(tmp11, tmp10, 8))); - __m128i b = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(b0, b1), 2), b1); - - __m128i src = _mm_cvtepu8_epi32(_mm_loadu_si128((__m128i *)&dgd[l])); - - __m128i rounding = _mm_set1_epi32( - (1 << (SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS)) >> 1); - __m128i v = _mm_add_epi32(_mm_mullo_epi32(a, src), b); +// The final filter for self-guided restoration. Computes a weighted average +// across A, B with "cross sums" (see cross_sum implementation above). 
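As an illustration (a scalar sketch using the same constants, not code from the patch; final_filter_px_scalar is a made-up name), this is the per-pixel computation that final_filter below vectorises. The 3x3 cross_sum weights total 5 * 4 + 4 * 3 = 32 = 2^5, which is consistent with nb being 5 here: the division by the total weight is folded into the final shift.

static inline int32_t final_filter_px_scalar(const int32_t *A,
                                             const int32_t *B, int buf_stride,
                                             int src_px) {
  // Weights of the 3x3 "cross sum": 4 on the centre and edges, 3 on corners.
  static const int w[3][3] = { { 3, 4, 3 }, { 4, 4, 4 }, { 3, 4, 3 } };
  const int nb = 5;  // log2 of the weight total (32)
  const int shift = SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS;
  int32_t a = 0, b = 0;
  for (int dy = -1; dy <= 1; ++dy) {
    for (int dx = -1; dx <= 1; ++dx) {
      a += w[dy + 1][dx + 1] * A[dy * buf_stride + dx];
      b += w[dy + 1][dx + 1] * B[dy * buf_stride + dx];
    }
  }
  // v = a * src + b, rounded and shifted exactly as in the vector code below.
  const int32_t v = a * src_px + b;
  return (v + ((1 << shift) >> 1)) >> shift;
}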
+static void final_filter(int32_t *dst, int dst_stride, const int32_t *A, + const int32_t *B, int buf_stride, const void *dgd8, + int dgd_stride, int width, int height, int highbd) { + const int nb = 5; + const __m128i rounding = + round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); + const uint8_t *dgd_real = + highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8; + + for (int i = 0; i < height; ++i) { + for (int j = 0; j < width; j += 4) { + const __m128i a = cross_sum(A + i * buf_stride + j, buf_stride); + const __m128i b = cross_sum(B + i * buf_stride + j, buf_stride); + const __m128i raw = + xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd)); + const __m128i src = + highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw); + + __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b); __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding), SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - _mm_storeu_si128((__m128i *)&dst[m], w); - } - // Deal with any extra pixels at the right-hand edge of the frame - // (typically have 2 such pixels, but may have anywhere between 0 and 3) - for (; j < width - 1; ++j) { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 5; - const int32_t a = - (A[k] + A[k - 1] + A[k + 1] + A[k - buf_stride] + A[k + buf_stride]) * - 4 + - (A[k - 1 - buf_stride] + A[k - 1 + buf_stride] + - A[k + 1 - buf_stride] + A[k + 1 + buf_stride]) * - 3; - const int32_t b = - (B[k] + B[k - 1] + B[k + 1] + B[k - buf_stride] + B[k + buf_stride]) * - 4 + - (B[k - 1 - buf_stride] + B[k - 1 + buf_stride] + - B[k + 1 - buf_stride] + B[k + 1 + buf_stride]) * - 3; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - - j = width - 1; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + - A[k - 1] + A[k - buf_stride - 1] + - A[k + buf_stride - 1]; - const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) + - B[k - 1] + B[k - buf_stride - 1] + - B[k + buf_stride - 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - } - - { - i = height - 1; - j = 0; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k - buf_stride] + - A[k - buf_stride + 1]; - const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k - buf_stride] + - B[k - buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - for (j = 1; j < width - 1; ++j) { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k - buf_stride] + - A[k - buf_stride - 1] + A[k - buf_stride + 1]; - const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k - buf_stride] + - B[k - buf_stride - 1] + B[k - buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - j = width - 1; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k - buf_stride] + - 
A[k - buf_stride - 1]; - const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k - buf_stride] + - B[k - buf_stride - 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - } -} - -void av1_highpass_filter_sse4_1(uint8_t *dgd, int width, int height, int stride, - int32_t *dst, int dst_stride, int corner, - int edge) { - int i, j; - const int center = (1 << SGRPROJ_RST_BITS) - 4 * (corner + edge); - - { - i = 0; - j = 0; - { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = - center * dgd[k] + edge * (dgd[k + 1] + dgd[k + stride] + dgd[k] * 2) + - corner * - (dgd[k + stride + 1] + dgd[k + 1] + dgd[k + stride] + dgd[k]); - } - for (j = 1; j < width - 1; ++j) { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = center * dgd[k] + - edge * (dgd[k - 1] + dgd[k + stride] + dgd[k + 1] + dgd[k]) + - corner * (dgd[k + stride - 1] + dgd[k + stride + 1] + - dgd[k - 1] + dgd[k + 1]); - } - j = width - 1; - { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = - center * dgd[k] + edge * (dgd[k - 1] + dgd[k + stride] + dgd[k] * 2) + - corner * - (dgd[k + stride - 1] + dgd[k - 1] + dgd[k + stride] + dgd[k]); - } - } - { - i = height - 1; - j = 0; - { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = - center * dgd[k] + edge * (dgd[k + 1] + dgd[k - stride] + dgd[k] * 2) + - corner * - (dgd[k - stride + 1] + dgd[k + 1] + dgd[k - stride] + dgd[k]); - } - for (j = 1; j < width - 1; ++j) { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = center * dgd[k] + - edge * (dgd[k - 1] + dgd[k - stride] + dgd[k + 1] + dgd[k]) + - corner * (dgd[k - stride - 1] + dgd[k - stride + 1] + - dgd[k - 1] + dgd[k + 1]); - } - j = width - 1; - { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = - center * dgd[k] + edge * (dgd[k - 1] + dgd[k - stride] + dgd[k] * 2) + - corner * - (dgd[k - stride - 1] + dgd[k - 1] + dgd[k - stride] + dgd[k]); - } - } - __m128i center_ = _mm_set1_epi16(center); - __m128i edge_ = _mm_set1_epi16(edge); - __m128i corner_ = _mm_set1_epi16(corner); - for (i = 1; i < height - 1; ++i) { - j = 0; - { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = - center * dgd[k] + - edge * (dgd[k - stride] + dgd[k + 1] + dgd[k + stride] + dgd[k]) + - corner * (dgd[k + stride + 1] + dgd[k - stride + 1] + - dgd[k - stride] + dgd[k + stride]); - } - // Process in units of 8 pixels at a time. 
- for (j = 1; j < width - 8; j += 8) { - const int k = i * stride + j; - const int l = i * dst_stride + j; - - __m128i a = _mm_loadu_si128((__m128i *)&dgd[k - stride - 1]); - __m128i b = _mm_loadu_si128((__m128i *)&dgd[k - 1]); - __m128i c = _mm_loadu_si128((__m128i *)&dgd[k + stride - 1]); - - __m128i tl = _mm_cvtepu8_epi16(a); - __m128i tr = _mm_cvtepu8_epi16(_mm_srli_si128(a, 8)); - __m128i cl = _mm_cvtepu8_epi16(b); - __m128i cr = _mm_cvtepu8_epi16(_mm_srli_si128(b, 8)); - __m128i bl = _mm_cvtepu8_epi16(c); - __m128i br = _mm_cvtepu8_epi16(_mm_srli_si128(c, 8)); - - __m128i x = _mm_alignr_epi8(cr, cl, 2); - __m128i y = _mm_add_epi16(_mm_add_epi16(_mm_alignr_epi8(tr, tl, 2), cl), - _mm_add_epi16(_mm_alignr_epi8(br, bl, 2), - _mm_alignr_epi8(cr, cl, 4))); - __m128i z = _mm_add_epi16(_mm_add_epi16(tl, bl), - _mm_add_epi16(_mm_alignr_epi8(tr, tl, 4), - _mm_alignr_epi8(br, bl, 4))); - - __m128i res = _mm_add_epi16(_mm_mullo_epi16(x, center_), - _mm_add_epi16(_mm_mullo_epi16(y, edge_), - _mm_mullo_epi16(z, corner_))); - - _mm_storeu_si128((__m128i *)&dst[l], _mm_cvtepi16_epi32(res)); - _mm_storeu_si128((__m128i *)&dst[l + 4], - _mm_cvtepi16_epi32(_mm_srli_si128(res, 8))); - } - // If there are enough pixels left in this row, do another batch of 4 - // pixels. - for (; j < width - 4; j += 4) { - const int k = i * stride + j; - const int l = i * dst_stride + j; - - __m128i a = _mm_loadl_epi64((__m128i *)&dgd[k - stride - 1]); - __m128i b = _mm_loadl_epi64((__m128i *)&dgd[k - 1]); - __m128i c = _mm_loadl_epi64((__m128i *)&dgd[k + stride - 1]); - - __m128i tl = _mm_cvtepu8_epi16(a); - __m128i cl = _mm_cvtepu8_epi16(b); - __m128i bl = _mm_cvtepu8_epi16(c); - - __m128i x = _mm_srli_si128(cl, 2); - __m128i y = _mm_add_epi16( - _mm_add_epi16(_mm_srli_si128(tl, 2), cl), - _mm_add_epi16(_mm_srli_si128(bl, 2), _mm_srli_si128(cl, 4))); - __m128i z = _mm_add_epi16( - _mm_add_epi16(tl, bl), - _mm_add_epi16(_mm_srli_si128(tl, 4), _mm_srli_si128(bl, 4))); - - __m128i res = _mm_add_epi16(_mm_mullo_epi16(x, center_), - _mm_add_epi16(_mm_mullo_epi16(y, edge_), - _mm_mullo_epi16(z, corner_))); - - _mm_storeu_si128((__m128i *)&dst[l], _mm_cvtepi16_epi32(res)); - } - // Handle any leftover pixels - for (; j < width - 1; ++j) { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = - center * dgd[k] + - edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k + 1]) + - corner * (dgd[k + stride - 1] + dgd[k - stride - 1] + - dgd[k - stride + 1] + dgd[k + stride + 1]); - } - j = width - 1; - { - const int k = i * stride + j; - const int l = i * dst_stride + j; - dst[l] = - center * dgd[k] + - edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k]) + - corner * (dgd[k + stride - 1] + dgd[k - stride - 1] + - dgd[k - stride] + dgd[k + stride]); + xx_storeu_128(dst + i * dst_stride + j, w); } } } -void apply_selfguided_restoration_sse4_1(uint8_t *dat, int width, int height, - int stride, int eps, int *xqd, - uint8_t *dst, int dst_stride, - int32_t *tmpbuf) { - int xq[2]; - int32_t *flt1 = tmpbuf; - int32_t *flt2 = flt1 + RESTORATION_TILEPELS_MAX; - int i, j; - assert(width * height <= RESTORATION_TILEPELS_MAX); -#if USE_HIGHPASS_IN_SGRPROJ - av1_highpass_filter_sse4_1(dat, width, height, stride, flt1, width, - sgr_params[eps].corner, sgr_params[eps].edge); -#else - av1_selfguided_restoration_sse4_1(dat, width, height, stride, flt1, width, - sgr_params[eps].r1, sgr_params[eps].e1); -#endif // USE_HIGHPASS_IN_SGRPROJ - av1_selfguided_restoration_sse4_1(dat, width, height, stride, 
flt2, width, - sgr_params[eps].r2, sgr_params[eps].e2); - decode_xq(xqd, xq); - - __m128i xq0 = _mm_set1_epi32(xq[0]); - __m128i xq1 = _mm_set1_epi32(xq[1]); - for (i = 0; i < height; ++i) { - // Calculate output in batches of 8 pixels - for (j = 0; j < width; j += 8) { - const int k = i * width + j; - const int l = i * stride + j; - const int m = i * dst_stride + j; - __m128i src = - _mm_slli_epi16(_mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&dat[l])), - SGRPROJ_RST_BITS); - - const __m128i u_0 = _mm_cvtepu16_epi32(src); - const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(src, 8)); - - const __m128i f1_0 = - _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k]), u_0); - const __m128i f2_0 = - _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k]), u_0); - const __m128i f1_1 = - _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k + 4]), u_1); - const __m128i f2_1 = - _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k + 4]), u_1); - - const __m128i v_0 = _mm_add_epi32( - _mm_add_epi32(_mm_mullo_epi32(xq0, f1_0), _mm_mullo_epi32(xq1, f2_0)), - _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS)); - const __m128i v_1 = _mm_add_epi32( - _mm_add_epi32(_mm_mullo_epi32(xq0, f1_1), _mm_mullo_epi32(xq1, f2_1)), - _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS)); - - const __m128i rounding = - _mm_set1_epi32((1 << (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS)) >> 1); - const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding), - SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); - const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding), - SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); - - const __m128i tmp = _mm_packs_epi32(w_0, w_1); - const __m128i res = _mm_packus_epi16(tmp, tmp /* "don't care" value */); - _mm_storel_epi64((__m128i *)&dst[m], res); - } - // Process leftover pixels - for (; j < width; ++j) { - const int k = i * width + j; - const int l = i * stride + j; - const int m = i * dst_stride + j; - const int32_t u = ((int32_t)dat[l] << SGRPROJ_RST_BITS); - const int32_t f1 = (int32_t)flt1[k] - u; - const int32_t f2 = (int32_t)flt2[k] - u; - const int32_t v = xq[0] * f1 + xq[1] * f2 + (u << SGRPROJ_PRJ_BITS); - const int16_t w = - (int16_t)ROUND_POWER_OF_TWO(v, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); - dst[m] = (uint16_t)clip_pixel(w); - } +// Assumes that C, D are integral images for the original buffer which has been +// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels +// on the sides. A, B, C, D point at logical position (0, 0). 
+static void calc_ab_fast(int32_t *A, int32_t *B, const int32_t *C, + const int32_t *D, int width, int height, + int buf_stride, int bit_depth, int sgr_params_idx, + int radius_idx) { + const sgr_params_type *const params = &sgr_params[sgr_params_idx]; + const int r = params->r[radius_idx]; + const int n = (2 * r + 1) * (2 * r + 1); + const __m128i s = _mm_set1_epi32(params->s[radius_idx]); + // one_over_n[n-1] is 2^12/n, so easily fits in an int16 + const __m128i one_over_n = _mm_set1_epi32(one_by_x[n - 1]); + + const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS); + const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS); + + // Set up masks + const __m128i ones32 = _mm_set_epi32(0, 0, 0xffffffff, 0xffffffff); + __m128i mask[4]; + for (int idx = 0; idx < 4; idx++) { + const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx)); + mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift)); } -} - -#if CONFIG_HIGHBITDEPTH -// Only the vertical sums need to be adjusted for highbitdepth -static void highbd_selfguided_restoration_1_v(uint16_t *src, int width, - int height, int src_stride, - int32_t *A, int32_t *B, - int buf_stride) { - int i, j; + for (int i = -1; i < height + 1; i += 2) { + for (int j = -1; j < width + 1; j += 4) { + const int32_t *Cij = C + i * buf_stride + j; + const int32_t *Dij = D + i * buf_stride + j; - int width_extend = (width + 3) & ~3; - for (j = 0; j < width_extend; j += 4) { - __m128i a, b, x, y, x2, y2; - __m128i sum, sum_sq, tmp; + __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r); + __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r); - a = _mm_loadl_epi64((__m128i *)&src[j]); - b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]); + // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain + // some uninitialised data in their upper words. We use a mask to + // ensure that these bits are set to 0. + int idx = AOMMIN(4, width + 1 - j); + assert(idx >= 1); - sum = _mm_cvtepi16_epi32(_mm_add_epi16(a, b)); - tmp = _mm_unpacklo_epi16(a, b); - sum_sq = _mm_madd_epi16(tmp, tmp); + if (idx < 4) { + sum1 = _mm_and_si128(mask[idx], sum1); + sum2 = _mm_and_si128(mask[idx], sum2); + } - _mm_store_si128((__m128i *)&B[j], sum); - _mm_store_si128((__m128i *)&A[j], sum_sq); + const __m128i p = compute_p(sum1, sum2, bit_depth, n); - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); + const __m128i z = _mm_min_epi32( + _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z), + SGRPROJ_MTABLE_BITS), + _mm_set1_epi32(255)); - for (i = 1; i < height - 2; ++i) { - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); + // 'Gather' type instructions are not available pre-AVX2, so synthesize a + // gather using scalar loads. 
+ const __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)], + x_by_xplus1[_mm_extract_epi32(z, 2)], + x_by_xplus1[_mm_extract_epi32(z, 1)], + x_by_xplus1[_mm_extract_epi32(z, 0)]); - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); - y = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[(i + 2) * src_stride + j])); + xx_storeu_128(A + i * buf_stride + j, a_res); - sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); + const __m128i a_complement = + _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res); - x2 = _mm_mullo_epi32(x, x); - y2 = _mm_mullo_epi32(y, y); + // sum1 might have lanes greater than 2^15, so we can't use madd to do + // multiplication involving sum1. However, a_complement and one_over_n + // are both less than 256, so we can multiply them first. + const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n); + const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1); + const __m128i b_res = + _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS); - sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); + xx_storeu_128(B + i * buf_stride + j, b_res); } - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); - - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); } } -static void highbd_selfguided_restoration_2_v(uint16_t *src, int width, - int height, int src_stride, - int32_t *A, int32_t *B, - int buf_stride) { - int i, j; - - int width_extend = (width + 3) & ~3; - for (j = 0; j < width_extend; j += 4) { - __m128i a, b, c, c2, x, y, x2, y2; - __m128i sum, sum_sq, tmp; - - a = _mm_loadl_epi64((__m128i *)&src[j]); - b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]); - c = _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j]); - - sum = _mm_cvtepi16_epi32(_mm_add_epi16(_mm_add_epi16(a, b), c)); - // Important: We need to widen *before* squaring here, since - // c^2 may be up to 2^24. 
-    c = _mm_cvtepu16_epi32(c);
-    c2 = _mm_mullo_epi32(c, c);
-    tmp = _mm_unpacklo_epi16(a, b);
-    sum_sq = _mm_add_epi32(_mm_madd_epi16(tmp, tmp), c2);
-
-    _mm_store_si128((__m128i *)&B[j], sum);
-    _mm_store_si128((__m128i *)&A[j], sum_sq);
-
-    x = _mm_cvtepu16_epi32(
-        _mm_loadl_epi64((__m128i *)&src[3 * src_stride + j]));
-    sum = _mm_add_epi32(sum, x);
-    x2 = _mm_mullo_epi32(x, x);
-    sum_sq = _mm_add_epi32(sum_sq, x2);
-
-    _mm_store_si128((__m128i *)&B[buf_stride + j], sum);
-    _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq);
-
-    x = _mm_cvtepu16_epi32(
-        _mm_loadl_epi64((__m128i *)&src[4 * src_stride + j]));
-    sum = _mm_add_epi32(sum, x);
-    x2 = _mm_mullo_epi32(x, x);
-    sum_sq = _mm_add_epi32(sum_sq, x2);
-
-    for (i = 2; i < height - 3; ++i) {
-      _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
-      _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
-      x = _mm_cvtepu16_epi32(
-          _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j]));
-      y = _mm_cvtepu16_epi32(
-          _mm_loadl_epi64((__m128i *)&src[(i + 3) * src_stride + j]));
-
-      sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x));
-
-      x2 = _mm_mullo_epi32(x, x);
-      y2 = _mm_mullo_epi32(y, y);
-
-      sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2));
-    }
-    _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
-    _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
-    x = _mm_cvtepu16_epi32(
-        _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j]));
-    sum = _mm_sub_epi32(sum, x);
-    x2 = _mm_mullo_epi32(x, x);
-    sum_sq = _mm_sub_epi32(sum_sq, x2);
-
-    _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum);
-    _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq);
-
-    x = _mm_cvtepu16_epi32(
-        _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j]));
-    sum = _mm_sub_epi32(sum, x);
-    x2 = _mm_mullo_epi32(x, x);
-    sum_sq = _mm_sub_epi32(sum_sq, x2);
-
-    _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum);
-    _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq);
-  }
+// Calculate 4 values of the "cross sum" starting at buf.
+//
+// Pixels are indexed like this:
+// xtl  xt   xtr
+//  -   buf   -
+// xbl  xb   xbr
+//
+// Pixels are weighted like this:
+//  5    6    5
+//  0    0    0
+//  5    6    5
+//
+// fives = xtl + xtr + xbl + xbr
+// sixes = xt + xb
+// cross_sum = 6 * sixes + 5 * fives
+//           = 5 * (fives + sixes) + sixes
+//           = (fives + sixes) << 2 + (fives + sixes) + sixes
+static INLINE __m128i cross_sum_fast_even_row(const int32_t *buf, int stride) {
+  const __m128i xtl = xx_loadu_128(buf - 1 - stride);
+  const __m128i xt = xx_loadu_128(buf - stride);
+  const __m128i xtr = xx_loadu_128(buf + 1 - stride);
+  const __m128i xbl = xx_loadu_128(buf - 1 + stride);
+  const __m128i xb = xx_loadu_128(buf + stride);
+  const __m128i xbr = xx_loadu_128(buf + 1 + stride);
+
+  const __m128i fives =
+      _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl)));
+  const __m128i sixes = _mm_add_epi32(xt, xb);
+  const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes);
+
+  return _mm_add_epi32(
+      _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),
+      sixes);
+}
+
+// Calculate 4 values of the "cross sum" starting at buf.
+// +// Pixels are indexed like this: +// xl x xr +// +// Pixels are weighted like this: +// 5 6 5 +// +// buf points to x +// +// fives = xl + xr +// sixes = x +// cross_sum = 5 * fives + 6 * sixes +// = 4 * (fives + sixes) + (fives + sixes) + sixes +// = (fives + sixes) << 2 + (fives + sixes) + sixes +static INLINE __m128i cross_sum_fast_odd_row(const int32_t *buf) { + const __m128i xl = xx_loadu_128(buf - 1); + const __m128i x = xx_loadu_128(buf); + const __m128i xr = xx_loadu_128(buf + 1); + + const __m128i fives = _mm_add_epi32(xl, xr); + const __m128i sixes = x; + + const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes); + + return _mm_add_epi32( + _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes), + sixes); } -static void highbd_selfguided_restoration_3_v(uint16_t *src, int width, - int height, int src_stride, - int32_t *A, int32_t *B, - int buf_stride) { - int i, j; - - int width_extend = (width + 3) & ~3; - for (j = 0; j < width_extend; j += 4) { - __m128i a, b, c, d, x, y, x2, y2; - __m128i sum, sum_sq, tmp, tmp2; - - a = _mm_loadl_epi64((__m128i *)&src[j]); - b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]); - c = _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j]); - d = _mm_loadl_epi64((__m128i *)&src[3 * src_stride + j]); - - sum = _mm_cvtepi16_epi32( - _mm_add_epi16(_mm_add_epi16(a, b), _mm_add_epi16(c, d))); - tmp = _mm_unpacklo_epi16(a, b); - tmp2 = _mm_unpacklo_epi16(c, d); - sum_sq = - _mm_add_epi32(_mm_madd_epi16(tmp, tmp), _mm_madd_epi16(tmp2, tmp2)); - - _mm_store_si128((__m128i *)&B[j], sum); - _mm_store_si128((__m128i *)&A[j], sum_sq); - - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[4 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq); - - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[5 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[2 * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[2 * buf_stride + j], sum_sq); - - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[6 * src_stride + j])); - sum = _mm_add_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_add_epi32(sum_sq, x2); - - for (i = 3; i < height - 4; ++i) { - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); - - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 3) * src_stride + j])); - y = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[(i + 4) * src_stride + j])); - - sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x)); - - x2 = _mm_mullo_epi32(x, x); - y2 = _mm_mullo_epi32(y, y); - - sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2)); +// The final filter for the self-guided restoration. Computes a +// weighted average across A, B with "cross sums" (see cross_sum_... +// implementations above). +static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A, + const int32_t *B, int buf_stride, + const void *dgd8, int dgd_stride, int width, + int height, int highbd) { + const int nb0 = 5; + const int nb1 = 4; + + const __m128i rounding0 = + round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS); + const __m128i rounding1 = + round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS); + + const uint8_t *dgd_real = + highbd ? 
(const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8; + + for (int i = 0; i < height; ++i) { + if (!(i & 1)) { // even row + for (int j = 0; j < width; j += 4) { + const __m128i a = + cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride); + const __m128i b = + cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride); + const __m128i raw = + xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd)); + const __m128i src = + highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw); + + __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b); + __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding0), + SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS); + + xx_storeu_128(dst + i * dst_stride + j, w); + } + } else { // odd row + for (int j = 0; j < width; j += 4) { + const __m128i a = cross_sum_fast_odd_row(A + i * buf_stride + j); + const __m128i b = cross_sum_fast_odd_row(B + i * buf_stride + j); + const __m128i raw = + xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd)); + const __m128i src = + highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw); + + __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b); + __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding1), + SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS); + + xx_storeu_128(dst + i * dst_stride + j, w); + } } - _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq); - - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 3) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq); - - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq); - - x = _mm_cvtepu16_epi32( - _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j])); - sum = _mm_sub_epi32(sum, x); - x2 = _mm_mullo_epi32(x, x); - sum_sq = _mm_sub_epi32(sum_sq, x2); - - _mm_store_si128((__m128i *)&B[(i + 3) * buf_stride + j], sum); - _mm_store_si128((__m128i *)&A[(i + 3) * buf_stride + j], sum_sq); } } -void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width, - int height, int dgd_stride, - int32_t *dst, int dst_stride, - int bit_depth, int r, int eps) { +void av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width, + int height, int dgd_stride, + int32_t *flt0, int32_t *flt1, + int flt_stride, int sgr_params_idx, + int bit_depth, int highbd) { + DECLARE_ALIGNED(16, int32_t, buf[4 * RESTORATION_PROC_UNIT_PELS]); + memset(buf, 0, sizeof(buf)); + const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; - int32_t A_[RESTORATION_PROC_UNIT_PELS]; - int32_t B_[RESTORATION_PROC_UNIT_PELS]; - int32_t *A = A_; - int32_t *B = B_; - int i, j; + // Adjusting the stride of A and B here appears to avoid bad cache effects, // leading to a significant speed improvement. // We also align the stride to a multiple of 16 bytes for efficiency. 
int buf_stride = ((width_ext + 3) & ~3) + 16; - // Don't filter tiles with dimensions < 5 on any axis - if ((width < 5) || (height < 5)) return; - - uint16_t *dgd0 = dgd - dgd_stride * SGRPROJ_BORDER_VERT - SGRPROJ_BORDER_HORZ; - if (r == 1) { - highbd_selfguided_restoration_1_v(dgd0, width_ext, height_ext, dgd_stride, - A, B, buf_stride); - selfguided_restoration_1_h(A, B, width_ext, height_ext, buf_stride, eps, - bit_depth); - } else if (r == 2) { - highbd_selfguided_restoration_2_v(dgd0, width_ext, height_ext, dgd_stride, - A, B, buf_stride); - selfguided_restoration_2_h(A, B, width_ext, height_ext, buf_stride, eps, - bit_depth); - } else if (r == 3) { - highbd_selfguided_restoration_3_v(dgd0, width_ext, height_ext, dgd_stride, - A, B, buf_stride); - selfguided_restoration_3_h(A, B, width_ext, height_ext, buf_stride, eps, - bit_depth); - } else { - assert(0); - } - A += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; - B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ; - - { - i = 0; - j = 0; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k + buf_stride] + - A[k + buf_stride + 1]; - const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k + buf_stride] + - B[k + buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - for (j = 1; j < width - 1; ++j) { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k + buf_stride] + - A[k + buf_stride - 1] + A[k + buf_stride + 1]; - const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k + buf_stride] + - B[k + buf_stride - 1] + B[k + buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - j = width - 1; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k + buf_stride] + - A[k + buf_stride - 1]; - const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k + buf_stride] + - B[k + buf_stride - 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - } - for (i = 1; i < height - 1; ++i) { - j = 0; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + - A[k + 1] + A[k - buf_stride + 1] + - A[k + buf_stride + 1]; - const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) + - B[k + 1] + B[k - buf_stride + 1] + - B[k + buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - - // Vectorize the innermost loop - for (j = 1; j < width - 1; j += 4) { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 5; - - __m128i tmp0 = _mm_loadu_si128((__m128i *)&A[k - 1 - buf_stride]); - __m128i tmp1 = _mm_loadu_si128((__m128i *)&A[k + 3 - buf_stride]); - __m128i tmp2 = _mm_loadu_si128((__m128i *)&A[k - 1]); - __m128i tmp3 = _mm_loadu_si128((__m128i *)&A[k + 3]); - __m128i tmp4 = _mm_loadu_si128((__m128i *)&A[k - 1 + buf_stride]); - __m128i 
tmp5 = _mm_loadu_si128((__m128i *)&A[k + 3 + buf_stride]); - - __m128i a0 = _mm_add_epi32( - _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 4), tmp2), - _mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 8), - _mm_alignr_epi8(tmp5, tmp4, 4))), - _mm_alignr_epi8(tmp1, tmp0, 4)); - __m128i a1 = _mm_add_epi32(_mm_add_epi32(tmp0, tmp4), - _mm_add_epi32(_mm_alignr_epi8(tmp1, tmp0, 8), - _mm_alignr_epi8(tmp5, tmp4, 8))); - __m128i a = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(a0, a1), 2), a1); - - __m128i tmp6 = _mm_loadu_si128((__m128i *)&B[k - 1 - buf_stride]); - __m128i tmp7 = _mm_loadu_si128((__m128i *)&B[k + 3 - buf_stride]); - __m128i tmp8 = _mm_loadu_si128((__m128i *)&B[k - 1]); - __m128i tmp9 = _mm_loadu_si128((__m128i *)&B[k + 3]); - __m128i tmp10 = _mm_loadu_si128((__m128i *)&B[k - 1 + buf_stride]); - __m128i tmp11 = _mm_loadu_si128((__m128i *)&B[k + 3 + buf_stride]); - - __m128i b0 = _mm_add_epi32( - _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 4), tmp8), - _mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 8), - _mm_alignr_epi8(tmp11, tmp10, 4))), - _mm_alignr_epi8(tmp7, tmp6, 4)); - __m128i b1 = - _mm_add_epi32(_mm_add_epi32(tmp6, tmp10), - _mm_add_epi32(_mm_alignr_epi8(tmp7, tmp6, 8), - _mm_alignr_epi8(tmp11, tmp10, 8))); - __m128i b = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(b0, b1), 2), b1); - - __m128i src = _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i *)&dgd[l])); - - __m128i rounding = _mm_set1_epi32( - (1 << (SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS)) >> 1); - __m128i v = _mm_add_epi32(_mm_mullo_epi32(a, src), b); - __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding), - SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - _mm_storeu_si128((__m128i *)&dst[m], w); - } - - // Deal with any extra pixels at the right-hand edge of the frame - // (typically have 2 such pixels, but may have anywhere between 0 and 3) - for (; j < width - 1; ++j) { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 5; - const int32_t a = - (A[k] + A[k - 1] + A[k + 1] + A[k - buf_stride] + A[k + buf_stride]) * - 4 + - (A[k - 1 - buf_stride] + A[k - 1 + buf_stride] + - A[k + 1 - buf_stride] + A[k + 1 + buf_stride]) * - 3; - const int32_t b = - (B[k] + B[k - 1] + B[k + 1] + B[k - buf_stride] + B[k + buf_stride]) * - 4 + - (B[k - 1 - buf_stride] + B[k - 1 + buf_stride] + - B[k + 1 - buf_stride] + B[k + 1 + buf_stride]) * - 3; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - - j = width - 1; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) + - A[k - 1] + A[k - buf_stride - 1] + - A[k + buf_stride - 1]; - const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) + - B[k - 1] + B[k - buf_stride - 1] + - B[k + buf_stride - 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } + // The "tl" pointers point at the top-left of the initialised data for the + // array. Adding 3 here ensures that column 1 is 16-byte aligned. + int32_t *Atl = buf + 0 * RESTORATION_PROC_UNIT_PELS + 3; + int32_t *Btl = buf + 1 * RESTORATION_PROC_UNIT_PELS + 3; + int32_t *Ctl = buf + 2 * RESTORATION_PROC_UNIT_PELS + 3; + int32_t *Dtl = buf + 3 * RESTORATION_PROC_UNIT_PELS + 3; + + // The "0" pointers are (- SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). 
Note + // there's a zero row and column in A, B (integral images), so we move down + // and right one for them. + const int buf_diag_border = + SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT; + + int32_t *A0 = Atl + 1 + buf_stride; + int32_t *B0 = Btl + 1 + buf_stride; + int32_t *C0 = Ctl + 1 + buf_stride; + int32_t *D0 = Dtl + 1 + buf_stride; + + // Finally, A, B, C, D point at position (0, 0). + int32_t *A = A0 + buf_diag_border; + int32_t *B = B0 + buf_diag_border; + int32_t *C = C0 + buf_diag_border; + int32_t *D = D0 + buf_diag_border; + + const int dgd_diag_border = + SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT; + const uint8_t *dgd0 = dgd8 - dgd_diag_border; + + // Generate integral images from the input. C will contain sums of squares; D + // will contain just sums + if (highbd) + integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext, + height_ext, Ctl, Dtl, buf_stride); + else + integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl, + buf_stride); + + const sgr_params_type *const params = &sgr_params[sgr_params_idx]; + // Write to flt0 and flt1 + // If params->r == 0 we skip the corresponding filter. We only allow one of + // the radii to be 0, as having both equal to 0 would be equivalent to + // skipping SGR entirely. + assert(!(params->r[0] == 0 && params->r[1] == 0)); + assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ)); + assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ)); + + if (params->r[0] > 0) { + calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth, + sgr_params_idx, 0); + final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride, + width, height, highbd); } - { - i = height - 1; - j = 0; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k - buf_stride] + - A[k - buf_stride + 1]; - const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k - buf_stride] + - B[k - buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - for (j = 1; j < width - 1; ++j) { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k - buf_stride] + - A[k - buf_stride - 1] + A[k - buf_stride + 1]; - const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k - buf_stride] + - B[k - buf_stride - 1] + B[k - buf_stride + 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } - j = width - 1; - { - const int k = i * buf_stride + j; - const int l = i * dgd_stride + j; - const int m = i * dst_stride + j; - const int nb = 3; - const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k - buf_stride] + - A[k - buf_stride - 1]; - const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k - buf_stride] + - B[k - buf_stride - 1]; - const int32_t v = a * dgd[l] + b; - dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); - } + if (params->r[1] > 0) { + calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx, + 1); + final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width, + height, highbd); } } -void av1_highpass_filter_highbd_sse4_1(uint16_t *dgd, int width, int height, - int stride, int32_t *dst, int dst_stride, - int corner, int edge) { - int i, j; - const int 
-  center = (1 << SGRPROJ_RST_BITS) - 4 * (corner + edge);
-
-  {
-    i = 0;
-    j = 0;
-    {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] =
-          center * dgd[k] + edge * (dgd[k + 1] + dgd[k + stride] + dgd[k] * 2) +
-          corner *
-              (dgd[k + stride + 1] + dgd[k + 1] + dgd[k + stride] + dgd[k]);
-    }
-    for (j = 1; j < width - 1; ++j) {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] = center * dgd[k] +
-               edge * (dgd[k - 1] + dgd[k + stride] + dgd[k + 1] + dgd[k]) +
-               corner * (dgd[k + stride - 1] + dgd[k + stride + 1] +
-                         dgd[k - 1] + dgd[k + 1]);
-    }
-    j = width - 1;
-    {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] =
-          center * dgd[k] + edge * (dgd[k - 1] + dgd[k + stride] + dgd[k] * 2) +
-          corner *
-              (dgd[k + stride - 1] + dgd[k - 1] + dgd[k + stride] + dgd[k]);
-    }
-  }
-  __m128i center_ = _mm_set1_epi32(center);
-  __m128i edge_ = _mm_set1_epi32(edge);
-  __m128i corner_ = _mm_set1_epi32(corner);
-  for (i = 1; i < height - 1; ++i) {
-    j = 0;
-    {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] =
-          center * dgd[k] +
-          edge * (dgd[k - stride] + dgd[k + 1] + dgd[k + stride] + dgd[k]) +
-          corner * (dgd[k + stride + 1] + dgd[k - stride + 1] +
-                    dgd[k - stride] + dgd[k + stride]);
-    }
-    // Process 4 pixels at a time
-    for (j = 1; j < width - 4; j += 4) {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-
-      __m128i a = _mm_loadu_si128((__m128i *)&dgd[k - stride - 1]);
-      __m128i b = _mm_loadu_si128((__m128i *)&dgd[k - 1]);
-      __m128i c = _mm_loadu_si128((__m128i *)&dgd[k + stride - 1]);
-
-      __m128i tl = _mm_cvtepu16_epi32(a);
-      __m128i tr = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
-      __m128i cl = _mm_cvtepu16_epi32(b);
-      __m128i cr = _mm_cvtepu16_epi32(_mm_srli_si128(b, 8));
-      __m128i bl = _mm_cvtepu16_epi32(c);
-      __m128i br = _mm_cvtepu16_epi32(_mm_srli_si128(c, 8));
-
-      __m128i x = _mm_alignr_epi8(cr, cl, 4);
-      __m128i y = _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tr, tl, 4), cl),
-                                _mm_add_epi32(_mm_alignr_epi8(br, bl, 4),
-                                              _mm_alignr_epi8(cr, cl, 8)));
-      __m128i z = _mm_add_epi32(_mm_add_epi32(tl, bl),
-                                _mm_add_epi32(_mm_alignr_epi8(tr, tl, 8),
-                                              _mm_alignr_epi8(br, bl, 8)));
-
-      __m128i res = _mm_add_epi32(_mm_mullo_epi32(x, center_),
-                                  _mm_add_epi32(_mm_mullo_epi32(y, edge_),
-                                                _mm_mullo_epi32(z, corner_)));
-
-      _mm_storeu_si128((__m128i *)&dst[l], res);
-    }
-    // Handle any leftover pixels
-    for (; j < width - 1; ++j) {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] =
-          center * dgd[k] +
-          edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k + 1]) +
-          corner * (dgd[k + stride - 1] + dgd[k - stride - 1] +
-                    dgd[k - stride + 1] + dgd[k + stride + 1]);
-    }
-    j = width - 1;
-    {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] =
-          center * dgd[k] +
-          edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k]) +
-          corner * (dgd[k + stride - 1] + dgd[k - stride - 1] +
-                    dgd[k - stride] + dgd[k + stride]);
-    }
-  }
-  {
-    i = height - 1;
-    j = 0;
-    {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] =
-          center * dgd[k] + edge * (dgd[k + 1] + dgd[k - stride] + dgd[k] * 2) +
-          corner *
-              (dgd[k - stride + 1] + dgd[k + 1] + dgd[k - stride] + dgd[k]);
-    }
-    for (j = 1; j < width - 1; ++j) {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] = center * dgd[k] +
-               edge * (dgd[k - 1] + dgd[k - stride] + dgd[k + 1] + dgd[k]) +
-               corner * (dgd[k - stride - 1] + dgd[k - stride + 1] +
-                         dgd[k - 1] + dgd[k + 1]);
-    }
-    j = width - 1;
-    {
-      const int k = i * stride + j;
-      const int l = i * dst_stride + j;
-      dst[l] =
-          center * dgd[k] + edge * (dgd[k - 1] + dgd[k - stride] + dgd[k] * 2) +
-          corner *
-              (dgd[k - stride - 1] + dgd[k - 1] + dgd[k - stride] + dgd[k]);
-    }
-  }
-}
-
-void apply_selfguided_restoration_highbd_sse4_1(
-    uint16_t *dat, int width, int height, int stride, int bit_depth, int eps,
-    int *xqd, uint16_t *dst, int dst_stride, int32_t *tmpbuf) {
+void apply_selfguided_restoration_sse4_1(const uint8_t *dat8, int width,
+                                         int height, int stride, int eps,
+                                         const int *xqd, uint8_t *dst8,
+                                         int dst_stride, int32_t *tmpbuf,
+                                         int bit_depth, int highbd) {
+  int32_t *flt0 = tmpbuf;
+  int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
+  assert(width * height <= RESTORATION_UNITPELS_MAX);
+  av1_selfguided_restoration_sse4_1(dat8, width, height, stride, flt0, flt1,
+                                    width, eps, bit_depth, highbd);
+  const sgr_params_type *const params = &sgr_params[eps];
   int xq[2];
-  int32_t *flt1 = tmpbuf;
-  int32_t *flt2 = flt1 + RESTORATION_TILEPELS_MAX;
-  int i, j;
-  assert(width * height <= RESTORATION_TILEPELS_MAX);
-#if USE_HIGHPASS_IN_SGRPROJ
-  av1_highpass_filter_highbd_sse4_1(dat, width, height, stride, flt1, width,
-                                    sgr_params[eps].corner,
-                                    sgr_params[eps].edge);
-#else
-  av1_selfguided_restoration_highbd_sse4_1(dat, width, height, stride, flt1,
-                                           width, bit_depth, sgr_params[eps].r1,
-                                           sgr_params[eps].e1);
-#endif  // USE_HIGHPASS_IN_SGRPROJ
-  av1_selfguided_restoration_highbd_sse4_1(dat, width, height, stride, flt2,
-                                           width, bit_depth, sgr_params[eps].r2,
-                                           sgr_params[eps].e2);
-  decode_xq(xqd, xq);
+  decode_xq(xqd, xq, params);
   __m128i xq0 = _mm_set1_epi32(xq[0]);
   __m128i xq1 = _mm_set1_epi32(xq[1]);
-  for (i = 0; i < height; ++i) {
+
+  for (int i = 0; i < height; ++i) {
     // Calculate output in batches of 8 pixels
-    for (j = 0; j < width; j += 8) {
+    for (int j = 0; j < width; j += 8) {
       const int k = i * width + j;
-      const int l = i * stride + j;
       const int m = i * dst_stride + j;
-      __m128i src =
-          _mm_slli_epi16(_mm_load_si128((__m128i *)&dat[l]), SGRPROJ_RST_BITS);
-
-      const __m128i u_0 = _mm_cvtepu16_epi32(src);
-      const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(src, 8));
-
-      const __m128i f1_0 =
-          _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k]), u_0);
-      const __m128i f2_0 =
-          _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k]), u_0);
-      const __m128i f1_1 =
-          _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k + 4]), u_1);
-      const __m128i f2_1 =
-          _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k + 4]), u_1);
-
-      const __m128i v_0 = _mm_add_epi32(
-          _mm_add_epi32(_mm_mullo_epi32(xq0, f1_0), _mm_mullo_epi32(xq1, f2_0)),
-          _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS));
-      const __m128i v_1 = _mm_add_epi32(
-          _mm_add_epi32(_mm_mullo_epi32(xq0, f1_1), _mm_mullo_epi32(xq1, f2_1)),
-          _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS));
+
+      const uint8_t *dat8ij = dat8 + i * stride + j;
+      __m128i src;
+      if (highbd) {
+        src = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij));
+      } else {
+        src = _mm_cvtepu8_epi16(xx_loadl_64(dat8ij));
+      }
+
+      const __m128i u = _mm_slli_epi16(src, SGRPROJ_RST_BITS);
+      const __m128i u_0 = _mm_cvtepu16_epi32(u);
+      const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(u, 8));
+
+      __m128i v_0 = _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS);
+      __m128i v_1 = _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS);
+
+      if (params->r[0] > 0) {
+        const __m128i f1_0 = _mm_sub_epi32(xx_loadu_128(&flt0[k]), u_0);
+        v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq0, f1_0));
+
+        const __m128i f1_1 = _mm_sub_epi32(xx_loadu_128(&flt0[k + 4]), u_1);
+        v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq0, f1_1));
+      }
+
+      if (params->r[1] > 0) {
+        const __m128i f2_0 = _mm_sub_epi32(xx_loadu_128(&flt1[k]), u_0);
+        v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq1, f2_0));
+
+        const __m128i f2_1 = _mm_sub_epi32(xx_loadu_128(&flt1[k + 4]), u_1);
+        v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq1, f2_1));
+      }

       const __m128i rounding =
-          _mm_set1_epi32((1 << (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS)) >> 1);
+          round_for_shift(SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
       const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding),
                                          SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
       const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding),
                                          SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
-      // Pack into 16 bits and clamp to [0, 2^bit_depth)
-      const __m128i tmp = _mm_packus_epi32(w_0, w_1);
-      const __m128i max = _mm_set1_epi16((1 << bit_depth) - 1);
-      const __m128i res = _mm_min_epi16(tmp, max);
-
-      _mm_store_si128((__m128i *)&dst[m], res);
-    }
-    // Process leftover pixels
-    for (; j < width; ++j) {
-      const int k = i * width + j;
-      const int l = i * stride + j;
-      const int m = i * dst_stride + j;
-      const int32_t u = ((int32_t)dat[l] << SGRPROJ_RST_BITS);
-      const int32_t f1 = (int32_t)flt1[k] - u;
-      const int32_t f2 = (int32_t)flt2[k] - u;
-      const int32_t v = xq[0] * f1 + xq[1] * f2 + (u << SGRPROJ_PRJ_BITS);
-      const int16_t w =
-          (int16_t)ROUND_POWER_OF_TWO(v, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
-      dst[m] = (uint16_t)clip_pixel_highbd(w, bit_depth);
+      if (highbd) {
+        // Pack into 16 bits and clamp to [0, 2^bit_depth)
+        const __m128i tmp = _mm_packus_epi32(w_0, w_1);
+        const __m128i max = _mm_set1_epi16((1 << bit_depth) - 1);
+        const __m128i res = _mm_min_epi16(tmp, max);
+        xx_storeu_128(CONVERT_TO_SHORTPTR(dst8 + m), res);
+      } else {
+        // Pack into 8 bits and clamp to [0, 256)
+        const __m128i tmp = _mm_packs_epi32(w_0, w_1);
+        const __m128i res = _mm_packus_epi16(tmp, tmp /* "don't care" value */);
+        xx_storel_64(dst8 + m, res);
+      }
     }
   }
 }
-
-#endif
-- cgit v1.2.3


From b8df135c97a854c2ff9b4394b016649c601177fa Mon Sep 17 00:00:00 2001
From: trav90
Date: Fri, 19 Oct 2018 23:00:02 -0500
Subject: Update libaom to rev b25610052a1398032320008d69b51d2da94f5928

---
 third_party/aom/av1/common/x86/selfguided_sse4.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'third_party/aom/av1/common/x86/selfguided_sse4.c')

diff --git a/third_party/aom/av1/common/x86/selfguided_sse4.c b/third_party/aom/av1/common/x86/selfguided_sse4.c
index a42c94028..c64150b9d 100644
--- a/third_party/aom/av1/common/x86/selfguided_sse4.c
+++ b/third_party/aom/av1/common/x86/selfguided_sse4.c
@@ -1,3 +1,14 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
 #include <smmintrin.h>

 #include "config/aom_config.h"
-- cgit v1.2.3


From d2499ead93dc4298c0882fe98902acb1b5209f99 Mon Sep 17 00:00:00 2001
From: trav90
Date: Fri, 19 Oct 2018 23:05:00 -0500
Subject: Update libaom to commit ID 1e227d41f0616de9548a673a83a21ef990b62591

---
 third_party/aom/av1/common/x86/selfguided_sse4.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

(limited to 'third_party/aom/av1/common/x86/selfguided_sse4.c')

diff --git a/third_party/aom/av1/common/x86/selfguided_sse4.c b/third_party/aom/av1/common/x86/selfguided_sse4.c
index c64150b9d..ea3f6d942 100644
--- a/third_party/aom/av1/common/x86/selfguided_sse4.c
+++ b/third_party/aom/av1/common/x86/selfguided_sse4.c
@@ -499,13 +499,15 @@ static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A,
   }
 }

-void av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width,
-                                       int height, int dgd_stride,
-                                       int32_t *flt0, int32_t *flt1,
-                                       int flt_stride, int sgr_params_idx,
-                                       int bit_depth, int highbd) {
-  DECLARE_ALIGNED(16, int32_t, buf[4 * RESTORATION_PROC_UNIT_PELS]);
-  memset(buf, 0, sizeof(buf));
+int av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width,
+                                      int height, int dgd_stride, int32_t *flt0,
+                                      int32_t *flt1, int flt_stride,
+                                      int sgr_params_idx, int bit_depth,
+                                      int highbd) {
+  int32_t *buf = (int32_t *)aom_memalign(
+      16, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS);
+  if (!buf) return -1;
+  memset(buf, 0, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS);
   const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
   const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;

@@ -574,6 +576,8 @@ void av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width,
     final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width,
                  height, highbd);
   }
+  aom_free(buf);
+  return 0;
 }

 void apply_selfguided_restoration_sse4_1(const uint8_t *dat8, int width,
@@ -584,8 +588,10 @@ void apply_selfguided_restoration_sse4_1(const uint8_t *dat8, int width,
   int32_t *flt0 = tmpbuf;
   int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
   assert(width * height <= RESTORATION_UNITPELS_MAX);
-  av1_selfguided_restoration_sse4_1(dat8, width, height, stride, flt0, flt1,
-                                    width, eps, bit_depth, highbd);
+  const int ret = av1_selfguided_restoration_sse4_1(
+      dat8, width, height, stride, flt0, flt1, width, eps, bit_depth, highbd);
+  (void)ret;
+  assert(!ret);
   const sgr_params_type *const params = &sgr_params[eps];
   int xq[2];
   decode_xq(xqd, xq, params);
-- cgit v1.2.3