Diffstat (limited to 'third_party/aom/av1/common/x86/selfguided_sse4.c')
-rw-r--r--  third_party/aom/av1/common/x86/selfguided_sse4.c  2254
1 file changed, 538 insertions, 1716 deletions
diff --git a/third_party/aom/av1/common/x86/selfguided_sse4.c b/third_party/aom/av1/common/x86/selfguided_sse4.c
index 9de9177c1..a42c94028 100644
--- a/third_party/aom/av1/common/x86/selfguided_sse4.c
+++ b/third_party/aom/av1/common/x86/selfguided_sse4.c
@@ -1,1821 +1,643 @@
#include <smmintrin.h>
-#include "./aom_config.h"
-#include "./av1_rtcd.h"
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
#include "av1/common/restoration.h"
#include "aom_dsp/x86/synonyms.h"
-/* Calculate four consecutive entries of the intermediate A and B arrays
- (corresponding to the first loop in the C version of
- av1_selfguided_restoration)
-*/
-static void calc_block(__m128i sum, __m128i sum_sq, __m128i n,
- __m128i *one_over_n_, __m128i *s_, int bit_depth,
- int idx, int32_t *A, int32_t *B) {
- __m128i a, b, p;
- __m128i one_over_n = *one_over_n_;
- __m128i s = *s_;
-#if CONFIG_HIGHBITDEPTH
- if (bit_depth > 8) {
- __m128i rounding_a = _mm_set1_epi32((1 << (2 * (bit_depth - 8))) >> 1);
- __m128i rounding_b = _mm_set1_epi32((1 << (bit_depth - 8)) >> 1);
- __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8));
- __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8);
- a = _mm_srl_epi32(_mm_add_epi32(sum_sq, rounding_a), shift_a);
- b = _mm_srl_epi32(_mm_add_epi32(sum, rounding_b), shift_b);
- a = _mm_mullo_epi32(a, n);
- b = _mm_mullo_epi32(b, b);
- p = _mm_sub_epi32(_mm_max_epi32(a, b), b);
- } else {
-#endif
- (void)bit_depth;
- a = _mm_mullo_epi32(sum_sq, n);
- b = _mm_mullo_epi32(sum, sum);
- p = _mm_sub_epi32(a, b);
-#if CONFIG_HIGHBITDEPTH
- }
-#endif
-
- __m128i rounding_z = _mm_set1_epi32((1 << SGRPROJ_MTABLE_BITS) >> 1);
- __m128i z = _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rounding_z),
- SGRPROJ_MTABLE_BITS);
- z = _mm_min_epi32(z, _mm_set1_epi32(255));
-
- // 'Gather' type instructions are not available pre-AVX2, so synthesize a
- // gather using scalar loads.
- __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)],
- x_by_xplus1[_mm_extract_epi32(z, 2)],
- x_by_xplus1[_mm_extract_epi32(z, 1)],
- x_by_xplus1[_mm_extract_epi32(z, 0)]);
-
- _mm_storeu_si128((__m128i *)&A[idx], a_res);
-
- __m128i rounding_res = _mm_set1_epi32((1 << SGRPROJ_RECIP_BITS) >> 1);
- __m128i a_complement = _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res);
- __m128i b_int =
- _mm_mullo_epi32(a_complement, _mm_mullo_epi32(sum, one_over_n));
- __m128i b_res =
- _mm_srli_epi32(_mm_add_epi32(b_int, rounding_res), SGRPROJ_RECIP_BITS);
-
- _mm_storeu_si128((__m128i *)&B[idx], b_res);
+// Load 4 bytes from the possibly-misaligned pointer p, extend each byte to
+// 32-bit precision and return them in an SSE register.
+static __m128i xx_load_extend_8_32(const void *p) {
+ return _mm_cvtepu8_epi32(xx_loadl_32(p));
}
-static void selfguided_restoration_1_v(uint8_t *src, int width, int height,
- int src_stride, int32_t *A, int32_t *B,
- int buf_stride) {
- int i, j;
-
- // Vertical sum
- // When the width is not a multiple of 4, we know that 'stride' is rounded up
- // to a multiple of 4. So it is safe for this loop to calculate extra columns
- // at the right-hand edge of the frame.
- int width_extend = (width + 3) & ~3;
- for (j = 0; j < width_extend; j += 4) {
- __m128i a, b, x, y, x2, y2;
- __m128i sum, sum_sq, tmp;
-
- a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j]));
- b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j]));
-
- sum = _mm_cvtepi16_epi32(_mm_add_epi16(a, b));
- tmp = _mm_unpacklo_epi16(a, b);
- sum_sq = _mm_madd_epi16(tmp, tmp);
-
- _mm_store_si128((__m128i *)&B[j], sum);
- _mm_store_si128((__m128i *)&A[j], sum_sq);
-
- x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[2 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
-
- for (i = 1; i < height - 2; ++i) {
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu8_epi32(
- xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j]));
- y = _mm_cvtepu8_epi32(
- xx_loadl_32((__m128i *)&src[(i + 2) * src_stride + j]));
-
- sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x));
-
- x2 = _mm_mullo_epi32(x, x);
- y2 = _mm_mullo_epi32(y, y);
-
- sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2));
- }
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu8_epi32(
- xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
+// Load 4 halfwords from the possibly-misaligned pointer p, extend each
+// halfword to 32-bit precision and return them in an SSE register.
+static __m128i xx_load_extend_16_32(const void *p) {
+ return _mm_cvtepu16_epi32(xx_loadl_64(p));
+}
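// A minimal usage sketch of the two helpers above (square_4_pixels is a
// hypothetical name, not part of this patch): widening before multiplying
// keeps the squares exact in 32 bits.
static INLINE __m128i square_4_pixels(const uint8_t *p) {
  const __m128i x = xx_load_extend_8_32(p);  // {p0, p1, p2, p3} as 32 bits
  return _mm_mullo_epi32(x, x);              // {p0*p0, p1*p1, p2*p2, p3*p3}
}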
- _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq);
- }
+// Compute the scan (inclusive prefix sum) of an SSE register holding 4
+// 32-bit integers: if the register holds x0..x3, the scan holds
+// x0, x0+x1, x0+x1+x2, x0+x1+x2+x3.
+static __m128i scan_32(__m128i x) {
+ const __m128i x01 = _mm_add_epi32(x, _mm_slli_si128(x, 4));
+ return _mm_add_epi32(x01, _mm_slli_si128(x01, 8));
}
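// A scalar model of scan_32 (an illustrative sketch; scan_4_scalar is a
// hypothetical name). The two shift-and-add steps above produce the same
// inclusive prefix sum in log2(4) = 2 vector additions.
static INLINE void scan_4_scalar(const int32_t x[4], int32_t out[4]) {
  int32_t acc = 0;
  for (int k = 0; k < 4; ++k) {
    acc += x[k];
    out[k] = acc;  // {x0, x0+x1, x0+x1+x2, x0+x1+x2+x3}
  }
}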
-static void selfguided_restoration_1_h(int32_t *A, int32_t *B, int width,
- int height, int buf_stride, int eps,
- int bit_depth) {
- int i, j;
-
- // Horizontal sum
- int width_extend = (width + 3) & ~3;
- for (i = 0; i < height; ++i) {
- int h = AOMMIN(2, height - i) + AOMMIN(1, i);
-
- __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]);
- __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]);
- __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]);
- __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]);
-
- // Note: The _mm_slli_si128 call sets up a register containing
- // {0, A[i * buf_stride], ..., A[i * buf_stride + 2]},
- // so that the first element of 'sum' (which should only add two values
- // together) ends up calculated correctly.
- __m128i sum_ = _mm_add_epi32(_mm_slli_si128(b1, 4),
- _mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4)));
- __m128i sum_sq_ = _mm_add_epi32(
- _mm_slli_si128(a1, 4), _mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4)));
- __m128i n = _mm_set_epi32(3 * h, 3 * h, 3 * h, 2 * h);
- __m128i one_over_n =
- _mm_set_epi32(one_by_x[3 * h - 1], one_by_x[3 * h - 1],
- one_by_x[3 * h - 1], one_by_x[2 * h - 1]);
- __m128i s = _mm_set_epi32(
- sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1],
- sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][2 * h - 1]);
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride, A,
- B);
-
- n = _mm_set1_epi32(3 * h);
- one_over_n = _mm_set1_epi32(one_by_x[3 * h - 1]);
- s = _mm_set1_epi32(sgrproj_mtable[eps - 1][3 * h - 1]);
-
- // Re-align a1 and b1 so that they start at index i * buf_stride + 3
- a2 = _mm_alignr_epi8(a2, a1, 12);
- b2 = _mm_alignr_epi8(b2, b1, 12);
-
- // Note: When the width is not a multiple of 4, this loop may end up
- // writing to the last 4 columns of the frame, potentially with incorrect
- // values (especially for r=2 and r=3).
- // This is fine, since we fix up those values in the block after this
- // loop, and in exchange we never have more than four values to
- // write / fix up after this loop finishes.
- for (j = 4; j < width_extend - 4; j += 4) {
- a1 = a2;
- b1 = b2;
- a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 3]);
- b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 3]);
- /* Loop invariant: At this point,
- a1 = original A[i * buf_stride + j - 1 : i * buf_stride + j + 3]
- a2 = original A[i * buf_stride + j + 3 : i * buf_stride + j + 7]
- and similar for b1,b2 and B
- */
- sum_ = _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4),
- _mm_alignr_epi8(b2, b1, 8)));
- sum_sq_ = _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4),
- _mm_alignr_epi8(a2, a1, 8)));
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth,
- i * buf_stride + j, A, B);
- }
- __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 3]);
- __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 3]);
-
- j = width - 4;
- switch (width % 4) {
- case 0:
- a1 = a2;
- b1 = b2;
- a2 = a3;
- b2 = b3;
- break;
- case 1:
- a1 = _mm_alignr_epi8(a2, a1, 4);
- b1 = _mm_alignr_epi8(b2, b1, 4);
- a2 = _mm_alignr_epi8(a3, a2, 4);
- b2 = _mm_alignr_epi8(b3, b2, 4);
- break;
- case 2:
- a1 = _mm_alignr_epi8(a2, a1, 8);
- b1 = _mm_alignr_epi8(b2, b1, 8);
- a2 = _mm_alignr_epi8(a3, a2, 8);
- b2 = _mm_alignr_epi8(b3, b2, 8);
- break;
- case 3:
- a1 = _mm_alignr_epi8(a2, a1, 12);
- b1 = _mm_alignr_epi8(b2, b1, 12);
- a2 = _mm_alignr_epi8(a3, a2, 12);
- b2 = _mm_alignr_epi8(b3, b2, 12);
- break;
+// Compute two integral images from src. B sums elements; A sums their
+// squares. The images are offset by one pixel, so each has dimensions
+// (width + 1) x (height + 1), and their first row and column are zero.
+//
+// A+1 and B+1 should be aligned to 16 bytes. buf_stride should be a multiple
+// of 4.
+static void integral_images(const uint8_t *src, int src_stride, int width,
+ int height, int32_t *A, int32_t *B,
+ int buf_stride) {
+ // Write out the zero top row
+ memset(A, 0, sizeof(*A) * (width + 1));
+ memset(B, 0, sizeof(*B) * (width + 1));
+
+ const __m128i zero = _mm_setzero_si128();
+ for (int i = 0; i < height; ++i) {
+ // Zero the left column.
+ A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;
+
+ // ldiff is the difference H - D where H is the output sample immediately
+ // to the left and D is the output sample above it. These are scalars,
+ // replicated across the four lanes.
+ __m128i ldiff1 = zero, ldiff2 = zero;
+ for (int j = 0; j < width; j += 4) {
+ const int ABj = 1 + j;
+
+ const __m128i above1 = xx_load_128(B + ABj + i * buf_stride);
+ const __m128i above2 = xx_load_128(A + ABj + i * buf_stride);
+
+ const __m128i x1 = xx_load_extend_8_32(src + j + i * src_stride);
+ const __m128i x2 = _mm_madd_epi16(x1, x1);
+
+ const __m128i sc1 = scan_32(x1);
+ const __m128i sc2 = scan_32(x2);
+
+ const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1);
+ const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2);
+
+ xx_store_128(B + ABj + (i + 1) * buf_stride, row1);
+ xx_store_128(A + ABj + (i + 1) * buf_stride, row2);
+
+ // Calculate the new H - D.
+ ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff);
+ ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff);
}
-
- // Zero out the data loaded from "off the edge" of the array
- __m128i zero = _mm_setzero_si128();
- a2 = _mm_blend_epi16(a2, zero, 0xfc);
- b2 = _mm_blend_epi16(b2, zero, 0xfc);
-
- sum_ = _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4),
- _mm_alignr_epi8(b2, b1, 8)));
- sum_sq_ = _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4),
- _mm_alignr_epi8(a2, a1, 8)));
- n = _mm_set_epi32(2 * h, 3 * h, 3 * h, 3 * h);
- one_over_n = _mm_set_epi32(one_by_x[2 * h - 1], one_by_x[3 * h - 1],
- one_by_x[3 * h - 1], one_by_x[3 * h - 1]);
- s = _mm_set_epi32(
- sgrproj_mtable[eps - 1][2 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1],
- sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1]);
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j,
- A, B);
}
}
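// Scalar reference for the recurrence the vector loop above implements
// (a sketch for exposition; integral_image_scalar is a hypothetical name).
// With the one-pixel offset, each output sample satisfies
//   out[i+1][j+1] = src[i][j] + out[i+1][j] + out[i][j+1] - out[i][j]
// The SSE loop reaches the same value through the in-register prefix sum
// (scan_32) plus the carried scalar correction ldiff = H - D.
static void integral_image_scalar(const uint8_t *src, int src_stride,
                                  int width, int height, int32_t *B,
                                  int buf_stride) {
  for (int j = 0; j <= width; ++j) B[j] = 0;
  for (int i = 0; i < height; ++i) {
    int32_t *row = B + (i + 1) * buf_stride;
    const int32_t *above = B + i * buf_stride;
    row[0] = 0;
    for (int j = 0; j < width; ++j)
      row[j + 1] = src[i * src_stride + j] + row[j] + above[j + 1] - above[j];
  }
}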
-static void selfguided_restoration_2_v(uint8_t *src, int width, int height,
- int src_stride, int32_t *A, int32_t *B,
- int buf_stride) {
- int i, j;
-
- // Vertical sum
- int width_extend = (width + 3) & ~3;
- for (j = 0; j < width_extend; j += 4) {
- __m128i a, b, c, c2, x, y, x2, y2;
- __m128i sum, sum_sq, tmp;
-
- a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j]));
- b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j]));
- c = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[2 * src_stride + j]));
+// Compute two integral images from src. B sums elements; A sums their
+// squares. The images use the same one-pixel offset as in integral_images.
+//
+// A+1 and B+1 should be aligned to 16 bytes (the stores begin at column 1).
+// buf_stride should be a multiple of 4.
+static void integral_images_highbd(const uint16_t *src, int src_stride,
+ int width, int height, int32_t *A,
+ int32_t *B, int buf_stride) {
+ // Write out the zero top row
+ memset(A, 0, sizeof(*A) * (width + 1));
+ memset(B, 0, sizeof(*B) * (width + 1));
- sum = _mm_cvtepi16_epi32(_mm_add_epi16(_mm_add_epi16(a, b), c));
- // Important: Since c may be up to 2^8, the result on squaring may
- // be up to 2^16. So we need to zero-extend, not sign-extend.
- c2 = _mm_cvtepu16_epi32(_mm_mullo_epi16(c, c));
- tmp = _mm_unpacklo_epi16(a, b);
- sum_sq = _mm_add_epi32(_mm_madd_epi16(tmp, tmp), c2);
+ const __m128i zero = _mm_setzero_si128();
+ for (int i = 0; i < height; ++i) {
+ // Zero the left column.
+ A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;
- _mm_store_si128((__m128i *)&B[j], sum);
- _mm_store_si128((__m128i *)&A[j], sum_sq);
+ // ldiff is the difference H - D where H is the output sample immediately
+ // to the left and D is the output sample above it. These are scalars,
+ // replicated across the four lanes.
+ __m128i ldiff1 = zero, ldiff2 = zero;
+ for (int j = 0; j < width; j += 4) {
+ const int ABj = 1 + j;
- x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[3 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
+ const __m128i above1 = xx_load_128(B + ABj + i * buf_stride);
+ const __m128i above2 = xx_load_128(A + ABj + i * buf_stride);
- _mm_store_si128((__m128i *)&B[buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq);
+ const __m128i x1 = xx_load_extend_16_32(src + j + i * src_stride);
+ const __m128i x2 = _mm_madd_epi16(x1, x1);
- x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[4 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
+ const __m128i sc1 = scan_32(x1);
+ const __m128i sc2 = scan_32(x2);
- for (i = 2; i < height - 3; ++i) {
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
+ const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1);
+ const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2);
- x = _mm_cvtepu8_epi32(
- _mm_cvtsi32_si128(*((int *)&src[(i - 2) * src_stride + j])));
- y = _mm_cvtepu8_epi32(
- _mm_cvtsi32_si128(*((int *)&src[(i + 3) * src_stride + j])));
+ xx_store_128(B + ABj + (i + 1) * buf_stride, row1);
+ xx_store_128(A + ABj + (i + 1) * buf_stride, row2);
- sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x));
-
- x2 = _mm_mullo_epi32(x, x);
- y2 = _mm_mullo_epi32(y, y);
-
- sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2));
+ // Calculate the new H - D.
+ ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff);
+ ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff);
}
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu8_epi32(
- xx_loadl_32((__m128i *)&src[(i - 2) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu8_epi32(
- xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq);
}
}
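// Why _mm_madd_epi16(x1, x1) squares each lane in both routines above
// (reasoning sketch): after widening, every 32-bit lane holds (lo, 0) as a
// pair of 16-bit halves with lo below 2^12 (2^8 in the 8-bit path), so the
// madd evaluates lo * lo + 0 * 0 per lane -- an exact square, cheaper than
// _mm_mullo_epi32. In isolation (square_lanes is a hypothetical name):
static INLINE __m128i square_lanes(__m128i x) {
  return _mm_madd_epi16(x, x);  // exact only while every lane is < 2^15
}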
-static void selfguided_restoration_2_h(int32_t *A, int32_t *B, int width,
- int height, int buf_stride, int eps,
- int bit_depth) {
- int i, j;
-
- // Horizontal sum
- int width_extend = (width + 3) & ~3;
- for (i = 0; i < height; ++i) {
- int h = AOMMIN(3, height - i) + AOMMIN(2, i);
-
- __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]);
- __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]);
- __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]);
- __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]);
-
- __m128i sum_ = _mm_add_epi32(
- _mm_add_epi32(
- _mm_add_epi32(_mm_slli_si128(b1, 8), _mm_slli_si128(b1, 4)),
- _mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4))),
- _mm_alignr_epi8(b2, b1, 8));
- __m128i sum_sq_ = _mm_add_epi32(
- _mm_add_epi32(
- _mm_add_epi32(_mm_slli_si128(a1, 8), _mm_slli_si128(a1, 4)),
- _mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4))),
- _mm_alignr_epi8(a2, a1, 8));
-
- __m128i n = _mm_set_epi32(5 * h, 5 * h, 4 * h, 3 * h);
- __m128i one_over_n =
- _mm_set_epi32(one_by_x[5 * h - 1], one_by_x[5 * h - 1],
- one_by_x[4 * h - 1], one_by_x[3 * h - 1]);
- __m128i s = _mm_set_epi32(
- sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1],
- sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][3 * h - 1]);
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride, A,
- B);
-
- // Re-align a1 and b1 so that they start at index i * buf_stride + 2
- a2 = _mm_alignr_epi8(a2, a1, 8);
- b2 = _mm_alignr_epi8(b2, b1, 8);
-
- n = _mm_set1_epi32(5 * h);
- one_over_n = _mm_set1_epi32(one_by_x[5 * h - 1]);
- s = _mm_set1_epi32(sgrproj_mtable[eps - 1][5 * h - 1]);
-
- for (j = 4; j < width_extend - 4; j += 4) {
- a1 = a2;
- a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 2]);
- b1 = b2;
- b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 2]);
- /* Loop invariant: At this point,
- a1 = original A[i * buf_stride + j - 2 : i * buf_stride + j + 2]
- a2 = original A[i * buf_stride + j + 2 : i * buf_stride + j + 6]
- and similar for b1,b2 and B
- */
- sum_ = _mm_add_epi32(
- _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4),
- _mm_alignr_epi8(b2, b1, 8))),
- _mm_add_epi32(_mm_alignr_epi8(b2, b1, 12), b2));
- sum_sq_ = _mm_add_epi32(
- _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4),
- _mm_alignr_epi8(a2, a1, 8))),
- _mm_add_epi32(_mm_alignr_epi8(a2, a1, 12), a2));
-
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth,
- i * buf_stride + j, A, B);
- }
- // If the width is not a multiple of 4, we need to reset j to width - 4
- // and adjust a1, a2, b1, b2 so that the loop invariant above is maintained
- __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 2]);
- __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 2]);
-
- j = width - 4;
- switch (width % 4) {
- case 0:
- a1 = a2;
- b1 = b2;
- a2 = a3;
- b2 = b3;
- break;
- case 1:
- a1 = _mm_alignr_epi8(a2, a1, 4);
- b1 = _mm_alignr_epi8(b2, b1, 4);
- a2 = _mm_alignr_epi8(a3, a2, 4);
- b2 = _mm_alignr_epi8(b3, b2, 4);
- break;
- case 2:
- a1 = _mm_alignr_epi8(a2, a1, 8);
- b1 = _mm_alignr_epi8(b2, b1, 8);
- a2 = _mm_alignr_epi8(a3, a2, 8);
- b2 = _mm_alignr_epi8(b3, b2, 8);
- break;
- case 3:
- a1 = _mm_alignr_epi8(a2, a1, 12);
- b1 = _mm_alignr_epi8(b2, b1, 12);
- a2 = _mm_alignr_epi8(a3, a2, 12);
- b2 = _mm_alignr_epi8(b3, b2, 12);
- break;
- }
-
- // Zero out the data loaded from "off the edge" of the array
- __m128i zero = _mm_setzero_si128();
- a2 = _mm_blend_epi16(a2, zero, 0xf0);
- b2 = _mm_blend_epi16(b2, zero, 0xf0);
-
- sum_ = _mm_add_epi32(
- _mm_add_epi32(b1, _mm_add_epi32(_mm_alignr_epi8(b2, b1, 4),
- _mm_alignr_epi8(b2, b1, 8))),
- _mm_add_epi32(_mm_alignr_epi8(b2, b1, 12), b2));
- sum_sq_ = _mm_add_epi32(
- _mm_add_epi32(a1, _mm_add_epi32(_mm_alignr_epi8(a2, a1, 4),
- _mm_alignr_epi8(a2, a1, 8))),
- _mm_add_epi32(_mm_alignr_epi8(a2, a1, 12), a2));
-
- n = _mm_set_epi32(3 * h, 4 * h, 5 * h, 5 * h);
- one_over_n = _mm_set_epi32(one_by_x[3 * h - 1], one_by_x[4 * h - 1],
- one_by_x[5 * h - 1], one_by_x[5 * h - 1]);
- s = _mm_set_epi32(
- sgrproj_mtable[eps - 1][3 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1],
- sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1]);
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j,
- A, B);
- }
+// Compute 4 values of boxsum from the given integral image. ii should point
+// at the middle of the box (for the first value). r is the box radius.
+static INLINE __m128i boxsum_from_ii(const int32_t *ii, int stride, int r) {
+ const __m128i tl = xx_loadu_128(ii - (r + 1) - (r + 1) * stride);
+ const __m128i tr = xx_loadu_128(ii + (r + 0) - (r + 1) * stride);
+ const __m128i bl = xx_loadu_128(ii - (r + 1) + r * stride);
+ const __m128i br = xx_loadu_128(ii + (r + 0) + r * stride);
+ const __m128i u = _mm_sub_epi32(tr, tl);
+ const __m128i v = _mm_sub_epi32(br, bl);
+ return _mm_sub_epi32(v, u);
}
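// The same inclusion-exclusion one sample at a time (illustrative sketch;
// boxsum_from_ii_scalar is a hypothetical name): four corner reads of the
// integral image yield the (2r+1) x (2r+1) box sum.
static INLINE int32_t boxsum_from_ii_scalar(const int32_t *ii, int stride,
                                            int r) {
  const int32_t tl = ii[-(r + 1) - (r + 1) * stride];
  const int32_t tr = ii[(r + 0) - (r + 1) * stride];
  const int32_t bl = ii[-(r + 1) + r * stride];
  const int32_t br = ii[(r + 0) + r * stride];
  return (br - bl) - (tr - tl);  // == tl + br - tr - bl
}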
-static void selfguided_restoration_3_v(uint8_t *src, int width, int height,
- int src_stride, int32_t *A, int32_t *B,
- int buf_stride) {
- int i, j;
-
- // Vertical sum over 7-pixel regions, 4 columns at a time
- int width_extend = (width + 3) & ~3;
- for (j = 0; j < width_extend; j += 4) {
- __m128i a, b, c, d, x, y, x2, y2;
- __m128i sum, sum_sq, tmp, tmp2;
+static __m128i round_for_shift(unsigned shift) {
+ return _mm_set1_epi32((1 << shift) >> 1);
+}
- a = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[j]));
- b = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[src_stride + j]));
- c = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[2 * src_stride + j]));
- d = _mm_cvtepu8_epi16(xx_loadl_32((__m128i *)&src[3 * src_stride + j]));
+static __m128i compute_p(__m128i sum1, __m128i sum2, int bit_depth, int n) {
+ __m128i an, bb;
+ if (bit_depth > 8) {
+ const __m128i rounding_a = round_for_shift(2 * (bit_depth - 8));
+ const __m128i rounding_b = round_for_shift(bit_depth - 8);
+ const __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8));
+ const __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8);
+ const __m128i a = _mm_srl_epi32(_mm_add_epi32(sum2, rounding_a), shift_a);
+ const __m128i b = _mm_srl_epi32(_mm_add_epi32(sum1, rounding_b), shift_b);
+ // b < 2^14, so we can use a 16-bit madd rather than a 32-bit
+ // mullo to square it
+ bb = _mm_madd_epi16(b, b);
+ an = _mm_max_epi32(_mm_mullo_epi32(a, _mm_set1_epi32(n)), bb);
+ } else {
+ bb = _mm_madd_epi16(sum1, sum1);
+ an = _mm_mullo_epi32(sum2, _mm_set1_epi32(n));
+ }
+ return _mm_sub_epi32(an, bb);
+}
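// In scalar terms (a sketch; compute_p_scalar is a hypothetical name):
// p = n * sum_of_squares - sum^2, i.e. n^2 times the box variance, which
// is non-negative by the Cauchy-Schwarz inequality. For bit depths above
// 8, both sums are first rounded down to an 8-bit scale so that p fits in
// 32 bits; the _mm_max_epi32 in that branch guards against the rounding
// pushing p negative.
static INLINE int32_t compute_p_scalar(int32_t sum1, int32_t sum2, int n) {
  return n * sum2 - sum1 * sum1;
}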
- sum = _mm_cvtepi16_epi32(
- _mm_add_epi16(_mm_add_epi16(a, b), _mm_add_epi16(c, d)));
- tmp = _mm_unpacklo_epi16(a, b);
- tmp2 = _mm_unpacklo_epi16(c, d);
- sum_sq =
- _mm_add_epi32(_mm_madd_epi16(tmp, tmp), _mm_madd_epi16(tmp2, tmp2));
+// Assumes that C, D are integral images for the original buffer which has been
+// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
+// on the sides. A, B, C, D point at logical position (0, 0).
+static void calc_ab(int32_t *A, int32_t *B, const int32_t *C, const int32_t *D,
+ int width, int height, int buf_stride, int bit_depth,
+ int sgr_params_idx, int radius_idx) {
+ const sgr_params_type *const params = &sgr_params[sgr_params_idx];
+ const int r = params->r[radius_idx];
+ const int n = (2 * r + 1) * (2 * r + 1);
+ const __m128i s = _mm_set1_epi32(params->s[radius_idx]);
+ // one_by_x[n - 1] is 2^12/n, so it easily fits in an int16
+ const __m128i one_over_n = _mm_set1_epi32(one_by_x[n - 1]);
+
+ const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
+ const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);
+
+ // Set up masks: mask[idx] keeps the first idx 32-bit lanes, zeroing the rest
+ const __m128i ones32 = _mm_set_epi32(0, 0, 0, 0xffffffff);
+ __m128i mask[4];
+ for (int idx = 0; idx < 4; idx++) {
+ const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx));
+ mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
+ }
- _mm_store_si128((__m128i *)&B[j], sum);
- _mm_store_si128((__m128i *)&A[j], sum_sq);
+ for (int i = -1; i < height + 1; ++i) {
+ for (int j = -1; j < width + 1; j += 4) {
+ const int32_t *Cij = C + i * buf_stride + j;
+ const int32_t *Dij = D + i * buf_stride + j;
- x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[4 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
+ __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r);
+ __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r);
- _mm_store_si128((__m128i *)&B[buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq);
+ // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain
+ // some uninitialised data in their upper words. We use a mask to
+ // ensure that these bits are set to 0.
+ int idx = AOMMIN(4, width + 1 - j);
+ assert(idx >= 1);
- x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[5 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
+ if (idx < 4) {
+ sum1 = _mm_and_si128(mask[idx], sum1);
+ sum2 = _mm_and_si128(mask[idx], sum2);
+ }
- _mm_store_si128((__m128i *)&B[2 * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[2 * buf_stride + j], sum_sq);
+ const __m128i p = compute_p(sum1, sum2, bit_depth, n);
- x = _mm_cvtepu8_epi32(xx_loadl_32((__m128i *)&src[6 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
+ const __m128i z = _mm_min_epi32(
+ _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z),
+ SGRPROJ_MTABLE_BITS),
+ _mm_set1_epi32(255));
- for (i = 3; i < height - 4; ++i) {
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
+ // 'Gather' type instructions are not available pre-AVX2, so synthesize a
+ // gather using scalar loads.
+ const __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)],
+ x_by_xplus1[_mm_extract_epi32(z, 2)],
+ x_by_xplus1[_mm_extract_epi32(z, 1)],
+ x_by_xplus1[_mm_extract_epi32(z, 0)]);
- x = _mm_cvtepu8_epi32(xx_loadl_32(&src[(i - 3) * src_stride + j]));
- y = _mm_cvtepu8_epi32(xx_loadl_32(&src[(i + 4) * src_stride + j]));
+ xx_storeu_128(A + i * buf_stride + j, a_res);
- sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x));
+ const __m128i a_complement =
+ _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res);
- x2 = _mm_mullo_epi32(x, x);
- y2 = _mm_mullo_epi32(y, y);
+ // sum1 might have lanes greater than 2^15, so we can't use madd to do
+ // multiplication involving sum1. However, a_complement and one_over_n
+ // are both less than 2^15, so we can multiply them first.
+ const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n);
+ const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1);
+ const __m128i b_res =
+ _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS);
- sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2));
+ xx_storeu_128(B + i * buf_stride + j, b_res);
}
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu8_epi32(
- xx_loadl_32((__m128i *)&src[(i - 3) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu8_epi32(
- xx_loadl_32((__m128i *)&src[(i - 2) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu8_epi32(
- xx_loadl_32((__m128i *)&src[(i - 1) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 3) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 3) * buf_stride + j], sum_sq);
}
}
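// Scalar outline of the per-sample work in the loop above (a sketch for
// exposition, written with the same table names):
//   p = compute_p(sum1, sum2, bit_depth, n);
//   z = min(255, ROUND_POWER_OF_TWO(p * s, SGRPROJ_MTABLE_BITS));
//   a = x_by_xplus1[z];                      // ~ SGRPROJ_SGR * z / (z + 1)
//   b = ROUND_POWER_OF_TWO((SGRPROJ_SGR - a) * sum1 * one_by_x[n - 1],
//                          SGRPROJ_RECIP_BITS);
// so a grows toward SGRPROJ_SGR as the box variance rises, and b scales
// the complementary weight by the box mean.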
-static void selfguided_restoration_3_h(int32_t *A, int32_t *B, int width,
- int height, int buf_stride, int eps,
- int bit_depth) {
- int i, j;
- // Horizontal sum over 7-pixel regions of dst
- int width_extend = (width + 3) & ~3;
- for (i = 0; i < height; ++i) {
- int h = AOMMIN(4, height - i) + AOMMIN(3, i);
-
- __m128i a1 = _mm_loadu_si128((__m128i *)&A[i * buf_stride]);
- __m128i b1 = _mm_loadu_si128((__m128i *)&B[i * buf_stride]);
- __m128i a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + 4]);
- __m128i b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + 4]);
-
- __m128i sum_ = _mm_add_epi32(
- _mm_add_epi32(
- _mm_add_epi32(_mm_slli_si128(b1, 12), _mm_slli_si128(b1, 8)),
- _mm_add_epi32(_mm_slli_si128(b1, 4), b1)),
- _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(b2, b1, 4),
- _mm_alignr_epi8(b2, b1, 8)),
- _mm_alignr_epi8(b2, b1, 12)));
- __m128i sum_sq_ = _mm_add_epi32(
- _mm_add_epi32(
- _mm_add_epi32(_mm_slli_si128(a1, 12), _mm_slli_si128(a1, 8)),
- _mm_add_epi32(_mm_slli_si128(a1, 4), a1)),
- _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(a2, a1, 4),
- _mm_alignr_epi8(a2, a1, 8)),
- _mm_alignr_epi8(a2, a1, 12)));
-
- __m128i n = _mm_set_epi32(7 * h, 6 * h, 5 * h, 4 * h);
- __m128i one_over_n =
- _mm_set_epi32(one_by_x[7 * h - 1], one_by_x[6 * h - 1],
- one_by_x[5 * h - 1], one_by_x[4 * h - 1]);
- __m128i s = _mm_set_epi32(
- sgrproj_mtable[eps - 1][7 * h - 1], sgrproj_mtable[eps - 1][6 * h - 1],
- sgrproj_mtable[eps - 1][5 * h - 1], sgrproj_mtable[eps - 1][4 * h - 1]);
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride, A,
- B);
-
- // Re-align a1 and b1 so that they start at index i * buf_stride + 1
- a2 = _mm_alignr_epi8(a2, a1, 4);
- b2 = _mm_alignr_epi8(b2, b1, 4);
-
- n = _mm_set1_epi32(7 * h);
- one_over_n = _mm_set1_epi32(one_by_x[7 * h - 1]);
- s = _mm_set1_epi32(sgrproj_mtable[eps - 1][7 * h - 1]);
-
- for (j = 4; j < width_extend - 4; j += 4) {
- a1 = a2;
- a2 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 1]);
- b1 = b2;
- b2 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 1]);
- __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 5]);
- __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 5]);
- /* Loop invariant: At this point,
- a1 = original A[i * buf_stride + j - 3 : i * buf_stride + j + 1]
- a2 = original A[i * buf_stride + j + 1 : i * buf_stride + j + 5]
- a3 = original A[i * buf_stride + j + 5 : i * buf_stride + j + 9]
- and similar for b1,b2,b3 and B
- */
- sum_ = _mm_add_epi32(
- _mm_add_epi32(_mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4)),
- _mm_add_epi32(_mm_alignr_epi8(b2, b1, 8),
- _mm_alignr_epi8(b2, b1, 12))),
- _mm_add_epi32(_mm_add_epi32(b2, _mm_alignr_epi8(b3, b2, 4)),
- _mm_alignr_epi8(b3, b2, 8)));
- sum_sq_ = _mm_add_epi32(
- _mm_add_epi32(_mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4)),
- _mm_add_epi32(_mm_alignr_epi8(a2, a1, 8),
- _mm_alignr_epi8(a2, a1, 12))),
- _mm_add_epi32(_mm_add_epi32(a2, _mm_alignr_epi8(a3, a2, 4)),
- _mm_alignr_epi8(a3, a2, 8)));
-
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth,
- i * buf_stride + j, A, B);
- }
- __m128i a3 = _mm_loadu_si128((__m128i *)&A[i * buf_stride + j + 1]);
- __m128i b3 = _mm_loadu_si128((__m128i *)&B[i * buf_stride + j + 1]);
-
- j = width - 4;
- switch (width % 4) {
- case 0:
- a1 = a2;
- b1 = b2;
- a2 = a3;
- b2 = b3;
- break;
- case 1:
- a1 = _mm_alignr_epi8(a2, a1, 4);
- b1 = _mm_alignr_epi8(b2, b1, 4);
- a2 = _mm_alignr_epi8(a3, a2, 4);
- b2 = _mm_alignr_epi8(b3, b2, 4);
- break;
- case 2:
- a1 = _mm_alignr_epi8(a2, a1, 8);
- b1 = _mm_alignr_epi8(b2, b1, 8);
- a2 = _mm_alignr_epi8(a3, a2, 8);
- b2 = _mm_alignr_epi8(b3, b2, 8);
- break;
- case 3:
- a1 = _mm_alignr_epi8(a2, a1, 12);
- b1 = _mm_alignr_epi8(b2, b1, 12);
- a2 = _mm_alignr_epi8(a3, a2, 12);
- b2 = _mm_alignr_epi8(b3, b2, 12);
- break;
- }
-
- // Zero out the data loaded from "off the edge" of the array
- __m128i zero = _mm_setzero_si128();
- a2 = _mm_blend_epi16(a2, zero, 0xc0);
- b2 = _mm_blend_epi16(b2, zero, 0xc0);
-
- sum_ = _mm_add_epi32(
- _mm_add_epi32(_mm_add_epi32(b1, _mm_alignr_epi8(b2, b1, 4)),
- _mm_add_epi32(_mm_alignr_epi8(b2, b1, 8),
- _mm_alignr_epi8(b2, b1, 12))),
- _mm_add_epi32(_mm_add_epi32(b2, _mm_alignr_epi8(zero, b2, 4)),
- _mm_alignr_epi8(zero, b2, 8)));
- sum_sq_ = _mm_add_epi32(
- _mm_add_epi32(_mm_add_epi32(a1, _mm_alignr_epi8(a2, a1, 4)),
- _mm_add_epi32(_mm_alignr_epi8(a2, a1, 8),
- _mm_alignr_epi8(a2, a1, 12))),
- _mm_add_epi32(_mm_add_epi32(a2, _mm_alignr_epi8(zero, a2, 4)),
- _mm_alignr_epi8(zero, a2, 8)));
-
- n = _mm_set_epi32(4 * h, 5 * h, 6 * h, 7 * h);
- one_over_n = _mm_set_epi32(one_by_x[4 * h - 1], one_by_x[5 * h - 1],
- one_by_x[6 * h - 1], one_by_x[7 * h - 1]);
- s = _mm_set_epi32(
- sgrproj_mtable[eps - 1][4 * h - 1], sgrproj_mtable[eps - 1][5 * h - 1],
- sgrproj_mtable[eps - 1][6 * h - 1], sgrproj_mtable[eps - 1][7 * h - 1]);
- calc_block(sum_, sum_sq_, n, &one_over_n, &s, bit_depth, i * buf_stride + j,
- A, B);
- }
+// Calculate 4 values of the "cross sum" starting at buf. This is a 3x3 filter
+// where the outer four corners have weight 3 and all other pixels have weight
+// 4.
+//
+// Pixels are indexed like this:
+// xtl xt xtr
+// xl x xr
+// xbl xb xbr
+//
+// buf points to x
+//
+// fours = xl + xt + xr + xb + x
+// threes = xtl + xtr + xbr + xbl
+// cross_sum = 4 * fours + 3 * threes
+// = 4 * (fours + threes) - threes
+// = (fours + threes) << 2 - threes
+static INLINE __m128i cross_sum(const int32_t *buf, int stride) {
+ const __m128i xtl = xx_loadu_128(buf - 1 - stride);
+ const __m128i xt = xx_loadu_128(buf - stride);
+ const __m128i xtr = xx_loadu_128(buf + 1 - stride);
+ const __m128i xl = xx_loadu_128(buf - 1);
+ const __m128i x = xx_loadu_128(buf);
+ const __m128i xr = xx_loadu_128(buf + 1);
+ const __m128i xbl = xx_loadu_128(buf - 1 + stride);
+ const __m128i xb = xx_loadu_128(buf + stride);
+ const __m128i xbr = xx_loadu_128(buf + 1 + stride);
+
+ const __m128i fours = _mm_add_epi32(
+ xl, _mm_add_epi32(xt, _mm_add_epi32(xr, _mm_add_epi32(xb, x))));
+ const __m128i threes =
+ _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl)));
+
+ return _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(fours, threes), 2), threes);
}
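// Scalar equivalent for one output sample (a sketch; cross_sum_scalar is a
// hypothetical name), matching the weights described above:
static INLINE int32_t cross_sum_scalar(const int32_t *buf, int stride) {
  const int32_t fours =
      buf[-1] + buf[-stride] + buf[1] + buf[stride] + buf[0];
  const int32_t threes = buf[-stride - 1] + buf[-stride + 1] +
                         buf[stride - 1] + buf[stride + 1];
  return ((fours + threes) << 2) - threes;  // 4 * fours + 3 * threes
}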
-void av1_selfguided_restoration_sse4_1(uint8_t *dgd, int width, int height,
- int dgd_stride, int32_t *dst,
- int dst_stride, int r, int eps) {
- const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
- const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
- int32_t A_[RESTORATION_PROC_UNIT_PELS];
- int32_t B_[RESTORATION_PROC_UNIT_PELS];
- int32_t *A = A_;
- int32_t *B = B_;
- int i, j;
- // Adjusting the stride of A and B here appears to avoid bad cache effects,
- // leading to a significant speed improvement.
- // We also align the stride to a multiple of 16 bytes for efficiency.
- int buf_stride = ((width_ext + 3) & ~3) + 16;
-
- // Don't filter tiles with dimensions < 5 on any axis
- if ((width < 5) || (height < 5)) return;
-
- uint8_t *dgd0 = dgd - dgd_stride * SGRPROJ_BORDER_VERT - SGRPROJ_BORDER_HORZ;
- if (r == 1) {
- selfguided_restoration_1_v(dgd0, width_ext, height_ext, dgd_stride, A, B,
- buf_stride);
- selfguided_restoration_1_h(A, B, width_ext, height_ext, buf_stride, eps, 8);
- } else if (r == 2) {
- selfguided_restoration_2_v(dgd0, width_ext, height_ext, dgd_stride, A, B,
- buf_stride);
- selfguided_restoration_2_h(A, B, width_ext, height_ext, buf_stride, eps, 8);
- } else if (r == 3) {
- selfguided_restoration_3_v(dgd0, width_ext, height_ext, dgd_stride, A, B,
- buf_stride);
- selfguided_restoration_3_h(A, B, width_ext, height_ext, buf_stride, eps, 8);
- } else {
- assert(0);
- }
- A += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
- B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
-
- {
- i = 0;
- j = 0;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k + buf_stride] +
- A[k + buf_stride + 1];
- const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k + buf_stride] +
- B[k + buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- for (j = 1; j < width - 1; ++j) {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k + buf_stride] +
- A[k + buf_stride - 1] + A[k + buf_stride + 1];
- const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k + buf_stride] +
- B[k + buf_stride - 1] + B[k + buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- j = width - 1;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k + buf_stride] +
- A[k + buf_stride - 1];
- const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k + buf_stride] +
- B[k + buf_stride - 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- }
- for (i = 1; i < height - 1; ++i) {
- j = 0;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) +
- A[k + 1] + A[k - buf_stride + 1] +
- A[k + buf_stride + 1];
- const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) +
- B[k + 1] + B[k - buf_stride + 1] +
- B[k + buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
-
- // Vectorize the innermost loop
- for (j = 1; j < width - 1; j += 4) {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 5;
-
- __m128i tmp0 = _mm_loadu_si128((__m128i *)&A[k - 1 - buf_stride]);
- __m128i tmp1 = _mm_loadu_si128((__m128i *)&A[k + 3 - buf_stride]);
- __m128i tmp2 = _mm_loadu_si128((__m128i *)&A[k - 1]);
- __m128i tmp3 = _mm_loadu_si128((__m128i *)&A[k + 3]);
- __m128i tmp4 = _mm_loadu_si128((__m128i *)&A[k - 1 + buf_stride]);
- __m128i tmp5 = _mm_loadu_si128((__m128i *)&A[k + 3 + buf_stride]);
-
- __m128i a0 = _mm_add_epi32(
- _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 4), tmp2),
- _mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 8),
- _mm_alignr_epi8(tmp5, tmp4, 4))),
- _mm_alignr_epi8(tmp1, tmp0, 4));
- __m128i a1 = _mm_add_epi32(_mm_add_epi32(tmp0, tmp4),
- _mm_add_epi32(_mm_alignr_epi8(tmp1, tmp0, 8),
- _mm_alignr_epi8(tmp5, tmp4, 8)));
- __m128i a = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(a0, a1), 2), a1);
-
- __m128i tmp6 = _mm_loadu_si128((__m128i *)&B[k - 1 - buf_stride]);
- __m128i tmp7 = _mm_loadu_si128((__m128i *)&B[k + 3 - buf_stride]);
- __m128i tmp8 = _mm_loadu_si128((__m128i *)&B[k - 1]);
- __m128i tmp9 = _mm_loadu_si128((__m128i *)&B[k + 3]);
- __m128i tmp10 = _mm_loadu_si128((__m128i *)&B[k - 1 + buf_stride]);
- __m128i tmp11 = _mm_loadu_si128((__m128i *)&B[k + 3 + buf_stride]);
-
- __m128i b0 = _mm_add_epi32(
- _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 4), tmp8),
- _mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 8),
- _mm_alignr_epi8(tmp11, tmp10, 4))),
- _mm_alignr_epi8(tmp7, tmp6, 4));
- __m128i b1 =
- _mm_add_epi32(_mm_add_epi32(tmp6, tmp10),
- _mm_add_epi32(_mm_alignr_epi8(tmp7, tmp6, 8),
- _mm_alignr_epi8(tmp11, tmp10, 8)));
- __m128i b = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(b0, b1), 2), b1);
-
- __m128i src = _mm_cvtepu8_epi32(_mm_loadu_si128((__m128i *)&dgd[l]));
-
- __m128i rounding = _mm_set1_epi32(
- (1 << (SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS)) >> 1);
- __m128i v = _mm_add_epi32(_mm_mullo_epi32(a, src), b);
+// The final filter for self-guided restoration. Computes a weighted average
+// across A, B with "cross sums" (see cross_sum implementation above).
+static void final_filter(int32_t *dst, int dst_stride, const int32_t *A,
+ const int32_t *B, int buf_stride, const void *dgd8,
+ int dgd_stride, int width, int height, int highbd) {
+ const int nb = 5;
+ const __m128i rounding =
+ round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
+ const uint8_t *dgd_real =
+ highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;
+
+ for (int i = 0; i < height; ++i) {
+ for (int j = 0; j < width; j += 4) {
+ const __m128i a = cross_sum(A + i * buf_stride + j, buf_stride);
+ const __m128i b = cross_sum(B + i * buf_stride + j, buf_stride);
+ const __m128i raw =
+ xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
+ const __m128i src =
+ highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);
+
+ __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
__m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding),
SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- _mm_storeu_si128((__m128i *)&dst[m], w);
- }
- // Deal with any extra pixels at the right-hand edge of the frame
- // (typically have 2 such pixels, but may have anywhere between 0 and 3)
- for (; j < width - 1; ++j) {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 5;
- const int32_t a =
- (A[k] + A[k - 1] + A[k + 1] + A[k - buf_stride] + A[k + buf_stride]) *
- 4 +
- (A[k - 1 - buf_stride] + A[k - 1 + buf_stride] +
- A[k + 1 - buf_stride] + A[k + 1 + buf_stride]) *
- 3;
- const int32_t b =
- (B[k] + B[k - 1] + B[k + 1] + B[k - buf_stride] + B[k + buf_stride]) *
- 4 +
- (B[k - 1 - buf_stride] + B[k - 1 + buf_stride] +
- B[k + 1 - buf_stride] + B[k + 1 + buf_stride]) *
- 3;
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
-
- j = width - 1;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) +
- A[k - 1] + A[k - buf_stride - 1] +
- A[k + buf_stride - 1];
- const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) +
- B[k - 1] + B[k - buf_stride - 1] +
- B[k + buf_stride - 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- }
-
- {
- i = height - 1;
- j = 0;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k - buf_stride] +
- A[k - buf_stride + 1];
- const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k - buf_stride] +
- B[k - buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- for (j = 1; j < width - 1; ++j) {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k - buf_stride] +
- A[k - buf_stride - 1] + A[k - buf_stride + 1];
- const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k - buf_stride] +
- B[k - buf_stride - 1] + B[k - buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- j = width - 1;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k - buf_stride] +
- A[k - buf_stride - 1];
- const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k - buf_stride] +
- B[k - buf_stride - 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- }
-}
-
-void av1_highpass_filter_sse4_1(uint8_t *dgd, int width, int height, int stride,
- int32_t *dst, int dst_stride, int corner,
- int edge) {
- int i, j;
- const int center = (1 << SGRPROJ_RST_BITS) - 4 * (corner + edge);
-
- {
- i = 0;
- j = 0;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] + edge * (dgd[k + 1] + dgd[k + stride] + dgd[k] * 2) +
- corner *
- (dgd[k + stride + 1] + dgd[k + 1] + dgd[k + stride] + dgd[k]);
- }
- for (j = 1; j < width - 1; ++j) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] = center * dgd[k] +
- edge * (dgd[k - 1] + dgd[k + stride] + dgd[k + 1] + dgd[k]) +
- corner * (dgd[k + stride - 1] + dgd[k + stride + 1] +
- dgd[k - 1] + dgd[k + 1]);
- }
- j = width - 1;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] + edge * (dgd[k - 1] + dgd[k + stride] + dgd[k] * 2) +
- corner *
- (dgd[k + stride - 1] + dgd[k - 1] + dgd[k + stride] + dgd[k]);
- }
- }
- {
- i = height - 1;
- j = 0;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] + edge * (dgd[k + 1] + dgd[k - stride] + dgd[k] * 2) +
- corner *
- (dgd[k - stride + 1] + dgd[k + 1] + dgd[k - stride] + dgd[k]);
- }
- for (j = 1; j < width - 1; ++j) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] = center * dgd[k] +
- edge * (dgd[k - 1] + dgd[k - stride] + dgd[k + 1] + dgd[k]) +
- corner * (dgd[k - stride - 1] + dgd[k - stride + 1] +
- dgd[k - 1] + dgd[k + 1]);
- }
- j = width - 1;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] + edge * (dgd[k - 1] + dgd[k - stride] + dgd[k] * 2) +
- corner *
- (dgd[k - stride - 1] + dgd[k - 1] + dgd[k - stride] + dgd[k]);
- }
- }
- __m128i center_ = _mm_set1_epi16(center);
- __m128i edge_ = _mm_set1_epi16(edge);
- __m128i corner_ = _mm_set1_epi16(corner);
- for (i = 1; i < height - 1; ++i) {
- j = 0;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] +
- edge * (dgd[k - stride] + dgd[k + 1] + dgd[k + stride] + dgd[k]) +
- corner * (dgd[k + stride + 1] + dgd[k - stride + 1] +
- dgd[k - stride] + dgd[k + stride]);
- }
- // Process in units of 8 pixels at a time.
- for (j = 1; j < width - 8; j += 8) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
-
- __m128i a = _mm_loadu_si128((__m128i *)&dgd[k - stride - 1]);
- __m128i b = _mm_loadu_si128((__m128i *)&dgd[k - 1]);
- __m128i c = _mm_loadu_si128((__m128i *)&dgd[k + stride - 1]);
-
- __m128i tl = _mm_cvtepu8_epi16(a);
- __m128i tr = _mm_cvtepu8_epi16(_mm_srli_si128(a, 8));
- __m128i cl = _mm_cvtepu8_epi16(b);
- __m128i cr = _mm_cvtepu8_epi16(_mm_srli_si128(b, 8));
- __m128i bl = _mm_cvtepu8_epi16(c);
- __m128i br = _mm_cvtepu8_epi16(_mm_srli_si128(c, 8));
-
- __m128i x = _mm_alignr_epi8(cr, cl, 2);
- __m128i y = _mm_add_epi16(_mm_add_epi16(_mm_alignr_epi8(tr, tl, 2), cl),
- _mm_add_epi16(_mm_alignr_epi8(br, bl, 2),
- _mm_alignr_epi8(cr, cl, 4)));
- __m128i z = _mm_add_epi16(_mm_add_epi16(tl, bl),
- _mm_add_epi16(_mm_alignr_epi8(tr, tl, 4),
- _mm_alignr_epi8(br, bl, 4)));
-
- __m128i res = _mm_add_epi16(_mm_mullo_epi16(x, center_),
- _mm_add_epi16(_mm_mullo_epi16(y, edge_),
- _mm_mullo_epi16(z, corner_)));
-
- _mm_storeu_si128((__m128i *)&dst[l], _mm_cvtepi16_epi32(res));
- _mm_storeu_si128((__m128i *)&dst[l + 4],
- _mm_cvtepi16_epi32(_mm_srli_si128(res, 8)));
- }
- // If there are enough pixels left in this row, do another batch of 4
- // pixels.
- for (; j < width - 4; j += 4) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
-
- __m128i a = _mm_loadl_epi64((__m128i *)&dgd[k - stride - 1]);
- __m128i b = _mm_loadl_epi64((__m128i *)&dgd[k - 1]);
- __m128i c = _mm_loadl_epi64((__m128i *)&dgd[k + stride - 1]);
-
- __m128i tl = _mm_cvtepu8_epi16(a);
- __m128i cl = _mm_cvtepu8_epi16(b);
- __m128i bl = _mm_cvtepu8_epi16(c);
-
- __m128i x = _mm_srli_si128(cl, 2);
- __m128i y = _mm_add_epi16(
- _mm_add_epi16(_mm_srli_si128(tl, 2), cl),
- _mm_add_epi16(_mm_srli_si128(bl, 2), _mm_srli_si128(cl, 4)));
- __m128i z = _mm_add_epi16(
- _mm_add_epi16(tl, bl),
- _mm_add_epi16(_mm_srli_si128(tl, 4), _mm_srli_si128(bl, 4)));
-
- __m128i res = _mm_add_epi16(_mm_mullo_epi16(x, center_),
- _mm_add_epi16(_mm_mullo_epi16(y, edge_),
- _mm_mullo_epi16(z, corner_)));
-
- _mm_storeu_si128((__m128i *)&dst[l], _mm_cvtepi16_epi32(res));
- }
- // Handle any leftover pixels
- for (; j < width - 1; ++j) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] +
- edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k + 1]) +
- corner * (dgd[k + stride - 1] + dgd[k - stride - 1] +
- dgd[k - stride + 1] + dgd[k + stride + 1]);
- }
- j = width - 1;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] +
- edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k]) +
- corner * (dgd[k + stride - 1] + dgd[k - stride - 1] +
- dgd[k - stride] + dgd[k + stride]);
+ xx_storeu_128(dst + i * dst_stride + j, w);
}
}
}
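// Why _mm_madd_epi16(a, src) is exact in the loop above (reasoning
// sketch): A values are at most SGRPROJ_SGR - 1 = 255 and the cross-sum
// weights total 4 * 5 + 3 * 4 = 32, so each lane of 'a' is below 2^13;
// pixel values stay below 2^12 even for high bit depth. Every 16-bit half
// involved is therefore under 2^15 and each product fits easily in a
// signed 32-bit lane.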
-void apply_selfguided_restoration_sse4_1(uint8_t *dat, int width, int height,
- int stride, int eps, int *xqd,
- uint8_t *dst, int dst_stride,
- int32_t *tmpbuf) {
- int xq[2];
- int32_t *flt1 = tmpbuf;
- int32_t *flt2 = flt1 + RESTORATION_TILEPELS_MAX;
- int i, j;
- assert(width * height <= RESTORATION_TILEPELS_MAX);
-#if USE_HIGHPASS_IN_SGRPROJ
- av1_highpass_filter_sse4_1(dat, width, height, stride, flt1, width,
- sgr_params[eps].corner, sgr_params[eps].edge);
-#else
- av1_selfguided_restoration_sse4_1(dat, width, height, stride, flt1, width,
- sgr_params[eps].r1, sgr_params[eps].e1);
-#endif // USE_HIGHPASS_IN_SGRPROJ
- av1_selfguided_restoration_sse4_1(dat, width, height, stride, flt2, width,
- sgr_params[eps].r2, sgr_params[eps].e2);
- decode_xq(xqd, xq);
-
- __m128i xq0 = _mm_set1_epi32(xq[0]);
- __m128i xq1 = _mm_set1_epi32(xq[1]);
- for (i = 0; i < height; ++i) {
- // Calculate output in batches of 8 pixels
- for (j = 0; j < width; j += 8) {
- const int k = i * width + j;
- const int l = i * stride + j;
- const int m = i * dst_stride + j;
- __m128i src =
- _mm_slli_epi16(_mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i *)&dat[l])),
- SGRPROJ_RST_BITS);
-
- const __m128i u_0 = _mm_cvtepu16_epi32(src);
- const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(src, 8));
-
- const __m128i f1_0 =
- _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k]), u_0);
- const __m128i f2_0 =
- _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k]), u_0);
- const __m128i f1_1 =
- _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k + 4]), u_1);
- const __m128i f2_1 =
- _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k + 4]), u_1);
-
- const __m128i v_0 = _mm_add_epi32(
- _mm_add_epi32(_mm_mullo_epi32(xq0, f1_0), _mm_mullo_epi32(xq1, f2_0)),
- _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS));
- const __m128i v_1 = _mm_add_epi32(
- _mm_add_epi32(_mm_mullo_epi32(xq0, f1_1), _mm_mullo_epi32(xq1, f2_1)),
- _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS));
-
- const __m128i rounding =
- _mm_set1_epi32((1 << (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS)) >> 1);
- const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding),
- SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
- const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding),
- SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
-
- const __m128i tmp = _mm_packs_epi32(w_0, w_1);
- const __m128i res = _mm_packus_epi16(tmp, tmp /* "don't care" value */);
- _mm_storel_epi64((__m128i *)&dst[m], res);
- }
- // Process leftover pixels
- for (; j < width; ++j) {
- const int k = i * width + j;
- const int l = i * stride + j;
- const int m = i * dst_stride + j;
- const int32_t u = ((int32_t)dat[l] << SGRPROJ_RST_BITS);
- const int32_t f1 = (int32_t)flt1[k] - u;
- const int32_t f2 = (int32_t)flt2[k] - u;
- const int32_t v = xq[0] * f1 + xq[1] * f2 + (u << SGRPROJ_PRJ_BITS);
- const int16_t w =
- (int16_t)ROUND_POWER_OF_TWO(v, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
- dst[m] = (uint16_t)clip_pixel(w);
- }
+// Assumes that C, D are integral images for the original buffer which has been
+// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
+// on the sides. A, B, C, D point at logical position (0, 0).
+static void calc_ab_fast(int32_t *A, int32_t *B, const int32_t *C,
+ const int32_t *D, int width, int height,
+ int buf_stride, int bit_depth, int sgr_params_idx,
+ int radius_idx) {
+ const sgr_params_type *const params = &sgr_params[sgr_params_idx];
+ const int r = params->r[radius_idx];
+ const int n = (2 * r + 1) * (2 * r + 1);
+ const __m128i s = _mm_set1_epi32(params->s[radius_idx]);
+ // one_by_x[n - 1] is 2^12/n, so it easily fits in an int16
+ const __m128i one_over_n = _mm_set1_epi32(one_by_x[n - 1]);
+
+ const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
+ const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);
+
+ // Set up masks: mask[idx] keeps the first idx 32-bit lanes, zeroing the rest
+ const __m128i ones32 = _mm_set_epi32(0, 0, 0, 0xffffffff);
+ __m128i mask[4];
+ for (int idx = 0; idx < 4; idx++) {
+ const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx));
+ mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
}
-}
-
-#if CONFIG_HIGHBITDEPTH
-// Only the vertical sums need to be adjusted for highbitdepth
-static void highbd_selfguided_restoration_1_v(uint16_t *src, int width,
- int height, int src_stride,
- int32_t *A, int32_t *B,
- int buf_stride) {
- int i, j;
+ for (int i = -1; i < height + 1; i += 2) {
+ for (int j = -1; j < width + 1; j += 4) {
+ const int32_t *Cij = C + i * buf_stride + j;
+ const int32_t *Dij = D + i * buf_stride + j;
- int width_extend = (width + 3) & ~3;
- for (j = 0; j < width_extend; j += 4) {
- __m128i a, b, x, y, x2, y2;
- __m128i sum, sum_sq, tmp;
+ __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r);
+ __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r);
- a = _mm_loadl_epi64((__m128i *)&src[j]);
- b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]);
+ // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain
+ // some uninitialised data in their upper words. We use a mask to
+ // ensure that these bits are set to 0.
+ int idx = AOMMIN(4, width + 1 - j);
+ assert(idx >= 1);
- sum = _mm_cvtepi16_epi32(_mm_add_epi16(a, b));
- tmp = _mm_unpacklo_epi16(a, b);
- sum_sq = _mm_madd_epi16(tmp, tmp);
+ if (idx < 4) {
+ sum1 = _mm_and_si128(mask[idx], sum1);
+ sum2 = _mm_and_si128(mask[idx], sum2);
+ }
- _mm_store_si128((__m128i *)&B[j], sum);
- _mm_store_si128((__m128i *)&A[j], sum_sq);
+ const __m128i p = compute_p(sum1, sum2, bit_depth, n);
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
+ const __m128i z = _mm_min_epi32(
+ _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z),
+ SGRPROJ_MTABLE_BITS),
+ _mm_set1_epi32(255));
- for (i = 1; i < height - 2; ++i) {
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
+ // 'Gather' type instructions are not available pre-AVX2, so synthesize a
+ // gather using scalar loads.
+ const __m128i a_res = _mm_set_epi32(x_by_xplus1[_mm_extract_epi32(z, 3)],
+ x_by_xplus1[_mm_extract_epi32(z, 2)],
+ x_by_xplus1[_mm_extract_epi32(z, 1)],
+ x_by_xplus1[_mm_extract_epi32(z, 0)]);
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j]));
- y = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i + 2) * src_stride + j]));
+ xx_storeu_128(A + i * buf_stride + j, a_res);
- sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x));
+ const __m128i a_complement =
+ _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res);
- x2 = _mm_mullo_epi32(x, x);
- y2 = _mm_mullo_epi32(y, y);
+ // sum1 might have lanes greater than 2^15, so we can't use madd to do
+ // multiplication involving sum1. However, a_complement and one_over_n
+ // are both less than 2^15, so we can multiply them first.
+ const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n);
+ const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1);
+ const __m128i b_res =
+ _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS);
- sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2));
+ xx_storeu_128(B + i * buf_stride + j, b_res);
}
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq);
}
}
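// Note: calc_ab_fast differs from calc_ab above only in its row loop,
// which steps i += 2 so that A and B are computed on alternate rows. The
// "fast" cross sums below are weighted to interpolate across the skipped
// rows, roughly halving the cost of this stage.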
-static void highbd_selfguided_restoration_2_v(uint16_t *src, int width,
- int height, int src_stride,
- int32_t *A, int32_t *B,
- int buf_stride) {
- int i, j;
-
- int width_extend = (width + 3) & ~3;
- for (j = 0; j < width_extend; j += 4) {
- __m128i a, b, c, c2, x, y, x2, y2;
- __m128i sum, sum_sq, tmp;
-
- a = _mm_loadl_epi64((__m128i *)&src[j]);
- b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]);
- c = _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j]);
-
- sum = _mm_cvtepi16_epi32(_mm_add_epi16(_mm_add_epi16(a, b), c));
- // Important: We need to widen *before* squaring here, since
- // c^2 may be up to 2^24.
- c = _mm_cvtepu16_epi32(c);
- c2 = _mm_mullo_epi32(c, c);
- tmp = _mm_unpacklo_epi16(a, b);
- sum_sq = _mm_add_epi32(_mm_madd_epi16(tmp, tmp), c2);
-
- _mm_store_si128((__m128i *)&B[j], sum);
- _mm_store_si128((__m128i *)&A[j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[3 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[4 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
-
- for (i = 2; i < height - 3; ++i) {
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j]));
- y = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i + 3) * src_stride + j]));
-
- sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x));
-
- x2 = _mm_mullo_epi32(x, x);
- y2 = _mm_mullo_epi32(y, y);
-
- sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2));
- }
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq);
- }
+// Calculate 4 values of the "cross sum" starting at buf.
+//
+// Pixels are indexed like this:
+// xtl xt xtr
+// - buf -
+// xbl xb xbr
+//
+// Pixels are weighted like this:
+// 5 6 5
+// 0 0 0
+// 5 6 5
+//
+// fives = xtl + xtr + xbl + xbr
+// sixes = xt + xb
+// cross_sum = 6 * sixes + 5 * fives
+//           = 5 * (fives + sixes) + sixes
+// = (fives + sixes) << 2 + (fives + sixes) + sixes
+static INLINE __m128i cross_sum_fast_even_row(const int32_t *buf, int stride) {
+ const __m128i xtl = xx_loadu_128(buf - 1 - stride);
+ const __m128i xt = xx_loadu_128(buf - stride);
+ const __m128i xtr = xx_loadu_128(buf + 1 - stride);
+ const __m128i xbl = xx_loadu_128(buf - 1 + stride);
+ const __m128i xb = xx_loadu_128(buf + stride);
+ const __m128i xbr = xx_loadu_128(buf + 1 + stride);
+
+ const __m128i fives =
+ _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl)));
+ const __m128i sixes = _mm_add_epi32(xt, xb);
+ const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes);
+
+ return _mm_add_epi32(
+ _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),
+ sixes);
+}
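+// Sanity check of the decomposition above: with all six neighbours equal to
+// 1, fives = 4 and sixes = 2, so cross_sum = 6 * 2 + 5 * 4 = 32, and indeed
+// ((4 + 2) << 2) + (4 + 2) + 2 = 24 + 6 + 2 = 32.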
+
+// Calculate 4 values of the "cross sum" starting at buf.
+//
+// Pixels are indexed like this:
+// xl x xr
+//
+// Pixels are weighted like this:
+// 5 6 5
+//
+// buf points to x
+//
+// fives = xl + xr
+// sixes = x
+// cross_sum = 5 * fives + 6 * sixes
+// = 4 * (fives + sixes) + (fives + sixes) + sixes
+// = (fives + sixes) << 2 + (fives + sixes) + sixes
+static INLINE __m128i cross_sum_fast_odd_row(const int32_t *buf) {
+ const __m128i xl = xx_loadu_128(buf - 1);
+ const __m128i x = xx_loadu_128(buf);
+ const __m128i xr = xx_loadu_128(buf + 1);
+
+ const __m128i fives = _mm_add_epi32(xl, xr);
+ const __m128i sixes = x;
+
+ const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes);
+
+ return _mm_add_epi32(
+ _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),
+ sixes);
}
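+// Likewise for the odd-row sum: with xl = x = xr = 1, fives = 2 and
+// sixes = 1, so cross_sum = 5 * 2 + 6 * 1 = 16 = ((2 + 1) << 2) + 3 + 1.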
-static void highbd_selfguided_restoration_3_v(uint16_t *src, int width,
- int height, int src_stride,
- int32_t *A, int32_t *B,
- int buf_stride) {
- int i, j;
-
- int width_extend = (width + 3) & ~3;
- for (j = 0; j < width_extend; j += 4) {
- __m128i a, b, c, d, x, y, x2, y2;
- __m128i sum, sum_sq, tmp, tmp2;
-
- a = _mm_loadl_epi64((__m128i *)&src[j]);
- b = _mm_loadl_epi64((__m128i *)&src[src_stride + j]);
- c = _mm_loadl_epi64((__m128i *)&src[2 * src_stride + j]);
- d = _mm_loadl_epi64((__m128i *)&src[3 * src_stride + j]);
-
- sum = _mm_cvtepi16_epi32(
- _mm_add_epi16(_mm_add_epi16(a, b), _mm_add_epi16(c, d)));
- tmp = _mm_unpacklo_epi16(a, b);
- tmp2 = _mm_unpacklo_epi16(c, d);
- sum_sq =
- _mm_add_epi32(_mm_madd_epi16(tmp, tmp), _mm_madd_epi16(tmp2, tmp2));
-
- _mm_store_si128((__m128i *)&B[j], sum);
- _mm_store_si128((__m128i *)&A[j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[4 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[5 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[2 * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[2 * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[6 * src_stride + j]));
- sum = _mm_add_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_add_epi32(sum_sq, x2);
-
- for (i = 3; i < height - 4; ++i) {
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 3) * src_stride + j]));
- y = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i + 4) * src_stride + j]));
-
- sum = _mm_add_epi32(sum, _mm_sub_epi32(y, x));
-
- x2 = _mm_mullo_epi32(x, x);
- y2 = _mm_mullo_epi32(y, y);
-
- sum_sq = _mm_add_epi32(sum_sq, _mm_sub_epi32(y2, x2));
+// The final filter for the self-guided restoration. Computes a
+// weighted average across A, B with "cross sums" (see cross_sum_...
+// implementations above).
+static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A,
+ const int32_t *B, int buf_stride,
+ const void *dgd8, int dgd_stride, int width,
+ int height, int highbd) {
+ const int nb0 = 5;
+ const int nb1 = 4;
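+
+  // nb0 and nb1 are the log2 of the total cross-sum weights: even rows use
+  // 4 * 5 + 2 * 6 = 32 = 2^5 and odd rows 2 * 5 + 1 * 6 = 16 = 2^4; the
+  // shifts below divide that factor back out.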
+
+ const __m128i rounding0 =
+ round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
+ const __m128i rounding1 =
+ round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);
+
+ const uint8_t *dgd_real =
+ highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;
+
+ for (int i = 0; i < height; ++i) {
+ if (!(i & 1)) { // even row
+ for (int j = 0; j < width; j += 4) {
+ const __m128i a =
+ cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride);
+ const __m128i b =
+ cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride);
+ const __m128i raw =
+ xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
+ const __m128i src =
+ highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);
+
+ __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
+ __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding0),
+ SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
+
+ xx_storeu_128(dst + i * dst_stride + j, w);
+ }
+ } else { // odd row
+ for (int j = 0; j < width; j += 4) {
+ const __m128i a = cross_sum_fast_odd_row(A + i * buf_stride + j);
+ const __m128i b = cross_sum_fast_odd_row(B + i * buf_stride + j);
+ const __m128i raw =
+ xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
+ const __m128i src =
+ highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);
+
+ __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
+ __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding1),
+ SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);
+
+ xx_storeu_128(dst + i * dst_stride + j, w);
+ }
}
- _mm_store_si128((__m128i *)&B[i * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[i * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 3) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 1) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 1) * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 2) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 2) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 2) * buf_stride + j], sum_sq);
-
- x = _mm_cvtepu16_epi32(
- _mm_loadl_epi64((__m128i *)&src[(i - 1) * src_stride + j]));
- sum = _mm_sub_epi32(sum, x);
- x2 = _mm_mullo_epi32(x, x);
- sum_sq = _mm_sub_epi32(sum_sq, x2);
-
- _mm_store_si128((__m128i *)&B[(i + 3) * buf_stride + j], sum);
- _mm_store_si128((__m128i *)&A[(i + 3) * buf_stride + j], sum_sq);
}
}
-void av1_selfguided_restoration_highbd_sse4_1(uint16_t *dgd, int width,
- int height, int dgd_stride,
- int32_t *dst, int dst_stride,
- int bit_depth, int r, int eps) {
+void av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width,
+ int height, int dgd_stride,
+ int32_t *flt0, int32_t *flt1,
+ int flt_stride, int sgr_params_idx,
+ int bit_depth, int highbd) {
+ DECLARE_ALIGNED(16, int32_t, buf[4 * RESTORATION_PROC_UNIT_PELS]);
+ memset(buf, 0, sizeof(buf));
+
const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;
- int32_t A_[RESTORATION_PROC_UNIT_PELS];
- int32_t B_[RESTORATION_PROC_UNIT_PELS];
- int32_t *A = A_;
- int32_t *B = B_;
- int i, j;
+
// Adjusting the stride of A and B here appears to avoid bad cache effects,
// leading to a significant speed improvement.
// We also align the stride to a multiple of 16 bytes for efficiency.
int buf_stride = ((width_ext + 3) & ~3) + 16;
- // Don't filter tiles with dimensions < 5 on any axis
- if ((width < 5) || (height < 5)) return;
-
- uint16_t *dgd0 = dgd - dgd_stride * SGRPROJ_BORDER_VERT - SGRPROJ_BORDER_HORZ;
- if (r == 1) {
- highbd_selfguided_restoration_1_v(dgd0, width_ext, height_ext, dgd_stride,
- A, B, buf_stride);
- selfguided_restoration_1_h(A, B, width_ext, height_ext, buf_stride, eps,
- bit_depth);
- } else if (r == 2) {
- highbd_selfguided_restoration_2_v(dgd0, width_ext, height_ext, dgd_stride,
- A, B, buf_stride);
- selfguided_restoration_2_h(A, B, width_ext, height_ext, buf_stride, eps,
- bit_depth);
- } else if (r == 3) {
- highbd_selfguided_restoration_3_v(dgd0, width_ext, height_ext, dgd_stride,
- A, B, buf_stride);
- selfguided_restoration_3_h(A, B, width_ext, height_ext, buf_stride, eps,
- bit_depth);
- } else {
- assert(0);
- }
- A += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
- B += SGRPROJ_BORDER_VERT * buf_stride + SGRPROJ_BORDER_HORZ;
-
- {
- i = 0;
- j = 0;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k + buf_stride] +
- A[k + buf_stride + 1];
- const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k + buf_stride] +
- B[k + buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- for (j = 1; j < width - 1; ++j) {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k + buf_stride] +
- A[k + buf_stride - 1] + A[k + buf_stride + 1];
- const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k + buf_stride] +
- B[k + buf_stride - 1] + B[k + buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- j = width - 1;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k + buf_stride] +
- A[k + buf_stride - 1];
- const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k + buf_stride] +
- B[k + buf_stride - 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- }
- for (i = 1; i < height - 1; ++i) {
- j = 0;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) +
- A[k + 1] + A[k - buf_stride + 1] +
- A[k + buf_stride + 1];
- const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) +
- B[k + 1] + B[k - buf_stride + 1] +
- B[k + buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
-
- // Vectorize the innermost loop
- for (j = 1; j < width - 1; j += 4) {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 5;
-
- __m128i tmp0 = _mm_loadu_si128((__m128i *)&A[k - 1 - buf_stride]);
- __m128i tmp1 = _mm_loadu_si128((__m128i *)&A[k + 3 - buf_stride]);
- __m128i tmp2 = _mm_loadu_si128((__m128i *)&A[k - 1]);
- __m128i tmp3 = _mm_loadu_si128((__m128i *)&A[k + 3]);
- __m128i tmp4 = _mm_loadu_si128((__m128i *)&A[k - 1 + buf_stride]);
- __m128i tmp5 = _mm_loadu_si128((__m128i *)&A[k + 3 + buf_stride]);
-
- __m128i a0 = _mm_add_epi32(
- _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 4), tmp2),
- _mm_add_epi32(_mm_alignr_epi8(tmp3, tmp2, 8),
- _mm_alignr_epi8(tmp5, tmp4, 4))),
- _mm_alignr_epi8(tmp1, tmp0, 4));
- __m128i a1 = _mm_add_epi32(_mm_add_epi32(tmp0, tmp4),
- _mm_add_epi32(_mm_alignr_epi8(tmp1, tmp0, 8),
- _mm_alignr_epi8(tmp5, tmp4, 8)));
- __m128i a = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(a0, a1), 2), a1);
-
- __m128i tmp6 = _mm_loadu_si128((__m128i *)&B[k - 1 - buf_stride]);
- __m128i tmp7 = _mm_loadu_si128((__m128i *)&B[k + 3 - buf_stride]);
- __m128i tmp8 = _mm_loadu_si128((__m128i *)&B[k - 1]);
- __m128i tmp9 = _mm_loadu_si128((__m128i *)&B[k + 3]);
- __m128i tmp10 = _mm_loadu_si128((__m128i *)&B[k - 1 + buf_stride]);
- __m128i tmp11 = _mm_loadu_si128((__m128i *)&B[k + 3 + buf_stride]);
-
- __m128i b0 = _mm_add_epi32(
- _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 4), tmp8),
- _mm_add_epi32(_mm_alignr_epi8(tmp9, tmp8, 8),
- _mm_alignr_epi8(tmp11, tmp10, 4))),
- _mm_alignr_epi8(tmp7, tmp6, 4));
- __m128i b1 =
- _mm_add_epi32(_mm_add_epi32(tmp6, tmp10),
- _mm_add_epi32(_mm_alignr_epi8(tmp7, tmp6, 8),
- _mm_alignr_epi8(tmp11, tmp10, 8)));
- __m128i b = _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(b0, b1), 2), b1);
-
- __m128i src = _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i *)&dgd[l]));
-
- __m128i rounding = _mm_set1_epi32(
- (1 << (SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS)) >> 1);
- __m128i v = _mm_add_epi32(_mm_mullo_epi32(a, src), b);
- __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding),
- SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- _mm_storeu_si128((__m128i *)&dst[m], w);
- }
-
- // Deal with any extra pixels at the right-hand edge of the frame
- // (typically have 2 such pixels, but may have anywhere between 0 and 3)
- for (; j < width - 1; ++j) {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 5;
- const int32_t a =
- (A[k] + A[k - 1] + A[k + 1] + A[k - buf_stride] + A[k + buf_stride]) *
- 4 +
- (A[k - 1 - buf_stride] + A[k - 1 + buf_stride] +
- A[k + 1 - buf_stride] + A[k + 1 + buf_stride]) *
- 3;
- const int32_t b =
- (B[k] + B[k - 1] + B[k + 1] + B[k - buf_stride] + B[k + buf_stride]) *
- 4 +
- (B[k - 1 - buf_stride] + B[k - 1 + buf_stride] +
- B[k + 1 - buf_stride] + B[k + 1 + buf_stride]) *
- 3;
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
-
- j = width - 1;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = A[k] + 2 * (A[k - buf_stride] + A[k + buf_stride]) +
- A[k - 1] + A[k - buf_stride - 1] +
- A[k + buf_stride - 1];
- const int32_t b = B[k] + 2 * (B[k - buf_stride] + B[k + buf_stride]) +
- B[k - 1] + B[k - buf_stride - 1] +
- B[k + buf_stride - 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
+ // The "tl" pointers point at the top-left of the initialised data for the
+ // array. Adding 3 here ensures that column 1 is 16-byte aligned.
+ int32_t *Atl = buf + 0 * RESTORATION_PROC_UNIT_PELS + 3;
+ int32_t *Btl = buf + 1 * RESTORATION_PROC_UNIT_PELS + 3;
+ int32_t *Ctl = buf + 2 * RESTORATION_PROC_UNIT_PELS + 3;
+ int32_t *Dtl = buf + 3 * RESTORATION_PROC_UNIT_PELS + 3;
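+  // (buf itself is 16-byte aligned, so Atl + 1 == buf + 4 lands back on a
+  // 16-byte boundary; the same holds for Btl/Ctl/Dtl provided
+  // RESTORATION_PROC_UNIT_PELS is a multiple of 4, which we assume here.)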
+
+ // The "0" pointers are (- SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). Note
+ // there's a zero row and column in A, B (integral images), so we move down
+ // and right one for them.
+ const int buf_diag_border =
+ SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT;
+
+ int32_t *A0 = Atl + 1 + buf_stride;
+ int32_t *B0 = Btl + 1 + buf_stride;
+ int32_t *C0 = Ctl + 1 + buf_stride;
+ int32_t *D0 = Dtl + 1 + buf_stride;
+
+ // Finally, A, B, C, D point at position (0, 0).
+ int32_t *A = A0 + buf_diag_border;
+ int32_t *B = B0 + buf_diag_border;
+ int32_t *C = C0 + buf_diag_border;
+ int32_t *D = D0 + buf_diag_border;
+
+ const int dgd_diag_border =
+ SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT;
+ const uint8_t *dgd0 = dgd8 - dgd_diag_border;
+
+ // Generate integral images from the input. C will contain sums of squares; D
+  // will contain just sums.
+ if (highbd)
+ integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext,
+ height_ext, Ctl, Dtl, buf_stride);
+ else
+ integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl,
+ buf_stride);
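+
+  // With ii one of these integral images, the sum over any box with corners
+  // tl, tr, bl, br can then be recovered with four lookups:
+  //   box = ii[br] - ii[bl] - ii[tr] + ii[tl]
+  // which is the usual inclusion-exclusion identity that boxsum_from_ii
+  // (used by calc_ab/calc_ab_fast above) applies.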
+
+ const sgr_params_type *const params = &sgr_params[sgr_params_idx];
+ // Write to flt0 and flt1
+ // If params->r == 0 we skip the corresponding filter. We only allow one of
+ // the radii to be 0, as having both equal to 0 would be equivalent to
+ // skipping SGR entirely.
+ assert(!(params->r[0] == 0 && params->r[1] == 0));
+ assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));
+ assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));
+
+ if (params->r[0] > 0) {
+ calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth,
+ sgr_params_idx, 0);
+ final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride,
+ width, height, highbd);
}
- {
- i = height - 1;
- j = 0;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = 3 * A[k] + 2 * A[k + 1] + 2 * A[k - buf_stride] +
- A[k - buf_stride + 1];
- const int32_t b = 3 * B[k] + 2 * B[k + 1] + 2 * B[k - buf_stride] +
- B[k - buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- for (j = 1; j < width - 1; ++j) {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = A[k] + 2 * (A[k - 1] + A[k + 1]) + A[k - buf_stride] +
- A[k - buf_stride - 1] + A[k - buf_stride + 1];
- const int32_t b = B[k] + 2 * (B[k - 1] + B[k + 1]) + B[k - buf_stride] +
- B[k - buf_stride - 1] + B[k - buf_stride + 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
- j = width - 1;
- {
- const int k = i * buf_stride + j;
- const int l = i * dgd_stride + j;
- const int m = i * dst_stride + j;
- const int nb = 3;
- const int32_t a = 3 * A[k] + 2 * A[k - 1] + 2 * A[k - buf_stride] +
- A[k - buf_stride - 1];
- const int32_t b = 3 * B[k] + 2 * B[k - 1] + 2 * B[k - buf_stride] +
- B[k - buf_stride - 1];
- const int32_t v = a * dgd[l] + b;
- dst[m] = ROUND_POWER_OF_TWO(v, SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
- }
+ if (params->r[1] > 0) {
+ calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx,
+ 1);
+ final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width,
+ height, highbd);
}
}
-void av1_highpass_filter_highbd_sse4_1(uint16_t *dgd, int width, int height,
- int stride, int32_t *dst, int dst_stride,
- int corner, int edge) {
- int i, j;
- const int center = (1 << SGRPROJ_RST_BITS) - 4 * (corner + edge);
-
- {
- i = 0;
- j = 0;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] + edge * (dgd[k + 1] + dgd[k + stride] + dgd[k] * 2) +
- corner *
- (dgd[k + stride + 1] + dgd[k + 1] + dgd[k + stride] + dgd[k]);
- }
- for (j = 1; j < width - 1; ++j) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] = center * dgd[k] +
- edge * (dgd[k - 1] + dgd[k + stride] + dgd[k + 1] + dgd[k]) +
- corner * (dgd[k + stride - 1] + dgd[k + stride + 1] +
- dgd[k - 1] + dgd[k + 1]);
- }
- j = width - 1;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] + edge * (dgd[k - 1] + dgd[k + stride] + dgd[k] * 2) +
- corner *
- (dgd[k + stride - 1] + dgd[k - 1] + dgd[k + stride] + dgd[k]);
- }
- }
- __m128i center_ = _mm_set1_epi32(center);
- __m128i edge_ = _mm_set1_epi32(edge);
- __m128i corner_ = _mm_set1_epi32(corner);
- for (i = 1; i < height - 1; ++i) {
- j = 0;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] +
- edge * (dgd[k - stride] + dgd[k + 1] + dgd[k + stride] + dgd[k]) +
- corner * (dgd[k + stride + 1] + dgd[k - stride + 1] +
- dgd[k - stride] + dgd[k + stride]);
- }
- // Process 4 pixels at a time
- for (j = 1; j < width - 4; j += 4) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
-
- __m128i a = _mm_loadu_si128((__m128i *)&dgd[k - stride - 1]);
- __m128i b = _mm_loadu_si128((__m128i *)&dgd[k - 1]);
- __m128i c = _mm_loadu_si128((__m128i *)&dgd[k + stride - 1]);
-
- __m128i tl = _mm_cvtepu16_epi32(a);
- __m128i tr = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
- __m128i cl = _mm_cvtepu16_epi32(b);
- __m128i cr = _mm_cvtepu16_epi32(_mm_srli_si128(b, 8));
- __m128i bl = _mm_cvtepu16_epi32(c);
- __m128i br = _mm_cvtepu16_epi32(_mm_srli_si128(c, 8));
-
- __m128i x = _mm_alignr_epi8(cr, cl, 4);
- __m128i y = _mm_add_epi32(_mm_add_epi32(_mm_alignr_epi8(tr, tl, 4), cl),
- _mm_add_epi32(_mm_alignr_epi8(br, bl, 4),
- _mm_alignr_epi8(cr, cl, 8)));
- __m128i z = _mm_add_epi32(_mm_add_epi32(tl, bl),
- _mm_add_epi32(_mm_alignr_epi8(tr, tl, 8),
- _mm_alignr_epi8(br, bl, 8)));
-
- __m128i res = _mm_add_epi32(_mm_mullo_epi32(x, center_),
- _mm_add_epi32(_mm_mullo_epi32(y, edge_),
- _mm_mullo_epi32(z, corner_)));
-
- _mm_storeu_si128((__m128i *)&dst[l], res);
- }
- // Handle any leftover pixels
- for (; j < width - 1; ++j) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] +
- edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k + 1]) +
- corner * (dgd[k + stride - 1] + dgd[k - stride - 1] +
- dgd[k - stride + 1] + dgd[k + stride + 1]);
- }
- j = width - 1;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] +
- edge * (dgd[k - stride] + dgd[k - 1] + dgd[k + stride] + dgd[k]) +
- corner * (dgd[k + stride - 1] + dgd[k - stride - 1] +
- dgd[k - stride] + dgd[k + stride]);
- }
- }
- {
- i = height - 1;
- j = 0;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] + edge * (dgd[k + 1] + dgd[k - stride] + dgd[k] * 2) +
- corner *
- (dgd[k - stride + 1] + dgd[k + 1] + dgd[k - stride] + dgd[k]);
- }
- for (j = 1; j < width - 1; ++j) {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] = center * dgd[k] +
- edge * (dgd[k - 1] + dgd[k - stride] + dgd[k + 1] + dgd[k]) +
- corner * (dgd[k - stride - 1] + dgd[k - stride + 1] +
- dgd[k - 1] + dgd[k + 1]);
- }
- j = width - 1;
- {
- const int k = i * stride + j;
- const int l = i * dst_stride + j;
- dst[l] =
- center * dgd[k] + edge * (dgd[k - 1] + dgd[k - stride] + dgd[k] * 2) +
- corner *
- (dgd[k - stride - 1] + dgd[k - 1] + dgd[k - stride] + dgd[k]);
- }
- }
-}
-
-void apply_selfguided_restoration_highbd_sse4_1(
- uint16_t *dat, int width, int height, int stride, int bit_depth, int eps,
- int *xqd, uint16_t *dst, int dst_stride, int32_t *tmpbuf) {
+void apply_selfguided_restoration_sse4_1(const uint8_t *dat8, int width,
+ int height, int stride, int eps,
+ const int *xqd, uint8_t *dst8,
+ int dst_stride, int32_t *tmpbuf,
+ int bit_depth, int highbd) {
+ int32_t *flt0 = tmpbuf;
+ int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
+ assert(width * height <= RESTORATION_UNITPELS_MAX);
+ av1_selfguided_restoration_sse4_1(dat8, width, height, stride, flt0, flt1,
+ width, eps, bit_depth, highbd);
+ const sgr_params_type *const params = &sgr_params[eps];
int xq[2];
- int32_t *flt1 = tmpbuf;
- int32_t *flt2 = flt1 + RESTORATION_TILEPELS_MAX;
- int i, j;
- assert(width * height <= RESTORATION_TILEPELS_MAX);
-#if USE_HIGHPASS_IN_SGRPROJ
- av1_highpass_filter_highbd_sse4_1(dat, width, height, stride, flt1, width,
- sgr_params[eps].corner,
- sgr_params[eps].edge);
-#else
- av1_selfguided_restoration_highbd_sse4_1(dat, width, height, stride, flt1,
- width, bit_depth, sgr_params[eps].r1,
- sgr_params[eps].e1);
-#endif // USE_HIGHPASS_IN_SGRPROJ
- av1_selfguided_restoration_highbd_sse4_1(dat, width, height, stride, flt2,
- width, bit_depth, sgr_params[eps].r2,
- sgr_params[eps].e2);
- decode_xq(xqd, xq);
+ decode_xq(xqd, xq, params);
__m128i xq0 = _mm_set1_epi32(xq[0]);
__m128i xq1 = _mm_set1_epi32(xq[1]);
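+
+  // Per pixel, this computes (in scalar terms)
+  //   u = src << SGRPROJ_RST_BITS
+  //   v = (u << SGRPROJ_PRJ_BITS) + xq[0] * (flt0 - u) + xq[1] * (flt1 - u)
+  //   dst = clip(ROUND_POWER_OF_TWO(v, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS))
+  // where a term is skipped when the corresponding radius is zero.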
- for (i = 0; i < height; ++i) {
+
+ for (int i = 0; i < height; ++i) {
// Calculate output in batches of 8 pixels
- for (j = 0; j < width; j += 8) {
+ for (int j = 0; j < width; j += 8) {
const int k = i * width + j;
- const int l = i * stride + j;
const int m = i * dst_stride + j;
- __m128i src =
- _mm_slli_epi16(_mm_load_si128((__m128i *)&dat[l]), SGRPROJ_RST_BITS);
-
- const __m128i u_0 = _mm_cvtepu16_epi32(src);
- const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(src, 8));
-
- const __m128i f1_0 =
- _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k]), u_0);
- const __m128i f2_0 =
- _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k]), u_0);
- const __m128i f1_1 =
- _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt1[k + 4]), u_1);
- const __m128i f2_1 =
- _mm_sub_epi32(_mm_loadu_si128((__m128i *)&flt2[k + 4]), u_1);
-
- const __m128i v_0 = _mm_add_epi32(
- _mm_add_epi32(_mm_mullo_epi32(xq0, f1_0), _mm_mullo_epi32(xq1, f2_0)),
- _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS));
- const __m128i v_1 = _mm_add_epi32(
- _mm_add_epi32(_mm_mullo_epi32(xq0, f1_1), _mm_mullo_epi32(xq1, f2_1)),
- _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS));
+
+ const uint8_t *dat8ij = dat8 + i * stride + j;
+ __m128i src;
+ if (highbd) {
+ src = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij));
+ } else {
+ src = _mm_cvtepu8_epi16(xx_loadl_64(dat8ij));
+ }
+
+ const __m128i u = _mm_slli_epi16(src, SGRPROJ_RST_BITS);
+ const __m128i u_0 = _mm_cvtepu16_epi32(u);
+ const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(u, 8));
+
+ __m128i v_0 = _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS);
+ __m128i v_1 = _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS);
+
+ if (params->r[0] > 0) {
+ const __m128i f1_0 = _mm_sub_epi32(xx_loadu_128(&flt0[k]), u_0);
+ v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq0, f1_0));
+
+ const __m128i f1_1 = _mm_sub_epi32(xx_loadu_128(&flt0[k + 4]), u_1);
+ v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq0, f1_1));
+ }
+
+ if (params->r[1] > 0) {
+ const __m128i f2_0 = _mm_sub_epi32(xx_loadu_128(&flt1[k]), u_0);
+ v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq1, f2_0));
+
+ const __m128i f2_1 = _mm_sub_epi32(xx_loadu_128(&flt1[k + 4]), u_1);
+ v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq1, f2_1));
+ }
const __m128i rounding =
- _mm_set1_epi32((1 << (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS)) >> 1);
+ round_for_shift(SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding),
SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding),
SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
- // Pack into 16 bits and clamp to [0, 2^bit_depth)
- const __m128i tmp = _mm_packus_epi32(w_0, w_1);
- const __m128i max = _mm_set1_epi16((1 << bit_depth) - 1);
- const __m128i res = _mm_min_epi16(tmp, max);
-
- _mm_store_si128((__m128i *)&dst[m], res);
- }
- // Process leftover pixels
- for (; j < width; ++j) {
- const int k = i * width + j;
- const int l = i * stride + j;
- const int m = i * dst_stride + j;
- const int32_t u = ((int32_t)dat[l] << SGRPROJ_RST_BITS);
- const int32_t f1 = (int32_t)flt1[k] - u;
- const int32_t f2 = (int32_t)flt2[k] - u;
- const int32_t v = xq[0] * f1 + xq[1] * f2 + (u << SGRPROJ_PRJ_BITS);
- const int16_t w =
- (int16_t)ROUND_POWER_OF_TWO(v, SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
- dst[m] = (uint16_t)clip_pixel_highbd(w, bit_depth);
+ if (highbd) {
+ // Pack into 16 bits and clamp to [0, 2^bit_depth)
+ const __m128i tmp = _mm_packus_epi32(w_0, w_1);
+ const __m128i max = _mm_set1_epi16((1 << bit_depth) - 1);
+ const __m128i res = _mm_min_epi16(tmp, max);
+ xx_storeu_128(CONVERT_TO_SHORTPTR(dst8 + m), res);
+ } else {
+ // Pack into 8 bits and clamp to [0, 256)
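+        // There is no single 32 -> 8 bit pack, so go via 16 bits:
+        // _mm_packs_epi32 saturates to signed 16-bit, then _mm_packus_epi16
+        // clamps to [0, 255].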
+ const __m128i tmp = _mm_packs_epi32(w_0, w_1);
+ const __m128i res = _mm_packus_epi16(tmp, tmp /* "don't care" value */);
+ xx_storel_64(dst8 + m, res);
+ }
}
}
}
-
-#endif