Diffstat (limited to 'media/libwebp/dsp')
-rw-r--r--  media/libwebp/dsp/dsp.h            |  6
-rw-r--r--  media/libwebp/dsp/lossless.c       |  2
-rw-r--r--  media/libwebp/dsp/lossless.h       | 14
-rw-r--r--  media/libwebp/dsp/msa_macro.h      |  2
-rw-r--r--  media/libwebp/dsp/quant.h          | 70
-rw-r--r--  media/libwebp/dsp/rescaler.c       |  4
-rw-r--r--  media/libwebp/dsp/rescaler_neon.c  | 18
-rw-r--r--  media/libwebp/dsp/rescaler_sse2.c  | 35
-rw-r--r--  media/libwebp/dsp/yuv.h            |  2
9 files changed, 132 insertions, 21 deletions
diff --git a/media/libwebp/dsp/dsp.h b/media/libwebp/dsp/dsp.h
index 537ea2044..4e509bd2c 100644
--- a/media/libwebp/dsp/dsp.h
+++ b/media/libwebp/dsp/dsp.h
@@ -76,10 +76,6 @@ extern "C" {
#define WEBP_USE_SSE41
#endif
-#if defined(__AVX2__) || defined(WEBP_HAVE_AVX2)
-#define WEBP_USE_AVX2
-#endif
-
// The intrinsics currently cause compiler errors with arm-nacl-gcc and the
// inline assembly would need to be modified for use with Native Client.
#if (defined(__ARM_NEON__) || \
@@ -679,4 +675,4 @@ void VP8FiltersInit(void);
} // extern "C"
#endif
-#endif /* WEBP_DSP_DSP_H_ */
+#endif // WEBP_DSP_DSP_H_
diff --git a/media/libwebp/dsp/lossless.c b/media/libwebp/dsp/lossless.c
index 93ccecdfd..1a1523d22 100644
--- a/media/libwebp/dsp/lossless.c
+++ b/media/libwebp/dsp/lossless.c
@@ -23,8 +23,6 @@
#include "../dsp/lossless.h"
#include "../dsp/lossless_common.h"
-#define MAX_DIFF_COST (1e30f)
-
//------------------------------------------------------------------------------
// Image transforms.
diff --git a/media/libwebp/dsp/lossless.h b/media/libwebp/dsp/lossless.h
index 4a1d1e0dd..6db5fafc1 100644
--- a/media/libwebp/dsp/lossless.h
+++ b/media/libwebp/dsp/lossless.h
@@ -163,7 +163,7 @@ extern VP8LCostCombinedFunc VP8LExtraCostCombined;
extern VP8LCombinedShannonEntropyFunc VP8LCombinedShannonEntropy;
typedef struct { // small struct to hold counters
- int counts[2]; // index: 0=zero steak, 1=non-zero streak
+ int counts[2]; // index: 0=zero streak, 1=non-zero streak
int streaks[2][2]; // [zero/non-zero][streak<3 / streak>=3]
} VP8LStreaks;
@@ -194,10 +194,14 @@ extern VP8LGetEntropyUnrefinedFunc VP8LGetEntropyUnrefined;
void VP8LBitsEntropyUnrefined(const uint32_t* const array, int n,
VP8LBitEntropy* const entropy);
-typedef void (*VP8LHistogramAddFunc)(const VP8LHistogram* const a,
- const VP8LHistogram* const b,
- VP8LHistogram* const out);
-extern VP8LHistogramAddFunc VP8LHistogramAdd;
+typedef void (*VP8LAddVectorFunc)(const uint32_t* a, const uint32_t* b,
+ uint32_t* out, int size);
+extern VP8LAddVectorFunc VP8LAddVector;
+typedef void (*VP8LAddVectorEqFunc)(const uint32_t* a, uint32_t* out, int size);
+extern VP8LAddVectorEqFunc VP8LAddVectorEq;
+void VP8LHistogramAdd(const VP8LHistogram* const a,
+ const VP8LHistogram* const b,
+ VP8LHistogram* const out);
// -----------------------------------------------------------------------------
// PrefixEncode()
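
In this header the old VP8LHistogramAdd function pointer is replaced by two plain vector-add hooks over uint32_t count arrays, while VP8LHistogramAdd itself becomes an ordinary function. A minimal sketch of how a histogram add could be composed from the new hooks; AddCounts is a hypothetical helper, not part of this patch, and the real encoder-side implementation may differ.

    // Hypothetical composition of the new hooks; assumes lossless.h is included.
    static void AddCounts(const uint32_t* const a, const uint32_t* const b,
                          uint32_t* const out, int size) {
      if (b != out) {
        VP8LAddVector(a, b, out, size);   // out[i] = a[i] + b[i]
      } else {
        VP8LAddVectorEq(a, out, size);    // out[i] += a[i]
      }
    }
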
diff --git a/media/libwebp/dsp/msa_macro.h b/media/libwebp/dsp/msa_macro.h
index dfacda6cc..de026a1d9 100644
--- a/media/libwebp/dsp/msa_macro.h
+++ b/media/libwebp/dsp/msa_macro.h
@@ -1389,4 +1389,4 @@ static WEBP_INLINE uint32_t func_hadd_uh_u32(v8u16 in) {
} while (0)
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
-#endif /* WEBP_DSP_MSA_MACRO_H_ */
+#endif // WEBP_DSP_MSA_MACRO_H_
diff --git a/media/libwebp/dsp/quant.h b/media/libwebp/dsp/quant.h
new file mode 100644
index 000000000..b82e728a5
--- /dev/null
+++ b/media/libwebp/dsp/quant.h
@@ -0,0 +1,70 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+
+#ifndef WEBP_DSP_QUANT_H_
+#define WEBP_DSP_QUANT_H_
+
+#include "../dsp/dsp.h"
+#include "../webp/types.h"
+
+#if defined(WEBP_USE_NEON) && !defined(WEBP_ANDROID_NEON) && \
+ !defined(WEBP_HAVE_NEON_RTCD)
+#include <arm_neon.h>
+
+#define IsFlat IsFlat_NEON
+
+static uint32x2_t horizontal_add_uint32x4(const uint32x4_t a) {
+ const uint64x2_t b = vpaddlq_u32(a);
+ return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+ vreinterpret_u32_u64(vget_high_u64(b)));
+}
+
+static WEBP_INLINE int IsFlat(const int16_t* levels, int num_blocks,
+ int thresh) {
+ const int16x8_t tst_ones = vdupq_n_s16(-1);
+ uint32x4_t sum = vdupq_n_u32(0);
+
+ for (int i = 0; i < num_blocks; ++i) {
+ // Set DC to zero.
+ const int16x8_t a_0 = vsetq_lane_s16(0, vld1q_s16(levels), 0);
+ const int16x8_t a_1 = vld1q_s16(levels + 8);
+
+ const uint16x8_t b_0 = vshrq_n_u16(vtstq_s16(a_0, tst_ones), 15);
+ const uint16x8_t b_1 = vshrq_n_u16(vtstq_s16(a_1, tst_ones), 15);
+
+ sum = vpadalq_u16(sum, b_0);
+ sum = vpadalq_u16(sum, b_1);
+
+ levels += 16;
+ }
+ return thresh >= (int32_t)vget_lane_u32(horizontal_add_uint32x4(sum), 0);
+}
+
+#else
+
+#define IsFlat IsFlat_C
+
+static WEBP_INLINE int IsFlat(const int16_t* levels, int num_blocks,
+ int thresh) {
+ int score = 0;
+ while (num_blocks-- > 0) { // TODO(skal): refine positional scoring?
+ int i;
+ for (i = 1; i < 16; ++i) { // omit DC, we're only interested in AC
+ score += (levels[i] != 0);
+ if (score > thresh) return 0;
+ }
+ levels += 16;
+ }
+ return 1;
+}
+
+#endif // defined(WEBP_USE_NEON) && !defined(WEBP_ANDROID_NEON) &&
+ // !defined(WEBP_HAVE_NEON_RTCD)
+
+#endif // WEBP_DSP_QUANT_H_
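
The new quant.h centralizes an IsFlat() helper (a NEON variant plus a C fallback) that counts non-zero AC levels across num_blocks blocks of 16 coefficients and compares the total against thresh. A hedged usage sketch follows; MacroblockIsFlat and kFlatnessLimit are illustrative names, not part of this patch.

    #include "../dsp/quant.h"

    // Returns 1 if the 16 4x4 blocks of a macroblock carry at most
    // kFlatnessLimit non-zero AC coefficients in total, 0 otherwise.
    static int MacroblockIsFlat(const int16_t levels[16 * 16]) {
      const int kFlatnessLimit = 3;   // illustrative threshold
      return IsFlat(levels, 16, kFlatnessLimit);
    }
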
diff --git a/media/libwebp/dsp/rescaler.c b/media/libwebp/dsp/rescaler.c
index f70e6beef..6bf387f8e 100644
--- a/media/libwebp/dsp/rescaler.c
+++ b/media/libwebp/dsp/rescaler.c
@@ -21,6 +21,7 @@
#define ROUNDER (WEBP_RESCALER_ONE >> 1)
#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
//------------------------------------------------------------------------------
// Row import
@@ -138,7 +139,7 @@ void WebPRescalerExportRowShrink_C(WebPRescaler* const wrk) {
if (yscale) {
for (x_out = 0; x_out < x_out_max; ++x_out) {
const uint32_t frac = (uint32_t)MULT_FIX(frow[x_out], yscale);
- const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
+ const int v = (int)MULT_FIX_FLOOR(irow[x_out] - frac, wrk->fxy_scale);
assert(v >= 0 && v <= 255);
dst[x_out] = v;
irow[x_out] = frac; // new fractional start
@@ -153,6 +154,7 @@ void WebPRescalerExportRowShrink_C(WebPRescaler* const wrk) {
}
}
+#undef MULT_FIX_FLOOR
#undef MULT_FIX
#undef ROUNDER
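
The shrinking export path now applies a truncating MULT_FIX_FLOOR for the final fxy_scale multiply, presumably so the rounding term cannot push the exported value above 255 and trip the assert. A stand-alone illustration of the two fixed-point products; the input values are arbitrary and not taken from the patch.

    #include <stdint.h>
    #include <stdio.h>

    #define RFIX 32                                    // WEBP_RESCALER_RFIX
    #define ROUNDER ((uint64_t)1 << (RFIX - 1))        // WEBP_RESCALER_ONE >> 1
    #define MULT_FIX(x, y)       (((uint64_t)(x) * (y) + ROUNDER) >> RFIX)
    #define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> RFIX)

    int main(void) {
      const uint32_t acc = 0xff800000u;    // arbitrary accumulator value
      const uint32_t scale = 0x01004000u;  // arbitrary fxy_scale
      // The floored product is never larger than the rounded one.
      printf("rounded=%llu floored=%llu\n",
             (unsigned long long)MULT_FIX(acc, scale),
             (unsigned long long)MULT_FIX_FLOOR(acc, scale));
      return 0;
    }
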
diff --git a/media/libwebp/dsp/rescaler_neon.c b/media/libwebp/dsp/rescaler_neon.c
index 835e646c1..b560d0cdc 100644
--- a/media/libwebp/dsp/rescaler_neon.c
+++ b/media/libwebp/dsp/rescaler_neon.c
@@ -22,6 +22,7 @@
#define ROUNDER (WEBP_RESCALER_ONE >> 1)
#define MULT_FIX_C(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR_C(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
#define LOAD_32x4(SRC, DST) const uint32x4_t DST = vld1q_u32((SRC))
#define LOAD_32x8(SRC, DST0, DST1) \
@@ -35,8 +36,11 @@
#if (WEBP_RESCALER_RFIX == 32)
#define MAKE_HALF_CST(C) vdupq_n_s32((int32_t)((C) >> 1))
-#define MULT_FIX(A, B) /* note: B is actualy scale>>1. See MAKE_HALF_CST */ \
+// note: B is actually scale>>1. See MAKE_HALF_CST
+#define MULT_FIX(A, B) \
vreinterpretq_u32_s32(vqrdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
+#define MULT_FIX_FLOOR(A, B) \
+ vreinterpretq_u32_s32(vqdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
#else
#error "MULT_FIX/WEBP_RESCALER_RFIX need some more work"
#endif
@@ -135,8 +139,8 @@ static void RescalerExportRowShrink_NEON(WebPRescaler* const wrk) {
const uint32x4_t A1 = MULT_FIX(in1, yscale_half);
const uint32x4_t B0 = vqsubq_u32(in2, A0);
const uint32x4_t B1 = vqsubq_u32(in3, A1);
- const uint32x4_t C0 = MULT_FIX(B0, fxy_scale_half);
- const uint32x4_t C1 = MULT_FIX(B1, fxy_scale_half);
+ const uint32x4_t C0 = MULT_FIX_FLOOR(B0, fxy_scale_half);
+ const uint32x4_t C1 = MULT_FIX_FLOOR(B1, fxy_scale_half);
const uint16x4_t D0 = vmovn_u32(C0);
const uint16x4_t D1 = vmovn_u32(C1);
const uint8x8_t E = vmovn_u16(vcombine_u16(D0, D1));
@@ -145,7 +149,7 @@ static void RescalerExportRowShrink_NEON(WebPRescaler* const wrk) {
}
for (; x_out < x_out_max; ++x_out) {
const uint32_t frac = (uint32_t)MULT_FIX_C(frow[x_out], yscale);
- const int v = (int)MULT_FIX_C(irow[x_out] - frac, wrk->fxy_scale);
+ const int v = (int)MULT_FIX_FLOOR_C(irow[x_out] - frac, fxy_scale);
assert(v >= 0 && v <= 255);
dst[x_out] = v;
irow[x_out] = frac; // new fractional start
@@ -170,6 +174,12 @@ static void RescalerExportRowShrink_NEON(WebPRescaler* const wrk) {
}
}
+#undef MULT_FIX_FLOOR_C
+#undef MULT_FIX_C
+#undef MULT_FIX_FLOOR
+#undef MULT_FIX
+#undef ROUNDER
+
//------------------------------------------------------------------------------
extern void WebPRescalerDspInitNEON(void);
diff --git a/media/libwebp/dsp/rescaler_sse2.c b/media/libwebp/dsp/rescaler_sse2.c
index 1306f8457..2d35f76ab 100644
--- a/media/libwebp/dsp/rescaler_sse2.c
+++ b/media/libwebp/dsp/rescaler_sse2.c
@@ -25,6 +25,7 @@
#define ROUNDER (WEBP_RESCALER_ONE >> 1)
#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
// input: 8 bytes ABCDEFGH -> output: A0E0B0F0C0G0D0H0
static void LoadTwoPixels_SSE2(const uint8_t* const src, __m128i* out) {
@@ -224,6 +225,35 @@ static WEBP_INLINE void ProcessRow_SSE2(const __m128i* const A0,
_mm_storel_epi64((__m128i*)dst, G);
}
+static WEBP_INLINE void ProcessRow_Floor_SSE2(const __m128i* const A0,
+ const __m128i* const A1,
+ const __m128i* const A2,
+ const __m128i* const A3,
+ const __m128i* const mult,
+ uint8_t* const dst) {
+ const __m128i mask = _mm_set_epi32(0xffffffffu, 0, 0xffffffffu, 0);
+ const __m128i B0 = _mm_mul_epu32(*A0, *mult);
+ const __m128i B1 = _mm_mul_epu32(*A1, *mult);
+ const __m128i B2 = _mm_mul_epu32(*A2, *mult);
+ const __m128i B3 = _mm_mul_epu32(*A3, *mult);
+ const __m128i D0 = _mm_srli_epi64(B0, WEBP_RESCALER_RFIX);
+ const __m128i D1 = _mm_srli_epi64(B1, WEBP_RESCALER_RFIX);
+#if (WEBP_RESCALER_RFIX < 32)
+ const __m128i D2 =
+ _mm_and_si128(_mm_slli_epi64(B2, 32 - WEBP_RESCALER_RFIX), mask);
+ const __m128i D3 =
+ _mm_and_si128(_mm_slli_epi64(B3, 32 - WEBP_RESCALER_RFIX), mask);
+#else
+ const __m128i D2 = _mm_and_si128(B2, mask);
+ const __m128i D3 = _mm_and_si128(B3, mask);
+#endif
+ const __m128i E0 = _mm_or_si128(D0, D2);
+ const __m128i E1 = _mm_or_si128(D1, D3);
+ const __m128i F = _mm_packs_epi32(E0, E1);
+ const __m128i G = _mm_packus_epi16(F, F);
+ _mm_storel_epi64((__m128i*)dst, G);
+}
+
static void RescalerExportRowExpand_SSE2(WebPRescaler* const wrk) {
int x_out;
uint8_t* const dst = wrk->dst;
@@ -322,12 +352,12 @@ static void RescalerExportRowShrink_SSE2(WebPRescaler* const wrk) {
const __m128i G1 = _mm_or_si128(D1, F3);
_mm_storeu_si128((__m128i*)(irow + x_out + 0), G0);
_mm_storeu_si128((__m128i*)(irow + x_out + 4), G1);
- ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
+ ProcessRow_Floor_SSE2(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
}
}
for (; x_out < x_out_max; ++x_out) {
const uint32_t frac = (int)MULT_FIX(frow[x_out], yscale);
- const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
+ const int v = (int)MULT_FIX_FLOOR(irow[x_out] - frac, wrk->fxy_scale);
assert(v >= 0 && v <= 255);
dst[x_out] = v;
irow[x_out] = frac; // new fractional start
@@ -352,6 +382,7 @@ static void RescalerExportRowShrink_SSE2(WebPRescaler* const wrk) {
}
}
+#undef MULT_FIX_FLOOR
#undef MULT_FIX
#undef ROUNDER
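
The new ProcessRow_Floor_SSE2 implements the truncating variant of the row pack shown above: multiply by mult, shift right by WEBP_RESCALER_RFIX with no rounding term, then saturate down to bytes. A hedged scalar sketch of what it computes per output pixel; ProcessRowFloorScalar is an illustrative name, not part of this patch, and it assumes the rescaler headers are included for WEBP_RESCALER_RFIX.

    // Per output pixel: multiply the 32-bit accumulator by mult, shift right
    // by WEBP_RESCALER_RFIX without rounding, then saturate-pack to 8 bits.
    static void ProcessRowFloorScalar(const uint32_t* acc, uint32_t mult,
                                      uint8_t* dst, int len) {
      int i;
      for (i = 0; i < len; ++i) {
        const uint64_t v = ((uint64_t)acc[i] * mult) >> WEBP_RESCALER_RFIX;
        dst[i] = (v > 255) ? 255 : (uint8_t)v;   // like packs/packus saturation
      }
    }
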
diff --git a/media/libwebp/dsp/yuv.h b/media/libwebp/dsp/yuv.h
index b4c5d0b6c..947b89e13 100644
--- a/media/libwebp/dsp/yuv.h
+++ b/media/libwebp/dsp/yuv.h
@@ -207,4 +207,4 @@ static WEBP_INLINE int VP8RGBToV(int r, int g, int b, int rounding) {
} // extern "C"
#endif
-#endif /* WEBP_DSP_YUV_H_ */
+#endif // WEBP_DSP_YUV_H_