/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/

#ifndef AOM_DSP_X86_BLEND_SSE4_H_
#define AOM_DSP_X86_BLEND_SSE4_H_

#include "aom_dsp/blend.h"
#include "aom_dsp/x86/synonyms.h"

//////////////////////////////////////////////////////////////////////////////
// Common kernels
//////////////////////////////////////////////////////////////////////////////
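
// Blend 4 pixels of 8-bit input: widen both sources to 16 bits, weight them
// by v_m0_w and v_m1_w (assumed to sum to AOM_BLEND_A64_MAX_ALPHA per lane),
// and round by AOM_BLEND_A64_ROUND_BITS. Returns 4 blended pixels as 16-bit
// words in the low half of the result.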
static INLINE __m128i blend_4(const uint8_t *src0, const uint8_t *src1,
                              const __m128i v_m0_w, const __m128i v_m1_w) {
  const __m128i v_s0_b = xx_loadl_32(src0);
  const __m128i v_s1_b = xx_loadl_32(src1);
  const __m128i v_s0_w = _mm_cvtepu8_epi16(v_s0_b);
  const __m128i v_s1_w = _mm_cvtepu8_epi16(v_s1_b);
  const __m128i v_p0_w = _mm_mullo_epi16(v_s0_w, v_m0_w);
  const __m128i v_p1_w = _mm_mullo_epi16(v_s1_w, v_m1_w);
  const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
  return v_res_w;
}
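
// Same as blend_4(), but blends 8 pixels per call (loads 8 bytes from each
// source row).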
static INLINE __m128i blend_8(const uint8_t *src0, const uint8_t *src1,
                              const __m128i v_m0_w, const __m128i v_m1_w) {
  const __m128i v_s0_b = xx_loadl_64(src0);
  const __m128i v_s1_b = xx_loadl_64(src1);
  const __m128i v_s0_w = _mm_cvtepu8_epi16(v_s0_b);
  const __m128i v_s1_w = _mm_cvtepu8_epi16(v_s1_b);
  const __m128i v_p0_w = _mm_mullo_epi16(v_s0_w, v_m0_w);
  const __m128i v_p1_w = _mm_mullo_epi16(v_s1_w, v_m1_w);
  const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
  return v_res_w;
}

#if CONFIG_HIGHBITDEPTH
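// Signature shared by the high bit-depth blend kernels below, so callers can
// select a kernel per bit depth at run time.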
typedef __m128i (*blend_unit_fn)(const uint16_t *src0, const uint16_t *src1,
                                 const __m128i v_m0_w, const __m128i v_m1_w);
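
// Blend 4 pixels of 10-bit input. With masks summing to
// AOM_BLEND_A64_MAX_ALPHA (64), each weighted sum is at most 1023 * 64 =
// 65472, so plain 16-bit multiplies and adds cannot overflow.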
static INLINE __m128i blend_4_b10(const uint16_t *src0, const uint16_t *src1,
                                  const __m128i v_m0_w, const __m128i v_m1_w) {
  const __m128i v_s0_w = xx_loadl_64(src0);
  const __m128i v_s1_w = xx_loadl_64(src1);
  const __m128i v_p0_w = _mm_mullo_epi16(v_s0_w, v_m0_w);
  const __m128i v_p1_w = _mm_mullo_epi16(v_s1_w, v_m1_w);
  const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
  return v_res_w;
}
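
// Same as blend_4_b10(), but blends 8 pixels per call (a full 128-bit load
// from each source row).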
static INLINE __m128i blend_8_b10(const uint16_t *src0, const uint16_t *src1,
                                  const __m128i v_m0_w, const __m128i v_m1_w) {
  const __m128i v_s0_w = xx_loadu_128(src0);
  const __m128i v_s1_w = xx_loadu_128(src1);
  const __m128i v_p0_w = _mm_mullo_epi16(v_s0_w, v_m0_w);
  const __m128i v_p1_w = _mm_mullo_epi16(v_s1_w, v_m1_w);
  const __m128i v_sum_w = _mm_add_epi16(v_p0_w, v_p1_w);
  const __m128i v_res_w = xx_roundn_epu16(v_sum_w, AOM_BLEND_A64_ROUND_BITS);
  return v_res_w;
}
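
// Blend 4 pixels of 12-bit input. Here a weighted sum can reach
// 4095 * 64 = 262080, which no longer fits in 16 bits, so sources and masks
// are interleaved and accumulated in 32 bits with _mm_madd_epi16. The shift
// by AOM_BLEND_A64_ROUND_BITS - 1 followed by xx_round_epu16() completes the
// rounding by AOM_BLEND_A64_ROUND_BITS.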
static INLINE __m128i blend_4_b12(const uint16_t *src0, const uint16_t *src1,
                                  const __m128i v_m0_w, const __m128i v_m1_w) {
  const __m128i v_s0_w = xx_loadl_64(src0);
  const __m128i v_s1_w = xx_loadl_64(src1);

  // Interleave
  const __m128i v_m01_w = _mm_unpacklo_epi16(v_m0_w, v_m1_w);
  const __m128i v_s01_w = _mm_unpacklo_epi16(v_s0_w, v_s1_w);

  // Multiply-Add
  const __m128i v_sum_d = _mm_madd_epi16(v_s01_w, v_m01_w);

  // Scale
  const __m128i v_ssum_d =
      _mm_srli_epi32(v_sum_d, AOM_BLEND_A64_ROUND_BITS - 1);

  // Pack
  const __m128i v_pssum_d = _mm_packs_epi32(v_ssum_d, v_ssum_d);

  // Round
  const __m128i v_res_w = xx_round_epu16(v_pssum_d);

  return v_res_w;
}
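
// Same as blend_4_b12(), but blends 8 pixels per call: the low and high
// halves of the 128-bit source rows are interleaved and accumulated
// separately, then packed back together.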
static INLINE __m128i blend_8_b12(const uint16_t *src0, const uint16_t *src1,
                                  const __m128i v_m0_w, const __m128i v_m1_w) {
  const __m128i v_s0_w = xx_loadu_128(src0);
  const __m128i v_s1_w = xx_loadu_128(src1);

  // Interleave
  const __m128i v_m01l_w = _mm_unpacklo_epi16(v_m0_w, v_m1_w);
  const __m128i v_m01h_w = _mm_unpackhi_epi16(v_m0_w, v_m1_w);
  const __m128i v_s01l_w = _mm_unpacklo_epi16(v_s0_w, v_s1_w);
  const __m128i v_s01h_w = _mm_unpackhi_epi16(v_s0_w, v_s1_w);

  // Multiply-Add
  const __m128i v_suml_d = _mm_madd_epi16(v_s01l_w, v_m01l_w);
  const __m128i v_sumh_d = _mm_madd_epi16(v_s01h_w, v_m01h_w);

  // Scale
  const __m128i v_ssuml_d =
      _mm_srli_epi32(v_suml_d, AOM_BLEND_A64_ROUND_BITS - 1);
  const __m128i v_ssumh_d =
      _mm_srli_epi32(v_sumh_d, AOM_BLEND_A64_ROUND_BITS - 1);

  // Pack
  const __m128i v_pssum_d = _mm_packs_epi32(v_ssuml_d, v_ssumh_d);

  // Round
  const __m128i v_res_w = xx_round_epu16(v_pssum_d);

  return v_res_w;
}

#endif  // CONFIG_HIGHBITDEPTH

#endif  // AOM_DSP_X86_BLEND_SSE4_H_