author | JustOff <Off.Just.Off@gmail.com> | 2018-06-09 15:11:22 +0300
---|---|---
committer | JustOff <Off.Just.Off@gmail.com> | 2018-06-11 16:42:50 +0300
commit | f83f62e1bff0c2aedc32e67fe369ba923c5b104a (patch) |
tree | fbb69e76754552dde5c3c5d4fe928ed9693f601a | /security/nss/lib/freebl/verified
parent | 75323087aea91719bbb4f766bc6298d0618f0163 (diff) |
Update NSS to 3.36.4-RTM
Diffstat (limited to 'security/nss/lib/freebl/verified')
-rw-r--r-- | security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.c | 390
-rw-r--r-- | security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.h | 61
-rw-r--r-- | security/nss/lib/freebl/verified/kremlib_base.h | 1
-rw-r--r-- | security/nss/lib/freebl/verified/vec128.h | 345
4 files changed, 797 insertions, 0 deletions
diff --git a/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.c b/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.c
new file mode 100644
index 000000000..4eba49f47
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.c
@@ -0,0 +1,390 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hacl_Chacha20_Vec128.h"
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_State_state_incr(vec *k)
+{
+    vec k3 = k[3U];
+    k[3U] = vec_increment(k3);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_State_state_to_key_block(uint8_t *stream_block, vec *k)
+{
+    vec k0 = k[0U];
+    vec k1 = k[1U];
+    vec k2 = k[2U];
+    vec k3 = k[3U];
+    uint8_t *a = stream_block;
+    uint8_t *b = stream_block + (uint32_t)16U;
+    uint8_t *c = stream_block + (uint32_t)32U;
+    uint8_t *d = stream_block + (uint32_t)48U;
+    vec_store_le(a, k0);
+    vec_store_le(b, k1);
+    vec_store_le(c, k2);
+    vec_store_le(d, k3);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_State_state_setup(vec *st, uint8_t *k, uint8_t *n1, uint32_t c)
+{
+    st[0U] =
+        vec_load_32x4((uint32_t)0x61707865U,
+                      (uint32_t)0x3320646eU,
+                      (uint32_t)0x79622d32U,
+                      (uint32_t)0x6b206574U);
+    vec k0 = vec_load128_le(k);
+    vec k1 = vec_load128_le(k + (uint32_t)16U);
+    st[1U] = k0;
+    st[2U] = k1;
+    uint32_t n0 = load32_le(n1);
+    uint8_t *x00 = n1 + (uint32_t)4U;
+    uint32_t n10 = load32_le(x00);
+    uint8_t *x0 = n1 + (uint32_t)8U;
+    uint32_t n2 = load32_le(x0);
+    vec v1 = vec_load_32x4(c, n0, n10, n2);
+    st[3U] = v1;
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_round(vec *st)
+{
+    vec sa = st[0U];
+    vec sb0 = st[1U];
+    vec sd0 = st[3U];
+    vec sa10 = vec_add(sa, sb0);
+    vec sd10 = vec_rotate_left(vec_xor(sd0, sa10), (uint32_t)16U);
+    st[0U] = sa10;
+    st[3U] = sd10;
+    vec sa0 = st[2U];
+    vec sb1 = st[3U];
+    vec sd2 = st[1U];
+    vec sa11 = vec_add(sa0, sb1);
+    vec sd11 = vec_rotate_left(vec_xor(sd2, sa11), (uint32_t)12U);
+    st[2U] = sa11;
+    st[1U] = sd11;
+    vec sa2 = st[0U];
+    vec sb2 = st[1U];
+    vec sd3 = st[3U];
+    vec sa12 = vec_add(sa2, sb2);
+    vec sd12 = vec_rotate_left(vec_xor(sd3, sa12), (uint32_t)8U);
+    st[0U] = sa12;
+    st[3U] = sd12;
+    vec sa3 = st[2U];
+    vec sb = st[3U];
+    vec sd = st[1U];
+    vec sa1 = vec_add(sa3, sb);
+    vec sd1 = vec_rotate_left(vec_xor(sd, sa1), (uint32_t)7U);
+    st[2U] = sa1;
+    st[1U] = sd1;
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_double_round(vec *st)
+{
+    Hacl_Impl_Chacha20_Vec128_round(st);
+    vec r1 = st[1U];
+    vec r20 = st[2U];
+    vec r30 = st[3U];
+    st[1U] = vec_shuffle_right(r1, (uint32_t)1U);
+    st[2U] = vec_shuffle_right(r20, (uint32_t)2U);
+    st[3U] = vec_shuffle_right(r30, (uint32_t)3U);
+    Hacl_Impl_Chacha20_Vec128_round(st);
+    vec r10 = st[1U];
+    vec r2 = st[2U];
+    vec r3 = st[3U];
+    st[1U] = vec_shuffle_right(r10, (uint32_t)3U);
+    st[2U] = vec_shuffle_right(r2, (uint32_t)2U);
+    st[3U] = vec_shuffle_right(r3, (uint32_t)1U);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_double_round3(vec *st, vec *st_, vec *st__)
+{
+    Hacl_Impl_Chacha20_Vec128_double_round(st);
+    Hacl_Impl_Chacha20_Vec128_double_round(st_);
+    Hacl_Impl_Chacha20_Vec128_double_round(st__);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_sum_states(vec *st_, vec *st)
+{
+    vec s0 = st[0U];
+    vec s1 = st[1U];
+    vec s2 = st[2U];
+    vec s3 = st[3U];
+    vec s0_ = st_[0U];
+    vec s1_ = st_[1U];
+    vec s2_ = st_[2U];
+    vec s3_ = st_[3U];
+    st_[0U] = vec_add(s0_, s0);
+    st_[1U] = vec_add(s1_, s1);
+    st_[2U] = vec_add(s2_, s2);
+    st_[3U] = vec_add(s3_, s3);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_copy_state(vec *st_, vec *st)
+{
+    vec st0 = st[0U];
+    vec st1 = st[1U];
+    vec st2 = st[2U];
+    vec st3 = st[3U];
+    st_[0U] = st0;
+    st_[1U] = st1;
+    st_[2U] = st2;
+    st_[3U] = st3;
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_core(vec *k, vec *st)
+{
+    Hacl_Impl_Chacha20_Vec128_copy_state(k, st);
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i = i + (uint32_t)1U)
+        Hacl_Impl_Chacha20_Vec128_double_round(k);
+    Hacl_Impl_Chacha20_Vec128_sum_states(k, st);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_state_incr(vec *st)
+{
+    Hacl_Impl_Chacha20_Vec128_State_state_incr(st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_incr3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+    Hacl_Impl_Chacha20_Vec128_copy_state(k0, st);
+    Hacl_Impl_Chacha20_Vec128_copy_state(k1, st);
+    Hacl_Impl_Chacha20_Vec128_state_incr(k1);
+    Hacl_Impl_Chacha20_Vec128_copy_state(k2, k1);
+    Hacl_Impl_Chacha20_Vec128_state_incr(k2);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_sum3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+    Hacl_Impl_Chacha20_Vec128_sum_states(k0, st);
+    Hacl_Impl_Chacha20_Vec128_state_incr(st);
+    Hacl_Impl_Chacha20_Vec128_sum_states(k1, st);
+    Hacl_Impl_Chacha20_Vec128_state_incr(st);
+    Hacl_Impl_Chacha20_Vec128_sum_states(k2, st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_core3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+    Hacl_Impl_Chacha20_Vec128_chacha20_incr3(k0, k1, k2, st);
+    for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i = i + (uint32_t)1U)
+        Hacl_Impl_Chacha20_Vec128_double_round3(k0, k1, k2);
+    Hacl_Impl_Chacha20_Vec128_chacha20_sum3(k0, k1, k2, st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_block(uint8_t *stream_block, vec *st)
+{
+    KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+    vec k[4U];
+    for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+        k[_i] = vec_zero();
+    Hacl_Impl_Chacha20_Vec128_chacha20_core(k, st);
+    Hacl_Impl_Chacha20_Vec128_State_state_to_key_block(stream_block, k);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_init(vec *st, uint8_t *k, uint8_t *n1, uint32_t ctr)
+{
+    Hacl_Impl_Chacha20_Vec128_State_state_setup(st, k, n1, ctr);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_update_last(uint8_t *output, uint8_t *plain, uint32_t len, vec *st)
+{
+    uint8_t block[64U] = { 0U };
+    Hacl_Impl_Chacha20_Vec128_chacha20_block(block, st);
+    uint8_t *mask = block;
+    for (uint32_t i = (uint32_t)0U; i < len; i = i + (uint32_t)1U) {
+        uint8_t xi = plain[i];
+        uint8_t yi = mask[i];
+        output[i] = xi ^ yi;
+    }
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_xor_block(uint8_t *output, uint8_t *plain, vec *st)
+{
+    vec p0 = vec_load_le(plain);
+    vec p1 = vec_load_le(plain + (uint32_t)16U);
+    vec p2 = vec_load_le(plain + (uint32_t)32U);
+    vec p3 = vec_load_le(plain + (uint32_t)48U);
+    vec k0 = st[0U];
+    vec k1 = st[1U];
+    vec k2 = st[2U];
+    vec k3 = st[3U];
+    vec o00 = vec_xor(p0, k0);
+    vec o10 = vec_xor(p1, k1);
+    vec o20 = vec_xor(p2, k2);
+    vec o30 = vec_xor(p3, k3);
+    uint8_t *o0 = output;
+    uint8_t *o1 = output + (uint32_t)16U;
+    uint8_t *o2 = output + (uint32_t)32U;
+    uint8_t *o3 = output + (uint32_t)48U;
+    vec_store_le(o0, o00);
+    vec_store_le(o1, o10);
+    vec_store_le(o2, o20);
+    vec_store_le(o3, o30);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_update(uint8_t *output, uint8_t *plain, vec *st)
+{
+    KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+    vec k[4U];
+    for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+        k[_i] = vec_zero();
+    Hacl_Impl_Chacha20_Vec128_chacha20_core(k, st);
+    Hacl_Impl_Chacha20_Vec128_xor_block(output, plain, k);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_update3(uint8_t *output, uint8_t *plain, vec *st)
+{
+    KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+    vec k0[4U];
+    for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+        k0[_i] = vec_zero();
+    KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+    vec k1[4U];
+    for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+        k1[_i] = vec_zero();
+    KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+    vec k2[4U];
+    for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+        k2[_i] = vec_zero();
+    Hacl_Impl_Chacha20_Vec128_chacha20_core3(k0, k1, k2, st);
+    uint8_t *p0 = plain;
+    uint8_t *p1 = plain + (uint32_t)64U;
+    uint8_t *p2 = plain + (uint32_t)128U;
+    uint8_t *o0 = output;
+    uint8_t *o1 = output + (uint32_t)64U;
+    uint8_t *o2 = output + (uint32_t)128U;
+    Hacl_Impl_Chacha20_Vec128_xor_block(o0, p0, k0);
+    Hacl_Impl_Chacha20_Vec128_xor_block(o1, p1, k1);
+    Hacl_Impl_Chacha20_Vec128_xor_block(o2, p2, k2);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_update3_(
+    uint8_t *output,
+    uint8_t *plain,
+    uint32_t len,
+    vec *st,
+    uint32_t i)
+{
+    uint8_t *out_block = output + (uint32_t)192U * i;
+    uint8_t *plain_block = plain + (uint32_t)192U * i;
+    Hacl_Impl_Chacha20_Vec128_update3(out_block, plain_block, st);
+    Hacl_Impl_Chacha20_Vec128_state_incr(st);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks3(
+    uint8_t *output,
+    uint8_t *plain,
+    uint32_t len,
+    vec *st)
+{
+    for (uint32_t i = (uint32_t)0U; i < len; i = i + (uint32_t)1U)
+        Hacl_Impl_Chacha20_Vec128_update3_(output, plain, len, st, i);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks(
+    uint8_t *output,
+    uint8_t *plain,
+    uint32_t len,
+    vec *st)
+{
+    uint32_t len3 = len / (uint32_t)3U;
+    uint32_t rest3 = len % (uint32_t)3U;
+    uint8_t *plain_ = plain;
+    uint8_t *blocks1 = plain + (uint32_t)192U * len3;
+    uint8_t *output_ = output;
+    uint8_t *outs = output + (uint32_t)192U * len3;
+    Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks3(output_, plain_, len3, st);
+    if (rest3 == (uint32_t)2U) {
+        uint8_t *block0 = blocks1;
+        uint8_t *block1 = blocks1 + (uint32_t)64U;
+        uint8_t *out0 = outs;
+        uint8_t *out1 = outs + (uint32_t)64U;
+        Hacl_Impl_Chacha20_Vec128_update(out0, block0, st);
+        Hacl_Impl_Chacha20_Vec128_state_incr(st);
+        Hacl_Impl_Chacha20_Vec128_update(out1, block1, st);
+        Hacl_Impl_Chacha20_Vec128_state_incr(st);
+    } else if (rest3 == (uint32_t)1U) {
+        Hacl_Impl_Chacha20_Vec128_update(outs, blocks1, st);
+        Hacl_Impl_Chacha20_Vec128_state_incr(st);
+    }
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode(
+    uint8_t *output,
+    uint8_t *plain,
+    uint32_t len,
+    vec *st)
+{
+    uint32_t blocks_len = len >> (uint32_t)6U;
+    uint32_t part_len = len & (uint32_t)0x3fU;
+    uint8_t *output_ = output;
+    uint8_t *plain_ = plain;
+    uint8_t *output__ = output + (uint32_t)64U * blocks_len;
+    uint8_t *plain__ = plain + (uint32_t)64U * blocks_len;
+    Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks(output_, plain_, blocks_len, st);
+    if (part_len > (uint32_t)0U)
+        Hacl_Impl_Chacha20_Vec128_update_last(output__, plain__, part_len, st);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20(
+    uint8_t *output,
+    uint8_t *plain,
+    uint32_t len,
+    uint8_t *k,
+    uint8_t *n1,
+    uint32_t ctr)
+{
+    KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+    vec buf[4U];
+    for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+        buf[_i] = vec_zero();
+    vec *st = buf;
+    Hacl_Impl_Chacha20_Vec128_init(st, k, n1, ctr);
+    Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode(output, plain, len, st);
+}
+
+void
+Hacl_Chacha20_Vec128_chacha20(
+    uint8_t *output,
+    uint8_t *plain,
+    uint32_t len,
+    uint8_t *k,
+    uint8_t *n1,
+    uint32_t ctr)
+{
+    Hacl_Impl_Chacha20_Vec128_chacha20(output, plain, len, k, n1, ctr);
+}
diff --git a/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.h b/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.h
new file mode 100644
index 000000000..57942093d
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.h
@@ -0,0 +1,61 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kremlib.h"
+#ifndef __Hacl_Chacha20_Vec128_H
+#define __Hacl_Chacha20_Vec128_H
+
+#include "vec128.h"
+
+typedef uint32_t Hacl_Impl_Xor_Lemmas_u32;
+
+typedef uint8_t Hacl_Impl_Xor_Lemmas_u8;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_State_u32;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_State_h32;
+
+typedef uint8_t *Hacl_Impl_Chacha20_Vec128_State_uint8_p;
+
+typedef vec *Hacl_Impl_Chacha20_Vec128_State_state;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_u32;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_h32;
+
+typedef uint8_t *Hacl_Impl_Chacha20_Vec128_uint8_p;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_idx;
+
+typedef struct
+{
+    void *k;
+    void *n;
+    uint32_t ctr;
+} Hacl_Impl_Chacha20_Vec128_log_t_;
+
+typedef void *Hacl_Impl_Chacha20_Vec128_log_t;
+
+typedef uint8_t *Hacl_Chacha20_Vec128_uint8_p;
+
+void
+Hacl_Chacha20_Vec128_chacha20(
+    uint8_t *output,
+    uint8_t *plain,
+    uint32_t len,
+    uint8_t *k,
+    uint8_t *n1,
+    uint32_t ctr);
+#endif
diff --git a/security/nss/lib/freebl/verified/kremlib_base.h b/security/nss/lib/freebl/verified/kremlib_base.h
index 61bac11d4..14170625d 100644
--- a/security/nss/lib/freebl/verified/kremlib_base.h
+++ b/security/nss/lib/freebl/verified/kremlib_base.h
@@ -17,6 +17,7 @@
 #define __KREMLIB_BASE_H
 
 #include <inttypes.h>
+#include <limits.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
diff --git a/security/nss/lib/freebl/verified/vec128.h b/security/nss/lib/freebl/verified/vec128.h
new file mode 100644
index 000000000..986e9db82
--- /dev/null
+++ b/security/nss/lib/freebl/verified/vec128.h
@@ -0,0 +1,345 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __Vec_H
+#define __Vec_H
+
+#ifdef __MSVC__
+#define forceinline __forceinline inline
+#elif (defined(__GNUC__) || defined(__clang__))
+#define forceinline __attribute__((always_inline)) inline
+#else
+#define forceinline inline
+#endif
+
+#if defined(__SSSE3__) || defined(__AVX2__) || defined(__AVX__)
+
+#include <emmintrin.h>
+#include <tmmintrin.h>
+
+#define VEC128
+#define vec_size 4
+
+typedef __m128i vec;
+
+static forceinline vec
+vec_rotate_left_8(vec v)
+{
+    __m128i x = _mm_set_epi8(14, 13, 12, 15, 10, 9, 8, 11, 6, 5, 4, 7, 2, 1, 0, 3);
+    return _mm_shuffle_epi8(v, x);
+}
+
+static forceinline vec
+vec_rotate_left_16(vec v)
+{
+    __m128i x = _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
+    return _mm_shuffle_epi8(v, x);
+}
+
+static forceinline vec
+vec_rotate_left(vec v, unsigned int n)
+{
+    if (n == 8)
+        return vec_rotate_left_8(v);
+    if (n == 16)
+        return vec_rotate_left_16(v);
+    return _mm_xor_si128(_mm_slli_epi32(v, n),
+                         _mm_srli_epi32(v, 32 - n));
+}
+
+static forceinline vec
+vec_rotate_right(vec v, unsigned int n)
+{
+    return (vec_rotate_left(v, 32 - n));
+}
+
+#define vec_shuffle_right(x, n) \
+    _mm_shuffle_epi32(x, _MM_SHUFFLE((3 + (n)) % 4, (2 + (n)) % 4, (1 + (n)) % 4, (n) % 4))
+
+#define vec_shuffle_left(x, n) vec_shuffle_right((x), 4 - (n))
+
+static forceinline vec
+vec_load_32x4(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4)
+{
+    return _mm_set_epi32(x4, x3, x2, x1);
+}
+
+static forceinline vec
+vec_load_32x8(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8)
+{
+    return _mm_set_epi32(x4, x3, x2, x1);
+}
+
+static forceinline vec
+vec_load_le(const unsigned char* in)
+{
+    return _mm_loadu_si128((__m128i*)(in));
+}
+
+static forceinline vec
+vec_load128_le(const unsigned char* in)
+{
+    return vec_load_le(in);
+}
+
+static forceinline void
+vec_store_le(unsigned char* out, vec v)
+{
+    _mm_storeu_si128((__m128i*)(out), v);
+}
+
+static forceinline vec
+vec_add(vec v1, vec v2)
+{
+    return _mm_add_epi32(v1, v2);
+}
+
+static forceinline vec
+vec_add_u32(vec v1, uint32_t x)
+{
+    vec v2 = vec_load_32x4(x, 0, 0, 0);
+    return _mm_add_epi32(v1, v2);
+}
+
+static forceinline vec
+vec_increment(vec v1)
+{
+    vec one = vec_load_32x4(1, 0, 0, 0);
+    return _mm_add_epi32(v1, one);
+}
+
+static forceinline vec
+vec_xor(vec v1, vec v2)
+{
+    return _mm_xor_si128(v1, v2);
+}
+
+#define vec_zero() _mm_set_epi32(0, 0, 0, 0)
+
+#elif defined(__ARM_NEON__) || defined(__ARM_NEON)
+#include <arm_neon.h>
+
+typedef uint32x4_t vec;
+
+static forceinline vec
+vec_xor(vec v1, vec v2)
+{
+    return veorq_u32(v1, v2);
+}
+
+#define vec_rotate_left(x, n) \
+    vsriq_n_u32(vshlq_n_u32((x), (n)), (x), 32 - (n))
+
+#define vec_rotate_right(a, b) \
+    vec_rotate_left((a), 32 - (b))
+
+#define vec_shuffle_right(x, n) \
+    vextq_u32((x), (x), (n))
+
+#define vec_shuffle_left(a, b) \
+    vec_shuffle_right((a), 4 - (b))
+
+static forceinline vec
+vec_load_32x4(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4)
+{
+    uint32_t a[4] = { x1, x2, x3, x4 };
+    return vld1q_u32(a);
+}
+
+static forceinline vec
+vec_load_32(uint32_t x1)
+{
+    uint32_t a[4] = { x1, x1, x1, x1 };
+    return vld1q_u32(a);
+}
+
+static forceinline vec
+vec_load_32x8(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8)
+{
+    return vec_load_32x4(x1, x2, x3, x4);
+}
+
+static forceinline vec
+vec_load_le(const unsigned char* in)
+{
+    return vld1q_u32((uint32_t*)in);
+}
+
+static forceinline vec
+vec_load128_le(const unsigned char* in)
+{
+    return vec_load_le(in);
+}
+
+static forceinline void
+vec_store_le(unsigned char* out, vec v)
+{
+    vst1q_u32((uint32_t*)out, v);
+}
+
+static forceinline vec
+vec_add(vec v1, vec v2)
+{
+    return vaddq_u32(v1, v2);
+}
+
+static forceinline vec
+vec_add_u32(vec v1, uint32_t x)
+{
+    vec v2 = vec_load_32x4(x, 0, 0, 0);
+    return vec_add(v1, v2);
+}
+
+static forceinline vec
+vec_increment(vec v1)
+{
+    vec one = vec_load_32x4(1, 0, 0, 0);
+    return vec_add(v1, one);
+}
+
+#define vec_zero() vec_load_32x4(0, 0, 0, 0)
+
+#else
+
+#define VEC128
+#define vec_size 4
+
+typedef struct {
+    uint32_t v[4];
+} vec;
+
+static forceinline vec
+vec_xor(vec v1, vec v2)
+{
+    vec r;
+    r.v[0] = v1.v[0] ^ v2.v[0];
+    r.v[1] = v1.v[1] ^ v2.v[1];
+    r.v[2] = v1.v[2] ^ v2.v[2];
+    r.v[3] = v1.v[3] ^ v2.v[3];
+    return r;
+}
+
+static forceinline vec
+vec_rotate_left(vec v, unsigned int n)
+{
+    vec r;
+    r.v[0] = (v.v[0] << n) ^ (v.v[0] >> (32 - n));
+    r.v[1] = (v.v[1] << n) ^ (v.v[1] >> (32 - n));
+    r.v[2] = (v.v[2] << n) ^ (v.v[2] >> (32 - n));
+    r.v[3] = (v.v[3] << n) ^ (v.v[3] >> (32 - n));
+    return r;
+}
+
+static forceinline vec
+vec_rotate_right(vec v, unsigned int n)
+{
+    return (vec_rotate_left(v, 32 - n));
+}
+
+static forceinline vec
+vec_shuffle_right(vec v, unsigned int n)
+{
+    vec r;
+    r.v[0] = v.v[n % 4];
+    r.v[1] = v.v[(n + 1) % 4];
+    r.v[2] = v.v[(n + 2) % 4];
+    r.v[3] = v.v[(n + 3) % 4];
+    return r;
+}
+
+static forceinline vec
+vec_shuffle_left(vec x, unsigned int n)
+{
+    return vec_shuffle_right(x, 4 - n);
+}
+
+static forceinline vec
+vec_load_32x4(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
+{
+    vec v;
+    v.v[0] = x0;
+    v.v[1] = x1;
+    v.v[2] = x2;
+    v.v[3] = x3;
+    return v;
+}
+
+static forceinline vec
+vec_load_32(uint32_t x0)
+{
+    vec v;
+    v.v[0] = x0;
+    v.v[1] = x0;
+    v.v[2] = x0;
+    v.v[3] = x0;
+    return v;
+}
+
+static forceinline vec
+vec_load_le(const uint8_t* in)
+{
+    vec r;
+    r.v[0] = load32_le((uint8_t*)in);
+    r.v[1] = load32_le((uint8_t*)in + 4);
+    r.v[2] = load32_le((uint8_t*)in + 8);
+    r.v[3] = load32_le((uint8_t*)in + 12);
+    return r;
+}
+
+static forceinline void
+vec_store_le(unsigned char* out, vec r)
+{
+    store32_le(out, r.v[0]);
+    store32_le(out + 4, r.v[1]);
+    store32_le(out + 8, r.v[2]);
+    store32_le(out + 12, r.v[3]);
+}
+
+static forceinline vec
+vec_load128_le(const unsigned char* in)
+{
+    return vec_load_le(in);
+}
+
+static forceinline vec
+vec_add(vec v1, vec v2)
+{
+    vec r;
+    r.v[0] = v1.v[0] + v2.v[0];
+    r.v[1] = v1.v[1] + v2.v[1];
+    r.v[2] = v1.v[2] + v2.v[2];
+    r.v[3] = v1.v[3] + v2.v[3];
+    return r;
+}
+
+static forceinline vec
+vec_add_u32(vec v1, uint32_t x)
+{
+    vec v2 = vec_load_32x4(x, 0, 0, 0);
+    return vec_add(v1, v2);
+}
+
+static forceinline vec
+vec_increment(vec v1)
+{
+    vec one = vec_load_32x4(1, 0, 0, 0);
+    return vec_add(v1, one);
+}
+
+#define vec_zero() vec_load_32x4(0, 0, 0, 0)
+
+#endif
+
+#endif
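For reference, the only symbol this patch exports is `Hacl_Chacha20_Vec128_chacha20`, declared at the bottom of Hacl_Chacha20_Vec128.h above: it XORs `len` bytes of `plain` with the ChaCha20 keystream derived from a 32-byte key `k`, a 12-byte IETF nonce `n1`, and a 32-bit initial block counter `ctr`. The following is a minimal, hypothetical standalone caller (not part of the commit); it assumes the verified sources and their kremlib headers are on the include path and that the translation unit is built with SSSE3 or NEON enabled so one of the vector paths in vec128.h is selected.

```c
/* Hypothetical usage sketch (not part of this commit). Because ChaCha20 is a
 * stream cipher, applying the same keystream twice round-trips the data. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "Hacl_Chacha20_Vec128.h"

int
main(void)
{
    uint8_t key[32] = { 0 };   /* 256-bit key: placeholder value */
    uint8_t nonce[12] = { 0 }; /* 96-bit IETF nonce: placeholder value */
    uint8_t plain[64] = "attack at dawn";
    uint8_t cipher[64];
    uint8_t round_trip[64];

    /* Encrypt, starting the block counter at 1 as RFC 7539 does for AEAD. */
    Hacl_Chacha20_Vec128_chacha20(cipher, plain, (uint32_t)sizeof(plain),
                                  key, nonce, 1U);
    /* Decrypt by regenerating the same keystream over the ciphertext. */
    Hacl_Chacha20_Vec128_chacha20(round_trip, cipher, (uint32_t)sizeof(cipher),
                                  key, nonce, 1U);

    printf("round trip %s\n",
           memcmp(plain, round_trip, sizeof(plain)) == 0 ? "ok" : "FAILED");
    return 0;
}
```

A 64-byte buffer is used here only for convenience; `Hacl_Impl_Chacha20_Vec128_update_last` handles any trailing partial block.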
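The 192-byte stride in `chacha20_counter_mode_blocks` comes from the file's main optimization: full 64-byte blocks are keystreamed three at a time with three independent state copies at counters c, c+1, c+2 (`chacha20_incr3`/`chacha20_core3`), and up to two leftover whole blocks plus a partial tail are handled separately. The snippet below is an illustrative scalar sketch of just that partitioning arithmetic; the example length is an assumption.

```c
/* Illustrative partitioning of a `len`-byte input, mirroring the split in
 * chacha20_counter_mode and chacha20_counter_mode_blocks above. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t len = 1000U;             /* example input length (assumed) */
    uint32_t blocks_len = len >> 6U;  /* full 64-byte blocks: 15 */
    uint32_t part_len = len & 0x3fU;  /* trailing partial-block bytes: 40 */
    uint32_t len3 = blocks_len / 3U;  /* 192-byte triples, 3-way path: 5 */
    uint32_t rest3 = blocks_len % 3U; /* leftover single blocks: 0 */

    printf("%u triples, %u single blocks, %u tail bytes\n",
           len3, rest3, part_len);
    return 0;
}
```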
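One vec128.h detail worth noting: in the SSSE3 path, the quarter-round rotations by 8 and 16 are special-cased as single byte shuffles (`_mm_shuffle_epi8`) instead of the shift/shift/XOR sequence used for rotations by 7 and 12. The cross-check below verifies the rotate-by-16 shuffle mask against a plain scalar rotate; the test harness itself is an illustrative assumption, not taken from the commit. Build with `-mssse3` on x86.

```c
/* Illustrative cross-check (not part of this commit) of the shuffle-based
 * 16-bit rotate from vec128.h against a portable scalar rotate. */
#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>

static uint32_t
rotl32(uint32_t x, unsigned int n)
{
    return (x << n) | (x >> (32 - n));
}

int
main(void)
{
    uint32_t in[4] = { 0x01234567U, 0x89abcdefU, 0xdeadbeefU, 0x00000001U };
    uint32_t out[4];

    /* Same byte pattern vec128.h uses in vec_rotate_left_16: swap the
     * 16-bit halves of each 32-bit lane. */
    __m128i x = _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
    __m128i v = _mm_loadu_si128((const __m128i *)in);
    _mm_storeu_si128((__m128i *)out, _mm_shuffle_epi8(v, x));

    for (int i = 0; i < 4; i++)
        printf("lane %d: %s\n", i, out[i] == rotl32(in[i], 16) ? "ok" : "FAILED");
    return 0;
}
```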