Diffstat (limited to 'security/nss/lib/freebl/verified')
-rw-r--r--  security/nss/lib/freebl/verified/FStar.c                      255
-rw-r--r--  security/nss/lib/freebl/verified/FStar.h                       69
-rw-r--r--  security/nss/lib/freebl/verified/Hacl_Chacha20.c              270
-rw-r--r--  security/nss/lib/freebl/verified/Hacl_Chacha20.h               81
-rw-r--r--  security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.c       390
-rw-r--r--  security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.h        61
-rw-r--r--  security/nss/lib/freebl/verified/Hacl_Curve25519.c            845
-rw-r--r--  security/nss/lib/freebl/verified/Hacl_Curve25519.h             57
-rw-r--r--  security/nss/lib/freebl/verified/Hacl_Poly1305_64.c           485
-rw-r--r--  security/nss/lib/freebl/verified/Hacl_Poly1305_64.h            99
-rw-r--r--  security/nss/lib/freebl/verified/kremlib.h                    672
-rw-r--r--  security/nss/lib/freebl/verified/kremlib_base.h               192
-rw-r--r--  security/nss/lib/freebl/verified/specs/Spec.CTR.fst            98
-rw-r--r--  security/nss/lib/freebl/verified/specs/Spec.Chacha20.fst      169
-rw-r--r--  security/nss/lib/freebl/verified/specs/Spec.Curve25519.fst    168
-rw-r--r--  security/nss/lib/freebl/verified/specs/Spec.Poly1305.fst      107
-rw-r--r--  security/nss/lib/freebl/verified/vec128.h                     345
17 files changed, 4363 insertions(+), 0 deletions(-)
diff --git a/security/nss/lib/freebl/verified/FStar.c b/security/nss/lib/freebl/verified/FStar.c
new file mode 100644
index 000000000..4e5f6d50d
--- /dev/null
+++ b/security/nss/lib/freebl/verified/FStar.c
@@ -0,0 +1,255 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file was auto-generated by KreMLin! */
+
+#include "FStar.h"
+
+static uint64_t
+FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
+{
+ return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
+}
+
+static uint64_t
+FStar_UInt128_carry(uint64_t a, uint64_t b)
+{
+ return FStar_UInt128_constant_time_carry(a, b);
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return (
+ (FStar_UInt128_uint128){
+ .low = a.low + b.low,
+ .high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return (
+ (FStar_UInt128_uint128){
+ .low = a.low + b.low,
+ .high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return (
+ (FStar_UInt128_uint128){
+ .low = a.low - b.low,
+ .high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) });
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return (
+ (FStar_UInt128_uint128){
+ .low = a.low - b.low,
+ .high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return FStar_UInt128_sub_mod_impl(a, b);
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return ((FStar_UInt128_uint128){.low = a.low & b.low, .high = a.high & b.high });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return ((FStar_UInt128_uint128){.low = a.low ^ b.low, .high = a.high ^ b.high });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return ((FStar_UInt128_uint128){.low = a.low | b.low, .high = a.high | b.high });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_lognot(FStar_UInt128_uint128 a)
+{
+ return ((FStar_UInt128_uint128){.low = ~a.low, .high = ~a.high });
+}
+
+static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;
+
+static uint64_t
+FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s));
+}
+
+static uint64_t
+FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return FStar_UInt128_add_u64_shift_left(hi, lo, s);
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s == (uint32_t)0U)
+ return a;
+ else
+ return (
+ (FStar_UInt128_uint128){
+ .low = a.low << s,
+ .high = FStar_UInt128_add_u64_shift_left_respec(a.high, a.low, s) });
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
+{
+ return ((FStar_UInt128_uint128){.low = (uint64_t)0U, .high = a.low << (s - FStar_UInt128_u32_64) });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s < FStar_UInt128_u32_64)
+ return FStar_UInt128_shift_left_small(a, s);
+ else
+ return FStar_UInt128_shift_left_large(a, s);
+}
+
+static uint64_t
+FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s));
+}
+
+static uint64_t
+FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
+{
+ return FStar_UInt128_add_u64_shift_right(hi, lo, s);
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s == (uint32_t)0U)
+ return a;
+ else
+ return (
+ (FStar_UInt128_uint128){
+ .low = FStar_UInt128_add_u64_shift_right_respec(a.high, a.low, s),
+ .high = a.high >> s });
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
+{
+ return ((FStar_UInt128_uint128){.low = a.high >> (s - FStar_UInt128_u32_64), .high = (uint64_t)0U });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
+{
+ if (s < FStar_UInt128_u32_64)
+ return FStar_UInt128_shift_right_small(a, s);
+ else
+ return FStar_UInt128_shift_right_large(a, s);
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return (
+ (FStar_UInt128_uint128){
+ .low = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high),
+ .high = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high) });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+ return (
+ (FStar_UInt128_uint128){
+ .low = (FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high)) | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low)),
+ .high = (FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high)) | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low)) });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_uint64_to_uint128(uint64_t a)
+{
+ return ((FStar_UInt128_uint128){.low = a, .high = (uint64_t)0U });
+}
+
+uint64_t
+FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
+{
+ return a.low;
+}
+
+static uint64_t FStar_UInt128_u64_l32_mask = (uint64_t)0xffffffffU;
+
+static uint64_t
+FStar_UInt128_u64_mod_32(uint64_t a)
+{
+ return a & FStar_UInt128_u64_l32_mask;
+}
+
+static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;
+
+static K___uint64_t_uint64_t_uint64_t_uint64_t
+FStar_UInt128_mul_wide_impl_t_(uint64_t x, uint64_t y)
+{
+ return (
+ (K___uint64_t_uint64_t_uint64_t_uint64_t){
+ .fst = FStar_UInt128_u64_mod_32(x),
+ .snd = FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y)),
+ .thd = x >> FStar_UInt128_u32_32,
+ .f3 = (x >> FStar_UInt128_u32_32) * FStar_UInt128_u64_mod_32(y) + (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32) });
+}
+
+static uint64_t
+FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo)
+{
+ return lo + (hi << FStar_UInt128_u32_32);
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_mul_wide_impl(uint64_t x, uint64_t y)
+{
+ K___uint64_t_uint64_t_uint64_t_uint64_t scrut = FStar_UInt128_mul_wide_impl_t_(x, y);
+ uint64_t u1 = scrut.fst;
+ uint64_t w3 = scrut.snd;
+ uint64_t x_ = scrut.thd;
+ uint64_t t_ = scrut.f3;
+ return (
+ (FStar_UInt128_uint128){
+ .low = FStar_UInt128_u32_combine_(u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_),
+ w3),
+ .high = x_ * (y >> FStar_UInt128_u32_32) + (t_ >> FStar_UInt128_u32_32) +
+ ((u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_)) >> FStar_UInt128_u32_32) });
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_mul_wide(uint64_t x, uint64_t y)
+{
+ return FStar_UInt128_mul_wide_impl(x, y);
+}
diff --git a/security/nss/lib/freebl/verified/FStar.h b/security/nss/lib/freebl/verified/FStar.h
new file mode 100644
index 000000000..7b105b8f2
--- /dev/null
+++ b/security/nss/lib/freebl/verified/FStar.h
@@ -0,0 +1,69 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file was auto-generated by KreMLin! */
+#ifndef __FStar_H
+#define __FStar_H
+
+#include "kremlib_base.h"
+
+typedef struct
+{
+ uint64_t low;
+ uint64_t high;
+} FStar_UInt128_uint128;
+
+typedef FStar_UInt128_uint128 FStar_UInt128_t;
+
+extern void FStar_UInt128_constant_time_carry_ok(uint64_t x0, uint64_t x1);
+
+FStar_UInt128_uint128 FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a);
+
+FStar_UInt128_uint128 FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s);
+
+FStar_UInt128_uint128 FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s);
+
+FStar_UInt128_uint128 FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
+
+uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
+
+typedef struct
+{
+ uint64_t fst;
+ uint64_t snd;
+ uint64_t thd;
+ uint64_t f3;
+} K___uint64_t_uint64_t_uint64_t_uint64_t;
+
+FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
+#endif
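
FStar.c and FStar.h above provide a portable two-limb (low/high uint64_t) emulation of unsigned 128-bit arithmetic; the verified Curve25519 and Poly1305 code in this patch uses it where the compiler offers no native 128-bit integer. The standalone sketch below is not part of the patch: it assumes a 64-bit GCC/Clang toolchain where unsigned __int128 is available for cross-checking, and that it is compiled and linked together with FStar.c from this directory.

/* Illustrative cross-check of the two-limb 128-bit emulation (not part of
   the imported sources). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include "FStar.h"

int
main(void)
{
    uint64_t x = 0xfedcba9876543210ULL;
    uint64_t y = 0x0123456789abcdefULL;

    /* 64x64 -> 128-bit multiply, then a 64-bit addition, through the
       emulated operations defined in FStar.c. */
    FStar_UInt128_uint128 p = FStar_UInt128_mul_wide(x, y);
    FStar_UInt128_uint128 s =
        FStar_UInt128_add(p, FStar_UInt128_uint64_to_uint128((uint64_t)42U));

    /* Reference computation with the compiler's native 128-bit type. */
    unsigned __int128 ref = (unsigned __int128)x * y + 42U;
    assert(s.low == (uint64_t)ref);
    assert(s.high == (uint64_t)(ref >> 64));

    printf("low=0x%016llx high=0x%016llx\n",
           (unsigned long long)s.low, (unsigned long long)s.high);
    return 0;
}
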
diff --git a/security/nss/lib/freebl/verified/Hacl_Chacha20.c b/security/nss/lib/freebl/verified/Hacl_Chacha20.c
new file mode 100644
index 000000000..45a743035
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Chacha20.c
@@ -0,0 +1,270 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hacl_Chacha20.h"
+
+static void
+Hacl_Lib_LoadStore32_uint32s_from_le_bytes(uint32_t *output, uint8_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t)0U; i < len; i = i + (uint32_t)1U) {
+ uint8_t *x0 = input + (uint32_t)4U * i;
+ uint32_t inputi = load32_le(x0);
+ output[i] = inputi;
+ }
+}
+
+static void
+Hacl_Lib_LoadStore32_uint32s_to_le_bytes(uint8_t *output, uint32_t *input, uint32_t len)
+{
+ for (uint32_t i = (uint32_t)0U; i < len; i = i + (uint32_t)1U) {
+ uint32_t hd1 = input[i];
+ uint8_t *x0 = output + (uint32_t)4U * i;
+ store32_le(x0, hd1);
+ }
+}
+
+inline static uint32_t
+Hacl_Impl_Chacha20_rotate_left(uint32_t a, uint32_t s)
+{
+ return a << s | a >> ((uint32_t)32U - s);
+}
+
+inline static void
+Hacl_Impl_Chacha20_quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+ uint32_t sa = st[a];
+ uint32_t sb0 = st[b];
+ st[a] = sa + sb0;
+ uint32_t sd = st[d];
+ uint32_t sa10 = st[a];
+ uint32_t sda = sd ^ sa10;
+ st[d] = Hacl_Impl_Chacha20_rotate_left(sda, (uint32_t)16U);
+ uint32_t sa0 = st[c];
+ uint32_t sb1 = st[d];
+ st[c] = sa0 + sb1;
+ uint32_t sd0 = st[b];
+ uint32_t sa11 = st[c];
+ uint32_t sda0 = sd0 ^ sa11;
+ st[b] = Hacl_Impl_Chacha20_rotate_left(sda0, (uint32_t)12U);
+ uint32_t sa2 = st[a];
+ uint32_t sb2 = st[b];
+ st[a] = sa2 + sb2;
+ uint32_t sd1 = st[d];
+ uint32_t sa12 = st[a];
+ uint32_t sda1 = sd1 ^ sa12;
+ st[d] = Hacl_Impl_Chacha20_rotate_left(sda1, (uint32_t)8U);
+ uint32_t sa3 = st[c];
+ uint32_t sb = st[d];
+ st[c] = sa3 + sb;
+ uint32_t sd2 = st[b];
+ uint32_t sa1 = st[c];
+ uint32_t sda2 = sd2 ^ sa1;
+ st[b] = Hacl_Impl_Chacha20_rotate_left(sda2, (uint32_t)7U);
+}
+
+inline static void
+Hacl_Impl_Chacha20_double_round(uint32_t *st)
+{
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t)1U, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t)2U, (uint32_t)6U, (uint32_t)10U, (uint32_t)14U);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U, (uint32_t)15U);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t)0U, (uint32_t)5U, (uint32_t)10U, (uint32_t)15U);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t)1U, (uint32_t)6U, (uint32_t)11U, (uint32_t)12U);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t)2U, (uint32_t)7U, (uint32_t)8U, (uint32_t)13U);
+ Hacl_Impl_Chacha20_quarter_round(st, (uint32_t)3U, (uint32_t)4U, (uint32_t)9U, (uint32_t)14U);
+}
+
+inline static void
+Hacl_Impl_Chacha20_rounds(uint32_t *st)
+{
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i = i + (uint32_t)1U)
+ Hacl_Impl_Chacha20_double_round(st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_sum_states(uint32_t *st, uint32_t *st_)
+{
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ uint32_t xi = st[i];
+ uint32_t yi = st_[i];
+ st[i] = xi + yi;
+ }
+}
+
+inline static void
+Hacl_Impl_Chacha20_copy_state(uint32_t *st, uint32_t *st_)
+{
+ memcpy(st, st_, (uint32_t)16U * sizeof st_[0U]);
+}
+
+inline static void
+Hacl_Impl_Chacha20_chacha20_core(uint32_t *k, uint32_t *st, uint32_t ctr)
+{
+ st[12U] = ctr;
+ Hacl_Impl_Chacha20_copy_state(k, st);
+ Hacl_Impl_Chacha20_rounds(k);
+ Hacl_Impl_Chacha20_sum_states(k, st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_chacha20_block(uint8_t *stream_block, uint32_t *st, uint32_t ctr)
+{
+ uint32_t st_[16U] = { 0U };
+ Hacl_Impl_Chacha20_chacha20_core(st_, st, ctr);
+ Hacl_Lib_LoadStore32_uint32s_to_le_bytes(stream_block, st_, (uint32_t)16U);
+}
+
+inline static void
+Hacl_Impl_Chacha20_init(uint32_t *st, uint8_t *k, uint8_t *n1)
+{
+ uint32_t *stcst = st;
+ uint32_t *stk = st + (uint32_t)4U;
+ uint32_t *stc = st + (uint32_t)12U;
+ uint32_t *stn = st + (uint32_t)13U;
+ stcst[0U] = (uint32_t)0x61707865U;
+ stcst[1U] = (uint32_t)0x3320646eU;
+ stcst[2U] = (uint32_t)0x79622d32U;
+ stcst[3U] = (uint32_t)0x6b206574U;
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(stk, k, (uint32_t)8U);
+ stc[0U] = (uint32_t)0U;
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(stn, n1, (uint32_t)3U);
+}
+
+static void
+Hacl_Impl_Chacha20_update(uint8_t *output, uint8_t *plain, uint32_t *st, uint32_t ctr)
+{
+ uint32_t b[48U] = { 0U };
+ uint32_t *k = b;
+ uint32_t *ib = b + (uint32_t)16U;
+ uint32_t *ob = b + (uint32_t)32U;
+ Hacl_Impl_Chacha20_chacha20_core(k, st, ctr);
+ Hacl_Lib_LoadStore32_uint32s_from_le_bytes(ib, plain, (uint32_t)16U);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ uint32_t xi = ib[i];
+ uint32_t yi = k[i];
+ ob[i] = xi ^ yi;
+ }
+ Hacl_Lib_LoadStore32_uint32s_to_le_bytes(output, ob, (uint32_t)16U);
+}
+
+static void
+Hacl_Impl_Chacha20_update_last(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint32_t *st,
+ uint32_t ctr)
+{
+ uint8_t block[64U] = { 0U };
+ Hacl_Impl_Chacha20_chacha20_block(block, st, ctr);
+ uint8_t *mask = block;
+ for (uint32_t i = (uint32_t)0U; i < len; i = i + (uint32_t)1U) {
+ uint8_t xi = plain[i];
+ uint8_t yi = mask[i];
+ output[i] = xi ^ yi;
+ }
+}
+
+static void
+Hacl_Impl_Chacha20_chacha20_counter_mode_blocks(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t num_blocks,
+ uint32_t *st,
+ uint32_t ctr)
+{
+ for (uint32_t i = (uint32_t)0U; i < num_blocks; i = i + (uint32_t)1U) {
+ uint8_t *b = plain + (uint32_t)64U * i;
+ uint8_t *o = output + (uint32_t)64U * i;
+ Hacl_Impl_Chacha20_update(o, b, st, ctr + i);
+ }
+}
+
+static void
+Hacl_Impl_Chacha20_chacha20_counter_mode(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint32_t *st,
+ uint32_t ctr)
+{
+ uint32_t blocks_len = len >> (uint32_t)6U;
+ uint32_t part_len = len & (uint32_t)0x3fU;
+ uint8_t *output_ = output;
+ uint8_t *plain_ = plain;
+ uint8_t *output__ = output + (uint32_t)64U * blocks_len;
+ uint8_t *plain__ = plain + (uint32_t)64U * blocks_len;
+ Hacl_Impl_Chacha20_chacha20_counter_mode_blocks(output_, plain_, blocks_len, st, ctr);
+ if (part_len > (uint32_t)0U)
+ Hacl_Impl_Chacha20_update_last(output__, plain__, part_len, st, ctr + blocks_len);
+}
+
+static void
+Hacl_Impl_Chacha20_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr)
+{
+ uint32_t buf[16U] = { 0U };
+ uint32_t *st = buf;
+ Hacl_Impl_Chacha20_init(st, k, n1);
+ Hacl_Impl_Chacha20_chacha20_counter_mode(output, plain, len, st, ctr);
+}
+
+void
+Hacl_Chacha20_chacha20_key_block(uint8_t *block, uint8_t *k, uint8_t *n1, uint32_t ctr)
+{
+ uint32_t buf[16U] = { 0U };
+ uint32_t *st = buf;
+ Hacl_Impl_Chacha20_init(st, k, n1);
+ Hacl_Impl_Chacha20_chacha20_block(block, st, ctr);
+}
+
+/*
+ This function implements Chacha20
+
+ val chacha20 :
+ output:uint8_p ->
+ plain:uint8_p{ disjoint output plain } ->
+ len:uint32_t{ v len = length output /\ v len = length plain } ->
+ key:uint8_p{ length key = 32 } ->
+ nonce:uint8_p{ length nonce = 12 } ->
+ ctr:uint32_t{ v ctr + length plain / 64 < pow2 32 } ->
+ Stack unit
+ (requires
+ fun h -> live h output /\ live h plain /\ live h nonce /\ live h key)
+ (ensures
+ fun h0 _ h1 ->
+ live h1 output /\ live h0 plain /\ modifies_1 output h0 h1 /\
+ live h0 nonce /\
+ live h0 key /\
+ h1.[ output ] ==
+ chacha20_encrypt_bytes h0.[ key ] h0.[ nonce ] (v ctr) h0.[ plain ])
+*/
+void
+Hacl_Chacha20_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr)
+{
+ Hacl_Impl_Chacha20_chacha20(output, plain, len, k, n1, ctr);
+}
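
Hacl_Chacha20.c exports two entry points: Hacl_Chacha20_chacha20, the counter-mode encryption above, and Hacl_Chacha20_chacha20_key_block, which emits one raw 64-byte keystream block. The sketch below (not part of the patch; the all-zero key and nonce are illustrative only, and it assumes it is built together with Hacl_Chacha20.c) shows how the two relate: for a single 64-byte block at counter ctr, the ciphertext is exactly the plaintext XORed with the keystream block for that counter.

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "Hacl_Chacha20.h"

int
main(void)
{
    uint8_t key[32] = { 0 };   /* all-zero key, illustration only */
    uint8_t nonce[12] = { 0 }; /* all-zero 96-bit nonce */
    uint8_t plain[64];
    uint8_t cipher[64];
    uint8_t keystream[64];

    memset(plain, 0xab, sizeof plain);

    /* One full 64-byte block encrypted at counter 7. */
    Hacl_Chacha20_chacha20(cipher, plain, 64U, key, nonce, 7U);

    /* The same keystream block, produced directly. */
    Hacl_Chacha20_chacha20_key_block(keystream, key, nonce, 7U);

    /* Counter-mode invariant: ciphertext = plaintext XOR keystream. */
    for (uint32_t i = 0U; i < 64U; i++)
        assert(cipher[i] == (uint8_t)(plain[i] ^ keystream[i]));
    return 0;
}
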
diff --git a/security/nss/lib/freebl/verified/Hacl_Chacha20.h b/security/nss/lib/freebl/verified/Hacl_Chacha20.h
new file mode 100644
index 000000000..f97e44b74
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Chacha20.h
@@ -0,0 +1,81 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kremlib.h"
+#ifndef __Hacl_Chacha20_H
+#define __Hacl_Chacha20_H
+
+typedef uint32_t Hacl_Impl_Xor_Lemmas_u32;
+
+typedef uint8_t Hacl_Impl_Xor_Lemmas_u8;
+
+typedef uint8_t *Hacl_Lib_LoadStore32_uint8_p;
+
+typedef uint32_t Hacl_Impl_Chacha20_u32;
+
+typedef uint32_t Hacl_Impl_Chacha20_h32;
+
+typedef uint8_t *Hacl_Impl_Chacha20_uint8_p;
+
+typedef uint32_t *Hacl_Impl_Chacha20_state;
+
+typedef uint32_t Hacl_Impl_Chacha20_idx;
+
+typedef struct
+{
+ void *k;
+ void *n;
+} Hacl_Impl_Chacha20_log_t_;
+
+typedef void *Hacl_Impl_Chacha20_log_t;
+
+typedef uint32_t Hacl_Lib_Create_h32;
+
+typedef uint8_t *Hacl_Chacha20_uint8_p;
+
+typedef uint32_t Hacl_Chacha20_uint32_t;
+
+void Hacl_Chacha20_chacha20_key_block(uint8_t *block, uint8_t *k, uint8_t *n1, uint32_t ctr);
+
+/*
+ This function implements Chacha20
+
+ val chacha20 :
+ output:uint8_p ->
+ plain:uint8_p{ disjoint output plain } ->
+ len:uint32_t{ v len = length output /\ v len = length plain } ->
+ key:uint8_p{ length key = 32 } ->
+ nonce:uint8_p{ length nonce = 12 } ->
+ ctr:uint32_t{ v ctr + length plain / 64 < pow2 32 } ->
+ Stack unit
+ (requires
+ fun h -> live h output /\ live h plain /\ live h nonce /\ live h key)
+ (ensures
+ fun h0 _ h1 ->
+ live h1 output /\ live h0 plain /\ modifies_1 output h0 h1 /\
+ live h0 nonce /\
+ live h0 key /\
+ h1.[ output ] ==
+ chacha20_encrypt_bytes h0.[ key ] h0.[ nonce ] (v ctr) h0.[ plain ])
+*/
+void
+Hacl_Chacha20_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr);
+#endif
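
The val chacha20 comment above restates the verified calling contract: a 32-byte key, a 12-byte nonce, disjoint output and plain buffers of exactly len bytes, and a starting block counter small enough that ctr + len/64 does not wrap. Because applying the same keystream twice cancels out, a minimal way to exercise that contract is an encrypt-then-decrypt round trip, sketched below (not part of the patch; the buffer sizes and fill values are illustrative, and the file is assumed to be built together with Hacl_Chacha20.c).

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "Hacl_Chacha20.h"

int
main(void)
{
    uint8_t key[32];
    uint8_t nonce[12];
    uint8_t msg[100]; /* deliberately not a multiple of 64 bytes */
    uint8_t ct[100];
    uint8_t rt[100];

    memset(key, 0x42, sizeof key);
    memset(nonce, 0x24, sizeof nonce);
    for (uint32_t i = 0U; i < sizeof msg; i++)
        msg[i] = (uint8_t)i;

    /* Encrypt starting at block counter 1 (the counter used for the data
       portion in the RFC 7539 AEAD construction). */
    Hacl_Chacha20_chacha20(ct, msg, (uint32_t)sizeof msg, key, nonce, 1U);
    /* Running the cipher again with the same parameters decrypts. */
    Hacl_Chacha20_chacha20(rt, ct, (uint32_t)sizeof ct, key, nonce, 1U);

    assert(memcmp(rt, msg, sizeof msg) == 0);
    return 0;
}
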
diff --git a/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.c b/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.c
new file mode 100644
index 000000000..4eba49f47
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.c
@@ -0,0 +1,390 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hacl_Chacha20_Vec128.h"
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_State_state_incr(vec *k)
+{
+ vec k3 = k[3U];
+ k[3U] = vec_increment(k3);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_State_state_to_key_block(uint8_t *stream_block, vec *k)
+{
+ vec k0 = k[0U];
+ vec k1 = k[1U];
+ vec k2 = k[2U];
+ vec k3 = k[3U];
+ uint8_t *a = stream_block;
+ uint8_t *b = stream_block + (uint32_t)16U;
+ uint8_t *c = stream_block + (uint32_t)32U;
+ uint8_t *d = stream_block + (uint32_t)48U;
+ vec_store_le(a, k0);
+ vec_store_le(b, k1);
+ vec_store_le(c, k2);
+ vec_store_le(d, k3);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_State_state_setup(vec *st, uint8_t *k, uint8_t *n1, uint32_t c)
+{
+ st[0U] =
+ vec_load_32x4((uint32_t)0x61707865U,
+ (uint32_t)0x3320646eU,
+ (uint32_t)0x79622d32U,
+ (uint32_t)0x6b206574U);
+ vec k0 = vec_load128_le(k);
+ vec k1 = vec_load128_le(k + (uint32_t)16U);
+ st[1U] = k0;
+ st[2U] = k1;
+ uint32_t n0 = load32_le(n1);
+ uint8_t *x00 = n1 + (uint32_t)4U;
+ uint32_t n10 = load32_le(x00);
+ uint8_t *x0 = n1 + (uint32_t)8U;
+ uint32_t n2 = load32_le(x0);
+ vec v1 = vec_load_32x4(c, n0, n10, n2);
+ st[3U] = v1;
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_round(vec *st)
+{
+ vec sa = st[0U];
+ vec sb0 = st[1U];
+ vec sd0 = st[3U];
+ vec sa10 = vec_add(sa, sb0);
+ vec sd10 = vec_rotate_left(vec_xor(sd0, sa10), (uint32_t)16U);
+ st[0U] = sa10;
+ st[3U] = sd10;
+ vec sa0 = st[2U];
+ vec sb1 = st[3U];
+ vec sd2 = st[1U];
+ vec sa11 = vec_add(sa0, sb1);
+ vec sd11 = vec_rotate_left(vec_xor(sd2, sa11), (uint32_t)12U);
+ st[2U] = sa11;
+ st[1U] = sd11;
+ vec sa2 = st[0U];
+ vec sb2 = st[1U];
+ vec sd3 = st[3U];
+ vec sa12 = vec_add(sa2, sb2);
+ vec sd12 = vec_rotate_left(vec_xor(sd3, sa12), (uint32_t)8U);
+ st[0U] = sa12;
+ st[3U] = sd12;
+ vec sa3 = st[2U];
+ vec sb = st[3U];
+ vec sd = st[1U];
+ vec sa1 = vec_add(sa3, sb);
+ vec sd1 = vec_rotate_left(vec_xor(sd, sa1), (uint32_t)7U);
+ st[2U] = sa1;
+ st[1U] = sd1;
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_double_round(vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_round(st);
+ vec r1 = st[1U];
+ vec r20 = st[2U];
+ vec r30 = st[3U];
+ st[1U] = vec_shuffle_right(r1, (uint32_t)1U);
+ st[2U] = vec_shuffle_right(r20, (uint32_t)2U);
+ st[3U] = vec_shuffle_right(r30, (uint32_t)3U);
+ Hacl_Impl_Chacha20_Vec128_round(st);
+ vec r10 = st[1U];
+ vec r2 = st[2U];
+ vec r3 = st[3U];
+ st[1U] = vec_shuffle_right(r10, (uint32_t)3U);
+ st[2U] = vec_shuffle_right(r2, (uint32_t)2U);
+ st[3U] = vec_shuffle_right(r3, (uint32_t)1U);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_double_round3(vec *st, vec *st_, vec *st__)
+{
+ Hacl_Impl_Chacha20_Vec128_double_round(st);
+ Hacl_Impl_Chacha20_Vec128_double_round(st_);
+ Hacl_Impl_Chacha20_Vec128_double_round(st__);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_sum_states(vec *st_, vec *st)
+{
+ vec s0 = st[0U];
+ vec s1 = st[1U];
+ vec s2 = st[2U];
+ vec s3 = st[3U];
+ vec s0_ = st_[0U];
+ vec s1_ = st_[1U];
+ vec s2_ = st_[2U];
+ vec s3_ = st_[3U];
+ st_[0U] = vec_add(s0_, s0);
+ st_[1U] = vec_add(s1_, s1);
+ st_[2U] = vec_add(s2_, s2);
+ st_[3U] = vec_add(s3_, s3);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_copy_state(vec *st_, vec *st)
+{
+ vec st0 = st[0U];
+ vec st1 = st[1U];
+ vec st2 = st[2U];
+ vec st3 = st[3U];
+ st_[0U] = st0;
+ st_[1U] = st1;
+ st_[2U] = st2;
+ st_[3U] = st3;
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_core(vec *k, vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_copy_state(k, st);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i = i + (uint32_t)1U)
+ Hacl_Impl_Chacha20_Vec128_double_round(k);
+ Hacl_Impl_Chacha20_Vec128_sum_states(k, st);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_state_incr(vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_State_state_incr(st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_incr3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_copy_state(k0, st);
+ Hacl_Impl_Chacha20_Vec128_copy_state(k1, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(k1);
+ Hacl_Impl_Chacha20_Vec128_copy_state(k2, k1);
+ Hacl_Impl_Chacha20_Vec128_state_incr(k2);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_sum3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_sum_states(k0, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ Hacl_Impl_Chacha20_Vec128_sum_states(k1, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ Hacl_Impl_Chacha20_Vec128_sum_states(k2, st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_core3(vec *k0, vec *k1, vec *k2, vec *st)
+{
+ Hacl_Impl_Chacha20_Vec128_chacha20_incr3(k0, k1, k2, st);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i = i + (uint32_t)1U)
+ Hacl_Impl_Chacha20_Vec128_double_round3(k0, k1, k2);
+ Hacl_Impl_Chacha20_Vec128_chacha20_sum3(k0, k1, k2, st);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_chacha20_block(uint8_t *stream_block, vec *st)
+{
+ KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+ vec k[4U];
+ for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+ k[_i] = vec_zero();
+ Hacl_Impl_Chacha20_Vec128_chacha20_core(k, st);
+ Hacl_Impl_Chacha20_Vec128_State_state_to_key_block(stream_block, k);
+}
+
+inline static void
+Hacl_Impl_Chacha20_Vec128_init(vec *st, uint8_t *k, uint8_t *n1, uint32_t ctr)
+{
+ Hacl_Impl_Chacha20_Vec128_State_state_setup(st, k, n1, ctr);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_update_last(uint8_t *output, uint8_t *plain, uint32_t len, vec *st)
+{
+ uint8_t block[64U] = { 0U };
+ Hacl_Impl_Chacha20_Vec128_chacha20_block(block, st);
+ uint8_t *mask = block;
+ for (uint32_t i = (uint32_t)0U; i < len; i = i + (uint32_t)1U) {
+ uint8_t xi = plain[i];
+ uint8_t yi = mask[i];
+ output[i] = xi ^ yi;
+ }
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_xor_block(uint8_t *output, uint8_t *plain, vec *st)
+{
+ vec p0 = vec_load_le(plain);
+ vec p1 = vec_load_le(plain + (uint32_t)16U);
+ vec p2 = vec_load_le(plain + (uint32_t)32U);
+ vec p3 = vec_load_le(plain + (uint32_t)48U);
+ vec k0 = st[0U];
+ vec k1 = st[1U];
+ vec k2 = st[2U];
+ vec k3 = st[3U];
+ vec o00 = vec_xor(p0, k0);
+ vec o10 = vec_xor(p1, k1);
+ vec o20 = vec_xor(p2, k2);
+ vec o30 = vec_xor(p3, k3);
+ uint8_t *o0 = output;
+ uint8_t *o1 = output + (uint32_t)16U;
+ uint8_t *o2 = output + (uint32_t)32U;
+ uint8_t *o3 = output + (uint32_t)48U;
+ vec_store_le(o0, o00);
+ vec_store_le(o1, o10);
+ vec_store_le(o2, o20);
+ vec_store_le(o3, o30);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_update(uint8_t *output, uint8_t *plain, vec *st)
+{
+ KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+ vec k[4U];
+ for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+ k[_i] = vec_zero();
+ Hacl_Impl_Chacha20_Vec128_chacha20_core(k, st);
+ Hacl_Impl_Chacha20_Vec128_xor_block(output, plain, k);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_update3(uint8_t *output, uint8_t *plain, vec *st)
+{
+ KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+ vec k0[4U];
+ for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+ k0[_i] = vec_zero();
+ KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+ vec k1[4U];
+ for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+ k1[_i] = vec_zero();
+ KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+ vec k2[4U];
+ for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+ k2[_i] = vec_zero();
+ Hacl_Impl_Chacha20_Vec128_chacha20_core3(k0, k1, k2, st);
+ uint8_t *p0 = plain;
+ uint8_t *p1 = plain + (uint32_t)64U;
+ uint8_t *p2 = plain + (uint32_t)128U;
+ uint8_t *o0 = output;
+ uint8_t *o1 = output + (uint32_t)64U;
+ uint8_t *o2 = output + (uint32_t)128U;
+ Hacl_Impl_Chacha20_Vec128_xor_block(o0, p0, k0);
+ Hacl_Impl_Chacha20_Vec128_xor_block(o1, p1, k1);
+ Hacl_Impl_Chacha20_Vec128_xor_block(o2, p2, k2);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_update3_(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ vec *st,
+ uint32_t i)
+{
+ uint8_t *out_block = output + (uint32_t)192U * i;
+ uint8_t *plain_block = plain + (uint32_t)192U * i;
+ Hacl_Impl_Chacha20_Vec128_update3(out_block, plain_block, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks3(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ vec *st)
+{
+ for (uint32_t i = (uint32_t)0U; i < len; i = i + (uint32_t)1U)
+ Hacl_Impl_Chacha20_Vec128_update3_(output, plain, len, st, i);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ vec *st)
+{
+ uint32_t len3 = len / (uint32_t)3U;
+ uint32_t rest3 = len % (uint32_t)3U;
+ uint8_t *plain_ = plain;
+ uint8_t *blocks1 = plain + (uint32_t)192U * len3;
+ uint8_t *output_ = output;
+ uint8_t *outs = output + (uint32_t)192U * len3;
+ Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks3(output_, plain_, len3, st);
+ if (rest3 == (uint32_t)2U) {
+ uint8_t *block0 = blocks1;
+ uint8_t *block1 = blocks1 + (uint32_t)64U;
+ uint8_t *out0 = outs;
+ uint8_t *out1 = outs + (uint32_t)64U;
+ Hacl_Impl_Chacha20_Vec128_update(out0, block0, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ Hacl_Impl_Chacha20_Vec128_update(out1, block1, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ } else if (rest3 == (uint32_t)1U) {
+ Hacl_Impl_Chacha20_Vec128_update(outs, blocks1, st);
+ Hacl_Impl_Chacha20_Vec128_state_incr(st);
+ }
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ vec *st)
+{
+ uint32_t blocks_len = len >> (uint32_t)6U;
+ uint32_t part_len = len & (uint32_t)0x3fU;
+ uint8_t *output_ = output;
+ uint8_t *plain_ = plain;
+ uint8_t *output__ = output + (uint32_t)64U * blocks_len;
+ uint8_t *plain__ = plain + (uint32_t)64U * blocks_len;
+ Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode_blocks(output_, plain_, blocks_len, st);
+ if (part_len > (uint32_t)0U)
+ Hacl_Impl_Chacha20_Vec128_update_last(output__, plain__, part_len, st);
+}
+
+static void
+Hacl_Impl_Chacha20_Vec128_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr)
+{
+ KRML_CHECK_SIZE(vec_zero(), (uint32_t)4U);
+ vec buf[4U];
+ for (uint32_t _i = 0U; _i < (uint32_t)4U; ++_i)
+ buf[_i] = vec_zero();
+ vec *st = buf;
+ Hacl_Impl_Chacha20_Vec128_init(st, k, n1, ctr);
+ Hacl_Impl_Chacha20_Vec128_chacha20_counter_mode(output, plain, len, st);
+}
+
+void
+Hacl_Chacha20_Vec128_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr)
+{
+ Hacl_Impl_Chacha20_Vec128_chacha20(output, plain, len, k, n1, ctr);
+}
diff --git a/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.h b/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.h
new file mode 100644
index 000000000..57942093d
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Chacha20_Vec128.h
@@ -0,0 +1,61 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kremlib.h"
+#ifndef __Hacl_Chacha20_Vec128_H
+#define __Hacl_Chacha20_Vec128_H
+
+#include "vec128.h"
+
+typedef uint32_t Hacl_Impl_Xor_Lemmas_u32;
+
+typedef uint8_t Hacl_Impl_Xor_Lemmas_u8;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_State_u32;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_State_h32;
+
+typedef uint8_t *Hacl_Impl_Chacha20_Vec128_State_uint8_p;
+
+typedef vec *Hacl_Impl_Chacha20_Vec128_State_state;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_u32;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_h32;
+
+typedef uint8_t *Hacl_Impl_Chacha20_Vec128_uint8_p;
+
+typedef uint32_t Hacl_Impl_Chacha20_Vec128_idx;
+
+typedef struct
+{
+ void *k;
+ void *n;
+ uint32_t ctr;
+} Hacl_Impl_Chacha20_Vec128_log_t_;
+
+typedef void *Hacl_Impl_Chacha20_Vec128_log_t;
+
+typedef uint8_t *Hacl_Chacha20_Vec128_uint8_p;
+
+void
+Hacl_Chacha20_Vec128_chacha20(
+ uint8_t *output,
+ uint8_t *plain,
+ uint32_t len,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr);
+#endif
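
Hacl_Chacha20_Vec128_chacha20 has the same signature as the scalar Hacl_Chacha20_chacha20 but keeps the ChaCha20 state in four 128-bit vectors (vec128.h wraps the platform's 128-bit SIMD intrinsics) and interleaves three blocks per iteration. A natural smoke test, sketched below, cross-checks the vectorized and scalar paths on identical inputs. It is not part of the patch and assumes that both Hacl_Chacha20.c and Hacl_Chacha20_Vec128.c are built for a target supported by vec128.h, and a C11 compiler, since the two generated headers repeat a couple of helper typedefs.

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "Hacl_Chacha20.h"
#include "Hacl_Chacha20_Vec128.h"

int
main(void)
{
    uint8_t key[32], nonce[12];
    uint8_t msg[256], out_ref[256], out_vec[256];

    memset(key, 0x11, sizeof key);
    memset(nonce, 0x22, sizeof nonce);
    memset(msg, 0x33, sizeof msg);

    /* Same key, nonce, counter, and length through both code paths. */
    Hacl_Chacha20_chacha20(out_ref, msg, (uint32_t)sizeof msg, key, nonce, 0U);
    Hacl_Chacha20_Vec128_chacha20(out_vec, msg, (uint32_t)sizeof msg, key, nonce, 0U);

    assert(memcmp(out_ref, out_vec, sizeof msg) == 0);
    return 0;
}
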
diff --git a/security/nss/lib/freebl/verified/Hacl_Curve25519.c b/security/nss/lib/freebl/verified/Hacl_Curve25519.c
new file mode 100644
index 000000000..f2dcddc57
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Curve25519.c
@@ -0,0 +1,845 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hacl_Curve25519.h"
+
+static void
+Hacl_Bignum_Modulo_carry_top(uint64_t *b)
+{
+ uint64_t b4 = b[4U];
+ uint64_t b0 = b[0U];
+ uint64_t b4_ = b4 & (uint64_t)0x7ffffffffffffU;
+ uint64_t b0_ = b0 + (uint64_t)19U * (b4 >> (uint32_t)51U);
+ b[4U] = b4_;
+ b[0U] = b0_;
+}
+
+inline static void
+Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_t *input)
+{
+ {
+ FStar_UInt128_t xi = input[0U];
+ output[0U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+ {
+ FStar_UInt128_t xi = input[1U];
+ output[1U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+ {
+ FStar_UInt128_t xi = input[2U];
+ output[2U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+ {
+ FStar_UInt128_t xi = input[3U];
+ output[3U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+ {
+ FStar_UInt128_t xi = input[4U];
+ output[4U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+}
+
+inline static void
+Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
+ FStar_UInt128_t *output,
+ uint64_t *input,
+ uint64_t s)
+{
+ {
+ FStar_UInt128_t xi = output[0U];
+ uint64_t yi = input[0U];
+ output[0U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+ {
+ FStar_UInt128_t xi = output[1U];
+ uint64_t yi = input[1U];
+ output[1U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+ {
+ FStar_UInt128_t xi = output[2U];
+ uint64_t yi = input[2U];
+ output[2U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+ {
+ FStar_UInt128_t xi = output[3U];
+ uint64_t yi = input[3U];
+ output[3U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+ {
+ FStar_UInt128_t xi = output[4U];
+ uint64_t yi = input[4U];
+ output[4U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+}
+
+inline static void
+Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_t *tmp)
+{
+ {
+ uint32_t ctr = (uint32_t)0U;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t)1U;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t)2U;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t)3U;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+}
+
+inline static void
+Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
+{
+ uint64_t tmp = output[4U];
+ {
+ uint32_t ctr = (uint32_t)5U - (uint32_t)0U - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t)5U - (uint32_t)1U - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t)5U - (uint32_t)2U - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t)5U - (uint32_t)3U - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ output[0U] = tmp;
+ uint64_t b0 = output[0U];
+ output[0U] = (uint64_t)19U * b0;
+}
+
+static void
+Hacl_Bignum_Fmul_mul_shift_reduce_(FStar_UInt128_t *output, uint64_t *input, uint64_t *input21)
+{
+ {
+ uint64_t input2i = input21[0U];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[1U];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[2U];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[3U];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ uint32_t i = (uint32_t)4U;
+ uint64_t input2i = input21[i];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
+
+inline static void
+Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input21)
+{
+ uint64_t tmp[5U] = { 0U };
+ memcpy(tmp, input, (uint32_t)5U * sizeof input[0U]);
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)5U);
+ FStar_UInt128_t t[5U];
+ for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
+ t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ Hacl_Bignum_Fmul_mul_shift_reduce_(t, tmp, input21);
+ Hacl_Bignum_Fproduct_carry_wide_(t);
+ FStar_UInt128_t b4 = t[4U];
+ FStar_UInt128_t b0 = t[0U];
+ FStar_UInt128_t
+ b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t)19U,
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
+ t[4U] = b4_;
+ t[0U] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
+ uint64_t i0 = output[0U];
+ uint64_t i1 = output[1U];
+ uint64_t i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t)51U);
+ output[0U] = i0_;
+ output[1U] = i1_;
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare__(FStar_UInt128_t *tmp, uint64_t *output)
+{
+ uint64_t r0 = output[0U];
+ uint64_t r1 = output[1U];
+ uint64_t r2 = output[2U];
+ uint64_t r3 = output[3U];
+ uint64_t r4 = output[4U];
+ uint64_t d0 = r0 * (uint64_t)2U;
+ uint64_t d1 = r1 * (uint64_t)2U;
+ uint64_t d2 = r2 * (uint64_t)2U * (uint64_t)19U;
+ uint64_t d419 = r4 * (uint64_t)19U;
+ uint64_t d4 = d419 * (uint64_t)2U;
+ FStar_UInt128_t
+ s0 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(r0, r0),
+ FStar_UInt128_mul_wide(d4, r1)),
+ FStar_UInt128_mul_wide(d2, r3));
+ FStar_UInt128_t
+ s1 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r1),
+ FStar_UInt128_mul_wide(d4, r2)),
+ FStar_UInt128_mul_wide(r3 * (uint64_t)19U, r3));
+ FStar_UInt128_t
+ s2 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r2),
+ FStar_UInt128_mul_wide(r1, r1)),
+ FStar_UInt128_mul_wide(d4, r3));
+ FStar_UInt128_t
+ s3 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r3),
+ FStar_UInt128_mul_wide(d1, r2)),
+ FStar_UInt128_mul_wide(r4, d419));
+ FStar_UInt128_t
+ s4 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r4),
+ FStar_UInt128_mul_wide(d1, r3)),
+ FStar_UInt128_mul_wide(r2, r2));
+ tmp[0U] = s0;
+ tmp[1U] = s1;
+ tmp[2U] = s2;
+ tmp[3U] = s3;
+ tmp[4U] = s4;
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare_(FStar_UInt128_t *tmp, uint64_t *output)
+{
+ Hacl_Bignum_Fsquare_fsquare__(tmp, output);
+ Hacl_Bignum_Fproduct_carry_wide_(tmp);
+ FStar_UInt128_t b4 = tmp[4U];
+ FStar_UInt128_t b0 = tmp[0U];
+ FStar_UInt128_t
+ b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t)19U,
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
+ tmp[4U] = b4_;
+ tmp[0U] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+ uint64_t i0 = output[0U];
+ uint64_t i1 = output[1U];
+ uint64_t i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t)51U);
+ output[0U] = i0_;
+ output[1U] = i1_;
+}
+
+static void
+Hacl_Bignum_Fsquare_fsquare_times_(uint64_t *input, FStar_UInt128_t *tmp, uint32_t count1)
+{
+ Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+ for (uint32_t i = (uint32_t)1U; i < count1; i = i + (uint32_t)1U)
+ Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
+{
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)5U);
+ FStar_UInt128_t t[5U];
+ for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
+ t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
+ Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
+{
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)5U);
+ FStar_UInt128_t t[5U];
+ for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
+ t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+}
+
+inline static void
+Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
+{
+ uint64_t buf[20U] = { 0U };
+ uint64_t *a = buf;
+ uint64_t *t00 = buf + (uint32_t)5U;
+ uint64_t *b0 = buf + (uint32_t)10U;
+ Hacl_Bignum_Fsquare_fsquare_times(a, z, (uint32_t)1U);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t)2U);
+ Hacl_Bignum_Fmul_fmul(b0, t00, z);
+ Hacl_Bignum_Fmul_fmul(a, b0, a);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t)1U);
+ Hacl_Bignum_Fmul_fmul(b0, t00, b0);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t)5U);
+ uint64_t *t01 = buf + (uint32_t)5U;
+ uint64_t *b1 = buf + (uint32_t)10U;
+ uint64_t *c0 = buf + (uint32_t)15U;
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)10U);
+ Hacl_Bignum_Fmul_fmul(c0, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t)20U);
+ Hacl_Bignum_Fmul_fmul(t01, t01, c0);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t)10U);
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)50U);
+ uint64_t *a0 = buf;
+ uint64_t *t0 = buf + (uint32_t)5U;
+ uint64_t *b = buf + (uint32_t)10U;
+ uint64_t *c = buf + (uint32_t)15U;
+ Hacl_Bignum_Fmul_fmul(c, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t)100U);
+ Hacl_Bignum_Fmul_fmul(t0, t0, c);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)50U);
+ Hacl_Bignum_Fmul_fmul(t0, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)5U);
+ Hacl_Bignum_Fmul_fmul(out, t0, a0);
+}
+
+inline static void
+Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
+{
+ {
+ uint64_t xi = a[0U];
+ uint64_t yi = b[0U];
+ a[0U] = xi + yi;
+ }
+ {
+ uint64_t xi = a[1U];
+ uint64_t yi = b[1U];
+ a[1U] = xi + yi;
+ }
+ {
+ uint64_t xi = a[2U];
+ uint64_t yi = b[2U];
+ a[2U] = xi + yi;
+ }
+ {
+ uint64_t xi = a[3U];
+ uint64_t yi = b[3U];
+ a[3U] = xi + yi;
+ }
+ {
+ uint64_t xi = a[4U];
+ uint64_t yi = b[4U];
+ a[4U] = xi + yi;
+ }
+}
+
+inline static void
+Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
+{
+ uint64_t tmp[5U] = { 0U };
+ memcpy(tmp, b, (uint32_t)5U * sizeof b[0U]);
+ uint64_t b0 = tmp[0U];
+ uint64_t b1 = tmp[1U];
+ uint64_t b2 = tmp[2U];
+ uint64_t b3 = tmp[3U];
+ uint64_t b4 = tmp[4U];
+ tmp[0U] = b0 + (uint64_t)0x3fffffffffff68U;
+ tmp[1U] = b1 + (uint64_t)0x3ffffffffffff8U;
+ tmp[2U] = b2 + (uint64_t)0x3ffffffffffff8U;
+ tmp[3U] = b3 + (uint64_t)0x3ffffffffffff8U;
+ tmp[4U] = b4 + (uint64_t)0x3ffffffffffff8U;
+ {
+ uint64_t xi = a[0U];
+ uint64_t yi = tmp[0U];
+ a[0U] = yi - xi;
+ }
+ {
+ uint64_t xi = a[1U];
+ uint64_t yi = tmp[1U];
+ a[1U] = yi - xi;
+ }
+ {
+ uint64_t xi = a[2U];
+ uint64_t yi = tmp[2U];
+ a[2U] = yi - xi;
+ }
+ {
+ uint64_t xi = a[3U];
+ uint64_t yi = tmp[3U];
+ a[3U] = yi - xi;
+ }
+ {
+ uint64_t xi = a[4U];
+ uint64_t yi = tmp[4U];
+ a[4U] = yi - xi;
+ }
+}
+
+inline static void
+Hacl_Bignum_fscalar(uint64_t *output, uint64_t *b, uint64_t s)
+{
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)5U);
+ FStar_UInt128_t tmp[5U];
+ for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
+ tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ {
+ uint64_t xi = b[0U];
+ tmp[0U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ {
+ uint64_t xi = b[1U];
+ tmp[1U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ {
+ uint64_t xi = b[2U];
+ tmp[2U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ {
+ uint64_t xi = b[3U];
+ tmp[3U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ {
+ uint64_t xi = b[4U];
+ tmp[4U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ Hacl_Bignum_Fproduct_carry_wide_(tmp);
+ FStar_UInt128_t b4 = tmp[4U];
+ FStar_UInt128_t b0 = tmp[0U];
+ FStar_UInt128_t
+ b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t)19U,
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
+ tmp[4U] = b4_;
+ tmp[0U] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+}
+
+inline static void
+Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
+{
+ Hacl_Bignum_Fmul_fmul(output, a, b);
+}
+
+inline static void
+Hacl_Bignum_crecip(uint64_t *output, uint64_t *input)
+{
+ Hacl_Bignum_Crecip_crecip(output, input);
+}
+
+static void
+Hacl_EC_Point_swap_conditional_step(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
+{
+ uint32_t i = ctr - (uint32_t)1U;
+ uint64_t ai = a[i];
+ uint64_t bi = b[i];
+ uint64_t x = swap1 & (ai ^ bi);
+ uint64_t ai1 = ai ^ x;
+ uint64_t bi1 = bi ^ x;
+ a[i] = ai1;
+ b[i] = bi1;
+}
+
+static void
+Hacl_EC_Point_swap_conditional_(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
+{
+ if (!(ctr == (uint32_t)0U)) {
+ Hacl_EC_Point_swap_conditional_step(a, b, swap1, ctr);
+ uint32_t i = ctr - (uint32_t)1U;
+ Hacl_EC_Point_swap_conditional_(a, b, swap1, i);
+ }
+}
+
+static void
+Hacl_EC_Point_swap_conditional(uint64_t *a, uint64_t *b, uint64_t iswap)
+{
+ uint64_t swap1 = (uint64_t)0U - iswap;
+ Hacl_EC_Point_swap_conditional_(a, b, swap1, (uint32_t)5U);
+ Hacl_EC_Point_swap_conditional_(a + (uint32_t)5U, b + (uint32_t)5U, swap1, (uint32_t)5U);
+}
+
+static void
+Hacl_EC_Point_copy(uint64_t *output, uint64_t *input)
+{
+ memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
+ memcpy(output + (uint32_t)5U,
+ input + (uint32_t)5U,
+ (uint32_t)5U * sizeof(input + (uint32_t)5U)[0U]);
+}
+
+static void
+Hacl_EC_AddAndDouble_fmonty(
+ uint64_t *pp,
+ uint64_t *ppq,
+ uint64_t *p,
+ uint64_t *pq,
+ uint64_t *qmqp)
+{
+ uint64_t *qx = qmqp;
+ uint64_t *x2 = pp;
+ uint64_t *z2 = pp + (uint32_t)5U;
+ uint64_t *x3 = ppq;
+ uint64_t *z3 = ppq + (uint32_t)5U;
+ uint64_t *x = p;
+ uint64_t *z = p + (uint32_t)5U;
+ uint64_t *xprime = pq;
+ uint64_t *zprime = pq + (uint32_t)5U;
+ uint64_t buf[40U] = { 0U };
+ uint64_t *origx = buf;
+ uint64_t *origxprime = buf + (uint32_t)5U;
+ uint64_t *xxprime0 = buf + (uint32_t)25U;
+ uint64_t *zzprime0 = buf + (uint32_t)30U;
+ memcpy(origx, x, (uint32_t)5U * sizeof x[0U]);
+ Hacl_Bignum_fsum(x, z);
+ Hacl_Bignum_fdifference(z, origx);
+ memcpy(origxprime, xprime, (uint32_t)5U * sizeof xprime[0U]);
+ Hacl_Bignum_fsum(xprime, zprime);
+ Hacl_Bignum_fdifference(zprime, origxprime);
+ Hacl_Bignum_fmul(xxprime0, xprime, z);
+ Hacl_Bignum_fmul(zzprime0, x, zprime);
+ uint64_t *origxprime0 = buf + (uint32_t)5U;
+ uint64_t *xx0 = buf + (uint32_t)15U;
+ uint64_t *zz0 = buf + (uint32_t)20U;
+ uint64_t *xxprime = buf + (uint32_t)25U;
+ uint64_t *zzprime = buf + (uint32_t)30U;
+ uint64_t *zzzprime = buf + (uint32_t)35U;
+ memcpy(origxprime0, xxprime, (uint32_t)5U * sizeof xxprime[0U]);
+ Hacl_Bignum_fsum(xxprime, zzprime);
+ Hacl_Bignum_fdifference(zzprime, origxprime0);
+ Hacl_Bignum_Fsquare_fsquare_times(x3, xxprime, (uint32_t)1U);
+ Hacl_Bignum_Fsquare_fsquare_times(zzzprime, zzprime, (uint32_t)1U);
+ Hacl_Bignum_fmul(z3, zzzprime, qx);
+ Hacl_Bignum_Fsquare_fsquare_times(xx0, x, (uint32_t)1U);
+ Hacl_Bignum_Fsquare_fsquare_times(zz0, z, (uint32_t)1U);
+ uint64_t *zzz = buf + (uint32_t)10U;
+ uint64_t *xx = buf + (uint32_t)15U;
+ uint64_t *zz = buf + (uint32_t)20U;
+ Hacl_Bignum_fmul(x2, xx, zz);
+ Hacl_Bignum_fdifference(zz, xx);
+ uint64_t scalar = (uint64_t)121665U;
+ Hacl_Bignum_fscalar(zzz, zz, scalar);
+ Hacl_Bignum_fsum(zzz, xx);
+ Hacl_Bignum_fmul(z2, zzz, zz);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt)
+{
+ uint64_t bit = (uint64_t)(byt >> (uint32_t)7U);
+ Hacl_EC_Point_swap_conditional(nq, nqpq, bit);
+ Hacl_EC_AddAndDouble_fmonty(nq2, nqpq2, nq, nqpq, q);
+ uint64_t bit0 = (uint64_t)(byt >> (uint32_t)7U);
+ Hacl_EC_Point_swap_conditional(nq2, nqpq2, bit0);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt)
+{
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
+ uint8_t byt1 = byt << (uint32_t)1U;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt,
+ uint32_t i)
+{
+ if (!(i == (uint32_t)0U)) {
+ uint32_t i_ = i - (uint32_t)1U;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
+ uint8_t byt_ = byt << (uint32_t)2U;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byt_, i_);
+ }
+}
+
+static void
+Hacl_EC_Ladder_BigLoop_cmult_big_loop(
+ uint8_t *n1,
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint32_t i)
+{
+ if (!(i == (uint32_t)0U)) {
+ uint32_t i1 = i - (uint32_t)1U;
+ uint8_t byte = n1[i1];
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, (uint32_t)4U);
+ Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, i1);
+ }
+}
+
+static void
+Hacl_EC_Ladder_cmult(uint64_t *result, uint8_t *n1, uint64_t *q)
+{
+ uint64_t point_buf[40U] = { 0U };
+ uint64_t *nq = point_buf;
+ uint64_t *nqpq = point_buf + (uint32_t)10U;
+ uint64_t *nq2 = point_buf + (uint32_t)20U;
+ uint64_t *nqpq2 = point_buf + (uint32_t)30U;
+ Hacl_EC_Point_copy(nqpq, q);
+ nq[0U] = (uint64_t)1U;
+ Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, (uint32_t)32U);
+ Hacl_EC_Point_copy(result, nq);
+}
+
+static void
+Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
+{
+ uint64_t i0 = load64_le(input);
+ uint8_t *x00 = input + (uint32_t)6U;
+ uint64_t i1 = load64_le(x00);
+ uint8_t *x01 = input + (uint32_t)12U;
+ uint64_t i2 = load64_le(x01);
+ uint8_t *x02 = input + (uint32_t)19U;
+ uint64_t i3 = load64_le(x02);
+ uint8_t *x0 = input + (uint32_t)24U;
+ uint64_t i4 = load64_le(x0);
+ uint64_t output0 = i0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t output1 = i1 >> (uint32_t)3U & (uint64_t)0x7ffffffffffffU;
+ uint64_t output2 = i2 >> (uint32_t)6U & (uint64_t)0x7ffffffffffffU;
+ uint64_t output3 = i3 >> (uint32_t)1U & (uint64_t)0x7ffffffffffffU;
+ uint64_t output4 = i4 >> (uint32_t)12U & (uint64_t)0x7ffffffffffffU;
+ output[0U] = output0;
+ output[1U] = output1;
+ output[2U] = output2;
+ output[3U] = output3;
+ output[4U] = output4;
+}
+
+static void
+Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
+{
+ uint64_t t0 = input[0U];
+ uint64_t t1 = input[1U];
+ uint64_t t2 = input[2U];
+ uint64_t t3 = input[3U];
+ uint64_t t4 = input[4U];
+ uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
+ uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
+ uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
+ uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
+ uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
+ uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
+ uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
+ input[0U] = t0_;
+ input[1U] = t1__;
+ input[2U] = t2__;
+ input[3U] = t3__;
+ input[4U] = t4_;
+}
+
+static void
+Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_first_carry_pass(input);
+ Hacl_Bignum_Modulo_carry_top(input);
+}
+
+static void
+Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
+{
+ uint64_t t0 = input[0U];
+ uint64_t t1 = input[1U];
+ uint64_t t2 = input[2U];
+ uint64_t t3 = input[3U];
+ uint64_t t4 = input[4U];
+ uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
+ uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
+ uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
+ uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
+ uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
+ uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
+ uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
+ input[0U] = t0_;
+ input[1U] = t1__;
+ input[2U] = t2__;
+ input[3U] = t3__;
+ input[4U] = t4_;
+}
+
+static void
+Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_second_carry_pass(input);
+ Hacl_Bignum_Modulo_carry_top(input);
+ uint64_t i0 = input[0U];
+ uint64_t i1 = input[1U];
+ uint64_t i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t)51U);
+ input[0U] = i0_;
+ input[1U] = i1_;
+}
+
+static void
+Hacl_EC_Format_fcontract_trim(uint64_t *input)
+{
+ uint64_t a0 = input[0U];
+ uint64_t a1 = input[1U];
+ uint64_t a2 = input[2U];
+ uint64_t a3 = input[3U];
+ uint64_t a4 = input[4U];
+ uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t)0x7ffffffffffedU);
+ uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t)0x7ffffffffffffU);
+ uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t)0x7ffffffffffffU);
+ uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t)0x7ffffffffffffU);
+ uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t)0x7ffffffffffffU);
+ uint64_t mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
+ uint64_t a0_ = a0 - ((uint64_t)0x7ffffffffffedU & mask);
+ uint64_t a1_ = a1 - ((uint64_t)0x7ffffffffffffU & mask);
+ uint64_t a2_ = a2 - ((uint64_t)0x7ffffffffffffU & mask);
+ uint64_t a3_ = a3 - ((uint64_t)0x7ffffffffffffU & mask);
+ uint64_t a4_ = a4 - ((uint64_t)0x7ffffffffffffU & mask);
+ input[0U] = a0_;
+ input[1U] = a1_;
+ input[2U] = a2_;
+ input[3U] = a3_;
+ input[4U] = a4_;
+}
+
+static void
+Hacl_EC_Format_fcontract_store(uint8_t *output, uint64_t *input)
+{
+ uint64_t t0 = input[0U];
+ uint64_t t1 = input[1U];
+ uint64_t t2 = input[2U];
+ uint64_t t3 = input[3U];
+ uint64_t t4 = input[4U];
+ uint64_t o0 = t1 << (uint32_t)51U | t0;
+ uint64_t o1 = t2 << (uint32_t)38U | t1 >> (uint32_t)13U;
+ uint64_t o2 = t3 << (uint32_t)25U | t2 >> (uint32_t)26U;
+ uint64_t o3 = t4 << (uint32_t)12U | t3 >> (uint32_t)39U;
+ uint8_t *b0 = output;
+ uint8_t *b1 = output + (uint32_t)8U;
+ uint8_t *b2 = output + (uint32_t)16U;
+ uint8_t *b3 = output + (uint32_t)24U;
+ store64_le(b0, o0);
+ store64_le(b1, o1);
+ store64_le(b2, o2);
+ store64_le(b3, o3);
+}
+
+static void
+Hacl_EC_Format_fcontract(uint8_t *output, uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_first_carry_full(input);
+ Hacl_EC_Format_fcontract_second_carry_full(input);
+ Hacl_EC_Format_fcontract_trim(input);
+ Hacl_EC_Format_fcontract_store(output, input);
+}
+
+static void
+Hacl_EC_Format_scalar_of_point(uint8_t *scalar, uint64_t *point)
+{
+ uint64_t *x = point;
+ uint64_t *z = point + (uint32_t)5U;
+ uint64_t buf[10U] = { 0U };
+ uint64_t *zmone = buf;
+ uint64_t *sc = buf + (uint32_t)5U;
+ Hacl_Bignum_crecip(zmone, z);
+ Hacl_Bignum_fmul(sc, x, zmone);
+ Hacl_EC_Format_fcontract(scalar, sc);
+}
+
+void
+Hacl_EC_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
+{
+ uint64_t buf0[10U] = { 0U };
+ uint64_t *x0 = buf0;
+ uint64_t *z = buf0 + (uint32_t)5U;
+ Hacl_EC_Format_fexpand(x0, basepoint);
+ z[0U] = (uint64_t)1U;
+ uint64_t *q = buf0;
+ uint8_t e[32U] = { 0U };
+ memcpy(e, secret, (uint32_t)32U * sizeof secret[0U]);
+ uint8_t e0 = e[0U];
+ uint8_t e31 = e[31U];
+ uint8_t e01 = e0 & (uint8_t)248U;
+ uint8_t e311 = e31 & (uint8_t)127U;
+ uint8_t e312 = e311 | (uint8_t)64U;
+ e[0U] = e01;
+ e[31U] = e312;
+ uint8_t *scalar = e;
+ uint64_t buf[15U] = { 0U };
+ uint64_t *nq = buf;
+ uint64_t *x = nq;
+ x[0U] = (uint64_t)1U;
+ Hacl_EC_Ladder_cmult(nq, scalar, q);
+ Hacl_EC_Format_scalar_of_point(mypublic, nq);
+}
+
+void
+Hacl_Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
+{
+ Hacl_EC_crypto_scalarmult(mypublic, secret, basepoint);
+}
diff --git a/security/nss/lib/freebl/verified/Hacl_Curve25519.h b/security/nss/lib/freebl/verified/Hacl_Curve25519.h
new file mode 100644
index 000000000..0e443f177
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Curve25519.h
@@ -0,0 +1,57 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kremlib.h"
+#ifndef __Hacl_Curve25519_H
+#define __Hacl_Curve25519_H
+
+typedef uint64_t Hacl_Bignum_Constants_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Constants_wide;
+
+typedef uint64_t Hacl_Bignum_Parameters_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Parameters_wide;
+
+typedef uint32_t Hacl_Bignum_Parameters_ctr;
+
+typedef uint64_t *Hacl_Bignum_Parameters_felem;
+
+typedef FStar_UInt128_t *Hacl_Bignum_Parameters_felem_wide;
+
+typedef void *Hacl_Bignum_Parameters_seqelem;
+
+typedef void *Hacl_Bignum_Parameters_seqelem_wide;
+
+typedef FStar_UInt128_t Hacl_Bignum_Wide_t;
+
+typedef uint64_t Hacl_Bignum_Limb_t;
+
+extern void Hacl_Bignum_lemma_diff(Prims_int x0, Prims_int x1, Prims_pos x2);
+
+typedef uint64_t *Hacl_EC_Point_point;
+
+typedef uint8_t *Hacl_EC_Ladder_SmallLoop_uint8_p;
+
+typedef uint8_t *Hacl_EC_Ladder_uint8_p;
+
+typedef uint8_t *Hacl_EC_Format_uint8_p;
+
+void Hacl_EC_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint);
+
+typedef uint8_t *Hacl_Curve25519_uint8_p;
+
+void Hacl_Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint);
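+
+/* Usage sketch (illustrative, not part of the generated API): all three
+ * buffers are 32 bytes and the standard base point has u-coordinate 9; with
+ * priv holding the caller's 32 secret bytes:
+ *
+ *   uint8_t pub[32], base[32] = { 9 };
+ *   Hacl_Curve25519_crypto_scalarmult(pub, priv, base);
+ */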
+#endif
diff --git a/security/nss/lib/freebl/verified/Hacl_Poly1305_64.c b/security/nss/lib/freebl/verified/Hacl_Poly1305_64.c
new file mode 100644
index 000000000..984031ae2
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Poly1305_64.c
@@ -0,0 +1,485 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hacl_Poly1305_64.h"
+
+inline static void
+Hacl_Bignum_Modulo_reduce(uint64_t *b)
+{
+ uint64_t b0 = b[0U];
+ b[0U] = (b0 << (uint32_t)4U) + (b0 << (uint32_t)2U);
+}
+
+inline static void
+Hacl_Bignum_Modulo_carry_top(uint64_t *b)
+{
+ uint64_t b2 = b[2U];
+ uint64_t b0 = b[0U];
+ uint64_t b2_42 = b2 >> (uint32_t)42U;
+ b[2U] = b2 & (uint64_t)0x3ffffffffffU;
+ b[0U] = (b2_42 << (uint32_t)2U) + b2_42 + b0;
+}
+
+inline static void
+Hacl_Bignum_Modulo_carry_top_wide(FStar_UInt128_t *b)
+{
+ FStar_UInt128_t b2 = b[2U];
+ FStar_UInt128_t b0 = b[0U];
+ FStar_UInt128_t
+ b2_ = FStar_UInt128_logand(b2, FStar_UInt128_uint64_to_uint128((uint64_t)0x3ffffffffffU));
+ uint64_t b2_42 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b2, (uint32_t)42U));
+ FStar_UInt128_t
+ b0_ = FStar_UInt128_add(b0, FStar_UInt128_uint64_to_uint128((b2_42 << (uint32_t)2U) + b2_42));
+ b[2U] = b2_;
+ b[0U] = b0_;
+}
+
+inline static void
+Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_t *input)
+{
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i = i + (uint32_t)1U) {
+ FStar_UInt128_t xi = input[i];
+ output[i] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+}
+
+inline static void
+Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
+ FStar_UInt128_t *output,
+ uint64_t *input,
+ uint64_t s)
+{
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i = i + (uint32_t)1U) {
+ FStar_UInt128_t xi = output[i];
+ uint64_t yi = input[i];
+ output[i] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+}
+
+inline static void
+Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_t *tmp)
+{
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)2U; i = i + (uint32_t)1U) {
+ uint32_t ctr = i;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0xfffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)44U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+}
+
+inline static void
+Hacl_Bignum_Fproduct_carry_limb_(uint64_t *tmp)
+{
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)2U; i = i + (uint32_t)1U) {
+ uint32_t ctr = i;
+ uint64_t tctr = tmp[ctr];
+ uint64_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = tctr & (uint64_t)0xfffffffffffU;
+ uint64_t c = tctr >> (uint32_t)44U;
+ tmp[ctr] = r0;
+ tmp[ctr + (uint32_t)1U] = tctrp1 + c;
+ }
+}
+
+inline static void
+Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
+{
+ uint64_t tmp = output[2U];
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)2U; i = i + (uint32_t)1U) {
+ uint32_t ctr = (uint32_t)3U - i - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ output[0U] = tmp;
+ Hacl_Bignum_Modulo_reduce(output);
+}
+
+static void
+Hacl_Bignum_Fmul_mul_shift_reduce_(FStar_UInt128_t *output, uint64_t *input, uint64_t *input2)
+{
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)2U; i = i + (uint32_t)1U) {
+ uint64_t input2i = input2[i];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ uint32_t i = (uint32_t)2U;
+ uint64_t input2i = input2[i];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
+
+inline static void
+Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input2)
+{
+ uint64_t tmp[3U] = { 0U };
+ memcpy(tmp, input, (uint32_t)3U * sizeof input[0U]);
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)3U);
+ FStar_UInt128_t t[3U];
+ for (uint32_t _i = 0U; _i < (uint32_t)3U; ++_i)
+ t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ Hacl_Bignum_Fmul_mul_shift_reduce_(t, tmp, input2);
+ Hacl_Bignum_Fproduct_carry_wide_(t);
+ Hacl_Bignum_Modulo_carry_top_wide(t);
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
+ uint64_t i0 = output[0U];
+ uint64_t i1 = output[1U];
+ uint64_t i0_ = i0 & (uint64_t)0xfffffffffffU;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t)44U);
+ output[0U] = i0_;
+ output[1U] = i1_;
+}
+
+inline static void
+Hacl_Bignum_AddAndMultiply_add_and_multiply(uint64_t *acc, uint64_t *block, uint64_t *r)
+{
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i = i + (uint32_t)1U) {
+ uint64_t xi = acc[i];
+ uint64_t yi = block[i];
+ acc[i] = xi + yi;
+ }
+ Hacl_Bignum_Fmul_fmul(acc, acc, r);
+}
+
+inline static void
+Hacl_Impl_Poly1305_64_poly1305_update(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m)
+{
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
+ uint64_t *h = scrut0.h;
+ uint64_t *acc = h;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *r = scrut.r;
+ uint64_t *r3 = r;
+ uint64_t tmp[3U] = { 0U };
+ FStar_UInt128_t m0 = load128_le(m);
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(m0) & (uint64_t)0xfffffffffffU;
+ uint64_t
+ r1 =
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t)44U)) & (uint64_t)0xfffffffffffU;
+ uint64_t r2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t)88U));
+ tmp[0U] = r0;
+ tmp[1U] = r1;
+ tmp[2U] = r2;
+ uint64_t b2 = tmp[2U];
+ uint64_t b2_ = (uint64_t)0x10000000000U | b2;
+ tmp[2U] = b2_;
+ Hacl_Bignum_AddAndMultiply_add_and_multiply(acc, tmp, r3);
+}
+
+inline static void
+Hacl_Impl_Poly1305_64_poly1305_process_last_block_(
+ uint8_t *block,
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint64_t rem_)
+{
+ uint64_t tmp[3U] = { 0U };
+ FStar_UInt128_t m0 = load128_le(block);
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(m0) & (uint64_t)0xfffffffffffU;
+ uint64_t
+ r1 =
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t)44U)) & (uint64_t)0xfffffffffffU;
+ uint64_t r2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(m0, (uint32_t)88U));
+ tmp[0U] = r0;
+ tmp[1U] = r1;
+ tmp[2U] = r2;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
+ uint64_t *h = scrut0.h;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *r = scrut.r;
+ Hacl_Bignum_AddAndMultiply_add_and_multiply(h, tmp, r);
+}
+
+inline static void
+Hacl_Impl_Poly1305_64_poly1305_process_last_block(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint64_t rem_)
+{
+ uint8_t zero1 = (uint8_t)0U;
+ KRML_CHECK_SIZE(zero1, (uint32_t)16U);
+ uint8_t block[16U];
+ for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
+ block[_i] = zero1;
+ uint32_t i0 = (uint32_t)rem_;
+ uint32_t i = (uint32_t)rem_;
+ memcpy(block, m, i * sizeof m[0U]);
+ block[i0] = (uint8_t)1U;
+ Hacl_Impl_Poly1305_64_poly1305_process_last_block_(block, st, m, rem_);
+}
+
+static void
+Hacl_Impl_Poly1305_64_poly1305_last_pass(uint64_t *acc)
+{
+ Hacl_Bignum_Fproduct_carry_limb_(acc);
+ Hacl_Bignum_Modulo_carry_top(acc);
+ uint64_t a0 = acc[0U];
+ uint64_t a10 = acc[1U];
+ uint64_t a20 = acc[2U];
+ uint64_t a0_ = a0 & (uint64_t)0xfffffffffffU;
+ uint64_t r0 = a0 >> (uint32_t)44U;
+ uint64_t a1_ = (a10 + r0) & (uint64_t)0xfffffffffffU;
+ uint64_t r1 = (a10 + r0) >> (uint32_t)44U;
+ uint64_t a2_ = a20 + r1;
+ acc[0U] = a0_;
+ acc[1U] = a1_;
+ acc[2U] = a2_;
+ Hacl_Bignum_Modulo_carry_top(acc);
+ uint64_t i0 = acc[0U];
+ uint64_t i1 = acc[1U];
+ uint64_t i0_ = i0 & (uint64_t)0xfffffffffffU;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t)44U);
+ acc[0U] = i0_;
+ acc[1U] = i1_;
+ uint64_t a00 = acc[0U];
+ uint64_t a1 = acc[1U];
+ uint64_t a2 = acc[2U];
+ uint64_t mask0 = FStar_UInt64_gte_mask(a00, (uint64_t)0xffffffffffbU);
+ uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t)0xfffffffffffU);
+ uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t)0x3ffffffffffU);
+ uint64_t mask = (mask0 & mask1) & mask2;
+ uint64_t a0_0 = a00 - ((uint64_t)0xffffffffffbU & mask);
+ uint64_t a1_0 = a1 - ((uint64_t)0xfffffffffffU & mask);
+ uint64_t a2_0 = a2 - ((uint64_t)0x3ffffffffffU & mask);
+ acc[0U] = a0_0;
+ acc[1U] = a1_0;
+ acc[2U] = a2_0;
+}
+
+static Hacl_Impl_Poly1305_64_State_poly1305_state
+Hacl_Impl_Poly1305_64_mk_state(uint64_t *r, uint64_t *h)
+{
+ return ((Hacl_Impl_Poly1305_64_State_poly1305_state){.r = r, .h = h });
+}
+
+static void
+Hacl_Standalone_Poly1305_64_poly1305_blocks(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint64_t len1)
+{
+ if (!(len1 == (uint64_t)0U)) {
+ uint8_t *block = m;
+ uint8_t *tail1 = m + (uint32_t)16U;
+ Hacl_Impl_Poly1305_64_poly1305_update(st, block);
+ uint64_t len2 = len1 - (uint64_t)1U;
+ Hacl_Standalone_Poly1305_64_poly1305_blocks(st, tail1, len2);
+ }
+}
+
+static void
+Hacl_Standalone_Poly1305_64_poly1305_partial(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *input,
+ uint64_t len1,
+ uint8_t *kr)
+{
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *r = scrut.r;
+ uint64_t *x0 = r;
+ FStar_UInt128_t k1 = load128_le(kr);
+ FStar_UInt128_t
+ k_clamped =
+ FStar_UInt128_logand(k1,
+ FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)0x0ffffffc0ffffffcU),
+ (uint32_t)64U),
+ FStar_UInt128_uint64_to_uint128((uint64_t)0x0ffffffc0fffffffU)));
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(k_clamped) & (uint64_t)0xfffffffffffU;
+ uint64_t
+ r1 =
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t)44U)) & (uint64_t)0xfffffffffffU;
+ uint64_t
+ r2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t)88U));
+ x0[0U] = r0;
+ x0[1U] = r1;
+ x0[2U] = r2;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
+ uint64_t *h = scrut0.h;
+ uint64_t *x00 = h;
+ x00[0U] = (uint64_t)0U;
+ x00[1U] = (uint64_t)0U;
+ x00[2U] = (uint64_t)0U;
+ Hacl_Standalone_Poly1305_64_poly1305_blocks(st, input, len1);
+}
+
+static void
+Hacl_Standalone_Poly1305_64_poly1305_complete(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint64_t len1,
+ uint8_t *k1)
+{
+ uint8_t *kr = k1;
+ uint64_t len16 = len1 >> (uint32_t)4U;
+ uint64_t rem16 = len1 & (uint64_t)0xfU;
+ uint8_t *part_input = m;
+ uint8_t *last_block = m + (uint32_t)((uint64_t)16U * len16);
+ Hacl_Standalone_Poly1305_64_poly1305_partial(st, part_input, len16, kr);
+ if (!(rem16 == (uint64_t)0U))
+ Hacl_Impl_Poly1305_64_poly1305_process_last_block(st, last_block, rem16);
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *h = scrut.h;
+ uint64_t *acc = h;
+ Hacl_Impl_Poly1305_64_poly1305_last_pass(acc);
+}
+
+static void
+Hacl_Standalone_Poly1305_64_crypto_onetimeauth_(
+ uint8_t *output,
+ uint8_t *input,
+ uint64_t len1,
+ uint8_t *k1)
+{
+ uint64_t buf[6U] = { 0U };
+ uint64_t *r = buf;
+ uint64_t *h = buf + (uint32_t)3U;
+ Hacl_Impl_Poly1305_64_State_poly1305_state st = Hacl_Impl_Poly1305_64_mk_state(r, h);
+ uint8_t *key_s = k1 + (uint32_t)16U;
+ Hacl_Standalone_Poly1305_64_poly1305_complete(st, input, len1, k1);
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *h3 = scrut.h;
+ uint64_t *acc = h3;
+ FStar_UInt128_t k_ = load128_le(key_s);
+ uint64_t h0 = acc[0U];
+ uint64_t h1 = acc[1U];
+ uint64_t h2 = acc[2U];
+ FStar_UInt128_t
+ acc_ =
+ FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128(h2
+ << (uint32_t)24U |
+ h1 >> (uint32_t)20U),
+ (uint32_t)64U),
+ FStar_UInt128_uint64_to_uint128(h1 << (uint32_t)44U | h0));
+ FStar_UInt128_t mac_ = FStar_UInt128_add_mod(acc_, k_);
+ store128_le(output, mac_);
+}
+
+static void
+Hacl_Standalone_Poly1305_64_crypto_onetimeauth(
+ uint8_t *output,
+ uint8_t *input,
+ uint64_t len1,
+ uint8_t *k1)
+{
+ Hacl_Standalone_Poly1305_64_crypto_onetimeauth_(output, input, len1, k1);
+}
+
+Hacl_Impl_Poly1305_64_State_poly1305_state
+Hacl_Poly1305_64_mk_state(uint64_t *r, uint64_t *acc)
+{
+ return Hacl_Impl_Poly1305_64_mk_state(r, acc);
+}
+
+void
+Hacl_Poly1305_64_init(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *k1)
+{
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *r = scrut.r;
+ uint64_t *x0 = r;
+ FStar_UInt128_t k10 = load128_le(k1);
+ FStar_UInt128_t
+ k_clamped =
+ FStar_UInt128_logand(k10,
+ FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)0x0ffffffc0ffffffcU),
+ (uint32_t)64U),
+ FStar_UInt128_uint64_to_uint128((uint64_t)0x0ffffffc0fffffffU)));
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(k_clamped) & (uint64_t)0xfffffffffffU;
+ uint64_t
+ r1 =
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t)44U)) & (uint64_t)0xfffffffffffU;
+ uint64_t
+ r2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(k_clamped, (uint32_t)88U));
+ x0[0U] = r0;
+ x0[1U] = r1;
+ x0[2U] = r2;
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut0 = st;
+ uint64_t *h = scrut0.h;
+ uint64_t *x00 = h;
+ x00[0U] = (uint64_t)0U;
+ x00[1U] = (uint64_t)0U;
+ x00[2U] = (uint64_t)0U;
+}
+
+void
+Hacl_Poly1305_64_update_block(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *m)
+{
+ Hacl_Impl_Poly1305_64_poly1305_update(st, m);
+}
+
+void
+Hacl_Poly1305_64_update(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint32_t num_blocks)
+{
+ if (!(num_blocks == (uint32_t)0U)) {
+ uint8_t *block = m;
+ uint8_t *m_ = m + (uint32_t)16U;
+ uint32_t n1 = num_blocks - (uint32_t)1U;
+ Hacl_Poly1305_64_update_block(st, block);
+ Hacl_Poly1305_64_update(st, m_, n1);
+ }
+}
+
+void
+Hacl_Poly1305_64_update_last(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint32_t len1)
+{
+ if (!((uint64_t)len1 == (uint64_t)0U))
+ Hacl_Impl_Poly1305_64_poly1305_process_last_block(st, m, (uint64_t)len1);
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *h = scrut.h;
+ uint64_t *acc = h;
+ Hacl_Impl_Poly1305_64_poly1305_last_pass(acc);
+}
+
+void
+Hacl_Poly1305_64_finish(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *mac,
+ uint8_t *k1)
+{
+ Hacl_Impl_Poly1305_64_State_poly1305_state scrut = st;
+ uint64_t *h = scrut.h;
+ uint64_t *acc = h;
+ FStar_UInt128_t k_ = load128_le(k1);
+ uint64_t h0 = acc[0U];
+ uint64_t h1 = acc[1U];
+ uint64_t h2 = acc[2U];
+ FStar_UInt128_t
+ acc_ =
+ FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128(h2
+ << (uint32_t)24U |
+ h1 >> (uint32_t)20U),
+ (uint32_t)64U),
+ FStar_UInt128_uint64_to_uint128(h1 << (uint32_t)44U | h0));
+ FStar_UInt128_t mac_ = FStar_UInt128_add_mod(acc_, k_);
+ store128_le(mac, mac_);
+}
+
+void
+Hacl_Poly1305_64_crypto_onetimeauth(
+ uint8_t *output,
+ uint8_t *input,
+ uint64_t len1,
+ uint8_t *k1)
+{
+ Hacl_Standalone_Poly1305_64_crypto_onetimeauth(output, input, len1, k1);
+}
diff --git a/security/nss/lib/freebl/verified/Hacl_Poly1305_64.h b/security/nss/lib/freebl/verified/Hacl_Poly1305_64.h
new file mode 100644
index 000000000..0aa9a0de3
--- /dev/null
+++ b/security/nss/lib/freebl/verified/Hacl_Poly1305_64.h
@@ -0,0 +1,99 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kremlib.h"
+#ifndef __Hacl_Poly1305_64_H
+#define __Hacl_Poly1305_64_H
+
+typedef uint64_t Hacl_Bignum_Constants_limb;
+
+typedef FStar_UInt128_t Hacl_Bignum_Constants_wide;
+
+typedef FStar_UInt128_t Hacl_Bignum_Wide_t;
+
+typedef uint64_t Hacl_Bignum_Limb_t;
+
+typedef void *Hacl_Impl_Poly1305_64_State_log_t;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_State_bigint;
+
+typedef void *Hacl_Impl_Poly1305_64_State_seqelem;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_State_elemB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_wordB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_State_wordB_16;
+
+typedef struct
+{
+ uint64_t *r;
+ uint64_t *h;
+} Hacl_Impl_Poly1305_64_State_poly1305_state;
+
+typedef void *Hacl_Impl_Poly1305_64_log_t;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_bigint;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_uint8_p;
+
+typedef uint64_t *Hacl_Impl_Poly1305_64_elemB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_wordB;
+
+typedef uint8_t *Hacl_Impl_Poly1305_64_wordB_16;
+
+typedef uint8_t *Hacl_Poly1305_64_uint8_p;
+
+typedef uint64_t Hacl_Poly1305_64_uint64_t;
+
+typedef uint8_t *Hacl_Poly1305_64_key;
+
+typedef Hacl_Impl_Poly1305_64_State_poly1305_state Hacl_Poly1305_64_state;
+
+Hacl_Impl_Poly1305_64_State_poly1305_state
+Hacl_Poly1305_64_mk_state(uint64_t *r, uint64_t *acc);
+
+void Hacl_Poly1305_64_init(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *k1);
+
+void Hacl_Poly1305_64_update_block(Hacl_Impl_Poly1305_64_State_poly1305_state st, uint8_t *m);
+
+void
+Hacl_Poly1305_64_update(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint32_t num_blocks);
+
+void
+Hacl_Poly1305_64_update_last(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *m,
+ uint32_t len1);
+
+void
+Hacl_Poly1305_64_finish(
+ Hacl_Impl_Poly1305_64_State_poly1305_state st,
+ uint8_t *mac,
+ uint8_t *k1);
+
+void
+Hacl_Poly1305_64_crypto_onetimeauth(
+ uint8_t *output,
+ uint8_t *input,
+ uint64_t len1,
+ uint8_t *k1);
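+
+/* Usage sketch (illustrative, not part of the generated API): output receives
+ * a 16-byte tag and k1 is a 32-byte one-time key; with key holding the
+ * caller's 32 key bytes:
+ *
+ *   uint8_t tag[16];
+ *   uint8_t msg[] = "message";
+ *   Hacl_Poly1305_64_crypto_onetimeauth(tag, msg, sizeof msg - 1, key);
+ */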
+#endif
diff --git a/security/nss/lib/freebl/verified/kremlib.h b/security/nss/lib/freebl/verified/kremlib.h
new file mode 100644
index 000000000..c12164e74
--- /dev/null
+++ b/security/nss/lib/freebl/verified/kremlib.h
@@ -0,0 +1,672 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __KREMLIB_H
+#define __KREMLIB_H
+
+#include "kremlib_base.h"
+
+/* For tests only: we might need this function to be forward-declared, because
+ * the dependency on WasmSupport appears very late, after SimplifyWasm, and
+ * sadly, after the topological order has been done. */
+void WasmSupport_check_buffer_size(uint32_t s);
+
+/******************************************************************************/
+/* Stubs to ease compilation of non-Low* code */
+/******************************************************************************/
+
+/* Some types that KreMLin has no special knowledge of; many of them appear in
+ * signatures of ghost functions, meaning that it suffices to give them (any)
+ * definition. */
+typedef void *FStar_Seq_Base_seq, *Prims_prop, *FStar_HyperStack_mem,
+ *FStar_Set_set, *Prims_st_pre_h, *FStar_Heap_heap, *Prims_all_pre_h,
+ *FStar_TSet_set, *Prims_list, *FStar_Map_t, *FStar_UInt63_t_,
+ *FStar_Int63_t_, *FStar_UInt63_t, *FStar_Int63_t, *FStar_UInt_uint_t,
+ *FStar_Int_int_t, *FStar_HyperStack_stackref, *FStar_Bytes_bytes,
+ *FStar_HyperHeap_rid, *FStar_Heap_aref, *FStar_Monotonic_Heap_heap,
+ *FStar_Monotonic_Heap_aref, *FStar_Monotonic_HyperHeap_rid,
+ *FStar_Monotonic_HyperStack_mem, *FStar_Char_char_;
+
+typedef const char *Prims_string;
+
+/* For "bare" targets that do not have a C stdlib, the user might want to use
+ * [-add-include '"mydefinitions.h"'] and override these. */
+#ifndef KRML_HOST_PRINTF
+#define KRML_HOST_PRINTF printf
+#endif
+
+#ifndef KRML_HOST_EXIT
+#define KRML_HOST_EXIT exit
+#endif
+
+#ifndef KRML_HOST_MALLOC
+#define KRML_HOST_MALLOC malloc
+#endif
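+
+/* A minimal sketch (illustrative, not part of the generated build): a "bare"
+ * target would pass [-add-include '"mydefinitions.h"'] to KreMLin, with that
+ * hypothetical header supplying its own hooks, e.g.
+ *
+ *   #define KRML_HOST_PRINTF my_uart_printf
+ *   #define KRML_HOST_EXIT   my_abort
+ *   #define KRML_HOST_MALLOC my_heap_alloc
+ */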
+
+/* In statement position, exiting is easy. */
+#define KRML_EXIT \
+ do { \
+ KRML_HOST_PRINTF("Unimplemented function at %s:%d\n", __FILE__, __LINE__); \
+ KRML_HOST_EXIT(254); \
+ } while (0)
+
+/* In expression position, use the comma-operator and a malloc to return an
+ * expression of the right size. KreMLin passes t as the parameter to the macro.
+ */
+#define KRML_EABORT(t, msg) \
+ (KRML_HOST_PRINTF("KreMLin abort at %s:%d\n%s\n", __FILE__, __LINE__, msg), \
+ KRML_HOST_EXIT(255), *((t *)KRML_HOST_MALLOC(sizeof(t))))
+
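+/* Illustrative sketch only (b, f and y are hypothetical): KRML_EABORT sits in
+ * expression position, so a branch that should never run at run time can be
+ * written as
+ *
+ *   uint32_t x = b ? f(y) : KRML_EABORT(uint32_t, "unreachable");
+ *
+ * the comma operator prints and exits, and the dereferenced malloc gives the
+ * whole expression the expected type uint32_t. */
+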
+/* In FStar.Buffer.fst, the size of arrays is uint32_t, but it's a number of
+ * *elements*. Do an ugly, run-time check (some of which KreMLin can eliminate).
+ */
+#define KRML_CHECK_SIZE(elt, size) \
+ if (((size_t)size) > SIZE_MAX / sizeof(elt)) { \
+ KRML_HOST_PRINTF( \
+ "Maximum allocatable size exceeded, aborting before overflow at " \
+ "%s:%d\n", \
+ __FILE__, __LINE__); \
+ KRML_HOST_EXIT(253); \
+ }
+
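+/* Usage sketch (illustrative): guarding a variable-length allocation of len
+ * 64-bit limbs would look like
+ *
+ *   KRML_CHECK_SIZE((uint64_t)0U, len);
+ *   uint64_t tmp[len];
+ *
+ * so len * sizeof(uint64_t) is known not to exceed SIZE_MAX; the generated
+ * Hacl_Poly1305_64.c below uses exactly this pattern before its 3-limb
+ * FStar_UInt128_t stack array. */
+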
+/* A series of GCC atrocities to trace function calls (kremlin's [-d c-calls]
+ * option). Useful when trying to debug, say, Wasm, to compare traces. */
+/* clang-format off */
+#ifdef __GNUC__
+#define KRML_FORMAT(X) _Generic((X), \
+ uint8_t : "0x%08" PRIx8, \
+ uint16_t: "0x%08" PRIx16, \
+ uint32_t: "0x%08" PRIx32, \
+ uint64_t: "0x%08" PRIx64, \
+ int8_t : "0x%08" PRIx8, \
+ int16_t : "0x%08" PRIx16, \
+ int32_t : "0x%08" PRIx32, \
+ int64_t : "0x%08" PRIx64, \
+ default : "%s")
+
+#define KRML_FORMAT_ARG(X) _Generic((X), \
+ uint8_t : X, \
+ uint16_t: X, \
+ uint32_t: X, \
+ uint64_t: X, \
+ int8_t : X, \
+ int16_t : X, \
+ int32_t : X, \
+ int64_t : X, \
+ default : "unknown")
+/* clang-format on */
+
+#define KRML_DEBUG_RETURN(X) \
+ ({ \
+ __auto_type _ret = (X); \
+ KRML_HOST_PRINTF("returning: "); \
+ KRML_HOST_PRINTF(KRML_FORMAT(_ret), KRML_FORMAT_ARG(_ret)); \
+ KRML_HOST_PRINTF(" \n"); \
+ _ret; \
+ })
+#endif
+
+#define FStar_Buffer_eqb(b1, b2, n) \
+ (memcmp((b1), (b2), (n) * sizeof((b1)[0])) == 0)
+
+/* Stubs to make ST happy. Important note: you must generate a use of the macro
+ * argument, otherwise, you may have FStar_ST_recall(f) as the only use of f;
+ * KreMLin will think that this is a valid use, but then the C compiler, after
+ * macro expansion, will error out. */
+#define FStar_HyperHeap_root 0
+#define FStar_Pervasives_Native_fst(x) (x).fst
+#define FStar_Pervasives_Native_snd(x) (x).snd
+#define FStar_Seq_Base_createEmpty(x) 0
+#define FStar_Seq_Base_create(len, init) 0
+#define FStar_Seq_Base_upd(s, i, e) 0
+#define FStar_Seq_Base_eq(l1, l2) 0
+#define FStar_Seq_Base_length(l1) 0
+#define FStar_Seq_Base_append(x, y) 0
+#define FStar_Seq_Base_slice(x, y, z) 0
+#define FStar_Seq_Properties_snoc(x, y) 0
+#define FStar_Seq_Properties_cons(x, y) 0
+#define FStar_Seq_Base_index(x, y) 0
+#define FStar_HyperStack_is_eternal_color(x) 0
+#define FStar_Monotonic_HyperHeap_root 0
+#define FStar_Buffer_to_seq_full(x) 0
+#define FStar_Buffer_recall(x)
+#define FStar_HyperStack_ST_op_Colon_Equals(x, v) KRML_EXIT
+#define FStar_HyperStack_ST_op_Bang(x) 0
+#define FStar_HyperStack_ST_salloc(x) 0
+#define FStar_HyperStack_ST_ralloc(x, y) 0
+#define FStar_HyperStack_ST_new_region(x) (0)
+#define FStar_Monotonic_RRef_m_alloc(x) \
+ { \
+ 0 \
+ }
+
+#define FStar_HyperStack_ST_recall(x) \
+ do { \
+ (void)(x); \
+ } while (0)
+
+#define FStar_HyperStack_ST_recall_region(x) \
+ do { \
+ (void)(x); \
+ } while (0)
+
+#define FStar_Monotonic_RRef_m_recall(x1, x2) \
+ do { \
+ (void)(x1); \
+ (void)(x2); \
+ } while (0)
+
+#define FStar_Monotonic_RRef_m_write(x1, x2, x3, x4, x5) \
+ do { \
+ (void)(x1); \
+ (void)(x2); \
+ (void)(x3); \
+ (void)(x4); \
+ (void)(x5); \
+ } while (0)
+
+/******************************************************************************/
+/* Endian-ness macros that can only be implemented in C */
+/******************************************************************************/
+
+/* ... for Linux */
+#if defined(__linux__) || defined(__CYGWIN__)
+#include <endian.h>
+
+/* ... for OSX */
+#elif defined(__APPLE__)
+#include <libkern/OSByteOrder.h>
+#define htole64(x) OSSwapHostToLittleInt64(x)
+#define le64toh(x) OSSwapLittleToHostInt64(x)
+#define htobe64(x) OSSwapHostToBigInt64(x)
+#define be64toh(x) OSSwapBigToHostInt64(x)
+
+#define htole16(x) OSSwapHostToLittleInt16(x)
+#define le16toh(x) OSSwapLittleToHostInt16(x)
+#define htobe16(x) OSSwapHostToBigInt16(x)
+#define be16toh(x) OSSwapBigToHostInt16(x)
+
+#define htole32(x) OSSwapHostToLittleInt32(x)
+#define le32toh(x) OSSwapLittleToHostInt32(x)
+#define htobe32(x) OSSwapHostToBigInt32(x)
+#define be32toh(x) OSSwapBigToHostInt32(x)
+
+/* ... for Solaris */
+#elif defined(__sun__)
+#include <sys/byteorder.h>
+#define htole64(x) LE_64(x)
+#define le64toh(x) LE_64(x)
+#define htobe64(x) BE_64(x)
+#define be64toh(x) BE_64(x)
+
+#define htole16(x) LE_16(x)
+#define le16toh(x) LE_16(x)
+#define htobe16(x) BE_16(x)
+#define be16toh(x) BE_16(x)
+
+#define htole32(x) LE_32(x)
+#define le32toh(x) LE_32(x)
+#define htobe32(x) BE_32(x)
+#define be32toh(x) BE_32(x)
+
+/* ... for the BSDs */
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
+#include <sys/endian.h>
+#elif defined(__OpenBSD__)
+#include <endian.h>
+
+/* ... for Windows (MSVC)... not targeting XBOX 360! */
+#elif defined(_MSC_VER)
+
+#include <stdlib.h>
+#define htobe16(x) _byteswap_ushort(x)
+#define htole16(x) (x)
+#define be16toh(x) _byteswap_ushort(x)
+#define le16toh(x) (x)
+
+#define htobe32(x) _byteswap_ulong(x)
+#define htole32(x) (x)
+#define be32toh(x) _byteswap_ulong(x)
+#define le32toh(x) (x)
+
+#define htobe64(x) _byteswap_uint64(x)
+#define htole64(x) (x)
+#define be64toh(x) _byteswap_uint64(x)
+#define le64toh(x) (x)
+
+/* ... for Windows (GCC-like, e.g. mingw or clang) */
+#elif (defined(_WIN32) || defined(_WIN64)) && \
+ (defined(__GNUC__) || defined(__clang__))
+
+#define htobe16(x) __builtin_bswap16(x)
+#define htole16(x) (x)
+#define be16toh(x) __builtin_bswap16(x)
+#define le16toh(x) (x)
+
+#define htobe32(x) __builtin_bswap32(x)
+#define htole32(x) (x)
+#define be32toh(x) __builtin_bswap32(x)
+#define le32toh(x) (x)
+
+#define htobe64(x) __builtin_bswap64(x)
+#define htole64(x) (x)
+#define be64toh(x) __builtin_bswap64(x)
+#define le64toh(x) (x)
+
+/* ... generic big-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+/* byte swapping code inspired by:
+ * https://github.com/rweather/arduinolibs/blob/master/libraries/Crypto/utility/EndianUtil.h
+ * */
+
+#define htobe32(x) (x)
+#define be32toh(x) (x)
+#define htole32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+#define le32toh(x) (htole32((x)))
+
+#define htobe64(x) (x)
+#define be64toh(x) (x)
+#define htole64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+#define le64toh(x) (htole64((x)))
+
+/* ... generic little-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+#define htole32(x) (x)
+#define le32toh(x) (x)
+#define htobe32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+#define be32toh(x) (htobe32((x)))
+
+#define htole64(x) (x)
+#define le64toh(x) (x)
+#define htobe64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+#define be64toh(x) (htobe64((x)))
+
+/* ... couldn't determine endian-ness of the target platform */
+#else
+#error "Please define __BYTE_ORDER__!"
+
+#endif /* defined(__linux__) || ... */
+
+/* Loads and stores. These avoid undefined behavior due to unaligned memory
+ * accesses, via memcpy. */
+
+inline static uint16_t
+load16(uint8_t *b)
+{
+ uint16_t x;
+ memcpy(&x, b, 2);
+ return x;
+}
+
+inline static uint32_t
+load32(uint8_t *b)
+{
+ uint32_t x;
+ memcpy(&x, b, 4);
+ return x;
+}
+
+inline static uint64_t
+load64(uint8_t *b)
+{
+ uint64_t x;
+ memcpy(&x, b, 8);
+ return x;
+}
+
+inline static void
+store16(uint8_t *b, uint16_t i)
+{
+ memcpy(b, &i, 2);
+}
+
+inline static void
+store32(uint8_t *b, uint32_t i)
+{
+ memcpy(b, &i, 4);
+}
+
+inline static void
+store64(uint8_t *b, uint64_t i)
+{
+ memcpy(b, &i, 8);
+}
+
+#define load16_le(b) (le16toh(load16(b)))
+#define store16_le(b, i) (store16(b, htole16(i)))
+#define load16_be(b) (be16toh(load16(b)))
+#define store16_be(b, i) (store16(b, htobe16(i)))
+
+#define load32_le(b) (le32toh(load32(b)))
+#define store32_le(b, i) (store32(b, htole32(i)))
+#define load32_be(b) (be32toh(load32(b)))
+#define store32_be(b, i) (store32(b, htobe32(i)))
+
+#define load64_le(b) (le64toh(load64(b)))
+#define store64_le(b, i) (store64(b, htole64(i)))
+#define load64_be(b) (be64toh(load64(b)))
+#define store64_be(b, i) (store64(b, htobe64(i)))
+
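+/* Round-trip sketch (illustrative only): because the helpers go through
+ * memcpy, an unaligned pointer is fine, e.g.
+ *
+ *   uint8_t buf[9];
+ *   store64_le(buf + 1, UINT64_C(0x0123456789abcdef));
+ *   uint64_t v = load64_le(buf + 1);   // v == 0x0123456789abcdef
+ */
+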
+/******************************************************************************/
+/* Checked integers to ease the compilation of non-Low* code */
+/******************************************************************************/
+
+typedef int32_t Prims_pos, Prims_nat, Prims_nonzero, Prims_int,
+ krml_checked_int_t;
+
+inline static bool
+Prims_op_GreaterThanOrEqual(int32_t x, int32_t y)
+{
+ return x >= y;
+}
+
+inline static bool
+Prims_op_LessThanOrEqual(int32_t x, int32_t y)
+{
+ return x <= y;
+}
+
+inline static bool
+Prims_op_GreaterThan(int32_t x, int32_t y)
+{
+ return x > y;
+}
+
+inline static bool
+Prims_op_LessThan(int32_t x, int32_t y)
+{
+ return x < y;
+}
+
+#define RETURN_OR(x) \
+ do { \
+ int64_t __ret = x; \
+ if (__ret < INT32_MIN || INT32_MAX < __ret) { \
+ KRML_HOST_PRINTF("Prims.{int,nat,pos} integer overflow at %s:%d\n", \
+ __FILE__, __LINE__); \
+ KRML_HOST_EXIT(252); \
+ } \
+ return (int32_t)__ret; \
+ } while (0)
+
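+/* Note (illustrative): RETURN_OR performs the operation in 64-bit arithmetic
+ * and aborts with exit code 252 if the result does not fit in int32_t, so a
+ * call such as Prims_op_Multiply(65536, 65536) below aborts instead of
+ * silently wrapping. */
+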
+inline static int32_t
+Prims_pow2(int32_t x)
+{
+ RETURN_OR((int64_t)1 << (int64_t)x);
+}
+
+inline static int32_t
+Prims_op_Multiply(int32_t x, int32_t y)
+{
+ RETURN_OR((int64_t)x * (int64_t)y);
+}
+
+inline static int32_t
+Prims_op_Addition(int32_t x, int32_t y)
+{
+ RETURN_OR((int64_t)x + (int64_t)y);
+}
+
+inline static int32_t
+Prims_op_Subtraction(int32_t x, int32_t y)
+{
+ RETURN_OR((int64_t)x - (int64_t)y);
+}
+
+inline static int32_t
+Prims_op_Division(int32_t x, int32_t y)
+{
+ RETURN_OR((int64_t)x / (int64_t)y);
+}
+
+inline static int32_t
+Prims_op_Modulus(int32_t x, int32_t y)
+{
+ RETURN_OR((int64_t)x % (int64_t)y);
+}
+
+inline static int8_t
+FStar_UInt8_uint_to_t(int8_t x)
+{
+ return x;
+}
+inline static int16_t
+FStar_UInt16_uint_to_t(int16_t x)
+{
+ return x;
+}
+inline static int32_t
+FStar_UInt32_uint_to_t(int32_t x)
+{
+ return x;
+}
+inline static int64_t
+FStar_UInt64_uint_to_t(int64_t x)
+{
+ return x;
+}
+
+inline static int8_t
+FStar_UInt8_v(int8_t x)
+{
+ return x;
+}
+inline static int16_t
+FStar_UInt16_v(int16_t x)
+{
+ return x;
+}
+inline static int32_t
+FStar_UInt32_v(int32_t x)
+{
+ return x;
+}
+inline static int64_t
+FStar_UInt64_v(int64_t x)
+{
+ return x;
+}
+
+/* Platform-specific 128-bit arithmetic. These are static functions in a header,
+ * so that each translation unit gets its own copy and the C compiler can
+ * optimize. */
+#ifndef KRML_NOUINT128
+typedef unsigned __int128 FStar_UInt128_t, FStar_UInt128_t_, uint128_t;
+
+static inline void
+print128(const char *where, uint128_t n)
+{
+ KRML_HOST_PRINTF("%s: [%" PRIu64 ",%" PRIu64 "]\n", where,
+ (uint64_t)(n >> 64), (uint64_t)n);
+}
+
+static inline uint128_t
+load128_le(uint8_t *b)
+{
+ uint128_t l = (uint128_t)load64_le(b);
+ uint128_t h = (uint128_t)load64_le(b + 8);
+ return (h << 64 | l);
+}
+
+static inline void
+store128_le(uint8_t *b, uint128_t n)
+{
+ store64_le(b, (uint64_t)n);
+ store64_le(b + 8, (uint64_t)(n >> 64));
+}
+
+static inline uint128_t
+load128_be(uint8_t *b)
+{
+ uint128_t h = (uint128_t)load64_be(b);
+ uint128_t l = (uint128_t)load64_be(b + 8);
+ return (h << 64 | l);
+}
+
+static inline void
+store128_be(uint8_t *b, uint128_t n)
+{
+ store64_be(b, (uint64_t)(n >> 64));
+ store64_be(b + 8, (uint64_t)n);
+}
+
+#define FStar_UInt128_add(x, y) ((x) + (y))
+#define FStar_UInt128_mul(x, y) ((x) * (y))
+#define FStar_UInt128_add_mod(x, y) ((x) + (y))
+#define FStar_UInt128_sub(x, y) ((x) - (y))
+#define FStar_UInt128_sub_mod(x, y) ((x) - (y))
+#define FStar_UInt128_logand(x, y) ((x) & (y))
+#define FStar_UInt128_logor(x, y) ((x) | (y))
+#define FStar_UInt128_logxor(x, y) ((x) ^ (y))
+#define FStar_UInt128_lognot(x) (~(x))
+#define FStar_UInt128_shift_left(x, y) ((x) << (y))
+#define FStar_UInt128_shift_right(x, y) ((x) >> (y))
+#define FStar_UInt128_uint64_to_uint128(x) ((uint128_t)(x))
+#define FStar_UInt128_uint128_to_uint64(x) ((uint64_t)(x))
+#define FStar_UInt128_mul_wide(x, y) ((uint128_t)(x) * (y))
+#define FStar_UInt128_op_Hat_Hat(x, y) ((x) ^ (y))
+
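+/* Sketch with illustrative values: mul_wide lifts two 64-bit limbs into their
+ * full 128-bit product, e.g.
+ *
+ *   uint128_t p = FStar_UInt128_mul_wide(UINT64_C(0xffffffffffffffff),
+ *                                        UINT64_C(2));
+ *   // p == 0x1fffffffffffffffe
+ */
+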
+static inline uint128_t
+FStar_UInt128_eq_mask(uint128_t x, uint128_t y)
+{
+ uint64_t mask =
+ FStar_UInt64_eq_mask((uint64_t)(x >> 64), (uint64_t)(y >> 64)) &
+ FStar_UInt64_eq_mask(x, y);
+ return ((uint128_t)mask) << 64 | mask;
+}
+
+static inline uint128_t
+FStar_UInt128_gte_mask(uint128_t x, uint128_t y)
+{
+ uint64_t mask =
+ (FStar_UInt64_gte_mask(x >> 64, y >> 64) &
+ ~(FStar_UInt64_eq_mask(x >> 64, y >> 64))) |
+ (FStar_UInt64_eq_mask(x >> 64, y >> 64) & FStar_UInt64_gte_mask(x, y));
+ return ((uint128_t)mask) << 64 | mask;
+}
+
+#else /* !defined(KRML_NOUINT128) */
+
+/* This is a bad circular dependency... should fix it properly. */
+#include "FStar.h"
+
+typedef FStar_UInt128_uint128 FStar_UInt128_t_, uint128_t;
+
+/* A series of definitions written using pointers. */
+static inline void
+print128_(const char *where, uint128_t *n)
+{
+ KRML_HOST_PRINTF("%s: [0x%08" PRIx64 ",0x%08" PRIx64 "]\n", where, n->high, n->low);
+}
+
+static inline void
+load128_le_(uint8_t *b, uint128_t *r)
+{
+ r->low = load64_le(b);
+ r->high = load64_le(b + 8);
+}
+
+static inline void
+store128_le_(uint8_t *b, uint128_t *n)
+{
+ store64_le(b, n->low);
+ store64_le(b + 8, n->high);
+}
+
+static inline void
+load128_be_(uint8_t *b, uint128_t *r)
+{
+ r->high = load64_be(b);
+ r->low = load64_be(b + 8);
+}
+
+static inline void
+store128_be_(uint8_t *b, uint128_t *n)
+{
+ store64_be(b, n->high);
+ store64_be(b + 8, n->low);
+}
+
+#ifndef KRML_NOSTRUCT_PASSING
+
+static inline void
+print128(const char *where, uint128_t n)
+{
+ print128_(where, &n);
+}
+
+static inline uint128_t
+load128_le(uint8_t *b)
+{
+ uint128_t r;
+ load128_le_(b, &r);
+ return r;
+}
+
+static inline void
+store128_le(uint8_t *b, uint128_t n)
+{
+ store128_le_(b, &n);
+}
+
+static inline uint128_t
+load128_be(uint8_t *b)
+{
+ uint128_t r;
+ load128_be_(b, &r);
+ return r;
+}
+
+static inline void
+store128_be(uint8_t *b, uint128_t n)
+{
+ store128_be_(b, &n);
+}
+
+#else /* !defined(KRML_NOSTRUCT_PASSING) */
+
+#define print128 print128_
+#define load128_le load128_le_
+#define store128_le store128_le_
+#define load128_be load128_be_
+#define store128_be store128_be_
+
+#endif /* KRML_NOSTRUCT_PASSING */
+#endif /* KRML_NOUINT128 */
+#endif /* __KREMLIB_H */
diff --git a/security/nss/lib/freebl/verified/kremlib_base.h b/security/nss/lib/freebl/verified/kremlib_base.h
new file mode 100644
index 000000000..14170625d
--- /dev/null
+++ b/security/nss/lib/freebl/verified/kremlib_base.h
@@ -0,0 +1,192 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __KREMLIB_BASE_H
+#define __KREMLIB_BASE_H
+
+#include <inttypes.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+/******************************************************************************/
+/* Some macros to ease compatibility */
+/******************************************************************************/
+
+/* Define __cdecl and friends when using GCC, so that we can safely compile code
+ * that contains __cdecl on all platforms. Note that this is in a separate
+ * header so that Dafny-generated code can include just this file. */
+#ifndef _MSC_VER
+/* Use the gcc predefined macros if on a platform/architecture that sets them.
+ * Otherwise define them to be empty. */
+#ifndef __cdecl
+#define __cdecl
+#endif
+#ifndef __stdcall
+#define __stdcall
+#endif
+#ifndef __fastcall
+#define __fastcall
+#endif
+#endif
+
+#ifdef __GNUC__
+#define inline __inline__
+#endif
+
+/* GCC-specific attribute syntax; everyone else gets the standard C inline
+ * attribute. */
+#ifdef __GNUC__
+#ifndef __clang__
+#define force_inline inline __attribute__((always_inline))
+#else
+#define force_inline inline
+#endif
+#else
+#define force_inline inline
+#endif
+
+/******************************************************************************/
+/* Implementing C.fst */
+/******************************************************************************/
+
+/* Uppercase issue; we have to define lowercase versions of the C macros (as we
+ * have no way to refer to an uppercase *variable* in F*). */
+extern int exit_success;
+extern int exit_failure;
+
+/* This one allows the user to write C.EXIT_SUCCESS. */
+typedef int exit_code;
+
+void print_string(const char *s);
+void print_bytes(uint8_t *b, uint32_t len);
+
+/* The universal null pointer defined in C.Nullity.fst */
+#define C_Nullity_null(X) 0
+
+/* If some globals need to be initialized before the main, then kremlin will
+ * generate and try to link last a function with this type: */
+void kremlinit_globals(void);
+
+/******************************************************************************/
+/* Implementation of machine integers (possibly of 128-bit integers) */
+/******************************************************************************/
+
+/* Integer types */
+typedef uint64_t FStar_UInt64_t, FStar_UInt64_t_;
+typedef int64_t FStar_Int64_t, FStar_Int64_t_;
+typedef uint32_t FStar_UInt32_t, FStar_UInt32_t_;
+typedef int32_t FStar_Int32_t, FStar_Int32_t_;
+typedef uint16_t FStar_UInt16_t, FStar_UInt16_t_;
+typedef int16_t FStar_Int16_t, FStar_Int16_t_;
+typedef uint8_t FStar_UInt8_t, FStar_UInt8_t_;
+typedef int8_t FStar_Int8_t, FStar_Int8_t_;
+
+static inline uint32_t
+rotate32_left(uint32_t x, uint32_t n)
+{
+ /* assert (n<32); */
+ return (x << n) | (x >> (32 - n));
+}
+static inline uint32_t
+rotate32_right(uint32_t x, uint32_t n)
+{
+ /* assert (n<32); */
+ return (x >> n) | (x << (32 - n));
+}
+
+/* Constant time comparisons */
+static inline uint8_t
+FStar_UInt8_eq_mask(uint8_t x, uint8_t y)
+{
+ x = ~(x ^ y);
+ x &= x << 4;
+ x &= x << 2;
+ x &= x << 1;
+ return (int8_t)x >> 7;
+}
+
+static inline uint8_t
+FStar_UInt8_gte_mask(uint8_t x, uint8_t y)
+{
+ return ~(uint8_t)(((int32_t)x - y) >> 31);
+}
+
+static inline uint16_t
+FStar_UInt16_eq_mask(uint16_t x, uint16_t y)
+{
+ x = ~(x ^ y);
+ x &= x << 8;
+ x &= x << 4;
+ x &= x << 2;
+ x &= x << 1;
+ return (int16_t)x >> 15;
+}
+
+static inline uint16_t
+FStar_UInt16_gte_mask(uint16_t x, uint16_t y)
+{
+ return ~(uint16_t)(((int32_t)x - y) >> 31);
+}
+
+static inline uint32_t
+FStar_UInt32_eq_mask(uint32_t x, uint32_t y)
+{
+ x = ~(x ^ y);
+ x &= x << 16;
+ x &= x << 8;
+ x &= x << 4;
+ x &= x << 2;
+ x &= x << 1;
+ return ((int32_t)x) >> 31;
+}
+
+static inline uint32_t
+FStar_UInt32_gte_mask(uint32_t x, uint32_t y)
+{
+ return ~((uint32_t)(((int64_t)x - y) >> 63));
+}
+
+static inline uint64_t
+FStar_UInt64_eq_mask(uint64_t x, uint64_t y)
+{
+ x = ~(x ^ y);
+ x &= x << 32;
+ x &= x << 16;
+ x &= x << 8;
+ x &= x << 4;
+ x &= x << 2;
+ x &= x << 1;
+ return ((int64_t)x) >> 63;
+}
+
+static inline uint64_t
+FStar_UInt64_gte_mask(uint64_t x, uint64_t y)
+{
+ uint64_t low63 =
+ ~((uint64_t)((int64_t)((int64_t)(x & UINT64_C(0x7fffffffffffffff)) -
+ (int64_t)(y & UINT64_C(0x7fffffffffffffff))) >>
+ 63));
+ uint64_t high_bit =
+ ~((uint64_t)((int64_t)((int64_t)(x & UINT64_C(0x8000000000000000)) -
+ (int64_t)(y & UINT64_C(0x8000000000000000))) >>
+ 63));
+ return low63 & high_bit;
+}
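+
+/* Illustrative note: each eq_mask above returns an all-ones mask when its
+ * arguments are equal and zero otherwise, and gte_mask returns all-ones when
+ * x >= y, with no data-dependent branch; a typical constant-time select
+ * (a, b, x, y are hypothetical values) is
+ *
+ *   uint64_t m = FStar_UInt64_eq_mask(a, b);
+ *   uint64_t r = (x & m) | (y & ~m);   // r == x if a == b, else r == y
+ */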
+
+#endif
diff --git a/security/nss/lib/freebl/verified/specs/Spec.CTR.fst b/security/nss/lib/freebl/verified/specs/Spec.CTR.fst
new file mode 100644
index 000000000..e411cd353
--- /dev/null
+++ b/security/nss/lib/freebl/verified/specs/Spec.CTR.fst
@@ -0,0 +1,98 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module Spec.CTR
+
+module ST = FStar.HyperStack.ST
+
+open FStar.Mul
+open FStar.Seq
+open Spec.Lib
+
+#reset-options "--initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
+
+type block_cipher_ctx = {
+ keylen: nat ;
+ blocklen: (x:nat{x>0});
+ noncelen: nat;
+ counterbits: nat;
+ incr: pos}
+
+type key (c:block_cipher_ctx) = lbytes c.keylen
+type nonce (c:block_cipher_ctx) = lbytes c.noncelen
+type block (c:block_cipher_ctx) = lbytes (c.blocklen*c.incr)
+type counter (c:block_cipher_ctx) = UInt.uint_t c.counterbits
+type block_cipher (c:block_cipher_ctx) = key c -> nonce c -> counter c -> block c
+
+val xor: #len:nat -> x:lbytes len -> y:lbytes len -> Tot (lbytes len)
+let xor #len x y = map2 FStar.UInt8.(fun x y -> x ^^ y) x y
+
+
+val counter_mode_blocks:
+ ctx: block_cipher_ctx ->
+ bc: block_cipher ctx ->
+ k:key ctx -> n:nonce ctx -> c:counter ctx ->
+ plain:seq UInt8.t{c + ctx.incr * (length plain / ctx.blocklen) < pow2 ctx.counterbits /\
+ length plain % (ctx.blocklen * ctx.incr) = 0} ->
+ Tot (lbytes (length plain))
+ (decreases (length plain))
+#reset-options "--z3rlimit 200 --max_fuel 0"
+let rec counter_mode_blocks ctx block_enc key nonce counter plain =
+ let len = length plain in
+ let len' = len / (ctx.blocklen * ctx.incr) in
+ Math.Lemmas.lemma_div_mod len (ctx.blocklen * ctx.incr) ;
+ if len = 0 then Seq.createEmpty #UInt8.t
+ else (
+ let prefix, block = split plain (len - ctx.blocklen * ctx.incr) in
+    (* TODO: move to a single lemma for clarity *)
+ Math.Lemmas.lemma_mod_plus (length prefix) 1 (ctx.blocklen * ctx.incr);
+ Math.Lemmas.lemma_div_le (length prefix) len ctx.blocklen;
+ Spec.CTR.Lemmas.lemma_div len (ctx.blocklen * ctx.incr);
+ (* End TODO *)
+ let cipher = counter_mode_blocks ctx block_enc key nonce counter prefix in
+ let mask = block_enc key nonce (counter + (len / ctx.blocklen - 1) * ctx.incr) in
+ let eb = xor block mask in
+ cipher @| eb
+ )
+
+
+val counter_mode:
+ ctx: block_cipher_ctx ->
+ bc: block_cipher ctx ->
+ k:key ctx -> n:nonce ctx -> c:counter ctx ->
+ plain:seq UInt8.t{c + ctx.incr * (length plain / ctx.blocklen) < pow2 ctx.counterbits} ->
+ Tot (lbytes (length plain))
+ (decreases (length plain))
+#reset-options "--z3rlimit 200 --max_fuel 0"
+let counter_mode ctx block_enc key nonce counter plain =
+ let len = length plain in
+ let blocks_len = (ctx.incr * ctx.blocklen) * (len / (ctx.blocklen * ctx.incr)) in
+ let part_len = len % (ctx.blocklen * ctx.incr) in
+  (* TODO: move to a single lemma for clarity *)
+ Math.Lemmas.lemma_div_mod len (ctx.blocklen * ctx.incr);
+ Math.Lemmas.multiple_modulo_lemma (len / (ctx.blocklen * ctx.incr)) (ctx.blocklen * ctx.incr);
+ Math.Lemmas.lemma_div_le (blocks_len) len ctx.blocklen;
+ (* End TODO *)
+ let blocks, last_block = split plain blocks_len in
+ let cipher_blocks = counter_mode_blocks ctx block_enc key nonce counter blocks in
+ let cipher_last_block =
+ if part_len > 0
+ then (* encrypt final partial block(s) *)
+ let mask = block_enc key nonce (counter+ctx.incr*(length plain / ctx.blocklen)) in
+ let mask = slice mask 0 part_len in
+ assert(length last_block = part_len);
+ xor #part_len last_block mask
+ else createEmpty in
+ cipher_blocks @| cipher_last_block
diff --git a/security/nss/lib/freebl/verified/specs/Spec.Chacha20.fst b/security/nss/lib/freebl/verified/specs/Spec.Chacha20.fst
new file mode 100644
index 000000000..0bdc69725
--- /dev/null
+++ b/security/nss/lib/freebl/verified/specs/Spec.Chacha20.fst
@@ -0,0 +1,169 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module Spec.Chacha20
+
+module ST = FStar.HyperStack.ST
+
+open FStar.Mul
+open FStar.Seq
+open FStar.UInt32
+open FStar.Endianness
+open Spec.Lib
+open Spec.Chacha20.Lemmas
+open Seq.Create
+
+#set-options "--max_fuel 0 --z3rlimit 100"
+
+(* Constants *)
+let keylen = 32 (* in bytes *)
+let blocklen = 64 (* in bytes *)
+let noncelen = 12 (* in bytes *)
+
+type key = lbytes keylen
+type block = lbytes blocklen
+type nonce = lbytes noncelen
+type counter = UInt.uint_t 32
+
+// using @ as a functional substitute for ;
+// internally, blocks are represented as 16 x 4-byte integers
+type state = m:seq UInt32.t {length m = 16}
+type idx = n:nat{n < 16}
+type shuffle = state -> Tot state
+
+let line (a:idx) (b:idx) (d:idx) (s:t{0 < v s /\ v s < 32}) (m:state) : Tot state =
+ let m = m.[a] <- (m.[a] +%^ m.[b]) in
+ let m = m.[d] <- ((m.[d] ^^ m.[a]) <<< s) in m
+
+let quarter_round a b c d : shuffle =
+ line a b d 16ul @
+ line c d b 12ul @
+ line a b d 8ul @
+ line c d b 7ul
+
+let column_round : shuffle =
+ quarter_round 0 4 8 12 @
+ quarter_round 1 5 9 13 @
+ quarter_round 2 6 10 14 @
+ quarter_round 3 7 11 15
+
+let diagonal_round : shuffle =
+ quarter_round 0 5 10 15 @
+ quarter_round 1 6 11 12 @
+ quarter_round 2 7 8 13 @
+ quarter_round 3 4 9 14
+
+let double_round: shuffle =
+ column_round @ diagonal_round (* 2 rounds *)
+
+let rounds : shuffle =
+ iter 10 double_round (* 20 rounds *)
+
+let chacha20_core (s:state) : Tot state =
+ let s' = rounds s in
+ Spec.Loops.seq_map2 (fun x y -> x +%^ y) s' s
+
+(* state initialization *)
+let c0 = 0x61707865ul
+let c1 = 0x3320646eul
+let c2 = 0x79622d32ul
+let c3 = 0x6b206574ul
+
+let setup (k:key) (n:nonce) (c:counter): Tot state =
+ create_4 c0 c1 c2 c3 @|
+ uint32s_from_le 8 k @|
+ create_1 (UInt32.uint_to_t c) @|
+ uint32s_from_le 3 n
+
+let chacha20_block (k:key) (n:nonce) (c:counter): Tot block =
+ let st = setup k n c in
+ let st' = chacha20_core st in
+ uint32s_to_le 16 st'
+
+let chacha20_ctx: Spec.CTR.block_cipher_ctx =
+ let open Spec.CTR in
+ {
+ keylen = keylen;
+ blocklen = blocklen;
+ noncelen = noncelen;
+ counterbits = 32;
+ incr = 1
+ }
+
+let chacha20_cipher: Spec.CTR.block_cipher chacha20_ctx = chacha20_block
+
+let chacha20_encrypt_bytes key nonce counter m =
+ Spec.CTR.counter_mode chacha20_ctx chacha20_cipher key nonce counter m
+
+
+unfold let test_plaintext = [
+ 0x4cuy; 0x61uy; 0x64uy; 0x69uy; 0x65uy; 0x73uy; 0x20uy; 0x61uy;
+ 0x6euy; 0x64uy; 0x20uy; 0x47uy; 0x65uy; 0x6euy; 0x74uy; 0x6cuy;
+ 0x65uy; 0x6duy; 0x65uy; 0x6euy; 0x20uy; 0x6fuy; 0x66uy; 0x20uy;
+ 0x74uy; 0x68uy; 0x65uy; 0x20uy; 0x63uy; 0x6cuy; 0x61uy; 0x73uy;
+ 0x73uy; 0x20uy; 0x6fuy; 0x66uy; 0x20uy; 0x27uy; 0x39uy; 0x39uy;
+ 0x3auy; 0x20uy; 0x49uy; 0x66uy; 0x20uy; 0x49uy; 0x20uy; 0x63uy;
+ 0x6fuy; 0x75uy; 0x6cuy; 0x64uy; 0x20uy; 0x6fuy; 0x66uy; 0x66uy;
+ 0x65uy; 0x72uy; 0x20uy; 0x79uy; 0x6fuy; 0x75uy; 0x20uy; 0x6fuy;
+ 0x6euy; 0x6cuy; 0x79uy; 0x20uy; 0x6fuy; 0x6euy; 0x65uy; 0x20uy;
+ 0x74uy; 0x69uy; 0x70uy; 0x20uy; 0x66uy; 0x6fuy; 0x72uy; 0x20uy;
+ 0x74uy; 0x68uy; 0x65uy; 0x20uy; 0x66uy; 0x75uy; 0x74uy; 0x75uy;
+ 0x72uy; 0x65uy; 0x2cuy; 0x20uy; 0x73uy; 0x75uy; 0x6euy; 0x73uy;
+ 0x63uy; 0x72uy; 0x65uy; 0x65uy; 0x6euy; 0x20uy; 0x77uy; 0x6fuy;
+ 0x75uy; 0x6cuy; 0x64uy; 0x20uy; 0x62uy; 0x65uy; 0x20uy; 0x69uy;
+ 0x74uy; 0x2euy
+]
+
+unfold let test_ciphertext = [
+ 0x6euy; 0x2euy; 0x35uy; 0x9auy; 0x25uy; 0x68uy; 0xf9uy; 0x80uy;
+ 0x41uy; 0xbauy; 0x07uy; 0x28uy; 0xdduy; 0x0duy; 0x69uy; 0x81uy;
+ 0xe9uy; 0x7euy; 0x7auy; 0xecuy; 0x1duy; 0x43uy; 0x60uy; 0xc2uy;
+ 0x0auy; 0x27uy; 0xafuy; 0xccuy; 0xfduy; 0x9fuy; 0xaeuy; 0x0buy;
+ 0xf9uy; 0x1buy; 0x65uy; 0xc5uy; 0x52uy; 0x47uy; 0x33uy; 0xabuy;
+ 0x8fuy; 0x59uy; 0x3duy; 0xabuy; 0xcduy; 0x62uy; 0xb3uy; 0x57uy;
+ 0x16uy; 0x39uy; 0xd6uy; 0x24uy; 0xe6uy; 0x51uy; 0x52uy; 0xabuy;
+ 0x8fuy; 0x53uy; 0x0cuy; 0x35uy; 0x9fuy; 0x08uy; 0x61uy; 0xd8uy;
+ 0x07uy; 0xcauy; 0x0duy; 0xbfuy; 0x50uy; 0x0duy; 0x6auy; 0x61uy;
+ 0x56uy; 0xa3uy; 0x8euy; 0x08uy; 0x8auy; 0x22uy; 0xb6uy; 0x5euy;
+ 0x52uy; 0xbcuy; 0x51uy; 0x4duy; 0x16uy; 0xccuy; 0xf8uy; 0x06uy;
+ 0x81uy; 0x8cuy; 0xe9uy; 0x1auy; 0xb7uy; 0x79uy; 0x37uy; 0x36uy;
+ 0x5auy; 0xf9uy; 0x0buy; 0xbfuy; 0x74uy; 0xa3uy; 0x5buy; 0xe6uy;
+ 0xb4uy; 0x0buy; 0x8euy; 0xeduy; 0xf2uy; 0x78uy; 0x5euy; 0x42uy;
+ 0x87uy; 0x4duy
+]
+
+unfold let test_key = [
+ 0uy; 1uy; 2uy; 3uy; 4uy; 5uy; 6uy; 7uy;
+ 8uy; 9uy; 10uy; 11uy; 12uy; 13uy; 14uy; 15uy;
+ 16uy; 17uy; 18uy; 19uy; 20uy; 21uy; 22uy; 23uy;
+ 24uy; 25uy; 26uy; 27uy; 28uy; 29uy; 30uy; 31uy
+ ]
+unfold let test_nonce = [
+ 0uy; 0uy; 0uy; 0uy; 0uy; 0uy; 0uy; 0x4auy; 0uy; 0uy; 0uy; 0uy
+ ]
+
+unfold let test_counter = 1
+
+let test() =
+ assert_norm(List.Tot.length test_plaintext = 114);
+ assert_norm(List.Tot.length test_ciphertext = 114);
+ assert_norm(List.Tot.length test_key = 32);
+ assert_norm(List.Tot.length test_nonce = 12);
+ let test_plaintext = createL test_plaintext in
+ let test_ciphertext = createL test_ciphertext in
+ let test_key = createL test_key in
+ let test_nonce = createL test_nonce in
+ chacha20_encrypt_bytes test_key test_nonce test_counter test_plaintext
+ = test_ciphertext
diff --git a/security/nss/lib/freebl/verified/specs/Spec.Curve25519.fst b/security/nss/lib/freebl/verified/specs/Spec.Curve25519.fst
new file mode 100644
index 000000000..af4035b09
--- /dev/null
+++ b/security/nss/lib/freebl/verified/specs/Spec.Curve25519.fst
@@ -0,0 +1,168 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module Spec.Curve25519
+
+module ST = FStar.HyperStack.ST
+
+open FStar.Mul
+open FStar.Seq
+open FStar.UInt8
+open FStar.Endianness
+open Spec.Lib
+open Spec.Curve25519.Lemmas
+
+#reset-options "--initial_fuel 0 --max_fuel 0 --z3rlimit 20"
+
+(* Field types and parameters *)
+let prime = pow2 255 - 19
+type elem : Type0 = e:int{e >= 0 /\ e < prime}
+let fadd e1 e2 = (e1 + e2) % prime
+let fsub e1 e2 = (e1 - e2) % prime
+let fmul e1 e2 = (e1 * e2) % prime
+let zero : elem = 0
+let one : elem = 1
+let ( +@ ) = fadd
+let ( *@ ) = fmul
+
+(** Exponentiation *)
+let rec ( ** ) (e:elem) (n:pos) : Tot elem (decreases n) =
+ if n = 1 then e
+ else
+ if n % 2 = 0 then op_Star_Star (e `fmul` e) (n / 2)
+ else e `fmul` (op_Star_Star (e `fmul` e) ((n-1)/2))
+
+(* Type aliases *)
+type scalar = lbytes 32
+type serialized_point = lbytes 32
+type proj_point = | Proj: x:elem -> z:elem -> proj_point
+
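+(* RFC 7748 scalar clamping: clear the three low bits, clear bit 255, set bit 254 *)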
+let decodeScalar25519 (k:scalar) =
+ let k = k.[0] <- (k.[0] &^ 248uy) in
+ let k = k.[31] <- ((k.[31] &^ 127uy) |^ 64uy) in k
+
+let decodePoint (u:serialized_point) =
+ (little_endian u % pow2 255) % prime
+
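+(* one Montgomery ladder step: differential addition and doubling on projective x-coordinates *)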
+let add_and_double qx nq nqp1 =
+ let x_1 = qx in
+ let x_2, z_2 = nq.x, nq.z in
+ let x_3, z_3 = nqp1.x, nqp1.z in
+ let a = x_2 `fadd` z_2 in
+ let aa = a**2 in
+ let b = x_2 `fsub` z_2 in
+ let bb = b**2 in
+ let e = aa `fsub` bb in
+ let c = x_3 `fadd` z_3 in
+ let d = x_3 `fsub` z_3 in
+ let da = d `fmul` a in
+ let cb = c `fmul` b in
+ let x_3 = (da `fadd` cb)**2 in
+ let z_3 = x_1 `fmul` ((da `fsub` cb)**2) in
+ let x_2 = aa `fmul` bb in
+ let z_2 = e `fmul` (aa `fadd` (121665 `fmul` e)) in
+ Proj x_2 z_2, Proj x_3 z_3
+
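+(* bit i of the little-endian scalar: bit (i % 8) of byte (i / 8) *)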
+let ith_bit (k:scalar) (i:nat{i < 256}) =
+ let q = i / 8 in let r = i % 8 in
+ (v (k.[q]) / pow2 r) % 2
+
+let rec montgomery_ladder_ (init:elem) x xp1 (k:scalar) (ctr:nat{ctr<=256})
+ : Tot proj_point (decreases ctr) =
+ if ctr = 0 then x
+ else (
+ let ctr' = ctr - 1 in
+ let (x', xp1') =
+ if ith_bit k ctr' = 1 then (
+ let nqp2, nqp1 = add_and_double init xp1 x in
+ nqp1, nqp2
+ ) else add_and_double init x xp1 in
+ montgomery_ladder_ init x' xp1' k ctr'
+ )
+
+let montgomery_ladder (init:elem) (k:scalar) : Tot proj_point =
+ montgomery_ladder_ init (Proj one zero) (Proj init one) k 256
+
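+(* affine x-coordinate X/Z, with the inverse computed as Z^(prime - 2), serialized little-endian *)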
+let encodePoint (p:proj_point) : Tot serialized_point =
+ let p = p.x `fmul` (p.z ** (prime - 2)) in
+ little_bytes 32ul p
+
+let scalarmult (k:scalar) (u:serialized_point) : Tot serialized_point =
+ let k = decodeScalar25519 k in
+ let u = decodePoint u in
+ let res = montgomery_ladder u k in
+ encodePoint res
+
+
+(* ********************* *)
+(* RFC 7748 Test Vectors *)
+(* ********************* *)
+
+let scalar1 = [
+ 0xa5uy; 0x46uy; 0xe3uy; 0x6buy; 0xf0uy; 0x52uy; 0x7cuy; 0x9duy;
+ 0x3buy; 0x16uy; 0x15uy; 0x4buy; 0x82uy; 0x46uy; 0x5euy; 0xdduy;
+ 0x62uy; 0x14uy; 0x4cuy; 0x0auy; 0xc1uy; 0xfcuy; 0x5auy; 0x18uy;
+ 0x50uy; 0x6auy; 0x22uy; 0x44uy; 0xbauy; 0x44uy; 0x9auy; 0xc4uy
+]
+
+let scalar2 = [
+ 0x4buy; 0x66uy; 0xe9uy; 0xd4uy; 0xd1uy; 0xb4uy; 0x67uy; 0x3cuy;
+ 0x5auy; 0xd2uy; 0x26uy; 0x91uy; 0x95uy; 0x7duy; 0x6auy; 0xf5uy;
+ 0xc1uy; 0x1buy; 0x64uy; 0x21uy; 0xe0uy; 0xeauy; 0x01uy; 0xd4uy;
+ 0x2cuy; 0xa4uy; 0x16uy; 0x9euy; 0x79uy; 0x18uy; 0xbauy; 0x0duy
+]
+
+let input1 = [
+ 0xe6uy; 0xdbuy; 0x68uy; 0x67uy; 0x58uy; 0x30uy; 0x30uy; 0xdbuy;
+ 0x35uy; 0x94uy; 0xc1uy; 0xa4uy; 0x24uy; 0xb1uy; 0x5fuy; 0x7cuy;
+ 0x72uy; 0x66uy; 0x24uy; 0xecuy; 0x26uy; 0xb3uy; 0x35uy; 0x3buy;
+ 0x10uy; 0xa9uy; 0x03uy; 0xa6uy; 0xd0uy; 0xabuy; 0x1cuy; 0x4cuy
+]
+
+let input2 = [
+ 0xe5uy; 0x21uy; 0x0fuy; 0x12uy; 0x78uy; 0x68uy; 0x11uy; 0xd3uy;
+ 0xf4uy; 0xb7uy; 0x95uy; 0x9duy; 0x05uy; 0x38uy; 0xaeuy; 0x2cuy;
+ 0x31uy; 0xdbuy; 0xe7uy; 0x10uy; 0x6fuy; 0xc0uy; 0x3cuy; 0x3euy;
+ 0xfcuy; 0x4cuy; 0xd5uy; 0x49uy; 0xc7uy; 0x15uy; 0xa4uy; 0x93uy
+]
+
+let expected1 = [
+ 0xc3uy; 0xdauy; 0x55uy; 0x37uy; 0x9duy; 0xe9uy; 0xc6uy; 0x90uy;
+ 0x8euy; 0x94uy; 0xeauy; 0x4duy; 0xf2uy; 0x8duy; 0x08uy; 0x4fuy;
+ 0x32uy; 0xecuy; 0xcfuy; 0x03uy; 0x49uy; 0x1cuy; 0x71uy; 0xf7uy;
+ 0x54uy; 0xb4uy; 0x07uy; 0x55uy; 0x77uy; 0xa2uy; 0x85uy; 0x52uy
+]
+let expected2 = [
+ 0x95uy; 0xcbuy; 0xdeuy; 0x94uy; 0x76uy; 0xe8uy; 0x90uy; 0x7duy;
+ 0x7auy; 0xaduy; 0xe4uy; 0x5cuy; 0xb4uy; 0xb8uy; 0x73uy; 0xf8uy;
+ 0x8buy; 0x59uy; 0x5auy; 0x68uy; 0x79uy; 0x9fuy; 0xa1uy; 0x52uy;
+ 0xe6uy; 0xf8uy; 0xf7uy; 0x64uy; 0x7auy; 0xacuy; 0x79uy; 0x57uy
+]
+
+let test () =
+ assert_norm(List.Tot.length scalar1 = 32);
+ assert_norm(List.Tot.length scalar2 = 32);
+ assert_norm(List.Tot.length input1 = 32);
+ assert_norm(List.Tot.length input2 = 32);
+ assert_norm(List.Tot.length expected1 = 32);
+ assert_norm(List.Tot.length expected2 = 32);
+ let scalar1 = createL scalar1 in
+ let scalar2 = createL scalar2 in
+ let input1 = createL input1 in
+ let input2 = createL input2 in
+ let expected1 = createL expected1 in
+ let expected2 = createL expected2 in
+ scalarmult scalar1 input1 = expected1
+ && scalarmult scalar2 input2 = expected2
diff --git a/security/nss/lib/freebl/verified/specs/Spec.Poly1305.fst b/security/nss/lib/freebl/verified/specs/Spec.Poly1305.fst
new file mode 100644
index 000000000..f9d8a4cb2
--- /dev/null
+++ b/security/nss/lib/freebl/verified/specs/Spec.Poly1305.fst
@@ -0,0 +1,107 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+module Spec.Poly1305
+
+module ST = FStar.HyperStack.ST
+
+open FStar.Math.Lib
+open FStar.Mul
+open FStar.Seq
+open FStar.UInt8
+open FStar.Endianness
+open Spec.Poly1305.Lemmas
+
+#set-options "--initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
+
+(* Field types and parameters *)
+let prime = pow2 130 - 5
+type elem = e:int{e >= 0 /\ e < prime}
+let fadd (e1:elem) (e2:elem) = (e1 + e2) % prime
+let fmul (e1:elem) (e2:elem) = (e1 * e2) % prime
+let zero : elem = 0
+let one : elem = 1
+let op_Plus_At = fadd
+let op_Star_At = fmul
+(* Type aliases *)
+let op_Amp_Bar = UInt.logand #128
+type word = w:bytes{length w <= 16}
+type word_16 = w:bytes{length w = 16}
+type tag = word_16
+type key = lbytes 32
+type text = seq word
+
+(* Specification code *)
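+(* a block is read as a little-endian integer with a padding bit added at position 8 * length w *)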
+let encode (w:word) =
+ (pow2 (8 * length w)) `fadd` (little_endian w)
+
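+(* Horner evaluation of the message polynomial at the point r *)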
+let rec poly (txt:text) (r:e:elem) : Tot elem (decreases (length txt)) =
+ if length txt = 0 then zero
+ else
+ let a = poly (Seq.tail txt) r in
+ let n = encode (Seq.head txt) in
+ (n `fadd` a) `fmul` r
+
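+(* clamp r by masking, as required by RFC 7539 *)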
+let encode_r (rb:word_16) =
+ (little_endian rb) &| 0x0ffffffc0ffffffc0ffffffc0fffffff
+
+let finish (a:elem) (s:word_16) : Tot tag =
+ let n = (a + little_endian s) % pow2 128 in
+ little_bytes 16ul n
+
+let rec encode_bytes (txt:bytes) : Tot text (decreases (length txt)) =
+ if length txt = 0 then createEmpty
+ else
+ let w, txt = split txt (min (length txt) 16) in
+ append_last (encode_bytes txt) w
+
+let poly1305 (msg:bytes) (k:key) : Tot tag =
+ let text = encode_bytes msg in
+ let r = encode_r (slice k 0 16) in
+ let s = slice k 16 32 in
+ finish (poly text r) s
+
+
+(* ********************* *)
+(* RFC 7539 Test Vectors *)
+(* ********************* *)
+
+#reset-options "--initial_fuel 0 --max_fuel 0 --z3rlimit 20"
+
+unfold let msg = [
+ 0x43uy; 0x72uy; 0x79uy; 0x70uy; 0x74uy; 0x6fuy; 0x67uy; 0x72uy;
+ 0x61uy; 0x70uy; 0x68uy; 0x69uy; 0x63uy; 0x20uy; 0x46uy; 0x6fuy;
+ 0x72uy; 0x75uy; 0x6duy; 0x20uy; 0x52uy; 0x65uy; 0x73uy; 0x65uy;
+ 0x61uy; 0x72uy; 0x63uy; 0x68uy; 0x20uy; 0x47uy; 0x72uy; 0x6fuy;
+ 0x75uy; 0x70uy ]
+
+unfold let k = [
+ 0x85uy; 0xd6uy; 0xbeuy; 0x78uy; 0x57uy; 0x55uy; 0x6duy; 0x33uy;
+ 0x7fuy; 0x44uy; 0x52uy; 0xfeuy; 0x42uy; 0xd5uy; 0x06uy; 0xa8uy;
+ 0x01uy; 0x03uy; 0x80uy; 0x8auy; 0xfbuy; 0x0duy; 0xb2uy; 0xfduy;
+ 0x4auy; 0xbfuy; 0xf6uy; 0xafuy; 0x41uy; 0x49uy; 0xf5uy; 0x1buy ]
+
+unfold let expected = [
+ 0xa8uy; 0x06uy; 0x1duy; 0xc1uy; 0x30uy; 0x51uy; 0x36uy; 0xc6uy;
+ 0xc2uy; 0x2buy; 0x8buy; 0xafuy; 0x0cuy; 0x01uy; 0x27uy; 0xa9uy ]
+
+let test () : Tot bool =
+ assert_norm(List.Tot.length msg = 34);
+ assert_norm(List.Tot.length k = 32);
+ assert_norm(List.Tot.length expected = 16);
+ let msg = createL msg in
+ let k = createL k in
+ let expected = createL expected in
+ poly1305 msg k = expected
diff --git a/security/nss/lib/freebl/verified/vec128.h b/security/nss/lib/freebl/verified/vec128.h
new file mode 100644
index 000000000..986e9db82
--- /dev/null
+++ b/security/nss/lib/freebl/verified/vec128.h
@@ -0,0 +1,345 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __Vec_H
+#define __Vec_H
+
+#if defined(_MSC_VER) || defined(__MSVC__)
+#define forceinline __forceinline inline
+#elif (defined(__GNUC__) || defined(__clang__))
+#define forceinline __attribute__((always_inline)) inline
+#else
+#define forceinline inline
+#endif
+
+#if defined(__SSSE3__) || defined(__AVX2__) || defined(__AVX__)
+
+#include <emmintrin.h>
+#include <tmmintrin.h>
+
+#define VEC128
+#define vec_size 4
+
+typedef __m128i vec;
+
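+/* 8- and 16-bit rotations of each 32-bit lane are done with a byte shuffle (pshufb) */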
+static forceinline vec
+vec_rotate_left_8(vec v)
+{
+ __m128i x = _mm_set_epi8(14, 13, 12, 15, 10, 9, 8, 11, 6, 5, 4, 7, 2, 1, 0, 3);
+ return _mm_shuffle_epi8(v, x);
+}
+
+static forceinline vec
+vec_rotate_left_16(vec v)
+{
+ __m128i x = _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
+ return _mm_shuffle_epi8(v, x);
+}
+
+static forceinline vec
+vec_rotate_left(vec v, unsigned int n)
+{
+ if (n == 8)
+ return vec_rotate_left_8(v);
+ if (n == 16)
+ return vec_rotate_left_16(v);
+ return _mm_xor_si128(_mm_slli_epi32(v, n),
+ _mm_srli_epi32(v, 32 - n));
+}
+
+static forceinline vec
+vec_rotate_right(vec v, unsigned int n)
+{
+ return (vec_rotate_left(v, 32 - n));
+}
+
+#define vec_shuffle_right(x, n) \
+ _mm_shuffle_epi32(x, _MM_SHUFFLE((3 + (n)) % 4, (2 + (n)) % 4, (1 + (n)) % 4, (n) % 4))
+
+#define vec_shuffle_left(x, n) vec_shuffle_right((x), 4 - (n))
+
+static forceinline vec
+vec_load_32x4(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4)
+{
+ return _mm_set_epi32(x4, x3, x2, x1);
+}
+
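+/* a 128-bit vector holds only four 32-bit lanes; the remaining arguments are ignored */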
+static forceinline vec
+vec_load_32x8(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8)
+{
+ return _mm_set_epi32(x4, x3, x2, x1);
+}
+
+static forceinline vec
+vec_load_le(const unsigned char* in)
+{
+ return _mm_loadu_si128((__m128i*)(in));
+}
+
+static forceinline vec
+vec_load128_le(const unsigned char* in)
+{
+ return vec_load_le(in);
+}
+
+static forceinline void
+vec_store_le(unsigned char* out, vec v)
+{
+ _mm_storeu_si128((__m128i*)(out), v);
+}
+
+static forceinline vec
+vec_add(vec v1, vec v2)
+{
+ return _mm_add_epi32(v1, v2);
+}
+
+static forceinline vec
+vec_add_u32(vec v1, uint32_t x)
+{
+ vec v2 = vec_load_32x4(x, 0, 0, 0);
+ return _mm_add_epi32(v1, v2);
+}
+
+static forceinline vec
+vec_increment(vec v1)
+{
+ vec one = vec_load_32x4(1, 0, 0, 0);
+ return _mm_add_epi32(v1, one);
+}
+
+static forceinline vec
+vec_xor(vec v1, vec v2)
+{
+ return _mm_xor_si128(v1, v2);
+}
+
+#define vec_zero() _mm_set_epi32(0, 0, 0, 0)
+
+#elif defined(__ARM_NEON__) || defined(__ARM_NEON)
+#include <arm_neon.h>
+
+#define VEC128
+#define vec_size 4
+
+typedef uint32x4_t vec;
+
+static forceinline vec
+vec_xor(vec v1, vec v2)
+{
+ return veorq_u32(v1, v2);
+}
+
+#define vec_rotate_left(x, n) \
+ vsriq_n_u32(vshlq_n_u32((x), (n)), (x), 32 - (n))
+
+#define vec_rotate_right(a, b) \
+    vec_rotate_left((a), 32 - (b))
+
+#define vec_shuffle_right(x, n) \
+ vextq_u32((x), (x), (n))
+
+#define vec_shuffle_left(a, b) \
+ vec_shuffle_right((a), 4 - (b))
+
+static forceinline vec
+vec_load_32x4(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4)
+{
+ uint32_t a[4] = { x1, x2, x3, x4 };
+ return vld1q_u32(a);
+}
+
+static forceinline vec
+vec_load_32(uint32_t x1)
+{
+ uint32_t a[4] = { x1, x1, x1, x1 };
+ return vld1q_u32(a);
+}
+
+static forceinline vec
+vec_load_32x8(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8)
+{
+ return vec_load_32x4(x1, x2, x3, x4);
+}
+
+static forceinline vec
+vec_load_le(const unsigned char* in)
+{
+ return vld1q_u32((uint32_t*)in);
+}
+
+static forceinline vec
+vec_load128_le(const unsigned char* in)
+{
+ return vec_load_le(in);
+}
+
+static forceinline void
+vec_store_le(unsigned char* out, vec v)
+{
+ vst1q_u32((uint32_t*)out, v);
+}
+
+static forceinline vec
+vec_add(vec v1, vec v2)
+{
+ return vaddq_u32(v1, v2);
+}
+
+static forceinline vec
+vec_add_u32(vec v1, uint32_t x)
+{
+ vec v2 = vec_load_32x4(x, 0, 0, 0);
+ return vec_add(v1, v2);
+}
+
+static forceinline vec
+vec_increment(vec v1)
+{
+ vec one = vec_load_32x4(1, 0, 0, 0);
+ return vec_add(v1, one);
+}
+
+#define vec_zero() vec_load_32x4(0, 0, 0, 0)
+
+#else
+
+#define VEC128
+#define vec_size 4
+
+typedef struct {
+ uint32_t v[4];
+} vec;
+
+static forceinline vec
+vec_xor(vec v1, vec v2)
+{
+ vec r;
+ r.v[0] = v1.v[0] ^ v2.v[0];
+ r.v[1] = v1.v[1] ^ v2.v[1];
+ r.v[2] = v1.v[2] ^ v2.v[2];
+ r.v[3] = v1.v[3] ^ v2.v[3];
+ return r;
+}
+
+static forceinline vec
+vec_rotate_left(vec v, unsigned int n)
+{
+ vec r;
+ r.v[0] = (v.v[0] << n) ^ (v.v[0] >> (32 - n));
+ r.v[1] = (v.v[1] << n) ^ (v.v[1] >> (32 - n));
+ r.v[2] = (v.v[2] << n) ^ (v.v[2] >> (32 - n));
+ r.v[3] = (v.v[3] << n) ^ (v.v[3] >> (32 - n));
+ return r;
+}
+
+static forceinline vec
+vec_rotate_right(vec v, unsigned int n)
+{
+ return (vec_rotate_left(v, 32 - n));
+}
+
+static forceinline vec
+vec_shuffle_right(vec v, unsigned int n)
+{
+ vec r;
+ r.v[0] = v.v[n % 4];
+ r.v[1] = v.v[(n + 1) % 4];
+ r.v[2] = v.v[(n + 2) % 4];
+ r.v[3] = v.v[(n + 3) % 4];
+ return r;
+}
+
+static forceinline vec
+vec_shuffle_left(vec x, unsigned int n)
+{
+ return vec_shuffle_right(x, 4 - n);
+}
+
+static forceinline vec
+vec_load_32x4(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
+{
+ vec v;
+ v.v[0] = x0;
+ v.v[1] = x1;
+ v.v[2] = x2;
+ v.v[3] = x3;
+ return v;
+}
+
+static forceinline vec
+vec_load_32(uint32_t x0)
+{
+ vec v;
+ v.v[0] = x0;
+ v.v[1] = x0;
+ v.v[2] = x0;
+ v.v[3] = x0;
+ return v;
+}
+
+static forceinline vec
+vec_load_le(const uint8_t* in)
+{
+ vec r;
+ r.v[0] = load32_le((uint8_t*)in);
+ r.v[1] = load32_le((uint8_t*)in + 4);
+ r.v[2] = load32_le((uint8_t*)in + 8);
+ r.v[3] = load32_le((uint8_t*)in + 12);
+ return r;
+}
+
+static forceinline void
+vec_store_le(unsigned char* out, vec r)
+{
+ store32_le(out, r.v[0]);
+ store32_le(out + 4, r.v[1]);
+ store32_le(out + 8, r.v[2]);
+ store32_le(out + 12, r.v[3]);
+}
+
+static forceinline vec
+vec_load128_le(const unsigned char* in)
+{
+ return vec_load_le(in);
+}
+
+static forceinline vec
+vec_add(vec v1, vec v2)
+{
+ vec r;
+ r.v[0] = v1.v[0] + v2.v[0];
+ r.v[1] = v1.v[1] + v2.v[1];
+ r.v[2] = v1.v[2] + v2.v[2];
+ r.v[3] = v1.v[3] + v2.v[3];
+ return r;
+}
+
+static forceinline vec
+vec_add_u32(vec v1, uint32_t x)
+{
+ vec v2 = vec_load_32x4(x, 0, 0, 0);
+ return vec_add(v1, v2);
+}
+
+static forceinline vec
+vec_increment(vec v1)
+{
+ vec one = vec_load_32x4(1, 0, 0, 0);
+ return vec_add(v1, one);
+}
+
+#define vec_zero() vec_load_32x4(0, 0, 0, 0)
+
+#endif
+
+#endif