diff --git a/.github/workflows/crypto-test-harness.yml b/.github/workflows/crypto-test-harness.yml new file mode 100644 index 0000000000000..67fc1a643eec1 --- /dev/null +++ b/.github/workflows/crypto-test-harness.yml @@ -0,0 +1,44 @@ +--- +name: Linux + +on: + pull_request: + workflow_dispatch: + +jobs: + tcrypt: + runs-on: ubuntu-22.04 + steps: + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y flex bison bc cpio busybox-static kmod + - name: Checkout + uses: actions/checkout@v3 + - name: Build kernel & Modules + run: | + cp zeta/test-artifacts/config-um .config + make olddefconfig ARCH=um + make -j$(nproc) ARCH=um all + mkdir initramfs + make modules_install INSTALL_MOD_PATH=./initramfs ARCH=um + - name: Prepare initramfs + run: | + mkdir initramfs/bin initramfs/proc + cp -p /bin/busybox initramfs/bin/busybox + cp -p zeta/test-artifacts/init initramfs/init + cp -p zeta/test-artifacts/test-script.sh initramfs/test-script.sh + cd initramfs + find . -print0 | cpio --null --create --verbose --format=newc | gzip --best > ../uml-initramfs.cpio.gz + cd .. 
+ - name: Run user-mode linux test harness + continue-on-error: true + run: | + sudo ./linux initrd=uml-initramfs.cpio.gz > tcrypt.out 2>&1 + cat tcrypt.out + - name: Collect artifacts + uses: actions/upload-artifact@v3 + if: success() || failure() + with: + name: tcrypt_job + path: ./tcrypt.out diff --git a/Makefile b/Makefile index beddccac32831..b0b41f51283d2 100644 --- a/Makefile +++ b/Makefile @@ -857,9 +857,9 @@ ifdef CONFIG_READABLE_ASM KBUILD_CFLAGS += -fno-reorder-blocks -fno-ipa-cp-clone -fno-partial-inlining endif -ifneq ($(CONFIG_FRAME_WARN),0) -KBUILD_CFLAGS += -Wframe-larger-than=$(CONFIG_FRAME_WARN) -endif +#ifneq ($(CONFIG_FRAME_WARN),0) +#KBUILD_CFLAGS += -Wframe-larger-than=$(CONFIG_FRAME_WARN) +#endif stackp-flags-y := -fno-stack-protector stackp-flags-$(CONFIG_STACKPROTECTOR) := -fstack-protector diff --git a/crypto/Kconfig b/crypto/Kconfig index 650b1b3620d81..3c4177b7a4d85 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -254,6 +254,25 @@ config CRYPTO_RSA help RSA (Rivest-Shamir-Adleman) public key algorithm (RFC8017) +config CRYPTO_RSA_GENERIC + tristate "RSA (Rivest-Shamir-Adleman) (generic)" + select CRYPTO_AKCIPHER + select CRYPTO_MANAGER + select MPILIB + select ASN1 + help + RSA (Rivest-Shamir-Adleman) public key algorithm (RFC8017) + +config CRYPTO_RSA_HACL + tristate "RSA (Rivest-Shamir-Adleman) (HACL*)" + select CRYPTO_AKCIPHER + select CRYPTO_MANAGER + select MPILIB + select ASN1 + help + RSA (Rivest-Shamir-Adleman) public key algorithm (RFC8017) from HACL* + + config CRYPTO_DH tristate "DH (Diffie-Hellman)" select CRYPTO_KPP @@ -290,13 +309,13 @@ config CRYPTO_ECDSA tristate "ECDSA (Elliptic Curve Digital Signature Algorithm)" select CRYPTO_ECC select CRYPTO_AKCIPHER + select CRYPTO_DRBG_HMAC select ASN1 help ECDSA (Elliptic Curve Digital Signature Algorithm) (FIPS 186, ISO/IEC 14888-3) using curves P-192, P-256, and P-384 - - Only signature verification is implemented. 
+ P-256 uses a formally verified implementation from HACL* config CRYPTO_ECRDSA tristate "EC-RDSA (Elliptic Curve Russian Digital Signature Algorithm)" @@ -1026,6 +1045,12 @@ config CRYPTO_SHA256 This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP). Used by the btrfs filesystem, Ceph, NFS, and SMB. +config CRYPTO_SHA2_HACL + tristate "SHA-224 and SHA-256 and SHA-384 and SHA-512" + select CRYPTO_HASH + help + SHA-2 secure hash algorithms (FIPS 180, ISO/IEC 10118-3) from HACL* + config CRYPTO_SHA512 tristate "SHA-384 and SHA-512" select CRYPTO_HASH @@ -1038,6 +1063,12 @@ config CRYPTO_SHA3 help SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3) +config CRYPTO_SHA3_HACL + tristate "SHA-3" + select CRYPTO_HASH + help + SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3) from HACL* + config CRYPTO_SM3 tristate diff --git a/crypto/Makefile b/crypto/Makefile index 953a7e105e58c..eebf81dceb1bc 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -38,10 +38,19 @@ $(obj)/rsa_helper.o: $(obj)/rsapubkey.asn1.h $(obj)/rsaprivkey.asn1.h rsa_generic-y := rsapubkey.asn1.o rsa_generic-y += rsaprivkey.asn1.o -rsa_generic-y += rsa.o rsa_generic-y += rsa_helper.o +rsa_generic-y += rsa.o rsa_generic-y += rsa-pkcs1pad.o -obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o +obj-$(CONFIG_CRYPTO_RSA_GENERIC) += rsa_generic.o + +rsa_hacl-y := rsapubkey.asn1.o +rsa_hacl-y += rsaprivkey.asn1.o +rsa_hacl-y += hacl_bignum.o +rsa_hacl-y += rsa-hacl-generated.o +rsa_hacl-y += rsa_helper.o +rsa_hacl-y += rsa-hacl.o +rsa_hacl-y += rsa-pkcs1pad.o +obj-$(CONFIG_CRYPTO_RSA_HACL) += rsa_hacl.o $(obj)/sm2signature.asn1.o: $(obj)/sm2signature.asn1.c $(obj)/sm2signature.asn1.h $(obj)/sm2.o: $(obj)/sm2signature.asn1.h @@ -51,9 +60,12 @@ sm2_generic-y += sm2.o obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o +$(obj)/ecprivkey.asn1.o: $(obj)/ecprivkey.asn1.c $(obj)/ecprivkey.asn1.h $(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h -$(obj)/ecdsa.o: 
$(obj)/ecdsasignature.asn1.h +$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h $(obj)/ecprivkey.asn1.h +ecdsa_generic-y += p256-hacl-generated.o ecdsa_generic-y += ecdsa.o +ecdsa_generic-y += ecprivkey.asn1.o ecdsa_generic-y += ecdsasignature.asn1.o obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o @@ -77,8 +89,10 @@ obj-$(CONFIG_CRYPTO_MD5) += md5.o obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o +obj-$(CONFIG_CRYPTO_SHA2_HACL) += sha2-hacl-generated.o sha2-hacl.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o +obj-$(CONFIG_CRYPTO_SHA3_HACL) += sha3-hacl-generated.o sha3-hacl.o obj-$(CONFIG_CRYPTO_SM3) += sm3.o obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1 index 702c41a3c7137..1791ddf4168a8 100644 --- a/crypto/asymmetric_keys/pkcs8.asn1 +++ b/crypto/asymmetric_keys/pkcs8.asn1 @@ -20,5 +20,5 @@ Attribute ::= ANY AlgorithmIdentifier ::= SEQUENCE { algorithm OBJECT IDENTIFIER ({ pkcs8_note_OID }), - parameters ANY OPTIONAL + parameters ANY OPTIONAL ({ pkcs8_note_algo_parameter }) } diff --git a/crypto/asymmetric_keys/pkcs8_parser.c b/crypto/asymmetric_keys/pkcs8_parser.c index 105dcce27f711..d603811d4faa4 100644 --- a/crypto/asymmetric_keys/pkcs8_parser.c +++ b/crypto/asymmetric_keys/pkcs8_parser.c @@ -21,9 +21,10 @@ struct pkcs8_parse_context { struct public_key *pub; unsigned long data; /* Start of data */ enum OID last_oid; /* Last OID encountered */ - enum OID algo_oid; /* Algorithm OID */ u32 key_size; const void *key; + const void *algo_param; + u32 algo_param_len; }; /* @@ -47,6 +48,17 @@ int pkcs8_note_OID(void *context, size_t hdrlen, return 0; } +int pkcs8_note_algo_parameter(void *context, size_t hdrlen, + unsigned char tag, + const void *value, size_t vlen) +{ + struct pkcs8_parse_context *ctx = 
context; + + ctx->algo_param = value; + ctx->algo_param_len = vlen; + return 0; +} + /* * Note the version number of the ASN.1 blob. */ @@ -70,12 +82,39 @@ int pkcs8_note_algo(void *context, size_t hdrlen, { struct pkcs8_parse_context *ctx = context; - if (ctx->last_oid != OID_rsaEncryption) - return -ENOPKG; - - ctx->pub->pkey_algo = "rsa"; - return 0; -} + enum OID curve_id; + + switch (ctx->last_oid) { + case OID_id_ecPublicKey: + if (!ctx->algo_param || ctx->algo_param_len == 0) + return -EBADMSG; + curve_id = look_up_OID(ctx->algo_param, ctx->algo_param_len); + + switch (curve_id) { + case OID_id_prime192v1: + ctx->pub->pkey_algo = "ecdsa-nist-p192"; + break; + case OID_id_prime256v1: + ctx->pub->pkey_algo = "ecdsa-nist-p256"; + break; + case OID_id_ansip384r1: + ctx->pub->pkey_algo = "ecdsa-nist-p384"; + break; + default: + return -ENOPKG; + } + break; + + case OID_rsaEncryption: + ctx->pub->pkey_algo = "rsa"; + break; + + default: + return -ENOPKG; + } + + return 0; + } /* * Note the key data of the ASN.1 blob. diff --git a/crypto/ecc.c b/crypto/ecc.c index f53fb4d6af992..daccb2c55abb2 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -488,7 +488,7 @@ static void vli_square(u64 *result, const u64 *left, unsigned int ndigits) /* Computes result = (left + right) % mod. * Assumes that left < mod and right < mod, result != mod. */ -static void vli_mod_add(u64 *result, const u64 *left, const u64 *right, +void vli_mod_add(u64 *result, const u64 *left, const u64 *right, const u64 *mod, unsigned int ndigits) { u64 carry; @@ -501,6 +501,7 @@ static void vli_mod_add(u64 *result, const u64 *left, const u64 *right, if (carry || vli_cmp(result, mod, ndigits) >= 0) vli_sub(result, result, mod, ndigits); } +EXPORT_SYMBOL(vli_mod_add); /* Computes result = (left - right) % mod. * Assumes that left < mod and right < mod, result != mod. 
@@ -963,7 +964,7 @@ void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right, EXPORT_SYMBOL(vli_mod_mult_slow); /* Computes result = (left * right) % curve_prime. */ -static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right, +void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right, const struct ecc_curve *curve) { u64 product[2 * ECC_MAX_DIGITS]; @@ -971,6 +972,7 @@ static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right, vli_mult(product, left, right, curve->g.ndigits); vli_mmod_fast(result, product, curve); } +EXPORT_SYMBOL(vli_mod_mult_fast); /* Computes result = left^2 % curve_prime. */ static void vli_mod_square_fast(u64 *result, const u64 *left, @@ -1277,7 +1279,7 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, vli_set(x1, t7, ndigits); } -static void ecc_point_mult(struct ecc_point *result, +void ecc_point_mult(struct ecc_point *result, const struct ecc_point *point, const u64 *scalar, u64 *initial_z, const struct ecc_curve *curve, unsigned int ndigits) @@ -1335,6 +1337,7 @@ static void ecc_point_mult(struct ecc_point *result, vli_set(result->x, rx[0], ndigits); vli_set(result->y, ry[0], ndigits); } +EXPORT_SYMBOL(ecc_point_mult); /* Computes R = P + Q mod p */ static void ecc_point_add(const struct ecc_point *result, diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c index fbd76498aba83..f22c95a09d2e0 100644 --- a/crypto/ecdsa.c +++ b/crypto/ecdsa.c @@ -6,18 +6,29 @@ #include #include #include +#include +#include +#include +#include #include #include #include #include +#include +#include "ecprivkey.asn1.h" #include "ecdsasignature.asn1.h" +#include "hacl_p256.h" + struct ecc_ctx { unsigned int curve_id; const struct ecc_curve *curve; + bool key_set; + bool is_private; bool pub_key_set; + u64 d[ECC_MAX_DIGITS]; /* privkey big integer */ u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */ u64 y[ECC_MAX_DIGITS]; struct ecc_point pub_key; @@ -148,7 +159,7 @@ static int 
ecdsa_verify(struct akcipher_request *req) ssize_t diff; int ret; - if (unlikely(!ctx->pub_key_set)) + if (unlikely(!ctx->key_set)) return -EINVAL; buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL); @@ -175,9 +186,23 @@ static int ecdsa_verify(struct akcipher_request *req) memcpy(&rawhash, buffer + req->src_len, keylen); } - ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits); - - ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s); + if (strncmp(ctx->curve->name, "nist_256", 8) == 0) { + u8 pk[64]; + u8 r[32]; + u8 s[32]; + ecc_swap_digits(ctx->x, (u64*)pk, 4); + ecc_swap_digits(ctx->y, (u64*)(pk + 32), 4); + ecc_swap_digits(sig_ctx.r, (u64*)r, ctx->curve->g.ndigits); + ecc_swap_digits(sig_ctx.s, (u64*)s, ctx->curve->g.ndigits); + if (Hacl_P256_ecdsa_verif_without_hash(req->dst_len, rawhash, pk, r, s)) { + ret = 0; + } else { + ret = -EKEYREJECTED; + } + } else { + ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits); + ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s); + } error: kfree(buffer); @@ -185,6 +210,281 @@ static int ecdsa_verify(struct akcipher_request *req) return ret; } +static int _ecdsa_sign(struct ecc_ctx *ctx, const u64 *hash, const u64 *k, + struct ecdsa_signature_ctx *sig_ctx) +{ + unsigned int ndigits = ctx->curve->g.ndigits; + u64 rd_h[ECC_MAX_DIGITS]; + u64 kinv[ECC_MAX_DIGITS]; + /* we can use s as y coordinate here as we're discarding it anyway later */ + struct ecc_point K = ECC_POINT_INIT(sig_ctx->r, sig_ctx->s, ndigits); + + ecc_point_mult(&K, &ctx->curve->g, k, NULL, ctx->curve, ndigits); + + if (vli_cmp(sig_ctx->r, ctx->curve->n, ndigits) >= 0) + vli_sub(sig_ctx->r, sig_ctx->r, ctx->curve->n, ndigits); + + if (vli_is_zero(sig_ctx->r, ndigits)) + return -EAGAIN; + + vli_mod_mult_slow(rd_h, sig_ctx->r, ctx->d, ctx->curve->n, ndigits); + vli_mod_add(rd_h, rd_h, hash, ctx->curve->n, ndigits); + vli_mod_inv(kinv, k, ctx->curve->n, ndigits); + vli_mod_mult_slow(sig_ctx->s, kinv, rd_h, ctx->curve->n, ndigits); + 
+ if (vli_is_zero(sig_ctx->s, ndigits)) + return -EAGAIN; + + memzero_explicit(rd_h, sizeof(rd_h)); + memzero_explicit(kinv, sizeof(kinv)); + return 0; +} + +/* RFC 6979 p. 3.1.1 selects the same hash function that was used to + * process the input message. However, we don't have this information in + * the context and can only guess based on the size of the hash. This is + * OK, because p. 3.6 states that a different function may be used of the + * same (or higher) strength. Therefore, we pick SHA-512 as the default + * case. The only disadvantage would be that the KAT vectors from the RFC + * will not be verifiable. Userspace should not depend on it anyway as any + * higher priority ECDSA crypto drivers may actually not implement + * deterministic signatures + */ +static struct crypto_rng *rfc6979_alloc_rng(struct ecc_ctx *ctx, + size_t hash_size, u8 *rawhash) +{ + u64 seed[2 * ECC_MAX_DIGITS]; + unsigned int ndigits = ctx->curve->g.ndigits; + struct drbg_string entropy, pers = {0}; + struct drbg_test_data seed_data; + const char *alg; + struct crypto_rng *rng; + int err; + + switch (hash_size) { + case SHA1_DIGEST_SIZE: + alg = "drbg_nopr_hmac_sha1"; + break; + case SHA256_DIGEST_SIZE: + alg = "drbg_nopr_hmac_sha256"; + break; + case SHA384_DIGEST_SIZE: + alg = "drbg_nopr_hmac_sha384"; + break; + default: + alg = "drbg_nopr_hmac_sha512"; + } + + rng = crypto_alloc_rng(alg, 0, 0); + if (IS_ERR(rng)) + return rng; + + ecc_swap_digits(ctx->d, seed, ndigits); + memcpy(seed + ndigits, rawhash, ndigits << ECC_DIGITS_TO_BYTES_SHIFT); + drbg_string_fill(&entropy, (u8 *)seed, (ndigits * 2) << ECC_DIGITS_TO_BYTES_SHIFT); + seed_data.testentropy = &entropy; + err = crypto_drbg_reset_test(rng, &pers, &seed_data); + if (err) { + crypto_free_rng(rng); + return ERR_PTR(err); + } + + return rng; +} + +static int rfc6979_gen_k(struct ecc_ctx *ctx, struct crypto_rng *rng, u64 *k) +{ + unsigned int ndigits = ctx->curve->g.ndigits; + u8 K[ECC_MAX_BYTES]; + int ret; + + do { + ret = 
crypto_rng_get_bytes(rng, K, ndigits << ECC_DIGITS_TO_BYTES_SHIFT); + if (ret) + return ret; + + ecc_swap_digits((u64 *)K, k, ndigits); + } while (vli_cmp(k, ctx->curve->n, ndigits) >= 0); + + memzero_explicit(K, sizeof(K)); + return 0; +} + +static int rfc6979_gen_k_hacl(struct ecc_ctx *ctx, struct crypto_rng *rng, u8 *k) +{ + unsigned int ndigits = ctx->curve->g.ndigits; + int ret; + + do { + ret = crypto_rng_get_bytes(rng, k, ndigits << ECC_DIGITS_TO_BYTES_SHIFT); + if (ret) + return ret; + + } while (!Hacl_P256_validate_private_key(k)); + + return 0; +} + +/* scratch buffer should be at least ECC_MAX_BYTES */ +static int asn1_encode_signature_sg(struct akcipher_request *req, + struct ecdsa_signature_ctx *sig_ctx, + u8 *scratch) +{ + unsigned int ndigits = sig_ctx->curve->g.ndigits; + unsigned int r_bits = vli_num_bits(sig_ctx->r, ndigits); + unsigned int s_bits = vli_num_bits(sig_ctx->s, ndigits); + struct sg_mapping_iter miter; + unsigned int nents; + u8 *buf, *p; + size_t needed = 2; /* tag and len for the top ASN1 sequence */ + + needed += 2; /* tag and len for r as an ASN1 integer */ + needed += BITS_TO_BYTES(r_bits); + if (r_bits % 8 == 0) + /* leftmost bit is set, so need another byte for 0x00 to make the + * integer positive + */ + needed++; + + needed += 2; /* tag and len for s as an ASN1 integer */ + needed += BITS_TO_BYTES(s_bits); + if (s_bits % 8 == 0) + /* leftmost bit is set, so need another byte for 0x00 to make the + * integer positive + */ + needed++; + + if (req->dst_len < needed) { + req->dst_len = needed; + return -EOVERFLOW; + } + + nents = sg_nents_for_len(req->dst, needed); + if (nents == 1) { + sg_miter_start(&miter, req->dst, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG); + sg_miter_next(&miter); + buf = miter.addr; + } else { + buf = kmalloc(needed, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + /* we will begin from the end */ + ecc_swap_digits(sig_ctx->s, (u64 *)scratch, ndigits); + p = buf + needed - BITS_TO_BYTES(s_bits); + 
memcpy(p, scratch + + (ndigits << ECC_DIGITS_TO_BYTES_SHIFT) - BITS_TO_BYTES(s_bits), + BITS_TO_BYTES(s_bits)); + if (s_bits % 8 == 0) { + p--; + *p = 0; + } + p -= 2; + p[0] = ASN1_INT; + p[1] = (s_bits % 8 == 0) ? BITS_TO_BYTES(s_bits) + 1 : BITS_TO_BYTES(s_bits); + + ecc_swap_digits(sig_ctx->r, (u64 *)scratch, ndigits); + p -= BITS_TO_BYTES(r_bits); + memcpy(p, scratch + + (ndigits << ECC_DIGITS_TO_BYTES_SHIFT) - BITS_TO_BYTES(r_bits), + BITS_TO_BYTES(r_bits)); + if (r_bits % 8 == 0) { + p--; + *p = 0; + } + p -= 2; + p[0] = ASN1_INT; + p[1] = (r_bits % 8 == 0) ? BITS_TO_BYTES(r_bits) + 1 : BITS_TO_BYTES(r_bits); + + buf[0] = ASN1_CONS_BIT | ASN1_SEQ; + buf[1] = (needed - 2) & 0xff; + + if (nents == 1) + sg_miter_stop(&miter); + else { + sg_copy_from_buffer(req->dst, nents, buf, needed); + kfree(buf); + } + req->dst_len = needed; + + return 0; +} + +static int ecdsa_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + size_t keylen = ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + u8 rawhash_k[ECC_MAX_BYTES]; + u64 hash[ECC_MAX_DIGITS]; + struct ecdsa_signature_ctx sig_ctx = { + .curve = ctx->curve, + }; + struct crypto_rng *rng; + ssize_t diff; + int ret; + + /* if the hash is shorter then we will add leading zeros to fit to ndigits */ + diff = keylen - req->src_len; + if (diff >= 0) { + if (diff) + memset(rawhash_k, 0, diff); + sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len), + &rawhash_k[diff], req->src_len); + } else if (diff < 0) { + /* given hash is longer, we take the left-most bytes */ + sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len), + rawhash_k, req->src_len); + } + + rng = rfc6979_alloc_rng(ctx, req->src_len, rawhash_k); + if (IS_ERR(rng)) + return PTR_ERR(rng); + + if (strncmp(ctx->curve->name, "nist_256", 8) == 0) { + u8 private_key[32]; + u8 signature[64]; + u8 nonce[32]; + ecc_swap_digits(ctx->d, 
(u64*)private_key, 4); + ret = rfc6979_gen_k_hacl(ctx, rng, nonce); + if (ret) { + goto alloc_rng; + } + /* The signing function also checks that the scalars are valid. */ + /* XXX: Is the value blinded already or should this be done here? */ + do { + if (Hacl_P256_ecdsa_sign_p256_without_hash(signature, req->dst_len, + rawhash_k, private_key, nonce)) { + ret = 0; + } else { + ret = -EAGAIN; + } + } while (ret == -EAGAIN); + /* Encode the signature. Note that this could be more efficient when + done directly and not first converting it to u64s. */ + ecc_swap_digits(signature, sig_ctx.r, 4); + ecc_swap_digits(signature + 32, sig_ctx.s, 4); + ret = asn1_encode_signature_sg(req, &sig_ctx, rawhash_k); + } else { + ecc_swap_digits((u64 *)rawhash_k, hash, ctx->curve->g.ndigits); + do { + ret = rfc6979_gen_k(ctx, rng, (u64 *)rawhash_k); + if (ret) + goto alloc_rng; + + ret = _ecdsa_sign(ctx, hash, (u64 *)rawhash_k, &sig_ctx); + } while (ret == -EAGAIN); + memzero_explicit(rawhash_k, sizeof(rawhash_k)); + + ret = asn1_encode_signature_sg(req, &sig_ctx, rawhash_k); + } + +alloc_rng: + crypto_free_rng(rng); + return ret; +} + static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id) { ctx->curve_id = curve_id; @@ -198,7 +498,9 @@ static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id) static void ecdsa_ecc_ctx_deinit(struct ecc_ctx *ctx) { - ctx->pub_key_set = false; + ctx->key_set = false; + if (ctx->is_private) + memzero_explicit(ctx->d, sizeof(ctx->d)); } static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx) @@ -246,11 +548,107 @@ static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsig ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits); ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key); - ctx->pub_key_set = ret == 0; + ctx->key_set = ret == 0; + ctx->is_private = false; + return ret; } +int ecc_get_priv_key(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct 
ecc_ctx *ctx = context; + size_t dlen = ctx->curve->g.ndigits * sizeof(u64); + ssize_t diff = vlen - dlen; + const char *d = value; + u8 priv[ECC_MAX_BYTES]; + + /* diff = 0: 'value' has exactly the right size + * diff > 0: 'value' has too many bytes; one leading zero is allowed that + * makes the value a positive integer; error on more + * diff < 0: 'value' is missing leading zeros, which we add + */ + if (diff > 0) { + /* skip over leading zeros that make 'value' a positive int */ + if (*d == 0) { + vlen -= 1; + diff--; + d++; + } + if (diff) + return -EINVAL; + } + if (-diff >= dlen) + return -EINVAL; + + if (diff) { + /* leading zeros not given in 'value' */ + memset(priv, 0, -diff); + } + + memcpy(&priv[-diff], d, vlen); + + ecc_swap_digits((u64 *)priv, ctx->d, ctx->curve->g.ndigits); + memzero_explicit(priv, sizeof(priv)); + return ecc_is_key_valid(ctx->curve_id, ctx->curve->g.ndigits, ctx->d, dlen); +} + +int ecc_get_priv_params(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct ecc_ctx *ctx = context; + + switch (look_up_OID(value, vlen)) { + case OID_id_prime192v1: + return (ctx->curve_id == ECC_CURVE_NIST_P192) ? 0 : -EINVAL; + case OID_id_prime256v1: + return (ctx->curve_id == ECC_CURVE_NIST_P256) ? 0 : -EINVAL; + case OID_id_ansip384r1: + return (ctx->curve_id == ECC_CURVE_NIST_P384) ? 
0 : -EINVAL; + default: + break; + } + + return -EINVAL; +} + +int ecc_get_priv_version(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + if (vlen == 1) { + if (*((u8 *)value) == 1) + return 0; + } + + return -EINVAL; +} + +static int ecdsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret; + + ret = ecdsa_ecc_ctx_reset(ctx); + if (ret < 0) + return ret; + + ret = asn1_ber_decoder(&ecprivkey_decoder, ctx, key, keylen); + if (ret) + return ret; + + ecc_point_mult(&ctx->pub_key, &ctx->curve->g, ctx->d, NULL, ctx->curve, + ctx->curve->g.ndigits); + ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key); + if (ret) + return ret; + + ctx->key_set = ret == 0; + ctx->is_private = true; + + return ret; + } + static void ecdsa_exit_tfm(struct crypto_akcipher *tfm) { struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); @@ -262,7 +660,22 @@ static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm) { struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); - return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + if (!ctx->key_set) + return 0; + + if (ctx->is_private) { + /* see ecdsasignature.asn1 + * for a max 384 bit curve we would only need 1 byte length + * ASN1 encoding for the top level sequence and r,s integers + * 1 byte sequence tag + 1 byte sequence length (max 102 for 384 + * bit curve) + 2 (for r and s) * (1 byte integer tag + 1 byte + * integer length (max 49 for 384 bit curve) + 1 zero byte (if r + * or s has leftmost bit set) + sizeof(r or s) + */ + return 2 + 2 * (3 + (ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT)); + } + + return ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; } static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm) @@ -273,7 +686,9 @@ static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm) } static struct akcipher_alg ecdsa_nist_p384 = { + .sign = ecdsa_sign, .verify = ecdsa_verify, + 
.set_priv_key = ecdsa_set_priv_key, .set_pub_key = ecdsa_set_pub_key, .max_size = ecdsa_max_size, .init = ecdsa_nist_p384_init_tfm, @@ -295,7 +710,9 @@ static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm) } static struct akcipher_alg ecdsa_nist_p256 = { + .sign = ecdsa_sign, .verify = ecdsa_verify, + .set_priv_key = ecdsa_set_priv_key, .set_pub_key = ecdsa_set_pub_key, .max_size = ecdsa_max_size, .init = ecdsa_nist_p256_init_tfm, @@ -317,7 +734,9 @@ static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm) } static struct akcipher_alg ecdsa_nist_p192 = { + .sign = ecdsa_sign, .verify = ecdsa_verify, + .set_priv_key = ecdsa_set_priv_key, .set_pub_key = ecdsa_set_pub_key, .max_size = ecdsa_max_size, .init = ecdsa_nist_p192_init_tfm, diff --git a/crypto/ecprivkey.asn1 b/crypto/ecprivkey.asn1 new file mode 100644 index 0000000000000..92e7d7d0703ce --- /dev/null +++ b/crypto/ecprivkey.asn1 @@ -0,0 +1,6 @@ +ECPrivateKey ::= SEQUENCE { + version INTEGER ({ ecc_get_priv_version }), + privateKey OCTET STRING ({ ecc_get_priv_key }), + parameters [0] OBJECT IDENTIFIER OPTIONAL ({ ecc_get_priv_params }), + publicKey [1] BIT STRING OPTIONAL +} diff --git a/crypto/hacl_bignum.c b/crypto/hacl_bignum.c new file mode 100644 index 0000000000000..7d3b0d8ed6b88 --- /dev/null +++ b/crypto/hacl_bignum.c @@ -0,0 +1,2141 @@ +/* GPLv2 or MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + */ + +#include "hacl_bignum.h" + +void +Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( + uint32_t aLen, + uint32_t *a, + uint32_t *b, + uint32_t *tmp, + uint32_t *res +) +{ + if (aLen < 32U || aLen % 2U == 1U) + { + Hacl_Bignum_Multiplication_bn_mul_u32(aLen, a, aLen, b, res); + return; + } + uint32_t len2 = aLen / 2U; + uint32_t *a0 = a; + uint32_t *a1 = a + len2; + uint32_t *b0 = b; + uint32_t *b1 = b + len2; + uint32_t *t0 = tmp; + uint32_t *t1 = tmp + len2; + uint32_t *tmp_ = tmp + aLen; + uint32_t c0 
= Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_); + uint32_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0); + for (uint32_t i = 0U; i < len2; i++) + { + uint32_t *os = t0; + uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]); + os[i] = x; + } + KRML_MAYBE_UNUSED_VAR(c10); + uint32_t c00 = c0; + uint32_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b0, b1, tmp_); + uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b1, b0, t1); + for (uint32_t i = 0U; i < len2; i++) + { + uint32_t *os = t1; + uint32_t x = ((0U - c010) & t1[i]) | (~(0U - c010) & tmp_[i]); + os[i] = x; + } + KRML_MAYBE_UNUSED_VAR(c1); + uint32_t c11 = c010; + uint32_t *t23 = tmp + aLen; + uint32_t *tmp1 = tmp + aLen + aLen; + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len2, t0, t1, tmp1, t23); + uint32_t *r01 = res; + uint32_t *r23 = res + aLen; + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len2, a0, b0, tmp1, r01); + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len2, a1, b1, tmp1, r23); + uint32_t *r011 = res; + uint32_t *r231 = res + aLen; + uint32_t *t01 = tmp; + uint32_t *t231 = tmp + aLen; + uint32_t *t45 = tmp + 2U * aLen; + uint32_t *t67 = tmp + 3U * aLen; + uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01); + uint32_t c_sign = c00 ^ c11; + uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t67); + uint32_t c31 = c2 - c3; + uint32_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, t01, t231, t45); + uint32_t c41 = c2 + c4; + uint32_t mask = 0U - c_sign; + for (uint32_t i = 0U; i < aLen; i++) + { + uint32_t *os = t45; + uint32_t x = (mask & t45[i]) | (~mask & t67[i]); + os[i] = x; + } + uint32_t c5 = (mask & c41) | (~mask & c31); + uint32_t aLen2 = aLen / 2U; + uint32_t *r0 = res + aLen2; + uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0); + uint32_t c6 = r10; + uint32_t c60 = c6; + uint32_t c7 = c5 + c60; + uint32_t *r = res + aLen + aLen2; + uint32_t c01 = 
Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r); + uint32_t r1; + if (1U < aLen + aLen - (aLen + aLen2)) + { + uint32_t *a11 = r + 1U; + uint32_t *res1 = r + 1U; + uint32_t c = c01; + for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++) + { + uint32_t t11 = a11[4U * i]; + uint32_t *res_i0 = res1 + 4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i0); + uint32_t t110 = a11[4U * i + 1U]; + uint32_t *res_i1 = res1 + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, 0U, res_i1); + uint32_t t111 = a11[4U * i + 2U]; + uint32_t *res_i2 = res1 + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, 0U, res_i2); + uint32_t t112 = a11[4U * i + 3U]; + uint32_t *res_i = res1 + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, 0U, res_i); + } + for + (uint32_t + i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U; + i + < aLen + aLen - (aLen + aLen2) - 1U; + i++) + { + uint32_t t11 = a11[i]; + uint32_t *res_i = res1 + i; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i); + } + uint32_t c110 = c; + r1 = c110; + } + else + { + r1 = c01; + } + uint32_t c8 = r1; + uint32_t c = c8; + uint32_t c9 = c; + KRML_MAYBE_UNUSED_VAR(c9); +} + +void +Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( + uint32_t aLen, + uint64_t *a, + uint64_t *b, + uint64_t *tmp, + uint64_t *res +) +{ + if (aLen < 32U || aLen % 2U == 1U) + { + Hacl_Bignum_Multiplication_bn_mul_u64(aLen, a, aLen, b, res); + return; + } + uint32_t len2 = aLen / 2U; + uint64_t *a0 = a; + uint64_t *a1 = a + len2; + uint64_t *b0 = b; + uint64_t *b1 = b + len2; + uint64_t *t0 = tmp; + uint64_t *t1 = tmp + len2; + uint64_t *tmp_ = tmp + aLen; + uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_); + uint64_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0); + for (uint32_t i = 0U; i < len2; i++) + { + uint64_t *os = t0; + uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]); + os[i] = x; + } + 
KRML_MAYBE_UNUSED_VAR(c10); + uint64_t c00 = c0; + uint64_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b0, b1, tmp_); + uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b1, b0, t1); + for (uint32_t i = 0U; i < len2; i++) + { + uint64_t *os = t1; + uint64_t x = ((0ULL - c010) & t1[i]) | (~(0ULL - c010) & tmp_[i]); + os[i] = x; + } + KRML_MAYBE_UNUSED_VAR(c1); + uint64_t c11 = c010; + uint64_t *t23 = tmp + aLen; + uint64_t *tmp1 = tmp + aLen + aLen; + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len2, t0, t1, tmp1, t23); + uint64_t *r01 = res; + uint64_t *r23 = res + aLen; + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len2, a0, b0, tmp1, r01); + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len2, a1, b1, tmp1, r23); + uint64_t *r011 = res; + uint64_t *r231 = res + aLen; + uint64_t *t01 = tmp; + uint64_t *t231 = tmp + aLen; + uint64_t *t45 = tmp + 2U * aLen; + uint64_t *t67 = tmp + 3U * aLen; + uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01); + uint64_t c_sign = c00 ^ c11; + uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t67); + uint64_t c31 = c2 - c3; + uint64_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, t01, t231, t45); + uint64_t c41 = c2 + c4; + uint64_t mask = 0ULL - c_sign; + for (uint32_t i = 0U; i < aLen; i++) + { + uint64_t *os = t45; + uint64_t x = (mask & t45[i]) | (~mask & t67[i]); + os[i] = x; + } + uint64_t c5 = (mask & c41) | (~mask & c31); + uint32_t aLen2 = aLen / 2U; + uint64_t *r0 = res + aLen2; + uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0); + uint64_t c6 = r10; + uint64_t c60 = c6; + uint64_t c7 = c5 + c60; + uint64_t *r = res + aLen + aLen2; + uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r); + uint64_t r1; + if (1U < aLen + aLen - (aLen + aLen2)) + { + uint64_t *a11 = r + 1U; + uint64_t *res1 = r + 1U; + uint64_t c = c01; + for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++) + { + uint64_t t11 = 
a11[4U * i]; + uint64_t *res_i0 = res1 + 4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i0); + uint64_t t110 = a11[4U * i + 1U]; + uint64_t *res_i1 = res1 + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, 0ULL, res_i1); + uint64_t t111 = a11[4U * i + 2U]; + uint64_t *res_i2 = res1 + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, 0ULL, res_i2); + uint64_t t112 = a11[4U * i + 3U]; + uint64_t *res_i = res1 + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, 0ULL, res_i); + } + for + (uint32_t + i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U; + i + < aLen + aLen - (aLen + aLen2) - 1U; + i++) + { + uint64_t t11 = a11[i]; + uint64_t *res_i = res1 + i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i); + } + uint64_t c110 = c; + r1 = c110; + } + else + { + r1 = c01; + } + uint64_t c8 = r1; + uint64_t c = c8; + uint64_t c9 = c; + KRML_MAYBE_UNUSED_VAR(c9); +} + +void +Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32( + uint32_t aLen, + uint32_t *a, + uint32_t *tmp, + uint32_t *res +) +{ + if (aLen < 32U || aLen % 2U == 1U) + { + Hacl_Bignum_Multiplication_bn_sqr_u32(aLen, a, res); + return; + } + uint32_t len2 = aLen / 2U; + uint32_t *a0 = a; + uint32_t *a1 = a + len2; + uint32_t *t0 = tmp; + uint32_t *tmp_ = tmp + aLen; + uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_); + uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0); + for (uint32_t i = 0U; i < len2; i++) + { + uint32_t *os = t0; + uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]); + os[i] = x; + } + KRML_MAYBE_UNUSED_VAR(c1); + uint32_t c00 = c0; + KRML_MAYBE_UNUSED_VAR(c00); + uint32_t *t23 = tmp + aLen; + uint32_t *tmp1 = tmp + aLen + aLen; + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, t0, tmp1, t23); + uint32_t *r01 = res; + uint32_t *r23 = res + aLen; + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, a0, tmp1, r01); + 
Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, a1, tmp1, r23); + uint32_t *r011 = res; + uint32_t *r231 = res + aLen; + uint32_t *t01 = tmp; + uint32_t *t231 = tmp + aLen; + uint32_t *t45 = tmp + 2U * aLen; + uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01); + uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t45); + uint32_t c5 = c2 - c3; + uint32_t aLen2 = aLen / 2U; + uint32_t *r0 = res + aLen2; + uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0); + uint32_t c4 = r10; + uint32_t c6 = c4; + uint32_t c7 = c5 + c6; + uint32_t *r = res + aLen + aLen2; + uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r); + uint32_t r1; + if (1U < aLen + aLen - (aLen + aLen2)) + { + uint32_t *a11 = r + 1U; + uint32_t *res1 = r + 1U; + uint32_t c = c01; + for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++) + { + uint32_t t1 = a11[4U * i]; + uint32_t *res_i0 = res1 + 4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i0); + uint32_t t10 = a11[4U * i + 1U]; + uint32_t *res_i1 = res1 + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, 0U, res_i1); + uint32_t t11 = a11[4U * i + 2U]; + uint32_t *res_i2 = res1 + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i2); + uint32_t t12 = a11[4U * i + 3U]; + uint32_t *res_i = res1 + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, 0U, res_i); + } + for + (uint32_t + i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U; + i + < aLen + aLen - (aLen + aLen2) - 1U; + i++) + { + uint32_t t1 = a11[i]; + uint32_t *res_i = res1 + i; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i); + } + uint32_t c10 = c; + r1 = c10; + } + else + { + r1 = c01; + } + uint32_t c8 = r1; + uint32_t c = c8; + uint32_t c9 = c; + KRML_MAYBE_UNUSED_VAR(c9); +} + +void +Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64( + uint32_t aLen, + uint64_t *a, + uint64_t *tmp, + uint64_t *res +) +{ + 
if (aLen < 32U || aLen % 2U == 1U) + { + Hacl_Bignum_Multiplication_bn_sqr_u64(aLen, a, res); + return; + } + uint32_t len2 = aLen / 2U; + uint64_t *a0 = a; + uint64_t *a1 = a + len2; + uint64_t *t0 = tmp; + uint64_t *tmp_ = tmp + aLen; + uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_); + uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0); + for (uint32_t i = 0U; i < len2; i++) + { + uint64_t *os = t0; + uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]); + os[i] = x; + } + KRML_MAYBE_UNUSED_VAR(c1); + uint64_t c00 = c0; + KRML_MAYBE_UNUSED_VAR(c00); + uint64_t *t23 = tmp + aLen; + uint64_t *tmp1 = tmp + aLen + aLen; + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, t0, tmp1, t23); + uint64_t *r01 = res; + uint64_t *r23 = res + aLen; + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, a0, tmp1, r01); + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, a1, tmp1, r23); + uint64_t *r011 = res; + uint64_t *r231 = res + aLen; + uint64_t *t01 = tmp; + uint64_t *t231 = tmp + aLen; + uint64_t *t45 = tmp + 2U * aLen; + uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01); + uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t45); + uint64_t c5 = c2 - c3; + uint32_t aLen2 = aLen / 2U; + uint64_t *r0 = res + aLen2; + uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0); + uint64_t c4 = r10; + uint64_t c6 = c4; + uint64_t c7 = c5 + c6; + uint64_t *r = res + aLen + aLen2; + uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r); + uint64_t r1; + if (1U < aLen + aLen - (aLen + aLen2)) + { + uint64_t *a11 = r + 1U; + uint64_t *res1 = r + 1U; + uint64_t c = c01; + for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++) + { + uint64_t t1 = a11[4U * i]; + uint64_t *res_i0 = res1 + 4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0); + uint64_t t10 = a11[4U * i + 1U]; + uint64_t *res_i1 = res1 + 4U * i + 
1U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1); + uint64_t t11 = a11[4U * i + 2U]; + uint64_t *res_i2 = res1 + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2); + uint64_t t12 = a11[4U * i + 3U]; + uint64_t *res_i = res1 + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i); + } + for + (uint32_t + i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U; + i + < aLen + aLen - (aLen + aLen2) - 1U; + i++) + { + uint64_t t1 = a11[i]; + uint64_t *res_i = res1 + i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i); + } + uint64_t c10 = c; + r1 = c10; + } + else + { + r1 = c01; + } + uint64_t c8 = r1; + uint64_t c = c8; + uint64_t c9 = c; + KRML_MAYBE_UNUSED_VAR(c9); +} + +void +Hacl_Bignum_bn_add_mod_n_u32( + uint32_t len1, + uint32_t *n, + uint32_t *a, + uint32_t *b, + uint32_t *res +) +{ + uint32_t c0 = 0U; + for (uint32_t i = 0U; i < len1 / 4U; i++) + { + uint32_t t1 = a[4U * i]; + uint32_t t20 = b[4U * i]; + uint32_t *res_i0 = res + 4U * i; + c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0); + uint32_t t10 = a[4U * i + 1U]; + uint32_t t21 = b[4U * i + 1U]; + uint32_t *res_i1 = res + 4U * i + 1U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1); + uint32_t t11 = a[4U * i + 2U]; + uint32_t t22 = b[4U * i + 2U]; + uint32_t *res_i2 = res + 4U * i + 2U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2); + uint32_t t12 = a[4U * i + 3U]; + uint32_t t2 = b[4U * i + 3U]; + uint32_t *res_i = res + 4U * i + 3U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i); + } + for (uint32_t i = len1 / 4U * 4U; i < len1; i++) + { + uint32_t t1 = a[i]; + uint32_t t2 = b[i]; + uint32_t *res_i = res + i; + c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t2, res_i); + } + uint32_t c00 = c0; + KRML_CHECK_SIZE(sizeof (uint32_t), len1); + uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t)); + memset(tmp, 0U, len1 * sizeof 
(uint32_t)); + uint32_t c = 0U; + for (uint32_t i = 0U; i < len1 / 4U; i++) + { + uint32_t t1 = res[4U * i]; + uint32_t t20 = n[4U * i]; + uint32_t *res_i0 = tmp + 4U * i; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0); + uint32_t t10 = res[4U * i + 1U]; + uint32_t t21 = n[4U * i + 1U]; + uint32_t *res_i1 = tmp + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1); + uint32_t t11 = res[4U * i + 2U]; + uint32_t t22 = n[4U * i + 2U]; + uint32_t *res_i2 = tmp + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2); + uint32_t t12 = res[4U * i + 3U]; + uint32_t t2 = n[4U * i + 3U]; + uint32_t *res_i = tmp + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i); + } + for (uint32_t i = len1 / 4U * 4U; i < len1; i++) + { + uint32_t t1 = res[i]; + uint32_t t2 = n[i]; + uint32_t *res_i = tmp + i; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t2, res_i); + } + uint32_t c1 = c; + uint32_t c2 = c00 - c1; + for (uint32_t i = 0U; i < len1; i++) + { + uint32_t *os = res; + uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); + os[i] = x; + } +} + +void +Hacl_Bignum_bn_add_mod_n_u64( + uint32_t len1, + uint64_t *n, + uint64_t *a, + uint64_t *b, + uint64_t *res +) +{ + uint64_t c0 = 0ULL; + for (uint32_t i = 0U; i < len1 / 4U; i++) + { + uint64_t t1 = a[4U * i]; + uint64_t t20 = b[4U * i]; + uint64_t *res_i0 = res + 4U * i; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0); + uint64_t t10 = a[4U * i + 1U]; + uint64_t t21 = b[4U * i + 1U]; + uint64_t *res_i1 = res + 4U * i + 1U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1); + uint64_t t11 = a[4U * i + 2U]; + uint64_t t22 = b[4U * i + 2U]; + uint64_t *res_i2 = res + 4U * i + 2U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2); + uint64_t t12 = a[4U * i + 3U]; + uint64_t t2 = b[4U * i + 3U]; + uint64_t *res_i = res + 4U * i + 3U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, 
res_i); + } + for (uint32_t i = len1 / 4U * 4U; i < len1; i++) + { + uint64_t t1 = a[i]; + uint64_t t2 = b[i]; + uint64_t *res_i = res + i; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t2, res_i); + } + uint64_t c00 = c0; + KRML_CHECK_SIZE(sizeof (uint64_t), len1); + uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t)); + memset(tmp, 0U, len1 * sizeof (uint64_t)); + uint64_t c = 0ULL; + for (uint32_t i = 0U; i < len1 / 4U; i++) + { + uint64_t t1 = res[4U * i]; + uint64_t t20 = n[4U * i]; + uint64_t *res_i0 = tmp + 4U * i; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0); + uint64_t t10 = res[4U * i + 1U]; + uint64_t t21 = n[4U * i + 1U]; + uint64_t *res_i1 = tmp + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1); + uint64_t t11 = res[4U * i + 2U]; + uint64_t t22 = n[4U * i + 2U]; + uint64_t *res_i2 = tmp + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2); + uint64_t t12 = res[4U * i + 3U]; + uint64_t t2 = n[4U * i + 3U]; + uint64_t *res_i = tmp + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i); + } + for (uint32_t i = len1 / 4U * 4U; i < len1; i++) + { + uint64_t t1 = res[i]; + uint64_t t2 = n[i]; + uint64_t *res_i = tmp + i; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t2, res_i); + } + uint64_t c1 = c; + uint64_t c2 = c00 - c1; + for (uint32_t i = 0U; i < len1; i++) + { + uint64_t *os = res; + uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + os[i] = x; + } +} + +void +Hacl_Bignum_bn_sub_mod_n_u32( + uint32_t len1, + uint32_t *n, + uint32_t *a, + uint32_t *b, + uint32_t *res +) +{ + uint32_t c0 = 0U; + for (uint32_t i = 0U; i < len1 / 4U; i++) + { + uint32_t t1 = a[4U * i]; + uint32_t t20 = b[4U * i]; + uint32_t *res_i0 = res + 4U * i; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0); + uint32_t t10 = a[4U * i + 1U]; + uint32_t t21 = b[4U * i + 1U]; + uint32_t *res_i1 = res + 4U * i + 1U; + c0 = 
Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1); + uint32_t t11 = a[4U * i + 2U]; + uint32_t t22 = b[4U * i + 2U]; + uint32_t *res_i2 = res + 4U * i + 2U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2); + uint32_t t12 = a[4U * i + 3U]; + uint32_t t2 = b[4U * i + 3U]; + uint32_t *res_i = res + 4U * i + 3U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i); + } + for (uint32_t i = len1 / 4U * 4U; i < len1; i++) + { + uint32_t t1 = a[i]; + uint32_t t2 = b[i]; + uint32_t *res_i = res + i; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t2, res_i); + } + uint32_t c00 = c0; + KRML_CHECK_SIZE(sizeof (uint32_t), len1); + uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t)); + memset(tmp, 0U, len1 * sizeof (uint32_t)); + uint32_t c = 0U; + for (uint32_t i = 0U; i < len1 / 4U; i++) + { + uint32_t t1 = res[4U * i]; + uint32_t t20 = n[4U * i]; + uint32_t *res_i0 = tmp + 4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0); + uint32_t t10 = res[4U * i + 1U]; + uint32_t t21 = n[4U * i + 1U]; + uint32_t *res_i1 = tmp + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1); + uint32_t t11 = res[4U * i + 2U]; + uint32_t t22 = n[4U * i + 2U]; + uint32_t *res_i2 = tmp + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2); + uint32_t t12 = res[4U * i + 3U]; + uint32_t t2 = n[4U * i + 3U]; + uint32_t *res_i = tmp + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i); + } + for (uint32_t i = len1 / 4U * 4U; i < len1; i++) + { + uint32_t t1 = res[i]; + uint32_t t2 = n[i]; + uint32_t *res_i = tmp + i; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t2, res_i); + } + uint32_t c1 = c; + KRML_MAYBE_UNUSED_VAR(c1); + uint32_t c2 = 0U - c00; + for (uint32_t i = 0U; i < len1; i++) + { + uint32_t *os = res; + uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]); + os[i] = x; + } +} + +void +Hacl_Bignum_bn_sub_mod_n_u64( + uint32_t len1, + uint64_t 
*n, + uint64_t *a, + uint64_t *b, + uint64_t *res +) +{ + uint64_t c0 = 0ULL; + for (uint32_t i = 0U; i < len1 / 4U; i++) + { + uint64_t t1 = a[4U * i]; + uint64_t t20 = b[4U * i]; + uint64_t *res_i0 = res + 4U * i; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0); + uint64_t t10 = a[4U * i + 1U]; + uint64_t t21 = b[4U * i + 1U]; + uint64_t *res_i1 = res + 4U * i + 1U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1); + uint64_t t11 = a[4U * i + 2U]; + uint64_t t22 = b[4U * i + 2U]; + uint64_t *res_i2 = res + 4U * i + 2U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2); + uint64_t t12 = a[4U * i + 3U]; + uint64_t t2 = b[4U * i + 3U]; + uint64_t *res_i = res + 4U * i + 3U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i); + } + for (uint32_t i = len1 / 4U * 4U; i < len1; i++) + { + uint64_t t1 = a[i]; + uint64_t t2 = b[i]; + uint64_t *res_i = res + i; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t2, res_i); + } + uint64_t c00 = c0; + KRML_CHECK_SIZE(sizeof (uint64_t), len1); + uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t)); + memset(tmp, 0U, len1 * sizeof (uint64_t)); + uint64_t c = 0ULL; + for (uint32_t i = 0U; i < len1 / 4U; i++) + { + uint64_t t1 = res[4U * i]; + uint64_t t20 = n[4U * i]; + uint64_t *res_i0 = tmp + 4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0); + uint64_t t10 = res[4U * i + 1U]; + uint64_t t21 = n[4U * i + 1U]; + uint64_t *res_i1 = tmp + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1); + uint64_t t11 = res[4U * i + 2U]; + uint64_t t22 = n[4U * i + 2U]; + uint64_t *res_i2 = tmp + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2); + uint64_t t12 = res[4U * i + 3U]; + uint64_t t2 = n[4U * i + 3U]; + uint64_t *res_i = tmp + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i); + } + for (uint32_t i = len1 / 4U * 4U; i < len1; i++) + { + uint64_t t1 = 
res[i]; + uint64_t t2 = n[i]; + uint64_t *res_i = tmp + i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t2, res_i); + } /* tmp = (a - b) + n, full carry chain */ + uint64_t c1 = c; + KRML_MAYBE_UNUSED_VAR(c1); + uint64_t c2 = 0ULL - c00; /* c00 is the borrow of a - b above; c2 is an all-ones mask iff that subtraction underflowed */ + for (uint32_t i = 0U; i < len1; i++) + { + uint64_t *os = res; + uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]); /* constant-time select: res = underflow ? (a - b + n) : (a - b) */ + os[i] = x; + } +} + +/* Montgomery inverse limb: returns mu used as nInv by bn_mont_reduction_u32, i.e. mu * n0 == -1 (mod 2^32) so qj = mu * c[i] cancels the low limb of c + qj*n. Branch-free binary algorithm, 32 fixed iterations; n0 must be odd (callers validate via bit0 in bn_check_modulus_u32). */ +uint32_t Hacl_Bignum_ModInvLimb_mod_inv_uint32(uint32_t n0) +{ + uint32_t alpha = 2147483648U; /* 2^31 */ + uint32_t beta = n0; + uint32_t ub = 0U; + uint32_t vb = 0U; + ub = 1U; + vb = 0U; + for (uint32_t i = 0U; i < 32U; i++) + { + uint32_t us = ub; + uint32_t vs = vb; + uint32_t u_is_odd = 0U - (us & 1U); /* all-ones mask iff u odd */ + uint32_t beta_if_u_is_odd = beta & u_is_odd; + ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd); /* odd: ub = (us + beta)/2 via overflow-free (a^b)>>1 + (a&b); even: ub = us/2 */ + uint32_t alpha_if_u_is_odd = alpha & u_is_odd; + vb = (vs >> 1U) + alpha_if_u_is_odd; /* odd: vb = vs/2 + 2^31; even: vb = vs/2 */ + } + return vb; +} + +/* 64-bit variant of the above: returns mu with mu * n0 == -1 (mod 2^64); n0 must be odd. */ +uint64_t Hacl_Bignum_ModInvLimb_mod_inv_uint64(uint64_t n0) +{ + uint64_t alpha = 9223372036854775808ULL; /* 2^63 */ + uint64_t beta = n0; + uint64_t ub = 0ULL; + uint64_t vb = 0ULL; + ub = 1ULL; + vb = 0ULL; + for (uint32_t i = 0U; i < 64U; i++) + { + uint64_t us = ub; + uint64_t vs = vb; + uint64_t u_is_odd = 0ULL - (us & 1ULL); + uint64_t beta_if_u_is_odd = beta & u_is_odd; + ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd); /* same mask trick as the 32-bit version */ + uint64_t alpha_if_u_is_odd = alpha & u_is_odd; + vb = (vs >> 1U) + alpha_if_u_is_odd; + } + return vb; +} + +/* Constant-time modulus sanity check: returns all-ones iff n is odd (bit0 set) AND 1 < n; otherwise 0. */ +uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(one, 0U, len * sizeof (uint32_t)); + memset(one, 0U, len * sizeof (uint32_t)); /* NOTE(review): duplicated memset — harmless extraction artifact */ + one[0U] = 1U; + uint32_t bit0 = n[0U] & 1U; + uint32_t m0 = 0U - bit0; /* all-ones iff n is odd */ + uint32_t acc = 0U; + for (uint32_t i = 0U; i < len; i++) + { + uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); + uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); + acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); /* branchless limb-wise compare: acc ends all-ones iff 1 < n */ + }
+ uint32_t m1 = acc; + return m0 & m1; +} + +void +Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32( + uint32_t len, + uint32_t nBits, + uint32_t *n, + uint32_t *res +) +{ + memset(res, 0U, len * sizeof (uint32_t)); + uint32_t i = nBits / 32U; + uint32_t j = nBits % 32U; + res[i] = res[i] | 1U << j; + for (uint32_t i0 = 0U; i0 < 64U * len - nBits; i0++) + { + Hacl_Bignum_bn_add_mod_n_u32(len, n, res, res, res); + } +} + +static void +bn_mont_reduction_u32(uint32_t len, uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res) +{ + uint32_t c0 = 0U; + for (uint32_t i0 = 0U; i0 < len; i0++) + { + uint32_t qj = nInv * c[i0]; + uint32_t *res_j0 = c + i0; + uint32_t c1 = 0U; + for (uint32_t i = 0U; i < len / 4U; i++) + { + uint32_t a_i = n[4U * i]; + uint32_t *res_i0 = res_j0 + 4U * i; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0); + uint32_t a_i0 = n[4U * i + 1U]; + uint32_t *res_i1 = res_j0 + 4U * i + 1U; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1); + uint32_t a_i1 = n[4U * i + 2U]; + uint32_t *res_i2 = res_j0 + 4U * i + 2U; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2); + uint32_t a_i2 = n[4U * i + 3U]; + uint32_t *res_i = res_j0 + 4U * i + 3U; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i); + } + for (uint32_t i = len / 4U * 4U; i < len; i++) + { + uint32_t a_i = n[i]; + uint32_t *res_i = res_j0 + i; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i); + } + uint32_t r = c1; + uint32_t c10 = r; + uint32_t *resb = c + len + i0; + uint32_t res_j = c[len + i0]; + c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb); + } + memcpy(res, c + len, (len + len - len) * sizeof (uint32_t)); + uint32_t c00 = c0; + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(tmp, 0U, len * sizeof (uint32_t)); + uint32_t c1 = 0U; + for (uint32_t i = 0U; i < len / 4U; i++) + { + uint32_t t1 = res[4U * i]; + uint32_t t20 = n[4U * i]; + uint32_t 
*res_i0 = tmp + 4U * i; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0); + uint32_t t10 = res[4U * i + 1U]; + uint32_t t21 = n[4U * i + 1U]; + uint32_t *res_i1 = tmp + 4U * i + 1U; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1); + uint32_t t11 = res[4U * i + 2U]; + uint32_t t22 = n[4U * i + 2U]; + uint32_t *res_i2 = tmp + 4U * i + 2U; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2); + uint32_t t12 = res[4U * i + 3U]; + uint32_t t2 = n[4U * i + 3U]; + uint32_t *res_i = tmp + 4U * i + 3U; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i); + } + for (uint32_t i = len / 4U * 4U; i < len; i++) + { + uint32_t t1 = res[i]; + uint32_t t2 = n[i]; + uint32_t *res_i = tmp + i; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t2, res_i); + } + uint32_t c10 = c1; + uint32_t c2 = c00 - c10; + for (uint32_t i = 0U; i < len; i++) + { + uint32_t *os = res; + uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); + os[i] = x; + } +} + +void +Hacl_Bignum_Montgomery_bn_to_mont_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv, + uint32_t *r2, + uint32_t *a, + uint32_t *aM +) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(c, 0U, (len + len) * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len); + uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t)); + memset(tmp, 0U, 4U * len * sizeof (uint32_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp, c); + bn_mont_reduction_u32(len, n, nInv, c, aM); +} + +void +Hacl_Bignum_Montgomery_bn_from_mont_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv_u64, + uint32_t *aM, + uint32_t *a +) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *tmp = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(tmp, 0U, (len + len) * sizeof (uint32_t)); + memcpy(tmp, aM, len * sizeof (uint32_t)); + bn_mont_reduction_u32(len, n, nInv_u64, tmp, 
a); +} + +void +Hacl_Bignum_Montgomery_bn_mont_mul_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv_u64, + uint32_t *aM, + uint32_t *bM, + uint32_t *resM +) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(c, 0U, (len + len) * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len); + uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t)); + memset(tmp, 0U, 4U * len * sizeof (uint32_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c); + bn_mont_reduction_u32(len, n, nInv_u64, c, resM); +} + +void +Hacl_Bignum_Montgomery_bn_mont_sqr_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv_u64, + uint32_t *aM, + uint32_t *resM +) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(c, 0U, (len + len) * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len); + uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t)); + memset(tmp, 0U, 4U * len * sizeof (uint32_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c); + bn_mont_reduction_u32(len, n, nInv_u64, c, resM); +} + +uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(one, 0U, len * sizeof (uint64_t)); + memset(one, 0U, len * sizeof (uint64_t)); + one[0U] = 1ULL; + uint64_t bit0 = n[0U] & 1ULL; + uint64_t m0 = 0ULL - bit0; + uint64_t acc = 0ULL; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); + uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); + acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + } + uint64_t m1 = acc; + return m0 & m1; +} + +void +Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64( + uint32_t len, + uint32_t nBits, + uint64_t *n, + uint64_t *res +) 
+{ /* body of Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64: computes R^2 mod n for R = 2^(64*len), i.e. 2^(128*len) mod n, by setting res = 2^nBits and doubling mod n the remaining 128*len - nBits times. Assumes nBits <= 64*len (presumably the bit length of n — confirm against callers). */ + memset(res, 0U, len * sizeof (uint64_t)); + uint32_t i = nBits / 64U; + uint32_t j = nBits % 64U; + res[i] = res[i] | 1ULL << j; /* res = 2^nBits */ + for (uint32_t i0 = 0U; i0 < 128U * len - nBits; i0++) + { + Hacl_Bignum_bn_add_mod_n_u64(len, n, res, res, res); /* res = 2*res mod n */ + } +} + +/* Montgomery reduction: c has 2*len limbs; writes res = c * R^-1 mod n (len limbs), R = 2^(64*len). Requires nInv * n[0] == -1 (mod 2^64) (see Hacl_Bignum_ModInvLimb_mod_inv_uint64) so each qj zeroes the current low limb. c is used as scratch and clobbered. */ +static void +bn_mont_reduction_u64(uint32_t len, uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res) +{ + uint64_t c0 = 0ULL; + for (uint32_t i0 = 0U; i0 < len; i0++) + { + uint64_t qj = nInv * c[i0]; + uint64_t *res_j0 = c + i0; + uint64_t c1 = 0ULL; + for (uint32_t i = 0U; i < len / 4U; i++) + { + c1 = bn_mul_add4_u64(n+4*i,qj,res_j0+4*i,c1); /* NOTE(review): hand edit — the generator emits 4 unrolled Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i) calls here; bn_mul_add4_u64 is not defined in this file chunk. Confirm it performs res_j0[k] += n[k]*qj for k=0..3 with carry-in/carry-out c1, and note its argument order (a-ptr, qj, res-ptr, carry) differs from mul_wide_add2's (a, b, carry, res). */ + } + for (uint32_t i = len / 4U * 4U; i < len; i++) + { + uint64_t a_i = n[i]; + uint64_t *res_i = res_j0 + i; + c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i); /* scalar tail of the limb loop */ + } + uint64_t r = c1; + uint64_t c10 = r; + uint64_t *resb = c + len + i0; + uint64_t res_j = c[len + i0]; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb); /* fold the row carry into the high half of c */ + } + memcpy(res, c + len, (len + len - len) * sizeof (uint64_t)); /* copy high half; (len + len - len) == len (extraction artifact) */ + uint64_t c00 = c0; + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(tmp, 0U, len * sizeof (uint64_t)); + uint64_t c1 = 0ULL; /* final conditional subtraction: tmp = res - n (4x unrolled borrow chain), selected constant-time further below */ + for (uint32_t i = 0U; i < len / 4U; i++) + { + uint64_t t1 = res[4U * i]; + uint64_t t20 = n[4U * i]; + uint64_t *res_i0 = tmp + 4U * i; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0); + uint64_t t10 = res[4U * i + 1U]; + uint64_t t21 = n[4U * i + 1U]; + uint64_t *res_i1 = tmp + 4U * i + 1U; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1); + uint64_t t11 = res[4U * i + 2U]; + uint64_t t22 = n[4U * i + 2U]; + uint64_t *res_i2 = tmp + 4U * i + 2U; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2); + uint64_t t12 = res[4U * i + 3U]; + uint64_t t2 = n[4U * i + 3U]; + uint64_t *res_i = tmp + 4U * i + 3U; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i); + } + for (uint32_t i = len / 4U * 4U; i <
len; i++) + { + uint64_t t1 = res[i]; + uint64_t t2 = n[i]; + uint64_t *res_i = tmp + i; + c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t2, res_i); + } + uint64_t c10 = c1; + uint64_t c2 = c00 - c10; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t *os = res; + uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); + os[i] = x; + } +} + +void +Hacl_Bignum_Montgomery_bn_to_mont_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv, + uint64_t *r2, + uint64_t *a, + uint64_t *aM +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(c, 0U, (len + len) * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len); + uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t)); + memset(tmp, 0U, 4U * len * sizeof (uint64_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp, c); + bn_mont_reduction_u64(len, n, nInv, c, aM); +} + +void +Hacl_Bignum_Montgomery_bn_from_mont_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv_u64, + uint64_t *aM, + uint64_t *a +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *tmp = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(tmp, 0U, (len + len) * sizeof (uint64_t)); + memcpy(tmp, aM, len * sizeof (uint64_t)); + bn_mont_reduction_u64(len, n, nInv_u64, tmp, a); +} + +void +Hacl_Bignum_Montgomery_bn_mont_mul_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv_u64, + uint64_t *aM, + uint64_t *bM, + uint64_t *resM +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(c, 0U, (len + len) * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len); + uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t)); + memset(tmp, 0U, 4U * len * sizeof (uint64_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c); + bn_mont_reduction_u64(len, n, nInv_u64, c, resM); +} + +void 
+Hacl_Bignum_Montgomery_bn_mont_sqr_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv_u64, + uint64_t *aM, + uint64_t *resM +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(c, 0U, (len + len) * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len); + uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t)); + memset(tmp, 0U, 4U * len * sizeof (uint64_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c); + bn_mont_reduction_u64(len, n, nInv_u64, c, resM); +} + +void +Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv, + uint32_t *c, + uint32_t *res +) +{ + uint32_t c0 = 0U; + for (uint32_t i0 = 0U; i0 < len; i0++) + { + uint32_t qj = nInv * c[i0]; + uint32_t *res_j0 = c + i0; + uint32_t c1 = 0U; + for (uint32_t i = 0U; i < len / 4U; i++) + { + uint32_t a_i = n[4U * i]; + uint32_t *res_i0 = res_j0 + 4U * i; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0); + uint32_t a_i0 = n[4U * i + 1U]; + uint32_t *res_i1 = res_j0 + 4U * i + 1U; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1); + uint32_t a_i1 = n[4U * i + 2U]; + uint32_t *res_i2 = res_j0 + 4U * i + 2U; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2); + uint32_t a_i2 = n[4U * i + 3U]; + uint32_t *res_i = res_j0 + 4U * i + 3U; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i); + } + for (uint32_t i = len / 4U * 4U; i < len; i++) + { + uint32_t a_i = n[i]; + uint32_t *res_i = res_j0 + i; + c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i); + } + uint32_t r = c1; + uint32_t c10 = r; + uint32_t *resb = c + len + i0; + uint32_t res_j = c[len + i0]; + c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb); + } + memcpy(res, c + len, (len + len - len) * sizeof (uint32_t)); + uint32_t c00 = c0; + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *tmp = (uint32_t 
*)alloca(len * sizeof (uint32_t)); + memset(tmp, 0U, len * sizeof (uint32_t)); + uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, res, n, tmp); /* tmp = res - n */ + KRML_MAYBE_UNUSED_VAR(c1); + uint32_t m = 0U - c00; /* "almost" Montgomery: subtract n only when the accumulated top carry c00 is set; the result fits in len limbs but may still be >= n */ + for (uint32_t i = 0U; i < len; i++) + { + uint32_t *os = res; + uint32_t x = (m & tmp[i]) | (~m & res[i]); /* constant-time select between res and res - n */ + os[i] = x; + } +} + +/* resM = aM * bM * R^-1 (almost-)mod n, R = 2^(32*len). nInv_u64 is a uint32_t limb inverse despite the name (extraction artifact); scratch c (2*len) and tmp (4*len) live on the stack. */ +static void +bn_almost_mont_mul_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv_u64, + uint32_t *aM, + uint32_t *bM, + uint32_t *resM +) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(c, 0U, (len + len) * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len); + uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t)); + memset(tmp, 0U, 4U * len * sizeof (uint32_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c); /* c = aM * bM (2*len limbs) */ + Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM); +} + +/* resM = aM^2 * R^-1 (almost-)mod n — squaring specialisation of bn_almost_mont_mul_u32. */ +static void +bn_almost_mont_sqr_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv_u64, + uint32_t *aM, + uint32_t *resM +) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(c, 0U, (len + len) * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len); + uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t)); + memset(tmp, 0U, 4U * len * sizeof (uint32_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c); /* c = aM^2 */ + Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM); +} + +/* Almost-Montgomery reduction, 64-bit limbs: res = c * R^-1 with at most one conditional subtraction of n (result < 2^(64*len) but possibly >= n). Requires nInv * n[0] == -1 (mod 2^64); c is clobbered. */ +void +Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv, + uint64_t *c, + uint64_t *res +) +{ + uint64_t c0 = 0ULL; + for (uint32_t i0 = 0U; i0 < len; i0++) + { + uint64_t qj = nInv * c[i0]; + uint64_t *res_j0 = c + i0; + uint64_t c1 = 0ULL; + for (uint32_t i = 0U; i < len / 4U; i++) + { + c1 = bn_mul_add4_u64(n+4*i,qj,res_j0+4*i,c1); /* NOTE(review): same hand edit as bn_mont_reduction_u64 — bn_mul_add4_u64 is not defined in this chunk; confirm semantics and argument order against the generator's unrolled Hacl_Bignum_Base_mul_wide_add2_u64 calls. */ + } + 
for (uint32_t i = len / 4U * 4U; i < len; i++) + { + uint64_t a_i = n[i]; + uint64_t *res_i = res_j0 + i; + c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i); + } + uint64_t r = c1; + uint64_t c10 = r; + uint64_t *resb = c + len + i0; + uint64_t res_j = c[len + i0]; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb); + } + memcpy(res, c + len, (len + len - len) * sizeof (uint64_t)); + uint64_t c00 = c0; + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(tmp, 0U, len * sizeof (uint64_t)); + uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, res, n, tmp); + KRML_MAYBE_UNUSED_VAR(c1); + uint64_t m = 0ULL - c00; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t *os = res; + uint64_t x = (m & tmp[i]) | (~m & res[i]); + os[i] = x; + } +} + +static void +bn_almost_mont_mul_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv_u64, + uint64_t *aM, + uint64_t *bM, + uint64_t *resM +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(c, 0U, (len + len) * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len); + uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t)); + memset(tmp, 0U, 4U * len * sizeof (uint64_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c); + Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM); +} + +static void +bn_almost_mont_sqr_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv_u64, + uint64_t *aM, + uint64_t *resM +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(c, 0U, (len + len) * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len); + uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t)); + memset(tmp, 0U, 4U * len * sizeof (uint64_t)); + Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, 
aM, tmp, c); + Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM); +} + +uint32_t +Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( + uint32_t len, + uint32_t *n, + uint32_t *a, + uint32_t bBits, + uint32_t *b +) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(one, 0U, len * sizeof (uint32_t)); + memset(one, 0U, len * sizeof (uint32_t)); + one[0U] = 1U; + uint32_t bit0 = n[0U] & 1U; + uint32_t m0 = 0U - bit0; + uint32_t acc0 = 0U; + for (uint32_t i = 0U; i < len; i++) + { + uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); + uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); + acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + } + uint32_t m10 = acc0; + uint32_t m00 = m0 & m10; + uint32_t bLen; + if (bBits == 0U) + { + bLen = 1U; + } + else + { + bLen = (bBits - 1U) / 32U + 1U; + } + uint32_t m1; + if (bBits < 32U * bLen) + { + KRML_CHECK_SIZE(sizeof (uint32_t), bLen); + uint32_t *b2 = (uint32_t *)alloca(bLen * sizeof (uint32_t)); + memset(b2, 0U, bLen * sizeof (uint32_t)); + uint32_t i0 = bBits / 32U; + uint32_t j = bBits % 32U; + b2[i0] = b2[i0] | 1U << j; + uint32_t acc = 0U; + for (uint32_t i = 0U; i < bLen; i++) + { + uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); + uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); + acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + } + uint32_t res = acc; + m1 = res; + } + else + { + m1 = 0xFFFFFFFFU; + } + uint32_t acc = 0U; + for (uint32_t i = 0U; i < len; i++) + { + uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); + uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); + acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))); + } + uint32_t m2 = acc; + uint32_t m = m1 & m2; + return m00 & m; +} + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( + uint32_t len, + uint32_t *n, + uint32_t mu, + uint32_t *r2, + uint32_t *a, + uint32_t bBits, + uint32_t *b, 
+ uint32_t *res +) +{ + if (bBits < 200U) + { + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(aM, 0U, len * sizeof (uint32_t)); + Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(resM, 0U, len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *ctx = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(ctx, 0U, (len + len) * sizeof (uint32_t)); + memcpy(ctx, n, len * sizeof (uint32_t)); + memcpy(ctx + len, r2, len * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + uint32_t *ctx_r2 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); + for (uint32_t i = 0U; i < bBits; i++) + { + uint32_t i1 = i / 32U; + uint32_t j = i % 32U; + uint32_t tmp = b[i1]; + uint32_t bit = tmp >> j & 1U; + if (!(bit == 0U)) + { + uint32_t *ctx_n0 = ctx; + bn_almost_mont_mul_u32(len, ctx_n0, mu, resM, aM, resM); + } + uint32_t *ctx_n0 = ctx; + bn_almost_mont_sqr_u32(len, ctx_n0, mu, aM, aM); + } + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res); + return; + } + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(aM, 0U, len * sizeof (uint32_t)); + Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(resM, 0U, len * sizeof (uint32_t)); + uint32_t bLen; + if (bBits == 0U) + { + bLen = 1U; + } + else + { + bLen = (bBits - 1U) / 32U + 1U; + } + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *ctx = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(ctx, 0U, (len + len) * sizeof (uint32_t)); + memcpy(ctx, n, len * sizeof (uint32_t)); + memcpy(ctx + len, r2, len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), 
16U * len); + uint32_t *table = (uint32_t *)alloca(16U * len * sizeof (uint32_t)); + memset(table, 0U, 16U * len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(tmp, 0U, len * sizeof (uint32_t)); + uint32_t *t0 = table; + uint32_t *t1 = table + len; + uint32_t *ctx_n0 = ctx; + uint32_t *ctx_r20 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0); + memcpy(t1, aM, len * sizeof (uint32_t)); + KRML_MAYBE_FOR7(i, + 0U, + 7U, + 1U, + uint32_t *t11 = table + (i + 1U) * len; + uint32_t *ctx_n1 = ctx; + bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp); + memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t)); + uint32_t *t2 = table + (2U * i + 2U) * len; + uint32_t *ctx_n = ctx; + bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp); + memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t));); + if (bBits % 4U != 0U) + { + uint32_t i = bBits / 4U * 4U; + uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U); + uint32_t bits_l32 = bits_c; + const uint32_t *a_bits_l = table + bits_l32 * len; + memcpy(resM, (uint32_t *)a_bits_l, len * sizeof (uint32_t)); + } + else + { + uint32_t *ctx_n = ctx; + uint32_t *ctx_r2 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); + } + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *tmp0 = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(tmp0, 0U, len * sizeof (uint32_t)); + for (uint32_t i = 0U; i < bBits / 4U; i++) + { + KRML_MAYBE_FOR4(i0, + 0U, + 4U, + 1U, + uint32_t *ctx_n = ctx; + bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM);); + uint32_t k = bBits - bBits % 4U - 4U * i - 4U; + uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U); + uint32_t bits_l32 = bits_l; + const uint32_t *a_bits_l = table + bits_l32 * len; + memcpy(tmp0, (uint32_t *)a_bits_l, len * sizeof (uint32_t)); + uint32_t *ctx_n = ctx; + 
bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp0, resM); + } + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res); +} + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( + uint32_t len, + uint32_t *n, + uint32_t mu, + uint32_t *r2, + uint32_t *a, + uint32_t bBits, + uint32_t *b, + uint32_t *res +) +{ + if (bBits < 200U) + { + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(aM, 0U, len * sizeof (uint32_t)); + Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(resM, 0U, len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *ctx = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(ctx, 0U, (len + len) * sizeof (uint32_t)); + memcpy(ctx, n, len * sizeof (uint32_t)); + memcpy(ctx + len, r2, len * sizeof (uint32_t)); + uint32_t sw = 0U; + uint32_t *ctx_n = ctx; + uint32_t *ctx_r2 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); + for (uint32_t i0 = 0U; i0 < bBits; i0++) + { + uint32_t i1 = (bBits - i0 - 1U) / 32U; + uint32_t j = (bBits - i0 - 1U) % 32U; + uint32_t tmp = b[i1]; + uint32_t bit = tmp >> j & 1U; + uint32_t sw1 = bit ^ sw; + for (uint32_t i = 0U; i < len; i++) + { + uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]); + resM[i] = resM[i] ^ dummy; + aM[i] = aM[i] ^ dummy; + } + uint32_t *ctx_n0 = ctx; + bn_almost_mont_mul_u32(len, ctx_n0, mu, aM, resM, aM); + uint32_t *ctx_n1 = ctx; + bn_almost_mont_sqr_u32(len, ctx_n1, mu, resM, resM); + sw = bit; + } + uint32_t sw0 = sw; + for (uint32_t i = 0U; i < len; i++) + { + uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]); + resM[i] = resM[i] ^ dummy; + aM[i] = aM[i] ^ dummy; + } + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res); + return; + } + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *aM = (uint32_t 
*)alloca(len * sizeof (uint32_t)); + memset(aM, 0U, len * sizeof (uint32_t)); + Hacl_Bignum_Montgomery_bn_to_mont_u32(len, n, mu, r2, a, aM); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(resM, 0U, len * sizeof (uint32_t)); + uint32_t bLen; + if (bBits == 0U) + { + bLen = 1U; + } + else + { + bLen = (bBits - 1U) / 32U + 1U; + } + KRML_CHECK_SIZE(sizeof (uint32_t), len + len); + uint32_t *ctx = (uint32_t *)alloca((len + len) * sizeof (uint32_t)); + memset(ctx, 0U, (len + len) * sizeof (uint32_t)); + memcpy(ctx, n, len * sizeof (uint32_t)); + memcpy(ctx + len, r2, len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len); + uint32_t *table = (uint32_t *)alloca(16U * len * sizeof (uint32_t)); + memset(table, 0U, 16U * len * sizeof (uint32_t)); + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(tmp, 0U, len * sizeof (uint32_t)); + uint32_t *t0 = table; + uint32_t *t1 = table + len; + uint32_t *ctx_n0 = ctx; + uint32_t *ctx_r20 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0); + memcpy(t1, aM, len * sizeof (uint32_t)); + KRML_MAYBE_FOR7(i, + 0U, + 7U, + 1U, + uint32_t *t11 = table + (i + 1U) * len; + uint32_t *ctx_n1 = ctx; + bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp); + memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t)); + uint32_t *t2 = table + (2U * i + 2U) * len; + uint32_t *ctx_n = ctx; + bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp); + memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t));); + if (bBits % 4U != 0U) + { + uint32_t i0 = bBits / 4U * 4U; + uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U); + memcpy(resM, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t)); + KRML_MAYBE_FOR15(i1, + 0U, + 15U, + 1U, + uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U); + const uint32_t *res_j = table + (i1 + 1U) * len; + for 
(uint32_t i = 0U; i < len; i++) + { + uint32_t *os = resM; + uint32_t x = (c & res_j[i]) | (~c & resM[i]); + os[i] = x; + }); + } + else + { + uint32_t *ctx_n = ctx; + uint32_t *ctx_r2 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); + } + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *tmp0 = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(tmp0, 0U, len * sizeof (uint32_t)); + for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++) + { + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint32_t *ctx_n = ctx; + bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM);); + uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U; + uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U); + memcpy(tmp0, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t)); + KRML_MAYBE_FOR15(i1, + 0U, + 15U, + 1U, + uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U); + const uint32_t *res_j = table + (i1 + 1U) * len; + for (uint32_t i = 0U; i < len; i++) + { + uint32_t *os = tmp0; + uint32_t x = (c & res_j[i]) | (~c & tmp0[i]); + os[i] = x; + }); + uint32_t *ctx_n = ctx; + bn_almost_mont_mul_u32(len, ctx_n, mu, resM, tmp0, resM); + } + Hacl_Bignum_Montgomery_bn_from_mont_u32(len, n, mu, resM, res); +} + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32( + uint32_t len, + uint32_t nBits, + uint32_t *n, + uint32_t *a, + uint32_t bBits, + uint32_t *b, + uint32_t *res +) +{ + KRML_CHECK_SIZE(sizeof (uint32_t), len); + uint32_t *r2 = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(r2, 0U, len * sizeof (uint32_t)); + Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r2); + uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); + Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len, n, mu, r2, a, bBits, b, res); +} + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32( + uint32_t len, + uint32_t nBits, + uint32_t *n, + uint32_t *a, + uint32_t bBits, + uint32_t *b, + uint32_t *res +) +{ + KRML_CHECK_SIZE(sizeof 
(uint32_t), len); + uint32_t *r2 = (uint32_t *)alloca(len * sizeof (uint32_t)); + memset(r2, 0U, len * sizeof (uint32_t)); + Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r2); + uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); + Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(len, n, mu, r2, a, bBits, b, res); +} + +uint64_t +Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( + uint32_t len, + uint64_t *n, + uint64_t *a, + uint32_t bBits, + uint64_t *b +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(one, 0U, len * sizeof (uint64_t)); + memset(one, 0U, len * sizeof (uint64_t)); + one[0U] = 1ULL; + uint64_t bit0 = n[0U] & 1ULL; + uint64_t m0 = 0ULL - bit0; + uint64_t acc0 = 0ULL; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); + uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); + acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + } + uint64_t m10 = acc0; + uint64_t m00 = m0 & m10; + uint32_t bLen; + if (bBits == 0U) + { + bLen = 1U; + } + else + { + bLen = (bBits - 1U) / 64U + 1U; + } + uint64_t m1; + if (bBits < 64U * bLen) + { + KRML_CHECK_SIZE(sizeof (uint64_t), bLen); + uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t)); + memset(b2, 0U, bLen * sizeof (uint64_t)); + uint32_t i0 = bBits / 64U; + uint32_t j = bBits % 64U; + b2[i0] = b2[i0] | 1ULL << j; + uint64_t acc = 0ULL; + for (uint32_t i = 0U; i < bLen; i++) + { + uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); + uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); + acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + } + uint64_t res = acc; + m1 = res; + } + else + { + m1 = 0xFFFFFFFFFFFFFFFFULL; + } + uint64_t acc = 0ULL; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); + uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); + acc = (beq & 
acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + } + uint64_t m2 = acc; + uint64_t m = m1 & m2; + return m00 & m; +} + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( + uint32_t len, + uint64_t *n, + uint64_t mu, + uint64_t *r2, + uint64_t *a, + uint32_t bBits, + uint64_t *b, + uint64_t *res +) +{ + if (bBits < 200U) + { + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(aM, 0U, len * sizeof (uint64_t)); + Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(resM, 0U, len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *ctx = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(ctx, 0U, (len + len) * sizeof (uint64_t)); + memcpy(ctx, n, len * sizeof (uint64_t)); + memcpy(ctx + len, r2, len * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + uint64_t *ctx_r2 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); + for (uint32_t i = 0U; i < bBits; i++) + { + uint32_t i1 = i / 64U; + uint32_t j = i % 64U; + uint64_t tmp = b[i1]; + uint64_t bit = tmp >> j & 1ULL; + if (!(bit == 0ULL)) + { + uint64_t *ctx_n0 = ctx; + bn_almost_mont_mul_u64(len, ctx_n0, mu, resM, aM, resM); + } + uint64_t *ctx_n0 = ctx; + bn_almost_mont_sqr_u64(len, ctx_n0, mu, aM, aM); + } + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res); + return; + } + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(aM, 0U, len * sizeof (uint64_t)); + Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(resM, 0U, len * sizeof (uint64_t)); + uint32_t bLen; + if (bBits == 0U) + { + bLen = 1U; + } + else + { + bLen = (bBits - 1U) 
/ 64U + 1U; + } + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *ctx = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(ctx, 0U, (len + len) * sizeof (uint64_t)); + memcpy(ctx, n, len * sizeof (uint64_t)); + memcpy(ctx + len, r2, len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len); + uint64_t *table = (uint64_t *)alloca(16U * len * sizeof (uint64_t)); + memset(table, 0U, 16U * len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(tmp, 0U, len * sizeof (uint64_t)); + uint64_t *t0 = table; + uint64_t *t1 = table + len; + uint64_t *ctx_n0 = ctx; + uint64_t *ctx_r20 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0); + memcpy(t1, aM, len * sizeof (uint64_t)); + KRML_MAYBE_FOR7(i, + 0U, + 7U, + 1U, + uint64_t *t11 = table + (i + 1U) * len; + uint64_t *ctx_n1 = ctx; + bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp); + memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t)); + uint64_t *t2 = table + (2U * i + 2U) * len; + uint64_t *ctx_n = ctx; + bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp); + memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t));); + if (bBits % 4U != 0U) + { + uint32_t i = bBits / 4U * 4U; + uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U); + uint32_t bits_l32 = (uint32_t)bits_c; + const uint64_t *a_bits_l = table + bits_l32 * len; + memcpy(resM, (uint64_t *)a_bits_l, len * sizeof (uint64_t)); + } + else + { + uint64_t *ctx_n = ctx; + uint64_t *ctx_r2 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); + } + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *tmp0 = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(tmp0, 0U, len * sizeof (uint64_t)); + for (uint32_t i = 0U; i < bBits / 4U; i++) + { + KRML_MAYBE_FOR4(i0, + 0U, + 4U, + 1U, + uint64_t *ctx_n = ctx; + bn_almost_mont_sqr_u64(len, ctx_n, mu, 
resM, resM);); + uint32_t k = bBits - bBits % 4U - 4U * i - 4U; + uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U); + uint32_t bits_l32 = (uint32_t)bits_l; + const uint64_t *a_bits_l = table + bits_l32 * len; + memcpy(tmp0, (uint64_t *)a_bits_l, len * sizeof (uint64_t)); + uint64_t *ctx_n = ctx; + bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp0, resM); + } + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res); +} + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( + uint32_t len, + uint64_t *n, + uint64_t mu, + uint64_t *r2, + uint64_t *a, + uint32_t bBits, + uint64_t *b, + uint64_t *res +) +{ + if (bBits < 200U) + { + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(aM, 0U, len * sizeof (uint64_t)); + Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(resM, 0U, len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *ctx = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(ctx, 0U, (len + len) * sizeof (uint64_t)); + memcpy(ctx, n, len * sizeof (uint64_t)); + memcpy(ctx + len, r2, len * sizeof (uint64_t)); + uint64_t sw = 0ULL; + uint64_t *ctx_n = ctx; + uint64_t *ctx_r2 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); + for (uint32_t i0 = 0U; i0 < bBits; i0++) + { + uint32_t i1 = (bBits - i0 - 1U) / 64U; + uint32_t j = (bBits - i0 - 1U) % 64U; + uint64_t tmp = b[i1]; + uint64_t bit = tmp >> j & 1ULL; + uint64_t sw1 = bit ^ sw; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]); + resM[i] = resM[i] ^ dummy; + aM[i] = aM[i] ^ dummy; + } + uint64_t *ctx_n0 = ctx; + bn_almost_mont_mul_u64(len, ctx_n0, mu, aM, resM, aM); + uint64_t *ctx_n1 = ctx; + bn_almost_mont_sqr_u64(len, ctx_n1, mu, resM, resM); + sw = bit; + } 
+ uint64_t sw0 = sw; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]); + resM[i] = resM[i] ^ dummy; + aM[i] = aM[i] ^ dummy; + } + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res); + return; + } + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(aM, 0U, len * sizeof (uint64_t)); + Hacl_Bignum_Montgomery_bn_to_mont_u64(len, n, mu, r2, a, aM); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(resM, 0U, len * sizeof (uint64_t)); + uint32_t bLen; + if (bBits == 0U) + { + bLen = 1U; + } + else + { + bLen = (bBits - 1U) / 64U + 1U; + } + KRML_CHECK_SIZE(sizeof (uint64_t), len + len); + uint64_t *ctx = (uint64_t *)alloca((len + len) * sizeof (uint64_t)); + memset(ctx, 0U, (len + len) * sizeof (uint64_t)); + memcpy(ctx, n, len * sizeof (uint64_t)); + memcpy(ctx + len, r2, len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len); + uint64_t *table = (uint64_t *)alloca(16U * len * sizeof (uint64_t)); + memset(table, 0U, 16U * len * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(tmp, 0U, len * sizeof (uint64_t)); + uint64_t *t0 = table; + uint64_t *t1 = table + len; + uint64_t *ctx_n0 = ctx; + uint64_t *ctx_r20 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0); + memcpy(t1, aM, len * sizeof (uint64_t)); + KRML_MAYBE_FOR7(i, + 0U, + 7U, + 1U, + uint64_t *t11 = table + (i + 1U) * len; + uint64_t *ctx_n1 = ctx; + bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp); + memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t)); + uint64_t *t2 = table + (2U * i + 2U) * len; + uint64_t *ctx_n = ctx; + bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp); + memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t));); + if (bBits % 4U != 0U) + { + 
uint32_t i0 = bBits / 4U * 4U; + uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U); + memcpy(resM, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t)); + KRML_MAYBE_FOR15(i1, + 0U, + 15U, + 1U, + uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U)); + const uint64_t *res_j = table + (i1 + 1U) * len; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t *os = resM; + uint64_t x = (c & res_j[i]) | (~c & resM[i]); + os[i] = x; + }); + } + else + { + uint64_t *ctx_n = ctx; + uint64_t *ctx_r2 = ctx + len; + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); + } + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *tmp0 = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(tmp0, 0U, len * sizeof (uint64_t)); + for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++) + { + KRML_MAYBE_FOR4(i, + 0U, + 4U, + 1U, + uint64_t *ctx_n = ctx; + bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM);); + uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U; + uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U); + memcpy(tmp0, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t)); + KRML_MAYBE_FOR15(i1, + 0U, + 15U, + 1U, + uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U)); + const uint64_t *res_j = table + (i1 + 1U) * len; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t *os = tmp0; + uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + os[i] = x; + }); + uint64_t *ctx_n = ctx; + bn_almost_mont_mul_u64(len, ctx_n, mu, resM, tmp0, resM); + } + Hacl_Bignum_Montgomery_bn_from_mont_u64(len, n, mu, resM, res); +} + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64( + uint32_t len, + uint32_t nBits, + uint64_t *n, + uint64_t *a, + uint32_t bBits, + uint64_t *b, + uint64_t *res +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *r2 = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(r2, 0U, len * sizeof (uint64_t)); + Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r2); + uint64_t mu = 
Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); + Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len, n, mu, r2, a, bBits, b, res); +} + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64( + uint32_t len, + uint32_t nBits, + uint64_t *n, + uint64_t *a, + uint32_t bBits, + uint64_t *b, + uint64_t *res +) +{ + KRML_CHECK_SIZE(sizeof (uint64_t), len); + uint64_t *r2 = (uint64_t *)alloca(len * sizeof (uint64_t)); + memset(r2, 0U, len * sizeof (uint64_t)); + Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r2); + uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); + Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(len, n, mu, r2, a, bBits, b, res); +} + diff --git a/crypto/hacl_bignum.h b/crypto/hacl_bignum.h new file mode 100644 index 0000000000000..a2e8c6b3957a5 --- /dev/null +++ b/crypto/hacl_bignum.h @@ -0,0 +1,798 @@ +/* + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * Copyright (c) 2023 Cryspen + */ + +#ifndef CRYPTO_HACL_BIGNUM_H_ +#define CRYPTO_HACL_BIGNUM_H_ + +#include "hacl_lib.h" + +static inline uint32_t +Hacl_Bignum_Base_mul_wide_add2_u32(uint32_t a, uint32_t b, uint32_t c_in, uint32_t *out) +{ + uint32_t out0 = out[0U]; + uint64_t res = (uint64_t)a * (uint64_t)b + (uint64_t)out0; + res = res + (uint64_t)c_in; + out[0] = (uint32_t)res; + return (uint32_t)(res >> 32U); +} + +static inline uint64_t +Hacl_Bignum_Base_mul_wide_add2_u64(uint64_t a, uint64_t b, uint64_t c_in, uint64_t *out) +{ + uint64_t out0 = out[0U]; + FStar_UInt128_uint128 + res = FStar_UInt128_mul_wide(a, b); + res = FStar_UInt128_add(res,FStar_UInt128_uint64_to_uint128(out0)); + res = FStar_UInt128_add(res,FStar_UInt128_uint64_to_uint128(c_in)); + out[0U] = FStar_UInt128_uint128_to_uint64(res); + return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U)); +} + +static inline void +Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t
*res) +{ + uint32_t bnLen = (len - 1U) / 8U + 1U; + uint32_t tmpLen = 8U * bnLen; + KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); + uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t)); + memset(tmp, 0U, tmpLen * sizeof (uint8_t)); + memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); + for (uint32_t i = 0U; i < bnLen; i++) + { + uint64_t *os = res; + uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U); + uint64_t x = u; + os[i] = x; + } +} + +static inline void +Hacl_Bignum_Convert_bn_to_bytes_be_uint64(uint32_t len, uint64_t *b, uint8_t *res) +{ + uint32_t bnLen = (len - 1U) / 8U + 1U; + uint32_t tmpLen = 8U * bnLen; + KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); + uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t)); + memset(tmp, 0U, tmpLen * sizeof (uint8_t)); + for (uint32_t i = 0U; i < bnLen; i++) + { + store64_be(tmp + i * 8U, b[bnLen - i - 1U]); + } + memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t)); +} + +static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32_t *b) +{ + uint32_t priv = 0U; + for (uint32_t i = 0U; i < len; i++) + { + uint32_t mask = FStar_UInt32_eq_mask(b[i], 0U); + priv = (mask & priv) | (~mask & i); + } + return priv; +} + +static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64_t *b) +{ + uint64_t priv = 0ULL; + for (uint32_t i = 0U; i < len; i++) + { + uint64_t mask = FStar_UInt64_eq_mask(b[i], 0ULL); + priv = (mask & priv) | (~mask & (uint64_t)i); + } + return priv; +} + +static inline uint32_t +Hacl_Bignum_Lib_bn_get_bits_u32(uint32_t len, uint32_t *b, uint32_t i, uint32_t l) +{ + uint32_t i1 = i / 32U; + uint32_t j = i % 32U; + uint32_t p1 = b[i1] >> j; + uint32_t ite; + if (i1 + 1U < len && 0U < j) + { + ite = p1 | b[i1 + 1U] << (32U - j); + } + else + { + ite = p1; + } + return ite & ((1U << l) - 1U); +} + +static inline uint64_t +Hacl_Bignum_Lib_bn_get_bits_u64(uint32_t len, uint64_t *b, uint32_t i, uint32_t l) +{ + uint32_t i1 = i / 64U; + uint32_t j = i % 
64U; + uint64_t p1 = b[i1] >> j; + uint64_t ite; + if (i1 + 1U < len && 0U < j) + { + ite = p1 | b[i1 + 1U] << (64U - j); + } + else + { + ite = p1; + } + return ite & ((1ULL << l) - 1ULL); +} + +static inline uint32_t +Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res) +{ + uint32_t c = 0U; + for (uint32_t i = 0U; i < aLen / 4U; i++) + { + uint32_t t1 = a[4U * i]; + uint32_t t20 = b[4U * i]; + uint32_t *res_i0 = res + 4U * i; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0); + uint32_t t10 = a[4U * i + 1U]; + uint32_t t21 = b[4U * i + 1U]; + uint32_t *res_i1 = res + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1); + uint32_t t11 = a[4U * i + 2U]; + uint32_t t22 = b[4U * i + 2U]; + uint32_t *res_i2 = res + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2); + uint32_t t12 = a[4U * i + 3U]; + uint32_t t2 = b[4U * i + 3U]; + uint32_t *res_i = res + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i); + } + for (uint32_t i = aLen / 4U * 4U; i < aLen; i++) + { + uint32_t t1 = a[i]; + uint32_t t2 = b[i]; + uint32_t *res_i = res + i; + c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t2, res_i); + } + return c; +} + +static inline uint64_t +Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res) +{ + uint64_t c = 0ULL; + for (uint32_t i = 0U; i < aLen / 4U; i++) + { + uint64_t t1 = a[4U * i]; + uint64_t t20 = b[4U * i]; + uint64_t *res_i0 = res + 4U * i; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0); + uint64_t t10 = a[4U * i + 1U]; + uint64_t t21 = b[4U * i + 1U]; + uint64_t *res_i1 = res + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1); + uint64_t t11 = a[4U * i + 2U]; + uint64_t t22 = b[4U * i + 2U]; + uint64_t *res_i2 = res + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2); + uint64_t t12 = a[4U * i + 3U]; 
+ uint64_t t2 = b[4U * i + 3U]; + uint64_t *res_i = res + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i); + } + for (uint32_t i = aLen / 4U * 4U; i < aLen; i++) + { + uint64_t t1 = a[i]; + uint64_t t2 = b[i]; + uint64_t *res_i = res + i; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t2, res_i); + } + return c; +} + +static inline uint32_t +Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res) +{ + uint32_t c = 0U; + for (uint32_t i = 0U; i < aLen / 4U; i++) + { + uint32_t t1 = a[4U * i]; + uint32_t t20 = b[4U * i]; + uint32_t *res_i0 = res + 4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0); + uint32_t t10 = a[4U * i + 1U]; + uint32_t t21 = b[4U * i + 1U]; + uint32_t *res_i1 = res + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1); + uint32_t t11 = a[4U * i + 2U]; + uint32_t t22 = b[4U * i + 2U]; + uint32_t *res_i2 = res + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2); + uint32_t t12 = a[4U * i + 3U]; + uint32_t t2 = b[4U * i + 3U]; + uint32_t *res_i = res + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i); + } + for (uint32_t i = aLen / 4U * 4U; i < aLen; i++) + { + uint32_t t1 = a[i]; + uint32_t t2 = b[i]; + uint32_t *res_i = res + i; + c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t2, res_i); + } + return c; +} + +static inline uint64_t +Hacl_Bignum_Addition_bn_add_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res) +{ + uint64_t c = 0ULL; + for (uint32_t i = 0U; i < aLen / 4U; i++) + { + uint64_t t1 = a[4U * i]; + uint64_t t20 = b[4U * i]; + uint64_t *res_i0 = res + 4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0); + uint64_t t10 = a[4U * i + 1U]; + uint64_t t21 = b[4U * i + 1U]; + uint64_t *res_i1 = res + 4U * i + 1U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1); + uint64_t t11 = a[4U * i + 2U]; + uint64_t t22 = b[4U 
* i + 2U]; + uint64_t *res_i2 = res + 4U * i + 2U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2); + uint64_t t12 = a[4U * i + 3U]; + uint64_t t2 = b[4U * i + 3U]; + uint64_t *res_i = res + 4U * i + 3U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i); + } + for (uint32_t i = aLen / 4U * 4U; i < aLen; i++) + { + uint64_t t1 = a[i]; + uint64_t t2 = b[i]; + uint64_t *res_i = res + i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t2, res_i); + } + return c; +} + +static inline void +Hacl_Bignum_Multiplication_bn_mul_u32( + uint32_t aLen, + uint32_t *a, + uint32_t bLen, + uint32_t *b, + uint32_t *res +) +{ + memset(res, 0U, (aLen + bLen) * sizeof (uint32_t)); + for (uint32_t i0 = 0U; i0 < bLen; i0++) + { + uint32_t bj = b[i0]; + uint32_t *res_j = res + i0; + uint32_t c = 0U; + for (uint32_t i = 0U; i < aLen / 4U; i++) + { + uint32_t a_i = a[4U * i]; + uint32_t *res_i0 = res_j + 4U * i; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0); + uint32_t a_i0 = a[4U * i + 1U]; + uint32_t *res_i1 = res_j + 4U * i + 1U; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1); + uint32_t a_i1 = a[4U * i + 2U]; + uint32_t *res_i2 = res_j + 4U * i + 2U; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2); + uint32_t a_i2 = a[4U * i + 3U]; + uint32_t *res_i = res_j + 4U * i + 3U; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i); + } + for (uint32_t i = aLen / 4U * 4U; i < aLen; i++) + { + uint32_t a_i = a[i]; + uint32_t *res_i = res_j + i; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i); + } + uint32_t r = c; + res[aLen + i0] = r; + } +} + +static inline uint64_t bn_mul_add4_u64(uint64_t* n, uint64_t qj, uint64_t* res_j0, uint64_t c1) { + FStar_UInt128_uint128 ab0 = FStar_UInt128_mul_wide(n[0], qj); + FStar_UInt128_uint128 ab1 = FStar_UInt128_mul_wide(n[1], qj); + ab0 = FStar_UInt128_add(ab0, FStar_UInt128_uint64_to_uint128(res_j0[0])); + ab1 = FStar_UInt128_add(ab1, 
FStar_UInt128_uint64_to_uint128(res_j0[1])); + FStar_UInt128_uint128 ab2 = FStar_UInt128_mul_wide(n[2], qj); + FStar_UInt128_uint128 ab3 = FStar_UInt128_mul_wide(n[3], qj); + ab2 = FStar_UInt128_add(ab2, FStar_UInt128_uint64_to_uint128(res_j0[2])); + ab3 = FStar_UInt128_add(ab3, FStar_UInt128_uint64_to_uint128(res_j0[3])); + + uint64_t carry = c1; + ab0 = FStar_UInt128_add(ab0, carry); + carry = FStar_UInt128_shift_right(ab0, 64U); + ab1 = FStar_UInt128_add(ab1, carry); + carry = FStar_UInt128_shift_right(ab1, 64U); + ab2 = FStar_UInt128_add(ab2, carry); + carry = FStar_UInt128_shift_right(ab2, 64U); + ab3 = FStar_UInt128_add(ab3, carry); + carry = FStar_UInt128_shift_right(ab3, 64U); + c1 = carry; + + res_j0[0] = ab0; + res_j0[1] = ab1; + res_j0[2] = ab2; + res_j0[3] = ab3; + return c1; +} + +static inline uint64_t bn_mul_add4_u64_intrin(uint64_t* n, uint64_t qj, uint64_t* res_j0, uint64_t c1) { + FStar_UInt128_uint128 ab0 = FStar_UInt128_mul_wide(n[0], qj); + FStar_UInt128_uint128 ab1 = FStar_UInt128_mul_wide(n[1], qj); + FStar_UInt128_uint128 ab2 = FStar_UInt128_mul_wide(n[2], qj); + FStar_UInt128_uint128 ab3 = FStar_UInt128_mul_wide(n[3], qj); + + uint64_t ab0l = ab0; + uint64_t ab1l = ab1; + uint64_t ab2l = ab2; + uint64_t ab3l = ab3; + + uint64_t c = 0; + + uint64_t abo0l, abo1l, abo2l, abo3l; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, ab0l, res_j0[0], &abo0l); + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, ab1l, res_j0[1], &abo1l); + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, ab2l, res_j0[2], &abo2l); + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, ab3l, res_j0[3], &abo3l); + + uint64_t ab0h = ab0 >> 64; + uint64_t ab1h = ab1 >> 64; + uint64_t ab2h = ab2 >> 64; + uint64_t ab3h = ab3 >> 64; + + uint64_t x = 0; + x = Lib_IntTypes_Intrinsics_add_carry_u64(x, abo0l, c1, res_j0); + x = Lib_IntTypes_Intrinsics_add_carry_u64(x, abo1l, ab0h, res_j0 + 1); + x = Lib_IntTypes_Intrinsics_add_carry_u64(x, abo2l, ab1h, res_j0 + 2); + x = 
Lib_IntTypes_Intrinsics_add_carry_u64(x, abo3l, ab2h, res_j0 + 3); + + x = Lib_IntTypes_Intrinsics_add_carry_u64(x, ab3h, c, &c1); + return c1; +} + + +static inline void +Hacl_Bignum_Multiplication_bn_mul_u64( + uint32_t aLen, + uint64_t *a, + uint32_t bLen, + uint64_t *b, + uint64_t *res +) +{ + memset(res, 0U, (aLen + bLen) * sizeof (uint64_t)); + for (uint32_t i0 = 0U; i0 < bLen; i0++) + { + uint64_t bj = b[i0]; + uint64_t *res_j = res + i0; + uint64_t c = 0ULL; + for (uint32_t i = 0U; i < aLen / 4U; i++) + { + c = bn_mul_add4_u64(a+4*i,bj,res_j+4*i,c); + } + for (uint32_t i = aLen / 4U * 4U; i < aLen; i++) + { + uint64_t a_i = a[i]; + uint64_t *res_i = res_j + i; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i); + } + uint64_t r = c; + res[aLen + i0] = r; + } +} + +static inline void +Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res) +{ + memset(res, 0U, (aLen + aLen) * sizeof (uint32_t)); + for (uint32_t i0 = 0U; i0 < aLen; i0++) + { + uint32_t *ab = a; + uint32_t a_j = a[i0]; + uint32_t *res_j = res + i0; + uint32_t c = 0U; + for (uint32_t i = 0U; i < i0 / 4U; i++) + { + uint32_t a_i = ab[4U * i]; + uint32_t *res_i0 = res_j + 4U * i; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0); + uint32_t a_i0 = ab[4U * i + 1U]; + uint32_t *res_i1 = res_j + 4U * i + 1U; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1); + uint32_t a_i1 = ab[4U * i + 2U]; + uint32_t *res_i2 = res_j + 4U * i + 2U; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2); + uint32_t a_i2 = ab[4U * i + 3U]; + uint32_t *res_i = res_j + 4U * i + 3U; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i); + } + for (uint32_t i = i0 / 4U * 4U; i < i0; i++) + { + uint32_t a_i = ab[i]; + uint32_t *res_i = res_j + i; + c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i); + } + uint32_t r = c; + res[i0 + i0] = r; + } + uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, res, res); + 
KRML_MAYBE_UNUSED_VAR(c0); + KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen); + uint32_t *tmp = (uint32_t *)alloca((aLen + aLen) * sizeof (uint32_t)); + memset(tmp, 0U, (aLen + aLen) * sizeof (uint32_t)); + for (uint32_t i = 0U; i < aLen; i++) + { + uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i]; + uint32_t hi = (uint32_t)(res1 >> 32U); + uint32_t lo = (uint32_t)res1; + tmp[2U * i] = lo; + tmp[2U * i + 1U] = hi; + } + uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, tmp, res); + KRML_MAYBE_UNUSED_VAR(c1); +} + +static inline void +Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res) +{ + memset(res, 0U, (aLen + aLen) * sizeof (uint64_t)); + for (uint32_t i0 = 0U; i0 < aLen; i0++) + { + uint64_t *ab = a; + uint64_t a_j = a[i0]; + uint64_t *res_j = res + i0; + uint64_t c = 0ULL; + for (uint32_t i = 0U; i < i0 / 4U; i++) + { + c = bn_mul_add4_u64(ab+4*i,a_j,res_j+4*i,c); + } + for (uint32_t i = i0 / 4U * 4U; i < i0; i++) + { + uint64_t a_i = ab[i]; + uint64_t *res_i = res_j + i; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i); + } + uint64_t r = c; + res[i0 + i0] = r; + } + uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, res, res); + KRML_MAYBE_UNUSED_VAR(c0); + KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen); + uint64_t *tmp = (uint64_t *)alloca((aLen + aLen) * sizeof (uint64_t)); + memset(tmp, 0U, (aLen + aLen) * sizeof (uint64_t)); + for (uint32_t i = 0U; i < aLen; i++) + { + FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]); + uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U)); + uint64_t lo = FStar_UInt128_uint128_to_uint64(res1); + tmp[2U * i] = lo; + tmp[2U * i + 1U] = hi; + } + uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, tmp, res); + KRML_MAYBE_UNUSED_VAR(c1); +} + +typedef struct Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32_s +{ + uint32_t len; + uint32_t *n; + uint32_t mu; + uint32_t *r2; +} 
+Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32; + +typedef struct Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64_s +{ + uint32_t len; + uint64_t *n; + uint64_t mu; + uint64_t *r2; +} +Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64; + +void +Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( + uint32_t aLen, + uint32_t *a, + uint32_t *b, + uint32_t *tmp, + uint32_t *res +); + +void +Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( + uint32_t aLen, + uint64_t *a, + uint64_t *b, + uint64_t *tmp, + uint64_t *res +); + +void +Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32( + uint32_t aLen, + uint32_t *a, + uint32_t *tmp, + uint32_t *res +); + +void +Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64( + uint32_t aLen, + uint64_t *a, + uint64_t *tmp, + uint64_t *res +); + +void +Hacl_Bignum_bn_add_mod_n_u32( + uint32_t len1, + uint32_t *n, + uint32_t *a, + uint32_t *b, + uint32_t *res +); + +void +Hacl_Bignum_bn_add_mod_n_u64( + uint32_t len1, + uint64_t *n, + uint64_t *a, + uint64_t *b, + uint64_t *res +); + +void +Hacl_Bignum_bn_sub_mod_n_u32( + uint32_t len1, + uint32_t *n, + uint32_t *a, + uint32_t *b, + uint32_t *res +); + +void +Hacl_Bignum_bn_sub_mod_n_u64( + uint32_t len1, + uint64_t *n, + uint64_t *a, + uint64_t *b, + uint64_t *res +); + +uint32_t Hacl_Bignum_ModInvLimb_mod_inv_uint32(uint32_t n0); + +uint64_t Hacl_Bignum_ModInvLimb_mod_inv_uint64(uint64_t n0); + +uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n); + +void +Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32( + uint32_t len, + uint32_t nBits, + uint32_t *n, + uint32_t *res +); + +void +Hacl_Bignum_Montgomery_bn_to_mont_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv, + uint32_t *r2, + uint32_t *a, + uint32_t *aM +); + +void +Hacl_Bignum_Montgomery_bn_from_mont_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv_u64, + uint32_t *aM, + uint32_t *a +); + +void +Hacl_Bignum_Montgomery_bn_mont_mul_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv_u64, + uint32_t *aM, + uint32_t *bM, + uint32_t 
*resM +); + +void +Hacl_Bignum_Montgomery_bn_mont_sqr_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv_u64, + uint32_t *aM, + uint32_t *resM +); + +uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n); + +void +Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64( + uint32_t len, + uint32_t nBits, + uint64_t *n, + uint64_t *res +); + +void +Hacl_Bignum_Montgomery_bn_to_mont_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv, + uint64_t *r2, + uint64_t *a, + uint64_t *aM +); + +void +Hacl_Bignum_Montgomery_bn_from_mont_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv_u64, + uint64_t *aM, + uint64_t *a +); + +void +Hacl_Bignum_Montgomery_bn_mont_mul_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv_u64, + uint64_t *aM, + uint64_t *bM, + uint64_t *resM +); + +void +Hacl_Bignum_Montgomery_bn_mont_sqr_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv_u64, + uint64_t *aM, + uint64_t *resM +); + +void +Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u32( + uint32_t len, + uint32_t *n, + uint32_t nInv, + uint32_t *c, + uint32_t *res +); + +void +Hacl_Bignum_AlmostMontgomery_bn_almost_mont_reduction_u64( + uint32_t len, + uint64_t *n, + uint64_t nInv, + uint64_t *c, + uint64_t *res +); + +uint32_t +Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( + uint32_t len, + uint32_t *n, + uint32_t *a, + uint32_t bBits, + uint32_t *b +); + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( + uint32_t len, + uint32_t *n, + uint32_t mu, + uint32_t *r2, + uint32_t *a, + uint32_t bBits, + uint32_t *b, + uint32_t *res +); + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( + uint32_t len, + uint32_t *n, + uint32_t mu, + uint32_t *r2, + uint32_t *a, + uint32_t bBits, + uint32_t *b, + uint32_t *res +); + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32( + uint32_t len, + uint32_t nBits, + uint32_t *n, + uint32_t *a, + uint32_t bBits, + uint32_t *b, + uint32_t *res +); + +void 
+Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32( + uint32_t len, + uint32_t nBits, + uint32_t *n, + uint32_t *a, + uint32_t bBits, + uint32_t *b, + uint32_t *res +); + +uint64_t +Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( + uint32_t len, + uint64_t *n, + uint64_t *a, + uint32_t bBits, + uint64_t *b +); + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( + uint32_t len, + uint64_t *n, + uint64_t mu, + uint64_t *r2, + uint64_t *a, + uint32_t bBits, + uint64_t *b, + uint64_t *res +); + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( + uint32_t len, + uint64_t *n, + uint64_t mu, + uint64_t *r2, + uint64_t *a, + uint32_t bBits, + uint64_t *b, + uint64_t *res +); + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64( + uint32_t len, + uint32_t nBits, + uint64_t *n, + uint64_t *a, + uint32_t bBits, + uint64_t *b, + uint64_t *res +); + +void +Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64( + uint32_t len, + uint32_t nBits, + uint64_t *n, + uint64_t *a, + uint32_t bBits, + uint64_t *b, + uint64_t *res +); + +#endif diff --git a/crypto/hacl_hash.h b/crypto/hacl_hash.h new file mode 100644 index 0000000000000..89a60a04a042a --- /dev/null +++ b/crypto/hacl_hash.h @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * Copyright (c) 2023 Cryspen + */ + +#ifndef CRYPTO_HACL_HASH_H_ +#define CRYPTO_HACL_HASH_H_ + +#include "hacl_lib.h" + +#define Spec_Hash_Definitions_SHA3_256 8 +#define Spec_Hash_Definitions_SHA3_224 9 +#define Spec_Hash_Definitions_SHA3_384 10 +#define Spec_Hash_Definitions_SHA3_512 11 +#define Spec_Hash_Definitions_Shake128 12 +#define Spec_Hash_Definitions_Shake256 13 + +typedef uint8_t Spec_Hash_Definitions_hash_alg; + +#define Hacl_Streaming_Types_Success 0 +#define Hacl_Streaming_Types_InvalidAlgorithm 1 +#define Hacl_Streaming_Types_InvalidLength 2 +#define Hacl_Streaming_Types_MaximumLengthExceeded 3 + +typedef 
uint8_t Hacl_Streaming_Types_error_code; + +struct Hacl_Streaming_MD_state_32_s { + uint32_t *block_state; + uint8_t *buf; + uint64_t total_len; +}; + +struct Hacl_Streaming_MD_state_64_s { + uint64_t *block_state; + uint8_t *buf; + uint64_t total_len; +}; + +static const uint32_t Hacl_Impl_SHA2_Generic_h224[8U] = { + (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, + (uint32_t)0xf70e5939U, (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, + (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U +}; + +static const uint32_t Hacl_Impl_SHA2_Generic_h256[8U] = { + (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, + (uint32_t)0xa54ff53aU, (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, + (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U +}; + +static const uint64_t Hacl_Impl_SHA2_Generic_h384[8U] = { + (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, + (uint64_t)0x9159015a3070dd17U, (uint64_t)0x152fecd8f70e5939U, + (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U, + (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U +}; + +static const uint64_t Hacl_Impl_SHA2_Generic_h512[8U] = { + (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, + (uint64_t)0x3c6ef372fe94f82bU, (uint64_t)0xa54ff53a5f1d36f1U, + (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU, + (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U +}; + +static const uint32_t Hacl_Impl_SHA2_Generic_k224_256[64U] = { + (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, + (uint32_t)0xe9b5dba5U, (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, + (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U, (uint32_t)0xd807aa98U, + (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U, + (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, + (uint32_t)0xc19bf174U, (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, + (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU, (uint32_t)0x2de92c6fU, + (uint32_t)0x4a7484aaU, 
(uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU, + (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, + (uint32_t)0xbf597fc7U, (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, + (uint32_t)0x06ca6351U, (uint32_t)0x14292967U, (uint32_t)0x27b70a85U, + (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U, + (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, + (uint32_t)0x92722c85U, (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, + (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U, (uint32_t)0xd192e819U, + (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U, + (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, + (uint32_t)0x34b0bcb5U, (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, + (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U, (uint32_t)0x748f82eeU, + (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U, + (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, + (uint32_t)0xc67178f2U +}; + +static const uint64_t Hacl_Impl_SHA2_Generic_k384_512[80U] = { + (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, + (uint64_t)0xb5c0fbcfec4d3b2fU, (uint64_t)0xe9b5dba58189dbbcU, + (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U, + (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, + (uint64_t)0xd807aa98a3030242U, (uint64_t)0x12835b0145706fbeU, + (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U, + (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, + (uint64_t)0x9bdc06a725c71235U, (uint64_t)0xc19bf174cf692694U, + (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U, + (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, + (uint64_t)0x2de92c6f592b0275U, (uint64_t)0x4a7484aa6ea6e483U, + (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U, + (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, + (uint64_t)0xb00327c898fb213fU, (uint64_t)0xbf597fc7beef0ee4U, + (uint64_t)0xc6e00bf33da88fc2U, 
(uint64_t)0xd5a79147930aa725U, + (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, + (uint64_t)0x27b70a8546d22ffcU, (uint64_t)0x2e1b21385c26c926U, + (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU, + (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, + (uint64_t)0x81c2c92e47edaee6U, (uint64_t)0x92722c851482353bU, + (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U, + (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, + (uint64_t)0xd192e819d6ef5218U, (uint64_t)0xd69906245565a910U, + (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U, + (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, + (uint64_t)0x2748774cdf8eeb99U, (uint64_t)0x34b0bcb5e19b48a8U, + (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU, + (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, + (uint64_t)0x748f82ee5defb2fcU, (uint64_t)0x78a5636f43172f60U, + (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU, + (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, + (uint64_t)0xbef9a3f7b2c67915U, (uint64_t)0xc67178f2e372532bU, + (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U, + (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, + (uint64_t)0x06f067aa72176fbaU, (uint64_t)0x0a637dc5a2c898a6U, + (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU, + (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, + (uint64_t)0x3c9ebe0a15c9bebcU, (uint64_t)0x431d67c49c100d4cU, + (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU, + (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U +}; + +/* + * Reset an existing state to the initial hash state with empty data. + */ +void Hacl_Streaming_SHA2_init_256(struct Hacl_Streaming_MD_state_32_s *s); + +/* + * Feed an arbitrary amount of data into the hash. 
This function returns 0 for + * success, or 1 if the combined length of all of the data passed to + * `update_256` (since the last call to `init_256`) exceeds 2^61-1 bytes. + * + * This function is identical to the update function for SHA2_224. + */ +Hacl_Streaming_Types_error_code +Hacl_Streaming_SHA2_update_256(struct Hacl_Streaming_MD_state_32_s *p, + uint8_t *input, uint32_t input_len); + +/* + * Write the resulting hash into `dst`, an array of 32 bytes. The state remains + * valid after a call to `finish_256`, meaning the user may feed more data into + * the hash via `update_256`. (The finish_256 function operates on an internal + * copy of the state and therefore does not invalidate the client-held state + * `p`.) + */ +void Hacl_Streaming_SHA2_finish_256(struct Hacl_Streaming_MD_state_32_s *p, + uint8_t *dst); + +/* + * Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes. + */ +void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, + uint8_t *dst); + +void Hacl_Streaming_SHA2_init_224(struct Hacl_Streaming_MD_state_32_s *s); + +Hacl_Streaming_Types_error_code +Hacl_Streaming_SHA2_update_224(struct Hacl_Streaming_MD_state_32_s *p, + uint8_t *input, uint32_t input_len); + +/* + * Write the resulting hash into `dst`, an array of 28 bytes. The state remains + * valid after a call to `finish_224`, meaning the user may feed more data into + * the hash via `update_224`. + */ +void Hacl_Streaming_SHA2_finish_224(struct Hacl_Streaming_MD_state_32_s *p, + uint8_t *dst); + +/* + * Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes. + */ +void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, + uint8_t *dst); + +void Hacl_Streaming_SHA2_init_512(struct Hacl_Streaming_MD_state_64_s *s); + +/* + * Feed an arbitrary amount of data into the hash. 
This function returns 0 for + * success, or 1 if the combined length of all of the data passed to + * `update_512` (since the last call to `init_512`) exceeds 2^125-1 bytes. + * + * This function is identical to the update function for SHA2_384. + */ +Hacl_Streaming_Types_error_code +Hacl_Streaming_SHA2_update_512(struct Hacl_Streaming_MD_state_64_s *p, + uint8_t *input, uint32_t input_len); + +/* + * Write the resulting hash into `dst`, an array of 64 bytes. The state remains + * valid after a call to `finish_512`, meaning the user may feed more data into + * the hash via `update_512`. (The finish_512 function operates on an internal + * copy of the state and therefore does not invalidate the client-held state + * `p`.) + */ +void Hacl_Streaming_SHA2_finish_512(struct Hacl_Streaming_MD_state_64_s *p, + uint8_t *dst); + +/* + * Hash `input`, of len `input_len`, into `dst`, an array of 64 bytes. + */ +void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, + uint8_t *dst); + +void Hacl_Streaming_SHA2_init_384(struct Hacl_Streaming_MD_state_64_s *s); + +Hacl_Streaming_Types_error_code +Hacl_Streaming_SHA2_update_384(struct Hacl_Streaming_MD_state_64_s *p, + uint8_t *input, uint32_t input_len); + +/* + * Write the resulting hash into `dst`, an array of 48 bytes. The state remains + * valid after a call to `finish_384`, meaning the user may feed more data into + * the hash via `update_384`. + */ +void Hacl_Streaming_SHA2_finish_384(struct Hacl_Streaming_MD_state_64_s *p, + uint8_t *dst); +/* + * Hash `input`, of len `input_len`, into `dst`, an array of 48 bytes. 
+ */ +void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, + uint8_t *dst); + +struct Hacl_Streaming_Keccak_hash_buf_s { + Spec_Hash_Definitions_hash_alg fst; + uint64_t *snd; +}; + +struct Hacl_Streaming_Keccak_state_s { + struct Hacl_Streaming_Keccak_hash_buf_s block_state; + uint8_t *buf; + uint64_t total_len; +}; + +Hacl_Streaming_Types_error_code +Hacl_Streaming_Keccak_update(struct Hacl_Streaming_Keccak_state_s *p, + uint8_t *data, uint32_t len); + +Hacl_Streaming_Types_error_code +Hacl_Streaming_Keccak_finish(struct Hacl_Streaming_Keccak_state_s *p, + uint8_t *out); + +void Hacl_SHA3_shake128_hacl(uint32_t inputByteLen, uint8_t *input, + uint32_t outputByteLen, uint8_t *output); + +void Hacl_SHA3_shake256_hacl(uint32_t inputByteLen, uint8_t *input, + uint32_t outputByteLen, uint8_t *output); + +void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output); + +void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output); + +void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output); + +void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output); + +#endif // CRYPTO_HACL_HASH_H_ diff --git a/crypto/hacl_lib.h b/crypto/hacl_lib.h new file mode 100644 index 0000000000000..35e312a0f21ec --- /dev/null +++ b/crypto/hacl_lib.h @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * Copyright (c) 2023 Cryspen + */ + +#ifndef CRYPTO_HACL_LIB_H_ +#define CRYPTO_HACL_LIB_H_ + +#include +#include +#include +#include +#include +#include + +#define alloca __builtin_alloca + +typedef u128 FStar_UInt128_uint128; + +static inline u128 FStar_UInt128_shift_left(u128 x, u32 y) +{ + return (x << y); +} + +static inline u128 FStar_UInt128_add(u128 x, u128 y) +{ + return (x + y); +} + +static inline u128 FStar_UInt128_uint64_to_uint128(u64 x) +{ + return ((u128)x); +} + +inline static u128 
FStar_UInt128_mul_wide(u64 x, u64 y) { + return ((u128) x) * y; +} + +inline static uint64_t FStar_UInt128_uint128_to_uint64(u128 x) { + return (u64)x; +} + +inline static u128 FStar_UInt128_shift_right(u128 x, u32 y) { + return x >> y; +} + +#define KRML_NOINLINE noinline __maybe_unused +#define KRML_MAYBE_UNUSED_VAR(x) (void)(x) +#define KRML_HOST_CALLOC(x,y) kcalloc(x,y,GFP_KERNEL) +#define KRML_HOST_FREE(x) kfree(x) + +static KRML_NOINLINE u32 FStar_UInt32_eq_mask(u32 a, u32 b) +{ + u32 x = a ^ b; + u32 minus_x = ~x + (u32)1U; + u32 x_or_minus_x = x | minus_x; + u32 xnx = x_or_minus_x >> (u32)31U; + return xnx - (u32)1U; +} + +static KRML_NOINLINE u32 FStar_UInt32_gte_mask(u32 a, u32 b) +{ + u32 x = a; + u32 y = b; + u32 x_xor_y = x ^ y; + u32 x_sub_y = x - y; + u32 x_sub_y_xor_y = x_sub_y ^ y; + u32 q = x_xor_y | x_sub_y_xor_y; + u32 x_xor_q = x ^ q; + u32 x_xor_q_ = x_xor_q >> (u32)31U; + return x_xor_q_ - (u32)1U; +} + + +static KRML_NOINLINE u64 FStar_UInt64_eq_mask(u64 a, u64 b) +{ + u64 x = a ^ b; + u64 minus_x = ~x + (u64)1U; + u64 x_or_minus_x = x | minus_x; + u64 xnx = x_or_minus_x >> (u32)63U; + return xnx - (u64)1U; +} + +static KRML_NOINLINE u64 FStar_UInt64_gte_mask(u64 a, u64 b) +{ + u64 x = a; + u64 y = b; + u64 x_xor_y = x ^ y; + u64 x_sub_y = x - y; + u64 x_sub_y_xor_y = x_sub_y ^ y; + u64 q = x_xor_y | x_sub_y_xor_y; + u64 x_xor_q = x ^ q; + u64 x_xor_q_ = x_xor_q >> (u32)63U; + return x_xor_q_ - (u64)1U; +} + +static inline uint32_t +Hacl_IntTypes_Intrinsics_add_carry_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r) +{ + uint64_t res = (uint64_t)x + (uint64_t)cin + (uint64_t)y; + uint32_t c = (uint32_t)(res >> 32U); + r[0U] = (uint32_t)res; + return c; +} + +static inline uint32_t +Hacl_IntTypes_Intrinsics_sub_borrow_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r) +{ + uint64_t res = (uint64_t)x - (uint64_t)y - (uint64_t)cin; + uint32_t c = (uint32_t)(res >> 32U) & 1U; + r[0U] = (uint32_t)res; + return c; +} + + +static inline 
uint64_t +Hacl_IntTypes_Intrinsics_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r) +{ + u128 res = (u128) x + (u128) y + (cin & 1); + u64 c = (res >> 64) & 1; + r[0U] = res; + return c; +} + +static inline uint64_t +Hacl_IntTypes_Intrinsics_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r) +{ + u128 res = (u128) x - (u128) y - (cin & 1); + u64 c = (res >> 64) & 1; + r[0U] = res; + return c; +} + +/* +static inline uint64_t +Hacl_IntTypes_Intrinsics_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r) +{ + uint64_t cout = 0; + *r = __builtin_addcll(x,y,cin,&cout); + return cout; +} + +static inline uint64_t +Hacl_IntTypes_Intrinsics_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r) +{ + uint64_t cout = 0; + *r = __builtin_subcll(x,y,cin,&cout); + return cout; +} +*/ + +#define Lib_IntTypes_Intrinsics_sub_borrow_u32(x1, x2, x3, x4) \ + (Hacl_IntTypes_Intrinsics_sub_borrow_u32(x1, x2, x3, x4)) + +#define Lib_IntTypes_Intrinsics_add_carry_u32(x1, x2, x3, x4) \ + (Hacl_IntTypes_Intrinsics_add_carry_u32(x1, x2, x3, x4)) + +#define Lib_IntTypes_Intrinsics_sub_borrow_u64(x1, x2, x3, x4) \ + (Hacl_IntTypes_Intrinsics_sub_borrow_u64(x1, x2, x3, x4)) + +#define Lib_IntTypes_Intrinsics_add_carry_u64(x1, x2, x3, x4) \ + (Hacl_IntTypes_Intrinsics_add_carry_u64(x1, x2, x3, x4)) + +/* + * Loads and stores. These avoid undefined behavior due to unaligned memory + * accesses, via memcpy. 
+ */ + +#define load32_be(b) (get_unaligned_be32(b)) +#define store32_be(b, i) put_unaligned_be32(i, b); +#define load64_be(b) (get_unaligned_be64(b)) +#define store64_be(b, i) put_unaligned_be64(i, b); + +#define load32_le(b) (get_unaligned_le32(b)) +#define store32_le(b, i) put_unaligned_le32(i, b); +#define load64_le(b) (get_unaligned_le64(b)) +#define store64_le(b, i) put_unaligned_le64(i, b); + +static inline void store128_be(u8 *buf, u128 x) +{ + store64_be(buf, (u64)(x >> 64)); + store64_be(buf + 8, (u64)(x)); +} + +#define KRML_CHECK_SIZE(size_elt, sz) {} + +/* Macros for prettier unrolling of loops */ +#define KRML_LOOP1(i, n, x) \ + { \ + x i += n; \ + } + +#define KRML_LOOP2(i, n, x) \ + KRML_LOOP1(i, n, x) \ + KRML_LOOP1(i, n, x) + +#define KRML_LOOP3(i, n, x) \ + KRML_LOOP2(i, n, x) \ + KRML_LOOP1(i, n, x) + +#define KRML_LOOP4(i, n, x) \ + KRML_LOOP2(i, n, x) \ + KRML_LOOP2(i, n, x) + +#define KRML_LOOP5(i, n, x) \ + KRML_LOOP4(i, n, x) \ + KRML_LOOP1(i, n, x) + +#define KRML_LOOP6(i, n, x) \ + KRML_LOOP4(i, n, x) \ + KRML_LOOP2(i, n, x) + +#define KRML_LOOP7(i, n, x) \ + KRML_LOOP4(i, n, x) \ + KRML_LOOP3(i, n, x) + +#define KRML_LOOP8(i, n, x) \ + KRML_LOOP4(i, n, x) \ + KRML_LOOP4(i, n, x) + +#define KRML_LOOP9(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP1(i, n, x) + +#define KRML_LOOP10(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP2(i, n, x) + +#define KRML_LOOP11(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP3(i, n, x) + +#define KRML_LOOP12(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP4(i, n, x) + +#define KRML_LOOP13(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP5(i, n, x) + +#define KRML_LOOP14(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP6(i, n, x) + +#define KRML_LOOP15(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP7(i, n, x) + +#define KRML_LOOP16(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP8(i, n, x) + +#define KRML_UNROLL_FOR(i, z, n, k, x) \ + do { \ + u32 i = z; \ + KRML_LOOP##n(i, k, x) \ + } while (0) + +#define 
KRML_ACTUAL_FOR(i, z, n, k, x) \ + do { \ + for (u32 i = z; i < n; i += k) { \ + x \ + } \ + } while (0) + +#define KRML_UNROLL_MAX 16 + +/* 1 is the number of loop iterations, i.e. (n - z)/k as evaluated by krml */ +#if 0 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR0(i, z, n, k, x) +#else +#define KRML_MAYBE_FOR0(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 1 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 1, k, x) +#else +#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 2 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 2, k, x) +#else +#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 3 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 3, k, x) +#else +#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 4 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 4, k, x) +#else +#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 5 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 5, k, x) +#else +#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 6 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 6, k, x) +#else +#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 7 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 7, k, x) +#else +#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 8 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 8, k, x) +#else +#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 9 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 9, k, x) +#else +#define KRML_MAYBE_FOR9(i, z, n, k, x) 
KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 10 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 10, k, x) +#else +#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 11 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 11, k, x) +#else +#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 12 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 12, k, x) +#else +#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 13 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 13, k, x) +#else +#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 14 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 14, k, x) +#else +#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 15 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 15, k, x) +#else +#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 16 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 16, k, x) +#else +#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#ifndef KRML_HOST_IGNORE +#define KRML_HOST_IGNORE(x) (void)(x) +#endif + +#endif // CRYPTO_HACL_LIB_H_ diff --git a/crypto/hacl_p256.h b/crypto/hacl_p256.h new file mode 100644 index 0000000000000..33a21ac06c8fa --- /dev/null +++ b/crypto/hacl_p256.h @@ -0,0 +1,718 @@ +/* + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * Copyright (c) 2023 Cryspen + */ + +#ifndef CRYPTO_HACL_P256_H_ +#define CRYPTO_HACL_P256_H_ + +#include "hacl_lib.h" + +/******************************************************************************* + + Verified C library for ECDSA and ECDH 
functions over the P-256 NIST curve. + + This module implements signing and verification, key validation, conversions + between various point representations, and ECDH key agreement. + +*******************************************************************************/ + +/*****************/ +/* ECDSA signing */ +/*****************/ + +/** +Create an ECDSA signature WITHOUT hashing first. + + This function is intended to receive a hash of the input. + Note: this header does not provide hash-and-sign combined functions; the + caller must hash the message itself before calling this function. + + The argument `msg` MUST be at least 32 bytes (i.e. `msg_len >= 32`). + + NOTE: The equivalent functions in OpenSSL and Fiat-Crypto both accept inputs + smaller than 32 bytes. These libraries left-pad the input with enough zeroes to + reach the minimum 32 byte size. Clients who need behavior identical to OpenSSL + need to perform the left-padding themselves. + + The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. + + The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. + The arguments `private_key` and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. + + The function also checks whether `private_key` and `nonce` are valid values: + • 0 < `private_key` < the order of the curve + • 0 < `nonce` < the order of the curve +*/ +bool +Hacl_P256_ecdsa_sign_p256_without_hash( + uint8_t *signature, + uint32_t msg_len, + uint8_t *msg, + uint8_t *private_key, + uint8_t *nonce +); + + +/**********************/ +/* ECDSA verification */ +/**********************/ + +/** +Verify an ECDSA signature WITHOUT hashing first. + + This function is intended to receive a hash of the input. + Note: this header does not provide hash-and-verify combined functions; the + caller must hash the message itself before calling this function. + + The argument `msg` MUST be at least 32 bytes (i.e. `msg_len >= 32`). 
+ + The function returns `true` if the signature is valid and `false` otherwise. + + The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. + The argument `public_key` (x || y) points to 64 bytes of valid memory, i.e., uint8_t[64]. + The arguments `signature_r` and `signature_s` point to 32 bytes of valid memory, i.e., uint8_t[32]. + + The function also checks whether `public_key` is valid +*/ +bool +Hacl_P256_ecdsa_verif_without_hash( + uint32_t msg_len, + uint8_t *msg, + uint8_t *public_key, + uint8_t *signature_r, + uint8_t *signature_s +); + + +/******************/ +/* Key validation */ +/******************/ + +/** +Public key validation. + + The function returns `true` if a public key is valid and `false` otherwise. + + The argument `public_key` points to 64 bytes of valid memory, i.e., uint8_t[64]. + + The public key (x || y) is valid (with respect to SP 800-56A): + • the public key is not the “point at infinity”, represented as O. + • the affine x and y coordinates of the point represented by the public key are + in the range [0, p – 1] where p is the prime defining the finite field. + • y^2 = x^3 + ax + b where a and b are the coefficients of the curve equation. + The last extract is taken from: https://neilmadden.blog/2017/05/17/so-how-do-you-validate-nist-ecdh-public-keys/ +*/ +bool Hacl_P256_validate_public_key(uint8_t *public_key); + +/** +Private key validation. + + The function returns `true` if a private key is valid and `false` otherwise. + + The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. + + The private key is valid: + • 0 < `private_key` < the order of the curve +*/ +bool Hacl_P256_validate_private_key(uint8_t *private_key); + +/******************************************************************************* + Parsing and Serializing public keys. + + A public key is a point (x, y) on the P-256 NIST curve. + + The point can be represented in the following three ways. 
+ • raw = [ x || y ], 64 bytes + • uncompressed = [ 0x04 || x || y ], 65 bytes + • compressed = [ (0x02 for even `y` and 0x03 for odd `y`) || x ], 33 bytes + +*******************************************************************************/ + + +/** +Convert a public key from uncompressed to its raw form. + + The function returns `true` for successful conversion of a public key and `false` otherwise. + + The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `pk` points to 65 bytes of valid memory, i.e., uint8_t[65]. + + The function DOESN'T check whether (x, y) is a valid point. +*/ +bool Hacl_P256_uncompressed_to_raw(uint8_t *pk, uint8_t *pk_raw); + +/** +Convert a public key from compressed to its raw form. + + The function returns `true` for successful conversion of a public key and `false` otherwise. + + The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `pk` points to 33 bytes of valid memory, i.e., uint8_t[33]. + + The function also checks whether (x, y) is a valid point. +*/ +bool Hacl_P256_compressed_to_raw(uint8_t *pk, uint8_t *pk_raw); + +/** +Convert a public key from raw to its uncompressed form. + + The outparam `pk` points to 65 bytes of valid memory, i.e., uint8_t[65]. + The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. + + The function DOESN'T check whether (x, y) is a valid point. +*/ +void Hacl_P256_raw_to_uncompressed(uint8_t *pk_raw, uint8_t *pk); + +/** +Convert a public key from raw to its compressed form. + + The outparam `pk` points to 33 bytes of valid memory, i.e., uint8_t[33]. + The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. + + The function DOESN'T check whether (x, y) is a valid point. 
+*/ +void Hacl_P256_raw_to_compressed(uint8_t *pk_raw, uint8_t *pk); + + +static const +uint64_t +Hacl_P256_PrecompTable_precomp_basepoint_table_w4[192U] = + { + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, + (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U, + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U, + (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U, + (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U, + (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U, + (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U, + (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U, + (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U, + (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U, + (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U, + (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U, + (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U, + (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U, + (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U, + (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U, + (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U, + (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U, + (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U, + (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U, + (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U, + (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U, + 
(uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U, + (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U, + (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U, + (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U, + (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U, + (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U, + (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U, + (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U, + (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U, + (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U, + (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U, + (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U, + (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U, + (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U, + (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U, + (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U, + (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U, + (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U, + (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U, + (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U, + (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U, + (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U, + (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, 
(uint64_t)511127033980815508U, + (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U, + (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U, + (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U, + (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U, + (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U, + (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U, + (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U, + (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U, + (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U, + (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U, + (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U, + (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U, + (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U, + (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U, + (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U, + (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U, + (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U, + (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U, + (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U, + (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U, + (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U, + (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U, + (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, 
(uint64_t)7001710806858862020U, + (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U, + (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U, + (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U, + (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U + }; + +static const +uint64_t +Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4[192U] = + { + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, + (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U, + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1499621593102562565U, + (uint64_t)16692369783039433128U, (uint64_t)15337520135922861848U, + (uint64_t)5455737214495366228U, (uint64_t)17827017231032529600U, + (uint64_t)12413621606240782649U, (uint64_t)2290483008028286132U, + (uint64_t)15752017553340844820U, (uint64_t)4846430910634234874U, + (uint64_t)10861682798464583253U, (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U, + (uint64_t)9866710912401645115U, (uint64_t)1162548847543228595U, (uint64_t)7649967190445130486U, + (uint64_t)5212340432230915749U, (uint64_t)7572620550182916491U, (uint64_t)14876145112448665096U, + (uint64_t)2063227348838176167U, (uint64_t)3519435548295415847U, (uint64_t)8390400282019023103U, + (uint64_t)17666843593163037841U, (uint64_t)9450204148816496323U, (uint64_t)8483374507652916768U, + (uint64_t)6254661047265818424U, (uint64_t)16382127809582285023U, (uint64_t)125359443771153172U, + (uint64_t)1374336701588437897U, (uint64_t)11362596098420127726U, (uint64_t)2101654420738681387U, + (uint64_t)12772780342444840510U, (uint64_t)12546934328908550060U, + (uint64_t)8331880412333790397U, (uint64_t)11687262051473819904U, (uint64_t)8926848496503457587U, + (uint64_t)9603974142010467857U, (uint64_t)13199952163826973175U, (uint64_t)2189856264898797734U, + (uint64_t)11356074861870267226U, 
(uint64_t)2027714896422561895U, (uint64_t)5261606367808050149U, + (uint64_t)153855954337762312U, (uint64_t)6375919692894573986U, (uint64_t)12364041207536146533U, + (uint64_t)1891896010455057160U, (uint64_t)1568123795087313171U, (uint64_t)18138710056556660101U, + (uint64_t)6004886947510047736U, (uint64_t)4811859325589542932U, (uint64_t)3618763430148954981U, + (uint64_t)11434521746258554122U, (uint64_t)10086341535864049427U, + (uint64_t)8073421629570399570U, (uint64_t)12680586148814729338U, (uint64_t)9619958020761569612U, + (uint64_t)15827203580658384478U, (uint64_t)12832694810937550406U, + (uint64_t)14977975484447400910U, (uint64_t)5478002389061063653U, + (uint64_t)14731136312639060880U, (uint64_t)4317867687275472033U, (uint64_t)6642650962855259884U, + (uint64_t)2514254944289495285U, (uint64_t)14231405641534478436U, (uint64_t)4045448346091518946U, + (uint64_t)8985477013445972471U, (uint64_t)8869039454457032149U, (uint64_t)4356978486208692970U, + (uint64_t)10805288613335538577U, (uint64_t)12832353127812502042U, + (uint64_t)4576590051676547490U, (uint64_t)6728053735138655107U, (uint64_t)17814206719173206184U, + (uint64_t)79790138573994940U, (uint64_t)17920293215101822267U, (uint64_t)13422026625585728864U, + (uint64_t)5018058010492547271U, (uint64_t)110232326023384102U, (uint64_t)10834264070056942976U, + (uint64_t)15222249086119088588U, (uint64_t)15119439519142044997U, + (uint64_t)11655511970063167313U, (uint64_t)1614477029450566107U, (uint64_t)3619322817271059794U, + (uint64_t)9352862040415412867U, (uint64_t)14017522553242747074U, + (uint64_t)13138513643674040327U, (uint64_t)3610195242889455765U, (uint64_t)8371069193996567291U, + (uint64_t)12670227996544662654U, (uint64_t)1205961025092146303U, + (uint64_t)13106709934003962112U, (uint64_t)4350113471327723407U, + (uint64_t)15060941403739680459U, (uint64_t)13639127647823205030U, + (uint64_t)10790943339357725715U, (uint64_t)498760574280648264U, (uint64_t)17922071907832082887U, + (uint64_t)15122670976670152145U, 
(uint64_t)6275027991110214322U, (uint64_t)7250912847491816402U, + (uint64_t)15206617260142982380U, (uint64_t)3385668313694152877U, + (uint64_t)17522479771766801905U, (uint64_t)2965919117476170655U, (uint64_t)1553238516603269404U, + (uint64_t)5820770015631050991U, (uint64_t)4999445222232605348U, (uint64_t)9245650860833717444U, + (uint64_t)1508811811724230728U, (uint64_t)5190684913765614385U, (uint64_t)15692927070934536166U, + (uint64_t)12981978499190500902U, (uint64_t)5143491963193394698U, (uint64_t)7705698092144084129U, + (uint64_t)581120653055084783U, (uint64_t)13886552864486459714U, (uint64_t)6290301270652587255U, + (uint64_t)8663431529954393128U, (uint64_t)17033405846475472443U, (uint64_t)5206780355442651635U, + (uint64_t)12580364474736467688U, (uint64_t)17934601912005283310U, + (uint64_t)15119491731028933652U, (uint64_t)17848231399859044858U, + (uint64_t)4427673319524919329U, (uint64_t)2673607337074368008U, (uint64_t)14034876464294699949U, + (uint64_t)10938948975420813697U, (uint64_t)15202340615298669183U, + (uint64_t)5496603454069431071U, (uint64_t)2486526142064906845U, (uint64_t)4507882119510526802U, + (uint64_t)13888151172411390059U, (uint64_t)15049027856908071726U, + (uint64_t)9667231543181973158U, (uint64_t)6406671575277563202U, (uint64_t)3395801050331215139U, + (uint64_t)9813607433539108308U, (uint64_t)2681417728820980381U, (uint64_t)18407064643927113994U, + (uint64_t)7707177692113485527U, (uint64_t)14218149384635317074U, (uint64_t)3658668346206375919U, + (uint64_t)15404713991002362166U, (uint64_t)10152074687696195207U, + (uint64_t)10926946599582128139U, (uint64_t)16907298600007085320U, + (uint64_t)16544287219664720279U, (uint64_t)11007075933432813205U, + (uint64_t)8652245965145713599U, (uint64_t)7857626748965990384U, (uint64_t)5602306604520095870U, + (uint64_t)2525139243938658618U, (uint64_t)14405696176872077447U, + (uint64_t)18432270482137885332U, (uint64_t)9913880809120071177U, + (uint64_t)16896141737831216972U, (uint64_t)7484791498211214829U, + 
(uint64_t)15635259968266497469U, (uint64_t)8495118537612215624U, (uint64_t)4915477980562575356U, + (uint64_t)16453519279754924350U, (uint64_t)14462108244565406969U, + (uint64_t)14837837755237096687U, (uint64_t)14130171078892575346U, + (uint64_t)15423793222528491497U, (uint64_t)5460399262075036084U, + (uint64_t)16085440580308415349U, (uint64_t)26873200736954488U, (uint64_t)5603655807457499550U, + (uint64_t)3342202915871129617U, (uint64_t)1604413932150236626U, (uint64_t)9684226585089458974U, + (uint64_t)1213229904006618539U, (uint64_t)6782978662408837236U, (uint64_t)11197029877749307372U, + (uint64_t)14085968786551657744U, (uint64_t)17352273610494009342U, + (uint64_t)7876582961192434984U + }; + +static const +uint64_t +Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4[192U] = + { + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, + (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U, + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)14619254753077084366U, + (uint64_t)13913835116514008593U, (uint64_t)15060744674088488145U, + (uint64_t)17668414598203068685U, (uint64_t)10761169236902342334U, + (uint64_t)15467027479157446221U, (uint64_t)14989185522423469618U, + (uint64_t)14354539272510107003U, (uint64_t)14298211796392133693U, + (uint64_t)13270323784253711450U, (uint64_t)13380964971965046957U, + (uint64_t)8686204248456909699U, (uint64_t)17434630286744937066U, (uint64_t)1355903775279084720U, + (uint64_t)7554695053550308662U, (uint64_t)11354971222741863570U, (uint64_t)564601613420749879U, + (uint64_t)8466325837259054896U, (uint64_t)10752965181772434263U, + (uint64_t)11405876547368426319U, (uint64_t)13791894568738930940U, + (uint64_t)8230587134406354675U, (uint64_t)12415514098722758608U, + (uint64_t)18414183046995786744U, (uint64_t)15508000368227372870U, + (uint64_t)5781062464627999307U, (uint64_t)15339429052219195590U, + (uint64_t)16038703753810741903U, (uint64_t)9587718938298980714U, 
(uint64_t)4822658817952386407U, + (uint64_t)1376351024833260660U, (uint64_t)1120174910554766702U, (uint64_t)1730170933262569274U, + (uint64_t)5187428548444533500U, (uint64_t)16242053503368957131U, (uint64_t)3036811119519868279U, + (uint64_t)1760267587958926638U, (uint64_t)170244572981065185U, (uint64_t)8063080791967388171U, + (uint64_t)4824892826607692737U, (uint64_t)16286391083472040552U, + (uint64_t)11945158615253358747U, (uint64_t)14096887760410224200U, + (uint64_t)1613720831904557039U, (uint64_t)14316966673761197523U, + (uint64_t)17411006201485445341U, (uint64_t)8112301506943158801U, (uint64_t)2069889233927989984U, + (uint64_t)10082848378277483927U, (uint64_t)3609691194454404430U, (uint64_t)6110437205371933689U, + (uint64_t)9769135977342231601U, (uint64_t)11977962151783386478U, + (uint64_t)18088718692559983573U, (uint64_t)11741637975753055U, (uint64_t)11110390325701582190U, + (uint64_t)1341402251566067019U, (uint64_t)3028229550849726478U, (uint64_t)10438984083997451310U, + (uint64_t)12730851885100145709U, (uint64_t)11524169532089894189U, + (uint64_t)4523375903229602674U, (uint64_t)2028602258037385622U, (uint64_t)17082839063089388410U, + (uint64_t)6103921364634113167U, (uint64_t)17066180888225306102U, + (uint64_t)11395680486707876195U, (uint64_t)10952892272443345484U, + (uint64_t)8792831960605859401U, (uint64_t)14194485427742325139U, + (uint64_t)15146020821144305250U, (uint64_t)1654766014957123343U, (uint64_t)7955526243090948551U, + (uint64_t)3989277566080493308U, (uint64_t)12229385116397931231U, + (uint64_t)13430548930727025562U, (uint64_t)3434892688179800602U, (uint64_t)8431998794645622027U, + (uint64_t)12132530981596299272U, (uint64_t)2289461608863966999U, + (uint64_t)18345870950201487179U, (uint64_t)13517947207801901576U, + (uint64_t)5213113244172561159U, (uint64_t)17632986594098340879U, (uint64_t)4405251818133148856U, + (uint64_t)11783009269435447793U, (uint64_t)9332138983770046035U, + (uint64_t)12863411548922539505U, (uint64_t)3717030292816178224U, + 
(uint64_t)10026078446427137374U, (uint64_t)11167295326594317220U, + (uint64_t)12425328773141588668U, (uint64_t)5760335125172049352U, (uint64_t)9016843701117277863U, + (uint64_t)5657892835694680172U, (uint64_t)11025130589305387464U, (uint64_t)1368484957977406173U, + (uint64_t)17361351345281258834U, (uint64_t)1907113641956152700U, + (uint64_t)16439233413531427752U, (uint64_t)5893322296986588932U, + (uint64_t)14000206906171746627U, (uint64_t)14979266987545792900U, + (uint64_t)6926291766898221120U, (uint64_t)7162023296083360752U, (uint64_t)14762747553625382529U, + (uint64_t)12610831658612406849U, (uint64_t)10462926899548715515U, + (uint64_t)4794017723140405312U, (uint64_t)5234438200490163319U, (uint64_t)8019519110339576320U, + (uint64_t)7194604241290530100U, (uint64_t)12626770134810813246U, + (uint64_t)10793074474236419890U, (uint64_t)11323224347913978783U, + (uint64_t)16831128015895380245U, (uint64_t)18323094195124693378U, + (uint64_t)2361097165281567692U, (uint64_t)15755578675014279498U, + (uint64_t)14289876470325854580U, (uint64_t)12856787656093616839U, + (uint64_t)3578928531243900594U, (uint64_t)3847532758790503699U, (uint64_t)8377953190224748743U, + (uint64_t)3314546646092744596U, (uint64_t)800810188859334358U, (uint64_t)4626344124229343596U, + (uint64_t)6620381605850876621U, (uint64_t)11422073570955989527U, + (uint64_t)12676813626484814469U, (uint64_t)16725029886764122240U, + (uint64_t)16648497372773830008U, (uint64_t)9135702594931291048U, + (uint64_t)16080949688826680333U, (uint64_t)11528096561346602947U, + (uint64_t)2632498067099740984U, (uint64_t)11583842699108800714U, (uint64_t)8378404864573610526U, + (uint64_t)1076560261627788534U, (uint64_t)13836015994325032828U, + (uint64_t)11234295937817067909U, (uint64_t)5893659808396722708U, + (uint64_t)11277421142886984364U, (uint64_t)8968549037166726491U, + (uint64_t)14841374331394032822U, (uint64_t)9967344773947889341U, (uint64_t)8799244393578496085U, + (uint64_t)5094686877301601410U, (uint64_t)8780316747074726862U, 
(uint64_t)9119697306829835718U, + (uint64_t)15381243327921855368U, (uint64_t)2686250164449435196U, + (uint64_t)16466917280442198358U, (uint64_t)13791704489163125216U, + (uint64_t)16955859337117924272U, (uint64_t)17112836394923783642U, + (uint64_t)4639176427338618063U, (uint64_t)16770029310141094964U, + (uint64_t)11049953922966416185U, (uint64_t)12012669590884098968U, + (uint64_t)4859326885929417214U, (uint64_t)896380084392586061U, (uint64_t)7153028362977034008U, + (uint64_t)10540021163316263301U, (uint64_t)9318277998512936585U, + (uint64_t)18344496977694796523U, (uint64_t)11374737400567645494U, + (uint64_t)17158800051138212954U, (uint64_t)18343197867863253153U, + (uint64_t)18204799297967861226U, (uint64_t)15798973531606348828U, + (uint64_t)9870158263408310459U, (uint64_t)17578869832774612627U, (uint64_t)8395748875822696932U, + (uint64_t)15310679007370670872U, (uint64_t)11205576736030808860U, + (uint64_t)10123429210002838967U, (uint64_t)5910544144088393959U, + (uint64_t)14016615653353687369U, (uint64_t)11191676704772957822U + }; + +static const +uint64_t +Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4[192U] = + { + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, + (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U, + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)7870395003430845958U, + (uint64_t)18001862936410067720U, (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U, + (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U, (uint64_t)7139806720777708306U, + (uint64_t)8253938546650739833U, (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U, + (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U, (uint64_t)8392845221328116213U, + (uint64_t)14630296398338540788U, (uint64_t)4268947906723414372U, (uint64_t)9231207002243517909U, + (uint64_t)14261219637616504262U, (uint64_t)7786881626982345356U, + (uint64_t)11412720751765882139U, 
(uint64_t)14119585051365330009U, + (uint64_t)15281626286521302128U, (uint64_t)6350171933454266732U, + (uint64_t)16559468304937127866U, (uint64_t)13200760478271693417U, + (uint64_t)6733381546280350776U, (uint64_t)3801404890075189193U, (uint64_t)2741036364686993903U, + (uint64_t)3218612940540174008U, (uint64_t)10894914335165419505U, + (uint64_t)11862941430149998362U, (uint64_t)4223151729402839584U, (uint64_t)2913215088487087887U, + (uint64_t)14562168920104952953U, (uint64_t)2170089393468287453U, + (uint64_t)10520900655016579352U, (uint64_t)7040362608949989273U, (uint64_t)8376510559381705307U, + (uint64_t)9142237200448131532U, (uint64_t)5696859948123854080U, (uint64_t)925422306716081180U, + (uint64_t)11155545953469186421U, (uint64_t)1888208646862572812U, + (uint64_t)11151095998248845721U, (uint64_t)15793503271680275267U, + (uint64_t)7729877044494854851U, (uint64_t)6235134673193032913U, (uint64_t)7364280682182401564U, + (uint64_t)5479679373325519985U, (uint64_t)17966037684582301763U, + (uint64_t)14140891609330279185U, (uint64_t)5814744449740463867U, (uint64_t)5652588426712591652U, + (uint64_t)774745682988690912U, (uint64_t)13228255573220500373U, (uint64_t)11949122068786859397U, + (uint64_t)8021166392900770376U, (uint64_t)7994323710948720063U, (uint64_t)9924618472877849977U, + (uint64_t)17618517523141194266U, (uint64_t)2750424097794401714U, + (uint64_t)15481749570715253207U, (uint64_t)14646964509921760497U, + (uint64_t)1037442848094301355U, (uint64_t)6295995947389299132U, (uint64_t)16915049722317579514U, + (uint64_t)10493877400992990313U, (uint64_t)18391008753060553521U, (uint64_t)483942209623707598U, + (uint64_t)2017775662838016613U, (uint64_t)5933251998459363553U, (uint64_t)11789135019970707407U, + (uint64_t)5484123723153268336U, (uint64_t)13246954648848484954U, (uint64_t)4774374393926023505U, + (uint64_t)14863995618704457336U, (uint64_t)13220153167104973625U, + (uint64_t)5988445485312390826U, (uint64_t)17580359464028944682U, (uint64_t)7297100131969874771U, + 
(uint64_t)379931507867989375U, (uint64_t)10927113096513421444U, (uint64_t)17688881974428340857U, + (uint64_t)4259872578781463333U, (uint64_t)8573076295966784472U, (uint64_t)16389829450727275032U, + (uint64_t)1667243868963568259U, (uint64_t)17730726848925960919U, + (uint64_t)11408899874569778008U, (uint64_t)3576527582023272268U, + (uint64_t)16492920640224231656U, (uint64_t)7906130545972460130U, + (uint64_t)13878604278207681266U, (uint64_t)41446695125652041U, (uint64_t)8891615271337333503U, + (uint64_t)2594537723613594470U, (uint64_t)7699579176995770924U, (uint64_t)147458463055730655U, + (uint64_t)12120406862739088406U, (uint64_t)12044892493010567063U, + (uint64_t)8554076749615475136U, (uint64_t)1005097692260929999U, (uint64_t)2687202654471188715U, + (uint64_t)9457588752176879209U, (uint64_t)17472884880062444019U, (uint64_t)9792097892056020166U, + (uint64_t)2525246678512797150U, (uint64_t)15958903035313115662U, + (uint64_t)11336038170342247032U, (uint64_t)11560342382835141123U, + (uint64_t)6212009033479929024U, (uint64_t)8214308203775021229U, (uint64_t)8475469210070503698U, + (uint64_t)13287024123485719563U, (uint64_t)12956951963817520723U, + (uint64_t)10693035819908470465U, (uint64_t)11375478788224786725U, + (uint64_t)16934625208487120398U, (uint64_t)10094585729115874495U, + (uint64_t)2763884524395905776U, (uint64_t)13535890148969964883U, + (uint64_t)13514657411765064358U, (uint64_t)9903074440788027562U, + (uint64_t)17324720726421199990U, (uint64_t)2273931039117368789U, (uint64_t)3442641041506157854U, + (uint64_t)1119853641236409612U, (uint64_t)12037070344296077989U, (uint64_t)581736433335671746U, + (uint64_t)6019150647054369174U, (uint64_t)14864096138068789375U, (uint64_t)6652995210998318662U, + (uint64_t)12773883697029175304U, (uint64_t)12751275631451845119U, + (uint64_t)11449095003038250478U, (uint64_t)1025805267334366480U, (uint64_t)2764432500300815015U, + (uint64_t)18274564429002844381U, (uint64_t)10445634195592600351U, + (uint64_t)11814099592837202735U, 
(uint64_t)5006796893679120289U, (uint64_t)6908397253997261914U, + (uint64_t)13266696965302879279U, (uint64_t)7768715053015037430U, (uint64_t)3569923738654785686U, + (uint64_t)5844853453464857549U, (uint64_t)1837340805629559110U, (uint64_t)1034657624388283114U, + (uint64_t)711244516069456460U, (uint64_t)12519286026957934814U, (uint64_t)2613464944620837619U, + (uint64_t)10003023321338286213U, (uint64_t)7291332092642881376U, (uint64_t)9832199564117004897U, + (uint64_t)3280736694860799890U, (uint64_t)6416452202849179874U, (uint64_t)7326961381798642069U, + (uint64_t)8435688798040635029U, (uint64_t)16630141263910982958U, + (uint64_t)17222635514422533318U, (uint64_t)9482787389178881499U, (uint64_t)836561194658263905U, + (uint64_t)3405319043337616649U, (uint64_t)2786146577568026518U, (uint64_t)7625483685691626321U, + (uint64_t)6728084875304656716U, (uint64_t)1140997959232544268U, (uint64_t)12847384827606303792U, + (uint64_t)1719121337754572070U, (uint64_t)12863589482936438532U, (uint64_t)3880712899640530862U, + (uint64_t)2748456882813671564U, (uint64_t)4775988900044623019U, (uint64_t)8937847374382191162U, + (uint64_t)3767367347172252295U, (uint64_t)13468672401049388646U, + (uint64_t)14359032216842397576U, (uint64_t)2002555958685443975U, + (uint64_t)16488678606651526810U, (uint64_t)11826135409597474760U, + (uint64_t)15296495673182508601U + }; + +static const +uint64_t +Hacl_P256_PrecompTable_precomp_basepoint_table_w5[384U] = + { + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, + (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U, + (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U, + (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U, + (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U, + (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U, + (uint64_t)18446744069414584320U, 
(uint64_t)18446744073709551615U, (uint64_t)4294967294U, + (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U, + (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U, + (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U, + (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U, + (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U, + (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U, + (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U, + (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U, + (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U, + (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U, + (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U, + (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U, + (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U, + (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U, + (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U, + (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U, + (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U, + (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U, + (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U, + (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U, + (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U, + (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U, + (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U, + (uint64_t)2997808084773249525U, 
(uint64_t)494323555453660587U, (uint64_t)1215695327517794764U, + (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U, + (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U, + (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U, + (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U, + (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U, + (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U, + (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U, + (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U, + (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U, + (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U, + (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U, + (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U, + (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U, + (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U, + (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U, + (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U, + (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U, + (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U, + (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U, + (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U, + (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U, + 
(uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U, + (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U, + (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U, + (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U, + (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U, + (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U, + (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U, + (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U, + (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U, + (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U, + (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U, + (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U, + (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U, + (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U, + (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U, + (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U, + (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U, + (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U, + (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U, + (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U, + (uint64_t)13240193491554624643U, (uint64_t)12365034249541329920U, + (uint64_t)2924326828590977357U, (uint64_t)5687195797140589099U, (uint64_t)16880427227292834531U, + (uint64_t)9691471435758991112U, (uint64_t)16642385273732487288U, + (uint64_t)12173806747523009914U, (uint64_t)13142722756877876849U, + (uint64_t)8370377548305121979U, 
(uint64_t)17988526053752025426U, (uint64_t)4818750752684100334U, + (uint64_t)5669241919350361655U, (uint64_t)4964810303238518540U, (uint64_t)16709712747671533191U, + (uint64_t)4461414404267448242U, (uint64_t)3971798785139504238U, (uint64_t)6276818948740422136U, + (uint64_t)1426735892164275762U, (uint64_t)7943622674892418919U, (uint64_t)9864274225563929680U, + (uint64_t)57815533745003233U, (uint64_t)10893588105168960233U, (uint64_t)15739162732907069535U, + (uint64_t)3923866849462073470U, (uint64_t)12279826158399226875U, (uint64_t)1533015761334846582U, + (uint64_t)15860156818568437510U, (uint64_t)8252625373831297988U, (uint64_t)9666953804812706358U, + (uint64_t)8767785238646914634U, (uint64_t)14382179044941403551U, + (uint64_t)10401039907264254245U, (uint64_t)8584860003763157350U, (uint64_t)3120462679504470266U, + (uint64_t)8670255778748340069U, (uint64_t)5313789577940369984U, (uint64_t)16977072364454789224U, + (uint64_t)12199578693972188324U, (uint64_t)18211098771672599237U, + (uint64_t)12868831556008795030U, (uint64_t)5310155061431048194U, + (uint64_t)18114153238435112606U, (uint64_t)14482365809278304512U, + (uint64_t)12520721662723001511U, (uint64_t)405943624021143002U, (uint64_t)8146944101507657423U, + (uint64_t)181739317780393495U, (uint64_t)81743892273670099U, (uint64_t)14759561962550473930U, + (uint64_t)4592623849546992939U, (uint64_t)6916440441743449719U, (uint64_t)1304610503530809833U, + (uint64_t)5464930909232486441U, (uint64_t)15414883617496224671U, (uint64_t)8129283345256790U, + (uint64_t)18294252198413739489U, (uint64_t)17394115281884857288U, + (uint64_t)7808348415224731235U, (uint64_t)13195566655747230608U, (uint64_t)8568194219353949094U, + (uint64_t)15329813048672122440U, (uint64_t)9604275495885785744U, (uint64_t)1577712551205219835U, + (uint64_t)15964209008022052790U, (uint64_t)15087297920782098160U, + (uint64_t)3946031512438511898U, (uint64_t)10050061168984440631U, + (uint64_t)11382452014533138316U, (uint64_t)6313670788911952792U, + 
(uint64_t)12015989229696164014U, (uint64_t)5946702628076168852U, (uint64_t)5219995658774362841U, + (uint64_t)12230141881068377972U, (uint64_t)12361195202673441956U, + (uint64_t)4732862275653856711U, (uint64_t)17221430380805252370U, + (uint64_t)15397525953897375810U, (uint64_t)16557437297239563045U, + (uint64_t)10101683801868971351U, (uint64_t)1402611372245592868U, (uint64_t)1931806383735563658U, + (uint64_t)10991705207471512479U, (uint64_t)861333583207471392U, (uint64_t)15207766844626322355U, + (uint64_t)9224628129811432393U, (uint64_t)3497069567089055613U, (uint64_t)11956632757898590316U, + (uint64_t)8733729372586312960U, (uint64_t)18091521051714930927U, (uint64_t)77582787724373283U, + (uint64_t)9922437373519669237U, (uint64_t)3079321456325704615U, (uint64_t)12171198408512478457U, + (uint64_t)17179130884012147596U, (uint64_t)6839115479620367181U, (uint64_t)4421032569964105406U, + (uint64_t)10353331468657256053U, (uint64_t)17400988720335968824U, + (uint64_t)17138855889417480540U, (uint64_t)4507980080381370611U, + (uint64_t)10703175719793781886U, (uint64_t)12598516658725890426U, + (uint64_t)8353463412173898932U, (uint64_t)17703029389228422404U, (uint64_t)9313111267107226233U, + (uint64_t)5441322942995154196U, (uint64_t)8952817660034465484U, (uint64_t)17571113341183703118U, + (uint64_t)7375087953801067019U, (uint64_t)13381466302076453648U, (uint64_t)3218165271423914596U, + (uint64_t)16956372157249382685U, (uint64_t)509080090049418841U, (uint64_t)13374233893294084913U, + (uint64_t)2988537624204297086U, (uint64_t)4979195832939384620U, (uint64_t)3803931594068976394U, + (uint64_t)10731535883829627646U, (uint64_t)12954845047607194278U, + (uint64_t)10494298062560667399U, (uint64_t)4967351022190213065U, + (uint64_t)13391917938145756456U, (uint64_t)951370484866918160U, (uint64_t)13531334179067685307U, + (uint64_t)12868421357919390599U, (uint64_t)15918857042998130258U, + (uint64_t)17769743831936974016U, (uint64_t)7137921979260368809U, + (uint64_t)12461369180685892062U, 
(uint64_t)827476514081935199U, (uint64_t)15107282134224767230U, + (uint64_t)10084765752802805748U, (uint64_t)3303739059392464407U, + (uint64_t)17859532612136591428U, (uint64_t)10949414770405040164U, + (uint64_t)12838613589371008785U, (uint64_t)5554397169231540728U, + (uint64_t)18375114572169624408U, (uint64_t)15649286703242390139U, + (uint64_t)2957281557463706877U, (uint64_t)14000350446219393213U, + (uint64_t)14355199721749620351U, (uint64_t)2730856240099299695U, + (uint64_t)17528131000714705752U, (uint64_t)2537498525883536360U, (uint64_t)6121058967084509393U, + (uint64_t)16897667060435514221U, (uint64_t)12367869599571112440U, + (uint64_t)3388831797050807508U, (uint64_t)16791449724090982798U, (uint64_t)2673426123453294928U, + (uint64_t)11369313542384405846U, (uint64_t)15641960333586432634U, + (uint64_t)15080962589658958379U, (uint64_t)7747943772340226569U, (uint64_t)8075023376199159152U, + (uint64_t)8485093027378306528U, (uint64_t)13503706844122243648U, (uint64_t)8401961362938086226U, + (uint64_t)8125426002124226402U, (uint64_t)9005399361407785203U, (uint64_t)6847968030066906634U, + (uint64_t)11934937736309295197U, (uint64_t)5116750888594772351U, (uint64_t)2817039227179245227U, + (uint64_t)17724206901239332980U, (uint64_t)4985702708254058578U, (uint64_t)5786345435756642871U, + (uint64_t)17772527414940936938U, (uint64_t)1201320251272957006U, + (uint64_t)15787430120324348129U, (uint64_t)6305488781359965661U, + (uint64_t)12423900845502858433U, (uint64_t)17485949424202277720U, + (uint64_t)2062237315546855852U, (uint64_t)10353639467860902375U, (uint64_t)2315398490451287299U, + (uint64_t)15394572894814882621U, (uint64_t)232866113801165640U, (uint64_t)7413443736109338926U, + (uint64_t)902719806551551191U, (uint64_t)16568853118619045174U, (uint64_t)14202214862428279177U, + (uint64_t)11719595395278861192U, (uint64_t)5890053236389907647U, (uint64_t)9996196494965833627U, + (uint64_t)12967056942364782577U, (uint64_t)9034128755157395787U, + (uint64_t)17898204904710512655U, 
(uint64_t)8229373445062993977U, + (uint64_t)13580036169519833644U + }; + +static inline uint64_t +Hacl_Bignum_Addition_bn_add_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res) +{ + uint64_t c = (uint64_t)0U; + for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++) + { + uint64_t t1 = a[(uint32_t)4U * i]; + uint64_t t20 = b[(uint32_t)4U * i]; + uint64_t *res_i0 = res + (uint32_t)4U * i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0); + uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; + uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; + uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1); + uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; + uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; + uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2); + uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; + uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; + uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i); + } + for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++) + { + uint64_t t1 = a[i]; + uint64_t t2 = b[i]; + uint64_t *res_i = res + i; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t2, res_i); + } + return c; +} + +static inline uint64_t +Hacl_Bignum_Lib_bn_get_bits_u64(uint32_t len, uint64_t *b, uint32_t i, uint32_t l) +{ + uint32_t i1 = i / (uint32_t)64U; + uint32_t j = i % (uint32_t)64U; + uint64_t p1 = b[i1] >> j; + uint64_t ite; + if (i1 + (uint32_t)1U < len && (uint32_t)0U < j) + { + ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); + } + else + { + ite = p1; + } + return ite & (((uint64_t)1U << l) - (uint64_t)1U); +} + +static inline uint64_t +Hacl_Bignum_Base_mul_wide_add2_u64(uint64_t a, uint64_t b, uint64_t c_in, uint64_t *out) +{ + uint64_t out0 = out[0U]; + FStar_UInt128_uint128 
+ res = + FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(a, b), + FStar_UInt128_uint64_to_uint128(c_in)), + FStar_UInt128_uint64_to_uint128(out0)); + out[0U] = FStar_UInt128_uint128_to_uint64(res); + return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U)); +} + +#endif // CRYPTO_HACL_P256_H_ diff --git a/crypto/hacl_rsa.h b/crypto/hacl_rsa.h new file mode 100644 index 0000000000000..4c68b10c71f1d --- /dev/null +++ b/crypto/hacl_rsa.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * Copyright (c) 2023 Cryspen + */ + +#ifndef CRYPTO_HACL_RSA_H_ +#define CRYPTO_HACL_RSA_H_ + +#include "hacl_lib.h" +#include "hacl_bignum.h" + +/** +Decrypt a message `cipher` and write the plaintext to `plain`. + +@param modBits Count of bits in the modulus (`n`). +@param eBits Count of bits in `e` value. +@param dBits Count of bits in `d` value. +@param skey Pointer to secret key created by `Hacl_RSA_new_rsa_load_skey`. +@param cipher Pointer to `ceil((modBits - 1) / 8)` bytes where the ciphertext is read from. +@param plain Pointer to `ceil(modBits / 8)` bytes where the plaintext is written to. + +@return Returns true if and only if decryption was successful. +*/ +bool +Hacl_RSA_rsa_dec( + uint32_t modBits, + uint32_t eBits, + uint32_t dBits, + uint64_t *skey, + uint8_t *cipher, + uint8_t *plain +); + +/** +Encrypt a message `plain` and write the ciphertext to `cipher`. + +@param modBits Count of bits in the modulus (`n`). +@param eBits Count of bits in `e` value. +@param pkey Pointer to public key created by `Hacl_RSA_new_rsa_load_pkey`. + +@param plain Pointer to `ceil(modBits / 8)` bytes where the plaintext is read from. +@param cipher Pointer to `ceil((modBits - 1) / 8)` bytes where the ciphertext is written to. + +@return Returns true if and only if encryption was successful. 
+*/ +bool +Hacl_RSA_rsa_enc( + uint32_t modBits, + uint32_t eBits, + uint64_t *pkey, + uint8_t *plain, + uint8_t *cipher +); + +/** +Load a public key from key parts. + +@param modBits Count of bits in modulus (`n`). +@param eBits Count of bits in `e` value. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(eBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. + +@return Returns an allocated public key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. +*/ +uint64_t +*Hacl_RSA_new_rsa_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb); + +/** +Load a secret key from key parts. + +@param modBits Count of bits in modulus (`n`). +@param eBits Count of bits in `e` value. +@param dBits Count of bits in `d` value. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(eBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(dBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. + +@return Returns an allocated secret key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. 
+*/ +uint64_t +*Hacl_RSA_new_rsa_load_skey( + uint32_t modBits, + uint32_t eBits, + uint32_t dBits, + uint8_t *nb, + uint8_t *eb, + uint8_t *db +); + +#endif diff --git a/crypto/p256-hacl-generated.c b/crypto/p256-hacl-generated.c new file mode 100644 index 0000000000000..5829b9269cb1a --- /dev/null +++ b/crypto/p256-hacl-generated.c @@ -0,0 +1,1808 @@ +/* GPLv2 or MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + */ + + +#include "hacl_p256.h" + +static inline uint64_t +bn_is_zero_mask4(uint64_t *f) +{ + uint64_t bn_zero[4U] = { 0U }; + uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]); + mask = uu____0 & mask;); + uint64_t mask1 = mask; + uint64_t res = mask1; + return res; +} + +static inline bool +bn_is_zero_vartime4(uint64_t *f) +{ + uint64_t m = bn_is_zero_mask4(f); + return m == (uint64_t)0xFFFFFFFFFFFFFFFFU; +} + +static inline uint64_t +bn_is_eq_mask4(uint64_t *a, uint64_t *b) +{ + uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]); + mask = uu____0 & mask;); + uint64_t mask1 = mask; + return mask1; +} + +static inline bool +bn_is_eq_vartime4(uint64_t *a, uint64_t *b) +{ + uint64_t m = bn_is_eq_mask4(a, b); + return m == (uint64_t)0xFFFFFFFFFFFFFFFFU; +} + +static inline void +bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_t *y) +{ + uint64_t mask = ~FStar_UInt64_eq_mask(cin, (uint64_t)0U); + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = res; + uint64_t uu____0 = x[i]; + uint64_t x1 = uu____0 ^ (mask & (y[i] ^ uu____0)); + os[i] = x1;); +} + +static inline void +bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y) +{ + uint64_t c0 = (uint64_t)0U; + { + uint64_t t1 = 
x[(uint32_t)4U * (uint32_t)0U]; + uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0); + uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1); + uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2); + uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i); + } + uint64_t c00 = c0; + uint64_t tmp[4U] = { 0U }; + uint64_t c = (uint64_t)0U; + { + uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; + uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0); + uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1); + uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2); + uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = tmp + 
(uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i); + } + uint64_t c1 = c; + uint64_t c2 = c00 - c1; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = res; + uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]); + os[i] = x1;); +} + +static inline uint64_t +bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y) +{ + uint64_t c = (uint64_t)0U; + { + uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U]; + uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0); + uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1); + uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2); + uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i); + } + uint64_t c0 = c; + return c0; +} + +static inline void +bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y) +{ + uint64_t c0 = (uint64_t)0U; + { + uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U]; + uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0); + uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = res + (uint32_t)4U * 
(uint32_t)0U + (uint32_t)1U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1); + uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2); + uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i); + } + uint64_t c00 = c0; + uint64_t tmp[4U] = { 0U }; + uint64_t c = (uint64_t)0U; + { + uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; + uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0); + uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1); + uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2); + uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i); + } + uint64_t c1 = c; + KRML_HOST_IGNORE(c1); + uint64_t c2 = (uint64_t)0U - c00; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = res; + uint64_t x1 = (c2 & tmp[i]) | (~c2 & res[i]); + os[i] = x1;); +} + +static inline void 
+bn_mul4(uint64_t *res, uint64_t *x, uint64_t *y) +{ + memset(res, 0U, (uint32_t)8U * sizeof(uint64_t)); + KRML_MAYBE_FOR4( + i0, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t bj = y[i0]; + uint64_t *res_j = res + i0; + uint64_t c = (uint64_t)0U; + { + uint64_t a_i = x[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0); + uint64_t a_i0 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1); + uint64_t a_i1 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2); + uint64_t a_i2 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i); + } uint64_t r = c; + res[(uint32_t)4U + i0] = r;); +} + +static inline void +bn_sqr4(uint64_t *res, uint64_t *x) +{ + memset(res, 0U, (uint32_t)8U * sizeof(uint64_t)); + KRML_MAYBE_FOR4( + i0, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *ab = x; + uint64_t a_j = x[i0]; + uint64_t *res_j = res + i0; + uint64_t c = (uint64_t)0U; + for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) { + uint64_t a_i = ab[(uint32_t)4U * i]; + uint64_t *res_i0 = res_j + (uint32_t)4U * i; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0); + uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; + uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1); + uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; + uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2); + uint64_t a_i2 = ab[(uint32_t)4U * i + 
(uint32_t)3U]; + uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i); + } for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) { + uint64_t a_i = ab[i]; + uint64_t *res_i = res_j + i; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i); + } uint64_t r = c; + res[i0 + i0] = r;); + uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res); + KRML_HOST_IGNORE(c0); + uint64_t tmp[8U] = { 0U }; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(x[i], x[i]); + uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U)); + uint64_t lo = FStar_UInt128_uint128_to_uint64(res1); + tmp[(uint32_t)2U * i] = lo; + tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;); + uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res); + KRML_HOST_IGNORE(c1); +} + +static inline void +bn_to_bytes_be4(uint8_t *res, uint64_t *f) +{ + uint8_t tmp[32U] = { 0U }; + KRML_HOST_IGNORE(tmp); + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + store64_be(res + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]);); +} + +static inline void +bn_from_bytes_be4(uint64_t *res, uint8_t *b) +{ + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = res; + uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U); + uint64_t x = u; + os[i] = x;); +} + +static inline void +bn2_to_bytes_be4(uint8_t *res, uint64_t *x, uint64_t *y) +{ + bn_to_bytes_be4(res, x); + bn_to_bytes_be4(res + (uint32_t)32U, y); +} + +static inline void +make_prime(uint64_t *n) +{ + n[0U] = (uint64_t)0xffffffffffffffffU; + n[1U] = (uint64_t)0xffffffffU; + n[2U] = (uint64_t)0x0U; + n[3U] = (uint64_t)0xffffffff00000001U; +} + +static inline void +make_order(uint64_t *n) +{ + n[0U] = (uint64_t)0xf3b9cac2fc632551U; + n[1U] = 
(uint64_t)0xbce6faada7179e84U; + n[2U] = (uint64_t)0xffffffffffffffffU; + n[3U] = (uint64_t)0xffffffff00000000U; +} + +static inline void +make_a_coeff(uint64_t *a) +{ + a[0U] = (uint64_t)0xfffffffffffffffcU; + a[1U] = (uint64_t)0x3ffffffffU; + a[2U] = (uint64_t)0x0U; + a[3U] = (uint64_t)0xfffffffc00000004U; +} + +static inline void +make_b_coeff(uint64_t *b) +{ + b[0U] = (uint64_t)0xd89cdf6229c4bddfU; + b[1U] = (uint64_t)0xacf005cd78843090U; + b[2U] = (uint64_t)0xe5a220abf7212ed6U; + b[3U] = (uint64_t)0xdc30061d04874834U; +} + +static inline void +make_g_x(uint64_t *n) +{ + n[0U] = (uint64_t)0x79e730d418a9143cU; + n[1U] = (uint64_t)0x75ba95fc5fedb601U; + n[2U] = (uint64_t)0x79fb732b77622510U; + n[3U] = (uint64_t)0x18905f76a53755c6U; +} + +static inline void +make_g_y(uint64_t *n) +{ + n[0U] = (uint64_t)0xddf25357ce95560aU; + n[1U] = (uint64_t)0x8b4ab8e4ba19e45cU; + n[2U] = (uint64_t)0xd2e88688dd21f325U; + n[3U] = (uint64_t)0x8571ff1825885d85U; +} + +static inline void +make_fmont_R2(uint64_t *n) +{ + n[0U] = (uint64_t)0x3U; + n[1U] = (uint64_t)0xfffffffbffffffffU; + n[2U] = (uint64_t)0xfffffffffffffffeU; + n[3U] = (uint64_t)0x4fffffffdU; +} + +static inline void +make_fzero(uint64_t *n) +{ + n[0U] = (uint64_t)0U; + n[1U] = (uint64_t)0U; + n[2U] = (uint64_t)0U; + n[3U] = (uint64_t)0U; +} + +static inline void +make_fone(uint64_t *n) +{ + n[0U] = (uint64_t)0x1U; + n[1U] = (uint64_t)0xffffffff00000000U; + n[2U] = (uint64_t)0xffffffffffffffffU; + n[3U] = (uint64_t)0xfffffffeU; +} + +static inline uint64_t +bn_is_lt_prime_mask4(uint64_t *f) +{ + uint64_t tmp[4U] = { 0U }; + make_prime(tmp); + uint64_t c = bn_sub4(tmp, f, tmp); + return (uint64_t)0U - c; +} + +static inline uint64_t +feq_mask(uint64_t *a, uint64_t *b) +{ + uint64_t r = bn_is_eq_mask4(a, b); + return r; +} + +static inline void +fadd0(uint64_t *res, uint64_t *x, uint64_t *y) +{ + uint64_t n[4U] = { 0U }; + make_prime(n); + bn_add_mod4(res, n, x, y); +} + +static inline void +fsub0(uint64_t *res, uint64_t 
*x, uint64_t *y) +{ + uint64_t n[4U] = { 0U }; + make_prime(n); + bn_sub_mod4(res, n, x, y); +} + +static inline void +fnegate_conditional_vartime(uint64_t *f, bool is_negate) +{ + uint64_t zero[4U] = { 0U }; + if (is_negate) { + fsub0(f, zero, f); + } +} + +static inline void +mont_reduction(uint64_t *res, uint64_t *x) +{ + uint64_t n[4U] = { 0U }; + make_prime(n); + uint64_t c0 = (uint64_t)0U; + KRML_MAYBE_FOR4( + i0, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t qj = (uint64_t)1U * x[i0]; + uint64_t *res_j0 = x + i0; + uint64_t c = (uint64_t)0U; + { + uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0); + uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1); + uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2); + uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i); + } uint64_t r = c; + uint64_t c1 = r; + uint64_t *resb = x + (uint32_t)4U + i0; + uint64_t res_j = x[(uint32_t)4U + i0]; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);); + memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof(uint64_t)); + uint64_t c00 = c0; + uint64_t tmp[4U] = { 0U }; + uint64_t c = (uint64_t)0U; + { + uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; + uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0); + uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t t21 = 
n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1); + uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2); + uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i); + } + uint64_t c1 = c; + uint64_t c2 = c00 - c1; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = res; + uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]); + os[i] = x1;); +} + +static inline void +fmul0(uint64_t *res, uint64_t *x, uint64_t *y) +{ + uint64_t tmp[8U] = { 0U }; + bn_mul4(tmp, x, y); + mont_reduction(res, tmp); +} + +static inline void +fsqr0(uint64_t *res, uint64_t *x) +{ + uint64_t tmp[8U] = { 0U }; + bn_sqr4(tmp, x); + mont_reduction(res, tmp); +} + +static inline void +from_mont(uint64_t *res, uint64_t *a) +{ + uint64_t tmp[8U] = { 0U }; + memcpy(tmp, a, (uint32_t)4U * sizeof(uint64_t)); + mont_reduction(res, tmp); +} + +static inline void +to_mont(uint64_t *res, uint64_t *a) +{ + uint64_t r2modn[4U] = { 0U }; + make_fmont_R2(r2modn); + fmul0(res, a, r2modn); +} + +static inline void +fmul_by_b_coeff(uint64_t *res, uint64_t *x) +{ + uint64_t b_coeff[4U] = { 0U }; + make_b_coeff(b_coeff); + fmul0(res, b_coeff, x); +} + +static inline void +fcube(uint64_t *res, uint64_t *x) +{ + fsqr0(res, x); + fmul0(res, res, x); +} + +static inline void +finv(uint64_t *res, uint64_t *a) +{ + uint64_t tmp[16U] = { 0U }; + uint64_t *x30 = tmp; + uint64_t *x2 = tmp + (uint32_t)4U; + uint64_t *tmp1 = tmp + (uint32_t)8U; + uint64_t 
*tmp2 = tmp + (uint32_t)12U; + memcpy(x2, a, (uint32_t)4U * sizeof(uint64_t)); + { + fsqr0(x2, x2); + } + fmul0(x2, x2, a); + memcpy(x30, x2, (uint32_t)4U * sizeof(uint64_t)); + { + fsqr0(x30, x30); + } + fmul0(x30, x30, a); + memcpy(tmp1, x30, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1);); + fmul0(tmp1, tmp1, x30); + memcpy(tmp2, tmp1, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, fsqr0(tmp2, tmp2);); + fmul0(tmp2, tmp2, tmp1); + memcpy(tmp1, tmp2, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1);); + fmul0(tmp1, tmp1, x30); + memcpy(x30, tmp1, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR15(i, (uint32_t)0U, (uint32_t)15U, (uint32_t)1U, fsqr0(x30, x30);); + fmul0(x30, x30, tmp1); + memcpy(tmp1, x30, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp1, tmp1);); + fmul0(tmp1, tmp1, x2); + memcpy(x2, tmp1, (uint32_t)4U * sizeof(uint64_t)); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) { + fsqr0(x2, x2); + } + fmul0(x2, x2, a); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++) { + fsqr0(x2, x2); + } + fmul0(x2, x2, tmp1); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) { + fsqr0(x2, x2); + } + fmul0(x2, x2, tmp1); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)30U; i++) { + fsqr0(x2, x2); + } + fmul0(x2, x2, x30); + KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(x2, x2);); + fmul0(tmp1, x2, a); + memcpy(res, tmp1, (uint32_t)4U * sizeof(uint64_t)); +} + +static inline void +fsqrt(uint64_t *res, uint64_t *a) +{ + uint64_t tmp[8U] = { 0U }; + uint64_t *tmp1 = tmp; + uint64_t *tmp2 = tmp + (uint32_t)4U; + memcpy(tmp1, a, (uint32_t)4U * sizeof(uint64_t)); + { + fsqr0(tmp1, tmp1); + } + fmul0(tmp1, tmp1, a); + memcpy(tmp2, tmp1, (uint32_t)4U * sizeof(uint64_t)); + 
KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp2, tmp2);); + fmul0(tmp2, tmp2, tmp1); + memcpy(tmp1, tmp2, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, fsqr0(tmp1, tmp1);); + fmul0(tmp1, tmp1, tmp2); + memcpy(tmp2, tmp1, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, fsqr0(tmp2, tmp2);); + fmul0(tmp2, tmp2, tmp1); + memcpy(tmp1, tmp2, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, fsqr0(tmp1, tmp1);); + fmul0(tmp1, tmp1, tmp2); + memcpy(tmp2, tmp1, (uint32_t)4U * sizeof(uint64_t)); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) { + fsqr0(tmp2, tmp2); + } + fmul0(tmp2, tmp2, a); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)96U; i++) { + fsqr0(tmp2, tmp2); + } + fmul0(tmp2, tmp2, a); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)94U; i++) { + fsqr0(tmp2, tmp2); + } + memcpy(res, tmp2, (uint32_t)4U * sizeof(uint64_t)); +} + +static inline void +make_base_point(uint64_t *p) +{ + uint64_t *x = p; + uint64_t *y = p + (uint32_t)4U; + uint64_t *z = p + (uint32_t)8U; + make_g_x(x); + make_g_y(y); + make_fone(z); +} + +static inline void +make_point_at_inf(uint64_t *p) +{ + uint64_t *x = p; + uint64_t *y = p + (uint32_t)4U; + uint64_t *z = p + (uint32_t)8U; + make_fzero(x); + make_fone(y); + make_fzero(z); +} + +static inline bool +is_point_at_inf_vartime(uint64_t *p) +{ + uint64_t *pz = p + (uint32_t)8U; + return bn_is_zero_vartime4(pz); +} + +static inline void +to_aff_point(uint64_t *res, uint64_t *p) +{ + uint64_t zinv[4U] = { 0U }; + uint64_t *px = p; + uint64_t *py = p + (uint32_t)4U; + uint64_t *pz = p + (uint32_t)8U; + uint64_t *x = res; + uint64_t *y = res + (uint32_t)4U; + finv(zinv, pz); + fmul0(x, px, zinv); + fmul0(y, py, zinv); + from_mont(x, x); + from_mont(y, y); +} + +static inline void +to_aff_point_x(uint64_t *res, uint64_t *p) +{ + uint64_t zinv[4U] = { 0U }; + 
uint64_t *px = p; + uint64_t *pz = p + (uint32_t)8U; + finv(zinv, pz); + fmul0(res, px, zinv); + from_mont(res, res); +} + +static inline void +to_proj_point(uint64_t *res, uint64_t *p) +{ + uint64_t *px = p; + uint64_t *py = p + (uint32_t)4U; + uint64_t *rx = res; + uint64_t *ry = res + (uint32_t)4U; + uint64_t *rz = res + (uint32_t)8U; + to_mont(rx, px); + to_mont(ry, py); + make_fone(rz); +} + +static inline bool +is_on_curve_vartime(uint64_t *p) +{ + uint64_t rp[4U] = { 0U }; + uint64_t tx[4U] = { 0U }; + uint64_t ty[4U] = { 0U }; + uint64_t *px = p; + uint64_t *py = p + (uint32_t)4U; + to_mont(tx, px); + to_mont(ty, py); + uint64_t tmp[4U] = { 0U }; + fcube(rp, tx); + make_a_coeff(tmp); + fmul0(tmp, tmp, tx); + fadd0(rp, tmp, rp); + make_b_coeff(tmp); + fadd0(rp, tmp, rp); + fsqr0(ty, ty); + uint64_t r = feq_mask(ty, rp); + bool r0 = r == (uint64_t)0xFFFFFFFFFFFFFFFFU; + return r0; +} + +static inline void +aff_point_store(uint8_t *res, uint64_t *p) +{ + uint64_t *px = p; + uint64_t *py = p + (uint32_t)4U; + bn2_to_bytes_be4(res, px, py); +} + +static inline void +point_store(uint8_t *res, uint64_t *p) +{ + uint64_t aff_p[8U] = { 0U }; + to_aff_point(aff_p, p); + aff_point_store(res, aff_p); +} + +static inline bool +aff_point_load_vartime(uint64_t *p, uint8_t *b) +{ + uint8_t *p_x = b; + uint8_t *p_y = b + (uint32_t)32U; + uint64_t *bn_p_x = p; + uint64_t *bn_p_y = p + (uint32_t)4U; + bn_from_bytes_be4(bn_p_x, p_x); + bn_from_bytes_be4(bn_p_y, p_y); + uint64_t *px = p; + uint64_t *py = p + (uint32_t)4U; + uint64_t lessX = bn_is_lt_prime_mask4(px); + uint64_t lessY = bn_is_lt_prime_mask4(py); + uint64_t res = lessX & lessY; + bool is_xy_valid = res == (uint64_t)0xFFFFFFFFFFFFFFFFU; + if (!is_xy_valid) { + return false; + } + return is_on_curve_vartime(p); +} + +static inline bool +load_point_vartime(uint64_t *p, uint8_t *b) +{ + uint64_t p_aff[8U] = { 0U }; + bool res = aff_point_load_vartime(p_aff, b); + if (res) { + to_proj_point(p, p_aff); + } + return res; 
+} + +static inline bool +aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_t *s) +{ + uint8_t s0 = s[0U]; + uint8_t s01 = s0; + if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U)) { + return false; + } + uint8_t *xb = s + (uint32_t)1U; + bn_from_bytes_be4(x, xb); + uint64_t is_x_valid = bn_is_lt_prime_mask4(x); + bool is_x_valid1 = is_x_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU; + bool is_y_odd = s01 == (uint8_t)0x03U; + if (!is_x_valid1) { + return false; + } + uint64_t y2M[4U] = { 0U }; + uint64_t xM[4U] = { 0U }; + uint64_t yM[4U] = { 0U }; + to_mont(xM, x); + uint64_t tmp[4U] = { 0U }; + fcube(y2M, xM); + make_a_coeff(tmp); + fmul0(tmp, tmp, xM); + fadd0(y2M, tmp, y2M); + make_b_coeff(tmp); + fadd0(y2M, tmp, y2M); + fsqrt(yM, y2M); + from_mont(y, yM); + fsqr0(yM, yM); + uint64_t r = feq_mask(yM, y2M); + bool is_y_valid = r == (uint64_t)0xFFFFFFFFFFFFFFFFU; + bool is_y_valid0 = is_y_valid; + if (!is_y_valid0) { + return false; + } + uint64_t is_y_odd1 = y[0U] & (uint64_t)1U; + bool is_y_odd2 = is_y_odd1 == (uint64_t)1U; + fnegate_conditional_vartime(y, is_y_odd2 != is_y_odd); + return true; +} + +static inline void +point_double(uint64_t *res, uint64_t *p) +{ + uint64_t tmp[20U] = { 0U }; + uint64_t *x = p; + uint64_t *z = p + (uint32_t)8U; + uint64_t *x3 = res; + uint64_t *y3 = res + (uint32_t)4U; + uint64_t *z3 = res + (uint32_t)8U; + uint64_t *t0 = tmp; + uint64_t *t1 = tmp + (uint32_t)4U; + uint64_t *t2 = tmp + (uint32_t)8U; + uint64_t *t3 = tmp + (uint32_t)12U; + uint64_t *t4 = tmp + (uint32_t)16U; + uint64_t *x1 = p; + uint64_t *y = p + (uint32_t)4U; + uint64_t *z1 = p + (uint32_t)8U; + fsqr0(t0, x1); + fsqr0(t1, y); + fsqr0(t2, z1); + fmul0(t3, x1, y); + fadd0(t3, t3, t3); + fmul0(t4, y, z1); + fmul0(z3, x, z); + fadd0(z3, z3, z3); + fmul_by_b_coeff(y3, t2); + fsub0(y3, y3, z3); + fadd0(x3, y3, y3); + fadd0(y3, x3, y3); + fsub0(x3, t1, y3); + fadd0(y3, t1, y3); + fmul0(y3, x3, y3); + fmul0(x3, x3, t3); + fadd0(t3, t2, t2); + fadd0(t2, t2, t3); 
+ fmul_by_b_coeff(z3, z3); + fsub0(z3, z3, t2); + fsub0(z3, z3, t0); + fadd0(t3, z3, z3); + fadd0(z3, z3, t3); + fadd0(t3, t0, t0); + fadd0(t0, t3, t0); + fsub0(t0, t0, t2); + fmul0(t0, t0, z3); + fadd0(y3, y3, t0); + fadd0(t0, t4, t4); + fmul0(z3, t0, z3); + fsub0(x3, x3, z3); + fmul0(z3, t0, t1); + fadd0(z3, z3, z3); + fadd0(z3, z3, z3); +} + +static inline void +point_add(uint64_t *res, uint64_t *p, uint64_t *q) +{ + uint64_t tmp[36U] = { 0U }; + uint64_t *t0 = tmp; + uint64_t *t1 = tmp + (uint32_t)24U; + uint64_t *x3 = t1; + uint64_t *y3 = t1 + (uint32_t)4U; + uint64_t *z3 = t1 + (uint32_t)8U; + uint64_t *t01 = t0; + uint64_t *t11 = t0 + (uint32_t)4U; + uint64_t *t2 = t0 + (uint32_t)8U; + uint64_t *t3 = t0 + (uint32_t)12U; + uint64_t *t4 = t0 + (uint32_t)16U; + uint64_t *t5 = t0 + (uint32_t)20U; + uint64_t *x1 = p; + uint64_t *y1 = p + (uint32_t)4U; + uint64_t *z10 = p + (uint32_t)8U; + uint64_t *x20 = q; + uint64_t *y20 = q + (uint32_t)4U; + uint64_t *z20 = q + (uint32_t)8U; + fmul0(t01, x1, x20); + fmul0(t11, y1, y20); + fmul0(t2, z10, z20); + fadd0(t3, x1, y1); + fadd0(t4, x20, y20); + fmul0(t3, t3, t4); + fadd0(t4, t01, t11); + uint64_t *y10 = p + (uint32_t)4U; + uint64_t *z11 = p + (uint32_t)8U; + uint64_t *y2 = q + (uint32_t)4U; + uint64_t *z21 = q + (uint32_t)8U; + fsub0(t3, t3, t4); + fadd0(t4, y10, z11); + fadd0(t5, y2, z21); + fmul0(t4, t4, t5); + fadd0(t5, t11, t2); + fsub0(t4, t4, t5); + uint64_t *x10 = p; + uint64_t *z1 = p + (uint32_t)8U; + uint64_t *x2 = q; + uint64_t *z2 = q + (uint32_t)8U; + fadd0(x3, x10, z1); + fadd0(y3, x2, z2); + fmul0(x3, x3, y3); + fadd0(y3, t01, t2); + fsub0(y3, x3, y3); + fmul_by_b_coeff(z3, t2); + fsub0(x3, y3, z3); + fadd0(z3, x3, x3); + fadd0(x3, x3, z3); + fsub0(z3, t11, x3); + fadd0(x3, t11, x3); + fmul_by_b_coeff(y3, y3); + fadd0(t11, t2, t2); + fadd0(t2, t11, t2); + fsub0(y3, y3, t2); + fsub0(y3, y3, t01); + fadd0(t11, y3, y3); + fadd0(y3, t11, y3); + fadd0(t11, t01, t01); + fadd0(t01, t11, t01); + fsub0(t01, 
t01, t2); + fmul0(t11, t4, y3); + fmul0(t2, t01, y3); + fmul0(y3, x3, z3); + fadd0(y3, y3, t2); + fmul0(x3, t3, x3); + fsub0(x3, x3, t11); + fmul0(z3, t4, z3); + fmul0(t11, t3, t01); + fadd0(z3, z3, t11); + memcpy(res, t1, (uint32_t)12U * sizeof(uint64_t)); +} + +static inline void +point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p) +{ + uint64_t table[192U] = { 0U }; + uint64_t tmp[12U] = { 0U }; + uint64_t *t0 = table; + uint64_t *t1 = table + (uint32_t)12U; + make_point_at_inf(t0); + memcpy(t1, p, (uint32_t)12U * sizeof(uint64_t)); + KRML_MAYBE_FOR7(i, + (uint32_t)0U, + (uint32_t)7U, + (uint32_t)1U, + uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)12U; + point_double(tmp, t11); + memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U, + tmp, + (uint32_t)12U * sizeof(uint64_t)); + uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U; + point_add(tmp, p, t2); + memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U, + tmp, + (uint32_t)12U * sizeof(uint64_t));); + make_point_at_inf(res); + uint64_t tmp0[12U] = { 0U }; + for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++) { + KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res);); + uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U; + uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U); + memcpy(tmp0, (uint64_t *)table, (uint32_t)12U * sizeof(uint64_t)); + KRML_MAYBE_FOR15(i1, + (uint32_t)0U, + (uint32_t)15U, + (uint32_t)1U, + uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U)); + const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)12U; + KRML_MAYBE_FOR12(i, + (uint32_t)0U, + (uint32_t)12U, + (uint32_t)1U, + uint64_t *os = tmp0; + uint64_t x = (c & res_j[i]) | (~c & tmp0[i]); + os[i] = x;);); + point_add(res, res, tmp0); + } +} + +static inline void +precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp) +{ + memcpy(tmp, 
(uint64_t *)table, (uint32_t)12U * sizeof(uint64_t)); + KRML_MAYBE_FOR15(i0, + (uint32_t)0U, + (uint32_t)15U, + (uint32_t)1U, + uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U)); + const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)12U; + KRML_MAYBE_FOR12(i, + (uint32_t)0U, + (uint32_t)12U, + (uint32_t)1U, + uint64_t *os = tmp; + uint64_t x = (c & res_j[i]) | (~c & tmp[i]); + os[i] = x;);); +} + +static inline void +point_mul_g(uint64_t *res, uint64_t *scalar) +{ + uint64_t q1[12U] = { 0U }; + make_base_point(q1); + uint64_t + q2[12U] = { + (uint64_t)1499621593102562565U, (uint64_t)16692369783039433128U, + (uint64_t)15337520135922861848U, (uint64_t)5455737214495366228U, + (uint64_t)17827017231032529600U, (uint64_t)12413621606240782649U, + (uint64_t)2290483008028286132U, (uint64_t)15752017553340844820U, + (uint64_t)4846430910634234874U, (uint64_t)10861682798464583253U, + (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U + }; + uint64_t + q3[12U] = { + (uint64_t)14619254753077084366U, (uint64_t)13913835116514008593U, + (uint64_t)15060744674088488145U, (uint64_t)17668414598203068685U, + (uint64_t)10761169236902342334U, (uint64_t)15467027479157446221U, + (uint64_t)14989185522423469618U, (uint64_t)14354539272510107003U, + (uint64_t)14298211796392133693U, (uint64_t)13270323784253711450U, + (uint64_t)13380964971965046957U, (uint64_t)8686204248456909699U + }; + uint64_t + q4[12U] = { + (uint64_t)7870395003430845958U, (uint64_t)18001862936410067720U, + (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U, + (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U, + (uint64_t)7139806720777708306U, (uint64_t)8253938546650739833U, + (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U, + (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U + }; + uint64_t *r1 = scalar; + uint64_t *r2 = scalar + (uint32_t)1U; + uint64_t *r3 = scalar + (uint32_t)2U; + uint64_t *r4 = scalar + (uint32_t)3U; + 
make_point_at_inf(res); + uint64_t tmp[12U] = { 0U }; + KRML_MAYBE_FOR16(i, + (uint32_t)0U, + (uint32_t)16U, + (uint32_t)1U, + KRML_MAYBE_FOR4(i0, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res);); + uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U; + uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U); + precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp); + point_add(res, res, tmp); + uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U; + uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U); + precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp); + point_add(res, res, tmp); + uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U; + uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U); + precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp); + point_add(res, res, tmp); + uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U; + uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U); + precomp_get_consttime(Hacl_P256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp); + point_add(res, res, tmp);); + KRML_HOST_IGNORE(q1); + KRML_HOST_IGNORE(q2); + KRML_HOST_IGNORE(q3); + KRML_HOST_IGNORE(q4); +} + +static inline void +point_mul_double_g(uint64_t *res, uint64_t *scalar1, uint64_t *scalar2, uint64_t *q2) +{ + uint64_t q1[12U] = { 0U }; + make_base_point(q1); + uint64_t table2[384U] = { 0U }; + uint64_t tmp[12U] = { 0U }; + uint64_t *t0 = table2; + uint64_t *t1 = table2 + (uint32_t)12U; + make_point_at_inf(t0); + memcpy(t1, q2, (uint32_t)12U * sizeof(uint64_t)); + KRML_MAYBE_FOR15(i, + (uint32_t)0U, + (uint32_t)15U, + (uint32_t)1U, + uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)12U; + point_double(tmp, t11); + memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * 
(uint32_t)12U, + tmp, + (uint32_t)12U * sizeof(uint64_t)); + uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U; + point_add(tmp, q2, t2); + memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U, + tmp, + (uint32_t)12U * sizeof(uint64_t));); + uint64_t tmp0[12U] = { 0U }; + uint32_t i0 = (uint32_t)255U; + uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U); + uint32_t bits_l32 = (uint32_t)bits_c; + const uint64_t + *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)12U; + memcpy(res, (uint64_t *)a_bits_l, (uint32_t)12U * sizeof(uint64_t)); + uint32_t i1 = (uint32_t)255U; + uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U); + uint32_t bits_l320 = (uint32_t)bits_c0; + const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)12U; + memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)12U * sizeof(uint64_t)); + point_add(res, res, tmp0); + uint64_t tmp1[12U] = { 0U }; + for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++) { + KRML_MAYBE_FOR5(i2, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, point_double(res, res);); + uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U; + uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U); + uint32_t bits_l321 = (uint32_t)bits_l; + const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)12U; + memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)12U * sizeof(uint64_t)); + point_add(res, res, tmp1); + uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U; + uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U); + uint32_t bits_l322 = (uint32_t)bits_l0; + const uint64_t + *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)12U; + memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)12U * sizeof(uint64_t)); + point_add(res, res, tmp1); + } +} + +static inline uint64_t 
+bn_is_lt_order_mask4(uint64_t *f) +{ + uint64_t tmp[4U] = { 0U }; + make_order(tmp); + uint64_t c = bn_sub4(tmp, f, tmp); + return (uint64_t)0U - c; +} + +static inline uint64_t +bn_is_lt_order_and_gt_zero_mask4(uint64_t *f) +{ + uint64_t is_lt_order = bn_is_lt_order_mask4(f); + uint64_t is_eq_zero = bn_is_zero_mask4(f); + return is_lt_order & ~is_eq_zero; +} + +static inline void +qmod_short(uint64_t *res, uint64_t *x) +{ + uint64_t tmp[4U] = { 0U }; + make_order(tmp); + uint64_t c = bn_sub4(tmp, x, tmp); + bn_cmovznz4(res, c, tmp, x); +} + +static inline void +qadd(uint64_t *res, uint64_t *x, uint64_t *y) +{ + uint64_t n[4U] = { 0U }; + make_order(n); + bn_add_mod4(res, n, x, y); +} + +static inline void +qmont_reduction(uint64_t *res, uint64_t *x) +{ + uint64_t n[4U] = { 0U }; + make_order(n); + uint64_t c0 = (uint64_t)0U; + KRML_MAYBE_FOR4( + i0, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t qj = (uint64_t)0xccd1c8aaee00bc4fU * x[i0]; + uint64_t *res_j0 = x + i0; + uint64_t c = (uint64_t)0U; + { + uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0); + uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1); + uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2); + uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i); + } uint64_t r = c; + uint64_t c1 = r; + uint64_t *resb = x + (uint32_t)4U + i0; + uint64_t res_j = x[(uint32_t)4U + i0]; + c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);); + 
memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof(uint64_t)); + uint64_t c00 = c0; + uint64_t tmp[4U] = { 0U }; + uint64_t c = (uint64_t)0U; + { + uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; + uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; + uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0); + uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; + uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1); + uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; + uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2); + uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; + uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; + c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i); + } + uint64_t c1 = c; + uint64_t c2 = c00 - c1; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = res; + uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]); + os[i] = x1;); +} + +static inline void +from_qmont(uint64_t *res, uint64_t *x) +{ + uint64_t tmp[8U] = { 0U }; + memcpy(tmp, x, (uint32_t)4U * sizeof(uint64_t)); + qmont_reduction(res, tmp); +} + +static inline void +qmul(uint64_t *res, uint64_t *x, uint64_t *y) +{ + uint64_t tmp[8U] = { 0U }; + bn_mul4(tmp, x, y); + qmont_reduction(res, tmp); +} + +static inline void +qsqr(uint64_t *res, uint64_t *x) +{ + uint64_t tmp[8U] = { 0U }; + bn_sqr4(tmp, x); + qmont_reduction(res, tmp); +} + +bool +Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key) +{ + uint64_t tmp[16U] = { 0U }; + uint64_t *sk = tmp; + uint64_t *pk = 
tmp + (uint32_t)4U; + bn_from_bytes_be4(sk, private_key); + uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk); + uint64_t oneq[4U] = { 0U }; + oneq[0U] = (uint64_t)1U; + oneq[1U] = (uint64_t)0U; + oneq[2U] = (uint64_t)0U; + oneq[3U] = (uint64_t)0U; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = sk; + uint64_t uu____0 = oneq[i]; + uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0)); + os[i] = x;); + uint64_t is_sk_valid = is_b_valid; + point_mul_g(pk, sk); + point_store(public_key, pk); + return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU; +} + +bool +Hacl_Impl_P256_DH_ecp256dh_r( + uint8_t *shared_secret, + uint8_t *their_pubkey, + uint8_t *private_key) +{ + uint64_t tmp[16U] = { 0U }; + uint64_t *sk = tmp; + uint64_t *pk = tmp + (uint32_t)4U; + bool is_pk_valid = load_point_vartime(pk, their_pubkey); + bn_from_bytes_be4(sk, private_key); + uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk); + uint64_t oneq[4U] = { 0U }; + oneq[0U] = (uint64_t)1U; + oneq[1U] = (uint64_t)0U; + oneq[2U] = (uint64_t)0U; + oneq[3U] = (uint64_t)0U; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = sk; + uint64_t uu____0 = oneq[i]; + uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0)); + os[i] = x;); + uint64_t is_sk_valid = is_b_valid; + uint64_t ss_proj[12U] = { 0U }; + if (is_pk_valid) { + point_mul(ss_proj, sk, pk); + point_store(shared_secret, ss_proj); + } + return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid; +} + +static inline void +qinv(uint64_t *res, uint64_t *r) +{ + uint64_t tmp[28U] = { 0U }; + uint64_t *x6 = tmp; + uint64_t *x_11 = tmp + (uint32_t)4U; + uint64_t *x_101 = tmp + (uint32_t)8U; + uint64_t *x_111 = tmp + (uint32_t)12U; + uint64_t *x_1111 = tmp + (uint32_t)16U; + uint64_t *x_10101 = tmp + (uint32_t)20U; + uint64_t *x_101111 = tmp + (uint32_t)24U; + memcpy(x6, r, (uint32_t)4U * sizeof(uint64_t)); + { + qsqr(x6, x6); + } + qmul(x_11, x6, r); + 
qmul(x_101, x6, x_11); + qmul(x_111, x6, x_101); + memcpy(x6, x_101, (uint32_t)4U * sizeof(uint64_t)); + { + qsqr(x6, x6); + } + qmul(x_1111, x_101, x6); + { + qsqr(x6, x6); + } + qmul(x_10101, x6, r); + memcpy(x6, x_10101, (uint32_t)4U * sizeof(uint64_t)); + { + qsqr(x6, x6); + } + qmul(x_101111, x_101, x6); + qmul(x6, x_10101, x6); + uint64_t tmp1[4U] = { 0U }; + KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(x6, x6);); + qmul(x6, x6, x_11); + memcpy(tmp1, x6, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x6); + memcpy(x6, tmp1, (uint32_t)4U * sizeof(uint64_t)); + KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, qsqr(x6, x6);); + qmul(x6, x6, tmp1); + memcpy(tmp1, x6, (uint32_t)4U * sizeof(uint64_t)); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++) { + qsqr(tmp1, tmp1); + } + qmul(tmp1, tmp1, x6); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) { + qsqr(tmp1, tmp1); + } + qmul(tmp1, tmp1, x6); + KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_101111); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_111); + KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_11); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_1111); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_10101); + KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_101); + KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_101); + KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_101); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, 
qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_111); + KRML_MAYBE_FOR9(i, (uint32_t)0U, (uint32_t)9U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_101111); + KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_1111); + KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, r); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, r); + KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_1111); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_111); + KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_111); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_111); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_101); + KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_11); + KRML_MAYBE_FOR10(i, (uint32_t)0U, (uint32_t)10U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_101111); + KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_11); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_11); + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_11); + KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, r); + KRML_MAYBE_FOR7(i, (uint32_t)0U, (uint32_t)7U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_10101); + KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1);); + qmul(tmp1, tmp1, x_1111); + memcpy(x6, tmp1, (uint32_t)4U * sizeof(uint64_t)); + memcpy(res, x6, (uint32_t)4U * 
sizeof(uint64_t)); +} + +static inline void +qmul_mont(uint64_t *sinv, uint64_t *b, uint64_t *res) +{ + uint64_t tmp[4U] = { 0U }; + from_qmont(tmp, b); + qmul(res, sinv, tmp); +} + +static inline bool +ecdsa_verify_msg_as_qelem( + uint64_t *m_q, + uint8_t *public_key, + uint8_t *signature_r, + uint8_t *signature_s) +{ + uint64_t tmp[28U] = { 0U }; + uint64_t *pk = tmp; + uint64_t *r_q = tmp + (uint32_t)12U; + uint64_t *s_q = tmp + (uint32_t)16U; + uint64_t *u1 = tmp + (uint32_t)20U; + uint64_t *u2 = tmp + (uint32_t)24U; + bool is_pk_valid = load_point_vartime(pk, public_key); + bn_from_bytes_be4(r_q, signature_r); + bn_from_bytes_be4(s_q, signature_s); + uint64_t is_r_valid = bn_is_lt_order_and_gt_zero_mask4(r_q); + uint64_t is_s_valid = bn_is_lt_order_and_gt_zero_mask4(s_q); + bool + is_rs_valid = + is_r_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_s_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU; + if (!(is_pk_valid && is_rs_valid)) { + return false; + } + uint64_t sinv[4U] = { 0U }; + qinv(sinv, s_q); + qmul_mont(sinv, m_q, u1); + qmul_mont(sinv, r_q, u2); + uint64_t res[12U] = { 0U }; + point_mul_double_g(res, u1, u2, pk); + if (is_point_at_inf_vartime(res)) { + return false; + } + uint64_t x[4U] = { 0U }; + to_aff_point_x(x, res); + qmod_short(x, x); + bool res1 = bn_is_eq_vartime4(x, r_q); + return res1; +} + +static inline bool +ecdsa_sign_msg_as_qelem( + uint8_t *signature, + uint64_t *m_q, + uint8_t *private_key, + uint8_t *nonce) +{ + uint64_t rsdk_q[16U] = { 0U }; + uint64_t *r_q = rsdk_q; + uint64_t *s_q = rsdk_q + (uint32_t)4U; + uint64_t *d_a = rsdk_q + (uint32_t)8U; + uint64_t *k_q = rsdk_q + (uint32_t)12U; + bn_from_bytes_be4(d_a, private_key); + uint64_t is_b_valid0 = bn_is_lt_order_and_gt_zero_mask4(d_a); + uint64_t oneq0[4U] = { 0U }; + oneq0[0U] = (uint64_t)1U; + oneq0[1U] = (uint64_t)0U; + oneq0[2U] = (uint64_t)0U; + oneq0[3U] = (uint64_t)0U; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = d_a; + uint64_t uu____0 
= oneq0[i]; + uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0)); + os[i] = x;); + uint64_t is_sk_valid = is_b_valid0; + bn_from_bytes_be4(k_q, nonce); + uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(k_q); + uint64_t oneq[4U] = { 0U }; + oneq[0U] = (uint64_t)1U; + oneq[1U] = (uint64_t)0U; + oneq[2U] = (uint64_t)0U; + oneq[3U] = (uint64_t)0U; + KRML_MAYBE_FOR4(i, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + uint64_t *os = k_q; + uint64_t uu____1 = oneq[i]; + uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1)); + os[i] = x;); + uint64_t is_nonce_valid = is_b_valid; + uint64_t are_sk_nonce_valid = is_sk_valid & is_nonce_valid; + uint64_t p[12U] = { 0U }; + point_mul_g(p, k_q); + to_aff_point_x(r_q, p); + qmod_short(r_q, r_q); + uint64_t kinv[4U] = { 0U }; + qinv(kinv, k_q); + qmul(s_q, r_q, d_a); + from_qmont(m_q, m_q); + qadd(s_q, m_q, s_q); + qmul(s_q, kinv, s_q); + bn2_to_bytes_be4(signature, r_q, s_q); + uint64_t is_r_zero = bn_is_zero_mask4(r_q); + uint64_t is_s_zero = bn_is_zero_mask4(s_q); + uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero); + bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU; + return res; +} + +/******************************************************************************* + + Verified C library for ECDSA and ECDH functions over the P-256 NIST curve. + + This module implements signing and verification, key validation, conversions + between various point representations, and ECDH key agreement. + +*******************************************************************************/ + +/*****************/ +/* ECDSA signing */ +/*****************/ + +/** +Create an ECDSA signature WITHOUT hashing first. + + This function is intended to receive a hash of the input. + For convenience, we recommend using one of the hash-and-sign combined functions above. + + The argument `msg` MUST be at least 32 bytes (i.e. `msg_len >= 32`). 
+ + NOTE: The equivalent functions in OpenSSL and Fiat-Crypto both accept inputs + smaller than 32 bytes. These libraries left-pad the input with enough zeroes to + reach the minimum 32 byte size. Clients who need behavior identical to OpenSSL + need to perform the left-padding themselves. + + The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. + + The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. + The arguments `private_key` and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. + + The function also checks whether `private_key` and `nonce` are valid values: + • 0 < `private_key` < the order of the curve + • 0 < `nonce` < the order of the curve +*/ +bool +Hacl_P256_ecdsa_sign_p256_without_hash( + uint8_t *signature, + uint32_t msg_len, + uint8_t *msg, + uint8_t *private_key, + uint8_t *nonce) +{ + uint64_t m_q[4U] = { 0U }; + uint8_t mHash[32U] = { 0U }; + memcpy(mHash, msg, (uint32_t)32U * sizeof(uint8_t)); + KRML_HOST_IGNORE(msg_len); + uint8_t *mHash32 = mHash; + bn_from_bytes_be4(m_q, mHash32); + qmod_short(m_q, m_q); + bool res = ecdsa_sign_msg_as_qelem(signature, m_q, private_key, nonce); + return res; +} + +/**********************/ +/* ECDSA verification */ +/**********************/ + +/** +Verify an ECDSA signature WITHOUT hashing first. + + This function is intended to receive a hash of the input. + For convenience, we recommend using one of the hash-and-verify combined functions above. + + The argument `msg` MUST be at least 32 bytes (i.e. `msg_len >= 32`). + + The function returns `true` if the signature is valid and `false` otherwise. + + The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. + The argument `public_key` (x || y) points to 64 bytes of valid memory, i.e., uint8_t[64]. 
+ The arguments `signature_r` and `signature_s` point to 32 bytes of valid memory, i.e., uint8_t[32]. + + The function also checks whether `public_key` is valid +*/ +bool +Hacl_P256_ecdsa_verif_without_hash( + uint32_t msg_len, + uint8_t *msg, + uint8_t *public_key, + uint8_t *signature_r, + uint8_t *signature_s) +{ + uint64_t m_q[4U] = { 0U }; + uint8_t mHash[32U] = { 0U }; + memcpy(mHash, msg, (uint32_t)32U * sizeof(uint8_t)); + KRML_HOST_IGNORE(msg_len); + uint8_t *mHash32 = mHash; + bn_from_bytes_be4(m_q, mHash32); + qmod_short(m_q, m_q); + bool res = ecdsa_verify_msg_as_qelem(m_q, public_key, signature_r, signature_s); + return res; +} + +/******************/ +/* Key validation */ +/******************/ + +/** +Public key validation. + + The function returns `true` if a public key is valid and `false` otherwise. + + The argument `public_key` points to 64 bytes of valid memory, i.e., uint8_t[64]. + + The public key (x || y) is valid (with respect to SP 800-56A): + • the public key is not the “point at infinity”, represented as O. + • the affine x and y coordinates of the point represented by the public key are + in the range [0, p – 1] where p is the prime defining the finite field. + • y^2 = x^3 + ax + b where a and b are the coefficients of the curve equation. + The last extract is taken from: https://neilmadden.blog/2017/05/17/so-how-do-you-validate-nist-ecdh-public-keys/ +*/ +bool +Hacl_P256_validate_public_key(uint8_t *public_key) +{ + uint64_t point_jac[12U] = { 0U }; + bool res = load_point_vartime(point_jac, public_key); + return res; +} + +/** +Private key validation. + + The function returns `true` if a private key is valid and `false` otherwise. + + The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. 
+ + The private key is valid: + • 0 < `private_key` < the order of the curve +*/ +bool +Hacl_P256_validate_private_key(uint8_t *private_key) +{ + uint64_t bn_sk[4U] = { 0U }; + bn_from_bytes_be4(bn_sk, private_key); + uint64_t res = bn_is_lt_order_and_gt_zero_mask4(bn_sk); + return res == (uint64_t)0xFFFFFFFFFFFFFFFFU; +} + +/******************************************************************************* + Parsing and Serializing public keys. + + A public key is a point (x, y) on the P-256 NIST curve. + + The point can be represented in the following three ways. + • raw = [ x || y ], 64 bytes + • uncompressed = [ 0x04 || x || y ], 65 bytes + • compressed = [ (0x02 for even `y` and 0x03 for odd `y`) || x ], 33 bytes + +*******************************************************************************/ + +/** +Convert a public key from uncompressed to its raw form. + + The function returns `true` for successful conversion of a public key and `false` otherwise. + + The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `pk` points to 65 bytes of valid memory, i.e., uint8_t[65]. + + The function DOESN'T check whether (x, y) is a valid point. +*/ +bool +Hacl_P256_uncompressed_to_raw(uint8_t *pk, uint8_t *pk_raw) +{ + uint8_t pk0 = pk[0U]; + if (pk0 != (uint8_t)0x04U) { + return false; + } + memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof(uint8_t)); + return true; +} + +/** +Convert a public key from compressed to its raw form. + + The function returns `true` for successful conversion of a public key and `false` otherwise. + + The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `pk` points to 33 bytes of valid memory, i.e., uint8_t[33]. + + The function also checks whether (x, y) is a valid point. 
+*/ +bool +Hacl_P256_compressed_to_raw(uint8_t *pk, uint8_t *pk_raw) +{ + uint64_t xa[4U] = { 0U }; + uint64_t ya[4U] = { 0U }; + uint8_t *pk_xb = pk + (uint32_t)1U; + bool b = aff_point_decompress_vartime(xa, ya, pk); + if (b) { + memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof(uint8_t)); + bn_to_bytes_be4(pk_raw + (uint32_t)32U, ya); + } + return b; +} + +/** +Convert a public key from raw to its uncompressed form. + + The outparam `pk` points to 65 bytes of valid memory, i.e., uint8_t[65]. + The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. + + The function DOESN'T check whether (x, y) is a valid point. +*/ +void +Hacl_P256_raw_to_uncompressed(uint8_t *pk_raw, uint8_t *pk) +{ + pk[0U] = (uint8_t)0x04U; + memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof(uint8_t)); +} + +/** +Convert a public key from raw to its compressed form. + + The outparam `pk` points to 33 bytes of valid memory, i.e., uint8_t[33]. + The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. + + The function DOESN'T check whether (x, y) is a valid point. +*/ +void +Hacl_P256_raw_to_compressed(uint8_t *pk_raw, uint8_t *pk) +{ + uint8_t *pk_x = pk_raw; + uint8_t *pk_y = pk_raw + (uint32_t)32U; + uint64_t bn_f[4U] = { 0U }; + bn_from_bytes_be4(bn_f, pk_y); + uint64_t is_odd_f = bn_f[0U] & (uint64_t)1U; + pk[0U] = (uint8_t)is_odd_f + (uint8_t)0x02U; + memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof(uint8_t)); +} + +/******************/ +/* ECDH agreement */ +/******************/ + +/** +Compute the public key from the private key. + + The function returns `true` if a private key is valid and `false` otherwise. + + The outparam `public_key` points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. + + The private key is valid: + • 0 < `private_key` < the order of the curve. 
+*/ +bool +Hacl_P256_dh_initiator(uint8_t *public_key, uint8_t *private_key) +{ + return Hacl_Impl_P256_DH_ecp256dh_i(public_key, private_key); +} + +/** +Execute the diffie-hellmann key exchange. + + The function returns `true` for successful creation of an ECDH shared secret and + `false` otherwise. + + The outparam `shared_secret` points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `their_pubkey` points to 64 bytes of valid memory, i.e., uint8_t[64]. + The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. + + The function also checks whether `private_key` and `their_pubkey` are valid. +*/ +bool +Hacl_P256_dh_responder(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t *private_key) +{ + return Hacl_Impl_P256_DH_ecp256dh_r(shared_secret, their_pubkey, private_key); +} diff --git a/crypto/rsa-hacl-generated.c b/crypto/rsa-hacl-generated.c new file mode 100644 index 0000000000000..551868b5dbcd5 --- /dev/null +++ b/crypto/rsa-hacl-generated.c @@ -0,0 +1,395 @@ +/* GPLv2 or MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + */ + +#include "hacl_rsa.h" + +static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b) +{ + uint32_t bLen = (bs - 1U) / 64U + 1U; + if (bs == 64U * bLen) + { + return 0xFFFFFFFFFFFFFFFFULL; + } + KRML_CHECK_SIZE(sizeof (uint64_t), bLen); + uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t)); + memset(b2, 0U, bLen * sizeof (uint64_t)); + uint32_t i0 = bs / 64U; + uint32_t j = bs % 64U; + b2[i0] = b2[i0] | 1ULL << j; + uint64_t acc = 0ULL; + for (uint32_t i = 0U; i < bLen; i++) + { + uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); + uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); + acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + } + uint64_t res = acc; + return res; +} + +uint64_t Hacl_Impl_RSA_Keys_check_modulus_u64(uint32_t modBits, uint64_t *n) +{ + uint32_t 
nLen = (modBits - 1U) / 64U + 1U; + uint64_t bits0 = n[0U] & 1ULL; + uint64_t m0 = 0ULL - bits0; + KRML_CHECK_SIZE(sizeof (uint64_t), nLen); + uint64_t *b2 = (uint64_t *)alloca(nLen * sizeof (uint64_t)); + memset(b2, 0U, nLen * sizeof (uint64_t)); + uint32_t i0 = (modBits - 1U) / 64U; + uint32_t j = (modBits - 1U) % 64U; + b2[i0] = b2[i0] | 1ULL << j; + uint64_t acc = 0ULL; + for (uint32_t i = 0U; i < nLen; i++) + { + uint64_t beq = FStar_UInt64_eq_mask(b2[i], n[i]); + uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], n[i]); + acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))); + } + uint64_t res = acc; + uint64_t m1 = res; + uint64_t m2 = check_num_bits_u64(modBits, n); + return m0 & (m1 & m2); +} + +uint64_t Hacl_Impl_RSA_Keys_check_exponent_u64(uint32_t eBits, uint64_t *e) +{ + uint32_t eLen = (eBits - 1U) / 64U + 1U; + KRML_CHECK_SIZE(sizeof (uint64_t), eLen); + uint64_t *bn_zero = (uint64_t *)alloca(eLen * sizeof (uint64_t)); + memset(bn_zero, 0U, eLen * sizeof (uint64_t)); + uint64_t mask = 0xFFFFFFFFFFFFFFFFULL; + for (uint32_t i = 0U; i < eLen; i++) + { + uint64_t uu____0 = FStar_UInt64_eq_mask(e[i], bn_zero[i]); + mask = uu____0 & mask; + } + uint64_t mask1 = mask; + uint64_t res = mask1; + uint64_t m0 = res; + uint64_t m1 = check_num_bits_u64(eBits, e); + return ~m0 & m1; +} + +/** +Decrypt a message `cipher` and write the plaintext to `plain`. + +@param modBits Count of bits in the modulus (`n`). +@param eBits Count of bits in `e` value. +@param dBits Count of bits in `d` value. +@param skey Pointer to secret key created by `Hacl_RSA_new_rsa_load_skey`. +@param cipher Pointer to `ceil(modBits - 1 / 8)` bytes where the ciphertext is read from. +@param plain Pointer to `ceil(modBits / 8)` bytes where the plaintext is written to. + +@return Returns true if and only if decryption was successful. 
+*/ +bool +Hacl_RSA_rsa_dec( + uint32_t modBits, + uint32_t eBits, + uint32_t dBits, + uint64_t *skey, + uint8_t *cipher, + uint8_t *plain +) +{ + uint32_t nLen = (modBits - 1U) / 64U + 1U; + uint32_t emBits = modBits - 1U; + uint32_t emLen = (emBits - 1U) / 8U + 1U; + uint32_t k = (modBits - 1U) / 8U + 1U; + KRML_CHECK_SIZE(sizeof (uint64_t), nLen); + uint64_t *m = (uint64_t *)alloca(nLen * sizeof (uint64_t)); + memset(m, 0U, nLen * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), nLen); + uint64_t *s = (uint64_t *)alloca(nLen * sizeof (uint64_t)); + memset(s, 0U, nLen * sizeof (uint64_t)); + KRML_CHECK_SIZE(sizeof (uint64_t), nLen); + uint64_t *m_ = (uint64_t *)alloca(nLen * sizeof (uint64_t)); + memset(m_, 0U, nLen * sizeof (uint64_t)); + Hacl_Bignum_Convert_bn_from_bytes_be_uint64(emLen, cipher, m); + uint32_t nLen1 = (modBits - 1U) / 64U + 1U; + uint32_t eLen = (eBits - 1U) / 64U + 1U; + uint64_t *n = skey; + uint64_t *r2 = skey + nLen1; + uint64_t *e = skey + nLen1 + nLen1; + uint64_t *d = skey + nLen1 + nLen1 + eLen; + uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); + Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - 1U) / 64U + 1U, + n, + mu, + r2, + m, + dBits, + d, + s); + uint64_t mu0 = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); + Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U, + n, + mu0, + r2, + s, + eBits, + e, + m_); + uint64_t mask = 0xFFFFFFFFFFFFFFFFULL; + for (uint32_t i = 0U; i < nLen1; i++) + { + uint64_t uu____0 = FStar_UInt64_eq_mask(m[i], m_[i]); + mask = uu____0 & mask; + } + uint64_t mask1 = mask; + uint64_t eq_m = mask1; + for (uint32_t i = 0U; i < nLen1; i++) + { + uint64_t *os = s; + uint64_t x = s[i]; + uint64_t x0 = eq_m & x; + os[i] = x0; + } + bool eq_b = eq_m == 0xFFFFFFFFFFFFFFFFULL; + Hacl_Bignum_Convert_bn_to_bytes_be_uint64(k, s, plain); + return eq_b; +} + +/** +Encrypt a message `plain` and write the ciphertext to `cipher`. 
+
+@param modBits Count of bits in the modulus (`n`).
+@param eBits Count of bits in `e` value.
+@param pkey Pointer to public key created by `Hacl_RSA_new_rsa_load_pkey`.
+@param plain Pointer to `ceil(modBits / 8)` bytes where the plaintext is read from.
+@param cipher Pointer to `ceil((modBits - 1) / 8)` bytes where the ciphertext is written to.
+
+
+@return Returns true if and only if encryption was successful.
+*/
+bool
+Hacl_RSA_rsa_enc(
+  uint32_t modBits,
+  uint32_t eBits,
+  uint64_t *pkey,
+  uint8_t *plain,
+  uint8_t *cipher
+)
+{
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t k = (modBits - 1U) / 8U + 1U;
+  uint32_t emBits = modBits - 1U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
+  uint64_t *s = (uint64_t *)alloca(nLen * sizeof (uint64_t));
+  memset(s, 0U, nLen * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (modBits - 1U) / 64U + 1U);
+  uint64_t *m = (uint64_t *)alloca(((modBits - 1U) / 64U + 1U) * sizeof (uint64_t));
+  memset(m, 0U, ((modBits - 1U) / 64U + 1U) * sizeof (uint64_t));
+  Hacl_Bignum_Convert_bn_from_bytes_be_uint64(k, plain, s);
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+  uint64_t *n = pkey;
+  uint64_t *r2 = pkey + nLen1;
+  uint64_t *e = pkey + nLen1 + nLen1;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen1; i++)
+  {
+    uint64_t beq = FStar_UInt64_eq_mask(s[i], n[i]);
+    uint64_t blt = ~FStar_UInt64_gte_mask(s[i], n[i]);
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
+  }
+  uint64_t mask = acc;
+  bool res;
+  if (mask == 0xFFFFFFFFFFFFFFFFULL)
+  {
+    uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
+      n,
+      mu,
+      r2,
+      s,
+      eBits,
+      e,
+      m);
+    bool ite;
+    if (!((modBits - 1U) % 8U == 0U))
+    {
+      ite = true;
+    }
+    else
+    {
+      uint32_t i = (modBits - 1U) / 64U;
+      uint32_t j = (modBits - 1U) % 64U;
+      uint64_t
tmp = m[i]; + uint64_t get_bit = tmp >> j & 1ULL; + ite = get_bit == 0ULL; + } + if (ite) + { + res = true; + } + else + { + res = false; + } + } + else + { + res = false; + } + bool b = res; + uint64_t *m1 = m; + Hacl_Bignum_Convert_bn_to_bytes_be_uint64(emLen, m1, cipher); + return b; +} + +/** +Load a public key from key parts. + +@param modBits Count of bits in modulus (`n`). +@param eBits Count of bits in `e` value. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. + +@return Returns an allocated public key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. +*/ +uint64_t +*Hacl_RSA_new_rsa_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb) +{ + bool ite; + if (1U < modBits && 0U < eBits) + { + uint32_t nLen = (modBits - 1U) / 64U + 1U; + uint32_t eLen = (eBits - 1U) / 64U + 1U; + ite = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen; + } + else + { + ite = false; + } + if (!ite) + { + return NULL; + } + uint32_t nLen = (modBits - 1U) / 64U + 1U; + uint32_t eLen = (eBits - 1U) / 64U + 1U; + uint32_t pkeyLen = nLen + nLen + eLen; + KRML_CHECK_SIZE(sizeof (uint64_t), pkeyLen); + uint64_t *pkey = (uint64_t *)KRML_HOST_CALLOC(pkeyLen, sizeof (uint64_t)); + if (pkey == NULL) + { + return pkey; + } + uint64_t *pkey1 = pkey; + uint64_t *pkey2 = pkey1; + uint32_t nbLen = (modBits - 1U) / 8U + 1U; + uint32_t ebLen = (eBits - 1U) / 8U + 1U; + uint32_t nLen1 = (modBits - 1U) / 64U + 1U; + uint64_t *n = pkey2; + uint64_t *r2 = pkey2 + nLen1; + uint64_t *e = pkey2 + nLen1 + nLen1; + Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n); + Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2); + 
Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e); + uint64_t m0 = Hacl_Impl_RSA_Keys_check_modulus_u64(modBits, n); + uint64_t m1 = Hacl_Impl_RSA_Keys_check_exponent_u64(eBits, e); + uint64_t m = m0 & m1; + bool b = m == 0xFFFFFFFFFFFFFFFFULL; + if (b) + { + return pkey2; + } + KRML_HOST_FREE(pkey2); + return NULL; +} + +/** +Load a secret key from key parts. + +@param modBits Count of bits in modulus (`n`). +@param eBits Count of bits in `e` value. +@param dBits Count of bits in `d` value. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. + +@return Returns an allocated secret key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. 
+*/ +uint64_t +*Hacl_RSA_new_rsa_load_skey( + uint32_t modBits, + uint32_t eBits, + uint32_t dBits, + uint8_t *nb, + uint8_t *eb, + uint8_t *db +) +{ + bool ite0; + if (1U < modBits && 0U < eBits) + { + uint32_t nLen = (modBits - 1U) / 64U + 1U; + uint32_t eLen = (eBits - 1U) / 64U + 1U; + ite0 = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen; + } + else + { + ite0 = false; + } + bool ite; + if (ite0 && 0U < dBits) + { + uint32_t nLen = (modBits - 1U) / 64U + 1U; + uint32_t eLen = (eBits - 1U) / 64U + 1U; + uint32_t dLen = (dBits - 1U) / 64U + 1U; + ite = dLen <= 67108863U && 2U * nLen <= 0xffffffffU - eLen - dLen; + } + else + { + ite = false; + } + if (!ite) + { + return NULL; + } + uint32_t nLen = (modBits - 1U) / 64U + 1U; + uint32_t eLen = (eBits - 1U) / 64U + 1U; + uint32_t dLen = (dBits - 1U) / 64U + 1U; + uint32_t skeyLen = nLen + nLen + eLen + dLen; + KRML_CHECK_SIZE(sizeof (uint64_t), skeyLen); + uint64_t *skey = (uint64_t *)KRML_HOST_CALLOC(skeyLen, sizeof (uint64_t)); + if (skey == NULL) + { + return skey; + } + uint64_t *skey1 = skey; + uint64_t *skey2 = skey1; + uint32_t dbLen = (dBits - 1U) / 8U + 1U; + uint32_t nLen1 = (modBits - 1U) / 64U + 1U; + uint32_t eLen1 = (eBits - 1U) / 64U + 1U; + uint32_t pkeyLen = nLen1 + nLen1 + eLen1; + uint64_t *pkey = skey2; + uint64_t *d = skey2 + pkeyLen; + uint32_t nbLen1 = (modBits - 1U) / 8U + 1U; + uint32_t ebLen1 = (eBits - 1U) / 8U + 1U; + uint32_t nLen2 = (modBits - 1U) / 64U + 1U; + uint64_t *n = pkey; + uint64_t *r2 = pkey + nLen2; + uint64_t *e = pkey + nLen2 + nLen2; + Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen1, nb, n); + Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2); + Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen1, eb, e); + uint64_t m0 = Hacl_Impl_RSA_Keys_check_modulus_u64(modBits, n); + uint64_t m10 = Hacl_Impl_RSA_Keys_check_exponent_u64(eBits, e); + uint64_t m = m0 & m10; + bool b = m == 
0xFFFFFFFFFFFFFFFFULL; + Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d); + uint64_t m1 = Hacl_Impl_RSA_Keys_check_exponent_u64(dBits, d); + bool b0 = b && m1 == 0xFFFFFFFFFFFFFFFFULL; + if (b0) + { + return skey2; + } + KRML_HOST_FREE(skey2); + return NULL; +} + diff --git a/crypto/rsa-hacl.c b/crypto/rsa-hacl.c new file mode 100644 index 0000000000000..ec89488350b20 --- /dev/null +++ b/crypto/rsa-hacl.c @@ -0,0 +1,318 @@ +/* + * GPLv2 or MIT License + * + * Copyright (c) 2023 Cryspen + * + */ + +#include "hacl_rsa.h" + +#include +#include +#include +#include +#include +#include +#include + +/** +RSA Key data structure +**/ + +struct hacl_rsa_key { + uint32_t modBits; + uint32_t eBits; + uint32_t dBits; + uint8_t* nbytes; + uint8_t* ebytes; + uint8_t* dbytes; +}; + +static inline struct hacl_rsa_key *rsa_get_key(struct crypto_akcipher *tfm) +{ + return akcipher_tfm_ctx(tfm); +} + +static int rsa_enc(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + const struct hacl_rsa_key *pkey = rsa_get_key(tfm); + int ret = 0; + //printk("<<< in hacl rsa_enc"); + //printk("<<< pkey->modbits:%d, pkey->ebits:%d, pkey->dbits:%d", pkey->modBits,pkey->eBits,pkey->dBits); + + if (unlikely(!pkey->nbytes || !pkey->ebytes)) { + ret = -EINVAL; + goto done; + } + + uint64_t* pk = Hacl_RSA_new_rsa_load_pkey(pkey->modBits,pkey->eBits,pkey->nbytes,pkey->ebytes); + + if (!pk) { + printk("<<< load pkey failed"); + ret = -EINVAL; + goto done; + } + + + unsigned int plain_len = (pkey->modBits - 1)/8 + 1; + unsigned int cipher_len = (pkey->modBits - 2)/8 + 1; + + // printk("req->src_len:%d, plain_len:%d, req->dst_len:%d, cipher_len:%d", req->src_len,plain_len,req->dst_len,cipher_len); + if (req->src_len > plain_len || req->dst_len != cipher_len) { + ret = -EINVAL; + goto pkdone; + } + unsigned char* buffer = kzalloc(plain_len+cipher_len, GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + goto pkdone; + } + sg_copy_to_buffer(req->src, + 
sg_nents_for_len(req->src, req->src_len),
+			buffer+plain_len-req->src_len, req->src_len);
+
+	ret = Hacl_RSA_rsa_enc(pkey->modBits,pkey->eBits,pk,buffer,buffer+plain_len);
+
+	if (!ret) {
+		ret = -EBADMSG;
+		goto bufdone;
+	}
+
+	//	printk("<<< exiting hacl rsa_enc 5 with cipher_len=%d, dst_len=%d, nents=%d, cipher[0]=%x, cipher[15]=%x",
+	//	cipher_len,req->dst_len,sg_nents_for_len(req->dst, req->dst_len),cbuffer[0],cbuffer[15]);
+	sg_copy_from_buffer(req->dst,
+			sg_nents_for_len(req->dst, req->dst_len),
+			buffer+plain_len, cipher_len);
+	//printk("<<< exiting hacl rsa_enc 6 with copied = %d, cipher_len=%d, cipher[0]=%x, cipher[15]=%x",
+	//	copied, cipher_len,cbuffer[0],cbuffer[15]);
+
+
+	bufdone: kfree(buffer);
+	pkdone: kfree(pk);
+	done: return ret < 0 ? ret : 0; /* 0 on success, -errno on failure; was `!ret`, which returned 0 (success) even when ret held a negative errno */
+}
+
+static int rsa_dec(struct akcipher_request *req)
+{
+	// printk("<<< in rsa dec");
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	const struct hacl_rsa_key *skey = rsa_get_key(tfm);
+	int ret = 0;
+
+	if (unlikely(!skey->nbytes || !skey->dbytes || !skey->ebytes)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	uint64_t *sk = Hacl_RSA_new_rsa_load_skey(skey->modBits,skey->eBits,skey->dBits,skey->nbytes,skey->ebytes,skey->dbytes);
+
+	if (!sk) {
+		printk("<<< load skey failed");
+		ret = -EINVAL;
+		goto done;
+	}
+
+
+	unsigned int plain_len = (skey->modBits - 1)/8 + 1;
+	unsigned int cipher_len = (skey->modBits - 2)/8 + 1;
+	// printk("<<< pkey->modbits:%d, pkey->ebits:%d, pkey->dbits:%d", skey->modBits,skey->eBits,skey->dBits);
+	// printk("<<< req->src_len:%d, plain_len:%d, req->dst_len:%d, cipher_len:%d", req->src_len,plain_len,req->dst_len,cipher_len);
+
+	if (req->src_len > cipher_len || req->dst_len != plain_len) {
+		printk("not the right lengths");
+		ret = -EINVAL;
+		goto skdone;
+	}
+
+	unsigned char* buffer = kzalloc(plain_len + cipher_len, GFP_KERNEL);
+	if (!buffer) {
+		ret = -ENOMEM;
+		goto skdone;
+	}
+
+	sg_copy_to_buffer(req->src,
+			sg_nents_for_len(req->src, req->src_len),
+
buffer+cipher_len-req->src_len, req->src_len);
+
+	ret = Hacl_RSA_rsa_dec(skey->modBits,skey->eBits,skey->dBits,sk,buffer,buffer+cipher_len);
+
+	if (!ret) {
+		printk("<<< rsa_dec failed");
+		ret = -EBADMSG;
+		goto bufdone;
+	}
+
+	//	printk("<<< exiting hacl rsa_dec 5 with plain_len=%d, dst_len=%d, nents=%d, plain[0]=%x, plain[15]=%x",
+	//	plain_len,req->dst_len,sg_nents_for_len(req->dst, req->dst_len),buffer[cipher_len],buffer[cipher_len+15]);
+	sg_copy_from_buffer(req->dst,
+			sg_nents_for_len(req->dst, req->dst_len),
+			buffer+cipher_len, req->dst_len);
+
+bufdone: kfree(buffer);
+	skdone: kfree(sk);
+	done: return ret < 0 ? ret : 0; /* 0 on success, -errno on failure; was `!ret`, which returned 0 (success) even when ret held a negative errno */
+}
+
+static void rsa_free_key(struct hacl_rsa_key *key)
+{
+	if (key->nbytes != NULL) kfree(key->nbytes);
+	if (key->ebytes != NULL) kfree(key->ebytes);
+	if (key->dbytes != NULL) kfree(key->dbytes);
+	key->modBits = 0;
+	key->eBits = 0;
+	key->dBits = 0;
+	key->nbytes = NULL;
+	key->ebytes = NULL;
+	key->dbytes = NULL;
+}
+
+static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+			   unsigned int keylen)
+{
+	// printk("<<< calling hacl rsa_set_pub_key");
+
+	struct hacl_rsa_key *pkey = rsa_get_key(tfm);
+	struct rsa_key raw_key = {0};
+
+	int ret = 0;
+
+	/* Free the old MPI key if any */
+	rsa_free_key(pkey);
+
+	ret = rsa_parse_pub_key(&raw_key, key, keylen);
+	if (ret)
+		return ret;
+
+	int n_sz = raw_key.n_sz;
+	if (raw_key.n[0] == 0) {
+		n_sz -= 1;
+	}
+	pkey->modBits = n_sz * 8;
+	pkey->eBits = raw_key.e_sz * 8;
+	if (pkey->eBits == 24 && raw_key.e[0] == 1) pkey->eBits = 17;
+	pkey->dBits = 0;
+
+	pkey->nbytes = kzalloc(n_sz, GFP_KERNEL);
+	if (!pkey->nbytes)
+		goto err;
+	memcpy(pkey->nbytes,raw_key.n+raw_key.n_sz-n_sz,n_sz);
+
+	pkey->ebytes = kzalloc(raw_key.e_sz, GFP_KERNEL);
+	if (!pkey->ebytes)
+		goto err;
+	memcpy(pkey->ebytes,raw_key.e,raw_key.e_sz);
+
+	pkey->dbytes = NULL;
+	return ret;
+
+err:
+	rsa_free_key(pkey);
+	return -ENOMEM;
+}
+
+static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen) +{ + // printk("<<< calling hacl rsa_set_priv_key"); + + struct hacl_rsa_key *skey = rsa_get_key(tfm); + struct rsa_key raw_key = {0}; + + int ret = 0; + + /* Free the old MPI key if any */ + rsa_free_key(skey); + + ret = rsa_parse_priv_key(&raw_key, key, keylen); + if (ret) + return ret; + + int n_sz = raw_key.n_sz; + if (raw_key.n[0] == 0) { + n_sz -= 1; + } + skey->modBits = n_sz * 8; + skey->eBits = raw_key.e_sz * 8; + if (skey->eBits == 24 && raw_key.e[0] == 1) skey->eBits = 17; + skey->dBits = raw_key.d_sz * 8; + + skey->nbytes = kzalloc(n_sz, GFP_KERNEL); + if (!skey->nbytes) + goto err; + memcpy(skey->nbytes,raw_key.n+raw_key.n_sz-n_sz,n_sz); + + skey->ebytes = kzalloc(raw_key.e_sz, GFP_KERNEL); + if (!skey->ebytes) + goto err; + memcpy(skey->ebytes,raw_key.e,raw_key.e_sz); + + skey->dbytes = kzalloc(raw_key.d_sz, GFP_KERNEL); + if (!skey->dbytes) + goto err; + memcpy(skey->dbytes,raw_key.d,raw_key.d_sz); + + return ret; + +err: + rsa_free_key(skey); + return -ENOMEM; +} + +static unsigned int rsa_max_size(struct crypto_akcipher *tfm) +{ + struct hacl_rsa_key *key = akcipher_tfm_ctx(tfm); + return ((key->modBits-1)/8)+1; +} + +static void rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct hacl_rsa_key *key = akcipher_tfm_ctx(tfm); + rsa_free_key(key); +} + +static struct akcipher_alg hacl_rsa = { + .encrypt = rsa_enc, + .decrypt = rsa_dec, + .set_priv_key = rsa_set_priv_key, + .set_pub_key = rsa_set_pub_key, + .max_size = rsa_max_size, + .exit = rsa_exit_tfm, + .base = { + .cra_name = "rsa", + .cra_driver_name = "rsa-hacl", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct hacl_rsa_key), + }, +}; + +static int __init hacl_rsa_init(void) +{ + int err; + + err = crypto_register_akcipher(&hacl_rsa); + if (err) + return err; + + err = crypto_register_template(&rsa_pkcs1pad_tmpl); + if (err) { + crypto_unregister_akcipher(&hacl_rsa); + return err; + } + + return 0; +} + +static void __exit 
hacl_rsa_exit(void) +{ + crypto_unregister_template(&rsa_pkcs1pad_tmpl); + crypto_unregister_akcipher(&hacl_rsa); +} + +subsys_initcall(hacl_rsa_init); +module_exit(hacl_rsa_exit); +MODULE_ALIAS_CRYPTO("rsa"); +MODULE_ALIAS_CRYPTO("rsa-hacl"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Formally Verified RSA algorithm from HACL*"); diff --git a/crypto/sha2-hacl-generated.c b/crypto/sha2-hacl-generated.c new file mode 100644 index 0000000000000..a90e1cdc24a9f --- /dev/null +++ b/crypto/sha2-hacl-generated.c @@ -0,0 +1,953 @@ +/* + * GPLv2 or MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + */ + +#include "hacl_hash.h" +#include "hacl_lib.h" + +void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash) +{ + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + uint32_t *os = hash; + uint32_t x = Hacl_Impl_SHA2_Generic_h256[i]; + os[i] = x;); +} + +static inline void hacl_sha256_update(uint8_t *b, uint32_t *hash) +{ + uint32_t hash_old[8U] = { 0U }; + uint32_t ws[16U] = { 0U }; + memcpy(hash_old, hash, (uint32_t)8U * sizeof(uint32_t)); + uint8_t *b10 = b; + uint32_t u = load32_be(b10); + ws[0U] = u; + uint32_t u0 = load32_be(b10 + (uint32_t)4U); + ws[1U] = u0; + uint32_t u1 = load32_be(b10 + (uint32_t)8U); + ws[2U] = u1; + uint32_t u2 = load32_be(b10 + (uint32_t)12U); + ws[3U] = u2; + uint32_t u3 = load32_be(b10 + (uint32_t)16U); + ws[4U] = u3; + uint32_t u4 = load32_be(b10 + (uint32_t)20U); + ws[5U] = u4; + uint32_t u5 = load32_be(b10 + (uint32_t)24U); + ws[6U] = u5; + uint32_t u6 = load32_be(b10 + (uint32_t)28U); + ws[7U] = u6; + uint32_t u7 = load32_be(b10 + (uint32_t)32U); + ws[8U] = u7; + uint32_t u8 = load32_be(b10 + (uint32_t)36U); + ws[9U] = u8; + uint32_t u9 = load32_be(b10 + (uint32_t)40U); + ws[10U] = u9; + uint32_t u10 = load32_be(b10 + (uint32_t)44U); + ws[11U] = u10; + uint32_t u11 = load32_be(b10 + (uint32_t)48U); + ws[12U] = u11; + uint32_t u12 = load32_be(b10 + 
(uint32_t)52U); + ws[13U] = u12; + uint32_t u13 = load32_be(b10 + (uint32_t)56U); + ws[14U] = u13; + uint32_t u14 = load32_be(b10 + (uint32_t)60U); + ws[15U] = u14; + KRML_MAYBE_FOR4( + i0, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, + KRML_MAYBE_FOR16( + i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, + uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256 + [(uint32_t)16U * i0 + i]; + uint32_t ws_t = ws[i]; uint32_t a0 = hash[0U]; + uint32_t b0 = hash[1U]; uint32_t c0 = hash[2U]; + uint32_t d0 = hash[3U]; uint32_t e0 = hash[4U]; + uint32_t f0 = hash[5U]; uint32_t g0 = hash[6U]; + uint32_t h02 = hash[7U]; uint32_t k_e_t = k_t; + uint32_t t1 = + h02 + + ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U) ^ + ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U) ^ + (e0 << (uint32_t)7U | e0 >> (uint32_t)25U))) + + ((e0 & f0) ^ (~e0 & g0)) + k_e_t + ws_t; + uint32_t t2 = + ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U) ^ + ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U) ^ + (a0 << (uint32_t)10U | a0 >> (uint32_t)22U))) + + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); + uint32_t a1 = t1 + t2; uint32_t b1 = a0; + uint32_t c1 = b0; uint32_t d1 = c0; + uint32_t e1 = d0 + t1; uint32_t f1 = e0; + uint32_t g1 = f0; uint32_t h12 = g0; hash[0U] = a1; + hash[1U] = b1; hash[2U] = c1; hash[3U] = d1; + hash[4U] = e1; hash[5U] = f1; hash[6U] = g1; + hash[7U] = h12;); + if (i0 < (uint32_t)3U) { + KRML_MAYBE_FOR16( + i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, + uint32_t t16 = ws[i]; + uint32_t t15 = + ws[(i + (uint32_t)1U) % (uint32_t)16U]; + uint32_t t7 = + ws[(i + (uint32_t)9U) % (uint32_t)16U]; + uint32_t t2 = + ws[(i + (uint32_t)14U) % (uint32_t)16U]; + uint32_t s1 = (t2 << (uint32_t)15U | + t2 >> (uint32_t)17U) ^ + ((t2 << (uint32_t)13U | + t2 >> (uint32_t)19U) ^ + t2 >> (uint32_t)10U); + uint32_t s0 = (t15 << (uint32_t)25U | + t15 >> (uint32_t)7U) ^ + ((t15 << (uint32_t)14U | + t15 >> (uint32_t)18U) ^ + t15 >> (uint32_t)3U); + ws[i] = s1 + t7 + s0 + t16;); + }); + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, 
(uint32_t)1U, + uint32_t *os = hash; + uint32_t x = hash[i] + hash_old[i]; os[i] = x;); +} + +void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, + uint32_t *st) +{ + uint32_t blocks = len / (uint32_t)64U; + for (uint32_t i = (uint32_t)0U; i < blocks; i++) { + uint8_t *b0 = b; + uint8_t *mb = b0 + i * (uint32_t)64U; + hacl_sha256_update(mb, st); + } +} + +void Hacl_SHA2_Scalar32_sha256_update_last(uint64_t totlen, uint32_t len, + uint8_t *b, uint32_t *hash) +{ + uint32_t blocks; + if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U) { + blocks = (uint32_t)1U; + } else { + blocks = (uint32_t)2U; + } + uint32_t fin = blocks * (uint32_t)64U; + uint8_t last[128U] = { 0U }; + uint8_t totlen_buf[8U] = { 0U }; + uint64_t total_len_bits = totlen << (uint32_t)3U; + store64_be(totlen_buf, total_len_bits); + uint8_t *b0 = b; + memcpy(last, b0, len * sizeof(uint8_t)); + last[len] = (uint8_t)0x80U; + memcpy(last + fin - (uint32_t)8U, totlen_buf, + (uint32_t)8U * sizeof(uint8_t)); + uint8_t *last00 = last; + uint8_t *last10 = last + (uint32_t)64U; + uint8_t *l0 = last00; + uint8_t *l1 = last10; + uint8_t *lb0 = l0; + uint8_t *lb1 = l1; + uint8_t *last0 = lb0; + uint8_t *last1 = lb1; + hacl_sha256_update(last0, hash); + if (blocks > (uint32_t)1U) { + hacl_sha256_update(last1, hash); + return; + } +} + +void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h) +{ + uint8_t hbuf[32U] = { 0U }; + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + store32_be(hbuf + i * (uint32_t)4U, st[i]);); + memcpy(h, hbuf, (uint32_t)32U * sizeof(uint8_t)); +} + +void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash) +{ + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + uint32_t *os = hash; + uint32_t x = Hacl_Impl_SHA2_Generic_h224[i]; + os[i] = x;); +} + +static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st) +{ + Hacl_SHA2_Scalar32_sha256_update_nblocks(len, b, st); +} + +void 
Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, + uint8_t *b, uint32_t *st) +{ + Hacl_SHA2_Scalar32_sha256_update_last(totlen, len, b, st); +} + +void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h) +{ + uint8_t hbuf[32U] = { 0U }; + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + store32_be(hbuf + i * (uint32_t)4U, st[i]);); + memcpy(h, hbuf, (uint32_t)28U * sizeof(uint8_t)); +} + +/** +Reset an existing state to the initial hash state with empty data. +*/ + +void Hacl_Streaming_SHA2_init_256(struct Hacl_Streaming_MD_state_32_s *s) +{ + struct Hacl_Streaming_MD_state_32_s scrut = *s; + uint8_t *buf = scrut.buf; + uint32_t *block_state = scrut.block_state; + Hacl_SHA2_Scalar32_sha256_init(block_state); + struct Hacl_Streaming_MD_state_32_s tmp = { + .block_state = block_state, + .buf = buf, + .total_len = (uint64_t)(uint32_t)0U + }; + s[0U] = tmp; +} + +static inline Hacl_Streaming_Types_error_code +update_224_256(struct Hacl_Streaming_MD_state_32_s *p, uint8_t *data, + uint32_t len) +{ + struct Hacl_Streaming_MD_state_32_s s = *p; + uint64_t total_len = s.total_len; + if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len) { + return Hacl_Streaming_Types_MaximumLengthExceeded; + } + uint32_t sz; + if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && + total_len > (uint64_t)0U) { + sz = (uint32_t)64U; + } else { + sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); + } + if (len <= (uint32_t)64U - sz) { + struct Hacl_Streaming_MD_state_32_s s1 = *p; + uint32_t *block_state1 = s1.block_state; + uint8_t *buf = s1.buf; + uint64_t total_len1 = s1.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = (uint32_t)64U; + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); + } + uint8_t *buf2 = buf + sz1; + memcpy(buf2, data, len * sizeof(uint8_t)); + uint64_t total_len2 = total_len1 + (uint64_t)len; + *p = ((struct 
Hacl_Streaming_MD_state_32_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len2 }); + } else if (sz == (uint32_t)0U) { + struct Hacl_Streaming_MD_state_32_s s1 = *p; + uint32_t *block_state1 = s1.block_state; + uint8_t *buf = s1.buf; + uint64_t total_len1 = s1.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = (uint32_t)64U; + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); + } + if (!(sz1 == (uint32_t)0U)) { + Hacl_SHA2_Scalar32_sha256_update_nblocks( + (uint32_t)64U, buf, block_state1); + } + uint32_t ite; + if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && + (uint64_t)len > (uint64_t)0U) { + ite = (uint32_t)64U; + } else { + ite = (uint32_t)((uint64_t)len % + (uint64_t)(uint32_t)64U); + } + uint32_t n_blocks = (len - ite) / (uint32_t)64U; + uint32_t data1_len = n_blocks * (uint32_t)64U; + uint32_t data2_len = len - data1_len; + uint8_t *data1 = data; + uint8_t *data2 = data + data1_len; + Hacl_SHA2_Scalar32_sha256_update_nblocks( + data1_len / (uint32_t)64U * (uint32_t)64U, data1, + block_state1); + uint8_t *dst = buf; + memcpy(dst, data2, data2_len * sizeof(uint8_t)); + *p = ((struct Hacl_Streaming_MD_state_32_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len1 + (uint64_t)len }); + } else { + uint32_t diff = (uint32_t)64U - sz; + uint8_t *data1 = data; + uint8_t *data2 = data + diff; + struct Hacl_Streaming_MD_state_32_s s1 = *p; + uint32_t *block_state10 = s1.block_state; + uint8_t *buf0 = s1.buf; + uint64_t total_len10 = s1.total_len; + uint32_t sz10; + if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && + total_len10 > (uint64_t)0U) { + sz10 = (uint32_t)64U; + } else { + sz10 = (uint32_t)(total_len10 % + (uint64_t)(uint32_t)64U); + } + uint8_t *buf2 = buf0 + sz10; + memcpy(buf2, data1, diff * sizeof(uint8_t)); + uint64_t total_len2 = total_len10 + (uint64_t)diff; + *p = ((struct 
Hacl_Streaming_MD_state_32_s){ + .block_state = block_state10, + .buf = buf0, + .total_len = total_len2 }); + struct Hacl_Streaming_MD_state_32_s s10 = *p; + uint32_t *block_state1 = s10.block_state; + uint8_t *buf = s10.buf; + uint64_t total_len1 = s10.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = (uint32_t)64U; + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); + } + if (!(sz1 == (uint32_t)0U)) { + Hacl_SHA2_Scalar32_sha256_update_nblocks( + (uint32_t)64U, buf, block_state1); + } + uint32_t ite; + if ((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U == + (uint64_t)0U && + (uint64_t)(len - diff) > (uint64_t)0U) { + ite = (uint32_t)64U; + } else { + ite = (uint32_t)((uint64_t)(len - diff) % + (uint64_t)(uint32_t)64U); + } + uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U; + uint32_t data1_len = n_blocks * (uint32_t)64U; + uint32_t data2_len = len - diff - data1_len; + uint8_t *data11 = data2; + uint8_t *data21 = data2 + data1_len; + Hacl_SHA2_Scalar32_sha256_update_nblocks( + data1_len / (uint32_t)64U * (uint32_t)64U, data11, + block_state1); + uint8_t *dst = buf; + memcpy(dst, data21, data2_len * sizeof(uint8_t)); + *p = ((struct Hacl_Streaming_MD_state_32_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len1 + (uint64_t)(len - diff) }); + } + return Hacl_Streaming_Types_Success; +} + +/** +Feed an arbitrary amount of data into the hash. This function returns 0 for +success, or 1 if the combined length of all of the data passed to `update_256` +(since the last call to `init_256`) exceeds 2^61-1 bytes. + +This function is identical to the update function for SHA2_224. +*/ +Hacl_Streaming_Types_error_code +Hacl_Streaming_SHA2_update_256(struct Hacl_Streaming_MD_state_32_s *p, + uint8_t *input, uint32_t input_len) +{ + return update_224_256(p, input, input_len); +} + +/** +Write the resulting hash into `dst`, an array of 32 bytes. 
The state remains +valid after a call to `finish_256`, meaning the user may feed more data into +the hash via `update_256`. (The finish_256 function operates on an internal copy +of the state and therefore does not invalidate the client-held state `p`.) +*/ +void Hacl_Streaming_SHA2_finish_256(struct Hacl_Streaming_MD_state_32_s *p, + uint8_t *dst) +{ + struct Hacl_Streaming_MD_state_32_s scrut = *p; + uint32_t *block_state = scrut.block_state; + uint8_t *buf_ = scrut.buf; + uint64_t total_len = scrut.total_len; + uint32_t r; + if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && + total_len > (uint64_t)0U) { + r = (uint32_t)64U; + } else { + r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); + } + uint8_t *buf_1 = buf_; + uint32_t tmp_block_state[8U] = { 0U }; + memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof(uint32_t)); + uint32_t ite; + if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) { + ite = (uint32_t)64U; + } else { + ite = r % (uint32_t)64U; + } + uint8_t *buf_last = buf_1 + r - ite; + uint8_t *buf_multi = buf_1; + Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)0U, buf_multi, + tmp_block_state); + uint64_t prev_len_last = total_len - (uint64_t)r; + Hacl_SHA2_Scalar32_sha256_update_last(prev_len_last + (uint64_t)r, r, + buf_last, tmp_block_state); + Hacl_SHA2_Scalar32_sha256_finish(tmp_block_state, dst); +} + +/** +Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes. 
+*/ +void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, + uint8_t *dst) +{ + uint8_t *ib = input; + uint8_t *rb = dst; + uint32_t st[8U] = { 0U }; + Hacl_SHA2_Scalar32_sha256_init(st); + uint32_t rem = input_len % (uint32_t)64U; + uint64_t len_ = (uint64_t)input_len; + Hacl_SHA2_Scalar32_sha256_update_nblocks(input_len, ib, st); + uint32_t rem1 = input_len % (uint32_t)64U; + uint8_t *b0 = ib; + uint8_t *lb = b0 + input_len - rem1; + Hacl_SHA2_Scalar32_sha256_update_last(len_, rem, lb, st); + Hacl_SHA2_Scalar32_sha256_finish(st, rb); +} + +void Hacl_Streaming_SHA2_init_224(struct Hacl_Streaming_MD_state_32_s *s) +{ + struct Hacl_Streaming_MD_state_32_s scrut = *s; + uint8_t *buf = scrut.buf; + uint32_t *block_state = scrut.block_state; + Hacl_SHA2_Scalar32_sha224_init(block_state); + struct Hacl_Streaming_MD_state_32_s tmp = { + .block_state = block_state, + .buf = buf, + .total_len = (uint64_t)(uint32_t)0U + }; + s[0U] = tmp; +} + +Hacl_Streaming_Types_error_code +Hacl_Streaming_SHA2_update_224(struct Hacl_Streaming_MD_state_32_s *p, + uint8_t *input, uint32_t input_len) +{ + return update_224_256(p, input, input_len); +} + +/** +Write the resulting hash into `dst`, an array of 28 bytes. The state remains +valid after a call to `finish_224`, meaning the user may feed more data into +the hash via `update_224`. 
+*/ +void Hacl_Streaming_SHA2_finish_224(struct Hacl_Streaming_MD_state_32_s *p, + uint8_t *dst) +{ + struct Hacl_Streaming_MD_state_32_s scrut = *p; + uint32_t *block_state = scrut.block_state; + uint8_t *buf_ = scrut.buf; + uint64_t total_len = scrut.total_len; + uint32_t r; + if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && + total_len > (uint64_t)0U) { + r = (uint32_t)64U; + } else { + r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); + } + uint8_t *buf_1 = buf_; + uint32_t tmp_block_state[8U] = { 0U }; + memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof(uint32_t)); + uint32_t ite; + if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) { + ite = (uint32_t)64U; + } else { + ite = r % (uint32_t)64U; + } + uint8_t *buf_last = buf_1 + r - ite; + uint8_t *buf_multi = buf_1; + sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state); + uint64_t prev_len_last = total_len - (uint64_t)r; + Hacl_SHA2_Scalar32_sha224_update_last(prev_len_last + (uint64_t)r, r, + buf_last, tmp_block_state); + Hacl_SHA2_Scalar32_sha224_finish(tmp_block_state, dst); +} + +/** +Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes. 
+*/ +void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, + uint8_t *dst) +{ + uint8_t *ib = input; + uint8_t *rb = dst; + uint32_t st[8U] = { 0U }; + Hacl_SHA2_Scalar32_sha224_init(st); + uint32_t rem = input_len % (uint32_t)64U; + uint64_t len_ = (uint64_t)input_len; + sha224_update_nblocks(input_len, ib, st); + uint32_t rem1 = input_len % (uint32_t)64U; + uint8_t *b0 = ib; + uint8_t *lb = b0 + input_len - rem1; + Hacl_SHA2_Scalar32_sha224_update_last(len_, rem, lb, st); + Hacl_SHA2_Scalar32_sha224_finish(st, rb); +} + +void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash) +{ + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + uint64_t *os = hash; + uint64_t x = Hacl_Impl_SHA2_Generic_h512[i]; + os[i] = x;); +} + +static inline void sha512_update(uint8_t *b, uint64_t *hash) +{ + uint64_t hash_old[8U] = { 0U }; + uint64_t ws[16U] = { 0U }; + memcpy(hash_old, hash, (uint32_t)8U * sizeof(uint64_t)); + uint8_t *b10 = b; + uint64_t u = load64_be(b10); + ws[0U] = u; + uint64_t u0 = load64_be(b10 + (uint32_t)8U); + ws[1U] = u0; + uint64_t u1 = load64_be(b10 + (uint32_t)16U); + ws[2U] = u1; + uint64_t u2 = load64_be(b10 + (uint32_t)24U); + ws[3U] = u2; + uint64_t u3 = load64_be(b10 + (uint32_t)32U); + ws[4U] = u3; + uint64_t u4 = load64_be(b10 + (uint32_t)40U); + ws[5U] = u4; + uint64_t u5 = load64_be(b10 + (uint32_t)48U); + ws[6U] = u5; + uint64_t u6 = load64_be(b10 + (uint32_t)56U); + ws[7U] = u6; + uint64_t u7 = load64_be(b10 + (uint32_t)64U); + ws[8U] = u7; + uint64_t u8 = load64_be(b10 + (uint32_t)72U); + ws[9U] = u8; + uint64_t u9 = load64_be(b10 + (uint32_t)80U); + ws[10U] = u9; + uint64_t u10 = load64_be(b10 + (uint32_t)88U); + ws[11U] = u10; + uint64_t u11 = load64_be(b10 + (uint32_t)96U); + ws[12U] = u11; + uint64_t u12 = load64_be(b10 + (uint32_t)104U); + ws[13U] = u12; + uint64_t u13 = load64_be(b10 + (uint32_t)112U); + ws[14U] = u13; + uint64_t u14 = load64_be(b10 + (uint32_t)120U); + ws[15U] = u14; + KRML_MAYBE_FOR5( + i0, 
(uint32_t)0U, (uint32_t)5U, (uint32_t)1U, + KRML_MAYBE_FOR16( + i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, + uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512 + [(uint32_t)16U * i0 + i]; + uint64_t ws_t = ws[i]; uint64_t a0 = hash[0U]; + uint64_t b0 = hash[1U]; uint64_t c0 = hash[2U]; + uint64_t d0 = hash[3U]; uint64_t e0 = hash[4U]; + uint64_t f0 = hash[5U]; uint64_t g0 = hash[6U]; + uint64_t h02 = hash[7U]; uint64_t k_e_t = k_t; + uint64_t t1 = + h02 + + ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U) ^ + ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U) ^ + (e0 << (uint32_t)23U | e0 >> (uint32_t)41U))) + + ((e0 & f0) ^ (~e0 & g0)) + k_e_t + ws_t; + uint64_t t2 = + ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U) ^ + ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U) ^ + (a0 << (uint32_t)25U | a0 >> (uint32_t)39U))) + + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); + uint64_t a1 = t1 + t2; uint64_t b1 = a0; + uint64_t c1 = b0; uint64_t d1 = c0; + uint64_t e1 = d0 + t1; uint64_t f1 = e0; + uint64_t g1 = f0; uint64_t h12 = g0; hash[0U] = a1; + hash[1U] = b1; hash[2U] = c1; hash[3U] = d1; + hash[4U] = e1; hash[5U] = f1; hash[6U] = g1; + hash[7U] = h12;); + if (i0 < (uint32_t)4U) { + KRML_MAYBE_FOR16( + i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, + uint64_t t16 = ws[i]; + uint64_t t15 = + ws[(i + (uint32_t)1U) % (uint32_t)16U]; + uint64_t t7 = + ws[(i + (uint32_t)9U) % (uint32_t)16U]; + uint64_t t2 = + ws[(i + (uint32_t)14U) % (uint32_t)16U]; + uint64_t s1 = (t2 << (uint32_t)45U | + t2 >> (uint32_t)19U) ^ + ((t2 << (uint32_t)3U | + t2 >> (uint32_t)61U) ^ + t2 >> (uint32_t)6U); + uint64_t s0 = (t15 << (uint32_t)63U | + t15 >> (uint32_t)1U) ^ + ((t15 << (uint32_t)56U | + t15 >> (uint32_t)8U) ^ + t15 >> (uint32_t)7U); + ws[i] = s1 + t7 + s0 + t16;); + }); + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + uint64_t *os = hash; + uint64_t x = hash[i] + hash_old[i]; os[i] = x;); +} + +void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, + uint64_t *st) +{ + 
uint32_t blocks = len / (uint32_t)128U; + for (uint32_t i = (uint32_t)0U; i < blocks; i++) { + uint8_t *b0 = b; + uint8_t *mb = b0 + i * (uint32_t)128U; + sha512_update(mb, st); + } +} + +void Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_uint128 totlen, + uint32_t len, uint8_t *b, + uint64_t *hash) +{ + uint32_t blocks; + if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U) { + blocks = (uint32_t)1U; + } else { + blocks = (uint32_t)2U; + } + uint32_t fin = blocks * (uint32_t)128U; + uint8_t last[256U] = { 0U }; + uint8_t totlen_buf[16U] = { 0U }; + FStar_UInt128_uint128 total_len_bits = + FStar_UInt128_shift_left(totlen, (uint32_t)3U); + store128_be(totlen_buf, total_len_bits); + uint8_t *b0 = b; + memcpy(last, b0, len * sizeof(uint8_t)); + last[len] = (uint8_t)0x80U; + memcpy(last + fin - (uint32_t)16U, totlen_buf, + (uint32_t)16U * sizeof(uint8_t)); + uint8_t *last00 = last; + uint8_t *last10 = last + (uint32_t)128U; + uint8_t *l0 = last00; + uint8_t *l1 = last10; + uint8_t *lb0 = l0; + uint8_t *lb1 = l1; + uint8_t *last0 = lb0; + uint8_t *last1 = lb1; + sha512_update(last0, hash); + if (blocks > (uint32_t)1U) { + sha512_update(last1, hash); + return; + } +} + +void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h) +{ + uint8_t hbuf[64U] = { 0U }; + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + store64_be(hbuf + i * (uint32_t)8U, st[i]);); + memcpy(h, hbuf, (uint32_t)64U * sizeof(uint8_t)); +} + +void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash) +{ + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + uint64_t *os = hash; + uint64_t x = Hacl_Impl_SHA2_Generic_h384[i]; + os[i] = x;); +} + +void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, + uint64_t *st) +{ + Hacl_SHA2_Scalar32_sha512_update_nblocks(len, b, st); +} + +void Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_uint128 totlen, + uint32_t len, uint8_t *b, + uint64_t *st) +{ + Hacl_SHA2_Scalar32_sha512_update_last(totlen, len, b, 
st); +} + +void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h) +{ + uint8_t hbuf[64U] = { 0U }; + KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, + store64_be(hbuf + i * (uint32_t)8U, st[i]);); + memcpy(h, hbuf, (uint32_t)48U * sizeof(uint8_t)); +} + +void Hacl_Streaming_SHA2_init_512(struct Hacl_Streaming_MD_state_64_s *s) +{ + struct Hacl_Streaming_MD_state_64_s scrut = *s; + uint8_t *buf = scrut.buf; + uint64_t *block_state = scrut.block_state; + Hacl_SHA2_Scalar32_sha512_init(block_state); + struct Hacl_Streaming_MD_state_64_s tmp = { + .block_state = block_state, + .buf = buf, + .total_len = (uint64_t)(uint32_t)0U + }; + s[0U] = tmp; +} + +static inline Hacl_Streaming_Types_error_code +update_384_512(struct Hacl_Streaming_MD_state_64_s *p, uint8_t *data, + uint32_t len) +{ + struct Hacl_Streaming_MD_state_64_s s = *p; + uint64_t total_len = s.total_len; + if ((uint64_t)len > (uint64_t)18446744073709551615U - total_len) { + return Hacl_Streaming_Types_MaximumLengthExceeded; + } + uint32_t sz; + if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && + total_len > (uint64_t)0U) { + sz = (uint32_t)128U; + } else { + sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U); + } + if (len <= (uint32_t)128U - sz) { + struct Hacl_Streaming_MD_state_64_s s1 = *p; + uint64_t *block_state1 = s1.block_state; + uint8_t *buf = s1.buf; + uint64_t total_len1 = s1.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = (uint32_t)128U; + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); + } + uint8_t *buf2 = buf + sz1; + memcpy(buf2, data, len * sizeof(uint8_t)); + uint64_t total_len2 = total_len1 + (uint64_t)len; + *p = ((struct Hacl_Streaming_MD_state_64_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len2 }); + } else if (sz == (uint32_t)0U) { + struct Hacl_Streaming_MD_state_64_s s1 = *p; + uint64_t *block_state1 = s1.block_state; + 
uint8_t *buf = s1.buf; + uint64_t total_len1 = s1.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = (uint32_t)128U; + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); + } + if (!(sz1 == (uint32_t)0U)) { + Hacl_SHA2_Scalar32_sha512_update_nblocks( + (uint32_t)128U, buf, block_state1); + } + uint32_t ite; + if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && + (uint64_t)len > (uint64_t)0U) { + ite = (uint32_t)128U; + } else { + ite = (uint32_t)((uint64_t)len % + (uint64_t)(uint32_t)128U); + } + uint32_t n_blocks = (len - ite) / (uint32_t)128U; + uint32_t data1_len = n_blocks * (uint32_t)128U; + uint32_t data2_len = len - data1_len; + uint8_t *data1 = data; + uint8_t *data2 = data + data1_len; + Hacl_SHA2_Scalar32_sha512_update_nblocks( + data1_len / (uint32_t)128U * (uint32_t)128U, data1, + block_state1); + uint8_t *dst = buf; + memcpy(dst, data2, data2_len * sizeof(uint8_t)); + *p = ((struct Hacl_Streaming_MD_state_64_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len1 + (uint64_t)len }); + } else { + uint32_t diff = (uint32_t)128U - sz; + uint8_t *data1 = data; + uint8_t *data2 = data + diff; + struct Hacl_Streaming_MD_state_64_s s1 = *p; + uint64_t *block_state10 = s1.block_state; + uint8_t *buf0 = s1.buf; + uint64_t total_len10 = s1.total_len; + uint32_t sz10; + if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && + total_len10 > (uint64_t)0U) { + sz10 = (uint32_t)128U; + } else { + sz10 = (uint32_t)(total_len10 % + (uint64_t)(uint32_t)128U); + } + uint8_t *buf2 = buf0 + sz10; + memcpy(buf2, data1, diff * sizeof(uint8_t)); + uint64_t total_len2 = total_len10 + (uint64_t)diff; + *p = ((struct Hacl_Streaming_MD_state_64_s){ + .block_state = block_state10, + .buf = buf0, + .total_len = total_len2 }); + struct Hacl_Streaming_MD_state_64_s s10 = *p; + uint64_t *block_state1 = s10.block_state; + uint8_t *buf = s10.buf; + uint64_t 
total_len1 = s10.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = (uint32_t)128U; + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); + } + if (!(sz1 == (uint32_t)0U)) { + Hacl_SHA2_Scalar32_sha512_update_nblocks( + (uint32_t)128U, buf, block_state1); + } + uint32_t ite; + if ((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U == + (uint64_t)0U && + (uint64_t)(len - diff) > (uint64_t)0U) { + ite = (uint32_t)128U; + } else { + ite = (uint32_t)((uint64_t)(len - diff) % + (uint64_t)(uint32_t)128U); + } + uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U; + uint32_t data1_len = n_blocks * (uint32_t)128U; + uint32_t data2_len = len - diff - data1_len; + uint8_t *data11 = data2; + uint8_t *data21 = data2 + data1_len; + Hacl_SHA2_Scalar32_sha512_update_nblocks( + data1_len / (uint32_t)128U * (uint32_t)128U, data11, + block_state1); + uint8_t *dst = buf; + memcpy(dst, data21, data2_len * sizeof(uint8_t)); + *p = ((struct Hacl_Streaming_MD_state_64_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len1 + (uint64_t)(len - diff) }); + } + return Hacl_Streaming_Types_Success; +} + +/** +Feed an arbitrary amount of data into the hash. This function returns 0 for +success, or 1 if the combined length of all of the data passed to `update_512` +(since the last call to `init_512`) exceeds 2^125-1 bytes. + +This function is identical to the update function for SHA2_384. +*/ +Hacl_Streaming_Types_error_code +Hacl_Streaming_SHA2_update_512(struct Hacl_Streaming_MD_state_64_s *p, + uint8_t *input, uint32_t input_len) +{ + return update_384_512(p, input, input_len); +} + +/** +Write the resulting hash into `dst`, an array of 64 bytes. The state remains +valid after a call to `finish_512`, meaning the user may feed more data into +the hash via `update_512`. 
(The finish_512 function operates on an internal copy +of the state and therefore does not invalidate the client-held state `p`.) +*/ +void Hacl_Streaming_SHA2_finish_512(struct Hacl_Streaming_MD_state_64_s *p, + uint8_t *dst) +{ + struct Hacl_Streaming_MD_state_64_s scrut = *p; + uint64_t *block_state = scrut.block_state; + uint8_t *buf_ = scrut.buf; + uint64_t total_len = scrut.total_len; + uint32_t r; + if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && + total_len > (uint64_t)0U) { + r = (uint32_t)128U; + } else { + r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U); + } + uint8_t *buf_1 = buf_; + uint64_t tmp_block_state[8U] = { 0U }; + memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof(uint64_t)); + uint32_t ite; + if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) { + ite = (uint32_t)128U; + } else { + ite = r % (uint32_t)128U; + } + uint8_t *buf_last = buf_1 + r - ite; + uint8_t *buf_multi = buf_1; + Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)0U, buf_multi, + tmp_block_state); + uint64_t prev_len_last = total_len - (uint64_t)r; + Hacl_SHA2_Scalar32_sha512_update_last( + FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last), + FStar_UInt128_uint64_to_uint128((uint64_t)r)), + r, buf_last, tmp_block_state); + Hacl_SHA2_Scalar32_sha512_finish(tmp_block_state, dst); +} + +void Hacl_Streaming_SHA2_init_384(struct Hacl_Streaming_MD_state_64_s *s) +{ + struct Hacl_Streaming_MD_state_64_s scrut = *s; + uint8_t *buf = scrut.buf; + uint64_t *block_state = scrut.block_state; + Hacl_SHA2_Scalar32_sha384_init(block_state); + struct Hacl_Streaming_MD_state_64_s tmp = { + .block_state = block_state, + .buf = buf, + .total_len = (uint64_t)(uint32_t)0U + }; + s[0U] = tmp; +} + +Hacl_Streaming_Types_error_code +Hacl_Streaming_SHA2_update_384(struct Hacl_Streaming_MD_state_64_s *p, + uint8_t *input, uint32_t input_len) +{ + return update_384_512(p, input, input_len); +} + +/* + * Write the resulting hash into `dst`, an array of 48 
bytes. The state remains + * valid after a call to `finish_384`, meaning the user may feed more data into + * the hash via `update_384`. + */ +void Hacl_Streaming_SHA2_finish_384(struct Hacl_Streaming_MD_state_64_s *p, + uint8_t *dst) +{ + struct Hacl_Streaming_MD_state_64_s scrut = *p; + uint64_t *block_state = scrut.block_state; + uint8_t *buf_ = scrut.buf; + uint64_t total_len = scrut.total_len; + uint32_t r; + if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && + total_len > (uint64_t)0U) { + r = (uint32_t)128U; + } else { + r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U); + } + uint8_t *buf_1 = buf_; + uint64_t tmp_block_state[8U] = { 0U }; + memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof(uint64_t)); + uint32_t ite; + if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) { + ite = (uint32_t)128U; + } else { + ite = r % (uint32_t)128U; + } + uint8_t *buf_last = buf_1 + r - ite; + uint8_t *buf_multi = buf_1; + Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)0U, buf_multi, + tmp_block_state); + uint64_t prev_len_last = total_len - (uint64_t)r; + Hacl_SHA2_Scalar32_sha384_update_last( + FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last), + FStar_UInt128_uint64_to_uint128((uint64_t)r)), + r, buf_last, tmp_block_state); + Hacl_SHA2_Scalar32_sha384_finish(tmp_block_state, dst); +} diff --git a/crypto/sha2-hacl.c b/crypto/sha2-hacl.c new file mode 100644 index 0000000000000..6400fbce0ae0b --- /dev/null +++ b/crypto/sha2-hacl.c @@ -0,0 +1,191 @@ +/* + * GPLv2 or MIT License + * + * Copyright (c) 2023 Cryspen + * + */ + +#include +#include + +#include "hacl_hash.h" +#include "hacl_lib.h" + +int hacl_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct Hacl_Streaming_MD_state_32_s st; + st.block_state = sctx->state; + st.buf = sctx->buf; + st.total_len = sctx->count; + uint8_t res = Hacl_Streaming_SHA2_update_256(&st, (u8 *)data, len); + 
sctx->count = st.total_len; + return res; +} +EXPORT_SYMBOL(hacl_sha256_update); + +static int hacl_sha256_final(struct shash_desc *desc, u8 *out) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct Hacl_Streaming_MD_state_32_s st; + st.block_state = sctx->state; + st.buf = sctx->buf; + st.total_len = sctx->count; + if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE) + Hacl_Streaming_SHA2_finish_224(&st, out); + else + Hacl_Streaming_SHA2_finish_256(&st, out); + return 0; +} + +int hacl_sha256_finup(struct shash_desc *desc, const u8 *data, unsigned int len, + u8 *hash) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + struct Hacl_Streaming_MD_state_32_s st; + st.block_state = sctx->state; + st.buf = sctx->buf; + st.total_len = sctx->count; + /* Propagate overflow errors like hacl_sha512_finup() does. */ uint8_t res = Hacl_Streaming_SHA2_update_256(&st, (u8 *)data, len); if (res != 0) return res; + if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE) + Hacl_Streaming_SHA2_finish_224(&st, hash); + else + Hacl_Streaming_SHA2_finish_256(&st, hash); + return 0; +} +EXPORT_SYMBOL(hacl_sha256_finup); + +int hacl_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct Hacl_Streaming_MD_state_64_s st; + st.block_state = sctx->state; + st.buf = sctx->buf; + st.total_len = sctx->count[0]; + uint8_t res = Hacl_Streaming_SHA2_update_512(&st, (u8 *)data, len); + sctx->count[0] = st.total_len; + return res; +} +EXPORT_SYMBOL(hacl_sha512_update); + +static int hacl_sha512_final(struct shash_desc *desc, u8 *hash) +{ + struct sha512_state *sctx = shash_desc_ctx(desc); + struct Hacl_Streaming_MD_state_64_s st; + st.block_state = sctx->state; + st.buf = sctx->buf; + st.total_len = sctx->count[0]; + if (crypto_shash_digestsize(desc->tfm) == SHA384_DIGEST_SIZE) + Hacl_Streaming_SHA2_finish_384(&st, hash); + else + Hacl_Streaming_SHA2_finish_512(&st, hash); + return 0; +} + +int hacl_sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, + u8 *hash) +{ 
+ struct sha512_state *sctx = shash_desc_ctx(desc); + struct Hacl_Streaming_MD_state_64_s st; + st.block_state = sctx->state; + st.buf = sctx->buf; + st.total_len = sctx->count[0]; + uint8_t res = Hacl_Streaming_SHA2_update_512(&st, (u8 *)data, len); + if (res == 0) { + if (crypto_shash_digestsize(desc->tfm) == SHA384_DIGEST_SIZE) + Hacl_Streaming_SHA2_finish_384(&st, hash); + else + Hacl_Streaming_SHA2_finish_512(&st, hash); + return 0; + } else { + return res; + } +} +EXPORT_SYMBOL(hacl_sha512_finup); + +static struct shash_alg sha2_hacl_algs[4] = { { + .digestsize = SHA256_DIGEST_SIZE, + .init = sha256_base_init, + .update = hacl_sha256_update, + .final = hacl_sha256_final, + .finup = hacl_sha256_finup, + .descsize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-hacl", + .cra_priority = 100, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } + }, { + .digestsize = SHA224_DIGEST_SIZE, + .init = sha224_base_init, + .update = hacl_sha256_update, + .final = hacl_sha256_final, + .finup = hacl_sha256_finup, + .descsize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-hacl", + .cra_priority = 100, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } + }, { + .digestsize = SHA384_DIGEST_SIZE, + .init = sha384_base_init, + .update = hacl_sha512_update, + .final = hacl_sha512_final, + .finup = hacl_sha512_finup, + .descsize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha384", + .cra_driver_name = "sha384-hacl", + .cra_priority = 100, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } + }, { + .digestsize = SHA512_DIGEST_SIZE, + .init = sha512_base_init, + .update = hacl_sha512_update, + .final = hacl_sha512_final, + .finup = hacl_sha512_finup, + .descsize = sizeof(struct sha512_state), + .base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-hacl", + .cra_priority = 100, + .cra_blocksize = 
SHA512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } + } +}; + +static int __init sha2_hacl_mod_init(void) +{ + return crypto_register_shashes(sha2_hacl_algs, + ARRAY_SIZE(sha2_hacl_algs)); +} + +static void __exit sha2_hacl_mod_fini(void) +{ + crypto_unregister_shashes(sha2_hacl_algs, ARRAY_SIZE(sha2_hacl_algs)); +} + +subsys_initcall(sha2_hacl_mod_init); +module_exit(sha2_hacl_mod_fini); + +/* "GPLv2 or MIT" is not a string the kernel's license checker recognizes; the canonical ident for this dual license is "Dual MIT/GPL". */ +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Formally Verified SHA-2 Secure Hash Algorithm from HACL*"); + +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha224-hacl"); +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha256-hacl"); +MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha384-hacl"); +MODULE_ALIAS_CRYPTO("sha512"); +MODULE_ALIAS_CRYPTO("sha512-hacl"); diff --git a/crypto/sha3-hacl-generated.c b/crypto/sha3-hacl-generated.c new file mode 100644 index 0000000000000..53d624ccb166d --- /dev/null +++ b/crypto/sha3-hacl-generated.c @@ -0,0 +1,582 @@ +/* GPLv2 or MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + */ + +#include "hacl_hash.h" +#include "hacl_lib.h" + +static uint32_t block_len(Spec_Hash_Definitions_hash_alg a) +{ + switch (a) { + case Spec_Hash_Definitions_SHA3_224: { + return (uint32_t)144U; + } + case Spec_Hash_Definitions_SHA3_256: { + return (uint32_t)136U; + } + case Spec_Hash_Definitions_SHA3_384: { + return (uint32_t)104U; + } + case Spec_Hash_Definitions_SHA3_512: { + return (uint32_t)72U; + } + case Spec_Hash_Definitions_Shake128: { + return (uint32_t)168U; + } + case Spec_Hash_Definitions_Shake256: { + return (uint32_t)136U; + } + default: { + return (uint32_t)144U; + } + } +} + +static uint32_t hash_len(Spec_Hash_Definitions_hash_alg a) +{ + switch (a) { + case Spec_Hash_Definitions_SHA3_224: { + return (uint32_t)28U; + } + case Spec_Hash_Definitions_SHA3_256: { + return (uint32_t)32U; + } + case Spec_Hash_Definitions_SHA3_384: { + return 
(uint32_t)48U; + } + case Spec_Hash_Definitions_SHA3_512: { + return (uint32_t)64U; + } + default: { + return (uint32_t)64U; + } + } +} + +static void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, + uint64_t *s) +{ + uint8_t block[200U] = { 0U }; + memcpy(block, input, rateInBytes * sizeof(uint8_t)); + for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++) { + uint64_t u = load64_le(block + i * (uint32_t)8U); + uint64_t x = u; + s[i] = s[i] ^ x; + } +} + +static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res) +{ + uint8_t block[200U] = { 0U }; + for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++) { + uint64_t sj = s[i]; + store64_le(block + i * (uint32_t)8U, sj); + } + memcpy(res, block, rateInBytes * sizeof(uint8_t)); +} + +static const uint32_t keccak_rotc[24U] = { + (uint32_t)1U, (uint32_t)3U, (uint32_t)6U, (uint32_t)10U, + (uint32_t)15U, (uint32_t)21U, (uint32_t)28U, (uint32_t)36U, + (uint32_t)45U, (uint32_t)55U, (uint32_t)2U, (uint32_t)14U, + (uint32_t)27U, (uint32_t)41U, (uint32_t)56U, (uint32_t)8U, + (uint32_t)25U, (uint32_t)43U, (uint32_t)62U, (uint32_t)18U, + (uint32_t)39U, (uint32_t)61U, (uint32_t)20U, (uint32_t)44U +}; + +static const uint32_t keccak_piln[24U] = { + (uint32_t)10U, (uint32_t)7U, (uint32_t)11U, (uint32_t)17U, + (uint32_t)18U, (uint32_t)3U, (uint32_t)5U, (uint32_t)16U, + (uint32_t)8U, (uint32_t)21U, (uint32_t)24U, (uint32_t)4U, + (uint32_t)15U, (uint32_t)23U, (uint32_t)19U, (uint32_t)13U, + (uint32_t)12U, (uint32_t)2U, (uint32_t)20U, (uint32_t)14U, + (uint32_t)22U, (uint32_t)9U, (uint32_t)6U, (uint32_t)1U +}; + +static const uint64_t keccak_rndc[24U] = { + (uint64_t)0x0000000000000001U, (uint64_t)0x0000000000008082U, + (uint64_t)0x800000000000808aU, (uint64_t)0x8000000080008000U, + (uint64_t)0x000000000000808bU, (uint64_t)0x0000000080000001U, + (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008009U, + (uint64_t)0x000000000000008aU, (uint64_t)0x0000000000000088U, + (uint64_t)0x0000000080008009U, 
(uint64_t)0x000000008000000aU, + (uint64_t)0x000000008000808bU, (uint64_t)0x800000000000008bU, + (uint64_t)0x8000000000008089U, (uint64_t)0x8000000000008003U, + (uint64_t)0x8000000000008002U, (uint64_t)0x8000000000000080U, + (uint64_t)0x000000000000800aU, (uint64_t)0x800000008000000aU, + (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008080U, + (uint64_t)0x0000000080000001U, (uint64_t)0x8000000080008008U +}; + +static void Hacl_Impl_SHA3_state_permute(uint64_t *s) +{ + for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)24U; i0++) { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, + _C[i] = s[i + (uint32_t)0U] ^ + (s[i + (uint32_t)5U] ^ + (s[i + (uint32_t)10U] ^ + (s[i + (uint32_t)15U] ^ + s[i + (uint32_t)20U])));); + KRML_MAYBE_FOR5( + i1, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, + uint64_t uu____0 = + _C[(i1 + (uint32_t)1U) % (uint32_t)5U]; + uint64_t _D = _C[(i1 + (uint32_t)4U) % (uint32_t)5U] ^ + (uu____0 << (uint32_t)1U | + uu____0 >> (uint32_t)63U); + KRML_MAYBE_FOR5( + i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, + s[i1 + (uint32_t)5U * i] = + s[i1 + (uint32_t)5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t curr = x; + for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++) { + uint32_t _Y = keccak_piln[i]; + uint32_t r = keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = curr; + s[_Y] = uu____1 << r | uu____1 >> ((uint32_t)64U - r); + curr = temp; + } + KRML_MAYBE_FOR5( + i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, + uint64_t v0 = s[(uint32_t)0U + (uint32_t)5U * i] ^ + (~s[(uint32_t)1U + (uint32_t)5U * i] & + s[(uint32_t)2U + (uint32_t)5U * i]); + uint64_t v1 = s[(uint32_t)1U + (uint32_t)5U * i] ^ + (~s[(uint32_t)2U + (uint32_t)5U * i] & + s[(uint32_t)3U + (uint32_t)5U * i]); + uint64_t v2 = s[(uint32_t)2U + (uint32_t)5U * i] ^ + (~s[(uint32_t)3U + (uint32_t)5U * i] & + s[(uint32_t)4U + (uint32_t)5U * i]); + uint64_t v3 = s[(uint32_t)3U + (uint32_t)5U * i] ^ + (~s[(uint32_t)4U + (uint32_t)5U * i] & 
+ s[(uint32_t)0U + (uint32_t)5U * i]); + uint64_t v4 = s[(uint32_t)4U + (uint32_t)5U * i] ^ + (~s[(uint32_t)0U + (uint32_t)5U * i] & + s[(uint32_t)1U + (uint32_t)5U * i]); + s[(uint32_t)0U + (uint32_t)5U * i] = v0; + s[(uint32_t)1U + (uint32_t)5U * i] = v1; + s[(uint32_t)2U + (uint32_t)5U * i] = v2; + s[(uint32_t)3U + (uint32_t)5U * i] = v3; + s[(uint32_t)4U + (uint32_t)5U * i] = v4;); + uint64_t c = keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } +} + +static void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, + uint64_t *s) +{ + Hacl_Impl_SHA3_loadState(rateInBytes, block, s); + Hacl_Impl_SHA3_state_permute(s); +} + +static void absorb(uint64_t *s, uint32_t rateInBytes, uint32_t inputByteLen, + uint8_t *input, uint8_t delimitedSuffix) +{ + uint32_t n_blocks = inputByteLen / rateInBytes; + uint32_t rem = inputByteLen % rateInBytes; + for (uint32_t i = (uint32_t)0U; i < n_blocks; i++) { + uint8_t *block = input + i * rateInBytes; + Hacl_Impl_SHA3_absorb_inner(rateInBytes, block, s); + } + uint8_t *last = input + n_blocks * rateInBytes; + uint8_t lastBlock_[200U] = { 0U }; + uint8_t *lastBlock = lastBlock_; + memcpy(lastBlock, last, rem * sizeof(uint8_t)); + lastBlock[rem] = delimitedSuffix; + Hacl_Impl_SHA3_loadState(rateInBytes, lastBlock, s); + if (!((delimitedSuffix & (uint8_t)0x80U) == (uint8_t)0U) && + rem == rateInBytes - (uint32_t)1U) { + Hacl_Impl_SHA3_state_permute(s); + } + uint8_t nextBlock_[200U] = { 0U }; + uint8_t *nextBlock = nextBlock_; + nextBlock[rateInBytes - (uint32_t)1U] = (uint8_t)0x80U; + Hacl_Impl_SHA3_loadState(rateInBytes, nextBlock, s); + Hacl_Impl_SHA3_state_permute(s); +} + +static void Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_hash_alg a, + uint64_t *s, uint8_t *blocks, + uint32_t n_blocks) +{ + for (uint32_t i = (uint32_t)0U; i < n_blocks; i++) { + uint8_t *block = blocks + i * block_len(a); + Hacl_Impl_SHA3_absorb_inner(block_len(a), block, s); + } +} + +static void 
Hacl_Hash_SHA3_update_last_sha3(Spec_Hash_Definitions_hash_alg a, + uint64_t *s, uint8_t *input, + uint32_t input_len) +{ + uint8_t suffix; + if (a == Spec_Hash_Definitions_Shake128 || + a == Spec_Hash_Definitions_Shake256) { + suffix = (uint8_t)0x1fU; + } else { + suffix = (uint8_t)0x06U; + } + uint32_t len = block_len(a); + if (input_len == len) { + Hacl_Impl_SHA3_absorb_inner(len, input, s); + uint8_t *uu____0 = input + input_len; + uint8_t lastBlock_[200U] = { 0U }; + uint8_t *lastBlock = lastBlock_; + memcpy(lastBlock, uu____0, (uint32_t)0U * sizeof(uint8_t)); + lastBlock[0U] = suffix; + Hacl_Impl_SHA3_loadState(len, lastBlock, s); + if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && + (uint32_t)0U == len - (uint32_t)1U) { + Hacl_Impl_SHA3_state_permute(s); + } + uint8_t nextBlock_[200U] = { 0U }; + uint8_t *nextBlock = nextBlock_; + nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U; + Hacl_Impl_SHA3_loadState(len, nextBlock, s); + Hacl_Impl_SHA3_state_permute(s); + return; + } + uint8_t lastBlock_[200U] = { 0U }; + uint8_t *lastBlock = lastBlock_; + memcpy(lastBlock, input, input_len * sizeof(uint8_t)); + lastBlock[input_len] = suffix; + Hacl_Impl_SHA3_loadState(len, lastBlock, s); + if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && + input_len == len - (uint32_t)1U) { + Hacl_Impl_SHA3_state_permute(s); + } + uint8_t nextBlock_[200U] = { 0U }; + uint8_t *nextBlock = nextBlock_; + nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U; + Hacl_Impl_SHA3_loadState(len, nextBlock, s); + Hacl_Impl_SHA3_state_permute(s); +} + +typedef struct hash_buf2_s { + struct Hacl_Streaming_Keccak_hash_buf_s fst; + struct Hacl_Streaming_Keccak_hash_buf_s snd; +} hash_buf2; + +static Spec_Hash_Definitions_hash_alg +Hacl_Streaming_Keccak_get_alg(struct Hacl_Streaming_Keccak_state_s *s) +{ + struct Hacl_Streaming_Keccak_state_s scrut = *s; + struct Hacl_Streaming_Keccak_hash_buf_s block_state = scrut.block_state; + return block_state.fst; +} + +void Hacl_Streaming_Keccak_reset(struct 
Hacl_Streaming_Keccak_state_s *s) +{ + struct Hacl_Streaming_Keccak_state_s scrut = *s; + uint8_t *buf = scrut.buf; + struct Hacl_Streaming_Keccak_hash_buf_s block_state = scrut.block_state; + Spec_Hash_Definitions_hash_alg i = block_state.fst; + KRML_HOST_IGNORE(i); + uint64_t *s1 = block_state.snd; + memset(s1, 0U, (uint32_t)25U * sizeof(uint64_t)); + struct Hacl_Streaming_Keccak_state_s tmp = { + .block_state = block_state, + .buf = buf, + .total_len = (uint64_t)(uint32_t)0U + }; + s[0U] = tmp; +} + +Hacl_Streaming_Types_error_code +Hacl_Streaming_Keccak_update(struct Hacl_Streaming_Keccak_state_s *p, + uint8_t *data, uint32_t len) +{ + struct Hacl_Streaming_Keccak_state_s s = *p; + struct Hacl_Streaming_Keccak_hash_buf_s block_state = s.block_state; + uint64_t total_len = s.total_len; + Spec_Hash_Definitions_hash_alg i = block_state.fst; + if ((uint64_t)len > (uint64_t)0xFFFFFFFFFFFFFFFFU - total_len) { + return Hacl_Streaming_Types_MaximumLengthExceeded; + } + uint32_t sz; + if (total_len % (uint64_t)block_len(i) == (uint64_t)0U && + total_len > (uint64_t)0U) { + sz = block_len(i); + } else { + sz = (uint32_t)(total_len % (uint64_t)block_len(i)); + } + if (len <= block_len(i) - sz) { + struct Hacl_Streaming_Keccak_state_s s1 = *p; + struct Hacl_Streaming_Keccak_hash_buf_s block_state1 = + s1.block_state; + uint8_t *buf = s1.buf; + uint64_t total_len1 = s1.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = block_len(i); + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i)); + } + uint8_t *buf2 = buf + sz1; + memcpy(buf2, data, len * sizeof(uint8_t)); + uint64_t total_len2 = total_len1 + (uint64_t)len; + *p = ((struct Hacl_Streaming_Keccak_state_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len2 }); + } else if (sz == (uint32_t)0U) { + struct Hacl_Streaming_Keccak_state_s s1 = *p; + struct Hacl_Streaming_Keccak_hash_buf_s block_state1 = + 
s1.block_state; + uint8_t *buf = s1.buf; + uint64_t total_len1 = s1.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = block_len(i); + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i)); + } + if (!(sz1 == (uint32_t)0U)) { + Spec_Hash_Definitions_hash_alg a1 = block_state1.fst; + uint64_t *s2 = block_state1.snd; + Hacl_Hash_SHA3_update_multi_sha3( + a1, s2, buf, block_len(i) / block_len(a1)); + } + uint32_t ite; + if ((uint64_t)len % (uint64_t)block_len(i) == (uint64_t)0U && + (uint64_t)len > (uint64_t)0U) { + ite = block_len(i); + } else { + ite = (uint32_t)((uint64_t)len % + (uint64_t)block_len(i)); + } + uint32_t n_blocks = (len - ite) / block_len(i); + uint32_t data1_len = n_blocks * block_len(i); + uint32_t data2_len = len - data1_len; + uint8_t *data1 = data; + uint8_t *data2 = data + data1_len; + Spec_Hash_Definitions_hash_alg a1 = block_state1.fst; + uint64_t *s2 = block_state1.snd; + Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, + data1_len / block_len(a1)); + uint8_t *dst = buf; + memcpy(dst, data2, data2_len * sizeof(uint8_t)); + *p = ((struct Hacl_Streaming_Keccak_state_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len1 + (uint64_t)len }); + } else { + uint32_t diff = block_len(i) - sz; + uint8_t *data1 = data; + uint8_t *data2 = data + diff; + struct Hacl_Streaming_Keccak_state_s s1 = *p; + struct Hacl_Streaming_Keccak_hash_buf_s block_state10 = + s1.block_state; + uint8_t *buf0 = s1.buf; + uint64_t total_len10 = s1.total_len; + uint32_t sz10; + if (total_len10 % (uint64_t)block_len(i) == (uint64_t)0U && + total_len10 > (uint64_t)0U) { + sz10 = block_len(i); + } else { + sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i)); + } + uint8_t *buf2 = buf0 + sz10; + memcpy(buf2, data1, diff * sizeof(uint8_t)); + uint64_t total_len2 = total_len10 + (uint64_t)diff; + *p = ((struct Hacl_Streaming_Keccak_state_s){ + .block_state = block_state10, 
+ .buf = buf0, + .total_len = total_len2 }); + struct Hacl_Streaming_Keccak_state_s s10 = *p; + struct Hacl_Streaming_Keccak_hash_buf_s block_state1 = + s10.block_state; + uint8_t *buf = s10.buf; + uint64_t total_len1 = s10.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && + total_len1 > (uint64_t)0U) { + sz1 = block_len(i); + } else { + sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i)); + } + if (!(sz1 == (uint32_t)0U)) { + Spec_Hash_Definitions_hash_alg a1 = block_state1.fst; + uint64_t *s2 = block_state1.snd; + Hacl_Hash_SHA3_update_multi_sha3( + a1, s2, buf, block_len(i) / block_len(a1)); + } + uint32_t ite; + if ((uint64_t)(len - diff) % (uint64_t)block_len(i) == + (uint64_t)0U && + (uint64_t)(len - diff) > (uint64_t)0U) { + ite = block_len(i); + } else { + ite = (uint32_t)((uint64_t)(len - diff) % + (uint64_t)block_len(i)); + } + uint32_t n_blocks = (len - diff - ite) / block_len(i); + uint32_t data1_len = n_blocks * block_len(i); + uint32_t data2_len = len - diff - data1_len; + uint8_t *data11 = data2; + uint8_t *data21 = data2 + data1_len; + Spec_Hash_Definitions_hash_alg a1 = block_state1.fst; + uint64_t *s2 = block_state1.snd; + Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data11, + data1_len / block_len(a1)); + uint8_t *dst = buf; + memcpy(dst, data21, data2_len * sizeof(uint8_t)); + *p = ((struct Hacl_Streaming_Keccak_state_s){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len1 + (uint64_t)(len - diff) }); + } + return Hacl_Streaming_Types_Success; +} + +static void Hacl_Impl_SHA3_squeeze(uint64_t *s, uint32_t rateInBytes, + uint32_t outputByteLen, uint8_t *output) +{ + uint32_t outBlocks = outputByteLen / rateInBytes; + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t *last = output + outputByteLen - remOut; + uint8_t *blocks = output; + for (uint32_t i = (uint32_t)0U; i < outBlocks; i++) { + storeState(rateInBytes, s, blocks + i * rateInBytes); + Hacl_Impl_SHA3_state_permute(s); + } + 
storeState(remOut, s, last); +} + +static void finish_(Spec_Hash_Definitions_hash_alg a, + struct Hacl_Streaming_Keccak_state_s *p, uint8_t *dst, + uint32_t l) +{ + struct Hacl_Streaming_Keccak_state_s scrut0 = *p; + struct Hacl_Streaming_Keccak_hash_buf_s block_state = + scrut0.block_state; + uint8_t *buf_ = scrut0.buf; + uint64_t total_len = scrut0.total_len; + uint32_t r; + if (total_len % (uint64_t)block_len(a) == (uint64_t)0U && + total_len > (uint64_t)0U) { + r = block_len(a); + } else { + r = (uint32_t)(total_len % (uint64_t)block_len(a)); + } + uint8_t *buf_1 = buf_; + uint64_t buf[25U] = { 0U }; + struct Hacl_Streaming_Keccak_hash_buf_s tmp_block_state = { + .fst = a, .snd = buf + }; + hash_buf2 scrut = { .fst = block_state, .snd = tmp_block_state }; + uint64_t *s_dst = scrut.snd.snd; + uint64_t *s_src = scrut.fst.snd; + memcpy(s_dst, s_src, (uint32_t)25U * sizeof(uint64_t)); + uint32_t ite; + if (r % block_len(a) == (uint32_t)0U && r > (uint32_t)0U) { + ite = block_len(a); + } else { + ite = r % block_len(a); + } + uint8_t *buf_last = buf_1 + r - ite; + uint8_t *buf_multi = buf_1; + Spec_Hash_Definitions_hash_alg a1 = tmp_block_state.fst; + uint64_t *s0 = tmp_block_state.snd; + Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, + (uint32_t)0U / block_len(a1)); + Spec_Hash_Definitions_hash_alg a10 = tmp_block_state.fst; + uint64_t *s1 = tmp_block_state.snd; + Hacl_Hash_SHA3_update_last_sha3(a10, s1, buf_last, r); + Spec_Hash_Definitions_hash_alg a11 = tmp_block_state.fst; + uint64_t *s = tmp_block_state.snd; + if (a11 == Spec_Hash_Definitions_Shake128 || + a11 == Spec_Hash_Definitions_Shake256) { + Hacl_Impl_SHA3_squeeze(s, block_len(a11), l, dst); + return; + } + Hacl_Impl_SHA3_squeeze(s, block_len(a11), hash_len(a11), dst); +} + +Hacl_Streaming_Types_error_code +Hacl_Streaming_Keccak_finish(struct Hacl_Streaming_Keccak_state_s *s, + uint8_t *dst) +{ + Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s); + if (a1 == 
Spec_Hash_Definitions_Shake128 || + a1 == Spec_Hash_Definitions_Shake256) { + return Hacl_Streaming_Types_InvalidAlgorithm; + } + finish_(a1, s, dst, hash_len(a1)); + return Hacl_Streaming_Types_Success; +} + +Hacl_Streaming_Types_error_code +Hacl_Streaming_Keccak_squeeze(struct Hacl_Streaming_Keccak_state_s *s, + uint8_t *dst, uint32_t l) +{ + Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s); + if (!(a1 == Spec_Hash_Definitions_Shake128 || + a1 == Spec_Hash_Definitions_Shake256)) { + return Hacl_Streaming_Types_InvalidAlgorithm; + } + if (l == (uint32_t)0U) { + return Hacl_Streaming_Types_InvalidLength; + } + finish_(a1, s, dst, l); + return Hacl_Streaming_Types_Success; +} + +static void Hacl_Impl_SHA3_keccak(uint32_t rate, uint32_t capacity, + uint32_t inputByteLen, uint8_t *input, + uint8_t delimitedSuffix, + uint32_t outputByteLen, uint8_t *output) +{ + uint32_t rateInBytes = rate / (uint32_t)8U; + uint64_t s[25U] = { 0U }; + absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix); + Hacl_Impl_SHA3_squeeze(s, rateInBytes, outputByteLen, output); +} + +void Hacl_SHA3_shake128_hacl(uint32_t inputByteLen, uint8_t *input, + uint32_t outputByteLen, uint8_t *output) +{ + Hacl_Impl_SHA3_keccak((uint32_t)1344U, (uint32_t)256U, inputByteLen, + input, (uint8_t)0x1FU, outputByteLen, output); +} + +void Hacl_SHA3_shake256_hacl(uint32_t inputByteLen, uint8_t *input, + uint32_t outputByteLen, uint8_t *output) +{ + Hacl_Impl_SHA3_keccak((uint32_t)1088U, (uint32_t)512U, inputByteLen, + input, (uint8_t)0x1FU, outputByteLen, output); +} + +void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output) +{ + Hacl_Impl_SHA3_keccak((uint32_t)1152U, (uint32_t)448U, inputByteLen, + input, (uint8_t)0x06U, (uint32_t)28U, output); +} + +void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output) +{ + Hacl_Impl_SHA3_keccak((uint32_t)1088U, (uint32_t)512U, inputByteLen, + input, (uint8_t)0x06U, (uint32_t)32U, output); +} + 
+void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output) +{ + Hacl_Impl_SHA3_keccak((uint32_t)832U, (uint32_t)768U, inputByteLen, + input, (uint8_t)0x06U, (uint32_t)48U, output); +} + +void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output) +{ + Hacl_Impl_SHA3_keccak((uint32_t)576U, (uint32_t)1024U, inputByteLen, + input, (uint8_t)0x06U, (uint32_t)64U, output); +} diff --git a/crypto/sha3-hacl.c b/crypto/sha3-hacl.c new file mode 100644 index 0000000000000..c9241aa4aaf24 --- /dev/null +++ b/crypto/sha3-hacl.c @@ -0,0 +1,149 @@ +/* GPLv2 or MIT License + * + * Copyright (c) 2023 Cryspen + * + */ + +#include "hacl_hash.h" +#include "hacl_lib.h" + +#include + +int hacl_sha3_init(struct shash_desc *desc) +{ + struct sha3_state *sctx = shash_desc_ctx(desc); + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + sctx->rsiz = 200 - 2 * digest_size; + sctx->rsizw = sctx->rsiz / 8; + sctx->partial = 0; + memset(sctx->st, 0, sizeof(sctx->st)); + return 0; +} +EXPORT_SYMBOL(hacl_sha3_init); + +Spec_Hash_Definitions_hash_alg hacl_sha3_alg(unsigned int rsiz) +{ + switch (rsiz) { + case 144: { + return Spec_Hash_Definitions_SHA3_224; + } + case 136: { + return Spec_Hash_Definitions_SHA3_256; + } + case 104: { + return Spec_Hash_Definitions_SHA3_384; + } + case 72: { + return Spec_Hash_Definitions_SHA3_512; + } + default: { + return Spec_Hash_Definitions_SHA3_256; + } + } +} + +int hacl_sha3_update(struct shash_desc *desc, const u8 *data, unsigned int len) +{ + struct sha3_state *sctx = shash_desc_ctx(desc); + struct Hacl_Streaming_Keccak_state_s st; + st.block_state.fst = hacl_sha3_alg(sctx->rsiz); + st.block_state.snd = sctx->st; + st.buf = sctx->buf; + st.total_len = 0; + uint8_t ret = Hacl_Streaming_Keccak_update(&st, (uint8_t *)data, len); + if (ret > 0) { + return -1; + } else { + return 0; + } +} +EXPORT_SYMBOL(hacl_sha3_update); + +int hacl_sha3_final(struct shash_desc *desc, u8 *out) +{ + struct sha3_state *sctx 
= shash_desc_ctx(desc); + struct Hacl_Streaming_Keccak_state_s st; + st.block_state.fst = hacl_sha3_alg(sctx->rsiz); + st.block_state.snd = sctx->st; + st.buf = sctx->buf; + st.total_len = 0; + uint8_t ret = Hacl_Streaming_Keccak_finish(&st, out); + if (ret > 0) { + return -1; + } else { + return 0; + } +} +EXPORT_SYMBOL(hacl_sha3_final); + +static struct shash_alg algs[] = { + { + .digestsize = SHA3_224_DIGEST_SIZE, + .init = hacl_sha3_init, + .update = hacl_sha3_update, + .final = hacl_sha3_final, + .descsize = sizeof(struct sha3_state), + .base.cra_name = "sha3-224", + .base.cra_driver_name = "sha3-224-hacl", + .base.cra_blocksize = SHA3_224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + }, + { + .digestsize = SHA3_256_DIGEST_SIZE, + .init = hacl_sha3_init, + .update = hacl_sha3_update, + .final = hacl_sha3_final, + .descsize = sizeof(struct sha3_state), + .base.cra_name = "sha3-256", + .base.cra_driver_name = "sha3-256-hacl", + .base.cra_blocksize = SHA3_256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + }, + { + .digestsize = SHA3_384_DIGEST_SIZE, + .init = hacl_sha3_init, + .update = hacl_sha3_update, + .final = hacl_sha3_final, + .descsize = sizeof(struct sha3_state), + .base.cra_name = "sha3-384", + .base.cra_driver_name = "sha3-384-hacl", + .base.cra_blocksize = SHA3_384_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + }, + { + .digestsize = SHA3_512_DIGEST_SIZE, + .init = hacl_sha3_init, + .update = hacl_sha3_update, + .final = hacl_sha3_final, + .descsize = sizeof(struct sha3_state), + .base.cra_name = "sha3-512", + .base.cra_driver_name = "sha3-512-hacl", + .base.cra_blocksize = SHA3_512_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + } +}; + +static int __init sha3_hacl_mod_init(void) +{ + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); +} + +static void __exit sha3_hacl_mod_fini(void) +{ + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); +} + +subsys_initcall(sha3_hacl_mod_init); +module_exit(sha3_hacl_mod_fini); + +MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Formally Verified SHA-3 Secure Hash Algorithm from HACL*"); + +MODULE_ALIAS_CRYPTO("sha3-224"); +MODULE_ALIAS_CRYPTO("sha3-224-hacl"); +MODULE_ALIAS_CRYPTO("sha3-256"); +MODULE_ALIAS_CRYPTO("sha3-256-hacl"); +MODULE_ALIAS_CRYPTO("sha3-384"); +MODULE_ALIAS_CRYPTO("sha3-384-hacl"); +MODULE_ALIAS_CRYPTO("sha3-512"); +MODULE_ALIAS_CRYPTO("sha3-512-hacl"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 216878c8bc3d6..72c84437d7fd6 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4095,6 +4095,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, if (!req) goto free_xbuf; + // The following is to prevent a stack overflow on large keys + if (vecs->key_len > 256) { + err = 0; + goto free_xbuf; + } + // End of stack overflow avoidance + crypto_init_wait(&wait); key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len, diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h index 4f6c1a68882fa..5601da0a1ff36 100644 --- a/include/crypto/internal/ecc.h +++ b/include/crypto/internal/ecc.h @@ -182,6 +182,9 @@ int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits); u64 vli_sub(u64 *result, const u64 *left, const u64 *right, unsigned int ndigits); +void vli_mod_add(u64 *result, const u64 *left, const u64 *right, + const u64 *mod, unsigned int ndigits); + /** * vli_from_be64() - Load vli from big-endian u64 array * @@ -225,6 +228,9 @@ void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod, void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right, const u64 *mod, unsigned int ndigits); +void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right, + const struct ecc_curve *curve); + /** * vli_num_bits() - Counts the number of bits required for vli. 
* @@ -260,6 +266,10 @@ void ecc_free_point(struct ecc_point *p); */ bool ecc_point_is_zero(const struct ecc_point *point); +void ecc_point_mult(struct ecc_point *result, const struct ecc_point *point, + const u64 *scalar, u64 *initial_z, + const struct ecc_curve *curve, unsigned int ndigits); + /** * ecc_point_mult_shamir() - Add two points multiplied by scalars * diff --git a/zeta/.astylerc b/zeta/.astylerc new file mode 100644 index 0000000000000..74eace8c9bd95 --- /dev/null +++ b/zeta/.astylerc @@ -0,0 +1,31 @@ +# Pre-formats files according to the Kernel code style. +# +# AStyle prepares code so that clang-format can do the final formatting. +# +# Required since clang-format does not handle: +# - designated struct initializers +# - removing existing tabs +# +align-pointer=name +align-reference=name +attach-closing-while +break-one-line-headers +convert-tabs +delete-empty-lines +indent-preproc-define +indent=spaces=8 +lineend=linux +max-code-length=80 +max-continuation-indent=80 +min-conditional-indent=0 +mode=c +pad-comma +pad-header +pad-oper +preserve-date +squeeze-ws +style=kr +suffix=none +unpad-brackets +unpad-paren +verbose \ No newline at end of file diff --git a/zeta/.clang-format b/zeta/.clang-format new file mode 100644 index 0000000000000..72d680c57861b --- /dev/null +++ b/zeta/.clang-format @@ -0,0 +1,687 @@ +# Inherited from /.clang-format +# +# Changes: +# - UseTab: Never +# - ReflowComments: true +# - SortIncludes: true +# +--- +AccessModifierOffset: -4 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignConsecutiveMacros: true +AlignEscapedNewlines: Left +AlignOperands: true +AlignTrailingComments: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: 
None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: false +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: true + AfterNamespace: true + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Custom +BreakBeforeInheritanceComma: false +BreakBeforeTernaryOperators: false +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: BeforeComma +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: false +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 8 +ContinuationIndentWidth: 8 +Cpp11BracedListStyle: false +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: false + +# Taken from: +# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ tools/ \ +# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \ +# | LC_ALL=C sort -u +ForEachMacros: + - '__ata_qc_for_each' + - '__bio_for_each_bvec' + - '__bio_for_each_segment' + - '__evlist__for_each_entry' + - '__evlist__for_each_entry_continue' + - '__evlist__for_each_entry_from' + - '__evlist__for_each_entry_reverse' + - '__evlist__for_each_entry_safe' + - '__for_each_mem_range' + - '__for_each_mem_range_rev' + - '__for_each_thread' + - '__hlist_for_each_rcu' + - '__map__for_each_symbol_by_name' + - '__perf_evlist__for_each_entry' + - '__perf_evlist__for_each_entry_reverse' + - '__perf_evlist__for_each_entry_safe' + - '__rq_for_each_bio' + - '__shost_for_each_device' + - 
'apei_estatus_for_each_section' + - 'ata_for_each_dev' + - 'ata_for_each_link' + - 'ata_qc_for_each' + - 'ata_qc_for_each_raw' + - 'ata_qc_for_each_with_internal' + - 'ax25_for_each' + - 'ax25_uid_for_each' + - 'bio_for_each_bvec' + - 'bio_for_each_bvec_all' + - 'bio_for_each_folio_all' + - 'bio_for_each_integrity_vec' + - 'bio_for_each_segment' + - 'bio_for_each_segment_all' + - 'bio_list_for_each' + - 'bip_for_each_vec' + - 'bond_for_each_slave' + - 'bond_for_each_slave_rcu' + - 'bpf__perf_for_each_map' + - 'bpf__perf_for_each_map_named' + - 'bpf_for_each_spilled_reg' + - 'bpf_object__for_each_map' + - 'bpf_object__for_each_program' + - 'bpf_object__for_each_safe' + - 'bpf_perf_object__for_each' + - 'btree_for_each_safe128' + - 'btree_for_each_safe32' + - 'btree_for_each_safe64' + - 'btree_for_each_safel' + - 'card_for_each_dev' + - 'cgroup_taskset_for_each' + - 'cgroup_taskset_for_each_leader' + - 'cpufreq_for_each_efficient_entry_idx' + - 'cpufreq_for_each_entry' + - 'cpufreq_for_each_entry_idx' + - 'cpufreq_for_each_valid_entry' + - 'cpufreq_for_each_valid_entry_idx' + - 'css_for_each_child' + - 'css_for_each_descendant_post' + - 'css_for_each_descendant_pre' + - 'damon_for_each_region' + - 'damon_for_each_region_safe' + - 'damon_for_each_scheme' + - 'damon_for_each_scheme_safe' + - 'damon_for_each_target' + - 'damon_for_each_target_safe' + - 'data__for_each_file' + - 'data__for_each_file_new' + - 'data__for_each_file_start' + - 'device_for_each_child_node' + - 'displayid_iter_for_each' + - 'dma_fence_array_for_each' + - 'dma_fence_chain_for_each' + - 'dma_fence_unwrap_for_each' + - 'dma_resv_for_each_fence' + - 'dma_resv_for_each_fence_unlocked' + - 'do_for_each_ftrace_op' + - 'drm_atomic_crtc_for_each_plane' + - 'drm_atomic_crtc_state_for_each_plane' + - 'drm_atomic_crtc_state_for_each_plane_state' + - 'drm_atomic_for_each_plane_damage' + - 'drm_client_for_each_connector_iter' + - 'drm_client_for_each_modeset' + - 'drm_connector_for_each_possible_encoder' + 
- 'drm_for_each_bridge_in_chain' + - 'drm_for_each_connector_iter' + - 'drm_for_each_crtc' + - 'drm_for_each_crtc_reverse' + - 'drm_for_each_encoder' + - 'drm_for_each_encoder_mask' + - 'drm_for_each_fb' + - 'drm_for_each_legacy_plane' + - 'drm_for_each_plane' + - 'drm_for_each_plane_mask' + - 'drm_for_each_privobj' + - 'drm_mm_for_each_hole' + - 'drm_mm_for_each_node' + - 'drm_mm_for_each_node_in_range' + - 'drm_mm_for_each_node_safe' + - 'dsa_switch_for_each_available_port' + - 'dsa_switch_for_each_cpu_port' + - 'dsa_switch_for_each_port' + - 'dsa_switch_for_each_port_continue_reverse' + - 'dsa_switch_for_each_port_safe' + - 'dsa_switch_for_each_user_port' + - 'dsa_tree_for_each_user_port' + - 'dso__for_each_symbol' + - 'dsos__for_each_with_build_id' + - 'elf_hash_for_each_possible' + - 'elf_section__for_each_rel' + - 'elf_section__for_each_rela' + - 'elf_symtab__for_each_symbol' + - 'evlist__for_each_cpu' + - 'evlist__for_each_entry' + - 'evlist__for_each_entry_continue' + - 'evlist__for_each_entry_from' + - 'evlist__for_each_entry_reverse' + - 'evlist__for_each_entry_safe' + - 'flow_action_for_each' + - 'for_each_acpi_dev_match' + - 'for_each_active_dev_scope' + - 'for_each_active_drhd_unit' + - 'for_each_active_iommu' + - 'for_each_active_route' + - 'for_each_aggr_pgid' + - 'for_each_available_child_of_node' + - 'for_each_bench' + - 'for_each_bio' + - 'for_each_board_func_rsrc' + - 'for_each_btf_ext_rec' + - 'for_each_btf_ext_sec' + - 'for_each_bvec' + - 'for_each_card_auxs' + - 'for_each_card_auxs_safe' + - 'for_each_card_components' + - 'for_each_card_dapms' + - 'for_each_card_pre_auxs' + - 'for_each_card_prelinks' + - 'for_each_card_rtds' + - 'for_each_card_rtds_safe' + - 'for_each_card_widgets' + - 'for_each_card_widgets_safe' + - 'for_each_cgroup_storage_type' + - 'for_each_child_of_node' + - 'for_each_clear_bit' + - 'for_each_clear_bit_from' + - 'for_each_clear_bitrange' + - 'for_each_clear_bitrange_from' + - 'for_each_cmd' + - 'for_each_cmsghdr' + - 
'for_each_collection' + - 'for_each_comp_order' + - 'for_each_compatible_node' + - 'for_each_component_dais' + - 'for_each_component_dais_safe' + - 'for_each_console' + - 'for_each_console_srcu' + - 'for_each_cpu' + - 'for_each_cpu_and' + - 'for_each_cpu_wrap' + - 'for_each_dapm_widgets' + - 'for_each_dedup_cand' + - 'for_each_dev_addr' + - 'for_each_dev_scope' + - 'for_each_dma_cap_mask' + - 'for_each_dpcm_be' + - 'for_each_dpcm_be_rollback' + - 'for_each_dpcm_be_safe' + - 'for_each_dpcm_fe' + - 'for_each_drhd_unit' + - 'for_each_dss_dev' + - 'for_each_efi_memory_desc' + - 'for_each_efi_memory_desc_in_map' + - 'for_each_element' + - 'for_each_element_extid' + - 'for_each_element_id' + - 'for_each_endpoint_of_node' + - 'for_each_event' + - 'for_each_event_tps' + - 'for_each_evictable_lru' + - 'for_each_fib6_node_rt_rcu' + - 'for_each_fib6_walker_rt' + - 'for_each_free_mem_pfn_range_in_zone' + - 'for_each_free_mem_pfn_range_in_zone_from' + - 'for_each_free_mem_range' + - 'for_each_free_mem_range_reverse' + - 'for_each_func_rsrc' + - 'for_each_group_device' + - 'for_each_group_evsel' + - 'for_each_group_member' + - 'for_each_hstate' + - 'for_each_if' + - 'for_each_inject_fn' + - 'for_each_insn' + - 'for_each_insn_prefix' + - 'for_each_intid' + - 'for_each_iommu' + - 'for_each_ip_tunnel_rcu' + - 'for_each_irq_nr' + - 'for_each_lang' + - 'for_each_link_codecs' + - 'for_each_link_cpus' + - 'for_each_link_platforms' + - 'for_each_lru' + - 'for_each_matching_node' + - 'for_each_matching_node_and_match' + - 'for_each_mem_pfn_range' + - 'for_each_mem_range' + - 'for_each_mem_range_rev' + - 'for_each_mem_region' + - 'for_each_member' + - 'for_each_memory' + - 'for_each_migratetype_order' + - 'for_each_missing_reg' + - 'for_each_net' + - 'for_each_net_continue_reverse' + - 'for_each_net_rcu' + - 'for_each_netdev' + - 'for_each_netdev_continue' + - 'for_each_netdev_continue_rcu' + - 'for_each_netdev_continue_reverse' + - 'for_each_netdev_feature' + - 
'for_each_netdev_in_bond_rcu' + - 'for_each_netdev_rcu' + - 'for_each_netdev_reverse' + - 'for_each_netdev_safe' + - 'for_each_new_connector_in_state' + - 'for_each_new_crtc_in_state' + - 'for_each_new_mst_mgr_in_state' + - 'for_each_new_plane_in_state' + - 'for_each_new_plane_in_state_reverse' + - 'for_each_new_private_obj_in_state' + - 'for_each_new_reg' + - 'for_each_node' + - 'for_each_node_by_name' + - 'for_each_node_by_type' + - 'for_each_node_mask' + - 'for_each_node_state' + - 'for_each_node_with_cpus' + - 'for_each_node_with_property' + - 'for_each_nonreserved_multicast_dest_pgid' + - 'for_each_of_allnodes' + - 'for_each_of_allnodes_from' + - 'for_each_of_cpu_node' + - 'for_each_of_pci_range' + - 'for_each_old_connector_in_state' + - 'for_each_old_crtc_in_state' + - 'for_each_old_mst_mgr_in_state' + - 'for_each_old_plane_in_state' + - 'for_each_old_private_obj_in_state' + - 'for_each_oldnew_connector_in_state' + - 'for_each_oldnew_crtc_in_state' + - 'for_each_oldnew_mst_mgr_in_state' + - 'for_each_oldnew_plane_in_state' + - 'for_each_oldnew_plane_in_state_reverse' + - 'for_each_oldnew_private_obj_in_state' + - 'for_each_online_cpu' + - 'for_each_online_node' + - 'for_each_online_pgdat' + - 'for_each_path' + - 'for_each_pci_bridge' + - 'for_each_pci_dev' + - 'for_each_pcm_streams' + - 'for_each_physmem_range' + - 'for_each_populated_zone' + - 'for_each_possible_cpu' + - 'for_each_present_cpu' + - 'for_each_prime_number' + - 'for_each_prime_number_from' + - 'for_each_probe_cache_entry' + - 'for_each_process' + - 'for_each_process_thread' + - 'for_each_prop_codec_conf' + - 'for_each_prop_dai_codec' + - 'for_each_prop_dai_cpu' + - 'for_each_prop_dlc_codecs' + - 'for_each_prop_dlc_cpus' + - 'for_each_prop_dlc_platforms' + - 'for_each_property_of_node' + - 'for_each_reg' + - 'for_each_reg_filtered' + - 'for_each_registered_fb' + - 'for_each_requested_gpio' + - 'for_each_requested_gpio_in_range' + - 'for_each_reserved_mem_range' + - 'for_each_reserved_mem_region' 
+ - 'for_each_rtd_codec_dais' + - 'for_each_rtd_components' + - 'for_each_rtd_cpu_dais' + - 'for_each_rtd_dais' + - 'for_each_script' + - 'for_each_sec' + - 'for_each_set_bit' + - 'for_each_set_bit_from' + - 'for_each_set_bitrange' + - 'for_each_set_bitrange_from' + - 'for_each_set_clump8' + - 'for_each_sg' + - 'for_each_sg_dma_page' + - 'for_each_sg_page' + - 'for_each_sgtable_dma_page' + - 'for_each_sgtable_dma_sg' + - 'for_each_sgtable_page' + - 'for_each_sgtable_sg' + - 'for_each_shell_test' + - 'for_each_sibling_event' + - 'for_each_subelement' + - 'for_each_subelement_extid' + - 'for_each_subelement_id' + - 'for_each_sublist' + - 'for_each_subsystem' + - 'for_each_supported_activate_fn' + - 'for_each_supported_inject_fn' + - 'for_each_test' + - 'for_each_thread' + - 'for_each_token' + - 'for_each_unicast_dest_pgid' + - 'for_each_vsi' + - 'for_each_wakeup_source' + - 'for_each_zone' + - 'for_each_zone_zonelist' + - 'for_each_zone_zonelist_nodemask' + - 'func_for_each_insn' + - 'fwnode_for_each_available_child_node' + - 'fwnode_for_each_child_node' + - 'fwnode_graph_for_each_endpoint' + - 'gadget_for_each_ep' + - 'genradix_for_each' + - 'genradix_for_each_from' + - 'hash_for_each' + - 'hash_for_each_possible' + - 'hash_for_each_possible_rcu' + - 'hash_for_each_possible_rcu_notrace' + - 'hash_for_each_possible_safe' + - 'hash_for_each_rcu' + - 'hash_for_each_safe' + - 'hashmap__for_each_entry' + - 'hashmap__for_each_entry_safe' + - 'hashmap__for_each_key_entry' + - 'hashmap__for_each_key_entry_safe' + - 'hctx_for_each_ctx' + - 'hists__for_each_format' + - 'hists__for_each_sort_list' + - 'hlist_bl_for_each_entry' + - 'hlist_bl_for_each_entry_rcu' + - 'hlist_bl_for_each_entry_safe' + - 'hlist_for_each' + - 'hlist_for_each_entry' + - 'hlist_for_each_entry_continue' + - 'hlist_for_each_entry_continue_rcu' + - 'hlist_for_each_entry_continue_rcu_bh' + - 'hlist_for_each_entry_from' + - 'hlist_for_each_entry_from_rcu' + - 'hlist_for_each_entry_rcu' + - 
'hlist_for_each_entry_rcu_bh' + - 'hlist_for_each_entry_rcu_notrace' + - 'hlist_for_each_entry_safe' + - 'hlist_for_each_entry_srcu' + - 'hlist_for_each_safe' + - 'hlist_nulls_for_each_entry' + - 'hlist_nulls_for_each_entry_from' + - 'hlist_nulls_for_each_entry_rcu' + - 'hlist_nulls_for_each_entry_safe' + - 'i3c_bus_for_each_i2cdev' + - 'i3c_bus_for_each_i3cdev' + - 'idr_for_each_entry' + - 'idr_for_each_entry_continue' + - 'idr_for_each_entry_continue_ul' + - 'idr_for_each_entry_ul' + - 'in_dev_for_each_ifa_rcu' + - 'in_dev_for_each_ifa_rtnl' + - 'inet_bind_bucket_for_each' + - 'inet_lhash2_for_each_icsk' + - 'inet_lhash2_for_each_icsk_continue' + - 'inet_lhash2_for_each_icsk_rcu' + - 'interval_tree_for_each_double_span' + - 'interval_tree_for_each_span' + - 'intlist__for_each_entry' + - 'intlist__for_each_entry_safe' + - 'iopt_for_each_contig_area' + - 'kcore_copy__for_each_phdr' + - 'key_for_each' + - 'key_for_each_safe' + - 'klp_for_each_func' + - 'klp_for_each_func_safe' + - 'klp_for_each_func_static' + - 'klp_for_each_object' + - 'klp_for_each_object_safe' + - 'klp_for_each_object_static' + - 'kunit_suite_for_each_test_case' + - 'kvm_for_each_memslot' + - 'kvm_for_each_memslot_in_gfn_range' + - 'kvm_for_each_vcpu' + - 'libbpf_nla_for_each_attr' + - 'list_for_each' + - 'list_for_each_codec' + - 'list_for_each_codec_safe' + - 'list_for_each_continue' + - 'list_for_each_entry' + - 'list_for_each_entry_continue' + - 'list_for_each_entry_continue_rcu' + - 'list_for_each_entry_continue_reverse' + - 'list_for_each_entry_from' + - 'list_for_each_entry_from_rcu' + - 'list_for_each_entry_from_reverse' + - 'list_for_each_entry_lockless' + - 'list_for_each_entry_rcu' + - 'list_for_each_entry_reverse' + - 'list_for_each_entry_safe' + - 'list_for_each_entry_safe_continue' + - 'list_for_each_entry_safe_from' + - 'list_for_each_entry_safe_reverse' + - 'list_for_each_entry_srcu' + - 'list_for_each_from' + - 'list_for_each_prev' + - 'list_for_each_prev_safe' + - 
'list_for_each_safe' + - 'llist_for_each' + - 'llist_for_each_entry' + - 'llist_for_each_entry_safe' + - 'llist_for_each_safe' + - 'map__for_each_symbol' + - 'map__for_each_symbol_by_name' + - 'map_for_each_event' + - 'map_for_each_metric' + - 'maps__for_each_entry' + - 'maps__for_each_entry_safe' + - 'mci_for_each_dimm' + - 'media_device_for_each_entity' + - 'media_device_for_each_intf' + - 'media_device_for_each_link' + - 'media_device_for_each_pad' + - 'msi_for_each_desc' + - 'nanddev_io_for_each_page' + - 'netdev_for_each_lower_dev' + - 'netdev_for_each_lower_private' + - 'netdev_for_each_lower_private_rcu' + - 'netdev_for_each_mc_addr' + - 'netdev_for_each_uc_addr' + - 'netdev_for_each_upper_dev_rcu' + - 'netdev_hw_addr_list_for_each' + - 'nft_rule_for_each_expr' + - 'nla_for_each_attr' + - 'nla_for_each_nested' + - 'nlmsg_for_each_attr' + - 'nlmsg_for_each_msg' + - 'nr_neigh_for_each' + - 'nr_neigh_for_each_safe' + - 'nr_node_for_each' + - 'nr_node_for_each_safe' + - 'of_for_each_phandle' + - 'of_property_for_each_string' + - 'of_property_for_each_u32' + - 'pci_bus_for_each_resource' + - 'pci_dev_for_each_resource' + - 'pcl_for_each_chunk' + - 'pcl_for_each_segment' + - 'pcm_for_each_format' + - 'perf_config_items__for_each_entry' + - 'perf_config_sections__for_each_entry' + - 'perf_config_set__for_each_entry' + - 'perf_cpu_map__for_each_cpu' + - 'perf_evlist__for_each_entry' + - 'perf_evlist__for_each_entry_reverse' + - 'perf_evlist__for_each_entry_safe' + - 'perf_evlist__for_each_evsel' + - 'perf_evlist__for_each_mmap' + - 'perf_hpp_list__for_each_format' + - 'perf_hpp_list__for_each_format_safe' + - 'perf_hpp_list__for_each_sort_list' + - 'perf_hpp_list__for_each_sort_list_safe' + - 'perf_pmu__for_each_hybrid_pmu' + - 'ping_portaddr_for_each_entry' + - 'ping_portaddr_for_each_entry_rcu' + - 'plist_for_each' + - 'plist_for_each_continue' + - 'plist_for_each_entry' + - 'plist_for_each_entry_continue' + - 'plist_for_each_entry_safe' + - 'plist_for_each_safe' 
+ - 'pnp_for_each_card' + - 'pnp_for_each_dev' + - 'protocol_for_each_card' + - 'protocol_for_each_dev' + - 'queue_for_each_hw_ctx' + - 'radix_tree_for_each_slot' + - 'radix_tree_for_each_tagged' + - 'rb_for_each' + - 'rbtree_postorder_for_each_entry_safe' + - 'rdma_for_each_block' + - 'rdma_for_each_port' + - 'rdma_umem_for_each_dma_block' + - 'resort_rb__for_each_entry' + - 'resource_list_for_each_entry' + - 'resource_list_for_each_entry_safe' + - 'rhl_for_each_entry_rcu' + - 'rhl_for_each_rcu' + - 'rht_for_each' + - 'rht_for_each_entry' + - 'rht_for_each_entry_from' + - 'rht_for_each_entry_rcu' + - 'rht_for_each_entry_rcu_from' + - 'rht_for_each_entry_safe' + - 'rht_for_each_from' + - 'rht_for_each_rcu' + - 'rht_for_each_rcu_from' + - 'rq_for_each_bvec' + - 'rq_for_each_segment' + - 'rq_list_for_each' + - 'rq_list_for_each_safe' + - 'scsi_for_each_prot_sg' + - 'scsi_for_each_sg' + - 'sctp_for_each_hentry' + - 'sctp_skb_for_each' + - 'sec_for_each_insn' + - 'sec_for_each_insn_continue' + - 'sec_for_each_insn_from' + - 'shdma_for_each_chan' + - 'shost_for_each_device' + - 'sk_for_each' + - 'sk_for_each_bound' + - 'sk_for_each_entry_offset_rcu' + - 'sk_for_each_from' + - 'sk_for_each_rcu' + - 'sk_for_each_safe' + - 'sk_nulls_for_each' + - 'sk_nulls_for_each_from' + - 'sk_nulls_for_each_rcu' + - 'snd_array_for_each' + - 'snd_pcm_group_for_each_entry' + - 'snd_soc_dapm_widget_for_each_path' + - 'snd_soc_dapm_widget_for_each_path_safe' + - 'snd_soc_dapm_widget_for_each_sink_path' + - 'snd_soc_dapm_widget_for_each_source_path' + - 'strlist__for_each_entry' + - 'strlist__for_each_entry_safe' + - 'sym_for_each_insn' + - 'sym_for_each_insn_continue_reverse' + - 'symbols__for_each_entry' + - 'tb_property_for_each' + - 'tcf_act_for_each_action' + - 'tcf_exts_for_each_action' + - 'udp_portaddr_for_each_entry' + - 'udp_portaddr_for_each_entry_rcu' + - 'usb_hub_for_each_child' + - 'v4l2_device_for_each_subdev' + - 'v4l2_m2m_for_each_dst_buf' + - 
'v4l2_m2m_for_each_dst_buf_safe' + - 'v4l2_m2m_for_each_src_buf' + - 'v4l2_m2m_for_each_src_buf_safe' + - 'virtio_device_for_each_vq' + - 'while_for_each_ftrace_op' + - 'xa_for_each' + - 'xa_for_each_marked' + - 'xa_for_each_range' + - 'xa_for_each_start' + - 'xas_for_each' + - 'xas_for_each_conflict' + - 'xas_for_each_marked' + - 'xbc_array_for_each_value' + - 'xbc_for_each_key_value' + - 'xbc_node_for_each_array_value' + - 'xbc_node_for_each_child' + - 'xbc_node_for_each_key_value' + - 'xbc_node_for_each_subkey' + - 'zorro_for_each_dev' + +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '.*' + Priority: 1 +IncludeIsMainRegex: '(Test)?$' +IndentCaseLabels: false +IndentGotoLabels: false +IndentPPDirectives: None +IndentWidth: 8 +IndentWrappedFunctionNames: false +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Auto +ObjCBlockIndentWidth: 8 +ObjCSpaceAfterProperty: true +ObjCSpaceBeforeProtocolList: true + +# Taken from git's rules +PenaltyBreakAssignment: 10 +PenaltyBreakBeforeFirstCallParameter: 30 +PenaltyBreakComment: 10 +PenaltyBreakFirstLessLess: 0 +PenaltyBreakString: 10 +PenaltyExcessCharacter: 100 +PenaltyReturnTypeOnItsOwnLine: 60 + +PointerAlignment: Right +ReflowComments: true +SortIncludes: true +SortUsingDeclarations: false +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatementsExceptForEachMacros +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInContainerLiterals: false +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp03 +TabWidth: 8 +UseTab: Never +... 
diff --git a/zeta/.gitignore b/zeta/.gitignore new file mode 100644 index 0000000000000..c723267ba32f9 --- /dev/null +++ b/zeta/.gitignore @@ -0,0 +1,2 @@ +!.astylerc +!.clang-format diff --git a/zeta/Makefile b/zeta/Makefile new file mode 100644 index 0000000000000..4e0d502ee7806 --- /dev/null +++ b/zeta/Makefile @@ -0,0 +1,31 @@ +this-makefile := $(lastword $(MAKEFILE_LIST)) +ZETA := $(realpath $(dir $(this-makefile))) +LINUX_ROOT:=${ZETA}/.. + +# FILES is a space-separated list of files. +# +# $ make format FILES="/crypto/sha256_generic.c /crypto/sha512_generic.c" +# +# Note the leading `/` (slash), which indicates the position of +# the input files with respect to the kernel source code. +FILES= + +ALL_FILES:=$(addprefix ${LINUX_ROOT}, ${FILES}) + +all: + @echo "Possible targets: format, lint" + +check-env: +ifndef FILES + $(error FILES env-var is not defined) +endif + +format: check-env + # Tested with astyle v3.8.4 + astyle --project=${ZETA}/.astylerc ${ALL_FILES} + # Tested with clang-format v17 + clang-format --verbose -i --style=file:${ZETA}/.clang-format ${ALL_FILES} + +lint: check-env + # Tested with cpplint v1.5.5 + cpplint --filter=-whitespace/braces,-readability/casting,-build/include_subdir ${ALL_FILES} diff --git a/zeta/readme.md b/zeta/readme.md new file mode 100644 index 0000000000000..3fe48b85e93b4 --- /dev/null +++ b/zeta/readme.md @@ -0,0 +1,136 @@ +## Zeta + +Auxiliary directory for tools and configuration files. + + +### Branches and History + +**Premise**: The `cf-linux-rolling-stable` branch keeps track of the +`linux-rolling-stable` branch. + +``` +o +| <- zeta (commit) +| <- linux-rolling-stable, cf-linux-rolling-stable +| += +``` + +Definitions: +- Zeta: Auxiliary commits for devel/testing (do not touch the kernel code). +- hacl: All the commits that include new cryptographic code from hacl. 
+ +``` +o +| <- hacl-hash (commits) +| <- cf-zeta (commits) +| <- linux-rolling-stable, cf-linux-rolling-stable, +| += +``` + +Naturally, the `linux-rolling-stable` branch moves due to the upstream activity. + +``` +o +| <- linux-rolling-stable +| +| o +| | <- hacl-hash (commits) +| | <- cf-zeta (commit) +|---/ <- cf-linux-rolling-stable, +| += +``` + +Rebase: to catch up with upstream, commits must be rebased. +It is expected that this is an easy rebase (merge strategy), because the +contributed files will barely touch the kernel code. +Thus, we aim for a linear history. + +``` + o + | <- hacl-hash (commits) +o | <- cf-zeta (commit) +| <- linux-rolling-stable---/ <- cf-linux-rolling-stable, +| +| += +``` + +### Code Style and Formatting + +[F.1]: https://www.kernel.org/doc/html/latest/process/coding-style.html +[F.2]: https://www.kernel.org/doc/html/latest/process/clang-format.html +[F.asytle]: https://astyle.sourceforge.net/ +[F.clangf]: https://clang.llvm.org/docs/ClangFormat.html +[F.c99]: https://en.cppreference.com/w/c/types/integer + +The following are (non-exhuastive) guidelines for code style taken from +[Kernel coding style][F.1] and the [Clang kernel documentation][F.2]. + +- 8chars identation and 80 columns. +- Pointer is attached to name, so `int *x`. +- Use [C99][F.c99] fixed-size types. +- Avoid the use of typedef on structs and standard types. +- Comment Style +```c +/* + * lorem ipsum dolor sit amet + * lorem ipsum dolor sit amet + * lorem ipsum dolor sit amet + */ +``` + +- Defines are ALLCAPS and consecutive defines are aligned: +```c +#define CONSTANT_LARGE_NAME_SIZE 2 +#define CONSTANT_MEDIUM_SIZE 3 +#define CONSTANT_SMALL 4 +``` + +#### Formatting + +To format files. run + +```sh +$ make -C zeta format FILES="/crypto/sha256_generic.c /crypto/sha512_generic.c" +``` + +Pass in `FILES` the files to be formatted. Note the leading `/` (slash), which +indicates the position of the files with respect to the kernel source code. 
+ +This will invoke [astyle][F.asytle] followed by [clang-format][F.clangf] commands. +AStyle is used to prepare the files for clang-format, as the latter does +not catch issues that astyle does. + +### Linting + +[L.cpplint]: https://github.com/cpplint/cpplint + +Zeta contains a linter script to be used as a reference to spot coding errors. +The [cpplinter][L.cpplint] could be verbose as it targets C++, however it helps +to remove minor issues. + +```sh +$ make -C zeta lint FILES="/crypto/sha256_generic.c /crypto/sha512_generic.c" +``` + +Pass in `FILES` the files to be linted. Note the leading `/` (slash), which +indicates the position of the files with respect to the kernel source code. + +### Automated performance testing + +Any merge requests opened against the `cf-zeta` branch will automatically +trigger the execution of the [Crypto algorithm implementation testing +pipeline](.github/workflows/crypto-test-harness.yml) GitHub Action. + +This action builds the kernel as a User-mode Linux (UML) binary +(https://docs.kernel.org/virt/uml/user_mode_linux_howto_v2.html), using the +kernel config defined in +[zeta/test-artifacts/config-um](zeta/test-artifacts/config-um), and runs a [test +script](zeta/test-artifacts/test-script.sh) in it. + +New kernel configuration options added as part of development work should be +added to the [test kernel config](zeta/test-artifacts/config-um), and if needed +new test clauses added on the test script. \ No newline at end of file diff --git a/zeta/rsa_bench/Makefile b/zeta/rsa_bench/Makefile new file mode 100644 index 0000000000000..b52112d1383b9 --- /dev/null +++ b/zeta/rsa_bench/Makefile @@ -0,0 +1,18 @@ +all: compile + +compile: rsa.go rsa_test.go + go build -o rsa.exe rsa.go + go test -c -o bench.exe rsa.go rsa_test.go + + +rsa: rsa.exe + ./rsa.exe + +load_parser: + modprobe pkcs8_key_parser + +benchmark: rsa.go rsa_test.go + ./bench.exe -test.v -test.bench=. 
+ +clean: + rm -f ./bench.exe ./rsa.exe diff --git a/zeta/rsa_bench/readme.md b/zeta/rsa_bench/readme.md new file mode 100644 index 0000000000000..25e6f7ef916c9 --- /dev/null +++ b/zeta/rsa_bench/readme.md @@ -0,0 +1,29 @@ +## Benchmark of In-Kernel RSA Signing from User Space + +This program uses the Linux API to access to in-kernel cryptographic +operations. This Go program makes direct syscalls to the kernel similarly +to the `keyctl` utility command. + +To run an example: + $ make example + +To run a benchmark: + $ make benchmark + +After that, the output looks like: + +``` +BenchmarkRSAKernel +BenchmarkRSAKernel-16 283 4283867 ns/op +BenchmarkRSAGo +BenchmarkRSAGo-16 1412 908581 ns/op +``` + +The difference in time is expected as the program should wait for the +operating system to respond the syscall, and move memory between the kernel +space and the user space. + +Known Issues: +- "failed to load the private key into the keyring: bad message" + This means the parser is not loaded. 
To solve this issue run: + $ sudo modprobe pkcs8_key_parser diff --git a/zeta/rsa_bench/rsa.go b/zeta/rsa_bench/rsa.go new file mode 100644 index 0000000000000..2c51139778e2e --- /dev/null +++ b/zeta/rsa_bench/rsa.go @@ -0,0 +1,145 @@ +package main + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "log" + "syscall" + "unsafe" +) + +type KeySerial int32 +type Keyring int32 +type KeyOps = uintptr + +const ( + KEY_SPEC_PROCESS_KEYRING Keyring = -2 + KEYCTL_PKEY_SIGN KeyOps = 27 + KEYCTL_PKEY_VERIFY KeyOps = 28 +) + +var ( + keyTypeAsym = []byte("asymmetric\x00") + sha256pkcs1 = []byte("enc=pkcs1 hash=sha256\x00") +) + +func (keyring Keyring) LoadAsym(desc string, payload []byte) (KeySerial, error) { + cdesc := []byte(desc + "\x00") + serial, _, errno := syscall.Syscall6( + syscall.SYS_ADD_KEY, + uintptr(unsafe.Pointer(&keyTypeAsym[0])), + uintptr(unsafe.Pointer(&cdesc[0])), + uintptr(unsafe.Pointer(&payload[0])), + uintptr(len(payload)), + uintptr(keyring), + uintptr(0), + ) + if errno == 0 { + return KeySerial(serial), nil + } + + return KeySerial(serial), errno +} + +type pkeyParams struct { + key_id KeySerial + in_len uint32 + out_or_in2_len uint32 + __spare [7]uint32 +} + +func (key KeySerial) Sign(info, digest, signature []byte) error { + var params pkeyParams + params.key_id = key + params.in_len = uint32(len(digest)) + params.out_or_in2_len = uint32(len(signature)) + + _, _, errno := syscall.Syscall6( + syscall.SYS_KEYCTL, KEYCTL_PKEY_SIGN, + uintptr(unsafe.Pointer(¶ms)), + uintptr(unsafe.Pointer(&info[0])), + uintptr(unsafe.Pointer(&digest[0])), + uintptr(unsafe.Pointer(&signature[0])), + uintptr(0), + ) + if errno == 0 { + return nil + } + + return errno +} + +func (key KeySerial) Verify(info, digest, signature []byte) error { + var params pkeyParams + params.key_id = key + params.in_len = uint32(len(digest)) + params.out_or_in2_len = uint32(len(signature)) + + _, _, errno := syscall.Syscall6( + syscall.SYS_KEYCTL, 
KEYCTL_PKEY_VERIFY, + uintptr(unsafe.Pointer(¶ms)), + uintptr(unsafe.Pointer(&info[0])), + uintptr(unsafe.Pointer(&digest[0])), + uintptr(unsafe.Pointer(&signature[0])), + uintptr(0), + ) + if errno == 0 { + return nil + } + + return errno +} + +func loadKeyToKernel(key crypto.PrivateKey) KeySerial { + pkcs8, err := x509.MarshalPKCS8PrivateKey(key) + if err != nil { + log.Fatalf("failed to serialize the private key to PKCS8 blob: %v", err) + } + + serial, err := KEY_SPEC_PROCESS_KEYRING.LoadAsym("test rsa key", pkcs8) + if err != nil { + log.Fatalf("failed to load the private key into the keyring: %v", err) + } + + log.Printf("Loaded key to the kernel with ID: %v", serial) + + return serial +} + +func main() { + const N = 2048 + + var ( + msg = []byte("hello world") + digest = sha256.Sum256(msg) + signature [N / 8]byte + ) + + priv, err := rsa.GenerateKey(rand.Reader, N) + if err != nil { + log.Fatalf("failed to generate private key: %v", err) + } + + keyInKernel := loadKeyToKernel(priv) + + err = keyInKernel.Sign(sha256pkcs1, digest[:], signature[:]) + if err != nil { + log.Fatalf("failed to sign the digest: %v", err) + } + log.Printf("Signature from Kernel: %x...", signature[:10]) + + err = keyInKernel.Verify(sha256pkcs1, digest[:], signature[:]) + if err != nil { + log.Fatalf("failed to verify the digest: %v", err) + } + log.Printf("Valid signature from Kernel: %v", err == nil) + + err = rsa.VerifyPKCS1v15(&priv.PublicKey, crypto.SHA256, digest[:], signature[:]) + log.Printf("Valid signature from Go: %v", err == nil) + if err != nil { + log.Fatalf("failed to verify the signature: %v", err) + } +} diff --git a/zeta/rsa_bench/rsa_test.go b/zeta/rsa_bench/rsa_test.go new file mode 100644 index 0000000000000..853ca6644e73e --- /dev/null +++ b/zeta/rsa_bench/rsa_test.go @@ -0,0 +1,94 @@ +package main + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "testing" +) + +func kernelSetup(b *testing.B) (KeySerial, []byte, []byte) { + const N = 2048 + 
+ var ( + msg = []byte("hello world") + digest = sha256.Sum256(msg) + signature [N / 8]byte + ) + + priv, err := rsa.GenerateKey(rand.Reader, N) + if err != nil { + b.Fatalf("failed to generate private key: %v", err) + } + + keyInKernel := loadKeyToKernel(priv) + + return keyInKernel, digest[:], signature[:] +} + +func BenchmarkRSAKernelSign(b *testing.B) { + keyInKernel, digest, signature := kernelSetup(b) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := keyInKernel.Sign(sha256pkcs1, digest[:], signature[:]) + if err != nil { + b.Fatalf("failed to sign the digest: %v", err) + } + } +} + +func BenchmarkRSAKernelVerify(b *testing.B) { + keyInKernel, digest, signature := kernelSetup(b) + + err := keyInKernel.Sign(sha256pkcs1, digest[:], signature[:]) + if err != nil { + b.Fatalf("failed to sign the digest: %v", err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := keyInKernel.Verify(sha256pkcs1, digest[:], signature[:]) + if err != nil { + b.Fatalf("failed to sign the digest: %v", err) + } + } +} + +func BenchmarkRSAGo(b *testing.B) { + const N = 2048 + + var ( + msg = []byte("hello world") + digest = sha256.Sum256(msg) + ) + + priv, err := rsa.GenerateKey(rand.Reader, N) + if err != nil { + b.Fatalf("failed to generate private key: %v", err) + } + + signature, err := priv.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + b.Fatalf("failed to sign the digest: %v", err) + } + + b.Run("Sign", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := priv.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + b.Fatalf("failed to sign the digest: %v", err) + } + } + }) + + b.Run("Verify", func(b *testing.B) { + for i := 0; i < b.N; i++ { + err := rsa.VerifyPKCS1v15(&priv.PublicKey, crypto.SHA256, digest[:], signature[:]) + if err != nil { + b.Fatalf("failed to sign the digest: %v", err) + } + } + }) +} diff --git a/zeta/test-artifacts/config-um b/zeta/test-artifacts/config-um new file mode 100644 index 
0000000000000..b7c9b183457c6 --- /dev/null +++ b/zeta/test-artifacts/config-um @@ -0,0 +1,1894 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/um 6.5.4 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (Debian 12.2.0-14) 12.2.0" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=120200 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=24000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=24000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=124 +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=128 +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_SHOW=y +# end of IRQ subsystem + +CONFIG_GENERIC_CLOCKEVENTS=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y + +# +# BPF subsystem +# +# CONFIG_BPF_SYSCALL is not set +# end of BPF subsystem + +CONFIG_PREEMPT_NONE_BUILD=y +CONFIG_PREEMPT_NONE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +# CONFIG_TASKSTATS is not set +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +# +# RCU Subsystem +# +CONFIG_TINY_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TINY_SRCU=y +# end of RCU Subsystem + 
+CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=14 + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_CGROUPS=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +# CONFIG_MEMCG is not set +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +# CONFIG_RT_GROUP_SCHED is not set +# CONFIG_CGROUP_PIDS is not set +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_RELAY is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +# CONFIG_KALLSYMS_ALL is not set +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_CACHESTAT_SYSCALL=y +# CONFIG_EMBEDDED is not set + +# +# Kernel Performance Events And Counters +# +# end of Kernel Performance 
Events And Counters + +# CONFIG_PROFILING is not set +# end of General setup + +# +# UML-specific options +# +CONFIG_UML=y +CONFIG_MMU=y +CONFIG_NO_IOMEM=y +CONFIG_NO_IOPORT_MAP=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_HZ=100 +CONFIG_NR_CPUS=1 +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y + +# +# Host processor type and features +# +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +# end of Host processor type and features + +CONFIG_UML_X86=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_3_LEVEL_PGTABLES=y +CONFIG_GENERIC_HWEIGHT=y +# CONFIG_STATIC_LINK is not set +CONFIG_LD_SCRIPT_DYN=y +CONFIG_LD_SCRIPT_DYN_RPATH=y +CONFIG_HOSTFS=y +CONFIG_MCONSOLE=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_KERNEL_STACK_ORDER=2 +# CONFIG_MMAPPER is not set +CONFIG_PGTABLE_LEVELS=3 +# CONFIG_UML_TIME_TRAVEL_SUPPORT is not set +# end of UML-specific options + +# +# UML Character Devices +# +CONFIG_STDERR_CONSOLE=y +CONFIG_SSL=y +CONFIG_NULL_CHAN=y +CONFIG_PORT_CHAN=y +CONFIG_PTY_CHAN=y +CONFIG_TTY_CHAN=y +CONFIG_XTERM_CHAN=y +CONFIG_XTERM_CHAN_DEFAULT_EMULATOR="xterm" +CONFIG_CON_ZERO_CHAN="fd:0,fd:1" +CONFIG_CON_CHAN="pts" +CONFIG_SSL_CHAN="pts" +CONFIG_UML_SOUND=m +# end of UML Character Devices + +# +# UML Network Devices +# +CONFIG_UML_NET=y +CONFIG_UML_NET_ETHERTAP=y +CONFIG_UML_NET_TUNTAP=y +CONFIG_UML_NET_SLIP=y +CONFIG_UML_NET_DAEMON=y +CONFIG_UML_NET_DAEMON_DEFAULT_SOCK="/tmp/uml.ctl" +# CONFIG_UML_NET_VECTOR is not set +# CONFIG_UML_NET_VDE is not set +CONFIG_UML_NET_MCAST=y +# CONFIG_UML_NET_PCAP is not set +CONFIG_UML_NET_SLIRP=y 
+# end of UML Network Devices + +# CONFIG_VIRTIO_UML is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_PM_SLEEP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# end of Power management options + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_RUST=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_NO_PREEMPT=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y + +# +# GCOV-based kernel profiling +# +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_FUNCTION_ALIGNMENT=0 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set 
+CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_ICQ=y +# CONFIG_BLK_DEV_BSGLIB is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_DEV_THROTTLING is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +# CONFIG_BLK_CGROUP_IOPRIO is not set +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_EFI_PARTITION=y +# end of Partition Types + +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=m +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SWAP=y +# CONFIG_ZSWAP is not set + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +# end of SLAB allocator options + +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_COMPAT_BRK=y +CONFIG_FLATMEM=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PAGE_REPORTING is not set +CONFIG_PHYS_ADDR_T_64BIT=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_NEED_PER_CPU_KM=y +# CONFIG_CMA is not set +# CONFIG_IDLE_PAGE_TRACKING is not set 
+CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set + +# +# GUP_TEST needs to have DEBUG_FS enabled +# +# CONFIG_ANON_VMA_NAME is not set +# CONFIG_USERFAULTFD is not set +# CONFIG_LRU_GEN is not set + +# +# Data Access Monitoring +# +# CONFIG_DAMON is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +# CONFIG_UNIX_DIAG is not set +# CONFIG_TLS is not set +# CONFIG_XFRM_USER is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_NET_IPVTI is not set +# CONFIG_NET_FOU is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_MPTCP is not set +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +# CONFIG_NETFILTER is not set +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set +# 
CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +# CONFIG_VSOCKETS is not set +# CONFIG_NETLINK_DIAG is not set +# CONFIG_MPLS is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_MAX_SKB_FRAGS=17 +# CONFIG_CGROUP_NET_PRIO is not set +# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +# CONFIG_MCTP is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +# CONFIG_FAILOVER is not set +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +# CONFIG_PCCARD is not set + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_DEVICES=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# 
CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# CONFIG_CONNECTOR is not set + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_ZRAM is not set +CONFIG_BLK_DEV_UBD=y +# CONFIG_BLK_DEV_UBD_SYNC is not set +CONFIG_BLK_DEV_COW_COMMON=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_UBLK is not set + +# +# NVME Support +# +# CONFIG_NVME_TCP is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_DUMMY_IRQ is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_93CX6 is not set +# end of EEPROM support + +# +# Texas Instruments shared transport line discipline +# +# end of Texas Instruments shared transport line discipline + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_ECHO is not set +# CONFIG_PVPANIC is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# end of SCSI device support + +# CONFIG_MD is not set +# CONFIG_TARGET_CORE is not set +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +CONFIG_DUMMY=m +# CONFIG_WIREGUARD is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +# CONFIG_IPVLAN is not set +# CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC 
is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=m +# CONFIG_TUN_VNET_CROSS_LE is not set +# CONFIG_VETH is not set +# CONFIG_NLMON is not set +CONFIG_ETHERNET=y +CONFIG_NET_VENDOR_ALACRITECH=y +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ASIX=y +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_NET_VENDOR_CORTINA=y +CONFIG_NET_VENDOR_DAVICOM=y +CONFIG_NET_VENDOR_ENGLEDER=y +CONFIG_NET_VENDOR_EZCHIP=y +CONFIG_NET_VENDOR_FUNGIBLE=y +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +CONFIG_NET_VENDOR_LITEX=y +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_VCAP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_NET_VENDOR_MICROSOFT=y +CONFIG_NET_VENDOR_NI=y +CONFIG_NET_VENDOR_NATSEMI=y +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NET_VENDOR_8390=y +CONFIG_NET_VENDOR_PENSANDO=y +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_RMNET is not set +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_NET_VENDOR_SAMSUNG=y +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_NET_VENDOR_SOCIONEXT=y +CONFIG_NET_VENDOR_SYNOPSYS=y +CONFIG_NET_VENDOR_VERTEXCOM=y +CONFIG_NET_VENDOR_VIA=y +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_PHYLIB is not set +# CONFIG_PSE_CONTROLLER is not set +# CONFIG_MDIO_DEVICE is not set + +# +# PCS device drivers +# +# end of PCS device drivers + +CONFIG_PPP=m +# CONFIG_PPP_BSDCOMP is not set +# CONFIG_PPP_DEFLATE is not set +# CONFIG_PPP_FILTER is not set +# CONFIG_PPP_MPPE is not set +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPPOE is not set +CONFIG_PPPOE_HASH_BITS=4 +# CONFIG_PPP_ASYNC is not set +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_SLIP=m +CONFIG_SLHC=m +# CONFIG_SLIP_COMPRESSED is not set +# CONFIG_SLIP_SMART is not set +# CONFIG_SLIP_MODE_SLIP6 is not set + +# +# Host-side USB support is needed for USB Network Adapter support +# +CONFIG_WLAN=y +CONFIG_WLAN_VENDOR_ADMTEK=y +CONFIG_WLAN_VENDOR_ATH=y +# 
CONFIG_ATH_DEBUG is not set +CONFIG_WLAN_VENDOR_ATMEL=y +CONFIG_WLAN_VENDOR_BROADCOM=y +CONFIG_WLAN_VENDOR_CISCO=y +CONFIG_WLAN_VENDOR_INTEL=y +CONFIG_WLAN_VENDOR_INTERSIL=y +# CONFIG_HOSTAP is not set +CONFIG_WLAN_VENDOR_MARVELL=y +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_WLAN_VENDOR_MICROCHIP=y +CONFIG_WLAN_VENDOR_PURELIFI=y +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_WLAN_VENDOR_REALTEK=y +CONFIG_WLAN_VENDOR_RSI=y +CONFIG_WLAN_VENDOR_SILABS=y +CONFIG_WLAN_VENDOR_ST=y +CONFIG_WLAN_VENDOR_TI=y +CONFIG_WLAN_VENDOR_ZYDAS=y +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_WAN is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +# CONFIG_NET_FAILOVER is not set +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_LIBPS2=y +# 
CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_USERIO is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=32 +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y +# CONFIG_N_GSM is not set +# CONFIG_NULL_TTY is not set +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_VIRTIO_CONSOLE is not set +CONFIG_HW_RANDOM=y +CONFIG_UML_RANDOM=y +CONFIG_DEVMEM=y +# end of Character devices + +# +# I2C support +# +# CONFIG_I2C is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
+# +# end of PTP clock support + +# CONFIG_PINCTRL is not set +# CONFIG_GPIOLIB is not set +# CONFIG_POWER_RESET is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set + +# +# CEC support +# +# CONFIG_MEDIA_CEC_SUPPORT is not set +# end of CEC support + +# +# Graphics support +# +# end of Graphics support + +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_GLORIOUS is not set +# CONFIG_HID_VIVALDI is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +CONFIG_HID_ITE=y +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +CONFIG_HID_REDRAGON=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is 
not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_TOPRE is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# end of HID-BPF support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_RTC_CLASS is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_DMABUF_HEAPS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_MENU=y +# CONFIG_VDPA is not set +CONFIG_VHOST_MENU=y +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +# CONFIG_STAGING is not set +# CONFIG_COMMON_CLK is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMUFD is not set + +# +# Remoteproc drivers +# +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# end of Rpmsg drivers + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of 
Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# end of PHY drivers for Broadcom platforms +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_RAS is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +# CONFIG_DAX is not set +# CONFIG_NVMEM is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_TEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set 
+CONFIG_FS_MBCACHE=y +CONFIG_REISERFS_FS=y +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +# CONFIG_REISERFS_FS_XATTR is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +CONFIG_QUOTA=y +# CONFIG_QUOTA_NETLINK_INTERFACE is not set +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=m +# CONFIG_FUSE_FS is not set +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +# CONFIG_ZISOFS is not set +# CONFIG_UDF_FS is not set +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_TMPFS_XATTR is not set +# CONFIG_TMPFS_INODE64 is not set +CONFIG_MEMFD_CREATE=y +# CONFIG_CONFIGFS_FS is not set +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# 
CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_SMB_SERVER is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# 
CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_INIT_STACK_ALL_PATTERN is not set +CONFIG_INIT_STACK_ALL_ZERO=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +CONFIG_RANDSTRUCT_NONE=y +# end of Kernel hardening options +# end of Security options + +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y 
+CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_TEST=m +# end of Crypto core or helper + +# +# HACL implementation +# +CONFIG_CRYPTO_SHA2_HACL=y +# end of HACL implementation + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=y +CONFIG_CRYPTO_ECDH=y +CONFIG_CRYPTO_ECDSA=y +CONFIG_CRYPTO_ECRDSA=y +# CONFIG_CRYPTO_SM2 is not set +CONFIG_CRYPTO_CURVE25519=y +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AES_TI=y +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARIA is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=y +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +CONFIG_CRYPTO_ADIANTUM=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_CHACHA20=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_HCTR2=y +CONFIG_CRYPTO_KEYWRAP=y +CONFIG_CRYPTO_LRW=y +CONFIG_CRYPTO_OFB=y +# CONFIG_CRYPTO_PCBC is not set +CONFIG_CRYPTO_XCTR=y +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_NHPOLY1305=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +CONFIG_CRYPTO_AEGIS128=y +CONFIG_CRYPTO_CHACHA20POLY1305=y +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_ESSIV=y +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# 
+CONFIG_CRYPTO_BLAKE2B=y +# CONFIG_CRYPTO_CMAC is not set +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +CONFIG_CRYPTO_POLYVAL=y +CONFIG_CRYPTO_POLY1305=y +# CONFIG_CRYPTO_RMD160 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +# CONFIG_CRYPTO_SM3_GENERIC is not set +CONFIG_CRYPTO_STREEBOG=y +# CONFIG_CRYPTO_VMAC is not set +# CONFIG_CRYPTO_WP512 is not set +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_XXHASH=y +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=y +CONFIG_CRYPTO_CRCT10DIF=y +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_842=y +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_LZ4HC=y +CONFIG_CRYPTO_ZSTD=y +# end of Compression + +# +# Random number generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HW=y + +# +# Certificates for signature checking +# +# end of Certificates for signature checking + +# +# Library routines +# +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_ARCH_HAS_STRNCPY_FROM_USER=y +CONFIG_ARCH_HAS_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +# CONFIG_CORDIC is not set +# CONFIG_PRIME_NUMBERS is not set + +# +# Crypto library routines +# 
+CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=y +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y +# CONFIG_CRYPTO_LIB_CHACHA is not set +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=y +# CONFIG_CRYPTO_LIB_CURVE25519 is not set +CONFIG_CRYPTO_LIB_DES=y +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=y +# CONFIG_CRYPTO_LIB_POLY1305 is not set +# CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +# CONFIG_CRC_CCITT is not set +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC64_ROCKSOFT is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=y +CONFIG_842_DECOMPRESS=y +CONFIG_ZLIB_INFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=y +CONFIG_LZ4HC_COMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +# CONFIG_XZ_DEC_MICROLZMA is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_NO_DMA=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_DQL=y +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +# CONFIG_IRQ_POLL is not set +CONFIG_MPILIB=y 
+CONFIG_OID_REGISTRY=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DYNAMIC_DEBUG_CORE is not set +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_PAHOLE_HAS_SPLIT_BTF=y +CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +# CONFIG_DEBUG_FS is not set +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is 
not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Oops, Lockups and Hangs +# +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SOFTLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +# CONFIG_SCHEDSTATS is not set +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y + +# +# Debug kernel data structures +# +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_LATENCYTOP is not set +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_OSNOISE_TRACER is not set +# CONFIG_TIMERLAT_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_SYNTH_EVENTS is not set +# CONFIG_USER_EVENTS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SAMPLES is not set + +# +# um Debugging +# +# CONFIG_GPROF is not set +CONFIG_EARLY_PRINTK=y +# end of um Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# 
CONFIG_RBTREE_TEST is not set
+# CONFIG_REED_SOLOMON_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+# CONFIG_PERCPU_TEST is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_TEST_HEXDUMP is not set
+# CONFIG_STRING_SELFTEST is not set
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_TEST_PRINTF is not set
+# CONFIG_TEST_SCANF is not set
+# CONFIG_TEST_BITMAP is not set
+# CONFIG_TEST_UUID is not set
+# CONFIG_TEST_XARRAY is not set
+# CONFIG_TEST_MAPLE_TREE is not set
+# CONFIG_TEST_RHASHTABLE is not set
+# CONFIG_TEST_IDA is not set
+# CONFIG_TEST_LKM is not set
+# CONFIG_TEST_BITOPS is not set
+# CONFIG_TEST_VMALLOC is not set
+# CONFIG_TEST_USER_COPY is not set
+# CONFIG_TEST_BPF is not set
+# CONFIG_TEST_BLACKHOLE_DEV is not set
+# CONFIG_FIND_BIT_BENCHMARK is not set
+# CONFIG_TEST_FIRMWARE is not set
+# CONFIG_TEST_SYSCTL is not set
+# CONFIG_TEST_UDELAY is not set
+# CONFIG_TEST_STATIC_KEYS is not set
+# CONFIG_TEST_KMOD is not set
+# CONFIG_TEST_MEMCAT_P is not set
+# CONFIG_TEST_MEMINIT is not set
+# CONFIG_TEST_FREE_PAGES is not set
+# end of Kernel Testing and Coverage
+
+#
+# Rust hacking
+#
+# end of Rust hacking
+# end of Kernel hacking
diff --git a/zeta/test-artifacts/init b/zeta/test-artifacts/init
new file mode 100755
index 0000000000000..2c7cf3997e758
--- /dev/null
+++ b/zeta/test-artifacts/init
@@ -0,0 +1,13 @@
+#!/bin/busybox sh
+
+# Make all busybox applets (modprobe, mount, poweroff, ...) callable by name.
+/bin/busybox --install -s /bin
+
+# Mount /proc up front so it is available while the tests run.
+mount proc /proc -t proc
+
+echo "tcrypt: starting tcrypt test execution"
+
+./test-script.sh
+
+poweroff -f
diff --git a/zeta/test-artifacts/test-script.sh b/zeta/test-artifacts/test-script.sh
new file mode 100755
index 0000000000000..3a7b53c6db47d
--- /dev/null
+++ b/zeta/test-artifacts/test-script.sh
@@ -0,0 +1,34 @@
+#!/bin/busybox sh
+
+# Benchmark each hash implementation via the kernel tcrypt module
+# (mode=300 selects the hash speed tests; sec=2 runs each for ~2 seconds).
+
+echo "tcrypt: starting CRYPTO_SHA2_HACL"
+modprobe tcrypt mode=300 alg=sha224-hacl sec=2
+modprobe tcrypt mode=300 alg=sha256-hacl sec=2
+modprobe tcrypt mode=300 alg=sha384-hacl sec=2
+modprobe tcrypt mode=300 alg=sha512-hacl sec=2
+
+echo "tcrypt: starting SHA2 (256) test"
+echo "tcrypt: testing sha256 generic implementation"
+modprobe tcrypt mode=300 alg=sha256-generic sec=2
+
+echo "tcrypt: starting SHA2 (512) test"
+echo "tcrypt: testing sha512 generic implementation"
+modprobe tcrypt mode=300 alg=sha512-generic sec=2
+
+echo "tcrypt: starting SHA3 (256) test"
+echo "tcrypt: testing sha3-256 generic implementation"
+modprobe tcrypt mode=300 alg=sha3-256-generic sec=2
+
+echo "tcrypt: starting SHA3 (512) test"
+echo "tcrypt: testing sha3-512 generic implementation"
+modprobe tcrypt mode=300 alg=sha3-512-generic sec=2
+
+echo "tcrypt: starting BLAKE2b (256) test"
+echo "tcrypt: testing blake2b-256 generic implementation"
+modprobe tcrypt mode=300 alg=blake2b-256-generic sec=2
+
+echo "tcrypt: starting BLAKE2b (512) test"
+echo "tcrypt: testing blake2b-512 generic implementation"
+modprobe tcrypt mode=300 alg=blake2b-512-generic sec=2