pq_crypto 0.3.2 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +56 -0
- data/CHANGELOG.md +62 -0
- data/GET_STARTED.md +366 -40
- data/README.md +76 -233
- data/SECURITY.md +107 -82
- data/ext/pqcrypto/extconf.rb +169 -87
- data/ext/pqcrypto/mldsa_api.h +1 -48
- data/ext/pqcrypto/mlkem_api.h +1 -18
- data/ext/pqcrypto/pq_externalmu.c +89 -204
- data/ext/pqcrypto/pqcrypto_native_api.h +129 -0
- data/ext/pqcrypto/pqcrypto_ruby_secure.c +484 -84
- data/ext/pqcrypto/pqcrypto_secure.c +203 -78
- data/ext/pqcrypto/pqcrypto_secure.h +53 -14
- data/ext/pqcrypto/pqcrypto_version.h +7 -0
- data/ext/pqcrypto/randombytes.h +9 -0
- data/ext/pqcrypto/vendor/.vendored +10 -5
- data/ext/pqcrypto/vendor/mldsa-native/BUILDING.md +105 -0
- data/ext/pqcrypto/vendor/mldsa-native/LICENSE +286 -0
- data/ext/pqcrypto/vendor/mldsa-native/META.yml +24 -0
- data/ext/pqcrypto/vendor/mldsa-native/README.md +221 -0
- data/ext/pqcrypto/vendor/mldsa-native/SECURITY.md +8 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/mldsa_native.c +721 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/mldsa_native.h +975 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/mldsa_native_asm.S +724 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/mldsa_native_config.h +723 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/cbmc.h +166 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/common.h +321 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/ct.c +21 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/ct.h +385 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/debug.c +73 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/debug.h +130 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/fips202.c +277 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/fips202.h +244 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/fips202x4.c +182 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/fips202x4.h +117 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/keccakf1600.c +438 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/keccakf1600.h +105 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/auto.h +71 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/src/fips202_native_aarch64.h +62 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/src/keccak_f1600_x1_scalar_asm.S +376 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/src/keccak_f1600_x1_v84a_asm.S +204 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/src/keccak_f1600_x2_v84a_asm.S +259 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/src/keccak_f1600_x4_v8a_scalar_hybrid_asm.S +1077 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/src/keccak_f1600_x4_v8a_v84a_scalar_hybrid_asm.S +987 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/src/keccakf1600_round_constants.c +41 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/x1_scalar.h +26 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/x1_v84a.h +35 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/x2_v84a.h +37 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/x4_v8a_scalar.h +27 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/aarch64/x4_v8a_v84a_scalar.h +36 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/api.h +69 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/armv81m/README.md +10 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/armv81m/mve.h +32 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/armv81m/src/fips202_native_armv81m.h +20 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/armv81m/src/keccak_f1600_x4_mve.S +638 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/armv81m/src/keccak_f1600_x4_mve.c +136 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/armv81m/src/keccakf1600_round_constants.c +52 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/auto.h +29 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/x86_64/src/KeccakP_1600_times4_SIMD256.c +488 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/x86_64/src/KeccakP_1600_times4_SIMD256.h +16 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/fips202/native/x86_64/xkcp.h +31 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/meta.h +247 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/aarch64_zetas.c +231 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/arith_native_aarch64.h +150 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/intt.S +753 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/mld_polyvecl_pointwise_acc_montgomery_l4.S +129 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/mld_polyvecl_pointwise_acc_montgomery_l5.S +145 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/mld_polyvecl_pointwise_acc_montgomery_l7.S +177 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/ntt.S +653 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/pointwise_montgomery.S +79 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/poly_caddq_asm.S +53 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/poly_chknorm_asm.S +55 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/poly_decompose_32_asm.S +85 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/poly_decompose_88_asm.S +85 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/poly_use_hint_32_asm.S +102 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/poly_use_hint_88_asm.S +110 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/polyz_unpack_17_asm.S +72 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/polyz_unpack_19_asm.S +69 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/polyz_unpack_table.c +40 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/rej_uniform_asm.S +189 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/rej_uniform_eta2_asm.S +135 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/rej_uniform_eta4_asm.S +128 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/rej_uniform_eta_table.c +543 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/aarch64/src/rej_uniform_table.c +62 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/api.h +649 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/meta.h +23 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/meta.h +315 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/arith_native_x86_64.h +124 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/consts.c +157 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/consts.h +27 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/intt.S +2311 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/ntt.S +2383 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/nttunpack.S +239 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/pointwise.S +131 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/pointwise_acc_l4.S +139 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/pointwise_acc_l5.S +155 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/pointwise_acc_l7.S +187 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/poly_caddq_avx2.c +61 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/poly_chknorm_avx2.c +52 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/poly_decompose_32_avx2.c +155 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/poly_decompose_88_avx2.c +155 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/poly_use_hint_32_avx2.c +102 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/poly_use_hint_88_avx2.c +104 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/polyz_unpack_17_avx2.c +91 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/polyz_unpack_19_avx2.c +93 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/rej_uniform_avx2.c +126 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/rej_uniform_eta2_avx2.c +155 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/rej_uniform_eta4_avx2.c +139 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/native/x86_64/src/rej_uniform_table.c +160 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/packing.c +293 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/packing.h +224 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/params.h +77 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/poly.c +991 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/poly.h +393 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/poly_kl.c +946 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/poly_kl.h +360 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/polyvec.c +877 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/polyvec.h +725 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/randombytes.h +26 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/reduce.h +139 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/rounding.h +249 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/sign.c +1511 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/sign.h +806 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/symmetric.h +68 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/sys.h +268 -0
- data/ext/pqcrypto/vendor/mldsa-native/mldsa/src/zetas.inc +55 -0
- data/ext/pqcrypto/vendor/mlkem-native/BUILDING.md +104 -0
- data/ext/pqcrypto/vendor/mlkem-native/LICENSE +294 -0
- data/ext/pqcrypto/vendor/mlkem-native/META.yml +30 -0
- data/ext/pqcrypto/vendor/mlkem-native/README.md +223 -0
- data/ext/pqcrypto/vendor/mlkem-native/RELEASE.md +86 -0
- data/ext/pqcrypto/vendor/mlkem-native/SECURITY.md +8 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/README.md +23 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/mlkem_native.c +660 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/mlkem_native.h +538 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/mlkem_native_asm.S +681 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/mlkem_native_config.h +709 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/cbmc.h +174 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/common.h +274 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/compress.c +717 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/compress.h +688 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/debug.c +64 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/debug.h +128 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/fips202.c +251 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/fips202.h +158 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/fips202x4.c +208 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/fips202x4.h +80 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/keccakf1600.c +463 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/keccakf1600.h +98 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/auto.h +70 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/src/fips202_native_aarch64.h +69 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/src/keccak_f1600_x1_scalar_asm.S +375 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/src/keccak_f1600_x1_v84a_asm.S +203 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/src/keccak_f1600_x2_v84a_asm.S +258 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/src/keccak_f1600_x4_v8a_scalar_hybrid_asm.S +1076 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/src/keccak_f1600_x4_v8a_v84a_scalar_hybrid_asm.S +986 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/src/keccakf1600_round_constants.c +46 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/x1_scalar.h +25 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/x1_v84a.h +34 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/x2_v84a.h +35 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/x4_v8a_scalar.h +26 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/aarch64/x4_v8a_v84a_scalar.h +35 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/api.h +117 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/armv81m/README.md +10 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/armv81m/mve.h +79 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/armv81m/src/fips202_native_armv81m.h +35 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/armv81m/src/keccak_f1600_x4_mve.S +667 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/armv81m/src/keccak_f1600_x4_mve.c +40 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/armv81m/src/keccakf1600_round_constants.c +51 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/armv81m/src/state_extract_bytes_x4_mve.S +290 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/armv81m/src/state_xor_bytes_x4_mve.S +314 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/auto.h +28 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/x86_64/keccak_f1600_x4_avx2.h +33 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/x86_64/src/fips202_native_x86_64.h +41 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/x86_64/src/keccak_f1600_x4_avx2.S +451 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/fips202/native/x86_64/src/keccakf1600_constants.c +51 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/indcpa.c +622 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/indcpa.h +156 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/kem.c +446 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/kem.h +326 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/README.md +16 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/meta.h +122 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/aarch64_zetas.c +174 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/arith_native_aarch64.h +177 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/intt.S +628 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/ntt.S +562 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/poly_mulcache_compute_asm.S +127 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/poly_reduce_asm.S +150 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/poly_tobytes_asm.S +117 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/poly_tomont_asm.S +98 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/polyvec_basemul_acc_montgomery_cached_asm_k2.S +261 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/polyvec_basemul_acc_montgomery_cached_asm_k3.S +314 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/polyvec_basemul_acc_montgomery_cached_asm_k4.S +368 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/rej_uniform_asm.S +226 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/aarch64/src/rej_uniform_table.c +542 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/api.h +637 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/meta.h +25 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/README.md +11 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/meta.h +128 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/src/arith_native_riscv64.h +45 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/src/rv64v_debug.c +81 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/src/rv64v_debug.h +145 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/src/rv64v_izetas.inc +27 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/src/rv64v_poly.c +805 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/src/rv64v_zetas.inc +27 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/riscv64/src/rv64v_zetas_basemul.inc +39 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/README.md +4 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/meta.h +304 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/arith_native_x86_64.h +309 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/compress_consts.c +94 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/compress_consts.h +45 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/consts.c +102 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/consts.h +25 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/intt.S +719 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/mulcache_compute.S +90 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/ntt.S +639 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/nttfrombytes.S +193 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/ntttobytes.S +181 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/nttunpack.S +174 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/poly_compress_d10.S +382 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/poly_compress_d11.S +448 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/poly_compress_d4.S +163 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/poly_compress_d5.S +220 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/poly_decompress_d10.S +228 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/poly_decompress_d11.S +277 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/poly_decompress_d4.S +180 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/poly_decompress_d5.S +192 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k2.S +502 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k3.S +750 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k4.S +998 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/reduce.S +218 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/rej_uniform_asm.S +103 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/rej_uniform_table.c +544 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/native/x86_64/src/tomont.S +155 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/params.h +76 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/poly.c +572 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/poly.h +317 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/poly_k.c +502 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/poly_k.h +668 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/randombytes.h +60 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/sampling.c +362 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/sampling.h +118 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/symmetric.h +70 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/sys.h +260 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/verify.c +20 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/verify.h +464 -0
- data/ext/pqcrypto/vendor/mlkem-native/mlkem/src/zetas.inc +30 -0
- data/lib/pq_crypto/algorithm_registry.rb +200 -0
- data/lib/pq_crypto/hybrid_kem.rb +1 -12
- data/lib/pq_crypto/kem.rb +104 -13
- data/lib/pq_crypto/pkcs8.rb +387 -0
- data/lib/pq_crypto/serialization.rb +1 -14
- data/lib/pq_crypto/signature.rb +123 -17
- data/lib/pq_crypto/spki.rb +131 -0
- data/lib/pq_crypto/version.rb +1 -1
- data/lib/pq_crypto.rb +79 -20
- data/script/vendor_libs.rb +88 -155
- metadata +241 -73
- data/ext/pqcrypto/vendor/pqclean/common/aes.c +0 -639
- data/ext/pqcrypto/vendor/pqclean/common/aes.h +0 -64
- data/ext/pqcrypto/vendor/pqclean/common/compat.h +0 -73
- data/ext/pqcrypto/vendor/pqclean/common/crypto_declassify.h +0 -7
- data/ext/pqcrypto/vendor/pqclean/common/fips202.c +0 -928
- data/ext/pqcrypto/vendor/pqclean/common/fips202.h +0 -166
- data/ext/pqcrypto/vendor/pqclean/common/keccak2x/feat.S +0 -168
- data/ext/pqcrypto/vendor/pqclean/common/keccak2x/fips202x2.c +0 -684
- data/ext/pqcrypto/vendor/pqclean/common/keccak2x/fips202x2.h +0 -60
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/KeccakP-1600-times4-SIMD256.c +0 -1028
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/KeccakP-1600-times4-SnP.h +0 -50
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/KeccakP-1600-unrolling.macros +0 -198
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/Makefile +0 -8
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/Makefile.Microsoft_nmake +0 -8
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/SIMD256-config.h +0 -3
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/align.h +0 -34
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/brg_endian.h +0 -142
- data/ext/pqcrypto/vendor/pqclean/common/nistseedexpander.c +0 -101
- data/ext/pqcrypto/vendor/pqclean/common/nistseedexpander.h +0 -39
- data/ext/pqcrypto/vendor/pqclean/common/randombytes.c +0 -355
- data/ext/pqcrypto/vendor/pqclean/common/randombytes.h +0 -27
- data/ext/pqcrypto/vendor/pqclean/common/sha2.c +0 -769
- data/ext/pqcrypto/vendor/pqclean/common/sha2.h +0 -173
- data/ext/pqcrypto/vendor/pqclean/common/sp800-185.c +0 -156
- data/ext/pqcrypto/vendor/pqclean/common/sp800-185.h +0 -27
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/LICENSE +0 -5
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/Makefile +0 -19
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/Makefile.Microsoft_nmake +0 -23
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/api.h +0 -18
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/cbd.c +0 -83
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/cbd.h +0 -11
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/indcpa.c +0 -327
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/indcpa.h +0 -22
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/kem.c +0 -164
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/kem.h +0 -23
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/ntt.c +0 -146
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/ntt.h +0 -14
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/params.h +0 -36
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/poly.c +0 -299
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/poly.h +0 -37
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/polyvec.c +0 -188
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/polyvec.h +0 -26
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/reduce.c +0 -41
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/reduce.h +0 -13
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/symmetric-shake.c +0 -71
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/symmetric.h +0 -30
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/verify.c +0 -67
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/verify.h +0 -13
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/LICENSE +0 -5
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/Makefile +0 -19
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/Makefile.Microsoft_nmake +0 -23
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/api.h +0 -50
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/ntt.c +0 -98
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/ntt.h +0 -10
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/packing.c +0 -261
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/packing.h +0 -31
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/params.h +0 -44
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/poly.c +0 -799
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/poly.h +0 -52
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/polyvec.c +0 -415
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/polyvec.h +0 -65
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/reduce.c +0 -69
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/reduce.h +0 -17
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/rounding.c +0 -92
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/rounding.h +0 -14
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/sign.c +0 -407
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/sign.h +0 -47
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/symmetric-shake.c +0 -26
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/symmetric.h +0 -34
|
@@ -0,0 +1,991 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* Copyright (c) The mldsa-native project authors
|
|
3
|
+
* Copyright (c) The mlkem-native project authors
|
|
4
|
+
* SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
/* References
|
|
8
|
+
* ==========
|
|
9
|
+
*
|
|
10
|
+
* - [FIPS204]
|
|
11
|
+
* FIPS 204 Module-Lattice-Based Digital Signature Standard
|
|
12
|
+
* National Institute of Standards and Technology
|
|
13
|
+
* https://csrc.nist.gov/pubs/fips/204/final
|
|
14
|
+
*
|
|
15
|
+
* - [REF]
|
|
16
|
+
* CRYSTALS-Dilithium reference implementation
|
|
17
|
+
* Bai, Ducas, Kiltz, Lepoint, Lyubashevsky, Schwabe, Seiler, Stehlé
|
|
18
|
+
* https://github.com/pq-crystals/dilithium/tree/master/ref
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
#include "poly.h"
|
|
22
|
+
|
|
23
|
+
#include "common.h"
|
|
24
|
+
#include "ct.h"
|
|
25
|
+
#include "debug.h"
|
|
26
|
+
#include "reduce.h"
|
|
27
|
+
#include "rounding.h"
|
|
28
|
+
#include "symmetric.h"
|
|
29
|
+
|
|
30
|
+
#if !defined(MLD_CONFIG_MULTILEVEL_NO_SHARED)
|
|
31
|
+
#include "zetas.inc"
|
|
32
|
+
|
|
33
|
+
/* mld_poly_reduce()
 *
 * Reduce every coefficient of the polynomial in place by applying
 * mld_reduce32() to it. On entry, coefficients may be anywhere in
 * [INT32_MIN, MLD_REDUCE32_DOMAIN_MAX); on exit they lie in
 * [-MLD_REDUCE32_RANGE_MAX, MLD_REDUCE32_RANGE_MAX), as checked by the
 * trailing mld_assert_bound().
 *
 * NOTE: the __loop__(...) clauses are CBMC proof annotations (loop
 * invariants and a termination measure), not runtime code; they must be
 * kept textually in sync with the loop body.
 */
MLD_INTERNAL_API
void mld_poly_reduce(mld_poly *a)
{
  unsigned int i;
  /* Precondition: all inputs are within the domain of mld_reduce32(). */
  mld_assert_bound(a->coeffs, MLDSA_N, INT32_MIN, MLD_REDUCE32_DOMAIN_MAX);

  for (i = 0; i < MLDSA_N; ++i)
  __loop__(
    invariant(i <= MLDSA_N)
    invariant(forall(k0, i, MLDSA_N, a->coeffs[k0] == loop_entry(*a).coeffs[k0]))
    invariant(array_bound(a->coeffs, 0, i, -MLD_REDUCE32_RANGE_MAX, MLD_REDUCE32_RANGE_MAX))
    decreases(MLDSA_N - i))
  {
    a->coeffs[i] = mld_reduce32(a->coeffs[i]);
  }

  /* Postcondition: every coefficient is now within the reduced range. */
  mld_assert_bound(a->coeffs, MLDSA_N, -MLD_REDUCE32_RANGE_MAX,
                   MLD_REDUCE32_RANGE_MAX);
}
|
|
52
|
+
|
|
53
|
+
/* mld_poly_caddq_c()
 *
 * Portable C implementation of the conditional-add-Q normalization:
 * mld_caddq() is applied to every coefficient so that coefficients that
 * start in (-MLDSA_Q, MLDSA_Q) end up in [0, MLDSA_Q), per the
 * __contract__ ensures clause below. Used as the fallback when no native
 * (assembly) backend is available — see mld_poly_caddq().
 *
 * The __contract__/__loop__ clauses are CBMC proof annotations, not
 * runtime code.
 */
MLD_STATIC_TESTABLE void mld_poly_caddq_c(mld_poly *a)
__contract__(
  requires(memory_no_alias(a, sizeof(mld_poly)))
  requires(array_abs_bound(a->coeffs, 0, MLDSA_N, MLDSA_Q))
  assigns(memory_slice(a, sizeof(mld_poly)))
  ensures(array_bound(a->coeffs, 0, MLDSA_N, 0, MLDSA_Q))
)
{
  unsigned int i;
  /* Precondition: coefficients are bounded by MLDSA_Q in absolute value. */
  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q);

  for (i = 0; i < MLDSA_N; ++i)
  __loop__(
    invariant(i <= MLDSA_N)
    invariant(forall(k0, i, MLDSA_N, a->coeffs[k0] == loop_entry(*a).coeffs[k0]))
    invariant(array_bound(a->coeffs, 0, i, 0, MLDSA_Q))
    decreases(MLDSA_N - i)
  )
  {
    a->coeffs[i] = mld_caddq(a->coeffs[i]);
  }

  /* Postcondition: all coefficients are non-negative and below MLDSA_Q. */
  mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q);
}
|
|
77
|
+
|
|
78
|
+
/* mld_poly_caddq()
 *
 * Public entry point for the conditional-add-Q normalization. When a
 * native backend is configured (MLD_USE_NATIVE_POLY_CADDQ), it is tried
 * first; if it reports MLD_NATIVE_FUNC_SUCCESS the result is used as-is,
 * otherwise — or when no native backend is compiled in — the portable C
 * implementation mld_poly_caddq_c() runs instead. Input/output bounds
 * match mld_poly_caddq_c() and are re-checked here via mld_assert_*.
 */
MLD_INTERNAL_API
void mld_poly_caddq(mld_poly *a)
{
#if defined(MLD_USE_NATIVE_POLY_CADDQ)
  int ret;
  /* Same precondition as the C fallback: |coeff| < MLDSA_Q. */
  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q);
  ret = mld_poly_caddq_native(a->coeffs);
  if (ret == MLD_NATIVE_FUNC_SUCCESS)
  {
    /* Native path succeeded; verify its postcondition and return. */
    mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q);
    return;
  }
#endif /* MLD_USE_NATIVE_POLY_CADDQ */
  /* Fallback: native backend absent or declined (non-success return). */
  mld_poly_caddq_c(a);
}
|
|
93
|
+
|
|
94
|
+
/* Reference: We use destructive version (output=first input) to avoid
 * reasoning about aliasing in the CBMC specification */
/* mld_poly_add()
 *
 * Coefficient-wise in-place addition: r->coeffs[i] += b->coeffs[i] for
 * all MLDSA_N coefficients. Per the loop invariants, the caller must
 * ensure each sum stays within [INT32_MIN, MLD_REDUCE32_DOMAIN_MAX) —
 * no modular reduction is performed here.
 *
 * The __loop__(...) clauses are CBMC proof annotations, not runtime code.
 */
MLD_INTERNAL_API
void mld_poly_add(mld_poly *r, const mld_poly *b)
{
  unsigned int i;
  for (i = 0; i < MLDSA_N; ++i)
  __loop__(
    assigns(i, memory_slice(r, sizeof(mld_poly)))
    invariant(i <= MLDSA_N)
    invariant(forall(k0, i, MLDSA_N, r->coeffs[k0] == loop_entry(*r).coeffs[k0]))
    invariant(forall(k1, 0, i, r->coeffs[k1] == loop_entry(*r).coeffs[k1] + b->coeffs[k1]))
    invariant(forall(k2, 0, i, r->coeffs[k2] < MLD_REDUCE32_DOMAIN_MAX))
    invariant(forall(k2, 0, i, r->coeffs[k2] >= INT32_MIN))
    decreases(MLDSA_N - i)
  )
  {
    r->coeffs[i] = r->coeffs[i] + b->coeffs[i];
  }
}
|
|
114
|
+
|
|
115
|
+
/* Reference: We use destructive version (output=first input) to avoid
 * reasoning about aliasing in the CBMC specification */
/* mld_poly_sub()
 *
 * Coefficient-wise in-place subtraction: r->coeffs[i] -= b->coeffs[i]
 * for all MLDSA_N coefficients. Both inputs must have coefficients
 * bounded by MLDSA_Q in absolute value (checked by the leading
 * mld_assert_abs_bound calls); the raw differences are left unreduced,
 * within [INT32_MIN, MLD_REDUCE32_DOMAIN_MAX).
 *
 * The __loop__(...) clauses are CBMC proof annotations, not runtime code.
 */
MLD_INTERNAL_API
void mld_poly_sub(mld_poly *r, const mld_poly *b)
{
  unsigned int i;
  /* Preconditions: |coeff| < MLDSA_Q for both operands. */
  mld_assert_abs_bound(b->coeffs, MLDSA_N, MLDSA_Q);
  mld_assert_abs_bound(r->coeffs, MLDSA_N, MLDSA_Q);

  for (i = 0; i < MLDSA_N; ++i)
  __loop__(
    invariant(i <= MLDSA_N)
    invariant(array_bound(r->coeffs, 0, i, INT32_MIN, MLD_REDUCE32_DOMAIN_MAX))
    invariant(forall(k0, i, MLDSA_N, r->coeffs[k0] == loop_entry(*r).coeffs[k0]))
    decreases(MLDSA_N - i)
  )
  {
    r->coeffs[i] = r->coeffs[i] - b->coeffs[i];
  }

  /* Postcondition: differences remain in the mld_reduce32 input domain. */
  mld_assert_bound(r->coeffs, MLDSA_N, INT32_MIN, MLD_REDUCE32_DOMAIN_MAX);
}
|
|
137
|
+
|
|
138
|
+
/* mld_poly_shiftl()
 *
 * Multiply every coefficient by 2^MLDSA_D in place (a logical left shift
 * by MLDSA_D bits). Inputs must lie in [0, 1 << 10) per the leading
 * assertion; results lie in [0, MLDSA_Q) per the trailing assertion.
 *
 * The __loop__(...) clauses are CBMC proof annotations, not runtime code.
 */
MLD_INTERNAL_API
void mld_poly_shiftl(mld_poly *a)
{
  unsigned int i;
  /* Precondition: coefficients fit in 10 bits, so the product below
   * cannot overflow int32_t. */
  mld_assert_bound(a->coeffs, MLDSA_N, 0, 1 << 10);

  for (i = 0; i < MLDSA_N; i++)
  __loop__(
    invariant(i <= MLDSA_N)
    invariant(array_bound(a->coeffs, 0, i, 0, MLDSA_Q))
    invariant(forall(k0, i, MLDSA_N, a->coeffs[k0] == loop_entry(*a).coeffs[k0]))
    decreases(MLDSA_N - i))
  {
    /* Reference: uses a left shift by MLDSA_D which is undefined behaviour in
     * C90/C99
     */
    a->coeffs[i] *= (1 << MLDSA_D);
  }
  /* Postcondition: shifted values stay below MLDSA_Q. */
  mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q);
}
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
/* Montgomery multiplication of two field elements.
 * The second operand must be signed-canonical (|b| < MLDSA_Q_HALF);
 * the result is bounded by MLDSA_Q in absolute value. */
static MLD_INLINE int32_t mld_fqmul(int32_t a, int32_t b)
__contract__(
  requires(b > -MLDSA_Q_HALF && b < MLDSA_Q_HALF)
  ensures(return_value > -MLDSA_Q && return_value < MLDSA_Q)
)
{
  /* Bounds: We argue in mld_montgomery_reduce() that the result
   * of Montgomery reduction is < MLDSA_Q if the input is smaller
   * than 2^31 * MLDSA_Q in absolute value. Indeed, we have:
   *
   * |a * b| = |a| * |b|
   *         < 2^31 * MLDSA_Q_HALF
   *         < 2^31 * MLDSA_Q
   */
  return mld_montgomery_reduce((int64_t)a * (int64_t)b);
}
|
|
176
|
+
|
|
177
|
+
/* mld_ntt_butterfly_block()
 *
 * Computes a block of CT (Cooley-Tukey) butterflies with a fixed twiddle
 * factor, using Montgomery multiplication.
 *
 * Parameters:
 * - r: Pointer to base of polynomial (_not_ the base of butterfly block)
 * - zeta: Twiddle factor to use for the butterfly. This must be in
 *         Montgomery form and signed canonical.
 * - start: Offset to the beginning of the butterfly block
 * - len: Index difference between coefficients subject to a butterfly
 * - bound: Ghost variable describing coefficient bound: Prior to `start`,
 *          coefficients must be bound by `bound + MLDSA_Q`. Post `start`,
 *          they must be bound by `bound`.
 * When this function returns, output coefficients in the index range
 * [start, start+2*len) have bound bumped to `bound + MLDSA_Q`.
 * Example:
 * - start=8, len=4
 *   This would compute the following four butterflies
 *          8     --    12
 *             9    --     13
 *                10   --     14
 *                   11   --     15
 * - start=4, len=2
 *   This would compute the following two butterflies
 *          4    --    6
 *             5   --     7
 */

/* Reference: Embedded in `ntt()` in the reference implementation @[REF]. */
static MLD_INLINE void mld_ntt_butterfly_block(int32_t r[MLDSA_N],
                                               const int32_t zeta,
                                               const unsigned start,
                                               const unsigned len,
                                               const unsigned bound)
__contract__(
  requires(start < MLDSA_N)
  requires(1 <= len && len <= MLDSA_N / 2 && start + 2 * len <= MLDSA_N)
  requires(0 <= bound && bound < INT32_MAX - MLDSA_Q)
  requires(-MLDSA_Q_HALF < zeta && zeta < MLDSA_Q_HALF)
  requires(memory_no_alias(r, sizeof(int32_t) * MLDSA_N))
  requires(array_abs_bound(r, 0, start, bound + MLDSA_Q))
  requires(array_abs_bound(r, start, MLDSA_N, bound))
  assigns(memory_slice(r, sizeof(int32_t) * MLDSA_N))
  ensures(array_abs_bound(r, 0, start + 2*len, bound + MLDSA_Q))
  ensures(array_abs_bound(r, start + 2 * len, MLDSA_N, bound)))
{
  /* `bound` is a ghost variable only needed in the CBMC specification */
  unsigned j;
  ((void)bound);
  for (j = start; j < start + len; j++)
  __loop__(
    invariant(start <= j && j <= start + len)
    /*
     * Coefficients are updated in strided pairs, so the bounds for the
     * intermediate states alternate twice between the old and new bound
     */
    invariant(array_abs_bound(r, 0, j, bound + MLDSA_Q))
    invariant(array_abs_bound(r, j, start + len, bound))
    invariant(array_abs_bound(r, start + len, j + len, bound + MLDSA_Q))
    invariant(array_abs_bound(r, j + len, MLDSA_N, bound))
    decreases(start + len - j))
  {
    int32_t t;
    /* Classic CT butterfly: (r[j], r[j+len]) <- (r[j]+t, r[j]-t)
     * with t = zeta * r[j+len] in Montgomery arithmetic. */
    t = mld_fqmul(r[j + len], zeta);
    r[j + len] = r[j] - t;
    r[j] = r[j] + t;
  }
}
|
|
246
|
+
|
|
247
|
+
/* mld_ntt_layer()
 *
 * Compute one layer of forward NTT
 *
 * Parameters:
 * - r: Pointer to base of polynomial
 * - layer: Indicates which layer is being applied (1..8).
 *
 * Each layer grows the absolute coefficient bound by one MLDSA_Q
 * (from layer*Q to (layer+1)*Q), as captured by the contract.
 */

/* Reference: Embedded in `ntt()` in the reference implementation @[REF]. */
static MLD_INLINE void mld_ntt_layer(int32_t r[MLDSA_N], const unsigned layer)
__contract__(
  requires(memory_no_alias(r, sizeof(int32_t) * MLDSA_N))
  requires(1 <= layer && layer <= 8)
  requires(array_abs_bound(r, 0, MLDSA_N, layer * MLDSA_Q))
  assigns(memory_slice(r, sizeof(int32_t) * MLDSA_N))
  ensures(array_abs_bound(r, 0, MLDSA_N, (layer + 1) * MLDSA_Q)))
{
  unsigned start, k, len;
  /* Twiddle factors for layer n are at indices 2^(n-1)..2^n-1. */
  k = 1u << (layer - 1);
  len = (unsigned)MLDSA_N >> layer;
  for (start = 0; start < MLDSA_N; start += 2 * len)
  __loop__(
    invariant(start < MLDSA_N + 2 * len)
    invariant(k <= MLDSA_N)
    invariant(2 * len * k == start + MLDSA_N)
    invariant(array_abs_bound(r, 0, start, layer * MLDSA_Q + MLDSA_Q))
    invariant(array_abs_bound(r, start, MLDSA_N, layer * MLDSA_Q))
    decreases(MLDSA_N - start))
  {
    int32_t zeta = mld_zetas[k++];
    mld_ntt_butterfly_block(r, zeta, start, len, layer * MLDSA_Q);
  }
}
|
|
282
|
+
|
|
283
|
+
/* Portable C forward NTT: applies all 8 layers in place.
 * Input coefficients must be bounded by MLDSA_Q in absolute value;
 * the output is bounded by MLD_NTT_BOUND (= 9*MLDSA_Q, one Q of
 * growth per layer). */
MLD_STATIC_TESTABLE void mld_poly_ntt_c(mld_poly *a)
__contract__(
  requires(memory_no_alias(a, sizeof(mld_poly)))
  requires(array_abs_bound(a->coeffs, 0, MLDSA_N, MLDSA_Q))
  assigns(memory_slice(a, sizeof(mld_poly)))
  ensures(array_abs_bound(a->coeffs, 0, MLDSA_N, MLD_NTT_BOUND))
)
{
  unsigned int layer;
  int32_t *r;


  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q);
  r = a->coeffs;

  for (layer = 1; layer < 9; layer++)
  __loop__(
    invariant(1 <= layer && layer <= 9)
    invariant(array_abs_bound(r, 0, MLDSA_N, layer * MLDSA_Q))
    decreases(9 - layer)
  )
  {
    mld_ntt_layer(r, layer);
  }

  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_NTT_BOUND);
}
|
|
310
|
+
|
|
311
|
+
/* Forward NTT dispatcher: tries the platform-native backend first
 * (when compiled in) and falls back to the portable C implementation
 * if the backend is unavailable or declines at runtime. */
MLD_INTERNAL_API
void mld_poly_ntt(mld_poly *a)
{
#if defined(MLD_USE_NATIVE_NTT)
  int ret;
  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q);
  ret = mld_ntt_native(a->coeffs);
  if (ret == MLD_NATIVE_FUNC_SUCCESS)
  {
    mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_NTT_BOUND);
    return;
  }
#endif /* MLD_USE_NATIVE_NTT */
  mld_poly_ntt_c(a);
}
|
|
326
|
+
|
|
327
|
+
/*************************************************
 * Name:        mld_fqscale
 *
 * Description: Scales a field element by mont/256, i.e., performs Montgomery
 *              multiplication by mont^2/256.
 *              Input is expected to have absolute value smaller than
 *              256 * MLDSA_Q.
 *              Output has absolute value smaller than MLD_INTT_BOUND.
 *
 * Arguments:   - int32_t a: Field element to be scaled.
 **************************************************/
static MLD_INLINE int32_t mld_fqscale(int32_t a)
__contract__(
  requires(a > -256*MLDSA_Q && a < 256*MLDSA_Q)
  ensures(return_value > -MLD_INTT_BOUND && return_value < MLD_INTT_BOUND)
)
{
  /* check-magic: 41978 == pow(2,64-8,MLDSA_Q) */
  const int32_t f = 41978;
  /* Bounds: MLD_INTT_BOUND is MLDSA_Q, so the bounds reasoning is just
   * a special case of that in mld_fqmul(). */
  return mld_montgomery_reduce((int64_t)a * f);
}
|
|
350
|
+
|
|
351
|
+
/* Reference: Embedded into `invntt_tomont()` in the reference implementation
 * @[REF] */
/* Compute one layer of the inverse NTT (GS butterflies) in place.
 * Each layer doubles the absolute coefficient bound:
 * from (MLDSA_N >> layer)*Q to (MLDSA_N >> (layer-1))*Q.
 * Twiddles are consumed in reverse order (index k counts down). */
static MLD_INLINE void mld_invntt_layer(int32_t r[MLDSA_N], unsigned layer)
__contract__(
  requires(memory_no_alias(r, sizeof(int32_t) * MLDSA_N))
  requires(1 <= layer && layer <= 8)
  requires(array_abs_bound(r, 0, MLDSA_N, (MLDSA_N >> layer) * MLDSA_Q))
  assigns(memory_slice(r, sizeof(int32_t) * MLDSA_N))
  ensures(array_abs_bound(r, 0, MLDSA_N, (MLDSA_N >> (layer - 1)) * MLDSA_Q)))
{
  unsigned start, k, len;
  len = (unsigned)MLDSA_N >> layer;
  k = (1u << layer) - 1;
  for (start = 0; start < MLDSA_N; start += 2 * len)
  __loop__(
    invariant(start <= MLDSA_N && k <= 255)
    invariant(2 * len * k + start == 2 * MLDSA_N - 2 * len)
    invariant(array_abs_bound(r, 0, start, (MLDSA_N >> (layer - 1)) * MLDSA_Q))
    invariant(array_abs_bound(r, start, MLDSA_N, (MLDSA_N >> layer) * MLDSA_Q))
    decreases(MLDSA_N - start))
  {
    unsigned j;
    /* Negated twiddle: the inverse transform uses -zeta. */
    int32_t zeta = -mld_zetas[k--];

    for (j = start; j < start + len; j++)
    __loop__(
      invariant(start <= j && j <= start + len)
      invariant(array_abs_bound(r, 0, start, (MLDSA_N >> (layer - 1)) * MLDSA_Q))
      invariant(array_abs_bound(r, start, j, (MLDSA_N >> (layer - 1)) * MLDSA_Q))
      invariant(array_abs_bound(r, j, start + len, (MLDSA_N >> layer) * MLDSA_Q))
      invariant(array_abs_bound(r, start + len, MLDSA_N, (MLDSA_N >> layer) * MLDSA_Q))
      decreases(start + len - j))
    {
      /* GS butterfly: (r[j], r[j+len]) <- (r[j]+r[j+len],
       * zeta*(r[j]-r[j+len])) in Montgomery arithmetic. */
      int32_t t = r[j];
      r[j] = t + r[j + len];
      r[j + len] = t - r[j + len];
      r[j + len] = mld_fqmul(r[j + len], zeta);
    }
  }
}
|
|
391
|
+
|
|
392
|
+
/* Portable C inverse NTT with output in Montgomery domain.
 * Applies the 8 inverse layers (bounds grow from Q to 256*Q), then
 * rescales every coefficient by mont/256 so the final result is
 * bounded by MLD_INTT_BOUND in absolute value. */
MLD_STATIC_TESTABLE void mld_poly_invntt_tomont_c(mld_poly *a)
__contract__(
  requires(memory_no_alias(a, sizeof(mld_poly)))
  requires(array_abs_bound(a->coeffs, 0, MLDSA_N, MLDSA_Q))
  assigns(memory_slice(a, sizeof(mld_poly)))
  ensures(array_abs_bound(a->coeffs, 0, MLDSA_N, MLD_INTT_BOUND))
)
{
  unsigned int layer, j;
  int32_t *r;

  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q);

  r = a->coeffs;
  for (layer = 8; layer >= 1; layer--)
  __loop__(
    invariant(layer <= 8)
    /* Absolute bounds increase from 1Q before layer 8 */
    /* up to 256Q after layer 1 */
    invariant(array_abs_bound(r, 0, MLDSA_N, (MLDSA_N >> layer) * MLDSA_Q))
    decreases(layer))
  {
    mld_invntt_layer(r, layer);
  }

  /* Coefficient bounds are now at 256Q. We now scale by mont / 256,
   * i.e., compute the Montgomery multiplication by mont^2 / 256.
   * mont corrects the mont^-1 factor introduced in the basemul.
   * 1/256 performs that scaling of the inverse NTT.
   * The reduced value is bounded by MLD_INTT_BOUND in absolute
   * value.*/
  for (j = 0; j < MLDSA_N; ++j)
  __loop__(
    invariant(j <= MLDSA_N)
    invariant(array_abs_bound(r, 0, j, MLD_INTT_BOUND))
    invariant(array_abs_bound(r, j, MLDSA_N, MLDSA_N * MLDSA_Q))
    decreases(MLDSA_N - j)
  )
  {
    r[j] = mld_fqscale(r[j]);
  }

  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_INTT_BOUND);
}
|
|
436
|
+
|
|
437
|
+
|
|
438
|
+
/* Inverse NTT dispatcher: tries the platform-native backend first
 * (when compiled in) and falls back to the portable C implementation
 * if the backend is unavailable or declines at runtime. */
MLD_INTERNAL_API
void mld_poly_invntt_tomont(mld_poly *a)
{
#if defined(MLD_USE_NATIVE_INTT)
  int ret;
  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q);
  ret = mld_intt_native(a->coeffs);
  if (ret == MLD_NATIVE_FUNC_SUCCESS)
  {
    mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_INTT_BOUND);
    return;
  }
#endif /* MLD_USE_NATIVE_INTT */
  mld_poly_invntt_tomont_c(a);
}
|
|
453
|
+
|
|
454
|
+
/* Portable C pointwise (coefficient-wise) multiplication of two
 * polynomials in NTT domain, with Montgomery reduction:
 *   c[i] = a[i] * b[i] * mont^-1 mod MLDSA_Q.
 * Inputs must be bounded by MLD_NTT_BOUND in absolute value; the
 * output is bounded by MLDSA_Q. c must not alias a or b. */
MLD_STATIC_TESTABLE void mld_poly_pointwise_montgomery_c(mld_poly *c,
                                                         const mld_poly *a,
                                                         const mld_poly *b)
__contract__(
  requires(memory_no_alias(a, sizeof(mld_poly)))
  requires(memory_no_alias(b, sizeof(mld_poly)))
  requires(memory_no_alias(c, sizeof(mld_poly)))
  requires(array_abs_bound(a->coeffs, 0, MLDSA_N, MLD_NTT_BOUND))
  requires(array_abs_bound(b->coeffs, 0, MLDSA_N, MLD_NTT_BOUND))
  assigns(memory_slice(c, sizeof(mld_poly)))
  ensures(array_abs_bound(c->coeffs, 0, MLDSA_N, MLDSA_Q))
)
{
  unsigned int i;
  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_NTT_BOUND);
  mld_assert_abs_bound(b->coeffs, MLDSA_N, MLD_NTT_BOUND);

  for (i = 0; i < MLDSA_N; ++i)
  __loop__(
    invariant(i <= MLDSA_N)
    invariant(array_abs_bound(c->coeffs, 0, i, MLDSA_Q))
    decreases(MLDSA_N - i)
  )
  {
    c->coeffs[i] = mld_montgomery_reduce((int64_t)a->coeffs[i] * b->coeffs[i]);
  }
  mld_assert_abs_bound(c->coeffs, MLDSA_N, MLDSA_Q);
}
|
|
482
|
+
|
|
483
|
+
/* Pointwise-multiplication dispatcher: tries the platform-native
 * backend first (when compiled in) and falls back to the portable C
 * implementation if the backend is unavailable or declines. */
MLD_INTERNAL_API
void mld_poly_pointwise_montgomery(mld_poly *c, const mld_poly *a,
                                   const mld_poly *b)
{
#if defined(MLD_USE_NATIVE_POINTWISE_MONTGOMERY)
  int ret;
  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_NTT_BOUND);
  mld_assert_abs_bound(b->coeffs, MLDSA_N, MLD_NTT_BOUND);
  ret = mld_poly_pointwise_montgomery_native(c->coeffs, a->coeffs, b->coeffs);
  if (ret == MLD_NATIVE_FUNC_SUCCESS)
  {
    mld_assert_abs_bound(c->coeffs, MLDSA_N, MLDSA_Q);
    return;
  }
#endif /* MLD_USE_NATIVE_POINTWISE_MONTGOMERY */
  mld_poly_pointwise_montgomery_c(c, a, b);
}
|
|
500
|
+
|
|
501
|
+
/* Coefficient-wise Power2Round (FIPS 204, Algorithm 35): splits each
 * coefficient a[i] in [0, Q) into a high part a1[i] and a low part
 * a0[i] with a[i] == a1[i]*2^MLDSA_D + a0[i] and
 * a0[i] in (-2^(D-1), 2^(D-1)]. a1/a0 must not alias a. */
MLD_INTERNAL_API
void mld_poly_power2round(mld_poly *a1, mld_poly *a0, const mld_poly *a)
{
  unsigned int i;
  mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q);

  for (i = 0; i < MLDSA_N; ++i)
  __loop__(
    assigns(i, memory_slice(a0, sizeof(mld_poly)), memory_slice(a1, sizeof(mld_poly)))
    invariant(i <= MLDSA_N)
    invariant(array_bound(a0->coeffs, 0, i, -(MLD_2_POW_D/2)+1, (MLD_2_POW_D/2)+1))
    invariant(array_bound(a1->coeffs, 0, i, 0, ((MLDSA_Q - 1) / MLD_2_POW_D) + 1))
    decreases(MLDSA_N - i)
  )
  {
    mld_power2round(&a0->coeffs[i], &a1->coeffs[i], a->coeffs[i]);
  }

  mld_assert_bound(a0->coeffs, MLDSA_N, -(MLD_2_POW_D / 2) + 1,
                   (MLD_2_POW_D / 2) + 1);
  mld_assert_bound(a1->coeffs, MLDSA_N, 0, ((MLDSA_Q - 1) / MLD_2_POW_D) + 1);
}
|
|
523
|
+
|
|
524
|
+
#ifndef MLD_POLY_UNIFORM_NBLOCKS
|
|
525
|
+
#define MLD_POLY_UNIFORM_NBLOCKS \
|
|
526
|
+
((768 + MLD_STREAM128_BLOCKBYTES - 1) / MLD_STREAM128_BLOCKBYTES)
|
|
527
|
+
#endif
|
|
528
|
+
/* Reference: `mld_rej_uniform()` in the reference implementation @[REF].
 * - Our signature differs from the reference implementation
 *   in that it adds the offset and always expects the base of the
 *   target buffer. This avoids shifting the buffer base in the
 *   caller, which appears tricky to reason about. */
/* Portable C rejection sampler: consumes buf in 3-byte little-endian
 * chunks, masks to 23 bits, and accepts values < MLDSA_Q into
 * a[offset..]. Returns the new fill count (<= target). Rejection on
 * public randomness is not a timing side channel. */
MLD_STATIC_TESTABLE unsigned int mld_rej_uniform_c(int32_t *a,
                                                   unsigned int target,
                                                   unsigned int offset,
                                                   const uint8_t *buf,
                                                   unsigned int buflen)
__contract__(
  requires(offset <= target && target <= MLDSA_N)
  requires(buflen <= (MLD_POLY_UNIFORM_NBLOCKS * MLD_STREAM128_BLOCKBYTES) && buflen % 3 == 0)
  requires(memory_no_alias(a, sizeof(int32_t) * target))
  requires(memory_no_alias(buf, buflen))
  requires(array_bound(a, 0, offset, 0, MLDSA_Q))
  assigns(memory_slice(a, sizeof(int32_t) * target))
  ensures(offset <= return_value && return_value <= target)
  ensures(array_bound(a, 0, return_value, 0, MLDSA_Q))
)
{
  unsigned int ctr, pos;
  uint32_t t;
  mld_assert_bound(a, offset, 0, MLDSA_Q);

  ctr = offset;
  pos = 0;
  /* pos + 3 cannot overflow due to the assumption
     buflen <= (MLD_POLY_UNIFORM_NBLOCKS * MLD_STREAM128_BLOCKBYTES) */
  while (ctr < target && pos + 3 <= buflen)
  __loop__(
    invariant(offset <= ctr && ctr <= target && pos <= buflen)
    invariant(array_bound(a, 0, ctr, 0, MLDSA_Q))
    decreases(buflen - pos))
  {
    /* Assemble a 23-bit candidate from three bytes (little-endian). */
    t = buf[pos++];
    t |= (uint32_t)buf[pos++] << 8;
    t |= (uint32_t)buf[pos++] << 16;
    t &= 0x7FFFFF;

    if (t < MLDSA_Q)
    {
      a[ctr++] = (int32_t)t;
    }
  }

  mld_assert_bound(a, ctr, 0, MLDSA_Q);

  return ctr;
}
|
|
578
|
+
/*************************************************
 * Name:        mld_rej_uniform
 *
 * Description: Sample uniformly random coefficients in [0, MLDSA_Q-1] by
 *              performing rejection sampling on array of random bytes.
 *
 * Arguments:   - int32_t *a: pointer to output array (allocated)
 *              - unsigned int target: requested number of coefficients to
 *                sample
 *              - unsigned int offset: number of coefficients already sampled
 *              - const uint8_t *buf: array of random bytes to sample from
 *              - unsigned int buflen: length of array of random bytes (must be
 *                multiple of 3)
 *
 * Returns number of sampled coefficients. Can be smaller than len if not
 * enough random bytes were given.
 **************************************************/

/* Reference: `mld_rej_uniform()` in the reference implementation @[REF].
 * - Our signature differs from the reference implementation
 *   in that it adds the offset and always expects the base of the
 *   target buffer. This avoids shifting the buffer base in the
 *   caller, which appears tricky to reason about. */
static unsigned int mld_rej_uniform(int32_t *a, unsigned int target,
                                    unsigned int offset, const uint8_t *buf,
                                    unsigned int buflen)
__contract__(
  requires(offset <= target && target <= MLDSA_N)
  requires(buflen <= (MLD_POLY_UNIFORM_NBLOCKS * MLD_STREAM128_BLOCKBYTES) && buflen % 3 == 0)
  requires(memory_no_alias(a, sizeof(int32_t) * target))
  requires(memory_no_alias(buf, buflen))
  requires(array_bound(a, 0, offset, 0, MLDSA_Q))
  assigns(memory_slice(a, sizeof(int32_t) * target))
  ensures(offset <= return_value && return_value <= target)
  ensures(array_bound(a, 0, return_value, 0, MLDSA_Q))
)
{
#if defined(MLD_USE_NATIVE_REJ_UNIFORM)
  int ret;
  mld_assert_bound(a, offset, 0, MLDSA_Q);
  /* The native backend only supports sampling from scratch; resumed
   * sampling (offset > 0) always takes the C fallback. */
  if (offset == 0)
  {
    ret = mld_rej_uniform_native(a, target, buf, buflen);
    if (ret != MLD_NATIVE_FUNC_FALLBACK)
    {
      unsigned res = (unsigned)ret;
      mld_assert_bound(a, res, 0, MLDSA_Q);
      return res;
    }
  }
#endif /* MLD_USE_NATIVE_REJ_UNIFORM */

  return mld_rej_uniform_c(a, target, offset, buf, buflen);
}
|
|
632
|
+
|
|
633
|
+
/* Reference: poly_uniform() in the reference implementation @[REF].
 * - Simplified from reference by removing buffer tail handling
 *   since buflen % 3 = 0 always holds true (MLD_STREAM128_BLOCKBYTES
 *   = 168).
 * - Modified rej_uniform interface to track offset directly.
 * - Pass nonce packed in the extended seed array instead of a third
 *   argument.
 * */
/* Sample one uniform polynomial (RejNTTPoly, FIPS 204 Algorithm 30)
 * from SHAKE128(seed || nonce). Squeezes an initial batch of blocks,
 * then one block at a time until all MLDSA_N coefficients are set. */
MLD_INTERNAL_API
void mld_poly_uniform(mld_poly *a, const uint8_t seed[MLDSA_SEEDBYTES + 2])
{
  unsigned int ctr;
  unsigned int buflen = MLD_POLY_UNIFORM_NBLOCKS * MLD_STREAM128_BLOCKBYTES;
  MLD_ALIGN uint8_t buf[MLD_POLY_UNIFORM_NBLOCKS * MLD_STREAM128_BLOCKBYTES];
  mld_xof128_ctx state;

  mld_xof128_init(&state);
  mld_xof128_absorb_once(&state, seed, MLDSA_SEEDBYTES + 2);
  mld_xof128_squeezeblocks(buf, MLD_POLY_UNIFORM_NBLOCKS, &state);

  ctr = mld_rej_uniform(a->coeffs, MLDSA_N, 0, buf, buflen);
  /* Refill path: squeeze one block at a time until done. */
  buflen = MLD_STREAM128_BLOCKBYTES;
  while (ctr < MLDSA_N)
  __loop__(
    assigns(ctr, state, memory_slice(a, sizeof(mld_poly)), object_whole(buf))
    invariant(ctr <= MLDSA_N)
    invariant(array_bound(a->coeffs, 0, ctr, 0, MLDSA_Q))
    invariant(state.pos <= SHAKE128_RATE)
  )
  {
    mld_xof128_squeezeblocks(buf, 1, &state);
    ctr = mld_rej_uniform(a->coeffs, MLDSA_N, ctr, buf, buflen);
  }
  mld_xof128_release(&state);
  mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q);

  /* @[FIPS204, Section 3.6.3] Destruction of intermediate values. */
  mld_zeroize(buf, sizeof(buf));
}
|
|
672
|
+
|
|
673
|
+
#if !defined(MLD_CONFIG_SERIAL_FIPS202_ONLY) && !defined(MLD_CONFIG_REDUCE_RAM)
|
|
674
|
+
/* Batched variant of mld_poly_uniform: samples four uniform
 * polynomials at once using a 4-way SHAKE128 (one lane per seed).
 * Each lane tracks its own fill count; extra blocks are squeezed
 * for all lanes until every polynomial is complete. */
MLD_INTERNAL_API
void mld_poly_uniform_4x(mld_poly *vec0, mld_poly *vec1, mld_poly *vec2,
                         mld_poly *vec3,
                         uint8_t seed[4][MLD_ALIGN_UP(MLDSA_SEEDBYTES + 2)])
{
  /* Temporary buffers for XOF output before rejection sampling */
  MLD_ALIGN uint8_t
      buf[4][MLD_ALIGN_UP(MLD_POLY_UNIFORM_NBLOCKS * MLD_STREAM128_BLOCKBYTES)];

  /* Tracks the number of coefficients we have already sampled */
  unsigned ctr[4];
  mld_xof128_x4_ctx state;
  unsigned buflen;

  mld_xof128_x4_init(&state);
  mld_xof128_x4_absorb(&state, seed, MLDSA_SEEDBYTES + 2);

  /*
   * Initially, squeeze heuristic number of MLD_POLY_UNIFORM_NBLOCKS.
   * This should generate the matrix entries with high probability.
   */

  mld_xof128_x4_squeezeblocks(buf, MLD_POLY_UNIFORM_NBLOCKS, &state);
  buflen = MLD_POLY_UNIFORM_NBLOCKS * MLD_STREAM128_BLOCKBYTES;
  ctr[0] = mld_rej_uniform(vec0->coeffs, MLDSA_N, 0, buf[0], buflen);
  ctr[1] = mld_rej_uniform(vec1->coeffs, MLDSA_N, 0, buf[1], buflen);
  ctr[2] = mld_rej_uniform(vec2->coeffs, MLDSA_N, 0, buf[2], buflen);
  ctr[3] = mld_rej_uniform(vec3->coeffs, MLDSA_N, 0, buf[3], buflen);

  /*
   * So long as not all matrix entries have been generated, squeeze
   * one more block a time until we're done.
   */
  buflen = MLD_STREAM128_BLOCKBYTES;
  while (ctr[0] < MLDSA_N || ctr[1] < MLDSA_N || ctr[2] < MLDSA_N ||
         ctr[3] < MLDSA_N)
  __loop__(
    assigns(ctr, state, object_whole(buf),
      memory_slice(vec0, sizeof(mld_poly)), memory_slice(vec1, sizeof(mld_poly)),
      memory_slice(vec2, sizeof(mld_poly)), memory_slice(vec3, sizeof(mld_poly)))
    invariant(ctr[0] <= MLDSA_N && ctr[1] <= MLDSA_N)
    invariant(ctr[2] <= MLDSA_N && ctr[3] <= MLDSA_N)
    invariant(array_bound(vec0->coeffs, 0, ctr[0], 0, MLDSA_Q))
    invariant(array_bound(vec1->coeffs, 0, ctr[1], 0, MLDSA_Q))
    invariant(array_bound(vec2->coeffs, 0, ctr[2], 0, MLDSA_Q))
    invariant(array_bound(vec3->coeffs, 0, ctr[3], 0, MLDSA_Q)))
  {
    mld_xof128_x4_squeezeblocks(buf, 1, &state);
    ctr[0] = mld_rej_uniform(vec0->coeffs, MLDSA_N, ctr[0], buf[0], buflen);
    ctr[1] = mld_rej_uniform(vec1->coeffs, MLDSA_N, ctr[1], buf[1], buflen);
    ctr[2] = mld_rej_uniform(vec2->coeffs, MLDSA_N, ctr[2], buf[2], buflen);
    ctr[3] = mld_rej_uniform(vec3->coeffs, MLDSA_N, ctr[3], buf[3], buflen);
  }
  mld_xof128_x4_release(&state);

  mld_assert_bound(vec0->coeffs, MLDSA_N, 0, MLDSA_Q);
  mld_assert_bound(vec1->coeffs, MLDSA_N, 0, MLDSA_Q);
  mld_assert_bound(vec2->coeffs, MLDSA_N, 0, MLDSA_Q);
  mld_assert_bound(vec3->coeffs, MLDSA_N, 0, MLDSA_Q);

  /* @[FIPS204, Section 3.6.3] Destruction of intermediate values. */
  mld_zeroize(buf, sizeof(buf));
}
|
|
737
|
+
|
|
738
|
+
#endif /* !MLD_CONFIG_SERIAL_FIPS202_ONLY && !MLD_CONFIG_REDUCE_RAM */
|
|
739
|
+
|
|
740
|
+
/* Bit-pack t1 (SimpleBitPack, FIPS 204 Algorithm 16): serializes the
 * 256 10-bit coefficients into MLDSA_POLYT1_PACKEDBYTES bytes,
 * 4 coefficients -> 5 bytes per iteration. Coefficients must lie
 * in [0, 2^10). */
MLD_INTERNAL_API
void mld_polyt1_pack(uint8_t r[MLDSA_POLYT1_PACKEDBYTES], const mld_poly *a)
{
  unsigned int i;
  mld_assert_bound(a->coeffs, MLDSA_N, 0, 1 << 10);

  for (i = 0; i < MLDSA_N / 4; ++i)
  __loop__(
    invariant(i <= MLDSA_N/4)
    decreases(MLDSA_N / 4 - i))
  {
    r[5 * i + 0] = (uint8_t)((a->coeffs[4 * i + 0] >> 0) & 0xFF);
    r[5 * i + 1] =
        (uint8_t)(((a->coeffs[4 * i + 0] >> 8) | (a->coeffs[4 * i + 1] << 2)) &
                  0xFF);
    r[5 * i + 2] =
        (uint8_t)(((a->coeffs[4 * i + 1] >> 6) | (a->coeffs[4 * i + 2] << 4)) &
                  0xFF);
    r[5 * i + 3] =
        (uint8_t)(((a->coeffs[4 * i + 2] >> 4) | (a->coeffs[4 * i + 3] << 6)) &
                  0xFF);
    r[5 * i + 4] = (uint8_t)((a->coeffs[4 * i + 3] >> 2) & 0xFF);
  }
}
|
|
764
|
+
|
|
765
|
+
/* Unpack t1 (inverse of mld_polyt1_pack): deserializes 10-bit
 * coefficients, 5 bytes -> 4 coefficients per iteration. The 0x3FF
 * mask guarantees every output lies in [0, 2^10). */
MLD_INTERNAL_API
void mld_polyt1_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYT1_PACKEDBYTES])
{
  unsigned int i;

  for (i = 0; i < MLDSA_N / 4; ++i)
  __loop__(
    invariant(i <= MLDSA_N/4)
    invariant(array_bound(r->coeffs, 0, i*4, 0, 1 << 10))
    decreases(MLDSA_N / 4 - i))
  {
    r->coeffs[4 * i + 0] =
        ((a[5 * i + 0] >> 0) | ((int32_t)a[5 * i + 1] << 8)) & 0x3FF;
    r->coeffs[4 * i + 1] =
        ((a[5 * i + 1] >> 2) | ((int32_t)a[5 * i + 2] << 6)) & 0x3FF;
    r->coeffs[4 * i + 2] =
        ((a[5 * i + 2] >> 4) | ((int32_t)a[5 * i + 3] << 4)) & 0x3FF;
    r->coeffs[4 * i + 3] =
        ((a[5 * i + 3] >> 6) | ((int32_t)a[5 * i + 4] << 2)) & 0x3FF;
  }

  mld_assert_bound(r->coeffs, MLDSA_N, 0, 1 << 10);
}
|
|
788
|
+
|
|
789
|
+
/* Bit-pack t0 (BitPack, FIPS 204 Algorithm 17): maps each 13-bit
 * signed coefficient x in (-2^(D-1), 2^(D-1)] to the non-negative
 * value 2^(D-1) - x, then serializes 8 coefficients into 13 bytes
 * per iteration. */
MLD_INTERNAL_API
void mld_polyt0_pack(uint8_t r[MLDSA_POLYT0_PACKEDBYTES], const mld_poly *a)
{
  unsigned int i;
  uint32_t t[8];

  mld_assert_bound(a->coeffs, MLDSA_N, -(1 << (MLDSA_D - 1)) + 1,
                   (1 << (MLDSA_D - 1)) + 1);

  for (i = 0; i < MLDSA_N / 8; ++i)
  __loop__(
    invariant(i <= MLDSA_N/8)
    decreases(MLDSA_N / 8 - i))
  {
    /* Safety: a->coeffs[i] <= (1 << (MLDSA_D - 1) as they are output of
     * power2round, hence, these casts are safe. */
    t[0] = (uint32_t)((1 << (MLDSA_D - 1)) - a->coeffs[8 * i + 0]);
    t[1] = (uint32_t)((1 << (MLDSA_D - 1)) - a->coeffs[8 * i + 1]);
    t[2] = (uint32_t)((1 << (MLDSA_D - 1)) - a->coeffs[8 * i + 2]);
    t[3] = (uint32_t)((1 << (MLDSA_D - 1)) - a->coeffs[8 * i + 3]);
    t[4] = (uint32_t)((1 << (MLDSA_D - 1)) - a->coeffs[8 * i + 4]);
    t[5] = (uint32_t)((1 << (MLDSA_D - 1)) - a->coeffs[8 * i + 5]);
    t[6] = (uint32_t)((1 << (MLDSA_D - 1)) - a->coeffs[8 * i + 6]);
    t[7] = (uint32_t)((1 << (MLDSA_D - 1)) - a->coeffs[8 * i + 7]);

    /* Pack eight 13-bit values into 13 bytes, little-endian bit order. */
    r[13 * i + 0] = (uint8_t)((t[0]) & 0xFF);
    r[13 * i + 1] = (uint8_t)((t[0] >> 8) & 0xFF);
    r[13 * i + 1] |= (uint8_t)((t[1] << 5) & 0xFF);
    r[13 * i + 2] = (uint8_t)((t[1] >> 3) & 0xFF);
    r[13 * i + 3] = (uint8_t)((t[1] >> 11) & 0xFF);
    r[13 * i + 3] |= (uint8_t)((t[2] << 2) & 0xFF);
    r[13 * i + 4] = (uint8_t)((t[2] >> 6) & 0xFF);
    r[13 * i + 4] |= (uint8_t)((t[3] << 7) & 0xFF);
    r[13 * i + 5] = (uint8_t)((t[3] >> 1) & 0xFF);
    r[13 * i + 6] = (uint8_t)((t[3] >> 9) & 0xFF);
    r[13 * i + 6] |= (uint8_t)((t[4] << 4) & 0xFF);
    r[13 * i + 7] = (uint8_t)((t[4] >> 4) & 0xFF);
    r[13 * i + 8] = (uint8_t)((t[4] >> 12) & 0xFF);
    r[13 * i + 8] |= (uint8_t)((t[5] << 1) & 0xFF);
    r[13 * i + 9] = (uint8_t)((t[5] >> 7) & 0xFF);
    r[13 * i + 9] |= (uint8_t)((t[6] << 6) & 0xFF);
    r[13 * i + 10] = (uint8_t)((t[6] >> 2) & 0xFF);
    r[13 * i + 11] = (uint8_t)((t[6] >> 10) & 0xFF);
    r[13 * i + 11] |= (uint8_t)((t[7] << 3) & 0xFF);
    r[13 * i + 12] = (uint8_t)((t[7] >> 5) & 0xFF);
  }
}
|
|
836
|
+
|
|
837
|
+
MLD_INTERNAL_API
|
|
838
|
+
void mld_polyt0_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYT0_PACKEDBYTES])
|
|
839
|
+
{
|
|
840
|
+
unsigned int i;
|
|
841
|
+
|
|
842
|
+
for (i = 0; i < MLDSA_N / 8; ++i)
|
|
843
|
+
__loop__(
|
|
844
|
+
invariant(i <= MLDSA_N/8)
|
|
845
|
+
invariant(array_bound(r->coeffs, 0, i*8, -(1<<(MLDSA_D-1)) + 1, (1<<(MLDSA_D-1)) + 1))
|
|
846
|
+
decreases(MLDSA_N / 8 - i))
|
|
847
|
+
{
|
|
848
|
+
r->coeffs[8 * i + 0] = a[13 * i + 0];
|
|
849
|
+
r->coeffs[8 * i + 0] |= (int32_t)a[13 * i + 1] << 8;
|
|
850
|
+
r->coeffs[8 * i + 0] &= 0x1FFF;
|
|
851
|
+
|
|
852
|
+
r->coeffs[8 * i + 1] = a[13 * i + 1] >> 5;
|
|
853
|
+
r->coeffs[8 * i + 1] |= (int32_t)a[13 * i + 2] << 3;
|
|
854
|
+
r->coeffs[8 * i + 1] |= (int32_t)a[13 * i + 3] << 11;
|
|
855
|
+
r->coeffs[8 * i + 1] &= 0x1FFF;
|
|
856
|
+
|
|
857
|
+
r->coeffs[8 * i + 2] = a[13 * i + 3] >> 2;
|
|
858
|
+
r->coeffs[8 * i + 2] |= (int32_t)a[13 * i + 4] << 6;
|
|
859
|
+
r->coeffs[8 * i + 2] &= 0x1FFF;
|
|
860
|
+
|
|
861
|
+
r->coeffs[8 * i + 3] = a[13 * i + 4] >> 7;
|
|
862
|
+
r->coeffs[8 * i + 3] |= (int32_t)a[13 * i + 5] << 1;
|
|
863
|
+
r->coeffs[8 * i + 3] |= (int32_t)a[13 * i + 6] << 9;
|
|
864
|
+
r->coeffs[8 * i + 3] &= 0x1FFF;
|
|
865
|
+
|
|
866
|
+
r->coeffs[8 * i + 4] = a[13 * i + 6] >> 4;
|
|
867
|
+
r->coeffs[8 * i + 4] |= (int32_t)a[13 * i + 7] << 4;
|
|
868
|
+
r->coeffs[8 * i + 4] |= (int32_t)a[13 * i + 8] << 12;
|
|
869
|
+
r->coeffs[8 * i + 4] &= 0x1FFF;
|
|
870
|
+
|
|
871
|
+
r->coeffs[8 * i + 5] = a[13 * i + 8] >> 1;
|
|
872
|
+
r->coeffs[8 * i + 5] |= (int32_t)a[13 * i + 9] << 7;
|
|
873
|
+
r->coeffs[8 * i + 5] &= 0x1FFF;
|
|
874
|
+
|
|
875
|
+
r->coeffs[8 * i + 6] = a[13 * i + 9] >> 6;
|
|
876
|
+
r->coeffs[8 * i + 6] |= (int32_t)a[13 * i + 10] << 2;
|
|
877
|
+
r->coeffs[8 * i + 6] |= (int32_t)a[13 * i + 11] << 10;
|
|
878
|
+
r->coeffs[8 * i + 6] &= 0x1FFF;
|
|
879
|
+
|
|
880
|
+
r->coeffs[8 * i + 7] = a[13 * i + 11] >> 3;
|
|
881
|
+
r->coeffs[8 * i + 7] |= (int32_t)a[13 * i + 12] << 5;
|
|
882
|
+
r->coeffs[8 * i + 7] &= 0x1FFF;
|
|
883
|
+
|
|
884
|
+
r->coeffs[8 * i + 0] = (1 << (MLDSA_D - 1)) - r->coeffs[8 * i + 0];
|
|
885
|
+
r->coeffs[8 * i + 1] = (1 << (MLDSA_D - 1)) - r->coeffs[8 * i + 1];
|
|
886
|
+
r->coeffs[8 * i + 2] = (1 << (MLDSA_D - 1)) - r->coeffs[8 * i + 2];
|
|
887
|
+
r->coeffs[8 * i + 3] = (1 << (MLDSA_D - 1)) - r->coeffs[8 * i + 3];
|
|
888
|
+
r->coeffs[8 * i + 4] = (1 << (MLDSA_D - 1)) - r->coeffs[8 * i + 4];
|
|
889
|
+
r->coeffs[8 * i + 5] = (1 << (MLDSA_D - 1)) - r->coeffs[8 * i + 5];
|
|
890
|
+
r->coeffs[8 * i + 6] = (1 << (MLDSA_D - 1)) - r->coeffs[8 * i + 6];
|
|
891
|
+
r->coeffs[8 * i + 7] = (1 << (MLDSA_D - 1)) - r->coeffs[8 * i + 7];
|
|
892
|
+
}
|
|
893
|
+
|
|
894
|
+
mld_assert_bound(r->coeffs, MLDSA_N, -(1 << (MLDSA_D - 1)) + 1,
|
|
895
|
+
(1 << (MLDSA_D - 1)) + 1);
|
|
896
|
+
}
|
|
897
|
+
|
|
898
|
+
MLD_STATIC_TESTABLE uint32_t mld_poly_chknorm_c(const mld_poly *a, int32_t B)
|
|
899
|
+
__contract__(
|
|
900
|
+
requires(memory_no_alias(a, sizeof(mld_poly)))
|
|
901
|
+
requires(0 <= B && B <= MLDSA_Q - MLD_REDUCE32_RANGE_MAX)
|
|
902
|
+
requires(array_bound(a->coeffs, 0, MLDSA_N, -MLD_REDUCE32_RANGE_MAX, MLD_REDUCE32_RANGE_MAX))
|
|
903
|
+
ensures(return_value == 0 || return_value == 0xFFFFFFFF)
|
|
904
|
+
ensures((return_value == 0) == array_abs_bound(a->coeffs, 0, MLDSA_N, B))
|
|
905
|
+
)
|
|
906
|
+
{
|
|
907
|
+
unsigned int i;
|
|
908
|
+
uint32_t t = 0;
|
|
909
|
+
mld_assert_bound(a->coeffs, MLDSA_N, -MLD_REDUCE32_RANGE_MAX,
|
|
910
|
+
MLD_REDUCE32_RANGE_MAX);
|
|
911
|
+
for (i = 0; i < MLDSA_N; ++i)
|
|
912
|
+
__loop__(
|
|
913
|
+
invariant(i <= MLDSA_N)
|
|
914
|
+
invariant(t == 0 || t == 0xFFFFFFFF)
|
|
915
|
+
invariant((t == 0) == array_abs_bound(a->coeffs, 0, i, B))
|
|
916
|
+
decreases(MLDSA_N - i)
|
|
917
|
+
)
|
|
918
|
+
{
|
|
919
|
+
/*
|
|
920
|
+
* Since we know that -MLD_REDUCE32_RANGE_MAX <= a < MLD_REDUCE32_RANGE_MAX,
|
|
921
|
+
* and B <= MLDSA_Q - MLD_REDUCE32_RANGE_MAX, to check if
|
|
922
|
+
* -B < (a mod± MLDSA_Q) < B, it suffices to check if -B < a < B.
|
|
923
|
+
*
|
|
924
|
+
* We prove this to be true using the following CBMC assertions.
|
|
925
|
+
* a ==> b expressed as !a || b to also allow run-time assertion.
|
|
926
|
+
*/
|
|
927
|
+
mld_assert(a->coeffs[i] < B || a->coeffs[i] - MLDSA_Q <= -B);
|
|
928
|
+
mld_assert(a->coeffs[i] > -B || a->coeffs[i] + MLDSA_Q >= B);
|
|
929
|
+
|
|
930
|
+
/* Reference: Leaks which coefficient violates the bound via a conditional.
|
|
931
|
+
* We are more conservative to reduce the number of declassifications in
|
|
932
|
+
* constant-time testing.
|
|
933
|
+
*/
|
|
934
|
+
|
|
935
|
+
/* if (abs(a[i]) >= B) */
|
|
936
|
+
t |= mld_ct_cmask_neg_i32(B - 1 - mld_ct_abs_i32(a->coeffs[i]));
|
|
937
|
+
}
|
|
938
|
+
|
|
939
|
+
return t;
|
|
940
|
+
}
|
|
941
|
+
|
|
942
|
+
/* Reference: explicitly checks the bound B to be <= (MLDSA_Q - 1) / 8).
|
|
943
|
+
* This is unnecessary as it's always a compile-time constant.
|
|
944
|
+
* We instead model it as a precondition.
|
|
945
|
+
* Checking the bound is performed using a conditional arguing
|
|
946
|
+
* that it is okay to leak which coefficient violates the bound (while the
|
|
947
|
+
* coefficient itself must remain secret).
|
|
948
|
+
* We instead perform everything in constant-time.
|
|
949
|
+
* Also it is sufficient to check that it is smaller than
|
|
950
|
+
* MLDSA_Q - MLD_REDUCE32_RANGE_MAX > (MLDSA_Q - 1) / 8).
|
|
951
|
+
*/
|
|
952
|
+
MLD_INTERNAL_API
|
|
953
|
+
uint32_t mld_poly_chknorm(const mld_poly *a, int32_t B)
|
|
954
|
+
{
|
|
955
|
+
#if defined(MLD_USE_NATIVE_POLY_CHKNORM)
|
|
956
|
+
int ret;
|
|
957
|
+
int success;
|
|
958
|
+
mld_assert_bound(a->coeffs, MLDSA_N, -MLD_REDUCE32_RANGE_MAX,
|
|
959
|
+
MLD_REDUCE32_RANGE_MAX);
|
|
960
|
+
/* The native backend returns 0 if all coefficients are within the bound,
|
|
961
|
+
* 1 if at least one coefficient exceeds the bound, and
|
|
962
|
+
* -1 (MLD_NATIVE_FUNC_FALLBACK) if the platform does not have the
|
|
963
|
+
* required capabilities to run the native function.
|
|
964
|
+
*/
|
|
965
|
+
ret = mld_poly_chknorm_native(a->coeffs, B);
|
|
966
|
+
|
|
967
|
+
success = (ret != MLD_NATIVE_FUNC_FALLBACK);
|
|
968
|
+
/* Constant-time: It would be fine to leak the return value of chknorm
|
|
969
|
+
* entirely (as it is fine to leak if any coefficient exceeded the bound or
|
|
970
|
+
* not). However, it is cleaner to perform declassification in sign.c.
|
|
971
|
+
* Hence, here we only declassify if the native function returned
|
|
972
|
+
* MLD_NATIVE_FUNC_FALLBACK or not (which solely depends on system
|
|
973
|
+
* capabilities).
|
|
974
|
+
*/
|
|
975
|
+
MLD_CT_TESTING_DECLASSIFY(&success, sizeof(int));
|
|
976
|
+
if (success)
|
|
977
|
+
{
|
|
978
|
+
/* Convert 0 / 1 to 0 / 0xFFFFFFFF here */
|
|
979
|
+
return mld_ct_cmask_nonzero_u32((uint32_t)ret);
|
|
980
|
+
}
|
|
981
|
+
#endif /* MLD_USE_NATIVE_POLY_CHKNORM */
|
|
982
|
+
return mld_poly_chknorm_c(a, B);
|
|
983
|
+
}
|
|
984
|
+
|
|
985
|
+
#else /* !MLD_CONFIG_MULTILEVEL_NO_SHARED */
|
|
986
|
+
MLD_EMPTY_CU(mld_poly)
|
|
987
|
+
#endif /* MLD_CONFIG_MULTILEVEL_NO_SHARED */
|
|
988
|
+
|
|
989
|
+
/* To facilitate single-compilation-unit (SCU) builds, undefine all macros.
|
|
990
|
+
* Don't modify by hand -- this is auto-generated by scripts/autogen. */
|
|
991
|
+
#undef MLD_POLY_UNIFORM_NBLOCKS
|