pq_crypto 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/workflows/ci.yml +37 -0
- data/CHANGELOG.md +29 -0
- data/GET_STARTED.md +65 -0
- data/LICENSE.txt +21 -0
- data/README.md +135 -0
- data/SECURITY.md +57 -0
- data/ext/pqcrypto/extconf.rb +157 -0
- data/ext/pqcrypto/mldsa_api.h +51 -0
- data/ext/pqcrypto/mlkem_api.h +21 -0
- data/ext/pqcrypto/pqcrypto_ruby_secure.c +889 -0
- data/ext/pqcrypto/pqcrypto_secure.c +1178 -0
- data/ext/pqcrypto/pqcrypto_secure.h +135 -0
- data/ext/pqcrypto/vendor/.vendored +5 -0
- data/ext/pqcrypto/vendor/pqclean/common/aes.c +639 -0
- data/ext/pqcrypto/vendor/pqclean/common/aes.h +64 -0
- data/ext/pqcrypto/vendor/pqclean/common/compat.h +73 -0
- data/ext/pqcrypto/vendor/pqclean/common/crypto_declassify.h +7 -0
- data/ext/pqcrypto/vendor/pqclean/common/fips202.c +928 -0
- data/ext/pqcrypto/vendor/pqclean/common/fips202.h +166 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak2x/feat.S +168 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak2x/fips202x2.c +684 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak2x/fips202x2.h +60 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/KeccakP-1600-times4-SIMD256.c +1028 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/KeccakP-1600-times4-SnP.h +50 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/KeccakP-1600-unrolling.macros +198 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/Makefile +8 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/Makefile.Microsoft_nmake +8 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/SIMD256-config.h +3 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/align.h +34 -0
- data/ext/pqcrypto/vendor/pqclean/common/keccak4x/brg_endian.h +142 -0
- data/ext/pqcrypto/vendor/pqclean/common/nistseedexpander.c +101 -0
- data/ext/pqcrypto/vendor/pqclean/common/nistseedexpander.h +39 -0
- data/ext/pqcrypto/vendor/pqclean/common/randombytes.c +355 -0
- data/ext/pqcrypto/vendor/pqclean/common/randombytes.h +27 -0
- data/ext/pqcrypto/vendor/pqclean/common/sha2.c +769 -0
- data/ext/pqcrypto/vendor/pqclean/common/sha2.h +173 -0
- data/ext/pqcrypto/vendor/pqclean/common/sp800-185.c +156 -0
- data/ext/pqcrypto/vendor/pqclean/common/sp800-185.h +27 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/LICENSE +5 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/Makefile +19 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/Makefile.Microsoft_nmake +23 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/api.h +18 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/cbd.c +83 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/cbd.h +11 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/indcpa.c +327 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/indcpa.h +22 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/kem.c +164 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/kem.h +23 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/ntt.c +146 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/ntt.h +14 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/params.h +36 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/poly.c +299 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/poly.h +37 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/polyvec.c +188 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/polyvec.h +26 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/reduce.c +41 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/reduce.h +13 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/symmetric-shake.c +71 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/symmetric.h +30 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/verify.c +67 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_kem/ml-kem-768/clean/verify.h +13 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/LICENSE +5 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/Makefile +19 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/Makefile.Microsoft_nmake +23 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/api.h +50 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/ntt.c +98 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/ntt.h +10 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/packing.c +261 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/packing.h +31 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/params.h +44 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/poly.c +799 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/poly.h +52 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/polyvec.c +415 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/polyvec.h +65 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/reduce.c +69 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/reduce.h +17 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/rounding.c +92 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/rounding.h +14 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/sign.c +407 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/sign.h +47 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/symmetric-shake.c +26 -0
- data/ext/pqcrypto/vendor/pqclean/crypto_sign/ml-dsa-65/clean/symmetric.h +34 -0
- data/lib/pq_crypto/errors.rb +10 -0
- data/lib/pq_crypto/hybrid_kem.rb +106 -0
- data/lib/pq_crypto/kem.rb +199 -0
- data/lib/pq_crypto/serialization.rb +102 -0
- data/lib/pq_crypto/signature.rb +198 -0
- data/lib/pq_crypto/version.rb +5 -0
- data/lib/pq_crypto.rb +177 -0
- data/lib/pqcrypto.rb +3 -0
- data/script/vendor_libs.rb +199 -0
- metadata +195 -0
|
@@ -0,0 +1,639 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* AES implementation based on code from BearSSL (https://bearssl.org/)
|
|
3
|
+
* by Thomas Pornin.
|
|
4
|
+
*
|
|
5
|
+
*
|
|
6
|
+
* Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
|
|
7
|
+
*
|
|
8
|
+
* Permission is hereby granted, free of charge, to any person obtaining
|
|
9
|
+
* a copy of this software and associated documentation files (the
|
|
10
|
+
* "Software"), to deal in the Software without restriction, including
|
|
11
|
+
* without limitation the rights to use, copy, modify, merge, publish,
|
|
12
|
+
* distribute, sublicense, and/or sell copies of the Software, and to
|
|
13
|
+
* permit persons to whom the Software is furnished to do so, subject to
|
|
14
|
+
* the following conditions:
|
|
15
|
+
*
|
|
16
|
+
* The above copyright notice and this permission notice shall be
|
|
17
|
+
* included in all copies or substantial portions of the Software.
|
|
18
|
+
*
|
|
19
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
20
|
+
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
21
|
+
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
22
|
+
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
23
|
+
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
24
|
+
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
25
|
+
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
26
|
+
* SOFTWARE.
|
|
27
|
+
*/
|
|
28
|
+
|
|
29
|
+
#include <stdint.h>
|
|
30
|
+
#include <string.h>
|
|
31
|
+
|
|
32
|
+
#include "aes.h"
|
|
33
|
+
|
|
34
|
+
/* Decode a 32-bit unsigned integer from 4 bytes in little-endian order. */
static inline uint32_t br_dec32le(const unsigned char *src) {
    uint32_t w = 0;
    int i;

    /* Fold bytes in from the most significant end down. */
    for (i = 3; i >= 0; i--) {
        w = (w << 8) | (uint32_t)src[i];
    }
    return w;
}
|
|
40
|
+
|
|
41
|
+
/* Decode `num` consecutive 32-bit little-endian words from `src` into `v`. */
static void br_range_dec32le(uint32_t *v, size_t num, const unsigned char *src) {
    size_t i;

    for (i = 0; i < num; i++) {
        const unsigned char *p = src + 4 * i;
        v[i] = (uint32_t)p[0]
               | ((uint32_t)p[1] << 8)
               | ((uint32_t)p[2] << 16)
               | ((uint32_t)p[3] << 24);
    }
}
|
|
47
|
+
|
|
48
|
+
/* Reverse the byte order of a 32-bit word (endianness swap). */
static inline uint32_t br_swap32(uint32_t x) {
    return ((x & (uint32_t)0x000000FF) << 24)
           | ((x & (uint32_t)0x0000FF00) << 8)
           | ((x & (uint32_t)0x00FF0000) >> 8)
           | (x >> 24);
}
|
|
53
|
+
|
|
54
|
+
/* Encode a 32-bit unsigned integer into 4 bytes in little-endian order. */
static inline void br_enc32le(unsigned char *dst, uint32_t x) {
    int i;

    for (i = 0; i < 4; i++) {
        dst[i] = (unsigned char)(x >> (8 * i));
    }
}
|
|
60
|
+
|
|
61
|
+
/* Encode `num` 32-bit words from `v` into `dst` in little-endian order. */
static void br_range_enc32le(unsigned char *dst, const uint32_t *v, size_t num) {
    size_t i;

    for (i = 0; i < num; i++) {
        uint32_t x = v[i];
        dst[4 * i + 0] = (unsigned char)x;
        dst[4 * i + 1] = (unsigned char)(x >> 8);
        dst[4 * i + 2] = (unsigned char)(x >> 16);
        dst[4 * i + 3] = (unsigned char)(x >> 24);
    }
}
|
|
67
|
+
|
|
68
|
+
/*
 * Constant-time AES S-box over 8 bitsliced 64-bit words.
 *
 * q[0..7] hold bit-plane 0..7 of many AES state bytes at once (bitsliced
 * representation produced by br_aes_ct64_ortho).  The S-box is applied to
 * all bytes simultaneously using only AND/XOR/NOT, so there are no
 * secret-dependent table lookups or branches — this is the constant-time
 * property of the whole implementation, and the exact gate order below
 * must not be rearranged casually.
 */
static void br_aes_ct64_bitslice_Sbox(uint64_t *q) {
    /*
     * This S-box implementation is a straightforward translation of
     * the circuit described by Boyar and Peralta in "A new
     * combinational logic minimization technique with applications
     * to cryptology" (https://eprint.iacr.org/2009/191.pdf).
     *
     * Note that variables x* (input) and s* (output) are numbered
     * in "reverse" order (x0 is the high bit, x7 is the low bit).
     */

    uint64_t x0, x1, x2, x3, x4, x5, x6, x7;
    uint64_t y1, y2, y3, y4, y5, y6, y7, y8, y9;
    uint64_t y10, y11, y12, y13, y14, y15, y16, y17, y18, y19;
    uint64_t y20, y21;
    uint64_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9;
    uint64_t z10, z11, z12, z13, z14, z15, z16, z17;
    uint64_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
    uint64_t t10, t11, t12, t13, t14, t15, t16, t17, t18, t19;
    uint64_t t20, t21, t22, t23, t24, t25, t26, t27, t28, t29;
    uint64_t t30, t31, t32, t33, t34, t35, t36, t37, t38, t39;
    uint64_t t40, t41, t42, t43, t44, t45, t46, t47, t48, t49;
    uint64_t t50, t51, t52, t53, t54, t55, t56, t57, t58, t59;
    uint64_t t60, t61, t62, t63, t64, t65, t66, t67;
    uint64_t s0, s1, s2, s3, s4, s5, s6, s7;

    /* Load bit planes; reversed indexing per the circuit's convention. */
    x0 = q[7];
    x1 = q[6];
    x2 = q[5];
    x3 = q[4];
    x4 = q[3];
    x5 = q[2];
    x6 = q[1];
    x7 = q[0];

    /*
     * Top linear transformation.
     */
    y14 = x3 ^ x5;
    y13 = x0 ^ x6;
    y9 = x0 ^ x3;
    y8 = x0 ^ x5;
    t0 = x1 ^ x2;
    y1 = t0 ^ x7;
    y4 = y1 ^ x3;
    y12 = y13 ^ y14;
    y2 = y1 ^ x0;
    y5 = y1 ^ x6;
    y3 = y5 ^ y8;
    t1 = x4 ^ y12;
    y15 = t1 ^ x5;
    y20 = t1 ^ x1;
    y6 = y15 ^ x7;
    y10 = y15 ^ t0;
    y11 = y20 ^ y9;
    y7 = x7 ^ y11;
    y17 = y10 ^ y11;
    y19 = y10 ^ y8;
    y16 = t0 ^ y11;
    y21 = y13 ^ y16;
    y18 = x0 ^ y16;

    /*
     * Non-linear section.
     */
    t2 = y12 & y15;
    t3 = y3 & y6;
    t4 = t3 ^ t2;
    t5 = y4 & x7;
    t6 = t5 ^ t2;
    t7 = y13 & y16;
    t8 = y5 & y1;
    t9 = t8 ^ t7;
    t10 = y2 & y7;
    t11 = t10 ^ t7;
    t12 = y9 & y11;
    t13 = y14 & y17;
    t14 = t13 ^ t12;
    t15 = y8 & y10;
    t16 = t15 ^ t12;
    t17 = t4 ^ t14;
    t18 = t6 ^ t16;
    t19 = t9 ^ t14;
    t20 = t11 ^ t16;
    t21 = t17 ^ y20;
    t22 = t18 ^ y19;
    t23 = t19 ^ y21;
    t24 = t20 ^ y18;

    t25 = t21 ^ t22;
    t26 = t21 & t23;
    t27 = t24 ^ t26;
    t28 = t25 & t27;
    t29 = t28 ^ t22;
    t30 = t23 ^ t24;
    t31 = t22 ^ t26;
    t32 = t31 & t30;
    t33 = t32 ^ t24;
    t34 = t23 ^ t33;
    t35 = t27 ^ t33;
    t36 = t24 & t35;
    t37 = t36 ^ t34;
    t38 = t27 ^ t36;
    t39 = t29 & t38;
    t40 = t25 ^ t39;

    t41 = t40 ^ t37;
    t42 = t29 ^ t33;
    t43 = t29 ^ t40;
    t44 = t33 ^ t37;
    t45 = t42 ^ t41;
    z0 = t44 & y15;
    z1 = t37 & y6;
    z2 = t33 & x7;
    z3 = t43 & y16;
    z4 = t40 & y1;
    z5 = t29 & y7;
    z6 = t42 & y11;
    z7 = t45 & y17;
    z8 = t41 & y10;
    z9 = t44 & y12;
    z10 = t37 & y3;
    z11 = t33 & y4;
    z12 = t43 & y13;
    z13 = t40 & y5;
    z14 = t29 & y2;
    z15 = t42 & y9;
    z16 = t45 & y14;
    z17 = t41 & y8;

    /*
     * Bottom linear transformation.
     */
    t46 = z15 ^ z16;
    t47 = z10 ^ z11;
    t48 = z5 ^ z13;
    t49 = z9 ^ z10;
    t50 = z2 ^ z12;
    t51 = z2 ^ z5;
    t52 = z7 ^ z8;
    t53 = z0 ^ z3;
    t54 = z6 ^ z7;
    t55 = z16 ^ z17;
    t56 = z12 ^ t48;
    t57 = t50 ^ t53;
    t58 = z4 ^ t46;
    t59 = z3 ^ t54;
    t60 = t46 ^ t57;
    t61 = z14 ^ t57;
    t62 = t52 ^ t58;
    t63 = t49 ^ t58;
    t64 = z4 ^ t59;
    t65 = t61 ^ t62;
    t66 = z1 ^ t63;
    s0 = t59 ^ t63;
    s6 = t56 ^ ~t62;
    s7 = t48 ^ ~t60;
    t67 = t64 ^ t65;
    s3 = t53 ^ t66;
    s4 = t51 ^ t66;
    s5 = t47 ^ t65;
    s1 = t64 ^ ~s3;
    s2 = t55 ^ ~t67;

    /* Store results back, again in reversed bit-plane order. */
    q[7] = s0;
    q[6] = s1;
    q[5] = s2;
    q[4] = s3;
    q[3] = s4;
    q[2] = s5;
    q[1] = s6;
    q[0] = s7;
}
|
|
241
|
+
|
|
242
|
+
/*
 * Orthogonalize q[0..7]: an 8x8 bit-matrix transpose applied across the
 * eight 64-bit words, converting between byte-per-lane layout and the
 * bitsliced (one bit plane per word) layout.  The transform is its own
 * inverse, so calling it twice restores the original layout.
 */
static void br_aes_ct64_ortho(uint64_t *q) {
/* Swap bit groups of width `s` between x and y: cl selects the bits of x
 * that stay, ch the bits of y that stay. */
#define SWAPN(cl, ch, s, x, y) do { \
        uint64_t a, b; \
        a = (x); \
        b = (y); \
        (x) = (a & (uint64_t)(cl)) | ((b & (uint64_t)(cl)) << (s)); \
        (y) = ((a & (uint64_t)(ch)) >> (s)) | (b & (uint64_t)(ch)); \
    } while (0)

#define SWAP2(x, y)    SWAPN(0x5555555555555555, 0xAAAAAAAAAAAAAAAA,  1, x, y)
#define SWAP4(x, y)    SWAPN(0x3333333333333333, 0xCCCCCCCCCCCCCCCC,  2, x, y)
#define SWAP8(x, y)    SWAPN(0x0F0F0F0F0F0F0F0F, 0xF0F0F0F0F0F0F0F0,  4, x, y)

    /* Three rounds of butterfly swaps = full 8x8 transpose. */
    SWAP2(q[0], q[1]);
    SWAP2(q[2], q[3]);
    SWAP2(q[4], q[5]);
    SWAP2(q[6], q[7]);

    SWAP4(q[0], q[2]);
    SWAP4(q[1], q[3]);
    SWAP4(q[4], q[6]);
    SWAP4(q[5], q[7]);

    SWAP8(q[0], q[4]);
    SWAP8(q[1], q[5]);
    SWAP8(q[2], q[6]);
    SWAP8(q[3], q[7]);
}
|
|
270
|
+
|
|
271
|
+
/*
 * Interleave four 32-bit words (one AES block) into two 64-bit words
 * *q0/*q1 by spreading each byte into alternating byte positions.
 * Inverse of br_aes_ct64_interleave_out.
 */
static void br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t *w) {
    uint64_t x0, x1, x2, x3;

    x0 = w[0];
    x1 = w[1];
    x2 = w[2];
    x3 = w[3];
    /* Spread each 32-bit value over 64 bits: first 16-bit halves... */
    x0 |= (x0 << 16);
    x1 |= (x1 << 16);
    x2 |= (x2 << 16);
    x3 |= (x3 << 16);
    x0 &= (uint64_t)0x0000FFFF0000FFFF;
    x1 &= (uint64_t)0x0000FFFF0000FFFF;
    x2 &= (uint64_t)0x0000FFFF0000FFFF;
    x3 &= (uint64_t)0x0000FFFF0000FFFF;
    /* ...then bytes, leaving each source byte in an even byte slot. */
    x0 |= (x0 << 8);
    x1 |= (x1 << 8);
    x2 |= (x2 << 8);
    x3 |= (x3 << 8);
    x0 &= (uint64_t)0x00FF00FF00FF00FF;
    x1 &= (uint64_t)0x00FF00FF00FF00FF;
    x2 &= (uint64_t)0x00FF00FF00FF00FF;
    x3 &= (uint64_t)0x00FF00FF00FF00FF;
    /* Merge pairs: w0/w2 bytes alternate in *q0, w1/w3 bytes in *q1. */
    *q0 = x0 | (x2 << 8);
    *q1 = x1 | (x3 << 8);
}
|
|
297
|
+
|
|
298
|
+
/*
 * De-interleave two 64-bit words back into four 32-bit words (one AES
 * block).  Exact inverse of br_aes_ct64_interleave_in.
 */
static void br_aes_ct64_interleave_out(uint32_t *w, uint64_t q0, uint64_t q1) {
    uint64_t x0, x1, x2, x3;

    /* Split alternating bytes back into four spread-out values. */
    x0 = q0 & (uint64_t)0x00FF00FF00FF00FF;
    x1 = q1 & (uint64_t)0x00FF00FF00FF00FF;
    x2 = (q0 >> 8) & (uint64_t)0x00FF00FF00FF00FF;
    x3 = (q1 >> 8) & (uint64_t)0x00FF00FF00FF00FF;
    /* Compact: bytes -> 16-bit halves... */
    x0 |= (x0 >> 8);
    x1 |= (x1 >> 8);
    x2 |= (x2 >> 8);
    x3 |= (x3 >> 8);
    x0 &= (uint64_t)0x0000FFFF0000FFFF;
    x1 &= (uint64_t)0x0000FFFF0000FFFF;
    x2 &= (uint64_t)0x0000FFFF0000FFFF;
    x3 &= (uint64_t)0x0000FFFF0000FFFF;
    /* ...then halves -> full 32-bit words. */
    w[0] = (uint32_t)x0 | (uint32_t)(x0 >> 16);
    w[1] = (uint32_t)x1 | (uint32_t)(x1 >> 16);
    w[2] = (uint32_t)x2 | (uint32_t)(x2 >> 16);
    w[3] = (uint32_t)x3 | (uint32_t)(x3 >> 16);
}
|
|
318
|
+
|
|
319
|
+
/* AES key-schedule round constants (FIPS 197): successive powers of x in
 * GF(2^8); 10 entries cover the maximum needed for any key size. */
static const unsigned char Rcon[] = {
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
};
|
|
322
|
+
|
|
323
|
+
/*
 * Apply the AES S-box to each byte of a 32-bit word (SubWord from the
 * key schedule), using the bitsliced S-box so the lookup stays
 * constant-time.  Only lane 0 of the bitsliced state carries data; the
 * other lanes are zeroed.
 */
static uint32_t sub_word(uint32_t x) {
    uint64_t q[8];

    memset(q, 0, sizeof q);
    q[0] = x;
    /* ortho -> Sbox -> ortho round-trips the layout (ortho is an involution). */
    br_aes_ct64_ortho(q);
    br_aes_ct64_bitslice_Sbox(q);
    br_aes_ct64_ortho(q);
    return (uint32_t)q[0];
}
|
|
333
|
+
|
|
334
|
+
/*
 * AES key schedule (FIPS 197 KeyExpansion) in constant time, then
 * compression into the ct64 format.
 *
 * comp_skey receives 2 words per round key ((nrounds + 1) * 2 entries);
 * key_len is 16, 24 or 32 bytes, from which the round count is derived
 * (10/12/14).  skey[60] is sized for the largest case (AES-256:
 * 15 round keys * 4 words).
 */
static void br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key, unsigned int key_len) {
    unsigned int i, j, k, nk, nkf;
    uint32_t tmp;
    uint32_t skey[60];
    /* 16 -> 10 rounds, 24 -> 12, 32 -> 14. */
    unsigned nrounds = 10 + ((key_len - 16) >> 2);

    nk = (key_len >> 2);          /* key length in 32-bit words */
    nkf = ((nrounds + 1) << 2);   /* total schedule words */
    br_range_dec32le(skey, (key_len >> 2), key);
    tmp = skey[(key_len >> 2) - 1];
    for (i = nk, j = 0, k = 0; i < nkf; i ++) {
        if (j == 0) {
            /* RotWord then SubWord ^ Rcon at each nk boundary. */
            tmp = (tmp << 24) | (tmp >> 8);
            tmp = sub_word(tmp) ^ Rcon[k];
        } else if (nk > 6 && j == 4) {
            /* AES-256 only: extra SubWord mid-block. */
            tmp = sub_word(tmp);
        }
        tmp ^= skey[i - nk];
        skey[i] = tmp;
        if (++ j == nk) {
            j = 0;
            k ++;
        }
    }

    /* Compress each round key (4 words) into 2 bitsliced words by
     * keeping every 4th bit from each of 4 duplicated lanes. */
    for (i = 0, j = 0; i < nkf; i += 4, j += 2) {
        uint64_t q[8];

        br_aes_ct64_interleave_in(&q[0], &q[4], skey + i);
        q[1] = q[0];
        q[2] = q[0];
        q[3] = q[0];
        q[5] = q[4];
        q[6] = q[4];
        q[7] = q[4];
        br_aes_ct64_ortho(q);
        comp_skey[j + 0] =
            (q[0] & (uint64_t)0x1111111111111111)
            | (q[1] & (uint64_t)0x2222222222222222)
            | (q[2] & (uint64_t)0x4444444444444444)
            | (q[3] & (uint64_t)0x8888888888888888);
        comp_skey[j + 1] =
            (q[4] & (uint64_t)0x1111111111111111)
            | (q[5] & (uint64_t)0x2222222222222222)
            | (q[6] & (uint64_t)0x4444444444444444)
            | (q[7] & (uint64_t)0x8888888888888888);
    }
}
|
|
382
|
+
|
|
383
|
+
/*
 * Expand the compressed key schedule (2 words per round key) back into
 * the full bitsliced schedule (8 words per round key) used by the
 * encryption rounds.  skey must hold (nrounds + 1) * 8 words.
 */
static void br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, unsigned int nrounds) {
    unsigned u, v, n;

    n = (nrounds + 1) << 1;
    for (u = 0, v = 0; u < n; u ++, v += 4) {
        uint64_t x0, x1, x2, x3;

        /* Pull out every 4th bit (four interleaved bit streams). */
        x0 = x1 = x2 = x3 = comp_skey[u];
        x0 &= (uint64_t)0x1111111111111111;
        x1 &= (uint64_t)0x2222222222222222;
        x2 &= (uint64_t)0x4444444444444444;
        x3 &= (uint64_t)0x8888888888888888;
        x1 >>= 1;
        x2 >>= 2;
        x3 >>= 3;
        /* (x << 4) - x replicates each kept bit into its whole nibble
         * (multiply by 0b1111), rebuilding the duplicated lanes. */
        skey[v + 0] = (x0 << 4) - x0;
        skey[v + 1] = (x1 << 4) - x1;
        skey[v + 2] = (x2 << 4) - x2;
        skey[v + 3] = (x3 << 4) - x3;
    }
}
|
|
404
|
+
|
|
405
|
+
/* AddRoundKey: XOR one bitsliced round key (8 words) into the state. */
static inline void add_round_key(uint64_t *q, const uint64_t *sk) {
    int i;

    for (i = 0; i < 8; i++) {
        q[i] ^= sk[i];
    }
}
|
|
415
|
+
|
|
416
|
+
/*
 * ShiftRows on the bitsliced state: within each 64-bit bit-plane, each
 * 16-bit quarter holds one AES row; the masked shifts rotate row 1 by 1
 * byte, row 2 by 2, row 3 by 3 (expressed as 4-bit moves within the
 * interleaved layout).  The same permutation applies to every plane.
 */
static inline void shift_rows(uint64_t *q) {
    int i;

    for (i = 0; i < 8; i ++) {
        uint64_t x;

        x = q[i];
        q[i] = (x & (uint64_t)0x000000000000FFFF)
               | ((x & (uint64_t)0x00000000FFF00000) >> 4)
               | ((x & (uint64_t)0x00000000000F0000) << 12)
               | ((x & (uint64_t)0x0000FF0000000000) >> 8)
               | ((x & (uint64_t)0x000000FF00000000) << 8)
               | ((x & (uint64_t)0xF000000000000000) >> 12)
               | ((x & (uint64_t)0x0FFF000000000000) << 4);
    }
}
|
|
432
|
+
|
|
433
|
+
/* Rotate a 64-bit word by 32 bits (swap its two 32-bit halves). */
static inline uint64_t rotr32(uint64_t x) {
    uint64_t hi = x >> 32;
    uint64_t lo = x & (uint64_t)0xFFFFFFFF;

    return (lo << 32) | hi;
}
|
|
436
|
+
|
|
437
|
+
/*
 * MixColumns on the bitsliced state.  r* are the state words rotated by
 * 16 bits (one row position); the per-plane XOR combinations implement
 * multiplication by {02} and {03} in GF(2^8): a shift to the next bit
 * plane is *2, and planes 0/1/3/4 pick up the reduction-polynomial
 * (0x11B) feedback from plane 7.  rotr32 addresses the other two row
 * rotations packed in the same word.
 */
static inline void mix_columns(uint64_t *q) {
    uint64_t q0, q1, q2, q3, q4, q5, q6, q7;
    uint64_t r0, r1, r2, r3, r4, r5, r6, r7;

    q0 = q[0];
    q1 = q[1];
    q2 = q[2];
    q3 = q[3];
    q4 = q[4];
    q5 = q[5];
    q6 = q[6];
    q7 = q[7];
    /* Rotate each plane by 16 bits = advance one row position. */
    r0 = (q0 >> 16) | (q0 << 48);
    r1 = (q1 >> 16) | (q1 << 48);
    r2 = (q2 >> 16) | (q2 << 48);
    r3 = (q3 >> 16) | (q3 << 48);
    r4 = (q4 >> 16) | (q4 << 48);
    r5 = (q5 >> 16) | (q5 << 48);
    r6 = (q6 >> 16) | (q6 << 48);
    r7 = (q7 >> 16) | (q7 << 48);

    q[0] = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0);
    q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1);
    q[2] = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2);
    q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3);
    q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4);
    q[5] = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5);
    q[6] = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6);
    q[7] = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7);
}
|
|
467
|
+
|
|
468
|
+
/* Add 4 to a 32-bit counter stored in big-endian byte order. */
static void inc4_be(uint32_t *x) {
    uint32_t be = *x;
    uint32_t host;

    /* Byte-swap to host order, bump by 4, swap back. */
    host = ((be & (uint32_t)0x000000FF) << 24)
           | ((be & (uint32_t)0x0000FF00) << 8)
           | ((be & (uint32_t)0x00FF0000) >> 8)
           | (be >> 24);
    host += 4;
    *x = ((host & (uint32_t)0x000000FF) << 24)
         | ((host & (uint32_t)0x0000FF00) << 8)
         | ((host & (uint32_t)0x00FF0000) >> 8)
         | (host >> 24);
}
|
|
472
|
+
|
|
473
|
+
/*
 * Encrypt four 16-byte blocks in parallel.  ivw holds the 4 plaintext
 * blocks as 16 little-endian 32-bit words; out receives 64 ciphertext
 * bytes.  sk_exp is the expanded bitsliced schedule, nrounds 10/12/14.
 */
static void aes_ecb4x(unsigned char out[64], const uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) {
    uint32_t w[16];
    uint64_t q[8];
    unsigned int i;

    /* Pack 4 blocks into the bitsliced state. */
    memcpy(w, ivw, sizeof(w));
    for (i = 0; i < 4; i++) {
        br_aes_ct64_interleave_in(&q[i], &q[i + 4], w + (i << 2));
    }
    br_aes_ct64_ortho(q);

    /* Initial whitening, then nrounds-1 full rounds. */
    add_round_key(q, sk_exp);
    for (i = 1; i < nrounds; i++) {
        br_aes_ct64_bitslice_Sbox(q);
        shift_rows(q);
        mix_columns(q);
        add_round_key(q, sk_exp + (i << 3));
    }
    /* Final round omits MixColumns (FIPS 197). */
    br_aes_ct64_bitslice_Sbox(q);
    shift_rows(q);
    add_round_key(q, sk_exp + 8 * nrounds);

    /* Unpack back to bytes. */
    br_aes_ct64_ortho(q);
    for (i = 0; i < 4; i ++) {
        br_aes_ct64_interleave_out(w + (i << 2), q[i], q[i + 4]);
    }
    br_range_enc32le(out, w, 16);
}
|
|
501
|
+
|
|
502
|
+
/*
 * Produce 64 bytes of CTR keystream from 4 counter blocks in ivw, then
 * advance each block's big-endian counter (last word of each block) by 4
 * so the next call yields the next 4 blocks.
 */
static void aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) {
    aes_ecb4x(out, ivw, sk_exp, nrounds);

    /* Increase counter for next 4 blocks */
    inc4_be(ivw + 3);
    inc4_be(ivw + 7);
    inc4_be(ivw + 11);
    inc4_be(ivw + 15);
}
|
|
511
|
+
|
|
512
|
+
/*
 * ECB-encrypt nblocks 16-byte blocks, 4 at a time, with a staging buffer
 * for the final partial batch.
 */
static void aes_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const uint64_t *rkeys, unsigned int nrounds) {
    uint32_t blocks[16];
    unsigned char t[64];

    while (nblocks >= 4) {
        br_range_dec32le(blocks, 16, in);
        aes_ecb4x(out, blocks, rkeys, nrounds);
        nblocks -= 4;
        in += 64;
        out += 64;
    }

    if (nblocks) {
        /* NOTE(review): only nblocks*4 of the 16 words are loaded here,
         * yet aes_ecb4x reads all 16; the unused lanes carry
         * indeterminate values whose output is then discarded by the
         * partial memcpy.  Matches upstream, but worth confirming. */
        br_range_dec32le(blocks, nblocks * 4, in);
        aes_ecb4x(t, blocks, rkeys, nrounds);
        memcpy(out, t, nblocks * 16);
    }
}
|
|
530
|
+
|
|
531
|
+
/*
 * Generate outlen bytes of AES-CTR keystream.  iv is a 12-byte nonce;
 * the 4th word of each block is a big-endian counter starting at 0.
 * Four counter blocks are kept in flight; the tail (< = 64 bytes) is
 * produced into a stack buffer and copied out.
 */
static void aes_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const uint64_t *rkeys, unsigned int nrounds) {
    uint32_t ivw[16];
    size_t i;
    uint32_t cc = 0;

    /* Replicate the nonce into all 4 block slots, counters 0..3. */
    br_range_dec32le(ivw, 3, iv);
    memcpy(ivw + 4, ivw, 3 * sizeof(uint32_t));
    memcpy(ivw + 8, ivw, 3 * sizeof(uint32_t));
    memcpy(ivw + 12, ivw, 3 * sizeof(uint32_t));
    ivw[ 3] = br_swap32(cc);
    ivw[ 7] = br_swap32(cc + 1);
    ivw[11] = br_swap32(cc + 2);
    ivw[15] = br_swap32(cc + 3);

    /* Note the strict >: an exact multiple of 64 takes the tail path,
     * which is correct (just copies all 64 bytes from tmp). */
    while (outlen > 64) {
        aes_ctr4x(out, ivw, rkeys, nrounds);
        out += 64;
        outlen -= 64;
    }
    if (outlen > 0) {
        unsigned char tmp[64];
        aes_ctr4x(tmp, ivw, rkeys, nrounds);
        for (i = 0; i < outlen; i++) {
            out[i] = tmp[i];
        }
    }
}
|
|
558
|
+
|
|
559
|
+
/*
 * Expand a 16-byte AES-128 key into a heap-allocated schedule inside r.
 * The caller must release it with aes128_ctx_release.  On allocation
 * failure the process exits with status 111 (library convention: no
 * error-return path in this API).
 */
void aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key) {
    uint64_t skey[22]; /* compressed schedule: 2 words x 11 round keys */

    r->sk_exp = malloc(sizeof(uint64_t) * PQC_AES128_STATESIZE);
    if (r->sk_exp == NULL) {
        exit(111);
    }

    br_aes_ct64_keysched(skey, key, 16);
    br_aes_ct64_skey_expand(r->sk_exp, skey, 10);
}
|
|
570
|
+
|
|
571
|
+
/* CTR-mode key expansion is identical to ECB's (same schedule). */
void aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key) {
    aes128_ecb_keyexp(r, key);
}
|
|
574
|
+
|
|
575
|
+
/*
 * Expand a 24-byte AES-192 key into a heap-allocated schedule inside r.
 * Caller releases with aes192_ctx_release; exits(111) on OOM.
 */
void aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key) {
    uint64_t skey[26]; /* compressed schedule: 2 words x 13 round keys */
    r->sk_exp = malloc(sizeof(uint64_t) * PQC_AES192_STATESIZE);
    if (r->sk_exp == NULL) {
        exit(111);
    }

    br_aes_ct64_keysched(skey, key, 24);
    br_aes_ct64_skey_expand(r->sk_exp, skey, 12);
}
|
|
585
|
+
|
|
586
|
+
/* CTR-mode key expansion is identical to ECB's (same schedule). */
void aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key) {
    aes192_ecb_keyexp(r, key);
}
|
|
589
|
+
|
|
590
|
+
/*
 * Expand a 32-byte AES-256 key into a heap-allocated schedule inside r.
 * Caller releases with aes256_ctx_release; exits(111) on OOM.
 */
void aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key) {
    uint64_t skey[30]; /* compressed schedule: 2 words x 15 round keys */
    r->sk_exp = malloc(sizeof(uint64_t) * PQC_AES256_STATESIZE);
    if (r->sk_exp == NULL) {
        exit(111);
    }

    br_aes_ct64_keysched(skey, key, 32);
    br_aes_ct64_skey_expand(r->sk_exp, skey, 14);
}
|
|
600
|
+
|
|
601
|
+
/* CTR-mode key expansion is identical to ECB's (same schedule). */
void aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key) {
    aes256_ecb_keyexp(r, key);
}
|
|
604
|
+
|
|
605
|
+
/* AES-128 ECB: encrypt nblocks 16-byte blocks (10 rounds). */
void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx) {
    aes_ecb(out, in, nblocks, ctx->sk_exp, 10);
}
|
|
608
|
+
|
|
609
|
+
/* AES-128 CTR: write outlen keystream bytes for the 12-byte nonce iv. */
void aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx) {
    aes_ctr(out, outlen, iv, ctx->sk_exp, 10);
}
|
|
612
|
+
|
|
613
|
+
/* AES-192 ECB: encrypt nblocks 16-byte blocks (12 rounds). */
void aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx) {
    aes_ecb(out, in, nblocks, ctx->sk_exp, 12);
}
|
|
616
|
+
|
|
617
|
+
/* AES-192 CTR: write outlen keystream bytes for the 12-byte nonce iv. */
void aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx) {
    aes_ctr(out, outlen, iv, ctx->sk_exp, 12);
}
|
|
620
|
+
|
|
621
|
+
/* AES-256 ECB: encrypt nblocks 16-byte blocks (14 rounds). */
void aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx) {
    aes_ecb(out, in, nblocks, ctx->sk_exp, 14);
}
|
|
624
|
+
|
|
625
|
+
/* AES-256 CTR: write outlen keystream bytes for the 12-byte nonce iv. */
void aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx) {
    aes_ctr(out, outlen, iv, ctx->sk_exp, 14);
}
|
|
628
|
+
|
|
629
|
+
/*
 * Release the heap-allocated key schedule of an AES-128 context.
 * free(NULL) is a no-op, so releasing an unused context is safe.
 */
void aes128_ctx_release(aes128ctx *r) {
    free(r->sk_exp);
    /* Null the pointer so an accidental double release or use after
     * release fails safely instead of being undefined behavior. */
    r->sk_exp = NULL;
}
|
|
632
|
+
|
|
633
|
+
/*
 * Release the heap-allocated key schedule of an AES-192 context.
 * free(NULL) is a no-op, so releasing an unused context is safe.
 */
void aes192_ctx_release(aes192ctx *r) {
    free(r->sk_exp);
    /* Null the pointer so an accidental double release or use after
     * release fails safely instead of being undefined behavior. */
    r->sk_exp = NULL;
}
|
|
636
|
+
|
|
637
|
+
/*
 * Release the heap-allocated key schedule of an AES-256 context.
 * free(NULL) is a no-op, so releasing an unused context is safe.
 */
void aes256_ctx_release(aes256ctx *r) {
    free(r->sk_exp);
    /* Null the pointer so an accidental double release or use after
     * release fails safely instead of being undefined behavior. */
    r->sk_exp = NULL;
}
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
#ifndef AES_H
#define AES_H

#include <stdint.h>
#include <stdlib.h>

/* Key sizes in bytes for each AES variant. */
#define AES128_KEYBYTES 16
#define AES192_KEYBYTES 24
#define AES256_KEYBYTES 32
/* CTR mode takes a 12-byte nonce; the remaining 4 bytes of the counter
 * block are a big-endian block counter starting at 0. */
#define AESCTR_NONCEBYTES 12
#define AES_BLOCKBYTES 16

// We've put these states on the heap to make sure ctx_release is used.
/* State sizes are in 64-bit words: 8 words per round key times
 * (rounds + 1) round keys -> 8*11, 8*13, 8*15. */
#define PQC_AES128_STATESIZE 88
typedef struct {
    uint64_t *sk_exp; /* heap-allocated expanded key schedule */
} aes128ctx;

#define PQC_AES192_STATESIZE 104
typedef struct {
    uint64_t *sk_exp; /* heap-allocated expanded key schedule */
} aes192ctx;

#define PQC_AES256_STATESIZE 120
typedef struct {
    uint64_t *sk_exp; /* heap-allocated expanded key schedule */
} aes256ctx;

/** Initializes the context **/
void aes128_ecb_keyexp(aes128ctx *r, const unsigned char *key);

void aes128_ctr_keyexp(aes128ctx *r, const unsigned char *key);

/* ECB: `in`/`out` are nblocks * 16 bytes. */
void aes128_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes128ctx *ctx);

/* CTR keystream: writes outlen bytes; iv is AESCTR_NONCEBYTES long. */
void aes128_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes128ctx *ctx);

/** Frees the context **/
void aes128_ctx_release(aes128ctx *r);

/** Initializes the context **/
void aes192_ecb_keyexp(aes192ctx *r, const unsigned char *key);

void aes192_ctr_keyexp(aes192ctx *r, const unsigned char *key);

void aes192_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes192ctx *ctx);

void aes192_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes192ctx *ctx);

void aes192_ctx_release(aes192ctx *r);

/** Initializes the context **/
void aes256_ecb_keyexp(aes256ctx *r, const unsigned char *key);

void aes256_ctr_keyexp(aes256ctx *r, const unsigned char *key);

void aes256_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const aes256ctx *ctx);

void aes256_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const aes256ctx *ctx);

/** Frees the context **/
void aes256_ctx_release(aes256ctx *r);

#endif
|