digest-blake3 0.0.1

#include "blake3_impl.h"

#include <immintrin.h>

#define DEGREE 4

#define _mm_shuffle_ps2(a, b, c)                                              \
  (_mm_castps_si128(                                                          \
      _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), (c))))

INLINE __m128i loadu(const uint8_t src[16]) {
  return _mm_loadu_si128((const __m128i *)src);
}

INLINE void storeu(__m128i src, uint8_t dest[16]) {
  _mm_storeu_si128((__m128i *)dest, src);
}

INLINE __m128i addv(__m128i a, __m128i b) { return _mm_add_epi32(a, b); }

// clang-format mis-handles the name "xor", because "xor" is an alternative
// operator spelling in C++ (and a macro in <iso646.h>), hence "xorv".
INLINE __m128i xorv(__m128i a, __m128i b) { return _mm_xor_si128(a, b); }

INLINE __m128i set1(uint32_t x) { return _mm_set1_epi32((int32_t)x); }

INLINE __m128i set4(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  return _mm_setr_epi32((int32_t)a, (int32_t)b, (int32_t)c, (int32_t)d);
}

INLINE __m128i rot16(__m128i x) {
  return _mm_shuffle_epi8(
      x, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2));
}

INLINE __m128i rot12(__m128i x) {
  return xorv(_mm_srli_epi32(x, 12), _mm_slli_epi32(x, 32 - 12));
}

INLINE __m128i rot8(__m128i x) {
  return _mm_shuffle_epi8(
      x, _mm_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1));
}

INLINE __m128i rot7(__m128i x) {
  return xorv(_mm_srli_epi32(x, 7), _mm_slli_epi32(x, 32 - 7));
}
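
// The four rotation amounts (16, 12, 8, 7) come from the BLAKE3 G function.
// Rotations by 16 and 8 move whole bytes, so one _mm_shuffle_epi8 byte
// shuffle rotates all four lanes at once; 12 and 7 are not byte-aligned and
// fall back to the generic shift/shift/xor idiom. Each helper is the
// lane-wise form of the scalar rotate (a reference sketch for comparison,
// not a function in this file):
//
//   uint32_t rotr32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }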

INLINE void g1(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
               __m128i m) {
  *row0 = addv(addv(*row0, m), *row1);
  *row3 = xorv(*row3, *row0);
  *row3 = rot16(*row3);
  *row2 = addv(*row2, *row3);
  *row1 = xorv(*row1, *row2);
  *row1 = rot12(*row1);
}

INLINE void g2(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
               __m128i m) {
  *row0 = addv(addv(*row0, m), *row1);
  *row3 = xorv(*row3, *row0);
  *row3 = rot8(*row3);
  *row2 = addv(*row2, *row3);
  *row1 = xorv(*row1, *row2);
  *row1 = rot7(*row1);
}
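
// g1 and g2 are the two halves of the BLAKE3 G function, vectorized so that
// one call mixes all four columns (or diagonals) of the 4x4 state at once.
// For reference, the scalar G on state words a, b, c, d with message words
// mx, my is (sketch, not part of this file):
//
//   a += b + mx; d = rotr32(d ^ a, 16);
//   c += d;      b = rotr32(b ^ c, 12);   // <- g1
//   a += b + my; d = rotr32(d ^ a, 8);
//   c += d;      b = rotr32(b ^ c, 7);    // <- g2
//
// Here each __m128i row holds four state words, so the adds, xors, and
// rotates above happen lane-wise across the whole state.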

// Note the optimization here of leaving row1 as the unrotated row, rather than
// row0. All the message loads below are adjusted to compensate for this. See
// discussion at https://github.com/sneves/blake2-avx2/pull/4
INLINE void diagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
  *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(2, 1, 0, 3));
  *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
  *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(0, 3, 2, 1));
}

INLINE void undiagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
  *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(0, 3, 2, 1));
  *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
  *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(2, 1, 0, 3));
}
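
// Between the column step (g1/g2 on the rows as-is) and the diagonal step,
// the rows are rotated so that each diagonal of the 4x4 state lines up in a
// column. With row1 left in place (see the note above), row0 is rotated by
// one lane, row3 by two, and row2 by three (equivalently, one lane the other
// way); undiagonalize applies the inverse shuffles.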

INLINE void compress_pre(__m128i rows[4], const uint32_t cv[8],
                         const uint8_t block[BLAKE3_BLOCK_LEN],
                         uint8_t block_len, uint64_t counter, uint8_t flags) {
  rows[0] = loadu((uint8_t *)&cv[0]);
  rows[1] = loadu((uint8_t *)&cv[4]);
  rows[2] = set4(IV[0], IV[1], IV[2], IV[3]);
  rows[3] = set4(counter_low(counter), counter_high(counter),
                 (uint32_t)block_len, (uint32_t)flags);

  __m128i m0 = loadu(&block[sizeof(__m128i) * 0]);
  __m128i m1 = loadu(&block[sizeof(__m128i) * 1]);
  __m128i m2 = loadu(&block[sizeof(__m128i) * 2]);
  __m128i m3 = loadu(&block[sizeof(__m128i) * 3]);

  __m128i t0, t1, t2, t3, tt;

  // Round 1. The first round permutes the message words from the original
  // input order, into the groups that get mixed in parallel.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(2, 0, 2, 0)); //  6  4  2  0
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 3, 1)); //  7  5  3  1
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(2, 0, 2, 0)); // 14 12 10  8
  t2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2, 1, 0, 3));   // 12 10  8 14
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 1, 3, 1)); // 15 13 11  9
  t3 = _mm_shuffle_epi32(t3, _MM_SHUFFLE(2, 1, 0, 3));   // 13 11  9 15
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;
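
  // After round 1 the message words live in t0..t3 in mixed order, and every
  // later round applies the same fixed BLAKE3 word permutation
  //   [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8]
  // to the previous round's words. That is why rounds 2 through 7 below are
  // byte-for-byte identical: their shuffles encode one application of that
  // permutation.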

  // Round 2. This round and all following rounds apply a fixed permutation
  // to the message words from the round before.
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 3
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 4
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 5
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 6
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
  m0 = t0;
  m1 = t1;
  m2 = t2;
  m3 = t3;

  // Round 7
  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
  t1 = _mm_blend_epi16(tt, t1, 0xCC);
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
  diagonalize(&rows[0], &rows[2], &rows[3]);
  t2 = _mm_unpacklo_epi64(m3, m1);
  tt = _mm_blend_epi16(t2, m2, 0xC0);
  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
  t3 = _mm_unpackhi_epi32(m1, m3);
  tt = _mm_unpacklo_epi32(m2, t3);
  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
  undiagonalize(&rows[0], &rows[2], &rows[3]);
}
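
// compress_pre runs all 7 BLAKE3 rounds but stops before the final
// feed-forward, leaving the full 16-word state in rows[0..3]. The two
// callers below finish it differently: the in-place variant only needs the
// first half of the output, while the XOF variant needs all 64 bytes.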

void blake3_compress_in_place_sse41(uint32_t cv[8],
                                    const uint8_t block[BLAKE3_BLOCK_LEN],
                                    uint8_t block_len, uint64_t counter,
                                    uint8_t flags) {
  __m128i rows[4];
  compress_pre(rows, cv, block, block_len, counter, flags);
  storeu(xorv(rows[0], rows[2]), (uint8_t *)&cv[0]);
  storeu(xorv(rows[1], rows[3]), (uint8_t *)&cv[4]);
}
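
// The new chaining value is the xor of the top and bottom halves of the
// state; BLAKE3 drops BLAKE2's extra xor with the input cv for these first
// eight output words.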

void blake3_compress_xof_sse41(const uint32_t cv[8],
                               const uint8_t block[BLAKE3_BLOCK_LEN],
                               uint8_t block_len, uint64_t counter,
                               uint8_t flags, uint8_t out[64]) {
  __m128i rows[4];
  compress_pre(rows, cv, block, block_len, counter, flags);
  storeu(xorv(rows[0], rows[2]), &out[0]);
  storeu(xorv(rows[1], rows[3]), &out[16]);
  storeu(xorv(rows[2], loadu((uint8_t *)&cv[0])), &out[32]);
  storeu(xorv(rows[3], loadu((uint8_t *)&cv[4])), &out[48]);
}
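
// For extendable output, the last 32 bytes are rows[2..3] xored with the
// input cv, so each 64-byte block of XOF output costs one compression. The
// first 32 bytes match what blake3_compress_in_place_sse41 would produce.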

INLINE void round_fn(__m128i v[16], __m128i m[16], size_t r) {
  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
  v[0] = addv(v[0], v[4]);
  v[1] = addv(v[1], v[5]);
  v[2] = addv(v[2], v[6]);
  v[3] = addv(v[3], v[7]);
  v[12] = xorv(v[12], v[0]);
  v[13] = xorv(v[13], v[1]);
  v[14] = xorv(v[14], v[2]);
  v[15] = xorv(v[15], v[3]);
  v[12] = rot16(v[12]);
  v[13] = rot16(v[13]);
  v[14] = rot16(v[14]);
  v[15] = rot16(v[15]);
  v[8] = addv(v[8], v[12]);
  v[9] = addv(v[9], v[13]);
  v[10] = addv(v[10], v[14]);
  v[11] = addv(v[11], v[15]);
  v[4] = xorv(v[4], v[8]);
  v[5] = xorv(v[5], v[9]);
  v[6] = xorv(v[6], v[10]);
  v[7] = xorv(v[7], v[11]);
  v[4] = rot12(v[4]);
  v[5] = rot12(v[5]);
  v[6] = rot12(v[6]);
  v[7] = rot12(v[7]);
  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
  v[0] = addv(v[0], v[4]);
  v[1] = addv(v[1], v[5]);
  v[2] = addv(v[2], v[6]);
  v[3] = addv(v[3], v[7]);
  v[12] = xorv(v[12], v[0]);
  v[13] = xorv(v[13], v[1]);
  v[14] = xorv(v[14], v[2]);
  v[15] = xorv(v[15], v[3]);
  v[12] = rot8(v[12]);
  v[13] = rot8(v[13]);
  v[14] = rot8(v[14]);
  v[15] = rot8(v[15]);
  v[8] = addv(v[8], v[12]);
  v[9] = addv(v[9], v[13]);
  v[10] = addv(v[10], v[14]);
  v[11] = addv(v[11], v[15]);
  v[4] = xorv(v[4], v[8]);
  v[5] = xorv(v[5], v[9]);
  v[6] = xorv(v[6], v[10]);
  v[7] = xorv(v[7], v[11]);
  v[4] = rot7(v[4]);
  v[5] = rot7(v[5]);
  v[6] = rot7(v[6]);
  v[7] = rot7(v[7]);

  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
  v[0] = addv(v[0], v[5]);
  v[1] = addv(v[1], v[6]);
  v[2] = addv(v[2], v[7]);
  v[3] = addv(v[3], v[4]);
  v[15] = xorv(v[15], v[0]);
  v[12] = xorv(v[12], v[1]);
  v[13] = xorv(v[13], v[2]);
  v[14] = xorv(v[14], v[3]);
  v[15] = rot16(v[15]);
  v[12] = rot16(v[12]);
  v[13] = rot16(v[13]);
  v[14] = rot16(v[14]);
  v[10] = addv(v[10], v[15]);
  v[11] = addv(v[11], v[12]);
  v[8] = addv(v[8], v[13]);
  v[9] = addv(v[9], v[14]);
  v[5] = xorv(v[5], v[10]);
  v[6] = xorv(v[6], v[11]);
  v[7] = xorv(v[7], v[8]);
  v[4] = xorv(v[4], v[9]);
  v[5] = rot12(v[5]);
  v[6] = rot12(v[6]);
  v[7] = rot12(v[7]);
  v[4] = rot12(v[4]);
  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
  v[0] = addv(v[0], v[5]);
  v[1] = addv(v[1], v[6]);
  v[2] = addv(v[2], v[7]);
  v[3] = addv(v[3], v[4]);
  v[15] = xorv(v[15], v[0]);
  v[12] = xorv(v[12], v[1]);
  v[13] = xorv(v[13], v[2]);
  v[14] = xorv(v[14], v[3]);
  v[15] = rot8(v[15]);
  v[12] = rot8(v[12]);
  v[13] = rot8(v[13]);
  v[14] = rot8(v[14]);
  v[10] = addv(v[10], v[15]);
  v[11] = addv(v[11], v[12]);
  v[8] = addv(v[8], v[13]);
  v[9] = addv(v[9], v[14]);
  v[5] = xorv(v[5], v[10]);
  v[6] = xorv(v[6], v[11]);
  v[7] = xorv(v[7], v[8]);
  v[4] = xorv(v[4], v[9]);
  v[5] = rot7(v[5]);
  v[6] = rot7(v[6]);
  v[7] = rot7(v[7]);
  v[4] = rot7(v[4]);
}
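
// round_fn is the 4-way transposed form of the compression round: v[i]
// holds word i of four independent states, one per lane, so plain lane-wise
// adds, xors, and rotates advance all four hashes together. The first half
// is the column step; the second half is the diagonal step, expressed by
// rotating the index pattern (v[0] pairs with v[5], v[10], v[15], and so on)
// instead of shuffling lanes as the single-input code above does.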

INLINE void transpose_vecs(__m128i vecs[DEGREE]) {
  // Interleave 32-bit lanes. The low unpack is lanes 00/11 and the high is
  // 22/33. Note that this doesn't split the vector into two lanes, as the
  // AVX2 counterparts do.
  __m128i ab_01 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  __m128i ab_23 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  __m128i cd_01 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  __m128i cd_23 = _mm_unpackhi_epi32(vecs[2], vecs[3]);

  // Interleave 64-bit lanes.
  __m128i abcd_0 = _mm_unpacklo_epi64(ab_01, cd_01);
  __m128i abcd_1 = _mm_unpackhi_epi64(ab_01, cd_01);
  __m128i abcd_2 = _mm_unpacklo_epi64(ab_23, cd_23);
  __m128i abcd_3 = _mm_unpackhi_epi64(ab_23, cd_23);

  vecs[0] = abcd_0;
  vecs[1] = abcd_1;
  vecs[2] = abcd_2;
  vecs[3] = abcd_3;
}
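
// A standard 4x4 transpose in two unpack layers. Viewing the four input
// vectors as rows of a matrix of 32-bit words:
//
//   a0 a1 a2 a3        a0 b0 c0 d0
//   b0 b1 b2 b3   ->   a1 b1 c1 d1
//   c0 c1 c2 c3        a2 b2 c2 d2
//   d0 d1 d2 d3        a3 b3 c3 d3
//
// The epi32 unpacks interleave pairs of rows, and the epi64 unpacks then
// gather the matching 64-bit halves into the transposed rows.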

INLINE void transpose_msg_vecs(const uint8_t *const *inputs,
                               size_t block_offset, __m128i out[16]) {
  out[0] = loadu(&inputs[0][block_offset + 0 * sizeof(__m128i)]);
  out[1] = loadu(&inputs[1][block_offset + 0 * sizeof(__m128i)]);
  out[2] = loadu(&inputs[2][block_offset + 0 * sizeof(__m128i)]);
  out[3] = loadu(&inputs[3][block_offset + 0 * sizeof(__m128i)]);
  out[4] = loadu(&inputs[0][block_offset + 1 * sizeof(__m128i)]);
  out[5] = loadu(&inputs[1][block_offset + 1 * sizeof(__m128i)]);
  out[6] = loadu(&inputs[2][block_offset + 1 * sizeof(__m128i)]);
  out[7] = loadu(&inputs[3][block_offset + 1 * sizeof(__m128i)]);
  out[8] = loadu(&inputs[0][block_offset + 2 * sizeof(__m128i)]);
  out[9] = loadu(&inputs[1][block_offset + 2 * sizeof(__m128i)]);
  out[10] = loadu(&inputs[2][block_offset + 2 * sizeof(__m128i)]);
  out[11] = loadu(&inputs[3][block_offset + 2 * sizeof(__m128i)]);
  out[12] = loadu(&inputs[0][block_offset + 3 * sizeof(__m128i)]);
  out[13] = loadu(&inputs[1][block_offset + 3 * sizeof(__m128i)]);
  out[14] = loadu(&inputs[2][block_offset + 3 * sizeof(__m128i)]);
  out[15] = loadu(&inputs[3][block_offset + 3 * sizeof(__m128i)]);
  for (size_t i = 0; i < 4; ++i) {
    _mm_prefetch(&inputs[i][block_offset + 256], _MM_HINT_T0);
  }
  transpose_vecs(&out[0]);
  transpose_vecs(&out[4]);
  transpose_vecs(&out[8]);
  transpose_vecs(&out[12]);
}
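
// Loads one 64-byte block from each of the four inputs (16 vectors of four
// words) and transposes each group of four, so that out[i] ends up holding
// message word i from all four blocks, one per lane: the layout round_fn
// expects. The prefetch asks for data 256 bytes (four blocks) ahead; it is
// only a hint, and x86 prefetches do not fault on bad addresses.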

INLINE void load_counters(uint64_t counter, bool increment_counter,
                          __m128i *out_lo, __m128i *out_hi) {
  const __m128i mask = _mm_set1_epi32(-(int32_t)increment_counter);
  const __m128i add0 = _mm_set_epi32(3, 2, 1, 0);
  const __m128i add1 = _mm_and_si128(mask, add0);
  __m128i l = _mm_add_epi32(_mm_set1_epi32((int32_t)counter), add1);
  __m128i carry = _mm_cmpgt_epi32(_mm_xor_si128(add1, _mm_set1_epi32(0x80000000)),
                                  _mm_xor_si128(   l, _mm_set1_epi32(0x80000000)));
  __m128i h = _mm_sub_epi32(_mm_set1_epi32((int32_t)(counter >> 32)), carry);
  *out_lo = l;
  *out_hi = h;
}
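
// Builds the per-lane 64-bit counters counter+0..counter+3 (or four copies
// of counter when increment_counter is false, via the all-zero mask). SSE
// has no unsigned 32-bit compare, so the carry out of the low word is
// detected by biasing both sides with 0x80000000 and using the signed
// compare: lanes that wrapped come back as -1, and subtracting -1 bumps the
// high word. A scalar sketch of the same check (not part of this file):
//
//   uint32_t lo = (uint32_t)counter + i;
//   uint32_t hi = (uint32_t)(counter >> 32) + (lo < (uint32_t)counter);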

void blake3_hash4_sse41(const uint8_t *const *inputs, size_t blocks,
                        const uint32_t key[8], uint64_t counter,
                        bool increment_counter, uint8_t flags,
                        uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
  __m128i h_vecs[8] = {
      set1(key[0]), set1(key[1]), set1(key[2]), set1(key[3]),
      set1(key[4]), set1(key[5]), set1(key[6]), set1(key[7]),
  };
  __m128i counter_low_vec, counter_high_vec;
  load_counters(counter, increment_counter, &counter_low_vec,
                &counter_high_vec);
  uint8_t block_flags = flags | flags_start;

  for (size_t block = 0; block < blocks; block++) {
    if (block + 1 == blocks) {
      block_flags |= flags_end;
    }
    __m128i block_len_vec = set1(BLAKE3_BLOCK_LEN);
    __m128i block_flags_vec = set1(block_flags);
    __m128i msg_vecs[16];
    transpose_msg_vecs(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);

    __m128i v[16] = {
        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
        set1(IV[0]), set1(IV[1]), set1(IV[2]), set1(IV[3]),
        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
    };
    round_fn(v, msg_vecs, 0);
    round_fn(v, msg_vecs, 1);
    round_fn(v, msg_vecs, 2);
    round_fn(v, msg_vecs, 3);
    round_fn(v, msg_vecs, 4);
    round_fn(v, msg_vecs, 5);
    round_fn(v, msg_vecs, 6);
    h_vecs[0] = xorv(v[0], v[8]);
    h_vecs[1] = xorv(v[1], v[9]);
    h_vecs[2] = xorv(v[2], v[10]);
    h_vecs[3] = xorv(v[3], v[11]);
    h_vecs[4] = xorv(v[4], v[12]);
    h_vecs[5] = xorv(v[5], v[13]);
    h_vecs[6] = xorv(v[6], v[14]);
    h_vecs[7] = xorv(v[7], v[15]);

    block_flags = flags;
  }

  transpose_vecs(&h_vecs[0]);
  transpose_vecs(&h_vecs[4]);
  // The first four vecs now contain the first half of each output, and the
  // second four vecs contain the second half of each output.
  storeu(h_vecs[0], &out[0 * sizeof(__m128i)]);
  storeu(h_vecs[4], &out[1 * sizeof(__m128i)]);
  storeu(h_vecs[1], &out[2 * sizeof(__m128i)]);
  storeu(h_vecs[5], &out[3 * sizeof(__m128i)]);
  storeu(h_vecs[2], &out[4 * sizeof(__m128i)]);
  storeu(h_vecs[6], &out[5 * sizeof(__m128i)]);
  storeu(h_vecs[3], &out[6 * sizeof(__m128i)]);
  storeu(h_vecs[7], &out[7 * sizeof(__m128i)]);
}
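
// blake3_hash4_sse41 hashes four equal-length inputs in lockstep. h_vecs
// starts as eight broadcast key words; each iteration transposes the next
// block from all four inputs, runs the 7 rounds, and feeds the half-state
// xor back into h_vecs. flags_start is applied only to each input's first
// block and flags_end only to its last. The final transposes put the
// outputs back into byte order, which is why the stores interleave
// h_vecs[0]/h_vecs[4], h_vecs[1]/h_vecs[5], and so on: each 32-byte output
// straddles the two transposed groups.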

INLINE void hash_one_sse41(const uint8_t *input, size_t blocks,
                           const uint32_t key[8], uint64_t counter,
                           uint8_t flags, uint8_t flags_start,
                           uint8_t flags_end, uint8_t out[BLAKE3_OUT_LEN]) {
  uint32_t cv[8];
  memcpy(cv, key, BLAKE3_KEY_LEN);
  uint8_t block_flags = flags | flags_start;
  while (blocks > 0) {
    if (blocks == 1) {
      block_flags |= flags_end;
    }
    blake3_compress_in_place_sse41(cv, input, BLAKE3_BLOCK_LEN, counter,
                                   block_flags);
    input = &input[BLAKE3_BLOCK_LEN];
    blocks -= 1;
    block_flags = flags;
  }
  memcpy(out, cv, BLAKE3_OUT_LEN);
}
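
// Single-input fallback: iterate the one-block compression, threading the
// cv through and applying the start/end flags exactly as the 4-way path
// does, so both paths produce identical chaining values.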

void blake3_hash_many_sse41(const uint8_t *const *inputs, size_t num_inputs,
                            size_t blocks, const uint32_t key[8],
                            uint64_t counter, bool increment_counter,
                            uint8_t flags, uint8_t flags_start,
                            uint8_t flags_end, uint8_t *out) {
  while (num_inputs >= DEGREE) {
    blake3_hash4_sse41(inputs, blocks, key, counter, increment_counter, flags,
                       flags_start, flags_end, out);
    if (increment_counter) {
      counter += DEGREE;
    }
    inputs += DEGREE;
    num_inputs -= DEGREE;
    out = &out[DEGREE * BLAKE3_OUT_LEN];
  }
  while (num_inputs > 0) {
    hash_one_sse41(inputs[0], blocks, key, counter, flags, flags_start,
                   flags_end, out);
    if (increment_counter) {
      counter += 1;
    }
    inputs += 1;
    num_inputs -= 1;
    out = &out[BLAKE3_OUT_LEN];
  }
}
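
// Entry point used by the dispatch layer: batches of DEGREE (4) inputs go
// through the SIMD path, the remainder through hash_one_sse41. A minimal
// caller sketch for hashing four contiguous 1 KiB chunks (illustrative
// only; `data` and `key_words` are assumed to be set up by the caller, and
// real chunk handling in the library is more involved):
//
//   const uint8_t *chunks[4] = {data, data + 1024, data + 2048, data + 3072};
//   uint8_t cvs[4 * BLAKE3_OUT_LEN];
//   blake3_hash_many_sse41(chunks, 4, 1024 / BLAKE3_BLOCK_LEN, key_words, 0,
//                          true, 0, CHUNK_START, CHUNK_END, cvs);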