digest-blake3 0.34.0 → 0.37.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: a3db2fab1165a083a1a83d5c656c1c737d53f853de91babcb6c9c0e74ec7e23a
-  data.tar.gz: d4692ef2c6326a70ffa0cad5ed90219daa96c0940c0e9986d9ee7b4469d6b48d
+  metadata.gz: 0c5247f143c1ea592ae0a27eb47e4be05f98ba9541f4c4903760679dedc61390
+  data.tar.gz: 19f94a89b322ef99c68d782802234712602751121634bcc6ff0d8b08ee4b96f7
 SHA512:
-  metadata.gz: 7ef86ba9e54408a68179d43678d7863d1af3d51e6002315d4607e377f2a142d374f4dc0e4d5f8ddde641063d3b5e2f93214fb10274aba849eee757d5f884d854
-  data.tar.gz: e8bf900ad7eece0df62964ca7695af5c8681cbe23c3b7e6cc2af4b0ac2c1d6b3f74de987d7998f4627a5bfcb164c2bafa7ffda8bf8f96be5075ca44761aa2c23
+  metadata.gz: ec13b3884ede2244aaba21c075090c626e2d42215fb522da21b59d04f201ba4c3eff4476889991b05c6be50d723143ca6325c22a4557474f22f8cc3f10d9c119
+  data.tar.gz: f153c3747737049cf2eeca38a7c63d10250f87cb3103d9c794221100aa3a659929bf5b8e53b799e86b41543f978f1cfc92ef4900e8d2d1af533182f4d32d25a0
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    digest-blake3 (0.34.0)
+    digest-blake3 (0.37.0)
 
 GEM
   remote: https://rubygems.org/
data/README.md CHANGED
@@ -58,7 +58,7 @@ To install this gem onto your local machine, run `bundle exec rake install`. To
 
 ## Contributing
 
-Bug reports and pull requests are welcome on GitHub at https://github.com/[USERNAME]/digest-blake3.
+Bug reports and pull requests are welcome on GitHub at https://github.com/willbryant/digest-blake3.
 
 ## License
 
@@ -81,7 +81,7 @@ INLINE void output_chaining_value(const output_t *self, uint8_t cv[32]) {
   memcpy(cv_words, self->input_cv, 32);
   blake3_compress_in_place(cv_words, self->block, self->block_len,
                            self->counter, self->flags);
-  memcpy(cv, cv_words, 32);
+  store_cv_words(cv, cv_words);
 }
 
 INLINE void output_root_bytes(const output_t *self, uint64_t seek, uint8_t *out,
@@ -335,7 +335,7 @@ INLINE void compress_subtree_to_parent_node(
   assert(input_len > BLAKE3_CHUNK_LEN);
 #endif
 
-  uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
+  uint8_t cv_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
   size_t num_cvs = blake3_compress_subtree_wide(input, input_len, key,
                                                 chunk_counter, flags, cv_array);
 
@@ -367,10 +367,11 @@ void blake3_hasher_init_keyed(blake3_hasher *self,
   hasher_init_base(self, key_words, KEYED_HASH);
 }
 
-void blake3_hasher_init_derive_key(blake3_hasher *self, const char *context) {
+void blake3_hasher_init_derive_key_raw(blake3_hasher *self, const void *context,
+                                       size_t context_len) {
   blake3_hasher context_hasher;
   hasher_init_base(&context_hasher, IV, DERIVE_KEY_CONTEXT);
-  blake3_hasher_update(&context_hasher, context, strlen(context));
+  blake3_hasher_update(&context_hasher, context, context_len);
   uint8_t context_key[BLAKE3_KEY_LEN];
   blake3_hasher_finalize(&context_hasher, context_key, BLAKE3_KEY_LEN);
   uint32_t context_key_words[8];
@@ -378,6 +379,10 @@ void blake3_hasher_init_derive_key(blake3_hasher *self, const char *context) {
   hasher_init_base(self, context_key_words, DERIVE_KEY_MATERIAL);
 }
 
+void blake3_hasher_init_derive_key(blake3_hasher *self, const char *context) {
+  blake3_hasher_init_derive_key_raw(self, context, strlen(context));
+}
+
 // As described in hasher_push_cv() below, we do "lazy merging", delaying
 // merges until right before the next CV is about to be added. This is
 // different from the reference implementation. Another difference is that we
@@ -42,6 +42,8 @@ void blake3_hasher_init(blake3_hasher *self);
 void blake3_hasher_init_keyed(blake3_hasher *self,
                               const uint8_t key[BLAKE3_KEY_LEN]);
 void blake3_hasher_init_derive_key(blake3_hasher *self, const char *context);
+void blake3_hasher_init_derive_key_raw(blake3_hasher *self, const void *context,
+                                       size_t context_len);
 void blake3_hasher_update(blake3_hasher *self, const void *input,
                           size_t input_len);
 void blake3_hasher_finalize(const blake3_hasher *self, uint8_t *out,
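The new raw variant declared above takes an explicit context length, so the key-derivation context no longer has to be a NUL-terminated C string; as the blake3.c hunks show, the existing blake3_hasher_init_derive_key is now a thin strlen wrapper around it. A minimal usage sketch, relying only on the declarations shown in this header (the wrapper function and buffer names below are illustrative, not part of the gem):

#include <stddef.h>
#include <stdint.h>
#include "blake3.h"

/* Derive a 32-byte subkey from input key material, using a context buffer
 * passed with an explicit length (it may contain embedded NUL bytes). */
void derive_subkey(const void *context, size_t context_len,
                   const void *key_material, size_t key_material_len,
                   uint8_t out[BLAKE3_OUT_LEN]) {
  blake3_hasher hasher;
  blake3_hasher_init_derive_key_raw(&hasher, context, context_len);
  blake3_hasher_update(&hasher, key_material, key_material_len);
  blake3_hasher_finalize(&hasher, out, BLAKE3_OUT_LEN);
}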
@@ -1,3 +1,17 @@
+#if defined(__ELF__) && defined(__linux__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
+#if __has_include(<cet.h>)
+#include <cet.h>
+#endif
+#endif
+
+#if !defined(_CET_ENDBR)
+#define _CET_ENDBR
+#endif
+
 .intel_syntax noprefix
 .global _blake3_hash_many_avx2
 .global blake3_hash_many_avx2
@@ -9,6 +23,7 @@
 .p2align 6
 _blake3_hash_many_avx2:
 blake3_hash_many_avx2:
+        _CET_ENDBR
         push r15
         push r14
         push r13
@@ -1,5 +1,18 @@
-.intel_syntax noprefix
+#if defined(__ELF__) && defined(__linux__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
+#if __has_include(<cet.h>)
+#include <cet.h>
+#endif
+#endif
+
+#if !defined(_CET_ENDBR)
+#define _CET_ENDBR
+#endif
 
+.intel_syntax noprefix
 .global _blake3_hash_many_avx512
 .global blake3_hash_many_avx512
 .global blake3_compress_in_place_avx512
@@ -15,6 +28,7 @@
 .p2align 6
 _blake3_hash_many_avx512:
 blake3_hash_many_avx512:
+        _CET_ENDBR
         push r15
         push r14
         push r13
@@ -2372,6 +2386,7 @@ blake3_hash_many_avx512:
 .p2align 6
 _blake3_compress_in_place_avx512:
 blake3_compress_in_place_avx512:
+        _CET_ENDBR
         vmovdqu xmm0, xmmword ptr [rdi]
         vmovdqu xmm1, xmmword ptr [rdi+0x10]
         movzx eax, r8b
@@ -2454,6 +2469,7 @@ blake3_compress_in_place_avx512:
 .p2align 6
 _blake3_compress_xof_avx512:
 blake3_compress_xof_avx512:
+        _CET_ENDBR
         vmovdqu xmm0, xmmword ptr [rdi]
         vmovdqu xmm1, xmmword ptr [rdi+0x10]
         movzx eax, r8b
@@ -149,6 +149,12 @@ void blake3_compress_in_place(uint32_t cv[8],
     return;
   }
 #endif
+#if !defined(BLAKE3_NO_SSE2)
+  if (features & SSE2) {
+    blake3_compress_in_place_sse2(cv, block, block_len, counter, flags);
+    return;
+  }
+#endif
 #endif
   blake3_compress_in_place_portable(cv, block, block_len, counter, flags);
 }
@@ -171,6 +177,12 @@ void blake3_compress_xof(const uint32_t cv[8],
     return;
   }
 #endif
+#if !defined(BLAKE3_NO_SSE2)
+  if (features & SSE2) {
+    blake3_compress_xof_sse2(cv, block, block_len, counter, flags, out);
+    return;
+  }
+#endif
 #endif
   blake3_compress_xof_portable(cv, block, block_len, counter, flags, out);
 }
@@ -205,6 +217,14 @@ void blake3_hash_many(const uint8_t *const *inputs, size_t num_inputs,
     return;
   }
 #endif
+#if !defined(BLAKE3_NO_SSE2)
+  if (features & SSE2) {
+    blake3_hash_many_sse2(inputs, num_inputs, blocks, key, counter,
+                          increment_counter, flags, flags_start, flags_end,
+                          out);
+    return;
+  }
+#endif
 #endif
 
 #if defined(BLAKE3_USE_NEON)
@@ -237,6 +257,11 @@ size_t blake3_simd_degree(void) {
     return 4;
   }
 #endif
+#if !defined(BLAKE3_NO_SSE2)
+  if (features & SSE2) {
+    return 4;
+  }
+#endif
 #endif
 #if defined(BLAKE3_USE_NEON)
   return 4;
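Each of the dispatch hunks above slots SSE2 in as the last SIMD branch before the portable fallback and wraps it in #if !defined(BLAKE3_NO_SSE2), mirroring the existing BLAKE3_NO_SSE41/BLAKE3_NO_AVX2/BLAKE3_NO_AVX512 opt-outs. A stripped-down sketch of that guard pattern, with hypothetical function names standing in for the real compress/hash routines (this is an illustration, not the gem's code):

/* Compile-time opt-out macro wrapped around a runtime CPU-feature check. */
#include <stdio.h>

enum cpu_feature { SSE2 = 1 << 0 };   /* stand-in for the real feature bitmask */

static void compress_sse2(void)     { puts("sse2 path"); }
static void compress_portable(void) { puts("portable path"); }

static void compress_dispatch(unsigned features) {
#if !defined(BLAKE3_NO_SSE2)
  if (features & SSE2) {  /* the real dispatcher caches a CPUID-derived bitmask */
    compress_sse2();
    return;
  }
#endif
  compress_portable();    /* always-available fallback */
}

int main(void) {
  compress_dispatch(SSE2);  /* prints "sse2 path" unless built with -DBLAKE3_NO_SSE2 */
  return 0;
}

Since SSE2 is part of the x86-64 baseline, the runtime check mainly matters for 32-bit x86 builds; on 64-bit hosts the new branch simply becomes the lowest SIMD tier (degree 4, per the blake3_simd_degree hunk) above the portable code.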
@@ -146,6 +146,25 @@ INLINE void load_key_words(const uint8_t key[BLAKE3_KEY_LEN],
   key_words[7] = load32(&key[7 * 4]);
 }
 
+INLINE void store32(void *dst, uint32_t w) {
+  uint8_t *p = (uint8_t *)dst;
+  p[0] = (uint8_t)(w >> 0);
+  p[1] = (uint8_t)(w >> 8);
+  p[2] = (uint8_t)(w >> 16);
+  p[3] = (uint8_t)(w >> 24);
+}
+
+INLINE void store_cv_words(uint8_t bytes_out[32], uint32_t cv_words[8]) {
+  store32(&bytes_out[0 * 4], cv_words[0]);
+  store32(&bytes_out[1 * 4], cv_words[1]);
+  store32(&bytes_out[2 * 4], cv_words[2]);
+  store32(&bytes_out[3 * 4], cv_words[3]);
+  store32(&bytes_out[4 * 4], cv_words[4]);
+  store32(&bytes_out[5 * 4], cv_words[5]);
+  store32(&bytes_out[6 * 4], cv_words[6]);
+  store32(&bytes_out[7 * 4], cv_words[7]);
+}
+
 void blake3_compress_in_place(uint32_t cv[8],
                               const uint8_t block[BLAKE3_BLOCK_LEN],
                               uint8_t block_len, uint64_t counter,
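The store32/store_cv_words helpers added here write each 32-bit chaining-value word out byte by byte, least-significant byte first. That is why the memcpy calls in the blake3.c hunk above and in the portable-implementation hunks further down are replaced with store_cv_words: a raw memcpy of the uint32_t words reproduces the host's in-memory byte order, while store32 produces the same little-endian bytes on any host. A small self-contained check of that serialization (illustrative only, not part of the gem):

#include <assert.h>
#include <stdint.h>

/* Same byte-by-byte store as the store32 helper above. */
static void store32_example(void *dst, uint32_t w) {
  uint8_t *p = (uint8_t *)dst;
  p[0] = (uint8_t)(w >> 0);
  p[1] = (uint8_t)(w >> 8);
  p[2] = (uint8_t)(w >> 16);
  p[3] = (uint8_t)(w >> 24);
}

int main(void) {
  uint8_t b[4];
  store32_example(b, 0x01020304u);
  /* Least-significant byte comes first, regardless of host endianness. */
  assert(b[0] == 0x04 && b[1] == 0x03 && b[2] == 0x02 && b[3] == 0x01);
  return 0;
}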
@@ -182,6 +201,21 @@ void blake3_hash_many_portable(const uint8_t *const *inputs, size_t num_inputs,
                                uint8_t flags_end, uint8_t *out);
 
 #if defined(IS_X86)
+#if !defined(BLAKE3_NO_SSE2)
+void blake3_compress_in_place_sse2(uint32_t cv[8],
+                                   const uint8_t block[BLAKE3_BLOCK_LEN],
+                                   uint8_t block_len, uint64_t counter,
+                                   uint8_t flags);
+void blake3_compress_xof_sse2(const uint32_t cv[8],
+                              const uint8_t block[BLAKE3_BLOCK_LEN],
+                              uint8_t block_len, uint64_t counter,
+                              uint8_t flags, uint8_t out[64]);
+void blake3_hash_many_sse2(const uint8_t *const *inputs, size_t num_inputs,
+                           size_t blocks, const uint32_t key[8],
+                           uint64_t counter, bool increment_counter,
+                           uint8_t flags, uint8_t flags_start,
+                           uint8_t flags_end, uint8_t *out);
+#endif
 #if !defined(BLAKE3_NO_SSE41)
 void blake3_compress_in_place_sse41(uint32_t cv[8],
                                     const uint8_t block[BLAKE3_BLOCK_LEN],
@@ -1,14 +1,6 @@
 #include "blake3_impl.h"
 #include <string.h>
 
-INLINE void store32(void *dst, uint32_t w) {
-  uint8_t *p = (uint8_t *)dst;
-  p[0] = (uint8_t)(w >> 0);
-  p[1] = (uint8_t)(w >> 8);
-  p[2] = (uint8_t)(w >> 16);
-  p[3] = (uint8_t)(w >> 24);
-}
-
 INLINE uint32_t rotr32(uint32_t w, uint32_t c) {
   return (w >> c) | (w << (32 - c));
 }
@@ -147,7 +139,7 @@ INLINE void hash_one_portable(const uint8_t *input, size_t blocks,
     blocks -= 1;
     block_flags = flags;
   }
-  memcpy(out, cv, 32);
+  store_cv_words(out, cv);
 }
 
 void blake3_hash_many_portable(const uint8_t *const *inputs, size_t num_inputs,
@@ -0,0 +1,565 @@
+#include "blake3_impl.h"
+
+#include <immintrin.h>
+
+#define DEGREE 4
+
+#define _mm_shuffle_ps2(a, b, c)                                              \
+  (_mm_castps_si128(                                                          \
+      _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), (c))))
+
+INLINE __m128i loadu(const uint8_t src[16]) {
+  return _mm_loadu_si128((const __m128i *)src);
+}
+
+INLINE void storeu(__m128i src, uint8_t dest[16]) {
+  _mm_storeu_si128((__m128i *)dest, src);
+}
+
+INLINE __m128i addv(__m128i a, __m128i b) { return _mm_add_epi32(a, b); }
+
+// Note that clang-format doesn't like the name "xor" for some reason.
+INLINE __m128i xorv(__m128i a, __m128i b) { return _mm_xor_si128(a, b); }
+
+INLINE __m128i set1(uint32_t x) { return _mm_set1_epi32((int32_t)x); }
+
+INLINE __m128i set4(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+  return _mm_setr_epi32((int32_t)a, (int32_t)b, (int32_t)c, (int32_t)d);
+}
+
+INLINE __m128i rot16(__m128i x) {
+  return _mm_shufflehi_epi16(_mm_shufflelo_epi16(x, 0xB1), 0xB1);
+}
+
+INLINE __m128i rot12(__m128i x) {
+  return xorv(_mm_srli_epi32(x, 12), _mm_slli_epi32(x, 32 - 12));
+}
+
+INLINE __m128i rot8(__m128i x) {
+  return xorv(_mm_srli_epi32(x, 8), _mm_slli_epi32(x, 32 - 8));
+}
+
+INLINE __m128i rot7(__m128i x) {
+  return xorv(_mm_srli_epi32(x, 7), _mm_slli_epi32(x, 32 - 7));
+}
+
+INLINE void g1(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
+               __m128i m) {
+  *row0 = addv(addv(*row0, m), *row1);
+  *row3 = xorv(*row3, *row0);
+  *row3 = rot16(*row3);
+  *row2 = addv(*row2, *row3);
+  *row1 = xorv(*row1, *row2);
+  *row1 = rot12(*row1);
+}
+
+INLINE void g2(__m128i *row0, __m128i *row1, __m128i *row2, __m128i *row3,
+               __m128i m) {
+  *row0 = addv(addv(*row0, m), *row1);
+  *row3 = xorv(*row3, *row0);
+  *row3 = rot8(*row3);
+  *row2 = addv(*row2, *row3);
+  *row1 = xorv(*row1, *row2);
+  *row1 = rot7(*row1);
+}
+
+// Note the optimization here of leaving row1 as the unrotated row, rather than
+// row0. All the message loads below are adjusted to compensate for this. See
+// discussion at https://github.com/sneves/blake2-avx2/pull/4
+INLINE void diagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
+  *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(2, 1, 0, 3));
+  *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
+  *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(0, 3, 2, 1));
+}
+
+INLINE void undiagonalize(__m128i *row0, __m128i *row2, __m128i *row3) {
+  *row0 = _mm_shuffle_epi32(*row0, _MM_SHUFFLE(0, 3, 2, 1));
+  *row3 = _mm_shuffle_epi32(*row3, _MM_SHUFFLE(1, 0, 3, 2));
+  *row2 = _mm_shuffle_epi32(*row2, _MM_SHUFFLE(2, 1, 0, 3));
+}
+
+INLINE __m128i blend_epi16(__m128i a, __m128i b, const int imm8) {
+  const __m128i bits = _mm_set_epi16(0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01);
+  __m128i mask = _mm_set1_epi16(imm8);
+  mask = _mm_and_si128(mask, bits);
+  mask = _mm_cmpeq_epi16(mask, bits);
+  return _mm_or_si128(_mm_and_si128(mask, b), _mm_andnot_si128(mask, a));
+}
+
+INLINE void compress_pre(__m128i rows[4], const uint32_t cv[8],
+                         const uint8_t block[BLAKE3_BLOCK_LEN],
+                         uint8_t block_len, uint64_t counter, uint8_t flags) {
+  rows[0] = loadu((uint8_t *)&cv[0]);
+  rows[1] = loadu((uint8_t *)&cv[4]);
+  rows[2] = set4(IV[0], IV[1], IV[2], IV[3]);
+  rows[3] = set4(counter_low(counter), counter_high(counter),
+                 (uint32_t)block_len, (uint32_t)flags);
+
+  __m128i m0 = loadu(&block[sizeof(__m128i) * 0]);
+  __m128i m1 = loadu(&block[sizeof(__m128i) * 1]);
+  __m128i m2 = loadu(&block[sizeof(__m128i) * 2]);
+  __m128i m3 = loadu(&block[sizeof(__m128i) * 3]);
+
+  __m128i t0, t1, t2, t3, tt;
+
+  // Round 1. The first round permutes the message words from the original
+  // input order, into the groups that get mixed in parallel.
+  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(2, 0, 2, 0)); //  6  4  2  0
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+  t1 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 3, 1)); //  7  5  3  1
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+  diagonalize(&rows[0], &rows[2], &rows[3]);
+  t2 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(2, 0, 2, 0)); // 14 12 10  8
+  t2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2, 1, 0, 3));   // 12 10  8 14
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+  t3 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 1, 3, 1)); // 15 13 11  9
+  t3 = _mm_shuffle_epi32(t3, _MM_SHUFFLE(2, 1, 0, 3));   // 13 11  9 15
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+  undiagonalize(&rows[0], &rows[2], &rows[3]);
+  m0 = t0;
+  m1 = t1;
+  m2 = t2;
+  m3 = t3;
+
+  // Round 2. This round and all following rounds apply a fixed permutation
+  // to the message words from the round before.
+  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+  t1 = blend_epi16(tt, t1, 0xCC);
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+  diagonalize(&rows[0], &rows[2], &rows[3]);
+  t2 = _mm_unpacklo_epi64(m3, m1);
+  tt = blend_epi16(t2, m2, 0xC0);
+  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+  t3 = _mm_unpackhi_epi32(m1, m3);
+  tt = _mm_unpacklo_epi32(m2, t3);
+  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+  undiagonalize(&rows[0], &rows[2], &rows[3]);
+  m0 = t0;
+  m1 = t1;
+  m2 = t2;
+  m3 = t3;
+
+  // Round 3
+  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+  t1 = blend_epi16(tt, t1, 0xCC);
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+  diagonalize(&rows[0], &rows[2], &rows[3]);
+  t2 = _mm_unpacklo_epi64(m3, m1);
+  tt = blend_epi16(t2, m2, 0xC0);
+  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+  t3 = _mm_unpackhi_epi32(m1, m3);
+  tt = _mm_unpacklo_epi32(m2, t3);
+  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+  undiagonalize(&rows[0], &rows[2], &rows[3]);
+  m0 = t0;
+  m1 = t1;
+  m2 = t2;
+  m3 = t3;
+
+  // Round 4
+  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+  t1 = blend_epi16(tt, t1, 0xCC);
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+  diagonalize(&rows[0], &rows[2], &rows[3]);
+  t2 = _mm_unpacklo_epi64(m3, m1);
+  tt = blend_epi16(t2, m2, 0xC0);
+  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+  t3 = _mm_unpackhi_epi32(m1, m3);
+  tt = _mm_unpacklo_epi32(m2, t3);
+  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+  undiagonalize(&rows[0], &rows[2], &rows[3]);
+  m0 = t0;
+  m1 = t1;
+  m2 = t2;
+  m3 = t3;
+
+  // Round 5
+  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+  t1 = blend_epi16(tt, t1, 0xCC);
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+  diagonalize(&rows[0], &rows[2], &rows[3]);
+  t2 = _mm_unpacklo_epi64(m3, m1);
+  tt = blend_epi16(t2, m2, 0xC0);
+  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+  t3 = _mm_unpackhi_epi32(m1, m3);
+  tt = _mm_unpacklo_epi32(m2, t3);
+  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+  undiagonalize(&rows[0], &rows[2], &rows[3]);
+  m0 = t0;
+  m1 = t1;
+  m2 = t2;
+  m3 = t3;
+
+  // Round 6
+  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+  t1 = blend_epi16(tt, t1, 0xCC);
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+  diagonalize(&rows[0], &rows[2], &rows[3]);
+  t2 = _mm_unpacklo_epi64(m3, m1);
+  tt = blend_epi16(t2, m2, 0xC0);
+  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+  t3 = _mm_unpackhi_epi32(m1, m3);
+  tt = _mm_unpacklo_epi32(m2, t3);
+  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+  undiagonalize(&rows[0], &rows[2], &rows[3]);
+  m0 = t0;
+  m1 = t1;
+  m2 = t2;
+  m3 = t3;
+
+  // Round 7
+  t0 = _mm_shuffle_ps2(m0, m1, _MM_SHUFFLE(3, 1, 1, 2));
+  t0 = _mm_shuffle_epi32(t0, _MM_SHUFFLE(0, 3, 2, 1));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t0);
+  t1 = _mm_shuffle_ps2(m2, m3, _MM_SHUFFLE(3, 3, 2, 2));
+  tt = _mm_shuffle_epi32(m0, _MM_SHUFFLE(0, 0, 3, 3));
+  t1 = blend_epi16(tt, t1, 0xCC);
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t1);
+  diagonalize(&rows[0], &rows[2], &rows[3]);
+  t2 = _mm_unpacklo_epi64(m3, m1);
+  tt = blend_epi16(t2, m2, 0xC0);
+  t2 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(1, 3, 2, 0));
+  g1(&rows[0], &rows[1], &rows[2], &rows[3], t2);
+  t3 = _mm_unpackhi_epi32(m1, m3);
+  tt = _mm_unpacklo_epi32(m2, t3);
+  t3 = _mm_shuffle_epi32(tt, _MM_SHUFFLE(0, 1, 3, 2));
+  g2(&rows[0], &rows[1], &rows[2], &rows[3], t3);
+  undiagonalize(&rows[0], &rows[2], &rows[3]);
+}
+
+void blake3_compress_in_place_sse2(uint32_t cv[8],
+                                   const uint8_t block[BLAKE3_BLOCK_LEN],
+                                   uint8_t block_len, uint64_t counter,
+                                   uint8_t flags) {
+  __m128i rows[4];
+  compress_pre(rows, cv, block, block_len, counter, flags);
+  storeu(xorv(rows[0], rows[2]), (uint8_t *)&cv[0]);
+  storeu(xorv(rows[1], rows[3]), (uint8_t *)&cv[4]);
+}
+
+void blake3_compress_xof_sse2(const uint32_t cv[8],
+                              const uint8_t block[BLAKE3_BLOCK_LEN],
+                              uint8_t block_len, uint64_t counter,
+                              uint8_t flags, uint8_t out[64]) {
+  __m128i rows[4];
+  compress_pre(rows, cv, block, block_len, counter, flags);
+  storeu(xorv(rows[0], rows[2]), &out[0]);
+  storeu(xorv(rows[1], rows[3]), &out[16]);
+  storeu(xorv(rows[2], loadu((uint8_t *)&cv[0])), &out[32]);
+  storeu(xorv(rows[3], loadu((uint8_t *)&cv[4])), &out[48]);
+}
+
+INLINE void round_fn(__m128i v[16], __m128i m[16], size_t r) {
+  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][0]]);
+  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][2]]);
+  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][4]]);
+  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][6]]);
+  v[0] = addv(v[0], v[4]);
+  v[1] = addv(v[1], v[5]);
+  v[2] = addv(v[2], v[6]);
+  v[3] = addv(v[3], v[7]);
+  v[12] = xorv(v[12], v[0]);
+  v[13] = xorv(v[13], v[1]);
+  v[14] = xorv(v[14], v[2]);
+  v[15] = xorv(v[15], v[3]);
+  v[12] = rot16(v[12]);
+  v[13] = rot16(v[13]);
+  v[14] = rot16(v[14]);
+  v[15] = rot16(v[15]);
+  v[8] = addv(v[8], v[12]);
+  v[9] = addv(v[9], v[13]);
+  v[10] = addv(v[10], v[14]);
+  v[11] = addv(v[11], v[15]);
+  v[4] = xorv(v[4], v[8]);
+  v[5] = xorv(v[5], v[9]);
+  v[6] = xorv(v[6], v[10]);
+  v[7] = xorv(v[7], v[11]);
+  v[4] = rot12(v[4]);
+  v[5] = rot12(v[5]);
+  v[6] = rot12(v[6]);
+  v[7] = rot12(v[7]);
+  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][1]]);
+  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][3]]);
+  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][5]]);
+  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][7]]);
+  v[0] = addv(v[0], v[4]);
+  v[1] = addv(v[1], v[5]);
+  v[2] = addv(v[2], v[6]);
+  v[3] = addv(v[3], v[7]);
+  v[12] = xorv(v[12], v[0]);
+  v[13] = xorv(v[13], v[1]);
+  v[14] = xorv(v[14], v[2]);
+  v[15] = xorv(v[15], v[3]);
+  v[12] = rot8(v[12]);
+  v[13] = rot8(v[13]);
+  v[14] = rot8(v[14]);
+  v[15] = rot8(v[15]);
+  v[8] = addv(v[8], v[12]);
+  v[9] = addv(v[9], v[13]);
+  v[10] = addv(v[10], v[14]);
+  v[11] = addv(v[11], v[15]);
+  v[4] = xorv(v[4], v[8]);
+  v[5] = xorv(v[5], v[9]);
+  v[6] = xorv(v[6], v[10]);
+  v[7] = xorv(v[7], v[11]);
+  v[4] = rot7(v[4]);
+  v[5] = rot7(v[5]);
+  v[6] = rot7(v[6]);
+  v[7] = rot7(v[7]);
+
+  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][8]]);
+  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][10]]);
+  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][12]]);
+  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][14]]);
+  v[0] = addv(v[0], v[5]);
+  v[1] = addv(v[1], v[6]);
+  v[2] = addv(v[2], v[7]);
+  v[3] = addv(v[3], v[4]);
+  v[15] = xorv(v[15], v[0]);
+  v[12] = xorv(v[12], v[1]);
+  v[13] = xorv(v[13], v[2]);
+  v[14] = xorv(v[14], v[3]);
+  v[15] = rot16(v[15]);
+  v[12] = rot16(v[12]);
+  v[13] = rot16(v[13]);
+  v[14] = rot16(v[14]);
+  v[10] = addv(v[10], v[15]);
+  v[11] = addv(v[11], v[12]);
+  v[8] = addv(v[8], v[13]);
+  v[9] = addv(v[9], v[14]);
+  v[5] = xorv(v[5], v[10]);
+  v[6] = xorv(v[6], v[11]);
+  v[7] = xorv(v[7], v[8]);
+  v[4] = xorv(v[4], v[9]);
+  v[5] = rot12(v[5]);
+  v[6] = rot12(v[6]);
+  v[7] = rot12(v[7]);
+  v[4] = rot12(v[4]);
+  v[0] = addv(v[0], m[(size_t)MSG_SCHEDULE[r][9]]);
+  v[1] = addv(v[1], m[(size_t)MSG_SCHEDULE[r][11]]);
+  v[2] = addv(v[2], m[(size_t)MSG_SCHEDULE[r][13]]);
+  v[3] = addv(v[3], m[(size_t)MSG_SCHEDULE[r][15]]);
+  v[0] = addv(v[0], v[5]);
+  v[1] = addv(v[1], v[6]);
+  v[2] = addv(v[2], v[7]);
+  v[3] = addv(v[3], v[4]);
+  v[15] = xorv(v[15], v[0]);
+  v[12] = xorv(v[12], v[1]);
+  v[13] = xorv(v[13], v[2]);
+  v[14] = xorv(v[14], v[3]);
+  v[15] = rot8(v[15]);
+  v[12] = rot8(v[12]);
+  v[13] = rot8(v[13]);
+  v[14] = rot8(v[14]);
+  v[10] = addv(v[10], v[15]);
+  v[11] = addv(v[11], v[12]);
+  v[8] = addv(v[8], v[13]);
+  v[9] = addv(v[9], v[14]);
+  v[5] = xorv(v[5], v[10]);
+  v[6] = xorv(v[6], v[11]);
+  v[7] = xorv(v[7], v[8]);
+  v[4] = xorv(v[4], v[9]);
+  v[5] = rot7(v[5]);
+  v[6] = rot7(v[6]);
+  v[7] = rot7(v[7]);
+  v[4] = rot7(v[4]);
+}
+
+INLINE void transpose_vecs(__m128i vecs[DEGREE]) {
+  // Interleave 32-bit lanes. The low unpack is lanes 00/11 and the high is
+  // 22/33. Note that this doesn't split the vector into two lanes, as the
+  // AVX2 counterparts do.
+  __m128i ab_01 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
+  __m128i ab_23 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
+  __m128i cd_01 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
+  __m128i cd_23 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
+
+  // Interleave 64-bit lanes.
+  __m128i abcd_0 = _mm_unpacklo_epi64(ab_01, cd_01);
+  __m128i abcd_1 = _mm_unpackhi_epi64(ab_01, cd_01);
+  __m128i abcd_2 = _mm_unpacklo_epi64(ab_23, cd_23);
+  __m128i abcd_3 = _mm_unpackhi_epi64(ab_23, cd_23);
+
+  vecs[0] = abcd_0;
+  vecs[1] = abcd_1;
+  vecs[2] = abcd_2;
+  vecs[3] = abcd_3;
+}
+
+INLINE void transpose_msg_vecs(const uint8_t *const *inputs,
+                               size_t block_offset, __m128i out[16]) {
+  out[0] = loadu(&inputs[0][block_offset + 0 * sizeof(__m128i)]);
+  out[1] = loadu(&inputs[1][block_offset + 0 * sizeof(__m128i)]);
+  out[2] = loadu(&inputs[2][block_offset + 0 * sizeof(__m128i)]);
+  out[3] = loadu(&inputs[3][block_offset + 0 * sizeof(__m128i)]);
+  out[4] = loadu(&inputs[0][block_offset + 1 * sizeof(__m128i)]);
+  out[5] = loadu(&inputs[1][block_offset + 1 * sizeof(__m128i)]);
+  out[6] = loadu(&inputs[2][block_offset + 1 * sizeof(__m128i)]);
+  out[7] = loadu(&inputs[3][block_offset + 1 * sizeof(__m128i)]);
+  out[8] = loadu(&inputs[0][block_offset + 2 * sizeof(__m128i)]);
+  out[9] = loadu(&inputs[1][block_offset + 2 * sizeof(__m128i)]);
+  out[10] = loadu(&inputs[2][block_offset + 2 * sizeof(__m128i)]);
+  out[11] = loadu(&inputs[3][block_offset + 2 * sizeof(__m128i)]);
+  out[12] = loadu(&inputs[0][block_offset + 3 * sizeof(__m128i)]);
+  out[13] = loadu(&inputs[1][block_offset + 3 * sizeof(__m128i)]);
+  out[14] = loadu(&inputs[2][block_offset + 3 * sizeof(__m128i)]);
+  out[15] = loadu(&inputs[3][block_offset + 3 * sizeof(__m128i)]);
+  for (size_t i = 0; i < 4; ++i) {
+    _mm_prefetch(&inputs[i][block_offset + 256], _MM_HINT_T0);
+  }
+  transpose_vecs(&out[0]);
+  transpose_vecs(&out[4]);
+  transpose_vecs(&out[8]);
+  transpose_vecs(&out[12]);
+}
+
+INLINE void load_counters(uint64_t counter, bool increment_counter,
+                          __m128i *out_lo, __m128i *out_hi) {
+  const __m128i mask = _mm_set1_epi32(-(int32_t)increment_counter);
+  const __m128i add0 = _mm_set_epi32(3, 2, 1, 0);
+  const __m128i add1 = _mm_and_si128(mask, add0);
+  __m128i l = _mm_add_epi32(_mm_set1_epi32(counter), add1);
+  __m128i carry = _mm_cmpgt_epi32(_mm_xor_si128(add1, _mm_set1_epi32(0x80000000)),
+                                  _mm_xor_si128(l, _mm_set1_epi32(0x80000000)));
+  __m128i h = _mm_sub_epi32(_mm_set1_epi32(counter >> 32), carry);
+  *out_lo = l;
+  *out_hi = h;
+}
+
+void blake3_hash4_sse2(const uint8_t *const *inputs, size_t blocks,
+                       const uint32_t key[8], uint64_t counter,
+                       bool increment_counter, uint8_t flags,
+                       uint8_t flags_start, uint8_t flags_end, uint8_t *out) {
+  __m128i h_vecs[8] = {
+      set1(key[0]), set1(key[1]), set1(key[2]), set1(key[3]),
+      set1(key[4]), set1(key[5]), set1(key[6]), set1(key[7]),
+  };
+  __m128i counter_low_vec, counter_high_vec;
+  load_counters(counter, increment_counter, &counter_low_vec,
+                &counter_high_vec);
+  uint8_t block_flags = flags | flags_start;
+
+  for (size_t block = 0; block < blocks; block++) {
+    if (block + 1 == blocks) {
+      block_flags |= flags_end;
+    }
+    __m128i block_len_vec = set1(BLAKE3_BLOCK_LEN);
+    __m128i block_flags_vec = set1(block_flags);
+    __m128i msg_vecs[16];
+    transpose_msg_vecs(inputs, block * BLAKE3_BLOCK_LEN, msg_vecs);
+
+    __m128i v[16] = {
+        h_vecs[0], h_vecs[1], h_vecs[2], h_vecs[3],
+        h_vecs[4], h_vecs[5], h_vecs[6], h_vecs[7],
+        set1(IV[0]), set1(IV[1]), set1(IV[2]), set1(IV[3]),
+        counter_low_vec, counter_high_vec, block_len_vec, block_flags_vec,
+    };
+    round_fn(v, msg_vecs, 0);
+    round_fn(v, msg_vecs, 1);
+    round_fn(v, msg_vecs, 2);
+    round_fn(v, msg_vecs, 3);
+    round_fn(v, msg_vecs, 4);
+    round_fn(v, msg_vecs, 5);
+    round_fn(v, msg_vecs, 6);
+    h_vecs[0] = xorv(v[0], v[8]);
+    h_vecs[1] = xorv(v[1], v[9]);
+    h_vecs[2] = xorv(v[2], v[10]);
+    h_vecs[3] = xorv(v[3], v[11]);
+    h_vecs[4] = xorv(v[4], v[12]);
+    h_vecs[5] = xorv(v[5], v[13]);
+    h_vecs[6] = xorv(v[6], v[14]);
+    h_vecs[7] = xorv(v[7], v[15]);
+
+    block_flags = flags;
+  }
+
+  transpose_vecs(&h_vecs[0]);
+  transpose_vecs(&h_vecs[4]);
+  // The first four vecs now contain the first half of each output, and the
+  // second four vecs contain the second half of each output.
+  storeu(h_vecs[0], &out[0 * sizeof(__m128i)]);
+  storeu(h_vecs[4], &out[1 * sizeof(__m128i)]);
+  storeu(h_vecs[1], &out[2 * sizeof(__m128i)]);
+  storeu(h_vecs[5], &out[3 * sizeof(__m128i)]);
+  storeu(h_vecs[2], &out[4 * sizeof(__m128i)]);
+  storeu(h_vecs[6], &out[5 * sizeof(__m128i)]);
+  storeu(h_vecs[3], &out[6 * sizeof(__m128i)]);
+  storeu(h_vecs[7], &out[7 * sizeof(__m128i)]);
+}
+
+INLINE void hash_one_sse2(const uint8_t *input, size_t blocks,
+                          const uint32_t key[8], uint64_t counter,
+                          uint8_t flags, uint8_t flags_start,
+                          uint8_t flags_end, uint8_t out[BLAKE3_OUT_LEN]) {
+  uint32_t cv[8];
+  memcpy(cv, key, BLAKE3_KEY_LEN);
+  uint8_t block_flags = flags | flags_start;
+  while (blocks > 0) {
+    if (blocks == 1) {
+      block_flags |= flags_end;
+    }
+    blake3_compress_in_place_sse2(cv, input, BLAKE3_BLOCK_LEN, counter,
+                                  block_flags);
+    input = &input[BLAKE3_BLOCK_LEN];
+    blocks -= 1;
+    block_flags = flags;
+  }
+  memcpy(out, cv, BLAKE3_OUT_LEN);
+}
+
+void blake3_hash_many_sse2(const uint8_t *const *inputs, size_t num_inputs,
+                           size_t blocks, const uint32_t key[8],
+                           uint64_t counter, bool increment_counter,
+                           uint8_t flags, uint8_t flags_start,
+                           uint8_t flags_end, uint8_t *out) {
+  while (num_inputs >= DEGREE) {
+    blake3_hash4_sse2(inputs, blocks, key, counter, increment_counter, flags,
+                      flags_start, flags_end, out);
+    if (increment_counter) {
+      counter += DEGREE;
+    }
+    inputs += DEGREE;
+    num_inputs -= DEGREE;
+    out = &out[DEGREE * BLAKE3_OUT_LEN];
+  }
+  while (num_inputs > 0) {
+    hash_one_sse2(inputs[0], blocks, key, counter, flags, flags_start,
+                  flags_end, out);
+    if (increment_counter) {
+      counter += 1;
+    }
+    inputs += 1;
+    num_inputs -= 1;
+    out = &out[BLAKE3_OUT_LEN];
+  }
+}
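One notable detail in this new file: SSE2 lacks SSE4.1's 16-bit blend instruction (pblendw), so the blend_epi16 helper near the top emulates it with a compare-generated mask before AND/ANDNOT/OR selection. The following standalone check, written independently of the gem's sources as an illustration, exercises that emulation; for imm8 = 0xCC (binary 11001100) it should take 16-bit words 2, 3, 6 and 7 from b and the rest from a, matching the SSE4.1 instruction's behavior.

/* Illustrative check of the mask-based 16-bit blend emulation (not the gem's code). */
#include <assert.h>
#include <stdint.h>
#include <emmintrin.h>  /* SSE2 intrinsics only */

static __m128i blend_epi16_sse2(__m128i a, __m128i b, const int imm8) {
  const __m128i bits = _mm_set_epi16(0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01);
  __m128i mask = _mm_set1_epi16((short)imm8);
  mask = _mm_and_si128(mask, bits);      /* isolate this word's selector bit */
  mask = _mm_cmpeq_epi16(mask, bits);    /* all-ones where the imm8 bit is set */
  return _mm_or_si128(_mm_and_si128(mask, b), _mm_andnot_si128(mask, a));
}

int main(void) {
  __m128i a = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);         /* words 0..7 = 0..7   */
  __m128i b = _mm_set_epi16(17, 16, 15, 14, 13, 12, 11, 10);  /* words 0..7 = 10..17 */
  __m128i r = blend_epi16_sse2(a, b, 0xCC);
  uint16_t out[8];
  _mm_storeu_si128((__m128i *)out, r);
  /* Words 2, 3, 6, 7 come from b; the rest come from a. */
  assert(out[0] == 0 && out[1] == 1 && out[2] == 12 && out[3] == 13);
  assert(out[4] == 4 && out[5] == 5 && out[6] == 16 && out[7] == 17);
  return 0;
}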