scrypt 2.1.1 → 3.0.0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +6 -2
- data.tar.gz.sig +1 -2
- data/Rakefile +2 -1
- data/ext/scrypt/Rakefile +2 -1
- data/ext/scrypt/cpusupport.h +105 -0
- data/ext/scrypt/crypto_scrypt.c +253 -0
- data/ext/scrypt/crypto_scrypt.h +1 -0
- data/ext/scrypt/crypto_scrypt_internal.h +0 -0
- data/ext/scrypt/crypto_scrypt_smix.c +214 -0
- data/ext/scrypt/crypto_scrypt_smix.h +14 -0
- data/ext/scrypt/{crypto_scrypt-sse.c → crypto_scrypt_smix_sse2.c} +21 -142
- data/ext/scrypt/crypto_scrypt_smix_sse2.h +16 -0
- data/ext/scrypt/insecure_memzero.c +19 -0
- data/ext/scrypt/insecure_memzero.h +37 -0
- data/ext/scrypt/sha256.c +344 -229
- data/ext/scrypt/sha256.h +84 -50
- data/ext/scrypt/warnp.c +76 -0
- data/ext/scrypt/warnp.h +59 -0
- data/lib/scrypt/version.rb +1 -1
- data/scrypt.gemspec +3 -2
- data/spec/scrypt/engine_spec.rb +4 -2
- metadata +33 -20
- metadata.gz.sig +0 -0
data/ext/scrypt/crypto_scrypt_smix.h
ADDED
@@ -0,0 +1,14 @@
+#ifndef _CRYPTO_SCRYPT_SMIX_H_
+#define _CRYPTO_SCRYPT_SMIX_H_
+
+/**
+ * crypto_scrypt_smix(B, r, N, V, XY):
+ * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
+ * the temporary storage V must be 128rN bytes in length; the temporary
+ * storage XY must be 256r + 64 bytes in length. The value N must be a
+ * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
+ * multiple of 64 bytes.
+ */
+void crypto_scrypt_smix(uint8_t *, size_t, uint64_t, void *, void *);
+
+#endif /* !_CRYPTO_SCRYPT_SMIX_H_ */
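The header above documents the buffer contract for the new generic SMix entry point. As a hedged illustration (not code from the gem; the helper name and error handling are made up), a caller honouring that contract could allocate the aligned temporaries with posix_memalign, using the sizes from the comment:

#include <stdint.h>
#include <stdlib.h>

#include "crypto_scrypt_smix.h"

/* Illustrative only: run SMix_r(B, N) over a caller-supplied 128*r-byte B. */
static int
smix_example(uint8_t * B, size_t r, uint64_t N)
{
	void * V, * XY;

	/* V holds 128*r*N bytes, XY holds 256*r + 64 bytes, both 64-byte aligned. */
	if (posix_memalign(&V, 64, 128 * r * N) != 0)
		return (-1);
	if (posix_memalign(&XY, 64, 256 * r + 64) != 0) {
		free(V);
		return (-1);
	}

	crypto_scrypt_smix(B, r, N, V, XY);

	free(XY);
	free(V);
	return (0);
}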
data/ext/scrypt/{crypto_scrypt-sse.c → crypto_scrypt_smix_sse2.c}
RENAMED
@@ -26,36 +26,27 @@
  * This file was originally written by Colin Percival as part of the Tarsnap
  * online backup system.
  */
-#include "
-
-#include <sys/types.h>
-#ifndef __MINGW32__
-#include <sys/mman.h>
-#endif
+#include "cpusupport.h"
+#ifdef CPUSUPPORT_X86_SSE2

 #include <emmintrin.h>
-#include <errno.h>
 #include <stdint.h>
-#include <stdlib.h>
-#include <string.h>

-#include "sha256.h"
 #include "sysendian.h"

-#include "
+#include "crypto_scrypt_smix_sse2.h"

-static void blkcpy(void *, void *, size_t);
-static void blkxor(void *, void *, size_t);
+static void blkcpy(void *, const void *, size_t);
+static void blkxor(void *, const void *, size_t);
 static void salsa20_8(__m128i *);
-static void blockmix_salsa8(__m128i *, __m128i *, __m128i *, size_t);
-static uint64_t integerify(void *, size_t);
-static void smix(uint8_t *, size_t, uint64_t, void *, void *);
+static void blockmix_salsa8(const __m128i *, __m128i *, __m128i *, size_t);
+static uint64_t integerify(const void *, size_t);

 static void
-blkcpy(void * dest, void * src, size_t len)
+blkcpy(void * dest, const void * src, size_t len)
 {
 	__m128i * D = dest;
-	__m128i * S = src;
+	const __m128i * S = src;
 	size_t L = len / 16;
 	size_t i;

@@ -64,10 +55,10 @@ blkcpy(void * dest, void * src, size_t len)
 }

 static void
-blkxor(void * dest, void * src, size_t len)
+blkxor(void * dest, const void * src, size_t len)
 {
 	__m128i * D = dest;
-	__m128i * S = src;
+	const __m128i * S = src;
 	size_t L = len / 16;
 	size_t i;

@@ -144,7 +135,7 @@ salsa20_8(__m128i B[4])
  * temporary space X must be 64 bytes.
  */
 static void
-blockmix_salsa8(__m128i * Bin, __m128i * Bout, __m128i * X, size_t r)
+blockmix_salsa8(const __m128i * Bin, __m128i * Bout, __m128i * X, size_t r)
 {
 	size_t i;

@@ -174,25 +165,28 @@ blockmix_salsa8(__m128i * Bin, __m128i * Bout, __m128i * X, size_t r)
 /**
  * integerify(B, r):
  * Return the result of parsing B_{2r-1} as a little-endian integer.
+ * Note that B's layout is permuted compared to the generic implementation.
  */
 static uint64_t
-integerify(void * B, size_t r)
+integerify(const void * B, size_t r)
 {
-	uint32_t * X = (void *)((uintptr_t)(B) + (2 * r - 1) * 64);
+	const uint32_t * X = (const void *)((uintptr_t)(B) + (2 * r - 1) * 64);

 	return (((uint64_t)(X[13]) << 32) + X[0]);
 }

 /**
- *
+ * crypto_scrypt_smix_sse2(B, r, N, V, XY):
  * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
  * the temporary storage V must be 128rN bytes in length; the temporary
  * storage XY must be 256r + 64 bytes in length. The value N must be a
  * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
  * multiple of 64 bytes.
+ *
+ * Use SSE2 instructions.
  */
-
-
+void
+crypto_scrypt_smix_sse2(uint8_t * B, size_t r, uint64_t N, void * V, void * XY)
 {
 	__m128i * X = XY;
 	__m128i * Y = (void *)((uintptr_t)(XY) + 128 * r);
@@ -251,119 +245,4 @@ smix(uint8_t * B, size_t r, uint64_t N, void * V, void * XY)
 	}
 }

-
- * crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
- * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
- * p, buflen) and write the result into buf. The parameters r, p, and buflen
- * must satisfy r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N
- * must be a power of 2 greater than 1.
- *
- * Return 0 on success; or -1 on error.
- */
-int
-crypto_scrypt(const uint8_t * passwd, size_t passwdlen,
-    const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t _r, uint32_t _p,
-    uint8_t * buf, size_t buflen)
-{
-	void * B0, * V0, * XY0;
-	uint8_t * B;
-	uint32_t * V;
-	uint32_t * XY;
-	size_t r = _r, p = _p;
-	uint32_t i;
-
-	/* Sanity-check parameters. */
-#if SIZE_MAX > UINT32_MAX
-	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
-		errno = EFBIG;
-		goto err0;
-	}
-#endif
-	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
-		errno = EFBIG;
-		goto err0;
-	}
-	if (((N & (N - 1)) != 0) || (N < 2)) {
-		errno = EINVAL;
-		goto err0;
-	}
-	if ((r > SIZE_MAX / 128 / p) ||
-#if SIZE_MAX / 256 <= UINT32_MAX
-	    (r > (SIZE_MAX - 64) / 256) ||
-#endif
-	    (N > SIZE_MAX / 128 / r)) {
-		errno = ENOMEM;
-		goto err0;
-	}
-
-	/* Allocate memory. */
-#ifdef HAVE_POSIX_MEMALIGN
-	if ((errno = posix_memalign(&B0, 64, 128 * r * p)) != 0)
-		goto err0;
-	B = (uint8_t *)(B0);
-	if ((errno = posix_memalign(&XY0, 64, 256 * r + 64)) != 0)
-		goto err1;
-	XY = (uint32_t *)(XY0);
-#ifndef MAP_ANON
-	if ((errno = posix_memalign(&V0, 64, 128 * r * N)) != 0)
-		goto err2;
-	V = (uint32_t *)(V0);
-#endif
-#else
-	if ((B0 = malloc(128 * r * p + 63)) == NULL)
-		goto err0;
-	B = (uint8_t *)(((uintptr_t)(B0) + 63) & ~ (uintptr_t)(63));
-	if ((XY0 = malloc(256 * r + 64 + 63)) == NULL)
-		goto err1;
-	XY = (uint32_t *)(((uintptr_t)(XY0) + 63) & ~ (uintptr_t)(63));
-#ifndef MAP_ANON
-	if ((V0 = malloc(128 * r * N + 63)) == NULL)
-		goto err2;
-	V = (uint32_t *)(((uintptr_t)(V0) + 63) & ~ (uintptr_t)(63));
-#endif
-#endif
-#ifdef MAP_ANON
-	if ((V0 = mmap(NULL, 128 * r * N, PROT_READ | PROT_WRITE,
-#ifdef MAP_NOCORE
-	    MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
-#else
-	    MAP_ANON | MAP_PRIVATE,
-#endif
-	    -1, 0)) == MAP_FAILED)
-		goto err2;
-	V = (uint32_t *)(V0);
-#endif
-
-	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
-	PBKDF2_scrypt_SHA256(passwd, passwdlen, salt, saltlen, 1, B, p * 128 * r);
-
-	/* 2: for i = 0 to p - 1 do */
-	for (i = 0; i < p; i++) {
-		/* 3: B_i <-- MF(B_i, N) */
-		smix(&B[i * 128 * r], r, N, V, XY);
-	}
-
-	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
-	PBKDF2_scrypt_SHA256(passwd, passwdlen, B, p * 128 * r, 1, buf, buflen);
-
-	/* Free memory. */
-#ifdef MAP_ANON
-	if (munmap(V0, 128 * r * N))
-		goto err2;
-#else
-	free(V0);
-#endif
-	free(XY0);
-	free(B0);
-
-	/* Success! */
-	return (0);
-
-err2:
-	free(XY0);
-err1:
-	free(B0);
-err0:
-	/* Failure! */
-	return (-1);
-}
+#endif /* CPUSUPPORT_X86_SSE2 */
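After the rename, this file provides only the SSE2 SMix kernel, compiled behind CPUSUPPORT_X86_SSE2; the crypto_scrypt() driver removed here is presumably what now lives in the new crypto_scrypt.c listed above. As a hedged sketch (not the gem's actual dispatch code; it assumes cpusupport.h declares a cpusupport_x86_sse2() run-time probe whenever CPUSUPPORT_X86_SSE2 is defined), a driver could select a kernel like this:

#include <stddef.h>
#include <stdint.h>

#include "cpusupport.h"
#include "crypto_scrypt_smix.h"
#include "crypto_scrypt_smix_sse2.h"

/* Pointer type matching both SMix kernels. */
typedef void (*smix_fn)(uint8_t *, size_t, uint64_t, void *, void *);

static smix_fn
pick_smix(void)
{
#ifdef CPUSUPPORT_X86_SSE2
	/* Use the SSE2 kernel only if the CPU actually supports SSE2. */
	if (cpusupport_x86_sse2())
		return (crypto_scrypt_smix_sse2);
#endif
	/* Otherwise fall back to the portable kernel. */
	return (crypto_scrypt_smix);
}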
data/ext/scrypt/crypto_scrypt_smix_sse2.h
ADDED
@@ -0,0 +1,16 @@
+#ifndef _CRYPTO_SCRYPT_SMIX_SSE2_H_
+#define _CRYPTO_SCRYPT_SMIX_SSE2_H_
+
+/**
+ * crypto_scrypt_smix_sse2(B, r, N, V, XY):
+ * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
+ * the temporary storage V must be 128rN bytes in length; the temporary
+ * storage XY must be 256r + 64 bytes in length. The value N must be a
+ * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
+ * multiple of 64 bytes.
+ *
+ * Use SSE2 instructions.
+ */
+void crypto_scrypt_smix_sse2(uint8_t *, size_t, uint64_t, void *, void *);
+
+#endif /* !_CRYPTO_SCRYPT_SMIX_SSE2_H_ */
data/ext/scrypt/insecure_memzero.c
ADDED
@@ -0,0 +1,19 @@
+#include <stddef.h>
+#include <stdint.h>
+
+#include "insecure_memzero.h"
+
+/* Function which does the zeroing. */
+static void
+insecure_memzero_func(volatile void * buf, size_t len)
+{
+	volatile uint8_t * _buf = buf;
+	size_t i;
+
+	for (i = 0; i < len; i++)
+		_buf[i] = 0;
+}
+
+/* Pointer to memory-zeroing function. */
+void (* volatile insecure_memzero_ptr)(volatile void *, size_t) =
+    insecure_memzero_func;
data/ext/scrypt/insecure_memzero.h
ADDED
@@ -0,0 +1,37 @@
+#ifndef _INSECURE_MEMZERO_H_
+#define _INSECURE_MEMZERO_H_
+
+#include <stddef.h>
+
+/* Pointer to memory-zeroing function. */
+extern void (* volatile insecure_memzero_ptr)(volatile void *, size_t);
+
+/**
+ * insecure_memzero(buf, len):
+ * Attempt to zero ${len} bytes at ${buf} in spite of optimizing compilers'
+ * best (standards-compliant) attempts to remove the buffer-zeroing. In
+ * particular, to avoid performing the zeroing, a compiler would need to
+ * use optimistic devirtualization; recognize that non-volatile objects do not
+ * need to be treated as volatile, even if they are accessed via volatile
+ * qualified pointers; and perform link-time optimization; in addition to the
+ * dead-code elimination which often causes buffer-zeroing to be elided.
+ *
+ * Note however that zeroing a buffer does not guarantee that the data held
+ * in the buffer is not stored elsewhere; in particular, there may be copies
+ * held in CPU registers or in anonymous allocations on the stack, even if
+ * every named variable is successfully sanitized. Solving the "wipe data
+ * from the system" problem will require a C language extension which does not
+ * yet exist.
+ *
+ * For more information, see:
+ * http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html
+ * http://www.daemonology.net/blog/2014-09-06-zeroing-buffers-is-insufficient.html
+ */
+static inline void
+insecure_memzero(volatile void * buf, size_t len)
+{
+
+	(insecure_memzero_ptr)(buf, len);
+}
+
+#endif /* !_INSECURE_MEMZERO_H_ */
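The comment above explains the trick: the zeroing happens through a volatile function pointer, so the compiler cannot prove the call is side-effect free and must keep it, unlike a plain memset on a buffer that is about to go out of scope. A hedged usage sketch (not from the gem; the function and buffer names are made up):

#include <stdint.h>

#include "insecure_memzero.h"

static void
handle_secret(void)
{
	uint8_t key[32];

	/* ... derive the key and use it ... */

	/* Wipe the key; the volatile pointer keeps this call from being elided. */
	insecure_memzero(key, sizeof(key));
}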
data/ext/scrypt/sha256.c
CHANGED
@@ -1,66 +1,67 @@
-
- * Copyright 2005,2007,2009 Colin Percival
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include "scrypt_platform.h"
-
-#include <sys/types.h>
-
+#include <assert.h>
 #include <stdint.h>
 #include <string.h>

+#include "insecure_memzero.h"
 #include "sysendian.h"

 #include "sha256.h"

 /*
  * Encode a length len/4 vector of (uint32_t) into a length len vector of
- * (
+ * (uint8_t) in big-endian form. Assumes len is a multiple of 4.
  */
 static void
-be32enc_vect(
+be32enc_vect(uint8_t * dst, const uint32_t * src, size_t len)
 {
 	size_t i;

+	/* Sanity-check. */
+	assert(len % 4 == 0);
+
+	/* Encode vector, one word at a time. */
 	for (i = 0; i < len / 4; i++)
 		be32enc(dst + i * 4, src[i]);
 }

 /*
- * Decode a big-endian length len vector of (
+ * Decode a big-endian length len vector of (uint8_t) into a length
  * len/4 vector of (uint32_t). Assumes len is a multiple of 4.
  */
 static void
-be32dec_vect(uint32_t *dst, const
+be32dec_vect(uint32_t * dst, const uint8_t * src, size_t len)
 {
 	size_t i;

+	/* Sanity-check. */
+	assert(len % 4 == 0);
+
+	/* Decode vector, one word at a time. */
 	for (i = 0; i < len / 4; i++)
 		dst[i] = be32dec(src + i * 4);
 }

-/*
+/* SHA256 round constants. */
+static const uint32_t K[64] = {
+	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+/* Elementary functions used by SHA256 */
 #define Ch(x, y, z)	((x & (y ^ z)) ^ z)
 #define Maj(x, y, z)	((x & (y | z)) | (y & z))
 #define SHR(x, n)	(x >> n)
@@ -70,118 +71,86 @@ be32dec_vect(uint32_t *dst, const unsigned char *src, size_t len)
 #define s0(x)		(ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3))
 #define s1(x)		(ROTR(x, 17) ^ ROTR(x, 19) ^ SHR(x, 10))

-/*
+/* SHA256 round function */
 #define RND(a, b, c, d, e, f, g, h, k) \
-
-
-
-	h = t0 + t1;
+	h += S1(e) + Ch(e, f, g) + k; \
+	d += h; \
+	h += S0(a) + Maj(a, b, c);

 /* Adjusted round function for rotating state */
-#define RNDr(S, W, i,
+#define RNDr(S, W, i, ii) \
 	RND(S[(64 - i) % 8], S[(65 - i) % 8], \
-
-
-
-
+	    S[(66 - i) % 8], S[(67 - i) % 8], \
+	    S[(68 - i) % 8], S[(69 - i) % 8], \
+	    S[(70 - i) % 8], S[(71 - i) % 8], \
+	    W[i + ii] + K[i + ii])
+
+/* Message schedule computation */
+#define MSCH(W, ii, i) \
+	W[i + ii + 16] = s1(W[i + ii + 14]) + W[i + ii + 9] + s0(W[i + ii + 1]) + W[i + ii]

 /*
- *
+ * SHA256 block compression function. The 256-bit state is transformed via
  * the 512-bit input block to produce a new state.
  */
 static void
-
+SHA256_Transform(uint32_t state[static restrict 8],
+    const uint8_t block[static restrict 64],
+    uint32_t W[static restrict 64], uint32_t S[static restrict 8])
 {
-	uint32_t W[64];
-	uint32_t S[8];
-	uint32_t t0, t1;
 	int i;

-	/* 1. Prepare message schedule W. */
+	/* 1. Prepare the first part of the message schedule W. */
 	be32dec_vect(W, block, 64);
-	for (i = 16; i < 64; i++)
-		W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];

 	/* 2. Initialize working variables. */
 	memcpy(S, state, 32);

 	/* 3. Mix. */
-	RNDr(S, W, 0, 0x428a2f98);
-	RNDr(S, W, 1, 0x71374491);
-	RNDr(S, W, 2, 0xb5c0fbcf);
-	RNDr(S, W, 3, 0xe9b5dba5);
-	RNDr(S, W, 4, 0x3956c25b);
-	RNDr(S, W, 5, 0x59f111f1);
-	RNDr(S, W, 6, 0x923f82a4);
-	RNDr(S, W, 7, 0xab1c5ed5);
-	RNDr(S, W, 8, 0xd807aa98);
-	RNDr(S, W, 9, 0x12835b01);
-	RNDr(S, W, 10, 0x243185be);
-	RNDr(S, W, 11, 0x550c7dc3);
-	RNDr(S, W, 12, 0x72be5d74);
-	RNDr(S, W, 13, 0x80deb1fe);
-	RNDr(S, W, 14, 0x9bdc06a7);
-	RNDr(S, W, 15, 0xc19bf174);
-	RNDr(S, W, 16, 0xe49b69c1);
-	RNDr(S, W, 17, 0xefbe4786);
-	RNDr(S, W, 18, 0x0fc19dc6);
-	RNDr(S, W, 19, 0x240ca1cc);
-	RNDr(S, W, 20, 0x2de92c6f);
-	RNDr(S, W, 21, 0x4a7484aa);
-	RNDr(S, W, 22, 0x5cb0a9dc);
-	RNDr(S, W, 23, 0x76f988da);
-	RNDr(S, W, 24, 0x983e5152);
-	RNDr(S, W, 25, 0xa831c66d);
-	RNDr(S, W, 26, 0xb00327c8);
-	RNDr(S, W, 27, 0xbf597fc7);
-	RNDr(S, W, 28, 0xc6e00bf3);
-	RNDr(S, W, 29, 0xd5a79147);
-	RNDr(S, W, 30, 0x06ca6351);
-	RNDr(S, W, 31, 0x14292967);
-	RNDr(S, W, 32, 0x27b70a85);
-	RNDr(S, W, 33, 0x2e1b2138);
-	RNDr(S, W, 34, 0x4d2c6dfc);
-	RNDr(S, W, 35, 0x53380d13);
-	RNDr(S, W, 36, 0x650a7354);
-	RNDr(S, W, 37, 0x766a0abb);
-	RNDr(S, W, 38, 0x81c2c92e);
-	RNDr(S, W, 39, 0x92722c85);
-	RNDr(S, W, 40, 0xa2bfe8a1);
-	RNDr(S, W, 41, 0xa81a664b);
-	RNDr(S, W, 42, 0xc24b8b70);
-	RNDr(S, W, 43, 0xc76c51a3);
-	RNDr(S, W, 44, 0xd192e819);
-	RNDr(S, W, 45, 0xd6990624);
-	RNDr(S, W, 46, 0xf40e3585);
-	RNDr(S, W, 47, 0x106aa070);
-	RNDr(S, W, 48, 0x19a4c116);
-	RNDr(S, W, 49, 0x1e376c08);
-	RNDr(S, W, 50, 0x2748774c);
-	RNDr(S, W, 51, 0x34b0bcb5);
-	RNDr(S, W, 52, 0x391c0cb3);
-	RNDr(S, W, 53, 0x4ed8aa4a);
-	RNDr(S, W, 54, 0x5b9cca4f);
-	RNDr(S, W, 55, 0x682e6ff3);
-	RNDr(S, W, 56, 0x748f82ee);
-	RNDr(S, W, 57, 0x78a5636f);
-	RNDr(S, W, 58, 0x84c87814);
-	RNDr(S, W, 59, 0x8cc70208);
-	RNDr(S, W, 60, 0x90befffa);
-	RNDr(S, W, 61, 0xa4506ceb);
-	RNDr(S, W, 62, 0xbef9a3f7);
-	RNDr(S, W, 63, 0xc67178f2);
-
-	/* 4. Mix local working variables into global state */
+	for (i = 0; i < 64; i += 16) {
+		RNDr(S, W, 0, i);
+		RNDr(S, W, 1, i);
+		RNDr(S, W, 2, i);
+		RNDr(S, W, 3, i);
+		RNDr(S, W, 4, i);
+		RNDr(S, W, 5, i);
+		RNDr(S, W, 6, i);
+		RNDr(S, W, 7, i);
+		RNDr(S, W, 8, i);
+		RNDr(S, W, 9, i);
+		RNDr(S, W, 10, i);
+		RNDr(S, W, 11, i);
+		RNDr(S, W, 12, i);
+		RNDr(S, W, 13, i);
+		RNDr(S, W, 14, i);
+		RNDr(S, W, 15, i);
+
+		if (i == 48)
+			break;
+		MSCH(W, 0, i);
+		MSCH(W, 1, i);
+		MSCH(W, 2, i);
+		MSCH(W, 3, i);
+		MSCH(W, 4, i);
+		MSCH(W, 5, i);
+		MSCH(W, 6, i);
+		MSCH(W, 7, i);
+		MSCH(W, 8, i);
+		MSCH(W, 9, i);
+		MSCH(W, 10, i);
+		MSCH(W, 11, i);
+		MSCH(W, 12, i);
+		MSCH(W, 13, i);
+		MSCH(W, 14, i);
+		MSCH(W, 15, i);
+	}
+
+	/* 4. Mix local working variables into global state. */
 	for (i = 0; i < 8; i++)
 		state[i] += S[i];
-
-	/* Clean the stack. */
-	memset(W, 0, 256);
-	memset(S, 0, 32);
-	t0 = t1 = 0;
 }

-static
+static const uint8_t PAD[64] = {
 	0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
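The rewritten compression loop above handles the 64 rounds 16 at a time: each pass applies RNDr for rounds i..i+15 and then uses MSCH to extend the message schedule just far enough for the next pass, stopping at i == 48 because the final 16 rounds only read W[48..63], which are already filled in. For reference, MSCH is the standard SHA-256 schedule recurrence; the removed 2.1.1 code expanded it up front in one loop, roughly like this hedged sketch (helper name made up, macros equivalent to those in sha256.c):

#include <stdint.h>

/* Helpers equivalent to those used by sha256.c. */
#define SHR(x, n)	(x >> n)
#define ROTR(x, n)	((x >> n) | (x << (32 - n)))
#define s0(x)		(ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3))
#define s1(x)		(ROTR(x, 17) ^ ROTR(x, 19) ^ SHR(x, 10))

/* Illustrative only: expand the whole 64-word schedule ahead of time. */
static void
sha256_expand_schedule(uint32_t W[64])
{
	int t;

	for (t = 16; t < 64; t++)
		W[t] = s1(W[t - 2]) + W[t - 7] + s0(W[t - 15]) + W[t - 16];
}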
@@ -190,180 +159,313 @@ static unsigned char PAD[64] = {

 /* Add padding and terminating bit-count. */
 static void
-
+SHA256_Pad(SHA256_CTX * ctx, uint32_t tmp32[static restrict 72])
 {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+	size_t r;
+
+	/* Figure out how many bytes we have buffered. */
+	r = (ctx->count >> 3) & 0x3f;
+
+	/* Pad to 56 mod 64, transforming if we finish a block en route. */
+	if (r < 56) {
+		/* Pad to 56 mod 64. */
+		memcpy(&ctx->buf[r], PAD, 56 - r);
+	} else {
+		/* Finish the current block and mix. */
+		memcpy(&ctx->buf[r], PAD, 64 - r);
+		SHA256_Transform(ctx->state, ctx->buf, &tmp32[0], &tmp32[64]);
+
+		/* The start of the final block is all zeroes. */
+		memset(&ctx->buf[0], 0, 56);
+	}
+
+	/* Add the terminating bit-count. */
+	be64enc(&ctx->buf[56], ctx->count);
+
+	/* Mix in the final block. */
+	SHA256_Transform(ctx->state, ctx->buf, &tmp32[0], &tmp32[64]);
 }

-/*
+/* Magic initialization constants. */
+static const uint32_t initstate[8] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
+};
+
+/**
+ * SHA256_Init(ctx):
+ * Initialize the SHA256 context ${ctx}.
+ */
 void
-
+SHA256_Init(SHA256_CTX * ctx)
 {

-	/* Zero bits processed so far */
-	ctx->count
-
-	/*
-	ctx->state
-	ctx->state[1] = 0xBB67AE85;
-	ctx->state[2] = 0x3C6EF372;
-	ctx->state[3] = 0xA54FF53A;
-	ctx->state[4] = 0x510E527F;
-	ctx->state[5] = 0x9B05688C;
-	ctx->state[6] = 0x1F83D9AB;
-	ctx->state[7] = 0x5BE0CD19;
+	/* Zero bits processed so far. */
+	ctx->count = 0;
+
+	/* Initialize state. */
+	memcpy(ctx->state, initstate, sizeof(initstate));
 }

-
-
-
+/**
+ * SHA256_Update(ctx, in, len):
+ * Input ${len} bytes from ${in} into the SHA256 context ${ctx}.
+ */
+static void
+_SHA256_Update(SHA256_CTX * ctx, const void * in, size_t len,
+    uint32_t tmp32[static restrict 72])
 {
-	uint32_t bitlen[2];
 	uint32_t r;
-	const
+	const uint8_t * src = in;

-	/*
-
+	/* Return immediately if we have nothing to do. */
+	if (len == 0)
+		return;

-	/*
-
-	bitlen[0] = (uint32_t)(len >> 29);
+	/* Number of bytes left in the buffer from previous updates. */
+	r = (ctx->count >> 3) & 0x3f;

-	/* Update number of bits */
-
-	ctx->count[0]++;
-	ctx->count[0] += bitlen[0];
+	/* Update number of bits. */
+	ctx->count += (uint64_t)(len) << 3;

-	/* Handle the case where we don't need to perform any transforms */
+	/* Handle the case where we don't need to perform any transforms. */
 	if (len < 64 - r) {
 		memcpy(&ctx->buf[r], src, len);
 		return;
 	}

-	/* Finish the current block */
+	/* Finish the current block. */
 	memcpy(&ctx->buf[r], src, 64 - r);
-
+	SHA256_Transform(ctx->state, ctx->buf, &tmp32[0], &tmp32[64]);
 	src += 64 - r;
 	len -= 64 - r;

-	/* Perform complete blocks */
+	/* Perform complete blocks. */
 	while (len >= 64) {
-
+		SHA256_Transform(ctx->state, src, &tmp32[0], &tmp32[64]);
 		src += 64;
 		len -= 64;
 	}

-	/* Copy left over data into buffer */
+	/* Copy left over data into buffer. */
 	memcpy(ctx->buf, src, len);
 }

-/*
- * SHA-256 finalization. Pads the input data, exports the hash value,
- * and clears the context state.
- */
+/* Wrapper function for intermediate-values sanitization. */
 void
-
+SHA256_Update(SHA256_CTX * ctx, const void * in, size_t len)
+{
+	uint32_t tmp32[72];
+
+	/* Call the real function. */
+	_SHA256_Update(ctx, in, len, tmp32);
+
+	/* Clean the stack. */
+	insecure_memzero(tmp32, 288);
+}
+
+/**
+ * SHA256_Final(digest, ctx):
+ * Output the SHA256 hash of the data input to the context ${ctx} into the
+ * buffer ${digest}.
+ */
+static void
+_SHA256_Final(uint8_t digest[32], SHA256_CTX * ctx,
+    uint32_t tmp32[static restrict 72])
 {

-	/* Add padding */
-
+	/* Add padding. */
+	SHA256_Pad(ctx, tmp32);

-	/* Write the hash */
+	/* Write the hash. */
 	be32enc_vect(digest, ctx->state, 32);
+}

-
-
+/* Wrapper function for intermediate-values sanitization. */
+void
+SHA256_Final(uint8_t digest[32], SHA256_CTX * ctx)
+{
+	uint32_t tmp32[72];
+
+	/* Call the real function. */
+	_SHA256_Final(digest, ctx, tmp32);
+
+	/* Clear the context state. */
+	insecure_memzero(ctx, sizeof(SHA256_CTX));
+
+	/* Clean the stack. */
+	insecure_memzero(tmp32, 288);
 }

-
+/**
+ * SHA256_Buf(in, len, digest):
+ * Compute the SHA256 hash of ${len} bytes from $in} and write it to ${digest}.
+ */
 void
-
+SHA256_Buf(const void * in, size_t len, uint8_t digest[32])
 {
-
-
-
+	SHA256_CTX ctx;
+	uint32_t tmp32[72];
+
+	SHA256_Init(&ctx);
+	_SHA256_Update(&ctx, in, len, tmp32);
+	_SHA256_Final(digest, &ctx, tmp32);
+
+	/* Clean the stack. */
+	insecure_memzero(&ctx, sizeof(SHA256_CTX));
+	insecure_memzero(tmp32, 288);
+}
+
+/**
+ * HMAC_SHA256_Init(ctx, K, Klen):
+ * Initialize the HMAC-SHA256 context ${ctx} with ${Klen} bytes of key from
+ * ${K}.
+ */
+static void
+_HMAC_SHA256_Init(HMAC_SHA256_CTX * ctx, const void * _K, size_t Klen,
+    uint32_t tmp32[static restrict 72], uint8_t pad[static restrict 64],
+    uint8_t khash[static restrict 32])
+{
+	const uint8_t * K = _K;
 	size_t i;

-	/* If Klen > 64, the key is really
+	/* If Klen > 64, the key is really SHA256(K). */
 	if (Klen > 64) {
-
-
-
+		SHA256_Init(&ctx->ictx);
+		_SHA256_Update(&ctx->ictx, K, Klen, tmp32);
+		_SHA256_Final(khash, &ctx->ictx, tmp32);
 		K = khash;
 		Klen = 32;
 	}

-	/* Inner
-
+	/* Inner SHA256 operation is SHA256(K xor [block of 0x36] || data). */
+	SHA256_Init(&ctx->ictx);
 	memset(pad, 0x36, 64);
 	for (i = 0; i < Klen; i++)
 		pad[i] ^= K[i];
-
+	_SHA256_Update(&ctx->ictx, pad, 64, tmp32);

-	/* Outer
-
+	/* Outer SHA256 operation is SHA256(K xor [block of 0x5c] || hash). */
+	SHA256_Init(&ctx->octx);
 	memset(pad, 0x5c, 64);
 	for (i = 0; i < Klen; i++)
 		pad[i] ^= K[i];
-
+	_SHA256_Update(&ctx->octx, pad, 64, tmp32);
+}
+
+/* Wrapper function for intermediate-values sanitization. */
+void
+HMAC_SHA256_Init(HMAC_SHA256_CTX * ctx, const void * _K, size_t Klen)
+{
+	uint32_t tmp32[72];
+	uint8_t pad[64];
+	uint8_t khash[32];
+
+	/* Call the real function. */
+	_HMAC_SHA256_Init(ctx, _K, Klen, tmp32, pad, khash);

 	/* Clean the stack. */
-
+	insecure_memzero(tmp32, 288);
+	insecure_memzero(khash, 32);
+	insecure_memzero(pad, 64);
+}
+
+/**
+ * HMAC_SHA256_Update(ctx, in, len):
+ * Input ${len} bytes from ${in} into the HMAC-SHA256 context ${ctx}.
+ */
+static void
+_HMAC_SHA256_Update(HMAC_SHA256_CTX * ctx, const void * in, size_t len,
+    uint32_t tmp32[static restrict 72])
+{
+
+	/* Feed data to the inner SHA256 operation. */
+	_SHA256_Update(&ctx->ictx, in, len, tmp32);
 }

-/*
+/* Wrapper function for intermediate-values sanitization. */
 void
-
+HMAC_SHA256_Update(HMAC_SHA256_CTX * ctx, const void * in, size_t len)
 {
+	uint32_t tmp32[72];

-	/*
-
+	/* Call the real function. */
+	_HMAC_SHA256_Update(ctx, in, len, tmp32);
+
+	/* Clean the stack. */
+	insecure_memzero(tmp32, 288);
+}
+
+/**
+ * HMAC_SHA256_Final(digest, ctx):
+ * Output the HMAC-SHA256 of the data input to the context ${ctx} into the
+ * buffer ${digest}.
+ */
+static void
+_HMAC_SHA256_Final(uint8_t digest[32], HMAC_SHA256_CTX * ctx,
+    uint32_t tmp32[static restrict 72], uint8_t ihash[static restrict 32])
+{
+
+	/* Finish the inner SHA256 operation. */
+	_SHA256_Final(ihash, &ctx->ictx, tmp32);
+
+	/* Feed the inner hash to the outer SHA256 operation. */
+	_SHA256_Update(&ctx->octx, ihash, 32, tmp32);
+
+	/* Finish the outer SHA256 operation. */
+	_SHA256_Final(digest, &ctx->octx, tmp32);
 }

-/*
+/* Wrapper function for intermediate-values sanitization. */
 void
-
+HMAC_SHA256_Final(uint8_t digest[32], HMAC_SHA256_CTX * ctx)
 {
-
+	uint32_t tmp32[72];
+	uint8_t ihash[32];

-	/*
-
+	/* Call the real function. */
+	_HMAC_SHA256_Final(digest, ctx, tmp32, ihash);

-	/*
-
+	/* Clean the stack. */
+	insecure_memzero(tmp32, 288);
+	insecure_memzero(ihash, 32);
+}
+
+/**
+ * HMAC_SHA256_Buf(K, Klen, in, len, digest):
+ * Compute the HMAC-SHA256 of ${len} bytes from ${in} using the key ${K} of
+ * length ${Klen}, and write the result to ${digest}.
+ */
+void
+HMAC_SHA256_Buf(const void * K, size_t Klen, const void * in, size_t len,
+    uint8_t digest[32])
+{
+	HMAC_SHA256_CTX ctx;
+	uint32_t tmp32[72];
+	uint8_t tmp8[96];

-
-
+	_HMAC_SHA256_Init(&ctx, K, Klen, tmp32, &tmp8[0], &tmp8[64]);
+	_HMAC_SHA256_Update(&ctx, in, len, tmp32);
+	_HMAC_SHA256_Final(digest, &ctx, tmp32, &tmp8[0]);

 	/* Clean the stack. */
-
+	insecure_memzero(&ctx, sizeof(HMAC_SHA256_CTX));
+	insecure_memzero(tmp32, 288);
+	insecure_memzero(tmp8, 96);
 }

 /**
- *
- * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-
+ * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen):
+ * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and
  * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1).
  */
 void
-
+PBKDF2_SHA256(const uint8_t * passwd, size_t passwdlen, const uint8_t * salt,
     size_t saltlen, uint64_t c, uint8_t * buf, size_t dkLen)
 {
-
+	HMAC_SHA256_CTX Phctx, PShctx, hctx;
+	uint32_t tmp32[72];
+	uint8_t tmp8[96];
 	size_t i;
 	uint8_t ivec[4];
 	uint8_t U[32];
@@ -372,9 +474,16 @@ PBKDF2_scrypt_SHA256(const uint8_t * passwd, size_t passwdlen, const uint8_t * s
 	int k;
 	size_t clen;

+	/* Sanity-check. */
+	assert(dkLen <= 32 * (size_t)(UINT32_MAX));
+
+	/* Compute HMAC state after processing P. */
+	_HMAC_SHA256_Init(&Phctx, passwd, passwdlen,
+	    tmp32, &tmp8[0], &tmp8[64]);
+
 	/* Compute HMAC state after processing P and S. */
-
-
+	memcpy(&PShctx, &Phctx, sizeof(HMAC_SHA256_CTX));
+	_HMAC_SHA256_Update(&PShctx, salt, saltlen, tmp32);

 	/* Iterate through the blocks. */
 	for (i = 0; i * 32 < dkLen; i++) {
@@ -382,18 +491,18 @@ PBKDF2_scrypt_SHA256(const uint8_t * passwd, size_t passwdlen, const uint8_t * s
 		be32enc(ivec, (uint32_t)(i + 1));

 		/* Compute U_1 = PRF(P, S || INT(i)). */
-		memcpy(&hctx, &PShctx, sizeof(
-
-
+		memcpy(&hctx, &PShctx, sizeof(HMAC_SHA256_CTX));
+		_HMAC_SHA256_Update(&hctx, ivec, 4, tmp32);
+		_HMAC_SHA256_Final(U, &hctx, tmp32, tmp8);

 		/* T_i = U_1 ... */
 		memcpy(T, U, 32);

 		for (j = 2; j <= c; j++) {
 			/* Compute U_j. */
-
-
-
+			memcpy(&hctx, &Phctx, sizeof(HMAC_SHA256_CTX));
+			_HMAC_SHA256_Update(&hctx, U, 32, tmp32);
+			_HMAC_SHA256_Final(U, &hctx, tmp32, tmp8);

 			/* ... xor U_j ... */
 			for (k = 0; k < 32; k++)
@@ -407,6 +516,12 @@ PBKDF2_scrypt_SHA256(const uint8_t * passwd, size_t passwdlen, const uint8_t * s
 		memcpy(&buf[i * 32], T, clen);
 	}

-	/* Clean
-
+	/* Clean the stack. */
+	insecure_memzero(&Phctx, sizeof(HMAC_SHA256_CTX));
+	insecure_memzero(&PShctx, sizeof(HMAC_SHA256_CTX));
+	insecure_memzero(&hctx, sizeof(HMAC_SHA256_CTX));
+	insecure_memzero(tmp32, 288);
+	insecure_memzero(tmp8, 96);
+	insecure_memzero(U, 32);
+	insecure_memzero(T, 32);
 }
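The PBKDF2_SHA256 routine whose new prototype appears above underpins scrypt's key derivation; the 3.0.0 code also precomputes the HMAC state for the password (Phctx), so each inner iteration only reprocesses the running U value. A hedged usage sketch (not from the gem; the function name, password, salt, and output length are made up, and scrypt itself calls this with c = 1):

#include <stdint.h>
#include <string.h>

#include "sha256.h"

static void
derive_example_key(uint8_t dk[64])
{
	const char * passwd = "correct horse battery staple";
	const char * salt = "example-salt";

	/* PBKDF2(passwd, salt, c = 1, dkLen = 64) with HMAC-SHA256 as the PRF. */
	PBKDF2_SHA256((const uint8_t *)passwd, strlen(passwd),
	    (const uint8_t *)salt, strlen(salt), 1, dk, 64);
}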