scrypt 2.0.2 → 3.0.3
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +3 -2
- data/README.md +3 -3
- data/Rakefile +5 -3
- data/ext/scrypt/Rakefile +5 -1
- data/ext/scrypt/cpusupport.h +105 -0
- data/ext/scrypt/crypto_scrypt.c +257 -0
- data/ext/scrypt/crypto_scrypt.h +1 -0
- data/ext/scrypt/crypto_scrypt_internal.h +0 -0
- data/ext/scrypt/crypto_scrypt_smix.c +214 -0
- data/ext/scrypt/crypto_scrypt_smix.h +14 -0
- data/ext/scrypt/{crypto_scrypt-sse.c → crypto_scrypt_smix_sse2.c} +21 -142
- data/ext/scrypt/crypto_scrypt_smix_sse2.h +16 -0
- data/ext/scrypt/insecure_memzero.c +19 -0
- data/ext/scrypt/insecure_memzero.h +37 -0
- data/ext/scrypt/sha256.c +344 -229
- data/ext/scrypt/sha256.h +84 -50
- data/ext/scrypt/warnp.c +76 -0
- data/ext/scrypt/warnp.h +59 -0
- data/lib/scrypt/security_utils.rb +23 -0
- data/lib/scrypt/version.rb +1 -1
- data/lib/scrypt.rb +4 -3
- data/scrypt.gemspec +4 -3
- data/spec/scrypt/engine_spec.rb +23 -21
- data/spec/scrypt/password_spec.rb +25 -25
- data/spec/scrypt/utils_spec.rb +12 -0
- data.tar.gz.sig +0 -0
- metadata +38 -22
- metadata.gz.sig +2 -2
data/ext/scrypt/{crypto_scrypt-sse.c → crypto_scrypt_smix_sse2.c}

@@ -26,36 +26,27 @@
  * This file was originally written by Colin Percival as part of the Tarsnap
  * online backup system.
  */
-#include "
-
-#include <sys/types.h>
-#ifndef __MINGW32__
-#include <sys/mman.h>
-#endif
+#include "cpusupport.h"
+#ifdef CPUSUPPORT_X86_SSE2
 
 #include <emmintrin.h>
-#include <errno.h>
 #include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
 
-#include "sha256.h"
 #include "sysendian.h"
 
-#include "
+#include "crypto_scrypt_smix_sse2.h"
 
-static void blkcpy(void *, void *, size_t);
-static void blkxor(void *, void *, size_t);
+static void blkcpy(void *, const void *, size_t);
+static void blkxor(void *, const void *, size_t);
 static void salsa20_8(__m128i *);
-static void blockmix_salsa8(__m128i *, __m128i *, __m128i *, size_t);
-static uint64_t integerify(void *, size_t);
-static void smix(uint8_t *, size_t, uint64_t, void *, void *);
+static void blockmix_salsa8(const __m128i *, __m128i *, __m128i *, size_t);
+static uint64_t integerify(const void *, size_t);
 
 static void
-blkcpy(void * dest, void * src, size_t len)
+blkcpy(void * dest, const void * src, size_t len)
 {
 	__m128i * D = dest;
-	__m128i * S = src;
+	const __m128i * S = src;
 	size_t L = len / 16;
 	size_t i;
 
@@ -64,10 +55,10 @@ blkcpy(void * dest, void * src, size_t len)
 }
 
 static void
-blkxor(void * dest, void * src, size_t len)
+blkxor(void * dest, const void * src, size_t len)
 {
 	__m128i * D = dest;
-	__m128i * S = src;
+	const __m128i * S = src;
 	size_t L = len / 16;
 	size_t i;
 
@@ -144,7 +135,7 @@ salsa20_8(__m128i B[4])
  * temporary space X must be 64 bytes.
  */
 static void
-blockmix_salsa8(__m128i * Bin, __m128i * Bout, __m128i * X, size_t r)
+blockmix_salsa8(const __m128i * Bin, __m128i * Bout, __m128i * X, size_t r)
 {
 	size_t i;
 
@@ -174,25 +165,28 @@ blockmix_salsa8(__m128i * Bin, __m128i * Bout, __m128i * X, size_t r)
 /**
  * integerify(B, r):
  * Return the result of parsing B_{2r-1} as a little-endian integer.
+ * Note that B's layout is permuted compared to the generic implementation.
  */
 static uint64_t
-integerify(void * B, size_t r)
+integerify(const void * B, size_t r)
 {
-	uint32_t * X = (void *)((uintptr_t)(B) + (2 * r - 1) * 64);
+	const uint32_t * X = (const void *)((uintptr_t)(B) + (2 * r - 1) * 64);
 
 	return (((uint64_t)(X[13]) << 32) + X[0]);
 }
 
 /**
- *
+ * crypto_scrypt_smix_sse2(B, r, N, V, XY):
  * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
  * the temporary storage V must be 128rN bytes in length; the temporary
  * storage XY must be 256r + 64 bytes in length. The value N must be a
  * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
  * multiple of 64 bytes.
+ *
+ * Use SSE2 instructions.
  */
-
-
+void
+crypto_scrypt_smix_sse2(uint8_t * B, size_t r, uint64_t N, void * V, void * XY)
 {
 	__m128i * X = XY;
 	__m128i * Y = (void *)((uintptr_t)(XY) + 128 * r);
@@ -251,119 +245,4 @@ smix(uint8_t * B, size_t r, uint64_t N, void * V, void * XY)
 	}
 }
 
-
- * crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
- * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
- * p, buflen) and write the result into buf. The parameters r, p, and buflen
- * must satisfy r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N
- * must be a power of 2 greater than 1.
- *
- * Return 0 on success; or -1 on error.
- */
-int
-crypto_scrypt(const uint8_t * passwd, size_t passwdlen,
-    const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t _r, uint32_t _p,
-    uint8_t * buf, size_t buflen)
-{
-	void * B0, * V0, * XY0;
-	uint8_t * B;
-	uint32_t * V;
-	uint32_t * XY;
-	size_t r = _r, p = _p;
-	uint32_t i;
-
-	/* Sanity-check parameters. */
-#if SIZE_MAX > UINT32_MAX
-	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
-		errno = EFBIG;
-		goto err0;
-	}
-#endif
-	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
-		errno = EFBIG;
-		goto err0;
-	}
-	if (((N & (N - 1)) != 0) || (N < 2)) {
-		errno = EINVAL;
-		goto err0;
-	}
-	if ((r > SIZE_MAX / 128 / p) ||
-#if SIZE_MAX / 256 <= UINT32_MAX
-	    (r > (SIZE_MAX - 64) / 256) ||
-#endif
-	    (N > SIZE_MAX / 128 / r)) {
-		errno = ENOMEM;
-		goto err0;
-	}
-
-	/* Allocate memory. */
-#ifdef HAVE_POSIX_MEMALIGN
-	if ((errno = posix_memalign(&B0, 64, 128 * r * p)) != 0)
-		goto err0;
-	B = (uint8_t *)(B0);
-	if ((errno = posix_memalign(&XY0, 64, 256 * r + 64)) != 0)
-		goto err1;
-	XY = (uint32_t *)(XY0);
-#ifndef MAP_ANON
-	if ((errno = posix_memalign(&V0, 64, 128 * r * N)) != 0)
-		goto err2;
-	V = (uint32_t *)(V0);
-#endif
-#else
-	if ((B0 = malloc(128 * r * p + 63)) == NULL)
-		goto err0;
-	B = (uint8_t *)(((uintptr_t)(B0) + 63) & ~ (uintptr_t)(63));
-	if ((XY0 = malloc(256 * r + 64 + 63)) == NULL)
-		goto err1;
-	XY = (uint32_t *)(((uintptr_t)(XY0) + 63) & ~ (uintptr_t)(63));
-#ifndef MAP_ANON
-	if ((V0 = malloc(128 * r * N + 63)) == NULL)
-		goto err2;
-	V = (uint32_t *)(((uintptr_t)(V0) + 63) & ~ (uintptr_t)(63));
-#endif
-#endif
-#ifdef MAP_ANON
-	if ((V0 = mmap(NULL, 128 * r * N, PROT_READ | PROT_WRITE,
-#ifdef MAP_NOCORE
-	    MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
-#else
-	    MAP_ANON | MAP_PRIVATE,
-#endif
-	    -1, 0)) == MAP_FAILED)
-		goto err2;
-	V = (uint32_t *)(V0);
-#endif
-
-	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
-	PBKDF2_scrypt_SHA256(passwd, passwdlen, salt, saltlen, 1, B, p * 128 * r);
-
-	/* 2: for i = 0 to p - 1 do */
-	for (i = 0; i < p; i++) {
-		/* 3: B_i <-- MF(B_i, N) */
-		smix(&B[i * 128 * r], r, N, V, XY);
-	}
-
-	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
-	PBKDF2_scrypt_SHA256(passwd, passwdlen, B, p * 128 * r, 1, buf, buflen);
-
-	/* Free memory. */
-#ifdef MAP_ANON
-	if (munmap(V0, 128 * r * N))
-		goto err2;
-#else
-	free(V0);
-#endif
-	free(XY0);
-	free(B0);
-
-	/* Success! */
-	return (0);
-
-err2:
-	free(XY0);
-err1:
-	free(B0);
-err0:
-	/* Failure! */
-	return (-1);
-}
+#endif /* CPUSUPPORT_X86_SSE2 */
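Note: crypto_scrypt() is removed from this file but not from the gem; per the file list above it now lives in the new data/ext/scrypt/crypto_scrypt.c, which calls into the smix implementations. A rough, hypothetical usage sketch of the entry point whose prototype appears in the removed hunk follows; the calling function and parameter values are illustrative only, not taken from the gem.

#include <stdint.h>

#include "crypto_scrypt.h"

/* Hypothetical caller: derive a 64-byte key with interactive-strength
 * parameters (N = 2^14, r = 8, p = 1), which satisfy the constraints from the
 * comment above (N a power of 2 greater than 1, r * p < 2^30).
 * Returns 0 on success or -1 on error, as crypto_scrypt() does. */
static int
derive_key_example(uint8_t key[64])
{
	const uint8_t passwd[] = "correct horse battery staple";
	const uint8_t salt[] = "per-user random salt";

	return (crypto_scrypt(passwd, sizeof(passwd) - 1,
	    salt, sizeof(salt) - 1, 16384, 8, 1, key, 64));
}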
data/ext/scrypt/crypto_scrypt_smix_sse2.h (new file)

@@ -0,0 +1,16 @@
+#ifndef _CRYPTO_SCRYPT_SMIX_SSE2_H_
+#define _CRYPTO_SCRYPT_SMIX_SSE2_H_
+
+/**
+ * crypto_scrypt_smix_sse2(B, r, N, V, XY):
+ * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
+ * the temporary storage V must be 128rN bytes in length; the temporary
+ * storage XY must be 256r + 64 bytes in length. The value N must be a
+ * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
+ * multiple of 64 bytes.
+ *
+ * Use SSE2 instructions.
+ */
+void crypto_scrypt_smix_sse2(uint8_t *, size_t, uint64_t, void *, void *);
+
+#endif /* !_CRYPTO_SCRYPT_SMIX_SSE2_H_ */
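For orientation, a minimal sketch of how a caller could pick between the generic and SSE2 smix routines. This is illustrative only and makes two assumptions beyond what the diff shows: that crypto_scrypt_smix() declared in the new crypto_scrypt_smix.h has the same prototype as the SSE2 variant, and that the choice is made at compile time via the CPUSUPPORT_X86_SSE2 macro from cpusupport.h (the gem may additionally perform runtime CPU-feature checks).

#include <stddef.h>
#include <stdint.h>

#include "cpusupport.h"
#include "crypto_scrypt_smix.h"	/* assumed: same prototype as the SSE2 version */
#ifdef CPUSUPPORT_X86_SSE2
#include "crypto_scrypt_smix_sse2.h"
#endif

/* Hypothetical helper: run SMix_r(B, N) with whichever implementation the
 * build supports. */
static void
smix_dispatch(uint8_t * B, size_t r, uint64_t N, void * V, void * XY)
{

#ifdef CPUSUPPORT_X86_SSE2
	/* SSE2 path compiled from crypto_scrypt_smix_sse2.c above. */
	crypto_scrypt_smix_sse2(B, r, N, V, XY);
#else
	/* Portable fallback from crypto_scrypt_smix.c. */
	crypto_scrypt_smix(B, r, N, V, XY);
#endif
}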
data/ext/scrypt/insecure_memzero.c (new file)

@@ -0,0 +1,19 @@
+#include <stddef.h>
+#include <stdint.h>
+
+#include "insecure_memzero.h"
+
+/* Function which does the zeroing. */
+static void
+insecure_memzero_func(volatile void * buf, size_t len)
+{
+	volatile uint8_t * _buf = buf;
+	size_t i;
+
+	for (i = 0; i < len; i++)
+		_buf[i] = 0;
+}
+
+/* Pointer to memory-zeroing function. */
+void (* volatile insecure_memzero_ptr)(volatile void *, size_t) =
+    insecure_memzero_func;
data/ext/scrypt/insecure_memzero.h (new file)

@@ -0,0 +1,37 @@
+#ifndef _INSECURE_MEMZERO_H_
+#define _INSECURE_MEMZERO_H_
+
+#include <stddef.h>
+
+/* Pointer to memory-zeroing function. */
+extern void (* volatile insecure_memzero_ptr)(volatile void *, size_t);
+
+/**
+ * insecure_memzero(buf, len):
+ * Attempt to zero ${len} bytes at ${buf} in spite of optimizing compilers'
+ * best (standards-compliant) attempts to remove the buffer-zeroing. In
+ * particular, to avoid performing the zeroing, a compiler would need to
+ * use optimistic devirtualization; recognize that non-volatile objects do not
+ * need to be treated as volatile, even if they are accessed via volatile
+ * qualified pointers; and perform link-time optimization; in addition to the
+ * dead-code elimination which often causes buffer-zeroing to be elided.
+ *
+ * Note however that zeroing a buffer does not guarantee that the data held
+ * in the buffer is not stored elsewhere; in particular, there may be copies
+ * held in CPU registers or in anonymous allocations on the stack, even if
+ * every named variable is successfully sanitized. Solving the "wipe data
+ * from the system" problem will require a C language extension which does not
+ * yet exist.
+ *
+ * For more information, see:
+ * http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html
+ * http://www.daemonology.net/blog/2014-09-06-zeroing-buffers-is-insufficient.html
+ */
+static inline void
+insecure_memzero(volatile void * buf, size_t len)
+{
+
+	(insecure_memzero_ptr)(buf, len);
+}
+
+#endif /* !_INSECURE_MEMZERO_H_ */
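A short usage sketch for this new helper; the calling code is hypothetical, not from the gem. Because insecure_memzero() calls through the volatile function pointer insecure_memzero_ptr, the compiler must assume the stores are observable and cannot drop the zeroing as a dead store, which is what routinely happens with a plain memset() immediately before free().

#include <stdlib.h>
#include <string.h>

#include "insecure_memzero.h"

/* Hypothetical caller: copy key material, use it, then scrub it before
 * releasing the buffer. */
static int
use_secret(const unsigned char * secret, size_t len)
{
	unsigned char * copy;

	if ((copy = malloc(len)) == NULL)
		return (-1);
	memcpy(copy, secret, len);

	/* ... derive keys, compare hashes, etc. ... */

	/* Wipe through the volatile function pointer so the write is kept. */
	insecure_memzero(copy, len);
	free(copy);
	return (0);
}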