scrypt 2.0.2 → 3.0.3

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 2c836e8c175f08a62b2dd0fc2732874265aab163
4
- data.tar.gz: a022b4b68060d2dd28b5ee469883ac666bb819f6
3
+ metadata.gz: 981c040787ad037c2e3dfcabc394b7b3c0cacb81
4
+ data.tar.gz: f82dfc646bfee4356d66c6cecfbc6dc2e03e7e7f
5
5
  SHA512:
6
- metadata.gz: b6d28a4faf27640f452966df5a925b5f1aa2aba6364714c236632e88dcf132af2a3c1a9092ff6f1f9487d7f3fcd6ed600129af957fdd8656fa6a20d36dad438e
7
- data.tar.gz: 41c2e4894c9ebbd533449fd10cab5f7e3f49e8bc9bf1675667787f5ffc685e8ecde2dcbdd7122c9627d70bc116cc9830fb46c20b095c23195e1e8b7b27669258
6
+ metadata.gz: 98879e9b989ac3ad638cd00393164d89a228c6c162c36e8eee12d6d0931eab2454bc50dd94b702742801333e0beb6fa94ebbc321db16024ffadf856fc98fb328
7
+ data.tar.gz: f6860d45961fe7c6cf23eae1d62c8382750aa772077ffd88f03a038f54f57c122980803420abf6a3e490821fad396e0ca826d4c5dfc68705b2a6cc8c9110f6cb
checksums.yaml.gz.sig CHANGED
@@ -1,2 +1,3 @@
1
- )$ e����0b���a8=�����AP;�Ff9kN����tf9*��R&5
2
- 3}<.�)�|�\�*z<���9�3�#�'�+�|&G!����ُb��+'=����[㚍�z�T��<���H�-�������کb@�l��g�F�T�W9��e���vÛ�5|��ԺI���S����' {�b����հ���J<a����l��ƸUv�/l��
1
+ (~;��0u|p
2
+ hXy(r�*�3��yKF
3
+ zؼP�m-�E��@���DJ����ˑ? Et'bT+���y�O<z��#��"�K4g�&(v��n^����y�z���7�B�+7��%��:r0J����2��!n�t�����7�K\�z�G�
data/README.md CHANGED
@@ -1,4 +1,4 @@
1
- scrypt [![Build Status](https://secure.travis-ci.org/pbhogan/scrypt.png)](http://travis-ci.org/pbhogan/scrypt)
1
+ scrypt [![Build Status](https://secure.travis-ci.org/pbhogan/scrypt.svg)](http://travis-ci.org/pbhogan/scrypt)
2
2
  ======
3
3
 
4
4
  The scrypt key derivation function is designed to be far more secure against hardware brute-force attacks than alternative functions such as PBKDF2 or bcrypt.
@@ -37,13 +37,13 @@ password == "a paltry guess" # => false
37
37
  Password.create takes six options which will determine the key length and salt size, as well as the cost limits of the computation:
38
38
 
39
39
  * `:key_len` specifies the length in bytes of the key you want to generate. The default is 32 bytes (256 bits). Minimum is 16 bytes (128 bits). Maximum is 512 bytes (4096 bits).
40
- * `:salt_size` specifies the size in bytes of the random salt you want to generate. The default and minimum is 8 bytes (64 bits). Maximum is 32 bytes (256 bits).
40
+ * `:salt_size` specifies the size in bytes of the random salt you want to generate. The default and maximum is 32 bytes (256 bits). Minimum is 8 bytes (64 bits).
41
41
  * `:max_time` specifies the maximum number of seconds the computation should take.
42
42
  * `:max_mem` specifies the maximum number of bytes of memory the computation may use. A value of 0 specifies no upper limit. The minimum is always 1 MB.
43
43
  * `:max_memfrac` specifies the maximum memory to use, expressed as a fraction of available resources. Any value equal to 0 or greater than 0.5 will result in 0.5 being used.
44
44
  * `:cost` specifies a cost string (e.g. `'400$8$19$'`) from the `calibrate` method. The `:max_*` options will be ignored if this option is given, or if `calibrate!` has been called.
45
45
 
46
- Default options will result in calculation time of approx. 200 ms with 1 MB memory use.
46
+ Default options will result in calculation time of approx. 200 ms with 16 MB memory use.
47
47
 
48
48
  ## Other things you can do
49
49
 
data/Rakefile CHANGED
@@ -24,10 +24,14 @@ end
24
24
  desc "FFI compiler"
25
25
  namespace "ffi-compiler" do
26
26
  FFI::Compiler::CompileTask.new('ext/scrypt/scrypt_ext') do |t|
27
- t.cflags << "-Wall -msse -msse2"
27
+ t.cflags << "-Wall -std=c99"
28
+ t.cflags << "-msse -msse2" if t.platform.arch.include? "86"
28
29
  t.cflags << "-D_GNU_SOURCE=1" if RbConfig::CONFIG["host_os"].downcase =~ /mingw/
30
+ t.cflags << "-D__need_timespec" if RbConfig::CONFIG['host_os'].downcase =~ /linux/
29
31
  t.cflags << "-arch x86_64 -arch i386" if t.platform.mac?
30
32
  t.ldflags << "-arch x86_64 -arch i386" if t.platform.mac?
33
+
34
+ t.add_define 'WINDOWS_OS' if FFI::Platform.windows?
31
35
  end
32
36
  end
33
37
  task :compile_ffi => ["ffi-compiler:default"]
@@ -58,5 +62,3 @@ Gem::PackageTask.new(gem_spec) do |pkg|
58
62
  pkg.need_tar = true
59
63
  pkg.package_dir = 'pkg'
60
64
  end
61
-
62
-
data/ext/scrypt/Rakefile CHANGED
@@ -1,9 +1,13 @@
1
1
  require 'ffi-compiler/compile_task'
2
2
 
3
3
  FFI::Compiler::CompileTask.new('scrypt_ext') do |t|
4
- t.cflags << "-Wall -msse -msse2"
4
+ t.cflags << "-Wall -std=c99"
5
+ t.cflags << "-msse -msse2" if t.platform.arch.include? "86"
5
6
  t.cflags << "-D_GNU_SOURCE=1" if RbConfig::CONFIG["host_os"].downcase =~ /mingw/
7
+ t.cflags << "-D__need_timespec" if RbConfig::CONFIG['host_os'].downcase =~ /linux/
6
8
  t.cflags << "-arch x86_64 -arch i386" if t.platform.mac?
7
9
  t.ldflags << "-arch x86_64 -arch i386" if t.platform.mac?
8
10
  t.export '../../lib/scrypt/scrypt_ext.rb'
11
+
12
+ t.add_define 'WINDOWS_OS' if FFI::Platform.windows?
9
13
  end
@@ -0,0 +1,105 @@
1
+ #ifndef _CPUSUPPORT_H_
2
+ #define _CPUSUPPORT_H_
3
+
4
+ /*
5
+ * To enable support for non-portable CPU features at compile time, one or
6
+ * more CPUSUPPORT_ARCH_FEATURE macros should be defined. This can be done
7
+ * directly on the compiler command line via -D CPUSUPPORT_ARCH_FEATURE or
8
+ * -D CPUSUPPORT_ARCH_FEATURE=1; or a file can be created with the
9
+ * necessary #define lines and then -D CPUSUPPORT_CONFIG_FILE=cpuconfig.h
10
+ * (or similar) can be provided to include that file here.
11
+ */
12
+ #ifdef CPUSUPPORT_CONFIG_FILE
13
+ #include CPUSUPPORT_CONFIG_FILE
14
+ #endif
15
+
16
+ /**
17
+ * The CPUSUPPORT_FEATURE macro declares the necessary variables and
18
+ * functions for detecting CPU feature support at run time. The function
19
+ * defined in the macro acts to cache the result of the ..._detect function
20
+ * using the ..._present and ..._init variables. The _detect function and the
21
+ * _present and _init variables are in turn defined by CPUSUPPORT_FEATURE_DECL in
22
+ * the appropriate cpusupport_foo_bar.c file.
23
+ *
24
+ * In order to allow CPUSUPPORT_FEATURE to be used for features which do not
25
+ * have corresponding CPUSUPPORT_FEATURE_DECL blocks in another source file,
26
+ * we abuse the C preprocessor: If CPUSUPPORT_${enabler} is defined to 1, then
27
+ * we access _present_1, _init_1, and _detect_1; but if it is not defined, we
28
+ * access _present_CPUSUPPORT_${enabler} etc., which we define as static, thus
29
+ * preventing the compiler from emitting a reference to an external symbol.
30
+ *
31
+ * In this way, it becomes possible to issue CPUSUPPORT_FEATURE invocations
32
+ * for nonexistent features without running afoul of the requirement that
33
+ * "If an identifier declared with external linkage is used... in the entire
34
+ * program there shall be exactly one external definition" (C99 standard, 6.9
35
+ * paragraph 5). In practice, this means that users of the cpusupport code
36
+ * can omit build and runtime detection files without changing the framework
37
+ * code.
38
+ */
39
+ #define CPUSUPPORT_FEATURE__(arch_feature, enabler, enabled) \
40
+ static int cpusupport_ ## arch_feature ## _present ## _CPUSUPPORT_ ## enabler; \
41
+ static int cpusupport_ ## arch_feature ## _init ## _CPUSUPPORT_ ## enabler; \
42
+ static inline int cpusupport_ ## arch_feature ## _detect ## _CPUSUPPORT_ ## enabler(void) { return (0); } \
43
+ extern int cpusupport_ ## arch_feature ## _present_ ## enabled; \
44
+ extern int cpusupport_ ## arch_feature ## _init_ ## enabled; \
45
+ int cpusupport_ ## arch_feature ## _detect_ ## enabled(void); \
46
+ \
47
+ static inline int \
48
+ cpusupport_ ## arch_feature(void) \
49
+ { \
50
+ \
51
+ if (cpusupport_ ## arch_feature ## _present_ ## enabled) \
52
+ return (1); \
53
+ else if (cpusupport_ ## arch_feature ## _init_ ## enabled) \
54
+ return (0); \
55
+ cpusupport_ ## arch_feature ## _present_ ## enabled = \
56
+ cpusupport_ ## arch_feature ## _detect_ ## enabled(); \
57
+ cpusupport_ ## arch_feature ## _init_ ## enabled = 1; \
58
+ return (cpusupport_ ## arch_feature ## _present_ ## enabled); \
59
+ } \
60
+ static void (* cpusupport_ ## arch_feature ## _dummyptr)(void); \
61
+ static inline void \
62
+ cpusupport_ ## arch_feature ## _dummyfunc(void) \
63
+ { \
64
+ \
65
+ (void)cpusupport_ ## arch_feature ## _present ## _CPUSUPPORT_ ## enabler; \
66
+ (void)cpusupport_ ## arch_feature ## _init ## _CPUSUPPORT_ ## enabler; \
67
+ (void)cpusupport_ ## arch_feature ## _detect ## _CPUSUPPORT_ ## enabler; \
68
+ (void)cpusupport_ ## arch_feature ## _present_ ## enabled; \
69
+ (void)cpusupport_ ## arch_feature ## _init_ ## enabled; \
70
+ (void)cpusupport_ ## arch_feature ## _detect_ ## enabled; \
71
+ (void)cpusupport_ ## arch_feature ## _dummyptr; \
72
+ } \
73
+ static void (* cpusupport_ ## arch_feature ## _dummyptr)(void) = cpusupport_ ## arch_feature ## _dummyfunc; \
74
+ struct cpusupport_ ## arch_feature ## _dummy
75
+ #define CPUSUPPORT_FEATURE_(arch_feature, enabler, enabled) \
76
+ CPUSUPPORT_FEATURE__(arch_feature, enabler, enabled)
77
+ #define CPUSUPPORT_FEATURE(arch, feature, enabler) \
78
+ CPUSUPPORT_FEATURE_(arch ## _ ## feature, enabler, CPUSUPPORT_ ## enabler)
79
+
80
+ /*
81
+ * CPUSUPPORT_FEATURE_DECL(arch, feature):
82
+ * Macro which defines variables and provides a function declaration for
83
+ * detecting the presence of "feature" on the "arch" architecture. The
84
+ * function body following this macro expansion must return nonzero if the
85
+ * feature is present, or zero if the feature is not present or the detection
86
+ * fails for any reason.
87
+ */
88
+ #define CPUSUPPORT_FEATURE_DECL(arch, feature) \
89
+ int cpusupport_ ## arch ## _ ## feature ## _present_1 = 0; \
90
+ int cpusupport_ ## arch ## _ ## feature ## _init_1 = 0; \
91
+ int \
92
+ cpusupport_ ## arch ## _ ## feature ## _detect_1(void)
93
+
94
+ /*
95
+ * List of features. If a feature here is not enabled by the appropriate
96
+ * CPUSUPPORT_ARCH_FEATURE macro being defined, it has no effect; but if the
97
+ * relevant macro may be defined (e.g., by Build/cpusupport.sh successfully
98
+ * compiling Build/cpusupport-ARCH-FEATURE.c) then the C file containing the
99
+ * corresponding run-time detection code (cpusupport_arch_feature.c) must be
100
+ * compiled and linked in.
101
+ */
102
+ CPUSUPPORT_FEATURE(x86, aesni, X86_AESNI);
103
+ CPUSUPPORT_FEATURE(x86, sse2, X86_SSE2);
104
+
105
+ #endif /* !_CPUSUPPORT_H_ */
@@ -0,0 +1,257 @@
1
+ /*-
2
+ * Copyright 2009 Colin Percival
3
+ * All rights reserved.
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions
7
+ * are met:
8
+ * 1. Redistributions of source code must retain the above copyright
9
+ * notice, this list of conditions and the following disclaimer.
10
+ * 2. Redistributions in binary form must reproduce the above copyright
11
+ * notice, this list of conditions and the following disclaimer in the
12
+ * documentation and/or other materials provided with the distribution.
13
+ *
14
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24
+ * SUCH DAMAGE.
25
+ *
26
+ * This file was originally written by Colin Percival as part of the Tarsnap
27
+ * online backup system.
28
+ */
29
+ /* #include "bsdtar_platform.h" */
30
+
31
+ #include <sys/types.h>
32
+ #if !defined(WINDOWS_OS)
33
+ #include <sys/mman.h>
34
+ #ifndef HAVE_MMAP
35
+ #define HAVE_MMAP 1
36
+ #endif
37
+ #endif
38
+ #include <errno.h>
39
+ #include <stdint.h>
40
+ #include <stdlib.h>
41
+ #include <string.h>
42
+
43
+ #include "cpusupport.h"
44
+ #include "sha256.h"
45
+ //#include "warnp.h"
46
+
47
+ #include "crypto_scrypt_smix.h"
48
+ #include "crypto_scrypt_smix_sse2.h"
49
+
50
+ #include "crypto_scrypt.h"
51
+ #include "warnp.h"
52
+
53
+ static void (*smix_func)(uint8_t *, size_t, uint64_t, void *, void *) = NULL;
54
+
55
+ /**
56
+ * _crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen, smix):
57
+ * Perform the requested scrypt computation, using ${smix} as the smix routine.
58
+ */
59
+ static int
60
+ _crypto_scrypt(const uint8_t * passwd, size_t passwdlen,
61
+ const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t _r, uint32_t _p,
62
+ uint8_t * buf, size_t buflen,
63
+ void (*smix)(uint8_t *, size_t, uint64_t, void *, void *))
64
+ {
65
+ void * B0, * V0, * XY0;
66
+ uint8_t * B;
67
+ uint32_t * V;
68
+ uint32_t * XY;
69
+ size_t r = _r, p = _p;
70
+ uint32_t i;
71
+
72
+ /* Sanity-check parameters. */
73
+ #if SIZE_MAX > UINT32_MAX
74
+ if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
75
+ errno = EFBIG;
76
+ goto err0;
77
+ }
78
+ #endif
79
+ if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
80
+ errno = EFBIG;
81
+ goto err0;
82
+ }
83
+ if (((N & (N - 1)) != 0) || (N < 2)) {
84
+ errno = EINVAL;
85
+ goto err0;
86
+ }
87
+ if ((r > SIZE_MAX / 128 / p) ||
88
+ #if SIZE_MAX / 256 <= UINT32_MAX
89
+ (r > (SIZE_MAX - 64) / 256) ||
90
+ #endif
91
+ (N > SIZE_MAX / 128 / r)) {
92
+ errno = ENOMEM;
93
+ goto err0;
94
+ }
95
+
96
+ /* Allocate memory. */
97
+ #ifdef HAVE_POSIX_MEMALIGN
98
+ if ((errno = posix_memalign(&B0, 64, 128 * r * p)) != 0)
99
+ goto err0;
100
+ B = (uint8_t *)(B0);
101
+ if ((errno = posix_memalign(&XY0, 64, 256 * r + 64)) != 0)
102
+ goto err1;
103
+ XY = (uint32_t *)(XY0);
104
+ #if !defined(MAP_ANON) || !defined(HAVE_MMAP)
105
+ if ((errno = posix_memalign(&V0, 64, 128 * r * N)) != 0)
106
+ goto err2;
107
+ V = (uint32_t *)(V0);
108
+ #endif
109
+ #else
110
+ if ((B0 = malloc(128 * r * p + 63)) == NULL)
111
+ goto err0;
112
+ B = (uint8_t *)(((uintptr_t)(B0) + 63) & ~ (uintptr_t)(63));
113
+ if ((XY0 = malloc(256 * r + 64 + 63)) == NULL)
114
+ goto err1;
115
+ XY = (uint32_t *)(((uintptr_t)(XY0) + 63) & ~ (uintptr_t)(63));
116
+ #if !defined(MAP_ANON) || !defined(HAVE_MMAP)
117
+ if ((V0 = malloc(128 * r * N + 63)) == NULL)
118
+ goto err2;
119
+ V = (uint32_t *)(((uintptr_t)(V0) + 63) & ~ (uintptr_t)(63));
120
+ #endif
121
+ #endif
122
+ #if defined(MAP_ANON) && defined(HAVE_MMAP)
123
+ if ((V0 = mmap(NULL, 128 * r * N, PROT_READ | PROT_WRITE,
124
+ #ifdef MAP_NOCORE
125
+ MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
126
+ #else
127
+ MAP_ANON | MAP_PRIVATE,
128
+ #endif
129
+ -1, 0)) == MAP_FAILED)
130
+ goto err2;
131
+ V = (uint32_t *)(V0);
132
+ #endif
133
+
134
+ /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
135
+ PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, p * 128 * r);
136
+
137
+ /* 2: for i = 0 to p - 1 do */
138
+ for (i = 0; i < p; i++) {
139
+ /* 3: B_i <-- MF(B_i, N) */
140
+ (smix)(&B[i * 128 * r], r, N, V, XY);
141
+ }
142
+
143
+ /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
144
+ PBKDF2_SHA256(passwd, passwdlen, B, p * 128 * r, 1, buf, buflen);
145
+
146
+ /* Free memory. */
147
+ #if defined(MAP_ANON) && defined(HAVE_MMAP)
148
+ if (munmap(V0, 128 * r * N))
149
+ goto err2;
150
+ #else
151
+ free(V0);
152
+ #endif
153
+ free(XY0);
154
+ free(B0);
155
+
156
+ /* Success! */
157
+ return (0);
158
+
159
+ err2:
160
+ free(XY0);
161
+ err1:
162
+ free(B0);
163
+ err0:
164
+ /* Failure! */
165
+ return (-1);
166
+ }
167
+
168
+ #define TESTLEN 64
169
+ static struct scrypt_test {
170
+ const char * passwd;
171
+ const char * salt;
172
+ uint64_t N;
173
+ uint32_t r;
174
+ uint32_t p;
175
+ uint8_t result[TESTLEN];
176
+ } testcase = {
177
+ .passwd = "pleaseletmein",
178
+ .salt = "SodiumChloride",
179
+ .N = 16,
180
+ .r = 8,
181
+ .p = 1,
182
+ .result = {
183
+ 0x25, 0xa9, 0xfa, 0x20, 0x7f, 0x87, 0xca, 0x09,
184
+ 0xa4, 0xef, 0x8b, 0x9f, 0x77, 0x7a, 0xca, 0x16,
185
+ 0xbe, 0xb7, 0x84, 0xae, 0x18, 0x30, 0xbf, 0xbf,
186
+ 0xd3, 0x83, 0x25, 0xaa, 0xbb, 0x93, 0x77, 0xdf,
187
+ 0x1b, 0xa7, 0x84, 0xd7, 0x46, 0xea, 0x27, 0x3b,
188
+ 0xf5, 0x16, 0xa4, 0x6f, 0xbf, 0xac, 0xf5, 0x11,
189
+ 0xc5, 0xbe, 0xba, 0x4c, 0x4a, 0xb3, 0xac, 0xc7,
190
+ 0xfa, 0x6f, 0x46, 0x0b, 0x6c, 0x0f, 0x47, 0x7b,
191
+ }
192
+ };
193
+
194
+ static int
195
+ testsmix(void (*smix)(uint8_t *, size_t, uint64_t, void *, void *))
196
+ {
197
+ uint8_t hbuf[TESTLEN];
198
+
199
+ /* Perform the computation. */
200
+ if (_crypto_scrypt(
201
+ (const uint8_t *)testcase.passwd, strlen(testcase.passwd),
202
+ (const uint8_t *)testcase.salt, strlen(testcase.salt),
203
+ testcase.N, testcase.r, testcase.p, hbuf, TESTLEN, smix))
204
+ return (-1);
205
+
206
+ /* Does it match? */
207
+ return (memcmp(testcase.result, hbuf, TESTLEN));
208
+ }
209
+
210
+ static void
211
+ selectsmix(void)
212
+ {
213
+
214
+ #ifdef CPUSUPPORT_X86_SSE2
215
+ /* If we're running on an SSE2-capable CPU, try that code. */
216
+ if (cpusupport_x86_sse2()) {
217
+ /* If SSE2ized smix works, use it. */
218
+ if (!testsmix(crypto_scrypt_smix_sse2)) {
219
+ smix_func = crypto_scrypt_smix_sse2;
220
+ return;
221
+ }
222
+ warn0("Disabling broken SSE2 scrypt support - please report bug!");
223
+ }
224
+ #endif
225
+
226
+ /* If generic smix works, use it. */
227
+ if (!testsmix(crypto_scrypt_smix)) {
228
+ smix_func = crypto_scrypt_smix;
229
+ return;
230
+ }
231
+ warn0("Generic scrypt code is broken - please report bug!");
232
+
233
+ /* If we get here, something really bad happened. */
234
+ abort();
235
+ }
236
+
237
+ /**
238
+ * crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
239
+ * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
240
+ * p, buflen) and write the result into buf. The parameters r, p, and buflen
241
+ * must satisfy r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N
242
+ * must be a power of 2 greater than 1.
243
+ *
244
+ * Return 0 on success; or -1 on error.
245
+ */
246
+ int
247
+ crypto_scrypt(const uint8_t * passwd, size_t passwdlen,
248
+ const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t _r, uint32_t _p,
249
+ uint8_t * buf, size_t buflen)
250
+ {
251
+
252
+ if (smix_func == NULL)
253
+ selectsmix();
254
+
255
+ return (_crypto_scrypt(passwd, passwdlen, salt, saltlen, N, _r, _p,
256
+ buf, buflen, smix_func));
257
+ }
@@ -30,6 +30,7 @@
30
30
  #define _CRYPTO_SCRYPT_H_
31
31
 
32
32
  #include <stdint.h>
33
+ #include <unistd.h>
33
34
 
34
35
  /**
35
36
  * crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
File without changes
@@ -0,0 +1,214 @@
1
+ /*-
2
+ * Copyright 2009 Colin Percival
3
+ * All rights reserved.
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions
7
+ * are met:
8
+ * 1. Redistributions of source code must retain the above copyright
9
+ * notice, this list of conditions and the following disclaimer.
10
+ * 2. Redistributions in binary form must reproduce the above copyright
11
+ * notice, this list of conditions and the following disclaimer in the
12
+ * documentation and/or other materials provided with the distribution.
13
+ *
14
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24
+ * SUCH DAMAGE.
25
+ *
26
+ * This file was originally written by Colin Percival as part of the Tarsnap
27
+ * online backup system.
28
+ */
29
+ #include <stdint.h>
30
+ #include <string.h>
31
+
32
+ #include "sysendian.h"
33
+
34
+ #include "crypto_scrypt_smix.h"
35
+
36
+ static void blkcpy(void *, const void *, size_t);
37
+ static void blkxor(void *, const void *, size_t);
38
+ static void salsa20_8(uint32_t[16]);
39
+ static void blockmix_salsa8(const uint32_t *, uint32_t *, uint32_t *, size_t);
40
+ static uint64_t integerify(const void *, size_t);
41
+
42
+ static void
43
+ blkcpy(void * dest, const void * src, size_t len)
44
+ {
45
+ size_t * D = dest;
46
+ const size_t * S = src;
47
+ size_t L = len / sizeof(size_t);
48
+ size_t i;
49
+
50
+ for (i = 0; i < L; i++)
51
+ D[i] = S[i];
52
+ }
53
+
54
+ static void
55
+ blkxor(void * dest, const void * src, size_t len)
56
+ {
57
+ size_t * D = dest;
58
+ const size_t * S = src;
59
+ size_t L = len / sizeof(size_t);
60
+ size_t i;
61
+
62
+ for (i = 0; i < L; i++)
63
+ D[i] ^= S[i];
64
+ }
65
+
66
+ /**
67
+ * salsa20_8(B):
68
+ * Apply the salsa20/8 core to the provided block.
69
+ */
70
+ static void
71
+ salsa20_8(uint32_t B[16])
72
+ {
73
+ uint32_t x[16];
74
+ size_t i;
75
+
76
+ blkcpy(x, B, 64);
77
+ for (i = 0; i < 8; i += 2) {
78
+ #define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
79
+ /* Operate on columns. */
80
+ x[ 4] ^= R(x[ 0]+x[12], 7); x[ 8] ^= R(x[ 4]+x[ 0], 9);
81
+ x[12] ^= R(x[ 8]+x[ 4],13); x[ 0] ^= R(x[12]+x[ 8],18);
82
+
83
+ x[ 9] ^= R(x[ 5]+x[ 1], 7); x[13] ^= R(x[ 9]+x[ 5], 9);
84
+ x[ 1] ^= R(x[13]+x[ 9],13); x[ 5] ^= R(x[ 1]+x[13],18);
85
+
86
+ x[14] ^= R(x[10]+x[ 6], 7); x[ 2] ^= R(x[14]+x[10], 9);
87
+ x[ 6] ^= R(x[ 2]+x[14],13); x[10] ^= R(x[ 6]+x[ 2],18);
88
+
89
+ x[ 3] ^= R(x[15]+x[11], 7); x[ 7] ^= R(x[ 3]+x[15], 9);
90
+ x[11] ^= R(x[ 7]+x[ 3],13); x[15] ^= R(x[11]+x[ 7],18);
91
+
92
+ /* Operate on rows. */
93
+ x[ 1] ^= R(x[ 0]+x[ 3], 7); x[ 2] ^= R(x[ 1]+x[ 0], 9);
94
+ x[ 3] ^= R(x[ 2]+x[ 1],13); x[ 0] ^= R(x[ 3]+x[ 2],18);
95
+
96
+ x[ 6] ^= R(x[ 5]+x[ 4], 7); x[ 7] ^= R(x[ 6]+x[ 5], 9);
97
+ x[ 4] ^= R(x[ 7]+x[ 6],13); x[ 5] ^= R(x[ 4]+x[ 7],18);
98
+
99
+ x[11] ^= R(x[10]+x[ 9], 7); x[ 8] ^= R(x[11]+x[10], 9);
100
+ x[ 9] ^= R(x[ 8]+x[11],13); x[10] ^= R(x[ 9]+x[ 8],18);
101
+
102
+ x[12] ^= R(x[15]+x[14], 7); x[13] ^= R(x[12]+x[15], 9);
103
+ x[14] ^= R(x[13]+x[12],13); x[15] ^= R(x[14]+x[13],18);
104
+ #undef R
105
+ }
106
+ for (i = 0; i < 16; i++)
107
+ B[i] += x[i];
108
+ }
109
+
110
+ /**
111
+ * blockmix_salsa8(Bin, Bout, X, r):
112
+ * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
113
+ * bytes in length; the output Bout must also be the same size. The
114
+ * temporary space X must be 64 bytes.
115
+ */
116
+ static void
117
+ blockmix_salsa8(const uint32_t * Bin, uint32_t * Bout, uint32_t * X, size_t r)
118
+ {
119
+ size_t i;
120
+
121
+ /* 1: X <-- B_{2r - 1} */
122
+ blkcpy(X, &Bin[(2 * r - 1) * 16], 64);
123
+
124
+ /* 2: for i = 0 to 2r - 1 do */
125
+ for (i = 0; i < 2 * r; i += 2) {
126
+ /* 3: X <-- H(X \xor B_i) */
127
+ blkxor(X, &Bin[i * 16], 64);
128
+ salsa20_8(X);
129
+
130
+ /* 4: Y_i <-- X */
131
+ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
132
+ blkcpy(&Bout[i * 8], X, 64);
133
+
134
+ /* 3: X <-- H(X \xor B_i) */
135
+ blkxor(X, &Bin[i * 16 + 16], 64);
136
+ salsa20_8(X);
137
+
138
+ /* 4: Y_i <-- X */
139
+ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
140
+ blkcpy(&Bout[i * 8 + r * 16], X, 64);
141
+ }
142
+ }
143
+
144
+ /**
145
+ * integerify(B, r):
146
+ * Return the result of parsing B_{2r-1} as a little-endian integer.
147
+ */
148
+ static uint64_t
149
+ integerify(const void * B, size_t r)
150
+ {
151
+ const uint32_t * X = (const void *)((uintptr_t)(B) + (2 * r - 1) * 64);
152
+
153
+ return (((uint64_t)(X[1]) << 32) + X[0]);
154
+ }
155
+
156
+ /**
157
+ * crypto_scrypt_smix(B, r, N, V, XY):
158
+ * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
159
+ * the temporary storage V must be 128rN bytes in length; the temporary
160
+ * storage XY must be 256r + 64 bytes in length. The value N must be a
161
+ * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
162
+ * multiple of 64 bytes.
163
+ */
164
+ void
165
+ crypto_scrypt_smix(uint8_t * B, size_t r, uint64_t N, void * _V, void * XY)
166
+ {
167
+ uint32_t * X = XY;
168
+ uint32_t * Y = (void *)((uint8_t *)(XY) + 128 * r);
169
+ uint32_t * Z = (void *)((uint8_t *)(XY) + 256 * r);
170
+ uint32_t * V = _V;
171
+ uint64_t i;
172
+ uint64_t j;
173
+ size_t k;
174
+
175
+ /* 1: X <-- B */
176
+ for (k = 0; k < 32 * r; k++)
177
+ X[k] = le32dec(&B[4 * k]);
178
+
179
+ /* 2: for i = 0 to N - 1 do */
180
+ for (i = 0; i < N; i += 2) {
181
+ /* 3: V_i <-- X */
182
+ blkcpy(&V[i * (32 * r)], X, 128 * r);
183
+
184
+ /* 4: X <-- H(X) */
185
+ blockmix_salsa8(X, Y, Z, r);
186
+
187
+ /* 3: V_i <-- X */
188
+ blkcpy(&V[(i + 1) * (32 * r)], Y, 128 * r);
189
+
190
+ /* 4: X <-- H(X) */
191
+ blockmix_salsa8(Y, X, Z, r);
192
+ }
193
+
194
+ /* 6: for i = 0 to N - 1 do */
195
+ for (i = 0; i < N; i += 2) {
196
+ /* 7: j <-- Integerify(X) mod N */
197
+ j = integerify(X, r) & (N - 1);
198
+
199
+ /* 8: X <-- H(X \xor V_j) */
200
+ blkxor(X, &V[j * (32 * r)], 128 * r);
201
+ blockmix_salsa8(X, Y, Z, r);
202
+
203
+ /* 7: j <-- Integerify(X) mod N */
204
+ j = integerify(Y, r) & (N - 1);
205
+
206
+ /* 8: X <-- H(X \xor V_j) */
207
+ blkxor(Y, &V[j * (32 * r)], 128 * r);
208
+ blockmix_salsa8(Y, X, Z, r);
209
+ }
210
+
211
+ /* 10: B' <-- X */
212
+ for (k = 0; k < 32 * r; k++)
213
+ le32enc(&B[4 * k], X[k]);
214
+ }
@@ -0,0 +1,14 @@
1
+ #ifndef _CRYPTO_SCRYPT_SMIX_H_
2
+ #define _CRYPTO_SCRYPT_SMIX_H_
3
+
4
+ /**
5
+ * crypto_scrypt_smix(B, r, N, V, XY):
6
+ * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
7
+ * the temporary storage V must be 128rN bytes in length; the temporary
8
+ * storage XY must be 256r + 64 bytes in length. The value N must be a
9
+ * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
10
+ * multiple of 64 bytes.
11
+ */
12
+ void crypto_scrypt_smix(uint8_t *, size_t, uint64_t, void *, void *);
13
+
14
+ #endif /* !_CRYPTO_SCRYPT_SMIX_H_ */