scrypt 2.1.1 → 3.0.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 97e4aa9bd943a970d5809dec76392d57b80b86ab
-  data.tar.gz: d4ed6d6695db090fa341ffaae1f050d0d0ff6301
+  metadata.gz: cfb68eca4a9da928746e910f6102bfcc5aa6930e
+  data.tar.gz: a8a5b75ac77e99bdc4600f5d33006855ee1abd8b
 SHA512:
-  metadata.gz: e73b9d972111d155c7b75e8f84317a8dfa6995f6103be3c03e8c430feead8694c41cc96dfbc12ce5234e34916c7f7261abd5658c7428eca0236d68b3feb0bfab
-  data.tar.gz: fd8b8d3934186ae12ed492e14e66b078e5c3e56c44960e15cb5c197f76fc909671bbbc2c69e826f62c3b887bee6ecfa958f3fe8a8fb6662b52ffd3f1380e1796
+  metadata.gz: e8bf255e9a3ade2a4b6386d0cda4cf25d6026cef9aaf554634a22c3e1b04bdfcd7320df33f6a50c6660363123e43cfff579ce7b82220757a0ed55010e9f83af7
+  data.tar.gz: 4a6d07b2914c43fe9f98f7c8ade8dda6296f4e62e517f15c21ca175046df1cef451de373ed8fbef042d35ffed013606c3b9315ab3c0365cdcb2651311f2e696f
checksums.yaml.gz.sig CHANGED
(binary signature content changed)
data.tar.gz.sig CHANGED
(binary signature content changed)
data/Rakefile CHANGED
@@ -24,7 +24,8 @@ end
 desc "FFI compiler"
 namespace "ffi-compiler" do
   FFI::Compiler::CompileTask.new('ext/scrypt/scrypt_ext') do |t|
-    t.cflags << "-Wall -msse -msse2"
+    t.cflags << "-Wall -std=c99"
+    t.cflags << "-msse -msse2" if t.platform.arch.include? "86"
     t.cflags << "-D_GNU_SOURCE=1" if RbConfig::CONFIG["host_os"].downcase =~ /mingw/
     t.cflags << "-arch x86_64 -arch i386" if t.platform.mac?
     t.ldflags << "-arch x86_64 -arch i386" if t.platform.mac?
data/ext/scrypt/Rakefile CHANGED
@@ -1,7 +1,8 @@
 require 'ffi-compiler/compile_task'
 
 FFI::Compiler::CompileTask.new('scrypt_ext') do |t|
-  t.cflags << "-Wall -msse -msse2"
+  t.cflags << "-Wall -std=c99"
+  t.cflags << "-msse -msse2" if t.platform.arch.include? "86"
   t.cflags << "-D_GNU_SOURCE=1" if RbConfig::CONFIG["host_os"].downcase =~ /mingw/
   t.cflags << "-arch x86_64 -arch i386" if t.platform.mac?
   t.ldflags << "-arch x86_64 -arch i386" if t.platform.mac?
data/ext/scrypt/cpusupport.h ADDED
@@ -0,0 +1,105 @@
+#ifndef _CPUSUPPORT_H_
+#define _CPUSUPPORT_H_
+
+/*
+ * To enable support for non-portable CPU features at compile time, one or
+ * more CPUSUPPORT_ARCH_FEATURE macros should be defined. This can be done
+ * directly on the compiler command line via -D CPUSUPPORT_ARCH_FEATURE or
+ * -D CPUSUPPORT_ARCH_FEATURE=1; or a file can be created with the
+ * necessary #define lines and then -D CPUSUPPORT_CONFIG_FILE=cpuconfig.h
+ * (or similar) can be provided to include that file here.
+ */
+#ifdef CPUSUPPORT_CONFIG_FILE
+#include CPUSUPPORT_CONFIG_FILE
+#endif
+
+/**
+ * The CPUSUPPORT_FEATURE macro declares the necessary variables and
+ * functions for detecting CPU feature support at run time. The function
+ * defined in the macro acts to cache the result of the ..._detect function
+ * using the ..._present and ..._init variables. The _detect function and the
+ * _present and _init variables are in turn defined by CPUSUPPORT_FEATURE_DECL
+ * in the appropriate cpusupport_foo_bar.c file.
+ *
+ * In order to allow CPUSUPPORT_FEATURE to be used for features which do not
+ * have corresponding CPUSUPPORT_FEATURE_DECL blocks in another source file,
+ * we abuse the C preprocessor: If CPUSUPPORT_${enabler} is defined to 1, then
+ * we access _present_1, _init_1, and _detect_1; but if it is not defined, we
+ * access _present_CPUSUPPORT_${enabler} etc., which we define as static, thus
+ * preventing the compiler from emitting a reference to an external symbol.
+ *
+ * In this way, it becomes possible to issue CPUSUPPORT_FEATURE invocations
+ * for nonexistent features without running afoul of the requirement that
+ * "If an identifier declared with external linkage is used... in the entire
+ * program there shall be exactly one external definition" (C99 standard, 6.9
+ * paragraph 5). In practice, this means that users of the cpusupport code
+ * can omit build and runtime detection files without changing the framework
+ * code.
+ */
+#define CPUSUPPORT_FEATURE__(arch_feature, enabler, enabled) \
+    static int cpusupport_ ## arch_feature ## _present ## _CPUSUPPORT_ ## enabler; \
+    static int cpusupport_ ## arch_feature ## _init ## _CPUSUPPORT_ ## enabler; \
+    static inline int cpusupport_ ## arch_feature ## _detect ## _CPUSUPPORT_ ## enabler(void) { return (0); } \
+    extern int cpusupport_ ## arch_feature ## _present_ ## enabled; \
+    extern int cpusupport_ ## arch_feature ## _init_ ## enabled; \
+    int cpusupport_ ## arch_feature ## _detect_ ## enabled(void); \
+    \
+    static inline int \
+    cpusupport_ ## arch_feature(void) \
+    { \
+    \
+        if (cpusupport_ ## arch_feature ## _present_ ## enabled) \
+            return (1); \
+        else if (cpusupport_ ## arch_feature ## _init_ ## enabled) \
+            return (0); \
+        cpusupport_ ## arch_feature ## _present_ ## enabled = \
+            cpusupport_ ## arch_feature ## _detect_ ## enabled(); \
+        cpusupport_ ## arch_feature ## _init_ ## enabled = 1; \
+        return (cpusupport_ ## arch_feature ## _present_ ## enabled); \
+    } \
+    static void (* cpusupport_ ## arch_feature ## _dummyptr)(void); \
+    static inline void \
+    cpusupport_ ## arch_feature ## _dummyfunc(void) \
+    { \
+    \
+        (void)cpusupport_ ## arch_feature ## _present ## _CPUSUPPORT_ ## enabler; \
+        (void)cpusupport_ ## arch_feature ## _init ## _CPUSUPPORT_ ## enabler; \
+        (void)cpusupport_ ## arch_feature ## _detect ## _CPUSUPPORT_ ## enabler; \
+        (void)cpusupport_ ## arch_feature ## _present_ ## enabled; \
+        (void)cpusupport_ ## arch_feature ## _init_ ## enabled; \
+        (void)cpusupport_ ## arch_feature ## _detect_ ## enabled; \
+        (void)cpusupport_ ## arch_feature ## _dummyptr; \
+    } \
+    static void (* cpusupport_ ## arch_feature ## _dummyptr)(void) = cpusupport_ ## arch_feature ## _dummyfunc; \
+    struct cpusupport_ ## arch_feature ## _dummy
+#define CPUSUPPORT_FEATURE_(arch_feature, enabler, enabled) \
+    CPUSUPPORT_FEATURE__(arch_feature, enabler, enabled)
+#define CPUSUPPORT_FEATURE(arch, feature, enabler) \
+    CPUSUPPORT_FEATURE_(arch ## _ ## feature, enabler, CPUSUPPORT_ ## enabler)
+
+/*
+ * CPUSUPPORT_FEATURE_DECL(arch, feature):
+ * Macro which defines variables and provides a function declaration for
+ * detecting the presence of "feature" on the "arch" architecture. The
+ * function body following this macro expansion must return nonzero if the
+ * feature is present, or zero if the feature is not present or the detection
+ * fails for any reason.
+ */
+#define CPUSUPPORT_FEATURE_DECL(arch, feature) \
+    int cpusupport_ ## arch ## _ ## feature ## _present_1 = 0; \
+    int cpusupport_ ## arch ## _ ## feature ## _init_1 = 0; \
+    int \
+    cpusupport_ ## arch ## _ ## feature ## _detect_1(void)
+
+/*
+ * List of features. If a feature here is not enabled by the appropriate
+ * CPUSUPPORT_ARCH_FEATURE macro being defined, it has no effect; but if the
+ * relevant macro may be defined (e.g., by Build/cpusupport.sh successfully
+ * compiling Build/cpusupport-ARCH-FEATURE.c) then the C file containing the
+ * corresponding run-time detection code (cpusupport_arch_feature.c) must be
+ * compiled and linked in.
+ */
+CPUSUPPORT_FEATURE(x86, aesni, X86_AESNI);
+CPUSUPPORT_FEATURE(x86, sse2, X86_SSE2);
+
+#endif /* !_CPUSUPPORT_H_ */
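
Editor's note: the header above only declares the detection hooks; the comment says the actual run-time check for each feature lives in a matching cpusupport_arch_feature.c file that opens its function with CPUSUPPORT_FEATURE_DECL. As a rough illustration only (this is not the detection file shipped in the gem), such a file for x86 SSE2 could look like the sketch below; the CPUSUPPORT_X86_SSE2 guard and the use of GCC/Clang's <cpuid.h> helper are assumptions of the sketch.

/* cpusupport_x86_sse2.c -- illustrative sketch, not the gem's own file.
 * Assumes a GCC/Clang toolchain providing <cpuid.h>. */
#include "cpusupport.h"

#ifdef CPUSUPPORT_X86_SSE2
#include <cpuid.h>

/* Defines cpusupport_x86_sse2_present_1 and _init_1, and opens _detect_1(). */
CPUSUPPORT_FEATURE_DECL(x86, sse2)
{
    unsigned int eax, ebx, ecx, edx;

    /* CPUID leaf 1, EDX bit 26 reports SSE2 support. */
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return (0);
    return ((edx >> 26) & 1);
}
#endif

With that in place, callers simply test cpusupport_x86_sse2(); the first call runs the detection and later calls return the cached result, exactly as the header's caching logic describes.
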
data/ext/scrypt/crypto_scrypt.c ADDED
@@ -0,0 +1,253 @@
+/*-
+ * Copyright 2009 Colin Percival
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This file was originally written by Colin Percival as part of the Tarsnap
+ * online backup system.
+ */
+/* #include "bsdtar_platform.h" */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cpusupport.h"
+#include "sha256.h"
+//#include "warnp.h"
+
+#include "crypto_scrypt_smix.h"
+#include "crypto_scrypt_smix_sse2.h"
+
+#include "crypto_scrypt.h"
+#include "warnp.h"
+
+static void (*smix_func)(uint8_t *, size_t, uint64_t, void *, void *) = NULL;
+
+/**
+ * _crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen, smix):
+ * Perform the requested scrypt computation, using ${smix} as the smix routine.
+ */
+static int
+_crypto_scrypt(const uint8_t * passwd, size_t passwdlen,
+    const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t _r, uint32_t _p,
+    uint8_t * buf, size_t buflen,
+    void (*smix)(uint8_t *, size_t, uint64_t, void *, void *))
+{
+    void * B0, * V0, * XY0;
+    uint8_t * B;
+    uint32_t * V;
+    uint32_t * XY;
+    size_t r = _r, p = _p;
+    uint32_t i;
+
+    /* Sanity-check parameters. */
+#if SIZE_MAX > UINT32_MAX
+    if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
+        errno = EFBIG;
+        goto err0;
+    }
+#endif
+    if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
+        errno = EFBIG;
+        goto err0;
+    }
+    if (((N & (N - 1)) != 0) || (N < 2)) {
+        errno = EINVAL;
+        goto err0;
+    }
+    if ((r > SIZE_MAX / 128 / p) ||
+#if SIZE_MAX / 256 <= UINT32_MAX
+        (r > (SIZE_MAX - 64) / 256) ||
+#endif
+        (N > SIZE_MAX / 128 / r)) {
+        errno = ENOMEM;
+        goto err0;
+    }
+
+    /* Allocate memory. */
+#ifdef HAVE_POSIX_MEMALIGN
+    if ((errno = posix_memalign(&B0, 64, 128 * r * p)) != 0)
+        goto err0;
+    B = (uint8_t *)(B0);
+    if ((errno = posix_memalign(&XY0, 64, 256 * r + 64)) != 0)
+        goto err1;
+    XY = (uint32_t *)(XY0);
+#if !defined(MAP_ANON) || !defined(HAVE_MMAP)
+    if ((errno = posix_memalign(&V0, 64, 128 * r * N)) != 0)
+        goto err2;
+    V = (uint32_t *)(V0);
+#endif
+#else
+    if ((B0 = malloc(128 * r * p + 63)) == NULL)
+        goto err0;
+    B = (uint8_t *)(((uintptr_t)(B0) + 63) & ~ (uintptr_t)(63));
+    if ((XY0 = malloc(256 * r + 64 + 63)) == NULL)
+        goto err1;
+    XY = (uint32_t *)(((uintptr_t)(XY0) + 63) & ~ (uintptr_t)(63));
+#if !defined(MAP_ANON) || !defined(HAVE_MMAP)
+    if ((V0 = malloc(128 * r * N + 63)) == NULL)
+        goto err2;
+    V = (uint32_t *)(((uintptr_t)(V0) + 63) & ~ (uintptr_t)(63));
+#endif
+#endif
+#if defined(MAP_ANON) && defined(HAVE_MMAP)
+    if ((V0 = mmap(NULL, 128 * r * N, PROT_READ | PROT_WRITE,
+#ifdef MAP_NOCORE
+        MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
+#else
+        MAP_ANON | MAP_PRIVATE,
+#endif
+        -1, 0)) == MAP_FAILED)
+        goto err2;
+    V = (uint32_t *)(V0);
+#endif
+
+    /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
+    PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, p * 128 * r);
+
+    /* 2: for i = 0 to p - 1 do */
+    for (i = 0; i < p; i++) {
+        /* 3: B_i <-- MF(B_i, N) */
+        (smix)(&B[i * 128 * r], r, N, V, XY);
+    }
+
+    /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
+    PBKDF2_SHA256(passwd, passwdlen, B, p * 128 * r, 1, buf, buflen);
+
+    /* Free memory. */
+#if defined(MAP_ANON) && defined(HAVE_MMAP)
+    if (munmap(V0, 128 * r * N))
+        goto err2;
+#else
+    free(V0);
+#endif
+    free(XY0);
+    free(B0);
+
+    /* Success! */
+    return (0);
+
+err2:
+    free(XY0);
+err1:
+    free(B0);
+err0:
+    /* Failure! */
+    return (-1);
+}
+
+#define TESTLEN 64
+static struct scrypt_test {
+    const char * passwd;
+    const char * salt;
+    uint64_t N;
+    uint32_t r;
+    uint32_t p;
+    uint8_t result[TESTLEN];
+} testcase = {
+    .passwd = "pleaseletmein",
+    .salt = "SodiumChloride",
+    .N = 16,
+    .r = 8,
+    .p = 1,
+    .result = {
+        0x25, 0xa9, 0xfa, 0x20, 0x7f, 0x87, 0xca, 0x09,
+        0xa4, 0xef, 0x8b, 0x9f, 0x77, 0x7a, 0xca, 0x16,
+        0xbe, 0xb7, 0x84, 0xae, 0x18, 0x30, 0xbf, 0xbf,
+        0xd3, 0x83, 0x25, 0xaa, 0xbb, 0x93, 0x77, 0xdf,
+        0x1b, 0xa7, 0x84, 0xd7, 0x46, 0xea, 0x27, 0x3b,
+        0xf5, 0x16, 0xa4, 0x6f, 0xbf, 0xac, 0xf5, 0x11,
+        0xc5, 0xbe, 0xba, 0x4c, 0x4a, 0xb3, 0xac, 0xc7,
+        0xfa, 0x6f, 0x46, 0x0b, 0x6c, 0x0f, 0x47, 0x7b,
+    }
+};
+
+static int
+testsmix(void (*smix)(uint8_t *, size_t, uint64_t, void *, void *))
+{
+    uint8_t hbuf[TESTLEN];
+
+    /* Perform the computation. */
+    if (_crypto_scrypt(
+        (const uint8_t *)testcase.passwd, strlen(testcase.passwd),
+        (const uint8_t *)testcase.salt, strlen(testcase.salt),
+        testcase.N, testcase.r, testcase.p, hbuf, TESTLEN, smix))
+        return (-1);
+
+    /* Does it match? */
+    return (memcmp(testcase.result, hbuf, TESTLEN));
+}
+
+static void
+selectsmix(void)
+{
+
+#ifdef CPUSUPPORT_X86_SSE2
+    /* If we're running on an SSE2-capable CPU, try that code. */
+    if (cpusupport_x86_sse2()) {
+        /* If SSE2ized smix works, use it. */
+        if (!testsmix(crypto_scrypt_smix_sse2)) {
+            smix_func = crypto_scrypt_smix_sse2;
+            return;
+        }
+        warn0("Disabling broken SSE2 scrypt support - please report bug!");
+    }
+#endif
+
+    /* If generic smix works, use it. */
+    if (!testsmix(crypto_scrypt_smix)) {
+        smix_func = crypto_scrypt_smix;
+        return;
+    }
+    warn0("Generic scrypt code is broken - please report bug!");
+
+    /* If we get here, something really bad happened. */
+    abort();
+}
+
+/**
+ * crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
+ * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
+ * p, buflen) and write the result into buf. The parameters r, p, and buflen
+ * must satisfy r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N
+ * must be a power of 2 greater than 1.
+ *
+ * Return 0 on success; or -1 on error.
+ */
+int
+crypto_scrypt(const uint8_t * passwd, size_t passwdlen,
+    const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t _r, uint32_t _p,
+    uint8_t * buf, size_t buflen)
+{
+
+    if (smix_func == NULL)
+        selectsmix();
+
+    return (_crypto_scrypt(passwd, passwdlen, salt, saltlen, N, _r, _p,
+        buf, buflen, smix_func));
+}
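
Editor's note: the doc comment on crypto_scrypt() above spells out the parameter contract (N a power of 2 greater than 1, r * p < 2^30, buflen <= (2^32 - 1) * 32). A minimal caller sketch is given below to show how those parameters fit together; it reuses the password and salt from the file's own test vector, but the main() wrapper and the N = 16384 work factor are illustrative choices of this note, and it assumes the program is linked against crypto_scrypt.c and its sha256/smix dependencies.

/* Illustrative caller only -- not part of the gem. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "crypto_scrypt.h"

int
main(void)
{
    uint8_t dk[64];
    size_t i;

    /* N = 16384, r = 8, p = 1 satisfy: N a power of 2 > 1, r * p < 2^30. */
    if (crypto_scrypt((const uint8_t *)"pleaseletmein", strlen("pleaseletmein"),
        (const uint8_t *)"SodiumChloride", strlen("SodiumChloride"),
        16384, 8, 1, dk, sizeof(dk)))
        return (1);

    /* Print the derived key as hex. */
    for (i = 0; i < sizeof(dk); i++)
        printf("%02x", dk[i]);
    printf("\n");
    return (0);
}
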
data/ext/scrypt/crypto_scrypt.h CHANGED
@@ -30,6 +30,7 @@
 #define _CRYPTO_SCRYPT_H_
 
 #include <stdint.h>
+#include <unistd.h>
 
 /**
  * crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
data/ext/scrypt/crypto_scrypt_smix.c ADDED
@@ -0,0 +1,214 @@
+/*-
+ * Copyright 2009 Colin Percival
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This file was originally written by Colin Percival as part of the Tarsnap
+ * online backup system.
+ */
+#include <stdint.h>
+#include <string.h>
+
+#include "sysendian.h"
+
+#include "crypto_scrypt_smix.h"
+
+static void blkcpy(void *, const void *, size_t);
+static void blkxor(void *, const void *, size_t);
+static void salsa20_8(uint32_t[16]);
+static void blockmix_salsa8(const uint32_t *, uint32_t *, uint32_t *, size_t);
+static uint64_t integerify(const void *, size_t);
+
+static void
+blkcpy(void * dest, const void * src, size_t len)
+{
+    size_t * D = dest;
+    const size_t * S = src;
+    size_t L = len / sizeof(size_t);
+    size_t i;
+
+    for (i = 0; i < L; i++)
+        D[i] = S[i];
+}
+
+static void
+blkxor(void * dest, const void * src, size_t len)
+{
+    size_t * D = dest;
+    const size_t * S = src;
+    size_t L = len / sizeof(size_t);
+    size_t i;
+
+    for (i = 0; i < L; i++)
+        D[i] ^= S[i];
+}
+
+/**
+ * salsa20_8(B):
+ * Apply the salsa20/8 core to the provided block.
+ */
+static void
+salsa20_8(uint32_t B[16])
+{
+    uint32_t x[16];
+    size_t i;
+
+    blkcpy(x, B, 64);
+    for (i = 0; i < 8; i += 2) {
+#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
+        /* Operate on columns. */
+        x[ 4] ^= R(x[ 0]+x[12], 7);  x[ 8] ^= R(x[ 4]+x[ 0], 9);
+        x[12] ^= R(x[ 8]+x[ 4],13);  x[ 0] ^= R(x[12]+x[ 8],18);
+
+        x[ 9] ^= R(x[ 5]+x[ 1], 7);  x[13] ^= R(x[ 9]+x[ 5], 9);
+        x[ 1] ^= R(x[13]+x[ 9],13);  x[ 5] ^= R(x[ 1]+x[13],18);
+
+        x[14] ^= R(x[10]+x[ 6], 7);  x[ 2] ^= R(x[14]+x[10], 9);
+        x[ 6] ^= R(x[ 2]+x[14],13);  x[10] ^= R(x[ 6]+x[ 2],18);
+
+        x[ 3] ^= R(x[15]+x[11], 7);  x[ 7] ^= R(x[ 3]+x[15], 9);
+        x[11] ^= R(x[ 7]+x[ 3],13);  x[15] ^= R(x[11]+x[ 7],18);
+
+        /* Operate on rows. */
+        x[ 1] ^= R(x[ 0]+x[ 3], 7);  x[ 2] ^= R(x[ 1]+x[ 0], 9);
+        x[ 3] ^= R(x[ 2]+x[ 1],13);  x[ 0] ^= R(x[ 3]+x[ 2],18);
+
+        x[ 6] ^= R(x[ 5]+x[ 4], 7);  x[ 7] ^= R(x[ 6]+x[ 5], 9);
+        x[ 4] ^= R(x[ 7]+x[ 6],13);  x[ 5] ^= R(x[ 4]+x[ 7],18);
+
+        x[11] ^= R(x[10]+x[ 9], 7);  x[ 8] ^= R(x[11]+x[10], 9);
+        x[ 9] ^= R(x[ 8]+x[11],13);  x[10] ^= R(x[ 9]+x[ 8],18);
+
+        x[12] ^= R(x[15]+x[14], 7);  x[13] ^= R(x[12]+x[15], 9);
+        x[14] ^= R(x[13]+x[12],13);  x[15] ^= R(x[14]+x[13],18);
+#undef R
+    }
+    for (i = 0; i < 16; i++)
+        B[i] += x[i];
+}
+
+/**
+ * blockmix_salsa8(Bin, Bout, X, r):
+ * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
+ * bytes in length; the output Bout must also be the same size. The
+ * temporary space X must be 64 bytes.
+ */
+static void
+blockmix_salsa8(const uint32_t * Bin, uint32_t * Bout, uint32_t * X, size_t r)
+{
+    size_t i;
+
+    /* 1: X <-- B_{2r - 1} */
+    blkcpy(X, &Bin[(2 * r - 1) * 16], 64);
+
+    /* 2: for i = 0 to 2r - 1 do */
+    for (i = 0; i < 2 * r; i += 2) {
+        /* 3: X <-- H(X \xor B_i) */
+        blkxor(X, &Bin[i * 16], 64);
+        salsa20_8(X);
+
+        /* 4: Y_i <-- X */
+        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
+        blkcpy(&Bout[i * 8], X, 64);
+
+        /* 3: X <-- H(X \xor B_i) */
+        blkxor(X, &Bin[i * 16 + 16], 64);
+        salsa20_8(X);
+
+        /* 4: Y_i <-- X */
+        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
+        blkcpy(&Bout[i * 8 + r * 16], X, 64);
+    }
+}
+
+/**
+ * integerify(B, r):
+ * Return the result of parsing B_{2r-1} as a little-endian integer.
+ */
+static uint64_t
+integerify(const void * B, size_t r)
+{
+    const uint32_t * X = (const void *)((uintptr_t)(B) + (2 * r - 1) * 64);
+
+    return (((uint64_t)(X[1]) << 32) + X[0]);
+}
+
+/**
+ * crypto_scrypt_smix(B, r, N, V, XY):
+ * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
+ * the temporary storage V must be 128rN bytes in length; the temporary
+ * storage XY must be 256r + 64 bytes in length. The value N must be a
+ * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
+ * multiple of 64 bytes.
+ */
+void
+crypto_scrypt_smix(uint8_t * B, size_t r, uint64_t N, void * _V, void * XY)
+{
+    uint32_t * X = XY;
+    uint32_t * Y = (void *)((uint8_t *)(XY) + 128 * r);
+    uint32_t * Z = (void *)((uint8_t *)(XY) + 256 * r);
+    uint32_t * V = _V;
+    uint64_t i;
+    uint64_t j;
+    size_t k;
+
+    /* 1: X <-- B */
+    for (k = 0; k < 32 * r; k++)
+        X[k] = le32dec(&B[4 * k]);
+
+    /* 2: for i = 0 to N - 1 do */
+    for (i = 0; i < N; i += 2) {
+        /* 3: V_i <-- X */
+        blkcpy(&V[i * (32 * r)], X, 128 * r);
+
+        /* 4: X <-- H(X) */
+        blockmix_salsa8(X, Y, Z, r);
+
+        /* 3: V_i <-- X */
+        blkcpy(&V[(i + 1) * (32 * r)], Y, 128 * r);
+
+        /* 4: X <-- H(X) */
+        blockmix_salsa8(Y, X, Z, r);
+    }
+
+    /* 6: for i = 0 to N - 1 do */
+    for (i = 0; i < N; i += 2) {
+        /* 7: j <-- Integerify(X) mod N */
+        j = integerify(X, r) & (N - 1);
+
+        /* 8: X <-- H(X \xor V_j) */
+        blkxor(X, &V[j * (32 * r)], 128 * r);
+        blockmix_salsa8(X, Y, Z, r);
+
+        /* 7: j <-- Integerify(X) mod N */
+        j = integerify(Y, r) & (N - 1);
+
+        /* 8: X <-- H(X \xor V_j) */
+        blkxor(Y, &V[j * (32 * r)], 128 * r);
+        blockmix_salsa8(Y, X, Z, r);
+    }
+
+    /* 10: B' <-- X */
+    for (k = 0; k < 32 * r; k++)
+        le32enc(&B[4 * k], X[k]);
+}
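
Editor's note: the crypto_scrypt_smix() doc comment above states an exact buffer contract (B: 128r bytes, V: 128rN bytes, XY: 256r + 64 bytes, all 64-byte aligned, N a power of 2 greater than 1). The sketch below shows one way a standalone caller could satisfy that contract with posix_memalign, assuming a POSIX system; the function name, r = 8, and N = 16 are illustrative choices, not something the gem itself provides (inside the gem, crypto_scrypt() performs this allocation via posix_memalign/mmap).

/* Illustrative standalone call of crypto_scrypt_smix(); not part of the gem. */
#include <stdint.h>
#include <stdlib.h>

#include "crypto_scrypt_smix.h"

int
smix_one_block(uint8_t * B /* 128 * r bytes, 64-byte aligned */)
{
    const size_t r = 8;
    const uint64_t N = 16;    /* power of 2, > 1 */
    void * V, * XY;

    /* V needs 128 * r * N bytes, XY needs 256 * r + 64 bytes, both 64-byte aligned. */
    if (posix_memalign(&V, 64, 128 * r * N) != 0)
        return (-1);
    if (posix_memalign(&XY, 64, 256 * r + 64) != 0) {
        free(V);
        return (-1);
    }

    /* B is mixed in place; because N is a power of 2, the implementation can
     * reduce Integerify(X) mod N with the mask (N - 1). */
    crypto_scrypt_smix(B, r, N, V, XY);

    free(XY);
    free(V);
    return (0);
}
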