dualcone 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +21 -0
- data/README.md +116 -0
- data/ext/dualcone/dualcone.c +288 -0
- data/ext/dualcone/dualcone.h +49 -0
- data/ext/dualcone/extconf.rb +36 -0
- data/lib/dualcone.rb +4 -0
- data/lib/dualcone/version.rb +5 -0
- data/vendor/libhydrogen/LICENSE +18 -0
- data/vendor/libhydrogen/Makefile +61 -0
- data/vendor/libhydrogen/README.md +36 -0
- data/vendor/libhydrogen/hydrogen.c +18 -0
- data/vendor/libhydrogen/hydrogen.h +331 -0
- data/vendor/libhydrogen/impl/common.h +321 -0
- data/vendor/libhydrogen/impl/core.h +223 -0
- data/vendor/libhydrogen/impl/gimli-core.h +25 -0
- data/vendor/libhydrogen/impl/gimli-core/portable.h +39 -0
- data/vendor/libhydrogen/impl/gimli-core/sse2.h +100 -0
- data/vendor/libhydrogen/impl/hash.h +140 -0
- data/vendor/libhydrogen/impl/hydrogen_p.h +83 -0
- data/vendor/libhydrogen/impl/kdf.h +20 -0
- data/vendor/libhydrogen/impl/kx.h +535 -0
- data/vendor/libhydrogen/impl/pwhash.h +281 -0
- data/vendor/libhydrogen/impl/random.h +465 -0
- data/vendor/libhydrogen/impl/secretbox.h +236 -0
- data/vendor/libhydrogen/impl/sign.h +207 -0
- data/vendor/libhydrogen/impl/x25519.h +384 -0
- metadata +186 -0
@@ -0,0 +1,321 @@
|
|
1
|
+
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* macOS and Linux toolchains do not always define __unix__; normalize it so
 * later platform checks only need to test one macro. */
#if !defined(__unix__) && (defined(__APPLE__) || defined(__linux__))
#define __unix__ 1
#endif
/* __restrict__ is a GNU extension; erase it on other compilers so the
 * mem_cpy()/mem_xor() signatures below still parse. */
#ifndef __GNUC__
#define __restrict__
#endif

/* Detect host byte order from compiler-provided macros; when the compiler
 * gives no answer, default to little-endian. */
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define NATIVE_BIG_ENDIAN
#endif
#ifndef NATIVE_BIG_ENDIAN
#ifndef NATIVE_LITTLE_ENDIAN
#define NATIVE_LITTLE_ENDIAN
#endif
#endif

/* TLS expands to the platform's thread-local-storage qualifier, or to
 * nothing when no such qualifier is available. */
#ifndef TLS
#if defined(_WIN32) && !defined(__GNUC__)
#define TLS __declspec(thread)
#elif (defined(__clang__) || defined(__GNUC__)) && defined(__unix__)
#define TLS __thread
#else
#define TLS
#endif
#endif

/* Fallback for pre-C99 environments that lack SIZE_MAX. */
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif

/* explicit_bzero() is available on OpenBSD, and on glibc >= 2.25 when
 * _GNU_SOURCE is defined; hydro_memzero() keys off this macro. */
#ifdef __OpenBSD__
#define HAVE_EXPLICIT_BZERO 1
#elif defined(__GLIBC__) && defined(__GLIBC_PREREQ) && defined(_GNU_SOURCE)
#if __GLIBC_PREREQ(2, 25)
#define HAVE_EXPLICIT_BZERO 1
#endif
#endif

/* Compile-time assertion: a false condition yields a negative array size,
 * which is a hard compile error. */
#define COMPILER_ASSERT(X) (void) sizeof(char[(X) ? 1 : -1])

/* Bit rotations.  NOTE(review): callers must keep 0 < b < type width;
 * b == 0 or b == width would shift by the full width, which is UB. */
#define ROTL32(x, b) (uint32_t)(((x) << (b)) | ((x) >> (32 - (b))))
#define ROTL64(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
#define ROTR32(x, b) (uint32_t)(((x) >> (b)) | ((x) << (32 - (b))))
#define ROTR64(x, b) (uint64_t)(((x) >> (b)) | ((x) << (64 - (b))))
|
53
|
+
|
54
|
+
/* Read a 64-bit value stored in little-endian byte order. */
#define LOAD64_LE(SRC) load64_le(SRC)
static inline uint64_t
load64_le(const uint8_t src[8])
{
#ifdef NATIVE_LITTLE_ENDIAN
    /* Host order matches the wire format: a plain copy suffices. */
    uint64_t result;
    memcpy(&result, src, sizeof result);
    return result;
#else
    /* Assemble byte by byte, least-significant first. */
    uint64_t result = 0;
    int      shift;

    for (shift = 0; shift < 64; shift += 8) {
        result |= (uint64_t) *src++ << shift;
    }
    return result;
#endif
}
|
74
|
+
|
75
|
+
/* Write a 64-bit value in little-endian byte order. */
#define STORE64_LE(DST, W) store64_le((DST), (W))
static inline void
store64_le(uint8_t dst[8], uint64_t w)
{
#ifdef NATIVE_LITTLE_ENDIAN
    memcpy(dst, &w, sizeof w);
#else
    int i;

    for (i = 0; i < 8; i++) {
        dst[i] = (uint8_t) (w >> (8 * i));
    }
#endif
}
|
99
|
+
|
100
|
+
/* Read a 32-bit value stored in little-endian byte order. */
#define LOAD32_LE(SRC) load32_le(SRC)
static inline uint32_t
load32_le(const uint8_t src[4])
{
#ifdef NATIVE_LITTLE_ENDIAN
    uint32_t result;
    memcpy(&result, src, sizeof result);
    return result;
#else
    uint32_t result = 0;
    int      shift;

    for (shift = 0; shift < 32; shift += 8) {
        result |= (uint32_t) *src++ << shift;
    }
    return result;
#endif
}
|
116
|
+
|
117
|
+
/* Write a 32-bit value in little-endian byte order. */
#define STORE32_LE(DST, W) store32_le((DST), (W))
static inline void
store32_le(uint8_t dst[4], uint32_t w)
{
#ifdef NATIVE_LITTLE_ENDIAN
    memcpy(dst, &w, sizeof w);
#else
    int i;

    for (i = 0; i < 4; i++) {
        dst[i] = (uint8_t) (w >> (8 * i));
    }
#endif
}
|
133
|
+
|
134
|
+
/* Read a 16-bit value stored in little-endian byte order. */
#define LOAD16_LE(SRC) load16_le(SRC)
static inline uint16_t
load16_le(const uint8_t src[2])
{
#ifdef NATIVE_LITTLE_ENDIAN
    uint16_t result;
    memcpy(&result, src, sizeof result);
    return result;
#else
    return (uint16_t) (src[0] | ((uint16_t) src[1] << 8));
#endif
}
|
148
|
+
|
149
|
+
/* Write a 16-bit value in little-endian byte order. */
#define STORE16_LE(DST, W) store16_le((DST), (W))
static inline void
store16_le(uint8_t dst[2], uint16_t w)
{
#ifdef NATIVE_LITTLE_ENDIAN
    memcpy(dst, &w, sizeof w);
#else
    dst[0] = (uint8_t) (w & 0xff);
    dst[1] = (uint8_t) (w >> 8);
#endif
}
|
161
|
+
|
162
|
+
/* ----- */
|
163
|
+
|
164
|
+
/* Read a 64-bit value stored in big-endian byte order. */
#define LOAD64_BE(SRC) load64_be(SRC)
static inline uint64_t
load64_be(const uint8_t src[8])
{
#ifdef NATIVE_BIG_ENDIAN
    uint64_t result;
    memcpy(&result, src, sizeof result);
    return result;
#else
    /* Fold the bytes in, most-significant first. */
    uint64_t result = 0;
    int      i;

    for (i = 0; i < 8; i++) {
        result = (result << 8) | (uint64_t) src[i];
    }
    return result;
#endif
}
|
184
|
+
|
185
|
+
/* Write a 64-bit value in big-endian byte order. */
#define STORE64_BE(DST, W) store64_be((DST), (W))
static inline void
store64_be(uint8_t dst[8], uint64_t w)
{
#ifdef NATIVE_BIG_ENDIAN
    memcpy(dst, &w, sizeof w);
#else
    int i;

    for (i = 0; i < 8; i++) {
        dst[i] = (uint8_t) (w >> (8 * (7 - i)));
    }
#endif
}
|
209
|
+
|
210
|
+
/* Read a 32-bit value stored in big-endian byte order. */
#define LOAD32_BE(SRC) load32_be(SRC)
static inline uint32_t
load32_be(const uint8_t src[4])
{
#ifdef NATIVE_BIG_ENDIAN
    uint32_t result;
    memcpy(&result, src, sizeof result);
    return result;
#else
    uint32_t result = 0;
    int      i;

    for (i = 0; i < 4; i++) {
        result = (result << 8) | (uint32_t) src[i];
    }
    return result;
#endif
}
|
226
|
+
|
227
|
+
/* Write a 32-bit value in big-endian byte order. */
#define STORE32_BE(DST, W) store32_be((DST), (W))
static inline void
store32_be(uint8_t dst[4], uint32_t w)
{
#ifdef NATIVE_BIG_ENDIAN
    memcpy(dst, &w, sizeof w);
#else
    int i;

    for (i = 0; i < 4; i++) {
        dst[i] = (uint8_t) (w >> (8 * (3 - i)));
    }
#endif
}
|
243
|
+
|
244
|
+
/* Read a 16-bit value stored in big-endian byte order. */
#define LOAD16_BE(SRC) load16_be(SRC)
static inline uint16_t
load16_be(const uint8_t src[2])
{
#ifdef NATIVE_BIG_ENDIAN
    uint16_t result;
    memcpy(&result, src, sizeof result);
    return result;
#else
    return (uint16_t) (((uint16_t) src[0] << 8) | src[1]);
#endif
}
|
258
|
+
|
259
|
+
/* Write a 16-bit value in big-endian byte order. */
#define STORE16_BE(DST, W) store16_be((DST), (W))
static inline void
store16_be(uint8_t dst[2], uint16_t w)
{
#ifdef NATIVE_BIG_ENDIAN
    memcpy(dst, &w, sizeof w);
#else
    dst[0] = (uint8_t) (w >> 8);
    dst[1] = (uint8_t) (w & 0xff);
#endif
}
|
271
|
+
|
272
|
+
/* Copy n bytes from src_ to dst_; the regions must not overlap. */
static inline void
mem_cpy(void *__restrict__ dst_, const void *__restrict__ src_, size_t n)
{
    unsigned char *      out = (unsigned char *) dst_;
    const unsigned char *in  = (const unsigned char *) src_;

    while (n-- > 0) {
        *out++ = *in++;
    }
}
|
283
|
+
|
284
|
+
/* Set n bytes at dst_ to zero.  Note: unlike hydro_memzero(), this is a
 * plain wipe with no guarantee against compiler elision. */
static inline void
mem_zero(void *dst_, size_t n)
{
    unsigned char *out = (unsigned char *) dst_;

    while (n-- > 0) {
        *out++ = 0;
    }
}
|
294
|
+
|
295
|
+
/* XOR n bytes of src_ into dst_ in place; regions must not overlap. */
static inline void
mem_xor(void *__restrict__ dst_, const void *__restrict__ src_, size_t n)
{
    unsigned char *      out = (unsigned char *) dst_;
    const unsigned char *in  = (const unsigned char *) src_;
    size_t               pos;

    for (pos = (size_t) 0U; pos < n; pos++) {
        out[pos] = (unsigned char) (out[pos] ^ in[pos]);
    }
}
|
306
|
+
|
307
|
+
/* Write the byte-wise XOR of src1_ and src2_ into dst_ (n bytes); none of
 * the three regions may overlap. */
static inline void
mem_xor2(void *__restrict__ dst_, const void *__restrict__ src1_, const void *__restrict__ src2_,
         size_t n)
{
    unsigned char *      out = (unsigned char *) dst_;
    const unsigned char *a   = (const unsigned char *) src1_;
    const unsigned char *b   = (const unsigned char *) src2_;
    size_t               pos;

    for (pos = (size_t) 0U; pos < n; pos++) {
        out[pos] = (unsigned char) (a[pos] ^ b[pos]);
    }
}
|
320
|
+
|
321
|
+
/* Shared 64-byte all-zero block.  NOTE(review): its consumers live in other
 * parts of the library not visible in this chunk — confirm at call sites. */
static const uint8_t zero[64] = { 0 };
|
@@ -0,0 +1,223 @@
|
|
1
|
+
/* Initialize the library: seed the CSPRNG.  Aborts the process if no
 * usable entropy source is available; otherwise returns 0. */
int
hydro_init(void)
{
    if (hydro_random_init() == 0) {
        return 0;
    }
    abort();
}
|
9
|
+
|
10
|
+
/* Erase len bytes at pnt in a way the compiler cannot optimize away;
 * used to scrub secret material from memory. */
void
hydro_memzero(void *pnt, size_t len)
{
#ifdef HAVE_EXPLICIT_BZERO
    /* Platform guarantees the wipe is not elided (see common.h detection). */
    explicit_bzero(pnt, len);
#else
    /* volatile pointer-to-volatile forces every store to be emitted even
     * though the buffer is never read again. */
    volatile unsigned char *volatile pnt_ = (volatile unsigned char *volatile) pnt;
    size_t i = (size_t) 0U;

    while (i < len) {
        pnt_[i++] = 0U;
    }
#endif
}
|
24
|
+
|
25
|
+
/* Increment a little-endian multi-byte counter of len bytes by one,
 * touching every byte so the running time is independent of the value
 * (wraps around silently on overflow). */
void
hydro_increment(uint8_t *n, size_t len)
{
    size_t        j     = 0;
    uint_fast16_t carry = 1U;

    while (j < len) {
        carry += (uint_fast16_t) n[j];
        n[j] = (uint8_t) carry;
        carry >>= 8;
        j++;
    }
}
|
37
|
+
|
38
|
+
/* Encode bin_len bytes as a NUL-terminated lowercase hex string in hex.
 * hex_maxlen must be at least bin_len * 2 + 1; otherwise the process is
 * aborted (treated as a programming error, not a runtime failure).
 * Returns hex.  The nibble-to-ASCII mapping is branchless, so timing does
 * not depend on the data being encoded. */
char *
hydro_bin2hex(char *hex, size_t hex_maxlen, const uint8_t *bin, size_t bin_len)
{
    size_t i = (size_t) 0U;
    unsigned int x;
    int b;
    int c;

    if (bin_len >= SIZE_MAX / 2 || hex_maxlen <= bin_len * 2U) {
        abort();
    }
    while (i < bin_len) {
        c = bin[i] & 0xf;
        b = bin[i] >> 4;
        /* 87 + n maps 10..15 to 'a'..'f'; the mask term subtracts 39 when
         * n < 10, yielding '0'..'9' — no data-dependent branch. */
        x = (unsigned char) (87U + c + (((c - 10U) >> 8) & ~38U)) << 8 |
            (unsigned char) (87U + b + (((b - 10U) >> 8) & ~38U));
        hex[i * 2U] = (char) x;
        x >>= 8;
        hex[i * 2U + 1U] = (char) x;
        i++;
    }
    hex[i * 2U] = 0U;

    return hex;
}
|
63
|
+
|
64
|
+
/* Decode a hex string into bin without data-dependent branches on the hex
 * digits themselves.  Characters found in `ignore` (may be NULL) are
 * skipped, but only between byte pairs.  On success returns the number of
 * bytes written; on failure returns -1 with errno set to ERANGE (bin too
 * small) or EINVAL (odd digit count, or — when hex_end_p is NULL —
 * trailing unparsed input).  If hex_end_p is non-NULL it receives a
 * pointer to the first unconsumed character. */
int
hydro_hex2bin(uint8_t *bin, size_t bin_maxlen, const char *hex, size_t hex_len, const char *ignore,
              const char **hex_end_p)
{
    size_t bin_pos = (size_t) 0U;
    size_t hex_pos = (size_t) 0U;
    int ret = 0;
    unsigned char c;
    unsigned char c_alpha0, c_alpha;
    unsigned char c_num0, c_num;
    uint8_t c_acc = 0U;
    uint8_t c_val;
    unsigned char state = 0U; /* 0: expecting high nibble; ~0: low nibble */

    while (hex_pos < hex_len) {
        c = (unsigned char) hex[hex_pos];
        /* Branchless classification: c_num0 is all-ones iff c is '0'..'9';
         * c_alpha0 is all-ones iff c is 'A'..'F' or 'a'..'f'. */
        c_num = c ^ 48U;
        c_num0 = (c_num - 10U) >> 8;
        c_alpha = (c & ~32U) - 55U;
        c_alpha0 = ((c_alpha - 10U) ^ (c_alpha - 16U)) >> 8;
        if ((c_num0 | c_alpha0) == 0U) {
            /* Not a hex digit: skip it if ignorable between pairs,
             * otherwise stop parsing here. */
            if (ignore != NULL && state == 0U && strchr(ignore, c) != NULL) {
                hex_pos++;
                continue;
            }
            break;
        }
        c_val = (uint8_t)((c_num0 & c_num) | (c_alpha0 & c_alpha));
        if (bin_pos >= bin_maxlen) {
            ret = -1;
            errno = ERANGE;
            break;
        }
        if (state == 0U) {
            c_acc = c_val * 16U;
        } else {
            bin[bin_pos++] = c_acc | c_val;
        }
        state = ~state;
        hex_pos++;
    }
    if (state != 0U) {
        /* A dangling high nibble means an odd number of digits. */
        hex_pos--;
        errno = EINVAL;
        ret = -1;
    }
    if (ret != 0) {
        /* Report nothing written on error. */
        bin_pos = (size_t) 0U;
    }
    if (hex_end_p != NULL) {
        *hex_end_p = &hex[hex_pos];
    } else if (hex_pos != hex_len) {
        errno = EINVAL;
        ret = -1;
    }
    if (ret != 0) {
        return ret;
    }
    return (int) bin_pos;
}
|
124
|
+
|
125
|
+
/* Constant-time equality check over len bytes.  Returns true only when
 * every byte matches AND the pointers are distinct: comparing a buffer
 * against itself deliberately returns false (d is flipped to all-ones),
 * since a self-comparison of a secret is almost certainly a bug. */
bool
hydro_equal(const void *b1_, const void *b2_, size_t len)
{
    /* volatile prevents the compiler from short-circuiting the scan. */
    const volatile uint8_t *volatile b1 = (const volatile uint8_t *volatile) b1_;
    const uint8_t *b2 = (const uint8_t *) b2_;
    size_t i;
    uint8_t d = (uint8_t) 0U;

    if (b1 == b2) {
        d = ~d;
    }
    for (i = 0U; i < len; i++) {
        d |= b1[i] ^ b2[i];
    }
    /* (d - 1) >> 8 has bit 0 set iff d == 0, i.e. no byte differed. */
    return (bool) (1 & ((d - 1) >> 8));
}
|
141
|
+
|
142
|
+
/* Constant-time lexicographic comparison of len bytes.
 * Returns -1, 0 or 1 (memcmp-style sign) with no data-dependent branches. */
int
hydro_compare(const uint8_t *b1_, const uint8_t *b2_, size_t len)
{
    /* volatile prevents the compiler from short-circuiting the scan. */
    const volatile uint8_t *volatile b1 = (const volatile uint8_t *volatile) b1_;
    const uint8_t *b2 = (const uint8_t *) b2_;
    uint8_t gt = 0U;
    uint8_t eq = 1U;
    size_t i;

    /* Scan from the last byte toward the first; eq gates gt so that only
     * the earliest differing position determines the outcome. */
    i = len;
    while (i != 0U) {
        i--;
        gt |= ((b2[i] - b1[i]) >> 8) & eq;
        eq &= ((b2[i] ^ b1[i]) - 1) >> 8;
    }
    /* gt=1,eq=0 -> 1; gt=0,eq=1 -> 0; gt=0,eq=0 -> -1. */
    return (int) (gt + gt + eq) - 1;
}
|
159
|
+
|
160
|
+
/* Append ISO/IEC 7816-4 padding (a 0x80 marker followed by zeros) so the
 * total length becomes a multiple of blocksize, in constant time with
 * respect to unpadded_buflen.  buf must have room for max_buflen bytes.
 * Returns the padded length, or -1 when blocksize is 0, max_buflen exceeds
 * INT_MAX, or the padded result would not fit. */
int
hydro_pad(unsigned char *buf, size_t unpadded_buflen, size_t blocksize, size_t max_buflen)
{
    unsigned char * tail;
    size_t i;
    size_t xpadlen;
    size_t xpadded_len;
    volatile unsigned char mask;
    unsigned char barrier_mask;

    if (blocksize <= 0U || max_buflen > INT_MAX) {
        return -1;
    }
    /* Bytes to add before the final padded byte: bitmask when blocksize is
     * a power of two, modulo otherwise. */
    xpadlen = blocksize - 1U;
    if ((blocksize & (blocksize - 1U)) == 0U) {
        xpadlen -= unpadded_buflen & (blocksize - 1U);
    } else {
        xpadlen -= unpadded_buflen % blocksize;
    }
    if (SIZE_MAX - unpadded_buflen <= xpadlen) {
        return -1;
    }
    xpadded_len = unpadded_buflen + xpadlen;
    if (xpadded_len >= max_buflen) {
        return -1;
    }
    tail = &buf[xpadded_len];
    /* Always walk a full block backwards so the access pattern does not
     * leak xpadlen; barrier_mask is all-ones exactly at i == xpadlen (the
     * 0x80 marker position), and mask stays all-ones past it, preserving
     * the original message bytes. */
    mask = 0U;
    for (i = 0; i < blocksize; i++) {
        barrier_mask = (unsigned char) (((i ^ xpadlen) - 1U) >> ((sizeof(size_t) - 1U) * CHAR_BIT));
        tail[-i] = (tail[-i] & mask) | (0x80 & barrier_mask);
        mask |= barrier_mask;
    }
    return (int) (xpadded_len + 1);
}
|
195
|
+
|
196
|
+
/* Locate and strip ISO/IEC 7816-4 padding in constant time: scan the last
 * block backwards for the 0x80 marker, requiring every byte after it to be
 * zero.  Returns the unpadded length, or -1 when the padding is invalid
 * (or the buffer is shorter than one block). */
int
hydro_unpad(const unsigned char *buf, size_t padded_buflen, size_t blocksize)
{
    const unsigned char *tail;
    unsigned char acc = 0U;
    unsigned char c;
    unsigned char valid = 0U;
    volatile size_t pad_len = 0U;
    size_t i;
    size_t is_barrier;

    if (padded_buflen < blocksize || blocksize <= 0U) {
        return -1;
    }
    tail = &buf[padded_buflen - 1U];

    /* Always examine a full block so timing does not depend on where the
     * marker sits. */
    for (i = 0U; i < blocksize; i++) {
        c = tail[-i];
        /* is_barrier is 1 only when c == 0x80, no marker was found yet
         * (pad_len still 0) and all bytes scanned so far were zero. */
        is_barrier = (((acc - 1U) & (pad_len - 1U) & ((c ^ 0x80) - 1U)) >> 8) & 1U;
        acc |= c;
        pad_len |= (i & -is_barrier);
        valid |= (unsigned char) is_barrier;
    }
    if (valid == 0) {
        return -1;
    }
    return (int) (padded_buflen - 1 - pad_len);
}
|