grpc 1.10.0.pre1 → 1.10.0.pre2

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (66) hide show
  1. checksums.yaml +4 -4
  2. data/Makefile +3 -3
  3. data/src/core/ext/filters/max_age/max_age_filter.cc +3 -0
  4. data/src/core/lib/surface/version.cc +1 -1
  5. data/src/ruby/lib/grpc/version.rb +1 -1
  6. data/third_party/boringssl/crypto/fipsmodule/aes/aes.c +1100 -0
  7. data/third_party/boringssl/crypto/fipsmodule/aes/key_wrap.c +138 -0
  8. data/third_party/boringssl/crypto/fipsmodule/aes/mode_wrappers.c +112 -0
  9. data/third_party/boringssl/crypto/fipsmodule/bn/add.c +375 -0
  10. data/third_party/boringssl/crypto/fipsmodule/bn/asm/x86_64-gcc.c +537 -0
  11. data/third_party/boringssl/crypto/fipsmodule/bn/bn.c +370 -0
  12. data/third_party/boringssl/crypto/fipsmodule/bn/bytes.c +269 -0
  13. data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +239 -0
  14. data/third_party/boringssl/crypto/fipsmodule/bn/ctx.c +303 -0
  15. data/third_party/boringssl/crypto/fipsmodule/bn/div.c +726 -0
  16. data/third_party/boringssl/crypto/fipsmodule/bn/exponentiation.c +1233 -0
  17. data/third_party/boringssl/crypto/fipsmodule/bn/gcd.c +627 -0
  18. data/third_party/boringssl/crypto/fipsmodule/bn/generic.c +715 -0
  19. data/third_party/boringssl/crypto/fipsmodule/bn/jacobi.c +146 -0
  20. data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +430 -0
  21. data/third_party/boringssl/crypto/fipsmodule/bn/montgomery_inv.c +207 -0
  22. data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +869 -0
  23. data/third_party/boringssl/crypto/fipsmodule/bn/prime.c +894 -0
  24. data/third_party/boringssl/crypto/fipsmodule/bn/random.c +283 -0
  25. data/third_party/boringssl/crypto/fipsmodule/bn/rsaz_exp.c +254 -0
  26. data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +307 -0
  27. data/third_party/boringssl/crypto/fipsmodule/bn/sqrt.c +502 -0
  28. data/third_party/boringssl/crypto/fipsmodule/cipher/aead.c +284 -0
  29. data/third_party/boringssl/crypto/fipsmodule/cipher/cipher.c +613 -0
  30. data/third_party/boringssl/crypto/fipsmodule/cipher/e_aes.c +1437 -0
  31. data/third_party/boringssl/crypto/fipsmodule/cipher/e_des.c +233 -0
  32. data/third_party/boringssl/crypto/fipsmodule/des/des.c +785 -0
  33. data/third_party/boringssl/crypto/fipsmodule/digest/digest.c +256 -0
  34. data/third_party/boringssl/crypto/fipsmodule/digest/digests.c +280 -0
  35. data/third_party/boringssl/crypto/fipsmodule/ec/ec.c +842 -0
  36. data/third_party/boringssl/crypto/fipsmodule/ec/ec_key.c +517 -0
  37. data/third_party/boringssl/crypto/fipsmodule/ec/ec_montgomery.c +304 -0
  38. data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +404 -0
  39. data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +1165 -0
  40. data/third_party/boringssl/crypto/fipsmodule/ec/p256-64.c +1708 -0
  41. data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.c +561 -0
  42. data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +1113 -0
  43. data/third_party/boringssl/crypto/fipsmodule/ec/util-64.c +109 -0
  44. data/third_party/boringssl/crypto/fipsmodule/ec/wnaf.c +456 -0
  45. data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +444 -0
  46. data/third_party/boringssl/crypto/fipsmodule/hmac/hmac.c +228 -0
  47. data/third_party/boringssl/crypto/fipsmodule/md4/md4.c +254 -0
  48. data/third_party/boringssl/crypto/fipsmodule/md5/md5.c +298 -0
  49. data/third_party/boringssl/crypto/fipsmodule/modes/cbc.c +212 -0
  50. data/third_party/boringssl/crypto/fipsmodule/modes/cfb.c +230 -0
  51. data/third_party/boringssl/crypto/fipsmodule/modes/ctr.c +219 -0
  52. data/third_party/boringssl/crypto/fipsmodule/modes/gcm.c +1074 -0
  53. data/third_party/boringssl/crypto/fipsmodule/modes/ofb.c +95 -0
  54. data/third_party/boringssl/crypto/fipsmodule/modes/polyval.c +91 -0
  55. data/third_party/boringssl/crypto/fipsmodule/rand/ctrdrbg.c +200 -0
  56. data/third_party/boringssl/crypto/fipsmodule/rand/rand.c +358 -0
  57. data/third_party/boringssl/crypto/fipsmodule/rand/urandom.c +302 -0
  58. data/third_party/boringssl/crypto/fipsmodule/rsa/blinding.c +263 -0
  59. data/third_party/boringssl/crypto/fipsmodule/rsa/padding.c +692 -0
  60. data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +855 -0
  61. data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +1061 -0
  62. data/third_party/boringssl/crypto/fipsmodule/sha/sha1-altivec.c +361 -0
  63. data/third_party/boringssl/crypto/fipsmodule/sha/sha1.c +375 -0
  64. data/third_party/boringssl/crypto/fipsmodule/sha/sha256.c +337 -0
  65. data/third_party/boringssl/crypto/fipsmodule/sha/sha512.c +608 -0
  66. metadata +62 -2
@@ -0,0 +1,361 @@
1
+ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
2
+ * All rights reserved.
3
+ *
4
+ * This package is an SSL implementation written
5
+ * by Eric Young (eay@cryptsoft.com).
6
+ * The implementation was written so as to conform with Netscapes SSL.
7
+ *
8
+ * This library is free for commercial and non-commercial use as long as
9
+ * the following conditions are aheared to. The following conditions
10
+ * apply to all code found in this distribution, be it the RC4, RSA,
11
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
12
+ * included with this distribution is covered by the same copyright terms
13
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
14
+ *
15
+ * Copyright remains Eric Young's, and as such any Copyright notices in
16
+ * the code are not to be removed.
17
+ * If this package is used in a product, Eric Young should be given attribution
18
+ * as the author of the parts of the library used.
19
+ * This can be in the form of a textual message at program startup or
20
+ * in documentation (online or textual) provided with the package.
21
+ *
22
+ * Redistribution and use in source and binary forms, with or without
23
+ * modification, are permitted provided that the following conditions
24
+ * are met:
25
+ * 1. Redistributions of source code must retain the copyright
26
+ * notice, this list of conditions and the following disclaimer.
27
+ * 2. Redistributions in binary form must reproduce the above copyright
28
+ * notice, this list of conditions and the following disclaimer in the
29
+ * documentation and/or other materials provided with the distribution.
30
+ * 3. All advertising materials mentioning features or use of this software
31
+ * must display the following acknowledgement:
32
+ * "This product includes cryptographic software written by
33
+ * Eric Young (eay@cryptsoft.com)"
34
+ * The word 'cryptographic' can be left out if the rouines from the library
35
+ * being used are not cryptographic related :-).
36
+ * 4. If you include any Windows specific code (or a derivative thereof) from
37
+ * the apps directory (application code) you must include an acknowledgement:
38
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
39
+ *
40
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
41
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50
+ * SUCH DAMAGE.
51
+ *
52
+ * The licence and distribution terms for any publically available version or
53
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
54
+ * copied and put under another distribution licence
55
+ * [including the GNU Public Licence.] */
56
+
57
+ // Altivec-optimized SHA1 in C. This is tested on ppc64le only.
58
+ //
59
+ // References:
60
+ // https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
61
+ // http://arctic.org/~dean/crypto/sha1.html
62
+ //
63
+ // This code used the generic SHA-1 from OpenSSL as a basis and AltiVec
64
+ // optimisations were added on top.
65
+
66
+ #include <openssl/sha.h>
67
+
68
+ #if defined(OPENSSL_PPC64LE)
69
+
70
+ #include <altivec.h>
71
+
72
+ void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
73
+
74
// Rotate the 32-bit word |a| left by |n| bits. |n| must be in 1..31: a shift
// count of 0 or 32 would make one of the component shifts undefined. Callers
// in this file only use 5 and 30.
static uint32_t rotate(uint32_t a, int n) {
  const uint32_t high_part = a << n;
  const uint32_t low_part = a >> (32 - n);
  return high_part | low_part;
}
75
+
76
typedef vector unsigned int vec_uint32_t;
typedef vector unsigned char vec_uint8_t;

// Vector constants
// Permutation mask for vec_perm that reverses the bytes within each 32-bit
// lane, converting the big-endian SHA-1 message words into host order on
// little-endian ppc64le.
static const vec_uint8_t k_swap_endianness = {3, 2, 1, 0, 7, 6, 5, 4,
                                              11, 10, 9, 8, 15, 14, 13, 12};

// Shift amounts for byte and bit shifts and rotations
// Every byte holds the same value so the shift count is well defined no
// matter which byte the vec_slo/vec_sro intrinsics draw it from:
// 32 bits == 4 bytes (one lane), 96 bits == 12 bytes (three lanes).
// NOTE(review): which byte supplies the count is endian-dependent —
// consistent with the "may not be correct for big-endian" warning below.
static const vec_uint8_t k_4_bytes = {32, 32, 32, 32, 32, 32, 32, 32,
                                      32, 32, 32, 32, 32, 32, 32, 32};
static const vec_uint8_t k_12_bytes = {96, 96, 96, 96, 96, 96, 96, 96,
                                       96, 96, 96, 96, 96, 96, 96, 96};

// SHA-1 round constants, one per group of twenty rounds
// (FIPS 180-4, section 4.2.1).
#define K_00_19 0x5a827999UL
#define K_20_39 0x6ed9eba1UL
#define K_40_59 0x8f1bbcdcUL
#define K_60_79 0xca62c1d6UL

// Vector versions of the above.
static const vec_uint32_t K_00_19_x_4 = {K_00_19, K_00_19, K_00_19, K_00_19};
static const vec_uint32_t K_20_39_x_4 = {K_20_39, K_20_39, K_20_39, K_20_39};
static const vec_uint32_t K_40_59_x_4 = {K_40_59, K_40_59, K_40_59, K_40_59};
static const vec_uint32_t K_60_79_x_4 = {K_60_79, K_60_79, K_60_79, K_60_79};
99
+
100
// vector message scheduling: compute message schedule for round i..i+3 where i
// is divisible by 4. We return the schedule w[i..i+3] as a vector. In
// addition, we also precompute sum w[i..+3] and an additive constant K. This
// is done to offload some computation of f() in the integer execution units.
//
// Byte shifting code below may not be correct for big-endian systems.
//
// Rounds 0..15: the schedule is simply the next 16 bytes of the message,
// loaded (possibly unaligned) and byte-swapped into host order. w + K is
// stored to |*pre_added| so the scalar round bodies can read w[i] + K
// directly.
static vec_uint32_t sched_00_15(vec_uint32_t *pre_added, const void *data,
                                vec_uint32_t k) {
  // Unaligned VSX load; |data| need not be 16-byte aligned.
  const vector unsigned char unaligned_data =
      vec_vsx_ld(0, (const unsigned char*) data);
  const vec_uint32_t v = (vec_uint32_t) unaligned_data;
  // Convert the four big-endian message words into host order.
  const vec_uint32_t w = vec_perm(v, v, k_swap_endianness);
  // vec_st is an aligned store; |pre_added| points at an element of the
  // caller's vector array, which is suitably aligned.
  vec_st(w + k, 0, pre_added);
  return w;
}
115
+
116
// Compute w[i..i+3] using these steps for i in [16, 20, 24, 28]
//
// w'[i ] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1
// w'[i+1] = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1
// w'[i+2] = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1
// w'[i+3] = ( 0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1
//
// w[ i] = w'[ i]
// w[i+1] = w'[i+1]
// w[i+2] = w'[i+2]
// w[i+3] = w'[i+3] ^ (w'[i] <<< 1)
//
// Lane i+3 depends on w[i] from the same group of four, so it cannot be
// produced in one step; the final XOR below patches it up.
static vec_uint32_t sched_16_31(vec_uint32_t *pre_added, vec_uint32_t minus_4,
                                vec_uint32_t minus_8, vec_uint32_t minus_12,
                                vec_uint32_t minus_16, vec_uint32_t k) {
  // {w[i-3], w[i-2], w[i-1], 0}: previous group shifted by one lane.
  const vec_uint32_t minus_3 = vec_sro(minus_4, k_4_bytes);
  // {w[i-14]..w[i-11]}: spliced from the two oldest groups.
  const vec_uint32_t minus_14 = vec_sld((minus_12), (minus_16), 8);
  const vec_uint32_t k_1_bit = vec_splat_u32(1);
  const vec_uint32_t w_prime =
      vec_rl(minus_3 ^ minus_8 ^ minus_14 ^ minus_16, k_1_bit);
  // Patch lane i+3 with (w'[i] <<< 1); the 12-byte vec_slo moves w'[i] into
  // the last lane and leaves the other lanes zero (XOR no-ops).
  const vec_uint32_t w =
      w_prime ^ vec_rl(vec_slo(w_prime, k_12_bytes), k_1_bit);
  // Store w + K for the scalar round bodies.
  vec_st(w + k, 0, pre_added);
  return w;
}
140
+
141
// Compute w[i..i+3] using this relation for i in [32, 36, 40 ... 76]
// w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) <<< 2
//
// Unlike the i-3 recurrence used by sched_16_31, every input here lies at
// least one whole vector in the past, so all four lanes can be produced in a
// single step with no patch-up.
static vec_uint32_t sched_32_79(vec_uint32_t *pre_added, vec_uint32_t minus_4,
                                vec_uint32_t minus_8, vec_uint32_t minus_16,
                                vec_uint32_t minus_28, vec_uint32_t minus_32,
                                vec_uint32_t k) {
  // {w[i-6]..w[i-3]}: spliced from the two most recent groups.
  const vec_uint32_t minus_6 = vec_sld(minus_4, minus_8, 8);
  const vec_uint32_t k_2_bits = vec_splat_u32(2);
  const vec_uint32_t w =
      vec_rl(minus_6 ^ minus_16 ^ minus_28 ^ minus_32, k_2_bits);
  // Store w + K for the scalar round bodies.
  vec_st(w + k, 0, pre_added);
  return w;
}
154
+
155
// As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
// to the code in F_00_19. Wei attributes these optimisations to Peter
// Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define
// F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another
// tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a
//
// F_00_19 is SHA-1's Ch, F_20_39/F_60_79 Parity, F_40_59 Maj
// (FIPS 180-4, section 4.1.1), in reduced-operation form.
#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
#define F_60_79(b, c, d) F_20_39(b, c, d)

// We pre-added the K constants during message scheduling.
// Each BODY_* performs one SHA-1 round:
//   f = (w[i] + K) + e + rol(a, 5) + F(b, c, d)   (K already folded into w[i])
//   b = rol(b, 30)
// |w| is resolved at the expansion site: the caller's scalar view of the
// pre-added schedule. The caller rotates the roles of the five working
// variables between rounds instead of moving values, so |f| names whichever
// variable receives the new value this round.
#define BODY_00_19(i, a, b, c, d, e, f)                         \
  do {                                                          \
    (f) = w[i] + (e) + rotate((a), 5) + F_00_19((b), (c), (d)); \
    (b) = rotate((b), 30);                                      \
  } while (0)

#define BODY_20_39(i, a, b, c, d, e, f)                         \
  do {                                                          \
    (f) = w[i] + (e) + rotate((a), 5) + F_20_39((b), (c), (d)); \
    (b) = rotate((b), 30);                                      \
  } while (0)

#define BODY_40_59(i, a, b, c, d, e, f)                         \
  do {                                                          \
    (f) = w[i] + (e) + rotate((a), 5) + F_40_59((b), (c), (d)); \
    (b) = rotate((b), 30);                                      \
  } while (0)

#define BODY_60_79(i, a, b, c, d, e, f)                         \
  do {                                                          \
    (f) = w[i] + (e) + rotate((a), 5) + F_60_79((b), (c), (d)); \
    (b) = rotate((b), 30);                                      \
  } while (0)
189
+
190
// SHA-1 compression over |num| consecutive 64-byte blocks at |data|, updating
// the five 32-bit chaining values in |state|. The message schedule is
// produced four words at a time in the vector unit (with the round constant K
// pre-added, see sched_*) while the 80 rounds run in the scalar units,
// reading the schedule back through |w|. Interleaving each vector schedule
// step with four scalar rounds keeps both pipelines busy.
void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
  uint32_t A, B, C, D, E, T;

  A = state[0];
  B = state[1];
  C = state[2];
  D = state[3];
  E = state[4];

  for (;;) {
    // Pre-added schedule: vw[j] holds w[4j..4j+3] + K for the round group.
    vec_uint32_t vw[20];
    // Scalar view of the schedule used by the BODY_* macros.
    // NOTE(review): reading vector storage through a uint32_t* is a
    // strict-aliasing gray area; presumably relied upon as a known-good
    // compiler behaviour on this target — confirm before reuse elsewhere.
    const uint32_t *w = (const uint32_t *)&vw;

    vec_uint32_t k = K_00_19_x_4;
    const vec_uint32_t w0 = sched_00_15(vw + 0, data + 0, k);
    BODY_00_19(0, A, B, C, D, E, T);
    BODY_00_19(1, T, A, B, C, D, E);
    BODY_00_19(2, E, T, A, B, C, D);
    BODY_00_19(3, D, E, T, A, B, C);

    const vec_uint32_t w4 = sched_00_15(vw + 1, data + 16, k);
    BODY_00_19(4, C, D, E, T, A, B);
    BODY_00_19(5, B, C, D, E, T, A);
    BODY_00_19(6, A, B, C, D, E, T);
    BODY_00_19(7, T, A, B, C, D, E);

    const vec_uint32_t w8 = sched_00_15(vw + 2, data + 32, k);
    BODY_00_19(8, E, T, A, B, C, D);
    BODY_00_19(9, D, E, T, A, B, C);
    BODY_00_19(10, C, D, E, T, A, B);
    BODY_00_19(11, B, C, D, E, T, A);

    const vec_uint32_t w12 = sched_00_15(vw + 3, data + 48, k);
    BODY_00_19(12, A, B, C, D, E, T);
    BODY_00_19(13, T, A, B, C, D, E);
    BODY_00_19(14, E, T, A, B, C, D);
    BODY_00_19(15, D, E, T, A, B, C);

    const vec_uint32_t w16 = sched_16_31(vw + 4, w12, w8, w4, w0, k);
    BODY_00_19(16, C, D, E, T, A, B);
    BODY_00_19(17, B, C, D, E, T, A);
    BODY_00_19(18, A, B, C, D, E, T);
    BODY_00_19(19, T, A, B, C, D, E);

    // Switch to the round-20..39 constant for subsequent schedule groups.
    k = K_20_39_x_4;
    const vec_uint32_t w20 = sched_16_31(vw + 5, w16, w12, w8, w4, k);
    BODY_20_39(20, E, T, A, B, C, D);
    BODY_20_39(21, D, E, T, A, B, C);
    BODY_20_39(22, C, D, E, T, A, B);
    BODY_20_39(23, B, C, D, E, T, A);

    const vec_uint32_t w24 = sched_16_31(vw + 6, w20, w16, w12, w8, k);
    BODY_20_39(24, A, B, C, D, E, T);
    BODY_20_39(25, T, A, B, C, D, E);
    BODY_20_39(26, E, T, A, B, C, D);
    BODY_20_39(27, D, E, T, A, B, C);

    const vec_uint32_t w28 = sched_16_31(vw + 7, w24, w20, w16, w12, k);
    BODY_20_39(28, C, D, E, T, A, B);
    BODY_20_39(29, B, C, D, E, T, A);
    BODY_20_39(30, A, B, C, D, E, T);
    BODY_20_39(31, T, A, B, C, D, E);

    const vec_uint32_t w32 = sched_32_79(vw + 8, w28, w24, w16, w4, w0, k);
    BODY_20_39(32, E, T, A, B, C, D);
    BODY_20_39(33, D, E, T, A, B, C);
    BODY_20_39(34, C, D, E, T, A, B);
    BODY_20_39(35, B, C, D, E, T, A);

    const vec_uint32_t w36 = sched_32_79(vw + 9, w32, w28, w20, w8, w4, k);
    BODY_20_39(36, A, B, C, D, E, T);
    BODY_20_39(37, T, A, B, C, D, E);
    BODY_20_39(38, E, T, A, B, C, D);
    BODY_20_39(39, D, E, T, A, B, C);

    k = K_40_59_x_4;
    const vec_uint32_t w40 = sched_32_79(vw + 10, w36, w32, w24, w12, w8, k);
    BODY_40_59(40, C, D, E, T, A, B);
    BODY_40_59(41, B, C, D, E, T, A);
    BODY_40_59(42, A, B, C, D, E, T);
    BODY_40_59(43, T, A, B, C, D, E);

    const vec_uint32_t w44 = sched_32_79(vw + 11, w40, w36, w28, w16, w12, k);
    BODY_40_59(44, E, T, A, B, C, D);
    BODY_40_59(45, D, E, T, A, B, C);
    BODY_40_59(46, C, D, E, T, A, B);
    BODY_40_59(47, B, C, D, E, T, A);

    const vec_uint32_t w48 = sched_32_79(vw + 12, w44, w40, w32, w20, w16, k);
    BODY_40_59(48, A, B, C, D, E, T);
    BODY_40_59(49, T, A, B, C, D, E);
    BODY_40_59(50, E, T, A, B, C, D);
    BODY_40_59(51, D, E, T, A, B, C);

    const vec_uint32_t w52 = sched_32_79(vw + 13, w48, w44, w36, w24, w20, k);
    BODY_40_59(52, C, D, E, T, A, B);
    BODY_40_59(53, B, C, D, E, T, A);
    BODY_40_59(54, A, B, C, D, E, T);
    BODY_40_59(55, T, A, B, C, D, E);

    const vec_uint32_t w56 = sched_32_79(vw + 14, w52, w48, w40, w28, w24, k);
    BODY_40_59(56, E, T, A, B, C, D);
    BODY_40_59(57, D, E, T, A, B, C);
    BODY_40_59(58, C, D, E, T, A, B);
    BODY_40_59(59, B, C, D, E, T, A);

    k = K_60_79_x_4;
    const vec_uint32_t w60 = sched_32_79(vw + 15, w56, w52, w44, w32, w28, k);
    BODY_60_79(60, A, B, C, D, E, T);
    BODY_60_79(61, T, A, B, C, D, E);
    BODY_60_79(62, E, T, A, B, C, D);
    BODY_60_79(63, D, E, T, A, B, C);

    const vec_uint32_t w64 = sched_32_79(vw + 16, w60, w56, w48, w36, w32, k);
    BODY_60_79(64, C, D, E, T, A, B);
    BODY_60_79(65, B, C, D, E, T, A);
    BODY_60_79(66, A, B, C, D, E, T);
    BODY_60_79(67, T, A, B, C, D, E);

    const vec_uint32_t w68 = sched_32_79(vw + 17, w64, w60, w52, w40, w36, k);
    BODY_60_79(68, E, T, A, B, C, D);
    BODY_60_79(69, D, E, T, A, B, C);
    BODY_60_79(70, C, D, E, T, A, B);
    BODY_60_79(71, B, C, D, E, T, A);

    const vec_uint32_t w72 = sched_32_79(vw + 18, w68, w64, w56, w44, w40, k);
    BODY_60_79(72, A, B, C, D, E, T);
    BODY_60_79(73, T, A, B, C, D, E);
    BODY_60_79(74, E, T, A, B, C, D);
    BODY_60_79(75, D, E, T, A, B, C);

    // We don't use the last value
    (void)sched_32_79(vw + 19, w72, w68, w60, w48, w44, k);
    BODY_60_79(76, C, D, E, T, A, B);
    BODY_60_79(77, B, C, D, E, T, A);
    BODY_60_79(78, A, B, C, D, E, T);
    BODY_60_79(79, T, A, B, C, D, E);

    // The mask is a no-op for uint32_t; retained from the original OpenSSL
    // code. Note the permuted assignment (E, T, A, B, C): after 80 rounds of
    // role rotation those variables hold the final a..e values.
    const uint32_t mask = 0xffffffffUL;
    state[0] = (state[0] + E) & mask;
    state[1] = (state[1] + T) & mask;
    state[2] = (state[2] + A) & mask;
    state[3] = (state[3] + B) & mask;
    state[4] = (state[4] + C) & mask;

    data += 64;
    if (--num == 0) {
      break;
    }

    A = state[0];
    B = state[1];
    C = state[2];
    D = state[3];
    E = state[4];
  }
}
347
+
348
+ #endif // OPENSSL_PPC64LE
349
+
350
+ #undef K_00_19
351
+ #undef K_20_39
352
+ #undef K_40_59
353
+ #undef K_60_79
354
+ #undef F_00_19
355
+ #undef F_20_39
356
+ #undef F_40_59
357
+ #undef F_60_79
358
+ #undef BODY_00_19
359
+ #undef BODY_20_39
360
+ #undef BODY_40_59
361
+ #undef BODY_60_79
@@ -0,0 +1,375 @@
1
+ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
2
+ * All rights reserved.
3
+ *
4
+ * This package is an SSL implementation written
5
+ * by Eric Young (eay@cryptsoft.com).
6
+ * The implementation was written so as to conform with Netscapes SSL.
7
+ *
8
+ * This library is free for commercial and non-commercial use as long as
9
+ * the following conditions are aheared to. The following conditions
10
+ * apply to all code found in this distribution, be it the RC4, RSA,
11
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
12
+ * included with this distribution is covered by the same copyright terms
13
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
14
+ *
15
+ * Copyright remains Eric Young's, and as such any Copyright notices in
16
+ * the code are not to be removed.
17
+ * If this package is used in a product, Eric Young should be given attribution
18
+ * as the author of the parts of the library used.
19
+ * This can be in the form of a textual message at program startup or
20
+ * in documentation (online or textual) provided with the package.
21
+ *
22
+ * Redistribution and use in source and binary forms, with or without
23
+ * modification, are permitted provided that the following conditions
24
+ * are met:
25
+ * 1. Redistributions of source code must retain the copyright
26
+ * notice, this list of conditions and the following disclaimer.
27
+ * 2. Redistributions in binary form must reproduce the above copyright
28
+ * notice, this list of conditions and the following disclaimer in the
29
+ * documentation and/or other materials provided with the distribution.
30
+ * 3. All advertising materials mentioning features or use of this software
31
+ * must display the following acknowledgement:
32
+ * "This product includes cryptographic software written by
33
+ * Eric Young (eay@cryptsoft.com)"
34
+ * The word 'cryptographic' can be left out if the rouines from the library
35
+ * being used are not cryptographic related :-).
36
+ * 4. If you include any Windows specific code (or a derivative thereof) from
37
+ * the apps directory (application code) you must include an acknowledgement:
38
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
39
+ *
40
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
41
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50
+ * SUCH DAMAGE.
51
+ *
52
+ * The licence and distribution terms for any publically available version or
53
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
54
+ * copied and put under another distribution licence
55
+ * [including the GNU Public Licence.] */
56
+
57
+ #include <openssl/sha.h>
58
+
59
+ #include <string.h>
60
+
61
+ #include <openssl/mem.h>
62
+
63
+ #include "../../internal.h"
64
+
65
+
66
+ #if (!defined(OPENSSL_NO_ASM) && \
67
+ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
68
+ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64))) || \
69
+ defined(OPENSSL_PPC64LE)
70
+ #define SHA1_ASM
71
+ #endif
72
+
73
+ int SHA1_Init(SHA_CTX *sha) {
74
+ OPENSSL_memset(sha, 0, sizeof(SHA_CTX));
75
+ sha->h[0] = 0x67452301UL;
76
+ sha->h[1] = 0xefcdab89UL;
77
+ sha->h[2] = 0x98badcfeUL;
78
+ sha->h[3] = 0x10325476UL;
79
+ sha->h[4] = 0xc3d2e1f0UL;
80
+ return 1;
81
+ }
82
+
83
// SHA1 computes the 20-byte SHA-1 digest of |len| bytes at |data| in one
// shot, writes it to |out| (which must have room for SHA_DIGEST_LENGTH
// bytes), and returns |out|.
uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out) {
  SHA_CTX ctx;
  SHA1_Init(&ctx);
  SHA1_Update(&ctx, data, len);
  SHA1_Final(out, &ctx);
  // Scrub the stack-allocated context so intermediate hash state (which can
  // leak information about the input) is not left behind; a plain memset
  // here could be elided by the optimizer.
  OPENSSL_cleanse(&ctx, sizeof(ctx));
  return out;
}
91
+
92
// Parameterisation of the shared Merkle-Damgard driver in md32_common.h,
// which expands these macros into SHA1_Update, SHA1_Transform and SHA1_Final.
#define DATA_ORDER_IS_BIG_ENDIAN

#define HASH_CTX SHA_CTX
#define HASH_CBLOCK 64
// Serialise the five chaining words of |c| into the byte buffer |s|, using
// HOST_l2c from md32_common.h (big-endian, per DATA_ORDER_IS_BIG_ENDIAN).
#define HASH_MAKE_STRING(c, s) \
  do {                         \
    uint32_t ll;               \
    ll = (c)->h[0];            \
    HOST_l2c(ll, (s));         \
    ll = (c)->h[1];            \
    HOST_l2c(ll, (s));         \
    ll = (c)->h[2];            \
    HOST_l2c(ll, (s));         \
    ll = (c)->h[3];            \
    HOST_l2c(ll, (s));         \
    ll = (c)->h[4];            \
    HOST_l2c(ll, (s));         \
  } while (0)

#define HASH_UPDATE SHA1_Update
#define HASH_TRANSFORM SHA1_Transform
#define HASH_FINAL SHA1_Final
#define HASH_BLOCK_DATA_ORDER sha1_block_data_order
// 32-bit rotate left; the count must be 1..31 (a shift by 32 is undefined).
#define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n))))
// One step of the SHA-1 message schedule:
//   w[i] = rol(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1)
// The result is written to both |a| and the schedule slot |ix|.
#define Xupdate(a, ix, ia, ib, ic, id) \
  do {                                 \
    (a) = ((ia) ^ (ib) ^ (ic) ^ (id)); \
    (ix) = (a) = ROTATE((a), 1);       \
  } while (0)

// When an assembly (or AltiVec) implementation provides the block function,
// SHA1_ASM is defined and this declares the external symbol; otherwise the C
// fallback below is file-local.
#ifndef SHA1_ASM
static
#endif
void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num);

#include "../digest/md32_common.h"
128
+
129
// SHA-1 round constants, one per group of twenty rounds
// (FIPS 180-4, section 4.2.1).
#define K_00_19 0x5a827999UL
#define K_20_39 0x6ed9eba1UL
#define K_40_59 0x8f1bbcdcUL
#define K_60_79 0xca62c1d6UL

// As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
// to the code in F_00_19. Wei attributes these optimisations to Peter
// Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define
// F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another
// tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a
//
// F_00_19 is SHA-1's Ch, F_20_39/F_60_79 Parity, F_40_59 Maj
// (FIPS 180-4, section 4.1.1), in reduced-operation form.
#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
#define F_60_79(b, c, d) F_20_39(b, c, d)

// Each BODY_* performs one SHA-1 round; the caller rotates the roles of the
// five working variables between rounds instead of moving values, so |f|
// names whichever variable receives the new value. Rounds 16..79 fold the
// message-schedule update (Xupdate) into the round itself.
#define BODY_00_15(i, a, b, c, d, e, f, xi)                               \
  do {                                                                    \
    (f) = (xi) + (e) + K_00_19 + ROTATE((a), 5) + F_00_19((b), (c), (d)); \
    (b) = ROTATE((b), 30);                                                \
  } while (0)

#define BODY_16_19(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)         \
  do {                                                              \
    Xupdate(f, xi, xa, xb, xc, xd);                                 \
    (f) += (e) + K_00_19 + ROTATE((a), 5) + F_00_19((b), (c), (d)); \
    (b) = ROTATE((b), 30);                                          \
  } while (0)

#define BODY_20_31(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)         \
  do {                                                              \
    Xupdate(f, xi, xa, xb, xc, xd);                                 \
    (f) += (e) + K_20_39 + ROTATE((a), 5) + F_20_39((b), (c), (d)); \
    (b) = ROTATE((b), 30);                                          \
  } while (0)

// From round 32 on, the schedule word being produced overwrites the slot it
// reads from (|xa| serves as both), so the separate |xi| argument drops out.
#define BODY_32_39(i, a, b, c, d, e, f, xa, xb, xc, xd)             \
  do {                                                              \
    Xupdate(f, xa, xa, xb, xc, xd);                                 \
    (f) += (e) + K_20_39 + ROTATE((a), 5) + F_20_39((b), (c), (d)); \
    (b) = ROTATE((b), 30);                                          \
  } while (0)

#define BODY_40_59(i, a, b, c, d, e, f, xa, xb, xc, xd)             \
  do {                                                              \
    Xupdate(f, xa, xa, xb, xc, xd);                                 \
    (f) += (e) + K_40_59 + ROTATE((a), 5) + F_40_59((b), (c), (d)); \
    (b) = ROTATE((b), 30);                                          \
  } while (0)

#define BODY_60_79(i, a, b, c, d, e, f, xa, xb, xc, xd)                   \
  do {                                                                    \
    Xupdate(f, xa, xa, xb, xc, xd);                                       \
    (f) = (xa) + (e) + K_60_79 + ROTATE((a), 5) + F_60_79((b), (c), (d)); \
    (b) = ROTATE((b), 30);                                                \
  } while (0)

#ifdef X
#undef X
#endif

/* Originally X was an array. As it's automatic it's natural
 * to expect RISC compiler to accomodate at least part of it in
 * the register bank, isn't it? Unfortunately not all compilers
 * "find" this expectation reasonable:-( On order to make such
 * compilers generate better code I replace X[] with a bunch of
 * X0, X1, etc. See the function body below...
 * <appro@fy.chalmers.se> */
#define X(i) XX##i
197
+
198
#if !defined(SHA1_ASM)
// Portable C SHA-1 compression over |num| consecutive 64-byte blocks at
// |data|, updating the five 32-bit chaining values in |state|. All 80 rounds
// are fully unrolled; the 16-word schedule lives in the scalar variables
// XX0..XX15 (accessed via the X() macro) and is updated in place, indices
// taken mod 16.
// NOTE(review): there is no explicit "data += 64" here, unlike the AltiVec
// version — presumably HOST_c2l (md32_common.h) advances |data| as a side
// effect; confirm against md32_common.h before modifying.
static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
                                  size_t num) {
  register uint32_t A, B, C, D, E, T, l;
  uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
      XX11, XX12, XX13, XX14, XX15;

  A = state[0];
  B = state[1];
  C = state[2];
  D = state[3];
  E = state[4];

  for (;;) {
    // Rounds 0-15: load the 16 big-endian message words, interleaving the
    // loads with the round computations.
    HOST_c2l(data, l);
    X(0) = l;
    HOST_c2l(data, l);
    X(1) = l;
    BODY_00_15(0, A, B, C, D, E, T, X(0));
    HOST_c2l(data, l);
    X(2) = l;
    BODY_00_15(1, T, A, B, C, D, E, X(1));
    HOST_c2l(data, l);
    X(3) = l;
    BODY_00_15(2, E, T, A, B, C, D, X(2));
    HOST_c2l(data, l);
    X(4) = l;
    BODY_00_15(3, D, E, T, A, B, C, X(3));
    HOST_c2l(data, l);
    X(5) = l;
    BODY_00_15(4, C, D, E, T, A, B, X(4));
    HOST_c2l(data, l);
    X(6) = l;
    BODY_00_15(5, B, C, D, E, T, A, X(5));
    HOST_c2l(data, l);
    X(7) = l;
    BODY_00_15(6, A, B, C, D, E, T, X(6));
    HOST_c2l(data, l);
    X(8) = l;
    BODY_00_15(7, T, A, B, C, D, E, X(7));
    HOST_c2l(data, l);
    X(9) = l;
    BODY_00_15(8, E, T, A, B, C, D, X(8));
    HOST_c2l(data, l);
    X(10) = l;
    BODY_00_15(9, D, E, T, A, B, C, X(9));
    HOST_c2l(data, l);
    X(11) = l;
    BODY_00_15(10, C, D, E, T, A, B, X(10));
    HOST_c2l(data, l);
    X(12) = l;
    BODY_00_15(11, B, C, D, E, T, A, X(11));
    HOST_c2l(data, l);
    X(13) = l;
    BODY_00_15(12, A, B, C, D, E, T, X(12));
    HOST_c2l(data, l);
    X(14) = l;
    BODY_00_15(13, T, A, B, C, D, E, X(13));
    HOST_c2l(data, l);
    X(15) = l;
    BODY_00_15(14, E, T, A, B, C, D, X(14));
    BODY_00_15(15, D, E, T, A, B, C, X(15));

    // Rounds 16-79: schedule slots are reused in place, indices mod 16.
    BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13));
    BODY_16_19(17, B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14));
    BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15));
    BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0));

    BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1));
    BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2));
    BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3));
    BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4));
    BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5));
    BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6));
    BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X(2), X(7));
    BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X(3), X(8));
    BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X(4), X(9));
    BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X(5), X(10));
    BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X(0), X(6), X(11));
    BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X(1), X(7), X(12));

    BODY_32_39(32, E, T, A, B, C, D, X(0), X(2), X(8), X(13));
    BODY_32_39(33, D, E, T, A, B, C, X(1), X(3), X(9), X(14));
    BODY_32_39(34, C, D, E, T, A, B, X(2), X(4), X(10), X(15));
    BODY_32_39(35, B, C, D, E, T, A, X(3), X(5), X(11), X(0));
    BODY_32_39(36, A, B, C, D, E, T, X(4), X(6), X(12), X(1));
    BODY_32_39(37, T, A, B, C, D, E, X(5), X(7), X(13), X(2));
    BODY_32_39(38, E, T, A, B, C, D, X(6), X(8), X(14), X(3));
    BODY_32_39(39, D, E, T, A, B, C, X(7), X(9), X(15), X(4));

    BODY_40_59(40, C, D, E, T, A, B, X(8), X(10), X(0), X(5));
    BODY_40_59(41, B, C, D, E, T, A, X(9), X(11), X(1), X(6));
    BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X(2), X(7));
    BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X(3), X(8));
    BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X(4), X(9));
    BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X(5), X(10));
    BODY_40_59(46, C, D, E, T, A, B, X(14), X(0), X(6), X(11));
    BODY_40_59(47, B, C, D, E, T, A, X(15), X(1), X(7), X(12));
    BODY_40_59(48, A, B, C, D, E, T, X(0), X(2), X(8), X(13));
    BODY_40_59(49, T, A, B, C, D, E, X(1), X(3), X(9), X(14));
    BODY_40_59(50, E, T, A, B, C, D, X(2), X(4), X(10), X(15));
    BODY_40_59(51, D, E, T, A, B, C, X(3), X(5), X(11), X(0));
    BODY_40_59(52, C, D, E, T, A, B, X(4), X(6), X(12), X(1));
    BODY_40_59(53, B, C, D, E, T, A, X(5), X(7), X(13), X(2));
    BODY_40_59(54, A, B, C, D, E, T, X(6), X(8), X(14), X(3));
    BODY_40_59(55, T, A, B, C, D, E, X(7), X(9), X(15), X(4));
    BODY_40_59(56, E, T, A, B, C, D, X(8), X(10), X(0), X(5));
    BODY_40_59(57, D, E, T, A, B, C, X(9), X(11), X(1), X(6));
    BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X(2), X(7));
    BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X(3), X(8));

    BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X(4), X(9));
    BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X(5), X(10));
    BODY_60_79(62, E, T, A, B, C, D, X(14), X(0), X(6), X(11));
    BODY_60_79(63, D, E, T, A, B, C, X(15), X(1), X(7), X(12));
    BODY_60_79(64, C, D, E, T, A, B, X(0), X(2), X(8), X(13));
    BODY_60_79(65, B, C, D, E, T, A, X(1), X(3), X(9), X(14));
    BODY_60_79(66, A, B, C, D, E, T, X(2), X(4), X(10), X(15));
    BODY_60_79(67, T, A, B, C, D, E, X(3), X(5), X(11), X(0));
    BODY_60_79(68, E, T, A, B, C, D, X(4), X(6), X(12), X(1));
    BODY_60_79(69, D, E, T, A, B, C, X(5), X(7), X(13), X(2));
    BODY_60_79(70, C, D, E, T, A, B, X(6), X(8), X(14), X(3));
    BODY_60_79(71, B, C, D, E, T, A, X(7), X(9), X(15), X(4));
    BODY_60_79(72, A, B, C, D, E, T, X(8), X(10), X(0), X(5));
    BODY_60_79(73, T, A, B, C, D, E, X(9), X(11), X(1), X(6));
    BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X(2), X(7));
    BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X(3), X(8));
    BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X(4), X(9));
    BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X(5), X(10));
    BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11));
    BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12));

    // The & 0xffffffffL is a no-op for uint32_t; retained from the original
    // OpenSSL code. Note the permuted assignment (E, T, A, B, C): after 80
    // rounds of role rotation those variables hold the final a..e values.
    state[0] = (state[0] + E) & 0xffffffffL;
    state[1] = (state[1] + T) & 0xffffffffL;
    state[2] = (state[2] + A) & 0xffffffffL;
    state[3] = (state[3] + B) & 0xffffffffL;
    state[4] = (state[4] + C) & 0xffffffffL;

    if (--num == 0) {
      break;
    }

    A = state[0];
    B = state[1];
    C = state[2];
    D = state[3];
    E = state[4];
  }
}
#endif
348
+
349
+ #undef DATA_ORDER_IS_BIG_ENDIAN
350
+ #undef HASH_CTX
351
+ #undef HASH_CBLOCK
352
+ #undef HASH_MAKE_STRING
353
+ #undef HASH_UPDATE
354
+ #undef HASH_TRANSFORM
355
+ #undef HASH_FINAL
356
+ #undef HASH_BLOCK_DATA_ORDER
357
+ #undef ROTATE
358
+ #undef Xupdate
359
+ #undef K_00_19
360
+ #undef K_20_39
361
+ #undef K_40_59
362
+ #undef K_60_79
363
+ #undef F_00_19
364
+ #undef F_20_39
365
+ #undef F_40_59
366
+ #undef F_60_79
367
+ #undef BODY_00_15
368
+ #undef BODY_16_19
369
+ #undef BODY_20_31
370
+ #undef BODY_32_39
371
+ #undef BODY_40_59
372
+ #undef BODY_60_79
373
+ #undef X
374
+ #undef HOST_c2l
375
+ #undef HOST_l2c