ring-native 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (261)
  1. checksums.yaml +7 -0
  2. data/.gitignore +9 -0
  3. data/Gemfile +3 -0
  4. data/README.md +22 -0
  5. data/Rakefile +1 -0
  6. data/ext/ring/extconf.rb +29 -0
  7. data/lib/ring/native.rb +8 -0
  8. data/lib/ring/native/version.rb +5 -0
  9. data/ring-native.gemspec +25 -0
  10. data/vendor/ring/BUILDING.md +40 -0
  11. data/vendor/ring/Cargo.toml +43 -0
  12. data/vendor/ring/LICENSE +185 -0
  13. data/vendor/ring/Makefile +35 -0
  14. data/vendor/ring/PORTING.md +163 -0
  15. data/vendor/ring/README.md +113 -0
  16. data/vendor/ring/STYLE.md +197 -0
  17. data/vendor/ring/appveyor.yml +27 -0
  18. data/vendor/ring/build.rs +108 -0
  19. data/vendor/ring/crypto/aes/aes.c +1142 -0
  20. data/vendor/ring/crypto/aes/aes_test.Windows.vcxproj +25 -0
  21. data/vendor/ring/crypto/aes/aes_test.cc +93 -0
  22. data/vendor/ring/crypto/aes/asm/aes-586.pl +2368 -0
  23. data/vendor/ring/crypto/aes/asm/aes-armv4.pl +1249 -0
  24. data/vendor/ring/crypto/aes/asm/aes-x86_64.pl +2246 -0
  25. data/vendor/ring/crypto/aes/asm/aesni-x86.pl +1318 -0
  26. data/vendor/ring/crypto/aes/asm/aesni-x86_64.pl +2084 -0
  27. data/vendor/ring/crypto/aes/asm/aesv8-armx.pl +675 -0
  28. data/vendor/ring/crypto/aes/asm/bsaes-armv7.pl +1364 -0
  29. data/vendor/ring/crypto/aes/asm/bsaes-x86_64.pl +1565 -0
  30. data/vendor/ring/crypto/aes/asm/vpaes-x86.pl +841 -0
  31. data/vendor/ring/crypto/aes/asm/vpaes-x86_64.pl +1116 -0
  32. data/vendor/ring/crypto/aes/internal.h +87 -0
  33. data/vendor/ring/crypto/aes/mode_wrappers.c +61 -0
  34. data/vendor/ring/crypto/bn/add.c +394 -0
  35. data/vendor/ring/crypto/bn/asm/armv4-mont.pl +694 -0
  36. data/vendor/ring/crypto/bn/asm/armv8-mont.pl +1503 -0
  37. data/vendor/ring/crypto/bn/asm/bn-586.pl +774 -0
  38. data/vendor/ring/crypto/bn/asm/co-586.pl +287 -0
  39. data/vendor/ring/crypto/bn/asm/rsaz-avx2.pl +1882 -0
  40. data/vendor/ring/crypto/bn/asm/x86-mont.pl +592 -0
  41. data/vendor/ring/crypto/bn/asm/x86_64-gcc.c +599 -0
  42. data/vendor/ring/crypto/bn/asm/x86_64-mont.pl +1393 -0
  43. data/vendor/ring/crypto/bn/asm/x86_64-mont5.pl +3507 -0
  44. data/vendor/ring/crypto/bn/bn.c +352 -0
  45. data/vendor/ring/crypto/bn/bn_asn1.c +74 -0
  46. data/vendor/ring/crypto/bn/bn_test.Windows.vcxproj +25 -0
  47. data/vendor/ring/crypto/bn/bn_test.cc +1696 -0
  48. data/vendor/ring/crypto/bn/cmp.c +200 -0
  49. data/vendor/ring/crypto/bn/convert.c +433 -0
  50. data/vendor/ring/crypto/bn/ctx.c +311 -0
  51. data/vendor/ring/crypto/bn/div.c +594 -0
  52. data/vendor/ring/crypto/bn/exponentiation.c +1335 -0
  53. data/vendor/ring/crypto/bn/gcd.c +711 -0
  54. data/vendor/ring/crypto/bn/generic.c +1019 -0
  55. data/vendor/ring/crypto/bn/internal.h +316 -0
  56. data/vendor/ring/crypto/bn/montgomery.c +516 -0
  57. data/vendor/ring/crypto/bn/mul.c +888 -0
  58. data/vendor/ring/crypto/bn/prime.c +829 -0
  59. data/vendor/ring/crypto/bn/random.c +334 -0
  60. data/vendor/ring/crypto/bn/rsaz_exp.c +262 -0
  61. data/vendor/ring/crypto/bn/rsaz_exp.h +53 -0
  62. data/vendor/ring/crypto/bn/shift.c +276 -0
  63. data/vendor/ring/crypto/bytestring/bytestring_test.Windows.vcxproj +25 -0
  64. data/vendor/ring/crypto/bytestring/bytestring_test.cc +421 -0
  65. data/vendor/ring/crypto/bytestring/cbb.c +399 -0
  66. data/vendor/ring/crypto/bytestring/cbs.c +227 -0
  67. data/vendor/ring/crypto/bytestring/internal.h +46 -0
  68. data/vendor/ring/crypto/chacha/chacha_generic.c +140 -0
  69. data/vendor/ring/crypto/chacha/chacha_vec.c +323 -0
  70. data/vendor/ring/crypto/chacha/chacha_vec_arm.S +1447 -0
  71. data/vendor/ring/crypto/chacha/chacha_vec_arm_generate.go +153 -0
  72. data/vendor/ring/crypto/cipher/cipher_test.Windows.vcxproj +25 -0
  73. data/vendor/ring/crypto/cipher/e_aes.c +390 -0
  74. data/vendor/ring/crypto/cipher/e_chacha20poly1305.c +208 -0
  75. data/vendor/ring/crypto/cipher/internal.h +173 -0
  76. data/vendor/ring/crypto/cipher/test/aes_128_gcm_tests.txt +543 -0
  77. data/vendor/ring/crypto/cipher/test/aes_128_key_wrap_tests.txt +9 -0
  78. data/vendor/ring/crypto/cipher/test/aes_256_gcm_tests.txt +475 -0
  79. data/vendor/ring/crypto/cipher/test/aes_256_key_wrap_tests.txt +23 -0
  80. data/vendor/ring/crypto/cipher/test/chacha20_poly1305_old_tests.txt +422 -0
  81. data/vendor/ring/crypto/cipher/test/chacha20_poly1305_tests.txt +484 -0
  82. data/vendor/ring/crypto/cipher/test/cipher_test.txt +100 -0
  83. data/vendor/ring/crypto/constant_time_test.Windows.vcxproj +25 -0
  84. data/vendor/ring/crypto/constant_time_test.c +304 -0
  85. data/vendor/ring/crypto/cpu-arm-asm.S +32 -0
  86. data/vendor/ring/crypto/cpu-arm.c +199 -0
  87. data/vendor/ring/crypto/cpu-intel.c +261 -0
  88. data/vendor/ring/crypto/crypto.c +151 -0
  89. data/vendor/ring/crypto/curve25519/asm/x25519-arm.S +2118 -0
  90. data/vendor/ring/crypto/curve25519/curve25519.c +4888 -0
  91. data/vendor/ring/crypto/curve25519/x25519_test.cc +128 -0
  92. data/vendor/ring/crypto/digest/md32_common.h +181 -0
  93. data/vendor/ring/crypto/ec/asm/p256-x86_64-asm.pl +2725 -0
  94. data/vendor/ring/crypto/ec/ec.c +193 -0
  95. data/vendor/ring/crypto/ec/ec_curves.c +61 -0
  96. data/vendor/ring/crypto/ec/ec_key.c +228 -0
  97. data/vendor/ring/crypto/ec/ec_montgomery.c +114 -0
  98. data/vendor/ring/crypto/ec/example_mul.Windows.vcxproj +25 -0
  99. data/vendor/ring/crypto/ec/internal.h +243 -0
  100. data/vendor/ring/crypto/ec/oct.c +253 -0
  101. data/vendor/ring/crypto/ec/p256-64.c +1794 -0
  102. data/vendor/ring/crypto/ec/p256-x86_64-table.h +9548 -0
  103. data/vendor/ring/crypto/ec/p256-x86_64.c +509 -0
  104. data/vendor/ring/crypto/ec/simple.c +1007 -0
  105. data/vendor/ring/crypto/ec/util-64.c +183 -0
  106. data/vendor/ring/crypto/ec/wnaf.c +508 -0
  107. data/vendor/ring/crypto/ecdh/ecdh.c +155 -0
  108. data/vendor/ring/crypto/ecdsa/ecdsa.c +304 -0
  109. data/vendor/ring/crypto/ecdsa/ecdsa_asn1.c +193 -0
  110. data/vendor/ring/crypto/ecdsa/ecdsa_test.Windows.vcxproj +25 -0
  111. data/vendor/ring/crypto/ecdsa/ecdsa_test.cc +327 -0
  112. data/vendor/ring/crypto/header_removed.h +17 -0
  113. data/vendor/ring/crypto/internal.h +495 -0
  114. data/vendor/ring/crypto/libring.Windows.vcxproj +101 -0
  115. data/vendor/ring/crypto/mem.c +98 -0
  116. data/vendor/ring/crypto/modes/asm/aesni-gcm-x86_64.pl +1045 -0
  117. data/vendor/ring/crypto/modes/asm/ghash-armv4.pl +517 -0
  118. data/vendor/ring/crypto/modes/asm/ghash-x86.pl +1393 -0
  119. data/vendor/ring/crypto/modes/asm/ghash-x86_64.pl +1741 -0
  120. data/vendor/ring/crypto/modes/asm/ghashv8-armx.pl +422 -0
  121. data/vendor/ring/crypto/modes/ctr.c +226 -0
  122. data/vendor/ring/crypto/modes/gcm.c +1206 -0
  123. data/vendor/ring/crypto/modes/gcm_test.Windows.vcxproj +25 -0
  124. data/vendor/ring/crypto/modes/gcm_test.c +348 -0
  125. data/vendor/ring/crypto/modes/internal.h +299 -0
  126. data/vendor/ring/crypto/perlasm/arm-xlate.pl +170 -0
  127. data/vendor/ring/crypto/perlasm/readme +100 -0
  128. data/vendor/ring/crypto/perlasm/x86_64-xlate.pl +1164 -0
  129. data/vendor/ring/crypto/perlasm/x86asm.pl +292 -0
  130. data/vendor/ring/crypto/perlasm/x86gas.pl +263 -0
  131. data/vendor/ring/crypto/perlasm/x86masm.pl +200 -0
  132. data/vendor/ring/crypto/perlasm/x86nasm.pl +187 -0
  133. data/vendor/ring/crypto/poly1305/poly1305.c +331 -0
  134. data/vendor/ring/crypto/poly1305/poly1305_arm.c +301 -0
  135. data/vendor/ring/crypto/poly1305/poly1305_arm_asm.S +2015 -0
  136. data/vendor/ring/crypto/poly1305/poly1305_test.Windows.vcxproj +25 -0
  137. data/vendor/ring/crypto/poly1305/poly1305_test.cc +80 -0
  138. data/vendor/ring/crypto/poly1305/poly1305_test.txt +52 -0
  139. data/vendor/ring/crypto/poly1305/poly1305_vec.c +892 -0
  140. data/vendor/ring/crypto/rand/asm/rdrand-x86_64.pl +75 -0
  141. data/vendor/ring/crypto/rand/internal.h +32 -0
  142. data/vendor/ring/crypto/rand/rand.c +189 -0
  143. data/vendor/ring/crypto/rand/urandom.c +219 -0
  144. data/vendor/ring/crypto/rand/windows.c +56 -0
  145. data/vendor/ring/crypto/refcount_c11.c +66 -0
  146. data/vendor/ring/crypto/refcount_lock.c +53 -0
  147. data/vendor/ring/crypto/refcount_test.Windows.vcxproj +25 -0
  148. data/vendor/ring/crypto/refcount_test.c +58 -0
  149. data/vendor/ring/crypto/rsa/blinding.c +462 -0
  150. data/vendor/ring/crypto/rsa/internal.h +108 -0
  151. data/vendor/ring/crypto/rsa/padding.c +300 -0
  152. data/vendor/ring/crypto/rsa/rsa.c +450 -0
  153. data/vendor/ring/crypto/rsa/rsa_asn1.c +261 -0
  154. data/vendor/ring/crypto/rsa/rsa_impl.c +944 -0
  155. data/vendor/ring/crypto/rsa/rsa_test.Windows.vcxproj +25 -0
  156. data/vendor/ring/crypto/rsa/rsa_test.cc +437 -0
  157. data/vendor/ring/crypto/sha/asm/sha-armv8.pl +436 -0
  158. data/vendor/ring/crypto/sha/asm/sha-x86_64.pl +2390 -0
  159. data/vendor/ring/crypto/sha/asm/sha256-586.pl +1275 -0
  160. data/vendor/ring/crypto/sha/asm/sha256-armv4.pl +735 -0
  161. data/vendor/ring/crypto/sha/asm/sha256-armv8.pl +14 -0
  162. data/vendor/ring/crypto/sha/asm/sha256-x86_64.pl +14 -0
  163. data/vendor/ring/crypto/sha/asm/sha512-586.pl +911 -0
  164. data/vendor/ring/crypto/sha/asm/sha512-armv4.pl +666 -0
  165. data/vendor/ring/crypto/sha/asm/sha512-armv8.pl +14 -0
  166. data/vendor/ring/crypto/sha/asm/sha512-x86_64.pl +14 -0
  167. data/vendor/ring/crypto/sha/sha1.c +271 -0
  168. data/vendor/ring/crypto/sha/sha256.c +204 -0
  169. data/vendor/ring/crypto/sha/sha512.c +355 -0
  170. data/vendor/ring/crypto/test/file_test.cc +326 -0
  171. data/vendor/ring/crypto/test/file_test.h +181 -0
  172. data/vendor/ring/crypto/test/malloc.cc +150 -0
  173. data/vendor/ring/crypto/test/scoped_types.h +95 -0
  174. data/vendor/ring/crypto/test/test.Windows.vcxproj +35 -0
  175. data/vendor/ring/crypto/test/test_util.cc +46 -0
  176. data/vendor/ring/crypto/test/test_util.h +41 -0
  177. data/vendor/ring/crypto/thread_none.c +55 -0
  178. data/vendor/ring/crypto/thread_pthread.c +165 -0
  179. data/vendor/ring/crypto/thread_test.Windows.vcxproj +25 -0
  180. data/vendor/ring/crypto/thread_test.c +200 -0
  181. data/vendor/ring/crypto/thread_win.c +282 -0
  182. data/vendor/ring/examples/checkdigest.rs +103 -0
  183. data/vendor/ring/include/openssl/aes.h +121 -0
  184. data/vendor/ring/include/openssl/arm_arch.h +129 -0
  185. data/vendor/ring/include/openssl/base.h +156 -0
  186. data/vendor/ring/include/openssl/bn.h +794 -0
  187. data/vendor/ring/include/openssl/buffer.h +18 -0
  188. data/vendor/ring/include/openssl/bytestring.h +235 -0
  189. data/vendor/ring/include/openssl/chacha.h +37 -0
  190. data/vendor/ring/include/openssl/cmac.h +76 -0
  191. data/vendor/ring/include/openssl/cpu.h +184 -0
  192. data/vendor/ring/include/openssl/crypto.h +43 -0
  193. data/vendor/ring/include/openssl/curve25519.h +88 -0
  194. data/vendor/ring/include/openssl/ec.h +225 -0
  195. data/vendor/ring/include/openssl/ec_key.h +129 -0
  196. data/vendor/ring/include/openssl/ecdh.h +110 -0
  197. data/vendor/ring/include/openssl/ecdsa.h +156 -0
  198. data/vendor/ring/include/openssl/err.h +201 -0
  199. data/vendor/ring/include/openssl/mem.h +101 -0
  200. data/vendor/ring/include/openssl/obj_mac.h +71 -0
  201. data/vendor/ring/include/openssl/opensslfeatures.h +68 -0
  202. data/vendor/ring/include/openssl/opensslv.h +18 -0
  203. data/vendor/ring/include/openssl/ossl_typ.h +18 -0
  204. data/vendor/ring/include/openssl/poly1305.h +51 -0
  205. data/vendor/ring/include/openssl/rand.h +70 -0
  206. data/vendor/ring/include/openssl/rsa.h +399 -0
  207. data/vendor/ring/include/openssl/thread.h +133 -0
  208. data/vendor/ring/include/openssl/type_check.h +71 -0
  209. data/vendor/ring/mk/Common.props +63 -0
  210. data/vendor/ring/mk/Windows.props +42 -0
  211. data/vendor/ring/mk/WindowsTest.props +18 -0
  212. data/vendor/ring/mk/appveyor.bat +62 -0
  213. data/vendor/ring/mk/bottom_of_makefile.mk +54 -0
  214. data/vendor/ring/mk/ring.mk +266 -0
  215. data/vendor/ring/mk/top_of_makefile.mk +214 -0
  216. data/vendor/ring/mk/travis.sh +40 -0
  217. data/vendor/ring/mk/update-travis-yml.py +229 -0
  218. data/vendor/ring/ring.sln +153 -0
  219. data/vendor/ring/src/aead.rs +682 -0
  220. data/vendor/ring/src/agreement.rs +248 -0
  221. data/vendor/ring/src/c.rs +129 -0
  222. data/vendor/ring/src/constant_time.rs +37 -0
  223. data/vendor/ring/src/der.rs +96 -0
  224. data/vendor/ring/src/digest.rs +690 -0
  225. data/vendor/ring/src/digest_tests.txt +57 -0
  226. data/vendor/ring/src/ecc.rs +28 -0
  227. data/vendor/ring/src/ecc_build.rs +279 -0
  228. data/vendor/ring/src/ecc_curves.rs +117 -0
  229. data/vendor/ring/src/ed25519_tests.txt +2579 -0
  230. data/vendor/ring/src/exe_tests.rs +46 -0
  231. data/vendor/ring/src/ffi.rs +29 -0
  232. data/vendor/ring/src/file_test.rs +187 -0
  233. data/vendor/ring/src/hkdf.rs +153 -0
  234. data/vendor/ring/src/hkdf_tests.txt +59 -0
  235. data/vendor/ring/src/hmac.rs +414 -0
  236. data/vendor/ring/src/hmac_tests.txt +97 -0
  237. data/vendor/ring/src/input.rs +312 -0
  238. data/vendor/ring/src/lib.rs +41 -0
  239. data/vendor/ring/src/pbkdf2.rs +265 -0
  240. data/vendor/ring/src/pbkdf2_tests.txt +113 -0
  241. data/vendor/ring/src/polyfill.rs +57 -0
  242. data/vendor/ring/src/rand.rs +28 -0
  243. data/vendor/ring/src/signature.rs +314 -0
  244. data/vendor/ring/third-party/NIST/README.md +9 -0
  245. data/vendor/ring/third-party/NIST/SHAVS/SHA1LongMsg.rsp +263 -0
  246. data/vendor/ring/third-party/NIST/SHAVS/SHA1Monte.rsp +309 -0
  247. data/vendor/ring/third-party/NIST/SHAVS/SHA1ShortMsg.rsp +267 -0
  248. data/vendor/ring/third-party/NIST/SHAVS/SHA224LongMsg.rsp +263 -0
  249. data/vendor/ring/third-party/NIST/SHAVS/SHA224Monte.rsp +309 -0
  250. data/vendor/ring/third-party/NIST/SHAVS/SHA224ShortMsg.rsp +267 -0
  251. data/vendor/ring/third-party/NIST/SHAVS/SHA256LongMsg.rsp +263 -0
  252. data/vendor/ring/third-party/NIST/SHAVS/SHA256Monte.rsp +309 -0
  253. data/vendor/ring/third-party/NIST/SHAVS/SHA256ShortMsg.rsp +267 -0
  254. data/vendor/ring/third-party/NIST/SHAVS/SHA384LongMsg.rsp +519 -0
  255. data/vendor/ring/third-party/NIST/SHAVS/SHA384Monte.rsp +309 -0
  256. data/vendor/ring/third-party/NIST/SHAVS/SHA384ShortMsg.rsp +523 -0
  257. data/vendor/ring/third-party/NIST/SHAVS/SHA512LongMsg.rsp +519 -0
  258. data/vendor/ring/third-party/NIST/SHAVS/SHA512Monte.rsp +309 -0
  259. data/vendor/ring/third-party/NIST/SHAVS/SHA512ShortMsg.rsp +523 -0
  260. data/vendor/ring/third-party/NIST/sha256sums.txt +1 -0
  261. metadata +333 -0
data/vendor/ring/crypto/modes/gcm.c
@@ -0,0 +1,1206 @@
+ /* ====================================================================
+ * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ==================================================================== */
+
+ #include <openssl/base.h>
+
+ #include <assert.h>
+ #include <string.h>
+
+ #include <openssl/mem.h>
+ #include <openssl/cpu.h>
+
+ #include "internal.h"
+
+
+ #if !defined(OPENSSL_NO_ASM) && \
+ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
+ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64))
+ #define GHASH_ASM
+ #endif
+
+ #if defined(BSWAP4) && STRICT_ALIGNMENT == 1
+ /* redefine, because alignment is ensured */
+ #undef GETU32
+ #define GETU32(p) BSWAP4(*(const uint32_t *)(p))
+ #undef PUTU32
+ #define PUTU32(p, v) *(uint32_t *)(p) = BSWAP4(v)
+ #endif
+
+ #define PACK(s) ((size_t)(s) << (sizeof(size_t) * 8 - 16))
+ #define REDUCE1BIT(V) \
+ do { \
+ if (sizeof(size_t) == 8) { \
+ uint64_t T = UINT64_C(0xe100000000000000) & (0 - (V.lo & 1)); \
+ V.lo = (V.hi << 63) | (V.lo >> 1); \
+ V.hi = (V.hi >> 1) ^ T; \
+ } else { \
+ uint32_t T = 0xe1000000U & (0 - (uint32_t)(V.lo & 1)); \
+ V.lo = (V.hi << 63) | (V.lo >> 1); \
+ V.hi = (V.hi >> 1) ^ ((uint64_t)T << 32); \
+ } \
+ } while (0)
+
+ // kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
+ // bits of a |size_t|.
+ static const size_t kSizeTWithoutLower4Bits = (size_t) -16;
+
+ static void gcm_init_4bit(u128 Htable[16], uint64_t H[2]) {
+ u128 V;
+
+ Htable[0].hi = 0;
+ Htable[0].lo = 0;
+ V.hi = H[0];
+ V.lo = H[1];
+
+ Htable[8] = V;
+ REDUCE1BIT(V);
+ Htable[4] = V;
+ REDUCE1BIT(V);
+ Htable[2] = V;
+ REDUCE1BIT(V);
+ Htable[1] = V;
+ Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
+ V = Htable[4];
+ Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
+ Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
+ Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
+ V = Htable[8];
+ Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
+ Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
+ Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
+ Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
+ Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
+ Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
+ Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;
+
+ #if defined(GHASH_ASM) && defined(OPENSSL_ARM)
+ /* ARM assembler expects specific dword order in Htable. */
+ {
+ int j;
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+
+ if (is_endian.little) {
+ for (j = 0; j < 16; ++j) {
+ V = Htable[j];
+ Htable[j].hi = V.lo;
+ Htable[j].lo = V.hi;
+ }
+ } else {
+ for (j = 0; j < 16; ++j) {
+ V = Htable[j];
+ Htable[j].hi = V.lo << 32 | V.lo >> 32;
+ Htable[j].lo = V.hi << 32 | V.hi >> 32;
+ }
+ }
+ }
+ #endif
+ }
+
+ #if !defined(GHASH_ASM) || defined(OPENSSL_AARCH64)
+ static const size_t rem_4bit[16] = {
+ PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460),
+ PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0),
+ PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
+ PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0)};
+
+ static void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) {
+ u128 Z;
+ int cnt = 15;
+ size_t rem, nlo, nhi;
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+
+ nlo = ((const uint8_t *)Xi)[15];
+ nhi = nlo >> 4;
+ nlo &= 0xf;
+
+ Z.hi = Htable[nlo].hi;
+ Z.lo = Htable[nlo].lo;
+
+ while (1) {
+ rem = (size_t)Z.lo & 0xf;
+ Z.lo = (Z.hi << 60) | (Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
+ if (sizeof(size_t) == 8) {
+ Z.hi ^= rem_4bit[rem];
+ } else {
+ Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
+ }
+
+ Z.hi ^= Htable[nhi].hi;
+ Z.lo ^= Htable[nhi].lo;
+
+ if (--cnt < 0) {
+ break;
+ }
+
+ nlo = ((const uint8_t *)Xi)[cnt];
+ nhi = nlo >> 4;
+ nlo &= 0xf;
+
+ rem = (size_t)Z.lo & 0xf;
+ Z.lo = (Z.hi << 60) | (Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
+ if (sizeof(size_t) == 8) {
+ Z.hi ^= rem_4bit[rem];
+ } else {
+ Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
+ }
+
+ Z.hi ^= Htable[nlo].hi;
+ Z.lo ^= Htable[nlo].lo;
+ }
+
+ if (is_endian.little) {
+ #ifdef BSWAP8
+ Xi[0] = BSWAP8(Z.hi);
+ Xi[1] = BSWAP8(Z.lo);
+ #else
+ uint8_t *p = (uint8_t *)Xi;
+ uint32_t v;
+ v = (uint32_t)(Z.hi >> 32);
+ PUTU32(p, v);
+ v = (uint32_t)(Z.hi);
+ PUTU32(p + 4, v);
+ v = (uint32_t)(Z.lo >> 32);
+ PUTU32(p + 8, v);
+ v = (uint32_t)(Z.lo);
+ PUTU32(p + 12, v);
+ #endif
+ } else {
+ Xi[0] = Z.hi;
+ Xi[1] = Z.lo;
+ }
+ }
+
+ /* Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for
+ * details... Compiler-generated code doesn't seem to give any
+ * performance improvement, at least not on x86[_64]. It's here
+ * mostly as reference and a placeholder for possible future
+ * non-trivial optimization[s]... */
+ static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len) {
+ u128 Z;
+ int cnt;
+ size_t rem, nlo, nhi;
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+
+ do {
+ cnt = 15;
+ nlo = ((const uint8_t *)Xi)[15];
+ nlo ^= inp[15];
+ nhi = nlo >> 4;
+ nlo &= 0xf;
+
+ Z.hi = Htable[nlo].hi;
+ Z.lo = Htable[nlo].lo;
+
+ while (1) {
+ rem = (size_t)Z.lo & 0xf;
+ Z.lo = (Z.hi << 60) | (Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
+ if (sizeof(size_t) == 8) {
+ Z.hi ^= rem_4bit[rem];
+ } else {
+ Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
+ }
+
+ Z.hi ^= Htable[nhi].hi;
+ Z.lo ^= Htable[nhi].lo;
+
+ if (--cnt < 0) {
+ break;
+ }
+
+ nlo = ((const uint8_t *)Xi)[cnt];
+ nlo ^= inp[cnt];
+ nhi = nlo >> 4;
+ nlo &= 0xf;
+
+ rem = (size_t)Z.lo & 0xf;
+ Z.lo = (Z.hi << 60) | (Z.lo >> 4);
+ Z.hi = (Z.hi >> 4);
+ if (sizeof(size_t) == 8) {
+ Z.hi ^= rem_4bit[rem];
+ } else {
+ Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
+ }
+
+ Z.hi ^= Htable[nlo].hi;
+ Z.lo ^= Htable[nlo].lo;
+ }
+
+ if (is_endian.little) {
+ #ifdef BSWAP8
+ Xi[0] = BSWAP8(Z.hi);
+ Xi[1] = BSWAP8(Z.lo);
+ #else
+ uint8_t *p = (uint8_t *)Xi;
+ uint32_t v;
+ v = (uint32_t)(Z.hi >> 32);
+ PUTU32(p, v);
+ v = (uint32_t)(Z.hi);
+ PUTU32(p + 4, v);
+ v = (uint32_t)(Z.lo >> 32);
+ PUTU32(p + 8, v);
+ v = (uint32_t)(Z.lo);
+ PUTU32(p + 12, v);
+ #endif
+ } else {
+ Xi[0] = Z.hi;
+ Xi[1] = Z.lo;
+ }
+ } while (inp += 16, len -= 16);
+ }
+ #else /* GHASH_ASM */
+ void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]);
+ void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len);
+ #endif
+
+ #define GCM_MUL(ctx, Xi) gcm_gmult_4bit(ctx->Xi.u, ctx->Htable)
+ #if defined(GHASH_ASM)
+ #define GHASH(ctx, in, len) gcm_ghash_4bit((ctx)->Xi.u, (ctx)->Htable, in, len)
+ /* GHASH_CHUNK is "stride parameter" missioned to mitigate cache
+ * trashing effect. In other words idea is to hash data while it's
+ * still in L1 cache after encryption pass... */
+ #define GHASH_CHUNK (3 * 1024)
+ #endif
+
+
+ #if defined(GHASH_ASM)
+ #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
+ #define GHASH_ASM_X86_OR_64
+ #define GCM_FUNCREF_4BIT
+ void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
+ void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]);
+ void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len);
+
+ #if defined(OPENSSL_X86)
+ #define gcm_init_avx gcm_init_clmul
+ #define gcm_gmult_avx gcm_gmult_clmul
+ #define gcm_ghash_avx gcm_ghash_clmul
+ #else
+ void gcm_init_avx(u128 Htable[16], const uint64_t Xi[2]);
+ void gcm_gmult_avx(uint64_t Xi[2], const u128 Htable[16]);
+ void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len);
+ #endif
+
+ #if defined(OPENSSL_X86)
+ #define GHASH_ASM_X86
+ void gcm_gmult_4bit_mmx(uint64_t Xi[2], const u128 Htable[16]);
+ void gcm_ghash_4bit_mmx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len);
+
+ void gcm_gmult_4bit_x86(uint64_t Xi[2], const u128 Htable[16]);
+ void gcm_ghash_4bit_x86(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len);
+ #endif
+ #elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
+ #include <openssl/arm_arch.h>
+ #if __ARM_MAX_ARCH__ >= 8
+ #define GHASH_ASM_ARM
+ #define GCM_FUNCREF_4BIT
+
+ static int pmull_capable(void) {
+ return CRYPTO_is_ARMv8_PMULL_capable();
+ }
+
+ void gcm_init_v8(u128 Htable[16], const uint64_t Xi[2]);
+ void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[16]);
+ void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len);
+
+ #if defined(OPENSSL_ARM)
+ /* 32-bit ARM also has support for doing GCM with NEON instructions. */
+ static int neon_capable(void) {
+ return CRYPTO_is_NEON_capable();
+ }
+
+ void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]);
+ void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]);
+ void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len);
+ #else
+ /* AArch64 only has the ARMv8 versions of functions. */
+ static int neon_capable(void) {
+ return 0;
+ }
+ void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]) {
+ abort();
+ }
+ void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]) {
+ abort();
+ }
+ void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len) {
+ abort();
+ }
+ #endif
+
+ #endif
+ #endif
+ #endif
+
+ #ifdef GCM_FUNCREF_4BIT
+ #undef GCM_MUL
+ #define GCM_MUL(ctx, Xi) (*gcm_gmult_p)(ctx->Xi.u, ctx->Htable)
+ #ifdef GHASH
+ #undef GHASH
+ #define GHASH(ctx, in, len) (*gcm_ghash_p)(ctx->Xi.u, ctx->Htable, in, len)
+ #endif
+ #endif
+
+ GCM128_CONTEXT *CRYPTO_gcm128_new(const void *key, block128_f block) {
+ GCM128_CONTEXT *ret;
+
+ ret = (GCM128_CONTEXT *)OPENSSL_malloc(sizeof(GCM128_CONTEXT));
+ if (ret != NULL) {
+ CRYPTO_gcm128_init(ret, key, block);
+ }
+
+ return ret;
+ }
+
+ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *key,
+ block128_f block) {
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->block = block;
+
+ (*block)(ctx->H.c, ctx->H.c, key);
+
+ if (is_endian.little) {
+ /* H is stored in host byte order */
+ #ifdef BSWAP8
+ ctx->H.u[0] = BSWAP8(ctx->H.u[0]);
+ ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
+ #else
+ uint8_t *p = ctx->H.c;
+ uint64_t hi, lo;
+ hi = (uint64_t)GETU32(p) << 32 | GETU32(p + 4);
+ lo = (uint64_t)GETU32(p + 8) << 32 | GETU32(p + 12);
+ ctx->H.u[0] = hi;
+ ctx->H.u[1] = lo;
+ #endif
+ }
+
+ #if defined(GHASH_ASM_X86_OR_64)
+ if (crypto_gcm_clmul_enabled()) {
+ if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
+ gcm_init_avx(ctx->Htable, ctx->H.u);
+ ctx->gmult = gcm_gmult_avx;
+ ctx->ghash = gcm_ghash_avx;
+ } else {
+ gcm_init_clmul(ctx->Htable, ctx->H.u);
+ ctx->gmult = gcm_gmult_clmul;
+ ctx->ghash = gcm_ghash_clmul;
+ }
+ return;
+ }
+ gcm_init_4bit(ctx->Htable, ctx->H.u);
+ #if defined(GHASH_ASM_X86) /* x86 only */
+ if (OPENSSL_ia32cap_P[0] & (1 << 25)) { /* check SSE bit */
+ ctx->gmult = gcm_gmult_4bit_mmx;
+ ctx->ghash = gcm_ghash_4bit_mmx;
+ } else {
+ ctx->gmult = gcm_gmult_4bit_x86;
+ ctx->ghash = gcm_ghash_4bit_x86;
+ }
+ #else
+ ctx->gmult = gcm_gmult_4bit;
+ ctx->ghash = gcm_ghash_4bit;
+ #endif
+ #elif defined(GHASH_ASM_ARM)
+ if (pmull_capable()) {
+ gcm_init_v8(ctx->Htable, ctx->H.u);
+ ctx->gmult = gcm_gmult_v8;
+ ctx->ghash = gcm_ghash_v8;
+ } else if (neon_capable()) {
+ gcm_init_neon(ctx->Htable,ctx->H.u);
+ ctx->gmult = gcm_gmult_neon;
+ ctx->ghash = gcm_ghash_neon;
+ } else {
+ gcm_init_4bit(ctx->Htable, ctx->H.u);
+ ctx->gmult = gcm_gmult_4bit;
+ ctx->ghash = gcm_ghash_4bit;
+ }
+ #else
+ gcm_init_4bit(ctx->Htable, ctx->H.u);
+ ctx->gmult = gcm_gmult_4bit;
+ ctx->ghash = gcm_ghash_4bit;
+ #endif
+ }
+
+ void CRYPTO_gcm128_set_96_bit_iv(GCM128_CONTEXT *ctx, const void *key,
+ const uint8_t *iv) {
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+ unsigned int ctr;
+
+ ctx->Yi.u[0] = 0;
+ ctx->Yi.u[1] = 0;
+ ctx->Xi.u[0] = 0;
+ ctx->Xi.u[1] = 0;
+ ctx->len.u[0] = 0; /* AAD length */
+ ctx->len.u[1] = 0; /* message length */
+ ctx->ares = 0;
+ ctx->mres = 0;
+
+ memcpy(ctx->Yi.c, iv, 12);
+ ctx->Yi.c[15] = 1;
+ ctr = 1;
+
+ (*ctx->block)(ctx->Yi.c, ctx->EK0.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ }
+
+ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) {
+ size_t i;
+ unsigned int n;
+ uint64_t alen = ctx->len.u[0];
+ #ifdef GCM_FUNCREF_4BIT
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ #ifdef GHASH
+ void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len) = ctx->ghash;
+ #endif
+ #endif
+
+ if (ctx->len.u[1]) {
+ return 0;
+ }
+
+ alen += len;
+ if (alen > (UINT64_C(1) << 61) || (sizeof(len) == 8 && alen < len)) {
+ return 0;
+ }
+ ctx->len.u[0] = alen;
+
+ n = ctx->ares;
+ if (n) {
+ while (n && len) {
+ ctx->Xi.c[n] ^= *(aad++);
+ --len;
+ n = (n + 1) % 16;
+ }
+ if (n == 0) {
+ GCM_MUL(ctx, Xi);
+ } else {
+ ctx->ares = n;
+ return 1;
+ }
+ }
+
+ #ifdef GHASH
+ if ((i = (len & (size_t) - 16))) {
+ GHASH(ctx, aad, i);
+ aad += i;
+ len -= i;
+ }
+ #else
+ while (len >= 16) {
+ for (i = 0; i < 16; ++i) {
+ ctx->Xi.c[i] ^= aad[i];
+ }
+ GCM_MUL(ctx, Xi);
+ aad += 16;
+ len -= 16;
+ }
+ #endif
+ if (len) {
+ n = (unsigned int)len;
+ for (i = 0; i < len; ++i) {
+ ctx->Xi.c[i] ^= aad[i];
+ }
+ }
+
+ ctx->ares = n;
+ return 1;
+ }
+
+ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key,
+ const unsigned char *in, unsigned char *out,
+ size_t len) {
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+ unsigned int n, ctr;
+ size_t i;
+ uint64_t mlen = ctx->len.u[1];
+ block128_f block = ctx->block;
+ #ifdef GCM_FUNCREF_4BIT
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ #ifdef GHASH
+ void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len) = ctx->ghash;
+ #endif
+ #endif
+
+ mlen += len;
+ if (mlen > ((UINT64_C(1) << 36) - 32) ||
+ (sizeof(len) == 8 && mlen < len)) {
+ return 0;
+ }
+ ctx->len.u[1] = mlen;
+
+ if (ctx->ares) {
+ /* First call to encrypt finalizes GHASH(AAD) */
+ GCM_MUL(ctx, Xi);
+ ctx->ares = 0;
+ }
+
+ if (is_endian.little) {
+ ctr = GETU32(ctx->Yi.c + 12);
+ } else {
+ ctr = ctx->Yi.d[3];
+ }
+
+ n = ctx->mres;
+ if (n) {
+ while (n && len) {
+ ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
+ --len;
+ n = (n + 1) % 16;
+ }
+ if (n == 0) {
+ GCM_MUL(ctx, Xi);
+ } else {
+ ctx->mres = n;
+ return 1;
+ }
+ }
+ if (STRICT_ALIGNMENT && ((size_t)in | (size_t)out) % sizeof(size_t) != 0) {
+ for (i = 0; i < len; ++i) {
+ if (n == 0) {
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ }
+ ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
+ n = (n + 1) % 16;
+ if (n == 0) {
+ GCM_MUL(ctx, Xi);
+ }
+ }
+
+ ctx->mres = n;
+ return 1;
+ }
+ #if defined(GHASH) && defined(GHASH_CHUNK)
+ while (len >= GHASH_CHUNK) {
+ size_t j = GHASH_CHUNK;
+
+ while (j) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
+
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ for (i = 0; i < 16 / sizeof(size_t); ++i) {
+ out_t[i] = in_t[i] ^ ctx->EKi.t[i];
+ }
+ out += 16;
+ in += 16;
+ j -= 16;
+ }
+ GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
+ len -= GHASH_CHUNK;
+ }
+ if ((i = (len & (size_t) - 16))) {
+ size_t j = i;
+
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
+
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ for (i = 0; i < 16 / sizeof(size_t); ++i) {
+ out_t[i] = in_t[i] ^ ctx->EKi.t[i];
+ }
+ out += 16;
+ in += 16;
+ len -= 16;
+ }
+ GHASH(ctx, out - j, j);
+ }
+ #else
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
+
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ for (i = 0; i < 16 / sizeof(size_t); ++i) {
+ ctx->Xi.t[i] ^= out_t[i] = in_t[i] ^ ctx->EKi.t[i];
+ }
+ GCM_MUL(ctx, Xi);
+ out += 16;
+ in += 16;
+ len -= 16;
+ }
+ #endif
+ if (len) {
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ while (len--) {
+ ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
+ ++n;
+ }
+ }
+
+ ctx->mres = n;
+ return 1;
+ }
+
+ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key,
+ const unsigned char *in, unsigned char *out,
+ size_t len) {
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+ unsigned int n, ctr;
+ size_t i;
+ uint64_t mlen = ctx->len.u[1];
+ block128_f block = ctx->block;
+ #ifdef GCM_FUNCREF_4BIT
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ #ifdef GHASH
+ void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len) = ctx->ghash;
+ #endif
+ #endif
+
+ mlen += len;
+ if (mlen > ((UINT64_C(1) << 36) - 32) ||
+ (sizeof(len) == 8 && mlen < len)) {
+ return 0;
+ }
+ ctx->len.u[1] = mlen;
+
+ if (ctx->ares) {
+ /* First call to decrypt finalizes GHASH(AAD) */
+ GCM_MUL(ctx, Xi);
+ ctx->ares = 0;
+ }
+
+ if (is_endian.little) {
+ ctr = GETU32(ctx->Yi.c + 12);
+ } else {
+ ctr = ctx->Yi.d[3];
+ }
+
+ n = ctx->mres;
+ if (n) {
+ while (n && len) {
+ uint8_t c = *(in++);
+ *(out++) = c ^ ctx->EKi.c[n];
+ ctx->Xi.c[n] ^= c;
+ --len;
+ n = (n + 1) % 16;
+ }
+ if (n == 0) {
+ GCM_MUL(ctx, Xi);
+ } else {
+ ctx->mres = n;
+ return 1;
+ }
+ }
+ if (STRICT_ALIGNMENT && ((size_t)in | (size_t)out) % sizeof(size_t) != 0) {
+ for (i = 0; i < len; ++i) {
+ uint8_t c;
+ if (n == 0) {
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ }
+ c = in[i];
+ out[i] = c ^ ctx->EKi.c[n];
+ ctx->Xi.c[n] ^= c;
+ n = (n + 1) % 16;
+ if (n == 0) {
+ GCM_MUL(ctx, Xi);
+ }
+ }
+
+ ctx->mres = n;
+ return 1;
+ }
+ #if defined(GHASH) && defined(GHASH_CHUNK)
+ while (len >= GHASH_CHUNK) {
+ size_t j = GHASH_CHUNK;
+
+ GHASH(ctx, in, GHASH_CHUNK);
+ while (j) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
+
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ for (i = 0; i < 16 / sizeof(size_t); ++i) {
+ out_t[i] = in_t[i] ^ ctx->EKi.t[i];
+ }
+ out += 16;
+ in += 16;
+ j -= 16;
+ }
+ len -= GHASH_CHUNK;
+ }
+ if ((i = (len & (size_t) - 16))) {
+ GHASH(ctx, in, i);
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
+
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ for (i = 0; i < 16 / sizeof(size_t); ++i) {
+ out_t[i] = in_t[i] ^ ctx->EKi.t[i];
+ }
+ out += 16;
+ in += 16;
+ len -= 16;
+ }
+ }
+ #else
+ while (len >= 16) {
+ size_t *out_t = (size_t *)out;
+ const size_t *in_t = (const size_t *)in;
+
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ for (i = 0; i < 16 / sizeof(size_t); ++i) {
+ size_t c = in_t[i];
+ out_t[i] = c ^ ctx->EKi.t[i];
+ ctx->Xi.t[i] ^= c;
+ }
+ GCM_MUL(ctx, Xi);
+ out += 16;
+ in += 16;
+ len -= 16;
+ }
+ #endif
+ if (len) {
+ (*block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ while (len--) {
+ uint8_t c = in[n];
+ ctx->Xi.c[n] ^= c;
+ out[n] = c ^ ctx->EKi.c[n];
+ ++n;
+ }
+ }
+
+ ctx->mres = n;
+ return 1;
+ }
+
+ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
+ const uint8_t *in, uint8_t *out, size_t len,
+ ctr128_f stream) {
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+ unsigned int n, ctr;
+ uint64_t mlen = ctx->len.u[1];
+ #ifdef GCM_FUNCREF_4BIT
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ #ifdef GHASH
+ void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len) = ctx->ghash;
+ #endif
+ #endif
+
+ mlen += len;
+ if (mlen > ((UINT64_C(1) << 36) - 32) ||
+ (sizeof(len) == 8 && mlen < len)) {
+ return 0;
+ }
+ ctx->len.u[1] = mlen;
+
+ if (ctx->ares) {
+ /* First call to encrypt finalizes GHASH(AAD) */
+ GCM_MUL(ctx, Xi);
+ ctx->ares = 0;
+ }
+
+ if (is_endian.little) {
+ ctr = GETU32(ctx->Yi.c + 12);
+ } else {
+ ctr = ctx->Yi.d[3];
+ }
+
+ n = ctx->mres;
+ if (n) {
+ while (n && len) {
+ ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
+ --len;
+ n = (n + 1) % 16;
+ }
+ if (n == 0) {
+ GCM_MUL(ctx, Xi);
+ } else {
+ ctx->mres = n;
+ return 1;
+ }
+ }
+ #if defined(GHASH)
+ while (len >= GHASH_CHUNK) {
+ (*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
+ ctr += GHASH_CHUNK / 16;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ GHASH(ctx, out, GHASH_CHUNK);
+ out += GHASH_CHUNK;
+ in += GHASH_CHUNK;
+ len -= GHASH_CHUNK;
+ }
+ #endif
+ size_t i = len & kSizeTWithoutLower4Bits;
+ if (i != 0) {
+ size_t j = i / 16;
+
+ (*stream)(in, out, j, key, ctx->Yi.c);
+ ctr += (unsigned int)j;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ in += i;
+ len -= i;
+ #if defined(GHASH)
+ GHASH(ctx, out, i);
+ out += i;
+ #else
+ while (j--) {
+ for (i = 0; i < 16; ++i) {
+ ctx->Xi.c[i] ^= out[i];
+ }
+ GCM_MUL(ctx, Xi);
+ out += 16;
+ }
+ #endif
+ }
+ if (len) {
+ (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ while (len--) {
+ ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
+ ++n;
+ }
+ }
+
+ ctx->mres = n;
+ return 1;
+ }
+
+ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
+ const uint8_t *in, uint8_t *out, size_t len,
+ ctr128_f stream) {
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+ unsigned int n, ctr;
+ uint64_t mlen = ctx->len.u[1];
+ #ifdef GCM_FUNCREF_4BIT
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ #ifdef GHASH
+ void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len) = ctx->ghash;
+ #endif
+ #endif
+
+ mlen += len;
+ if (mlen > ((UINT64_C(1) << 36) - 32) ||
+ (sizeof(len) == 8 && mlen < len)) {
+ return 0;
+ }
+ ctx->len.u[1] = mlen;
+
+ if (ctx->ares) {
+ /* First call to decrypt finalizes GHASH(AAD) */
+ GCM_MUL(ctx, Xi);
+ ctx->ares = 0;
+ }
+
+ if (is_endian.little) {
+ ctr = GETU32(ctx->Yi.c + 12);
+ } else {
+ ctr = ctx->Yi.d[3];
+ }
+
+ n = ctx->mres;
+ if (n) {
+ while (n && len) {
+ uint8_t c = *(in++);
+ *(out++) = c ^ ctx->EKi.c[n];
+ ctx->Xi.c[n] ^= c;
+ --len;
+ n = (n + 1) % 16;
+ }
+ if (n == 0) {
+ GCM_MUL(ctx, Xi);
+ } else {
+ ctx->mres = n;
+ return 1;
+ }
+ }
+ #if defined(GHASH)
+ while (len >= GHASH_CHUNK) {
+ GHASH(ctx, in, GHASH_CHUNK);
+ (*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
+ ctr += GHASH_CHUNK / 16;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ out += GHASH_CHUNK;
+ in += GHASH_CHUNK;
+ len -= GHASH_CHUNK;
+ }
+ #endif
+ size_t i = len & kSizeTWithoutLower4Bits;
+ if (i != 0) {
+ size_t j = i / 16;
+
+ #if defined(GHASH)
+ GHASH(ctx, in, i);
+ #else
+ while (j--) {
+ size_t k;
+ for (k = 0; k < 16; ++k) {
+ ctx->Xi.c[k] ^= in[k];
+ }
+ GCM_MUL(ctx, Xi);
+ in += 16;
+ }
+ j = i / 16;
+ in -= i;
+ #endif
+ (*stream)(in, out, j, key, ctx->Yi.c);
+ ctr += (unsigned int)j;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ out += i;
+ in += i;
+ len -= i;
+ }
+ if (len) {
+ (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
+ ++ctr;
+ if (is_endian.little) {
+ PUTU32(ctx->Yi.c + 12, ctr);
+ } else {
+ ctx->Yi.d[3] = ctr;
+ }
+ while (len--) {
+ uint8_t c = in[n];
+ ctx->Xi.c[n] ^= c;
+ out[n] = c ^ ctx->EKi.c[n];
+ ++n;
+ }
+ }
+
+ ctx->mres = n;
+ return 1;
+ }
+
+ int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag,
+ size_t len) {
+ const union {
+ long one;
+ char little;
+ } is_endian = {1};
+ uint64_t alen = ctx->len.u[0] << 3;
+ uint64_t clen = ctx->len.u[1] << 3;
+ #ifdef GCM_FUNCREF_4BIT
+ void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
+ #endif
+
+ if (ctx->mres || ctx->ares) {
+ GCM_MUL(ctx, Xi);
+ }
+
+ if (is_endian.little) {
+ #ifdef BSWAP8
+ alen = BSWAP8(alen);
+ clen = BSWAP8(clen);
+ #else
+ uint8_t *p = ctx->len.c;
+
+ ctx->len.u[0] = alen;
+ ctx->len.u[1] = clen;
+
+ alen = (uint64_t)GETU32(p) << 32 | GETU32(p + 4);
+ clen = (uint64_t)GETU32(p + 8) << 32 | GETU32(p + 12);
+ #endif
+ }
+
+ ctx->Xi.u[0] ^= alen;
+ ctx->Xi.u[1] ^= clen;
+ GCM_MUL(ctx, Xi);
+
+ ctx->Xi.u[0] ^= ctx->EK0.u[0];
+ ctx->Xi.u[1] ^= ctx->EK0.u[1];
+
+ if (tag && len <= sizeof(ctx->Xi)) {
+ return CRYPTO_memcmp(ctx->Xi.c, tag, len) == 0;
+ } else {
+ return 0;
+ }
+ }
+
+ void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag,
+ size_t len) {
+ CRYPTO_gcm128_finish(ctx, NULL, 0);
+ memcpy(tag, ctx->Xi.c, len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
+ }
+
+ void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx) {
+ if (ctx) {
+ OPENSSL_cleanse(ctx, sizeof(*ctx));
+ OPENSSL_free(ctx);
+ }
+ }
+
+ #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
+ int crypto_gcm_clmul_enabled(void) {
+ #ifdef GHASH_ASM
+ return OPENSSL_ia32cap_P[0] & (1 << 24) && /* check FXSR bit */
+ OPENSSL_ia32cap_P[1] & (1 << 1); /* check PCLMULQDQ bit */
+ #else
+ return 0;
+ #endif
+ }
+ #endif
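
For orientation, the hunk above adds the generic GCM engine that the package's AES-GCM cipher code (data/vendor/ring/crypto/cipher/e_aes.c in the file list) drives. The sketch below is a minimal, illustrative seal operation using the CRYPTO_gcm128_* functions added in this hunk; it is not part of the package. It assumes AES_set_encrypt_key/AES_encrypt from the vendored include/openssl/aes.h as the block cipher, that GCM128_CONTEXT and block128_f come from crypto/modes/internal.h (outside this hunk), and the helper name example_aes128_gcm_seal is made up for the example.

/* Illustrative sketch only: one-shot AES-128-GCM encrypt-and-tag built on the
 * CRYPTO_gcm128_* API shown in the diff above. Include paths are indicative;
 * in this tree the GCM declarations live in crypto/modes/internal.h. */
#include <openssl/aes.h>
#include "internal.h" /* GCM128_CONTEXT, block128_f, CRYPTO_gcm128_* */

static int example_aes128_gcm_seal(const uint8_t key_bytes[16],
                                   const uint8_t iv[12],
                                   const uint8_t *aad, size_t aad_len,
                                   const uint8_t *pt, uint8_t *ct, size_t len,
                                   uint8_t tag[16]) {
  AES_KEY aes;
  GCM128_CONTEXT ctx;

  if (AES_set_encrypt_key(key_bytes, 128, &aes) != 0) {
    return 0;
  }
  /* Computes H = E_K(0^128) and picks the fastest gmult/ghash backend
   * (CLMUL/AVX, NEON/PMULL, or the 4-bit table code above). */
  CRYPTO_gcm128_init(&ctx, &aes, (block128_f)AES_encrypt);
  /* For a 96-bit IV, Yi starts as IV || 0x00000001; EK0 is cached for the tag. */
  CRYPTO_gcm128_set_96_bit_iv(&ctx, &aes, iv);
  if (!CRYPTO_gcm128_aad(&ctx, aad, aad_len) ||
      !CRYPTO_gcm128_encrypt(&ctx, &aes, pt, ct, len)) {
    return 0;
  }
  /* Final tag is GHASH over AAD, ciphertext, and the bit lengths, XOR EK0. */
  CRYPTO_gcm128_tag(&ctx, tag, 16);
  return 1;
}

Verification would go through CRYPTO_gcm128_finish, which compares a caller-supplied tag against the recomputed one with CRYPTO_memcmp, as shown at the end of the hunk.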