ring-native 0.0.0 → 0.1.0

Files changed (267)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/CHANGES.md +7 -0
  4. data/Makefile +5 -0
  5. data/README.md +12 -5
  6. data/Rakefile +4 -0
  7. data/ext/ring/extconf.rb +4 -5
  8. data/lib/ring/native.rb +3 -1
  9. data/lib/ring/native/version.rb +5 -1
  10. data/ring-native.gemspec +6 -6
  11. data/vendor/ring-ffi/Cargo.lock +26 -0
  12. data/vendor/ring-ffi/Cargo.toml +45 -0
  13. data/vendor/ring-ffi/LICENSE +16 -0
  14. data/vendor/ring-ffi/README.md +59 -0
  15. data/vendor/ring-ffi/src/lib.rs +79 -0
  16. metadata +10 -255
  17. data/vendor/ring/BUILDING.md +0 -40
  18. data/vendor/ring/Cargo.toml +0 -43
  19. data/vendor/ring/LICENSE +0 -185
  20. data/vendor/ring/Makefile +0 -35
  21. data/vendor/ring/PORTING.md +0 -163
  22. data/vendor/ring/README.md +0 -113
  23. data/vendor/ring/STYLE.md +0 -197
  24. data/vendor/ring/appveyor.yml +0 -27
  25. data/vendor/ring/build.rs +0 -108
  26. data/vendor/ring/crypto/aes/aes.c +0 -1142
  27. data/vendor/ring/crypto/aes/aes_test.Windows.vcxproj +0 -25
  28. data/vendor/ring/crypto/aes/aes_test.cc +0 -93
  29. data/vendor/ring/crypto/aes/asm/aes-586.pl +0 -2368
  30. data/vendor/ring/crypto/aes/asm/aes-armv4.pl +0 -1249
  31. data/vendor/ring/crypto/aes/asm/aes-x86_64.pl +0 -2246
  32. data/vendor/ring/crypto/aes/asm/aesni-x86.pl +0 -1318
  33. data/vendor/ring/crypto/aes/asm/aesni-x86_64.pl +0 -2084
  34. data/vendor/ring/crypto/aes/asm/aesv8-armx.pl +0 -675
  35. data/vendor/ring/crypto/aes/asm/bsaes-armv7.pl +0 -1364
  36. data/vendor/ring/crypto/aes/asm/bsaes-x86_64.pl +0 -1565
  37. data/vendor/ring/crypto/aes/asm/vpaes-x86.pl +0 -841
  38. data/vendor/ring/crypto/aes/asm/vpaes-x86_64.pl +0 -1116
  39. data/vendor/ring/crypto/aes/internal.h +0 -87
  40. data/vendor/ring/crypto/aes/mode_wrappers.c +0 -61
  41. data/vendor/ring/crypto/bn/add.c +0 -394
  42. data/vendor/ring/crypto/bn/asm/armv4-mont.pl +0 -694
  43. data/vendor/ring/crypto/bn/asm/armv8-mont.pl +0 -1503
  44. data/vendor/ring/crypto/bn/asm/bn-586.pl +0 -774
  45. data/vendor/ring/crypto/bn/asm/co-586.pl +0 -287
  46. data/vendor/ring/crypto/bn/asm/rsaz-avx2.pl +0 -1882
  47. data/vendor/ring/crypto/bn/asm/x86-mont.pl +0 -592
  48. data/vendor/ring/crypto/bn/asm/x86_64-gcc.c +0 -599
  49. data/vendor/ring/crypto/bn/asm/x86_64-mont.pl +0 -1393
  50. data/vendor/ring/crypto/bn/asm/x86_64-mont5.pl +0 -3507
  51. data/vendor/ring/crypto/bn/bn.c +0 -352
  52. data/vendor/ring/crypto/bn/bn_asn1.c +0 -74
  53. data/vendor/ring/crypto/bn/bn_test.Windows.vcxproj +0 -25
  54. data/vendor/ring/crypto/bn/bn_test.cc +0 -1696
  55. data/vendor/ring/crypto/bn/cmp.c +0 -200
  56. data/vendor/ring/crypto/bn/convert.c +0 -433
  57. data/vendor/ring/crypto/bn/ctx.c +0 -311
  58. data/vendor/ring/crypto/bn/div.c +0 -594
  59. data/vendor/ring/crypto/bn/exponentiation.c +0 -1335
  60. data/vendor/ring/crypto/bn/gcd.c +0 -711
  61. data/vendor/ring/crypto/bn/generic.c +0 -1019
  62. data/vendor/ring/crypto/bn/internal.h +0 -316
  63. data/vendor/ring/crypto/bn/montgomery.c +0 -516
  64. data/vendor/ring/crypto/bn/mul.c +0 -888
  65. data/vendor/ring/crypto/bn/prime.c +0 -829
  66. data/vendor/ring/crypto/bn/random.c +0 -334
  67. data/vendor/ring/crypto/bn/rsaz_exp.c +0 -262
  68. data/vendor/ring/crypto/bn/rsaz_exp.h +0 -53
  69. data/vendor/ring/crypto/bn/shift.c +0 -276
  70. data/vendor/ring/crypto/bytestring/bytestring_test.Windows.vcxproj +0 -25
  71. data/vendor/ring/crypto/bytestring/bytestring_test.cc +0 -421
  72. data/vendor/ring/crypto/bytestring/cbb.c +0 -399
  73. data/vendor/ring/crypto/bytestring/cbs.c +0 -227
  74. data/vendor/ring/crypto/bytestring/internal.h +0 -46
  75. data/vendor/ring/crypto/chacha/chacha_generic.c +0 -140
  76. data/vendor/ring/crypto/chacha/chacha_vec.c +0 -323
  77. data/vendor/ring/crypto/chacha/chacha_vec_arm.S +0 -1447
  78. data/vendor/ring/crypto/chacha/chacha_vec_arm_generate.go +0 -153
  79. data/vendor/ring/crypto/cipher/cipher_test.Windows.vcxproj +0 -25
  80. data/vendor/ring/crypto/cipher/e_aes.c +0 -390
  81. data/vendor/ring/crypto/cipher/e_chacha20poly1305.c +0 -208
  82. data/vendor/ring/crypto/cipher/internal.h +0 -173
  83. data/vendor/ring/crypto/cipher/test/aes_128_gcm_tests.txt +0 -543
  84. data/vendor/ring/crypto/cipher/test/aes_128_key_wrap_tests.txt +0 -9
  85. data/vendor/ring/crypto/cipher/test/aes_256_gcm_tests.txt +0 -475
  86. data/vendor/ring/crypto/cipher/test/aes_256_key_wrap_tests.txt +0 -23
  87. data/vendor/ring/crypto/cipher/test/chacha20_poly1305_old_tests.txt +0 -422
  88. data/vendor/ring/crypto/cipher/test/chacha20_poly1305_tests.txt +0 -484
  89. data/vendor/ring/crypto/cipher/test/cipher_test.txt +0 -100
  90. data/vendor/ring/crypto/constant_time_test.Windows.vcxproj +0 -25
  91. data/vendor/ring/crypto/constant_time_test.c +0 -304
  92. data/vendor/ring/crypto/cpu-arm-asm.S +0 -32
  93. data/vendor/ring/crypto/cpu-arm.c +0 -199
  94. data/vendor/ring/crypto/cpu-intel.c +0 -261
  95. data/vendor/ring/crypto/crypto.c +0 -151
  96. data/vendor/ring/crypto/curve25519/asm/x25519-arm.S +0 -2118
  97. data/vendor/ring/crypto/curve25519/curve25519.c +0 -4888
  98. data/vendor/ring/crypto/curve25519/x25519_test.cc +0 -128
  99. data/vendor/ring/crypto/digest/md32_common.h +0 -181
  100. data/vendor/ring/crypto/ec/asm/p256-x86_64-asm.pl +0 -2725
  101. data/vendor/ring/crypto/ec/ec.c +0 -193
  102. data/vendor/ring/crypto/ec/ec_curves.c +0 -61
  103. data/vendor/ring/crypto/ec/ec_key.c +0 -228
  104. data/vendor/ring/crypto/ec/ec_montgomery.c +0 -114
  105. data/vendor/ring/crypto/ec/example_mul.Windows.vcxproj +0 -25
  106. data/vendor/ring/crypto/ec/internal.h +0 -243
  107. data/vendor/ring/crypto/ec/oct.c +0 -253
  108. data/vendor/ring/crypto/ec/p256-64.c +0 -1794
  109. data/vendor/ring/crypto/ec/p256-x86_64-table.h +0 -9548
  110. data/vendor/ring/crypto/ec/p256-x86_64.c +0 -509
  111. data/vendor/ring/crypto/ec/simple.c +0 -1007
  112. data/vendor/ring/crypto/ec/util-64.c +0 -183
  113. data/vendor/ring/crypto/ec/wnaf.c +0 -508
  114. data/vendor/ring/crypto/ecdh/ecdh.c +0 -155
  115. data/vendor/ring/crypto/ecdsa/ecdsa.c +0 -304
  116. data/vendor/ring/crypto/ecdsa/ecdsa_asn1.c +0 -193
  117. data/vendor/ring/crypto/ecdsa/ecdsa_test.Windows.vcxproj +0 -25
  118. data/vendor/ring/crypto/ecdsa/ecdsa_test.cc +0 -327
  119. data/vendor/ring/crypto/header_removed.h +0 -17
  120. data/vendor/ring/crypto/internal.h +0 -495
  121. data/vendor/ring/crypto/libring.Windows.vcxproj +0 -101
  122. data/vendor/ring/crypto/mem.c +0 -98
  123. data/vendor/ring/crypto/modes/asm/aesni-gcm-x86_64.pl +0 -1045
  124. data/vendor/ring/crypto/modes/asm/ghash-armv4.pl +0 -517
  125. data/vendor/ring/crypto/modes/asm/ghash-x86.pl +0 -1393
  126. data/vendor/ring/crypto/modes/asm/ghash-x86_64.pl +0 -1741
  127. data/vendor/ring/crypto/modes/asm/ghashv8-armx.pl +0 -422
  128. data/vendor/ring/crypto/modes/ctr.c +0 -226
  129. data/vendor/ring/crypto/modes/gcm.c +0 -1206
  130. data/vendor/ring/crypto/modes/gcm_test.Windows.vcxproj +0 -25
  131. data/vendor/ring/crypto/modes/gcm_test.c +0 -348
  132. data/vendor/ring/crypto/modes/internal.h +0 -299
  133. data/vendor/ring/crypto/perlasm/arm-xlate.pl +0 -170
  134. data/vendor/ring/crypto/perlasm/readme +0 -100
  135. data/vendor/ring/crypto/perlasm/x86_64-xlate.pl +0 -1164
  136. data/vendor/ring/crypto/perlasm/x86asm.pl +0 -292
  137. data/vendor/ring/crypto/perlasm/x86gas.pl +0 -263
  138. data/vendor/ring/crypto/perlasm/x86masm.pl +0 -200
  139. data/vendor/ring/crypto/perlasm/x86nasm.pl +0 -187
  140. data/vendor/ring/crypto/poly1305/poly1305.c +0 -331
  141. data/vendor/ring/crypto/poly1305/poly1305_arm.c +0 -301
  142. data/vendor/ring/crypto/poly1305/poly1305_arm_asm.S +0 -2015
  143. data/vendor/ring/crypto/poly1305/poly1305_test.Windows.vcxproj +0 -25
  144. data/vendor/ring/crypto/poly1305/poly1305_test.cc +0 -80
  145. data/vendor/ring/crypto/poly1305/poly1305_test.txt +0 -52
  146. data/vendor/ring/crypto/poly1305/poly1305_vec.c +0 -892
  147. data/vendor/ring/crypto/rand/asm/rdrand-x86_64.pl +0 -75
  148. data/vendor/ring/crypto/rand/internal.h +0 -32
  149. data/vendor/ring/crypto/rand/rand.c +0 -189
  150. data/vendor/ring/crypto/rand/urandom.c +0 -219
  151. data/vendor/ring/crypto/rand/windows.c +0 -56
  152. data/vendor/ring/crypto/refcount_c11.c +0 -66
  153. data/vendor/ring/crypto/refcount_lock.c +0 -53
  154. data/vendor/ring/crypto/refcount_test.Windows.vcxproj +0 -25
  155. data/vendor/ring/crypto/refcount_test.c +0 -58
  156. data/vendor/ring/crypto/rsa/blinding.c +0 -462
  157. data/vendor/ring/crypto/rsa/internal.h +0 -108
  158. data/vendor/ring/crypto/rsa/padding.c +0 -300
  159. data/vendor/ring/crypto/rsa/rsa.c +0 -450
  160. data/vendor/ring/crypto/rsa/rsa_asn1.c +0 -261
  161. data/vendor/ring/crypto/rsa/rsa_impl.c +0 -944
  162. data/vendor/ring/crypto/rsa/rsa_test.Windows.vcxproj +0 -25
  163. data/vendor/ring/crypto/rsa/rsa_test.cc +0 -437
  164. data/vendor/ring/crypto/sha/asm/sha-armv8.pl +0 -436
  165. data/vendor/ring/crypto/sha/asm/sha-x86_64.pl +0 -2390
  166. data/vendor/ring/crypto/sha/asm/sha256-586.pl +0 -1275
  167. data/vendor/ring/crypto/sha/asm/sha256-armv4.pl +0 -735
  168. data/vendor/ring/crypto/sha/asm/sha256-armv8.pl +0 -14
  169. data/vendor/ring/crypto/sha/asm/sha256-x86_64.pl +0 -14
  170. data/vendor/ring/crypto/sha/asm/sha512-586.pl +0 -911
  171. data/vendor/ring/crypto/sha/asm/sha512-armv4.pl +0 -666
  172. data/vendor/ring/crypto/sha/asm/sha512-armv8.pl +0 -14
  173. data/vendor/ring/crypto/sha/asm/sha512-x86_64.pl +0 -14
  174. data/vendor/ring/crypto/sha/sha1.c +0 -271
  175. data/vendor/ring/crypto/sha/sha256.c +0 -204
  176. data/vendor/ring/crypto/sha/sha512.c +0 -355
  177. data/vendor/ring/crypto/test/file_test.cc +0 -326
  178. data/vendor/ring/crypto/test/file_test.h +0 -181
  179. data/vendor/ring/crypto/test/malloc.cc +0 -150
  180. data/vendor/ring/crypto/test/scoped_types.h +0 -95
  181. data/vendor/ring/crypto/test/test.Windows.vcxproj +0 -35
  182. data/vendor/ring/crypto/test/test_util.cc +0 -46
  183. data/vendor/ring/crypto/test/test_util.h +0 -41
  184. data/vendor/ring/crypto/thread_none.c +0 -55
  185. data/vendor/ring/crypto/thread_pthread.c +0 -165
  186. data/vendor/ring/crypto/thread_test.Windows.vcxproj +0 -25
  187. data/vendor/ring/crypto/thread_test.c +0 -200
  188. data/vendor/ring/crypto/thread_win.c +0 -282
  189. data/vendor/ring/examples/checkdigest.rs +0 -103
  190. data/vendor/ring/include/openssl/aes.h +0 -121
  191. data/vendor/ring/include/openssl/arm_arch.h +0 -129
  192. data/vendor/ring/include/openssl/base.h +0 -156
  193. data/vendor/ring/include/openssl/bn.h +0 -794
  194. data/vendor/ring/include/openssl/buffer.h +0 -18
  195. data/vendor/ring/include/openssl/bytestring.h +0 -235
  196. data/vendor/ring/include/openssl/chacha.h +0 -37
  197. data/vendor/ring/include/openssl/cmac.h +0 -76
  198. data/vendor/ring/include/openssl/cpu.h +0 -184
  199. data/vendor/ring/include/openssl/crypto.h +0 -43
  200. data/vendor/ring/include/openssl/curve25519.h +0 -88
  201. data/vendor/ring/include/openssl/ec.h +0 -225
  202. data/vendor/ring/include/openssl/ec_key.h +0 -129
  203. data/vendor/ring/include/openssl/ecdh.h +0 -110
  204. data/vendor/ring/include/openssl/ecdsa.h +0 -156
  205. data/vendor/ring/include/openssl/err.h +0 -201
  206. data/vendor/ring/include/openssl/mem.h +0 -101
  207. data/vendor/ring/include/openssl/obj_mac.h +0 -71
  208. data/vendor/ring/include/openssl/opensslfeatures.h +0 -68
  209. data/vendor/ring/include/openssl/opensslv.h +0 -18
  210. data/vendor/ring/include/openssl/ossl_typ.h +0 -18
  211. data/vendor/ring/include/openssl/poly1305.h +0 -51
  212. data/vendor/ring/include/openssl/rand.h +0 -70
  213. data/vendor/ring/include/openssl/rsa.h +0 -399
  214. data/vendor/ring/include/openssl/thread.h +0 -133
  215. data/vendor/ring/include/openssl/type_check.h +0 -71
  216. data/vendor/ring/mk/Common.props +0 -63
  217. data/vendor/ring/mk/Windows.props +0 -42
  218. data/vendor/ring/mk/WindowsTest.props +0 -18
  219. data/vendor/ring/mk/appveyor.bat +0 -62
  220. data/vendor/ring/mk/bottom_of_makefile.mk +0 -54
  221. data/vendor/ring/mk/ring.mk +0 -266
  222. data/vendor/ring/mk/top_of_makefile.mk +0 -214
  223. data/vendor/ring/mk/travis.sh +0 -40
  224. data/vendor/ring/mk/update-travis-yml.py +0 -229
  225. data/vendor/ring/ring.sln +0 -153
  226. data/vendor/ring/src/aead.rs +0 -682
  227. data/vendor/ring/src/agreement.rs +0 -248
  228. data/vendor/ring/src/c.rs +0 -129
  229. data/vendor/ring/src/constant_time.rs +0 -37
  230. data/vendor/ring/src/der.rs +0 -96
  231. data/vendor/ring/src/digest.rs +0 -690
  232. data/vendor/ring/src/digest_tests.txt +0 -57
  233. data/vendor/ring/src/ecc.rs +0 -28
  234. data/vendor/ring/src/ecc_build.rs +0 -279
  235. data/vendor/ring/src/ecc_curves.rs +0 -117
  236. data/vendor/ring/src/ed25519_tests.txt +0 -2579
  237. data/vendor/ring/src/exe_tests.rs +0 -46
  238. data/vendor/ring/src/ffi.rs +0 -29
  239. data/vendor/ring/src/file_test.rs +0 -187
  240. data/vendor/ring/src/hkdf.rs +0 -153
  241. data/vendor/ring/src/hkdf_tests.txt +0 -59
  242. data/vendor/ring/src/hmac.rs +0 -414
  243. data/vendor/ring/src/hmac_tests.txt +0 -97
  244. data/vendor/ring/src/input.rs +0 -312
  245. data/vendor/ring/src/lib.rs +0 -41
  246. data/vendor/ring/src/pbkdf2.rs +0 -265
  247. data/vendor/ring/src/pbkdf2_tests.txt +0 -113
  248. data/vendor/ring/src/polyfill.rs +0 -57
  249. data/vendor/ring/src/rand.rs +0 -28
  250. data/vendor/ring/src/signature.rs +0 -314
  251. data/vendor/ring/third-party/NIST/README.md +0 -9
  252. data/vendor/ring/third-party/NIST/SHAVS/SHA1LongMsg.rsp +0 -263
  253. data/vendor/ring/third-party/NIST/SHAVS/SHA1Monte.rsp +0 -309
  254. data/vendor/ring/third-party/NIST/SHAVS/SHA1ShortMsg.rsp +0 -267
  255. data/vendor/ring/third-party/NIST/SHAVS/SHA224LongMsg.rsp +0 -263
  256. data/vendor/ring/third-party/NIST/SHAVS/SHA224Monte.rsp +0 -309
  257. data/vendor/ring/third-party/NIST/SHAVS/SHA224ShortMsg.rsp +0 -267
  258. data/vendor/ring/third-party/NIST/SHAVS/SHA256LongMsg.rsp +0 -263
  259. data/vendor/ring/third-party/NIST/SHAVS/SHA256Monte.rsp +0 -309
  260. data/vendor/ring/third-party/NIST/SHAVS/SHA256ShortMsg.rsp +0 -267
  261. data/vendor/ring/third-party/NIST/SHAVS/SHA384LongMsg.rsp +0 -519
  262. data/vendor/ring/third-party/NIST/SHAVS/SHA384Monte.rsp +0 -309
  263. data/vendor/ring/third-party/NIST/SHAVS/SHA384ShortMsg.rsp +0 -523
  264. data/vendor/ring/third-party/NIST/SHAVS/SHA512LongMsg.rsp +0 -519
  265. data/vendor/ring/third-party/NIST/SHAVS/SHA512Monte.rsp +0 -309
  266. data/vendor/ring/third-party/NIST/SHAVS/SHA512ShortMsg.rsp +0 -523
  267. data/vendor/ring/third-party/NIST/sha256sums.txt +0 -1
data/vendor/ring/crypto/modes/asm/ghashv8-armx.pl (deleted)
@@ -1,422 +0,0 @@
- #!/usr/bin/env perl
- #
- # ====================================================================
- # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
- # project. The module is, however, dual licensed under OpenSSL and
- # CRYPTOGAMS licenses depending on where you obtain it. For further
- # details see http://www.openssl.org/~appro/cryptogams/.
- # ====================================================================
- #
- # GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
- #
- # June 2014
- #
- # Initial version was developed in tight cooperation with Ard
- # Biesheuvel <ard.biesheuvel@linaro.org> from bits-n-pieces from
- # other assembly modules. Just like aesv8-armx.pl this module
- # supports both AArch32 and AArch64 execution modes.
- #
- # July 2014
- #
- # Implement 2x aggregated reduction [see ghash-x86.pl for background
- # information].
- #
- # Current performance in cycles per processed byte:
- #
- # PMULL[2] 32-bit NEON(*)
- # Apple A7 0.92 5.62
- # Cortex-A53 1.01 8.39
- # Cortex-A57 1.17 7.61
- # Denver 0.71 6.02
- #
- # (*) presented for reference/comparison purposes;
-
- $flavour = shift;
- $output = shift;
-
- $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
- ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
- ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
- die "can't locate arm-xlate.pl";
-
- open OUT,"| \"$^X\" $xlate $flavour $output";
- *STDOUT=*OUT;
-
- $Xi="x0"; # argument block
- $Htbl="x1";
- $inp="x2";
- $len="x3";
-
- $inc="x12";
-
- {
- my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
- my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));
-
- $code=<<___;
- #include <openssl/arm_arch.h>
-
- .text
- ___
- $code.=<<___ if ($flavour =~ /64/);
- #if !defined(__clang__)
- .arch armv8-a+crypto
- #endif
- ___
- $code.=".fpu neon\n.code 32\n" if ($flavour !~ /64/);
-
- ################################################################################
- # void gcm_init_v8(u128 Htable[16],const u64 H[2]);
- #
- # input: 128-bit H - secret parameter E(K,0^128)
- # output: precomputed table filled with degrees of twisted H;
- # H is twisted to handle reverse bitness of GHASH;
- # only few of 16 slots of Htable[16] are used;
- # data is opaque to outside world (which allows to
- # optimize the code independently);
- #
- $code.=<<___;
- .global gcm_init_v8
- .type gcm_init_v8,%function
- .align 4
- gcm_init_v8:
- vld1.64 {$t1},[x1] @ load input H
- vmov.i8 $xC2,#0xe1
- vshl.i64 $xC2,$xC2,#57 @ 0xc2.0
- vext.8 $IN,$t1,$t1,#8
- vshr.u64 $t2,$xC2,#63
- vdup.32 $t1,${t1}[1]
- vext.8 $t0,$t2,$xC2,#8 @ t0=0xc2....01
- vshr.u64 $t2,$IN,#63
- vshr.s32 $t1,$t1,#31 @ broadcast carry bit
- vand $t2,$t2,$t0
- vshl.i64 $IN,$IN,#1
- vext.8 $t2,$t2,$t2,#8
- vand $t0,$t0,$t1
- vorr $IN,$IN,$t2 @ H<<<=1
- veor $H,$IN,$t0 @ twisted H
- vst1.64 {$H},[x0],#16 @ store Htable[0]
-
- @ calculate H^2
- vext.8 $t0,$H,$H,#8 @ Karatsuba pre-processing
- vpmull.p64 $Xl,$H,$H
- veor $t0,$t0,$H
- vpmull2.p64 $Xh,$H,$H
- vpmull.p64 $Xm,$t0,$t0
-
- vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
- veor $t2,$Xl,$Xh
- veor $Xm,$Xm,$t1
- veor $Xm,$Xm,$t2
- vpmull.p64 $t2,$Xl,$xC2 @ 1st phase
-
- vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
- vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
- veor $Xl,$Xm,$t2
-
- vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase
- vpmull.p64 $Xl,$Xl,$xC2
- veor $t2,$t2,$Xh
- veor $H2,$Xl,$t2
-
- vext.8 $t1,$H2,$H2,#8 @ Karatsuba pre-processing
- veor $t1,$t1,$H2
- vext.8 $Hhl,$t0,$t1,#8 @ pack Karatsuba pre-processed
- vst1.64 {$Hhl-$H2},[x0] @ store Htable[1..2]
-
- ret
- .size gcm_init_v8,.-gcm_init_v8
- ___
- ################################################################################
- # void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
- #
- # input: Xi - current hash value;
- # Htable - table precomputed in gcm_init_v8;
- # output: Xi - next hash value Xi;
- #
- $code.=<<___;
- .global gcm_gmult_v8
- .type gcm_gmult_v8,%function
- .align 4
- gcm_gmult_v8:
- vld1.64 {$t1},[$Xi] @ load Xi
- vmov.i8 $xC2,#0xe1
- vld1.64 {$H-$Hhl},[$Htbl] @ load twisted H, ...
- vshl.u64 $xC2,$xC2,#57
- #ifndef __ARMEB__
- vrev64.8 $t1,$t1
- #endif
- vext.8 $IN,$t1,$t1,#8
-
- vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
- veor $t1,$t1,$IN @ Karatsuba pre-processing
- vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
- vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
-
- vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
- veor $t2,$Xl,$Xh
- veor $Xm,$Xm,$t1
- veor $Xm,$Xm,$t2
- vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
-
- vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
- vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
- veor $Xl,$Xm,$t2
-
- vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
- vpmull.p64 $Xl,$Xl,$xC2
- veor $t2,$t2,$Xh
- veor $Xl,$Xl,$t2
-
- #ifndef __ARMEB__
- vrev64.8 $Xl,$Xl
- #endif
- vext.8 $Xl,$Xl,$Xl,#8
- vst1.64 {$Xl},[$Xi] @ write out Xi
-
- ret
- .size gcm_gmult_v8,.-gcm_gmult_v8
- ___
- ################################################################################
- # void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
- #
- # input: table precomputed in gcm_init_v8;
- # current hash value Xi;
- # pointer to input data;
- # length of input data in bytes, but divisible by block size;
- # output: next hash value Xi;
- #
- $code.=<<___;
- .global gcm_ghash_v8
- .type gcm_ghash_v8,%function
- .align 4
- gcm_ghash_v8:
- ___
- $code.=<<___ if ($flavour !~ /64/);
- vstmdb sp!,{d8-d15} @ 32-bit ABI says so
- ___
- $code.=<<___;
- vld1.64 {$Xl},[$Xi] @ load [rotated] Xi
- @ "[rotated]" means that
- @ loaded value would have
- @ to be rotated in order to
- @ make it appear as in
- @ alorithm specification
- subs $len,$len,#32 @ see if $len is 32 or larger
- mov $inc,#16 @ $inc is used as post-
- @ increment for input pointer;
- @ as loop is modulo-scheduled
- @ $inc is zeroed just in time
- @ to preclude oversteping
- @ inp[len], which means that
- @ last block[s] are actually
- @ loaded twice, but last
- @ copy is not processed
- vld1.64 {$H-$Hhl},[$Htbl],#32 @ load twisted H, ..., H^2
- vmov.i8 $xC2,#0xe1
- vld1.64 {$H2},[$Htbl]
- cclr $inc,eq @ is it time to zero $inc?
- vext.8 $Xl,$Xl,$Xl,#8 @ rotate Xi
- vld1.64 {$t0},[$inp],#16 @ load [rotated] I[0]
- vshl.u64 $xC2,$xC2,#57 @ compose 0xc2.0 constant
- #ifndef __ARMEB__
- vrev64.8 $t0,$t0
- vrev64.8 $Xl,$Xl
- #endif
- vext.8 $IN,$t0,$t0,#8 @ rotate I[0]
- b.lo .Lodd_tail_v8 @ $len was less than 32
- ___
- { my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
- #######
- # Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
- # [(H*Ii+1) + (H*Xi+1)] mod P =
- # [(H*Ii+1) + H^2*(Ii+Xi)] mod P
- #
- $code.=<<___;
- vld1.64 {$t1},[$inp],$inc @ load [rotated] I[1]
- #ifndef __ARMEB__
- vrev64.8 $t1,$t1
- #endif
- vext.8 $In,$t1,$t1,#8
- veor $IN,$IN,$Xl @ I[i]^=Xi
- vpmull.p64 $Xln,$H,$In @ H·Ii+1
- veor $t1,$t1,$In @ Karatsuba pre-processing
- vpmull2.p64 $Xhn,$H,$In
- b .Loop_mod2x_v8
-
- .align 4
- .Loop_mod2x_v8:
- vext.8 $t2,$IN,$IN,#8
- subs $len,$len,#32 @ is there more data?
- vpmull.p64 $Xl,$H2,$IN @ H^2.lo·Xi.lo
- cclr $inc,lo @ is it time to zero $inc?
-
- vpmull.p64 $Xmn,$Hhl,$t1
- veor $t2,$t2,$IN @ Karatsuba pre-processing
- vpmull2.p64 $Xh,$H2,$IN @ H^2.hi·Xi.hi
- veor $Xl,$Xl,$Xln @ accumulate
- vpmull2.p64 $Xm,$Hhl,$t2 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
- vld1.64 {$t0},[$inp],$inc @ load [rotated] I[i+2]
-
- veor $Xh,$Xh,$Xhn
- cclr $inc,eq @ is it time to zero $inc?
- veor $Xm,$Xm,$Xmn
-
- vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
- veor $t2,$Xl,$Xh
- veor $Xm,$Xm,$t1
- vld1.64 {$t1},[$inp],$inc @ load [rotated] I[i+3]
- #ifndef __ARMEB__
- vrev64.8 $t0,$t0
- #endif
- veor $Xm,$Xm,$t2
- vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
-
- #ifndef __ARMEB__
- vrev64.8 $t1,$t1
- #endif
- vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
- vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
- vext.8 $In,$t1,$t1,#8
- vext.8 $IN,$t0,$t0,#8
- veor $Xl,$Xm,$t2
- vpmull.p64 $Xln,$H,$In @ H·Ii+1
- veor $IN,$IN,$Xh @ accumulate $IN early
-
- vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
- vpmull.p64 $Xl,$Xl,$xC2
- veor $IN,$IN,$t2
- veor $t1,$t1,$In @ Karatsuba pre-processing
- veor $IN,$IN,$Xl
- vpmull2.p64 $Xhn,$H,$In
- b.hs .Loop_mod2x_v8 @ there was at least 32 more bytes
-
- veor $Xh,$Xh,$t2
- vext.8 $IN,$t0,$t0,#8 @ re-construct $IN
- adds $len,$len,#32 @ re-construct $len
- veor $Xl,$Xl,$Xh @ re-construct $Xl
- b.eq .Ldone_v8 @ is $len zero?
- ___
- }
- $code.=<<___;
- .Lodd_tail_v8:
- vext.8 $t2,$Xl,$Xl,#8
- veor $IN,$IN,$Xl @ inp^=Xi
- veor $t1,$t0,$t2 @ $t1 is rotated inp^Xi
-
- vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
- veor $t1,$t1,$IN @ Karatsuba pre-processing
- vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
- vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
-
- vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
- veor $t2,$Xl,$Xh
- veor $Xm,$Xm,$t1
- veor $Xm,$Xm,$t2
- vpmull.p64 $t2,$Xl,$xC2 @ 1st phase of reduction
-
- vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
- vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
- veor $Xl,$Xm,$t2
-
- vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase of reduction
- vpmull.p64 $Xl,$Xl,$xC2
- veor $t2,$t2,$Xh
- veor $Xl,$Xl,$t2
-
- .Ldone_v8:
- #ifndef __ARMEB__
- vrev64.8 $Xl,$Xl
- #endif
- vext.8 $Xl,$Xl,$Xl,#8
- vst1.64 {$Xl},[$Xi] @ write out Xi
-
- ___
- $code.=<<___ if ($flavour !~ /64/);
- vldmia sp!,{d8-d15} @ 32-bit ABI says so
- ___
- $code.=<<___;
- ret
- .size gcm_ghash_v8,.-gcm_ghash_v8
- ___
- }
- $code.=<<___;
- .asciz "GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
- .align 2
- ___
-
- if ($flavour =~ /64/) { ######## 64-bit code
- sub unvmov {
- my $arg=shift;
-
- $arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
- sprintf "ins v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
- }
- foreach(split("\n",$code)) {
- s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o or
- s/vmov\.i8/movi/o or # fix up legacy mnemonics
- s/vmov\s+(.*)/unvmov($1)/geo or
- s/vext\.8/ext/o or
- s/vshr\.s/sshr\.s/o or
- s/vshr/ushr/o or
- s/^(\s+)v/$1/o or # strip off v prefix
- s/\bbx\s+lr\b/ret/o;
-
- s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo; # old->new registers
- s/@\s/\/\//o; # old->new style commentary
-
- # fix up remainig legacy suffixes
- s/\.[ui]?8(\s)/$1/o;
- s/\.[uis]?32//o and s/\.16b/\.4s/go;
- m/\.p64/o and s/\.16b/\.1q/o; # 1st pmull argument
- m/l\.p64/o and s/\.16b/\.1d/go; # 2nd and 3rd pmull arguments
- s/\.[uisp]?64//o and s/\.16b/\.2d/go;
- s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
-
- print $_,"\n";
- }
- } else { ######## 32-bit code
- sub unvdup32 {
- my $arg=shift;
-
- $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
- sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
- }
- sub unvpmullp64 {
- my ($mnemonic,$arg)=@_;
-
- if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
- my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
- |(($2&7)<<17)|(($2&8)<<4)
- |(($3&7)<<1) |(($3&8)<<2);
- $word |= 0x00010001 if ($mnemonic =~ "2");
- # since ARMv7 instructions are always encoded little-endian.
- # correct solution is to use .inst directive, but older
- # assemblers don't implement it:-(
- sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
- $word&0xff,($word>>8)&0xff,
- ($word>>16)&0xff,($word>>24)&0xff,
- $mnemonic,$arg;
- }
- }
-
- foreach(split("\n",$code)) {
- s/\b[wx]([0-9]+)\b/r$1/go; # new->old registers
- s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
- s/\/\/\s?/@ /o; # new->old style commentary
-
- # fix up remainig new-style suffixes
- s/\],#[0-9]+/]!/o;
-
- s/cclr\s+([^,]+),\s*([a-z]+)/mov$2 $1,#0/o or
- s/vdup\.32\s+(.*)/unvdup32($1)/geo or
- s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo or
- s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
- s/^(\s+)b\./$1b/o or
- s/^(\s+)ret/$1bx\tlr/o;
-
- print $_,"\n";
- }
- }
-
- close STDOUT; # enforce flush
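
The three entry points documented in the deleted ghashv8-armx.pl above (gcm_init_v8, gcm_gmult_v8, gcm_ghash_v8) share one calling pattern: derive Htable from the hash key H once, then fold whole 16-byte blocks into the running hash value Xi. A minimal C caller sketch follows, assuming stand-in typedefs for the u64/u128/u8 aliases normally supplied by crypto/modes/internal.h; the struct layout and the ghash_blocks helper are illustrative only, not part of the vendored sources:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } u128;  /* assumed stand-in layout */

    /* Prototypes as documented in the deleted ghashv8-armx.pl comments. */
    void gcm_init_v8(u128 Htable[16], const uint64_t H[2]);
    void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[16]);
    void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16],
                      const uint8_t *inp, size_t len);

    /* Fold `len` bytes (a multiple of 16, per the comment above) into Xi. */
    static void ghash_blocks(uint64_t Xi[2], const uint64_t H[2],
                             const uint8_t *inp, size_t len) {
      u128 Htable[16];
      gcm_init_v8(Htable, H);              /* precompute powers of twisted H */
      gcm_ghash_v8(Xi, Htable, inp, len);  /* hash whole 16-byte blocks */
      gcm_gmult_v8(Xi, Htable);            /* one extra in-place Xi * H step,
                                              the single-block primitive GCM
                                              uses for its final length block */
    }

The per-block work itself is the Karatsuba multiply plus the two-phase reduction by the 0xc2 constant shown in the assembly above; the aggregated loop processes two blocks per iteration using H and H^2 from Htable.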
data/vendor/ring/crypto/modes/ctr.c (deleted)
@@ -1,226 +0,0 @@
- /* ====================================================================
- * Copyright (c) 2008 The OpenSSL Project. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- * software must display the following acknowledgment:
- * "This product includes software developed by the OpenSSL Project
- * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- * endorse or promote products derived from this software without
- * prior written permission. For written permission, please contact
- * openssl-core@openssl.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- * nor may "OpenSSL" appear in their names without prior written
- * permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- * acknowledgment:
- * "This product includes software developed by the OpenSSL Project
- * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ==================================================================== */
-
- #include <openssl/type_check.h>
-
- #include <assert.h>
- #include <string.h>
-
- #include "internal.h"
-
-
- /* NOTE: the IV/counter CTR mode is big-endian. The code itself
- * is endian-neutral. */
-
- /* increment counter (128-bit int) by 1 */
- static void ctr128_inc(uint8_t *counter) {
- uint32_t n = 16;
- uint8_t c;
-
- do {
- --n;
- c = counter[n];
- ++c;
- counter[n] = c;
- if (c) {
- return;
- }
- } while (n);
- }
-
- OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size);
-
- /* The input encrypted as though 128bit counter mode is being used. The extra
- * state information to record how much of the 128bit block we have used is
- * contained in *num, and the encrypted counter is kept in ecount_buf. Both
- * *num and ecount_buf must be initialised with zeros before the first call to
- * CRYPTO_ctr128_encrypt().
- *
- * This algorithm assumes that the counter is in the x lower bits of the IV
- * (ivec), and that the application has full control over overflow and the rest
- * of the IV. This implementation takes NO responsibility for checking that
- * the counter doesn't overflow into the rest of the IV when incremented. */
- void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
- const void *key, uint8_t ivec[16],
- uint8_t ecount_buf[16], unsigned int *num,
- block128_f block) {
- unsigned int n;
-
- assert(key && ecount_buf && num);
- assert(len == 0 || (in && out));
- assert(*num < 16);
-
- n = *num;
-
- while (n && len) {
- *(out++) = *(in++) ^ ecount_buf[n];
- --len;
- n = (n + 1) % 16;
- }
-
- #if STRICT_ALIGNMENT
- if (((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
- size_t l = 0;
- while (l < len) {
- if (n == 0) {
- (*block)(ivec, ecount_buf, key);
- ctr128_inc(ivec);
- }
- out[l] = in[l] ^ ecount_buf[n];
- ++l;
- n = (n + 1) % 16;
- }
-
- *num = n;
- return;
- }
- #endif
-
- while (len >= 16) {
- (*block)(ivec, ecount_buf, key);
- ctr128_inc(ivec);
- for (; n < 16; n += sizeof(size_t)) {
- *(size_t *)(out + n) = *(size_t *)(in + n) ^ *(size_t *)(ecount_buf + n);
- }
- len -= 16;
- out += 16;
- in += 16;
- n = 0;
- }
- if (len) {
- (*block)(ivec, ecount_buf, key);
- ctr128_inc(ivec);
- while (len--) {
- out[n] = in[n] ^ ecount_buf[n];
- ++n;
- }
- }
- *num = n;
- }
-
- /* increment upper 96 bits of 128-bit counter by 1 */
- static void ctr96_inc(uint8_t *counter) {
- uint32_t n = 12;
- uint8_t c;
-
- do {
- --n;
- c = counter[n];
- ++c;
- counter[n] = c;
- if (c) {
- return;
- }
- } while (n);
- }
-
- void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out,
- size_t len, const void *key,
- uint8_t ivec[16],
- uint8_t ecount_buf[16],
- unsigned int *num, ctr128_f func) {
- unsigned int n, ctr32;
-
- assert(key && ecount_buf && num);
- assert(len == 0 || (in && out));
- assert(*num < 16);
-
- n = *num;
-
- while (n && len) {
- *(out++) = *(in++) ^ ecount_buf[n];
- --len;
- n = (n + 1) % 16;
- }
-
- ctr32 = GETU32(ivec + 12);
- while (len >= 16) {
- size_t blocks = len / 16;
- /* 1<<28 is just a not-so-small yet not-so-large number...
- * Below condition is practically never met, but it has to
- * be checked for code correctness. */
- if (sizeof(size_t) > sizeof(unsigned int) && blocks > (1U << 28)) {
- blocks = (1U << 28);
- }
- /* As (*func) operates on 32-bit counter, caller
- * has to handle overflow. 'if' below detects the
- * overflow, which is then handled by limiting the
- * amount of blocks to the exact overflow point... */
- ctr32 += (uint32_t)blocks;
- if (ctr32 < blocks) {
- blocks -= ctr32;
- ctr32 = 0;
- }
- (*func)(in, out, blocks, key, ivec);
- /* (*func) does not update ivec, caller does: */
- PUTU32(ivec + 12, ctr32);
- /* ... overflow was detected, propogate carry. */
- if (ctr32 == 0) {
- ctr96_inc(ivec);
- }
- blocks *= 16;
- len -= blocks;
- out += blocks;
- in += blocks;
- }
- if (len) {
- memset(ecount_buf, 0, 16);
- (*func)(ecount_buf, ecount_buf, 1, key, ivec);
- ++ctr32;
- PUTU32(ivec + 12, ctr32);
- if (ctr32 == 0) {
- ctr96_inc(ivec);
- }
- while (len--) {
- out[n] = in[n] ^ ecount_buf[n];
- ++n;
- }
- }
-
- *num = n;
- }
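
The contract spelled out in the comment block above (zero *num and ecount_buf before the first call, and let the caller manage counter overflow in ivec) makes CRYPTO_ctr128_encrypt straightforward to drive. A hedged C usage sketch, assuming block128_f matches the single-block cipher typedef from crypto/modes/internal.h; the ctr_stream wrapper and its parameter names are illustrative only:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Assumed to match the typedef in crypto/modes/internal.h. */
    typedef void (*block128_f)(const uint8_t in[16], uint8_t out[16],
                               const void *key);

    void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                               const void *key, uint8_t ivec[16],
                               uint8_t ecount_buf[16], unsigned int *num,
                               block128_f block);

    /* Encrypt (or, identically, decrypt) `len` bytes in CTR mode.  `ks` is the
     * expanded key for whatever single-block cipher `encrypt_block` wraps;
     * `ivec` holds the big-endian IV/counter and is advanced in place. */
    static void ctr_stream(const void *ks, block128_f encrypt_block,
                           uint8_t ivec[16], const uint8_t *in, uint8_t *out,
                           size_t len) {
      uint8_t ecount_buf[16];
      unsigned int num = 0;                  /* both must start zeroed */
      memset(ecount_buf, 0, sizeof(ecount_buf));
      CRYPTO_ctr128_encrypt(in, out, len, ks, ivec, ecount_buf, &num,
                            encrypt_block);
    }

Because CTR mode only XORs a keystream into the data, the same call performs decryption; carrying num and ecount_buf across calls continues the keystream mid-block instead of restarting it at a block boundary.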