ring-native 0.0.0

Files changed (261)
  1. checksums.yaml +7 -0
  2. data/.gitignore +9 -0
  3. data/Gemfile +3 -0
  4. data/README.md +22 -0
  5. data/Rakefile +1 -0
  6. data/ext/ring/extconf.rb +29 -0
  7. data/lib/ring/native.rb +8 -0
  8. data/lib/ring/native/version.rb +5 -0
  9. data/ring-native.gemspec +25 -0
  10. data/vendor/ring/BUILDING.md +40 -0
  11. data/vendor/ring/Cargo.toml +43 -0
  12. data/vendor/ring/LICENSE +185 -0
  13. data/vendor/ring/Makefile +35 -0
  14. data/vendor/ring/PORTING.md +163 -0
  15. data/vendor/ring/README.md +113 -0
  16. data/vendor/ring/STYLE.md +197 -0
  17. data/vendor/ring/appveyor.yml +27 -0
  18. data/vendor/ring/build.rs +108 -0
  19. data/vendor/ring/crypto/aes/aes.c +1142 -0
  20. data/vendor/ring/crypto/aes/aes_test.Windows.vcxproj +25 -0
  21. data/vendor/ring/crypto/aes/aes_test.cc +93 -0
  22. data/vendor/ring/crypto/aes/asm/aes-586.pl +2368 -0
  23. data/vendor/ring/crypto/aes/asm/aes-armv4.pl +1249 -0
  24. data/vendor/ring/crypto/aes/asm/aes-x86_64.pl +2246 -0
  25. data/vendor/ring/crypto/aes/asm/aesni-x86.pl +1318 -0
  26. data/vendor/ring/crypto/aes/asm/aesni-x86_64.pl +2084 -0
  27. data/vendor/ring/crypto/aes/asm/aesv8-armx.pl +675 -0
  28. data/vendor/ring/crypto/aes/asm/bsaes-armv7.pl +1364 -0
  29. data/vendor/ring/crypto/aes/asm/bsaes-x86_64.pl +1565 -0
  30. data/vendor/ring/crypto/aes/asm/vpaes-x86.pl +841 -0
  31. data/vendor/ring/crypto/aes/asm/vpaes-x86_64.pl +1116 -0
  32. data/vendor/ring/crypto/aes/internal.h +87 -0
  33. data/vendor/ring/crypto/aes/mode_wrappers.c +61 -0
  34. data/vendor/ring/crypto/bn/add.c +394 -0
  35. data/vendor/ring/crypto/bn/asm/armv4-mont.pl +694 -0
  36. data/vendor/ring/crypto/bn/asm/armv8-mont.pl +1503 -0
  37. data/vendor/ring/crypto/bn/asm/bn-586.pl +774 -0
  38. data/vendor/ring/crypto/bn/asm/co-586.pl +287 -0
  39. data/vendor/ring/crypto/bn/asm/rsaz-avx2.pl +1882 -0
  40. data/vendor/ring/crypto/bn/asm/x86-mont.pl +592 -0
  41. data/vendor/ring/crypto/bn/asm/x86_64-gcc.c +599 -0
  42. data/vendor/ring/crypto/bn/asm/x86_64-mont.pl +1393 -0
  43. data/vendor/ring/crypto/bn/asm/x86_64-mont5.pl +3507 -0
  44. data/vendor/ring/crypto/bn/bn.c +352 -0
  45. data/vendor/ring/crypto/bn/bn_asn1.c +74 -0
  46. data/vendor/ring/crypto/bn/bn_test.Windows.vcxproj +25 -0
  47. data/vendor/ring/crypto/bn/bn_test.cc +1696 -0
  48. data/vendor/ring/crypto/bn/cmp.c +200 -0
  49. data/vendor/ring/crypto/bn/convert.c +433 -0
  50. data/vendor/ring/crypto/bn/ctx.c +311 -0
  51. data/vendor/ring/crypto/bn/div.c +594 -0
  52. data/vendor/ring/crypto/bn/exponentiation.c +1335 -0
  53. data/vendor/ring/crypto/bn/gcd.c +711 -0
  54. data/vendor/ring/crypto/bn/generic.c +1019 -0
  55. data/vendor/ring/crypto/bn/internal.h +316 -0
  56. data/vendor/ring/crypto/bn/montgomery.c +516 -0
  57. data/vendor/ring/crypto/bn/mul.c +888 -0
  58. data/vendor/ring/crypto/bn/prime.c +829 -0
  59. data/vendor/ring/crypto/bn/random.c +334 -0
  60. data/vendor/ring/crypto/bn/rsaz_exp.c +262 -0
  61. data/vendor/ring/crypto/bn/rsaz_exp.h +53 -0
  62. data/vendor/ring/crypto/bn/shift.c +276 -0
  63. data/vendor/ring/crypto/bytestring/bytestring_test.Windows.vcxproj +25 -0
  64. data/vendor/ring/crypto/bytestring/bytestring_test.cc +421 -0
  65. data/vendor/ring/crypto/bytestring/cbb.c +399 -0
  66. data/vendor/ring/crypto/bytestring/cbs.c +227 -0
  67. data/vendor/ring/crypto/bytestring/internal.h +46 -0
  68. data/vendor/ring/crypto/chacha/chacha_generic.c +140 -0
  69. data/vendor/ring/crypto/chacha/chacha_vec.c +323 -0
  70. data/vendor/ring/crypto/chacha/chacha_vec_arm.S +1447 -0
  71. data/vendor/ring/crypto/chacha/chacha_vec_arm_generate.go +153 -0
  72. data/vendor/ring/crypto/cipher/cipher_test.Windows.vcxproj +25 -0
  73. data/vendor/ring/crypto/cipher/e_aes.c +390 -0
  74. data/vendor/ring/crypto/cipher/e_chacha20poly1305.c +208 -0
  75. data/vendor/ring/crypto/cipher/internal.h +173 -0
  76. data/vendor/ring/crypto/cipher/test/aes_128_gcm_tests.txt +543 -0
  77. data/vendor/ring/crypto/cipher/test/aes_128_key_wrap_tests.txt +9 -0
  78. data/vendor/ring/crypto/cipher/test/aes_256_gcm_tests.txt +475 -0
  79. data/vendor/ring/crypto/cipher/test/aes_256_key_wrap_tests.txt +23 -0
  80. data/vendor/ring/crypto/cipher/test/chacha20_poly1305_old_tests.txt +422 -0
  81. data/vendor/ring/crypto/cipher/test/chacha20_poly1305_tests.txt +484 -0
  82. data/vendor/ring/crypto/cipher/test/cipher_test.txt +100 -0
  83. data/vendor/ring/crypto/constant_time_test.Windows.vcxproj +25 -0
  84. data/vendor/ring/crypto/constant_time_test.c +304 -0
  85. data/vendor/ring/crypto/cpu-arm-asm.S +32 -0
  86. data/vendor/ring/crypto/cpu-arm.c +199 -0
  87. data/vendor/ring/crypto/cpu-intel.c +261 -0
  88. data/vendor/ring/crypto/crypto.c +151 -0
  89. data/vendor/ring/crypto/curve25519/asm/x25519-arm.S +2118 -0
  90. data/vendor/ring/crypto/curve25519/curve25519.c +4888 -0
  91. data/vendor/ring/crypto/curve25519/x25519_test.cc +128 -0
  92. data/vendor/ring/crypto/digest/md32_common.h +181 -0
  93. data/vendor/ring/crypto/ec/asm/p256-x86_64-asm.pl +2725 -0
  94. data/vendor/ring/crypto/ec/ec.c +193 -0
  95. data/vendor/ring/crypto/ec/ec_curves.c +61 -0
  96. data/vendor/ring/crypto/ec/ec_key.c +228 -0
  97. data/vendor/ring/crypto/ec/ec_montgomery.c +114 -0
  98. data/vendor/ring/crypto/ec/example_mul.Windows.vcxproj +25 -0
  99. data/vendor/ring/crypto/ec/internal.h +243 -0
  100. data/vendor/ring/crypto/ec/oct.c +253 -0
  101. data/vendor/ring/crypto/ec/p256-64.c +1794 -0
  102. data/vendor/ring/crypto/ec/p256-x86_64-table.h +9548 -0
  103. data/vendor/ring/crypto/ec/p256-x86_64.c +509 -0
  104. data/vendor/ring/crypto/ec/simple.c +1007 -0
  105. data/vendor/ring/crypto/ec/util-64.c +183 -0
  106. data/vendor/ring/crypto/ec/wnaf.c +508 -0
  107. data/vendor/ring/crypto/ecdh/ecdh.c +155 -0
  108. data/vendor/ring/crypto/ecdsa/ecdsa.c +304 -0
  109. data/vendor/ring/crypto/ecdsa/ecdsa_asn1.c +193 -0
  110. data/vendor/ring/crypto/ecdsa/ecdsa_test.Windows.vcxproj +25 -0
  111. data/vendor/ring/crypto/ecdsa/ecdsa_test.cc +327 -0
  112. data/vendor/ring/crypto/header_removed.h +17 -0
  113. data/vendor/ring/crypto/internal.h +495 -0
  114. data/vendor/ring/crypto/libring.Windows.vcxproj +101 -0
  115. data/vendor/ring/crypto/mem.c +98 -0
  116. data/vendor/ring/crypto/modes/asm/aesni-gcm-x86_64.pl +1045 -0
  117. data/vendor/ring/crypto/modes/asm/ghash-armv4.pl +517 -0
  118. data/vendor/ring/crypto/modes/asm/ghash-x86.pl +1393 -0
  119. data/vendor/ring/crypto/modes/asm/ghash-x86_64.pl +1741 -0
  120. data/vendor/ring/crypto/modes/asm/ghashv8-armx.pl +422 -0
  121. data/vendor/ring/crypto/modes/ctr.c +226 -0
  122. data/vendor/ring/crypto/modes/gcm.c +1206 -0
  123. data/vendor/ring/crypto/modes/gcm_test.Windows.vcxproj +25 -0
  124. data/vendor/ring/crypto/modes/gcm_test.c +348 -0
  125. data/vendor/ring/crypto/modes/internal.h +299 -0
  126. data/vendor/ring/crypto/perlasm/arm-xlate.pl +170 -0
  127. data/vendor/ring/crypto/perlasm/readme +100 -0
  128. data/vendor/ring/crypto/perlasm/x86_64-xlate.pl +1164 -0
  129. data/vendor/ring/crypto/perlasm/x86asm.pl +292 -0
  130. data/vendor/ring/crypto/perlasm/x86gas.pl +263 -0
  131. data/vendor/ring/crypto/perlasm/x86masm.pl +200 -0
  132. data/vendor/ring/crypto/perlasm/x86nasm.pl +187 -0
  133. data/vendor/ring/crypto/poly1305/poly1305.c +331 -0
  134. data/vendor/ring/crypto/poly1305/poly1305_arm.c +301 -0
  135. data/vendor/ring/crypto/poly1305/poly1305_arm_asm.S +2015 -0
  136. data/vendor/ring/crypto/poly1305/poly1305_test.Windows.vcxproj +25 -0
  137. data/vendor/ring/crypto/poly1305/poly1305_test.cc +80 -0
  138. data/vendor/ring/crypto/poly1305/poly1305_test.txt +52 -0
  139. data/vendor/ring/crypto/poly1305/poly1305_vec.c +892 -0
  140. data/vendor/ring/crypto/rand/asm/rdrand-x86_64.pl +75 -0
  141. data/vendor/ring/crypto/rand/internal.h +32 -0
  142. data/vendor/ring/crypto/rand/rand.c +189 -0
  143. data/vendor/ring/crypto/rand/urandom.c +219 -0
  144. data/vendor/ring/crypto/rand/windows.c +56 -0
  145. data/vendor/ring/crypto/refcount_c11.c +66 -0
  146. data/vendor/ring/crypto/refcount_lock.c +53 -0
  147. data/vendor/ring/crypto/refcount_test.Windows.vcxproj +25 -0
  148. data/vendor/ring/crypto/refcount_test.c +58 -0
  149. data/vendor/ring/crypto/rsa/blinding.c +462 -0
  150. data/vendor/ring/crypto/rsa/internal.h +108 -0
  151. data/vendor/ring/crypto/rsa/padding.c +300 -0
  152. data/vendor/ring/crypto/rsa/rsa.c +450 -0
  153. data/vendor/ring/crypto/rsa/rsa_asn1.c +261 -0
  154. data/vendor/ring/crypto/rsa/rsa_impl.c +944 -0
  155. data/vendor/ring/crypto/rsa/rsa_test.Windows.vcxproj +25 -0
  156. data/vendor/ring/crypto/rsa/rsa_test.cc +437 -0
  157. data/vendor/ring/crypto/sha/asm/sha-armv8.pl +436 -0
  158. data/vendor/ring/crypto/sha/asm/sha-x86_64.pl +2390 -0
  159. data/vendor/ring/crypto/sha/asm/sha256-586.pl +1275 -0
  160. data/vendor/ring/crypto/sha/asm/sha256-armv4.pl +735 -0
  161. data/vendor/ring/crypto/sha/asm/sha256-armv8.pl +14 -0
  162. data/vendor/ring/crypto/sha/asm/sha256-x86_64.pl +14 -0
  163. data/vendor/ring/crypto/sha/asm/sha512-586.pl +911 -0
  164. data/vendor/ring/crypto/sha/asm/sha512-armv4.pl +666 -0
  165. data/vendor/ring/crypto/sha/asm/sha512-armv8.pl +14 -0
  166. data/vendor/ring/crypto/sha/asm/sha512-x86_64.pl +14 -0
  167. data/vendor/ring/crypto/sha/sha1.c +271 -0
  168. data/vendor/ring/crypto/sha/sha256.c +204 -0
  169. data/vendor/ring/crypto/sha/sha512.c +355 -0
  170. data/vendor/ring/crypto/test/file_test.cc +326 -0
  171. data/vendor/ring/crypto/test/file_test.h +181 -0
  172. data/vendor/ring/crypto/test/malloc.cc +150 -0
  173. data/vendor/ring/crypto/test/scoped_types.h +95 -0
  174. data/vendor/ring/crypto/test/test.Windows.vcxproj +35 -0
  175. data/vendor/ring/crypto/test/test_util.cc +46 -0
  176. data/vendor/ring/crypto/test/test_util.h +41 -0
  177. data/vendor/ring/crypto/thread_none.c +55 -0
  178. data/vendor/ring/crypto/thread_pthread.c +165 -0
  179. data/vendor/ring/crypto/thread_test.Windows.vcxproj +25 -0
  180. data/vendor/ring/crypto/thread_test.c +200 -0
  181. data/vendor/ring/crypto/thread_win.c +282 -0
  182. data/vendor/ring/examples/checkdigest.rs +103 -0
  183. data/vendor/ring/include/openssl/aes.h +121 -0
  184. data/vendor/ring/include/openssl/arm_arch.h +129 -0
  185. data/vendor/ring/include/openssl/base.h +156 -0
  186. data/vendor/ring/include/openssl/bn.h +794 -0
  187. data/vendor/ring/include/openssl/buffer.h +18 -0
  188. data/vendor/ring/include/openssl/bytestring.h +235 -0
  189. data/vendor/ring/include/openssl/chacha.h +37 -0
  190. data/vendor/ring/include/openssl/cmac.h +76 -0
  191. data/vendor/ring/include/openssl/cpu.h +184 -0
  192. data/vendor/ring/include/openssl/crypto.h +43 -0
  193. data/vendor/ring/include/openssl/curve25519.h +88 -0
  194. data/vendor/ring/include/openssl/ec.h +225 -0
  195. data/vendor/ring/include/openssl/ec_key.h +129 -0
  196. data/vendor/ring/include/openssl/ecdh.h +110 -0
  197. data/vendor/ring/include/openssl/ecdsa.h +156 -0
  198. data/vendor/ring/include/openssl/err.h +201 -0
  199. data/vendor/ring/include/openssl/mem.h +101 -0
  200. data/vendor/ring/include/openssl/obj_mac.h +71 -0
  201. data/vendor/ring/include/openssl/opensslfeatures.h +68 -0
  202. data/vendor/ring/include/openssl/opensslv.h +18 -0
  203. data/vendor/ring/include/openssl/ossl_typ.h +18 -0
  204. data/vendor/ring/include/openssl/poly1305.h +51 -0
  205. data/vendor/ring/include/openssl/rand.h +70 -0
  206. data/vendor/ring/include/openssl/rsa.h +399 -0
  207. data/vendor/ring/include/openssl/thread.h +133 -0
  208. data/vendor/ring/include/openssl/type_check.h +71 -0
  209. data/vendor/ring/mk/Common.props +63 -0
  210. data/vendor/ring/mk/Windows.props +42 -0
  211. data/vendor/ring/mk/WindowsTest.props +18 -0
  212. data/vendor/ring/mk/appveyor.bat +62 -0
  213. data/vendor/ring/mk/bottom_of_makefile.mk +54 -0
  214. data/vendor/ring/mk/ring.mk +266 -0
  215. data/vendor/ring/mk/top_of_makefile.mk +214 -0
  216. data/vendor/ring/mk/travis.sh +40 -0
  217. data/vendor/ring/mk/update-travis-yml.py +229 -0
  218. data/vendor/ring/ring.sln +153 -0
  219. data/vendor/ring/src/aead.rs +682 -0
  220. data/vendor/ring/src/agreement.rs +248 -0
  221. data/vendor/ring/src/c.rs +129 -0
  222. data/vendor/ring/src/constant_time.rs +37 -0
  223. data/vendor/ring/src/der.rs +96 -0
  224. data/vendor/ring/src/digest.rs +690 -0
  225. data/vendor/ring/src/digest_tests.txt +57 -0
  226. data/vendor/ring/src/ecc.rs +28 -0
  227. data/vendor/ring/src/ecc_build.rs +279 -0
  228. data/vendor/ring/src/ecc_curves.rs +117 -0
  229. data/vendor/ring/src/ed25519_tests.txt +2579 -0
  230. data/vendor/ring/src/exe_tests.rs +46 -0
  231. data/vendor/ring/src/ffi.rs +29 -0
  232. data/vendor/ring/src/file_test.rs +187 -0
  233. data/vendor/ring/src/hkdf.rs +153 -0
  234. data/vendor/ring/src/hkdf_tests.txt +59 -0
  235. data/vendor/ring/src/hmac.rs +414 -0
  236. data/vendor/ring/src/hmac_tests.txt +97 -0
  237. data/vendor/ring/src/input.rs +312 -0
  238. data/vendor/ring/src/lib.rs +41 -0
  239. data/vendor/ring/src/pbkdf2.rs +265 -0
  240. data/vendor/ring/src/pbkdf2_tests.txt +113 -0
  241. data/vendor/ring/src/polyfill.rs +57 -0
  242. data/vendor/ring/src/rand.rs +28 -0
  243. data/vendor/ring/src/signature.rs +314 -0
  244. data/vendor/ring/third-party/NIST/README.md +9 -0
  245. data/vendor/ring/third-party/NIST/SHAVS/SHA1LongMsg.rsp +263 -0
  246. data/vendor/ring/third-party/NIST/SHAVS/SHA1Monte.rsp +309 -0
  247. data/vendor/ring/third-party/NIST/SHAVS/SHA1ShortMsg.rsp +267 -0
  248. data/vendor/ring/third-party/NIST/SHAVS/SHA224LongMsg.rsp +263 -0
  249. data/vendor/ring/third-party/NIST/SHAVS/SHA224Monte.rsp +309 -0
  250. data/vendor/ring/third-party/NIST/SHAVS/SHA224ShortMsg.rsp +267 -0
  251. data/vendor/ring/third-party/NIST/SHAVS/SHA256LongMsg.rsp +263 -0
  252. data/vendor/ring/third-party/NIST/SHAVS/SHA256Monte.rsp +309 -0
  253. data/vendor/ring/third-party/NIST/SHAVS/SHA256ShortMsg.rsp +267 -0
  254. data/vendor/ring/third-party/NIST/SHAVS/SHA384LongMsg.rsp +519 -0
  255. data/vendor/ring/third-party/NIST/SHAVS/SHA384Monte.rsp +309 -0
  256. data/vendor/ring/third-party/NIST/SHAVS/SHA384ShortMsg.rsp +523 -0
  257. data/vendor/ring/third-party/NIST/SHAVS/SHA512LongMsg.rsp +519 -0
  258. data/vendor/ring/third-party/NIST/SHAVS/SHA512Monte.rsp +309 -0
  259. data/vendor/ring/third-party/NIST/SHAVS/SHA512ShortMsg.rsp +523 -0
  260. data/vendor/ring/third-party/NIST/sha256sums.txt +1 -0
  261. metadata +333 -0
data/vendor/ring/crypto/aes/asm/vpaes-x86_64.pl
@@ -0,0 +1,1116 @@
+ #!/usr/bin/env perl
+
+ ######################################################################
+ ## Constant-time SSSE3 AES core implementation.
+ ## version 0.1
+ ##
+ ## By Mike Hamburg (Stanford University), 2009
+ ## Public domain.
+ ##
+ ## For details see http://shiftleft.org/papers/vector_aes/ and
+ ## http://crypto.stanford.edu/vpaes/.
+
+ ######################################################################
+ # September 2011.
+ #
+ # Interface to OpenSSL as "almost" drop-in replacement for
+ # aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
+ # doesn't handle partial vectors (doesn't have to if called from
+ # EVP only). "Drop-in" implies that this module doesn't share key
+ # schedule structure with the original nor does it make assumption
+ # about its alignment...
+ #
+ # Performance summary. aes-x86_64.pl column lists large-block CBC
+ # encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+ # byte processed with 128-bit key, and vpaes-x86_64.pl column -
+ # [also large-block CBC] encrypt/decrypt.
+ #
+ #                aes-x86_64.pl           vpaes-x86_64.pl
+ #
+ # Core 2(**)     29.6/41.1/14.3          21.9/25.2(***)
+ # Nehalem        29.6/40.3/14.6          10.0/11.8
+ # Atom           57.3/74.2/32.1          60.9/77.2(***)
+ # Silvermont     52.7/64.0/19.5          48.8/60.8(***)
+ #
+ # (*) "Hyper-threading" in the context refers rather to cache shared
+ # among multiple cores, than to specifically Intel HTT. As vast
+ # majority of contemporary cores share cache, slower code path
+ # is common place. In other words "with-hyper-threading-off"
+ # results are presented mostly for reference purposes.
+ #
+ # (**) "Core 2" refers to initial 65nm design, a.k.a. Conroe.
+ #
+ # (***) Less impressive improvement on Core 2 and Atom is due to slow
+ # pshufb, yet it's respectable +36%/62% improvement on Core 2
+ # (as implied, over "hyper-threading-safe" code path).
+ #
+ # <appro@openssl.org>
+
+ $flavour = shift;
+ $output = shift;
+ if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+ $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+ die "can't locate x86_64-xlate.pl";
+
+ open OUT,"| \"$^X\" $xlate $flavour $output";
+ *STDOUT=*OUT;
+
+ $PREFIX="vpaes";
+
+ $code.=<<___;
+ .text
+
+ ##
+ ## _aes_encrypt_core
+ ##
+ ## AES-encrypt %xmm0.
+ ##
+ ## Inputs:
+ ## %xmm0 = input
+ ## %xmm9-%xmm15 as in _vpaes_preheat
+ ## (%rdx) = scheduled keys
+ ##
+ ## Output in %xmm0
+ ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
+ ## Preserves %xmm6 - %xmm8 so you get some local vectors
+ ##
+ ##
+ .type _vpaes_encrypt_core,\@abi-omnipotent
+ .align 16
+ _vpaes_encrypt_core:
+ mov %rdx, %r9
+ mov \$16, %r11
+ mov 240(%rdx),%eax
+ movdqa %xmm9, %xmm1
+ movdqa .Lk_ipt(%rip), %xmm2 # iptlo
+ pandn %xmm0, %xmm1
+ movdqu (%r9), %xmm5 # round0 key
+ psrld \$4, %xmm1
+ pand %xmm9, %xmm0
+ pshufb %xmm0, %xmm2
+ movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi
+ pshufb %xmm1, %xmm0
+ pxor %xmm5, %xmm2
+ add \$16, %r9
+ pxor %xmm2, %xmm0
+ lea .Lk_mc_backward(%rip),%r10
+ jmp .Lenc_entry
+
+ .align 16
+ .Lenc_loop:
+ # middle of middle round
+ movdqa %xmm13, %xmm4 # 4 : sb1u
+ movdqa %xmm12, %xmm0 # 0 : sb1t
+ pshufb %xmm2, %xmm4 # 4 = sb1u
+ pshufb %xmm3, %xmm0 # 0 = sb1t
+ pxor %xmm5, %xmm4 # 4 = sb1u + k
+ movdqa %xmm15, %xmm5 # 4 : sb2u
+ pxor %xmm4, %xmm0 # 0 = A
+ movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
+ pshufb %xmm2, %xmm5 # 4 = sb2u
+ movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
+ movdqa %xmm14, %xmm2 # 2 : sb2t
+ pshufb %xmm3, %xmm2 # 2 = sb2t
+ movdqa %xmm0, %xmm3 # 3 = A
+ pxor %xmm5, %xmm2 # 2 = 2A
+ pshufb %xmm1, %xmm0 # 0 = B
+ add \$16, %r9 # next key
+ pxor %xmm2, %xmm0 # 0 = 2A+B
+ pshufb %xmm4, %xmm3 # 3 = D
+ add \$16, %r11 # next mc
+ pxor %xmm0, %xmm3 # 3 = 2A+B+D
+ pshufb %xmm1, %xmm0 # 0 = 2B+C
+ and \$0x30, %r11 # ... mod 4
+ sub \$1,%rax # nr--
+ pxor %xmm3, %xmm0 # 0 = 2A+3B+C+D
+
+ .Lenc_entry:
+ # top of round
+ movdqa %xmm9, %xmm1 # 1 : i
+ movdqa %xmm11, %xmm5 # 2 : a/k
+ pandn %xmm0, %xmm1 # 1 = i<<4
+ psrld \$4, %xmm1 # 1 = i
+ pand %xmm9, %xmm0 # 0 = k
+ pshufb %xmm0, %xmm5 # 2 = a/k
+ movdqa %xmm10, %xmm3 # 3 : 1/i
+ pxor %xmm1, %xmm0 # 0 = j
+ pshufb %xmm1, %xmm3 # 3 = 1/i
+ movdqa %xmm10, %xmm4 # 4 : 1/j
+ pxor %xmm5, %xmm3 # 3 = iak = 1/i + a/k
+ pshufb %xmm0, %xmm4 # 4 = 1/j
+ movdqa %xmm10, %xmm2 # 2 : 1/iak
+ pxor %xmm5, %xmm4 # 4 = jak = 1/j + a/k
+ pshufb %xmm3, %xmm2 # 2 = 1/iak
+ movdqa %xmm10, %xmm3 # 3 : 1/jak
+ pxor %xmm0, %xmm2 # 2 = io
+ pshufb %xmm4, %xmm3 # 3 = 1/jak
+ movdqu (%r9), %xmm5
+ pxor %xmm1, %xmm3 # 3 = jo
+ jnz .Lenc_loop
+
+ # middle of last round
+ movdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
+ movdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
+ pshufb %xmm2, %xmm4 # 4 = sbou
+ pxor %xmm5, %xmm4 # 4 = sb1u + k
+ pshufb %xmm3, %xmm0 # 0 = sb1t
+ movdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
+ pxor %xmm4, %xmm0 # 0 = A
+ pshufb %xmm1, %xmm0
+ ret
+ .size _vpaes_encrypt_core,.-_vpaes_encrypt_core
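The comment trail in .Lenc_loop above tracks MixColumns as xors of shuffled S-box outputs, ending at "0 = 2A+3B+C+D". For reference, a minimal scalar sketch of that same [2 3 1 1] MixColumns row in Perl; xtime() and the byte names are illustrative and not part of the diffed file:

    sub xtime { my $b = $_[0] << 1; ($b & 0x100) ? (($b ^ 0x1B) & 0xFF) : $b }
    sub mc_byte {                                   # one MixColumns output byte
        my ($a, $b, $c, $d) = @_;                   # one column of the state
        return xtime($a) ^ xtime($b) ^ $b ^ $c ^ $d;  # 2a + 3b + c + d in GF(2^8)
    }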
+
+ ##
+ ## Decryption core
+ ##
+ ## Same API as encryption core.
+ ##
+ .type _vpaes_decrypt_core,\@abi-omnipotent
+ .align 16
+ _vpaes_decrypt_core:
+ mov %rdx, %r9 # load key
+ mov 240(%rdx),%eax
+ movdqa %xmm9, %xmm1
+ movdqa .Lk_dipt(%rip), %xmm2 # iptlo
+ pandn %xmm0, %xmm1
+ mov %rax, %r11
+ psrld \$4, %xmm1
+ movdqu (%r9), %xmm5 # round0 key
+ shl \$4, %r11
+ pand %xmm9, %xmm0
+ pshufb %xmm0, %xmm2
+ movdqa .Lk_dipt+16(%rip), %xmm0 # ipthi
+ xor \$0x30, %r11
+ lea .Lk_dsbd(%rip),%r10
+ pshufb %xmm1, %xmm0
+ and \$0x30, %r11
+ pxor %xmm5, %xmm2
+ movdqa .Lk_mc_forward+48(%rip), %xmm5
+ pxor %xmm2, %xmm0
+ add \$16, %r9
+ add %r10, %r11
+ jmp .Ldec_entry
+
+ .align 16
+ .Ldec_loop:
+ ##
+ ## Inverse mix columns
+ ##
+ movdqa -0x20(%r10),%xmm4 # 4 : sb9u
+ movdqa -0x10(%r10),%xmm1 # 0 : sb9t
+ pshufb %xmm2, %xmm4 # 4 = sb9u
+ pshufb %xmm3, %xmm1 # 0 = sb9t
+ pxor %xmm4, %xmm0
+ movdqa 0x00(%r10),%xmm4 # 4 : sbdu
+ pxor %xmm1, %xmm0 # 0 = ch
+ movdqa 0x10(%r10),%xmm1 # 0 : sbdt
+
+ pshufb %xmm2, %xmm4 # 4 = sbdu
+ pshufb %xmm5, %xmm0 # MC ch
+ pshufb %xmm3, %xmm1 # 0 = sbdt
+ pxor %xmm4, %xmm0 # 4 = ch
+ movdqa 0x20(%r10),%xmm4 # 4 : sbbu
+ pxor %xmm1, %xmm0 # 0 = ch
+ movdqa 0x30(%r10),%xmm1 # 0 : sbbt
+
+ pshufb %xmm2, %xmm4 # 4 = sbbu
+ pshufb %xmm5, %xmm0 # MC ch
+ pshufb %xmm3, %xmm1 # 0 = sbbt
+ pxor %xmm4, %xmm0 # 4 = ch
+ movdqa 0x40(%r10),%xmm4 # 4 : sbeu
+ pxor %xmm1, %xmm0 # 0 = ch
+ movdqa 0x50(%r10),%xmm1 # 0 : sbet
+
+ pshufb %xmm2, %xmm4 # 4 = sbeu
+ pshufb %xmm5, %xmm0 # MC ch
+ pshufb %xmm3, %xmm1 # 0 = sbet
+ pxor %xmm4, %xmm0 # 4 = ch
+ add \$16, %r9 # next round key
+ palignr \$12, %xmm5, %xmm5
+ pxor %xmm1, %xmm0 # 0 = ch
+ sub \$1,%rax # nr--
+
+ .Ldec_entry:
+ # top of round
+ movdqa %xmm9, %xmm1 # 1 : i
+ pandn %xmm0, %xmm1 # 1 = i<<4
+ movdqa %xmm11, %xmm2 # 2 : a/k
+ psrld \$4, %xmm1 # 1 = i
+ pand %xmm9, %xmm0 # 0 = k
+ pshufb %xmm0, %xmm2 # 2 = a/k
+ movdqa %xmm10, %xmm3 # 3 : 1/i
+ pxor %xmm1, %xmm0 # 0 = j
+ pshufb %xmm1, %xmm3 # 3 = 1/i
+ movdqa %xmm10, %xmm4 # 4 : 1/j
+ pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k
+ pshufb %xmm0, %xmm4 # 4 = 1/j
+ pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k
+ movdqa %xmm10, %xmm2 # 2 : 1/iak
+ pshufb %xmm3, %xmm2 # 2 = 1/iak
+ movdqa %xmm10, %xmm3 # 3 : 1/jak
+ pxor %xmm0, %xmm2 # 2 = io
+ pshufb %xmm4, %xmm3 # 3 = 1/jak
+ movdqu (%r9), %xmm0
+ pxor %xmm1, %xmm3 # 3 = jo
+ jnz .Ldec_loop
+
+ # middle of last round
+ movdqa 0x60(%r10), %xmm4 # 3 : sbou
+ pshufb %xmm2, %xmm4 # 4 = sbou
+ pxor %xmm0, %xmm4 # 4 = sb1u + k
+ movdqa 0x70(%r10), %xmm0 # 0 : sbot
+ movdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
+ pshufb %xmm3, %xmm0 # 0 = sb1t
+ pxor %xmm4, %xmm0 # 0 = A
+ pshufb %xmm2, %xmm0
+ ret
+ .size _vpaes_decrypt_core,.-_vpaes_decrypt_core
+
+ ########################################################
+ ##                                                    ##
+ ##                  AES key schedule                  ##
+ ##                                                    ##
+ ########################################################
+ .type _vpaes_schedule_core,\@abi-omnipotent
+ .align 16
+ _vpaes_schedule_core:
+ # rdi = key
+ # rsi = size in bits
+ # rdx = buffer
+ # rcx = direction. 0=encrypt, 1=decrypt
+
+ call _vpaes_preheat # load the tables
+ movdqa .Lk_rcon(%rip), %xmm8 # load rcon
+ movdqu (%rdi), %xmm0 # load key (unaligned)
+
+ # input transform
+ movdqa %xmm0, %xmm3
+ lea .Lk_ipt(%rip), %r11
+ call _vpaes_schedule_transform
+ movdqa %xmm0, %xmm7
+
+ lea .Lk_sr(%rip),%r10
+ test %rcx, %rcx
+ jnz .Lschedule_am_decrypting
+
+ # encrypting, output zeroth round key after transform
+ movdqu %xmm0, (%rdx)
+ jmp .Lschedule_go
+
+ .Lschedule_am_decrypting:
+ # decrypting, output zeroth round key after shiftrows
+ movdqa (%r8,%r10),%xmm1
+ pshufb %xmm1, %xmm3
+ movdqu %xmm3, (%rdx)
+ xor \$0x30, %r8
+
+ .Lschedule_go:
+ cmp \$192, %esi
+ ja .Lschedule_256
+ je .Lschedule_192
+ # 128: fall through
+
+ ##
+ ## .schedule_128
+ ##
+ ## 128-bit specific part of key schedule.
+ ##
+ ## This schedule is really simple, because all its parts
+ ## are accomplished by the subroutines.
+ ##
+ .Lschedule_128:
+ mov \$10, %esi
+
+ .Loop_schedule_128:
+ call _vpaes_schedule_round
+ dec %rsi
+ jz .Lschedule_mangle_last
+ call _vpaes_schedule_mangle # write output
+ jmp .Loop_schedule_128
+
+ ##
+ ## .aes_schedule_192
+ ##
+ ## 192-bit specific part of key schedule.
+ ##
+ ## The main body of this schedule is the same as the 128-bit
+ ## schedule, but with more smearing. The long, high side is
+ ## stored in %xmm7 as before, and the short, low side is in
+ ## the high bits of %xmm6.
+ ##
+ ## This schedule is somewhat nastier, however, because each
+ ## round produces 192 bits of key material, or 1.5 round keys.
+ ## Therefore, on each cycle we do 2 rounds and produce 3 round
+ ## keys.
+ ##
+ .align 16
+ .Lschedule_192:
+ movdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
+ call _vpaes_schedule_transform # input transform
+ movdqa %xmm0, %xmm6 # save short part
+ pxor %xmm4, %xmm4 # clear 4
+ movhlps %xmm4, %xmm6 # clobber low side with zeros
+ mov \$4, %esi
+
+ .Loop_schedule_192:
+ call _vpaes_schedule_round
+ palignr \$8,%xmm6,%xmm0
+ call _vpaes_schedule_mangle # save key n
+ call _vpaes_schedule_192_smear
+ call _vpaes_schedule_mangle # save key n+1
+ call _vpaes_schedule_round
+ dec %rsi
+ jz .Lschedule_mangle_last
+ call _vpaes_schedule_mangle # save key n+2
+ call _vpaes_schedule_192_smear
+ jmp .Loop_schedule_192
+
+ ##
+ ## .aes_schedule_256
+ ##
+ ## 256-bit specific part of key schedule.
+ ##
+ ## The structure here is very similar to the 128-bit
+ ## schedule, but with an additional "low side" in
+ ## %xmm6. The low side's rounds are the same as the
+ ## high side's, except no rcon and no rotation.
+ ##
+ .align 16
+ .Lschedule_256:
+ movdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
+ call _vpaes_schedule_transform # input transform
+ mov \$7, %esi
+
+ .Loop_schedule_256:
+ call _vpaes_schedule_mangle # output low result
+ movdqa %xmm0, %xmm6 # save cur_lo in xmm6
+
+ # high round
+ call _vpaes_schedule_round
+ dec %rsi
+ jz .Lschedule_mangle_last
+ call _vpaes_schedule_mangle
+
+ # low round. swap xmm7 and xmm6
+ pshufd \$0xFF, %xmm0, %xmm0
+ movdqa %xmm7, %xmm5
+ movdqa %xmm6, %xmm7
+ call _vpaes_schedule_low_round
+ movdqa %xmm5, %xmm7
+
+ jmp .Loop_schedule_256
+
+
+ ##
+ ## .aes_schedule_mangle_last
+ ##
+ ## Mangler for last round of key schedule
+ ## Mangles %xmm0
+ ## when encrypting, outputs out(%xmm0) ^ 63
+ ## when decrypting, outputs unskew(%xmm0)
+ ##
+ ## Always called right before return... jumps to cleanup and exits
+ ##
+ .align 16
+ .Lschedule_mangle_last:
+ # schedule last round key from xmm0
+ lea .Lk_deskew(%rip),%r11 # prepare to deskew
+ test %rcx, %rcx
+ jnz .Lschedule_mangle_last_dec
+
+ # encrypting
+ movdqa (%r8,%r10),%xmm1
+ pshufb %xmm1, %xmm0 # output permute
+ lea .Lk_opt(%rip), %r11 # prepare to output transform
+ add \$32, %rdx
+
+ .Lschedule_mangle_last_dec:
+ add \$-16, %rdx
+ pxor .Lk_s63(%rip), %xmm0
+ call _vpaes_schedule_transform # output transform
+ movdqu %xmm0, (%rdx) # save last key
+
+ # cleanup
+ pxor %xmm0, %xmm0
+ pxor %xmm1, %xmm1
+ pxor %xmm2, %xmm2
+ pxor %xmm3, %xmm3
+ pxor %xmm4, %xmm4
+ pxor %xmm5, %xmm5
+ pxor %xmm6, %xmm6
+ pxor %xmm7, %xmm7
+ ret
+ .size _vpaes_schedule_core,.-_vpaes_schedule_core
+
+ ##
+ ## .aes_schedule_192_smear
+ ##
+ ## Smear the short, low side in the 192-bit key schedule.
+ ##
+ ## Inputs:
+ ## %xmm7: high side, b a x y
+ ## %xmm6: low side, d c 0 0
+ ## %xmm13: 0
+ ##
+ ## Outputs:
+ ## %xmm6: b+c+d b+c 0 0
+ ## %xmm0: b+c+d b+c b a
+ ##
+ .type _vpaes_schedule_192_smear,\@abi-omnipotent
+ .align 16
+ _vpaes_schedule_192_smear:
+ pshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
+ pshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
+ pxor %xmm1, %xmm6 # -> c+d c 0 0
+ pxor %xmm1, %xmm1
+ pxor %xmm0, %xmm6 # -> b+c+d b+c b a
+ movdqa %xmm6, %xmm0
+ movhlps %xmm1, %xmm6 # clobber low side with zeros
+ ret
+ .size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
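For readers tracing the dword bookkeeping, here is a hypothetical scalar model of the smear, with each register written as four dwords listed high to low to match the comments (hi = [b, a, x, y], lo = [d, c, 0, 0]); the names are illustrative only:

    sub smear_192 {
        my ($hi, $lo) = @_;                     # array refs, four dwords each, high to low
        my @t = @{$lo}[1, 3, 3, 3];             # pshufd 0x80: d c 0 0 -> c 0 0 0
        my @u = @{$hi}[0, 0, 0, 1];             # pshufd 0xFE: b a x y -> b b b a
        my @r = map { $lo->[$_] ^ $t[$_] ^ $u[$_] } 0 .. 3;   # -> b+c+d b+c b a
        return (\@r, [ @r[0, 1], 0, 0 ]);       # (%xmm0, %xmm6 after movhlps zeroes the low side)
    }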
476
+
477
+ ##
478
+ ## .aes_schedule_round
479
+ ##
480
+ ## Runs one main round of the key schedule on %xmm0, %xmm7
481
+ ##
482
+ ## Specifically, runs subbytes on the high dword of %xmm0
483
+ ## then rotates it by one byte and xors into the low dword of
484
+ ## %xmm7.
485
+ ##
486
+ ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
487
+ ## next rcon.
488
+ ##
489
+ ## Smears the dwords of %xmm7 by xoring the low into the
490
+ ## second low, result into third, result into highest.
491
+ ##
492
+ ## Returns results in %xmm7 = %xmm0.
493
+ ## Clobbers %xmm1-%xmm4, %r11.
494
+ ##
495
+ .type _vpaes_schedule_round,\@abi-omnipotent
496
+ .align 16
497
+ _vpaes_schedule_round:
498
+ # extract rcon from xmm8
499
+ pxor %xmm1, %xmm1
500
+ palignr \$15, %xmm8, %xmm1
501
+ palignr \$15, %xmm8, %xmm8
502
+ pxor %xmm1, %xmm7
503
+
504
+ # rotate
505
+ pshufd \$0xFF, %xmm0, %xmm0
506
+ palignr \$1, %xmm0, %xmm0
507
+
508
+ # fall through...
509
+
510
+ # low round: same as high round, but no rotation and no rcon.
511
+ _vpaes_schedule_low_round:
512
+ # smear xmm7
513
+ movdqa %xmm7, %xmm1
514
+ pslldq \$4, %xmm7
515
+ pxor %xmm1, %xmm7
516
+ movdqa %xmm7, %xmm1
517
+ pslldq \$8, %xmm7
518
+ pxor %xmm1, %xmm7
519
+ pxor .Lk_s63(%rip), %xmm7
520
+
521
+ # subbytes
522
+ movdqa %xmm9, %xmm1
523
+ pandn %xmm0, %xmm1
524
+ psrld \$4, %xmm1 # 1 = i
525
+ pand %xmm9, %xmm0 # 0 = k
526
+ movdqa %xmm11, %xmm2 # 2 : a/k
527
+ pshufb %xmm0, %xmm2 # 2 = a/k
528
+ pxor %xmm1, %xmm0 # 0 = j
529
+ movdqa %xmm10, %xmm3 # 3 : 1/i
530
+ pshufb %xmm1, %xmm3 # 3 = 1/i
531
+ pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k
532
+ movdqa %xmm10, %xmm4 # 4 : 1/j
533
+ pshufb %xmm0, %xmm4 # 4 = 1/j
534
+ pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k
535
+ movdqa %xmm10, %xmm2 # 2 : 1/iak
536
+ pshufb %xmm3, %xmm2 # 2 = 1/iak
537
+ pxor %xmm0, %xmm2 # 2 = io
538
+ movdqa %xmm10, %xmm3 # 3 : 1/jak
539
+ pshufb %xmm4, %xmm3 # 3 = 1/jak
540
+ pxor %xmm1, %xmm3 # 3 = jo
541
+ movdqa %xmm13, %xmm4 # 4 : sbou
542
+ pshufb %xmm2, %xmm4 # 4 = sbou
543
+ movdqa %xmm12, %xmm0 # 0 : sbot
544
+ pshufb %xmm3, %xmm0 # 0 = sb1t
545
+ pxor %xmm4, %xmm0 # 0 = sbox output
546
+
547
+ # add in smeared stuff
548
+ pxor %xmm7, %xmm0
549
+ movdqa %xmm0, %xmm7
550
+ ret
551
+ .size _vpaes_schedule_round,.-_vpaes_schedule_round
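Functionally this routine is the standard FIPS-197 key-expansion step: RotWord and SubWord on the previous last column, the round constant, then the xor smear across columns. A minimal scalar sketch under that reading, where the hypothetical sub_byte() stands in for the S-box the pshufb sequence computes and the key is held as 16 column-major bytes:

    sub expand_round {
        my ($k, $rcon) = @_;                    # $k: ref to 16 key bytes, column-major
        # sub_byte($x) is assumed: the AES S-box lookup done above via pshufb tables
        my @t = map { sub_byte($_) } @{$k}[13, 14, 15, 12];   # RotWord then SubWord
        $t[0] ^= $rcon;                         # fold in the round constant
        my @w;
        for my $i (0 .. 15) {                   # smear: each column xors the previous result
            push @w, $k->[$i] ^ ($i < 4 ? $t[$i] : $w[$i - 4]);
        }
        return \@w;                             # the next round key
    }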
552
+
553
+ ##
554
+ ## .aes_schedule_transform
555
+ ##
556
+ ## Linear-transform %xmm0 according to tables at (%r11)
557
+ ##
558
+ ## Requires that %xmm9 = 0x0F0F... as in preheat
559
+ ## Output in %xmm0
560
+ ## Clobbers %xmm1, %xmm2
561
+ ##
562
+ .type _vpaes_schedule_transform,\@abi-omnipotent
563
+ .align 16
564
+ _vpaes_schedule_transform:
565
+ movdqa %xmm9, %xmm1
566
+ pandn %xmm0, %xmm1
567
+ psrld \$4, %xmm1
568
+ pand %xmm9, %xmm0
569
+ movdqa (%r11), %xmm2 # lo
570
+ pshufb %xmm0, %xmm2
571
+ movdqa 16(%r11), %xmm0 # hi
572
+ pshufb %xmm1, %xmm0
573
+ pxor %xmm2, %xmm0
574
+ ret
575
+ .size _vpaes_schedule_transform,.-_vpaes_schedule_transform
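Byte for byte, the pand/psrld/pshufb/pxor sequence above is a split-nibble table lookup: the low nibble indexes the 16-byte table at (%r11), the high nibble the one at 16(%r11), and the two results are xored. A one-line model (names illustrative):

    sub transform_byte {
        my ($x, $lo, $hi) = @_;     # $lo/$hi: refs to the 16-entry lo/hi tables
        return $lo->[$x & 0x0F] ^ $hi->[($x >> 4) & 0x0F];   # lo[low nibble] ^ hi[high nibble]
    }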
576
+
577
+ ##
578
+ ## .aes_schedule_mangle
579
+ ##
580
+ ## Mangle xmm0 from (basis-transformed) standard version
581
+ ## to our version.
582
+ ##
583
+ ## On encrypt,
584
+ ## xor with 0x63
585
+ ## multiply by circulant 0,1,1,1
586
+ ## apply shiftrows transform
587
+ ##
588
+ ## On decrypt,
589
+ ## xor with 0x63
590
+ ## multiply by "inverse mixcolumns" circulant E,B,D,9
591
+ ## deskew
592
+ ## apply shiftrows transform
593
+ ##
594
+ ##
595
+ ## Writes out to (%rdx), and increments or decrements it
596
+ ## Keeps track of round number mod 4 in %r8
597
+ ## Preserves xmm0
598
+ ## Clobbers xmm1-xmm5
599
+ ##
600
+ .type _vpaes_schedule_mangle,\@abi-omnipotent
601
+ .align 16
602
+ _vpaes_schedule_mangle:
603
+ movdqa %xmm0, %xmm4 # save xmm0 for later
604
+ movdqa .Lk_mc_forward(%rip),%xmm5
605
+ test %rcx, %rcx
606
+ jnz .Lschedule_mangle_dec
607
+
608
+ # encrypting
609
+ add \$16, %rdx
610
+ pxor .Lk_s63(%rip),%xmm4
611
+ pshufb %xmm5, %xmm4
612
+ movdqa %xmm4, %xmm3
613
+ pshufb %xmm5, %xmm4
614
+ pxor %xmm4, %xmm3
615
+ pshufb %xmm5, %xmm4
616
+ pxor %xmm4, %xmm3
617
+
618
+ jmp .Lschedule_mangle_both
619
+ .align 16
620
+ .Lschedule_mangle_dec:
621
+ # inverse mix columns
622
+ lea .Lk_dksd(%rip),%r11
623
+ movdqa %xmm9, %xmm1
624
+ pandn %xmm4, %xmm1
625
+ psrld \$4, %xmm1 # 1 = hi
626
+ pand %xmm9, %xmm4 # 4 = lo
627
+
628
+ movdqa 0x00(%r11), %xmm2
629
+ pshufb %xmm4, %xmm2
630
+ movdqa 0x10(%r11), %xmm3
631
+ pshufb %xmm1, %xmm3
632
+ pxor %xmm2, %xmm3
633
+ pshufb %xmm5, %xmm3
634
+
635
+ movdqa 0x20(%r11), %xmm2
636
+ pshufb %xmm4, %xmm2
637
+ pxor %xmm3, %xmm2
638
+ movdqa 0x30(%r11), %xmm3
639
+ pshufb %xmm1, %xmm3
640
+ pxor %xmm2, %xmm3
641
+ pshufb %xmm5, %xmm3
642
+
643
+ movdqa 0x40(%r11), %xmm2
644
+ pshufb %xmm4, %xmm2
645
+ pxor %xmm3, %xmm2
646
+ movdqa 0x50(%r11), %xmm3
647
+ pshufb %xmm1, %xmm3
648
+ pxor %xmm2, %xmm3
649
+ pshufb %xmm5, %xmm3
650
+
651
+ movdqa 0x60(%r11), %xmm2
652
+ pshufb %xmm4, %xmm2
653
+ pxor %xmm3, %xmm2
654
+ movdqa 0x70(%r11), %xmm3
655
+ pshufb %xmm1, %xmm3
656
+ pxor %xmm2, %xmm3
657
+
658
+ add \$-16, %rdx
659
+
660
+ .Lschedule_mangle_both:
661
+ movdqa (%r8,%r10),%xmm1
662
+ pshufb %xmm1,%xmm3
663
+ add \$-16, %r8
664
+ and \$0x30, %r8
665
+ movdqu %xmm3, (%rdx)
666
+ ret
667
+ .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
668
+
669
+ #
670
+ # Interface to OpenSSL
671
+ #
672
+ .globl ${PREFIX}_set_encrypt_key
673
+ .type ${PREFIX}_set_encrypt_key,\@function,3
674
+ .align 16
675
+ ${PREFIX}_set_encrypt_key:
676
+ ___
677
+ $code.=<<___ if ($win64);
678
+ lea -0xb8(%rsp),%rsp
679
+ movaps %xmm6,0x10(%rsp)
680
+ movaps %xmm7,0x20(%rsp)
681
+ movaps %xmm8,0x30(%rsp)
682
+ movaps %xmm9,0x40(%rsp)
683
+ movaps %xmm10,0x50(%rsp)
684
+ movaps %xmm11,0x60(%rsp)
685
+ movaps %xmm12,0x70(%rsp)
686
+ movaps %xmm13,0x80(%rsp)
687
+ movaps %xmm14,0x90(%rsp)
688
+ movaps %xmm15,0xa0(%rsp)
689
+ .Lenc_key_body:
690
+ ___
691
+ $code.=<<___;
692
+ mov %esi,%eax
693
+ shr \$5,%eax
694
+ add \$5,%eax
695
+ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
696
+
697
+ mov \$0,%ecx
698
+ mov \$0x30,%r8d
699
+ call _vpaes_schedule_core
700
+ ___
701
+ $code.=<<___ if ($win64);
702
+ movaps 0x10(%rsp),%xmm6
703
+ movaps 0x20(%rsp),%xmm7
704
+ movaps 0x30(%rsp),%xmm8
705
+ movaps 0x40(%rsp),%xmm9
706
+ movaps 0x50(%rsp),%xmm10
707
+ movaps 0x60(%rsp),%xmm11
708
+ movaps 0x70(%rsp),%xmm12
709
+ movaps 0x80(%rsp),%xmm13
710
+ movaps 0x90(%rsp),%xmm14
711
+ movaps 0xa0(%rsp),%xmm15
712
+ lea 0xb8(%rsp),%rsp
713
+ .Lenc_key_epilogue:
714
+ ___
715
+ $code.=<<___;
716
+ xor %eax,%eax
717
+ ret
718
+ .size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
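The rounds field stored above (shr \$5 then add \$5, i.e. nbits/32+5) comes out to 9, 11 and 13 for the three key sizes; the encrypt core then runs that many looped rounds plus one final round, the usual 10/12/14. A quick check of the arithmetic:

    for my $nbits (128, 192, 256) {
        printf "%d-bit key -> rounds field = %d\n", $nbits, ($nbits >> 5) + 5;   # 9, 11, 13
    }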
+
+ .globl ${PREFIX}_set_decrypt_key
+ .type ${PREFIX}_set_decrypt_key,\@function,3
+ .align 16
+ ${PREFIX}_set_decrypt_key:
+ ___
+ $code.=<<___ if ($win64);
+ lea -0xb8(%rsp),%rsp
+ movaps %xmm6,0x10(%rsp)
+ movaps %xmm7,0x20(%rsp)
+ movaps %xmm8,0x30(%rsp)
+ movaps %xmm9,0x40(%rsp)
+ movaps %xmm10,0x50(%rsp)
+ movaps %xmm11,0x60(%rsp)
+ movaps %xmm12,0x70(%rsp)
+ movaps %xmm13,0x80(%rsp)
+ movaps %xmm14,0x90(%rsp)
+ movaps %xmm15,0xa0(%rsp)
+ .Ldec_key_body:
+ ___
+ $code.=<<___;
+ mov %esi,%eax
+ shr \$5,%eax
+ add \$5,%eax
+ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
+ shl \$4,%eax
+ lea 16(%rdx,%rax),%rdx
+
+ mov \$1,%ecx
+ mov %esi,%r8d
+ shr \$1,%r8d
+ and \$32,%r8d
+ xor \$32,%r8d # nbits==192?0:32
+ call _vpaes_schedule_core
+ ___
+ $code.=<<___ if ($win64);
+ movaps 0x10(%rsp),%xmm6
+ movaps 0x20(%rsp),%xmm7
+ movaps 0x30(%rsp),%xmm8
+ movaps 0x40(%rsp),%xmm9
+ movaps 0x50(%rsp),%xmm10
+ movaps 0x60(%rsp),%xmm11
+ movaps 0x70(%rsp),%xmm12
+ movaps 0x80(%rsp),%xmm13
+ movaps 0x90(%rsp),%xmm14
+ movaps 0xa0(%rsp),%xmm15
+ lea 0xb8(%rsp),%rsp
+ .Ldec_key_epilogue:
+ ___
+ $code.=<<___;
+ xor %eax,%eax
+ ret
+ .size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+
+ .globl ${PREFIX}_encrypt
+ .type ${PREFIX}_encrypt,\@function,3
+ .align 16
+ ${PREFIX}_encrypt:
+ ___
+ $code.=<<___ if ($win64);
+ lea -0xb8(%rsp),%rsp
+ movaps %xmm6,0x10(%rsp)
+ movaps %xmm7,0x20(%rsp)
+ movaps %xmm8,0x30(%rsp)
+ movaps %xmm9,0x40(%rsp)
+ movaps %xmm10,0x50(%rsp)
+ movaps %xmm11,0x60(%rsp)
+ movaps %xmm12,0x70(%rsp)
+ movaps %xmm13,0x80(%rsp)
+ movaps %xmm14,0x90(%rsp)
+ movaps %xmm15,0xa0(%rsp)
+ .Lenc_body:
+ ___
+ $code.=<<___;
+ movdqu (%rdi),%xmm0
+ call _vpaes_preheat
+ call _vpaes_encrypt_core
+ movdqu %xmm0,(%rsi)
+ ___
+ $code.=<<___ if ($win64);
+ movaps 0x10(%rsp),%xmm6
+ movaps 0x20(%rsp),%xmm7
+ movaps 0x30(%rsp),%xmm8
+ movaps 0x40(%rsp),%xmm9
+ movaps 0x50(%rsp),%xmm10
+ movaps 0x60(%rsp),%xmm11
+ movaps 0x70(%rsp),%xmm12
+ movaps 0x80(%rsp),%xmm13
+ movaps 0x90(%rsp),%xmm14
+ movaps 0xa0(%rsp),%xmm15
+ lea 0xb8(%rsp),%rsp
+ .Lenc_epilogue:
+ ___
+ $code.=<<___;
+ ret
+ .size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+ .globl ${PREFIX}_decrypt
+ .type ${PREFIX}_decrypt,\@function,3
+ .align 16
+ ${PREFIX}_decrypt:
+ ___
+ $code.=<<___ if ($win64);
+ lea -0xb8(%rsp),%rsp
+ movaps %xmm6,0x10(%rsp)
+ movaps %xmm7,0x20(%rsp)
+ movaps %xmm8,0x30(%rsp)
+ movaps %xmm9,0x40(%rsp)
+ movaps %xmm10,0x50(%rsp)
+ movaps %xmm11,0x60(%rsp)
+ movaps %xmm12,0x70(%rsp)
+ movaps %xmm13,0x80(%rsp)
+ movaps %xmm14,0x90(%rsp)
+ movaps %xmm15,0xa0(%rsp)
+ .Ldec_body:
+ ___
+ $code.=<<___;
+ movdqu (%rdi),%xmm0
+ call _vpaes_preheat
+ call _vpaes_decrypt_core
+ movdqu %xmm0,(%rsi)
+ ___
+ $code.=<<___ if ($win64);
+ movaps 0x10(%rsp),%xmm6
+ movaps 0x20(%rsp),%xmm7
+ movaps 0x30(%rsp),%xmm8
+ movaps 0x40(%rsp),%xmm9
+ movaps 0x50(%rsp),%xmm10
+ movaps 0x60(%rsp),%xmm11
+ movaps 0x70(%rsp),%xmm12
+ movaps 0x80(%rsp),%xmm13
+ movaps 0x90(%rsp),%xmm14
+ movaps 0xa0(%rsp),%xmm15
+ lea 0xb8(%rsp),%rsp
+ .Ldec_epilogue:
+ ___
+ $code.=<<___;
+ ret
+ .size ${PREFIX}_decrypt,.-${PREFIX}_decrypt
+ ___
+ $code.=<<___;
+ ##
+ ## _aes_preheat
+ ##
+ ## Fills register %r10 -> .aes_consts (so you can -fPIC)
+ ## and %xmm9-%xmm15 as specified below.
+ ##
+ .type _vpaes_preheat,\@abi-omnipotent
+ .align 16
+ _vpaes_preheat:
+ lea .Lk_s0F(%rip), %r10
+ movdqa -0x20(%r10), %xmm10 # .Lk_inv
+ movdqa -0x10(%r10), %xmm11 # .Lk_inv+16
+ movdqa 0x00(%r10), %xmm9 # .Lk_s0F
+ movdqa 0x30(%r10), %xmm13 # .Lk_sb1
+ movdqa 0x40(%r10), %xmm12 # .Lk_sb1+16
+ movdqa 0x50(%r10), %xmm15 # .Lk_sb2
+ movdqa 0x60(%r10), %xmm14 # .Lk_sb2+16
+ ret
+ .size _vpaes_preheat,.-_vpaes_preheat
+ ########################################################
+ ##                                                    ##
+ ##                     Constants                      ##
+ ##                                                    ##
+ ########################################################
+ .type _vpaes_consts,\@object
+ .align 64
+ _vpaes_consts:
+ .Lk_inv: # inv, inva
+ .quad 0x0E05060F0D080180, 0x040703090A0B0C02
+ .quad 0x01040A060F0B0780, 0x030D0E0C02050809
+
+ .Lk_s0F: # s0F
+ .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
+
+ .Lk_ipt: # input transform (lo, hi)
+ .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
+ .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
+
+ .Lk_sb1: # sb1u, sb1t
+ .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
+ .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
+ .Lk_sb2: # sb2u, sb2t
+ .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
+ .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
+ .Lk_sbo: # sbou, sbot
+ .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
+ .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
+
+ .Lk_mc_forward: # mc_forward
+ .quad 0x0407060500030201, 0x0C0F0E0D080B0A09
+ .quad 0x080B0A0904070605, 0x000302010C0F0E0D
+ .quad 0x0C0F0E0D080B0A09, 0x0407060500030201
+ .quad 0x000302010C0F0E0D, 0x080B0A0904070605
+
+ .Lk_mc_backward: # mc_backward
+ .quad 0x0605040702010003, 0x0E0D0C0F0A09080B
+ .quad 0x020100030E0D0C0F, 0x0A09080B06050407
+ .quad 0x0E0D0C0F0A09080B, 0x0605040702010003
+ .quad 0x0A09080B06050407, 0x020100030E0D0C0F
+
+ .Lk_sr: # sr
+ .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
+ .quad 0x030E09040F0A0500, 0x0B06010C07020D08
+ .quad 0x0F060D040B020900, 0x070E050C030A0108
+ .quad 0x0B0E0104070A0D00, 0x0306090C0F020508
+
+ .Lk_rcon: # rcon
+ .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
+
+ .Lk_s63: # s63: all equal to 0x63 transformed
+ .quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
+
+ .Lk_opt: # output transform
+ .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
+ .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
+
+ .Lk_deskew: # deskew tables: inverts the sbox's "skew"
+ .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
+ .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
+
+ ##
+ ## Decryption stuff
+ ## Key schedule constants
+ ##
+ .Lk_dksd: # decryption key schedule: invskew x*D
+ .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
+ .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
+ .Lk_dksb: # decryption key schedule: invskew x*B
+ .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
+ .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
+ .Lk_dkse: # decryption key schedule: invskew x*E + 0x63
+ .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
+ .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
+ .Lk_dks9: # decryption key schedule: invskew x*9
+ .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
+ .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
+
+ ##
+ ## Decryption stuff
+ ## Round function constants
+ ##
+ .Lk_dipt: # decryption input transform
+ .quad 0x0F505B040B545F00, 0x154A411E114E451A
+ .quad 0x86E383E660056500, 0x12771772F491F194
+
+ .Lk_dsb9: # decryption sbox output *9*u, *9*t
+ .quad 0x851C03539A86D600, 0xCAD51F504F994CC9
+ .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
+ .Lk_dsbd: # decryption sbox output *D*u, *D*t
+ .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
+ .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
+ .Lk_dsbb: # decryption sbox output *B*u, *B*t
+ .quad 0xD022649296B44200, 0x602646F6B0F2D404
+ .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
+ .Lk_dsbe: # decryption sbox output *E*u, *E*t
+ .quad 0x46F2929626D4D000, 0x2242600464B4F6B0
+ .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
+ .Lk_dsbo: # decryption sbox final output
+ .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
+ .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
+ .asciz "Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
+ .align 64
+ .size _vpaes_consts,.-_vpaes_consts
+ ___
+
+ if ($win64) {
+ # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+ # CONTEXT *context,DISPATCHER_CONTEXT *disp)
+ $rec="%rcx";
+ $frame="%rdx";
+ $context="%r8";
+ $disp="%r9";
+
+ $code.=<<___;
+ .extern __imp_RtlVirtualUnwind
+ .type se_handler,\@abi-omnipotent
+ .align 16
+ se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ lea 16(%rax),%rsi # %xmm save area
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
+ .long 0xa548f3fc # cld; rep movsq
+ lea 0xb8(%rax),%rax # adjust stack pointer
+
+ .Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$`1232/8`,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+ .size se_handler,.-se_handler
+
+ .section .pdata
+ .align 4
+ .rva .LSEH_begin_${PREFIX}_set_encrypt_key
+ .rva .LSEH_end_${PREFIX}_set_encrypt_key
+ .rva .LSEH_info_${PREFIX}_set_encrypt_key
+
+ .rva .LSEH_begin_${PREFIX}_set_decrypt_key
+ .rva .LSEH_end_${PREFIX}_set_decrypt_key
+ .rva .LSEH_info_${PREFIX}_set_decrypt_key
+
+ .rva .LSEH_begin_${PREFIX}_encrypt
+ .rva .LSEH_end_${PREFIX}_encrypt
+ .rva .LSEH_info_${PREFIX}_encrypt
+
+ .rva .LSEH_begin_${PREFIX}_decrypt
+ .rva .LSEH_end_${PREFIX}_decrypt
+ .rva .LSEH_info_${PREFIX}_decrypt
+
+ .section .xdata
+ .align 8
+ .LSEH_info_${PREFIX}_set_encrypt_key:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lenc_key_body,.Lenc_key_epilogue # HandlerData[]
+ .LSEH_info_${PREFIX}_set_decrypt_key:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Ldec_key_body,.Ldec_key_epilogue # HandlerData[]
+ .LSEH_info_${PREFIX}_encrypt:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lenc_body,.Lenc_epilogue # HandlerData[]
+ .LSEH_info_${PREFIX}_decrypt:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Ldec_body,.Ldec_epilogue # HandlerData[]
+ ___
+ }
+
+ $code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+ print $code;
+
+ close STDOUT;