ruby_nacl 0.1.0

Files changed (499)
  1. data/Changelog +0 -0
  2. data/README.md +49 -0
  3. data/ext/ruby_nacl/NaCl/MACROS +56 -0
  4. data/ext/ruby_nacl/NaCl/OPERATIONS +11 -0
  5. data/ext/ruby_nacl/NaCl/PROTOTYPES.c +26 -0
  6. data/ext/ruby_nacl/NaCl/PROTOTYPES.cpp +17 -0
  7. data/ext/ruby_nacl/NaCl/README +1 -0
  8. data/ext/ruby_nacl/NaCl/commandline/nacl-sha256.c +64 -0
  9. data/ext/ruby_nacl/NaCl/commandline/nacl-sha512.c +64 -0
  10. data/ext/ruby_nacl/NaCl/cpucycles/alpha.c +80 -0
  11. data/ext/ruby_nacl/NaCl/cpucycles/alpha.h +27 -0
  12. data/ext/ruby_nacl/NaCl/cpucycles/amd64cpuinfo.c +16 -0
  13. data/ext/ruby_nacl/NaCl/cpucycles/amd64cpuinfo.h +27 -0
  14. data/ext/ruby_nacl/NaCl/cpucycles/amd64cpuspeed.c +25 -0
  15. data/ext/ruby_nacl/NaCl/cpucycles/amd64cpuspeed.h +27 -0
  16. data/ext/ruby_nacl/NaCl/cpucycles/amd64tscfreq.c +18 -0
  17. data/ext/ruby_nacl/NaCl/cpucycles/amd64tscfreq.h +27 -0
  18. data/ext/ruby_nacl/NaCl/cpucycles/celllinux.c +83 -0
  19. data/ext/ruby_nacl/NaCl/cpucycles/celllinux.h +27 -0
  20. data/ext/ruby_nacl/NaCl/cpucycles/cortex.c +73 -0
  21. data/ext/ruby_nacl/NaCl/cpucycles/cortex.h +27 -0
  22. data/ext/ruby_nacl/NaCl/cpucycles/dev4ns.c +62 -0
  23. data/ext/ruby_nacl/NaCl/cpucycles/dev4ns.h +27 -0
  24. data/ext/ruby_nacl/NaCl/cpucycles/do +105 -0
  25. data/ext/ruby_nacl/NaCl/cpucycles/gettimeofday.c +32 -0
  26. data/ext/ruby_nacl/NaCl/cpucycles/gettimeofday.h +27 -0
  27. data/ext/ruby_nacl/NaCl/cpucycles/hppapstat.c +26 -0
  28. data/ext/ruby_nacl/NaCl/cpucycles/hppapstat.h +27 -0
  29. data/ext/ruby_nacl/NaCl/cpucycles/ia64cpuinfo.c +15 -0
  30. data/ext/ruby_nacl/NaCl/cpucycles/ia64cpuinfo.h +27 -0
  31. data/ext/ruby_nacl/NaCl/cpucycles/mips.c +65 -0
  32. data/ext/ruby_nacl/NaCl/cpucycles/mips.h +27 -0
  33. data/ext/ruby_nacl/NaCl/cpucycles/monotonic.c +34 -0
  34. data/ext/ruby_nacl/NaCl/cpucycles/monotonic.h +27 -0
  35. data/ext/ruby_nacl/NaCl/cpucycles/monotoniccpuinfo.c +33 -0
  36. data/ext/ruby_nacl/NaCl/cpucycles/monotoniccpuinfo.h +27 -0
  37. data/ext/ruby_nacl/NaCl/cpucycles/osfreq.c +65 -0
  38. data/ext/ruby_nacl/NaCl/cpucycles/powerpccpuinfo.c +95 -0
  39. data/ext/ruby_nacl/NaCl/cpucycles/powerpccpuinfo.h +27 -0
  40. data/ext/ruby_nacl/NaCl/cpucycles/powerpcmacos.c +42 -0
  41. data/ext/ruby_nacl/NaCl/cpucycles/powerpcmacos.h +27 -0
  42. data/ext/ruby_nacl/NaCl/cpucycles/sgi.c +38 -0
  43. data/ext/ruby_nacl/NaCl/cpucycles/sgi.h +27 -0
  44. data/ext/ruby_nacl/NaCl/cpucycles/sparc32cpuinfo.c +16 -0
  45. data/ext/ruby_nacl/NaCl/cpucycles/sparc32cpuinfo.h +27 -0
  46. data/ext/ruby_nacl/NaCl/cpucycles/sparccpuinfo.c +15 -0
  47. data/ext/ruby_nacl/NaCl/cpucycles/sparccpuinfo.h +27 -0
  48. data/ext/ruby_nacl/NaCl/cpucycles/test.c +77 -0
  49. data/ext/ruby_nacl/NaCl/cpucycles/x86cpuinfo.c +15 -0
  50. data/ext/ruby_nacl/NaCl/cpucycles/x86cpuinfo.h +27 -0
  51. data/ext/ruby_nacl/NaCl/cpucycles/x86cpuspeed.c +24 -0
  52. data/ext/ruby_nacl/NaCl/cpucycles/x86cpuspeed.h +27 -0
  53. data/ext/ruby_nacl/NaCl/cpucycles/x86estimate.c +59 -0
  54. data/ext/ruby_nacl/NaCl/cpucycles/x86estimate.h +27 -0
  55. data/ext/ruby_nacl/NaCl/cpucycles/x86tscfreq.c +17 -0
  56. data/ext/ruby_nacl/NaCl/cpucycles/x86tscfreq.h +27 -0
  57. data/ext/ruby_nacl/NaCl/cpuid/cbytes.c +16 -0
  58. data/ext/ruby_nacl/NaCl/cpuid/cpuid.c +41 -0
  59. data/ext/ruby_nacl/NaCl/cpuid/do +37 -0
  60. data/ext/ruby_nacl/NaCl/cpuid/unknown.c +7 -0
  61. data/ext/ruby_nacl/NaCl/cpuid/x86.c +41 -0
  62. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha256/checksum +1 -0
  63. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha256/ref/api.h +2 -0
  64. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha256/ref/hmac.c +83 -0
  65. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha256/ref/verify.c +9 -0
  66. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha256/used +0 -0
  67. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha512256/checksum +1 -0
  68. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha512256/ref/api.h +2 -0
  69. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha512256/ref/hmac.c +86 -0
  70. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha512256/ref/verify.c +9 -0
  71. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha512256/selected +0 -0
  72. data/ext/ruby_nacl/NaCl/crypto_auth/hmacsha512256/used +0 -0
  73. data/ext/ruby_nacl/NaCl/crypto_auth/measure.c +69 -0
  74. data/ext/ruby_nacl/NaCl/crypto_auth/try.c +119 -0
  75. data/ext/ruby_nacl/NaCl/crypto_auth/wrapper-auth.cpp +11 -0
  76. data/ext/ruby_nacl/NaCl/crypto_auth/wrapper-verify.cpp +14 -0
  77. data/ext/ruby_nacl/NaCl/crypto_box/curve25519xsalsa20poly1305/checksum +1 -0
  78. data/ext/ruby_nacl/NaCl/crypto_box/curve25519xsalsa20poly1305/ref/after.c +22 -0
  79. data/ext/ruby_nacl/NaCl/crypto_box/curve25519xsalsa20poly1305/ref/api.h +6 -0
  80. data/ext/ruby_nacl/NaCl/crypto_box/curve25519xsalsa20poly1305/ref/before.c +17 -0
  81. data/ext/ruby_nacl/NaCl/crypto_box/curve25519xsalsa20poly1305/ref/box.c +27 -0
  82. data/ext/ruby_nacl/NaCl/crypto_box/curve25519xsalsa20poly1305/ref/keypair.c +12 -0
  83. data/ext/ruby_nacl/NaCl/crypto_box/curve25519xsalsa20poly1305/selected +0 -0
  84. data/ext/ruby_nacl/NaCl/crypto_box/curve25519xsalsa20poly1305/used +0 -0
  85. data/ext/ruby_nacl/NaCl/crypto_box/measure.c +137 -0
  86. data/ext/ruby_nacl/NaCl/crypto_box/try.c +195 -0
  87. data/ext/ruby_nacl/NaCl/crypto_box/wrapper-box.cpp +24 -0
  88. data/ext/ruby_nacl/NaCl/crypto_box/wrapper-keypair.cpp +12 -0
  89. data/ext/ruby_nacl/NaCl/crypto_box/wrapper-open.cpp +27 -0
  90. data/ext/ruby_nacl/NaCl/crypto_core/hsalsa20/checksum +1 -0
  91. data/ext/ruby_nacl/NaCl/crypto_core/hsalsa20/ref/api.h +4 -0
  92. data/ext/ruby_nacl/NaCl/crypto_core/hsalsa20/ref/core.c +135 -0
  93. data/ext/ruby_nacl/NaCl/crypto_core/hsalsa20/ref/implementors +1 -0
  94. data/ext/ruby_nacl/NaCl/crypto_core/hsalsa20/ref2/api.h +4 -0
  95. data/ext/ruby_nacl/NaCl/crypto_core/hsalsa20/ref2/core.c +108 -0
  96. data/ext/ruby_nacl/NaCl/crypto_core/hsalsa20/ref2/implementors +1 -0
  97. data/ext/ruby_nacl/NaCl/crypto_core/hsalsa20/used +0 -0
  98. data/ext/ruby_nacl/NaCl/crypto_core/measure.c +18 -0
  99. data/ext/ruby_nacl/NaCl/crypto_core/salsa20/checksum +1 -0
  100. data/ext/ruby_nacl/NaCl/crypto_core/salsa20/ref/api.h +4 -0
  101. data/ext/ruby_nacl/NaCl/crypto_core/salsa20/ref/core.c +134 -0
  102. data/ext/ruby_nacl/NaCl/crypto_core/salsa20/ref/implementors +1 -0
  103. data/ext/ruby_nacl/NaCl/crypto_core/salsa20/used +0 -0
  104. data/ext/ruby_nacl/NaCl/crypto_core/salsa2012/checksum +1 -0
  105. data/ext/ruby_nacl/NaCl/crypto_core/salsa2012/ref/api.h +4 -0
  106. data/ext/ruby_nacl/NaCl/crypto_core/salsa2012/ref/core.c +134 -0
  107. data/ext/ruby_nacl/NaCl/crypto_core/salsa2012/ref/implementors +1 -0
  108. data/ext/ruby_nacl/NaCl/crypto_core/salsa2012/used +0 -0
  109. data/ext/ruby_nacl/NaCl/crypto_core/salsa208/checksum +1 -0
  110. data/ext/ruby_nacl/NaCl/crypto_core/salsa208/ref/api.h +4 -0
  111. data/ext/ruby_nacl/NaCl/crypto_core/salsa208/ref/core.c +134 -0
  112. data/ext/ruby_nacl/NaCl/crypto_core/salsa208/ref/implementors +1 -0
  113. data/ext/ruby_nacl/NaCl/crypto_core/salsa208/used +0 -0
  114. data/ext/ruby_nacl/NaCl/crypto_core/try.c +116 -0
  115. data/ext/ruby_nacl/NaCl/crypto_core/wrapper-empty.cpp +0 -0
  116. data/ext/ruby_nacl/NaCl/crypto_hash/measure.c +66 -0
  117. data/ext/ruby_nacl/NaCl/crypto_hash/sha256/checksum +1 -0
  118. data/ext/ruby_nacl/NaCl/crypto_hash/sha256/ref/api.h +1 -0
  119. data/ext/ruby_nacl/NaCl/crypto_hash/sha256/ref/hash.c +69 -0
  120. data/ext/ruby_nacl/NaCl/crypto_hash/sha256/ref/implementors +1 -0
  121. data/ext/ruby_nacl/NaCl/crypto_hash/sha256/used +0 -0
  122. data/ext/ruby_nacl/NaCl/crypto_hash/sha512/checksum +1 -0
  123. data/ext/ruby_nacl/NaCl/crypto_hash/sha512/ref/api.h +1 -0
  124. data/ext/ruby_nacl/NaCl/crypto_hash/sha512/ref/hash.c +71 -0
  125. data/ext/ruby_nacl/NaCl/crypto_hash/sha512/ref/implementors +1 -0
  126. data/ext/ruby_nacl/NaCl/crypto_hash/sha512/selected +0 -0
  127. data/ext/ruby_nacl/NaCl/crypto_hash/sha512/used +0 -0
  128. data/ext/ruby_nacl/NaCl/crypto_hash/try.c +77 -0
  129. data/ext/ruby_nacl/NaCl/crypto_hash/wrapper-hash.cpp +10 -0
  130. data/ext/ruby_nacl/NaCl/crypto_hashblocks/measure.c +18 -0
  131. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha256/checksum +1 -0
  132. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha256/inplace/api.h +2 -0
  133. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha256/inplace/blocks.c +228 -0
  134. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha256/inplace/implementors +1 -0
  135. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha256/ref/api.h +2 -0
  136. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha256/ref/blocks.c +212 -0
  137. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha256/ref/implementors +1 -0
  138. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha256/used +0 -0
  139. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/checksum +1 -0
  140. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/inplace/api.h +2 -0
  141. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/inplace/blocks.c +256 -0
  142. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/inplace/implementors +1 -0
  143. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/ref/api.h +2 -0
  144. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/ref/blocks.c +239 -0
  145. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/ref/implementors +1 -0
  146. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/selected +0 -0
  147. data/ext/ruby_nacl/NaCl/crypto_hashblocks/sha512/used +0 -0
  148. data/ext/ruby_nacl/NaCl/crypto_hashblocks/try.c +79 -0
  149. data/ext/ruby_nacl/NaCl/crypto_hashblocks/wrapper-empty.cpp +0 -0
  150. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/measure.c +69 -0
  151. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/53/api.h +2 -0
  152. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/53/auth.c +1616 -0
  153. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/53/verify.c +9 -0
  154. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/amd64/api.h +2 -0
  155. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/amd64/auth.s +2787 -0
  156. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/amd64/constants.s +85 -0
  157. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/amd64/verify.c +9 -0
  158. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/checksum +1 -0
  159. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/ref/api.h +2 -0
  160. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/ref/auth.c +104 -0
  161. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/ref/verify.c +9 -0
  162. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/selected +0 -0
  163. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/used +0 -0
  164. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/x86/api.h +2 -0
  165. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/x86/auth.s +2779 -0
  166. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/x86/constants.s +85 -0
  167. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/poly1305/x86/verify.c +9 -0
  168. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/try.c +119 -0
  169. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/wrapper-auth.cpp +11 -0
  170. data/ext/ruby_nacl/NaCl/crypto_onetimeauth/wrapper-verify.cpp +14 -0
  171. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/api.h +2 -0
  172. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/base.c +8 -0
  173. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/const.s +114 -0
  174. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/fromdouble.s +195 -0
  175. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/implementors +1 -0
  176. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/init.s +13 -0
  177. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/mainloop.s +3990 -0
  178. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/mult.s +410 -0
  179. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/smult.c +91 -0
  180. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/square.s +298 -0
  181. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/athlon/todouble.s +144 -0
  182. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/checksum +1 -0
  183. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/donna_c64/api.h +2 -0
  184. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/donna_c64/base.c +8 -0
  185. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/donna_c64/implementors +1 -0
  186. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/donna_c64/smult.c +477 -0
  187. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/ref/api.h +2 -0
  188. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/ref/base.c +16 -0
  189. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/ref/implementors +1 -0
  190. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/ref/smult.c +265 -0
  191. data/ext/ruby_nacl/NaCl/crypto_scalarmult/curve25519/used +0 -0
  192. data/ext/ruby_nacl/NaCl/crypto_scalarmult/measure.c +61 -0
  193. data/ext/ruby_nacl/NaCl/crypto_scalarmult/try.c +126 -0
  194. data/ext/ruby_nacl/NaCl/crypto_scalarmult/wrapper-base.cpp +11 -0
  195. data/ext/ruby_nacl/NaCl/crypto_scalarmult/wrapper-mult.cpp +12 -0
  196. data/ext/ruby_nacl/NaCl/crypto_secretbox/measure.c +75 -0
  197. data/ext/ruby_nacl/NaCl/crypto_secretbox/try.c +129 -0
  198. data/ext/ruby_nacl/NaCl/crypto_secretbox/wrapper-box.cpp +19 -0
  199. data/ext/ruby_nacl/NaCl/crypto_secretbox/wrapper-open.cpp +22 -0
  200. data/ext/ruby_nacl/NaCl/crypto_secretbox/xsalsa20poly1305/checksum +1 -0
  201. data/ext/ruby_nacl/NaCl/crypto_secretbox/xsalsa20poly1305/ref/api.h +4 -0
  202. data/ext/ruby_nacl/NaCl/crypto_secretbox/xsalsa20poly1305/ref/box.c +35 -0
  203. data/ext/ruby_nacl/NaCl/crypto_secretbox/xsalsa20poly1305/selected +0 -0
  204. data/ext/ruby_nacl/NaCl/crypto_secretbox/xsalsa20poly1305/used +0 -0
  205. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/ref/api.h +3 -0
  206. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/ref/fe25519.c +345 -0
  207. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/ref/fe25519.h +54 -0
  208. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/ref/ge25519.c +227 -0
  209. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/ref/ge25519.h +34 -0
  210. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/ref/sc25519.c +146 -0
  211. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/ref/sc25519.h +51 -0
  212. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/ref/sign.c +103 -0
  213. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/selected +0 -0
  214. data/ext/ruby_nacl/NaCl/crypto_sign/edwards25519sha512batch/used +0 -0
  215. data/ext/ruby_nacl/NaCl/crypto_sign/measure.c +83 -0
  216. data/ext/ruby_nacl/NaCl/crypto_sign/try.c +86 -0
  217. data/ext/ruby_nacl/NaCl/crypto_sign/wrapper-keypair.cpp +12 -0
  218. data/ext/ruby_nacl/NaCl/crypto_sign/wrapper-sign-open.cpp +24 -0
  219. data/ext/ruby_nacl/NaCl/crypto_sign/wrapper-sign.cpp +23 -0
  220. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/checksum +1 -0
  221. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/core2/afternm.s +12308 -0
  222. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/core2/api.h +3 -0
  223. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/core2/beforenm.s +13694 -0
  224. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/core2/stream.c +14 -0
  225. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/core2/xor.c +15 -0
  226. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/core2/xor_afternm.s +12407 -0
  227. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/afternm.c +158 -0
  228. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/api.h +3 -0
  229. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/beforenm.c +59 -0
  230. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/common.c +64 -0
  231. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/common.h +788 -0
  232. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/consts.c +14 -0
  233. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/consts.h +28 -0
  234. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/int128.c +128 -0
  235. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/int128.h +47 -0
  236. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/stream.c +28 -0
  237. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/types.h +10 -0
  238. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/portable/xor_afternm.c +180 -0
  239. data/ext/ruby_nacl/NaCl/crypto_stream/aes128ctr/used +0 -0
  240. data/ext/ruby_nacl/NaCl/crypto_stream/measure.c +73 -0
  241. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/amd64_xmm6/api.h +2 -0
  242. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/amd64_xmm6/implementors +1 -0
  243. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/amd64_xmm6/stream.s +4823 -0
  244. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/checksum +1 -0
  245. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/ref/api.h +2 -0
  246. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/ref/implementors +1 -0
  247. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/ref/stream.c +49 -0
  248. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/ref/xor.c +52 -0
  249. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/used +0 -0
  250. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/x86_xmm5/api.h +2 -0
  251. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/x86_xmm5/implementors +1 -0
  252. data/ext/ruby_nacl/NaCl/crypto_stream/salsa20/x86_xmm5/stream.s +5078 -0
  253. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/amd64_xmm6/api.h +2 -0
  254. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/amd64_xmm6/implementors +1 -0
  255. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/amd64_xmm6/stream.s +4823 -0
  256. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/checksum +1 -0
  257. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/ref/api.h +2 -0
  258. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/ref/implementors +1 -0
  259. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/ref/stream.c +49 -0
  260. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/ref/xor.c +52 -0
  261. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/used +0 -0
  262. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/x86_xmm5/api.h +2 -0
  263. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/x86_xmm5/implementors +1 -0
  264. data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/x86_xmm5/stream.s +5078 -0
  265. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/amd64_xmm6/api.h +2 -0
  266. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/amd64_xmm6/implementors +1 -0
  267. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/amd64_xmm6/stream.s +4823 -0
  268. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/checksum +1 -0
  269. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/ref/api.h +2 -0
  270. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/ref/implementors +1 -0
  271. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/ref/stream.c +49 -0
  272. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/ref/xor.c +52 -0
  273. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/used +0 -0
  274. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/x86_xmm5/api.h +2 -0
  275. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/x86_xmm5/implementors +1 -0
  276. data/ext/ruby_nacl/NaCl/crypto_stream/salsa208/x86_xmm5/stream.s +5078 -0
  277. data/ext/ruby_nacl/NaCl/crypto_stream/try.c +124 -0
  278. data/ext/ruby_nacl/NaCl/crypto_stream/wrapper-stream.cpp +12 -0
  279. data/ext/ruby_nacl/NaCl/crypto_stream/wrapper-xor.cpp +17 -0
  280. data/ext/ruby_nacl/NaCl/crypto_stream/xsalsa20/checksum +1 -0
  281. data/ext/ruby_nacl/NaCl/crypto_stream/xsalsa20/ref/api.h +2 -0
  282. data/ext/ruby_nacl/NaCl/crypto_stream/xsalsa20/ref/implementors +1 -0
  283. data/ext/ruby_nacl/NaCl/crypto_stream/xsalsa20/ref/stream.c +22 -0
  284. data/ext/ruby_nacl/NaCl/crypto_stream/xsalsa20/ref/xor.c +23 -0
  285. data/ext/ruby_nacl/NaCl/crypto_stream/xsalsa20/selected +0 -0
  286. data/ext/ruby_nacl/NaCl/crypto_stream/xsalsa20/used +0 -0
  287. data/ext/ruby_nacl/NaCl/crypto_stream.h +18 -0
  288. data/ext/ruby_nacl/NaCl/crypto_stream_aes128ctr.h +33 -0
  289. data/ext/ruby_nacl/NaCl/crypto_verify/16/checksum +1 -0
  290. data/ext/ruby_nacl/NaCl/crypto_verify/16/ref/api.h +1 -0
  291. data/ext/ruby_nacl/NaCl/crypto_verify/16/ref/verify.c +24 -0
  292. data/ext/ruby_nacl/NaCl/crypto_verify/16/used +0 -0
  293. data/ext/ruby_nacl/NaCl/crypto_verify/32/checksum +1 -0
  294. data/ext/ruby_nacl/NaCl/crypto_verify/32/ref/api.h +1 -0
  295. data/ext/ruby_nacl/NaCl/crypto_verify/32/ref/verify.c +40 -0
  296. data/ext/ruby_nacl/NaCl/crypto_verify/32/used +0 -0
  297. data/ext/ruby_nacl/NaCl/crypto_verify/measure.c +18 -0
  298. data/ext/ruby_nacl/NaCl/crypto_verify/try.c +75 -0
  299. data/ext/ruby_nacl/NaCl/crypto_verify/wrapper-empty.cpp +0 -0
  300. data/ext/ruby_nacl/NaCl/curvecp/LIBS +31 -0
  301. data/ext/ruby_nacl/NaCl/curvecp/README +10 -0
  302. data/ext/ruby_nacl/NaCl/curvecp/SOURCES +36 -0
  303. data/ext/ruby_nacl/NaCl/curvecp/TARGETS +5 -0
  304. data/ext/ruby_nacl/NaCl/curvecp/blocking.c +12 -0
  305. data/ext/ruby_nacl/NaCl/curvecp/blocking.h +7 -0
  306. data/ext/ruby_nacl/NaCl/curvecp/byte.h +8 -0
  307. data/ext/ruby_nacl/NaCl/curvecp/byte_copy.c +8 -0
  308. data/ext/ruby_nacl/NaCl/curvecp/byte_isequal.c +10 -0
  309. data/ext/ruby_nacl/NaCl/curvecp/byte_zero.c +7 -0
  310. data/ext/ruby_nacl/NaCl/curvecp/crypto_block.c +35 -0
  311. data/ext/ruby_nacl/NaCl/curvecp/crypto_block.h +4 -0
  312. data/ext/ruby_nacl/NaCl/curvecp/curvecpclient.c +476 -0
  313. data/ext/ruby_nacl/NaCl/curvecp/curvecpmakekey.c +57 -0
  314. data/ext/ruby_nacl/NaCl/curvecp/curvecpmessage.c +654 -0
  315. data/ext/ruby_nacl/NaCl/curvecp/curvecpprintkey.c +46 -0
  316. data/ext/ruby_nacl/NaCl/curvecp/curvecpserver.c +497 -0
  317. data/ext/ruby_nacl/NaCl/curvecp/die.c +42 -0
  318. data/ext/ruby_nacl/NaCl/curvecp/die.h +16 -0
  319. data/ext/ruby_nacl/NaCl/curvecp/e.c +106 -0
  320. data/ext/ruby_nacl/NaCl/curvecp/e.h +438 -0
  321. data/ext/ruby_nacl/NaCl/curvecp/hexparse.c +25 -0
  322. data/ext/ruby_nacl/NaCl/curvecp/hexparse.h +6 -0
  323. data/ext/ruby_nacl/NaCl/curvecp/load.c +33 -0
  324. data/ext/ruby_nacl/NaCl/curvecp/load.h +6 -0
  325. data/ext/ruby_nacl/NaCl/curvecp/nameparse.c +19 -0
  326. data/ext/ruby_nacl/NaCl/curvecp/nameparse.h +6 -0
  327. data/ext/ruby_nacl/NaCl/curvecp/nanoseconds.c +27 -0
  328. data/ext/ruby_nacl/NaCl/curvecp/nanoseconds.h +6 -0
  329. data/ext/ruby_nacl/NaCl/curvecp/open.h +10 -0
  330. data/ext/ruby_nacl/NaCl/curvecp/open_cwd.c +6 -0
  331. data/ext/ruby_nacl/NaCl/curvecp/open_lock.c +19 -0
  332. data/ext/ruby_nacl/NaCl/curvecp/open_pipe.c +15 -0
  333. data/ext/ruby_nacl/NaCl/curvecp/open_read.c +17 -0
  334. data/ext/ruby_nacl/NaCl/curvecp/open_write.c +17 -0
  335. data/ext/ruby_nacl/NaCl/curvecp/portparse.c +14 -0
  336. data/ext/ruby_nacl/NaCl/curvecp/portparse.h +6 -0
  337. data/ext/ruby_nacl/NaCl/curvecp/randommod.c +14 -0
  338. data/ext/ruby_nacl/NaCl/curvecp/randommod.h +6 -0
  339. data/ext/ruby_nacl/NaCl/curvecp/safenonce.c +74 -0
  340. data/ext/ruby_nacl/NaCl/curvecp/safenonce.h +6 -0
  341. data/ext/ruby_nacl/NaCl/curvecp/savesync.c +24 -0
  342. data/ext/ruby_nacl/NaCl/curvecp/savesync.h +6 -0
  343. data/ext/ruby_nacl/NaCl/curvecp/socket.h +9 -0
  344. data/ext/ruby_nacl/NaCl/curvecp/socket_bind.c +15 -0
  345. data/ext/ruby_nacl/NaCl/curvecp/socket_recv.c +23 -0
  346. data/ext/ruby_nacl/NaCl/curvecp/socket_send.c +19 -0
  347. data/ext/ruby_nacl/NaCl/curvecp/socket_udp.c +36 -0
  348. data/ext/ruby_nacl/NaCl/curvecp/uint16_pack.c +7 -0
  349. data/ext/ruby_nacl/NaCl/curvecp/uint16_pack.h +8 -0
  350. data/ext/ruby_nacl/NaCl/curvecp/uint16_unpack.c +9 -0
  351. data/ext/ruby_nacl/NaCl/curvecp/uint16_unpack.h +8 -0
  352. data/ext/ruby_nacl/NaCl/curvecp/uint32_pack.c +9 -0
  353. data/ext/ruby_nacl/NaCl/curvecp/uint32_pack.h +8 -0
  354. data/ext/ruby_nacl/NaCl/curvecp/uint32_unpack.c +11 -0
  355. data/ext/ruby_nacl/NaCl/curvecp/uint32_unpack.h +8 -0
  356. data/ext/ruby_nacl/NaCl/curvecp/uint64_pack.c +13 -0
  357. data/ext/ruby_nacl/NaCl/curvecp/uint64_pack.h +8 -0
  358. data/ext/ruby_nacl/NaCl/curvecp/uint64_unpack.c +15 -0
  359. data/ext/ruby_nacl/NaCl/curvecp/uint64_unpack.h +8 -0
  360. data/ext/ruby_nacl/NaCl/curvecp/writeall.c +27 -0
  361. data/ext/ruby_nacl/NaCl/curvecp/writeall.h +6 -0
  362. data/ext/ruby_nacl/NaCl/do +468 -0
  363. data/ext/ruby_nacl/NaCl/inttypes/crypto_int16.c +3 -0
  364. data/ext/ruby_nacl/NaCl/inttypes/crypto_int32.c +3 -0
  365. data/ext/ruby_nacl/NaCl/inttypes/crypto_int64.c +3 -0
  366. data/ext/ruby_nacl/NaCl/inttypes/crypto_int8.c +3 -0
  367. data/ext/ruby_nacl/NaCl/inttypes/crypto_uint16.c +3 -0
  368. data/ext/ruby_nacl/NaCl/inttypes/crypto_uint32.c +3 -0
  369. data/ext/ruby_nacl/NaCl/inttypes/crypto_uint64.c +3 -0
  370. data/ext/ruby_nacl/NaCl/inttypes/crypto_uint8.c +3 -0
  371. data/ext/ruby_nacl/NaCl/inttypes/do +47 -0
  372. data/ext/ruby_nacl/NaCl/inttypes/signed.h +17 -0
  373. data/ext/ruby_nacl/NaCl/inttypes/unsigned.h +17 -0
  374. data/ext/ruby_nacl/NaCl/measure-anything.c +225 -0
  375. data/ext/ruby_nacl/NaCl/okcompilers/abiname.c +45 -0
  376. data/ext/ruby_nacl/NaCl/okcompilers/archivers +2 -0
  377. data/ext/ruby_nacl/NaCl/okcompilers/c +8 -0
  378. data/ext/ruby_nacl/NaCl/okcompilers/cpp +8 -0
  379. data/ext/ruby_nacl/NaCl/okcompilers/do +196 -0
  380. data/ext/ruby_nacl/NaCl/okcompilers/lib.c +29 -0
  381. data/ext/ruby_nacl/NaCl/okcompilers/lib.cpp +19 -0
  382. data/ext/ruby_nacl/NaCl/okcompilers/main.c +25 -0
  383. data/ext/ruby_nacl/NaCl/okcompilers/main.cpp +22 -0
  384. data/ext/ruby_nacl/NaCl/randombytes/devurandom.c +34 -0
  385. data/ext/ruby_nacl/NaCl/randombytes/devurandom.h +24 -0
  386. data/ext/ruby_nacl/NaCl/randombytes/do +43 -0
  387. data/ext/ruby_nacl/NaCl/randombytes/test.c +15 -0
  388. data/ext/ruby_nacl/NaCl/tests/auth.c +19 -0
  389. data/ext/ruby_nacl/NaCl/tests/auth.out +4 -0
  390. data/ext/ruby_nacl/NaCl/tests/auth2.c +34 -0
  391. data/ext/ruby_nacl/NaCl/tests/auth2.out +4 -0
  392. data/ext/ruby_nacl/NaCl/tests/auth3.c +34 -0
  393. data/ext/ruby_nacl/NaCl/tests/auth3.out +1 -0
  394. data/ext/ruby_nacl/NaCl/tests/auth4.cpp +44 -0
  395. data/ext/ruby_nacl/NaCl/tests/auth4.out +1 -0
  396. data/ext/ruby_nacl/NaCl/tests/auth5.c +36 -0
  397. data/ext/ruby_nacl/NaCl/tests/auth5.out +0 -0
  398. data/ext/ruby_nacl/NaCl/tests/auth6.cpp +46 -0
  399. data/ext/ruby_nacl/NaCl/tests/auth6.out +0 -0
  400. data/ext/ruby_nacl/NaCl/tests/box.c +63 -0
  401. data/ext/ruby_nacl/NaCl/tests/box.out +19 -0
  402. data/ext/ruby_nacl/NaCl/tests/box2.c +64 -0
  403. data/ext/ruby_nacl/NaCl/tests/box2.out +17 -0
  404. data/ext/ruby_nacl/NaCl/tests/box3.cpp +60 -0
  405. data/ext/ruby_nacl/NaCl/tests/box3.out +19 -0
  406. data/ext/ruby_nacl/NaCl/tests/box4.cpp +66 -0
  407. data/ext/ruby_nacl/NaCl/tests/box4.out +17 -0
  408. data/ext/ruby_nacl/NaCl/tests/box5.cpp +30 -0
  409. data/ext/ruby_nacl/NaCl/tests/box5.out +0 -0
  410. data/ext/ruby_nacl/NaCl/tests/box6.cpp +43 -0
  411. data/ext/ruby_nacl/NaCl/tests/box6.out +0 -0
  412. data/ext/ruby_nacl/NaCl/tests/box7.c +36 -0
  413. data/ext/ruby_nacl/NaCl/tests/box7.out +0 -0
  414. data/ext/ruby_nacl/NaCl/tests/box8.c +41 -0
  415. data/ext/ruby_nacl/NaCl/tests/box8.out +0 -0
  416. data/ext/ruby_nacl/NaCl/tests/core1.c +30 -0
  417. data/ext/ruby_nacl/NaCl/tests/core1.out +4 -0
  418. data/ext/ruby_nacl/NaCl/tests/core2.c +33 -0
  419. data/ext/ruby_nacl/NaCl/tests/core2.out +4 -0
  420. data/ext/ruby_nacl/NaCl/tests/core3.c +41 -0
  421. data/ext/ruby_nacl/NaCl/tests/core3.out +1 -0
  422. data/ext/ruby_nacl/NaCl/tests/core4.c +33 -0
  423. data/ext/ruby_nacl/NaCl/tests/core4.out +8 -0
  424. data/ext/ruby_nacl/NaCl/tests/core5.c +32 -0
  425. data/ext/ruby_nacl/NaCl/tests/core5.out +4 -0
  426. data/ext/ruby_nacl/NaCl/tests/core6.c +47 -0
  427. data/ext/ruby_nacl/NaCl/tests/core6.out +4 -0
  428. data/ext/ruby_nacl/NaCl/tests/hash.c +14 -0
  429. data/ext/ruby_nacl/NaCl/tests/hash.out +1 -0
  430. data/ext/ruby_nacl/NaCl/tests/hash2.cpp +18 -0
  431. data/ext/ruby_nacl/NaCl/tests/hash2.out +1 -0
  432. data/ext/ruby_nacl/NaCl/tests/hash3.c +14 -0
  433. data/ext/ruby_nacl/NaCl/tests/hash3.out +1 -0
  434. data/ext/ruby_nacl/NaCl/tests/hash4.cpp +18 -0
  435. data/ext/ruby_nacl/NaCl/tests/hash4.out +1 -0
  436. data/ext/ruby_nacl/NaCl/tests/onetimeauth.c +42 -0
  437. data/ext/ruby_nacl/NaCl/tests/onetimeauth.out +2 -0
  438. data/ext/ruby_nacl/NaCl/tests/onetimeauth2.c +40 -0
  439. data/ext/ruby_nacl/NaCl/tests/onetimeauth2.out +1 -0
  440. data/ext/ruby_nacl/NaCl/tests/onetimeauth5.cpp +46 -0
  441. data/ext/ruby_nacl/NaCl/tests/onetimeauth5.out +2 -0
  442. data/ext/ruby_nacl/NaCl/tests/onetimeauth6.cpp +50 -0
  443. data/ext/ruby_nacl/NaCl/tests/onetimeauth6.out +1 -0
  444. data/ext/ruby_nacl/NaCl/tests/onetimeauth7.c +36 -0
  445. data/ext/ruby_nacl/NaCl/tests/onetimeauth7.out +0 -0
  446. data/ext/ruby_nacl/NaCl/tests/onetimeauth8.cpp +46 -0
  447. data/ext/ruby_nacl/NaCl/tests/onetimeauth8.out +0 -0
  448. data/ext/ruby_nacl/NaCl/tests/scalarmult.c +23 -0
  449. data/ext/ruby_nacl/NaCl/tests/scalarmult.out +4 -0
  450. data/ext/ruby_nacl/NaCl/tests/scalarmult2.c +23 -0
  451. data/ext/ruby_nacl/NaCl/tests/scalarmult2.out +4 -0
  452. data/ext/ruby_nacl/NaCl/tests/scalarmult3.cpp +31 -0
  453. data/ext/ruby_nacl/NaCl/tests/scalarmult3.out +4 -0
  454. data/ext/ruby_nacl/NaCl/tests/scalarmult4.cpp +31 -0
  455. data/ext/ruby_nacl/NaCl/tests/scalarmult4.out +4 -0
  456. data/ext/ruby_nacl/NaCl/tests/scalarmult5.c +30 -0
  457. data/ext/ruby_nacl/NaCl/tests/scalarmult5.out +4 -0
  458. data/ext/ruby_nacl/NaCl/tests/scalarmult6.c +30 -0
  459. data/ext/ruby_nacl/NaCl/tests/scalarmult6.out +4 -0
  460. data/ext/ruby_nacl/NaCl/tests/scalarmult7.cpp +32 -0
  461. data/ext/ruby_nacl/NaCl/tests/scalarmult7.out +4 -0
  462. data/ext/ruby_nacl/NaCl/tests/secretbox.c +56 -0
  463. data/ext/ruby_nacl/NaCl/tests/secretbox.out +19 -0
  464. data/ext/ruby_nacl/NaCl/tests/secretbox2.c +57 -0
  465. data/ext/ruby_nacl/NaCl/tests/secretbox2.out +17 -0
  466. data/ext/ruby_nacl/NaCl/tests/secretbox3.cpp +52 -0
  467. data/ext/ruby_nacl/NaCl/tests/secretbox3.out +19 -0
  468. data/ext/ruby_nacl/NaCl/tests/secretbox4.cpp +54 -0
  469. data/ext/ruby_nacl/NaCl/tests/secretbox4.out +17 -0
  470. data/ext/ruby_nacl/NaCl/tests/secretbox5.cpp +29 -0
  471. data/ext/ruby_nacl/NaCl/tests/secretbox5.out +0 -0
  472. data/ext/ruby_nacl/NaCl/tests/secretbox6.cpp +42 -0
  473. data/ext/ruby_nacl/NaCl/tests/secretbox6.out +0 -0
  474. data/ext/ruby_nacl/NaCl/tests/secretbox7.c +32 -0
  475. data/ext/ruby_nacl/NaCl/tests/secretbox7.out +0 -0
  476. data/ext/ruby_nacl/NaCl/tests/secretbox8.c +37 -0
  477. data/ext/ruby_nacl/NaCl/tests/secretbox8.out +0 -0
  478. data/ext/ruby_nacl/NaCl/tests/stream.c +29 -0
  479. data/ext/ruby_nacl/NaCl/tests/stream.out +1 -0
  480. data/ext/ruby_nacl/NaCl/tests/stream2.c +27 -0
  481. data/ext/ruby_nacl/NaCl/tests/stream2.out +1 -0
  482. data/ext/ruby_nacl/NaCl/tests/stream3.c +28 -0
  483. data/ext/ruby_nacl/NaCl/tests/stream3.out +4 -0
  484. data/ext/ruby_nacl/NaCl/tests/stream4.c +53 -0
  485. data/ext/ruby_nacl/NaCl/tests/stream4.out +17 -0
  486. data/ext/ruby_nacl/NaCl/tests/stream5.cpp +29 -0
  487. data/ext/ruby_nacl/NaCl/tests/stream5.out +1 -0
  488. data/ext/ruby_nacl/NaCl/tests/stream6.cpp +27 -0
  489. data/ext/ruby_nacl/NaCl/tests/stream6.out +1 -0
  490. data/ext/ruby_nacl/NaCl/tests/stream7.cpp +30 -0
  491. data/ext/ruby_nacl/NaCl/tests/stream7.out +4 -0
  492. data/ext/ruby_nacl/NaCl/tests/stream8.cpp +56 -0
  493. data/ext/ruby_nacl/NaCl/tests/stream8.out +17 -0
  494. data/ext/ruby_nacl/NaCl/try-anything.c +173 -0
  495. data/ext/ruby_nacl/NaCl/version +1 -0
  496. data/ext/ruby_nacl/extconf.rb +18 -0
  497. data/ext/ruby_nacl/ruby_nacl.cpp +147 -0
  498. data/ext/ruby_nacl/ruby_nacl.h +49 -0
  499. metadata +554 -0
data/ext/ruby_nacl/NaCl/crypto_stream/salsa2012/x86_xmm5/stream.s
@@ -0,0 +1,5078 @@
+
+ # qhasm: int32 a
+
+ # qhasm: stack32 arg1
+
+ # qhasm: stack32 arg2
+
+ # qhasm: stack32 arg3
+
+ # qhasm: stack32 arg4
+
+ # qhasm: stack32 arg5
+
+ # qhasm: stack32 arg6
+
+ # qhasm: input arg1
+
+ # qhasm: input arg2
+
+ # qhasm: input arg3
+
+ # qhasm: input arg4
+
+ # qhasm: input arg5
+
+ # qhasm: input arg6
+
+ # qhasm: int32 eax
+
+ # qhasm: int32 ebx
+
+ # qhasm: int32 esi
+
+ # qhasm: int32 edi
+
+ # qhasm: int32 ebp
+
+ # qhasm: caller eax
+
+ # qhasm: caller ebx
+
+ # qhasm: caller esi
+
+ # qhasm: caller edi
+
+ # qhasm: caller ebp
+
+ # qhasm: int32 k
+
+ # qhasm: int32 kbits
+
+ # qhasm: int32 iv
+
+ # qhasm: int32 i
+
+ # qhasm: stack128 x0
+
+ # qhasm: stack128 x1
+
+ # qhasm: stack128 x2
+
+ # qhasm: stack128 x3
+
+ # qhasm: int32 m
+
+ # qhasm: stack32 out_stack
+
+ # qhasm: int32 out
+
+ # qhasm: stack32 bytes_stack
+
+ # qhasm: int32 bytes
+
+ # qhasm: stack32 eax_stack
+
+ # qhasm: stack32 ebx_stack
+
+ # qhasm: stack32 esi_stack
+
+ # qhasm: stack32 edi_stack
+
+ # qhasm: stack32 ebp_stack
+
+ # qhasm: int6464 diag0
+
+ # qhasm: int6464 diag1
+
+ # qhasm: int6464 diag2
+
+ # qhasm: int6464 diag3
+
+ # qhasm: int6464 a0
+
+ # qhasm: int6464 a1
+
+ # qhasm: int6464 a2
+
+ # qhasm: int6464 a3
+
+ # qhasm: int6464 a4
+
+ # qhasm: int6464 a5
+
+ # qhasm: int6464 a6
+
+ # qhasm: int6464 a7
+
+ # qhasm: int6464 b0
+
+ # qhasm: int6464 b1
+
+ # qhasm: int6464 b2
+
+ # qhasm: int6464 b3
+
+ # qhasm: int6464 b4
+
+ # qhasm: int6464 b5
+
+ # qhasm: int6464 b6
+
+ # qhasm: int6464 b7
+
+ # qhasm: int6464 z0
+
+ # qhasm: int6464 z1
+
+ # qhasm: int6464 z2
+
+ # qhasm: int6464 z3
+
+ # qhasm: int6464 z4
+
+ # qhasm: int6464 z5
+
+ # qhasm: int6464 z6
+
+ # qhasm: int6464 z7
+
+ # qhasm: int6464 z8
+
+ # qhasm: int6464 z9
+
+ # qhasm: int6464 z10
+
+ # qhasm: int6464 z11
+
+ # qhasm: int6464 z12
+
+ # qhasm: int6464 z13
+
+ # qhasm: int6464 z14
+
+ # qhasm: int6464 z15
+
+ # qhasm: stack128 z0_stack
+
+ # qhasm: stack128 z1_stack
+
+ # qhasm: stack128 z2_stack
+
+ # qhasm: stack128 z3_stack
+
+ # qhasm: stack128 z4_stack
+
+ # qhasm: stack128 z5_stack
+
+ # qhasm: stack128 z6_stack
+
+ # qhasm: stack128 z7_stack
+
+ # qhasm: stack128 z8_stack
+
+ # qhasm: stack128 z9_stack
+
+ # qhasm: stack128 z10_stack
+
+ # qhasm: stack128 z11_stack
+
+ # qhasm: stack128 z12_stack
+
+ # qhasm: stack128 z13_stack
+
+ # qhasm: stack128 z14_stack
+
+ # qhasm: stack128 z15_stack
+
+ # qhasm: stack128 orig0
+
+ # qhasm: stack128 orig1
+
+ # qhasm: stack128 orig2
+
+ # qhasm: stack128 orig3
+
+ # qhasm: stack128 orig4
+
+ # qhasm: stack128 orig5
+
+ # qhasm: stack128 orig6
+
+ # qhasm: stack128 orig7
+
+ # qhasm: stack128 orig8
+
+ # qhasm: stack128 orig9
+
+ # qhasm: stack128 orig10
+
+ # qhasm: stack128 orig11
+
+ # qhasm: stack128 orig12
+
+ # qhasm: stack128 orig13
+
+ # qhasm: stack128 orig14
+
+ # qhasm: stack128 orig15
+
+ # qhasm: int6464 p
+
+ # qhasm: int6464 q
+
+ # qhasm: int6464 r
+
+ # qhasm: int6464 s
+
+ # qhasm: int6464 t
+
+ # qhasm: int6464 u
+
+ # qhasm: int6464 v
+
+ # qhasm: int6464 w
+
+ # qhasm: int6464 mp
+
+ # qhasm: int6464 mq
+
+ # qhasm: int6464 mr
+
+ # qhasm: int6464 ms
+
+ # qhasm: int6464 mt
+
+ # qhasm: int6464 mu
+
+ # qhasm: int6464 mv
+
+ # qhasm: int6464 mw
+
+ # qhasm: int32 in0
+
+ # qhasm: int32 in1
+
+ # qhasm: int32 in2
+
+ # qhasm: int32 in3
+
+ # qhasm: int32 in4
+
+ # qhasm: int32 in5
+
+ # qhasm: int32 in6
+
+ # qhasm: int32 in7
+
+ # qhasm: int32 in8
+
+ # qhasm: int32 in9
+
+ # qhasm: int32 in10
+
+ # qhasm: int32 in11
+
+ # qhasm: int32 in12
+
+ # qhasm: int32 in13
+
+ # qhasm: int32 in14
+
+ # qhasm: int32 in15
+
+ # qhasm: stack512 tmp
+
+ # qhasm: stack32 ctarget
+
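The two entry points defined below have the standard NaCl crypto_stream shape at the C level. A sketch for orientation (not part of the file; on 32-bit x86 the 64-bit length occupies two of the stack32 arguments declared above, which is why the stream entry reads its length from arg2 and the xor entry from arg3):

    /* sketch: C-level prototypes of the two symbols exported below */
    extern int crypto_stream_salsa2012_x86_xmm5(
        unsigned char *c, unsigned long long clen,
        const unsigned char *n, const unsigned char *k);

    extern int crypto_stream_salsa2012_x86_xmm5_xor(
        unsigned char *c, const unsigned char *m, unsigned long long mlen,
        const unsigned char *n, const unsigned char *k);
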
+ # qhasm: enter crypto_stream_salsa2012_x86_xmm5
+ .text
+ .p2align 5
+ .globl _crypto_stream_salsa2012_x86_xmm5
+ .globl crypto_stream_salsa2012_x86_xmm5
+ _crypto_stream_salsa2012_x86_xmm5:
+ crypto_stream_salsa2012_x86_xmm5:
+ mov %esp,%eax
+ and $31,%eax
+ add $704,%eax
+ sub %eax,%esp
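The prologue arithmetic, as a self-contained C sketch: subtracting (esp & 31) + 704 leaves the stack pointer 32-byte aligned with 704 bytes of scratch, because esp - (esp & 31) is a multiple of 32 and 704 == 22*32:

    #include <stdint.h>

    /* sketch of the mov/and/add/sub sequence above */
    static uint32_t aligned_frame(uint32_t esp)
    {
        uint32_t adjust = (esp & 31) + 704;  /* 704 is itself a multiple of 32 */
        return esp - adjust;                 /* 32-byte aligned, 704 bytes reserved */
    }
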
+
+ # qhasm: eax_stack = eax
+ # asm 1: movl <eax=int32#1,>eax_stack=stack32#1
+ # asm 2: movl <eax=%eax,>eax_stack=0(%esp)
+ movl %eax,0(%esp)
+
+ # qhasm: ebx_stack = ebx
+ # asm 1: movl <ebx=int32#4,>ebx_stack=stack32#2
+ # asm 2: movl <ebx=%ebx,>ebx_stack=4(%esp)
+ movl %ebx,4(%esp)
+
+ # qhasm: esi_stack = esi
+ # asm 1: movl <esi=int32#5,>esi_stack=stack32#3
+ # asm 2: movl <esi=%esi,>esi_stack=8(%esp)
+ movl %esi,8(%esp)
+
+ # qhasm: edi_stack = edi
+ # asm 1: movl <edi=int32#6,>edi_stack=stack32#4
+ # asm 2: movl <edi=%edi,>edi_stack=12(%esp)
+ movl %edi,12(%esp)
+
+ # qhasm: ebp_stack = ebp
+ # asm 1: movl <ebp=int32#7,>ebp_stack=stack32#5
+ # asm 2: movl <ebp=%ebp,>ebp_stack=16(%esp)
+ movl %ebp,16(%esp)
+
+ # qhasm: bytes = arg2
+ # asm 1: movl <arg2=stack32#-2,>bytes=int32#3
+ # asm 2: movl <arg2=8(%esp,%eax),>bytes=%edx
+ movl 8(%esp,%eax),%edx
+
+ # qhasm: out = arg1
+ # asm 1: movl <arg1=stack32#-1,>out=int32#6
+ # asm 2: movl <arg1=4(%esp,%eax),>out=%edi
+ movl 4(%esp,%eax),%edi
+
+ # qhasm: m = out
+ # asm 1: mov <out=int32#6,>m=int32#5
+ # asm 2: mov <out=%edi,>m=%esi
+ mov %edi,%esi
+
+ # qhasm: iv = arg4
+ # asm 1: movl <arg4=stack32#-4,>iv=int32#4
+ # asm 2: movl <arg4=16(%esp,%eax),>iv=%ebx
+ movl 16(%esp,%eax),%ebx
+
+ # qhasm: k = arg5
+ # asm 1: movl <arg5=stack32#-5,>k=int32#7
+ # asm 2: movl <arg5=20(%esp,%eax),>k=%ebp
+ movl 20(%esp,%eax),%ebp
+
+ # qhasm: unsigned>? bytes - 0
+ # asm 1: cmp $0,<bytes=int32#3
+ # asm 2: cmp $0,<bytes=%edx
+ cmp $0,%edx
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto done if !unsigned>
+ jbe ._done
+
+ # qhasm: a = 0
+ # asm 1: mov $0,>a=int32#1
+ # asm 2: mov $0,>a=%eax
+ mov $0,%eax
+
+ # qhasm: i = bytes
+ # asm 1: mov <bytes=int32#3,>i=int32#2
+ # asm 2: mov <bytes=%edx,>i=%ecx
+ mov %edx,%ecx
+
+ # qhasm: while (i) { *out++ = a; --i }
+ rep stosb
+
+ # qhasm: out -= bytes
+ # asm 1: subl <bytes=int32#3,<out=int32#6
+ # asm 2: subl <bytes=%edx,<out=%edi
+ subl %edx,%edi
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto start
+ jmp ._start
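The body above implements keystream output by reusing the XOR path: it zero-fills the output buffer (a = 0, rep stosb), rewinds out, sets m = out, and jumps to the shared ._start, so XORing keystream into zeros yields the keystream itself. The same idea in C (a sketch; stream_xor is a hypothetical stand-in for the shared loop):

    #include <string.h>

    /* hypothetical helper standing in for the shared ._start loop */
    int stream_xor(unsigned char *c, const unsigned char *m,
                   unsigned long long len,
                   const unsigned char *n, const unsigned char *k);

    int stream(unsigned char *c, unsigned long long len,
               const unsigned char *n, const unsigned char *k)
    {
        memset(c, 0, (size_t)len);        /* what "rep stosb" does above */
        return stream_xor(c, c, len, n, k);
    }
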
+
+ # qhasm: enter crypto_stream_salsa2012_x86_xmm5_xor
+ .text
+ .p2align 5
+ .globl _crypto_stream_salsa2012_x86_xmm5_xor
+ .globl crypto_stream_salsa2012_x86_xmm5_xor
+ _crypto_stream_salsa2012_x86_xmm5_xor:
+ crypto_stream_salsa2012_x86_xmm5_xor:
+ mov %esp,%eax
+ and $31,%eax
+ add $704,%eax
+ sub %eax,%esp
+
+ # qhasm: eax_stack = eax
+ # asm 1: movl <eax=int32#1,>eax_stack=stack32#1
+ # asm 2: movl <eax=%eax,>eax_stack=0(%esp)
+ movl %eax,0(%esp)
+
+ # qhasm: ebx_stack = ebx
+ # asm 1: movl <ebx=int32#4,>ebx_stack=stack32#2
+ # asm 2: movl <ebx=%ebx,>ebx_stack=4(%esp)
+ movl %ebx,4(%esp)
+
+ # qhasm: esi_stack = esi
+ # asm 1: movl <esi=int32#5,>esi_stack=stack32#3
+ # asm 2: movl <esi=%esi,>esi_stack=8(%esp)
+ movl %esi,8(%esp)
+
+ # qhasm: edi_stack = edi
+ # asm 1: movl <edi=int32#6,>edi_stack=stack32#4
+ # asm 2: movl <edi=%edi,>edi_stack=12(%esp)
+ movl %edi,12(%esp)
+
+ # qhasm: ebp_stack = ebp
+ # asm 1: movl <ebp=int32#7,>ebp_stack=stack32#5
+ # asm 2: movl <ebp=%ebp,>ebp_stack=16(%esp)
+ movl %ebp,16(%esp)
+
+ # qhasm: out = arg1
+ # asm 1: movl <arg1=stack32#-1,>out=int32#6
+ # asm 2: movl <arg1=4(%esp,%eax),>out=%edi
+ movl 4(%esp,%eax),%edi
+
+ # qhasm: m = arg2
+ # asm 1: movl <arg2=stack32#-2,>m=int32#5
+ # asm 2: movl <arg2=8(%esp,%eax),>m=%esi
+ movl 8(%esp,%eax),%esi
+
+ # qhasm: bytes = arg3
+ # asm 1: movl <arg3=stack32#-3,>bytes=int32#3
+ # asm 2: movl <arg3=12(%esp,%eax),>bytes=%edx
+ movl 12(%esp,%eax),%edx
+
+ # qhasm: iv = arg5
+ # asm 1: movl <arg5=stack32#-5,>iv=int32#4
+ # asm 2: movl <arg5=20(%esp,%eax),>iv=%ebx
+ movl 20(%esp,%eax),%ebx
+
+ # qhasm: k = arg6
+ # asm 1: movl <arg6=stack32#-6,>k=int32#7
+ # asm 2: movl <arg6=24(%esp,%eax),>k=%ebp
+ movl 24(%esp,%eax),%ebp
+
+ # qhasm: unsigned>? bytes - 0
+ # asm 1: cmp $0,<bytes=int32#3
+ # asm 2: cmp $0,<bytes=%edx
+ cmp $0,%edx
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto done if !unsigned>
+ jbe ._done
+ # comment:fp stack unchanged by fallthrough
+
+ # qhasm: start:
+ ._start:
+
+ # qhasm: out_stack = out
+ # asm 1: movl <out=int32#6,>out_stack=stack32#6
+ # asm 2: movl <out=%edi,>out_stack=20(%esp)
+ movl %edi,20(%esp)
+
+ # qhasm: bytes_stack = bytes
+ # asm 1: movl <bytes=int32#3,>bytes_stack=stack32#7
+ # asm 2: movl <bytes=%edx,>bytes_stack=24(%esp)
+ movl %edx,24(%esp)
+
+ # qhasm: in4 = *(uint32 *) (k + 12)
+ # asm 1: movl 12(<k=int32#7),>in4=int32#1
+ # asm 2: movl 12(<k=%ebp),>in4=%eax
+ movl 12(%ebp),%eax
+
+ # qhasm: in12 = *(uint32 *) (k + 20)
+ # asm 1: movl 20(<k=int32#7),>in12=int32#2
+ # asm 2: movl 20(<k=%ebp),>in12=%ecx
+ movl 20(%ebp),%ecx
+
+ # qhasm: ((uint32 *)&x3)[0] = in4
+ # asm 1: movl <in4=int32#1,>x3=stack128#1
+ # asm 2: movl <in4=%eax,>x3=32(%esp)
+ movl %eax,32(%esp)
+
+ # qhasm: ((uint32 *)&x1)[0] = in12
+ # asm 1: movl <in12=int32#2,>x1=stack128#2
+ # asm 2: movl <in12=%ecx,>x1=48(%esp)
+ movl %ecx,48(%esp)
+
+ # qhasm: in0 = 1634760805
+ # asm 1: mov $1634760805,>in0=int32#1
+ # asm 2: mov $1634760805,>in0=%eax
+ mov $1634760805,%eax
+
+ # qhasm: in8 = 0
+ # asm 1: mov $0,>in8=int32#2
+ # asm 2: mov $0,>in8=%ecx
+ mov $0,%ecx
+
+ # qhasm: ((uint32 *)&x0)[0] = in0
+ # asm 1: movl <in0=int32#1,>x0=stack128#3
+ # asm 2: movl <in0=%eax,>x0=64(%esp)
+ movl %eax,64(%esp)
+
+ # qhasm: ((uint32 *)&x2)[0] = in8
+ # asm 1: movl <in8=int32#2,>x2=stack128#4
+ # asm 2: movl <in8=%ecx,>x2=80(%esp)
+ movl %ecx,80(%esp)
+
+ # qhasm: in6 = *(uint32 *) (iv + 0)
+ # asm 1: movl 0(<iv=int32#4),>in6=int32#1
+ # asm 2: movl 0(<iv=%ebx),>in6=%eax
+ movl 0(%ebx),%eax
+
+ # qhasm: in7 = *(uint32 *) (iv + 4)
+ # asm 1: movl 4(<iv=int32#4),>in7=int32#2
+ # asm 2: movl 4(<iv=%ebx),>in7=%ecx
+ movl 4(%ebx),%ecx
+
+ # qhasm: ((uint32 *)&x1)[2] = in6
+ # asm 1: movl <in6=int32#1,8+<x1=stack128#2
+ # asm 2: movl <in6=%eax,8+<x1=48(%esp)
+ movl %eax,8+48(%esp)
+
+ # qhasm: ((uint32 *)&x2)[3] = in7
+ # asm 1: movl <in7=int32#2,12+<x2=stack128#4
+ # asm 2: movl <in7=%ecx,12+<x2=80(%esp)
+ movl %ecx,12+80(%esp)
+
+ # qhasm: in9 = 0
+ # asm 1: mov $0,>in9=int32#1
+ # asm 2: mov $0,>in9=%eax
+ mov $0,%eax
+
+ # qhasm: in10 = 2036477234
+ # asm 1: mov $2036477234,>in10=int32#2
+ # asm 2: mov $2036477234,>in10=%ecx
+ mov $2036477234,%ecx
+
+ # qhasm: ((uint32 *)&x3)[1] = in9
+ # asm 1: movl <in9=int32#1,4+<x3=stack128#1
+ # asm 2: movl <in9=%eax,4+<x3=32(%esp)
+ movl %eax,4+32(%esp)
+
+ # qhasm: ((uint32 *)&x0)[2] = in10
+ # asm 1: movl <in10=int32#2,8+<x0=stack128#3
+ # asm 2: movl <in10=%ecx,8+<x0=64(%esp)
+ movl %ecx,8+64(%esp)
+
+ # qhasm: in1 = *(uint32 *) (k + 0)
+ # asm 1: movl 0(<k=int32#7),>in1=int32#1
+ # asm 2: movl 0(<k=%ebp),>in1=%eax
+ movl 0(%ebp),%eax
+
+ # qhasm: in2 = *(uint32 *) (k + 4)
+ # asm 1: movl 4(<k=int32#7),>in2=int32#2
+ # asm 2: movl 4(<k=%ebp),>in2=%ecx
+ movl 4(%ebp),%ecx
+
+ # qhasm: in3 = *(uint32 *) (k + 8)
+ # asm 1: movl 8(<k=int32#7),>in3=int32#3
+ # asm 2: movl 8(<k=%ebp),>in3=%edx
+ movl 8(%ebp),%edx
+
+ # qhasm: in5 = 857760878
+ # asm 1: mov $857760878,>in5=int32#4
+ # asm 2: mov $857760878,>in5=%ebx
+ mov $857760878,%ebx
+
+ # qhasm: ((uint32 *)&x1)[1] = in1
+ # asm 1: movl <in1=int32#1,4+<x1=stack128#2
+ # asm 2: movl <in1=%eax,4+<x1=48(%esp)
+ movl %eax,4+48(%esp)
+
+ # qhasm: ((uint32 *)&x2)[2] = in2
+ # asm 1: movl <in2=int32#2,8+<x2=stack128#4
+ # asm 2: movl <in2=%ecx,8+<x2=80(%esp)
+ movl %ecx,8+80(%esp)
+
+ # qhasm: ((uint32 *)&x3)[3] = in3
+ # asm 1: movl <in3=int32#3,12+<x3=stack128#1
+ # asm 2: movl <in3=%edx,12+<x3=32(%esp)
+ movl %edx,12+32(%esp)
+
+ # qhasm: ((uint32 *)&x0)[1] = in5
+ # asm 1: movl <in5=int32#4,4+<x0=stack128#3
+ # asm 2: movl <in5=%ebx,4+<x0=64(%esp)
+ movl %ebx,4+64(%esp)
+
+ # qhasm: in11 = *(uint32 *) (k + 16)
+ # asm 1: movl 16(<k=int32#7),>in11=int32#1
+ # asm 2: movl 16(<k=%ebp),>in11=%eax
+ movl 16(%ebp),%eax
+
+ # qhasm: in13 = *(uint32 *) (k + 24)
+ # asm 1: movl 24(<k=int32#7),>in13=int32#2
+ # asm 2: movl 24(<k=%ebp),>in13=%ecx
+ movl 24(%ebp),%ecx
+
+ # qhasm: in14 = *(uint32 *) (k + 28)
+ # asm 1: movl 28(<k=int32#7),>in14=int32#3
+ # asm 2: movl 28(<k=%ebp),>in14=%edx
+ movl 28(%ebp),%edx
+
+ # qhasm: in15 = 1797285236
+ # asm 1: mov $1797285236,>in15=int32#4
+ # asm 2: mov $1797285236,>in15=%ebx
+ mov $1797285236,%ebx
+
+ # qhasm: ((uint32 *)&x1)[3] = in11
+ # asm 1: movl <in11=int32#1,12+<x1=stack128#2
+ # asm 2: movl <in11=%eax,12+<x1=48(%esp)
+ movl %eax,12+48(%esp)
+
+ # qhasm: ((uint32 *)&x2)[1] = in13
+ # asm 1: movl <in13=int32#2,4+<x2=stack128#4
+ # asm 2: movl <in13=%ecx,4+<x2=80(%esp)
+ movl %ecx,4+80(%esp)
+
+ # qhasm: ((uint32 *)&x3)[2] = in14
+ # asm 1: movl <in14=int32#3,8+<x3=stack128#1
+ # asm 2: movl <in14=%edx,8+<x3=32(%esp)
+ movl %edx,8+32(%esp)
+
+ # qhasm: ((uint32 *)&x0)[3] = in15
+ # asm 1: movl <in15=int32#4,12+<x0=stack128#3
+ # asm 2: movl <in15=%ebx,12+<x0=64(%esp)
+ movl %ebx,12+64(%esp)
+
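The four immediates stored into x0 above are the Salsa20 constants: little-endian ASCII for "expand 32-byte k", with the key, nonce, and block counter filling the rest of the 4x4 state. A small self-contained C check (not part of the file):

    #include <stdio.h>
    #include <stdint.h>

    /* decode the four constants loaded into x0 above */
    int main(void)
    {
        const uint32_t sigma[4] = { 1634760805u, 857760878u,
                                    2036477234u, 1797285236u };
        for (int i = 0; i < 4; i++)
            printf("0x%08lx -> %c%c%c%c\n", (unsigned long)sigma[i],
                   (int)(sigma[i] & 0xff), (int)((sigma[i] >> 8) & 0xff),
                   (int)((sigma[i] >> 16) & 0xff), (int)((sigma[i] >> 24) & 0xff));
        return 0;  /* prints: expa / nd 3 / 2-by / te k */
    }
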
+ # qhasm: bytes = bytes_stack
+ # asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
+ # asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
+ movl 24(%esp),%eax
+
+ # qhasm: unsigned<? bytes - 256
+ # asm 1: cmp $256,<bytes=int32#1
+ # asm 2: cmp $256,<bytes=%eax
+ cmp $256,%eax
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto bytesbetween1and255 if unsigned<
+ jb ._bytesbetween1and255
+
+ # qhasm: z0 = x0
+ # asm 1: movdqa <x0=stack128#3,>z0=int6464#1
+ # asm 2: movdqa <x0=64(%esp),>z0=%xmm0
+ movdqa 64(%esp),%xmm0
+
+ # qhasm: z5 = z0[1,1,1,1]
+ # asm 1: pshufd $0x55,<z0=int6464#1,>z5=int6464#2
+ # asm 2: pshufd $0x55,<z0=%xmm0,>z5=%xmm1
+ pshufd $0x55,%xmm0,%xmm1
+
+ # qhasm: z10 = z0[2,2,2,2]
+ # asm 1: pshufd $0xaa,<z0=int6464#1,>z10=int6464#3
+ # asm 2: pshufd $0xaa,<z0=%xmm0,>z10=%xmm2
+ pshufd $0xaa,%xmm0,%xmm2
+
+ # qhasm: z15 = z0[3,3,3,3]
+ # asm 1: pshufd $0xff,<z0=int6464#1,>z15=int6464#4
+ # asm 2: pshufd $0xff,<z0=%xmm0,>z15=%xmm3
+ pshufd $0xff,%xmm0,%xmm3
+
+ # qhasm: z0 = z0[0,0,0,0]
+ # asm 1: pshufd $0x00,<z0=int6464#1,>z0=int6464#1
+ # asm 2: pshufd $0x00,<z0=%xmm0,>z0=%xmm0
+ pshufd $0x00,%xmm0,%xmm0
+
+ # qhasm: orig5 = z5
+ # asm 1: movdqa <z5=int6464#2,>orig5=stack128#5
+ # asm 2: movdqa <z5=%xmm1,>orig5=96(%esp)
+ movdqa %xmm1,96(%esp)
+
+ # qhasm: orig10 = z10
+ # asm 1: movdqa <z10=int6464#3,>orig10=stack128#6
+ # asm 2: movdqa <z10=%xmm2,>orig10=112(%esp)
+ movdqa %xmm2,112(%esp)
+
+ # qhasm: orig15 = z15
+ # asm 1: movdqa <z15=int6464#4,>orig15=stack128#7
+ # asm 2: movdqa <z15=%xmm3,>orig15=128(%esp)
+ movdqa %xmm3,128(%esp)
+
+ # qhasm: orig0 = z0
+ # asm 1: movdqa <z0=int6464#1,>orig0=stack128#8
+ # asm 2: movdqa <z0=%xmm0,>orig0=144(%esp)
+ movdqa %xmm0,144(%esp)
+
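The pshufd $0x00/$0x55/$0xaa/$0xff sequence broadcasts each 32-bit lane of a state row to all four lanes, so the orig0..orig15 vectors hold four copies of the Salsa20 state and four blocks can be computed in parallel, one per SIMD lane. The same step with SSE2 intrinsics (a sketch; names are illustrative):

    #include <emmintrin.h>

    /* broadcast each lane of x0 into its own vector, as pshufd does above */
    static void broadcast4(__m128i x0, __m128i z[4])
    {
        z[0] = _mm_shuffle_epi32(x0, 0x00);  /* lane 0 -> z0  */
        z[1] = _mm_shuffle_epi32(x0, 0x55);  /* lane 1 -> z5  */
        z[2] = _mm_shuffle_epi32(x0, 0xaa);  /* lane 2 -> z10 */
        z[3] = _mm_shuffle_epi32(x0, 0xff);  /* lane 3 -> z15 */
    }
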
+ # qhasm: z1 = x1
+ # asm 1: movdqa <x1=stack128#2,>z1=int6464#1
+ # asm 2: movdqa <x1=48(%esp),>z1=%xmm0
+ movdqa 48(%esp),%xmm0
+
+ # qhasm: z6 = z1[2,2,2,2]
+ # asm 1: pshufd $0xaa,<z1=int6464#1,>z6=int6464#2
+ # asm 2: pshufd $0xaa,<z1=%xmm0,>z6=%xmm1
+ pshufd $0xaa,%xmm0,%xmm1
+
+ # qhasm: z11 = z1[3,3,3,3]
+ # asm 1: pshufd $0xff,<z1=int6464#1,>z11=int6464#3
+ # asm 2: pshufd $0xff,<z1=%xmm0,>z11=%xmm2
+ pshufd $0xff,%xmm0,%xmm2
+
+ # qhasm: z12 = z1[0,0,0,0]
+ # asm 1: pshufd $0x00,<z1=int6464#1,>z12=int6464#4
+ # asm 2: pshufd $0x00,<z1=%xmm0,>z12=%xmm3
+ pshufd $0x00,%xmm0,%xmm3
+
+ # qhasm: z1 = z1[1,1,1,1]
+ # asm 1: pshufd $0x55,<z1=int6464#1,>z1=int6464#1
+ # asm 2: pshufd $0x55,<z1=%xmm0,>z1=%xmm0
+ pshufd $0x55,%xmm0,%xmm0
+
+ # qhasm: orig6 = z6
+ # asm 1: movdqa <z6=int6464#2,>orig6=stack128#9
+ # asm 2: movdqa <z6=%xmm1,>orig6=160(%esp)
+ movdqa %xmm1,160(%esp)
+
+ # qhasm: orig11 = z11
+ # asm 1: movdqa <z11=int6464#3,>orig11=stack128#10
+ # asm 2: movdqa <z11=%xmm2,>orig11=176(%esp)
+ movdqa %xmm2,176(%esp)
+
+ # qhasm: orig12 = z12
+ # asm 1: movdqa <z12=int6464#4,>orig12=stack128#11
+ # asm 2: movdqa <z12=%xmm3,>orig12=192(%esp)
+ movdqa %xmm3,192(%esp)
+
+ # qhasm: orig1 = z1
+ # asm 1: movdqa <z1=int6464#1,>orig1=stack128#12
+ # asm 2: movdqa <z1=%xmm0,>orig1=208(%esp)
+ movdqa %xmm0,208(%esp)
+
+ # qhasm: z2 = x2
+ # asm 1: movdqa <x2=stack128#4,>z2=int6464#1
+ # asm 2: movdqa <x2=80(%esp),>z2=%xmm0
+ movdqa 80(%esp),%xmm0
+
+ # qhasm: z7 = z2[3,3,3,3]
+ # asm 1: pshufd $0xff,<z2=int6464#1,>z7=int6464#2
+ # asm 2: pshufd $0xff,<z2=%xmm0,>z7=%xmm1
+ pshufd $0xff,%xmm0,%xmm1
+
+ # qhasm: z13 = z2[1,1,1,1]
+ # asm 1: pshufd $0x55,<z2=int6464#1,>z13=int6464#3
+ # asm 2: pshufd $0x55,<z2=%xmm0,>z13=%xmm2
+ pshufd $0x55,%xmm0,%xmm2
+
+ # qhasm: z2 = z2[2,2,2,2]
+ # asm 1: pshufd $0xaa,<z2=int6464#1,>z2=int6464#1
+ # asm 2: pshufd $0xaa,<z2=%xmm0,>z2=%xmm0
+ pshufd $0xaa,%xmm0,%xmm0
+
+ # qhasm: orig7 = z7
+ # asm 1: movdqa <z7=int6464#2,>orig7=stack128#13
+ # asm 2: movdqa <z7=%xmm1,>orig7=224(%esp)
+ movdqa %xmm1,224(%esp)
+
+ # qhasm: orig13 = z13
+ # asm 1: movdqa <z13=int6464#3,>orig13=stack128#14
+ # asm 2: movdqa <z13=%xmm2,>orig13=240(%esp)
+ movdqa %xmm2,240(%esp)
+
+ # qhasm: orig2 = z2
+ # asm 1: movdqa <z2=int6464#1,>orig2=stack128#15
+ # asm 2: movdqa <z2=%xmm0,>orig2=256(%esp)
+ movdqa %xmm0,256(%esp)
+
+ # qhasm: z3 = x3
+ # asm 1: movdqa <x3=stack128#1,>z3=int6464#1
+ # asm 2: movdqa <x3=32(%esp),>z3=%xmm0
+ movdqa 32(%esp),%xmm0
+
+ # qhasm: z4 = z3[0,0,0,0]
+ # asm 1: pshufd $0x00,<z3=int6464#1,>z4=int6464#2
+ # asm 2: pshufd $0x00,<z3=%xmm0,>z4=%xmm1
+ pshufd $0x00,%xmm0,%xmm1
+
+ # qhasm: z14 = z3[2,2,2,2]
+ # asm 1: pshufd $0xaa,<z3=int6464#1,>z14=int6464#3
+ # asm 2: pshufd $0xaa,<z3=%xmm0,>z14=%xmm2
+ pshufd $0xaa,%xmm0,%xmm2
+
+ # qhasm: z3 = z3[3,3,3,3]
+ # asm 1: pshufd $0xff,<z3=int6464#1,>z3=int6464#1
+ # asm 2: pshufd $0xff,<z3=%xmm0,>z3=%xmm0
+ pshufd $0xff,%xmm0,%xmm0
+
+ # qhasm: orig4 = z4
+ # asm 1: movdqa <z4=int6464#2,>orig4=stack128#16
+ # asm 2: movdqa <z4=%xmm1,>orig4=272(%esp)
+ movdqa %xmm1,272(%esp)
+
+ # qhasm: orig14 = z14
+ # asm 1: movdqa <z14=int6464#3,>orig14=stack128#17
+ # asm 2: movdqa <z14=%xmm2,>orig14=288(%esp)
+ movdqa %xmm2,288(%esp)
+
+ # qhasm: orig3 = z3
+ # asm 1: movdqa <z3=int6464#1,>orig3=stack128#18
+ # asm 2: movdqa <z3=%xmm0,>orig3=304(%esp)
+ movdqa %xmm0,304(%esp)
+
+ # qhasm: bytesatleast256:
+ ._bytesatleast256:
+
+ # qhasm: in8 = ((uint32 *)&x2)[0]
+ # asm 1: movl <x2=stack128#4,>in8=int32#2
+ # asm 2: movl <x2=80(%esp),>in8=%ecx
+ movl 80(%esp),%ecx
+
+ # qhasm: in9 = ((uint32 *)&x3)[1]
+ # asm 1: movl 4+<x3=stack128#1,>in9=int32#3
+ # asm 2: movl 4+<x3=32(%esp),>in9=%edx
+ movl 4+32(%esp),%edx
+
+ # qhasm: ((uint32 *) &orig8)[0] = in8
+ # asm 1: movl <in8=int32#2,>orig8=stack128#19
+ # asm 2: movl <in8=%ecx,>orig8=320(%esp)
+ movl %ecx,320(%esp)
+
+ # qhasm: ((uint32 *) &orig9)[0] = in9
+ # asm 1: movl <in9=int32#3,>orig9=stack128#20
+ # asm 2: movl <in9=%edx,>orig9=336(%esp)
+ movl %edx,336(%esp)
+
+ # qhasm: carry? in8 += 1
+ # asm 1: add $1,<in8=int32#2
+ # asm 2: add $1,<in8=%ecx
+ add $1,%ecx
+
+ # qhasm: in9 += 0 + carry
+ # asm 1: adc $0,<in9=int32#3
+ # asm 2: adc $0,<in9=%edx
+ adc $0,%edx
+
+ # qhasm: ((uint32 *) &orig8)[1] = in8
+ # asm 1: movl <in8=int32#2,4+<orig8=stack128#19
+ # asm 2: movl <in8=%ecx,4+<orig8=320(%esp)
+ movl %ecx,4+320(%esp)
+
+ # qhasm: ((uint32 *) &orig9)[1] = in9
+ # asm 1: movl <in9=int32#3,4+<orig9=stack128#20
+ # asm 2: movl <in9=%edx,4+<orig9=336(%esp)
+ movl %edx,4+336(%esp)
+
+ # qhasm: carry? in8 += 1
+ # asm 1: add $1,<in8=int32#2
+ # asm 2: add $1,<in8=%ecx
+ add $1,%ecx
+
+ # qhasm: in9 += 0 + carry
+ # asm 1: adc $0,<in9=int32#3
+ # asm 2: adc $0,<in9=%edx
+ adc $0,%edx
+
+ # qhasm: ((uint32 *) &orig8)[2] = in8
+ # asm 1: movl <in8=int32#2,8+<orig8=stack128#19
+ # asm 2: movl <in8=%ecx,8+<orig8=320(%esp)
+ movl %ecx,8+320(%esp)
+
+ # qhasm: ((uint32 *) &orig9)[2] = in9
+ # asm 1: movl <in9=int32#3,8+<orig9=stack128#20
+ # asm 2: movl <in9=%edx,8+<orig9=336(%esp)
+ movl %edx,8+336(%esp)
+
+ # qhasm: carry? in8 += 1
+ # asm 1: add $1,<in8=int32#2
+ # asm 2: add $1,<in8=%ecx
+ add $1,%ecx
+
+ # qhasm: in9 += 0 + carry
+ # asm 1: adc $0,<in9=int32#3
+ # asm 2: adc $0,<in9=%edx
+ adc $0,%edx
+
+ # qhasm: ((uint32 *) &orig8)[3] = in8
+ # asm 1: movl <in8=int32#2,12+<orig8=stack128#19
+ # asm 2: movl <in8=%ecx,12+<orig8=320(%esp)
+ movl %ecx,12+320(%esp)
+
+ # qhasm: ((uint32 *) &orig9)[3] = in9
+ # asm 1: movl <in9=int32#3,12+<orig9=stack128#20
+ # asm 2: movl <in9=%edx,12+<orig9=336(%esp)
+ movl %edx,12+336(%esp)
+
+ # qhasm: carry? in8 += 1
+ # asm 1: add $1,<in8=int32#2
+ # asm 2: add $1,<in8=%ecx
+ add $1,%ecx
+
+ # qhasm: in9 += 0 + carry
+ # asm 1: adc $0,<in9=int32#3
+ # asm 2: adc $0,<in9=%edx
+ adc $0,%edx
+
+ # qhasm: ((uint32 *)&x2)[0] = in8
+ # asm 1: movl <in8=int32#2,>x2=stack128#4
+ # asm 2: movl <in8=%ecx,>x2=80(%esp)
+ movl %ecx,80(%esp)
+
+ # qhasm: ((uint32 *)&x3)[1] = in9
+ # asm 1: movl <in9=int32#3,4+<x3=stack128#1
+ # asm 2: movl <in9=%edx,4+<x3=32(%esp)
+ movl %edx,4+32(%esp)
+
+ # qhasm: bytes_stack = bytes
904
+ # asm 1: movl <bytes=int32#1,>bytes_stack=stack32#7
905
+ # asm 2: movl <bytes=%eax,>bytes_stack=24(%esp)
906
+ movl %eax,24(%esp)
907
+
908
+ # qhasm: i = 12
909
+ # asm 1: mov $12,>i=int32#1
910
+ # asm 2: mov $12,>i=%eax
911
+ mov $12,%eax
912
+
+ # qhasm: z5 = orig5
+ # asm 1: movdqa <orig5=stack128#5,>z5=int6464#1
+ # asm 2: movdqa <orig5=96(%esp),>z5=%xmm0
+ movdqa 96(%esp),%xmm0
+
+ # qhasm: z10 = orig10
+ # asm 1: movdqa <orig10=stack128#6,>z10=int6464#2
+ # asm 2: movdqa <orig10=112(%esp),>z10=%xmm1
+ movdqa 112(%esp),%xmm1
+
+ # qhasm: z15 = orig15
+ # asm 1: movdqa <orig15=stack128#7,>z15=int6464#3
+ # asm 2: movdqa <orig15=128(%esp),>z15=%xmm2
+ movdqa 128(%esp),%xmm2
+
+ # qhasm: z14 = orig14
+ # asm 1: movdqa <orig14=stack128#17,>z14=int6464#4
+ # asm 2: movdqa <orig14=288(%esp),>z14=%xmm3
+ movdqa 288(%esp),%xmm3
+
+ # qhasm: z3 = orig3
+ # asm 1: movdqa <orig3=stack128#18,>z3=int6464#5
+ # asm 2: movdqa <orig3=304(%esp),>z3=%xmm4
+ movdqa 304(%esp),%xmm4
+
+ # qhasm: z6 = orig6
+ # asm 1: movdqa <orig6=stack128#9,>z6=int6464#6
+ # asm 2: movdqa <orig6=160(%esp),>z6=%xmm5
+ movdqa 160(%esp),%xmm5
+
+ # qhasm: z11 = orig11
+ # asm 1: movdqa <orig11=stack128#10,>z11=int6464#7
+ # asm 2: movdqa <orig11=176(%esp),>z11=%xmm6
+ movdqa 176(%esp),%xmm6
+
+ # qhasm: z1 = orig1
+ # asm 1: movdqa <orig1=stack128#12,>z1=int6464#8
+ # asm 2: movdqa <orig1=208(%esp),>z1=%xmm7
+ movdqa 208(%esp),%xmm7
+
+ # qhasm: z5_stack = z5
+ # asm 1: movdqa <z5=int6464#1,>z5_stack=stack128#21
+ # asm 2: movdqa <z5=%xmm0,>z5_stack=352(%esp)
+ movdqa %xmm0,352(%esp)
+
+ # qhasm: z10_stack = z10
+ # asm 1: movdqa <z10=int6464#2,>z10_stack=stack128#22
+ # asm 2: movdqa <z10=%xmm1,>z10_stack=368(%esp)
+ movdqa %xmm1,368(%esp)
+
+ # qhasm: z15_stack = z15
+ # asm 1: movdqa <z15=int6464#3,>z15_stack=stack128#23
+ # asm 2: movdqa <z15=%xmm2,>z15_stack=384(%esp)
+ movdqa %xmm2,384(%esp)
+
+ # qhasm: z14_stack = z14
+ # asm 1: movdqa <z14=int6464#4,>z14_stack=stack128#24
+ # asm 2: movdqa <z14=%xmm3,>z14_stack=400(%esp)
+ movdqa %xmm3,400(%esp)
+
+ # qhasm: z3_stack = z3
+ # asm 1: movdqa <z3=int6464#5,>z3_stack=stack128#25
+ # asm 2: movdqa <z3=%xmm4,>z3_stack=416(%esp)
+ movdqa %xmm4,416(%esp)
+
+ # qhasm: z6_stack = z6
+ # asm 1: movdqa <z6=int6464#6,>z6_stack=stack128#26
+ # asm 2: movdqa <z6=%xmm5,>z6_stack=432(%esp)
+ movdqa %xmm5,432(%esp)
+
+ # qhasm: z11_stack = z11
+ # asm 1: movdqa <z11=int6464#7,>z11_stack=stack128#27
+ # asm 2: movdqa <z11=%xmm6,>z11_stack=448(%esp)
+ movdqa %xmm6,448(%esp)
+
+ # qhasm: z1_stack = z1
+ # asm 1: movdqa <z1=int6464#8,>z1_stack=stack128#28
+ # asm 2: movdqa <z1=%xmm7,>z1_stack=464(%esp)
+ movdqa %xmm7,464(%esp)
+
+ # qhasm: z7 = orig7
+ # asm 1: movdqa <orig7=stack128#13,>z7=int6464#5
+ # asm 2: movdqa <orig7=224(%esp),>z7=%xmm4
+ movdqa 224(%esp),%xmm4
+
+ # qhasm: z13 = orig13
+ # asm 1: movdqa <orig13=stack128#14,>z13=int6464#6
+ # asm 2: movdqa <orig13=240(%esp),>z13=%xmm5
+ movdqa 240(%esp),%xmm5
+
+ # qhasm: z2 = orig2
+ # asm 1: movdqa <orig2=stack128#15,>z2=int6464#7
+ # asm 2: movdqa <orig2=256(%esp),>z2=%xmm6
+ movdqa 256(%esp),%xmm6
+
+ # qhasm: z9 = orig9
+ # asm 1: movdqa <orig9=stack128#20,>z9=int6464#8
+ # asm 2: movdqa <orig9=336(%esp),>z9=%xmm7
+ movdqa 336(%esp),%xmm7
+
+ # qhasm: p = orig0
+ # asm 1: movdqa <orig0=stack128#8,>p=int6464#1
+ # asm 2: movdqa <orig0=144(%esp),>p=%xmm0
+ movdqa 144(%esp),%xmm0
+
+ # qhasm: t = orig12
+ # asm 1: movdqa <orig12=stack128#11,>t=int6464#3
+ # asm 2: movdqa <orig12=192(%esp),>t=%xmm2
+ movdqa 192(%esp),%xmm2
+
+ # qhasm: q = orig4
+ # asm 1: movdqa <orig4=stack128#16,>q=int6464#4
+ # asm 2: movdqa <orig4=272(%esp),>q=%xmm3
+ movdqa 272(%esp),%xmm3
+
+ # qhasm: r = orig8
+ # asm 1: movdqa <orig8=stack128#19,>r=int6464#2
+ # asm 2: movdqa <orig8=320(%esp),>r=%xmm1
+ movdqa 320(%esp),%xmm1
+
+ # qhasm: z7_stack = z7
+ # asm 1: movdqa <z7=int6464#5,>z7_stack=stack128#29
+ # asm 2: movdqa <z7=%xmm4,>z7_stack=480(%esp)
+ movdqa %xmm4,480(%esp)
+
+ # qhasm: z13_stack = z13
+ # asm 1: movdqa <z13=int6464#6,>z13_stack=stack128#30
+ # asm 2: movdqa <z13=%xmm5,>z13_stack=496(%esp)
+ movdqa %xmm5,496(%esp)
+
+ # qhasm: z2_stack = z2
+ # asm 1: movdqa <z2=int6464#7,>z2_stack=stack128#31
+ # asm 2: movdqa <z2=%xmm6,>z2_stack=512(%esp)
+ movdqa %xmm6,512(%esp)
+
+ # qhasm: z9_stack = z9
+ # asm 1: movdqa <z9=int6464#8,>z9_stack=stack128#32
+ # asm 2: movdqa <z9=%xmm7,>z9_stack=528(%esp)
+ movdqa %xmm7,528(%esp)
+
+ # qhasm: z0_stack = p
+ # asm 1: movdqa <p=int6464#1,>z0_stack=stack128#33
+ # asm 2: movdqa <p=%xmm0,>z0_stack=544(%esp)
+ movdqa %xmm0,544(%esp)
+
+ # qhasm: z12_stack = t
+ # asm 1: movdqa <t=int6464#3,>z12_stack=stack128#34
+ # asm 2: movdqa <t=%xmm2,>z12_stack=560(%esp)
+ movdqa %xmm2,560(%esp)
+
+ # qhasm: z4_stack = q
+ # asm 1: movdqa <q=int6464#4,>z4_stack=stack128#35
+ # asm 2: movdqa <q=%xmm3,>z4_stack=576(%esp)
+ movdqa %xmm3,576(%esp)
+
+ # qhasm: z8_stack = r
+ # asm 1: movdqa <r=int6464#2,>z8_stack=stack128#36
+ # asm 2: movdqa <r=%xmm1,>z8_stack=592(%esp)
+ movdqa %xmm1,592(%esp)
+
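+ # note: 32-bit x86 has only eight xmm registers, so the sixteen 4-lane
+ # state words live in stack128 spill slots; entering the loop, p, t, q
+ # and r hold the words z0, z12, z4 and z8 of the first quarter-round.
+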
+ # qhasm: mainloop1:
+ ._mainloop1:
+
+ # qhasm: assign xmm0 to p
+
+ # qhasm: assign xmm1 to r
+
+ # qhasm: assign xmm2 to t
+
+ # qhasm: assign xmm3 to q
+
+ # qhasm: s = t
+ # asm 1: movdqa <t=int6464#3,>s=int6464#7
+ # asm 2: movdqa <t=%xmm2,>s=%xmm6
+ movdqa %xmm2,%xmm6
+
+ # qhasm: uint32323232 t += p
+ # asm 1: paddd <p=int6464#1,<t=int6464#3
+ # asm 2: paddd <p=%xmm0,<t=%xmm2
+ paddd %xmm0,%xmm2
+
+ # qhasm: u = t
+ # asm 1: movdqa <t=int6464#3,>u=int6464#5
+ # asm 2: movdqa <t=%xmm2,>u=%xmm4
+ movdqa %xmm2,%xmm4
+
+ # qhasm: uint32323232 t >>= 25
+ # asm 1: psrld $25,<t=int6464#3
+ # asm 2: psrld $25,<t=%xmm2
+ psrld $25,%xmm2
+
+ # qhasm: q ^= t
+ # asm 1: pxor <t=int6464#3,<q=int6464#4
+ # asm 2: pxor <t=%xmm2,<q=%xmm3
+ pxor %xmm2,%xmm3
+
+ # qhasm: uint32323232 u <<= 7
+ # asm 1: pslld $7,<u=int6464#5
+ # asm 2: pslld $7,<u=%xmm4
+ pslld $7,%xmm4
+
+ # qhasm: q ^= u
+ # asm 1: pxor <u=int6464#5,<q=int6464#4
+ # asm 2: pxor <u=%xmm4,<q=%xmm3
+ pxor %xmm4,%xmm3
+
+ # qhasm: z4_stack = q
+ # asm 1: movdqa <q=int6464#4,>z4_stack=stack128#33
+ # asm 2: movdqa <q=%xmm3,>z4_stack=544(%esp)
+ movdqa %xmm3,544(%esp)
+
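+ # note: SSE2 has no packed rotate, so each Salsa20 rotation is a
+ # shift/shift/xor pair: the psrld $25 and pslld $7 above together give
+ # q ^= rotl32(t + p, 7); the pairs (23,9), (19,13) and (14,18) that
+ # follow realize the remaining quarter-round rotations 9, 13 and 18.
+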
+ # qhasm: t = p
+ # asm 1: movdqa <p=int6464#1,>t=int6464#3
+ # asm 2: movdqa <p=%xmm0,>t=%xmm2
+ movdqa %xmm0,%xmm2
+
+ # qhasm: uint32323232 t += q
+ # asm 1: paddd <q=int6464#4,<t=int6464#3
+ # asm 2: paddd <q=%xmm3,<t=%xmm2
+ paddd %xmm3,%xmm2
+
+ # qhasm: u = t
+ # asm 1: movdqa <t=int6464#3,>u=int6464#5
+ # asm 2: movdqa <t=%xmm2,>u=%xmm4
+ movdqa %xmm2,%xmm4
+
+ # qhasm: uint32323232 t >>= 23
+ # asm 1: psrld $23,<t=int6464#3
+ # asm 2: psrld $23,<t=%xmm2
+ psrld $23,%xmm2
+
+ # qhasm: r ^= t
+ # asm 1: pxor <t=int6464#3,<r=int6464#2
+ # asm 2: pxor <t=%xmm2,<r=%xmm1
+ pxor %xmm2,%xmm1
+
+ # qhasm: uint32323232 u <<= 9
+ # asm 1: pslld $9,<u=int6464#5
+ # asm 2: pslld $9,<u=%xmm4
+ pslld $9,%xmm4
+
+ # qhasm: r ^= u
+ # asm 1: pxor <u=int6464#5,<r=int6464#2
+ # asm 2: pxor <u=%xmm4,<r=%xmm1
+ pxor %xmm4,%xmm1
+
+ # qhasm: z8_stack = r
+ # asm 1: movdqa <r=int6464#2,>z8_stack=stack128#34
+ # asm 2: movdqa <r=%xmm1,>z8_stack=560(%esp)
+ movdqa %xmm1,560(%esp)
+
+ # qhasm: uint32323232 q += r
+ # asm 1: paddd <r=int6464#2,<q=int6464#4
+ # asm 2: paddd <r=%xmm1,<q=%xmm3
+ paddd %xmm1,%xmm3
+
+ # qhasm: u = q
+ # asm 1: movdqa <q=int6464#4,>u=int6464#3
+ # asm 2: movdqa <q=%xmm3,>u=%xmm2
+ movdqa %xmm3,%xmm2
+
+ # qhasm: uint32323232 q >>= 19
+ # asm 1: psrld $19,<q=int6464#4
+ # asm 2: psrld $19,<q=%xmm3
+ psrld $19,%xmm3
+
+ # qhasm: s ^= q
+ # asm 1: pxor <q=int6464#4,<s=int6464#7
+ # asm 2: pxor <q=%xmm3,<s=%xmm6
+ pxor %xmm3,%xmm6
+
+ # qhasm: uint32323232 u <<= 13
+ # asm 1: pslld $13,<u=int6464#3
+ # asm 2: pslld $13,<u=%xmm2
+ pslld $13,%xmm2
+
+ # qhasm: s ^= u
+ # asm 1: pxor <u=int6464#3,<s=int6464#7
+ # asm 2: pxor <u=%xmm2,<s=%xmm6
+ pxor %xmm2,%xmm6
+
+ # qhasm: mt = z1_stack
+ # asm 1: movdqa <z1_stack=stack128#28,>mt=int6464#3
+ # asm 2: movdqa <z1_stack=464(%esp),>mt=%xmm2
+ movdqa 464(%esp),%xmm2
+
+ # qhasm: mp = z5_stack
+ # asm 1: movdqa <z5_stack=stack128#21,>mp=int6464#5
+ # asm 2: movdqa <z5_stack=352(%esp),>mp=%xmm4
+ movdqa 352(%esp),%xmm4
+
+ # qhasm: mq = z9_stack
+ # asm 1: movdqa <z9_stack=stack128#32,>mq=int6464#4
+ # asm 2: movdqa <z9_stack=528(%esp),>mq=%xmm3
+ movdqa 528(%esp),%xmm3
+
+ # qhasm: mr = z13_stack
+ # asm 1: movdqa <z13_stack=stack128#30,>mr=int6464#6
+ # asm 2: movdqa <z13_stack=496(%esp),>mr=%xmm5
+ movdqa 496(%esp),%xmm5
+
+ # qhasm: z12_stack = s
+ # asm 1: movdqa <s=int6464#7,>z12_stack=stack128#30
+ # asm 2: movdqa <s=%xmm6,>z12_stack=496(%esp)
+ movdqa %xmm6,496(%esp)
+
+ # qhasm: uint32323232 r += s
+ # asm 1: paddd <s=int6464#7,<r=int6464#2
+ # asm 2: paddd <s=%xmm6,<r=%xmm1
+ paddd %xmm6,%xmm1
+
+ # qhasm: u = r
+ # asm 1: movdqa <r=int6464#2,>u=int6464#7
+ # asm 2: movdqa <r=%xmm1,>u=%xmm6
+ movdqa %xmm1,%xmm6
+
+ # qhasm: uint32323232 r >>= 14
+ # asm 1: psrld $14,<r=int6464#2
+ # asm 2: psrld $14,<r=%xmm1
+ psrld $14,%xmm1
+
+ # qhasm: p ^= r
+ # asm 1: pxor <r=int6464#2,<p=int6464#1
+ # asm 2: pxor <r=%xmm1,<p=%xmm0
+ pxor %xmm1,%xmm0
+
+ # qhasm: uint32323232 u <<= 18
+ # asm 1: pslld $18,<u=int6464#7
+ # asm 2: pslld $18,<u=%xmm6
+ pslld $18,%xmm6
+
+ # qhasm: p ^= u
+ # asm 1: pxor <u=int6464#7,<p=int6464#1
+ # asm 2: pxor <u=%xmm6,<p=%xmm0
+ pxor %xmm6,%xmm0
+
+ # qhasm: z0_stack = p
+ # asm 1: movdqa <p=int6464#1,>z0_stack=stack128#21
+ # asm 2: movdqa <p=%xmm0,>z0_stack=352(%esp)
+ movdqa %xmm0,352(%esp)
+
+ # qhasm: assign xmm2 to mt
+
+ # qhasm: assign xmm3 to mq
+
+ # qhasm: assign xmm4 to mp
+
+ # qhasm: assign xmm5 to mr
+
+ # qhasm: ms = mt
+ # asm 1: movdqa <mt=int6464#3,>ms=int6464#7
+ # asm 2: movdqa <mt=%xmm2,>ms=%xmm6
+ movdqa %xmm2,%xmm6
+
+ # qhasm: uint32323232 mt += mp
+ # asm 1: paddd <mp=int6464#5,<mt=int6464#3
+ # asm 2: paddd <mp=%xmm4,<mt=%xmm2
+ paddd %xmm4,%xmm2
+
+ # qhasm: mu = mt
+ # asm 1: movdqa <mt=int6464#3,>mu=int6464#1
+ # asm 2: movdqa <mt=%xmm2,>mu=%xmm0
+ movdqa %xmm2,%xmm0
+
+ # qhasm: uint32323232 mt >>= 25
+ # asm 1: psrld $25,<mt=int6464#3
+ # asm 2: psrld $25,<mt=%xmm2
+ psrld $25,%xmm2
+
+ # qhasm: mq ^= mt
+ # asm 1: pxor <mt=int6464#3,<mq=int6464#4
+ # asm 2: pxor <mt=%xmm2,<mq=%xmm3
+ pxor %xmm2,%xmm3
+
+ # qhasm: uint32323232 mu <<= 7
+ # asm 1: pslld $7,<mu=int6464#1
+ # asm 2: pslld $7,<mu=%xmm0
+ pslld $7,%xmm0
+
+ # qhasm: mq ^= mu
+ # asm 1: pxor <mu=int6464#1,<mq=int6464#4
+ # asm 2: pxor <mu=%xmm0,<mq=%xmm3
+ pxor %xmm0,%xmm3
+
+ # qhasm: z9_stack = mq
+ # asm 1: movdqa <mq=int6464#4,>z9_stack=stack128#32
+ # asm 2: movdqa <mq=%xmm3,>z9_stack=528(%esp)
+ movdqa %xmm3,528(%esp)
+
+ # qhasm: mt = mp
+ # asm 1: movdqa <mp=int6464#5,>mt=int6464#1
+ # asm 2: movdqa <mp=%xmm4,>mt=%xmm0
+ movdqa %xmm4,%xmm0
+
+ # qhasm: uint32323232 mt += mq
+ # asm 1: paddd <mq=int6464#4,<mt=int6464#1
+ # asm 2: paddd <mq=%xmm3,<mt=%xmm0
+ paddd %xmm3,%xmm0
+
+ # qhasm: mu = mt
+ # asm 1: movdqa <mt=int6464#1,>mu=int6464#2
+ # asm 2: movdqa <mt=%xmm0,>mu=%xmm1
+ movdqa %xmm0,%xmm1
+
+ # qhasm: uint32323232 mt >>= 23
+ # asm 1: psrld $23,<mt=int6464#1
+ # asm 2: psrld $23,<mt=%xmm0
+ psrld $23,%xmm0
+
+ # qhasm: mr ^= mt
+ # asm 1: pxor <mt=int6464#1,<mr=int6464#6
+ # asm 2: pxor <mt=%xmm0,<mr=%xmm5
+ pxor %xmm0,%xmm5
+
+ # qhasm: uint32323232 mu <<= 9
+ # asm 1: pslld $9,<mu=int6464#2
+ # asm 2: pslld $9,<mu=%xmm1
+ pslld $9,%xmm1
+
+ # qhasm: mr ^= mu
+ # asm 1: pxor <mu=int6464#2,<mr=int6464#6
+ # asm 2: pxor <mu=%xmm1,<mr=%xmm5
+ pxor %xmm1,%xmm5
+
+ # qhasm: z13_stack = mr
+ # asm 1: movdqa <mr=int6464#6,>z13_stack=stack128#35
+ # asm 2: movdqa <mr=%xmm5,>z13_stack=576(%esp)
+ movdqa %xmm5,576(%esp)
+
+ # qhasm: uint32323232 mq += mr
+ # asm 1: paddd <mr=int6464#6,<mq=int6464#4
+ # asm 2: paddd <mr=%xmm5,<mq=%xmm3
+ paddd %xmm5,%xmm3
+
+ # qhasm: mu = mq
+ # asm 1: movdqa <mq=int6464#4,>mu=int6464#1
+ # asm 2: movdqa <mq=%xmm3,>mu=%xmm0
+ movdqa %xmm3,%xmm0
+
+ # qhasm: uint32323232 mq >>= 19
+ # asm 1: psrld $19,<mq=int6464#4
+ # asm 2: psrld $19,<mq=%xmm3
+ psrld $19,%xmm3
+
+ # qhasm: ms ^= mq
+ # asm 1: pxor <mq=int6464#4,<ms=int6464#7
+ # asm 2: pxor <mq=%xmm3,<ms=%xmm6
+ pxor %xmm3,%xmm6
+
+ # qhasm: uint32323232 mu <<= 13
+ # asm 1: pslld $13,<mu=int6464#1
+ # asm 2: pslld $13,<mu=%xmm0
+ pslld $13,%xmm0
+
+ # qhasm: ms ^= mu
+ # asm 1: pxor <mu=int6464#1,<ms=int6464#7
+ # asm 2: pxor <mu=%xmm0,<ms=%xmm6
+ pxor %xmm0,%xmm6
+
+ # qhasm: t = z6_stack
+ # asm 1: movdqa <z6_stack=stack128#26,>t=int6464#3
+ # asm 2: movdqa <z6_stack=432(%esp),>t=%xmm2
+ movdqa 432(%esp),%xmm2
+
+ # qhasm: p = z10_stack
+ # asm 1: movdqa <z10_stack=stack128#22,>p=int6464#1
+ # asm 2: movdqa <z10_stack=368(%esp),>p=%xmm0
+ movdqa 368(%esp),%xmm0
+
+ # qhasm: q = z14_stack
+ # asm 1: movdqa <z14_stack=stack128#24,>q=int6464#4
+ # asm 2: movdqa <z14_stack=400(%esp),>q=%xmm3
+ movdqa 400(%esp),%xmm3
+
+ # qhasm: r = z2_stack
+ # asm 1: movdqa <z2_stack=stack128#31,>r=int6464#2
+ # asm 2: movdqa <z2_stack=512(%esp),>r=%xmm1
+ movdqa 512(%esp),%xmm1
+
+ # qhasm: z1_stack = ms
+ # asm 1: movdqa <ms=int6464#7,>z1_stack=stack128#22
+ # asm 2: movdqa <ms=%xmm6,>z1_stack=368(%esp)
+ movdqa %xmm6,368(%esp)
+
+ # qhasm: uint32323232 mr += ms
+ # asm 1: paddd <ms=int6464#7,<mr=int6464#6
+ # asm 2: paddd <ms=%xmm6,<mr=%xmm5
+ paddd %xmm6,%xmm5
+
+ # qhasm: mu = mr
+ # asm 1: movdqa <mr=int6464#6,>mu=int6464#7
+ # asm 2: movdqa <mr=%xmm5,>mu=%xmm6
+ movdqa %xmm5,%xmm6
+
+ # qhasm: uint32323232 mr >>= 14
+ # asm 1: psrld $14,<mr=int6464#6
+ # asm 2: psrld $14,<mr=%xmm5
+ psrld $14,%xmm5
+
+ # qhasm: mp ^= mr
+ # asm 1: pxor <mr=int6464#6,<mp=int6464#5
+ # asm 2: pxor <mr=%xmm5,<mp=%xmm4
+ pxor %xmm5,%xmm4
+
+ # qhasm: uint32323232 mu <<= 18
+ # asm 1: pslld $18,<mu=int6464#7
+ # asm 2: pslld $18,<mu=%xmm6
+ pslld $18,%xmm6
+
+ # qhasm: mp ^= mu
+ # asm 1: pxor <mu=int6464#7,<mp=int6464#5
+ # asm 2: pxor <mu=%xmm6,<mp=%xmm4
+ pxor %xmm6,%xmm4
+
+ # qhasm: z5_stack = mp
+ # asm 1: movdqa <mp=int6464#5,>z5_stack=stack128#24
+ # asm 2: movdqa <mp=%xmm4,>z5_stack=400(%esp)
+ movdqa %xmm4,400(%esp)
+
+ # qhasm: assign xmm0 to p
+
+ # qhasm: assign xmm1 to r
+
+ # qhasm: assign xmm2 to t
+
+ # qhasm: assign xmm3 to q
+
+ # qhasm: s = t
+ # asm 1: movdqa <t=int6464#3,>s=int6464#7
+ # asm 2: movdqa <t=%xmm2,>s=%xmm6
+ movdqa %xmm2,%xmm6
+
+ # qhasm: uint32323232 t += p
+ # asm 1: paddd <p=int6464#1,<t=int6464#3
+ # asm 2: paddd <p=%xmm0,<t=%xmm2
+ paddd %xmm0,%xmm2
+
+ # qhasm: u = t
+ # asm 1: movdqa <t=int6464#3,>u=int6464#5
+ # asm 2: movdqa <t=%xmm2,>u=%xmm4
+ movdqa %xmm2,%xmm4
+
+ # qhasm: uint32323232 t >>= 25
+ # asm 1: psrld $25,<t=int6464#3
+ # asm 2: psrld $25,<t=%xmm2
+ psrld $25,%xmm2
+
+ # qhasm: q ^= t
+ # asm 1: pxor <t=int6464#3,<q=int6464#4
+ # asm 2: pxor <t=%xmm2,<q=%xmm3
+ pxor %xmm2,%xmm3
+
+ # qhasm: uint32323232 u <<= 7
+ # asm 1: pslld $7,<u=int6464#5
+ # asm 2: pslld $7,<u=%xmm4
+ pslld $7,%xmm4
+
+ # qhasm: q ^= u
+ # asm 1: pxor <u=int6464#5,<q=int6464#4
+ # asm 2: pxor <u=%xmm4,<q=%xmm3
+ pxor %xmm4,%xmm3
+
+ # qhasm: z14_stack = q
+ # asm 1: movdqa <q=int6464#4,>z14_stack=stack128#36
+ # asm 2: movdqa <q=%xmm3,>z14_stack=592(%esp)
+ movdqa %xmm3,592(%esp)
+
+ # qhasm: t = p
+ # asm 1: movdqa <p=int6464#1,>t=int6464#3
+ # asm 2: movdqa <p=%xmm0,>t=%xmm2
+ movdqa %xmm0,%xmm2
+
+ # qhasm: uint32323232 t += q
+ # asm 1: paddd <q=int6464#4,<t=int6464#3
+ # asm 2: paddd <q=%xmm3,<t=%xmm2
+ paddd %xmm3,%xmm2
+
+ # qhasm: u = t
+ # asm 1: movdqa <t=int6464#3,>u=int6464#5
+ # asm 2: movdqa <t=%xmm2,>u=%xmm4
+ movdqa %xmm2,%xmm4
+
+ # qhasm: uint32323232 t >>= 23
+ # asm 1: psrld $23,<t=int6464#3
+ # asm 2: psrld $23,<t=%xmm2
+ psrld $23,%xmm2
+
+ # qhasm: r ^= t
+ # asm 1: pxor <t=int6464#3,<r=int6464#2
+ # asm 2: pxor <t=%xmm2,<r=%xmm1
+ pxor %xmm2,%xmm1
+
+ # qhasm: uint32323232 u <<= 9
+ # asm 1: pslld $9,<u=int6464#5
+ # asm 2: pslld $9,<u=%xmm4
+ pslld $9,%xmm4
+
+ # qhasm: r ^= u
+ # asm 1: pxor <u=int6464#5,<r=int6464#2
+ # asm 2: pxor <u=%xmm4,<r=%xmm1
+ pxor %xmm4,%xmm1
+
+ # qhasm: z2_stack = r
+ # asm 1: movdqa <r=int6464#2,>z2_stack=stack128#26
+ # asm 2: movdqa <r=%xmm1,>z2_stack=432(%esp)
+ movdqa %xmm1,432(%esp)
+
+ # qhasm: uint32323232 q += r
+ # asm 1: paddd <r=int6464#2,<q=int6464#4
+ # asm 2: paddd <r=%xmm1,<q=%xmm3
+ paddd %xmm1,%xmm3
+
+ # qhasm: u = q
+ # asm 1: movdqa <q=int6464#4,>u=int6464#3
+ # asm 2: movdqa <q=%xmm3,>u=%xmm2
+ movdqa %xmm3,%xmm2
+
+ # qhasm: uint32323232 q >>= 19
+ # asm 1: psrld $19,<q=int6464#4
+ # asm 2: psrld $19,<q=%xmm3
+ psrld $19,%xmm3
+
+ # qhasm: s ^= q
+ # asm 1: pxor <q=int6464#4,<s=int6464#7
+ # asm 2: pxor <q=%xmm3,<s=%xmm6
+ pxor %xmm3,%xmm6
+
+ # qhasm: uint32323232 u <<= 13
+ # asm 1: pslld $13,<u=int6464#3
+ # asm 2: pslld $13,<u=%xmm2
+ pslld $13,%xmm2
+
+ # qhasm: s ^= u
+ # asm 1: pxor <u=int6464#3,<s=int6464#7
+ # asm 2: pxor <u=%xmm2,<s=%xmm6
+ pxor %xmm2,%xmm6
+
+ # qhasm: mt = z11_stack
+ # asm 1: movdqa <z11_stack=stack128#27,>mt=int6464#3
+ # asm 2: movdqa <z11_stack=448(%esp),>mt=%xmm2
+ movdqa 448(%esp),%xmm2
+
+ # qhasm: mp = z15_stack
+ # asm 1: movdqa <z15_stack=stack128#23,>mp=int6464#5
+ # asm 2: movdqa <z15_stack=384(%esp),>mp=%xmm4
+ movdqa 384(%esp),%xmm4
+
+ # qhasm: mq = z3_stack
+ # asm 1: movdqa <z3_stack=stack128#25,>mq=int6464#4
+ # asm 2: movdqa <z3_stack=416(%esp),>mq=%xmm3
+ movdqa 416(%esp),%xmm3
+
+ # qhasm: mr = z7_stack
+ # asm 1: movdqa <z7_stack=stack128#29,>mr=int6464#6
+ # asm 2: movdqa <z7_stack=480(%esp),>mr=%xmm5
+ movdqa 480(%esp),%xmm5
+
+ # qhasm: z6_stack = s
+ # asm 1: movdqa <s=int6464#7,>z6_stack=stack128#23
+ # asm 2: movdqa <s=%xmm6,>z6_stack=384(%esp)
+ movdqa %xmm6,384(%esp)
+
+ # qhasm: uint32323232 r += s
+ # asm 1: paddd <s=int6464#7,<r=int6464#2
+ # asm 2: paddd <s=%xmm6,<r=%xmm1
+ paddd %xmm6,%xmm1
+
+ # qhasm: u = r
+ # asm 1: movdqa <r=int6464#2,>u=int6464#7
+ # asm 2: movdqa <r=%xmm1,>u=%xmm6
+ movdqa %xmm1,%xmm6
+
+ # qhasm: uint32323232 r >>= 14
+ # asm 1: psrld $14,<r=int6464#2
+ # asm 2: psrld $14,<r=%xmm1
+ psrld $14,%xmm1
+
+ # qhasm: p ^= r
+ # asm 1: pxor <r=int6464#2,<p=int6464#1
+ # asm 2: pxor <r=%xmm1,<p=%xmm0
+ pxor %xmm1,%xmm0
+
+ # qhasm: uint32323232 u <<= 18
+ # asm 1: pslld $18,<u=int6464#7
+ # asm 2: pslld $18,<u=%xmm6
+ pslld $18,%xmm6
+
+ # qhasm: p ^= u
+ # asm 1: pxor <u=int6464#7,<p=int6464#1
+ # asm 2: pxor <u=%xmm6,<p=%xmm0
+ pxor %xmm6,%xmm0
+
+ # qhasm: z10_stack = p
+ # asm 1: movdqa <p=int6464#1,>z10_stack=stack128#27
+ # asm 2: movdqa <p=%xmm0,>z10_stack=448(%esp)
+ movdqa %xmm0,448(%esp)
+
+ # qhasm: assign xmm2 to mt
+
+ # qhasm: assign xmm3 to mq
+
+ # qhasm: assign xmm4 to mp
+
+ # qhasm: assign xmm5 to mr
+
+ # qhasm: ms = mt
+ # asm 1: movdqa <mt=int6464#3,>ms=int6464#7
+ # asm 2: movdqa <mt=%xmm2,>ms=%xmm6
+ movdqa %xmm2,%xmm6
+
+ # qhasm: uint32323232 mt += mp
+ # asm 1: paddd <mp=int6464#5,<mt=int6464#3
+ # asm 2: paddd <mp=%xmm4,<mt=%xmm2
+ paddd %xmm4,%xmm2
+
+ # qhasm: mu = mt
+ # asm 1: movdqa <mt=int6464#3,>mu=int6464#1
+ # asm 2: movdqa <mt=%xmm2,>mu=%xmm0
+ movdqa %xmm2,%xmm0
+
+ # qhasm: uint32323232 mt >>= 25
+ # asm 1: psrld $25,<mt=int6464#3
+ # asm 2: psrld $25,<mt=%xmm2
+ psrld $25,%xmm2
+
+ # qhasm: mq ^= mt
+ # asm 1: pxor <mt=int6464#3,<mq=int6464#4
+ # asm 2: pxor <mt=%xmm2,<mq=%xmm3
+ pxor %xmm2,%xmm3
+
+ # qhasm: uint32323232 mu <<= 7
+ # asm 1: pslld $7,<mu=int6464#1
+ # asm 2: pslld $7,<mu=%xmm0
+ pslld $7,%xmm0
+
+ # qhasm: mq ^= mu
+ # asm 1: pxor <mu=int6464#1,<mq=int6464#4
+ # asm 2: pxor <mu=%xmm0,<mq=%xmm3
+ pxor %xmm0,%xmm3
+
+ # qhasm: z3_stack = mq
+ # asm 1: movdqa <mq=int6464#4,>z3_stack=stack128#25
+ # asm 2: movdqa <mq=%xmm3,>z3_stack=416(%esp)
+ movdqa %xmm3,416(%esp)
+
+ # qhasm: mt = mp
+ # asm 1: movdqa <mp=int6464#5,>mt=int6464#1
+ # asm 2: movdqa <mp=%xmm4,>mt=%xmm0
+ movdqa %xmm4,%xmm0
+
+ # qhasm: uint32323232 mt += mq
+ # asm 1: paddd <mq=int6464#4,<mt=int6464#1
+ # asm 2: paddd <mq=%xmm3,<mt=%xmm0
+ paddd %xmm3,%xmm0
+
+ # qhasm: mu = mt
+ # asm 1: movdqa <mt=int6464#1,>mu=int6464#2
+ # asm 2: movdqa <mt=%xmm0,>mu=%xmm1
+ movdqa %xmm0,%xmm1
+
+ # qhasm: uint32323232 mt >>= 23
+ # asm 1: psrld $23,<mt=int6464#1
+ # asm 2: psrld $23,<mt=%xmm0
+ psrld $23,%xmm0
+
+ # qhasm: mr ^= mt
+ # asm 1: pxor <mt=int6464#1,<mr=int6464#6
+ # asm 2: pxor <mt=%xmm0,<mr=%xmm5
+ pxor %xmm0,%xmm5
+
+ # qhasm: uint32323232 mu <<= 9
+ # asm 1: pslld $9,<mu=int6464#2
+ # asm 2: pslld $9,<mu=%xmm1
+ pslld $9,%xmm1
+
+ # qhasm: mr ^= mu
+ # asm 1: pxor <mu=int6464#2,<mr=int6464#6
+ # asm 2: pxor <mu=%xmm1,<mr=%xmm5
+ pxor %xmm1,%xmm5
+
+ # qhasm: z7_stack = mr
+ # asm 1: movdqa <mr=int6464#6,>z7_stack=stack128#29
+ # asm 2: movdqa <mr=%xmm5,>z7_stack=480(%esp)
+ movdqa %xmm5,480(%esp)
+
+ # qhasm: uint32323232 mq += mr
+ # asm 1: paddd <mr=int6464#6,<mq=int6464#4
+ # asm 2: paddd <mr=%xmm5,<mq=%xmm3
+ paddd %xmm5,%xmm3
+
+ # qhasm: mu = mq
+ # asm 1: movdqa <mq=int6464#4,>mu=int6464#1
+ # asm 2: movdqa <mq=%xmm3,>mu=%xmm0
+ movdqa %xmm3,%xmm0
+
+ # qhasm: uint32323232 mq >>= 19
+ # asm 1: psrld $19,<mq=int6464#4
+ # asm 2: psrld $19,<mq=%xmm3
+ psrld $19,%xmm3
+
+ # qhasm: ms ^= mq
+ # asm 1: pxor <mq=int6464#4,<ms=int6464#7
+ # asm 2: pxor <mq=%xmm3,<ms=%xmm6
+ pxor %xmm3,%xmm6
+
+ # qhasm: uint32323232 mu <<= 13
+ # asm 1: pslld $13,<mu=int6464#1
+ # asm 2: pslld $13,<mu=%xmm0
+ pslld $13,%xmm0
+
+ # qhasm: ms ^= mu
+ # asm 1: pxor <mu=int6464#1,<ms=int6464#7
+ # asm 2: pxor <mu=%xmm0,<ms=%xmm6
+ pxor %xmm0,%xmm6
+
+ # qhasm: t = z3_stack
+ # asm 1: movdqa <z3_stack=stack128#25,>t=int6464#3
+ # asm 2: movdqa <z3_stack=416(%esp),>t=%xmm2
+ movdqa 416(%esp),%xmm2
+
+ # qhasm: p = z0_stack
+ # asm 1: movdqa <z0_stack=stack128#21,>p=int6464#1
+ # asm 2: movdqa <z0_stack=352(%esp),>p=%xmm0
+ movdqa 352(%esp),%xmm0
+
+ # qhasm: q = z1_stack
+ # asm 1: movdqa <z1_stack=stack128#22,>q=int6464#4
+ # asm 2: movdqa <z1_stack=368(%esp),>q=%xmm3
+ movdqa 368(%esp),%xmm3
+
+ # qhasm: r = z2_stack
+ # asm 1: movdqa <z2_stack=stack128#26,>r=int6464#2
+ # asm 2: movdqa <z2_stack=432(%esp),>r=%xmm1
+ movdqa 432(%esp),%xmm1
+
+ # qhasm: z11_stack = ms
+ # asm 1: movdqa <ms=int6464#7,>z11_stack=stack128#21
+ # asm 2: movdqa <ms=%xmm6,>z11_stack=352(%esp)
+ movdqa %xmm6,352(%esp)
+
+ # qhasm: uint32323232 mr += ms
+ # asm 1: paddd <ms=int6464#7,<mr=int6464#6
+ # asm 2: paddd <ms=%xmm6,<mr=%xmm5
+ paddd %xmm6,%xmm5
+
+ # qhasm: mu = mr
+ # asm 1: movdqa <mr=int6464#6,>mu=int6464#7
+ # asm 2: movdqa <mr=%xmm5,>mu=%xmm6
+ movdqa %xmm5,%xmm6
+
+ # qhasm: uint32323232 mr >>= 14
+ # asm 1: psrld $14,<mr=int6464#6
+ # asm 2: psrld $14,<mr=%xmm5
+ psrld $14,%xmm5
+
+ # qhasm: mp ^= mr
+ # asm 1: pxor <mr=int6464#6,<mp=int6464#5
+ # asm 2: pxor <mr=%xmm5,<mp=%xmm4
+ pxor %xmm5,%xmm4
+
+ # qhasm: uint32323232 mu <<= 18
+ # asm 1: pslld $18,<mu=int6464#7
+ # asm 2: pslld $18,<mu=%xmm6
+ pslld $18,%xmm6
+
+ # qhasm: mp ^= mu
+ # asm 1: pxor <mu=int6464#7,<mp=int6464#5
+ # asm 2: pxor <mu=%xmm6,<mp=%xmm4
+ pxor %xmm6,%xmm4
+
+ # qhasm: z15_stack = mp
+ # asm 1: movdqa <mp=int6464#5,>z15_stack=stack128#22
+ # asm 2: movdqa <mp=%xmm4,>z15_stack=368(%esp)
+ movdqa %xmm4,368(%esp)
+
+ # qhasm: assign xmm0 to p
+
+ # qhasm: assign xmm1 to r
+
+ # qhasm: assign xmm2 to t
+
+ # qhasm: assign xmm3 to q
+
+ # qhasm: s = t
+ # asm 1: movdqa <t=int6464#3,>s=int6464#7
+ # asm 2: movdqa <t=%xmm2,>s=%xmm6
+ movdqa %xmm2,%xmm6
+
+ # qhasm: uint32323232 t += p
+ # asm 1: paddd <p=int6464#1,<t=int6464#3
+ # asm 2: paddd <p=%xmm0,<t=%xmm2
+ paddd %xmm0,%xmm2
+
+ # qhasm: u = t
+ # asm 1: movdqa <t=int6464#3,>u=int6464#5
+ # asm 2: movdqa <t=%xmm2,>u=%xmm4
+ movdqa %xmm2,%xmm4
+
+ # qhasm: uint32323232 t >>= 25
+ # asm 1: psrld $25,<t=int6464#3
+ # asm 2: psrld $25,<t=%xmm2
+ psrld $25,%xmm2
+
+ # qhasm: q ^= t
+ # asm 1: pxor <t=int6464#3,<q=int6464#4
+ # asm 2: pxor <t=%xmm2,<q=%xmm3
+ pxor %xmm2,%xmm3
+
+ # qhasm: uint32323232 u <<= 7
+ # asm 1: pslld $7,<u=int6464#5
+ # asm 2: pslld $7,<u=%xmm4
+ pslld $7,%xmm4
+
+ # qhasm: q ^= u
+ # asm 1: pxor <u=int6464#5,<q=int6464#4
+ # asm 2: pxor <u=%xmm4,<q=%xmm3
+ pxor %xmm4,%xmm3
+
+ # qhasm: z1_stack = q
+ # asm 1: movdqa <q=int6464#4,>z1_stack=stack128#28
+ # asm 2: movdqa <q=%xmm3,>z1_stack=464(%esp)
+ movdqa %xmm3,464(%esp)
+
+ # qhasm: t = p
+ # asm 1: movdqa <p=int6464#1,>t=int6464#3
+ # asm 2: movdqa <p=%xmm0,>t=%xmm2
+ movdqa %xmm0,%xmm2
+
+ # qhasm: uint32323232 t += q
+ # asm 1: paddd <q=int6464#4,<t=int6464#3
+ # asm 2: paddd <q=%xmm3,<t=%xmm2
+ paddd %xmm3,%xmm2
+
+ # qhasm: u = t
+ # asm 1: movdqa <t=int6464#3,>u=int6464#5
+ # asm 2: movdqa <t=%xmm2,>u=%xmm4
+ movdqa %xmm2,%xmm4
+
+ # qhasm: uint32323232 t >>= 23
+ # asm 1: psrld $23,<t=int6464#3
+ # asm 2: psrld $23,<t=%xmm2
+ psrld $23,%xmm2
+
+ # qhasm: r ^= t
+ # asm 1: pxor <t=int6464#3,<r=int6464#2
+ # asm 2: pxor <t=%xmm2,<r=%xmm1
+ pxor %xmm2,%xmm1
+
+ # qhasm: uint32323232 u <<= 9
+ # asm 1: pslld $9,<u=int6464#5
+ # asm 2: pslld $9,<u=%xmm4
+ pslld $9,%xmm4
+
+ # qhasm: r ^= u
+ # asm 1: pxor <u=int6464#5,<r=int6464#2
+ # asm 2: pxor <u=%xmm4,<r=%xmm1
+ pxor %xmm4,%xmm1
+
+ # qhasm: z2_stack = r
+ # asm 1: movdqa <r=int6464#2,>z2_stack=stack128#31
+ # asm 2: movdqa <r=%xmm1,>z2_stack=512(%esp)
+ movdqa %xmm1,512(%esp)
+
+ # qhasm: uint32323232 q += r
+ # asm 1: paddd <r=int6464#2,<q=int6464#4
+ # asm 2: paddd <r=%xmm1,<q=%xmm3
+ paddd %xmm1,%xmm3
+
+ # qhasm: u = q
+ # asm 1: movdqa <q=int6464#4,>u=int6464#3
+ # asm 2: movdqa <q=%xmm3,>u=%xmm2
+ movdqa %xmm3,%xmm2
+
+ # qhasm: uint32323232 q >>= 19
+ # asm 1: psrld $19,<q=int6464#4
+ # asm 2: psrld $19,<q=%xmm3
+ psrld $19,%xmm3
+
+ # qhasm: s ^= q
+ # asm 1: pxor <q=int6464#4,<s=int6464#7
+ # asm 2: pxor <q=%xmm3,<s=%xmm6
+ pxor %xmm3,%xmm6
+
+ # qhasm: uint32323232 u <<= 13
+ # asm 1: pslld $13,<u=int6464#3
+ # asm 2: pslld $13,<u=%xmm2
+ pslld $13,%xmm2
+
+ # qhasm: s ^= u
+ # asm 1: pxor <u=int6464#3,<s=int6464#7
+ # asm 2: pxor <u=%xmm2,<s=%xmm6
+ pxor %xmm2,%xmm6
+
+ # qhasm: mt = z4_stack
+ # asm 1: movdqa <z4_stack=stack128#33,>mt=int6464#3
+ # asm 2: movdqa <z4_stack=544(%esp),>mt=%xmm2
+ movdqa 544(%esp),%xmm2
+
+ # qhasm: mp = z5_stack
+ # asm 1: movdqa <z5_stack=stack128#24,>mp=int6464#5
+ # asm 2: movdqa <z5_stack=400(%esp),>mp=%xmm4
+ movdqa 400(%esp),%xmm4
+
+ # qhasm: mq = z6_stack
+ # asm 1: movdqa <z6_stack=stack128#23,>mq=int6464#4
+ # asm 2: movdqa <z6_stack=384(%esp),>mq=%xmm3
+ movdqa 384(%esp),%xmm3
+
+ # qhasm: mr = z7_stack
+ # asm 1: movdqa <z7_stack=stack128#29,>mr=int6464#6
+ # asm 2: movdqa <z7_stack=480(%esp),>mr=%xmm5
+ movdqa 480(%esp),%xmm5
+
+ # qhasm: z3_stack = s
+ # asm 1: movdqa <s=int6464#7,>z3_stack=stack128#25
+ # asm 2: movdqa <s=%xmm6,>z3_stack=416(%esp)
+ movdqa %xmm6,416(%esp)
+
+ # qhasm: uint32323232 r += s
+ # asm 1: paddd <s=int6464#7,<r=int6464#2
+ # asm 2: paddd <s=%xmm6,<r=%xmm1
+ paddd %xmm6,%xmm1
+
+ # qhasm: u = r
+ # asm 1: movdqa <r=int6464#2,>u=int6464#7
+ # asm 2: movdqa <r=%xmm1,>u=%xmm6
+ movdqa %xmm1,%xmm6
+
+ # qhasm: uint32323232 r >>= 14
+ # asm 1: psrld $14,<r=int6464#2
+ # asm 2: psrld $14,<r=%xmm1
+ psrld $14,%xmm1
+
+ # qhasm: p ^= r
+ # asm 1: pxor <r=int6464#2,<p=int6464#1
+ # asm 2: pxor <r=%xmm1,<p=%xmm0
+ pxor %xmm1,%xmm0
+
+ # qhasm: uint32323232 u <<= 18
+ # asm 1: pslld $18,<u=int6464#7
+ # asm 2: pslld $18,<u=%xmm6
+ pslld $18,%xmm6
+
+ # qhasm: p ^= u
+ # asm 1: pxor <u=int6464#7,<p=int6464#1
+ # asm 2: pxor <u=%xmm6,<p=%xmm0
+ pxor %xmm6,%xmm0
+
+ # qhasm: z0_stack = p
+ # asm 1: movdqa <p=int6464#1,>z0_stack=stack128#33
+ # asm 2: movdqa <p=%xmm0,>z0_stack=544(%esp)
+ movdqa %xmm0,544(%esp)
+
+ # qhasm: assign xmm2 to mt
+
+ # qhasm: assign xmm3 to mq
+
+ # qhasm: assign xmm4 to mp
+
+ # qhasm: assign xmm5 to mr
+
+ # qhasm: ms = mt
+ # asm 1: movdqa <mt=int6464#3,>ms=int6464#7
+ # asm 2: movdqa <mt=%xmm2,>ms=%xmm6
+ movdqa %xmm2,%xmm6
+
+ # qhasm: uint32323232 mt += mp
+ # asm 1: paddd <mp=int6464#5,<mt=int6464#3
+ # asm 2: paddd <mp=%xmm4,<mt=%xmm2
+ paddd %xmm4,%xmm2
+
+ # qhasm: mu = mt
+ # asm 1: movdqa <mt=int6464#3,>mu=int6464#1
+ # asm 2: movdqa <mt=%xmm2,>mu=%xmm0
+ movdqa %xmm2,%xmm0
+
+ # qhasm: uint32323232 mt >>= 25
+ # asm 1: psrld $25,<mt=int6464#3
+ # asm 2: psrld $25,<mt=%xmm2
+ psrld $25,%xmm2
+
+ # qhasm: mq ^= mt
+ # asm 1: pxor <mt=int6464#3,<mq=int6464#4
+ # asm 2: pxor <mt=%xmm2,<mq=%xmm3
+ pxor %xmm2,%xmm3
+
+ # qhasm: uint32323232 mu <<= 7
+ # asm 1: pslld $7,<mu=int6464#1
+ # asm 2: pslld $7,<mu=%xmm0
+ pslld $7,%xmm0
+
+ # qhasm: mq ^= mu
+ # asm 1: pxor <mu=int6464#1,<mq=int6464#4
+ # asm 2: pxor <mu=%xmm0,<mq=%xmm3
+ pxor %xmm0,%xmm3
+
+ # qhasm: z6_stack = mq
+ # asm 1: movdqa <mq=int6464#4,>z6_stack=stack128#26
+ # asm 2: movdqa <mq=%xmm3,>z6_stack=432(%esp)
+ movdqa %xmm3,432(%esp)
+
+ # qhasm: mt = mp
+ # asm 1: movdqa <mp=int6464#5,>mt=int6464#1
+ # asm 2: movdqa <mp=%xmm4,>mt=%xmm0
+ movdqa %xmm4,%xmm0
+
+ # qhasm: uint32323232 mt += mq
+ # asm 1: paddd <mq=int6464#4,<mt=int6464#1
+ # asm 2: paddd <mq=%xmm3,<mt=%xmm0
+ paddd %xmm3,%xmm0
+
+ # qhasm: mu = mt
+ # asm 1: movdqa <mt=int6464#1,>mu=int6464#2
+ # asm 2: movdqa <mt=%xmm0,>mu=%xmm1
+ movdqa %xmm0,%xmm1
+
+ # qhasm: uint32323232 mt >>= 23
+ # asm 1: psrld $23,<mt=int6464#1
+ # asm 2: psrld $23,<mt=%xmm0
+ psrld $23,%xmm0
+
+ # qhasm: mr ^= mt
+ # asm 1: pxor <mt=int6464#1,<mr=int6464#6
+ # asm 2: pxor <mt=%xmm0,<mr=%xmm5
+ pxor %xmm0,%xmm5
+
+ # qhasm: uint32323232 mu <<= 9
+ # asm 1: pslld $9,<mu=int6464#2
+ # asm 2: pslld $9,<mu=%xmm1
+ pslld $9,%xmm1
+
+ # qhasm: mr ^= mu
+ # asm 1: pxor <mu=int6464#2,<mr=int6464#6
+ # asm 2: pxor <mu=%xmm1,<mr=%xmm5
+ pxor %xmm1,%xmm5
+
+ # qhasm: z7_stack = mr
+ # asm 1: movdqa <mr=int6464#6,>z7_stack=stack128#29
+ # asm 2: movdqa <mr=%xmm5,>z7_stack=480(%esp)
+ movdqa %xmm5,480(%esp)
+
+ # qhasm: uint32323232 mq += mr
+ # asm 1: paddd <mr=int6464#6,<mq=int6464#4
+ # asm 2: paddd <mr=%xmm5,<mq=%xmm3
+ paddd %xmm5,%xmm3
+
+ # qhasm: mu = mq
+ # asm 1: movdqa <mq=int6464#4,>mu=int6464#1
+ # asm 2: movdqa <mq=%xmm3,>mu=%xmm0
+ movdqa %xmm3,%xmm0
+
+ # qhasm: uint32323232 mq >>= 19
+ # asm 1: psrld $19,<mq=int6464#4
+ # asm 2: psrld $19,<mq=%xmm3
+ psrld $19,%xmm3
+
+ # qhasm: ms ^= mq
+ # asm 1: pxor <mq=int6464#4,<ms=int6464#7
+ # asm 2: pxor <mq=%xmm3,<ms=%xmm6
+ pxor %xmm3,%xmm6
+
+ # qhasm: uint32323232 mu <<= 13
+ # asm 1: pslld $13,<mu=int6464#1
+ # asm 2: pslld $13,<mu=%xmm0
+ pslld $13,%xmm0
+
+ # qhasm: ms ^= mu
+ # asm 1: pxor <mu=int6464#1,<ms=int6464#7
+ # asm 2: pxor <mu=%xmm0,<ms=%xmm6
+ pxor %xmm0,%xmm6
+
+ # qhasm: t = z9_stack
+ # asm 1: movdqa <z9_stack=stack128#32,>t=int6464#3
+ # asm 2: movdqa <z9_stack=528(%esp),>t=%xmm2
+ movdqa 528(%esp),%xmm2
+
+ # qhasm: p = z10_stack
+ # asm 1: movdqa <z10_stack=stack128#27,>p=int6464#1
+ # asm 2: movdqa <z10_stack=448(%esp),>p=%xmm0
+ movdqa 448(%esp),%xmm0
+
+ # qhasm: q = z11_stack
+ # asm 1: movdqa <z11_stack=stack128#21,>q=int6464#4
+ # asm 2: movdqa <z11_stack=352(%esp),>q=%xmm3
+ movdqa 352(%esp),%xmm3
+
+ # qhasm: r = z8_stack
+ # asm 1: movdqa <z8_stack=stack128#34,>r=int6464#2
+ # asm 2: movdqa <z8_stack=560(%esp),>r=%xmm1
+ movdqa 560(%esp),%xmm1
+
+ # qhasm: z4_stack = ms
+ # asm 1: movdqa <ms=int6464#7,>z4_stack=stack128#34
+ # asm 2: movdqa <ms=%xmm6,>z4_stack=560(%esp)
+ movdqa %xmm6,560(%esp)
+
+ # qhasm: uint32323232 mr += ms
+ # asm 1: paddd <ms=int6464#7,<mr=int6464#6
+ # asm 2: paddd <ms=%xmm6,<mr=%xmm5
+ paddd %xmm6,%xmm5
+
+ # qhasm: mu = mr
+ # asm 1: movdqa <mr=int6464#6,>mu=int6464#7
+ # asm 2: movdqa <mr=%xmm5,>mu=%xmm6
+ movdqa %xmm5,%xmm6
+
+ # qhasm: uint32323232 mr >>= 14
+ # asm 1: psrld $14,<mr=int6464#6
+ # asm 2: psrld $14,<mr=%xmm5
+ psrld $14,%xmm5
+
+ # qhasm: mp ^= mr
+ # asm 1: pxor <mr=int6464#6,<mp=int6464#5
+ # asm 2: pxor <mr=%xmm5,<mp=%xmm4
+ pxor %xmm5,%xmm4
+
+ # qhasm: uint32323232 mu <<= 18
+ # asm 1: pslld $18,<mu=int6464#7
+ # asm 2: pslld $18,<mu=%xmm6
+ pslld $18,%xmm6
+
+ # qhasm: mp ^= mu
+ # asm 1: pxor <mu=int6464#7,<mp=int6464#5
+ # asm 2: pxor <mu=%xmm6,<mp=%xmm4
+ pxor %xmm6,%xmm4
+
+ # qhasm: z5_stack = mp
+ # asm 1: movdqa <mp=int6464#5,>z5_stack=stack128#21
+ # asm 2: movdqa <mp=%xmm4,>z5_stack=352(%esp)
+ movdqa %xmm4,352(%esp)
+
+ # qhasm: assign xmm0 to p
+
+ # qhasm: assign xmm1 to r
+
+ # qhasm: assign xmm2 to t
+
+ # qhasm: assign xmm3 to q
+
+ # qhasm: s = t
+ # asm 1: movdqa <t=int6464#3,>s=int6464#7
+ # asm 2: movdqa <t=%xmm2,>s=%xmm6
+ movdqa %xmm2,%xmm6
+
+ # qhasm: uint32323232 t += p
+ # asm 1: paddd <p=int6464#1,<t=int6464#3
+ # asm 2: paddd <p=%xmm0,<t=%xmm2
+ paddd %xmm0,%xmm2
+
+ # qhasm: u = t
+ # asm 1: movdqa <t=int6464#3,>u=int6464#5
+ # asm 2: movdqa <t=%xmm2,>u=%xmm4
+ movdqa %xmm2,%xmm4
+
+ # qhasm: uint32323232 t >>= 25
+ # asm 1: psrld $25,<t=int6464#3
+ # asm 2: psrld $25,<t=%xmm2
+ psrld $25,%xmm2
+
+ # qhasm: q ^= t
+ # asm 1: pxor <t=int6464#3,<q=int6464#4
+ # asm 2: pxor <t=%xmm2,<q=%xmm3
+ pxor %xmm2,%xmm3
+
+ # qhasm: uint32323232 u <<= 7
+ # asm 1: pslld $7,<u=int6464#5
+ # asm 2: pslld $7,<u=%xmm4
+ pslld $7,%xmm4
+
+ # qhasm: q ^= u
+ # asm 1: pxor <u=int6464#5,<q=int6464#4
+ # asm 2: pxor <u=%xmm4,<q=%xmm3
+ pxor %xmm4,%xmm3
+
+ # qhasm: z11_stack = q
+ # asm 1: movdqa <q=int6464#4,>z11_stack=stack128#27
+ # asm 2: movdqa <q=%xmm3,>z11_stack=448(%esp)
+ movdqa %xmm3,448(%esp)
+
+ # qhasm: t = p
+ # asm 1: movdqa <p=int6464#1,>t=int6464#3
+ # asm 2: movdqa <p=%xmm0,>t=%xmm2
+ movdqa %xmm0,%xmm2
+
+ # qhasm: uint32323232 t += q
+ # asm 1: paddd <q=int6464#4,<t=int6464#3
+ # asm 2: paddd <q=%xmm3,<t=%xmm2
+ paddd %xmm3,%xmm2
+
+ # qhasm: u = t
+ # asm 1: movdqa <t=int6464#3,>u=int6464#5
+ # asm 2: movdqa <t=%xmm2,>u=%xmm4
+ movdqa %xmm2,%xmm4
+
+ # qhasm: uint32323232 t >>= 23
+ # asm 1: psrld $23,<t=int6464#3
+ # asm 2: psrld $23,<t=%xmm2
+ psrld $23,%xmm2
+
+ # qhasm: r ^= t
+ # asm 1: pxor <t=int6464#3,<r=int6464#2
+ # asm 2: pxor <t=%xmm2,<r=%xmm1
+ pxor %xmm2,%xmm1
+
+ # qhasm: uint32323232 u <<= 9
+ # asm 1: pslld $9,<u=int6464#5
+ # asm 2: pslld $9,<u=%xmm4
+ pslld $9,%xmm4
+
+ # qhasm: r ^= u
+ # asm 1: pxor <u=int6464#5,<r=int6464#2
+ # asm 2: pxor <u=%xmm4,<r=%xmm1
+ pxor %xmm4,%xmm1
+
+ # qhasm: z8_stack = r
+ # asm 1: movdqa <r=int6464#2,>z8_stack=stack128#37
+ # asm 2: movdqa <r=%xmm1,>z8_stack=608(%esp)
+ movdqa %xmm1,608(%esp)
+
+ # qhasm: uint32323232 q += r
+ # asm 1: paddd <r=int6464#2,<q=int6464#4
+ # asm 2: paddd <r=%xmm1,<q=%xmm3
+ paddd %xmm1,%xmm3
+
+ # qhasm: u = q
+ # asm 1: movdqa <q=int6464#4,>u=int6464#3
+ # asm 2: movdqa <q=%xmm3,>u=%xmm2
+ movdqa %xmm3,%xmm2
+
+ # qhasm: uint32323232 q >>= 19
+ # asm 1: psrld $19,<q=int6464#4
+ # asm 2: psrld $19,<q=%xmm3
+ psrld $19,%xmm3
+
+ # qhasm: s ^= q
+ # asm 1: pxor <q=int6464#4,<s=int6464#7
+ # asm 2: pxor <q=%xmm3,<s=%xmm6
+ pxor %xmm3,%xmm6
+
+ # qhasm: uint32323232 u <<= 13
+ # asm 1: pslld $13,<u=int6464#3
+ # asm 2: pslld $13,<u=%xmm2
+ pslld $13,%xmm2
+
+ # qhasm: s ^= u
+ # asm 1: pxor <u=int6464#3,<s=int6464#7
+ # asm 2: pxor <u=%xmm2,<s=%xmm6
+ pxor %xmm2,%xmm6
+
+ # qhasm: mt = z14_stack
+ # asm 1: movdqa <z14_stack=stack128#36,>mt=int6464#3
+ # asm 2: movdqa <z14_stack=592(%esp),>mt=%xmm2
+ movdqa 592(%esp),%xmm2
+
+ # qhasm: mp = z15_stack
+ # asm 1: movdqa <z15_stack=stack128#22,>mp=int6464#5
+ # asm 2: movdqa <z15_stack=368(%esp),>mp=%xmm4
+ movdqa 368(%esp),%xmm4
+
+ # qhasm: mq = z12_stack
+ # asm 1: movdqa <z12_stack=stack128#30,>mq=int6464#4
+ # asm 2: movdqa <z12_stack=496(%esp),>mq=%xmm3
+ movdqa 496(%esp),%xmm3
+
+ # qhasm: mr = z13_stack
+ # asm 1: movdqa <z13_stack=stack128#35,>mr=int6464#6
+ # asm 2: movdqa <z13_stack=576(%esp),>mr=%xmm5
+ movdqa 576(%esp),%xmm5
+
+ # qhasm: z9_stack = s
+ # asm 1: movdqa <s=int6464#7,>z9_stack=stack128#32
+ # asm 2: movdqa <s=%xmm6,>z9_stack=528(%esp)
+ movdqa %xmm6,528(%esp)
+
+ # qhasm: uint32323232 r += s
+ # asm 1: paddd <s=int6464#7,<r=int6464#2
+ # asm 2: paddd <s=%xmm6,<r=%xmm1
+ paddd %xmm6,%xmm1
+
+ # qhasm: u = r
+ # asm 1: movdqa <r=int6464#2,>u=int6464#7
+ # asm 2: movdqa <r=%xmm1,>u=%xmm6
+ movdqa %xmm1,%xmm6
+
+ # qhasm: uint32323232 r >>= 14
+ # asm 1: psrld $14,<r=int6464#2
+ # asm 2: psrld $14,<r=%xmm1
+ psrld $14,%xmm1
+
+ # qhasm: p ^= r
+ # asm 1: pxor <r=int6464#2,<p=int6464#1
+ # asm 2: pxor <r=%xmm1,<p=%xmm0
+ pxor %xmm1,%xmm0
+
+ # qhasm: uint32323232 u <<= 18
+ # asm 1: pslld $18,<u=int6464#7
+ # asm 2: pslld $18,<u=%xmm6
+ pslld $18,%xmm6
+
+ # qhasm: p ^= u
+ # asm 1: pxor <u=int6464#7,<p=int6464#1
+ # asm 2: pxor <u=%xmm6,<p=%xmm0
+ pxor %xmm6,%xmm0
+
+ # qhasm: z10_stack = p
+ # asm 1: movdqa <p=int6464#1,>z10_stack=stack128#22
+ # asm 2: movdqa <p=%xmm0,>z10_stack=368(%esp)
+ movdqa %xmm0,368(%esp)
+
+ # qhasm: assign xmm2 to mt
+
+ # qhasm: assign xmm3 to mq
+
+ # qhasm: assign xmm4 to mp
+
+ # qhasm: assign xmm5 to mr
+
+ # qhasm: ms = mt
+ # asm 1: movdqa <mt=int6464#3,>ms=int6464#7
+ # asm 2: movdqa <mt=%xmm2,>ms=%xmm6
+ movdqa %xmm2,%xmm6
+
+ # qhasm: uint32323232 mt += mp
+ # asm 1: paddd <mp=int6464#5,<mt=int6464#3
+ # asm 2: paddd <mp=%xmm4,<mt=%xmm2
+ paddd %xmm4,%xmm2
+
+ # qhasm: mu = mt
+ # asm 1: movdqa <mt=int6464#3,>mu=int6464#1
+ # asm 2: movdqa <mt=%xmm2,>mu=%xmm0
+ movdqa %xmm2,%xmm0
+
+ # qhasm: uint32323232 mt >>= 25
+ # asm 1: psrld $25,<mt=int6464#3
+ # asm 2: psrld $25,<mt=%xmm2
+ psrld $25,%xmm2
+
+ # qhasm: mq ^= mt
+ # asm 1: pxor <mt=int6464#3,<mq=int6464#4
+ # asm 2: pxor <mt=%xmm2,<mq=%xmm3
+ pxor %xmm2,%xmm3
+
+ # qhasm: uint32323232 mu <<= 7
+ # asm 1: pslld $7,<mu=int6464#1
+ # asm 2: pslld $7,<mu=%xmm0
+ pslld $7,%xmm0
+
+ # qhasm: mq ^= mu
+ # asm 1: pxor <mu=int6464#1,<mq=int6464#4
+ # asm 2: pxor <mu=%xmm0,<mq=%xmm3
+ pxor %xmm0,%xmm3
+
+ # qhasm: z12_stack = mq
+ # asm 1: movdqa <mq=int6464#4,>z12_stack=stack128#35
+ # asm 2: movdqa <mq=%xmm3,>z12_stack=576(%esp)
+ movdqa %xmm3,576(%esp)
+
+ # qhasm: mt = mp
+ # asm 1: movdqa <mp=int6464#5,>mt=int6464#1
+ # asm 2: movdqa <mp=%xmm4,>mt=%xmm0
+ movdqa %xmm4,%xmm0
+
+ # qhasm: uint32323232 mt += mq
+ # asm 1: paddd <mq=int6464#4,<mt=int6464#1
+ # asm 2: paddd <mq=%xmm3,<mt=%xmm0
+ paddd %xmm3,%xmm0
+
+ # qhasm: mu = mt
+ # asm 1: movdqa <mt=int6464#1,>mu=int6464#2
+ # asm 2: movdqa <mt=%xmm0,>mu=%xmm1
+ movdqa %xmm0,%xmm1
+
+ # qhasm: uint32323232 mt >>= 23
+ # asm 1: psrld $23,<mt=int6464#1
+ # asm 2: psrld $23,<mt=%xmm0
+ psrld $23,%xmm0
+
+ # qhasm: mr ^= mt
+ # asm 1: pxor <mt=int6464#1,<mr=int6464#6
+ # asm 2: pxor <mt=%xmm0,<mr=%xmm5
+ pxor %xmm0,%xmm5
+
+ # qhasm: uint32323232 mu <<= 9
+ # asm 1: pslld $9,<mu=int6464#2
+ # asm 2: pslld $9,<mu=%xmm1
+ pslld $9,%xmm1
+
+ # qhasm: mr ^= mu
+ # asm 1: pxor <mu=int6464#2,<mr=int6464#6
+ # asm 2: pxor <mu=%xmm1,<mr=%xmm5
+ pxor %xmm1,%xmm5
+
+ # qhasm: z13_stack = mr
+ # asm 1: movdqa <mr=int6464#6,>z13_stack=stack128#30
+ # asm 2: movdqa <mr=%xmm5,>z13_stack=496(%esp)
+ movdqa %xmm5,496(%esp)
+
+ # qhasm: uint32323232 mq += mr
+ # asm 1: paddd <mr=int6464#6,<mq=int6464#4
+ # asm 2: paddd <mr=%xmm5,<mq=%xmm3
+ paddd %xmm5,%xmm3
+
+ # qhasm: mu = mq
+ # asm 1: movdqa <mq=int6464#4,>mu=int6464#1
+ # asm 2: movdqa <mq=%xmm3,>mu=%xmm0
+ movdqa %xmm3,%xmm0
+
+ # qhasm: uint32323232 mq >>= 19
+ # asm 1: psrld $19,<mq=int6464#4
+ # asm 2: psrld $19,<mq=%xmm3
+ psrld $19,%xmm3
+
+ # qhasm: ms ^= mq
+ # asm 1: pxor <mq=int6464#4,<ms=int6464#7
+ # asm 2: pxor <mq=%xmm3,<ms=%xmm6
+ pxor %xmm3,%xmm6
+
+ # qhasm: uint32323232 mu <<= 13
+ # asm 1: pslld $13,<mu=int6464#1
+ # asm 2: pslld $13,<mu=%xmm0
+ pslld $13,%xmm0
+
+ # qhasm: ms ^= mu
+ # asm 1: pxor <mu=int6464#1,<ms=int6464#7
+ # asm 2: pxor <mu=%xmm0,<ms=%xmm6
+ pxor %xmm0,%xmm6
+
+ # qhasm: t = z12_stack
+ # asm 1: movdqa <z12_stack=stack128#35,>t=int6464#3
+ # asm 2: movdqa <z12_stack=576(%esp),>t=%xmm2
+ movdqa 576(%esp),%xmm2
+
+ # qhasm: p = z0_stack
+ # asm 1: movdqa <z0_stack=stack128#33,>p=int6464#1
+ # asm 2: movdqa <z0_stack=544(%esp),>p=%xmm0
+ movdqa 544(%esp),%xmm0
+
+ # qhasm: q = z4_stack
+ # asm 1: movdqa <z4_stack=stack128#34,>q=int6464#4
+ # asm 2: movdqa <z4_stack=560(%esp),>q=%xmm3
+ movdqa 560(%esp),%xmm3
+
+ # qhasm: r = z8_stack
+ # asm 1: movdqa <z8_stack=stack128#37,>r=int6464#2
+ # asm 2: movdqa <z8_stack=608(%esp),>r=%xmm1
+ movdqa 608(%esp),%xmm1
+
+ # qhasm: z14_stack = ms
+ # asm 1: movdqa <ms=int6464#7,>z14_stack=stack128#24
+ # asm 2: movdqa <ms=%xmm6,>z14_stack=400(%esp)
+ movdqa %xmm6,400(%esp)
+
+ # qhasm: uint32323232 mr += ms
+ # asm 1: paddd <ms=int6464#7,<mr=int6464#6
+ # asm 2: paddd <ms=%xmm6,<mr=%xmm5
+ paddd %xmm6,%xmm5
+
+ # qhasm: mu = mr
+ # asm 1: movdqa <mr=int6464#6,>mu=int6464#7
+ # asm 2: movdqa <mr=%xmm5,>mu=%xmm6
+ movdqa %xmm5,%xmm6
+
+ # qhasm: uint32323232 mr >>= 14
+ # asm 1: psrld $14,<mr=int6464#6
+ # asm 2: psrld $14,<mr=%xmm5
+ psrld $14,%xmm5
+
+ # qhasm: mp ^= mr
+ # asm 1: pxor <mr=int6464#6,<mp=int6464#5
+ # asm 2: pxor <mr=%xmm5,<mp=%xmm4
+ pxor %xmm5,%xmm4
+
+ # qhasm: uint32323232 mu <<= 18
+ # asm 1: pslld $18,<mu=int6464#7
+ # asm 2: pslld $18,<mu=%xmm6
+ pslld $18,%xmm6
+
+ # qhasm: mp ^= mu
+ # asm 1: pxor <mu=int6464#7,<mp=int6464#5
+ # asm 2: pxor <mu=%xmm6,<mp=%xmm4
+ pxor %xmm6,%xmm4
+
+ # qhasm: z15_stack = mp
+ # asm 1: movdqa <mp=int6464#5,>z15_stack=stack128#23
+ # asm 2: movdqa <mp=%xmm4,>z15_stack=384(%esp)
+ movdqa %xmm4,384(%esp)
+
+ # qhasm: unsigned>? i -= 2
+ # asm 1: sub $2,<i=int32#1
+ # asm 2: sub $2,<i=%eax
+ sub $2,%eax
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto mainloop1 if unsigned>
+ ja ._mainloop1
+
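+ # note: each pass through mainloop1 evidently updates all sixteen state
+ # words twice (the p/q/r/s and mp/mq/mr/ms streams interleave the two
+ # half-rounds), so "sub $2" counts two rounds per pass; once i hits
+ # zero the loop falls through and the original state is added back in.
+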
+ # qhasm: out = out_stack
+ # asm 1: movl <out_stack=stack32#6,>out=int32#6
+ # asm 2: movl <out_stack=20(%esp),>out=%edi
+ movl 20(%esp),%edi
+
+ # qhasm: z0 = z0_stack
+ # asm 1: movdqa <z0_stack=stack128#33,>z0=int6464#1
+ # asm 2: movdqa <z0_stack=544(%esp),>z0=%xmm0
+ movdqa 544(%esp),%xmm0
+
+ # qhasm: z1 = z1_stack
+ # asm 1: movdqa <z1_stack=stack128#28,>z1=int6464#2
+ # asm 2: movdqa <z1_stack=464(%esp),>z1=%xmm1
+ movdqa 464(%esp),%xmm1
+
+ # qhasm: z2 = z2_stack
+ # asm 1: movdqa <z2_stack=stack128#31,>z2=int6464#3
+ # asm 2: movdqa <z2_stack=512(%esp),>z2=%xmm2
+ movdqa 512(%esp),%xmm2
+
+ # qhasm: z3 = z3_stack
+ # asm 1: movdqa <z3_stack=stack128#25,>z3=int6464#4
+ # asm 2: movdqa <z3_stack=416(%esp),>z3=%xmm3
+ movdqa 416(%esp),%xmm3
+
+ # qhasm: uint32323232 z0 += orig0
+ # asm 1: paddd <orig0=stack128#8,<z0=int6464#1
+ # asm 2: paddd <orig0=144(%esp),<z0=%xmm0
+ paddd 144(%esp),%xmm0
+
+ # qhasm: uint32323232 z1 += orig1
+ # asm 1: paddd <orig1=stack128#12,<z1=int6464#2
+ # asm 2: paddd <orig1=208(%esp),<z1=%xmm1
+ paddd 208(%esp),%xmm1
+
+ # qhasm: uint32323232 z2 += orig2
+ # asm 1: paddd <orig2=stack128#15,<z2=int6464#3
+ # asm 2: paddd <orig2=256(%esp),<z2=%xmm2
+ paddd 256(%esp),%xmm2
+
+ # qhasm: uint32323232 z3 += orig3
+ # asm 1: paddd <orig3=stack128#18,<z3=int6464#4
+ # asm 2: paddd <orig3=304(%esp),<z3=%xmm3
+ paddd 304(%esp),%xmm3
+
+ # qhasm: in0 = z0
+ # asm 1: movd <z0=int6464#1,>in0=int32#1
+ # asm 2: movd <z0=%xmm0,>in0=%eax
+ movd %xmm0,%eax
+
+ # qhasm: in1 = z1
+ # asm 1: movd <z1=int6464#2,>in1=int32#2
+ # asm 2: movd <z1=%xmm1,>in1=%ecx
+ movd %xmm1,%ecx
+
+ # qhasm: in2 = z2
+ # asm 1: movd <z2=int6464#3,>in2=int32#3
+ # asm 2: movd <z2=%xmm2,>in2=%edx
+ movd %xmm2,%edx
+
+ # qhasm: in3 = z3
+ # asm 1: movd <z3=int6464#4,>in3=int32#4
+ # asm 2: movd <z3=%xmm3,>in3=%ebx
+ movd %xmm3,%ebx
+
+ # qhasm: z0 <<<= 96
+ # asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
+ # asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
+ pshufd $0x39,%xmm0,%xmm0
+
+ # qhasm: z1 <<<= 96
+ # asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
+ # asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
+ pshufd $0x39,%xmm1,%xmm1
+
+ # qhasm: z2 <<<= 96
+ # asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
+ # asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
+ pshufd $0x39,%xmm2,%xmm2
+
+ # qhasm: z3 <<<= 96
+ # asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
+ # asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
+ pshufd $0x39,%xmm3,%xmm3
+
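+ # note: pshufd $0x39 cyclically rotates the four 32-bit lanes (selector
+ # 1,2,3,0), which is what the qhasm "<<<= 96" denotes; each movd/pshufd
+ # pair therefore peels off the next lane, i.e. the same state word for
+ # the next 64-byte block.
+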
+ # qhasm: in0 ^= *(uint32 *) (m + 0)
+ # asm 1: xorl 0(<m=int32#5),<in0=int32#1
+ # asm 2: xorl 0(<m=%esi),<in0=%eax
+ xorl 0(%esi),%eax
+
+ # qhasm: in1 ^= *(uint32 *) (m + 4)
+ # asm 1: xorl 4(<m=int32#5),<in1=int32#2
+ # asm 2: xorl 4(<m=%esi),<in1=%ecx
+ xorl 4(%esi),%ecx
+
+ # qhasm: in2 ^= *(uint32 *) (m + 8)
+ # asm 1: xorl 8(<m=int32#5),<in2=int32#3
+ # asm 2: xorl 8(<m=%esi),<in2=%edx
+ xorl 8(%esi),%edx
+
+ # qhasm: in3 ^= *(uint32 *) (m + 12)
+ # asm 1: xorl 12(<m=int32#5),<in3=int32#4
+ # asm 2: xorl 12(<m=%esi),<in3=%ebx
+ xorl 12(%esi),%ebx
+
+ # qhasm: *(uint32 *) (out + 0) = in0
+ # asm 1: movl <in0=int32#1,0(<out=int32#6)
+ # asm 2: movl <in0=%eax,0(<out=%edi)
+ movl %eax,0(%edi)
+
+ # qhasm: *(uint32 *) (out + 4) = in1
+ # asm 1: movl <in1=int32#2,4(<out=int32#6)
+ # asm 2: movl <in1=%ecx,4(<out=%edi)
+ movl %ecx,4(%edi)
+
+ # qhasm: *(uint32 *) (out + 8) = in2
+ # asm 1: movl <in2=int32#3,8(<out=int32#6)
+ # asm 2: movl <in2=%edx,8(<out=%edi)
+ movl %edx,8(%edi)
+
+ # qhasm: *(uint32 *) (out + 12) = in3
+ # asm 1: movl <in3=int32#4,12(<out=int32#6)
+ # asm 2: movl <in3=%ebx,12(<out=%edi)
+ movl %ebx,12(%edi)
+
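+ # note: lane k of each register belongs to block k, so the keystream is
+ # xored into the message 64 bytes apart: word 0 of the four blocks lands
+ # at out offsets 0, 64, 128 and 192 in the groups that follow.
+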
+ # qhasm: in0 = z0
+ # asm 1: movd <z0=int6464#1,>in0=int32#1
+ # asm 2: movd <z0=%xmm0,>in0=%eax
+ movd %xmm0,%eax
+
+ # qhasm: in1 = z1
+ # asm 1: movd <z1=int6464#2,>in1=int32#2
+ # asm 2: movd <z1=%xmm1,>in1=%ecx
+ movd %xmm1,%ecx
+
+ # qhasm: in2 = z2
+ # asm 1: movd <z2=int6464#3,>in2=int32#3
+ # asm 2: movd <z2=%xmm2,>in2=%edx
+ movd %xmm2,%edx
+
+ # qhasm: in3 = z3
+ # asm 1: movd <z3=int6464#4,>in3=int32#4
+ # asm 2: movd <z3=%xmm3,>in3=%ebx
+ movd %xmm3,%ebx
+
+ # qhasm: z0 <<<= 96
+ # asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
+ # asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
+ pshufd $0x39,%xmm0,%xmm0
+
+ # qhasm: z1 <<<= 96
+ # asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
+ # asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
+ pshufd $0x39,%xmm1,%xmm1
+
+ # qhasm: z2 <<<= 96
+ # asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
+ # asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
+ pshufd $0x39,%xmm2,%xmm2
+
+ # qhasm: z3 <<<= 96
+ # asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
+ # asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
+ pshufd $0x39,%xmm3,%xmm3
+
+ # qhasm: in0 ^= *(uint32 *) (m + 64)
+ # asm 1: xorl 64(<m=int32#5),<in0=int32#1
+ # asm 2: xorl 64(<m=%esi),<in0=%eax
+ xorl 64(%esi),%eax
+
+ # qhasm: in1 ^= *(uint32 *) (m + 68)
+ # asm 1: xorl 68(<m=int32#5),<in1=int32#2
+ # asm 2: xorl 68(<m=%esi),<in1=%ecx
+ xorl 68(%esi),%ecx
+
+ # qhasm: in2 ^= *(uint32 *) (m + 72)
+ # asm 1: xorl 72(<m=int32#5),<in2=int32#3
+ # asm 2: xorl 72(<m=%esi),<in2=%edx
+ xorl 72(%esi),%edx
+
+ # qhasm: in3 ^= *(uint32 *) (m + 76)
+ # asm 1: xorl 76(<m=int32#5),<in3=int32#4
+ # asm 2: xorl 76(<m=%esi),<in3=%ebx
+ xorl 76(%esi),%ebx
+
+ # qhasm: *(uint32 *) (out + 64) = in0
+ # asm 1: movl <in0=int32#1,64(<out=int32#6)
+ # asm 2: movl <in0=%eax,64(<out=%edi)
+ movl %eax,64(%edi)
+
+ # qhasm: *(uint32 *) (out + 68) = in1
+ # asm 1: movl <in1=int32#2,68(<out=int32#6)
+ # asm 2: movl <in1=%ecx,68(<out=%edi)
+ movl %ecx,68(%edi)
+
+ # qhasm: *(uint32 *) (out + 72) = in2
+ # asm 1: movl <in2=int32#3,72(<out=int32#6)
+ # asm 2: movl <in2=%edx,72(<out=%edi)
+ movl %edx,72(%edi)
+
+ # qhasm: *(uint32 *) (out + 76) = in3
+ # asm 1: movl <in3=int32#4,76(<out=int32#6)
+ # asm 2: movl <in3=%ebx,76(<out=%edi)
+ movl %ebx,76(%edi)
+
+ # qhasm: in0 = z0
+ # asm 1: movd <z0=int6464#1,>in0=int32#1
+ # asm 2: movd <z0=%xmm0,>in0=%eax
+ movd %xmm0,%eax
+
+ # qhasm: in1 = z1
+ # asm 1: movd <z1=int6464#2,>in1=int32#2
+ # asm 2: movd <z1=%xmm1,>in1=%ecx
+ movd %xmm1,%ecx
+
+ # qhasm: in2 = z2
+ # asm 1: movd <z2=int6464#3,>in2=int32#3
+ # asm 2: movd <z2=%xmm2,>in2=%edx
+ movd %xmm2,%edx
+
+ # qhasm: in3 = z3
+ # asm 1: movd <z3=int6464#4,>in3=int32#4
+ # asm 2: movd <z3=%xmm3,>in3=%ebx
+ movd %xmm3,%ebx
+
+ # qhasm: z0 <<<= 96
+ # asm 1: pshufd $0x39,<z0=int6464#1,<z0=int6464#1
+ # asm 2: pshufd $0x39,<z0=%xmm0,<z0=%xmm0
+ pshufd $0x39,%xmm0,%xmm0
+
+ # qhasm: z1 <<<= 96
+ # asm 1: pshufd $0x39,<z1=int6464#2,<z1=int6464#2
+ # asm 2: pshufd $0x39,<z1=%xmm1,<z1=%xmm1
+ pshufd $0x39,%xmm1,%xmm1
+
+ # qhasm: z2 <<<= 96
+ # asm 1: pshufd $0x39,<z2=int6464#3,<z2=int6464#3
+ # asm 2: pshufd $0x39,<z2=%xmm2,<z2=%xmm2
+ pshufd $0x39,%xmm2,%xmm2
+
+ # qhasm: z3 <<<= 96
+ # asm 1: pshufd $0x39,<z3=int6464#4,<z3=int6464#4
+ # asm 2: pshufd $0x39,<z3=%xmm3,<z3=%xmm3
+ pshufd $0x39,%xmm3,%xmm3
+
+ # qhasm: in0 ^= *(uint32 *) (m + 128)
+ # asm 1: xorl 128(<m=int32#5),<in0=int32#1
+ # asm 2: xorl 128(<m=%esi),<in0=%eax
+ xorl 128(%esi),%eax
+
+ # qhasm: in1 ^= *(uint32 *) (m + 132)
+ # asm 1: xorl 132(<m=int32#5),<in1=int32#2
+ # asm 2: xorl 132(<m=%esi),<in1=%ecx
+ xorl 132(%esi),%ecx
+
+ # qhasm: in2 ^= *(uint32 *) (m + 136)
+ # asm 1: xorl 136(<m=int32#5),<in2=int32#3
+ # asm 2: xorl 136(<m=%esi),<in2=%edx
+ xorl 136(%esi),%edx
+
+ # qhasm: in3 ^= *(uint32 *) (m + 140)
+ # asm 1: xorl 140(<m=int32#5),<in3=int32#4
+ # asm 2: xorl 140(<m=%esi),<in3=%ebx
+ xorl 140(%esi),%ebx
+
+ # qhasm: *(uint32 *) (out + 128) = in0
+ # asm 1: movl <in0=int32#1,128(<out=int32#6)
+ # asm 2: movl <in0=%eax,128(<out=%edi)
+ movl %eax,128(%edi)
+
+ # qhasm: *(uint32 *) (out + 132) = in1
+ # asm 1: movl <in1=int32#2,132(<out=int32#6)
+ # asm 2: movl <in1=%ecx,132(<out=%edi)
+ movl %ecx,132(%edi)
+
+ # qhasm: *(uint32 *) (out + 136) = in2
+ # asm 1: movl <in2=int32#3,136(<out=int32#6)
+ # asm 2: movl <in2=%edx,136(<out=%edi)
+ movl %edx,136(%edi)
+
+ # qhasm: *(uint32 *) (out + 140) = in3
+ # asm 1: movl <in3=int32#4,140(<out=int32#6)
+ # asm 2: movl <in3=%ebx,140(<out=%edi)
+ movl %ebx,140(%edi)
+
+ # qhasm: in0 = z0
+ # asm 1: movd <z0=int6464#1,>in0=int32#1
+ # asm 2: movd <z0=%xmm0,>in0=%eax
+ movd %xmm0,%eax
+
+ # qhasm: in1 = z1
+ # asm 1: movd <z1=int6464#2,>in1=int32#2
+ # asm 2: movd <z1=%xmm1,>in1=%ecx
+ movd %xmm1,%ecx
+
+ # qhasm: in2 = z2
+ # asm 1: movd <z2=int6464#3,>in2=int32#3
+ # asm 2: movd <z2=%xmm2,>in2=%edx
+ movd %xmm2,%edx
+
+ # qhasm: in3 = z3
+ # asm 1: movd <z3=int6464#4,>in3=int32#4
+ # asm 2: movd <z3=%xmm3,>in3=%ebx
+ movd %xmm3,%ebx
+
+ # qhasm: in0 ^= *(uint32 *) (m + 192)
+ # asm 1: xorl 192(<m=int32#5),<in0=int32#1
+ # asm 2: xorl 192(<m=%esi),<in0=%eax
+ xorl 192(%esi),%eax
+
+ # qhasm: in1 ^= *(uint32 *) (m + 196)
+ # asm 1: xorl 196(<m=int32#5),<in1=int32#2
+ # asm 2: xorl 196(<m=%esi),<in1=%ecx
+ xorl 196(%esi),%ecx
+
+ # qhasm: in2 ^= *(uint32 *) (m + 200)
+ # asm 1: xorl 200(<m=int32#5),<in2=int32#3
+ # asm 2: xorl 200(<m=%esi),<in2=%edx
+ xorl 200(%esi),%edx
+
+ # qhasm: in3 ^= *(uint32 *) (m + 204)
+ # asm 1: xorl 204(<m=int32#5),<in3=int32#4
+ # asm 2: xorl 204(<m=%esi),<in3=%ebx
+ xorl 204(%esi),%ebx
+
+ # qhasm: *(uint32 *) (out + 192) = in0
+ # asm 1: movl <in0=int32#1,192(<out=int32#6)
+ # asm 2: movl <in0=%eax,192(<out=%edi)
+ movl %eax,192(%edi)
+
+ # qhasm: *(uint32 *) (out + 196) = in1
+ # asm 1: movl <in1=int32#2,196(<out=int32#6)
+ # asm 2: movl <in1=%ecx,196(<out=%edi)
+ movl %ecx,196(%edi)
+
+ # qhasm: *(uint32 *) (out + 200) = in2
+ # asm 1: movl <in2=int32#3,200(<out=int32#6)
+ # asm 2: movl <in2=%edx,200(<out=%edi)
+ movl %edx,200(%edi)
+
+ # qhasm: *(uint32 *) (out + 204) = in3
+ # asm 1: movl <in3=int32#4,204(<out=int32#6)
+ # asm 2: movl <in3=%ebx,204(<out=%edi)
+ movl %ebx,204(%edi)
+
2854
+ # qhasm: z4 = z4_stack
2855
+ # asm 1: movdqa <z4_stack=stack128#34,>z4=int6464#1
2856
+ # asm 2: movdqa <z4_stack=560(%esp),>z4=%xmm0
2857
+ movdqa 560(%esp),%xmm0
2858
+
2859
+ # qhasm: z5 = z5_stack
2860
+ # asm 1: movdqa <z5_stack=stack128#21,>z5=int6464#2
2861
+ # asm 2: movdqa <z5_stack=352(%esp),>z5=%xmm1
2862
+ movdqa 352(%esp),%xmm1
2863
+
2864
+ # qhasm: z6 = z6_stack
2865
+ # asm 1: movdqa <z6_stack=stack128#26,>z6=int6464#3
2866
+ # asm 2: movdqa <z6_stack=432(%esp),>z6=%xmm2
2867
+ movdqa 432(%esp),%xmm2
2868
+
2869
+ # qhasm: z7 = z7_stack
2870
+ # asm 1: movdqa <z7_stack=stack128#29,>z7=int6464#4
2871
+ # asm 2: movdqa <z7_stack=480(%esp),>z7=%xmm3
2872
+ movdqa 480(%esp),%xmm3
2873
+
2874
+ # qhasm: uint32323232 z4 += orig4
2875
+ # asm 1: paddd <orig4=stack128#16,<z4=int6464#1
2876
+ # asm 2: paddd <orig4=272(%esp),<z4=%xmm0
2877
+ paddd 272(%esp),%xmm0
2878
+
2879
+ # qhasm: uint32323232 z5 += orig5
2880
+ # asm 1: paddd <orig5=stack128#5,<z5=int6464#2
2881
+ # asm 2: paddd <orig5=96(%esp),<z5=%xmm1
2882
+ paddd 96(%esp),%xmm1
2883
+
2884
+ # qhasm: uint32323232 z6 += orig6
2885
+ # asm 1: paddd <orig6=stack128#9,<z6=int6464#3
2886
+ # asm 2: paddd <orig6=160(%esp),<z6=%xmm2
2887
+ paddd 160(%esp),%xmm2
2888
+
2889
+ # qhasm: uint32323232 z7 += orig7
2890
+ # asm 1: paddd <orig7=stack128#13,<z7=int6464#4
2891
+ # asm 2: paddd <orig7=224(%esp),<z7=%xmm3
2892
+ paddd 224(%esp),%xmm3
2893
+
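+ The movdqa-from-stack plus paddd pairs just above are Salsa20's feedforward: after the rounds, each four-lane row of the four-block state is reloaded from its spill slot and the saved input words (orig4..orig7 here) are added back modulo 2^32 before the keystream is emitted. Per row, roughly (a sketch with illustrative names, not the shipped code):
+
+     #include <emmintrin.h>
+
+     /* Reload one row of the working state and add the original input row. */
+     static __m128i feedforward_row(const __m128i *z_stack, __m128i orig)
+     {
+         __m128i z = _mm_load_si128(z_stack);  /* movdqa zN_stack, xmmN */
+         return _mm_add_epi32(z, orig);        /* paddd origN, xmmN     */
+     }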
2894
+ # qhasm: in4 = z4
2895
+ # asm 1: movd <z4=int6464#1,>in4=int32#1
2896
+ # asm 2: movd <z4=%xmm0,>in4=%eax
2897
+ movd %xmm0,%eax
2898
+
2899
+ # qhasm: in5 = z5
2900
+ # asm 1: movd <z5=int6464#2,>in5=int32#2
2901
+ # asm 2: movd <z5=%xmm1,>in5=%ecx
2902
+ movd %xmm1,%ecx
2903
+
2904
+ # qhasm: in6 = z6
2905
+ # asm 1: movd <z6=int6464#3,>in6=int32#3
2906
+ # asm 2: movd <z6=%xmm2,>in6=%edx
2907
+ movd %xmm2,%edx
2908
+
2909
+ # qhasm: in7 = z7
2910
+ # asm 1: movd <z7=int6464#4,>in7=int32#4
2911
+ # asm 2: movd <z7=%xmm3,>in7=%ebx
2912
+ movd %xmm3,%ebx
2913
+
2914
+ # qhasm: z4 <<<= 96
2915
+ # asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
2916
+ # asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
2917
+ pshufd $0x39,%xmm0,%xmm0
2918
+
2919
+ # qhasm: z5 <<<= 96
2920
+ # asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
2921
+ # asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
2922
+ pshufd $0x39,%xmm1,%xmm1
2923
+
2924
+ # qhasm: z6 <<<= 96
2925
+ # asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
2926
+ # asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
2927
+ pshufd $0x39,%xmm2,%xmm2
2928
+
2929
+ # qhasm: z7 <<<= 96
2930
+ # asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
2931
+ # asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
2932
+ pshufd $0x39,%xmm3,%xmm3
2933
+
2934
+ # qhasm: in4 ^= *(uint32 *) (m + 16)
2935
+ # asm 1: xorl 16(<m=int32#5),<in4=int32#1
2936
+ # asm 2: xorl 16(<m=%esi),<in4=%eax
2937
+ xorl 16(%esi),%eax
2938
+
2939
+ # qhasm: in5 ^= *(uint32 *) (m + 20)
2940
+ # asm 1: xorl 20(<m=int32#5),<in5=int32#2
2941
+ # asm 2: xorl 20(<m=%esi),<in5=%ecx
2942
+ xorl 20(%esi),%ecx
2943
+
2944
+ # qhasm: in6 ^= *(uint32 *) (m + 24)
2945
+ # asm 1: xorl 24(<m=int32#5),<in6=int32#3
2946
+ # asm 2: xorl 24(<m=%esi),<in6=%edx
2947
+ xorl 24(%esi),%edx
2948
+
2949
+ # qhasm: in7 ^= *(uint32 *) (m + 28)
2950
+ # asm 1: xorl 28(<m=int32#5),<in7=int32#4
2951
+ # asm 2: xorl 28(<m=%esi),<in7=%ebx
2952
+ xorl 28(%esi),%ebx
2953
+
2954
+ # qhasm: *(uint32 *) (out + 16) = in4
2955
+ # asm 1: movl <in4=int32#1,16(<out=int32#6)
2956
+ # asm 2: movl <in4=%eax,16(<out=%edi)
2957
+ movl %eax,16(%edi)
2958
+
2959
+ # qhasm: *(uint32 *) (out + 20) = in5
2960
+ # asm 1: movl <in5=int32#2,20(<out=int32#6)
2961
+ # asm 2: movl <in5=%ecx,20(<out=%edi)
2962
+ movl %ecx,20(%edi)
2963
+
2964
+ # qhasm: *(uint32 *) (out + 24) = in6
2965
+ # asm 1: movl <in6=int32#3,24(<out=int32#6)
2966
+ # asm 2: movl <in6=%edx,24(<out=%edi)
2967
+ movl %edx,24(%edi)
2968
+
2969
+ # qhasm: *(uint32 *) (out + 28) = in7
2970
+ # asm 1: movl <in7=int32#4,28(<out=int32#6)
2971
+ # asm 2: movl <in7=%ebx,28(<out=%edi)
2972
+ movl %ebx,28(%edi)
2973
+
2974
+ # qhasm: in4 = z4
2975
+ # asm 1: movd <z4=int6464#1,>in4=int32#1
2976
+ # asm 2: movd <z4=%xmm0,>in4=%eax
2977
+ movd %xmm0,%eax
2978
+
2979
+ # qhasm: in5 = z5
2980
+ # asm 1: movd <z5=int6464#2,>in5=int32#2
2981
+ # asm 2: movd <z5=%xmm1,>in5=%ecx
2982
+ movd %xmm1,%ecx
2983
+
2984
+ # qhasm: in6 = z6
2985
+ # asm 1: movd <z6=int6464#3,>in6=int32#3
2986
+ # asm 2: movd <z6=%xmm2,>in6=%edx
2987
+ movd %xmm2,%edx
2988
+
2989
+ # qhasm: in7 = z7
2990
+ # asm 1: movd <z7=int6464#4,>in7=int32#4
2991
+ # asm 2: movd <z7=%xmm3,>in7=%ebx
2992
+ movd %xmm3,%ebx
2993
+
2994
+ # qhasm: z4 <<<= 96
2995
+ # asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
2996
+ # asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
2997
+ pshufd $0x39,%xmm0,%xmm0
2998
+
2999
+ # qhasm: z5 <<<= 96
3000
+ # asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
3001
+ # asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
3002
+ pshufd $0x39,%xmm1,%xmm1
3003
+
3004
+ # qhasm: z6 <<<= 96
3005
+ # asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
3006
+ # asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
3007
+ pshufd $0x39,%xmm2,%xmm2
3008
+
3009
+ # qhasm: z7 <<<= 96
3010
+ # asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
3011
+ # asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
3012
+ pshufd $0x39,%xmm3,%xmm3
3013
+
3014
+ # qhasm: in4 ^= *(uint32 *) (m + 80)
3015
+ # asm 1: xorl 80(<m=int32#5),<in4=int32#1
3016
+ # asm 2: xorl 80(<m=%esi),<in4=%eax
3017
+ xorl 80(%esi),%eax
3018
+
3019
+ # qhasm: in5 ^= *(uint32 *) (m + 84)
3020
+ # asm 1: xorl 84(<m=int32#5),<in5=int32#2
3021
+ # asm 2: xorl 84(<m=%esi),<in5=%ecx
3022
+ xorl 84(%esi),%ecx
3023
+
3024
+ # qhasm: in6 ^= *(uint32 *) (m + 88)
3025
+ # asm 1: xorl 88(<m=int32#5),<in6=int32#3
3026
+ # asm 2: xorl 88(<m=%esi),<in6=%edx
3027
+ xorl 88(%esi),%edx
3028
+
3029
+ # qhasm: in7 ^= *(uint32 *) (m + 92)
3030
+ # asm 1: xorl 92(<m=int32#5),<in7=int32#4
3031
+ # asm 2: xorl 92(<m=%esi),<in7=%ebx
3032
+ xorl 92(%esi),%ebx
3033
+
3034
+ # qhasm: *(uint32 *) (out + 80) = in4
3035
+ # asm 1: movl <in4=int32#1,80(<out=int32#6)
3036
+ # asm 2: movl <in4=%eax,80(<out=%edi)
3037
+ movl %eax,80(%edi)
3038
+
3039
+ # qhasm: *(uint32 *) (out + 84) = in5
3040
+ # asm 1: movl <in5=int32#2,84(<out=int32#6)
3041
+ # asm 2: movl <in5=%ecx,84(<out=%edi)
3042
+ movl %ecx,84(%edi)
3043
+
3044
+ # qhasm: *(uint32 *) (out + 88) = in6
3045
+ # asm 1: movl <in6=int32#3,88(<out=int32#6)
3046
+ # asm 2: movl <in6=%edx,88(<out=%edi)
3047
+ movl %edx,88(%edi)
3048
+
3049
+ # qhasm: *(uint32 *) (out + 92) = in7
3050
+ # asm 1: movl <in7=int32#4,92(<out=int32#6)
3051
+ # asm 2: movl <in7=%ebx,92(<out=%edi)
3052
+ movl %ebx,92(%edi)
3053
+
3054
+ # qhasm: in4 = z4
3055
+ # asm 1: movd <z4=int6464#1,>in4=int32#1
3056
+ # asm 2: movd <z4=%xmm0,>in4=%eax
3057
+ movd %xmm0,%eax
3058
+
3059
+ # qhasm: in5 = z5
3060
+ # asm 1: movd <z5=int6464#2,>in5=int32#2
3061
+ # asm 2: movd <z5=%xmm1,>in5=%ecx
3062
+ movd %xmm1,%ecx
3063
+
3064
+ # qhasm: in6 = z6
3065
+ # asm 1: movd <z6=int6464#3,>in6=int32#3
3066
+ # asm 2: movd <z6=%xmm2,>in6=%edx
3067
+ movd %xmm2,%edx
3068
+
3069
+ # qhasm: in7 = z7
3070
+ # asm 1: movd <z7=int6464#4,>in7=int32#4
3071
+ # asm 2: movd <z7=%xmm3,>in7=%ebx
3072
+ movd %xmm3,%ebx
3073
+
3074
+ # qhasm: z4 <<<= 96
3075
+ # asm 1: pshufd $0x39,<z4=int6464#1,<z4=int6464#1
3076
+ # asm 2: pshufd $0x39,<z4=%xmm0,<z4=%xmm0
3077
+ pshufd $0x39,%xmm0,%xmm0
3078
+
3079
+ # qhasm: z5 <<<= 96
3080
+ # asm 1: pshufd $0x39,<z5=int6464#2,<z5=int6464#2
3081
+ # asm 2: pshufd $0x39,<z5=%xmm1,<z5=%xmm1
3082
+ pshufd $0x39,%xmm1,%xmm1
3083
+
3084
+ # qhasm: z6 <<<= 96
3085
+ # asm 1: pshufd $0x39,<z6=int6464#3,<z6=int6464#3
3086
+ # asm 2: pshufd $0x39,<z6=%xmm2,<z6=%xmm2
3087
+ pshufd $0x39,%xmm2,%xmm2
3088
+
3089
+ # qhasm: z7 <<<= 96
3090
+ # asm 1: pshufd $0x39,<z7=int6464#4,<z7=int6464#4
3091
+ # asm 2: pshufd $0x39,<z7=%xmm3,<z7=%xmm3
3092
+ pshufd $0x39,%xmm3,%xmm3
3093
+
3094
+ # qhasm: in4 ^= *(uint32 *) (m + 144)
3095
+ # asm 1: xorl 144(<m=int32#5),<in4=int32#1
3096
+ # asm 2: xorl 144(<m=%esi),<in4=%eax
3097
+ xorl 144(%esi),%eax
3098
+
3099
+ # qhasm: in5 ^= *(uint32 *) (m + 148)
3100
+ # asm 1: xorl 148(<m=int32#5),<in5=int32#2
3101
+ # asm 2: xorl 148(<m=%esi),<in5=%ecx
3102
+ xorl 148(%esi),%ecx
3103
+
3104
+ # qhasm: in6 ^= *(uint32 *) (m + 152)
3105
+ # asm 1: xorl 152(<m=int32#5),<in6=int32#3
3106
+ # asm 2: xorl 152(<m=%esi),<in6=%edx
3107
+ xorl 152(%esi),%edx
3108
+
3109
+ # qhasm: in7 ^= *(uint32 *) (m + 156)
3110
+ # asm 1: xorl 156(<m=int32#5),<in7=int32#4
3111
+ # asm 2: xorl 156(<m=%esi),<in7=%ebx
3112
+ xorl 156(%esi),%ebx
3113
+
3114
+ # qhasm: *(uint32 *) (out + 144) = in4
3115
+ # asm 1: movl <in4=int32#1,144(<out=int32#6)
3116
+ # asm 2: movl <in4=%eax,144(<out=%edi)
3117
+ movl %eax,144(%edi)
3118
+
3119
+ # qhasm: *(uint32 *) (out + 148) = in5
3120
+ # asm 1: movl <in5=int32#2,148(<out=int32#6)
3121
+ # asm 2: movl <in5=%ecx,148(<out=%edi)
3122
+ movl %ecx,148(%edi)
3123
+
3124
+ # qhasm: *(uint32 *) (out + 152) = in6
3125
+ # asm 1: movl <in6=int32#3,152(<out=int32#6)
3126
+ # asm 2: movl <in6=%edx,152(<out=%edi)
3127
+ movl %edx,152(%edi)
3128
+
3129
+ # qhasm: *(uint32 *) (out + 156) = in7
3130
+ # asm 1: movl <in7=int32#4,156(<out=int32#6)
3131
+ # asm 2: movl <in7=%ebx,156(<out=%edi)
3132
+ movl %ebx,156(%edi)
3133
+
3134
+ # qhasm: in4 = z4
3135
+ # asm 1: movd <z4=int6464#1,>in4=int32#1
3136
+ # asm 2: movd <z4=%xmm0,>in4=%eax
3137
+ movd %xmm0,%eax
3138
+
3139
+ # qhasm: in5 = z5
3140
+ # asm 1: movd <z5=int6464#2,>in5=int32#2
3141
+ # asm 2: movd <z5=%xmm1,>in5=%ecx
3142
+ movd %xmm1,%ecx
3143
+
3144
+ # qhasm: in6 = z6
3145
+ # asm 1: movd <z6=int6464#3,>in6=int32#3
3146
+ # asm 2: movd <z6=%xmm2,>in6=%edx
3147
+ movd %xmm2,%edx
3148
+
3149
+ # qhasm: in7 = z7
3150
+ # asm 1: movd <z7=int6464#4,>in7=int32#4
3151
+ # asm 2: movd <z7=%xmm3,>in7=%ebx
3152
+ movd %xmm3,%ebx
3153
+
3154
+ # qhasm: in4 ^= *(uint32 *) (m + 208)
3155
+ # asm 1: xorl 208(<m=int32#5),<in4=int32#1
3156
+ # asm 2: xorl 208(<m=%esi),<in4=%eax
3157
+ xorl 208(%esi),%eax
3158
+
3159
+ # qhasm: in5 ^= *(uint32 *) (m + 212)
3160
+ # asm 1: xorl 212(<m=int32#5),<in5=int32#2
3161
+ # asm 2: xorl 212(<m=%esi),<in5=%ecx
3162
+ xorl 212(%esi),%ecx
3163
+
3164
+ # qhasm: in6 ^= *(uint32 *) (m + 216)
3165
+ # asm 1: xorl 216(<m=int32#5),<in6=int32#3
3166
+ # asm 2: xorl 216(<m=%esi),<in6=%edx
3167
+ xorl 216(%esi),%edx
3168
+
3169
+ # qhasm: in7 ^= *(uint32 *) (m + 220)
3170
+ # asm 1: xorl 220(<m=int32#5),<in7=int32#4
3171
+ # asm 2: xorl 220(<m=%esi),<in7=%ebx
3172
+ xorl 220(%esi),%ebx
3173
+
3174
+ # qhasm: *(uint32 *) (out + 208) = in4
3175
+ # asm 1: movl <in4=int32#1,208(<out=int32#6)
3176
+ # asm 2: movl <in4=%eax,208(<out=%edi)
3177
+ movl %eax,208(%edi)
3178
+
3179
+ # qhasm: *(uint32 *) (out + 212) = in5
3180
+ # asm 1: movl <in5=int32#2,212(<out=int32#6)
3181
+ # asm 2: movl <in5=%ecx,212(<out=%edi)
3182
+ movl %ecx,212(%edi)
3183
+
3184
+ # qhasm: *(uint32 *) (out + 216) = in6
3185
+ # asm 1: movl <in6=int32#3,216(<out=int32#6)
3186
+ # asm 2: movl <in6=%edx,216(<out=%edi)
3187
+ movl %edx,216(%edi)
3188
+
3189
+ # qhasm: *(uint32 *) (out + 220) = in7
3190
+ # asm 1: movl <in7=int32#4,220(<out=int32#6)
3191
+ # asm 2: movl <in7=%ebx,220(<out=%edi)
3192
+ movl %ebx,220(%edi)
3193
+
3194
+ # qhasm: z8 = z8_stack
3195
+ # asm 1: movdqa <z8_stack=stack128#37,>z8=int6464#1
3196
+ # asm 2: movdqa <z8_stack=608(%esp),>z8=%xmm0
3197
+ movdqa 608(%esp),%xmm0
3198
+
3199
+ # qhasm: z9 = z9_stack
3200
+ # asm 1: movdqa <z9_stack=stack128#32,>z9=int6464#2
3201
+ # asm 2: movdqa <z9_stack=528(%esp),>z9=%xmm1
3202
+ movdqa 528(%esp),%xmm1
3203
+
3204
+ # qhasm: z10 = z10_stack
3205
+ # asm 1: movdqa <z10_stack=stack128#22,>z10=int6464#3
3206
+ # asm 2: movdqa <z10_stack=368(%esp),>z10=%xmm2
3207
+ movdqa 368(%esp),%xmm2
3208
+
3209
+ # qhasm: z11 = z11_stack
3210
+ # asm 1: movdqa <z11_stack=stack128#27,>z11=int6464#4
3211
+ # asm 2: movdqa <z11_stack=448(%esp),>z11=%xmm3
3212
+ movdqa 448(%esp),%xmm3
3213
+
3214
+ # qhasm: uint32323232 z8 += orig8
3215
+ # asm 1: paddd <orig8=stack128#19,<z8=int6464#1
3216
+ # asm 2: paddd <orig8=320(%esp),<z8=%xmm0
3217
+ paddd 320(%esp),%xmm0
3218
+
3219
+ # qhasm: uint32323232 z9 += orig9
3220
+ # asm 1: paddd <orig9=stack128#20,<z9=int6464#2
3221
+ # asm 2: paddd <orig9=336(%esp),<z9=%xmm1
3222
+ paddd 336(%esp),%xmm1
3223
+
3224
+ # qhasm: uint32323232 z10 += orig10
3225
+ # asm 1: paddd <orig10=stack128#6,<z10=int6464#3
3226
+ # asm 2: paddd <orig10=112(%esp),<z10=%xmm2
3227
+ paddd 112(%esp),%xmm2
3228
+
3229
+ # qhasm: uint32323232 z11 += orig11
3230
+ # asm 1: paddd <orig11=stack128#10,<z11=int6464#4
3231
+ # asm 2: paddd <orig11=176(%esp),<z11=%xmm3
3232
+ paddd 176(%esp),%xmm3
3233
+
3234
+ # qhasm: in8 = z8
3235
+ # asm 1: movd <z8=int6464#1,>in8=int32#1
3236
+ # asm 2: movd <z8=%xmm0,>in8=%eax
3237
+ movd %xmm0,%eax
3238
+
3239
+ # qhasm: in9 = z9
3240
+ # asm 1: movd <z9=int6464#2,>in9=int32#2
3241
+ # asm 2: movd <z9=%xmm1,>in9=%ecx
3242
+ movd %xmm1,%ecx
3243
+
3244
+ # qhasm: in10 = z10
3245
+ # asm 1: movd <z10=int6464#3,>in10=int32#3
3246
+ # asm 2: movd <z10=%xmm2,>in10=%edx
3247
+ movd %xmm2,%edx
3248
+
3249
+ # qhasm: in11 = z11
3250
+ # asm 1: movd <z11=int6464#4,>in11=int32#4
3251
+ # asm 2: movd <z11=%xmm3,>in11=%ebx
3252
+ movd %xmm3,%ebx
3253
+
3254
+ # qhasm: z8 <<<= 96
3255
+ # asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3256
+ # asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3257
+ pshufd $0x39,%xmm0,%xmm0
3258
+
3259
+ # qhasm: z9 <<<= 96
3260
+ # asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3261
+ # asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3262
+ pshufd $0x39,%xmm1,%xmm1
3263
+
3264
+ # qhasm: z10 <<<= 96
3265
+ # asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3266
+ # asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3267
+ pshufd $0x39,%xmm2,%xmm2
3268
+
3269
+ # qhasm: z11 <<<= 96
3270
+ # asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3271
+ # asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3272
+ pshufd $0x39,%xmm3,%xmm3
3273
+
3274
+ # qhasm: in8 ^= *(uint32 *) (m + 32)
3275
+ # asm 1: xorl 32(<m=int32#5),<in8=int32#1
3276
+ # asm 2: xorl 32(<m=%esi),<in8=%eax
3277
+ xorl 32(%esi),%eax
3278
+
3279
+ # qhasm: in9 ^= *(uint32 *) (m + 36)
3280
+ # asm 1: xorl 36(<m=int32#5),<in9=int32#2
3281
+ # asm 2: xorl 36(<m=%esi),<in9=%ecx
3282
+ xorl 36(%esi),%ecx
3283
+
3284
+ # qhasm: in10 ^= *(uint32 *) (m + 40)
3285
+ # asm 1: xorl 40(<m=int32#5),<in10=int32#3
3286
+ # asm 2: xorl 40(<m=%esi),<in10=%edx
3287
+ xorl 40(%esi),%edx
3288
+
3289
+ # qhasm: in11 ^= *(uint32 *) (m + 44)
3290
+ # asm 1: xorl 44(<m=int32#5),<in11=int32#4
3291
+ # asm 2: xorl 44(<m=%esi),<in11=%ebx
3292
+ xorl 44(%esi),%ebx
3293
+
3294
+ # qhasm: *(uint32 *) (out + 32) = in8
3295
+ # asm 1: movl <in8=int32#1,32(<out=int32#6)
3296
+ # asm 2: movl <in8=%eax,32(<out=%edi)
3297
+ movl %eax,32(%edi)
3298
+
3299
+ # qhasm: *(uint32 *) (out + 36) = in9
3300
+ # asm 1: movl <in9=int32#2,36(<out=int32#6)
3301
+ # asm 2: movl <in9=%ecx,36(<out=%edi)
3302
+ movl %ecx,36(%edi)
3303
+
3304
+ # qhasm: *(uint32 *) (out + 40) = in10
3305
+ # asm 1: movl <in10=int32#3,40(<out=int32#6)
3306
+ # asm 2: movl <in10=%edx,40(<out=%edi)
3307
+ movl %edx,40(%edi)
3308
+
3309
+ # qhasm: *(uint32 *) (out + 44) = in11
3310
+ # asm 1: movl <in11=int32#4,44(<out=int32#6)
3311
+ # asm 2: movl <in11=%ebx,44(<out=%edi)
3312
+ movl %ebx,44(%edi)
3313
+
3314
+ # qhasm: in8 = z8
3315
+ # asm 1: movd <z8=int6464#1,>in8=int32#1
3316
+ # asm 2: movd <z8=%xmm0,>in8=%eax
3317
+ movd %xmm0,%eax
3318
+
3319
+ # qhasm: in9 = z9
3320
+ # asm 1: movd <z9=int6464#2,>in9=int32#2
3321
+ # asm 2: movd <z9=%xmm1,>in9=%ecx
3322
+ movd %xmm1,%ecx
3323
+
3324
+ # qhasm: in10 = z10
3325
+ # asm 1: movd <z10=int6464#3,>in10=int32#3
3326
+ # asm 2: movd <z10=%xmm2,>in10=%edx
3327
+ movd %xmm2,%edx
3328
+
3329
+ # qhasm: in11 = z11
3330
+ # asm 1: movd <z11=int6464#4,>in11=int32#4
3331
+ # asm 2: movd <z11=%xmm3,>in11=%ebx
3332
+ movd %xmm3,%ebx
3333
+
3334
+ # qhasm: z8 <<<= 96
3335
+ # asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3336
+ # asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3337
+ pshufd $0x39,%xmm0,%xmm0
3338
+
3339
+ # qhasm: z9 <<<= 96
3340
+ # asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3341
+ # asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3342
+ pshufd $0x39,%xmm1,%xmm1
3343
+
3344
+ # qhasm: z10 <<<= 96
3345
+ # asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3346
+ # asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3347
+ pshufd $0x39,%xmm2,%xmm2
3348
+
3349
+ # qhasm: z11 <<<= 96
3350
+ # asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3351
+ # asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3352
+ pshufd $0x39,%xmm3,%xmm3
3353
+
3354
+ # qhasm: in8 ^= *(uint32 *) (m + 96)
3355
+ # asm 1: xorl 96(<m=int32#5),<in8=int32#1
3356
+ # asm 2: xorl 96(<m=%esi),<in8=%eax
3357
+ xorl 96(%esi),%eax
3358
+
3359
+ # qhasm: in9 ^= *(uint32 *) (m + 100)
3360
+ # asm 1: xorl 100(<m=int32#5),<in9=int32#2
3361
+ # asm 2: xorl 100(<m=%esi),<in9=%ecx
3362
+ xorl 100(%esi),%ecx
3363
+
3364
+ # qhasm: in10 ^= *(uint32 *) (m + 104)
3365
+ # asm 1: xorl 104(<m=int32#5),<in10=int32#3
3366
+ # asm 2: xorl 104(<m=%esi),<in10=%edx
3367
+ xorl 104(%esi),%edx
3368
+
3369
+ # qhasm: in11 ^= *(uint32 *) (m + 108)
3370
+ # asm 1: xorl 108(<m=int32#5),<in11=int32#4
3371
+ # asm 2: xorl 108(<m=%esi),<in11=%ebx
3372
+ xorl 108(%esi),%ebx
3373
+
3374
+ # qhasm: *(uint32 *) (out + 96) = in8
3375
+ # asm 1: movl <in8=int32#1,96(<out=int32#6)
3376
+ # asm 2: movl <in8=%eax,96(<out=%edi)
3377
+ movl %eax,96(%edi)
3378
+
3379
+ # qhasm: *(uint32 *) (out + 100) = in9
3380
+ # asm 1: movl <in9=int32#2,100(<out=int32#6)
3381
+ # asm 2: movl <in9=%ecx,100(<out=%edi)
3382
+ movl %ecx,100(%edi)
3383
+
3384
+ # qhasm: *(uint32 *) (out + 104) = in10
3385
+ # asm 1: movl <in10=int32#3,104(<out=int32#6)
3386
+ # asm 2: movl <in10=%edx,104(<out=%edi)
3387
+ movl %edx,104(%edi)
3388
+
3389
+ # qhasm: *(uint32 *) (out + 108) = in11
3390
+ # asm 1: movl <in11=int32#4,108(<out=int32#6)
3391
+ # asm 2: movl <in11=%ebx,108(<out=%edi)
3392
+ movl %ebx,108(%edi)
3393
+
3394
+ # qhasm: in8 = z8
3395
+ # asm 1: movd <z8=int6464#1,>in8=int32#1
3396
+ # asm 2: movd <z8=%xmm0,>in8=%eax
3397
+ movd %xmm0,%eax
3398
+
3399
+ # qhasm: in9 = z9
3400
+ # asm 1: movd <z9=int6464#2,>in9=int32#2
3401
+ # asm 2: movd <z9=%xmm1,>in9=%ecx
3402
+ movd %xmm1,%ecx
3403
+
3404
+ # qhasm: in10 = z10
3405
+ # asm 1: movd <z10=int6464#3,>in10=int32#3
3406
+ # asm 2: movd <z10=%xmm2,>in10=%edx
3407
+ movd %xmm2,%edx
3408
+
3409
+ # qhasm: in11 = z11
3410
+ # asm 1: movd <z11=int6464#4,>in11=int32#4
3411
+ # asm 2: movd <z11=%xmm3,>in11=%ebx
3412
+ movd %xmm3,%ebx
3413
+
3414
+ # qhasm: z8 <<<= 96
3415
+ # asm 1: pshufd $0x39,<z8=int6464#1,<z8=int6464#1
3416
+ # asm 2: pshufd $0x39,<z8=%xmm0,<z8=%xmm0
3417
+ pshufd $0x39,%xmm0,%xmm0
3418
+
3419
+ # qhasm: z9 <<<= 96
3420
+ # asm 1: pshufd $0x39,<z9=int6464#2,<z9=int6464#2
3421
+ # asm 2: pshufd $0x39,<z9=%xmm1,<z9=%xmm1
3422
+ pshufd $0x39,%xmm1,%xmm1
3423
+
3424
+ # qhasm: z10 <<<= 96
3425
+ # asm 1: pshufd $0x39,<z10=int6464#3,<z10=int6464#3
3426
+ # asm 2: pshufd $0x39,<z10=%xmm2,<z10=%xmm2
3427
+ pshufd $0x39,%xmm2,%xmm2
3428
+
3429
+ # qhasm: z11 <<<= 96
3430
+ # asm 1: pshufd $0x39,<z11=int6464#4,<z11=int6464#4
3431
+ # asm 2: pshufd $0x39,<z11=%xmm3,<z11=%xmm3
3432
+ pshufd $0x39,%xmm3,%xmm3
3433
+
3434
+ # qhasm: in8 ^= *(uint32 *) (m + 160)
3435
+ # asm 1: xorl 160(<m=int32#5),<in8=int32#1
3436
+ # asm 2: xorl 160(<m=%esi),<in8=%eax
3437
+ xorl 160(%esi),%eax
3438
+
3439
+ # qhasm: in9 ^= *(uint32 *) (m + 164)
3440
+ # asm 1: xorl 164(<m=int32#5),<in9=int32#2
3441
+ # asm 2: xorl 164(<m=%esi),<in9=%ecx
3442
+ xorl 164(%esi),%ecx
3443
+
3444
+ # qhasm: in10 ^= *(uint32 *) (m + 168)
3445
+ # asm 1: xorl 168(<m=int32#5),<in10=int32#3
3446
+ # asm 2: xorl 168(<m=%esi),<in10=%edx
3447
+ xorl 168(%esi),%edx
3448
+
3449
+ # qhasm: in11 ^= *(uint32 *) (m + 172)
3450
+ # asm 1: xorl 172(<m=int32#5),<in11=int32#4
3451
+ # asm 2: xorl 172(<m=%esi),<in11=%ebx
3452
+ xorl 172(%esi),%ebx
3453
+
3454
+ # qhasm: *(uint32 *) (out + 160) = in8
3455
+ # asm 1: movl <in8=int32#1,160(<out=int32#6)
3456
+ # asm 2: movl <in8=%eax,160(<out=%edi)
3457
+ movl %eax,160(%edi)
3458
+
3459
+ # qhasm: *(uint32 *) (out + 164) = in9
3460
+ # asm 1: movl <in9=int32#2,164(<out=int32#6)
3461
+ # asm 2: movl <in9=%ecx,164(<out=%edi)
3462
+ movl %ecx,164(%edi)
3463
+
3464
+ # qhasm: *(uint32 *) (out + 168) = in10
3465
+ # asm 1: movl <in10=int32#3,168(<out=int32#6)
3466
+ # asm 2: movl <in10=%edx,168(<out=%edi)
3467
+ movl %edx,168(%edi)
3468
+
3469
+ # qhasm: *(uint32 *) (out + 172) = in11
3470
+ # asm 1: movl <in11=int32#4,172(<out=int32#6)
3471
+ # asm 2: movl <in11=%ebx,172(<out=%edi)
3472
+ movl %ebx,172(%edi)
3473
+
3474
+ # qhasm: in8 = z8
3475
+ # asm 1: movd <z8=int6464#1,>in8=int32#1
3476
+ # asm 2: movd <z8=%xmm0,>in8=%eax
3477
+ movd %xmm0,%eax
3478
+
3479
+ # qhasm: in9 = z9
3480
+ # asm 1: movd <z9=int6464#2,>in9=int32#2
3481
+ # asm 2: movd <z9=%xmm1,>in9=%ecx
3482
+ movd %xmm1,%ecx
3483
+
3484
+ # qhasm: in10 = z10
3485
+ # asm 1: movd <z10=int6464#3,>in10=int32#3
3486
+ # asm 2: movd <z10=%xmm2,>in10=%edx
3487
+ movd %xmm2,%edx
3488
+
3489
+ # qhasm: in11 = z11
3490
+ # asm 1: movd <z11=int6464#4,>in11=int32#4
3491
+ # asm 2: movd <z11=%xmm3,>in11=%ebx
3492
+ movd %xmm3,%ebx
3493
+
3494
+ # qhasm: in8 ^= *(uint32 *) (m + 224)
3495
+ # asm 1: xorl 224(<m=int32#5),<in8=int32#1
3496
+ # asm 2: xorl 224(<m=%esi),<in8=%eax
3497
+ xorl 224(%esi),%eax
3498
+
3499
+ # qhasm: in9 ^= *(uint32 *) (m + 228)
3500
+ # asm 1: xorl 228(<m=int32#5),<in9=int32#2
3501
+ # asm 2: xorl 228(<m=%esi),<in9=%ecx
3502
+ xorl 228(%esi),%ecx
3503
+
3504
+ # qhasm: in10 ^= *(uint32 *) (m + 232)
3505
+ # asm 1: xorl 232(<m=int32#5),<in10=int32#3
3506
+ # asm 2: xorl 232(<m=%esi),<in10=%edx
3507
+ xorl 232(%esi),%edx
3508
+
3509
+ # qhasm: in11 ^= *(uint32 *) (m + 236)
3510
+ # asm 1: xorl 236(<m=int32#5),<in11=int32#4
3511
+ # asm 2: xorl 236(<m=%esi),<in11=%ebx
3512
+ xorl 236(%esi),%ebx
3513
+
3514
+ # qhasm: *(uint32 *) (out + 224) = in8
3515
+ # asm 1: movl <in8=int32#1,224(<out=int32#6)
3516
+ # asm 2: movl <in8=%eax,224(<out=%edi)
3517
+ movl %eax,224(%edi)
3518
+
3519
+ # qhasm: *(uint32 *) (out + 228) = in9
3520
+ # asm 1: movl <in9=int32#2,228(<out=int32#6)
3521
+ # asm 2: movl <in9=%ecx,228(<out=%edi)
3522
+ movl %ecx,228(%edi)
3523
+
3524
+ # qhasm: *(uint32 *) (out + 232) = in10
3525
+ # asm 1: movl <in10=int32#3,232(<out=int32#6)
3526
+ # asm 2: movl <in10=%edx,232(<out=%edi)
3527
+ movl %edx,232(%edi)
3528
+
3529
+ # qhasm: *(uint32 *) (out + 236) = in11
3530
+ # asm 1: movl <in11=int32#4,236(<out=int32#6)
3531
+ # asm 2: movl <in11=%ebx,236(<out=%edi)
3532
+ movl %ebx,236(%edi)
3533
+
3534
+ # qhasm: z12 = z12_stack
3535
+ # asm 1: movdqa <z12_stack=stack128#35,>z12=int6464#1
3536
+ # asm 2: movdqa <z12_stack=576(%esp),>z12=%xmm0
3537
+ movdqa 576(%esp),%xmm0
3538
+
3539
+ # qhasm: z13 = z13_stack
3540
+ # asm 1: movdqa <z13_stack=stack128#30,>z13=int6464#2
3541
+ # asm 2: movdqa <z13_stack=496(%esp),>z13=%xmm1
3542
+ movdqa 496(%esp),%xmm1
3543
+
3544
+ # qhasm: z14 = z14_stack
3545
+ # asm 1: movdqa <z14_stack=stack128#24,>z14=int6464#3
3546
+ # asm 2: movdqa <z14_stack=400(%esp),>z14=%xmm2
3547
+ movdqa 400(%esp),%xmm2
3548
+
3549
+ # qhasm: z15 = z15_stack
3550
+ # asm 1: movdqa <z15_stack=stack128#23,>z15=int6464#4
3551
+ # asm 2: movdqa <z15_stack=384(%esp),>z15=%xmm3
3552
+ movdqa 384(%esp),%xmm3
3553
+
3554
+ # qhasm: uint32323232 z12 += orig12
3555
+ # asm 1: paddd <orig12=stack128#11,<z12=int6464#1
3556
+ # asm 2: paddd <orig12=192(%esp),<z12=%xmm0
3557
+ paddd 192(%esp),%xmm0
3558
+
3559
+ # qhasm: uint32323232 z13 += orig13
3560
+ # asm 1: paddd <orig13=stack128#14,<z13=int6464#2
3561
+ # asm 2: paddd <orig13=240(%esp),<z13=%xmm1
3562
+ paddd 240(%esp),%xmm1
3563
+
3564
+ # qhasm: uint32323232 z14 += orig14
3565
+ # asm 1: paddd <orig14=stack128#17,<z14=int6464#3
3566
+ # asm 2: paddd <orig14=288(%esp),<z14=%xmm2
3567
+ paddd 288(%esp),%xmm2
3568
+
3569
+ # qhasm: uint32323232 z15 += orig15
3570
+ # asm 1: paddd <orig15=stack128#7,<z15=int6464#4
3571
+ # asm 2: paddd <orig15=128(%esp),<z15=%xmm3
3572
+ paddd 128(%esp),%xmm3
3573
+
3574
+ # qhasm: in12 = z12
3575
+ # asm 1: movd <z12=int6464#1,>in12=int32#1
3576
+ # asm 2: movd <z12=%xmm0,>in12=%eax
3577
+ movd %xmm0,%eax
3578
+
3579
+ # qhasm: in13 = z13
3580
+ # asm 1: movd <z13=int6464#2,>in13=int32#2
3581
+ # asm 2: movd <z13=%xmm1,>in13=%ecx
3582
+ movd %xmm1,%ecx
3583
+
3584
+ # qhasm: in14 = z14
3585
+ # asm 1: movd <z14=int6464#3,>in14=int32#3
3586
+ # asm 2: movd <z14=%xmm2,>in14=%edx
3587
+ movd %xmm2,%edx
3588
+
3589
+ # qhasm: in15 = z15
3590
+ # asm 1: movd <z15=int6464#4,>in15=int32#4
3591
+ # asm 2: movd <z15=%xmm3,>in15=%ebx
3592
+ movd %xmm3,%ebx
3593
+
3594
+ # qhasm: z12 <<<= 96
3595
+ # asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3596
+ # asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3597
+ pshufd $0x39,%xmm0,%xmm0
3598
+
3599
+ # qhasm: z13 <<<= 96
3600
+ # asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3601
+ # asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3602
+ pshufd $0x39,%xmm1,%xmm1
3603
+
3604
+ # qhasm: z14 <<<= 96
3605
+ # asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3606
+ # asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3607
+ pshufd $0x39,%xmm2,%xmm2
3608
+
3609
+ # qhasm: z15 <<<= 96
3610
+ # asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3611
+ # asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3612
+ pshufd $0x39,%xmm3,%xmm3
3613
+
3614
+ # qhasm: in12 ^= *(uint32 *) (m + 48)
3615
+ # asm 1: xorl 48(<m=int32#5),<in12=int32#1
3616
+ # asm 2: xorl 48(<m=%esi),<in12=%eax
3617
+ xorl 48(%esi),%eax
3618
+
3619
+ # qhasm: in13 ^= *(uint32 *) (m + 52)
3620
+ # asm 1: xorl 52(<m=int32#5),<in13=int32#2
3621
+ # asm 2: xorl 52(<m=%esi),<in13=%ecx
3622
+ xorl 52(%esi),%ecx
3623
+
3624
+ # qhasm: in14 ^= *(uint32 *) (m + 56)
3625
+ # asm 1: xorl 56(<m=int32#5),<in14=int32#3
3626
+ # asm 2: xorl 56(<m=%esi),<in14=%edx
3627
+ xorl 56(%esi),%edx
3628
+
3629
+ # qhasm: in15 ^= *(uint32 *) (m + 60)
3630
+ # asm 1: xorl 60(<m=int32#5),<in15=int32#4
3631
+ # asm 2: xorl 60(<m=%esi),<in15=%ebx
3632
+ xorl 60(%esi),%ebx
3633
+
3634
+ # qhasm: *(uint32 *) (out + 48) = in12
3635
+ # asm 1: movl <in12=int32#1,48(<out=int32#6)
3636
+ # asm 2: movl <in12=%eax,48(<out=%edi)
3637
+ movl %eax,48(%edi)
3638
+
3639
+ # qhasm: *(uint32 *) (out + 52) = in13
3640
+ # asm 1: movl <in13=int32#2,52(<out=int32#6)
3641
+ # asm 2: movl <in13=%ecx,52(<out=%edi)
3642
+ movl %ecx,52(%edi)
3643
+
3644
+ # qhasm: *(uint32 *) (out + 56) = in14
3645
+ # asm 1: movl <in14=int32#3,56(<out=int32#6)
3646
+ # asm 2: movl <in14=%edx,56(<out=%edi)
3647
+ movl %edx,56(%edi)
3648
+
3649
+ # qhasm: *(uint32 *) (out + 60) = in15
3650
+ # asm 1: movl <in15=int32#4,60(<out=int32#6)
3651
+ # asm 2: movl <in15=%ebx,60(<out=%edi)
3652
+ movl %ebx,60(%edi)
3653
+
3654
+ # qhasm: in12 = z12
3655
+ # asm 1: movd <z12=int6464#1,>in12=int32#1
3656
+ # asm 2: movd <z12=%xmm0,>in12=%eax
3657
+ movd %xmm0,%eax
3658
+
3659
+ # qhasm: in13 = z13
3660
+ # asm 1: movd <z13=int6464#2,>in13=int32#2
3661
+ # asm 2: movd <z13=%xmm1,>in13=%ecx
3662
+ movd %xmm1,%ecx
3663
+
3664
+ # qhasm: in14 = z14
3665
+ # asm 1: movd <z14=int6464#3,>in14=int32#3
3666
+ # asm 2: movd <z14=%xmm2,>in14=%edx
3667
+ movd %xmm2,%edx
3668
+
3669
+ # qhasm: in15 = z15
3670
+ # asm 1: movd <z15=int6464#4,>in15=int32#4
3671
+ # asm 2: movd <z15=%xmm3,>in15=%ebx
3672
+ movd %xmm3,%ebx
3673
+
3674
+ # qhasm: z12 <<<= 96
3675
+ # asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3676
+ # asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3677
+ pshufd $0x39,%xmm0,%xmm0
3678
+
3679
+ # qhasm: z13 <<<= 96
3680
+ # asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3681
+ # asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3682
+ pshufd $0x39,%xmm1,%xmm1
3683
+
3684
+ # qhasm: z14 <<<= 96
3685
+ # asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3686
+ # asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3687
+ pshufd $0x39,%xmm2,%xmm2
3688
+
3689
+ # qhasm: z15 <<<= 96
3690
+ # asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3691
+ # asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3692
+ pshufd $0x39,%xmm3,%xmm3
3693
+
3694
+ # qhasm: in12 ^= *(uint32 *) (m + 112)
3695
+ # asm 1: xorl 112(<m=int32#5),<in12=int32#1
3696
+ # asm 2: xorl 112(<m=%esi),<in12=%eax
3697
+ xorl 112(%esi),%eax
3698
+
3699
+ # qhasm: in13 ^= *(uint32 *) (m + 116)
3700
+ # asm 1: xorl 116(<m=int32#5),<in13=int32#2
3701
+ # asm 2: xorl 116(<m=%esi),<in13=%ecx
3702
+ xorl 116(%esi),%ecx
3703
+
3704
+ # qhasm: in14 ^= *(uint32 *) (m + 120)
3705
+ # asm 1: xorl 120(<m=int32#5),<in14=int32#3
3706
+ # asm 2: xorl 120(<m=%esi),<in14=%edx
3707
+ xorl 120(%esi),%edx
3708
+
3709
+ # qhasm: in15 ^= *(uint32 *) (m + 124)
3710
+ # asm 1: xorl 124(<m=int32#5),<in15=int32#4
3711
+ # asm 2: xorl 124(<m=%esi),<in15=%ebx
3712
+ xorl 124(%esi),%ebx
3713
+
3714
+ # qhasm: *(uint32 *) (out + 112) = in12
3715
+ # asm 1: movl <in12=int32#1,112(<out=int32#6)
3716
+ # asm 2: movl <in12=%eax,112(<out=%edi)
3717
+ movl %eax,112(%edi)
3718
+
3719
+ # qhasm: *(uint32 *) (out + 116) = in13
3720
+ # asm 1: movl <in13=int32#2,116(<out=int32#6)
3721
+ # asm 2: movl <in13=%ecx,116(<out=%edi)
3722
+ movl %ecx,116(%edi)
3723
+
3724
+ # qhasm: *(uint32 *) (out + 120) = in14
3725
+ # asm 1: movl <in14=int32#3,120(<out=int32#6)
3726
+ # asm 2: movl <in14=%edx,120(<out=%edi)
3727
+ movl %edx,120(%edi)
3728
+
3729
+ # qhasm: *(uint32 *) (out + 124) = in15
3730
+ # asm 1: movl <in15=int32#4,124(<out=int32#6)
3731
+ # asm 2: movl <in15=%ebx,124(<out=%edi)
3732
+ movl %ebx,124(%edi)
3733
+
3734
+ # qhasm: in12 = z12
3735
+ # asm 1: movd <z12=int6464#1,>in12=int32#1
3736
+ # asm 2: movd <z12=%xmm0,>in12=%eax
3737
+ movd %xmm0,%eax
3738
+
3739
+ # qhasm: in13 = z13
3740
+ # asm 1: movd <z13=int6464#2,>in13=int32#2
3741
+ # asm 2: movd <z13=%xmm1,>in13=%ecx
3742
+ movd %xmm1,%ecx
3743
+
3744
+ # qhasm: in14 = z14
3745
+ # asm 1: movd <z14=int6464#3,>in14=int32#3
3746
+ # asm 2: movd <z14=%xmm2,>in14=%edx
3747
+ movd %xmm2,%edx
3748
+
3749
+ # qhasm: in15 = z15
3750
+ # asm 1: movd <z15=int6464#4,>in15=int32#4
3751
+ # asm 2: movd <z15=%xmm3,>in15=%ebx
3752
+ movd %xmm3,%ebx
3753
+
3754
+ # qhasm: z12 <<<= 96
3755
+ # asm 1: pshufd $0x39,<z12=int6464#1,<z12=int6464#1
3756
+ # asm 2: pshufd $0x39,<z12=%xmm0,<z12=%xmm0
3757
+ pshufd $0x39,%xmm0,%xmm0
3758
+
3759
+ # qhasm: z13 <<<= 96
3760
+ # asm 1: pshufd $0x39,<z13=int6464#2,<z13=int6464#2
3761
+ # asm 2: pshufd $0x39,<z13=%xmm1,<z13=%xmm1
3762
+ pshufd $0x39,%xmm1,%xmm1
3763
+
3764
+ # qhasm: z14 <<<= 96
3765
+ # asm 1: pshufd $0x39,<z14=int6464#3,<z14=int6464#3
3766
+ # asm 2: pshufd $0x39,<z14=%xmm2,<z14=%xmm2
3767
+ pshufd $0x39,%xmm2,%xmm2
3768
+
3769
+ # qhasm: z15 <<<= 96
3770
+ # asm 1: pshufd $0x39,<z15=int6464#4,<z15=int6464#4
3771
+ # asm 2: pshufd $0x39,<z15=%xmm3,<z15=%xmm3
3772
+ pshufd $0x39,%xmm3,%xmm3
3773
+
3774
+ # qhasm: in12 ^= *(uint32 *) (m + 176)
3775
+ # asm 1: xorl 176(<m=int32#5),<in12=int32#1
3776
+ # asm 2: xorl 176(<m=%esi),<in12=%eax
3777
+ xorl 176(%esi),%eax
3778
+
3779
+ # qhasm: in13 ^= *(uint32 *) (m + 180)
3780
+ # asm 1: xorl 180(<m=int32#5),<in13=int32#2
3781
+ # asm 2: xorl 180(<m=%esi),<in13=%ecx
3782
+ xorl 180(%esi),%ecx
3783
+
3784
+ # qhasm: in14 ^= *(uint32 *) (m + 184)
3785
+ # asm 1: xorl 184(<m=int32#5),<in14=int32#3
3786
+ # asm 2: xorl 184(<m=%esi),<in14=%edx
3787
+ xorl 184(%esi),%edx
3788
+
3789
+ # qhasm: in15 ^= *(uint32 *) (m + 188)
3790
+ # asm 1: xorl 188(<m=int32#5),<in15=int32#4
3791
+ # asm 2: xorl 188(<m=%esi),<in15=%ebx
3792
+ xorl 188(%esi),%ebx
3793
+
3794
+ # qhasm: *(uint32 *) (out + 176) = in12
3795
+ # asm 1: movl <in12=int32#1,176(<out=int32#6)
3796
+ # asm 2: movl <in12=%eax,176(<out=%edi)
3797
+ movl %eax,176(%edi)
3798
+
3799
+ # qhasm: *(uint32 *) (out + 180) = in13
3800
+ # asm 1: movl <in13=int32#2,180(<out=int32#6)
3801
+ # asm 2: movl <in13=%ecx,180(<out=%edi)
3802
+ movl %ecx,180(%edi)
3803
+
3804
+ # qhasm: *(uint32 *) (out + 184) = in14
3805
+ # asm 1: movl <in14=int32#3,184(<out=int32#6)
3806
+ # asm 2: movl <in14=%edx,184(<out=%edi)
3807
+ movl %edx,184(%edi)
3808
+
3809
+ # qhasm: *(uint32 *) (out + 188) = in15
3810
+ # asm 1: movl <in15=int32#4,188(<out=int32#6)
3811
+ # asm 2: movl <in15=%ebx,188(<out=%edi)
3812
+ movl %ebx,188(%edi)
3813
+
3814
+ # qhasm: in12 = z12
3815
+ # asm 1: movd <z12=int6464#1,>in12=int32#1
3816
+ # asm 2: movd <z12=%xmm0,>in12=%eax
3817
+ movd %xmm0,%eax
3818
+
3819
+ # qhasm: in13 = z13
3820
+ # asm 1: movd <z13=int6464#2,>in13=int32#2
3821
+ # asm 2: movd <z13=%xmm1,>in13=%ecx
3822
+ movd %xmm1,%ecx
3823
+
3824
+ # qhasm: in14 = z14
3825
+ # asm 1: movd <z14=int6464#3,>in14=int32#3
3826
+ # asm 2: movd <z14=%xmm2,>in14=%edx
3827
+ movd %xmm2,%edx
3828
+
3829
+ # qhasm: in15 = z15
3830
+ # asm 1: movd <z15=int6464#4,>in15=int32#4
3831
+ # asm 2: movd <z15=%xmm3,>in15=%ebx
3832
+ movd %xmm3,%ebx
3833
+
3834
+ # qhasm: in12 ^= *(uint32 *) (m + 240)
3835
+ # asm 1: xorl 240(<m=int32#5),<in12=int32#1
3836
+ # asm 2: xorl 240(<m=%esi),<in12=%eax
3837
+ xorl 240(%esi),%eax
3838
+
3839
+ # qhasm: in13 ^= *(uint32 *) (m + 244)
3840
+ # asm 1: xorl 244(<m=int32#5),<in13=int32#2
3841
+ # asm 2: xorl 244(<m=%esi),<in13=%ecx
3842
+ xorl 244(%esi),%ecx
3843
+
3844
+ # qhasm: in14 ^= *(uint32 *) (m + 248)
3845
+ # asm 1: xorl 248(<m=int32#5),<in14=int32#3
3846
+ # asm 2: xorl 248(<m=%esi),<in14=%edx
3847
+ xorl 248(%esi),%edx
3848
+
3849
+ # qhasm: in15 ^= *(uint32 *) (m + 252)
3850
+ # asm 1: xorl 252(<m=int32#5),<in15=int32#4
3851
+ # asm 2: xorl 252(<m=%esi),<in15=%ebx
3852
+ xorl 252(%esi),%ebx
3853
+
3854
+ # qhasm: *(uint32 *) (out + 240) = in12
3855
+ # asm 1: movl <in12=int32#1,240(<out=int32#6)
3856
+ # asm 2: movl <in12=%eax,240(<out=%edi)
3857
+ movl %eax,240(%edi)
3858
+
3859
+ # qhasm: *(uint32 *) (out + 244) = in13
3860
+ # asm 1: movl <in13=int32#2,244(<out=int32#6)
3861
+ # asm 2: movl <in13=%ecx,244(<out=%edi)
3862
+ movl %ecx,244(%edi)
3863
+
3864
+ # qhasm: *(uint32 *) (out + 248) = in14
3865
+ # asm 1: movl <in14=int32#3,248(<out=int32#6)
3866
+ # asm 2: movl <in14=%edx,248(<out=%edi)
3867
+ movl %edx,248(%edi)
3868
+
3869
+ # qhasm: *(uint32 *) (out + 252) = in15
3870
+ # asm 1: movl <in15=int32#4,252(<out=int32#6)
3871
+ # asm 2: movl <in15=%ebx,252(<out=%edi)
3872
+ movl %ebx,252(%edi)
3873
+
3874
+ # qhasm: bytes = bytes_stack
3875
+ # asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
3876
+ # asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
3877
+ movl 24(%esp),%eax
3878
+
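+ With only the usual six general registers available in 32-bit code (%eax, %ecx, %edx, %ebx, %esi, %edi), qhasm keeps values such as the byte counter and the out pointer in %esp-relative spill slots between uses: stack32#7 maps to 24(%esp), and the stack128 slots sit at the 16-byte-aligned offsets seen throughout (352(%esp), 560(%esp), and so on). The movl above recovers the counter for the chunk bookkeeping that follows.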
3879
+ # qhasm: bytes -= 256
3880
+ # asm 1: sub $256,<bytes=int32#1
3881
+ # asm 2: sub $256,<bytes=%eax
3882
+ sub $256,%eax
3883
+
3884
+ # qhasm: m += 256
3885
+ # asm 1: add $256,<m=int32#5
3886
+ # asm 2: add $256,<m=%esi
3887
+ add $256,%esi
3888
+
3889
+ # qhasm: out += 256
3890
+ # asm 1: add $256,<out=int32#6
3891
+ # asm 2: add $256,<out=%edi
3892
+ add $256,%edi
3893
+
3894
+ # qhasm: out_stack = out
3895
+ # asm 1: movl <out=int32#6,>out_stack=stack32#6
3896
+ # asm 2: movl <out=%edi,>out_stack=20(%esp)
3897
+ movl %edi,20(%esp)
3898
+
3899
+ # qhasm: unsigned<? bytes - 256
3900
+ # asm 1: cmp $256,<bytes=int32#1
3901
+ # asm 2: cmp $256,<bytes=%eax
3902
+ cmp $256,%eax
3903
+ # comment:fp stack unchanged by jump
3904
+
3905
+ # qhasm: goto bytesatleast256 if !unsigned<
3906
+ jae ._bytesatleast256
3907
+
3908
+ # qhasm: unsigned>? bytes - 0
3909
+ # asm 1: cmp $0,<bytes=int32#1
3910
+ # asm 2: cmp $0,<bytes=%eax
3911
+ cmp $0,%eax
3912
+ # comment:fp stack unchanged by jump
3913
+
3914
+ # qhasm: goto done if !unsigned>
3915
+ jbe ._done
3916
+ # comment:fp stack unchanged by fallthrough
3917
+
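+ In C terms, the subtraction, pointer advances, and the two branches above implement the outer loop over 256-byte (four-block) chunks and the hand-off to the short path; a sketch under illustrative names (encrypt_four_blocks and tail_blocks stand in for the code above and below):
+
+     #include <stddef.h>
+     #include <stdint.h>
+
+     void encrypt_four_blocks(uint8_t *out, const uint8_t *m);        /* hypothetical */
+     void tail_blocks(uint8_t *out, const uint8_t *m, size_t bytes);  /* hypothetical */
+
+     void stream_xor_chunks(uint8_t *out, const uint8_t *m, size_t bytes)
+     {
+         while (bytes >= 256) {                  /* ._bytesatleast256     */
+             encrypt_four_blocks(out, m);        /* SSE2 body above       */
+             bytes -= 256; m += 256; out += 256;
+         }
+         if (bytes == 0)                         /* jbe ._done            */
+             return;
+         tail_blocks(out, m, bytes);             /* ._bytesbetween1and255 */
+     }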
3918
+ # qhasm: bytesbetween1and255:
3919
+ ._bytesbetween1and255:
3920
+
3921
+ # qhasm: unsigned<? bytes - 64
3922
+ # asm 1: cmp $64,<bytes=int32#1
3923
+ # asm 2: cmp $64,<bytes=%eax
3924
+ cmp $64,%eax
3925
+ # comment:fp stack unchanged by jump
3926
+
3927
+ # qhasm: goto nocopy if !unsigned<
3928
+ jae ._nocopy
3929
+
3930
+ # qhasm: ctarget = out
3931
+ # asm 1: movl <out=int32#6,>ctarget=stack32#6
3932
+ # asm 2: movl <out=%edi,>ctarget=20(%esp)
3933
+ movl %edi,20(%esp)
3934
+
3935
+ # qhasm: out = &tmp
3936
+ # asm 1: leal <tmp=stack512#1,>out=int32#6
3937
+ # asm 2: leal <tmp=640(%esp),>out=%edi
3938
+ leal 640(%esp),%edi
3939
+
3940
+ # qhasm: i = bytes
3941
+ # asm 1: mov <bytes=int32#1,>i=int32#2
3942
+ # asm 2: mov <bytes=%eax,>i=%ecx
3943
+ mov %eax,%ecx
3944
+
3945
+ # qhasm: while (i) { *out++ = *m++; --i }
3946
+ rep movsb
3947
+
3948
+ # qhasm: out = &tmp
3949
+ # asm 1: leal <tmp=stack512#1,>out=int32#6
3950
+ # asm 2: leal <tmp=640(%esp),>out=%edi
3951
+ leal 640(%esp),%edi
3952
+
3953
+ # qhasm: m = &tmp
3954
+ # asm 1: leal <tmp=stack512#1,>m=int32#5
3955
+ # asm 2: leal <tmp=640(%esp),>m=%esi
3956
+ leal 640(%esp),%esi
3957
+ # comment:fp stack unchanged by fallthrough
3958
+
3959
+ # qhasm: nocopy:
3960
+ ._nocopy:
3961
+
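+ What just happened for inputs shorter than one 64-byte block: out was parked in ctarget, the remaining bytes were copied into the 64-byte tmp area on the stack (the rep movsb), and both m and out were retargeted at tmp, so the single-block code can read and write whole words without overrunning the caller's buffers. As a C sketch with the same variable names (the copy back from tmp to ctarget happens later, at the end of the short path):
+
+     #include <string.h>
+
+     void stage_short_tail(unsigned char **out, const unsigned char **m,
+                           unsigned bytes, unsigned char tmp[64],
+                           unsigned char **ctarget)
+     {
+         if (bytes < 64) {            /* jae ._nocopy skips this block */
+             *ctarget = *out;         /* ctarget = out                 */
+             memcpy(tmp, *m, bytes);  /* rep movsb                     */
+             *out = tmp;              /* out = &tmp                    */
+             *m = tmp;                /* m = &tmp                      */
+         }
+     }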
3962
+ # qhasm: bytes_stack = bytes
3963
+ # asm 1: movl <bytes=int32#1,>bytes_stack=stack32#7
3964
+ # asm 2: movl <bytes=%eax,>bytes_stack=24(%esp)
3965
+ movl %eax,24(%esp)
3966
+
3967
+ # qhasm: diag0 = x0
3968
+ # asm 1: movdqa <x0=stack128#3,>diag0=int6464#1
3969
+ # asm 2: movdqa <x0=64(%esp),>diag0=%xmm0
3970
+ movdqa 64(%esp),%xmm0
3971
+
3972
+ # qhasm: diag1 = x1
3973
+ # asm 1: movdqa <x1=stack128#2,>diag1=int6464#2
3974
+ # asm 2: movdqa <x1=48(%esp),>diag1=%xmm1
3975
+ movdqa 48(%esp),%xmm1
3976
+
3977
+ # qhasm: diag2 = x2
3978
+ # asm 1: movdqa <x2=stack128#4,>diag2=int6464#3
3979
+ # asm 2: movdqa <x2=80(%esp),>diag2=%xmm2
3980
+ movdqa 80(%esp),%xmm2
3981
+
3982
+ # qhasm: diag3 = x3
3983
+ # asm 1: movdqa <x3=stack128#1,>diag3=int6464#4
3984
+ # asm 2: movdqa <x3=32(%esp),>diag3=%xmm3
3985
+ movdqa 32(%esp),%xmm3
3986
+
3987
+ # qhasm: a0 = diag1
3988
+ # asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
3989
+ # asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
3990
+ movdqa %xmm1,%xmm4
3991
+
3992
+ # qhasm: i = 12
3993
+ # asm 1: mov $12,>i=int32#1
3994
+ # asm 2: mov $12,>i=%eax
3995
+ mov $12,%eax
3996
+
3997
+ # qhasm: mainloop2:
3998
+ ._mainloop2:
3999
+
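+ i is loaded with 12, and the "unsigned>? i -= 4" near the bottom of ._mainloop2 retires 4 per pass; since one pass below performs eight quarter rounds (two Salsa20 double rounds, i.e. four rounds), three passes give a 12-round core. That is consistent with this file being the reduced-round salsa2012 stream rather than the full 20-round Salsa20, though that is an inference from the counter, not stated here. The loop shape in C (double_round is an illustrative stand-in):
+
+     #include <stdint.h>
+
+     void double_round(uint32_t x[16]);     /* hypothetical: columns + rows */
+
+     void core_rounds(uint32_t x[16])
+     {
+         for (int i = 12; i > 0; i -= 4) {  /* mov $12 ... sub $4 */
+             double_round(x);
+             double_round(x);
+         }
+     }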
4000
+ # qhasm: uint32323232 a0 += diag0
4001
+ # asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4002
+ # asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4003
+ paddd %xmm0,%xmm4
4004
+
4005
+ # qhasm: a1 = diag0
4006
+ # asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4007
+ # asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4008
+ movdqa %xmm0,%xmm5
4009
+
4010
+ # qhasm: b0 = a0
4011
+ # asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4012
+ # asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4013
+ movdqa %xmm4,%xmm6
4014
+
4015
+ # qhasm: uint32323232 a0 <<= 7
4016
+ # asm 1: pslld $7,<a0=int6464#5
4017
+ # asm 2: pslld $7,<a0=%xmm4
4018
+ pslld $7,%xmm4
4019
+
4020
+ # qhasm: uint32323232 b0 >>= 25
4021
+ # asm 1: psrld $25,<b0=int6464#7
4022
+ # asm 2: psrld $25,<b0=%xmm6
4023
+ psrld $25,%xmm6
4024
+
4025
+ # qhasm: diag3 ^= a0
4026
+ # asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4027
+ # asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4028
+ pxor %xmm4,%xmm3
4029
+
4030
+ # qhasm: diag3 ^= b0
4031
+ # asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4032
+ # asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4033
+ pxor %xmm6,%xmm3
4034
+
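+ Every quarter-round step in this loop follows the same five-instruction shape: add two state vectors, then synthesize the rotate from a left shift and its complementary right shift and fold both halves in with XOR, because SSE2 has no packed 32-bit rotate instruction. For the first rotate distance (7, complement 25), a sketch as intrinsics:
+
+     #include <emmintrin.h>
+
+     /* d ^= (a + b) <<< 7, built from paddd/pslld/psrld/pxor/pxor. */
+     static __m128i quarter_step7(__m128i a, __m128i b, __m128i d)
+     {
+         __m128i t  = _mm_add_epi32(a, b);                /* paddd     */
+         __m128i lo = _mm_slli_epi32(t, 7);               /* pslld $7  */
+         __m128i hi = _mm_srli_epi32(t, 25);              /* psrld $25 */
+         return _mm_xor_si128(_mm_xor_si128(d, lo), hi);  /* pxor x2   */
+     }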
4035
+ # qhasm: uint32323232 a1 += diag3
4036
+ # asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4037
+ # asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4038
+ paddd %xmm3,%xmm5
4039
+
4040
+ # qhasm: a2 = diag3
4041
+ # asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4042
+ # asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4043
+ movdqa %xmm3,%xmm4
4044
+
4045
+ # qhasm: b1 = a1
4046
+ # asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4047
+ # asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4048
+ movdqa %xmm5,%xmm6
4049
+
4050
+ # qhasm: uint32323232 a1 <<= 9
4051
+ # asm 1: pslld $9,<a1=int6464#6
4052
+ # asm 2: pslld $9,<a1=%xmm5
4053
+ pslld $9,%xmm5
4054
+
4055
+ # qhasm: uint32323232 b1 >>= 23
4056
+ # asm 1: psrld $23,<b1=int6464#7
4057
+ # asm 2: psrld $23,<b1=%xmm6
4058
+ psrld $23,%xmm6
4059
+
4060
+ # qhasm: diag2 ^= a1
4061
+ # asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4062
+ # asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4063
+ pxor %xmm5,%xmm2
4064
+
4065
+ # qhasm: diag3 <<<= 32
4066
+ # asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4067
+ # asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4068
+ pshufd $0x93,%xmm3,%xmm3
4069
+
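+ The pshufd immediates encode the lane rotations that qhasm writes as "<<<= 32/64/96": 0x93 rotates the four 32-bit lanes up one position, 0x4e swaps the two halves, and 0x39 rotates down one. Interleaved with the arithmetic, they re-align the diagonals so the next quarter-round can again operate on whole vectors:
+
+     #include <emmintrin.h>
+
+     static __m128i rot_lanes_32(__m128i v) { return _mm_shuffle_epi32(v, 0x93); } /* <<<= 32 */
+     static __m128i rot_lanes_64(__m128i v) { return _mm_shuffle_epi32(v, 0x4e); } /* <<<= 64 */
+     static __m128i rot_lanes_96(__m128i v) { return _mm_shuffle_epi32(v, 0x39); } /* <<<= 96 */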
4070
+ # qhasm: diag2 ^= b1
4071
+ # asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4072
+ # asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4073
+ pxor %xmm6,%xmm2
4074
+
4075
+ # qhasm: uint32323232 a2 += diag2
4076
+ # asm 1: paddd <diag2=int6464#3,<a2=int6464#5
4077
+ # asm 2: paddd <diag2=%xmm2,<a2=%xmm4
4078
+ paddd %xmm2,%xmm4
4079
+
4080
+ # qhasm: a3 = diag2
4081
+ # asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
4082
+ # asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
4083
+ movdqa %xmm2,%xmm5
4084
+
4085
+ # qhasm: b2 = a2
4086
+ # asm 1: movdqa <a2=int6464#5,>b2=int6464#7
4087
+ # asm 2: movdqa <a2=%xmm4,>b2=%xmm6
4088
+ movdqa %xmm4,%xmm6
4089
+
4090
+ # qhasm: uint32323232 a2 <<= 13
4091
+ # asm 1: pslld $13,<a2=int6464#5
4092
+ # asm 2: pslld $13,<a2=%xmm4
4093
+ pslld $13,%xmm4
4094
+
4095
+ # qhasm: uint32323232 b2 >>= 19
4096
+ # asm 1: psrld $19,<b2=int6464#7
4097
+ # asm 2: psrld $19,<b2=%xmm6
4098
+ psrld $19,%xmm6
4099
+
4100
+ # qhasm: diag1 ^= a2
4101
+ # asm 1: pxor <a2=int6464#5,<diag1=int6464#2
4102
+ # asm 2: pxor <a2=%xmm4,<diag1=%xmm1
4103
+ pxor %xmm4,%xmm1
4104
+
4105
+ # qhasm: diag2 <<<= 64
4106
+ # asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4107
+ # asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4108
+ pshufd $0x4e,%xmm2,%xmm2
4109
+
4110
+ # qhasm: diag1 ^= b2
4111
+ # asm 1: pxor <b2=int6464#7,<diag1=int6464#2
4112
+ # asm 2: pxor <b2=%xmm6,<diag1=%xmm1
4113
+ pxor %xmm6,%xmm1
4114
+
4115
+ # qhasm: uint32323232 a3 += diag1
4116
+ # asm 1: paddd <diag1=int6464#2,<a3=int6464#6
4117
+ # asm 2: paddd <diag1=%xmm1,<a3=%xmm5
4118
+ paddd %xmm1,%xmm5
4119
+
4120
+ # qhasm: a4 = diag3
4121
+ # asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
4122
+ # asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
4123
+ movdqa %xmm3,%xmm4
4124
+
4125
+ # qhasm: b3 = a3
4126
+ # asm 1: movdqa <a3=int6464#6,>b3=int6464#7
4127
+ # asm 2: movdqa <a3=%xmm5,>b3=%xmm6
4128
+ movdqa %xmm5,%xmm6
4129
+
4130
+ # qhasm: uint32323232 a3 <<= 18
4131
+ # asm 1: pslld $18,<a3=int6464#6
4132
+ # asm 2: pslld $18,<a3=%xmm5
4133
+ pslld $18,%xmm5
4134
+
4135
+ # qhasm: uint32323232 b3 >>= 14
4136
+ # asm 1: psrld $14,<b3=int6464#7
4137
+ # asm 2: psrld $14,<b3=%xmm6
4138
+ psrld $14,%xmm6
4139
+
4140
+ # qhasm: diag0 ^= a3
4141
+ # asm 1: pxor <a3=int6464#6,<diag0=int6464#1
4142
+ # asm 2: pxor <a3=%xmm5,<diag0=%xmm0
4143
+ pxor %xmm5,%xmm0
4144
+
4145
+ # qhasm: diag1 <<<= 96
4146
+ # asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
4147
+ # asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
4148
+ pshufd $0x39,%xmm1,%xmm1
4149
+
4150
+ # qhasm: diag0 ^= b3
4151
+ # asm 1: pxor <b3=int6464#7,<diag0=int6464#1
4152
+ # asm 2: pxor <b3=%xmm6,<diag0=%xmm0
4153
+ pxor %xmm6,%xmm0
4154
+
4155
+ # qhasm: uint32323232 a4 += diag0
4156
+ # asm 1: paddd <diag0=int6464#1,<a4=int6464#5
4157
+ # asm 2: paddd <diag0=%xmm0,<a4=%xmm4
4158
+ paddd %xmm0,%xmm4
4159
+
4160
+ # qhasm: a5 = diag0
4161
+ # asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
4162
+ # asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
4163
+ movdqa %xmm0,%xmm5
4164
+
4165
+ # qhasm: b4 = a4
4166
+ # asm 1: movdqa <a4=int6464#5,>b4=int6464#7
4167
+ # asm 2: movdqa <a4=%xmm4,>b4=%xmm6
4168
+ movdqa %xmm4,%xmm6
4169
+
4170
+ # qhasm: uint32323232 a4 <<= 7
4171
+ # asm 1: pslld $7,<a4=int6464#5
4172
+ # asm 2: pslld $7,<a4=%xmm4
4173
+ pslld $7,%xmm4
4174
+
4175
+ # qhasm: uint32323232 b4 >>= 25
4176
+ # asm 1: psrld $25,<b4=int6464#7
4177
+ # asm 2: psrld $25,<b4=%xmm6
4178
+ psrld $25,%xmm6
4179
+
4180
+ # qhasm: diag1 ^= a4
4181
+ # asm 1: pxor <a4=int6464#5,<diag1=int6464#2
4182
+ # asm 2: pxor <a4=%xmm4,<diag1=%xmm1
4183
+ pxor %xmm4,%xmm1
4184
+
4185
+ # qhasm: diag1 ^= b4
4186
+ # asm 1: pxor <b4=int6464#7,<diag1=int6464#2
4187
+ # asm 2: pxor <b4=%xmm6,<diag1=%xmm1
4188
+ pxor %xmm6,%xmm1
4189
+
4190
+ # qhasm: uint32323232 a5 += diag1
4191
+ # asm 1: paddd <diag1=int6464#2,<a5=int6464#6
4192
+ # asm 2: paddd <diag1=%xmm1,<a5=%xmm5
4193
+ paddd %xmm1,%xmm5
4194
+
4195
+ # qhasm: a6 = diag1
4196
+ # asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
4197
+ # asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
4198
+ movdqa %xmm1,%xmm4
4199
+
4200
+ # qhasm: b5 = a5
4201
+ # asm 1: movdqa <a5=int6464#6,>b5=int6464#7
4202
+ # asm 2: movdqa <a5=%xmm5,>b5=%xmm6
4203
+ movdqa %xmm5,%xmm6
4204
+
4205
+ # qhasm: uint32323232 a5 <<= 9
4206
+ # asm 1: pslld $9,<a5=int6464#6
4207
+ # asm 2: pslld $9,<a5=%xmm5
4208
+ pslld $9,%xmm5
4209
+
4210
+ # qhasm: uint32323232 b5 >>= 23
4211
+ # asm 1: psrld $23,<b5=int6464#7
4212
+ # asm 2: psrld $23,<b5=%xmm6
4213
+ psrld $23,%xmm6
4214
+
4215
+ # qhasm: diag2 ^= a5
4216
+ # asm 1: pxor <a5=int6464#6,<diag2=int6464#3
4217
+ # asm 2: pxor <a5=%xmm5,<diag2=%xmm2
4218
+ pxor %xmm5,%xmm2
4219
+
4220
+ # qhasm: diag1 <<<= 32
4221
+ # asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
4222
+ # asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
4223
+ pshufd $0x93,%xmm1,%xmm1
4224
+
4225
+ # qhasm: diag2 ^= b5
4226
+ # asm 1: pxor <b5=int6464#7,<diag2=int6464#3
4227
+ # asm 2: pxor <b5=%xmm6,<diag2=%xmm2
4228
+ pxor %xmm6,%xmm2
4229
+
4230
+ # qhasm: uint32323232 a6 += diag2
4231
+ # asm 1: paddd <diag2=int6464#3,<a6=int6464#5
4232
+ # asm 2: paddd <diag2=%xmm2,<a6=%xmm4
4233
+ paddd %xmm2,%xmm4
4234
+
4235
+ # qhasm: a7 = diag2
4236
+ # asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
4237
+ # asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
4238
+ movdqa %xmm2,%xmm5
4239
+
4240
+ # qhasm: b6 = a6
4241
+ # asm 1: movdqa <a6=int6464#5,>b6=int6464#7
4242
+ # asm 2: movdqa <a6=%xmm4,>b6=%xmm6
4243
+ movdqa %xmm4,%xmm6
4244
+
4245
+ # qhasm: uint32323232 a6 <<= 13
4246
+ # asm 1: pslld $13,<a6=int6464#5
4247
+ # asm 2: pslld $13,<a6=%xmm4
4248
+ pslld $13,%xmm4
4249
+
4250
+ # qhasm: uint32323232 b6 >>= 19
4251
+ # asm 1: psrld $19,<b6=int6464#7
4252
+ # asm 2: psrld $19,<b6=%xmm6
4253
+ psrld $19,%xmm6
4254
+
4255
+ # qhasm: diag3 ^= a6
4256
+ # asm 1: pxor <a6=int6464#5,<diag3=int6464#4
4257
+ # asm 2: pxor <a6=%xmm4,<diag3=%xmm3
4258
+ pxor %xmm4,%xmm3
4259
+
4260
+ # qhasm: diag2 <<<= 64
4261
+ # asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
4262
+ # asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
4263
+ pshufd $0x4e,%xmm2,%xmm2
4264
+
4265
+ # qhasm: diag3 ^= b6
4266
+ # asm 1: pxor <b6=int6464#7,<diag3=int6464#4
4267
+ # asm 2: pxor <b6=%xmm6,<diag3=%xmm3
4268
+ pxor %xmm6,%xmm3
4269
+
4270
+ # qhasm: uint32323232 a7 += diag3
4271
+ # asm 1: paddd <diag3=int6464#4,<a7=int6464#6
4272
+ # asm 2: paddd <diag3=%xmm3,<a7=%xmm5
4273
+ paddd %xmm3,%xmm5
4274
+
4275
+ # qhasm: a0 = diag1
4276
+ # asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
4277
+ # asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
4278
+ movdqa %xmm1,%xmm4
4279
+
4280
+ # qhasm: b7 = a7
4281
+ # asm 1: movdqa <a7=int6464#6,>b7=int6464#7
4282
+ # asm 2: movdqa <a7=%xmm5,>b7=%xmm6
4283
+ movdqa %xmm5,%xmm6
4284
+
4285
+ # qhasm: uint32323232 a7 <<= 18
4286
+ # asm 1: pslld $18,<a7=int6464#6
4287
+ # asm 2: pslld $18,<a7=%xmm5
4288
+ pslld $18,%xmm5
4289
+
4290
+ # qhasm: uint32323232 b7 >>= 14
4291
+ # asm 1: psrld $14,<b7=int6464#7
4292
+ # asm 2: psrld $14,<b7=%xmm6
4293
+ psrld $14,%xmm6
4294
+
4295
+ # qhasm: diag0 ^= a7
4296
+ # asm 1: pxor <a7=int6464#6,<diag0=int6464#1
4297
+ # asm 2: pxor <a7=%xmm5,<diag0=%xmm0
4298
+ pxor %xmm5,%xmm0
4299
+
4300
+ # qhasm: diag3 <<<= 96
4301
+ # asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
4302
+ # asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
4303
+ pshufd $0x39,%xmm3,%xmm3
4304
+
4305
+ # qhasm: diag0 ^= b7
4306
+ # asm 1: pxor <b7=int6464#7,<diag0=int6464#1
4307
+ # asm 2: pxor <b7=%xmm6,<diag0=%xmm0
4308
+ pxor %xmm6,%xmm0
4309
+
4310
+ # qhasm: uint32323232 a0 += diag0
4311
+ # asm 1: paddd <diag0=int6464#1,<a0=int6464#5
4312
+ # asm 2: paddd <diag0=%xmm0,<a0=%xmm4
4313
+ paddd %xmm0,%xmm4
4314
+
4315
+ # qhasm: a1 = diag0
4316
+ # asm 1: movdqa <diag0=int6464#1,>a1=int6464#6
4317
+ # asm 2: movdqa <diag0=%xmm0,>a1=%xmm5
4318
+ movdqa %xmm0,%xmm5
4319
+
4320
+ # qhasm: b0 = a0
4321
+ # asm 1: movdqa <a0=int6464#5,>b0=int6464#7
4322
+ # asm 2: movdqa <a0=%xmm4,>b0=%xmm6
4323
+ movdqa %xmm4,%xmm6
4324
+
4325
+ # qhasm: uint32323232 a0 <<= 7
4326
+ # asm 1: pslld $7,<a0=int6464#5
4327
+ # asm 2: pslld $7,<a0=%xmm4
4328
+ pslld $7,%xmm4
4329
+
4330
+ # qhasm: uint32323232 b0 >>= 25
4331
+ # asm 1: psrld $25,<b0=int6464#7
4332
+ # asm 2: psrld $25,<b0=%xmm6
4333
+ psrld $25,%xmm6
4334
+
4335
+ # qhasm: diag3 ^= a0
4336
+ # asm 1: pxor <a0=int6464#5,<diag3=int6464#4
4337
+ # asm 2: pxor <a0=%xmm4,<diag3=%xmm3
4338
+ pxor %xmm4,%xmm3
4339
+
4340
+ # qhasm: diag3 ^= b0
4341
+ # asm 1: pxor <b0=int6464#7,<diag3=int6464#4
4342
+ # asm 2: pxor <b0=%xmm6,<diag3=%xmm3
4343
+ pxor %xmm6,%xmm3
4344
+
4345
+ # qhasm: uint32323232 a1 += diag3
4346
+ # asm 1: paddd <diag3=int6464#4,<a1=int6464#6
4347
+ # asm 2: paddd <diag3=%xmm3,<a1=%xmm5
4348
+ paddd %xmm3,%xmm5
4349
+
4350
+ # qhasm: a2 = diag3
4351
+ # asm 1: movdqa <diag3=int6464#4,>a2=int6464#5
4352
+ # asm 2: movdqa <diag3=%xmm3,>a2=%xmm4
4353
+ movdqa %xmm3,%xmm4
4354
+
4355
+ # qhasm: b1 = a1
4356
+ # asm 1: movdqa <a1=int6464#6,>b1=int6464#7
4357
+ # asm 2: movdqa <a1=%xmm5,>b1=%xmm6
4358
+ movdqa %xmm5,%xmm6
4359
+
4360
+ # qhasm: uint32323232 a1 <<= 9
4361
+ # asm 1: pslld $9,<a1=int6464#6
4362
+ # asm 2: pslld $9,<a1=%xmm5
4363
+ pslld $9,%xmm5
4364
+
4365
+ # qhasm: uint32323232 b1 >>= 23
4366
+ # asm 1: psrld $23,<b1=int6464#7
4367
+ # asm 2: psrld $23,<b1=%xmm6
4368
+ psrld $23,%xmm6
4369
+
4370
+ # qhasm: diag2 ^= a1
4371
+ # asm 1: pxor <a1=int6464#6,<diag2=int6464#3
4372
+ # asm 2: pxor <a1=%xmm5,<diag2=%xmm2
4373
+ pxor %xmm5,%xmm2
4374
+
4375
+ # qhasm: diag3 <<<= 32
4376
+ # asm 1: pshufd $0x93,<diag3=int6464#4,<diag3=int6464#4
4377
+ # asm 2: pshufd $0x93,<diag3=%xmm3,<diag3=%xmm3
4378
+ pshufd $0x93,%xmm3,%xmm3
4379
+
4380
+ # qhasm: diag2 ^= b1
4381
+ # asm 1: pxor <b1=int6464#7,<diag2=int6464#3
4382
+ # asm 2: pxor <b1=%xmm6,<diag2=%xmm2
4383
+ pxor %xmm6,%xmm2
4384
+
+ # qhasm: uint32323232 a2 += diag2
+ # asm 1: paddd <diag2=int6464#3,<a2=int6464#5
+ # asm 2: paddd <diag2=%xmm2,<a2=%xmm4
+ paddd %xmm2,%xmm4
+
+ # qhasm: a3 = diag2
+ # asm 1: movdqa <diag2=int6464#3,>a3=int6464#6
+ # asm 2: movdqa <diag2=%xmm2,>a3=%xmm5
+ movdqa %xmm2,%xmm5
+
+ # qhasm: b2 = a2
+ # asm 1: movdqa <a2=int6464#5,>b2=int6464#7
+ # asm 2: movdqa <a2=%xmm4,>b2=%xmm6
+ movdqa %xmm4,%xmm6
+
+ # qhasm: uint32323232 a2 <<= 13
+ # asm 1: pslld $13,<a2=int6464#5
+ # asm 2: pslld $13,<a2=%xmm4
+ pslld $13,%xmm4
+
+ # qhasm: uint32323232 b2 >>= 19
+ # asm 1: psrld $19,<b2=int6464#7
+ # asm 2: psrld $19,<b2=%xmm6
+ psrld $19,%xmm6
+
+ # qhasm: diag1 ^= a2
+ # asm 1: pxor <a2=int6464#5,<diag1=int6464#2
+ # asm 2: pxor <a2=%xmm4,<diag1=%xmm1
+ pxor %xmm4,%xmm1
+
+ # qhasm: diag2 <<<= 64
+ # asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
+ # asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
+ pshufd $0x4e,%xmm2,%xmm2
+
+ # qhasm: diag1 ^= b2
+ # asm 1: pxor <b2=int6464#7,<diag1=int6464#2
+ # asm 2: pxor <b2=%xmm6,<diag1=%xmm1
+ pxor %xmm6,%xmm1
+
+ # qhasm: uint32323232 a3 += diag1
+ # asm 1: paddd <diag1=int6464#2,<a3=int6464#6
+ # asm 2: paddd <diag1=%xmm1,<a3=%xmm5
+ paddd %xmm1,%xmm5
+
+ # qhasm: a4 = diag3
+ # asm 1: movdqa <diag3=int6464#4,>a4=int6464#5
+ # asm 2: movdqa <diag3=%xmm3,>a4=%xmm4
+ movdqa %xmm3,%xmm4
+
+ # qhasm: b3 = a3
+ # asm 1: movdqa <a3=int6464#6,>b3=int6464#7
+ # asm 2: movdqa <a3=%xmm5,>b3=%xmm6
+ movdqa %xmm5,%xmm6
+
+ # qhasm: uint32323232 a3 <<= 18
+ # asm 1: pslld $18,<a3=int6464#6
+ # asm 2: pslld $18,<a3=%xmm5
+ pslld $18,%xmm5
+
+ # qhasm: uint32323232 b3 >>= 14
+ # asm 1: psrld $14,<b3=int6464#7
+ # asm 2: psrld $14,<b3=%xmm6
+ psrld $14,%xmm6
+
+ # qhasm: diag0 ^= a3
+ # asm 1: pxor <a3=int6464#6,<diag0=int6464#1
+ # asm 2: pxor <a3=%xmm5,<diag0=%xmm0
+ pxor %xmm5,%xmm0
+
+ # qhasm: diag1 <<<= 96
+ # asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
+ # asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
+ pshufd $0x39,%xmm1,%xmm1
+
+ # qhasm: diag0 ^= b3
+ # asm 1: pxor <b3=int6464#7,<diag0=int6464#1
+ # asm 2: pxor <b3=%xmm6,<diag0=%xmm0
+ pxor %xmm6,%xmm0
+
+ # qhasm: uint32323232 a4 += diag0
+ # asm 1: paddd <diag0=int6464#1,<a4=int6464#5
+ # asm 2: paddd <diag0=%xmm0,<a4=%xmm4
+ paddd %xmm0,%xmm4
+
+ # qhasm: a5 = diag0
+ # asm 1: movdqa <diag0=int6464#1,>a5=int6464#6
+ # asm 2: movdqa <diag0=%xmm0,>a5=%xmm5
+ movdqa %xmm0,%xmm5
+
+ # qhasm: b4 = a4
+ # asm 1: movdqa <a4=int6464#5,>b4=int6464#7
+ # asm 2: movdqa <a4=%xmm4,>b4=%xmm6
+ movdqa %xmm4,%xmm6
+
+ # qhasm: uint32323232 a4 <<= 7
+ # asm 1: pslld $7,<a4=int6464#5
+ # asm 2: pslld $7,<a4=%xmm4
+ pslld $7,%xmm4
+
+ # qhasm: uint32323232 b4 >>= 25
+ # asm 1: psrld $25,<b4=int6464#7
+ # asm 2: psrld $25,<b4=%xmm6
+ psrld $25,%xmm6
+
+ # qhasm: diag1 ^= a4
+ # asm 1: pxor <a4=int6464#5,<diag1=int6464#2
+ # asm 2: pxor <a4=%xmm4,<diag1=%xmm1
+ pxor %xmm4,%xmm1
+
+ # qhasm: diag1 ^= b4
+ # asm 1: pxor <b4=int6464#7,<diag1=int6464#2
+ # asm 2: pxor <b4=%xmm6,<diag1=%xmm1
+ pxor %xmm6,%xmm1
+
+ # qhasm: uint32323232 a5 += diag1
+ # asm 1: paddd <diag1=int6464#2,<a5=int6464#6
+ # asm 2: paddd <diag1=%xmm1,<a5=%xmm5
+ paddd %xmm1,%xmm5
+
+ # qhasm: a6 = diag1
+ # asm 1: movdqa <diag1=int6464#2,>a6=int6464#5
+ # asm 2: movdqa <diag1=%xmm1,>a6=%xmm4
+ movdqa %xmm1,%xmm4
+
+ # qhasm: b5 = a5
+ # asm 1: movdqa <a5=int6464#6,>b5=int6464#7
+ # asm 2: movdqa <a5=%xmm5,>b5=%xmm6
+ movdqa %xmm5,%xmm6
+
+ # qhasm: uint32323232 a5 <<= 9
+ # asm 1: pslld $9,<a5=int6464#6
+ # asm 2: pslld $9,<a5=%xmm5
+ pslld $9,%xmm5
+
+ # qhasm: uint32323232 b5 >>= 23
+ # asm 1: psrld $23,<b5=int6464#7
+ # asm 2: psrld $23,<b5=%xmm6
+ psrld $23,%xmm6
+
+ # qhasm: diag2 ^= a5
+ # asm 1: pxor <a5=int6464#6,<diag2=int6464#3
+ # asm 2: pxor <a5=%xmm5,<diag2=%xmm2
+ pxor %xmm5,%xmm2
+
+ # qhasm: diag1 <<<= 32
+ # asm 1: pshufd $0x93,<diag1=int6464#2,<diag1=int6464#2
+ # asm 2: pshufd $0x93,<diag1=%xmm1,<diag1=%xmm1
+ pshufd $0x93,%xmm1,%xmm1
+
+ # qhasm: diag2 ^= b5
+ # asm 1: pxor <b5=int6464#7,<diag2=int6464#3
+ # asm 2: pxor <b5=%xmm6,<diag2=%xmm2
+ pxor %xmm6,%xmm2
+
+ # qhasm: uint32323232 a6 += diag2
+ # asm 1: paddd <diag2=int6464#3,<a6=int6464#5
+ # asm 2: paddd <diag2=%xmm2,<a6=%xmm4
+ paddd %xmm2,%xmm4
+
+ # qhasm: a7 = diag2
+ # asm 1: movdqa <diag2=int6464#3,>a7=int6464#6
+ # asm 2: movdqa <diag2=%xmm2,>a7=%xmm5
+ movdqa %xmm2,%xmm5
+
+ # qhasm: b6 = a6
+ # asm 1: movdqa <a6=int6464#5,>b6=int6464#7
+ # asm 2: movdqa <a6=%xmm4,>b6=%xmm6
+ movdqa %xmm4,%xmm6
+
+ # qhasm: uint32323232 a6 <<= 13
+ # asm 1: pslld $13,<a6=int6464#5
+ # asm 2: pslld $13,<a6=%xmm4
+ pslld $13,%xmm4
+
+ # qhasm: uint32323232 b6 >>= 19
+ # asm 1: psrld $19,<b6=int6464#7
+ # asm 2: psrld $19,<b6=%xmm6
+ psrld $19,%xmm6
+
+ # qhasm: diag3 ^= a6
+ # asm 1: pxor <a6=int6464#5,<diag3=int6464#4
+ # asm 2: pxor <a6=%xmm4,<diag3=%xmm3
+ pxor %xmm4,%xmm3
+
+ # qhasm: diag2 <<<= 64
+ # asm 1: pshufd $0x4e,<diag2=int6464#3,<diag2=int6464#3
+ # asm 2: pshufd $0x4e,<diag2=%xmm2,<diag2=%xmm2
+ pshufd $0x4e,%xmm2,%xmm2
+
+ # qhasm: diag3 ^= b6
+ # asm 1: pxor <b6=int6464#7,<diag3=int6464#4
+ # asm 2: pxor <b6=%xmm6,<diag3=%xmm3
+ pxor %xmm6,%xmm3
+
+ # qhasm: unsigned>? i -= 4
+ # asm 1: sub $4,<i=int32#1
+ # asm 2: sub $4,<i=%eax
+ sub $4,%eax
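+
+ # note (editorial annotation): the round counter i is decremented here,
+ # well before the "ja ._mainloop2" branch below. The vector instructions
+ # in between (movdqa/pslld/psrld/pxor/pshufd/paddd) do not touch EFLAGS,
+ # so the flags from this sub survive to the jump while the final
+ # quarter-round of the iteration finishes.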
4584
+
+ # qhasm: uint32323232 a7 += diag3
+ # asm 1: paddd <diag3=int6464#4,<a7=int6464#6
+ # asm 2: paddd <diag3=%xmm3,<a7=%xmm5
+ paddd %xmm3,%xmm5
+
+ # qhasm: a0 = diag1
+ # asm 1: movdqa <diag1=int6464#2,>a0=int6464#5
+ # asm 2: movdqa <diag1=%xmm1,>a0=%xmm4
+ movdqa %xmm1,%xmm4
+
+ # qhasm: b7 = a7
+ # asm 1: movdqa <a7=int6464#6,>b7=int6464#7
+ # asm 2: movdqa <a7=%xmm5,>b7=%xmm6
+ movdqa %xmm5,%xmm6
+
+ # qhasm: uint32323232 a7 <<= 18
+ # asm 1: pslld $18,<a7=int6464#6
+ # asm 2: pslld $18,<a7=%xmm5
+ pslld $18,%xmm5
+
+ # qhasm: b0 = 0
+ # asm 1: pxor >b0=int6464#8,>b0=int6464#8
+ # asm 2: pxor >b0=%xmm7,>b0=%xmm7
+ pxor %xmm7,%xmm7
+
+ # qhasm: uint32323232 b7 >>= 14
+ # asm 1: psrld $14,<b7=int6464#7
+ # asm 2: psrld $14,<b7=%xmm6
+ psrld $14,%xmm6
+
+ # qhasm: diag0 ^= a7
+ # asm 1: pxor <a7=int6464#6,<diag0=int6464#1
+ # asm 2: pxor <a7=%xmm5,<diag0=%xmm0
+ pxor %xmm5,%xmm0
+
+ # qhasm: diag3 <<<= 96
+ # asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
+ # asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
+ pshufd $0x39,%xmm3,%xmm3
+
+ # qhasm: diag0 ^= b7
+ # asm 1: pxor <b7=int6464#7,<diag0=int6464#1
+ # asm 2: pxor <b7=%xmm6,<diag0=%xmm0
+ pxor %xmm6,%xmm0
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto mainloop2 if unsigned>
+ ja ._mainloop2
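+
+ # note (editorial annotation): after the last round, Salsa20 adds the
+ # saved input words back into the permuted state (the feed-forward);
+ # the four paddd instructions below do this one 128-bit slice at a
+ # time. Scalar C sketch: for (i = 0; i < 16; ++i) z[i] += x[i];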
4633
+
+ # qhasm: uint32323232 diag0 += x0
+ # asm 1: paddd <x0=stack128#3,<diag0=int6464#1
+ # asm 2: paddd <x0=64(%esp),<diag0=%xmm0
+ paddd 64(%esp),%xmm0
+
+ # qhasm: uint32323232 diag1 += x1
+ # asm 1: paddd <x1=stack128#2,<diag1=int6464#2
+ # asm 2: paddd <x1=48(%esp),<diag1=%xmm1
+ paddd 48(%esp),%xmm1
+
+ # qhasm: uint32323232 diag2 += x2
+ # asm 1: paddd <x2=stack128#4,<diag2=int6464#3
+ # asm 2: paddd <x2=80(%esp),<diag2=%xmm2
+ paddd 80(%esp),%xmm2
+
+ # qhasm: uint32323232 diag3 += x3
+ # asm 1: paddd <x3=stack128#1,<diag3=int6464#4
+ # asm 2: paddd <x3=32(%esp),<diag3=%xmm3
+ paddd 32(%esp),%xmm3
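+
+ # note (editorial annotation): each diag register now holds four output
+ # words in diagonal order, so the keystream is emitted four words per
+ # pass: movd extracts lane 0 of every register, pshufd $0x39 rotates
+ # the next lane down, and each word is XORed with the message (xorl)
+ # and stored (movl); the out/m offsets (0, 48, 32, 16, ...) undo the
+ # diagonal layout.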
4653
+
+ # qhasm: in0 = diag0
+ # asm 1: movd <diag0=int6464#1,>in0=int32#1
+ # asm 2: movd <diag0=%xmm0,>in0=%eax
+ movd %xmm0,%eax
+
+ # qhasm: in12 = diag1
+ # asm 1: movd <diag1=int6464#2,>in12=int32#2
+ # asm 2: movd <diag1=%xmm1,>in12=%ecx
+ movd %xmm1,%ecx
+
+ # qhasm: in8 = diag2
+ # asm 1: movd <diag2=int6464#3,>in8=int32#3
+ # asm 2: movd <diag2=%xmm2,>in8=%edx
+ movd %xmm2,%edx
+
+ # qhasm: in4 = diag3
+ # asm 1: movd <diag3=int6464#4,>in4=int32#4
+ # asm 2: movd <diag3=%xmm3,>in4=%ebx
+ movd %xmm3,%ebx
+
+ # qhasm: diag0 <<<= 96
+ # asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
+ # asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
+ pshufd $0x39,%xmm0,%xmm0
+
+ # qhasm: diag1 <<<= 96
+ # asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
+ # asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
+ pshufd $0x39,%xmm1,%xmm1
+
+ # qhasm: diag2 <<<= 96
+ # asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
+ # asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
+ pshufd $0x39,%xmm2,%xmm2
+
+ # qhasm: diag3 <<<= 96
+ # asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
+ # asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
+ pshufd $0x39,%xmm3,%xmm3
+
+ # qhasm: in0 ^= *(uint32 *) (m + 0)
+ # asm 1: xorl 0(<m=int32#5),<in0=int32#1
+ # asm 2: xorl 0(<m=%esi),<in0=%eax
+ xorl 0(%esi),%eax
+
+ # qhasm: in12 ^= *(uint32 *) (m + 48)
+ # asm 1: xorl 48(<m=int32#5),<in12=int32#2
+ # asm 2: xorl 48(<m=%esi),<in12=%ecx
+ xorl 48(%esi),%ecx
+
+ # qhasm: in8 ^= *(uint32 *) (m + 32)
+ # asm 1: xorl 32(<m=int32#5),<in8=int32#3
+ # asm 2: xorl 32(<m=%esi),<in8=%edx
+ xorl 32(%esi),%edx
+
+ # qhasm: in4 ^= *(uint32 *) (m + 16)
+ # asm 1: xorl 16(<m=int32#5),<in4=int32#4
+ # asm 2: xorl 16(<m=%esi),<in4=%ebx
+ xorl 16(%esi),%ebx
+
+ # qhasm: *(uint32 *) (out + 0) = in0
+ # asm 1: movl <in0=int32#1,0(<out=int32#6)
+ # asm 2: movl <in0=%eax,0(<out=%edi)
+ movl %eax,0(%edi)
+
+ # qhasm: *(uint32 *) (out + 48) = in12
+ # asm 1: movl <in12=int32#2,48(<out=int32#6)
+ # asm 2: movl <in12=%ecx,48(<out=%edi)
+ movl %ecx,48(%edi)
+
+ # qhasm: *(uint32 *) (out + 32) = in8
+ # asm 1: movl <in8=int32#3,32(<out=int32#6)
+ # asm 2: movl <in8=%edx,32(<out=%edi)
+ movl %edx,32(%edi)
+
+ # qhasm: *(uint32 *) (out + 16) = in4
+ # asm 1: movl <in4=int32#4,16(<out=int32#6)
+ # asm 2: movl <in4=%ebx,16(<out=%edi)
+ movl %ebx,16(%edi)
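+
+ # note (editorial annotation): the same movd/pshufd/xorl/movl pattern
+ # repeats three more times below, peeling words 5/1/13/9, then
+ # 10/6/2/14, then 15/11/7/3 out of the rotating diag registers until
+ # the full 64-byte block has been encrypted.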
4733
+
+ # qhasm: in5 = diag0
+ # asm 1: movd <diag0=int6464#1,>in5=int32#1
+ # asm 2: movd <diag0=%xmm0,>in5=%eax
+ movd %xmm0,%eax
+
+ # qhasm: in1 = diag1
+ # asm 1: movd <diag1=int6464#2,>in1=int32#2
+ # asm 2: movd <diag1=%xmm1,>in1=%ecx
+ movd %xmm1,%ecx
+
+ # qhasm: in13 = diag2
+ # asm 1: movd <diag2=int6464#3,>in13=int32#3
+ # asm 2: movd <diag2=%xmm2,>in13=%edx
+ movd %xmm2,%edx
+
+ # qhasm: in9 = diag3
+ # asm 1: movd <diag3=int6464#4,>in9=int32#4
+ # asm 2: movd <diag3=%xmm3,>in9=%ebx
+ movd %xmm3,%ebx
+
+ # qhasm: diag0 <<<= 96
+ # asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
+ # asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
+ pshufd $0x39,%xmm0,%xmm0
+
+ # qhasm: diag1 <<<= 96
+ # asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
+ # asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
+ pshufd $0x39,%xmm1,%xmm1
+
+ # qhasm: diag2 <<<= 96
+ # asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
+ # asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
+ pshufd $0x39,%xmm2,%xmm2
+
+ # qhasm: diag3 <<<= 96
+ # asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
+ # asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
+ pshufd $0x39,%xmm3,%xmm3
+
+ # qhasm: in5 ^= *(uint32 *) (m + 20)
+ # asm 1: xorl 20(<m=int32#5),<in5=int32#1
+ # asm 2: xorl 20(<m=%esi),<in5=%eax
+ xorl 20(%esi),%eax
+
+ # qhasm: in1 ^= *(uint32 *) (m + 4)
+ # asm 1: xorl 4(<m=int32#5),<in1=int32#2
+ # asm 2: xorl 4(<m=%esi),<in1=%ecx
+ xorl 4(%esi),%ecx
+
+ # qhasm: in13 ^= *(uint32 *) (m + 52)
+ # asm 1: xorl 52(<m=int32#5),<in13=int32#3
+ # asm 2: xorl 52(<m=%esi),<in13=%edx
+ xorl 52(%esi),%edx
+
+ # qhasm: in9 ^= *(uint32 *) (m + 36)
+ # asm 1: xorl 36(<m=int32#5),<in9=int32#4
+ # asm 2: xorl 36(<m=%esi),<in9=%ebx
+ xorl 36(%esi),%ebx
+
+ # qhasm: *(uint32 *) (out + 20) = in5
+ # asm 1: movl <in5=int32#1,20(<out=int32#6)
+ # asm 2: movl <in5=%eax,20(<out=%edi)
+ movl %eax,20(%edi)
+
+ # qhasm: *(uint32 *) (out + 4) = in1
+ # asm 1: movl <in1=int32#2,4(<out=int32#6)
+ # asm 2: movl <in1=%ecx,4(<out=%edi)
+ movl %ecx,4(%edi)
+
+ # qhasm: *(uint32 *) (out + 52) = in13
+ # asm 1: movl <in13=int32#3,52(<out=int32#6)
+ # asm 2: movl <in13=%edx,52(<out=%edi)
+ movl %edx,52(%edi)
+
+ # qhasm: *(uint32 *) (out + 36) = in9
+ # asm 1: movl <in9=int32#4,36(<out=int32#6)
+ # asm 2: movl <in9=%ebx,36(<out=%edi)
+ movl %ebx,36(%edi)
+
+ # qhasm: in10 = diag0
+ # asm 1: movd <diag0=int6464#1,>in10=int32#1
+ # asm 2: movd <diag0=%xmm0,>in10=%eax
+ movd %xmm0,%eax
+
+ # qhasm: in6 = diag1
+ # asm 1: movd <diag1=int6464#2,>in6=int32#2
+ # asm 2: movd <diag1=%xmm1,>in6=%ecx
+ movd %xmm1,%ecx
+
+ # qhasm: in2 = diag2
+ # asm 1: movd <diag2=int6464#3,>in2=int32#3
+ # asm 2: movd <diag2=%xmm2,>in2=%edx
+ movd %xmm2,%edx
+
+ # qhasm: in14 = diag3
+ # asm 1: movd <diag3=int6464#4,>in14=int32#4
+ # asm 2: movd <diag3=%xmm3,>in14=%ebx
+ movd %xmm3,%ebx
+
+ # qhasm: diag0 <<<= 96
+ # asm 1: pshufd $0x39,<diag0=int6464#1,<diag0=int6464#1
+ # asm 2: pshufd $0x39,<diag0=%xmm0,<diag0=%xmm0
+ pshufd $0x39,%xmm0,%xmm0
+
+ # qhasm: diag1 <<<= 96
+ # asm 1: pshufd $0x39,<diag1=int6464#2,<diag1=int6464#2
+ # asm 2: pshufd $0x39,<diag1=%xmm1,<diag1=%xmm1
+ pshufd $0x39,%xmm1,%xmm1
+
+ # qhasm: diag2 <<<= 96
+ # asm 1: pshufd $0x39,<diag2=int6464#3,<diag2=int6464#3
+ # asm 2: pshufd $0x39,<diag2=%xmm2,<diag2=%xmm2
+ pshufd $0x39,%xmm2,%xmm2
+
+ # qhasm: diag3 <<<= 96
+ # asm 1: pshufd $0x39,<diag3=int6464#4,<diag3=int6464#4
+ # asm 2: pshufd $0x39,<diag3=%xmm3,<diag3=%xmm3
+ pshufd $0x39,%xmm3,%xmm3
+
+ # qhasm: in10 ^= *(uint32 *) (m + 40)
+ # asm 1: xorl 40(<m=int32#5),<in10=int32#1
+ # asm 2: xorl 40(<m=%esi),<in10=%eax
+ xorl 40(%esi),%eax
+
+ # qhasm: in6 ^= *(uint32 *) (m + 24)
+ # asm 1: xorl 24(<m=int32#5),<in6=int32#2
+ # asm 2: xorl 24(<m=%esi),<in6=%ecx
+ xorl 24(%esi),%ecx
+
+ # qhasm: in2 ^= *(uint32 *) (m + 8)
+ # asm 1: xorl 8(<m=int32#5),<in2=int32#3
+ # asm 2: xorl 8(<m=%esi),<in2=%edx
+ xorl 8(%esi),%edx
+
+ # qhasm: in14 ^= *(uint32 *) (m + 56)
+ # asm 1: xorl 56(<m=int32#5),<in14=int32#4
+ # asm 2: xorl 56(<m=%esi),<in14=%ebx
+ xorl 56(%esi),%ebx
+
+ # qhasm: *(uint32 *) (out + 40) = in10
+ # asm 1: movl <in10=int32#1,40(<out=int32#6)
+ # asm 2: movl <in10=%eax,40(<out=%edi)
+ movl %eax,40(%edi)
+
+ # qhasm: *(uint32 *) (out + 24) = in6
+ # asm 1: movl <in6=int32#2,24(<out=int32#6)
+ # asm 2: movl <in6=%ecx,24(<out=%edi)
+ movl %ecx,24(%edi)
+
+ # qhasm: *(uint32 *) (out + 8) = in2
+ # asm 1: movl <in2=int32#3,8(<out=int32#6)
+ # asm 2: movl <in2=%edx,8(<out=%edi)
+ movl %edx,8(%edi)
+
+ # qhasm: *(uint32 *) (out + 56) = in14
+ # asm 1: movl <in14=int32#4,56(<out=int32#6)
+ # asm 2: movl <in14=%ebx,56(<out=%edi)
+ movl %ebx,56(%edi)
4893
+
+ # qhasm: in15 = diag0
+ # asm 1: movd <diag0=int6464#1,>in15=int32#1
+ # asm 2: movd <diag0=%xmm0,>in15=%eax
+ movd %xmm0,%eax
+
+ # qhasm: in11 = diag1
+ # asm 1: movd <diag1=int6464#2,>in11=int32#2
+ # asm 2: movd <diag1=%xmm1,>in11=%ecx
+ movd %xmm1,%ecx
+
+ # qhasm: in7 = diag2
+ # asm 1: movd <diag2=int6464#3,>in7=int32#3
+ # asm 2: movd <diag2=%xmm2,>in7=%edx
+ movd %xmm2,%edx
+
+ # qhasm: in3 = diag3
+ # asm 1: movd <diag3=int6464#4,>in3=int32#4
+ # asm 2: movd <diag3=%xmm3,>in3=%ebx
+ movd %xmm3,%ebx
+
+ # qhasm: in15 ^= *(uint32 *) (m + 60)
+ # asm 1: xorl 60(<m=int32#5),<in15=int32#1
+ # asm 2: xorl 60(<m=%esi),<in15=%eax
+ xorl 60(%esi),%eax
+
+ # qhasm: in11 ^= *(uint32 *) (m + 44)
+ # asm 1: xorl 44(<m=int32#5),<in11=int32#2
+ # asm 2: xorl 44(<m=%esi),<in11=%ecx
+ xorl 44(%esi),%ecx
+
+ # qhasm: in7 ^= *(uint32 *) (m + 28)
+ # asm 1: xorl 28(<m=int32#5),<in7=int32#3
+ # asm 2: xorl 28(<m=%esi),<in7=%edx
+ xorl 28(%esi),%edx
+
+ # qhasm: in3 ^= *(uint32 *) (m + 12)
+ # asm 1: xorl 12(<m=int32#5),<in3=int32#4
+ # asm 2: xorl 12(<m=%esi),<in3=%ebx
+ xorl 12(%esi),%ebx
+
+ # qhasm: *(uint32 *) (out + 60) = in15
+ # asm 1: movl <in15=int32#1,60(<out=int32#6)
+ # asm 2: movl <in15=%eax,60(<out=%edi)
+ movl %eax,60(%edi)
+
+ # qhasm: *(uint32 *) (out + 44) = in11
+ # asm 1: movl <in11=int32#2,44(<out=int32#6)
+ # asm 2: movl <in11=%ecx,44(<out=%edi)
+ movl %ecx,44(%edi)
+
+ # qhasm: *(uint32 *) (out + 28) = in7
+ # asm 1: movl <in7=int32#3,28(<out=int32#6)
+ # asm 2: movl <in7=%edx,28(<out=%edi)
+ movl %edx,28(%edi)
+
+ # qhasm: *(uint32 *) (out + 12) = in3
+ # asm 1: movl <in3=int32#4,12(<out=int32#6)
+ # asm 2: movl <in3=%ebx,12(<out=%edi)
+ movl %ebx,12(%edi)
+
+ # qhasm: bytes = bytes_stack
+ # asm 1: movl <bytes_stack=stack32#7,>bytes=int32#1
+ # asm 2: movl <bytes_stack=24(%esp),>bytes=%eax
+ movl 24(%esp),%eax
+
+ # qhasm: in8 = ((uint32 *)&x2)[0]
+ # asm 1: movl <x2=stack128#4,>in8=int32#2
+ # asm 2: movl <x2=80(%esp),>in8=%ecx
+ movl 80(%esp),%ecx
+
+ # qhasm: in9 = ((uint32 *)&x3)[1]
+ # asm 1: movl 4+<x3=stack128#1,>in9=int32#3
+ # asm 2: movl 4+<x3=32(%esp),>in9=%edx
+ movl 4+32(%esp),%edx
+
+ # qhasm: carry? in8 += 1
+ # asm 1: add $1,<in8=int32#2
+ # asm 2: add $1,<in8=%ecx
+ add $1,%ecx
+
+ # qhasm: in9 += 0 + carry
+ # asm 1: adc $0,<in9=int32#3
+ # asm 2: adc $0,<in9=%edx
+ adc $0,%edx
+
+ # qhasm: ((uint32 *)&x2)[0] = in8
+ # asm 1: movl <in8=int32#2,>x2=stack128#4
+ # asm 2: movl <in8=%ecx,>x2=80(%esp)
+ movl %ecx,80(%esp)
+
+ # qhasm: ((uint32 *)&x3)[1] = in9
+ # asm 1: movl <in9=int32#3,4+<x3=stack128#1
+ # asm 2: movl <in9=%edx,4+<x3=32(%esp)
+ movl %edx,4+32(%esp)
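+
+ # note (editorial annotation): the add/adc pair above is a 64-bit
+ # increment of the block counter, whose low word sits in x2[0] and high
+ # word in x3[1] under this layout; both halves are written back for the
+ # next block. C sketch: if (++ctr_lo == 0) ++ctr_hi;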
4988
+
+ # qhasm: unsigned>? unsigned<? bytes - 64
+ # asm 1: cmp $64,<bytes=int32#1
+ # asm 2: cmp $64,<bytes=%eax
+ cmp $64,%eax
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto bytesatleast65 if unsigned>
+ ja ._bytesatleast65
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto bytesatleast64 if !unsigned<
+ jae ._bytesatleast64
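+
+ # note (editorial annotation): one cmp drives a three-way dispatch on
+ # the remaining length: bytes > 64 jumps to ._bytesatleast65 (another
+ # full block), bytes == 64 jumps to ._bytesatleast64 (finished), and
+ # bytes < 64 falls through to the partial-block copy below.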
5001
+
+ # qhasm: m = out
+ # asm 1: mov <out=int32#6,>m=int32#5
+ # asm 2: mov <out=%edi,>m=%esi
+ mov %edi,%esi
+
+ # qhasm: out = ctarget
+ # asm 1: movl <ctarget=stack32#6,>out=int32#6
+ # asm 2: movl <ctarget=20(%esp),>out=%edi
+ movl 20(%esp),%edi
+
+ # qhasm: i = bytes
+ # asm 1: mov <bytes=int32#1,>i=int32#2
+ # asm 2: mov <bytes=%eax,>i=%ecx
+ mov %eax,%ecx
+
+ # qhasm: while (i) { *out++ = *m++; --i }
+ rep movsb
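+
+ # note (editorial annotation): on this short-output path, out had been
+ # redirected to a stack buffer earlier in the routine (not shown here)
+ # with the caller's pointer saved in ctarget, so only the final `bytes`
+ # bytes of the buffered block are copied to the real destination by
+ # rep movsb.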
5019
+ # comment:fp stack unchanged by fallthrough
+
+ # qhasm: bytesatleast64:
+ ._bytesatleast64:
+ # comment:fp stack unchanged by fallthrough
+
+ # qhasm: done:
+ ._done:
+
+ # qhasm: eax = eax_stack
+ # asm 1: movl <eax_stack=stack32#1,>eax=int32#1
+ # asm 2: movl <eax_stack=0(%esp),>eax=%eax
+ movl 0(%esp),%eax
+
+ # qhasm: ebx = ebx_stack
+ # asm 1: movl <ebx_stack=stack32#2,>ebx=int32#4
+ # asm 2: movl <ebx_stack=4(%esp),>ebx=%ebx
+ movl 4(%esp),%ebx
+
+ # qhasm: esi = esi_stack
+ # asm 1: movl <esi_stack=stack32#3,>esi=int32#5
+ # asm 2: movl <esi_stack=8(%esp),>esi=%esi
+ movl 8(%esp),%esi
+
+ # qhasm: edi = edi_stack
+ # asm 1: movl <edi_stack=stack32#4,>edi=int32#6
+ # asm 2: movl <edi_stack=12(%esp),>edi=%edi
+ movl 12(%esp),%edi
+
+ # qhasm: ebp = ebp_stack
+ # asm 1: movl <ebp_stack=stack32#5,>ebp=int32#7
+ # asm 2: movl <ebp_stack=16(%esp),>ebp=%ebp
+ movl 16(%esp),%ebp
+
+ # qhasm: leave
+ add %eax,%esp
+ xor %eax,%eax
+ ret
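+
+ # note (editorial annotation): the epilogue restores the callee-saved
+ # registers spilled by the prologue. eax is reloaded and then added to
+ # %esp, so eax_stack evidently holds the frame displacement reserved on
+ # entry; "xor %eax,%eax" makes the function return 0.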
5057
+
+ # qhasm: bytesatleast65:
+ ._bytesatleast65:
+
+ # qhasm: bytes -= 64
+ # asm 1: sub $64,<bytes=int32#1
+ # asm 2: sub $64,<bytes=%eax
+ sub $64,%eax
+
+ # qhasm: out += 64
+ # asm 1: add $64,<out=int32#6
+ # asm 2: add $64,<out=%edi
+ add $64,%edi
+
+ # qhasm: m += 64
+ # asm 1: add $64,<m=int32#5
+ # asm 2: add $64,<m=%esi
+ add $64,%esi
+ # comment:fp stack unchanged by jump
+
+ # qhasm: goto bytesbetween1and255
+ jmp ._bytesbetween1and255
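+
+ # note (editorial annotation): with more than 64 bytes still to go, the
+ # source and destination pointers advance one block and control returns
+ # to the ._bytesbetween1and255 dispatch defined earlier in this file to
+ # handle the remaining length.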