crabstone 3.0.3

Files changed (302)
  1. checksums.yaml +7 -0
  2. data/CHANGES.md +61 -0
  3. data/LICENSE +25 -0
  4. data/MANIFEST +312 -0
  5. data/README.md +103 -0
  6. data/Rakefile +27 -0
  7. data/bin/genconst +66 -0
  8. data/bin/genreg +99 -0
  9. data/crabstone.gemspec +27 -0
  10. data/examples/hello_world.rb +43 -0
  11. data/lib/arch/arm.rb +128 -0
  12. data/lib/arch/arm64.rb +167 -0
  13. data/lib/arch/arm64_const.rb +1055 -0
  14. data/lib/arch/arm64_registers.rb +295 -0
  15. data/lib/arch/arm_const.rb +777 -0
  16. data/lib/arch/arm_registers.rb +149 -0
  17. data/lib/arch/mips.rb +78 -0
  18. data/lib/arch/mips_const.rb +850 -0
  19. data/lib/arch/mips_registers.rb +208 -0
  20. data/lib/arch/ppc.rb +90 -0
  21. data/lib/arch/ppc_const.rb +1181 -0
  22. data/lib/arch/ppc_registers.rb +209 -0
  23. data/lib/arch/sparc.rb +79 -0
  24. data/lib/arch/sparc_const.rb +461 -0
  25. data/lib/arch/sparc_registers.rb +121 -0
  26. data/lib/arch/systemz.rb +79 -0
  27. data/lib/arch/sysz_const.rb +779 -0
  28. data/lib/arch/sysz_registers.rb +66 -0
  29. data/lib/arch/x86.rb +107 -0
  30. data/lib/arch/x86_const.rb +1698 -0
  31. data/lib/arch/x86_registers.rb +265 -0
  32. data/lib/arch/xcore.rb +78 -0
  33. data/lib/arch/xcore_const.rb +185 -0
  34. data/lib/arch/xcore_registers.rb +57 -0
  35. data/lib/crabstone.rb +564 -0
  36. data/test/MC/AArch64/basic-a64-instructions.s.cs +2014 -0
  37. data/test/MC/AArch64/gicv3-regs.s.cs +111 -0
  38. data/test/MC/AArch64/neon-2velem.s.cs +113 -0
  39. data/test/MC/AArch64/neon-3vdiff.s.cs +143 -0
  40. data/test/MC/AArch64/neon-aba-abd.s.cs +28 -0
  41. data/test/MC/AArch64/neon-across.s.cs +40 -0
  42. data/test/MC/AArch64/neon-add-pairwise.s.cs +11 -0
  43. data/test/MC/AArch64/neon-add-sub-instructions.s.cs +21 -0
  44. data/test/MC/AArch64/neon-bitwise-instructions.s.cs +17 -0
  45. data/test/MC/AArch64/neon-compare-instructions.s.cs +136 -0
  46. data/test/MC/AArch64/neon-crypto.s.cs +15 -0
  47. data/test/MC/AArch64/neon-extract.s.cs +3 -0
  48. data/test/MC/AArch64/neon-facge-facgt.s.cs +13 -0
  49. data/test/MC/AArch64/neon-frsqrt-frecp.s.cs +7 -0
  50. data/test/MC/AArch64/neon-halving-add-sub.s.cs +25 -0
  51. data/test/MC/AArch64/neon-max-min-pairwise.s.cs +37 -0
  52. data/test/MC/AArch64/neon-max-min.s.cs +37 -0
  53. data/test/MC/AArch64/neon-mla-mls-instructions.s.cs +19 -0
  54. data/test/MC/AArch64/neon-mov.s.cs +74 -0
  55. data/test/MC/AArch64/neon-mul-div-instructions.s.cs +24 -0
  56. data/test/MC/AArch64/neon-perm.s.cs +43 -0
  57. data/test/MC/AArch64/neon-rounding-halving-add.s.cs +13 -0
  58. data/test/MC/AArch64/neon-rounding-shift.s.cs +15 -0
  59. data/test/MC/AArch64/neon-saturating-add-sub.s.cs +29 -0
  60. data/test/MC/AArch64/neon-saturating-rounding-shift.s.cs +15 -0
  61. data/test/MC/AArch64/neon-saturating-shift.s.cs +15 -0
  62. data/test/MC/AArch64/neon-scalar-abs.s.cs +8 -0
  63. data/test/MC/AArch64/neon-scalar-add-sub.s.cs +3 -0
  64. data/test/MC/AArch64/neon-scalar-by-elem-mla.s.cs +13 -0
  65. data/test/MC/AArch64/neon-scalar-by-elem-mul.s.cs +13 -0
  66. data/test/MC/AArch64/neon-scalar-by-elem-saturating-mla.s.cs +15 -0
  67. data/test/MC/AArch64/neon-scalar-by-elem-saturating-mul.s.cs +18 -0
  68. data/test/MC/AArch64/neon-scalar-compare.s.cs +12 -0
  69. data/test/MC/AArch64/neon-scalar-cvt.s.cs +34 -0
  70. data/test/MC/AArch64/neon-scalar-dup.s.cs +23 -0
  71. data/test/MC/AArch64/neon-scalar-extract-narrow.s.cs +10 -0
  72. data/test/MC/AArch64/neon-scalar-fp-compare.s.cs +21 -0
  73. data/test/MC/AArch64/neon-scalar-mul.s.cs +13 -0
  74. data/test/MC/AArch64/neon-scalar-neg.s.cs +6 -0
  75. data/test/MC/AArch64/neon-scalar-recip.s.cs +11 -0
  76. data/test/MC/AArch64/neon-scalar-reduce-pairwise.s.cs +3 -0
  77. data/test/MC/AArch64/neon-scalar-rounding-shift.s.cs +3 -0
  78. data/test/MC/AArch64/neon-scalar-saturating-add-sub.s.cs +25 -0
  79. data/test/MC/AArch64/neon-scalar-saturating-rounding-shift.s.cs +9 -0
  80. data/test/MC/AArch64/neon-scalar-saturating-shift.s.cs +9 -0
  81. data/test/MC/AArch64/neon-scalar-shift-imm.s.cs +42 -0
  82. data/test/MC/AArch64/neon-scalar-shift.s.cs +3 -0
  83. data/test/MC/AArch64/neon-shift-left-long.s.cs +13 -0
  84. data/test/MC/AArch64/neon-shift.s.cs +22 -0
  85. data/test/MC/AArch64/neon-simd-copy.s.cs +42 -0
  86. data/test/MC/AArch64/neon-simd-ldst-multi-elem.s.cs +197 -0
  87. data/test/MC/AArch64/neon-simd-ldst-one-elem.s.cs +129 -0
  88. data/test/MC/AArch64/neon-simd-misc.s.cs +213 -0
  89. data/test/MC/AArch64/neon-simd-post-ldst-multi-elem.s.cs +107 -0
  90. data/test/MC/AArch64/neon-simd-shift.s.cs +151 -0
  91. data/test/MC/AArch64/neon-tbl.s.cs +21 -0
  92. data/test/MC/AArch64/trace-regs.s.cs +383 -0
  93. data/test/MC/ARM/arm-aliases.s.cs +7 -0
  94. data/test/MC/ARM/arm-arithmetic-aliases.s.cs +50 -0
  95. data/test/MC/ARM/arm-it-block.s.cs +2 -0
  96. data/test/MC/ARM/arm-memory-instructions.s.cs +138 -0
  97. data/test/MC/ARM/arm-shift-encoding.s.cs +50 -0
  98. data/test/MC/ARM/arm-thumb-trustzone.s.cs +3 -0
  99. data/test/MC/ARM/arm-trustzone.s.cs +3 -0
  100. data/test/MC/ARM/arm_addrmode2.s.cs +15 -0
  101. data/test/MC/ARM/arm_addrmode3.s.cs +9 -0
  102. data/test/MC/ARM/arm_instructions.s.cs +25 -0
  103. data/test/MC/ARM/basic-arm-instructions-v8.s.cs +10 -0
  104. data/test/MC/ARM/basic-arm-instructions.s.cs +997 -0
  105. data/test/MC/ARM/basic-thumb-instructions.s.cs +130 -0
  106. data/test/MC/ARM/basic-thumb2-instructions-v8.s.cs +1 -0
  107. data/test/MC/ARM/basic-thumb2-instructions.s.cs +1242 -0
  108. data/test/MC/ARM/crc32-thumb.s.cs +7 -0
  109. data/test/MC/ARM/crc32.s.cs +7 -0
  110. data/test/MC/ARM/dot-req.s.cs +3 -0
  111. data/test/MC/ARM/fp-armv8.s.cs +52 -0
  112. data/test/MC/ARM/idiv-thumb.s.cs +3 -0
  113. data/test/MC/ARM/idiv.s.cs +3 -0
  114. data/test/MC/ARM/load-store-acquire-release-v8-thumb.s.cs +15 -0
  115. data/test/MC/ARM/load-store-acquire-release-v8.s.cs +15 -0
  116. data/test/MC/ARM/mode-switch.s.cs +7 -0
  117. data/test/MC/ARM/neon-abs-encoding.s.cs +15 -0
  118. data/test/MC/ARM/neon-absdiff-encoding.s.cs +39 -0
  119. data/test/MC/ARM/neon-add-encoding.s.cs +119 -0
  120. data/test/MC/ARM/neon-bitcount-encoding.s.cs +15 -0
  121. data/test/MC/ARM/neon-bitwise-encoding.s.cs +126 -0
  122. data/test/MC/ARM/neon-cmp-encoding.s.cs +88 -0
  123. data/test/MC/ARM/neon-convert-encoding.s.cs +27 -0
  124. data/test/MC/ARM/neon-crypto.s.cs +16 -0
  125. data/test/MC/ARM/neon-dup-encoding.s.cs +13 -0
  126. data/test/MC/ARM/neon-minmax-encoding.s.cs +57 -0
  127. data/test/MC/ARM/neon-mov-encoding.s.cs +76 -0
  128. data/test/MC/ARM/neon-mul-accum-encoding.s.cs +39 -0
  129. data/test/MC/ARM/neon-mul-encoding.s.cs +72 -0
  130. data/test/MC/ARM/neon-neg-encoding.s.cs +15 -0
  131. data/test/MC/ARM/neon-pairwise-encoding.s.cs +47 -0
  132. data/test/MC/ARM/neon-reciprocal-encoding.s.cs +13 -0
  133. data/test/MC/ARM/neon-reverse-encoding.s.cs +13 -0
  134. data/test/MC/ARM/neon-satshift-encoding.s.cs +75 -0
  135. data/test/MC/ARM/neon-shift-encoding.s.cs +238 -0
  136. data/test/MC/ARM/neon-shiftaccum-encoding.s.cs +97 -0
  137. data/test/MC/ARM/neon-shuffle-encoding.s.cs +59 -0
  138. data/test/MC/ARM/neon-sub-encoding.s.cs +82 -0
  139. data/test/MC/ARM/neon-table-encoding.s.cs +9 -0
  140. data/test/MC/ARM/neon-v8.s.cs +38 -0
  141. data/test/MC/ARM/neon-vld-encoding.s.cs +213 -0
  142. data/test/MC/ARM/neon-vst-encoding.s.cs +120 -0
  143. data/test/MC/ARM/neon-vswp.s.cs +3 -0
  144. data/test/MC/ARM/neont2-abs-encoding.s.cs +15 -0
  145. data/test/MC/ARM/neont2-absdiff-encoding.s.cs +39 -0
  146. data/test/MC/ARM/neont2-add-encoding.s.cs +65 -0
  147. data/test/MC/ARM/neont2-bitcount-encoding.s.cs +15 -0
  148. data/test/MC/ARM/neont2-bitwise-encoding.s.cs +15 -0
  149. data/test/MC/ARM/neont2-cmp-encoding.s.cs +17 -0
  150. data/test/MC/ARM/neont2-convert-encoding.s.cs +19 -0
  151. data/test/MC/ARM/neont2-dup-encoding.s.cs +19 -0
  152. data/test/MC/ARM/neont2-minmax-encoding.s.cs +57 -0
  153. data/test/MC/ARM/neont2-mov-encoding.s.cs +58 -0
  154. data/test/MC/ARM/neont2-mul-accum-encoding.s.cs +41 -0
  155. data/test/MC/ARM/neont2-mul-encoding.s.cs +31 -0
  156. data/test/MC/ARM/neont2-neg-encoding.s.cs +15 -0
  157. data/test/MC/ARM/neont2-pairwise-encoding.s.cs +43 -0
  158. data/test/MC/ARM/neont2-reciprocal-encoding.s.cs +13 -0
  159. data/test/MC/ARM/neont2-reverse-encoding.s.cs +13 -0
  160. data/test/MC/ARM/neont2-satshift-encoding.s.cs +75 -0
  161. data/test/MC/ARM/neont2-shift-encoding.s.cs +80 -0
  162. data/test/MC/ARM/neont2-shiftaccum-encoding.s.cs +97 -0
  163. data/test/MC/ARM/neont2-shuffle-encoding.s.cs +23 -0
  164. data/test/MC/ARM/neont2-sub-encoding.s.cs +23 -0
  165. data/test/MC/ARM/neont2-table-encoding.s.cs +9 -0
  166. data/test/MC/ARM/neont2-vld-encoding.s.cs +51 -0
  167. data/test/MC/ARM/neont2-vst-encoding.s.cs +48 -0
  168. data/test/MC/ARM/simple-fp-encoding.s.cs +157 -0
  169. data/test/MC/ARM/thumb-fp-armv8.s.cs +51 -0
  170. data/test/MC/ARM/thumb-hints.s.cs +12 -0
  171. data/test/MC/ARM/thumb-neon-crypto.s.cs +16 -0
  172. data/test/MC/ARM/thumb-neon-v8.s.cs +38 -0
  173. data/test/MC/ARM/thumb-shift-encoding.s.cs +19 -0
  174. data/test/MC/ARM/thumb.s.cs +19 -0
  175. data/test/MC/ARM/thumb2-b.w-encodingT4.s.cs +2 -0
  176. data/test/MC/ARM/thumb2-branches.s.cs +85 -0
  177. data/test/MC/ARM/thumb2-mclass.s.cs +41 -0
  178. data/test/MC/ARM/thumb2-narrow-dp.ll.cs +379 -0
  179. data/test/MC/ARM/thumb2-pldw.s.cs +2 -0
  180. data/test/MC/ARM/vfp4-thumb.s.cs +13 -0
  181. data/test/MC/ARM/vfp4.s.cs +13 -0
  182. data/test/MC/ARM/vpush-vpop-thumb.s.cs +9 -0
  183. data/test/MC/ARM/vpush-vpop.s.cs +9 -0
  184. data/test/MC/Mips/hilo-addressing.s.cs +4 -0
  185. data/test/MC/Mips/micromips-alu-instructions-EB.s.cs +33 -0
  186. data/test/MC/Mips/micromips-alu-instructions.s.cs +33 -0
  187. data/test/MC/Mips/micromips-branch-instructions-EB.s.cs +11 -0
  188. data/test/MC/Mips/micromips-branch-instructions.s.cs +11 -0
  189. data/test/MC/Mips/micromips-expansions.s.cs +20 -0
  190. data/test/MC/Mips/micromips-jump-instructions-EB.s.cs +5 -0
  191. data/test/MC/Mips/micromips-jump-instructions.s.cs +6 -0
  192. data/test/MC/Mips/micromips-loadstore-instructions-EB.s.cs +9 -0
  193. data/test/MC/Mips/micromips-loadstore-instructions.s.cs +9 -0
  194. data/test/MC/Mips/micromips-loadstore-unaligned-EB.s.cs +5 -0
  195. data/test/MC/Mips/micromips-loadstore-unaligned.s.cs +5 -0
  196. data/test/MC/Mips/micromips-movcond-instructions-EB.s.cs +5 -0
  197. data/test/MC/Mips/micromips-movcond-instructions.s.cs +5 -0
  198. data/test/MC/Mips/micromips-multiply-instructions-EB.s.cs +5 -0
  199. data/test/MC/Mips/micromips-multiply-instructions.s.cs +5 -0
  200. data/test/MC/Mips/micromips-shift-instructions-EB.s.cs +9 -0
  201. data/test/MC/Mips/micromips-shift-instructions.s.cs +9 -0
  202. data/test/MC/Mips/micromips-trap-instructions-EB.s.cs +13 -0
  203. data/test/MC/Mips/micromips-trap-instructions.s.cs +13 -0
  204. data/test/MC/Mips/mips-alu-instructions.s.cs +53 -0
  205. data/test/MC/Mips/mips-control-instructions-64.s.cs +33 -0
  206. data/test/MC/Mips/mips-control-instructions.s.cs +33 -0
  207. data/test/MC/Mips/mips-coprocessor-encodings.s.cs +17 -0
  208. data/test/MC/Mips/mips-dsp-instructions.s.cs +43 -0
  209. data/test/MC/Mips/mips-expansions.s.cs +20 -0
  210. data/test/MC/Mips/mips-fpu-instructions.s.cs +93 -0
  211. data/test/MC/Mips/mips-jump-instructions.s.cs +1 -0
  212. data/test/MC/Mips/mips-memory-instructions.s.cs +17 -0
  213. data/test/MC/Mips/mips-register-names.s.cs +33 -0
  214. data/test/MC/Mips/mips64-alu-instructions.s.cs +47 -0
  215. data/test/MC/Mips/mips64-instructions.s.cs +3 -0
  216. data/test/MC/Mips/mips64-register-names.s.cs +33 -0
  217. data/test/MC/Mips/mips_directives.s.cs +12 -0
  218. data/test/MC/Mips/nabi-regs.s.cs +12 -0
  219. data/test/MC/Mips/set-at-directive.s.cs +6 -0
  220. data/test/MC/Mips/test_2r.s.cs +16 -0
  221. data/test/MC/Mips/test_2rf.s.cs +33 -0
  222. data/test/MC/Mips/test_3r.s.cs +243 -0
  223. data/test/MC/Mips/test_3rf.s.cs +83 -0
  224. data/test/MC/Mips/test_bit.s.cs +49 -0
  225. data/test/MC/Mips/test_cbranch.s.cs +11 -0
  226. data/test/MC/Mips/test_ctrlregs.s.cs +33 -0
  227. data/test/MC/Mips/test_elm.s.cs +16 -0
  228. data/test/MC/Mips/test_elm_insert.s.cs +4 -0
  229. data/test/MC/Mips/test_elm_insve.s.cs +5 -0
  230. data/test/MC/Mips/test_i10.s.cs +5 -0
  231. data/test/MC/Mips/test_i5.s.cs +45 -0
  232. data/test/MC/Mips/test_i8.s.cs +11 -0
  233. data/test/MC/Mips/test_lsa.s.cs +5 -0
  234. data/test/MC/Mips/test_mi10.s.cs +24 -0
  235. data/test/MC/Mips/test_vec.s.cs +8 -0
  236. data/test/MC/PowerPC/ppc64-encoding-bookII.s.cs +25 -0
  237. data/test/MC/PowerPC/ppc64-encoding-bookIII.s.cs +35 -0
  238. data/test/MC/PowerPC/ppc64-encoding-ext.s.cs +535 -0
  239. data/test/MC/PowerPC/ppc64-encoding-fp.s.cs +110 -0
  240. data/test/MC/PowerPC/ppc64-encoding-vmx.s.cs +170 -0
  241. data/test/MC/PowerPC/ppc64-encoding.s.cs +202 -0
  242. data/test/MC/PowerPC/ppc64-operands.s.cs +32 -0
  243. data/test/MC/README +6 -0
  244. data/test/MC/Sparc/sparc-alu-instructions.s.cs +47 -0
  245. data/test/MC/Sparc/sparc-atomic-instructions.s.cs +7 -0
  246. data/test/MC/Sparc/sparc-ctrl-instructions.s.cs +11 -0
  247. data/test/MC/Sparc/sparc-fp-instructions.s.cs +59 -0
  248. data/test/MC/Sparc/sparc-mem-instructions.s.cs +25 -0
  249. data/test/MC/Sparc/sparc-vis.s.cs +2 -0
  250. data/test/MC/Sparc/sparc64-alu-instructions.s.cs +13 -0
  251. data/test/MC/Sparc/sparc64-ctrl-instructions.s.cs +102 -0
  252. data/test/MC/Sparc/sparcv8-instructions.s.cs +7 -0
  253. data/test/MC/Sparc/sparcv9-instructions.s.cs +1 -0
  254. data/test/MC/SystemZ/insn-good-z196.s.cs +589 -0
  255. data/test/MC/SystemZ/insn-good.s.cs +2265 -0
  256. data/test/MC/SystemZ/regs-good.s.cs +45 -0
  257. data/test/MC/X86/3DNow.s.cs +29 -0
  258. data/test/MC/X86/address-size.s.cs +5 -0
  259. data/test/MC/X86/avx512-encodings.s.cs +12 -0
  260. data/test/MC/X86/intel-syntax-encoding.s.cs +30 -0
  261. data/test/MC/X86/x86-32-avx.s.cs +833 -0
  262. data/test/MC/X86/x86-32-fma3.s.cs +169 -0
  263. data/test/MC/X86/x86-32-ms-inline-asm.s.cs +27 -0
  264. data/test/MC/X86/x86_64-avx-clmul-encoding.s.cs +11 -0
  265. data/test/MC/X86/x86_64-avx-encoding.s.cs +1058 -0
  266. data/test/MC/X86/x86_64-bmi-encoding.s.cs +51 -0
  267. data/test/MC/X86/x86_64-encoding.s.cs +59 -0
  268. data/test/MC/X86/x86_64-fma3-encoding.s.cs +169 -0
  269. data/test/MC/X86/x86_64-fma4-encoding.s.cs +98 -0
  270. data/test/MC/X86/x86_64-hle-encoding.s.cs +3 -0
  271. data/test/MC/X86/x86_64-imm-widths.s.cs +27 -0
  272. data/test/MC/X86/x86_64-rand-encoding.s.cs +13 -0
  273. data/test/MC/X86/x86_64-rtm-encoding.s.cs +4 -0
  274. data/test/MC/X86/x86_64-sse4a.s.cs +1 -0
  275. data/test/MC/X86/x86_64-tbm-encoding.s.cs +40 -0
  276. data/test/MC/X86/x86_64-xop-encoding.s.cs +152 -0
  277. data/test/README +6 -0
  278. data/test/test.rb +205 -0
  279. data/test/test.rb.SPEC +235 -0
  280. data/test/test_arm.rb +202 -0
  281. data/test/test_arm.rb.SPEC +275 -0
  282. data/test/test_arm64.rb +150 -0
  283. data/test/test_arm64.rb.SPEC +116 -0
  284. data/test/test_detail.rb +228 -0
  285. data/test/test_detail.rb.SPEC +322 -0
  286. data/test/test_exhaustive.rb +80 -0
  287. data/test/test_mips.rb +118 -0
  288. data/test/test_mips.rb.SPEC +91 -0
  289. data/test/test_ppc.rb +137 -0
  290. data/test/test_ppc.rb.SPEC +84 -0
  291. data/test/test_sanity.rb +83 -0
  292. data/test/test_skipdata.rb +111 -0
  293. data/test/test_skipdata.rb.SPEC +58 -0
  294. data/test/test_sparc.rb +113 -0
  295. data/test/test_sparc.rb.SPEC +116 -0
  296. data/test/test_sysz.rb +111 -0
  297. data/test/test_sysz.rb.SPEC +61 -0
  298. data/test/test_x86.rb +189 -0
  299. data/test/test_x86.rb.SPEC +579 -0
  300. data/test/test_xcore.rb +100 -0
  301. data/test/test_xcore.rb.SPEC +75 -0
  302. metadata +393 -0
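The file list above is dominated by per-architecture constant/register tables under data/lib/arch/ and the .s.cs disassembly fixtures under data/test/MC/. For orientation, here is a minimal usage sketch in the spirit of data/examples/hello_world.rb; the Disassembler API and the ARCH_*/MODE_* constants are assumed to match the upstream Capstone Ruby binding, so treat this as illustrative rather than authoritative:

require 'crabstone'
include Crabstone

# x86-64 bytes: push rbp; mov rax, qword ptr [rip + 0x13b8]
code = "\x55\x48\x8b\x05\xb8\x13\x00\x00"

begin
  cs = Disassembler.new(ARCH_X86, MODE_64)
  cs.disasm(code, 0x1000).each do |insn|
    printf("0x%x:\t%s\t%s\n", insn.address, insn.mnemonic, insn.op_str)
  end
ensure
  cs.close if cs
end

The hunks that follow are a sample of the new test fixtures; each line pairs an encoded byte sequence with the disassembly text the gem is expected to produce.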
@@ -0,0 +1,13 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0xfb,0xff,0x20,0x04 = vrecpe.u32 d16, d16
+ 0xfb,0xff,0x60,0x04 = vrecpe.u32 q8, q8
+ 0xfb,0xff,0x20,0x05 = vrecpe.f32 d16, d16
+ 0xfb,0xff,0x60,0x05 = vrecpe.f32 q8, q8
+ 0x40,0xef,0xb1,0x0f = vrecps.f32 d16, d16, d17
+ 0x40,0xef,0xf2,0x0f = vrecps.f32 q8, q8, q9
+ 0xfb,0xff,0xa0,0x04 = vrsqrte.u32 d16, d16
+ 0xfb,0xff,0xe0,0x04 = vrsqrte.u32 q8, q8
+ 0xfb,0xff,0xa0,0x05 = vrsqrte.f32 d16, d16
+ 0xfb,0xff,0xe0,0x05 = vrsqrte.f32 q8, q8
+ 0x60,0xef,0xb1,0x0f = vrsqrts.f32 d16, d16, d17
+ 0x60,0xef,0xf2,0x0f = vrsqrts.f32 q8, q8, q9
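Every .s.cs fixture has the same shape: a header comment giving the Capstone architecture and mode (here CS_ARCH_ARM, CS_MODE_THUMB), followed by lines of the form "hex,bytes = expected disassembly". The gem's real harness lives in data/test/test.rb and data/test/test.rb.SPEC; the snippet below is only a hypothetical checker for a single fixture line, assuming the same Crabstone::Disassembler API as above:

require 'crabstone'
include Crabstone

# Hypothetical: verify one fixture line such as
#   "0xfb,0xff,0x20,0x04 = vrecpe.u32 d16, d16"
def fixture_line_ok?(line, arch = ARCH_ARM, mode = MODE_THUMB)
  bytes, expected = line.sub(/\A\+?\s*/, '').split(' = ', 2)
  code = bytes.split(',').map { |b| Integer(b) }.pack('C*')

  cs = Disassembler.new(arch, mode)
  insn = cs.disasm(code, 0).first
  actual = "#{insn.mnemonic} #{insn.op_str}".strip
  cs.close

  actual == expected.strip
end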
@@ -0,0 +1,13 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0xf0,0xff,0x20,0x00 = vrev64.8 d16, d16
+ 0xf4,0xff,0x20,0x00 = vrev64.16 d16, d16
+ 0xf8,0xff,0x20,0x00 = vrev64.32 d16, d16
+ 0xf0,0xff,0x60,0x00 = vrev64.8 q8, q8
+ 0xf4,0xff,0x60,0x00 = vrev64.16 q8, q8
+ 0xf8,0xff,0x60,0x00 = vrev64.32 q8, q8
+ 0xf0,0xff,0xa0,0x00 = vrev32.8 d16, d16
+ 0xf4,0xff,0xa0,0x00 = vrev32.16 d16, d16
+ 0xf0,0xff,0xe0,0x00 = vrev32.8 q8, q8
+ 0xf4,0xff,0xe0,0x00 = vrev32.16 q8, q8
+ 0xf0,0xff,0x20,0x01 = vrev16.8 d16, d16
+ 0xf0,0xff,0x60,0x01 = vrev16.8 q8, q8
@@ -0,0 +1,75 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0x41,0xef,0xb0,0x04 = vqshl.s8 d16, d16, d17
+ 0x51,0xef,0xb0,0x04 = vqshl.s16 d16, d16, d17
+ 0x61,0xef,0xb0,0x04 = vqshl.s32 d16, d16, d17
+ 0x71,0xef,0xb0,0x04 = vqshl.s64 d16, d16, d17
+ 0x41,0xff,0xb0,0x04 = vqshl.u8 d16, d16, d17
+ 0x51,0xff,0xb0,0x04 = vqshl.u16 d16, d16, d17
+ 0x61,0xff,0xb0,0x04 = vqshl.u32 d16, d16, d17
+ 0x71,0xff,0xb0,0x04 = vqshl.u64 d16, d16, d17
+ 0x42,0xef,0xf0,0x04 = vqshl.s8 q8, q8, q9
+ 0x52,0xef,0xf0,0x04 = vqshl.s16 q8, q8, q9
+ 0x62,0xef,0xf0,0x04 = vqshl.s32 q8, q8, q9
+ 0x72,0xef,0xf0,0x04 = vqshl.s64 q8, q8, q9
+ 0x42,0xff,0xf0,0x04 = vqshl.u8 q8, q8, q9
+ 0x52,0xff,0xf0,0x04 = vqshl.u16 q8, q8, q9
+ 0x62,0xff,0xf0,0x04 = vqshl.u32 q8, q8, q9
+ 0x72,0xff,0xf0,0x04 = vqshl.u64 q8, q8, q9
+ 0xcf,0xef,0x30,0x07 = vqshl.s8 d16, d16, #7
+ 0xdf,0xef,0x30,0x07 = vqshl.s16 d16, d16, #15
+ 0xff,0xef,0x30,0x07 = vqshl.s32 d16, d16, #31
+ 0xff,0xef,0xb0,0x07 = vqshl.s64 d16, d16, #63
+ 0xcf,0xff,0x30,0x07 = vqshl.u8 d16, d16, #7
+ 0xdf,0xff,0x30,0x07 = vqshl.u16 d16, d16, #15
+ 0xff,0xff,0x30,0x07 = vqshl.u32 d16, d16, #31
+ 0xff,0xff,0xb0,0x07 = vqshl.u64 d16, d16, #63
+ 0xcf,0xff,0x30,0x06 = vqshlu.s8 d16, d16, #7
+ 0xdf,0xff,0x30,0x06 = vqshlu.s16 d16, d16, #15
+ 0xff,0xff,0x30,0x06 = vqshlu.s32 d16, d16, #31
+ 0xff,0xff,0xb0,0x06 = vqshlu.s64 d16, d16, #63
+ 0xcf,0xef,0x70,0x07 = vqshl.s8 q8, q8, #7
+ 0xdf,0xef,0x70,0x07 = vqshl.s16 q8, q8, #15
+ 0xff,0xef,0x70,0x07 = vqshl.s32 q8, q8, #31
+ 0xff,0xef,0xf0,0x07 = vqshl.s64 q8, q8, #63
+ 0xcf,0xff,0x70,0x07 = vqshl.u8 q8, q8, #7
+ 0xdf,0xff,0x70,0x07 = vqshl.u16 q8, q8, #15
+ 0xff,0xff,0x70,0x07 = vqshl.u32 q8, q8, #31
+ 0xff,0xff,0xf0,0x07 = vqshl.u64 q8, q8, #63
+ 0xcf,0xff,0x70,0x06 = vqshlu.s8 q8, q8, #7
+ 0xdf,0xff,0x70,0x06 = vqshlu.s16 q8, q8, #15
+ 0xff,0xff,0x70,0x06 = vqshlu.s32 q8, q8, #31
+ 0xff,0xff,0xf0,0x06 = vqshlu.s64 q8, q8, #63
+ 0x41,0xef,0xb0,0x05 = vqrshl.s8 d16, d16, d17
+ 0x51,0xef,0xb0,0x05 = vqrshl.s16 d16, d16, d17
+ 0x61,0xef,0xb0,0x05 = vqrshl.s32 d16, d16, d17
+ 0x71,0xef,0xb0,0x05 = vqrshl.s64 d16, d16, d17
+ 0x41,0xff,0xb0,0x05 = vqrshl.u8 d16, d16, d17
+ 0x51,0xff,0xb0,0x05 = vqrshl.u16 d16, d16, d17
+ 0x61,0xff,0xb0,0x05 = vqrshl.u32 d16, d16, d17
+ 0x71,0xff,0xb0,0x05 = vqrshl.u64 d16, d16, d17
+ 0x42,0xef,0xf0,0x05 = vqrshl.s8 q8, q8, q9
+ 0x52,0xef,0xf0,0x05 = vqrshl.s16 q8, q8, q9
+ 0x62,0xef,0xf0,0x05 = vqrshl.s32 q8, q8, q9
+ 0x72,0xef,0xf0,0x05 = vqrshl.s64 q8, q8, q9
+ 0x42,0xff,0xf0,0x05 = vqrshl.u8 q8, q8, q9
+ 0x52,0xff,0xf0,0x05 = vqrshl.u16 q8, q8, q9
+ 0x62,0xff,0xf0,0x05 = vqrshl.u32 q8, q8, q9
+ 0x72,0xff,0xf0,0x05 = vqrshl.u64 q8, q8, q9
+ 0xc8,0xef,0x30,0x09 = vqshrn.s16 d16, q8, #8
+ 0xd0,0xef,0x30,0x09 = vqshrn.s32 d16, q8, #16
+ 0xe0,0xef,0x30,0x09 = vqshrn.s64 d16, q8, #32
+ 0xc8,0xff,0x30,0x09 = vqshrn.u16 d16, q8, #8
+ 0xd0,0xff,0x30,0x09 = vqshrn.u32 d16, q8, #16
+ 0xe0,0xff,0x30,0x09 = vqshrn.u64 d16, q8, #32
+ 0xc8,0xff,0x30,0x08 = vqshrun.s16 d16, q8, #8
+ 0xd0,0xff,0x30,0x08 = vqshrun.s32 d16, q8, #16
+ 0xe0,0xff,0x30,0x08 = vqshrun.s64 d16, q8, #32
+ 0xc8,0xef,0x70,0x09 = vqrshrn.s16 d16, q8, #8
+ 0xd0,0xef,0x70,0x09 = vqrshrn.s32 d16, q8, #16
+ 0xe0,0xef,0x70,0x09 = vqrshrn.s64 d16, q8, #32
+ 0xc8,0xff,0x70,0x09 = vqrshrn.u16 d16, q8, #8
+ 0xd0,0xff,0x70,0x09 = vqrshrn.u32 d16, q8, #16
+ 0xe0,0xff,0x70,0x09 = vqrshrn.u64 d16, q8, #32
+ 0xc8,0xff,0x70,0x08 = vqrshrun.s16 d16, q8, #8
+ 0xd0,0xff,0x70,0x08 = vqrshrun.s32 d16, q8, #16
+ 0xe0,0xff,0x70,0x08 = vqrshrun.s64 d16, q8, #32
@@ -0,0 +1,80 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0x40,0xff,0xa1,0x04 = vshl.u8 d16, d17, d16
+ 0x50,0xff,0xa1,0x04 = vshl.u16 d16, d17, d16
+ 0x60,0xff,0xa1,0x04 = vshl.u32 d16, d17, d16
+ 0x70,0xff,0xa1,0x04 = vshl.u64 d16, d17, d16
+ 0xcf,0xef,0x30,0x05 = vshl.i8 d16, d16, #7
+ 0xdf,0xef,0x30,0x05 = vshl.i16 d16, d16, #15
+ 0xff,0xef,0x30,0x05 = vshl.i32 d16, d16, #31
+ 0xff,0xef,0xb0,0x05 = vshl.i64 d16, d16, #63
+ 0x40,0xff,0xe2,0x04 = vshl.u8 q8, q9, q8
+ 0x50,0xff,0xe2,0x04 = vshl.u16 q8, q9, q8
+ 0x60,0xff,0xe2,0x04 = vshl.u32 q8, q9, q8
+ 0x70,0xff,0xe2,0x04 = vshl.u64 q8, q9, q8
+ 0xcf,0xef,0x70,0x05 = vshl.i8 q8, q8, #7
+ 0xdf,0xef,0x70,0x05 = vshl.i16 q8, q8, #15
+ 0xff,0xef,0x70,0x05 = vshl.i32 q8, q8, #31
+ 0xff,0xef,0xf0,0x05 = vshl.i64 q8, q8, #63
+ 0xc8,0xff,0x30,0x00 = vshr.u8 d16, d16, #8
+ 0xd0,0xff,0x30,0x00 = vshr.u16 d16, d16, #16
+ 0xe0,0xff,0x30,0x00 = vshr.u32 d16, d16, #32
+ 0xc0,0xff,0xb0,0x00 = vshr.u64 d16, d16, #64
+ 0xc8,0xff,0x70,0x00 = vshr.u8 q8, q8, #8
+ 0xd0,0xff,0x70,0x00 = vshr.u16 q8, q8, #16
+ 0xe0,0xff,0x70,0x00 = vshr.u32 q8, q8, #32
+ 0xc0,0xff,0xf0,0x00 = vshr.u64 q8, q8, #64
+ 0xc8,0xef,0x30,0x00 = vshr.s8 d16, d16, #8
+ 0xd0,0xef,0x30,0x00 = vshr.s16 d16, d16, #16
+ 0xe0,0xef,0x30,0x00 = vshr.s32 d16, d16, #32
+ 0xc0,0xef,0xb0,0x00 = vshr.s64 d16, d16, #64
+ 0xc8,0xef,0x70,0x00 = vshr.s8 q8, q8, #8
+ 0xd0,0xef,0x70,0x00 = vshr.s16 q8, q8, #16
+ 0xe0,0xef,0x70,0x00 = vshr.s32 q8, q8, #32
+ 0xc0,0xef,0xf0,0x00 = vshr.s64 q8, q8, #64
+ 0xcf,0xef,0x30,0x0a = vshll.s8 q8, d16, #7
+ 0xdf,0xef,0x30,0x0a = vshll.s16 q8, d16, #15
+ 0xff,0xef,0x30,0x0a = vshll.s32 q8, d16, #31
+ 0xcf,0xff,0x30,0x0a = vshll.u8 q8, d16, #7
+ 0xdf,0xff,0x30,0x0a = vshll.u16 q8, d16, #15
+ 0xff,0xff,0x30,0x0a = vshll.u32 q8, d16, #31
+ 0xf2,0xff,0x20,0x03 = vshll.i8 q8, d16, #8
+ 0xf6,0xff,0x20,0x03 = vshll.i16 q8, d16, #16
+ 0xfa,0xff,0x20,0x03 = vshll.i32 q8, d16, #32
+ 0xc8,0xef,0x30,0x08 = vshrn.i16 d16, q8, #8
+ 0xd0,0xef,0x30,0x08 = vshrn.i32 d16, q8, #16
+ 0xe0,0xef,0x30,0x08 = vshrn.i64 d16, q8, #32
+ 0x40,0xef,0xa1,0x05 = vrshl.s8 d16, d17, d16
+ 0x50,0xef,0xa1,0x05 = vrshl.s16 d16, d17, d16
+ 0x60,0xef,0xa1,0x05 = vrshl.s32 d16, d17, d16
+ 0x70,0xef,0xa1,0x05 = vrshl.s64 d16, d17, d16
+ 0x40,0xff,0xa1,0x05 = vrshl.u8 d16, d17, d16
+ 0x50,0xff,0xa1,0x05 = vrshl.u16 d16, d17, d16
+ 0x60,0xff,0xa1,0x05 = vrshl.u32 d16, d17, d16
+ 0x70,0xff,0xa1,0x05 = vrshl.u64 d16, d17, d16
+ 0x40,0xef,0xe2,0x05 = vrshl.s8 q8, q9, q8
+ 0x50,0xef,0xe2,0x05 = vrshl.s16 q8, q9, q8
+ 0x60,0xef,0xe2,0x05 = vrshl.s32 q8, q9, q8
+ 0x70,0xef,0xe2,0x05 = vrshl.s64 q8, q9, q8
+ 0x40,0xff,0xe2,0x05 = vrshl.u8 q8, q9, q8
+ 0x50,0xff,0xe2,0x05 = vrshl.u16 q8, q9, q8
+ 0x60,0xff,0xe2,0x05 = vrshl.u32 q8, q9, q8
+ 0x70,0xff,0xe2,0x05 = vrshl.u64 q8, q9, q8
+ 0xc8,0xef,0x30,0x02 = vrshr.s8 d16, d16, #8
+ 0xd0,0xef,0x30,0x02 = vrshr.s16 d16, d16, #16
+ 0xe0,0xef,0x30,0x02 = vrshr.s32 d16, d16, #32
+ 0xc0,0xef,0xb0,0x02 = vrshr.s64 d16, d16, #64
+ 0xc8,0xff,0x30,0x02 = vrshr.u8 d16, d16, #8
+ 0xd0,0xff,0x30,0x02 = vrshr.u16 d16, d16, #16
+ 0xe0,0xff,0x30,0x02 = vrshr.u32 d16, d16, #32
+ 0xc0,0xff,0xb0,0x02 = vrshr.u64 d16, d16, #64
+ 0xc8,0xef,0x70,0x02 = vrshr.s8 q8, q8, #8
+ 0xd0,0xef,0x70,0x02 = vrshr.s16 q8, q8, #16
+ 0xe0,0xef,0x70,0x02 = vrshr.s32 q8, q8, #32
+ 0xc0,0xef,0xf0,0x02 = vrshr.s64 q8, q8, #64
+ 0xc8,0xff,0x70,0x02 = vrshr.u8 q8, q8, #8
+ 0xd0,0xff,0x70,0x02 = vrshr.u16 q8, q8, #16
+ 0xe0,0xff,0x70,0x02 = vrshr.u32 q8, q8, #32
+ 0xc0,0xff,0xf0,0x02 = vrshr.u64 q8, q8, #64
+ 0xc8,0xef,0x70,0x08 = vrshrn.i16 d16, q8, #8
+ 0xd0,0xef,0x70,0x08 = vrshrn.i32 d16, q8, #16
+ 0xe0,0xef,0x70,0x08 = vrshrn.i64 d16, q8, #32
@@ -0,0 +1,97 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0xc8,0xef,0x30,0x11 = vsra.s8 d17, d16, #8
+ 0x90,0xef,0x1e,0xf1 = vsra.s16 d15, d14, #16
+ 0xa0,0xef,0x1c,0xd1 = vsra.s32 d13, d12, #32
+ 0x80,0xef,0x9a,0xb1 = vsra.s64 d11, d10, #64
+ 0x88,0xef,0x54,0xe1 = vsra.s8 q7, q2, #8
+ 0x90,0xef,0x5c,0x61 = vsra.s16 q3, q6, #16
+ 0xe0,0xef,0x5a,0x21 = vsra.s32 q9, q5, #32
+ 0xc0,0xef,0xd8,0x01 = vsra.s64 q8, q4, #64
+ 0xc8,0xff,0x30,0x11 = vsra.u8 d17, d16, #8
+ 0x95,0xff,0x1e,0xb1 = vsra.u16 d11, d14, #11
+ 0xaa,0xff,0x1f,0xc1 = vsra.u32 d12, d15, #22
+ 0x8a,0xff,0xb0,0xd1 = vsra.u64 d13, d16, #54
+ 0x88,0xff,0x5e,0x21 = vsra.u8 q1, q7, #8
+ 0x9a,0xff,0x5e,0x41 = vsra.u16 q2, q7, #6
+ 0xab,0xff,0x5c,0x61 = vsra.u32 q3, q6, #21
+ 0xa7,0xff,0xda,0x81 = vsra.u64 q4, q5, #25
+ 0xc8,0xef,0x30,0x01 = vsra.s8 d16, d16, #8
+ 0x90,0xef,0x1e,0xe1 = vsra.s16 d14, d14, #16
+ 0xa0,0xef,0x1c,0xc1 = vsra.s32 d12, d12, #32
+ 0x80,0xef,0x9a,0xa1 = vsra.s64 d10, d10, #64
+ 0x88,0xef,0x54,0x41 = vsra.s8 q2, q2, #8
+ 0x90,0xef,0x5c,0xc1 = vsra.s16 q6, q6, #16
+ 0xa0,0xef,0x5a,0xa1 = vsra.s32 q5, q5, #32
+ 0x80,0xef,0xd8,0x81 = vsra.s64 q4, q4, #64
+ 0xc8,0xff,0x30,0x01 = vsra.u8 d16, d16, #8
+ 0x95,0xff,0x1e,0xe1 = vsra.u16 d14, d14, #11
+ 0xaa,0xff,0x1f,0xf1 = vsra.u32 d15, d15, #22
+ 0xca,0xff,0xb0,0x01 = vsra.u64 d16, d16, #54
+ 0x88,0xff,0x5e,0xe1 = vsra.u8 q7, q7, #8
+ 0x9a,0xff,0x5e,0xe1 = vsra.u16 q7, q7, #6
+ 0xab,0xff,0x5c,0xc1 = vsra.u32 q6, q6, #21
+ 0xa7,0xff,0xda,0xa1 = vsra.u64 q5, q5, #25
+ 0x88,0xef,0x3a,0x53 = vrsra.s8 d5, d26, #8
+ 0x90,0xef,0x39,0x63 = vrsra.s16 d6, d25, #16
+ 0xa0,0xef,0x38,0x73 = vrsra.s32 d7, d24, #32
+ 0x80,0xef,0xb7,0xe3 = vrsra.s64 d14, d23, #64
+ 0x88,0xff,0x36,0xf3 = vrsra.u8 d15, d22, #8
+ 0xd0,0xff,0x35,0x03 = vrsra.u16 d16, d21, #16
+ 0xe0,0xff,0x34,0x13 = vrsra.u32 d17, d20, #32
+ 0xc0,0xff,0xb3,0x23 = vrsra.u64 d18, d19, #64
+ 0x88,0xef,0x54,0x23 = vrsra.s8 q1, q2, #8
+ 0x90,0xef,0x56,0x43 = vrsra.s16 q2, q3, #16
+ 0xa0,0xef,0x58,0x63 = vrsra.s32 q3, q4, #32
+ 0x80,0xef,0xda,0x83 = vrsra.s64 q4, q5, #64
+ 0x88,0xff,0x5c,0xa3 = vrsra.u8 q5, q6, #8
+ 0x90,0xff,0x5e,0xc3 = vrsra.u16 q6, q7, #16
+ 0xa0,0xff,0x70,0xe3 = vrsra.u32 q7, q8, #32
+ 0xc0,0xff,0xf2,0x03 = vrsra.u64 q8, q9, #64
+ 0xc8,0xef,0x3a,0xa3 = vrsra.s8 d26, d26, #8
+ 0xd0,0xef,0x39,0x93 = vrsra.s16 d25, d25, #16
+ 0xe0,0xef,0x38,0x83 = vrsra.s32 d24, d24, #32
+ 0xc0,0xef,0xb7,0x73 = vrsra.s64 d23, d23, #64
+ 0xc8,0xff,0x36,0x63 = vrsra.u8 d22, d22, #8
+ 0xd0,0xff,0x35,0x53 = vrsra.u16 d21, d21, #16
+ 0xe0,0xff,0x34,0x43 = vrsra.u32 d20, d20, #32
+ 0xc0,0xff,0xb3,0x33 = vrsra.u64 d19, d19, #64
+ 0x88,0xef,0x54,0x43 = vrsra.s8 q2, q2, #8
+ 0x90,0xef,0x56,0x63 = vrsra.s16 q3, q3, #16
+ 0xa0,0xef,0x58,0x83 = vrsra.s32 q4, q4, #32
+ 0x80,0xef,0xda,0xa3 = vrsra.s64 q5, q5, #64
+ 0x88,0xff,0x5c,0xc3 = vrsra.u8 q6, q6, #8
+ 0x90,0xff,0x5e,0xe3 = vrsra.u16 q7, q7, #16
+ 0xe0,0xff,0x70,0x03 = vrsra.u32 q8, q8, #32
+ 0xc0,0xff,0xf2,0x23 = vrsra.u64 q9, q9, #64
+ 0x8f,0xff,0x1c,0xb5 = vsli.8 d11, d12, #7
+ 0x9f,0xff,0x1d,0xc5 = vsli.16 d12, d13, #15
+ 0xbf,0xff,0x1e,0xd5 = vsli.32 d13, d14, #31
+ 0xbf,0xff,0x9f,0xe5 = vsli.64 d14, d15, #63
+ 0x8f,0xff,0x70,0x25 = vsli.8 q1, q8, #7
+ 0x9f,0xff,0x5e,0x45 = vsli.16 q2, q7, #15
+ 0xbf,0xff,0x58,0x65 = vsli.32 q3, q4, #31
+ 0xbf,0xff,0xda,0x85 = vsli.64 q4, q5, #63
+ 0xc8,0xff,0x1b,0xc4 = vsri.8 d28, d11, #8
+ 0xd0,0xff,0x1c,0xa4 = vsri.16 d26, d12, #16
+ 0xe0,0xff,0x1d,0x84 = vsri.32 d24, d13, #32
+ 0xc0,0xff,0x9e,0x54 = vsri.64 d21, d14, #64
+ 0x88,0xff,0x70,0x24 = vsri.8 q1, q8, #8
+ 0x90,0xff,0x54,0xa4 = vsri.16 q5, q2, #16
+ 0xa0,0xff,0x58,0xe4 = vsri.32 q7, q4, #32
+ 0xc0,0xff,0xdc,0x24 = vsri.64 q9, q6, #64
+ 0x8f,0xff,0x1c,0xc5 = vsli.8 d12, d12, #7
+ 0x9f,0xff,0x1d,0xd5 = vsli.16 d13, d13, #15
+ 0xbf,0xff,0x1e,0xe5 = vsli.32 d14, d14, #31
+ 0xbf,0xff,0x9f,0xf5 = vsli.64 d15, d15, #63
+ 0xcf,0xff,0x70,0x05 = vsli.8 q8, q8, #7
+ 0x9f,0xff,0x5e,0xe5 = vsli.16 q7, q7, #15
+ 0xbf,0xff,0x58,0x85 = vsli.32 q4, q4, #31
+ 0xbf,0xff,0xda,0xa5 = vsli.64 q5, q5, #63
+ 0x88,0xff,0x1b,0xb4 = vsri.8 d11, d11, #8
+ 0x90,0xff,0x1c,0xc4 = vsri.16 d12, d12, #16
+ 0xa0,0xff,0x1d,0xd4 = vsri.32 d13, d13, #32
+ 0x80,0xff,0x9e,0xe4 = vsri.64 d14, d14, #64
+ 0xc8,0xff,0x70,0x04 = vsri.8 q8, q8, #8
+ 0x90,0xff,0x54,0x44 = vsri.16 q2, q2, #16
+ 0xa0,0xff,0x58,0x84 = vsri.32 q4, q4, #32
+ 0x80,0xff,0xdc,0xc4 = vsri.64 q6, q6, #64
@@ -0,0 +1,23 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0xf1,0xef,0xa0,0x03 = vext.8 d16, d17, d16, #3
+ 0xf1,0xef,0xa0,0x05 = vext.8 d16, d17, d16, #5
+ 0xf2,0xef,0xe0,0x03 = vext.8 q8, q9, q8, #3
+ 0xf2,0xef,0xe0,0x07 = vext.8 q8, q9, q8, #7
+ 0xf1,0xef,0xa0,0x06 = vext.16 d16, d17, d16, #3
+ 0xf2,0xef,0xe0,0x0c = vext.32 q8, q9, q8, #3
+ 0xf2,0xff,0xa0,0x10 = vtrn.8 d17, d16
+ 0xf6,0xff,0xa0,0x10 = vtrn.16 d17, d16
+ 0xfa,0xff,0xa0,0x10 = vtrn.32 d17, d16
+ 0xf2,0xff,0xe0,0x20 = vtrn.8 q9, q8
+ 0xf6,0xff,0xe0,0x20 = vtrn.16 q9, q8
+ 0xfa,0xff,0xe0,0x20 = vtrn.32 q9, q8
+ 0xf2,0xff,0x20,0x11 = vuzp.8 d17, d16
+ 0xf6,0xff,0x20,0x11 = vuzp.16 d17, d16
+ 0xf2,0xff,0x60,0x21 = vuzp.8 q9, q8
+ 0xf6,0xff,0x60,0x21 = vuzp.16 q9, q8
+ 0xfa,0xff,0x60,0x21 = vuzp.32 q9, q8
+ 0xf2,0xff,0xa0,0x11 = vzip.8 d17, d16
+ 0xf6,0xff,0xa0,0x11 = vzip.16 d17, d16
+ 0xf2,0xff,0xe0,0x21 = vzip.8 q9, q8
+ 0xf6,0xff,0xe0,0x21 = vzip.16 q9, q8
+ 0xfa,0xff,0xe0,0x21 = vzip.32 q9, q8
@@ -0,0 +1,9 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0xf1,0xff,0xa0,0x08 = vtbl.8 d16, {d17}, d16
+ 0xf0,0xff,0xa2,0x09 = vtbl.8 d16, {d16, d17}, d18
+ 0xf0,0xff,0xa4,0x0a = vtbl.8 d16, {d16, d17, d18}, d20
+ 0xf0,0xff,0xa4,0x0b = vtbl.8 d16, {d16, d17, d18, d19}, d20
+ 0xf0,0xff,0xe1,0x28 = vtbx.8 d18, {d16}, d17
+ 0xf0,0xff,0xe2,0x39 = vtbx.8 d19, {d16, d17}, d18
+ 0xf0,0xff,0xe5,0x4a = vtbx.8 d20, {d16, d17, d18}, d21
+ 0xf0,0xff,0xe5,0x4b = vtbx.8 d20, {d16, d17, d18, d19}, d21
@@ -0,0 +1,51 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0x60,0xf9,0x1f,0x07 = vld1.8 {d16}, [r0:64]
+ 0x60,0xf9,0x4f,0x07 = vld1.16 {d16}, [r0]
+ 0x60,0xf9,0x8f,0x07 = vld1.32 {d16}, [r0]
+ 0x60,0xf9,0xcf,0x07 = vld1.64 {d16}, [r0]
+ 0x60,0xf9,0x1f,0x0a = vld1.8 {d16, d17}, [r0:64]
+ 0x60,0xf9,0x6f,0x0a = vld1.16 {d16, d17}, [r0:128]
+ 0x60,0xf9,0x8f,0x0a = vld1.32 {d16, d17}, [r0]
+ 0x60,0xf9,0xcf,0x0a = vld1.64 {d16, d17}, [r0]
+ 0x60,0xf9,0x1f,0x08 = vld2.8 {d16, d17}, [r0:64]
+ 0x60,0xf9,0x6f,0x08 = vld2.16 {d16, d17}, [r0:128]
+ 0x60,0xf9,0x8f,0x08 = vld2.32 {d16, d17}, [r0]
+ 0x60,0xf9,0x1f,0x03 = vld2.8 {d16, d17, d18, d19}, [r0:64]
+ 0x60,0xf9,0x6f,0x03 = vld2.16 {d16, d17, d18, d19}, [r0:128]
+ 0x60,0xf9,0xbf,0x03 = vld2.32 {d16, d17, d18, d19}, [r0:256]
+ 0x60,0xf9,0x1f,0x04 = vld3.8 {d16, d17, d18}, [r0:64]
+ 0x60,0xf9,0x4f,0x04 = vld3.16 {d16, d17, d18}, [r0]
+ 0x60,0xf9,0x8f,0x04 = vld3.32 {d16, d17, d18}, [r0]
+ 0x60,0xf9,0x1d,0x05 = vld3.8 {d16, d18, d20}, [r0:64]!
+ 0x60,0xf9,0x1d,0x15 = vld3.8 {d17, d19, d21}, [r0:64]!
+ 0x60,0xf9,0x4d,0x05 = vld3.16 {d16, d18, d20}, [r0]!
+ 0x60,0xf9,0x4d,0x15 = vld3.16 {d17, d19, d21}, [r0]!
+ 0x60,0xf9,0x8d,0x05 = vld3.32 {d16, d18, d20}, [r0]!
+ 0x60,0xf9,0x8d,0x15 = vld3.32 {d17, d19, d21}, [r0]!
+ 0x60,0xf9,0x1f,0x00 = vld4.8 {d16, d17, d18, d19}, [r0:64]
+ 0x60,0xf9,0x6f,0x00 = vld4.16 {d16, d17, d18, d19}, [r0:128]
+ 0x60,0xf9,0xbf,0x00 = vld4.32 {d16, d17, d18, d19}, [r0:256]
+ 0x60,0xf9,0x3d,0x01 = vld4.8 {d16, d18, d20, d22}, [r0:256]!
+ 0x60,0xf9,0x3d,0x11 = vld4.8 {d17, d19, d21, d23}, [r0:256]!
+ 0x60,0xf9,0x4d,0x01 = vld4.16 {d16, d18, d20, d22}, [r0]!
+ 0x60,0xf9,0x4d,0x11 = vld4.16 {d17, d19, d21, d23}, [r0]!
+ 0x60,0xf9,0x8d,0x01 = vld4.32 {d16, d18, d20, d22}, [r0]!
+ 0x60,0xf9,0x8d,0x11 = vld4.32 {d17, d19, d21, d23}, [r0]!
+ 0xe0,0xf9,0x6f,0x00 = vld1.8 {d16[3]}, [r0]
+ 0xe0,0xf9,0x9f,0x04 = vld1.16 {d16[2]}, [r0:16]
+ 0xe0,0xf9,0xbf,0x08 = vld1.32 {d16[1]}, [r0:32]
+ 0xe0,0xf9,0x3f,0x01 = vld2.8 {d16[1], d17[1]}, [r0:16]
+ 0xe0,0xf9,0x5f,0x05 = vld2.16 {d16[1], d17[1]}, [r0:32]
+ 0xe0,0xf9,0x8f,0x09 = vld2.32 {d16[1], d17[1]}, [r0]
+ 0xe0,0xf9,0x6f,0x15 = vld2.16 {d17[1], d19[1]}, [r0]
+ 0xe0,0xf9,0x5f,0x19 = vld2.32 {d17[0], d19[0]}, [r0:64]
+ 0xe0,0xf9,0x2f,0x02 = vld3.8 {d16[1], d17[1], d18[1]}, [r0]
+ 0xe0,0xf9,0x4f,0x06 = vld3.16 {d16[1], d17[1], d18[1]}, [r0]
+ 0xe0,0xf9,0x8f,0x0a = vld3.32 {d16[1], d17[1], d18[1]}, [r0]
+ 0xe0,0xf9,0x6f,0x06 = vld3.16 {d16[1], d18[1], d20[1]}, [r0]
+ 0xe0,0xf9,0xcf,0x1a = vld3.32 {d17[1], d19[1], d21[1]}, [r0]
+ 0xe0,0xf9,0x3f,0x03 = vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [r0:32]
+ 0xe0,0xf9,0x4f,0x07 = vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [r0]
+ 0xe0,0xf9,0xaf,0x0b = vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0:128]
+ 0xe0,0xf9,0x7f,0x07 = vld4.16 {d16[1], d18[1], d20[1], d22[1]}, [r0:64]
+ 0xe0,0xf9,0x4f,0x1b = vld4.32 {d17[0], d19[0], d21[0], d23[0]}, [r0]
@@ -0,0 +1,48 @@
+ # CS_ARCH_ARM, CS_MODE_THUMB, None
+ 0x40,0xf9,0x1f,0x07 = vst1.8 {d16}, [r0:64]
+ 0x40,0xf9,0x4f,0x07 = vst1.16 {d16}, [r0]
+ 0x40,0xf9,0x8f,0x07 = vst1.32 {d16}, [r0]
+ 0x40,0xf9,0xcf,0x07 = vst1.64 {d16}, [r0]
+ 0x40,0xf9,0x1f,0x0a = vst1.8 {d16, d17}, [r0:64]
+ 0x40,0xf9,0x6f,0x0a = vst1.16 {d16, d17}, [r0:128]
+ 0x40,0xf9,0x8f,0x0a = vst1.32 {d16, d17}, [r0]
+ 0x40,0xf9,0xcf,0x0a = vst1.64 {d16, d17}, [r0]
+ 0x40,0xf9,0x1f,0x08 = vst2.8 {d16, d17}, [r0:64]
+ 0x40,0xf9,0x6f,0x08 = vst2.16 {d16, d17}, [r0:128]
+ 0x40,0xf9,0x8f,0x08 = vst2.32 {d16, d17}, [r0]
+ 0x40,0xf9,0x1f,0x03 = vst2.8 {d16, d17, d18, d19}, [r0:64]
+ 0x40,0xf9,0x6f,0x03 = vst2.16 {d16, d17, d18, d19}, [r0:128]
+ 0x40,0xf9,0xbf,0x03 = vst2.32 {d16, d17, d18, d19}, [r0:256]
+ 0x40,0xf9,0x1f,0x04 = vst3.8 {d16, d17, d18}, [r0:64]
+ 0x40,0xf9,0x4f,0x04 = vst3.16 {d16, d17, d18}, [r0]
+ 0x40,0xf9,0x8f,0x04 = vst3.32 {d16, d17, d18}, [r0]
+ 0x40,0xf9,0x1d,0x05 = vst3.8 {d16, d18, d20}, [r0:64]!
+ 0x40,0xf9,0x1d,0x15 = vst3.8 {d17, d19, d21}, [r0:64]!
+ 0x40,0xf9,0x4d,0x05 = vst3.16 {d16, d18, d20}, [r0]!
+ 0x40,0xf9,0x4d,0x15 = vst3.16 {d17, d19, d21}, [r0]!
+ 0x40,0xf9,0x8d,0x05 = vst3.32 {d16, d18, d20}, [r0]!
+ 0x40,0xf9,0x8d,0x15 = vst3.32 {d17, d19, d21}, [r0]!
+ 0x40,0xf9,0x1f,0x00 = vst4.8 {d16, d17, d18, d19}, [r0:64]
+ 0x40,0xf9,0x6f,0x00 = vst4.16 {d16, d17, d18, d19}, [r0:128]
+ 0x40,0xf9,0x3d,0x01 = vst4.8 {d16, d18, d20, d22}, [r0:256]!
+ 0x40,0xf9,0x3d,0x11 = vst4.8 {d17, d19, d21, d23}, [r0:256]!
+ 0x40,0xf9,0x4d,0x01 = vst4.16 {d16, d18, d20, d22}, [r0]!
+ 0x40,0xf9,0x4d,0x11 = vst4.16 {d17, d19, d21, d23}, [r0]!
+ 0x40,0xf9,0x8d,0x01 = vst4.32 {d16, d18, d20, d22}, [r0]!
+ 0x40,0xf9,0x8d,0x11 = vst4.32 {d17, d19, d21, d23}, [r0]!
+ 0xc0,0xf9,0x3f,0x01 = vst2.8 {d16[1], d17[1]}, [r0:16]
+ 0xc0,0xf9,0x5f,0x05 = vst2.16 {d16[1], d17[1]}, [r0:32]
+ 0xc0,0xf9,0x8f,0x09 = vst2.32 {d16[1], d17[1]}, [r0]
+ 0xc0,0xf9,0x6f,0x15 = vst2.16 {d17[1], d19[1]}, [r0]
+ 0xc0,0xf9,0x5f,0x19 = vst2.32 {d17[0], d19[0]}, [r0:64]
+ 0xc0,0xf9,0x2f,0x02 = vst3.8 {d16[1], d17[1], d18[1]}, [r0]
+ 0xc0,0xf9,0x4f,0x06 = vst3.16 {d16[1], d17[1], d18[1]}, [r0]
+ 0xc0,0xf9,0x8f,0x0a = vst3.32 {d16[1], d17[1], d18[1]}, [r0]
+ 0xc0,0xf9,0xaf,0x16 = vst3.16 {d17[2], d19[2], d21[2]}, [r0]
+ 0xc0,0xf9,0x4f,0x0a = vst3.32 {d16[0], d18[0], d20[0]}, [r0]
+ 0xc0,0xf9,0x3f,0x03 = vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r0:32]
+ 0xc0,0xf9,0x4f,0x07 = vst4.16 {d16[1], d17[1], d18[1], d19[1]}, [r0]
+ 0xc0,0xf9,0xaf,0x0b = vst4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0:128]
+ 0xc0,0xf9,0xff,0x17 = vst4.16 {d17[3], d19[3], d21[3], d23[3]}, [r0:64]
+ 0xc0,0xf9,0x4f,0x1b = vst4.32 {d17[0], d19[0], d21[0], d23[0]}, [r0]
+ 0x04,0xf9,0x0f,0x89 = vst2.8 {d8, d10}, [r4]
@@ -0,0 +1,157 @@
+ # CS_ARCH_ARM, CS_MODE_ARM, None
+ 0xa0,0x0b,0x71,0xee = vadd.f64 d16, d17, d16
+ 0x80,0x0a,0x30,0xee = vadd.f32 s0, s1, s0
+ 0xe0,0x0b,0x71,0xee = vsub.f64 d16, d17, d16
+ 0xc0,0x0a,0x30,0xee = vsub.f32 s0, s1, s0
+ 0xa0,0x0b,0xc1,0xee = vdiv.f64 d16, d17, d16
+ 0x80,0x0a,0x80,0xee = vdiv.f32 s0, s1, s0
+ 0xa3,0x2a,0xc2,0xee = vdiv.f32 s5, s5, s7
+ 0x07,0x5b,0x85,0xee = vdiv.f64 d5, d5, d7
+ 0xa0,0x0b,0x61,0xee = vmul.f64 d16, d17, d16
+ 0xa1,0x4b,0x64,0xee = vmul.f64 d20, d20, d17
+ 0x80,0x0a,0x20,0xee = vmul.f32 s0, s1, s0
+ 0xaa,0x5a,0x65,0xee = vmul.f32 s11, s11, s21
+ 0xe0,0x0b,0x61,0xee = vnmul.f64 d16, d17, d16
+ 0xc0,0x0a,0x20,0xee = vnmul.f32 s0, s1, s0
+ 0xe0,0x1b,0xf4,0xee = vcmpe.f64 d17, d16
+ 0xc0,0x0a,0xf4,0xee = vcmpe.f32 s1, s0
+ 0xc0,0x0b,0xf5,0xee = vcmpe.f64 d16, #0
+ 0xc0,0x0a,0xb5,0xee = vcmpe.f32 s0, #0
+ 0xe0,0x0b,0xf0,0xee = vabs.f64 d16, d16
+ 0xc0,0x0a,0xb0,0xee = vabs.f32 s0, s0
+ 0xe0,0x0b,0xb7,0xee = vcvt.f32.f64 s0, d16
+ 0xc0,0x0a,0xf7,0xee = vcvt.f64.f32 d16, s0
+ 0x60,0x0b,0xf1,0xee = vneg.f64 d16, d16
+ 0x40,0x0a,0xb1,0xee = vneg.f32 s0, s0
+ 0xe0,0x0b,0xf1,0xee = vsqrt.f64 d16, d16
+ 0xc0,0x0a,0xb1,0xee = vsqrt.f32 s0, s0
+ 0xc0,0x0b,0xf8,0xee = vcvt.f64.s32 d16, s0
+ 0xc0,0x0a,0xb8,0xee = vcvt.f32.s32 s0, s0
+ 0x40,0x0b,0xf8,0xee = vcvt.f64.u32 d16, s0
+ 0x40,0x0a,0xb8,0xee = vcvt.f32.u32 s0, s0
+ 0xe0,0x0b,0xbd,0xee = vcvt.s32.f64 s0, d16
+ 0xc0,0x0a,0xbd,0xee = vcvt.s32.f32 s0, s0
+ 0xe0,0x0b,0xbc,0xee = vcvt.u32.f64 s0, d16
+ 0xc0,0x0a,0xbc,0xee = vcvt.u32.f32 s0, s0
+ 0xa1,0x0b,0x42,0xee = vmla.f64 d16, d18, d17
+ 0x00,0x0a,0x41,0xee = vmla.f32 s1, s2, s0
+ 0xe1,0x0b,0x42,0xee = vmls.f64 d16, d18, d17
+ 0x40,0x0a,0x41,0xee = vmls.f32 s1, s2, s0
+ 0xe1,0x0b,0x52,0xee = vnmla.f64 d16, d18, d17
+ 0x40,0x0a,0x51,0xee = vnmla.f32 s1, s2, s0
+ 0xa1,0x0b,0x52,0xee = vnmls.f64 d16, d18, d17
+ 0x00,0x0a,0x51,0xee = vnmls.f32 s1, s2, s0
+ 0x10,0xfa,0xf1,0xee = vmrs APSR_nzcv, fpscr
+ 0x10,0xfa,0xf1,0xee = vmrs APSR_nzcv, fpscr
+ 0x10,0xfa,0xf1,0xee = vmrs APSR_nzcv, fpscr
+ 0x10,0x2a,0xf0,0xee = vmrs r2, fpsid
+ 0x10,0x3a,0xf0,0xee = vmrs r3, fpsid
+ 0x10,0x4a,0xf7,0xee = vmrs r4, mvfr0
+ 0x10,0x5a,0xf6,0xee = vmrs r5, mvfr1
+ 0x60,0x0b,0xf1,0x1e = vnegne.f64 d16, d16
+ 0x10,0x0a,0x00,0x1e = vmovne s0, r0
+ 0x10,0x1a,0x00,0x0e = vmoveq s0, r1
+ 0x10,0x1a,0x11,0xee = vmov r1, s2
+ 0x10,0x3a,0x02,0xee = vmov s4, r3
+ 0x12,0x1b,0x55,0xec = vmov r1, r5, d2
+ 0x14,0x3b,0x49,0xec = vmov d4, r3, r9
+ 0x10,0x0a,0xf1,0xee = vmrs r0, fpscr
+ 0x10,0x0a,0xf8,0xee = vmrs r0, fpexc
+ 0x10,0x0a,0xf0,0xee = vmrs r0, fpsid
+ 0x10,0x1a,0xf9,0xee = vmrs r1, fpinst
+ 0x10,0x8a,0xfa,0xee = vmrs r8, fpinst2
+ 0x10,0x0a,0xe1,0xee = vmsr fpscr, r0
+ 0x10,0x0a,0xe8,0xee = vmsr fpexc, r0
+ 0x10,0x0a,0xe0,0xee = vmsr fpsid, r0
+ 0x10,0x3a,0xe9,0xee = vmsr fpinst, r3
+ 0x10,0x4a,0xea,0xee = vmsr fpinst2, r4
+ 0x08,0x0b,0xf0,0xee = vmov.f64 d16, #3.000000e+00
+ 0x08,0x0a,0xb0,0xee = vmov.f32 s0, #3.000000e+00
+ 0x08,0x0b,0xf8,0xee = vmov.f64 d16, #-3.000000e+00
+ 0x08,0x0a,0xb8,0xee = vmov.f32 s0, #-3.000000e+00
+ 0x10,0x0a,0x00,0xee = vmov s0, r0
+ 0x90,0x1a,0x00,0xee = vmov s1, r1
+ 0x10,0x2a,0x01,0xee = vmov s2, r2
+ 0x90,0x3a,0x01,0xee = vmov s3, r3
+ 0x10,0x0a,0x10,0xee = vmov r0, s0
+ 0x90,0x1a,0x10,0xee = vmov r1, s1
+ 0x10,0x2a,0x11,0xee = vmov r2, s2
+ 0x90,0x3a,0x11,0xee = vmov r3, s3
+ 0x30,0x0b,0x51,0xec = vmov r0, r1, d16
+ 0x31,0x1a,0x42,0xec = vmov s3, s4, r1, r2
+ 0x11,0x1a,0x42,0xec = vmov s2, s3, r1, r2
+ 0x31,0x1a,0x52,0xec = vmov r1, r2, s3, s4
+ 0x11,0x1a,0x52,0xec = vmov r1, r2, s2, s3
+ 0x1f,0x1b,0x42,0xec = vmov d15, r1, r2
+ 0x30,0x1b,0x42,0xec = vmov d16, r1, r2
+ 0x1f,0x1b,0x52,0xec = vmov r1, r2, d15
+ 0x30,0x1b,0x52,0xec = vmov r1, r2, d16
+ 0x00,0x1b,0xd0,0xed = vldr d17, [r0]
+ 0x00,0x0a,0x9e,0xed = vldr s0, [lr]
+ 0x00,0x0b,0x9e,0xed = vldr d0, [lr]
+ 0x08,0x1b,0x92,0xed = vldr d1, [r2, #32]
+ 0x08,0x1b,0x12,0xed = vldr d1, [r2, #-32]
+ 0x00,0x2b,0x93,0xed = vldr d2, [r3]
+ 0x00,0x3b,0x9f,0xed = vldr d3, [pc]
+ 0x00,0x3b,0x9f,0xed = vldr d3, [pc]
+ 0x00,0x3b,0x1f,0xed = vldr d3, [pc, #-0]
+ 0x00,0x6a,0xd0,0xed = vldr s13, [r0]
+ 0x08,0x0a,0xd2,0xed = vldr s1, [r2, #32]
+ 0x08,0x0a,0x52,0xed = vldr s1, [r2, #-32]
+ 0x00,0x1a,0x93,0xed = vldr s2, [r3]
+ 0x00,0x2a,0xdf,0xed = vldr s5, [pc]
+ 0x00,0x2a,0xdf,0xed = vldr s5, [pc]
+ 0x00,0x2a,0x5f,0xed = vldr s5, [pc, #-0]
+ 0x00,0x4b,0x81,0xed = vstr d4, [r1]
+ 0x06,0x4b,0x81,0xed = vstr d4, [r1, #24]
+ 0x06,0x4b,0x01,0xed = vstr d4, [r1, #-24]
+ 0x00,0x0a,0x8e,0xed = vstr s0, [lr]
+ 0x00,0x0b,0x8e,0xed = vstr d0, [lr]
+ 0x00,0x2a,0x81,0xed = vstr s4, [r1]
+ 0x06,0x2a,0x81,0xed = vstr s4, [r1, #24]
+ 0x06,0x2a,0x01,0xed = vstr s4, [r1, #-24]
+ 0x0c,0x2b,0x91,0xec = vldmia r1, {d2, d3, d4, d5, d6, d7}
+ 0x06,0x1a,0x91,0xec = vldmia r1, {s2, s3, s4, s5, s6, s7}
+ 0x0c,0x2b,0x81,0xec = vstmia r1, {d2, d3, d4, d5, d6, d7}
+ 0x06,0x1a,0x81,0xec = vstmia r1, {s2, s3, s4, s5, s6, s7}
+ 0x10,0x8b,0x2d,0xed = vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+ 0x07,0x0b,0xb5,0xec = fldmiax r5!, {d0, d1, d2}
+ 0x05,0x4b,0x90,0x0c = fldmiaxeq r0, {d4, d5}
+ 0x07,0x4b,0x35,0x1d = fldmdbxne r5!, {d4, d5, d6}
+ 0x11,0x0b,0xa5,0xec = fstmiax r5!, {d0, d1, d2, d3, d4, d5, d6, d7}
+ 0x05,0x8b,0x84,0x0c = fstmiaxeq r4, {d8, d9}
+ 0x07,0x2b,0x27,0x1d = fstmdbxne r7!, {d2, d3, d4}
+ 0x40,0x0b,0xbd,0xee = vcvtr.s32.f64 s0, d0
+ 0x60,0x0a,0xbd,0xee = vcvtr.s32.f32 s0, s1
+ 0x40,0x0b,0xbc,0xee = vcvtr.u32.f64 s0, d0
+ 0x60,0x0a,0xbc,0xee = vcvtr.u32.f32 s0, s1
+ 0x90,0x8a,0x00,0xee = vmov s1, r8
+ 0x10,0x4a,0x01,0xee = vmov s2, r4
+ 0x90,0x6a,0x01,0xee = vmov s3, r6
+ 0x10,0x1a,0x02,0xee = vmov s4, r1
+ 0x90,0x2a,0x02,0xee = vmov s5, r2
+ 0x10,0x3a,0x03,0xee = vmov s6, r3
+ 0x10,0x1a,0x14,0xee = vmov r1, s8
+ 0x10,0x2a,0x12,0xee = vmov r2, s4
+ 0x10,0x3a,0x13,0xee = vmov r3, s6
+ 0x90,0x4a,0x10,0xee = vmov r4, s1
+ 0x10,0x5a,0x11,0xee = vmov r5, s2
+ 0x90,0x6a,0x11,0xee = vmov r6, s3
+ 0xc6,0x0a,0xbb,0xee = vcvt.f32.u32 s0, s0, #20
+ 0xc0,0x0b,0xba,0xee = vcvt.f64.s32 d0, d0, #32
+ 0x67,0x0a,0xbb,0xee = vcvt.f32.u16 s0, s0, #1
+ 0x40,0x0b,0xba,0xee = vcvt.f64.s16 d0, d0, #16
+ 0xc6,0x0a,0xfa,0xee = vcvt.f32.s32 s1, s1, #20
+ 0xc0,0x4b,0xfb,0xee = vcvt.f64.u32 d20, d20, #32
+ 0x67,0x8a,0xfa,0xee = vcvt.f32.s16 s17, s17, #1
+ 0x40,0x7b,0xfb,0xee = vcvt.f64.u16 d23, d23, #16
+ 0xc6,0x6a,0xbf,0xee = vcvt.u32.f32 s12, s12, #20
+ 0xc0,0x2b,0xbe,0xee = vcvt.s32.f64 d2, d2, #32
+ 0x67,0xea,0xbf,0xee = vcvt.u16.f32 s28, s28, #1
+ 0x40,0xfb,0xbe,0xee = vcvt.s16.f64 d15, d15, #16
+ 0xc6,0x0a,0xfe,0xee = vcvt.s32.f32 s1, s1, #20
+ 0xc0,0x4b,0xff,0xee = vcvt.u32.f64 d20, d20, #32
+ 0x67,0x8a,0xfe,0xee = vcvt.s16.f32 s17, s17, #1
+ 0x40,0x7b,0xff,0xee = vcvt.u16.f64 d23, d23, #16
+ 0x10,0x40,0x80,0xf2 = vmov.i32 d4, #0x0
+ 0x12,0x46,0x84,0xf2 = vmov.i32 d4, #0x42000000