libv8-sgonyea 3.3.10

Sign up to get free protection for your applications and to get access to all the features.
Files changed (500)
  1. data/.gitignore +8 -0
  2. data/.gitmodules +3 -0
  3. data/Gemfile +4 -0
  4. data/README.md +76 -0
  5. data/Rakefile +113 -0
  6. data/ext/libv8/extconf.rb +28 -0
  7. data/lib/libv8.rb +15 -0
  8. data/lib/libv8/Makefile +30 -0
  9. data/lib/libv8/detect_cpu.rb +27 -0
  10. data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
  11. data/lib/libv8/v8/.gitignore +35 -0
  12. data/lib/libv8/v8/AUTHORS +44 -0
  13. data/lib/libv8/v8/ChangeLog +2839 -0
  14. data/lib/libv8/v8/LICENSE +52 -0
  15. data/lib/libv8/v8/LICENSE.strongtalk +29 -0
  16. data/lib/libv8/v8/LICENSE.v8 +26 -0
  17. data/lib/libv8/v8/LICENSE.valgrind +45 -0
  18. data/lib/libv8/v8/SConstruct +1478 -0
  19. data/lib/libv8/v8/build/README.txt +49 -0
  20. data/lib/libv8/v8/build/all.gyp +18 -0
  21. data/lib/libv8/v8/build/armu.gypi +32 -0
  22. data/lib/libv8/v8/build/common.gypi +144 -0
  23. data/lib/libv8/v8/build/gyp_v8 +145 -0
  24. data/lib/libv8/v8/include/v8-debug.h +395 -0
  25. data/lib/libv8/v8/include/v8-preparser.h +117 -0
  26. data/lib/libv8/v8/include/v8-profiler.h +505 -0
  27. data/lib/libv8/v8/include/v8-testing.h +104 -0
  28. data/lib/libv8/v8/include/v8.h +4124 -0
  29. data/lib/libv8/v8/include/v8stdint.h +53 -0
  30. data/lib/libv8/v8/preparser/SConscript +38 -0
  31. data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
  32. data/lib/libv8/v8/src/SConscript +368 -0
  33. data/lib/libv8/v8/src/accessors.cc +767 -0
  34. data/lib/libv8/v8/src/accessors.h +123 -0
  35. data/lib/libv8/v8/src/allocation-inl.h +49 -0
  36. data/lib/libv8/v8/src/allocation.cc +122 -0
  37. data/lib/libv8/v8/src/allocation.h +143 -0
  38. data/lib/libv8/v8/src/api.cc +5845 -0
  39. data/lib/libv8/v8/src/api.h +574 -0
  40. data/lib/libv8/v8/src/apinatives.js +110 -0
  41. data/lib/libv8/v8/src/apiutils.h +73 -0
  42. data/lib/libv8/v8/src/arguments.h +118 -0
  43. data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
  44. data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
  45. data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
  46. data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
  47. data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
  48. data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
  49. data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
  50. data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
  51. data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
  52. data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
  53. data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
  54. data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
  55. data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
  56. data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
  57. data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
  58. data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
  59. data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
  60. data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
  61. data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
  62. data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
  63. data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
  64. data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
  65. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
  66. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
  67. data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
  68. data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
  69. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
  70. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
  71. data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
  72. data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
  73. data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
  74. data/lib/libv8/v8/src/array.js +1366 -0
  75. data/lib/libv8/v8/src/assembler.cc +1207 -0
  76. data/lib/libv8/v8/src/assembler.h +858 -0
  77. data/lib/libv8/v8/src/ast-inl.h +112 -0
  78. data/lib/libv8/v8/src/ast.cc +1146 -0
  79. data/lib/libv8/v8/src/ast.h +2188 -0
  80. data/lib/libv8/v8/src/atomicops.h +167 -0
  81. data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
  82. data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
  83. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
  84. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
  85. data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
  86. data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
  87. data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
  88. data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
  89. data/lib/libv8/v8/src/bignum.cc +768 -0
  90. data/lib/libv8/v8/src/bignum.h +140 -0
  91. data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
  92. data/lib/libv8/v8/src/bootstrapper.h +188 -0
  93. data/lib/libv8/v8/src/builtins.cc +1707 -0
  94. data/lib/libv8/v8/src/builtins.h +371 -0
  95. data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
  96. data/lib/libv8/v8/src/cached-powers.cc +177 -0
  97. data/lib/libv8/v8/src/cached-powers.h +65 -0
  98. data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
  99. data/lib/libv8/v8/src/char-predicates.h +67 -0
  100. data/lib/libv8/v8/src/checks.cc +110 -0
  101. data/lib/libv8/v8/src/checks.h +296 -0
  102. data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
  103. data/lib/libv8/v8/src/circular-queue.cc +122 -0
  104. data/lib/libv8/v8/src/circular-queue.h +103 -0
  105. data/lib/libv8/v8/src/code-stubs.cc +267 -0
  106. data/lib/libv8/v8/src/code-stubs.h +1011 -0
  107. data/lib/libv8/v8/src/code.h +70 -0
  108. data/lib/libv8/v8/src/codegen.cc +231 -0
  109. data/lib/libv8/v8/src/codegen.h +84 -0
  110. data/lib/libv8/v8/src/compilation-cache.cc +540 -0
  111. data/lib/libv8/v8/src/compilation-cache.h +287 -0
  112. data/lib/libv8/v8/src/compiler.cc +786 -0
  113. data/lib/libv8/v8/src/compiler.h +312 -0
  114. data/lib/libv8/v8/src/contexts.cc +347 -0
  115. data/lib/libv8/v8/src/contexts.h +391 -0
  116. data/lib/libv8/v8/src/conversions-inl.h +106 -0
  117. data/lib/libv8/v8/src/conversions.cc +1131 -0
  118. data/lib/libv8/v8/src/conversions.h +135 -0
  119. data/lib/libv8/v8/src/counters.cc +93 -0
  120. data/lib/libv8/v8/src/counters.h +254 -0
  121. data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
  122. data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
  123. data/lib/libv8/v8/src/cpu-profiler.h +302 -0
  124. data/lib/libv8/v8/src/cpu.h +69 -0
  125. data/lib/libv8/v8/src/d8-debug.cc +367 -0
  126. data/lib/libv8/v8/src/d8-debug.h +158 -0
  127. data/lib/libv8/v8/src/d8-posix.cc +695 -0
  128. data/lib/libv8/v8/src/d8-readline.cc +130 -0
  129. data/lib/libv8/v8/src/d8-windows.cc +42 -0
  130. data/lib/libv8/v8/src/d8.cc +803 -0
  131. data/lib/libv8/v8/src/d8.gyp +91 -0
  132. data/lib/libv8/v8/src/d8.h +235 -0
  133. data/lib/libv8/v8/src/d8.js +2798 -0
  134. data/lib/libv8/v8/src/data-flow.cc +66 -0
  135. data/lib/libv8/v8/src/data-flow.h +205 -0
  136. data/lib/libv8/v8/src/date.js +1103 -0
  137. data/lib/libv8/v8/src/dateparser-inl.h +127 -0
  138. data/lib/libv8/v8/src/dateparser.cc +178 -0
  139. data/lib/libv8/v8/src/dateparser.h +266 -0
  140. data/lib/libv8/v8/src/debug-agent.cc +447 -0
  141. data/lib/libv8/v8/src/debug-agent.h +129 -0
  142. data/lib/libv8/v8/src/debug-debugger.js +2569 -0
  143. data/lib/libv8/v8/src/debug.cc +3165 -0
  144. data/lib/libv8/v8/src/debug.h +1057 -0
  145. data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
  146. data/lib/libv8/v8/src/deoptimizer.h +602 -0
  147. data/lib/libv8/v8/src/disasm.h +80 -0
  148. data/lib/libv8/v8/src/disassembler.cc +343 -0
  149. data/lib/libv8/v8/src/disassembler.h +58 -0
  150. data/lib/libv8/v8/src/diy-fp.cc +58 -0
  151. data/lib/libv8/v8/src/diy-fp.h +117 -0
  152. data/lib/libv8/v8/src/double.h +238 -0
  153. data/lib/libv8/v8/src/dtoa.cc +103 -0
  154. data/lib/libv8/v8/src/dtoa.h +85 -0
  155. data/lib/libv8/v8/src/execution.cc +849 -0
  156. data/lib/libv8/v8/src/execution.h +297 -0
  157. data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
  158. data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
  159. data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
  160. data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
  161. data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
  162. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
  163. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
  164. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
  165. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
  166. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
  167. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
  168. data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
  169. data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
  170. data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
  171. data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
  172. data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
  173. data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
  174. data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
  175. data/lib/libv8/v8/src/factory.cc +1222 -0
  176. data/lib/libv8/v8/src/factory.h +442 -0
  177. data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
  178. data/lib/libv8/v8/src/fast-dtoa.h +83 -0
  179. data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
  180. data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
  181. data/lib/libv8/v8/src/flag-definitions.h +560 -0
  182. data/lib/libv8/v8/src/flags.cc +551 -0
  183. data/lib/libv8/v8/src/flags.h +79 -0
  184. data/lib/libv8/v8/src/frames-inl.h +247 -0
  185. data/lib/libv8/v8/src/frames.cc +1243 -0
  186. data/lib/libv8/v8/src/frames.h +870 -0
  187. data/lib/libv8/v8/src/full-codegen.cc +1374 -0
  188. data/lib/libv8/v8/src/full-codegen.h +771 -0
  189. data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
  190. data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
  191. data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
  192. data/lib/libv8/v8/src/gdb-jit.h +143 -0
  193. data/lib/libv8/v8/src/global-handles.cc +665 -0
  194. data/lib/libv8/v8/src/global-handles.h +284 -0
  195. data/lib/libv8/v8/src/globals.h +325 -0
  196. data/lib/libv8/v8/src/handles-inl.h +177 -0
  197. data/lib/libv8/v8/src/handles.cc +987 -0
  198. data/lib/libv8/v8/src/handles.h +382 -0
  199. data/lib/libv8/v8/src/hashmap.cc +230 -0
  200. data/lib/libv8/v8/src/hashmap.h +123 -0
  201. data/lib/libv8/v8/src/heap-inl.h +704 -0
  202. data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
  203. data/lib/libv8/v8/src/heap-profiler.h +397 -0
  204. data/lib/libv8/v8/src/heap.cc +5930 -0
  205. data/lib/libv8/v8/src/heap.h +2268 -0
  206. data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
  207. data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
  208. data/lib/libv8/v8/src/hydrogen.cc +6239 -0
  209. data/lib/libv8/v8/src/hydrogen.h +1202 -0
  210. data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
  211. data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
  212. data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
  213. data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
  214. data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
  215. data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
  216. data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
  217. data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
  218. data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
  219. data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
  220. data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
  221. data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
  222. data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
  223. data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
  224. data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
  225. data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
  226. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
  227. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
  228. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
  229. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
  230. data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
  231. data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
  232. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
  233. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
  234. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
  235. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
  236. data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
  237. data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
  238. data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
  239. data/lib/libv8/v8/src/ic-inl.h +130 -0
  240. data/lib/libv8/v8/src/ic.cc +2577 -0
  241. data/lib/libv8/v8/src/ic.h +736 -0
  242. data/lib/libv8/v8/src/inspector.cc +63 -0
  243. data/lib/libv8/v8/src/inspector.h +62 -0
  244. data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
  245. data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
  246. data/lib/libv8/v8/src/isolate-inl.h +50 -0
  247. data/lib/libv8/v8/src/isolate.cc +1869 -0
  248. data/lib/libv8/v8/src/isolate.h +1382 -0
  249. data/lib/libv8/v8/src/json-parser.cc +504 -0
  250. data/lib/libv8/v8/src/json-parser.h +161 -0
  251. data/lib/libv8/v8/src/json.js +342 -0
  252. data/lib/libv8/v8/src/jsregexp.cc +5385 -0
  253. data/lib/libv8/v8/src/jsregexp.h +1492 -0
  254. data/lib/libv8/v8/src/list-inl.h +212 -0
  255. data/lib/libv8/v8/src/list.h +174 -0
  256. data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
  257. data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
  258. data/lib/libv8/v8/src/lithium-allocator.h +630 -0
  259. data/lib/libv8/v8/src/lithium.cc +190 -0
  260. data/lib/libv8/v8/src/lithium.h +597 -0
  261. data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
  262. data/lib/libv8/v8/src/liveedit.cc +1691 -0
  263. data/lib/libv8/v8/src/liveedit.h +180 -0
  264. data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
  265. data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
  266. data/lib/libv8/v8/src/liveobjectlist.h +322 -0
  267. data/lib/libv8/v8/src/log-inl.h +59 -0
  268. data/lib/libv8/v8/src/log-utils.cc +428 -0
  269. data/lib/libv8/v8/src/log-utils.h +231 -0
  270. data/lib/libv8/v8/src/log.cc +1993 -0
  271. data/lib/libv8/v8/src/log.h +476 -0
  272. data/lib/libv8/v8/src/macro-assembler.h +120 -0
  273. data/lib/libv8/v8/src/macros.py +178 -0
  274. data/lib/libv8/v8/src/mark-compact.cc +3143 -0
  275. data/lib/libv8/v8/src/mark-compact.h +506 -0
  276. data/lib/libv8/v8/src/math.js +264 -0
  277. data/lib/libv8/v8/src/messages.cc +179 -0
  278. data/lib/libv8/v8/src/messages.h +113 -0
  279. data/lib/libv8/v8/src/messages.js +1096 -0
  280. data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
  281. data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
  282. data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
  283. data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
  284. data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
  285. data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
  286. data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
  287. data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
  288. data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
  289. data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
  290. data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
  291. data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
  292. data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
  293. data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
  294. data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
  295. data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
  296. data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
  297. data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
  298. data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
  299. data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
  300. data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
  301. data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
  302. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
  303. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
  304. data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
  305. data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
  306. data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
  307. data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
  308. data/lib/libv8/v8/src/mksnapshot.cc +328 -0
  309. data/lib/libv8/v8/src/natives.h +64 -0
  310. data/lib/libv8/v8/src/objects-debug.cc +738 -0
  311. data/lib/libv8/v8/src/objects-inl.h +4323 -0
  312. data/lib/libv8/v8/src/objects-printer.cc +829 -0
  313. data/lib/libv8/v8/src/objects-visiting.cc +148 -0
  314. data/lib/libv8/v8/src/objects-visiting.h +424 -0
  315. data/lib/libv8/v8/src/objects.cc +10585 -0
  316. data/lib/libv8/v8/src/objects.h +6838 -0
  317. data/lib/libv8/v8/src/parser.cc +4997 -0
  318. data/lib/libv8/v8/src/parser.h +765 -0
  319. data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
  320. data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
  321. data/lib/libv8/v8/src/platform-linux.cc +1149 -0
  322. data/lib/libv8/v8/src/platform-macos.cc +830 -0
  323. data/lib/libv8/v8/src/platform-nullos.cc +479 -0
  324. data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
  325. data/lib/libv8/v8/src/platform-posix.cc +424 -0
  326. data/lib/libv8/v8/src/platform-solaris.cc +762 -0
  327. data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
  328. data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
  329. data/lib/libv8/v8/src/platform-tls.h +50 -0
  330. data/lib/libv8/v8/src/platform-win32.cc +2021 -0
  331. data/lib/libv8/v8/src/platform.h +667 -0
  332. data/lib/libv8/v8/src/preparse-data-format.h +62 -0
  333. data/lib/libv8/v8/src/preparse-data.cc +183 -0
  334. data/lib/libv8/v8/src/preparse-data.h +225 -0
  335. data/lib/libv8/v8/src/preparser-api.cc +220 -0
  336. data/lib/libv8/v8/src/preparser.cc +1450 -0
  337. data/lib/libv8/v8/src/preparser.h +493 -0
  338. data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
  339. data/lib/libv8/v8/src/prettyprinter.h +223 -0
  340. data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
  341. data/lib/libv8/v8/src/profile-generator.cc +3098 -0
  342. data/lib/libv8/v8/src/profile-generator.h +1126 -0
  343. data/lib/libv8/v8/src/property.cc +105 -0
  344. data/lib/libv8/v8/src/property.h +365 -0
  345. data/lib/libv8/v8/src/proxy.js +83 -0
  346. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
  347. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
  348. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
  349. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
  350. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
  351. data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
  352. data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
  353. data/lib/libv8/v8/src/regexp-stack.cc +111 -0
  354. data/lib/libv8/v8/src/regexp-stack.h +147 -0
  355. data/lib/libv8/v8/src/regexp.js +483 -0
  356. data/lib/libv8/v8/src/rewriter.cc +360 -0
  357. data/lib/libv8/v8/src/rewriter.h +50 -0
  358. data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
  359. data/lib/libv8/v8/src/runtime-profiler.h +201 -0
  360. data/lib/libv8/v8/src/runtime.cc +12227 -0
  361. data/lib/libv8/v8/src/runtime.h +652 -0
  362. data/lib/libv8/v8/src/runtime.js +649 -0
  363. data/lib/libv8/v8/src/safepoint-table.cc +256 -0
  364. data/lib/libv8/v8/src/safepoint-table.h +270 -0
  365. data/lib/libv8/v8/src/scanner-base.cc +952 -0
  366. data/lib/libv8/v8/src/scanner-base.h +670 -0
  367. data/lib/libv8/v8/src/scanner.cc +345 -0
  368. data/lib/libv8/v8/src/scanner.h +146 -0
  369. data/lib/libv8/v8/src/scopeinfo.cc +646 -0
  370. data/lib/libv8/v8/src/scopeinfo.h +254 -0
  371. data/lib/libv8/v8/src/scopes.cc +1150 -0
  372. data/lib/libv8/v8/src/scopes.h +507 -0
  373. data/lib/libv8/v8/src/serialize.cc +1574 -0
  374. data/lib/libv8/v8/src/serialize.h +589 -0
  375. data/lib/libv8/v8/src/shell.h +55 -0
  376. data/lib/libv8/v8/src/simulator.h +43 -0
  377. data/lib/libv8/v8/src/small-pointer-list.h +163 -0
  378. data/lib/libv8/v8/src/smart-pointer.h +109 -0
  379. data/lib/libv8/v8/src/snapshot-common.cc +83 -0
  380. data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
  381. data/lib/libv8/v8/src/snapshot.h +91 -0
  382. data/lib/libv8/v8/src/spaces-inl.h +529 -0
  383. data/lib/libv8/v8/src/spaces.cc +3145 -0
  384. data/lib/libv8/v8/src/spaces.h +2369 -0
  385. data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
  386. data/lib/libv8/v8/src/splay-tree.h +205 -0
  387. data/lib/libv8/v8/src/string-search.cc +41 -0
  388. data/lib/libv8/v8/src/string-search.h +568 -0
  389. data/lib/libv8/v8/src/string-stream.cc +592 -0
  390. data/lib/libv8/v8/src/string-stream.h +191 -0
  391. data/lib/libv8/v8/src/string.js +994 -0
  392. data/lib/libv8/v8/src/strtod.cc +440 -0
  393. data/lib/libv8/v8/src/strtod.h +40 -0
  394. data/lib/libv8/v8/src/stub-cache.cc +1965 -0
  395. data/lib/libv8/v8/src/stub-cache.h +924 -0
  396. data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
  397. data/lib/libv8/v8/src/token.cc +63 -0
  398. data/lib/libv8/v8/src/token.h +288 -0
  399. data/lib/libv8/v8/src/type-info.cc +507 -0
  400. data/lib/libv8/v8/src/type-info.h +272 -0
  401. data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
  402. data/lib/libv8/v8/src/unbound-queue.h +69 -0
  403. data/lib/libv8/v8/src/unicode-inl.h +238 -0
  404. data/lib/libv8/v8/src/unicode.cc +1624 -0
  405. data/lib/libv8/v8/src/unicode.h +280 -0
  406. data/lib/libv8/v8/src/uri.js +408 -0
  407. data/lib/libv8/v8/src/utils-inl.h +48 -0
  408. data/lib/libv8/v8/src/utils.cc +371 -0
  409. data/lib/libv8/v8/src/utils.h +800 -0
  410. data/lib/libv8/v8/src/v8-counters.cc +62 -0
  411. data/lib/libv8/v8/src/v8-counters.h +314 -0
  412. data/lib/libv8/v8/src/v8.cc +213 -0
  413. data/lib/libv8/v8/src/v8.h +131 -0
  414. data/lib/libv8/v8/src/v8checks.h +64 -0
  415. data/lib/libv8/v8/src/v8dll-main.cc +44 -0
  416. data/lib/libv8/v8/src/v8globals.h +512 -0
  417. data/lib/libv8/v8/src/v8memory.h +82 -0
  418. data/lib/libv8/v8/src/v8natives.js +1310 -0
  419. data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
  420. data/lib/libv8/v8/src/v8threads.cc +464 -0
  421. data/lib/libv8/v8/src/v8threads.h +165 -0
  422. data/lib/libv8/v8/src/v8utils.h +319 -0
  423. data/lib/libv8/v8/src/variables.cc +114 -0
  424. data/lib/libv8/v8/src/variables.h +167 -0
  425. data/lib/libv8/v8/src/version.cc +116 -0
  426. data/lib/libv8/v8/src/version.h +68 -0
  427. data/lib/libv8/v8/src/vm-state-inl.h +138 -0
  428. data/lib/libv8/v8/src/vm-state.h +71 -0
  429. data/lib/libv8/v8/src/win32-headers.h +96 -0
  430. data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
  431. data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
  432. data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
  433. data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
  434. data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
  435. data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
  436. data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
  437. data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
  438. data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
  439. data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
  440. data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
  441. data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
  442. data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
  443. data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
  444. data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
  445. data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
  446. data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
  447. data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
  448. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
  449. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
  450. data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
  451. data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
  452. data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
  453. data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
  454. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
  455. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
  456. data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
  457. data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
  458. data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
  459. data/lib/libv8/v8/src/zone-inl.h +140 -0
  460. data/lib/libv8/v8/src/zone.cc +196 -0
  461. data/lib/libv8/v8/src/zone.h +240 -0
  462. data/lib/libv8/v8/tools/codemap.js +265 -0
  463. data/lib/libv8/v8/tools/consarray.js +93 -0
  464. data/lib/libv8/v8/tools/csvparser.js +78 -0
  465. data/lib/libv8/v8/tools/disasm.py +92 -0
  466. data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
  467. data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
  468. data/lib/libv8/v8/tools/gcmole/README +62 -0
  469. data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
  470. data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
  471. data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
  472. data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
  473. data/lib/libv8/v8/tools/grokdump.py +841 -0
  474. data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
  475. data/lib/libv8/v8/tools/js2c.py +364 -0
  476. data/lib/libv8/v8/tools/jsmin.py +280 -0
  477. data/lib/libv8/v8/tools/linux-tick-processor +35 -0
  478. data/lib/libv8/v8/tools/ll_prof.py +942 -0
  479. data/lib/libv8/v8/tools/logreader.js +185 -0
  480. data/lib/libv8/v8/tools/mac-nm +18 -0
  481. data/lib/libv8/v8/tools/mac-tick-processor +6 -0
  482. data/lib/libv8/v8/tools/oom_dump/README +31 -0
  483. data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
  484. data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
  485. data/lib/libv8/v8/tools/presubmit.py +305 -0
  486. data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
  487. data/lib/libv8/v8/tools/profile.js +751 -0
  488. data/lib/libv8/v8/tools/profile_view.js +219 -0
  489. data/lib/libv8/v8/tools/run-valgrind.py +77 -0
  490. data/lib/libv8/v8/tools/splaytree.js +316 -0
  491. data/lib/libv8/v8/tools/stats-viewer.py +468 -0
  492. data/lib/libv8/v8/tools/test.py +1510 -0
  493. data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
  494. data/lib/libv8/v8/tools/tickprocessor.js +877 -0
  495. data/lib/libv8/v8/tools/utils.py +96 -0
  496. data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
  497. data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
  498. data/lib/libv8/version.rb +5 -0
  499. data/libv8.gemspec +36 -0
  500. metadata +578 -0
@@ -0,0 +1,2661 @@
1
+ // Copyright (c) 1994-2006 Sun Microsystems Inc.
2
+ // All Rights Reserved.
3
+ //
4
+ // Redistribution and use in source and binary forms, with or without
5
+ // modification, are permitted provided that the following conditions
6
+ // are met:
7
+ //
8
+ // - Redistributions of source code must retain the above copyright notice,
9
+ // this list of conditions and the following disclaimer.
10
+ //
11
+ // - Redistribution in binary form must reproduce the above copyright
12
+ // notice, this list of conditions and the following disclaimer in the
13
+ // documentation and/or other materials provided with the
14
+ // distribution.
15
+ //
16
+ // - Neither the name of Sun Microsystems or the names of contributors may
17
+ // be used to endorse or promote products derived from this software without
18
+ // specific prior written permission.
19
+ //
20
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23
+ // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24
+ // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25
+ // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26
+ // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27
+ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28
+ // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29
+ // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30
+ // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31
+ // OF THE POSSIBILITY OF SUCH DAMAGE.
32
+
33
+ // The original source code covered by the above license above has been
34
+ // modified significantly by Google Inc.
35
+ // Copyright 2011 the V8 project authors. All rights reserved.
36
+
37
+ #include "v8.h"
38
+
39
+ #if defined(V8_TARGET_ARCH_ARM)
40
+
41
+ #include "arm/assembler-arm-inl.h"
42
+ #include "serialize.h"
43
+
44
+ namespace v8 {
45
+ namespace internal {
46
+
47
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;  // Set by Probe(); guards against use-before-init.
#endif
unsigned CpuFeatures::supported_ = 0;  // Bit set of CPU features usable by generated code.
unsigned CpuFeatures::found_by_runtime_probing_ = 0;  // Subset of supported_ discovered only at runtime.
52
+
53
+
54
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  answer |= 1u << ARMv7;
#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP_INSTRUCTIONS
  // VFPv3 implies ARMv7, so both feature bits are set together.
  answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // def CAN_USE_VFP_INSTRUCTIONS

#ifdef __arm__
  // If the compiler is allowed to use VFP then we can use VFP too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
#endif  // def __arm__

  return answer;
}
78
+
79
+
80
// Determine the set of usable CPU features from build settings, flags and
// (when running on real ARM hardware) runtime probing. Must be called once
// before generating code.
void CpuFeatures::Probe() {
  ASSERT(!initialized_);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= OS::CpuFeaturesImpliedByPlatform();
  supported_ |= CpuFeaturesImpliedByCompiler();

  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
    return;
  }

#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
  if (FLAG_enable_vfp3) {
    supported_ |= 1u << VFP3 | 1u << ARMv7;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
  }
#else  // def __arm__
  // Probe for additional features not already known to be available.
  if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    supported_ |= 1u << VFP3 | 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
  }

  if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }
#endif
}
123
+
124
+
125
+ // -----------------------------------------------------------------------------
126
+ // Implementation of RelocInfo
127
+
128
const int RelocInfo::kApplyMask = 0;  // No reloc modes need pc-relative patching on ARM.
129
+
130
+
131
+ bool RelocInfo::IsCodedSpecially() {
132
+ // The deserializer needs to know whether a pointer is specially coded. Being
133
+ // specially coded on ARM means that it is a movw/movt instruction. We don't
134
+ // generate those yet.
135
+ return false;
136
+ }
137
+
138
+
139
+
140
+ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
141
+ // Patch the code at the current address with the supplied instructions.
142
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
143
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
144
+ for (int i = 0; i < instruction_count; i++) {
145
+ *(pc + i) = *(instr + i);
146
+ }
147
+
148
+ // Indicate that code has changed.
149
+ CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
150
+ }
151
+
152
+
153
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not required on ARM so far; aborts if ever reached.
  UNIMPLEMENTED();
}
159
+
160
+
161
+ // -----------------------------------------------------------------------------
162
+ // Implementation of Operand and MemOperand
163
+ // See assembler-arm-inl.h for inlined constructors
164
+
165
+ Operand::Operand(Handle<Object> handle) {
166
+ rm_ = no_reg;
167
+ // Verify all Objects referred by code are NOT in new space.
168
+ Object* obj = *handle;
169
+ ASSERT(!HEAP->InNewSpace(obj));
170
+ if (obj->IsHeapObject()) {
171
+ imm32_ = reinterpret_cast<intptr_t>(handle.location());
172
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
173
+ } else {
174
+ // no relocation needed
175
+ imm32_ = reinterpret_cast<intptr_t>(obj);
176
+ rmode_ = RelocInfo::NONE;
177
+ }
178
+ }
179
+
180
+
181
+ Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
182
+ ASSERT(is_uint5(shift_imm));
183
+ ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
184
+ rm_ = rm;
185
+ rs_ = no_reg;
186
+ shift_op_ = shift_op;
187
+ shift_imm_ = shift_imm & 31;
188
+ if (shift_op == RRX) {
189
+ // encoded as ROR with shift_imm == 0
190
+ ASSERT(shift_imm == 0);
191
+ shift_op_ = ROR;
192
+ shift_imm_ = 0;
193
+ }
194
+ }
195
+
196
+
197
+ Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
198
+ ASSERT(shift_op != RRX);
199
+ rm_ = rm;
200
+ rs_ = no_reg;
201
+ shift_op_ = shift_op;
202
+ rs_ = rs;
203
+ }
204
+
205
+
206
+ MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
207
+ rn_ = rn;
208
+ rm_ = no_reg;
209
+ offset_ = offset;
210
+ am_ = am;
211
+ }
212
+
213
+ MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
214
+ rn_ = rn;
215
+ rm_ = rm;
216
+ shift_op_ = LSL;
217
+ shift_imm_ = 0;
218
+ am_ = am;
219
+ }
220
+
221
+
222
+ MemOperand::MemOperand(Register rn, Register rm,
223
+ ShiftOp shift_op, int shift_imm, AddrMode am) {
224
+ ASSERT(is_uint5(shift_imm));
225
+ rn_ = rn;
226
+ rm_ = rm;
227
+ shift_op_ = shift_op;
228
+ shift_imm_ = shift_imm & 31;
229
+ am_ = am;
230
+ }
231
+
232
+
233
+ // -----------------------------------------------------------------------------
234
+ // Specific instructions, constants, and masks.
235
+
236
// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
// Masks, patterns and flip bits used by fits_shifter() to rewrite an
// instruction into its complementary form (mov<->mvn, cmp<->cmn,
// add<->sub, and<->bic) when the complemented immediate fits.
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// Patterns for ldr/str with fp as base register, used by the peephole
// matchers (IsLdrRegFpOffset etc.) below; the Rd field is not encoded.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | fp.code() * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | fp.code() * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
283
+
284
+
285
// Spare buffer.
// Buffers of exactly this size may be recycled through the isolate's
// spare-buffer slot (see the Assembler constructor and destructor).
static const int kMinimalBufferSize = 4*KB;
287
+
288
+
289
// Construct an assembler. If buffer is NULL the assembler owns (and may
// recycle) its own buffer of at least kMinimalBufferSize bytes; otherwise
// the caller-provided buffer of buffer_size bytes is used and not freed.
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
    : AssemblerBase(arg_isolate),
      positions_recorder_(this),
      emit_debug_code_(FLAG_debug_code) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the isolate's cached spare buffer when available to avoid
      // a fresh allocation for small assemblers.
      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers: instructions grow up from buffer_, relocation
  // info grows down from buffer_ + buffer_size.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  ast_id_for_reloc_info_ = kNoASTId;
}
331
+
332
+
333
+ Assembler::~Assembler() {
334
+ ASSERT(const_pool_blocked_nesting_ == 0);
335
+ if (own_buffer_) {
336
+ if (isolate()->assembler_spare_buffer() == NULL &&
337
+ buffer_size_ == kMinimalBufferSize) {
338
+ isolate()->set_assembler_spare_buffer(buffer_);
339
+ } else {
340
+ DeleteArray(buffer_);
341
+ }
342
+ }
343
+ }
344
+
345
+
346
+ void Assembler::GetCode(CodeDesc* desc) {
347
+ // Emit constant pool if necessary.
348
+ CheckConstPool(true, false);
349
+ ASSERT(num_prinfo_ == 0);
350
+
351
+ // Setup code descriptor.
352
+ desc->buffer = buffer_;
353
+ desc->buffer_size = buffer_size_;
354
+ desc->instr_size = pc_offset();
355
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
356
+ }
357
+
358
+
359
+ void Assembler::Align(int m) {
360
+ ASSERT(m >= 4 && IsPowerOf2(m));
361
+ while ((pc_offset() & (m - 1)) != 0) {
362
+ nop();
363
+ }
364
+ }
365
+
366
+
367
+ void Assembler::CodeTargetAlign() {
368
+ // Preferred alignment of jump targets on some ARM chips.
369
+ Align(8);
370
+ }
371
+
372
+
373
+ Condition Assembler::GetCondition(Instr instr) {
374
+ return Instruction::ConditionField(instr);
375
+ }
376
+
377
+
378
+ bool Assembler::IsBranch(Instr instr) {
379
+ return (instr & (B27 | B25)) == (B27 | B25);
380
+ }
381
+
382
+
383
int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // with 4 to get the offset in bytes: << 8 moves the imm24 sign bit into
  // bit 31, and the arithmetic >> 6 sign-extends while leaving a net << 2.
  return ((instr & kImm24Mask) << 8) >> 6;
}
389
+
390
+
391
+ bool Assembler::IsLdrRegisterImmediate(Instr instr) {
392
+ return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
393
+ }
394
+
395
+
396
+ int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
397
+ ASSERT(IsLdrRegisterImmediate(instr));
398
+ bool positive = (instr & B23) == B23;
399
+ int offset = instr & kOff12Mask; // Zero extended offset.
400
+ return positive ? offset : -offset;
401
+ }
402
+
403
+
404
+ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
405
+ ASSERT(IsLdrRegisterImmediate(instr));
406
+ bool positive = offset >= 0;
407
+ if (!positive) offset = -offset;
408
+ ASSERT(is_uint12(offset));
409
+ // Set bit indicating whether the offset should be added.
410
+ instr = (instr & ~B23) | (positive ? B23 : 0);
411
+ // Set the actual offset.
412
+ return (instr & ~kOff12Mask) | offset;
413
+ }
414
+
415
+
416
+ bool Assembler::IsStrRegisterImmediate(Instr instr) {
417
+ return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
418
+ }
419
+
420
+
421
+ Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
422
+ ASSERT(IsStrRegisterImmediate(instr));
423
+ bool positive = offset >= 0;
424
+ if (!positive) offset = -offset;
425
+ ASSERT(is_uint12(offset));
426
+ // Set bit indicating whether the offset should be added.
427
+ instr = (instr & ~B23) | (positive ? B23 : 0);
428
+ // Set the actual offset.
429
+ return (instr & ~kOff12Mask) | offset;
430
+ }
431
+
432
+
433
+ bool Assembler::IsAddRegisterImmediate(Instr instr) {
434
+ return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
435
+ }
436
+
437
+
438
+ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
439
+ ASSERT(IsAddRegisterImmediate(instr));
440
+ ASSERT(offset >= 0);
441
+ ASSERT(is_uint12(offset));
442
+ // Set the offset.
443
+ return (instr & ~kOff12Mask) | offset;
444
+ }
445
+
446
+
447
+ Register Assembler::GetRd(Instr instr) {
448
+ Register reg;
449
+ reg.code_ = Instruction::RdValue(instr);
450
+ return reg;
451
+ }
452
+
453
+
454
+ Register Assembler::GetRn(Instr instr) {
455
+ Register reg;
456
+ reg.code_ = Instruction::RnValue(instr);
457
+ return reg;
458
+ }
459
+
460
+
461
+ Register Assembler::GetRm(Instr instr) {
462
+ Register reg;
463
+ reg.code_ = Instruction::RmValue(instr);
464
+ return reg;
465
+ }
466
+
467
+
468
+ bool Assembler::IsPush(Instr instr) {
469
+ return ((instr & ~kRdMask) == kPushRegPattern);
470
+ }
471
+
472
+
473
+ bool Assembler::IsPop(Instr instr) {
474
+ return ((instr & ~kRdMask) == kPopRegPattern);
475
+ }
476
+
477
+
478
+ bool Assembler::IsStrRegFpOffset(Instr instr) {
479
+ return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
480
+ }
481
+
482
+
483
+ bool Assembler::IsLdrRegFpOffset(Instr instr) {
484
+ return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
485
+ }
486
+
487
+
488
+ bool Assembler::IsStrRegFpNegOffset(Instr instr) {
489
+ return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
490
+ }
491
+
492
+
493
+ bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
494
+ return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
495
+ }
496
+
497
+
498
+ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
499
+ // Check the instruction is indeed a
500
+ // ldr<cond> <Rd>, [pc +/- offset_12].
501
+ return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
502
+ }
503
+
504
+
505
+ bool Assembler::IsTstImmediate(Instr instr) {
506
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
507
+ (I | TST | S);
508
+ }
509
+
510
+
511
+ bool Assembler::IsCmpRegister(Instr instr) {
512
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
513
+ (CMP | S);
514
+ }
515
+
516
+
517
+ bool Assembler::IsCmpImmediate(Instr instr) {
518
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
519
+ (I | CMP | S);
520
+ }
521
+
522
+
523
+ Register Assembler::GetCmpImmediateRegister(Instr instr) {
524
+ ASSERT(IsCmpImmediate(instr));
525
+ return GetRn(instr);
526
+ }
527
+
528
+
529
+ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
530
+ ASSERT(IsCmpImmediate(instr));
531
+ return instr & kOff12Mask;
532
+ }
533
+
534
+ // Labels refer to positions in the (to be) generated code.
535
+ // There are bound, linked, and unused labels.
536
+ //
537
+ // Bound labels refer to known positions in the already
538
+ // generated code. pos() is the position the label refers to.
539
+ //
540
+ // Linked labels refer to unknown positions in the code
541
+ // to be generated; pos() is the position of the last
542
+ // instruction using the label.
543
+
544
+
545
// The link chain is terminated by a negative code position (must be aligned)
// so target_at() can distinguish it from a real (non-negative) position.
const int kEndOfChain = -4;
547
+
548
+
549
// Returns the position encoded in the instruction at pos: the next link in
// a label chain (or kEndOfChain), decoded from either an emitted label
// constant or the imm24 field of a b/bl/blx instruction.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign-extend the 24-bit immediate and scale to a byte offset (<< 2).
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}
564
+
565
+
566
// Resolves the link at pos to point at target_pos: rewrites an emitted
// label constant in place, or patches the imm24 (and, for blx, B24) field
// of a b/bl/blx instruction.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
589
+
590
+
591
// Debug helper: prints the state of label L and, if it is linked, every
// branch (or emitted label constant) on its link chain.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    // Walk a copy of the label so L itself is not consumed.
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          // Map the condition field to its mnemonic suffix.
          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
647
+
648
+
649
+ void Assembler::bind_to(Label* L, int pos) {
650
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
651
+ while (L->is_linked()) {
652
+ int fixup_pos = L->pos();
653
+ next(L); // call next before overwriting link with target at fixup_pos
654
+ target_at_put(fixup_pos, pos);
655
+ }
656
+ L->bind_to(pos);
657
+
658
+ // Keep track of the last bound label so we don't eliminate any instructions
659
+ // before a bound label.
660
+ if (pos > last_bound_pos_)
661
+ last_bound_pos_ = pos;
662
+ }
663
+
664
+
665
// Appends appendix's link chain onto L's chain, then invalidates appendix.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      int fixup_pos;
      int link = L->pos();
      // Walk to the end of L's chain; a non-positive link terminates it.
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
684
+
685
+
686
+ void Assembler::bind(Label* L) {
687
+ ASSERT(!L->is_bound()); // label can only be bound once
688
+ bind_to(L, pc_offset());
689
+ }
690
+
691
+
692
+ void Assembler::next(Label* L) {
693
+ ASSERT(L->is_linked());
694
+ int link = target_at(L->pos());
695
+ if (link > 0) {
696
+ L->link_to(link);
697
+ } else {
698
+ ASSERT(link == kEndOfChain);
699
+ L->Unuse();
700
+ }
701
+ }
702
+
703
+
704
+ static Instr EncodeMovwImmediate(uint32_t immediate) {
705
+ ASSERT(immediate < 0x10000);
706
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
707
+ }
708
+
709
+
710
+ // Low-level code emission routines depending on the addressing mode.
711
+ // If this returns true then you have to use the rotate_imm and immed_8
712
+ // that it returns, because it may have already changed the instruction
713
+ // to match them!
714
// Returns true if imm32 can be encoded as an ARM shifter operand, storing
// the encoding in *rotate_imm/*immed_8. If instr is non-NULL the function
// may also rewrite *instr into a complementary opcode (mvn/cmn/sub/bic or
// movw) whose complemented immediate does fit — callers must then use the
// possibly-modified instruction. Recursive calls pass NULL to test-only.
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  // Try every even rotation of an 8-bit value.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      // mov <-> mvn: try the bitwise complement.
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        // mov without flag setting may become movw on ARMv7.
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      // cmp <-> cmn: try the arithmetic negation.
      if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        // add <-> sub: try the arithmetic negation.
        if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        // and <-> bic: try the bitwise complement.
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
768
+
769
+
770
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_use_constant_pool() const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    // Once an external reference has been emitted inline, serialization can
    // no longer be enabled safely.
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    return Serializer::enabled();
  } else if (rmode_ == RelocInfo::NONE) {
    // Plain immediates never need relocation.
    return false;
  }
  return true;
}
787
+
788
+
789
// Returns true if using this operand in the given instruction will emit
// exactly one instruction (register operands always; immediates only when
// they fit the shifter or can be loaded with a single pc-relative ldr).
bool Operand::is_single_instruction(Instr instr) const {
  if (rm_.is_valid()) return true;
  uint32_t dummy1, dummy2;
  if (must_use_constant_pool() ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // constant pool is required. For a mov instruction not setting the
    // condition code additional instruction conventions can be used.
    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
      if (must_use_constant_pool() ||
          !CpuFeatures::IsSupported(ARMv7)) {
        // mov instruction will be an ldr from constant pool (one instruction).
        return true;
      } else {
        // mov instruction will be a mov or movw followed by movt (two
        // instructions).
        return false;
      }
    } else {
      // If this is not a mov or mvn instruction there will always an additional
      // instructions - either mov or ldr. The mov might actually be two
      // instructions mov or movw followed by movt so including the actual
      // instruction two or three instructions will be generated.
      return false;
    }
  } else {
    // No use of constant pool and the immediate operand can be encoded as a
    // shifter operand.
    return true;
  }
}
820
+
821
+
822
// Emits a data-processing instruction (ARM addressing mode 1): instr with
// destination rd, first operand rn and shifter operand x. Immediates that
// cannot be encoded are materialized via ip or a constant-pool load.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_use_constant_pool() ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        if (x.must_use_constant_pool() ||
            !CpuFeatures::IsSupported(ARMv7)) {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(rd, MemOperand(pc, 0), cond);
        } else {
          // Will probably use movw, will certainly not use constant pool.
          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
        }
      } else {
        // If this is not a mov or mvn instruction we may still be able to avoid
        // a constant pool entry by using mvn or movw.
        if (!x.must_use_constant_pool() &&
            (instr & kMovMvnMask) != kMovMvnPattern) {
          mov(ip, x, LeaveCC, cond);
        } else {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(ip, MemOperand(pc, 0), cond);
        }
        // Re-emit the original instruction with ip as the shifter operand.
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
}
879
+
880
+
881
// Emits a word/byte load or store (ARM addressing mode 2). Offsets that do
// not fit the 12-bit immediate field are materialized into ip first.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      // Negative offsets are encoded as a positive magnitude with the U bit
      // flipped.
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
911
+
912
+
913
// Emits a halfword / signed-byte / doubleword transfer (ARM addressing
// mode 3). Only 8-bit immediate offsets and unscaled register offsets can
// be encoded; anything else is materialized into ip first.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      // Negative offsets use a positive magnitude with the U bit flipped.
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // The 8-bit offset is split into two 4-bit halves in the encoding.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
950
+
951
+
952
// Encode a load/store-multiple (addressing mode 4): base register in
// bits 16-19, register list in the low 16 bits.
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);          // the register list must not be empty
  ASSERT(!rn.is(pc));       // pc may not be the base register
  emit(instr | rn.code()*B16 | rl);
}
958
+
959
+
960
// Encode a coprocessor load/store (addressing mode 5) with an immediate
// word offset.  Only immediate addressing is supported here.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;               // encode as a word (not byte) offset
  if (offset_8 < 0) {
    // Negative offsets: positive magnitude with the U bit flipped.
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
983
+
984
+
985
// Return the pc-relative branch offset to label L, linking L into the
// chain of forward references when it is not yet bound.  The returned
// value already accounts for the ARM pipeline pc-load delta.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link (previous entry in the chain)
    } else {
      target_pos = kEndOfChain;  // first reference to this label
    }
    // Thread this branch site onto the label's fixup chain.
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
1003
+
1004
+
1005
// Record a reference to label L at instruction slot at_offset, storing the
// link (or end-of-chain marker) biased by the code-object header so it can
// later be patched to the real target.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    // NOTE(review): for an already-bound label nothing is written to
    // at_offset here — callers appear to rely on the unbound path only;
    // confirm against call sites before depending on the bound case.
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    // Bias by the code header so the stored value is a valid Smi-like slot.
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}
1019
+
1020
+
1021
+ // Branch instructions.
1022
+ void Assembler::b(int branch_offset, Condition cond) {
1023
+ ASSERT((branch_offset & 3) == 0);
1024
+ int imm24 = branch_offset >> 2;
1025
+ ASSERT(is_int24(imm24));
1026
+ emit(cond | B27 | B25 | (imm24 & kImm24Mask));
1027
+
1028
+ if (cond == al) {
1029
+ // Dead code is a good location to emit the constant pool.
1030
+ CheckConstPool(false, false);
1031
+ }
1032
+ }
1033
+
1034
+
1035
// Emit a branch-with-link (BL); records source positions for the call.
void Assembler::bl(int branch_offset, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 3) == 0);  // must be word aligned
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
1042
+
1043
+
1044
// Emit an immediate BLX (branch with link and exchange to Thumb).
// Bit 1 of the halfword-aligned offset goes into the H bit (B24).
void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);  // must be halfword aligned
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
1052
+
1053
+
1054
// Emit a register BLX (branch with link and exchange via register).
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
1059
+
1060
+
1061
// Emit a BX (branch and exchange via register).
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
1066
+
1067
+
1068
+ // Data-processing instructions.
1069
+
1070
// Data-processing instructions.

// Emit an ARM AND instruction (dst = src1 AND src2) via addrmod1.
void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | AND | s, src1, dst, src2);
}
1074
+
1075
+
1076
// Emit an ARM EOR instruction (dst = src1 XOR src2) via addrmod1.
void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | EOR | s, src1, dst, src2);
}
1080
+
1081
+
1082
// Emit an ARM SUB instruction (dst = src1 - src2) via addrmod1.
void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SUB | s, src1, dst, src2);
}
1086
+
1087
+
1088
// Emit an ARM RSB instruction (reverse subtract: dst = src2 - src1).
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSB | s, src1, dst, src2);
}
1092
+
1093
+
1094
// Emit an ARM ADD instruction (dst = src1 + src2) via addrmod1.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADD | s, src1, dst, src2);
}
1098
+
1099
+
1100
// Emit an ARM ADC instruction (add with carry) via addrmod1.
void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADC | s, src1, dst, src2);
}
1104
+
1105
+
1106
// Emit an ARM SBC instruction (subtract with carry) via addrmod1.
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SBC | s, src1, dst, src2);
}
1110
+
1111
+
1112
// Emit an ARM RSC instruction (reverse subtract with carry) via addrmod1.
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSC | s, src1, dst, src2);
}
1116
+
1117
+
1118
// Emit a TST (flag-setting AND, result discarded; rd field unused, r0).
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TST | S, src1, r0, src2);
}
1121
+
1122
+
1123
// Emit a TEQ (flag-setting XOR, result discarded; rd field unused, r0).
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TEQ | S, src1, r0, src2);
}
1126
+
1127
+
1128
// Emit a CMP (flag-setting subtract, result discarded; rd field unused).
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMP | S, src1, r0, src2);
}
1131
+
1132
+
1133
// Emit a CMP with a raw 12-bit immediate field, bypassing the usual
// rotate/fits_shifter encoding done by Operand.
void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  ASSERT(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
1138
+
1139
+
1140
// Emit a CMN (flag-setting add, result discarded; rd field unused, r0).
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMN | S, src1, r0, src2);
}
1143
+
1144
+
1145
// Emit an ARM ORR instruction (dst = src1 OR src2) via addrmod1.
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ORR | s, src1, dst, src2);
}
1149
+
1150
+
1151
// Emit a MOV.  A move into pc is a branch, so source positions are
// recorded first in that case.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}
1161
+
1162
+
1163
// Load a 16-bit immediate into the low half of reg (movw semantics).
// Delegated to mov(), which picks the best encoding for the constant.
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  ASSERT(immediate < 0x10000);  // must fit in 16 bits
  mov(reg, Operand(immediate), LeaveCC, cond);
}
1167
+
1168
+
1169
// Emit a MOVT: load a 16-bit immediate into the top half of reg,
// preserving the low half (ARMv7).
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1172
+
1173
+
1174
// Emit an ARM BIC instruction (dst = src1 AND NOT src2) via addrmod1.
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | BIC | s, src1, dst, src2);
}
1178
+
1179
+
1180
// Emit an ARM MVN instruction (dst = NOT src) via addrmod1.
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | MVN | s, r0, dst, src);
}
1183
+
1184
+
1185
+ // Multiply instructions.
1186
+ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1187
+ SBit s, Condition cond) {
1188
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1189
+ emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1190
+ src2.code()*B8 | B7 | B4 | src1.code());
1191
+ }
1192
+
1193
+
1194
// Emit a MUL (dst = src1 * src2, low 32 bits).
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
1200
+
1201
+
1202
// Emit an SMLAL (signed 64-bit multiply-accumulate into dstH:dstL).
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // destination halves must be distinct
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1213
+
1214
+
1215
// Emit an SMULL (signed 64-bit multiply into dstH:dstL).
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // destination halves must be distinct
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1226
+
1227
+
1228
// Emit a UMLAL (unsigned 64-bit multiply-accumulate into dstH:dstL).
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // destination halves must be distinct
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1239
+
1240
+
1241
// Emit a UMULL (unsigned 64-bit multiply into dstH:dstL).
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // destination halves must be distinct
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1252
+
1253
+
1254
+ // Miscellaneous arithmetic instructions.
1255
+ void Assembler::clz(Register dst, Register src, Condition cond) {
1256
+ // v5 and above.
1257
+ ASSERT(!dst.is(pc) && !src.is(pc));
1258
+ emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1259
+ 15*B8 | CLZ | src.code());
1260
+ }
1261
+
1262
+
1263
+ // Saturating instructions.
1264
+
1265
+ // Unsigned saturate.
1266
// Saturating instructions.

// Unsigned saturate.
// Clamps the (optionally shifted) source to the range [0, 2^satpos - 1].
// Only LSL/ASR immediate shifts of the source register are allowed.
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  ASSERT(src.rs_.is(no_reg));  // register-specified shifts are not encodable

  // sh bit selects ASR (1) vs LSL (0) for the source shift.
  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
1285
+
1286
+
1287
+ // Bitfield manipulation instructions.
1288
+
1289
+ // Unsigned bit field extract.
1290
+ // Extracts #width adjacent bits from position #lsb in a register, and
1291
+ // writes them to the low bits of a destination register.
1292
+ // ubfx dst, src, #lsb, #width
1293
// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register (zero-extended).
// ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));  // field must fit in 32 bits
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1306
+
1307
+
1308
+ // Signed bit field extract.
1309
+ // Extracts #width adjacent bits from position #lsb in a register, and
1310
+ // writes them to the low bits of a destination register. The extracted
1311
+ // value is sign extended to fill the destination register.
1312
+ // sbfx dst, src, #lsb, #width
1313
// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
// sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));  // field must fit in 32 bits
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1326
+
1327
+
1328
+ // Bit field clear.
1329
+ // Sets #width adjacent bits at position #lsb in the destination register
1330
+ // to zero, preserving the value of the other bits.
1331
+ // bfc dst, #lsb, #width
1332
+ void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1333
+ // v7 and above.
1334
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
1335
+ ASSERT(!dst.is(pc));
1336
+ ASSERT((lsb >= 0) && (lsb <= 31));
1337
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
1338
+ int msb = lsb + width - 1;
1339
+ emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1340
+ }
1341
+
1342
+
1343
+ // Bit field insert.
1344
+ // Inserts #width adjacent bits from the low bits of the source register
1345
+ // into position #lsb of the destination register.
1346
+ // bfi dst, src, #lsb, #width
1347
// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
// bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;  // encoding carries msb, not width
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
1361
+
1362
+
1363
+ // Status register access instructions.
1364
+ void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1365
+ ASSERT(!dst.is(pc));
1366
+ emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1367
+ }
1368
+
1369
+
1370
// Emit an MSR (write selected status-register fields from an immediate
// or a plain register).  Unencodable immediates are routed through ip
// via a pc-relative constant-pool load.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_use_constant_pool() ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1393
+
1394
+
1395
+ // Load/Store instructions.
1396
+ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1397
+ if (dst.is(pc)) {
1398
+ positions_recorder()->WriteRecordedPositions();
1399
+ }
1400
+ addrmod2(cond | B26 | L, dst, src);
1401
+ }
1402
+
1403
+
1404
// Emit an STR (store word) via addrmod2.
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);
}
1407
+
1408
+
1409
// Emit an LDRB (load byte, zero-extended) via addrmod2.
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}
1412
+
1413
+
1414
// Emit an STRB (store byte) via addrmod2.
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}
1417
+
1418
+
1419
// Emit an LDRH (load halfword, zero-extended) via addrmod3.
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}
1422
+
1423
+
1424
// Emit an STRH (store halfword) via addrmod3.
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}
1427
+
1428
+
1429
// Emit an LDRSB (load signed byte, sign-extended) via addrmod3.
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}
1432
+
1433
+
1434
// Emit an LDRSH (load signed halfword, sign-extended) via addrmod3.
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
1437
+
1438
+
1439
// Emit an LDRD (load doubleword into an even/odd register pair).
// Requires an even-numbered dst1 with dst2 == dst1 + 1 and an
// immediate-only memory operand (ARMv7 path in this codebase).
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));       // register offsets not supported here
  ASSERT(!dst1.is(lr));              // r14.
  ASSERT_EQ(0, dst1.code() % 2);     // pair must start on an even register
  ASSERT_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}
1448
+
1449
+
1450
// Emit an STRD (store doubleword from an even/odd register pair).
// Mirrors ldrd's constraints: even src1, src2 == src1 + 1, immediate
// addressing only.
void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));       // register offsets not supported here
  ASSERT(!src1.is(lr));              // r14.
  ASSERT_EQ(0, src1.code() % 2);     // pair must start on an even register
  ASSERT_EQ(src1.code() + 1, src2.code());
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
1459
+
1460
+ // Load/Store multiple instructions.
1461
// Load/Store multiple instructions.

// Emit an LDM (load multiple registers).  May emit the constant pool
// afterwards when the instruction is an unconditional function return.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1480
+
1481
+
1482
// Emit an STM (store multiple registers) via addrmod4.
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1488
+
1489
+
1490
+ // Exception-generating instructions and debugging support.
1491
+ // Stops with a non-negative code less than kNumOfWatchedStops support
1492
+ // enabling/disabling and a counter feature. See simulator-arm.h .
1493
+ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
1494
+ #ifndef __arm__
1495
+ ASSERT(code >= kDefaultStopCode);
1496
+ // The Simulator will handle the stop instruction and get the message address.
1497
+ // It expects to find the address just after the svc instruction.
1498
+ BlockConstPoolFor(2);
1499
+ if (code >= 0) {
1500
+ svc(kStopCode + code, cond);
1501
+ } else {
1502
+ svc(kStopCode + kMaxStopCode, cond);
1503
+ }
1504
+ emit(reinterpret_cast<Instr>(msg));
1505
+ #else // def __arm__
1506
+ #ifdef CAN_USE_ARMV5_INSTRUCTIONS
1507
+ if (cond != al) {
1508
+ Label skip;
1509
+ b(&skip, NegateCondition(cond));
1510
+ bkpt(0);
1511
+ bind(&skip);
1512
+ } else {
1513
+ bkpt(0);
1514
+ }
1515
+ #else // ndef CAN_USE_ARMV5_INSTRUCTIONS
1516
+ svc(0x9f0001, cond);
1517
+ #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
1518
+ #endif // def __arm__
1519
+ }
1520
+
1521
+
1522
// Emit a BKPT with a 16-bit immediate split across two encoding fields.
void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
1526
+
1527
+
1528
// Emit an SVC (supervisor call) with a 24-bit immediate.
void Assembler::svc(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
1532
+
1533
+
1534
+ // Coprocessor instructions.
1535
+ void Assembler::cdp(Coprocessor coproc,
1536
+ int opcode_1,
1537
+ CRegister crd,
1538
+ CRegister crn,
1539
+ CRegister crm,
1540
+ int opcode_2,
1541
+ Condition cond) {
1542
+ ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
1543
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
1544
+ crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
1545
+ }
1546
+
1547
+
1548
// Emit a CDP2 (unconditional CDP variant) by delegating to cdp with the
// special condition encoding.
void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}
1556
+
1557
+
1558
// Emit an MCR (move ARM register to coprocessor register).
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1569
+
1570
+
1571
// Emit an MCR2 (unconditional MCR variant).
void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
1579
+
1580
+
1581
// Emit an MRC (move coprocessor register to ARM register).  Same layout
// as mcr, with the L (direction) bit set.
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1592
+
1593
+
1594
// Emit an MRC2 (unconditional MRC variant).
void Assembler::mrc2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
1602
+
1603
+
1604
// Emit an LDC (coprocessor load) with an immediate-offset memory operand,
// encoded via addrmod5.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}
1611
+
1612
+
1613
// Emit an LDC using unindexed addressing: the 8-bit option field is
// passed through to the coprocessor, and the base is not modified.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1624
+
1625
+
1626
// Emit an LDC2 (unconditional LDC, memory-operand form).
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& src,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, src, l, kSpecialCondition);
}
1632
+
1633
+
1634
// Emit an LDC2 (unconditional LDC, unindexed form).
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
1641
+
1642
+
1643
+ // Support for VFP.
1644
+
1645
// Support for VFP.

// Load a double-precision VFP register from [Rbase + offset].  Offsets
// that do not fit the 8-bit word-offset field are handled by computing
// the address into ip first.
void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1011(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;  // U (add) bit; cleared for negative offsets
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }

  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Offset fits the immediate form: encoded as a word count.
    emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
  }
}
1676
+
1677
+
1678
// MemOperand convenience overload: only immediate-offset (Offset mode,
// no index register) operands are supported.
void Assembler::vldr(const DwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vldr(dst, operand.rn(), operand.offset(), cond);
}
1685
+
1686
+
1687
// Load a single-precision VFP register from [Rbase + offset].  The
// register number is split into sd (bits 15-12) and d (bit 22) by
// split_code.  Large offsets go through ip, as in the double variant.
void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;  // U (add) bit; cleared for negative offsets
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  dst.split_code(&sd, &d);
  ASSERT(offset >= 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
1720
+
1721
+
1722
// MemOperand convenience overload for the single-precision load; only
// immediate-offset (Offset mode) operands are supported.
void Assembler::vldr(const SwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vldr(dst, operand.rn(), operand.offset(), cond);
}
1729
+
1730
+
1731
// Store a double-precision VFP register to [Rbase + offset].  Large
// offsets are handled through ip, mirroring vldr.
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
  // Vsrc(15-12) | 1011(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;  // U (add) bit; cleared for negative offsets
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
  }
}
1761
+
1762
+
1763
// MemOperand convenience overload for the double-precision store; only
// immediate-offset (Offset mode) operands are supported.
void Assembler::vstr(const DwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vstr(src, operand.rn(), operand.offset(), cond);
}
1770
+
1771
+
1772
// Store a single-precision VFP register to [Rbase + offset].  Register
// number split into sd/d via split_code; large offsets go through ip.
void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;  // U (add) bit; cleared for negative offsets
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  src.split_code(&sd, &d);
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
1804
+
1805
+
1806
+ void Assembler::vstr(const SwVfpRegister src,
1807
+ const MemOperand& operand,
1808
+ const Condition cond) {
1809
+ ASSERT(!operand.rm().is_valid());
1810
+ ASSERT(operand.am_ == Offset);
1811
+ vldr(src, operand.rn(), operand.offset(), cond);
1812
+ }
1813
+
1814
+
1815
// Load a contiguous range of double-precision registers [first..last]
// from memory (vldm).  The register count is encoded doubled because
// each D register occupies two words.
void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-626.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | (count * 2)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());  // range must be ascending
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}
1834
+
1835
+
1836
// Store a contiguous range of double-precision registers [first..last]
// to memory (vstm).  Count is doubled, as in vldm.
void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());  // range must be ascending
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}
1855
+
1856
// Load a contiguous range of single-precision registers [first..last]
// from memory (vldm); count is the plain register count here.
void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-626.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | (count/2)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());  // range must be ascending
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
1875
+
1876
+
1877
// Store a contiguous range of single-precision registers [first..last]
// to memory (vstm); count is the plain register count here.
void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count/2)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());  // range must be ascending
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
1896
+
1897
// Reinterpret the bit pattern of an IEEE-754 double as two 32-bit words
// (low word first, as laid out on a little-endian host).
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  *lo = static_cast<uint32_t>(bits & 0xffffffff);
  *hi = static_cast<uint32_t>(bits >> 32);
}
1904
+
1905
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
// Returns true when the double d can be represented as a VFP vmov
// 8-bit modified immediate; on success *encoding holds the immediate
// in the split abcd/efgh field positions ready to OR into the opcode.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));

  // VMOV can accept an immediate of the form:
  //
  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity, comprised of two
  // 4-bit fields. For an 8-bit immediate of the form:
  //
  //  [abcdefgh]
  //
  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
  // created of the form:
  //
  //  [aBbbbbbb,bbcdefgh,00000000,00000000,
  //      00000000,00000000,00000000,00000000]
  //
  // where B = ~b.
  //

  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) {
    return false;
  }

  // Bits 62:55 must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }

  // Bit 63 must be NOT bit 62.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //  [00000000,0000abcd,00000000,0000efgh]
  *encoding  = (hi >> 16) & 0xf;      // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
1954
+
1955
+
1956
+ void Assembler::vmov(const DwVfpRegister dst,
1957
+ double imm,
1958
+ const Condition cond) {
1959
+ // Dd = immediate
1960
+ // Instruction details available in ARM DDI 0406B, A8-640.
1961
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
1962
+
1963
+ uint32_t enc;
1964
+ if (FitsVMOVDoubleImmediate(imm, &enc)) {
1965
+ // The double can be encoded in the instruction.
1966
+ emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
1967
+ } else {
1968
+ // Synthesise the double from ARM immediates. This could be implemented
1969
+ // using vldr from a constant pool.
1970
+ uint32_t lo, hi;
1971
+ DoubleAsTwoUInt32(imm, &lo, &hi);
1972
+
1973
+ if (lo == hi) {
1974
+ // If the lo and hi parts of the double are equal, the literal is easier
1975
+ // to create. This is the case with 0.0.
1976
+ mov(ip, Operand(lo));
1977
+ vmov(dst, ip, ip);
1978
+ } else {
1979
+ // Move the low part of the double into the lower of the corresponsing S
1980
+ // registers of D register dst.
1981
+ mov(ip, Operand(lo));
1982
+ vmov(dst.low(), ip, cond);
1983
+
1984
+ // Move the high part of the double into the higher of the corresponsing S
1985
+ // registers of D register dst.
1986
+ mov(ip, Operand(hi));
1987
+ vmov(dst.high(), ip, cond);
1988
+ }
1989
+ }
1990
+ }
1991
+
1992
+
1993
+ void Assembler::vmov(const SwVfpRegister dst,
1994
+ const SwVfpRegister src,
1995
+ const Condition cond) {
1996
+ // Sd = Sm
1997
+ // Instruction details available in ARM DDI 0406B, A8-642.
1998
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
1999
+ int sd, d, sm, m;
2000
+ dst.split_code(&sd, &d);
2001
+ src.split_code(&sm, &m);
2002
+ emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
2003
+ }
2004
+
2005
+
2006
+ void Assembler::vmov(const DwVfpRegister dst,
2007
+ const DwVfpRegister src,
2008
+ const Condition cond) {
2009
+ // Dd = Dm
2010
+ // Instruction details available in ARM DDI 0406B, A8-642.
2011
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2012
+ emit(cond | 0xE*B24 | 0xB*B20 |
2013
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
2014
+ }
2015
+
2016
+
2017
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>: move two core registers into a double register.
  // src1 occupies the Rt field (bits 15-12), src2 the Rt2 field (bits 19-16).
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
2030
+
2031
+
2032
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm: move a double register into two core registers.
  // dst1 occupies the Rt field (bits 15-12), dst2 the Rt2 field (bits 19-16).
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
2045
+
2046
+
2047
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt: move a core register into a single-precision VFP register.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);  // Split the S-register code into Vn and N fields.
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2060
+
2061
+
2062
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn: move a single-precision VFP register into a core register.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);  // Split the S-register code into Vn and N fields.
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2075
+
2076
+
2077
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
// S32/U32: 32-bit signed/unsigned integer; F32/F64: single/double precision.
enum VFPType { S32, U32, F32, F64 };
2080
+
2081
+
2082
+ static bool IsSignedVFPType(VFPType type) {
2083
+ switch (type) {
2084
+ case S32:
2085
+ return true;
2086
+ case U32:
2087
+ return false;
2088
+ default:
2089
+ UNREACHABLE();
2090
+ return false;
2091
+ }
2092
+ }
2093
+
2094
+
2095
+ static bool IsIntegerVFPType(VFPType type) {
2096
+ switch (type) {
2097
+ case S32:
2098
+ case U32:
2099
+ return true;
2100
+ case F32:
2101
+ case F64:
2102
+ return false;
2103
+ default:
2104
+ UNREACHABLE();
2105
+ return false;
2106
+ }
2107
+ }
2108
+
2109
+
2110
+ static bool IsDoubleVFPType(VFPType type) {
2111
+ switch (type) {
2112
+ case F32:
2113
+ return false;
2114
+ case F64:
2115
+ return true;
2116
+ default:
2117
+ UNREACHABLE();
2118
+ return false;
2119
+ }
2120
+ }
2121
+
2122
+
2123
// Split five bit reg_code based on size of reg_type.
//  32-bit register codes are Vm:M
//  64-bit register codes are M:Vm
// where Vm is four bits, and M is a single bit.
// The two pieces are returned through *vm and *m so callers can place them
// in separate instruction fields.
static void SplitRegCode(VFPType reg_type,
                         int reg_code,
                         int* vm,
                         int* m) {
  ASSERT((reg_code >= 0) && (reg_code <= 31));
  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
    // 32 bit type: the M bit is the low bit of the code.
    *m  = reg_code & 0x1;
    *vm = reg_code >> 1;
  } else {
    // 64 bit type: the M bit is the high (fifth) bit of the code.
    *m  = (reg_code & 0x10) >> 4;
    *vm = reg_code & 0x0F;
  }
}
2142
+
2143
+
2144
// Encode vcvt.src_type.dst_type instruction. Exactly one of the two types
// may be an integer type; mixed float/int pairs use the int<->float form,
// F32/F64 pairs use the precision-conversion form.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  ASSERT(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      // Float-to-integer: opc2 selects signed/unsigned destination, sz is
      // the source precision, op carries the conversion mode.
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      // Integer-to-float: sz is the destination precision, op the source
      // signedness.
      ASSERT(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
2188
+
2189
+
2190
+ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2191
+ const SwVfpRegister src,
2192
+ VFPConversionMode mode,
2193
+ const Condition cond) {
2194
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2195
+ emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
2196
+ }
2197
+
2198
+
2199
+ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2200
+ const SwVfpRegister src,
2201
+ VFPConversionMode mode,
2202
+ const Condition cond) {
2203
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2204
+ emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
2205
+ }
2206
+
2207
+
2208
+ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2209
+ const SwVfpRegister src,
2210
+ VFPConversionMode mode,
2211
+ const Condition cond) {
2212
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2213
+ emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
2214
+ }
2215
+
2216
+
2217
+ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2218
+ const DwVfpRegister src,
2219
+ VFPConversionMode mode,
2220
+ const Condition cond) {
2221
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2222
+ emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
2223
+ }
2224
+
2225
+
2226
+ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2227
+ const DwVfpRegister src,
2228
+ VFPConversionMode mode,
2229
+ const Condition cond) {
2230
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2231
+ emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
2232
+ }
2233
+
2234
+
2235
+ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2236
+ const SwVfpRegister src,
2237
+ VFPConversionMode mode,
2238
+ const Condition cond) {
2239
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2240
+ emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
2241
+ }
2242
+
2243
+
2244
+ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2245
+ const DwVfpRegister src,
2246
+ VFPConversionMode mode,
2247
+ const Condition cond) {
2248
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2249
+ emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
2250
+ }
2251
+
2252
+
2253
+ void Assembler::vneg(const DwVfpRegister dst,
2254
+ const DwVfpRegister src,
2255
+ const Condition cond) {
2256
+ emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
2257
+ 0x5*B9 | B8 | B6 | src.code());
2258
+ }
2259
+
2260
+
2261
+ void Assembler::vabs(const DwVfpRegister dst,
2262
+ const DwVfpRegister src,
2263
+ const Condition cond) {
2264
+ emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
2265
+ 0x5*B9 | B8 | 0x3*B6 | src.code());
2266
+ }
2267
+
2268
+
2269
+ void Assembler::vadd(const DwVfpRegister dst,
2270
+ const DwVfpRegister src1,
2271
+ const DwVfpRegister src2,
2272
+ const Condition cond) {
2273
+ // Dd = vadd(Dn, Dm) double precision floating point addition.
2274
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2275
+ // Instruction details available in ARM DDI 0406A, A8-536.
2276
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2277
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2278
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2279
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2280
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2281
+ }
2282
+
2283
+
2284
+ void Assembler::vsub(const DwVfpRegister dst,
2285
+ const DwVfpRegister src1,
2286
+ const DwVfpRegister src2,
2287
+ const Condition cond) {
2288
+ // Dd = vsub(Dn, Dm) double precision floating point subtraction.
2289
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2290
+ // Instruction details available in ARM DDI 0406A, A8-784.
2291
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2292
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
2293
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2294
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2295
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2296
+ }
2297
+
2298
+
2299
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  // NOTE: the register codes are written straight into the 4-bit Vn/Vd/Vm
  // fields (D/N/M bits left as 0), so only codes 0-15 are representable.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
2312
+
2313
+
2314
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-584.
  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  // NOTE: the register codes are written straight into the 4-bit Vn/Vd/Vm
  // fields (D/N/M bits left as 0), so only codes 0-15 are representable.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
2327
+
2328
+
2329
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Sets the FPSCR condition flags (transfer to the CPU flags via vmrs).
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
2340
+
2341
+
2342
void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison with zero.
  // Only comparison against the immediate 0.0 is supported by this encoding;
  // the ASSERT below enforces it (src2 is not otherwise used).
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(src2 == 0.0);
  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
       src1.code()*B12 | 0x5*B9 | B8 | B6);
}
2354
+
2355
+
2356
+ void Assembler::vmsr(Register dst, Condition cond) {
2357
+ // Instruction details available in ARM DDI 0406A, A8-652.
2358
+ // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
2359
+ // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
2360
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2361
+ emit(cond | 0xE*B24 | 0xE*B20 | B16 |
2362
+ dst.code()*B12 | 0xA*B8 | B4);
2363
+ }
2364
+
2365
+
2366
+ void Assembler::vmrs(Register dst, Condition cond) {
2367
+ // Instruction details available in ARM DDI 0406A, A8-652.
2368
+ // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
2369
+ // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
2370
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
2371
+ emit(cond | 0xE*B24 | 0xF*B20 | B16 |
2372
+ dst.code()*B12 | 0xA*B8 | B4);
2373
+ }
2374
+
2375
+
2376
void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // Dd = sqrt(Dm): double precision floating point square root.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
       dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}
2385
+
2386
+
2387
+ // Pseudo instructions.
2388
+ void Assembler::nop(int type) {
2389
+ // This is mov rx, rx.
2390
+ ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2391
+ emit(al | 13*B21 | type*B12 | type);
2392
+ }
2393
+
2394
+
2395
+ bool Assembler::IsNop(Instr instr, int type) {
2396
+ // Check for mov rx, rx where x = type.
2397
+ ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2398
+ return instr == (al | 13*B21 | type*B12 | type);
2399
+ }
2400
+
2401
+
2402
+ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
2403
+ uint32_t dummy1;
2404
+ uint32_t dummy2;
2405
+ return fits_shifter(imm32, &dummy1, &dummy2, NULL);
2406
+ }
2407
+
2408
+
2409
+ void Assembler::BlockConstPoolFor(int instructions) {
2410
+ BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
2411
+ }
2412
+
2413
+
2414
+ // Debugging.
2415
void Assembler::RecordJSReturn() {
  // Mark the current pc as a JS return site in the relocation info.
  // Flush any buffered source positions first so they are attributed to the
  // code before the return marker.
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
2420
+
2421
+
2422
void Assembler::RecordDebugBreakSlot() {
  // Mark the current pc as a debug-break slot in the relocation info.
  // Flush any buffered source positions first so they are attributed to the
  // code before the slot marker.
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
2427
+
2428
+
2429
void Assembler::RecordComment(const char* msg) {
  // Attach a code comment (used by the disassembler) at the current pc.
  // Only the pointer is recorded in the reloc info, not a copy of the text,
  // so msg must outlive code generation (callers pass string literals).
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
2435
+
2436
+
2437
void Assembler::GrowBuffer() {
  // Grow the code buffer: 4KB minimum, doubling while below 1MB, then in
  // 1MB steps. Instructions (growing up from the bottom) and relocation
  // info (growing down from the top) are both copied into the new buffer.
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  // Reloc info occupies the region from reloc_info_writer.pos() to the end
  // of the old buffer.
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. pc_delta/rc_delta are how far the instruction region and
  // the (top-anchored) reloc region move, respectively.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate pending relocation entries (JS_RETURN entries are handled
  // elsewhere and keep their recorded pc).
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
2486
+
2487
+
2488
void Assembler::db(uint8_t data) {
  // Write one raw data byte at the current pc.
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using db.
  ASSERT(num_prinfo_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
2497
+
2498
+
2499
void Assembler::dd(uint32_t data) {
  // Write one raw 32-bit data word at the current pc.
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using dd.
  ASSERT(num_prinfo_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
2508
+
2509
+
2510
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // Record relocation info for the instruction about to be emitted at pc_.
  // Modes that need a constant pool slot are additionally queued in prinfo_.
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      // A code target with an id uses the AST id stashed by the caller in
      // ast_id_for_reloc_info_ as the reloc data, then clears the stash.
      ASSERT(ast_id_for_reloc_info_ != kNoASTId);
      RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
      ast_id_for_reloc_info_ = kNoASTId;
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
2549
+
2550
+
2551
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Decide whether the pending constant pool entries must be emitted now,
  // and if so emit them (patching the ldr/str instructions that reference
  // them). force_emit forces emission; require_jump indicates the pool does
  // not follow an unconditional branch and so needs a jump over it.
  //
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
  // both checked here. Also, recursive calls to CheckConstPool are blocked by
  // no_const_pool_before_.
  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (const_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_const_pool_before_;
    }

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker "Undefined instruction" as specified by
  // A5.6 (ARMv7) Instruction set encoding. The low bits carry the number of
  // entries that follow.
  emit(kConstantPoolMarker | num_prinfo_);

  // Emit constant pool entries and back-patch each referring load/store with
  // the now-known pc-relative offset of its entry.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    // -8 because on ARM the pc reads as the current instruction plus 8.
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      instr &= ~U;  // Negative offset: clear the U (add/subtract) bit.
      delta = -delta;
    }
    ASSERT(is_uint12(delta));
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
2657
+
2658
+
2659
+ } } // namespace v8::internal
2660
+
2661
+ #endif // V8_TARGET_ARCH_ARM