libv8-sgonyea 3.3.10

Sign up to get free protection for your applications and to get access to all the features.
Files changed (500) hide show
  1. data/.gitignore +8 -0
  2. data/.gitmodules +3 -0
  3. data/Gemfile +4 -0
  4. data/README.md +76 -0
  5. data/Rakefile +113 -0
  6. data/ext/libv8/extconf.rb +28 -0
  7. data/lib/libv8.rb +15 -0
  8. data/lib/libv8/Makefile +30 -0
  9. data/lib/libv8/detect_cpu.rb +27 -0
  10. data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
  11. data/lib/libv8/v8/.gitignore +35 -0
  12. data/lib/libv8/v8/AUTHORS +44 -0
  13. data/lib/libv8/v8/ChangeLog +2839 -0
  14. data/lib/libv8/v8/LICENSE +52 -0
  15. data/lib/libv8/v8/LICENSE.strongtalk +29 -0
  16. data/lib/libv8/v8/LICENSE.v8 +26 -0
  17. data/lib/libv8/v8/LICENSE.valgrind +45 -0
  18. data/lib/libv8/v8/SConstruct +1478 -0
  19. data/lib/libv8/v8/build/README.txt +49 -0
  20. data/lib/libv8/v8/build/all.gyp +18 -0
  21. data/lib/libv8/v8/build/armu.gypi +32 -0
  22. data/lib/libv8/v8/build/common.gypi +144 -0
  23. data/lib/libv8/v8/build/gyp_v8 +145 -0
  24. data/lib/libv8/v8/include/v8-debug.h +395 -0
  25. data/lib/libv8/v8/include/v8-preparser.h +117 -0
  26. data/lib/libv8/v8/include/v8-profiler.h +505 -0
  27. data/lib/libv8/v8/include/v8-testing.h +104 -0
  28. data/lib/libv8/v8/include/v8.h +4124 -0
  29. data/lib/libv8/v8/include/v8stdint.h +53 -0
  30. data/lib/libv8/v8/preparser/SConscript +38 -0
  31. data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
  32. data/lib/libv8/v8/src/SConscript +368 -0
  33. data/lib/libv8/v8/src/accessors.cc +767 -0
  34. data/lib/libv8/v8/src/accessors.h +123 -0
  35. data/lib/libv8/v8/src/allocation-inl.h +49 -0
  36. data/lib/libv8/v8/src/allocation.cc +122 -0
  37. data/lib/libv8/v8/src/allocation.h +143 -0
  38. data/lib/libv8/v8/src/api.cc +5845 -0
  39. data/lib/libv8/v8/src/api.h +574 -0
  40. data/lib/libv8/v8/src/apinatives.js +110 -0
  41. data/lib/libv8/v8/src/apiutils.h +73 -0
  42. data/lib/libv8/v8/src/arguments.h +118 -0
  43. data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
  44. data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
  45. data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
  46. data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
  47. data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
  48. data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
  49. data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
  50. data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
  51. data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
  52. data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
  53. data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
  54. data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
  55. data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
  56. data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
  57. data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
  58. data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
  59. data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
  60. data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
  61. data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
  62. data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
  63. data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
  64. data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
  65. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
  66. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
  67. data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
  68. data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
  69. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
  70. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
  71. data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
  72. data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
  73. data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
  74. data/lib/libv8/v8/src/array.js +1366 -0
  75. data/lib/libv8/v8/src/assembler.cc +1207 -0
  76. data/lib/libv8/v8/src/assembler.h +858 -0
  77. data/lib/libv8/v8/src/ast-inl.h +112 -0
  78. data/lib/libv8/v8/src/ast.cc +1146 -0
  79. data/lib/libv8/v8/src/ast.h +2188 -0
  80. data/lib/libv8/v8/src/atomicops.h +167 -0
  81. data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
  82. data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
  83. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
  84. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
  85. data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
  86. data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
  87. data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
  88. data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
  89. data/lib/libv8/v8/src/bignum.cc +768 -0
  90. data/lib/libv8/v8/src/bignum.h +140 -0
  91. data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
  92. data/lib/libv8/v8/src/bootstrapper.h +188 -0
  93. data/lib/libv8/v8/src/builtins.cc +1707 -0
  94. data/lib/libv8/v8/src/builtins.h +371 -0
  95. data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
  96. data/lib/libv8/v8/src/cached-powers.cc +177 -0
  97. data/lib/libv8/v8/src/cached-powers.h +65 -0
  98. data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
  99. data/lib/libv8/v8/src/char-predicates.h +67 -0
  100. data/lib/libv8/v8/src/checks.cc +110 -0
  101. data/lib/libv8/v8/src/checks.h +296 -0
  102. data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
  103. data/lib/libv8/v8/src/circular-queue.cc +122 -0
  104. data/lib/libv8/v8/src/circular-queue.h +103 -0
  105. data/lib/libv8/v8/src/code-stubs.cc +267 -0
  106. data/lib/libv8/v8/src/code-stubs.h +1011 -0
  107. data/lib/libv8/v8/src/code.h +70 -0
  108. data/lib/libv8/v8/src/codegen.cc +231 -0
  109. data/lib/libv8/v8/src/codegen.h +84 -0
  110. data/lib/libv8/v8/src/compilation-cache.cc +540 -0
  111. data/lib/libv8/v8/src/compilation-cache.h +287 -0
  112. data/lib/libv8/v8/src/compiler.cc +786 -0
  113. data/lib/libv8/v8/src/compiler.h +312 -0
  114. data/lib/libv8/v8/src/contexts.cc +347 -0
  115. data/lib/libv8/v8/src/contexts.h +391 -0
  116. data/lib/libv8/v8/src/conversions-inl.h +106 -0
  117. data/lib/libv8/v8/src/conversions.cc +1131 -0
  118. data/lib/libv8/v8/src/conversions.h +135 -0
  119. data/lib/libv8/v8/src/counters.cc +93 -0
  120. data/lib/libv8/v8/src/counters.h +254 -0
  121. data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
  122. data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
  123. data/lib/libv8/v8/src/cpu-profiler.h +302 -0
  124. data/lib/libv8/v8/src/cpu.h +69 -0
  125. data/lib/libv8/v8/src/d8-debug.cc +367 -0
  126. data/lib/libv8/v8/src/d8-debug.h +158 -0
  127. data/lib/libv8/v8/src/d8-posix.cc +695 -0
  128. data/lib/libv8/v8/src/d8-readline.cc +130 -0
  129. data/lib/libv8/v8/src/d8-windows.cc +42 -0
  130. data/lib/libv8/v8/src/d8.cc +803 -0
  131. data/lib/libv8/v8/src/d8.gyp +91 -0
  132. data/lib/libv8/v8/src/d8.h +235 -0
  133. data/lib/libv8/v8/src/d8.js +2798 -0
  134. data/lib/libv8/v8/src/data-flow.cc +66 -0
  135. data/lib/libv8/v8/src/data-flow.h +205 -0
  136. data/lib/libv8/v8/src/date.js +1103 -0
  137. data/lib/libv8/v8/src/dateparser-inl.h +127 -0
  138. data/lib/libv8/v8/src/dateparser.cc +178 -0
  139. data/lib/libv8/v8/src/dateparser.h +266 -0
  140. data/lib/libv8/v8/src/debug-agent.cc +447 -0
  141. data/lib/libv8/v8/src/debug-agent.h +129 -0
  142. data/lib/libv8/v8/src/debug-debugger.js +2569 -0
  143. data/lib/libv8/v8/src/debug.cc +3165 -0
  144. data/lib/libv8/v8/src/debug.h +1057 -0
  145. data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
  146. data/lib/libv8/v8/src/deoptimizer.h +602 -0
  147. data/lib/libv8/v8/src/disasm.h +80 -0
  148. data/lib/libv8/v8/src/disassembler.cc +343 -0
  149. data/lib/libv8/v8/src/disassembler.h +58 -0
  150. data/lib/libv8/v8/src/diy-fp.cc +58 -0
  151. data/lib/libv8/v8/src/diy-fp.h +117 -0
  152. data/lib/libv8/v8/src/double.h +238 -0
  153. data/lib/libv8/v8/src/dtoa.cc +103 -0
  154. data/lib/libv8/v8/src/dtoa.h +85 -0
  155. data/lib/libv8/v8/src/execution.cc +849 -0
  156. data/lib/libv8/v8/src/execution.h +297 -0
  157. data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
  158. data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
  159. data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
  160. data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
  161. data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
  162. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
  163. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
  164. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
  165. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
  166. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
  167. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
  168. data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
  169. data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
  170. data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
  171. data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
  172. data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
  173. data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
  174. data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
  175. data/lib/libv8/v8/src/factory.cc +1222 -0
  176. data/lib/libv8/v8/src/factory.h +442 -0
  177. data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
  178. data/lib/libv8/v8/src/fast-dtoa.h +83 -0
  179. data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
  180. data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
  181. data/lib/libv8/v8/src/flag-definitions.h +560 -0
  182. data/lib/libv8/v8/src/flags.cc +551 -0
  183. data/lib/libv8/v8/src/flags.h +79 -0
  184. data/lib/libv8/v8/src/frames-inl.h +247 -0
  185. data/lib/libv8/v8/src/frames.cc +1243 -0
  186. data/lib/libv8/v8/src/frames.h +870 -0
  187. data/lib/libv8/v8/src/full-codegen.cc +1374 -0
  188. data/lib/libv8/v8/src/full-codegen.h +771 -0
  189. data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
  190. data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
  191. data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
  192. data/lib/libv8/v8/src/gdb-jit.h +143 -0
  193. data/lib/libv8/v8/src/global-handles.cc +665 -0
  194. data/lib/libv8/v8/src/global-handles.h +284 -0
  195. data/lib/libv8/v8/src/globals.h +325 -0
  196. data/lib/libv8/v8/src/handles-inl.h +177 -0
  197. data/lib/libv8/v8/src/handles.cc +987 -0
  198. data/lib/libv8/v8/src/handles.h +382 -0
  199. data/lib/libv8/v8/src/hashmap.cc +230 -0
  200. data/lib/libv8/v8/src/hashmap.h +123 -0
  201. data/lib/libv8/v8/src/heap-inl.h +704 -0
  202. data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
  203. data/lib/libv8/v8/src/heap-profiler.h +397 -0
  204. data/lib/libv8/v8/src/heap.cc +5930 -0
  205. data/lib/libv8/v8/src/heap.h +2268 -0
  206. data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
  207. data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
  208. data/lib/libv8/v8/src/hydrogen.cc +6239 -0
  209. data/lib/libv8/v8/src/hydrogen.h +1202 -0
  210. data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
  211. data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
  212. data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
  213. data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
  214. data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
  215. data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
  216. data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
  217. data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
  218. data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
  219. data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
  220. data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
  221. data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
  222. data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
  223. data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
  224. data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
  225. data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
  226. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
  227. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
  228. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
  229. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
  230. data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
  231. data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
  232. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
  233. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
  234. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
  235. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
  236. data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
  237. data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
  238. data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
  239. data/lib/libv8/v8/src/ic-inl.h +130 -0
  240. data/lib/libv8/v8/src/ic.cc +2577 -0
  241. data/lib/libv8/v8/src/ic.h +736 -0
  242. data/lib/libv8/v8/src/inspector.cc +63 -0
  243. data/lib/libv8/v8/src/inspector.h +62 -0
  244. data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
  245. data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
  246. data/lib/libv8/v8/src/isolate-inl.h +50 -0
  247. data/lib/libv8/v8/src/isolate.cc +1869 -0
  248. data/lib/libv8/v8/src/isolate.h +1382 -0
  249. data/lib/libv8/v8/src/json-parser.cc +504 -0
  250. data/lib/libv8/v8/src/json-parser.h +161 -0
  251. data/lib/libv8/v8/src/json.js +342 -0
  252. data/lib/libv8/v8/src/jsregexp.cc +5385 -0
  253. data/lib/libv8/v8/src/jsregexp.h +1492 -0
  254. data/lib/libv8/v8/src/list-inl.h +212 -0
  255. data/lib/libv8/v8/src/list.h +174 -0
  256. data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
  257. data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
  258. data/lib/libv8/v8/src/lithium-allocator.h +630 -0
  259. data/lib/libv8/v8/src/lithium.cc +190 -0
  260. data/lib/libv8/v8/src/lithium.h +597 -0
  261. data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
  262. data/lib/libv8/v8/src/liveedit.cc +1691 -0
  263. data/lib/libv8/v8/src/liveedit.h +180 -0
  264. data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
  265. data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
  266. data/lib/libv8/v8/src/liveobjectlist.h +322 -0
  267. data/lib/libv8/v8/src/log-inl.h +59 -0
  268. data/lib/libv8/v8/src/log-utils.cc +428 -0
  269. data/lib/libv8/v8/src/log-utils.h +231 -0
  270. data/lib/libv8/v8/src/log.cc +1993 -0
  271. data/lib/libv8/v8/src/log.h +476 -0
  272. data/lib/libv8/v8/src/macro-assembler.h +120 -0
  273. data/lib/libv8/v8/src/macros.py +178 -0
  274. data/lib/libv8/v8/src/mark-compact.cc +3143 -0
  275. data/lib/libv8/v8/src/mark-compact.h +506 -0
  276. data/lib/libv8/v8/src/math.js +264 -0
  277. data/lib/libv8/v8/src/messages.cc +179 -0
  278. data/lib/libv8/v8/src/messages.h +113 -0
  279. data/lib/libv8/v8/src/messages.js +1096 -0
  280. data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
  281. data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
  282. data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
  283. data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
  284. data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
  285. data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
  286. data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
  287. data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
  288. data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
  289. data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
  290. data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
  291. data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
  292. data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
  293. data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
  294. data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
  295. data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
  296. data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
  297. data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
  298. data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
  299. data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
  300. data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
  301. data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
  302. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
  303. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
  304. data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
  305. data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
  306. data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
  307. data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
  308. data/lib/libv8/v8/src/mksnapshot.cc +328 -0
  309. data/lib/libv8/v8/src/natives.h +64 -0
  310. data/lib/libv8/v8/src/objects-debug.cc +738 -0
  311. data/lib/libv8/v8/src/objects-inl.h +4323 -0
  312. data/lib/libv8/v8/src/objects-printer.cc +829 -0
  313. data/lib/libv8/v8/src/objects-visiting.cc +148 -0
  314. data/lib/libv8/v8/src/objects-visiting.h +424 -0
  315. data/lib/libv8/v8/src/objects.cc +10585 -0
  316. data/lib/libv8/v8/src/objects.h +6838 -0
  317. data/lib/libv8/v8/src/parser.cc +4997 -0
  318. data/lib/libv8/v8/src/parser.h +765 -0
  319. data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
  320. data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
  321. data/lib/libv8/v8/src/platform-linux.cc +1149 -0
  322. data/lib/libv8/v8/src/platform-macos.cc +830 -0
  323. data/lib/libv8/v8/src/platform-nullos.cc +479 -0
  324. data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
  325. data/lib/libv8/v8/src/platform-posix.cc +424 -0
  326. data/lib/libv8/v8/src/platform-solaris.cc +762 -0
  327. data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
  328. data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
  329. data/lib/libv8/v8/src/platform-tls.h +50 -0
  330. data/lib/libv8/v8/src/platform-win32.cc +2021 -0
  331. data/lib/libv8/v8/src/platform.h +667 -0
  332. data/lib/libv8/v8/src/preparse-data-format.h +62 -0
  333. data/lib/libv8/v8/src/preparse-data.cc +183 -0
  334. data/lib/libv8/v8/src/preparse-data.h +225 -0
  335. data/lib/libv8/v8/src/preparser-api.cc +220 -0
  336. data/lib/libv8/v8/src/preparser.cc +1450 -0
  337. data/lib/libv8/v8/src/preparser.h +493 -0
  338. data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
  339. data/lib/libv8/v8/src/prettyprinter.h +223 -0
  340. data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
  341. data/lib/libv8/v8/src/profile-generator.cc +3098 -0
  342. data/lib/libv8/v8/src/profile-generator.h +1126 -0
  343. data/lib/libv8/v8/src/property.cc +105 -0
  344. data/lib/libv8/v8/src/property.h +365 -0
  345. data/lib/libv8/v8/src/proxy.js +83 -0
  346. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
  347. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
  348. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
  349. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
  350. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
  351. data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
  352. data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
  353. data/lib/libv8/v8/src/regexp-stack.cc +111 -0
  354. data/lib/libv8/v8/src/regexp-stack.h +147 -0
  355. data/lib/libv8/v8/src/regexp.js +483 -0
  356. data/lib/libv8/v8/src/rewriter.cc +360 -0
  357. data/lib/libv8/v8/src/rewriter.h +50 -0
  358. data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
  359. data/lib/libv8/v8/src/runtime-profiler.h +201 -0
  360. data/lib/libv8/v8/src/runtime.cc +12227 -0
  361. data/lib/libv8/v8/src/runtime.h +652 -0
  362. data/lib/libv8/v8/src/runtime.js +649 -0
  363. data/lib/libv8/v8/src/safepoint-table.cc +256 -0
  364. data/lib/libv8/v8/src/safepoint-table.h +270 -0
  365. data/lib/libv8/v8/src/scanner-base.cc +952 -0
  366. data/lib/libv8/v8/src/scanner-base.h +670 -0
  367. data/lib/libv8/v8/src/scanner.cc +345 -0
  368. data/lib/libv8/v8/src/scanner.h +146 -0
  369. data/lib/libv8/v8/src/scopeinfo.cc +646 -0
  370. data/lib/libv8/v8/src/scopeinfo.h +254 -0
  371. data/lib/libv8/v8/src/scopes.cc +1150 -0
  372. data/lib/libv8/v8/src/scopes.h +507 -0
  373. data/lib/libv8/v8/src/serialize.cc +1574 -0
  374. data/lib/libv8/v8/src/serialize.h +589 -0
  375. data/lib/libv8/v8/src/shell.h +55 -0
  376. data/lib/libv8/v8/src/simulator.h +43 -0
  377. data/lib/libv8/v8/src/small-pointer-list.h +163 -0
  378. data/lib/libv8/v8/src/smart-pointer.h +109 -0
  379. data/lib/libv8/v8/src/snapshot-common.cc +83 -0
  380. data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
  381. data/lib/libv8/v8/src/snapshot.h +91 -0
  382. data/lib/libv8/v8/src/spaces-inl.h +529 -0
  383. data/lib/libv8/v8/src/spaces.cc +3145 -0
  384. data/lib/libv8/v8/src/spaces.h +2369 -0
  385. data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
  386. data/lib/libv8/v8/src/splay-tree.h +205 -0
  387. data/lib/libv8/v8/src/string-search.cc +41 -0
  388. data/lib/libv8/v8/src/string-search.h +568 -0
  389. data/lib/libv8/v8/src/string-stream.cc +592 -0
  390. data/lib/libv8/v8/src/string-stream.h +191 -0
  391. data/lib/libv8/v8/src/string.js +994 -0
  392. data/lib/libv8/v8/src/strtod.cc +440 -0
  393. data/lib/libv8/v8/src/strtod.h +40 -0
  394. data/lib/libv8/v8/src/stub-cache.cc +1965 -0
  395. data/lib/libv8/v8/src/stub-cache.h +924 -0
  396. data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
  397. data/lib/libv8/v8/src/token.cc +63 -0
  398. data/lib/libv8/v8/src/token.h +288 -0
  399. data/lib/libv8/v8/src/type-info.cc +507 -0
  400. data/lib/libv8/v8/src/type-info.h +272 -0
  401. data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
  402. data/lib/libv8/v8/src/unbound-queue.h +69 -0
  403. data/lib/libv8/v8/src/unicode-inl.h +238 -0
  404. data/lib/libv8/v8/src/unicode.cc +1624 -0
  405. data/lib/libv8/v8/src/unicode.h +280 -0
  406. data/lib/libv8/v8/src/uri.js +408 -0
  407. data/lib/libv8/v8/src/utils-inl.h +48 -0
  408. data/lib/libv8/v8/src/utils.cc +371 -0
  409. data/lib/libv8/v8/src/utils.h +800 -0
  410. data/lib/libv8/v8/src/v8-counters.cc +62 -0
  411. data/lib/libv8/v8/src/v8-counters.h +314 -0
  412. data/lib/libv8/v8/src/v8.cc +213 -0
  413. data/lib/libv8/v8/src/v8.h +131 -0
  414. data/lib/libv8/v8/src/v8checks.h +64 -0
  415. data/lib/libv8/v8/src/v8dll-main.cc +44 -0
  416. data/lib/libv8/v8/src/v8globals.h +512 -0
  417. data/lib/libv8/v8/src/v8memory.h +82 -0
  418. data/lib/libv8/v8/src/v8natives.js +1310 -0
  419. data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
  420. data/lib/libv8/v8/src/v8threads.cc +464 -0
  421. data/lib/libv8/v8/src/v8threads.h +165 -0
  422. data/lib/libv8/v8/src/v8utils.h +319 -0
  423. data/lib/libv8/v8/src/variables.cc +114 -0
  424. data/lib/libv8/v8/src/variables.h +167 -0
  425. data/lib/libv8/v8/src/version.cc +116 -0
  426. data/lib/libv8/v8/src/version.h +68 -0
  427. data/lib/libv8/v8/src/vm-state-inl.h +138 -0
  428. data/lib/libv8/v8/src/vm-state.h +71 -0
  429. data/lib/libv8/v8/src/win32-headers.h +96 -0
  430. data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
  431. data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
  432. data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
  433. data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
  434. data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
  435. data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
  436. data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
  437. data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
  438. data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
  439. data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
  440. data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
  441. data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
  442. data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
  443. data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
  444. data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
  445. data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
  446. data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
  447. data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
  448. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
  449. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
  450. data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
  451. data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
  452. data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
  453. data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
  454. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
  455. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
  456. data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
  457. data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
  458. data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
  459. data/lib/libv8/v8/src/zone-inl.h +140 -0
  460. data/lib/libv8/v8/src/zone.cc +196 -0
  461. data/lib/libv8/v8/src/zone.h +240 -0
  462. data/lib/libv8/v8/tools/codemap.js +265 -0
  463. data/lib/libv8/v8/tools/consarray.js +93 -0
  464. data/lib/libv8/v8/tools/csvparser.js +78 -0
  465. data/lib/libv8/v8/tools/disasm.py +92 -0
  466. data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
  467. data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
  468. data/lib/libv8/v8/tools/gcmole/README +62 -0
  469. data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
  470. data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
  471. data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
  472. data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
  473. data/lib/libv8/v8/tools/grokdump.py +841 -0
  474. data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
  475. data/lib/libv8/v8/tools/js2c.py +364 -0
  476. data/lib/libv8/v8/tools/jsmin.py +280 -0
  477. data/lib/libv8/v8/tools/linux-tick-processor +35 -0
  478. data/lib/libv8/v8/tools/ll_prof.py +942 -0
  479. data/lib/libv8/v8/tools/logreader.js +185 -0
  480. data/lib/libv8/v8/tools/mac-nm +18 -0
  481. data/lib/libv8/v8/tools/mac-tick-processor +6 -0
  482. data/lib/libv8/v8/tools/oom_dump/README +31 -0
  483. data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
  484. data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
  485. data/lib/libv8/v8/tools/presubmit.py +305 -0
  486. data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
  487. data/lib/libv8/v8/tools/profile.js +751 -0
  488. data/lib/libv8/v8/tools/profile_view.js +219 -0
  489. data/lib/libv8/v8/tools/run-valgrind.py +77 -0
  490. data/lib/libv8/v8/tools/splaytree.js +316 -0
  491. data/lib/libv8/v8/tools/stats-viewer.py +468 -0
  492. data/lib/libv8/v8/tools/test.py +1510 -0
  493. data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
  494. data/lib/libv8/v8/tools/tickprocessor.js +877 -0
  495. data/lib/libv8/v8/tools/utils.py +96 -0
  496. data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
  497. data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
  498. data/lib/libv8/version.rb +5 -0
  499. data/libv8.gemspec +36 -0
  500. metadata +578 -0
@@ -0,0 +1,4526 @@
1
+ // Copyright 2011 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #include "v8.h"
29
+
30
+ #include "arm/lithium-codegen-arm.h"
31
+ #include "arm/lithium-gap-resolver-arm.h"
32
+ #include "code-stubs.h"
33
+ #include "stub-cache.h"
34
+
35
+ namespace v8 {
36
+ namespace internal {
37
+
38
+
39
// Call wrapper used at call sites that need a safepoint: before the call it
// pads the instruction stream so that lazy-deopt patching of the previous
// safepoint cannot overlap the call, and after the call it records a new
// safepoint with the given pointer map and deoptimization index.
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const {
    ASSERT(call_size >= 0);
    // Ensure that we have enough space after the previous safepoint position
    // for the generated code there.
    int call_end = codegen_->masm()->pc_offset() + call_size;
    int prev_jump_end =
        codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
    if (call_end < prev_jump_end) {
      // Pad with nops; padding is a whole number of ARM instructions.
      int padding_size = prev_jump_end - call_end;
      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        codegen_->masm()->nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};
75
+
76
+
77
+ #define __ masm()->
78
+
79
// Top-level driver for Lithium code generation. Emits, in order: prologue,
// instruction bodies, deferred code, the deoptimization jump table, and the
// safepoint table. Returns false if any phase aborted.
bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  // Optimized ARM code is generated assuming VFP3 and ARMv7 are available;
  // enable both feature scopes for the whole compilation.
  CpuFeatures::Scope scope1(VFP3);
  CpuFeatures::Scope scope2(ARMv7);
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}
91
+
92
+
93
// Attaches generated-code metadata to the finished Code object: stack slot
// count, safepoint table offset, and deoptimization input data. Also makes
// sure there is reloc space reserved for lazy-deopt patching.
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
100
+
101
+
102
// Marks code generation as aborted. When --trace-bailout is on, also prints
// the printf-style reason together with the function's debug name.
void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  // Abort unconditionally, whether or not the reason was traced.
  status_ = ABORTED;
}
114
+
115
+
116
// Emits a printf-style comment into the generated code stream, but only when
// --code-comments is enabled (comments cost space in the code object).
void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}
132
+
133
+
134
// Emits the function prologue: optional debug stop, strict-mode receiver
// patching, frame construction, stack-slot reservation, optional heap
// context allocation (copying context-allocated parameters into it), and an
// optional trace-enter runtime call.
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  // Allow stopping in a specific function via --stop-at for debugging.
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ stop("stop_at");
  }
#endif

  // r1: Callee's JS function.
  // cp: Callee's context.
  // fp: Caller's frame pointer.
  // lr: Caller's pc.

  // Strict mode functions need to replace the receiver with undefined
  // when called as functions (without an explicit receiver
  // object). r5 is zero for method calls and non-zero for function
  // calls.
  if (info_->is_strict_mode()) {
    Label ok;
    __ cmp(r5, Operand(0));
    __ b(eq, &ok);
    int receiver_offset = scope()->num_parameters() * kPointerSize;
    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
    __ str(r2, MemOperand(sp, receiver_offset));
    __ bind(&ok);
  }

  // Build the standard frame: push function, context, caller fp and lr.
  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
  __ add(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      // Fill the reserved slots with a zap value so stale reads are obvious.
      __ mov(r0, Operand(slots));
      __ mov(r2, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ push(r2);
      __ sub(r0, r0, Operand(1), SetCC);
      __ b(ne, &loop);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in r1.
    __ push(r1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewContext, 1);
    }
    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
    // Context is returned in both r0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Slot* slot = scope()->parameter(i)->AsSlot();
      if (slot != NULL && slot->type() == Slot::CONTEXT) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        __ mov(r1, Operand(Context::SlotOffset(slot->index())));
        __ str(r0, MemOperand(cp, r1));
        // Update the write barrier. This clobbers all involved
        // registers, so we have to use two more registers to avoid
        // clobbering cp.
        __ mov(r2, Operand(cp));
        __ RecordWrite(r2, Operand(r1), r3, r0);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}
226
+
227
+
228
// Emits native code for every Lithium instruction in order. Instructions
// following a replaced label (a label with a replacement) are skipped until
// the next label is reached.
bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      // A label toggles emission: blocks whose label has a replacement
      // are dead and their instructions are not emitted.
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}
247
+
248
+
249
+ LInstruction* LCodeGen::GetNextInstruction() {
250
+ if (current_instruction_ < instructions_->length() - 1) {
251
+ return instructions_->at(current_instruction_ + 1);
252
+ } else {
253
+ return NULL;
254
+ }
255
+ }
256
+
257
+
258
// Emits all deferred code stubs collected during the main pass. Each stub is
// bound at its entry label, generated, and jumps back to its exit label.
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    LDeferredCode* code = deferred_[i];
    __ bind(code->entry());
    code->Generate();
    __ jmp(code->exit());
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}
273
+
274
+
275
// Emits the deoptimization jump table at the end of the code. Each entry is
// a pc-relative load into pc followed by the 32-bit target address inlined
// in the instruction stream.
bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, ie that offsets to the table can be encoded in the 24bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32bit data after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 2)) {
    Abort("Generated code is too large");
  }

  // Block the constant pool emission during the jump table emission.
  __ BlockConstPoolFor(deopt_jump_table_.length());
  __ RecordComment("[ Deoptimisation jump table");
  Label table_start;
  __ bind(&table_start);
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    // Load pc from the word that immediately follows this instruction,
    // compensating for the ARM pc read-ahead (kPcLoadDelta).
    __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
  }
  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
      deopt_jump_table_.length() * 2);
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
307
+
308
+
309
// Emits the safepoint table after all code has been generated.
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
314
+
315
+
316
// Maps a register-allocator index to the corresponding core register.
Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}
319
+
320
+
321
// Maps a register-allocator index to the corresponding VFP double register.
DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}
324
+
325
+
326
// Returns the core register assigned to a register operand.
Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}
330
+
331
+
332
// Materializes an operand's value in a core register, emitting a load or
// move into |scratch| when the operand is not already a register. Returns
// the register holding the value.
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    __ mov(scratch, ToOperand(op));
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}
345
+
346
+
347
// Returns the VFP double register assigned to a double-register operand.
DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}
351
+
352
+
353
// Materializes an operand's value in a VFP double register, converting an
// integer constant or loading from the stack into |dbl_scratch| as needed.
// Double and tagged immediates are not supported and abort codegen.
DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                SwVfpRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      // Move the int32 to a single-precision register, then convert
      // signed int -> double.
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort("unsupported double immediate");
    } else if (r.IsTagged()) {
      Abort("unsupported tagged immediate");
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}
383
+
384
+
385
// Returns the int32 value of a constant operand; the literal must have an
// Integer32 representation and a number value that fits in int32.
int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  // Verify the round-trip int32 -> double is lossless for this literal.
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}
392
+
393
+
394
// Converts an LOperand into an assembler Operand (immediate or register).
// Double immediates, double registers, and stack slots are not representable
// as Operands; the latter require ToMemOperand instead.
Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      return Operand(static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort("ToOperand Unsupported double immediate.");
    }
    ASSERT(r.IsTagged());
    return Operand(literal);
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort("ToOperand IsDoubleRegister unimplemented");
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}
417
+
418
+
419
// Converts a stack-slot operand to an fp-relative MemOperand. Non-negative
// indices are locals/spill slots below the fixed frame; negative indices are
// incoming parameters above it.
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return MemOperand(fp, -(index - 1) * kPointerSize);
  }
}
433
+
434
+
435
// Like ToMemOperand, but addresses the second (high) word of a double
// stack slot.
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, context,
    // and the first word of the double in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
  } else {
    // Incoming parameter. Skip the return address and the first word of
    // the double.
    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
  }
}
448
+
449
+
450
// Recursively serializes an environment chain (outermost frame first) into
// a deoptimization translation: one frame header per environment plus one
// command per environment value. Values that also live in spill slots are
// recorded as duplicates first.
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  // Emit outer frames before this one.
  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  translation->BeginFrame(environment->ast_id(), closure_id, height);
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}
487
+
488
+
489
// Appends one translation command describing where |op|'s value lives
// (stack slot, register, literal, ...) and whether it is tagged. A NULL
// operand stands for the (reconstructable) arguments object.
void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    // Pushed arguments live just past the spill slots.
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
527
+
528
+
529
// Convenience wrapper: calls |code| recording a simple safepoint.
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}
534
+
535
+
536
// Calls a code object, recording the source position beforehand and
// registering the instruction's environment for lazy deoptimization with
// the requested safepoint mode afterwards.
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ Call(code, mode);
  RegisterLazyDeoptimization(instr, safepoint_mode);
}
546
+
547
+
548
// Calls a runtime function and records a simple safepoint for lazy
// deoptimization of |instr|.
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}
559
+
560
+
561
// Calls a runtime function from deferred code with doubles saved, recording
// a registers safepoint (deferred code runs with registers live).
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
}
568
+
569
+
570
// Registers the environment to bail out to after a call and records the
// matching safepoint (simple, or with registers and no arguments).
void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
                                          SafepointMode safepoint_mode) {
  // Create the environment to bailout to. If the call has side effects
  // execution has to continue after the call otherwise execution can continue
  // from a previous bailout point repeating the call.
  LEnvironment* deoptimization_environment;
  if (instr->HasDeoptimizationEnvironment()) {
    deoptimization_environment = instr->deoptimization_environment();
  } else {
    deoptimization_environment = instr->environment();
  }

  RegisterEnvironmentForDeoptimization(deoptimization_environment);
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(),
                    deoptimization_environment->deoptimization_index());
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(),
        0,
        deoptimization_environment->deoptimization_index());
  }
}
594
+
595
+
596
// Assigns the environment a deoptimization index and serializes its whole
// frame chain into the translation buffer, unless already registered.
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4 0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------ translation_size ------------<|

    // Count the frames in the environment chain (inlined functions).
    int frame_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
    }
    Translation translation(&translations_, frame_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    environment->Register(deoptimization_index, translation.index());
    deoptimizations_.Add(environment);
  }
}
622
+
623
+
624
// Emits an eager deoptimization when condition |cc| holds. Unconditional
// deopts jump directly to the entry; conditional deopts branch to a shared
// jump-table entry (reusing the last entry when the target repeats).
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  ASSERT(entry != NULL);
  // Release-mode guard for the condition asserted above.
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.

  if (FLAG_deopt_every_n_times == 1 &&
      info_->shared_info()->opt_count() == id) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);

  if (cc == al) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry)) {
      deopt_jump_table_.Add(JumpTableEntry(entry));
    }
    __ b(cc, &deopt_jump_table_.last().label);
  }
}
657
+
658
+
659
// Builds the DeoptimizationInputData (translations, literals, OSR info, and
// per-deopt entries) and attaches it to the code object. Does nothing when
// no deoptimization points were recorded.
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  ASSERT(FLAG_deopt);
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}
690
+
691
+
692
+ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
693
+ int result = deoptimization_literals_.length();
694
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
695
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
696
+ }
697
+ deoptimization_literals_.Add(literal);
698
+ return result;
699
+ }
700
+
701
+
702
// Seeds the deoptimization literal list with all inlined closures so their
// indices come first, and remembers how many there are.
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}
716
+
717
+
718
// Core safepoint recorder: defines a safepoint of the given kind and marks
// every stack-slot pointer (and, for register safepoints, every register
// pointer plus the context register) as live.
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    int deoptimization_index) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp);
  }
}
741
+
742
+
743
// Records a simple safepoint (no registers, no arguments).
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
747
+
748
+
749
// Records a simple safepoint with an empty pointer map.
void LCodeGen::RecordSafepoint(int deoptimization_index) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, deoptimization_index);
}
753
+
754
+
755
// Records a safepoint that also tracks core registers.
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
      deoptimization_index);
}
761
+
762
+
763
// Records a safepoint that tracks both core and double registers.
void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
      deoptimization_index);
}
770
+
771
+
772
// Records a source position for debug info, when enabled and known.
void LCodeGen::RecordPosition(int position) {
  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}
776
+
777
+
778
// Binds a basic-block label (emitting a code comment), updates the current
// block, and processes the label's gap moves.
void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}
788
+
789
+
790
// Delegates a parallel move to the gap resolver for emission.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}
793
+
794
+
795
// Emits all parallel moves attached to a gap (in inner-position order) and,
// if the next instruction is a lazy bailout, records the current pc so the
// safepoint maps to the position after the gap moves.
void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }

  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}
810
+
811
+
812
// An instruction gap is handled exactly like a plain gap.
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}
815
+
816
+
817
// Parameters already live in their frame slots; no code is emitted.
void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
820
+
821
+
822
// Dispatches on the hydrogen instruction's stub key and calls the matching
// code stub. All stubs here return their result in r0.
void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      // The stub expects its tagged argument in r0 (top of stack).
      __ ldr(r0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}
866
+
867
+
868
// OSR values are materialized by the OSR entry sequence; nothing to emit.
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}
871
+
872
+
873
// Emit code for integer modulo (x % y).
// Three strategies, chosen in order:
//   1. Constant power-of-two divisor: mask with (|divisor| - 1), fixing up the
//      sign of a negative dividend.
//   2. Both operands positive at runtime: try a power-of-two mask, then a few
//      unrolled subtractions.
//   3. General case: compute the remainder via VFP double arithmetic
//      (quotient = round-toward-zero(left/|right|), remainder = left - q*|right|).
// Deoptimizes on x % 0 (when the divisor can be zero) and on a -0 result
// (when kBailoutOnMinusZero is set).
void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->InputAt(0));

    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();

    // The sign of the divisor does not affect the magnitude of the result.
    if (divisor < 0) divisor = -divisor;

    Label positive_dividend, done;
    __ cmp(dividend, Operand(0));
    __ b(pl, &positive_dividend);
    // Negative dividend: compute -((-dividend) & (divisor - 1)).
    __ rsb(dividend, dividend, Operand(0));
    __ and_(dividend, dividend, Operand(divisor - 1));
    __ rsb(dividend, dividend, Operand(0), SetCC);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Result is zero with a negative dividend => -0; bail out.
      __ b(ne, &done);
      DeoptimizeIf(al, instr->environment());
    }
    __ bind(&positive_dividend);
    __ and_(dividend, dividend, Operand(divisor - 1));
    __ bind(&done);
    return;
  }

  // These registers hold untagged 32 bit values.
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  Register result = ToRegister(instr->result());

  Register scratch = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
  DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
  DwVfpRegister quotient = double_scratch0();

  // The register allocator fixed the result to alias the left input.
  ASSERT(result.is(left));

  ASSERT(!dividend.is(divisor));
  ASSERT(!dividend.is(quotient));
  ASSERT(!divisor.is(quotient));
  ASSERT(!scratch.is(left));
  ASSERT(!scratch.is(right));
  ASSERT(!scratch.is(result));

  Label done, vfp_modulo, both_positive, right_negative;

  // Check for x % 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand(0));
    DeoptimizeIf(eq, instr->environment());
  }

  // (0 % x) must yield 0 (if x is finite, which is the case here).
  __ cmp(left, Operand(0));
  __ b(eq, &done);
  // Preload right in a vfp register.
  __ vmov(divisor.low(), right);
  // Negative dividend: take the slow VFP path so the sign is handled there.
  __ b(lt, &vfp_modulo);

  // left < right (both nonnegative at this point): left is already the result.
  __ cmp(left, Operand(right));
  __ b(lt, &done);

  // Check for (positive) power of two on the right hand side.
  __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
                                     scratch,
                                     &right_negative,
                                     &both_positive);
  // Perform modulo operation (scratch contains right - 1).
  __ and_(result, scratch, Operand(left));
  __ b(&done);

  __ bind(&right_negative);
  // Negate right. The sign of the divisor does not matter.
  __ rsb(right, right, Operand(0));

  __ bind(&both_positive);
  const int kUnfolds = 3;
  // If the right hand side is smaller than the (nonnegative)
  // left hand side, the left hand side is the result.
  // Else try a few subtractions of the left hand side.
  __ mov(scratch, left);
  for (int i = 0; i < kUnfolds; i++) {
    // Check if the left hand side is less or equal than the
    // the right hand side.
    __ cmp(scratch, Operand(right));
    __ mov(result, scratch, LeaveCC, lt);
    __ b(lt, &done);
    // If not, reduce the left hand side by the right hand
    // side and check again.
    if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
  }

  __ bind(&vfp_modulo);
  // Load the arguments in VFP registers.
  // The divisor value is preloaded before. Be careful that 'right' is only live
  // on entry.
  __ vmov(dividend.low(), left);
  // From here on don't use right as it may have been reallocated (for example
  // to scratch2).
  right = no_reg;

  __ vcvt_f64_s32(dividend, dividend.low());
  __ vcvt_f64_s32(divisor, divisor.low());

  // We do not care about the sign of the divisor.
  __ vabs(divisor, divisor);
  // Compute the quotient and round it to a 32bit integer.
  __ vdiv(quotient, dividend, divisor);
  __ vcvt_s32_f64(quotient.low(), quotient);
  __ vcvt_f64_s32(quotient, quotient.low());

  // Compute the remainder in result.
  DwVfpRegister double_scratch = dividend;
  __ vmul(double_scratch, divisor, quotient);
  __ vcvt_s32_f64(double_scratch.low(), double_scratch);
  __ vmov(scratch, double_scratch.low());

  if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ sub(result, left, scratch);
  } else {
    Label ok;
    // Check for -0.
    __ sub(scratch2, left, scratch, SetCC);
    __ b(ne, &ok);
    // Zero remainder from a negative dividend would be -0; deoptimize.
    __ cmp(left, Operand(0));
    DeoptimizeIf(mi, instr->environment());
    __ bind(&ok);
    // Load the result and we are done.
    __ mov(result, scratch2);
  }

  __ bind(&done);
}
1007
+
1008
+
1009
// Emit code for integer division (x / y).
// Fast paths handle divisors 1, 2 and 4 (where the dividend divides evenly);
// everything else is routed to a deferred BinaryOpStub call with the operands
// tagged as Smis. A non-Smi stub result deoptimizes, since the result of
// LDivI must be a 32-bit integer.
// Deoptimizes up front on x / 0, on (0 / negative) producing -0, and on
// kMinInt / -1 overflow, when the corresponding hydrogen flags are set.
void LCodeGen::DoDivI(LDivI* instr) {
  // Deferred out-of-line path that performs the division via the generic
  // binary-op stub (Token::DIV).
  class DeferredDivI: public LDeferredCode {
   public:
    DeferredDivI(LCodeGen* codegen, LDivI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
    }
   private:
    LDivI* instr_;
  };

  const Register left = ToRegister(instr->InputAt(0));
  const Register right = ToRegister(instr->InputAt(1));
  const Register scratch = scratch0();
  const Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand(0));
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ cmp(left, Operand(0));
    __ b(ne, &left_not_zero);
    __ cmp(right, Operand(0));
    DeoptimizeIf(mi, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left, Operand(kMinInt));
    __ b(ne, &left_not_min_int);
    __ cmp(right, Operand(-1));
    DeoptimizeIf(eq, instr->environment());
    __ bind(&left_not_min_int);
  }

  Label done, deoptimize;
  // Test for a few common cases first.
  // Divisor 1: result is the dividend.
  __ cmp(right, Operand(1));
  __ mov(result, left, LeaveCC, eq);
  __ b(eq, &done);

  // Divisor 2: shift right by one, but only if the low bit is clear
  // (i.e. the division is exact); the tst is itself conditional on eq.
  __ cmp(right, Operand(2));
  __ tst(left, Operand(1), eq);
  __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
  __ b(eq, &done);

  // Divisor 4: same idea with the two low bits.
  __ cmp(right, Operand(4));
  __ tst(left, Operand(3), eq);
  __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
  __ b(eq, &done);

  // Call the stub. The numbers in r0 and r1 have
  // to be tagged to Smis. If that is not possible, deoptimize.
  DeferredDivI* deferred = new DeferredDivI(this, instr);

  __ TrySmiTag(left, &deoptimize, scratch);
  __ TrySmiTag(right, &deoptimize, scratch);

  __ b(al, deferred->entry());
  __ bind(deferred->exit());

  // If the result in r0 is a Smi, untag it, else deoptimize.
  __ JumpIfNotSmi(result, &deoptimize);
  __ SmiUntag(result);
  __ b(&done);

  __ bind(&deoptimize);
  DeoptimizeIf(al, instr->environment());
  __ bind(&done);
}
1087
+
1088
+
1089
// Deferred-code helper: perform a generic binary operation via BinaryOpStub.
// The stub's calling convention expects left in r1 and right in r0; the
// if/else ladder below shuffles the inputs into place without clobbering
// either value (note the Swap for the fully-crossed r0/r1 case).
// Runs inside a safepoint scope that preserves all registers and doubles;
// the stub's result (in r0) is written back into r0's safepoint slot so the
// register-restoring epilogue of the scope does not wipe it out.
template<int T>
void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
                                      Token::Value op) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
  // Move left to r1 and right to r0 for the stub call.
  if (left.is(r1)) {
    __ Move(r0, right);
  } else if (left.is(r0) && right.is(r1)) {
    __ Swap(r0, r1, r2);
  } else if (left.is(r0)) {
    ASSERT(!right.is(r1));
    __ mov(r1, r0);
    __ mov(r0, right);
  } else {
    ASSERT(!left.is(r0) && !right.is(r0));
    __ mov(r0, right);
    __ mov(r1, left);
  }
  BinaryOpStub stub(op, OVERWRITE_LEFT);
  __ CallStub(&stub);
  RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
                                         0,
                                         Safepoint::kNoDeoptimizationIndex);
  // Overwrite the stored value of r0 with the result of the stub.
  __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
}
1118
+
1119
+
1120
// Emit code for 32-bit integer multiplication, left <- left * right.
// When overflow is possible, smull produces the 64-bit product and we
// deoptimize unless the high word equals the sign-extension of the low word.
// When a -0 result must be rejected, a zero product deoptimizes if the other
// operand (or the constant) shows the true result would have been negative.
void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register left = ToRegister(instr->InputAt(0));
  Register right = EmitLoadRegister(instr->InputAt(1), scratch);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
      !instr->InputAt(1)->IsConstantOperand()) {
    // Preserve left|right in a temp: after the multiply, left is clobbered,
    // so this is used below to determine the sign of a zero result.
    __ orr(ToRegister(instr->TempAt(0)), left, right);
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // scratch:left = left * right.
    __ smull(left, scratch, left, right);
    // The product fits 32 bits iff the high word is the low word's sign bits.
    __ mov(ip, Operand(left, ASR, 31));
    __ cmp(ip, Operand(scratch));
    DeoptimizeIf(ne, instr->environment());
  } else {
    __ mul(left, left, right);
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ cmp(left, Operand(0));
    __ b(ne, &done);
    if (instr->InputAt(1)->IsConstantOperand()) {
      // 0 * c is -0 when c < 0; c == 0 is also treated conservatively here.
      if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
        DeoptimizeIf(al, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
      DeoptimizeIf(mi, instr->environment());
    }
    __ bind(&done);
  }
}
1157
+
1158
+
1159
// Emit code for the bitwise integer operations AND, OR and XOR.
// The left operand doubles as the result (asserted). The right operand may
// be a register, a constant, or a stack slot/argument (in which case it is
// first loaded into ip).
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  Register result = ToRegister(left);
  Operand right_operand(no_reg);

  if (right->IsStackSlot() || right->IsArgument()) {
    // Memory operand: load into the ip scratch register first.
    Register right_reg = EmitLoadRegister(right, ip);
    right_operand = Operand(right_reg);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    right_operand = ToOperand(right);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, ToRegister(left), right_operand);
      break;
    case Token::BIT_OR:
      __ orr(result, ToRegister(left), right_operand);
      break;
    case Token::BIT_XOR:
      __ eor(result, ToRegister(left), right_operand);
      break;
    default:
      UNREACHABLE();
      break;
  }
}
1190
+
1191
+
1192
// Emit code for the shift operations SAR (>>), SHR (>>>) and SHL (<<).
// The left operand doubles as the result (asserted). Shift amounts are
// masked to 5 bits, matching JavaScript shift semantics.
// SHR can deoptimize: a logical shift whose (possibly zero-shift) result has
// the sign bit set cannot be represented as a signed 32-bit integer.
void LCodeGen::DoShiftI(LShiftI* instr) {
  Register scratch = scratch0();
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  Register result = ToRegister(left);
  if (right->IsRegister()) {
    // Mask the right operand.
    __ and_(scratch, ToRegister(right), Operand(0x1F));
    switch (instr->op()) {
      case Token::SAR:
        __ mov(result, Operand(result, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          // SetCC makes the mov update the flags; mi => negative result.
          __ mov(result, Operand(result, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr->environment());
        } else {
          __ mov(result, Operand(result, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(result, LSL, scratch));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Constant shift amount: known at compile time, so zero shifts can be
    // elided entirely (except the SHR sign check below).
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(result, ASR, shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          // x >>> 0 with a negative x is not a valid int32 result.
          __ tst(result, Operand(0x80000000));
          DeoptimizeIf(ne, instr->environment());
        } else {
          __ mov(result, Operand(result, LSR, shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ mov(result, Operand(result, LSL, shift_count));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
1249
+
1250
+
1251
// Emit code for integer subtraction, left <- left - right.
// When overflow is possible the sub sets the condition codes and a V-flag
// (vs) result deoptimizes. Stack-slot/argument operands are loaded via ip.
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  // Only set the condition codes when the overflow check needs them.
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ sub(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}
1270
+
1271
+
1272
+ void LCodeGen::DoConstantI(LConstantI* instr) {
1273
+ ASSERT(instr->result()->IsRegister());
1274
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
1275
+ }
1276
+
1277
+
1278
+ void LCodeGen::DoConstantD(LConstantD* instr) {
1279
+ ASSERT(instr->result()->IsDoubleRegister());
1280
+ DwVfpRegister result = ToDoubleRegister(instr->result());
1281
+ double v = instr->value();
1282
+ __ vmov(result, v);
1283
+ }
1284
+
1285
+
1286
+ void LCodeGen::DoConstantT(LConstantT* instr) {
1287
+ ASSERT(instr->result()->IsRegister());
1288
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
1289
+ }
1290
+
1291
+
1292
+ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1293
+ Register result = ToRegister(instr->result());
1294
+ Register array = ToRegister(instr->InputAt(0));
1295
+ __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
1296
+ }
1297
+
1298
+
1299
+ void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
1300
+ Register result = ToRegister(instr->result());
1301
+ Register array = ToRegister(instr->InputAt(0));
1302
+ __ ldr(result, FieldMemOperand(array, ExternalArray::kLengthOffset));
1303
+ }
1304
+
1305
+
1306
+ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
1307
+ Register result = ToRegister(instr->result());
1308
+ Register array = ToRegister(instr->InputAt(0));
1309
+ __ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
1310
+ }
1311
+
1312
+
1313
// Emit code for LValueOf: if the input is a JSValue wrapper, unwrap it to the
// wrapped primitive; otherwise (smi or any other object) return the input
// unchanged. The input doubles as the result register (asserted).
void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->TempAt(0));
  ASSERT(input.is(result));
  Label done;

  // If the object is a smi return the object.
  __ tst(input, Operand(kSmiTagMask));
  __ b(eq, &done);

  // If the object is not a value type, return the object.
  __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
  __ b(ne, &done);
  // Unwrap: load the boxed value out of the JSValue.
  __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}
1331
+
1332
+
1333
+ void LCodeGen::DoBitNotI(LBitNotI* instr) {
1334
+ LOperand* input = instr->InputAt(0);
1335
+ ASSERT(input->Equals(instr->result()));
1336
+ __ mvn(ToRegister(input), Operand(ToRegister(input)));
1337
+ }
1338
+
1339
+
1340
// Emit code for a JavaScript throw: push the exception value and call the
// Runtime::kThrow runtime function. The runtime call does not return; in
// debug builds a stop instruction documents that.
void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
  __ push(input_reg);
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}
1349
+
1350
+
1351
// Emit code for integer addition, left <- left + right.
// Mirrors DoSubI: when overflow is possible the add sets the condition codes
// and a V-flag (vs) result deoptimizes. Stack-slot/argument operands are
// loaded via ip.
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  // Only set the condition codes when the overflow check needs them.
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ add(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ add(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}
1370
+
1371
+
1372
// Emit code for double arithmetic (ADD/SUB/MUL/DIV/MOD) on VFP registers.
// ADD/SUB/MUL/DIV write the result into the left register in place.
// MOD has no VFP instruction, so it calls the C runtime's double fp-operation
// helper, saving and restoring r0-r3 around the call.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
  switch (instr->op()) {
    case Token::ADD:
      __ vadd(left, left, right);
      break;
    case Token::SUB:
      __ vsub(left, left, right);
      break;
    case Token::MUL:
      __ vmul(left, left, right);
      break;
    case Token::DIV:
      __ vdiv(left, left, right);
      break;
    case Token::MOD: {
      // Save r0-r3 on the stack.
      __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result in the double result register.
      __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));

      // Restore r0-r3.
      __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
1409
+
1410
+
1411
// Emit code for a generic (tagged-operand) binary arithmetic operation by
// calling the BinaryOpStub. The stub's fixed calling convention is asserted:
// left in r1, right in r0, result in r0.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(r1));
  ASSERT(ToRegister(instr->InputAt(1)).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
1419
+
1420
+
1421
+ int LCodeGen::GetNextEmittedBlock(int block) {
1422
+ for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1423
+ LLabel* label = chunk_->GetLabel(i);
1424
+ if (!label->HasReplacement()) return i;
1425
+ }
1426
+ return -1;
1427
+ }
1428
+
1429
+
1430
// Emit a conditional two-way branch on condition |cc|, minimizing jumps:
// if both destinations coincide it degenerates to a goto; if one destination
// is the textually next block, only a single branch (possibly negated) is
// emitted and the other target falls through.
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // The true target falls through; branch to the false target when !cc.
    __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    // The false target falls through; branch to the true target when cc.
    __ b(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ b(cc, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}
1446
+
1447
+
1448
// Emit code for LBranch: branch on the truthiness of the input value.
// Integer inputs: nonzero is true. Double inputs: zero and NaN are false.
// Tagged inputs known to be boolean: compare against the true root.
// General tagged inputs: handle undefined/true/false/0/smi/heap-number
// inline, and fall back to ToBooleanStub for everything else.
void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    __ cmp(reg, Operand(0));
    EmitBranch(true_block, false_block, ne);
  } else if (r.IsDouble()) {
    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
    Register scratch = scratch0();

    // Test the double value. Zero and NaN are false.
    __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
    __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
    EmitBranch(true_block, false_block, ne);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    if (instr->hydrogen()->type().IsBoolean()) {
      // Known boolean: true iff it is the true value.
      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
      __ cmp(reg, ip);
      EmitBranch(true_block, false_block, eq);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      // undefined -> false.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(reg, ip);
      __ b(eq, false_label);
      // true -> true, false -> false.
      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
      __ cmp(reg, ip);
      __ b(eq, true_label);
      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
      __ cmp(reg, ip);
      __ b(eq, false_label);
      // Smi zero -> false; any other smi -> true.
      __ cmp(reg, Operand(0));
      __ b(eq, false_label);
      __ tst(reg, Operand(kSmiTagMask));
      __ b(eq, true_label);

      // Test double values. Zero and NaN are false.
      Label call_stub;
      DoubleRegister dbl_scratch = d0;
      Register scratch = scratch0();
      __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
      __ cmp(scratch, Operand(ip));
      __ b(ne, &call_stub);
      __ sub(ip, reg, Operand(kHeapObjectTag));
      __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
      __ VFPCompareAndLoadFlags(dbl_scratch, 0.0, scratch);
      __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
      __ b(ne, false_label);
      __ b(true_label);

      // The conversion stub doesn't cause garbage collections so it's
      // safe to not record a safepoint after the call.
      __ bind(&call_stub);
      ToBooleanStub stub(reg);
      RegList saved_regs = kJSCallerSaved | kCalleeSaved;
      // Save all registers the stub might clobber; the stub leaves its
      // answer in reg, which is compared before the registers are restored.
      __ stm(db_w, sp, saved_regs);
      __ CallStub(&stub);
      __ cmp(reg, Operand(0));
      __ ldm(ia_w, sp, saved_regs);
      EmitBranch(true_block, false_block, ne);
    }
  }
}
1518
+
1519
+
1520
// Emit an unconditional jump to |block|, elided when the block is the next
// one to be emitted. If a deferred stack check is supplied, a stack-limit
// test is emitted first: above the limit we jump straight to the block,
// otherwise we enter the deferred code, whose exit is wired to the block.
void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    // Perform stack overflow check if this goto needs it before jumping.
    if (deferred_stack_check != NULL) {
      __ LoadRoot(ip, Heap::kStackLimitRootIndex);
      __ cmp(sp, Operand(ip));
      __ b(hs, chunk_->GetAssemblyLabel(block));
      __ jmp(deferred_stack_check->entry());
      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
    } else {
      __ jmp(chunk_->GetAssemblyLabel(block));
    }
  }
}
1536
+
1537
+
1538
// Deferred-code helper for LGoto's stack check: call the StackGuard runtime
// function inside a register-preserving safepoint scope.
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
1542
+
1543
+
1544
// Emit code for LGoto: an unconditional jump, optionally guarded by a stack
// check (used e.g. on loop back edges) that is performed in deferred code.
void LCodeGen::DoGoto(LGoto* instr) {
  // Deferred out-of-line path that invokes DoDeferredStackCheck.
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   private:
    LGoto* instr_;
  };

  DeferredStackCheck* deferred = NULL;
  if (instr->include_stack_check()) {
    deferred = new DeferredStackCheck(this, instr);
  }
  EmitGoto(instr->block_id(), deferred);
}
1560
+
1561
+
1562
+ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1563
+ Condition cond = kNoCondition;
1564
+ switch (op) {
1565
+ case Token::EQ:
1566
+ case Token::EQ_STRICT:
1567
+ cond = eq;
1568
+ break;
1569
+ case Token::LT:
1570
+ cond = is_unsigned ? lo : lt;
1571
+ break;
1572
+ case Token::GT:
1573
+ cond = is_unsigned ? hi : gt;
1574
+ break;
1575
+ case Token::LTE:
1576
+ cond = is_unsigned ? ls : le;
1577
+ break;
1578
+ case Token::GTE:
1579
+ cond = is_unsigned ? hs : ge;
1580
+ break;
1581
+ case Token::IN:
1582
+ case Token::INSTANCEOF:
1583
+ default:
1584
+ UNREACHABLE();
1585
+ }
1586
+ return cond;
1587
+ }
1588
+
1589
+
1590
+ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
1591
+ __ cmp(ToRegister(left), ToRegister(right));
1592
+ }
1593
+
1594
+
1595
// Emit code for a comparison that materializes a boolean result object:
// loads the true root if the comparison holds, the false root otherwise.
// For double operands an unordered result (a NaN was involved) is false.
void LCodeGen::DoCmpID(LCmpID* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  Register scratch = scratch0();

  Label unordered, done;
  if (instr->is_double()) {
    // Compare left and right as doubles and load the
    // resulting flags into the normal status register.
    __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
    // If a NaN is involved, i.e. the result is unordered (V set),
    // jump to unordered to return false.
    __ b(vs, &unordered);
  } else {
    EmitCmpI(left, right);
  }

  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  // Speculatively load true, then branch away if the condition held.
  __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
  __ b(cc, &done);

  __ bind(&unordered);
  __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
  __ bind(&done);
}
1621
+
1622
+
1623
// Emit code for a comparison fused with a branch: compare the operands
// (integer or double) and branch directly to the true/false blocks.
// For double operands an unordered result (NaN involved) goes to false.
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  if (instr->is_double()) {
    // Compare left and right as doubles and load the
    // resulting flags into the normal status register.
    __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
    // If a NaN is involved, i.e. the result is unordered (V set),
    // jump to false block label.
    __ b(vs, chunk_->GetAssemblyLabel(false_block));
  } else {
    EmitCmpI(left, right);
  }

  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  EmitBranch(true_block, false_block, cc);
}
1643
+
1644
+
1645
+ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
1646
+ Register left = ToRegister(instr->InputAt(0));
1647
+ Register right = ToRegister(instr->InputAt(1));
1648
+ Register result = ToRegister(instr->result());
1649
+
1650
+ __ cmp(left, Operand(right));
1651
+ __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
1652
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
1653
+ }
1654
+
1655
+
1656
+ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
1657
+ Register left = ToRegister(instr->InputAt(0));
1658
+ Register right = ToRegister(instr->InputAt(1));
1659
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1660
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1661
+
1662
+ __ cmp(left, Operand(right));
1663
+ EmitBranch(true_block, false_block, eq);
1664
+ }
1665
+
1666
+
1667
+ void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
1668
+ Register left = ToRegister(instr->InputAt(0));
1669
+ Register right = ToRegister(instr->InputAt(1));
1670
+ Register result = ToRegister(instr->result());
1671
+
1672
+ __ cmp(left, Operand(right));
1673
+ __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
1674
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
1675
+ }
1676
+
1677
+
1678
+ void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
1679
+ Register left = ToRegister(instr->InputAt(0));
1680
+ Register right = ToRegister(instr->InputAt(1));
1681
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1682
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1683
+
1684
+ __ cmp(left, Operand(right));
1685
+ EmitBranch(true_block, false_block, eq);
1686
+ }
1687
+
1688
+
1689
// Emit code for LIsNull, materializing a boolean result.
// Strict mode (===): true only for the null value itself, selected with
// predicated loads. Non-strict (==): null, undefined and undetectable
// objects all compare true; smis and everything else are false.
void LCodeGen::DoIsNull(LIsNull* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(reg, ip);
  if (instr->is_strict()) {
    __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
    __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
  } else {
    Label true_value, false_value, done;
    __ b(eq, &true_value);
    // undefined == null in non-strict comparison.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(ip, reg);
    __ b(eq, &true_value);
    // Smis are never null/undefined.
    __ tst(reg, Operand(kSmiTagMask));
    __ b(eq, &false_value);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = result;
    __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    __ b(ne, &true_value);
    __ bind(&false_value);
    __ LoadRoot(result, Heap::kFalseValueRootIndex);
    __ jmp(&done);
    __ bind(&true_value);
    __ LoadRoot(result, Heap::kTrueValueRootIndex);
    __ bind(&done);
  }
}
1721
+
1722
+
1723
// Emit code for LIsNull fused with a branch. Same semantics as DoIsNull:
// strict compares only against null; non-strict also treats undefined and
// undetectable objects as null.
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
  Register scratch = scratch0();
  Register reg = ToRegister(instr->InputAt(0));

  // TODO(fsc): If the expression is known to be a smi, then it's
  // definitely not null. Jump to the false block.

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(reg, ip);
  if (instr->is_strict()) {
    EmitBranch(true_block, false_block, eq);
  } else {
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ b(eq, true_label);
    // undefined == null in non-strict comparison.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(reg, ip);
    __ b(eq, true_label);
    // Smis are never null/undefined.
    __ tst(reg, Operand(kSmiTagMask));
    __ b(eq, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, ne);
  }
}
1754
+
1755
+
1756
// Emit the shared "is JS object" test used by DoIsObject and
// DoIsObjectAndBranch. Jumps to |is_not_object| for smis and undetectable
// objects, to |is_object| for null, and otherwise leaves the flags set from
// an instance-type range compare; the returned condition (le) is true when
// the input is within [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE].
// Clobbers temp1 (map) and temp2 (bit field / instance type).
Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  // null counts as an object here.
  __ LoadRoot(temp1, Heap::kNullValueRootIndex);
  __ cmp(input, temp1);
  __ b(eq, is_object);

  // Load map.
  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
  __ b(ne, is_not_object);

  // Load instance type and check that it is in object type range.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, is_not_object);
  __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
  return le;
}
1781
+
1782
+
1783
// Emit code for LIsObject, materializing a boolean result from the shared
// EmitIsObject test. The result register doubles as a temp for the test
// before the boolean root is loaded into it.
void LCodeGen::DoIsObject(LIsObject* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register temp = scratch0();
  Label is_false, is_true, done;

  Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
  __ b(true_cond, &is_true);

  __ bind(&is_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ b(&done);

  __ bind(&is_true);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);

  __ bind(&done);
}
1801
+
1802
+
1803
// Emit code for LIsObject fused with a branch: run the shared EmitIsObject
// test with the block labels as its jump targets, then branch on the
// condition it leaves in the flags.
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}
1818
+
1819
+
1820
// Materializes the smi test: a tagged value is a smi iff its tag bits
// (kSmiTagMask) are zero.
void LCodeGen::DoIsSmi(LIsSmi* instr) {
  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  Register result = ToRegister(instr->result());
  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
  __ tst(input_reg, Operand(kSmiTagMask));
  // Optimistically load true; replaced by false below when the tag bit is set.
  __ LoadRoot(result, Heap::kTrueValueRootIndex);
  Label done;
  __ b(eq, &done);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ bind(&done);
}
1831
+
1832
+
1833
// Branch form of the smi test: after tst with kSmiTagMask, eq means the
// tag bit is clear, i.e. the value is a smi.
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
  __ tst(input_reg, Operand(kSmiTagMask));
  EmitBranch(true_block, false_block, eq);
}
1841
+
1842
+
1843
// Materializes whether the input is an undetectable heap object, i.e. its
// map has the kIsUndetectable bit set. Smis are never undetectable.
void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  Label false_label, done;
  __ JumpIfSmi(input, &false_label);
  // result doubles as a scratch register while the map's bit field is
  // inspected; it is overwritten with the final boolean afterwards.
  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kBitFieldOffset));
  __ tst(result, Operand(1 << Map::kIsUndetectable));
  __ b(eq, &false_label);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);
  __ jmp(&done);
  __ bind(&false_label);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ bind(&done);
}
1860
+
1861
+
1862
// Branch form of the undetectable test: smis go straight to the false
// block, otherwise branch on the map's kIsUndetectable bit.
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ tst(temp, Operand(1 << Map::kIsUndetectable));
  // ne: the undetectable bit is set.
  EmitBranch(true_block, false_block, ne);
}
1875
+
1876
+
1877
+ static InstanceType TestType(HHasInstanceType* instr) {
1878
+ InstanceType from = instr->from();
1879
+ InstanceType to = instr->to();
1880
+ if (from == FIRST_TYPE) return to;
1881
+ ASSERT(from == to || to == LAST_TYPE);
1882
+ return from;
1883
+ }
1884
+
1885
+
1886
+ static Condition BranchCondition(HHasInstanceType* instr) {
1887
+ InstanceType from = instr->from();
1888
+ InstanceType to = instr->to();
1889
+ if (from == to) return eq;
1890
+ if (to == LAST_TYPE) return hs;
1891
+ if (from == FIRST_TYPE) return ls;
1892
+ UNREACHABLE();
1893
+ return eq;
1894
+ }
1895
+
1896
+
1897
// Materializes the instance-type range check: smis fail outright; heap
// objects are compared against the type chosen by TestType under the
// condition chosen by BranchCondition.
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  Label done;
  __ tst(input, Operand(kSmiTagMask));
  __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);  // Smi => false.
  __ b(eq, &done);
  __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
  Condition cond = BranchCondition(instr->hydrogen());
  // Exactly one of the two predicated loads executes.
  __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
  __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
  __ bind(&done);
}
1912
+
1913
+
1914
// Branch form of the instance-type range check.
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  // Smis never pass an instance-type check.
  __ tst(input, Operand(kSmiTagMask));
  __ b(eq, false_label);

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
1929
+
1930
+
1931
// Extracts the array index cached in a string's hash field. The input is
// assumed to be a string (verified only under --debug-code).
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}
1942
+
1943
+
1944
// Materializes whether a string's hash field holds a cached array index:
// the kContainsCachedArrayIndexMask bits are all zero exactly when an
// index is cached, hence true on eq.
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ ldr(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
  // Exactly one of the two predicated loads executes.
  __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
  __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
}
1956
+
1957
+
1958
// Branch form of the cached-array-index test (see DoHasCachedArrayIndex).
void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ ldr(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
  // eq: mask bits clear, i.e. an array index is cached.
  EmitBranch(true_block, false_block, eq);
}
1971
+
1972
+
1973
// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input. Only input and temp2 may alias.
// Implements the class-name test used by %_ClassOf-style checks: smis and
// non-JS-objects fail, functions have class 'Function', objects whose map's
// constructor is not a function have class 'Object', and everything else is
// compared (by identity, both sides being symbols) against class_name.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
  __ tst(input, Operand(kSmiTagMask));
  __ b(eq, is_false);
  __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, is_false);

  // Map is now in temp.
  // Functions have class 'Function'.
  __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
  if (class_name->IsEqualTo(CStrVector("Function"))) {
    __ b(eq, is_true);
  } else {
    __ b(eq, is_false);
  }

  // Check if the constructor in the map is a function.
  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
  // LAST_JS_OBJECT_TYPE.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);

  // Objects with a non-function constructor have class 'Object'.
  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ b(ne, is_true);
  } else {
    __ b(ne, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are symbols it is sufficient to use an identity
  // comparison.
  __ cmp(temp, Operand(class_name));
  // End with the answer in flags.
}
2028
+
2029
+
2030
// Materializes the class-of test as a boolean. Note that input and result
// are the same register, so input may be clobbered (EmitClassOfTest allows
// input and temp2 to alias).
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Handle<String> class_name = instr->hydrogen()->class_name();

  Label done, is_true, is_false;

  // EmitClassOfTest may branch to the labels or fall through with the
  // answer in the flags (eq on match).
  EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
  __ b(ne, &is_false);

  __ bind(&is_true);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);
  __ jmp(&done);

  __ bind(&is_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ bind(&done);
}
2049
+
2050
+
2051
// Branch form of the class-of test; the fall-through answer is eq on match.
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, eq);
}
2067
+
2068
+
2069
// Compares the object's map against a known map handle and branches.
// Note: uses the raw block ids, not LookupDestination, unlike the other
// branch instructions in this file.
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ cmp(temp, Operand(instr->map()));
  EmitBranch(true_block, false_block, eq);
}
2079
+
2080
+
2081
// Generic instanceof via the InstanceofStub. The stub leaves zero in r0
// when the object is an instance, so eq selects the true value.
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(r0));  // Object is in r0.
  ASSERT(ToRegister(instr->InputAt(1)).is(r1));  // Function is in r1.

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  __ cmp(r0, Operand(0));
  // Predicated moves: exactly one executes, leaving the boolean in r0.
  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}
2092
+
2093
+
2094
// Branch form of generic instanceof: stub returns zero in r0 on "true".
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(r0));  // Object is in r0.
  ASSERT(ToRegister(instr->InputAt(1)).is(r1));  // Function is in r1.

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ cmp(r0, Operand(0));
  EmitBranch(true_block, false_block, eq);
}
2106
+
2107
+
2108
// instanceof against a known global function, with an inlined call-site
// cache: the code emits hole-value placeholders that the instanceof stub
// later patches with the last seen map and its result. On a cache miss the
// deferred code calls the stub. The code layout here is patching-sensitive;
// see DoDeferredLInstanceOfKnownGlobal for the offset bookkeeping.
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  // Deferred out-of-line path that calls the instanceof stub; it also owns
  // the label marking the patchable map check.
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
    }

    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(object.is(r0));
  ASSERT(result.is(r0));

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  __ mov(ip, Operand(factory()->the_hole_value()));
  __ cmp(map, Operand(ip));
  __ b(ne, &cache_miss);
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false.
  __ mov(result, Operand(factory()->the_hole_value()));
  __ b(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, Operand(ip));
  __ b(eq, &false_result);

  // String values is not instance of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result);

  // Go to the deferred code.
  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}
2181
+
2182
+
2183
// Deferred (cache-miss) path for DoInstanceOfKnownGlobal: calls the
// instanceof stub with flags that make it patch the inline call site and
// return a true/false object. map_check is the label bound at the patchable
// map comparison; the stub needs its offset to know what to patch.
void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(r0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // Get the temp register reserved by the instruction. This needs to be r4 as
  // its slot of the pushing of safepoint registers is used to communicate the
  // offset to the location of the map check.
  Register temp = ToRegister(instr->TempAt(0));
  ASSERT(temp.is(r4));
  __ mov(InstanceofStub::right(), Operand(instr->function()));
  // kAdditionalDelta accounts for the instructions emitted between
  // before_push_delta and the stub call; the constant pool is blocked for
  // that window so the instruction count stays fixed.
  static const int kAdditionalDelta = 4;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ BlockConstPoolFor(kAdditionalDelta);
  __ mov(temp, Operand(delta * kPointerSize));
  // Communicate the map-check offset to the stub via r4's safepoint slot.
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}
2220
+
2221
+
2222
+ static Condition ComputeCompareCondition(Token::Value op) {
2223
+ switch (op) {
2224
+ case Token::EQ_STRICT:
2225
+ case Token::EQ:
2226
+ return eq;
2227
+ case Token::LT:
2228
+ return lt;
2229
+ case Token::GT:
2230
+ return gt;
2231
+ case Token::LTE:
2232
+ return le;
2233
+ case Token::GTE:
2234
+ return ge;
2235
+ default:
2236
+ UNREACHABLE();
2237
+ return kNoCondition;
2238
+ }
2239
+ }
2240
+
2241
+
2242
// Materializes a generic (tagged) comparison via the compare IC, producing
// the true/false heap values in the result register.
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.

  Condition condition = ComputeCompareCondition(op);
  // The compare IC expects the condition reversed for GT and LTE.
  if (op == Token::GT || op == Token::LTE) {
    condition = ReverseCondition(condition);
  }
  // Exactly one of the two predicated loads executes.
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kTrueValueRootIndex,
              condition);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kFalseValueRootIndex,
              NegateCondition(condition));
}
2260
+
2261
+
2262
// Branch form of the generic (tagged) comparison via the compare IC.
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  // The compare stub expects compare condition and the input operands
  // reversed for GT and LTE.
  Condition condition = ComputeCompareCondition(op);
  if (op == Token::GT || op == Token::LTE) {
    condition = ReverseCondition(condition);
  }
  __ cmp(r0, Operand(0));
  EmitBranch(true_block, false_block, condition);
}
2279
+
2280
+
2281
// Emits the function epilogue: optional trace call, frame teardown
// (restore fp/lr, pop the receiver and parameters), and the return jump.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r0.
    __ push(r0);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  // +1 for the receiver pushed along with the parameters.
  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
  __ mov(sp, fp);
  __ ldm(ia_w, sp, fp.bit() | lr.bit());
  __ add(sp, sp, Operand(sp_delta));
  __ Jump(lr);
}
2294
+
2295
+
2296
// Loads a global variable directly from its property cell. When the cell
// may have been deleted (hole check requested), a hole value deoptimizes.
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
  if (instr->hydrogen()->check_hole_value()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    DeoptimizeIf(eq, instr->environment());
  }
}
2306
+
2307
+
2308
// Loads a global through the generic load IC. The reloc mode differs for
// typeof loads (plain CODE_TARGET) vs. ordinary contextual loads.
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  // The load IC expects the name in r2.
  __ mov(r2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}
2318
+
2319
+
2320
// Stores a global variable directly into its property cell, deoptimizing
// first if the cell currently holds the hole (i.e. the property may have
// been deleted).
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  // Load the cell.
  __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->check_hole_value()) {
    Register scratch2 = ToRegister(instr->TempAt(0));
    __ ldr(scratch2,
           FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(scratch2, ip);
    DeoptimizeIf(eq, instr->environment());
  }

  // Store the value.
  __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
}
2343
+
2344
+
2345
// Stores a global through the generic store IC, honoring strict mode.
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  // The store IC expects the name in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = instr->strict_mode()
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
2355
+
2356
+
2357
// Loads a slot from the given context object.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(context, instr->slot_index()));
}
2362
+
2363
+
2364
// Stores a value into a context slot, emitting a write barrier when the
// instruction says one is needed (heap-object value in old space).
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  __ str(value, ContextOperand(context, instr->slot_index()));
  if (instr->needs_write_barrier()) {
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWrite(context, Operand(offset), value, scratch0());
  }
}
2373
+
2374
+
2375
// Loads a named field either from inside the object or from its external
// properties backing store, at the offset recorded by the hydrogen node.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
  } else {
    // Out-of-object: indirect through the properties array first.
    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
  }
}
2385
+
2386
+
2387
// Emits a monomorphic named load for a known map: either a direct field
// load (in-object or from the properties array) or materialization of a
// constant function found in the map's descriptors.
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name) {
  LookupResult lookup;
  type->LookupInDescriptors(NULL, *name, &lookup);
  ASSERT(lookup.IsProperty() &&
         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
  if (lookup.type() == FIELD) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
      __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else {
    // CONSTANT_FUNCTION: the value is known at compile time.
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    LoadHeapObject(result, Handle<HeapObject>::cast(function));
  }
}
2412
+
2413
+
2414
// Polymorphic named load: compares the object's map against each known map
// in turn, emitting a monomorphic load per map. Depending on need_generic(),
// the final fallback is either the generic load IC or a deoptimization.
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  int map_count = instr->hydrogen()->types()->length();
  Handle<String> name = instr->hydrogen()->name();
  if (map_count == 0) {
    // No maps collected: go straight to the generic IC.
    ASSERT(instr->hydrogen()->need_generic());
    __ mov(r2, Operand(name));
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    Label done;
    __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    // All but the last map fall through to the next comparison on mismatch.
    for (int i = 0; i < map_count - 1; ++i) {
      Handle<Map> map = instr->hydrogen()->types()->at(i);
      Label next;
      __ cmp(scratch, Operand(map));
      __ b(ne, &next);
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ b(&done);
      __ bind(&next);
    }
    // The last map mismatching either falls back to the generic IC or
    // deoptimizes, depending on need_generic().
    Handle<Map> map = instr->hydrogen()->types()->last();
    __ cmp(scratch, Operand(map));
    if (instr->hydrogen()->need_generic()) {
      Label generic;
      __ b(ne, &generic);
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ b(&done);
      __ bind(&generic);
      __ mov(r2, Operand(name));
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallCode(ic, RelocInfo::CODE_TARGET, instr);
    } else {
      DeoptimizeIf(ne, instr->environment());
      EmitLoadFieldOrConstantFunction(result, object, map, name);
    }
    __ bind(&done);
  }
}
2455
+
2456
+
2457
// Named load through the generic load IC.
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
2466
+
2467
+
2468
// Loads a function's prototype, deoptimizing when the input is not a
// function or has no (initial-map or direct) prototype. Handles the
// non-instance-prototype case by reading the constructor out of the map.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  DeoptimizeIf(ne, instr->environment());

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  __ ldr(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
  __ b(ne, &done);

  // Get the prototype from the initial map.
  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}
2510
+
2511
+
2512
// Loads the elements backing store of a JS object. Under --debug-code,
// verifies the result is a fixed array, a COW fixed array, or an external
// array.
void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done;
    __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch, ip);
    __ b(eq, &done);
    __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    __ cmp(scratch, ip);
    __ b(eq, &done);
    // Neither fixed-array map matched: require an external array instance
    // type, i.e. (type - FIRST_EXTERNAL_ARRAY_TYPE) < kExternalArrayTypeCount
    // as an unsigned comparison.
    __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
    __ sub(scratch, scratch, Operand(FIRST_EXTERNAL_ARRAY_TYPE));
    __ cmp(scratch, Operand(kExternalArrayTypeCount));
    // NOTE(review): `cc` is the ARM carry-clear condition (alias of `lo`,
    // unsigned lower), so this asserts scratch < kExternalArrayTypeCount.
    __ Check(cc, "Check for fast elements failed.");
    __ bind(&done);
  }
}
2535
+
2536
+
2537
// Loads the raw backing-store pointer out of an external array object.
void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg  = ToRegister(instr->InputAt(0));
  __ ldr(to_reg, FieldMemOperand(from_reg,
                                 ExternalArray::kExternalPointerOffset));
}
2544
+
2545
+
2546
// Loads argument number `index` from an arguments frame given its base
// pointer and length. Deoptimizes on out-of-range indices. Note: clobbers
// the length register.
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  // Bailout index is not a valid argument index. Use unsigned check to get
  // negative check for free.
  __ sub(length, length, index, SetCC);
  DeoptimizeIf(ls, instr->environment());

  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them add one more.
  __ add(length, length, Operand(1));
  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
}
2562
+
2563
+
2564
// Loads an element from a fast-elements fixed array, optionally
// deoptimizing on hole values. The result register aliases the elements
// register, so the elements pointer is consumed by the load.
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = EmitLoadRegister(instr->key(), scratch0());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(result.is(elements));

  // Load the result.
  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    __ cmp(result, scratch);
    DeoptimizeIf(eq, instr->environment());
  }
}
2582
+
2583
+
2584
// Loads an element from a typed (external) array. Float/double arrays go
// through VFP registers; integer arrays use the width-appropriate load.
// Unsigned int loads that would not fit in a tagged smi/int32 deoptimize.
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = no_reg;
  ExternalArrayType array_type = instr->array_type();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // Reject constant keys whose scaled offset could leave the range the
    // addressing mode can encode (and negative keys).
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  // Shift converting an element index into a byte offset.
  int shift_size = ExternalArrayTypeToShiftSize(array_type);

  if (array_type == kExternalFloatArray || array_type == kExternalDoubleArray) {
    CpuFeatures::Scope scope(VFP3);
    DwVfpRegister result(ToDoubleRegister(instr->result()));
    Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
                                    : Operand(key, LSL, shift_size));
    __ add(scratch0(), external_pointer, operand);
    if (array_type == kExternalFloatArray) {
      // Load a single-precision value and widen it to double.
      __ vldr(result.low(), scratch0(), 0);
      __ vcvt_f64_f32(result, result.low());
    } else  {  // i.e. array_type == kExternalDoubleArray
      __ vldr(result, scratch0(), 0);
    }
  } else {
    Register result(ToRegister(instr->result()));
    MemOperand mem_operand(key_is_constant
        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
        : MemOperand(external_pointer, key, LSL, shift_size));
    switch (array_type) {
      case kExternalByteArray:
        __ ldrsb(result, mem_operand);
        break;
      case kExternalUnsignedByteArray:
      case kExternalPixelArray:
        __ ldrb(result, mem_operand);
        break;
      case kExternalShortArray:
        __ ldrsh(result, mem_operand);
        break;
      case kExternalUnsignedShortArray:
        __ ldrh(result, mem_operand);
        break;
      case kExternalIntArray:
        __ ldr(result, mem_operand);
        break;
      case kExternalUnsignedIntArray:
        __ ldr(result, mem_operand);
        // Values >= 2^31 cannot be represented as a non-negative int32.
        __ cmp(result, Operand(0x80000000));
        // TODO(danno): we could be more clever here, perhaps having a special
        // version of the stub that detects if the overflow case actually
        // happens, and generate code that returns a double rather than int.
        DeoptimizeIf(cs, instr->environment());
        break;
      case kExternalFloatArray:
      case kExternalDoubleArray:
        UNREACHABLE();
        break;
    }
  }
}
2650
+
2651
+
2652
// Generic keyed load via the KeyedLoadIC stub. The IC's register contract
// (object in r1, key in r0) is asserted, not established, here.
+ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2653
+ ASSERT(ToRegister(instr->object()).is(r1));
2654
+ ASSERT(ToRegister(instr->key()).is(r0));
2655
+
2656
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2657
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
2658
+ }
2659
+
2660
+
2661
// Computes the frame pointer that holds the actual arguments: the current fp,
// or the caller frame's fp when an arguments-adaptor frame is present.
+ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2661
+ Register scratch = scratch0();
2662
+ Register result = ToRegister(instr->result());
2663
+
2664
+ // Check if the calling frame is an arguments adaptor frame.
2665
// NOTE(review): neither label below is ever bound or branched to in this
// function; the selection is done purely with conditional moves.
+ Label done, adapted;
2666
+ __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2667
+ __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
2668
+ __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2669
+
2670
+ // Result is the frame pointer for the frame if not adapted and for the real
2671
+ // frame below the adaptor frame if adapted.
2672
+ __ mov(result, fp, LeaveCC, ne);
2673
+ __ mov(result, scratch, LeaveCC, eq);
2674
+ }
2676
+
2677
+
2678
// Produces the argument count: the static parameter count when no adaptor
// frame exists (input frame pointer == fp), else the untagged length stored
// in the adaptor frame.
+ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2678
+ Register elem = ToRegister(instr->InputAt(0));
2679
+ Register result = ToRegister(instr->result());
2680
+
2681
+ Label done;
2682
+
2683
+ // If no arguments adaptor frame the number of arguments is fixed.
2684
+ __ cmp(fp, elem);
2685
+ __ mov(result, Operand(scope()->num_parameters()));
2686
+ __ b(eq, &done);
2687
+
2688
+ // Arguments adaptor frame present. Get argument length from there.
2689
+ __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2690
+ __ ldr(result,
2691
+ MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2692
// Length is stored as a Smi in the adaptor frame.
+ __ SmiUntag(result);
2693
+
2694
+ // Argument length is in result register.
2695
+ __ bind(&done);
2696
+ }
2698
+
2699
+
2700
// Implements Function.prototype.apply with an arguments object: normalizes
// the receiver, copies the arguments from the (possibly adapted) frame onto
// the stack, and invokes the function.
+ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2700
+ Register receiver = ToRegister(instr->receiver());
2701
+ Register function = ToRegister(instr->function());
2702
+ Register length = ToRegister(instr->length());
2703
+ Register elements = ToRegister(instr->elements());
2704
+ Register scratch = scratch0();
2705
+ ASSERT(receiver.is(r0)); // Used for parameter count.
2706
+ ASSERT(function.is(r1)); // Required by InvokeFunction.
2707
+ ASSERT(ToRegister(instr->result()).is(r0));
2708
+
2709
+ // If the receiver is null or undefined, we have to pass the global object
2710
+ // as a receiver.
2711
+ Label global_object, receiver_ok;
2712
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2713
+ __ cmp(receiver, scratch);
2714
+ __ b(eq, &global_object);
2715
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2716
+ __ cmp(receiver, scratch);
2717
+ __ b(eq, &global_object);
2718
+
2719
+ // Deoptimize if the receiver is not a JS object.
2720
// Smi check first (tst sets eq for a Smi), then the instance-type check.
+ __ tst(receiver, Operand(kSmiTagMask));
2721
+ DeoptimizeIf(eq, instr->environment());
2722
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
2723
+ DeoptimizeIf(lo, instr->environment());
2724
+ __ jmp(&receiver_ok);
2725
+
2726
+ __ bind(&global_object);
2727
+ __ ldr(receiver, GlobalObjectOperand());
2728
+ __ bind(&receiver_ok);
2729
+
2730
+ // Copy the arguments to this function possibly from the
2731
+ // adaptor frame below it.
2732
// Hard cap on copied arguments; beyond this we deoptimize rather than
// risk overflowing the stack.
+ const uint32_t kArgumentsLimit = 1 * KB;
2733
+ __ cmp(length, Operand(kArgumentsLimit));
2734
+ DeoptimizeIf(hi, instr->environment());
2735
+
2736
+ // Push the receiver and use the register to keep the original
2737
+ // number of arguments.
2738
+ __ push(receiver);
2739
+ __ mov(receiver, length);
2740
+ // The arguments are at a one pointer size offset from elements.
2741
+ __ add(elements, elements, Operand(1 * kPointerSize));
2742
+
2743
+ // Loop through the arguments pushing them onto the execution
2744
+ // stack.
2745
+ Label invoke, loop;
2746
+ // length is a small non-negative integer, due to the test above.
2747
+ __ cmp(length, Operand(0));
2748
+ __ b(eq, &invoke);
2749
+ __ bind(&loop);
2750
// Push arguments from highest index down; the sub's SetCC drives the
// loop-termination branch below.
+ __ ldr(scratch, MemOperand(elements, length, LSL, 2));
2751
+ __ push(scratch);
2752
+ __ sub(length, length, Operand(1), SetCC);
2753
+ __ b(ne, &loop);
2754
+
2755
+ __ bind(&invoke);
2756
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
2757
+ LPointerMap* pointers = instr->pointer_map();
2758
+ LEnvironment* env = instr->deoptimization_environment();
2759
+ RecordPosition(pointers->position());
2760
+ RegisterEnvironmentForDeoptimization(env);
2761
+ SafepointGenerator safepoint_generator(this,
2762
+ pointers,
2763
+ env->deoptimization_index());
2764
+ // The number of arguments is stored in receiver which is r0, as expected
2765
+ // by InvokeFunction.
2766
+ v8::internal::ParameterCount actual(receiver);
2767
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
2768
// Restore the context register after the call.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2769
+ }
2771
+
2772
+
2773
// Pushes one (tagged/integer) argument onto the stack; double-typed
// arguments are not supported here and abort compilation.
+ void LCodeGen::DoPushArgument(LPushArgument* instr) {
2773
+ LOperand* argument = instr->InputAt(0);
2774
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2775
+ Abort("DoPushArgument not implemented for double type.");
2776
+ } else {
2777
// ip serves as the scratch register if the operand is a stack slot.
+ Register argument_reg = EmitLoadRegister(argument, ip);
2778
+ __ push(argument_reg);
2779
+ }
2780
+ }
2782
+
2783
+
2784
// Materializes the current context (cp) into the result register.
+ void LCodeGen::DoContext(LContext* instr) {
2784
+ Register result = ToRegister(instr->result());
2785
+ __ mov(result, cp);
2786
+ }
2788
+
2789
+
2790
// Loads the enclosing (outer) context: fetch the closure stored in the
// given context, then that closure's own context.
+ void LCodeGen::DoOuterContext(LOuterContext* instr) {
2790
+ Register context = ToRegister(instr->context());
2791
+ Register result = ToRegister(instr->result());
2792
+ __ ldr(result,
2793
+ MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
2794
+ __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
2795
+ }
2797
+
2798
+
2799
// Loads the global object from the current context.
// NOTE(review): the `context` operand is fetched but unused — the load goes
// through cp, not `context`; confirm this matches upstream intent.
+ void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2799
+ Register context = ToRegister(instr->context());
2800
+ Register result = ToRegister(instr->result());
2801
+ __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
2802
+ }
2804
+
2805
+
2806
// Loads the global receiver object out of the given global object.
+ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2806
+ Register global = ToRegister(instr->global());
2807
+ Register result = ToRegister(instr->result());
2808
+ __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
2809
+ }
2811
+
2812
+
2813
// Direct call to a statically known JSFunction. Callers must have placed the
// function object in r1 (this code reads fields off r1). Switches context if
// the callee's differs, skips the arguments adaptor when arity matches, and
// records the safepoint/deopt data for the call.
+ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2813
+ int arity,
2814
+ LInstruction* instr,
2815
+ CallKind call_kind) {
2816
+ // Change context if needed.
2817
+ bool change_context =
2818
+ (info()->closure()->context() != function->context()) ||
2819
+ scope()->contains_with() ||
2820
+ (scope()->num_heap_slots() > 0);
2821
+ if (change_context) {
2822
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
2823
+ }
2824
+
2825
+ // Set r0 to arguments count if adaption is not needed. Assumes that r0
2826
+ // is available to write to at this point.
2827
+ if (!function->NeedsArgumentsAdaption()) {
2828
+ __ mov(r0, Operand(arity));
2829
+ }
2830
+
2831
+ LPointerMap* pointers = instr->pointer_map();
2832
+ RecordPosition(pointers->position());
2833
+
2834
+ // Invoke function.
2835
// r5 carries the call kind (method vs. function call) per the ARM calling
// convention used by V8's stubs.
+ __ SetCallKind(r5, call_kind);
2836
+ __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2837
+ __ Call(ip);
2838
+
2839
+ // Setup deoptimization.
2840
+ RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
2841
+
2842
+ // Restore context.
2843
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2844
+ }
2846
+
2847
+
2848
// Calls a compile-time-known function as a method: loads the function object
// into r1 (required by CallKnownFunction) and delegates.
+ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2848
+ ASSERT(ToRegister(instr->result()).is(r0));
2849
+ __ mov(r1, Operand(instr->function()));
2850
+ CallKnownFunction(instr->function(),
2851
+ instr->arity(),
2852
+ instr,
2853
+ CALL_AS_METHOD);
2854
+ }
2856
+
2857
+
2858
// Deferred (slow) path of Math.abs for a tagged input: verify it is a heap
// number, and if negative allocate a fresh heap number with the sign bit
// cleared. Input and result share a register.
+ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2858
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
2859
+ Register input = ToRegister(instr->InputAt(0));
2860
+ Register scratch = scratch0();
2861
+
2862
+ // Deoptimize if not a heap number.
2863
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2864
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
2865
+ __ cmp(scratch, Operand(ip));
2866
+ DeoptimizeIf(ne, instr->environment());
2867
+
2868
+ Label done;
2869
// Reuse scratch0() under a new name; invalidate `scratch` so it is not
// accidentally used for both purposes below.
+ Register exponent = scratch0();
2870
+ scratch = no_reg;
2871
+ __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2872
+ // Check the sign of the argument. If the argument is positive, just
2873
+ // return it. We do not need to patch the stack since |input| and
2874
+ // |result| are the same register and |input| would be restored
2875
+ // unchanged by popping safepoint registers.
2876
+ __ tst(exponent, Operand(HeapNumber::kSignMask));
2877
+ __ b(eq, &done);
2878
+
2879
+ // Input is negative. Reverse its sign.
2880
+ // Preserve the value of all registers.
2881
+ {
2882
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2883
+
2884
+ // Registers were saved at the safepoint, so we can use
2885
+ // many scratch registers.
2886
// Pick four temporaries distinct from |input| by substituting r0 for
// whichever of r1..r4 the input occupies.
+ Register tmp1 = input.is(r1) ? r0 : r1;
2887
+ Register tmp2 = input.is(r2) ? r0 : r2;
2888
+ Register tmp3 = input.is(r3) ? r0 : r3;
2889
+ Register tmp4 = input.is(r4) ? r0 : r4;
2890
+
2891
+ // exponent: floating point exponent value.
2892
+
2893
+ Label allocated, slow;
2894
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
2895
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
2896
+ __ b(&allocated);
2897
+
2898
+ // Slow case: Call the runtime system to do the number allocation.
2899
+ __ bind(&slow);
2900
+
2901
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
2902
+ // Set the pointer to the new heap number in tmp.
2903
+ if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
2904
+ // Restore input_reg after call to runtime.
2905
+ __ LoadFromSafepointRegisterSlot(input, input);
2906
+ __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2907
+
2908
+ __ bind(&allocated);
2909
+ // exponent: floating point exponent value.
2910
+ // tmp1: allocated heap number.
2911
// Clear the IEEE sign bit in the exponent word and copy both words into
// the new heap number.
+ __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
2912
+ __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
2913
+ __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
2914
+ __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
2915
+
2916
// Write the result into input's safepoint slot so it is restored into the
// input/result register when the safepoint registers are popped.
+ __ StoreToSafepointRegisterSlot(tmp1, input);
2917
+ }
2918
+
2919
+ __ bind(&done);
2920
+ }
2922
+
2923
+
2924
// Integer Math.abs: conditionally negate a negative input in place and
// deoptimize on overflow (abs(INT_MIN) is not representable).
+ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
2924
+ Register input = ToRegister(instr->InputAt(0));
2925
+ __ cmp(input, Operand(0));
2926
+ // We can make rsb conditional because the previous cmp instruction
2927
+ // will clear the V (overflow) flag and rsb won't set this flag
2928
+ // if input is positive.
2929
+ __ rsb(input, input, Operand(0), SetCC, mi);
2930
+ // Deoptimize on overflow.
2931
+ DeoptimizeIf(vs, instr->environment());
2932
+ }
2934
+
2935
+
2936
// Math.abs dispatcher: vabs for doubles, in-place negate for int32, and for
// tagged values a fast Smi path with a deferred heap-number slow path.
+ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2936
+ // Class for deferred case.
2937
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2938
+ public:
2939
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2940
+ LUnaryMathOperation* instr)
2941
+ : LDeferredCode(codegen), instr_(instr) { }
2942
+ virtual void Generate() {
2943
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2944
+ }
2945
+ private:
2946
+ LUnaryMathOperation* instr_;
2947
+ };
2948
+
2949
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
2950
+ Representation r = instr->hydrogen()->value()->representation();
2951
+ if (r.IsDouble()) {
2952
+ DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
2953
+ __ vabs(input, input);
2954
+ } else if (r.IsInteger32()) {
2955
+ EmitIntegerMathAbs(instr);
2956
+ } else {
2957
+ // Representation is tagged.
2958
+ DeferredMathAbsTaggedHeapNumber* deferred =
2959
+ new DeferredMathAbsTaggedHeapNumber(this, instr);
2960
+ Register input = ToRegister(instr->InputAt(0));
2961
+ // Smi check.
2962
+ __ JumpIfNotSmi(input, deferred->entry());
2963
+ // If smi, handle it directly.
2964
+ EmitIntegerMathAbs(instr);
2965
+ __ bind(deferred->exit());
2966
+ }
2967
+ }
2969
+
2970
+
2971
// Math.floor of a double into an int32 result. Deoptimizes if the truncation
// is inexact/out of range, and (optionally) if the result is -0.
+ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2971
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2972
+ Register result = ToRegister(instr->result());
2973
+ SwVfpRegister single_scratch = double_scratch0().low();
2974
+ Register scratch1 = scratch0();
2975
+ Register scratch2 = ToRegister(instr->TempAt(0));
2976
+
2977
// Truncate toward minus infinity; the helper leaves the condition flags
// set so 'ne' below means the conversion was not exact/valid.
+ __ EmitVFPTruncate(kRoundToMinusInf,
2978
+ single_scratch,
2979
+ input,
2980
+ scratch1,
2981
+ scratch2);
2982
+ DeoptimizeIf(ne, instr->environment());
2983
+
2984
+ // Move the result back to general purpose register r0.
2985
+ __ vmov(result, single_scratch);
2986
+
2987
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2988
+ // Test for -0.
2989
// An integer result of 0 with the input's sign bit set means the input
// was -0.0; bail out so the caller keeps the -0 distinction.
+ Label done;
2990
+ __ cmp(result, Operand(0));
2991
+ __ b(ne, &done);
2992
+ __ vmov(scratch1, input.high());
2993
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
2994
+ DeoptimizeIf(ne, instr->environment());
2995
+ __ bind(&done);
2996
+ }
2997
+ }
2999
+
3000
+
3001
// Math.round of a double into an int32: add 0.5 then truncate toward minus
// infinity, with explicit handling of tiny magnitudes, range overflow, sign
// flips around zero, and the -0 bailout.
+ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3001
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3002
+ Register result = ToRegister(instr->result());
3003
// result doubles as a scratch register until the final value is written.
+ Register scratch1 = result;
3004
+ Register scratch2 = scratch0();
3005
+ Label done, check_sign_on_zero;
3006
+
3007
+ // Extract exponent bits.
3008
+ __ vmov(scratch1, input.high());
3009
+ __ ubfx(scratch2,
3010
+ scratch1,
3011
+ HeapNumber::kExponentShift,
3012
+ HeapNumber::kExponentBits);
3013
+
3014
+ // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3015
// Biased exponent <= bias-2 means |input| < 0.5.
+ __ cmp(scratch2, Operand(HeapNumber::kExponentBias - 2));
3016
+ __ mov(result, Operand(0), LeaveCC, le);
3017
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3018
+ __ b(le, &check_sign_on_zero);
3019
+ } else {
3020
+ __ b(le, &done);
3021
+ }
3022
+
3023
+ // The following conversion will not work with numbers
3024
+ // outside of ]-2^32, 2^32[.
3025
+ __ cmp(scratch2, Operand(HeapNumber::kExponentBias + 32));
3026
+ DeoptimizeIf(ge, instr->environment());
3027
+
3028
+ // Save the original sign for later comparison.
3029
+ __ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask));
3030
+
3031
+ __ vmov(double_scratch0(), 0.5);
3032
+ __ vadd(input, input, double_scratch0());
3033
+
3034
+ // Check sign of the result: if the sign changed, the input
3035
+ // value was in ]0.5, 0[ and the result should be -0.
3036
+ __ vmov(scratch1, input.high());
3037
+ __ eor(scratch1, scratch1, Operand(scratch2), SetCC);
3038
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3039
+ DeoptimizeIf(mi, instr->environment());
3040
+ } else {
3041
+ __ mov(result, Operand(0), LeaveCC, mi);
3042
+ __ b(mi, &done);
3043
+ }
3044
+
3045
+ __ EmitVFPTruncate(kRoundToMinusInf,
3046
+ double_scratch0().low(),
3047
+ input,
3048
+ scratch1,
3049
+ scratch2);
3050
+ DeoptimizeIf(ne, instr->environment());
3051
+ __ vmov(result, double_scratch0().low());
3052
+
3053
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3054
+ // Test for -0.
3055
+ __ cmp(result, Operand(0));
3056
+ __ b(ne, &done);
3057
+ __ bind(&check_sign_on_zero);
3058
+ __ vmov(scratch1, input.high());
3059
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
3060
+ DeoptimizeIf(ne, instr->environment());
3061
+ }
3062
+ __ bind(&done);
3063
+ }
3065
+
3066
+
3067
// Math.sqrt: single VFP vsqrt, computed in place (input == result).
+ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3067
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3068
+ ASSERT(ToDoubleRegister(instr->result()).is(input));
3069
+ __ vsqrt(input, input);
3070
+ }
3072
+
3073
+
3074
// Math.pow(x, 0.5): computed in place as sqrt(x + 0.0). The +0.0 addition
// normalizes -0 to +0 so that sqrt(-0) yields +0 as pow requires.
+ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3074
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3075
+ Register scratch = scratch0();
3076
+ SwVfpRegister single_scratch = double_scratch0().low();
3077
+ DoubleRegister double_scratch = double_scratch0();
3078
+ ASSERT(ToDoubleRegister(instr->result()).is(input));
3079
+
3080
+ // Add +0 to convert -0 to +0.
3081
// Build the double constant 0.0 via an integer-to-double conversion.
+ __ mov(scratch, Operand(0));
3082
+ __ vmov(single_scratch, scratch);
3083
+ __ vcvt_f64_s32(double_scratch, single_scratch);
3084
+ __ vadd(input, input, double_scratch);
3085
+ __ vsqrt(input, input);
3086
+ }
3088
+
3089
+
3090
// Math.pow via C runtime calls. Three exponent representations are handled:
// double (power_double_double), int32 (power_double_int), and tagged — where
// a Smi is untagged and converted, and a heap number is loaded directly
// (anything else deoptimizes).
+ void LCodeGen::DoPower(LPower* instr) {
3090
+ LOperand* left = instr->InputAt(0);
3091
+ LOperand* right = instr->InputAt(1);
3092
+ Register scratch = scratch0();
3093
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
3094
+ Representation exponent_type = instr->hydrogen()->right()->representation();
3095
+ if (exponent_type.IsDouble()) {
3096
+ // Prepare arguments and call C function.
3097
+ __ PrepareCallCFunction(0, 2, scratch);
3098
+ __ SetCallCDoubleArguments(ToDoubleRegister(left),
3099
+ ToDoubleRegister(right));
3100
+ __ CallCFunction(
3101
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
3102
+ } else if (exponent_type.IsInteger32()) {
3103
+ ASSERT(ToRegister(right).is(r0));
3104
+ // Prepare arguments and call C function.
3105
+ __ PrepareCallCFunction(1, 1, scratch);
3106
+ __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
3107
+ __ CallCFunction(
3108
+ ExternalReference::power_double_int_function(isolate()), 1, 1);
3109
+ } else {
3110
+ ASSERT(exponent_type.IsTagged());
3111
+ ASSERT(instr->hydrogen()->left()->representation().IsDouble());
3112
+
3113
+ Register right_reg = ToRegister(right);
3114
+
3115
+ // Check for smi on the right hand side.
3116
+ Label non_smi, call;
3117
+ __ JumpIfNotSmi(right_reg, &non_smi);
3118
+
3119
+ // Untag smi and convert it to a double.
3120
+ __ SmiUntag(right_reg);
3121
+ SwVfpRegister single_scratch = double_scratch0().low();
3122
+ __ vmov(single_scratch, right_reg);
3123
+ __ vcvt_f64_s32(result_reg, single_scratch);
3124
+ __ jmp(&call);
3125
+
3126
+ // Heap number map check.
3127
+ __ bind(&non_smi);
3128
+ __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
3129
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3130
+ __ cmp(scratch, Operand(ip));
3131
+ DeoptimizeIf(ne, instr->environment());
3132
// vldr needs an untagged base address, so strip the heap-object tag
// before loading the double payload.
+ int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
3133
+ __ add(scratch, right_reg, Operand(value_offset));
3134
+ __ vldr(result_reg, scratch, 0);
3135
+
3136
+ // Prepare arguments and call C function.
3137
+ __ bind(&call);
3138
+ __ PrepareCallCFunction(0, 2, scratch);
3139
+ __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
3140
+ __ CallCFunction(
3141
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
3142
+ }
3143
+ // Store the result in the result register.
3144
+ __ GetCFunctionDoubleResult(result_reg);
3145
+ }
3147
+
3148
+
3149
// Math.log via the transcendental cache stub (untagged/double flavor);
// the stub's result register contract (d2) is asserted.
+ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3149
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
3150
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
3151
+ TranscendentalCacheStub::UNTAGGED);
3152
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3153
+ }
3155
+
3156
+
3157
// Math.cos via the transcendental cache stub; result expected in d2.
+ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3157
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
3158
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
3159
+ TranscendentalCacheStub::UNTAGGED);
3160
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3161
+ }
3163
+
3164
+
3165
// Math.sin via the transcendental cache stub; result expected in d2.
+ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3165
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
3166
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
3167
+ TranscendentalCacheStub::UNTAGGED);
3168
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3169
+ }
3171
+
3172
+
3173
// Dispatches a unary math Lithium instruction to the matching emitter.
+ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3173
+ switch (instr->op()) {
3174
+ case kMathAbs:
3175
+ DoMathAbs(instr);
3176
+ break;
3177
+ case kMathFloor:
3178
+ DoMathFloor(instr);
3179
+ break;
3180
+ case kMathRound:
3181
+ DoMathRound(instr);
3182
+ break;
3183
+ case kMathSqrt:
3184
+ DoMathSqrt(instr);
3185
+ break;
3186
+ case kMathPowHalf:
3187
+ DoMathPowHalf(instr);
3188
+ break;
3189
+ case kMathCos:
3190
+ DoMathCos(instr);
3191
+ break;
3192
+ case kMathSin:
3193
+ DoMathSin(instr);
3194
+ break;
3195
+ case kMathLog:
3196
+ DoMathLog(instr);
3197
+ break;
3198
+ default:
3199
+ Abort("Unimplemented type of LUnaryMathOperation.");
3200
+ UNREACHABLE();
3201
+ }
3202
+ }
3204
+
3205
+
3206
// Invokes a function whose object is in r1 (asserted), with full safepoint
// and lazy-deopt bookkeeping; restores cp afterward.
+ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3206
+ ASSERT(ToRegister(instr->function()).is(r1));
3207
+ ASSERT(instr->HasPointerMap());
3208
+ ASSERT(instr->HasDeoptimizationEnvironment());
3209
+ LPointerMap* pointers = instr->pointer_map();
3210
+ LEnvironment* env = instr->deoptimization_environment();
3211
+ RecordPosition(pointers->position());
3212
+ RegisterEnvironmentForDeoptimization(env);
3213
+ SafepointGenerator generator(this, pointers, env->deoptimization_index());
3214
+ ParameterCount count(instr->arity());
3215
+ __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
3216
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3217
+ }
3219
+
3220
+
3221
// Keyed call (obj[key](...)) via a keyed-call IC stub; cp restored after.
+ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3221
+ ASSERT(ToRegister(instr->result()).is(r0));
3222
+
3223
+ int arity = instr->arity();
3224
+ Handle<Code> ic =
3225
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
3226
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
3227
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3228
+ }
3230
+
3231
+
3232
// Named property call via a call IC; the property name goes in r2 per the
// IC's calling convention.
+ void LCodeGen::DoCallNamed(LCallNamed* instr) {
3232
+ ASSERT(ToRegister(instr->result()).is(r0));
3233
+
3234
+ int arity = instr->arity();
3235
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3236
+ Handle<Code> ic =
3237
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
3238
+ __ mov(r2, Operand(instr->name()));
3239
+ CallCode(ic, mode, instr);
3240
+ // Restore context register.
3241
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3242
+ }
3244
+
3245
+
3246
// Call of a function expression via CallFunctionStub; drops the function
// slot left on the stack after the stub returns, then restores cp.
+ void LCodeGen::DoCallFunction(LCallFunction* instr) {
3246
+ ASSERT(ToRegister(instr->result()).is(r0));
3247
+
3248
+ int arity = instr->arity();
3249
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
3250
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3251
+ __ Drop(1);
3252
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3253
+ }
3255
+
3256
+
3257
// Call to a global-scope name via a call IC with CODE_TARGET_CONTEXT
// relocation (the IC resolves the name against the global context).
+ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3257
+ ASSERT(ToRegister(instr->result()).is(r0));
3258
+
3259
+ int arity = instr->arity();
3260
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3261
+ Handle<Code> ic =
3262
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
3263
+ __ mov(r2, Operand(instr->name()));
3264
+ CallCode(ic, mode, instr);
3265
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3266
+ }
3268
+
3269
+
3270
// Direct call to a known global function: target into r1, then the shared
// known-function call path (as a function call, not a method call).
+ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3270
+ ASSERT(ToRegister(instr->result()).is(r0));
3271
+ __ mov(r1, Operand(instr->target()));
3272
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
3273
+ }
3275
+
3276
+
3277
// 'new' expression: constructor in r1, arity in r0, dispatched to the
// JSConstructCall builtin with construct-call relocation.
+ void LCodeGen::DoCallNew(LCallNew* instr) {
3277
+ ASSERT(ToRegister(instr->InputAt(0)).is(r1));
3278
+ ASSERT(ToRegister(instr->result()).is(r0));
3279
+
3280
+ Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
3281
+ __ mov(r0, Operand(instr->arity()));
3282
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
3283
+ }
3285
+
3286
+
3287
// Thin wrapper: forwards a runtime-call instruction to CallRuntime.
+ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3287
+ CallRuntime(instr->function(), instr->arity(), instr);
3288
+ }
3290
+
3291
+
3292
// Stores a value into a named field, either in-object or in the properties
// backing array, optionally writing a map transition first and emitting a
// write barrier when required.
+ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3292
+ Register object = ToRegister(instr->object());
3293
+ Register value = ToRegister(instr->value());
3294
+ Register scratch = scratch0();
3295
+ int offset = instr->offset();
3296
+
3297
+ ASSERT(!object.is(value));
3298
+
3299
+ if (!instr->transition().is_null()) {
3300
// Install the new map before the field store (map transition).
+ __ mov(scratch, Operand(instr->transition()));
3301
+ __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3302
+ }
3303
+
3304
+ // Do the store.
3305
+ if (instr->is_in_object()) {
3306
+ __ str(value, FieldMemOperand(object, offset));
3307
+ if (instr->needs_write_barrier()) {
3308
+ // Update the write barrier for the object for in-object properties.
3309
+ __ RecordWrite(object, Operand(offset), value, scratch);
3310
+ }
3311
+ } else {
3312
+ __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3313
+ __ str(value, FieldMemOperand(scratch, offset));
3314
+ if (instr->needs_write_barrier()) {
3315
+ // Update the write barrier for the properties array.
3316
+ // object is used as a scratch register.
3317
+ __ RecordWrite(scratch, Operand(offset), value, object);
3318
+ }
3319
+ }
3320
+ }
3322
+
3323
+
3324
// Generic named store via the StoreIC (strict or non-strict variant);
// object/value register contract (r1/r0) asserted, name placed in r2.
+ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3324
+ ASSERT(ToRegister(instr->object()).is(r1));
3325
+ ASSERT(ToRegister(instr->value()).is(r0));
3326
+
3327
+ // Name is always in r2.
3328
+ __ mov(r2, Operand(instr->name()));
3329
+ Handle<Code> ic = instr->strict_mode()
3330
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
3331
+ : isolate()->builtins()->StoreIC_Initialize();
3332
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
3333
+ }
3335
+
3336
+
3337
// Array bounds check: deoptimize when index >= length. The unsigned 'hs'
// condition also rejects negative indices (they compare as huge unsigned).
+ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3337
+ __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
3338
+ DeoptimizeIf(hs, instr->environment());
3339
+ }
3341
+
3342
+
3343
// Stores into a fast-elements backing store (FixedArray), with either a
// constant or register key, plus a write barrier when needed.
// NOTE(review): the write-barrier path reads `scratch`, which is only
// computed in the register-key branch; the constant-key branch asserts
// above that no barrier is needed, so the paths cannot mix.
+ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3343
+ Register value = ToRegister(instr->value());
3344
+ Register elements = ToRegister(instr->object());
3345
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3346
+ Register scratch = scratch0();
3347
+
3348
+ // Do the store.
3349
+ if (instr->key()->IsConstantOperand()) {
3350
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3351
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3352
+ int offset =
3353
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
3354
+ __ str(value, FieldMemOperand(elements, offset));
3355
+ } else {
3356
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3357
+ __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3358
+ }
3359
+
3360
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
3361
+ // Compute address of modified element and store it into key register.
3362
+ __ add(key, scratch, Operand(FixedArray::kHeaderSize));
3363
+ __ RecordWrite(elements, key, value);
3364
+ }
3365
+ }
3367
+
3368
+
3369
// Stores one element into an external (typed) array; mirror of the keyed
// specialized load. Floats are narrowed from double before the store;
// integer arrays use a width-appropriate str variant.
+ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3369
+ LStoreKeyedSpecializedArrayElement* instr) {
3370
+
3371
+ Register external_pointer = ToRegister(instr->external_pointer());
3372
+ Register key = no_reg;
3373
+ ExternalArrayType array_type = instr->array_type();
3374
+ bool key_is_constant = instr->key()->IsConstantOperand();
3375
+ int constant_key = 0;
3376
+ if (key_is_constant) {
3377
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3378
// Same constant-key range guard as the specialized load path.
+ if (constant_key & 0xF0000000) {
3379
+ Abort("array index constant value too big.");
3380
+ }
3381
+ } else {
3382
+ key = ToRegister(instr->key());
3383
+ }
3384
+ int shift_size = ExternalArrayTypeToShiftSize(array_type);
3385
+
3386
+ if (array_type == kExternalFloatArray || array_type == kExternalDoubleArray) {
3387
+ CpuFeatures::Scope scope(VFP3);
3388
+ DwVfpRegister value(ToDoubleRegister(instr->value()));
3389
+ Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
3390
+ : Operand(key, LSL, shift_size));
3391
+ __ add(scratch0(), external_pointer, operand);
3392
+ if (array_type == kExternalFloatArray) {
3393
// Narrow double -> float before storing 32 bits.
+ __ vcvt_f32_f64(double_scratch0().low(), value);
3394
+ __ vstr(double_scratch0().low(), scratch0(), 0);
3395
+ } else { // i.e. array_type == kExternalDoubleArray
3396
+ __ vstr(value, scratch0(), 0);
3397
+ }
3398
+ } else {
3399
+ Register value(ToRegister(instr->value()));
3400
+ MemOperand mem_operand(key_is_constant
3401
+ ? MemOperand(external_pointer, constant_key * (1 << shift_size))
3402
+ : MemOperand(external_pointer, key, LSL, shift_size));
3403
+ switch (array_type) {
3404
+ case kExternalPixelArray:
3405
+ case kExternalByteArray:
3406
+ case kExternalUnsignedByteArray:
3407
+ __ strb(value, mem_operand);
3408
+ break;
3409
+ case kExternalShortArray:
3410
+ case kExternalUnsignedShortArray:
3411
+ __ strh(value, mem_operand);
3412
+ break;
3413
+ case kExternalIntArray:
3414
+ case kExternalUnsignedIntArray:
3415
+ __ str(value, mem_operand);
3416
+ break;
3417
+ case kExternalFloatArray:
3418
+ case kExternalDoubleArray:
3419
+ UNREACHABLE();
3420
+ break;
3421
+ }
3422
+ }
3423
+ }
3425
+
3426
+
3427
// Generic keyed store via the KeyedStoreIC (strict or non-strict);
// object/key/value register contract (r2/r1/r0) asserted.
+ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3427
+ ASSERT(ToRegister(instr->object()).is(r2));
3428
+ ASSERT(ToRegister(instr->key()).is(r1));
3429
+ ASSERT(ToRegister(instr->value()).is(r0));
3430
+
3431
+ Handle<Code> ic = instr->strict_mode()
3432
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3433
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
3434
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
3435
+ }
3437
+
3438
+
3439
// String concatenation: pushes both operands and calls StringAddStub
// (operands are already known to be strings, so checks are skipped).
+ void LCodeGen::DoStringAdd(LStringAdd* instr) {
3439
+ __ push(ToRegister(instr->left()));
3440
+ __ push(ToRegister(instr->right()));
3441
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3442
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3443
+ }
3445
+
3446
+
3447
// Loads the character code at instr->index() of instr->string() into
// the result register. Flat sequential strings (one- and two-byte) are
// handled inline; cons strings with a non-empty second part and other
// non-flat representations go to the deferred runtime path.
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  // Deferred code object that calls the runtime for the slow path.
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
   private:
    LStringCharCodeAt* instr_;
  };

  Register scratch = scratch0();
  Register string = ToRegister(instr->string());
  // Either a register index or a compile-time constant index is used.
  Register index = no_reg;
  int const_index = -1;
  if (instr->index()->IsConstantOperand()) {
    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
    if (!Smi::IsValid(const_index)) {
      // Guaranteed to be out of bounds because of the assert above.
      // So the bounds check that must dominate this instruction must
      // have deoptimized already.
      if (FLAG_debug_code) {
        __ Abort("StringCharCodeAt: out of bounds index.");
      }
      // No code needs to be generated.
      return;
    }
  } else {
    index = ToRegister(instr->index());
  }
  Register result = ToRegister(instr->result());

  DeferredStringCharCodeAt* deferred =
      new DeferredStringCharCodeAt(this, instr);

  Label flat_string, ascii_string, done;

  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for non-flat strings.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(eq, &flat_string);

  // Handle non-flat strings.
  __ tst(result, Operand(kIsConsStringMask));
  __ b(eq, deferred->entry());

  // ConsString.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
  __ cmp(scratch, ip);
  __ b(ne, deferred->entry());
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
  // If the first cons component is also non-flat, then go to runtime.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, deferred->entry());

  // Check for 1-byte or 2-byte string.
  __ bind(&flat_string);
  STATIC_ASSERT(kAsciiStringTag != 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &ascii_string);

  // 2-byte string.
  // Load the 2-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  if (instr->index()->IsConstantOperand()) {
    // Constant index: fold the offset into the load.
    __ ldrh(result,
            FieldMemOperand(string,
                            SeqTwoByteString::kHeaderSize + 2 * const_index));
  } else {
    // Register index: scale by 2 (character size) via LSL #1.
    __ add(scratch,
           string,
           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
    __ ldrh(result, MemOperand(scratch, index, LSL, 1));
  }
  __ jmp(&done);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii_string);
  if (instr->index()->IsConstantOperand()) {
    __ ldrb(result, FieldMemOperand(string,
                                    SeqAsciiString::kHeaderSize + const_index));
  } else {
    __ add(scratch,
           string,
           Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
    __ ldrb(result, MemOperand(scratch, index));
  }
  __ bind(&done);
  __ bind(deferred->exit());
}
3551
+
3552
+
3553
// Slow path for DoStringCharCodeAt: calls Runtime::kStringCharCodeAt
// with (string, smi-tagged index) and stores the untagged result into
// the result register's safepoint slot.
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(0));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ mov(scratch, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  if (FLAG_debug_code) {
    __ AbortIfNotSmi(r0);
  }
  // The runtime returns a smi in r0; untag before storing the result.
  __ SmiUntag(r0);
  __ StoreToSafepointRegisterSlot(r0, result);
}
3583
+
3584
+
3585
// Converts a character code to a single-character string. ASCII codes
// are served from the single-character string cache; cache misses
// (undefined entries) and non-ASCII codes fall through to the deferred
// runtime path.
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  // Out-of-range codes cannot be cached; go to the runtime.
  __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
  __ b(hi, deferred->entry());
  // Index into the single-character string cache (a FixedArray).
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  // An undefined cache entry means the string is not cached yet.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ b(eq, deferred->entry());
  __ bind(deferred->exit());
}
3613
+
3614
+
3615
// Slow path for DoStringCharFromCode: calls Runtime::kCharFromCode with
// the smi-tagged character code and stores the resulting string into
// the result register's safepoint slot.
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(0));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(r0, result);
}
3630
+
3631
+
3632
// Loads the length field of a string into the result register.
void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
}
3637
+
3638
+
3639
// Converts an int32 (in a register or stack slot) to a double in a VFP
// register, via a single-precision scratch register and vcvt.
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  SwVfpRegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    // Load the spilled value into a core register first.
    Register scratch = scratch0();
    __ ldr(scratch, ToMemOperand(input));
    __ vmov(single_scratch, scratch);
  } else {
    __ vmov(single_scratch, ToRegister(input));
  }
  // Signed int32 -> double conversion.
  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
}
3654
+
3655
+
3656
// Tags an int32 as a smi in place. If tagging overflows (value does not
// fit in a smi), the deferred path allocates a heap number instead.
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
  // SmiTag shifts left by one; SetCC makes overflow set the V flag.
  __ SmiTag(reg, SetCC);
  __ b(vs, deferred->entry());
  __ bind(deferred->exit());
}
3675
+
3676
+
3677
// Slow path for DoNumberTagI: boxes the (overflowed) int32 in a freshly
// allocated heap number, falling back to the runtime allocator when
// inline allocation fails or is disabled.
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
  Label slow;
  Register reg = ToRegister(instr->InputAt(0));
  DoubleRegister dbl_scratch = d0;
  SwVfpRegister flt_scratch = s0;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // There was overflow, so bits 30 and 31 of the original integer
  // disagree. Try to allocate a heap number in new space and store
  // the value in there. If that fails, call the runtime system.
  Label done;
  __ SmiUntag(reg);
  // Flipping the sign bit recovers the original value: only
  // INT_MIN-magnitude overflow reaches here (bits 30/31 disagree).
  __ eor(reg, reg, Operand(0x80000000));
  __ vmov(flt_scratch, reg);
  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
  if (FLAG_inline_new) {
    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
    if (!reg.is(r5)) __ mov(reg, r5);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ mov(ip, Operand(0));
  __ StoreToSafepointRegisterSlot(ip, reg);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  if (!reg.is(r0)) __ mov(reg, r0);

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sub(ip, reg, Operand(kHeapObjectTag));
  __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
  __ StoreToSafepointRegisterSlot(reg, reg);
}
3719
+
3720
+
3721
// Boxes a double value in a heap number. Allocation is done inline when
// FLAG_inline_new is set; otherwise (or on inline-allocation failure)
// the deferred runtime path allocates.
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  // Store the double into the freshly allocated (untagged) heap number.
  __ sub(ip, reg, Operand(kHeapObjectTag));
  __ vstr(input_reg, ip, HeapNumber::kValueOffset);
}
3748
+
3749
+
3750
// Slow path for DoNumberTagD: allocates a heap number via the runtime
// and stores it into the result register's safepoint slot. The value
// itself is written by the caller after the deferred exit.
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand(0));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ StoreToSafepointRegisterSlot(r0, reg);
}
3761
+
3762
+
3763
// Tags an int32 as a smi in place. Only emitted when Hydrogen proved
// the tagging cannot overflow.
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(input));
}
3769
+
3770
+
3771
// Untags a smi in place. When a check is required, deoptimizes if the
// input was actually a heap object (carry set by SmiUntag).
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(ToRegister(input), SetCC);
    DeoptimizeIf(cs, instr->environment());
  } else {
    __ SmiUntag(ToRegister(input));
  }
}
3783
+
3784
+
3785
// Converts a tagged value in input_reg into a double in result_reg.
// Smis are untagged and converted; heap numbers are loaded directly;
// undefined converts to NaN; anything else deoptimizes.
// Note: input_reg is temporarily untagged for the smi path and then
// retagged, so its value is preserved but its flags are clobbered.
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DoubleRegister result_reg,
                                LEnvironment* env) {
  Register scratch = scratch0();
  SwVfpRegister flt_scratch = s0;
  // d0 aliases s0/s1, which this function uses as scratch.
  ASSERT(!result_reg.is(d0));

  Label load_smi, heap_number, done;

  // Smi check.
  __ tst(input_reg, Operand(kSmiTagMask));
  __ b(eq, &load_smi);

  // Heap number map check.
  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  __ b(eq, &heap_number);

  // Not a heap number: only undefined is acceptable.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(input_reg, Operand(ip));
  DeoptimizeIf(ne, env);

  // Convert undefined to NaN.
  __ LoadRoot(ip, Heap::kNanValueRootIndex);
  __ sub(ip, ip, Operand(kHeapObjectTag));
  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
  __ jmp(&done);

  // Heap number to double register conversion.
  __ bind(&heap_number);
  __ sub(ip, input_reg, Operand(kHeapObjectTag));
  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
  __ jmp(&done);

  // Smi to double register conversion
  __ bind(&load_smi);
  __ SmiUntag(input_reg);  // Untag smi before converting to float.
  __ vmov(flt_scratch, input_reg);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  __ SmiTag(input_reg);  // Retag smi.
  __ bind(&done);
}
3828
+
3829
+
3830
// Deferred-code wrapper that dispatches the tagged-to-int32 slow path
// (DoDeferredTaggedToI) for an LTaggedToI instruction.
class DeferredTaggedToI: public LDeferredCode {
 public:
  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
      : LDeferredCode(codegen), instr_(instr) { }
  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
 private:
  LTaggedToI* instr_;
};
3838
+
3839
+
3840
// Slow path for DoTaggedToI: the input was not a smi. Re-tags the
// optimistically untagged input, then converts a heap number to int32.
// In truncating mode an ECMA-262 ToInt32 truncation is used and
// undefined converts to 0; otherwise any lossy conversion (inexact,
// invalid, or negative zero when disallowed) deoptimizes.
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DwVfpRegister double_scratch = double_scratch0();
  SwVfpRegister single_scratch = double_scratch.low();

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input was optimistically untagged; revert it.
  // The carry flag is set when we reach this deferred code as we just executed
  // SmiUntag(heap_object, SetCC)
  ASSERT(kHeapObjectTag == 1);
  // input + input + carry == (input << 1) | 1 == original tagged value.
  __ adc(input_reg, input_reg, Operand(input_reg));

  // Heap number map check.
  __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, Operand(ip));

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->TempAt(1));
    DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
    ASSERT(!scratch3.is(input_reg) &&
           !scratch3.is(scratch1) &&
           !scratch3.is(scratch2));
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label heap_number;
    __ b(eq, &heap_number);
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(input_reg, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ mov(input_reg, Operand(0));
    __ b(&done);

    __ bind(&heap_number);
    __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
    __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);

    __ EmitECMATruncate(input_reg,
                        double_scratch2,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);

  } else {
    CpuFeatures::Scope scope(VFP3);
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment());

    __ sub(ip, input_reg, Operand(kHeapObjectTag));
    __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
    __ EmitVFPTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       scratch2,
                       kCheckForInexactConversion);
    // A lossy conversion leaves the flags != eq; deoptimize.
    DeoptimizeIf(ne, instr->environment());
    // Load the result.
    __ vmov(input_reg, single_scratch);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero result may be -0.0: check the sign bit of the original
      // double and deoptimize if it is set.
      __ cmp(input_reg, Operand(0));
      __ b(ne, &done);
      __ vmov(scratch1, double_scratch.high());
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
    }
  }
  __ bind(&done);
}
3919
+
3920
+
3921
// Converts a tagged value to an int32 in place. The smi case is
// handled by optimistic untagging; heap objects go to the deferred
// slow path, which restores the tag first.
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);

  // Optimistically untag the input.
  // If the input is a HeapObject, SmiUntag will set the carry flag.
  __ SmiUntag(input_reg, SetCC);
  // Branch to deferred code if the input was tagged.
  // The deferred code will take care of restoring the tag.
  __ b(cs, deferred->entry());
  __ bind(deferred->exit());
}
3938
+
3939
+
3940
// Converts a tagged number to a double via EmitNumberUntagD;
// non-number inputs (other than undefined) deoptimize there.
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg, instr->environment());
}
3951
+
3952
+
3953
+ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
3954
+ Register result_reg = ToRegister(instr->result());
3955
+ Register scratch1 = scratch0();
3956
+ Register scratch2 = ToRegister(instr->TempAt(0));
3957
+ DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
3958
+ DwVfpRegister double_scratch = double_scratch0();
3959
+ SwVfpRegister single_scratch = double_scratch0().low();
3960
+
3961
+ Label done;
3962
+
3963
+ if (instr->truncating()) {
3964
+ Register scratch3 = ToRegister(instr->TempAt(1));
3965
+ __ EmitECMATruncate(result_reg,
3966
+ double_input,
3967
+ single_scratch,
3968
+ scratch1,
3969
+ scratch2,
3970
+ scratch3);
3971
+ } else {
3972
+ VFPRoundingMode rounding_mode = kRoundToMinusInf;
3973
+ __ EmitVFPTruncate(rounding_mode,
3974
+ single_scratch,
3975
+ double_input,
3976
+ scratch1,
3977
+ scratch2,
3978
+ kCheckForInexactConversion);
3979
+ // Deoptimize if we had a vfp invalid exception,
3980
+ // including inexact operation.
3981
+ DeoptimizeIf(ne, instr->environment());
3982
+ // Retrieve the result.
3983
+ __ vmov(result_reg, single_scratch);
3984
+ }
3985
+ __ bind(&done);
3986
+ }
3987
+
3988
+
3989
// Deoptimizes if the input is not a smi (tag bit set).
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ tst(ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(ne, instr->environment());
}
3994
+
3995
+
3996
// Deoptimizes if the input IS a smi (tag bit clear).
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ tst(ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
}
4001
+
4002
+
4003
// Deoptimizes unless the input object's instance type satisfies the
// Hydrogen check: either membership in an [first, last] interval, or a
// mask/tag bit-pattern test.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment());
    } else {
      DeoptimizeIf(lo, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      // Single-bit mask: a tst suffices; polarity depends on the tag.
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr->environment());
    }
  }
}
4044
+
4045
+
4046
// Deoptimizes unless the input is exactly the expected function object.
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  ASSERT(instr->InputAt(0)->IsRegister());
  Register reg = ToRegister(instr->InputAt(0));
  __ cmp(reg, Operand(instr->hydrogen()->target()));
  DeoptimizeIf(ne, instr->environment());
}
4052
+
4053
+
4054
// Deoptimizes unless the input object's map is the expected map.
void LCodeGen::DoCheckMap(LCheckMap* instr) {
  Register scratch = scratch0();
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(instr->hydrogen()->map()));
  DeoptimizeIf(ne, instr->environment());
}
4063
+
4064
+
4065
// Clamps a double to the uint8 range [0, 255] via the macro assembler.
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}
4071
+
4072
+
4073
+ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4074
+ Register unclamped_reg = ToRegister(instr->unclamped());
4075
+ Register result_reg = ToRegister(instr->result());
4076
+ __ ClampUint8(result_reg, unclamped_reg);
4077
+ }
4078
+
4079
+
4080
+ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4081
+ Register scratch = scratch0();
4082
+ Register input_reg = ToRegister(instr->unclamped());
4083
+ Register result_reg = ToRegister(instr->result());
4084
+ DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4085
+ Label is_smi, done, heap_number;
4086
+
4087
+ // Both smi and heap number cases are handled.
4088
+ __ JumpIfSmi(input_reg, &is_smi);
4089
+
4090
+ // Check for heap number
4091
+ __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4092
+ __ cmp(scratch, Operand(factory()->heap_number_map()));
4093
+ __ b(eq, &heap_number);
4094
+
4095
+ // Check for undefined. Undefined is converted to zero for clamping
4096
+ // conversions.
4097
+ __ cmp(input_reg, Operand(factory()->undefined_value()));
4098
+ DeoptimizeIf(ne, instr->environment());
4099
+ __ movt(input_reg, 0);
4100
+ __ jmp(&done);
4101
+
4102
+ // Heap number
4103
+ __ bind(&heap_number);
4104
+ __ vldr(double_scratch0(), FieldMemOperand(input_reg,
4105
+ HeapNumber::kValueOffset));
4106
+ __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4107
+ __ jmp(&done);
4108
+
4109
+ // smi
4110
+ __ bind(&is_smi);
4111
+ __ SmiUntag(result_reg, input_reg);
4112
+ __ ClampUint8(result_reg, result_reg);
4113
+
4114
+ __ bind(&done);
4115
+ }
4116
+
4117
+
4118
// Materializes a heap object constant in a register. New-space objects
// may move during GC, so they are referenced indirectly through a
// global property cell; old-space objects are embedded directly.
void LCodeGen::LoadHeapObject(Register result,
                              Handle<HeapObject> object) {
  if (heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        factory()->NewJSGlobalPropertyCell(object);
    __ mov(result, Operand(cell));
    __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
  } else {
    __ mov(result, Operand(object));
  }
}
4129
+
4130
+
4131
// Walks the prototype chain from instr->prototype() down to the holder,
// deoptimizing if any object on the chain (including the holder) does
// not have the map recorded at compile time.
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  LoadHeapObject(temp1, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
    __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
    DeoptimizeIf(ne, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadHeapObject(temp1, current_prototype);
  }

  // Check the holder map.
  __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
  __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
  DeoptimizeIf(ne, instr->environment());
}
4157
+
4158
+
4159
// Creates an array literal: pushes (literals array, literal index,
// constant elements) and dispatches to the fast-clone stub or the
// runtime depending on depth, length, and copy-on-write status.
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
  __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r1, Operand(instr->hydrogen()->constant_elements()));
  __ Push(r3, r2, r1);

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    // Nested literals need the full runtime path.
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
4185
+
4186
+
4187
// Creates an object literal: pushes (literals array, literal index,
// constant properties, fast-elements flag) and calls the appropriate
// runtime function based on literal depth.
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
  __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r2, Operand(instr->hydrogen()->constant_properties()));
  __ mov(r1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
  __ Push(r4, r3, r2, r1);

  // Pick the right runtime function to call.
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  }
}
4202
+
4203
+
4204
// Transitions the object in r0 to fast properties via the runtime.
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
4209
+
4210
+
4211
// Creates a regexp literal. The boilerplate regexp is materialized via
// the runtime on first use (and cached in the literals array); then a
// shallow copy of it is allocated and filled in.
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // r3 = JS function.
  // r7 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2 and r4-r6 are used as temporaries.
  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ ldr(r1, FieldMemOperand(r7, literal_offset));
  // An undefined literal slot means the regexp is not materialized yet.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create regexp literal using runtime function
  // Result will be in r0.
  __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r5, Operand(instr->hydrogen()->pattern()));
  __ mov(r4, Operand(instr->hydrogen()->flags()));
  __ Push(r7, r6, r5, r4);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ ldr(r3, FieldMemOperand(r1, i));
    __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
    __ str(r3, FieldMemOperand(r0, i));
    __ str(r2, FieldMemOperand(r0, i + kPointerSize));
  }
  // Copy a trailing odd word, if any.
  if ((size % (2 * kPointerSize)) != 0) {
    __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
    __ str(r3, FieldMemOperand(r0, size - kPointerSize));
  }
}
4264
+
4265
+
4266
// Creates a closure for a function literal. Non-pretenured closures
// without literals use the fast stub; otherwise the runtime is called
// with (context, shared info, pretenure flag).
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(
        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
    __ mov(r1, Operand(shared_info));
    __ push(r1);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(shared_info));
    __ mov(r1, Operand(pretenure
                       ? factory()->true_value()
                       : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}
4286
+
4287
+
4288
// Computes `typeof` of the input by calling the runtime.
void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->InputAt(0));
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}
4293
+
4294
+
4295
// Materializes the boolean result of `typeof x == "literal"`:
// EmitTypeofIs produces the condition, then true/false heap values are
// loaded into the result register.
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Label true_label;
  Label false_label;
  Label done;

  Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                  &false_label,
                                                  input,
                                                  instr->type_literal());
  __ b(final_branch_condition, &true_label);
  __ bind(&false_label);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ b(&done);

  __ bind(&true_label);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);

  __ bind(&done);
}
4316
+
4317
+
4318
// Branch form of DoTypeofIs: emits the typeof test and branches
// directly to the true/false basic blocks.
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}
4332
+
4333
+
4334
+ Condition LCodeGen::EmitTypeofIs(Label* true_label,
4335
+ Label* false_label,
4336
+ Register input,
4337
+ Handle<String> type_name) {
4338
+ Condition final_branch_condition = kNoCondition;
4339
+ Register scratch = scratch0();
4340
+ if (type_name->Equals(heap()->number_symbol())) {
4341
+ __ JumpIfSmi(input, true_label);
4342
+ __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
4343
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4344
+ __ cmp(input, Operand(ip));
4345
+ final_branch_condition = eq;
4346
+
4347
+ } else if (type_name->Equals(heap()->string_symbol())) {
4348
+ __ JumpIfSmi(input, false_label);
4349
+ __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
4350
+ __ b(ge, false_label);
4351
+ __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4352
+ __ tst(ip, Operand(1 << Map::kIsUndetectable));
4353
+ final_branch_condition = eq;
4354
+
4355
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
4356
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
4357
+ __ b(eq, true_label);
4358
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
4359
+ final_branch_condition = eq;
4360
+
4361
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
4362
+ __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
4363
+ __ b(eq, true_label);
4364
+ __ JumpIfSmi(input, false_label);
4365
+ // Check for undetectable objects => true.
4366
+ __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
4367
+ __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4368
+ __ tst(ip, Operand(1 << Map::kIsUndetectable));
4369
+ final_branch_condition = ne;
4370
+
4371
+ } else if (type_name->Equals(heap()->function_symbol())) {
4372
+ __ JumpIfSmi(input, false_label);
4373
+ __ CompareObjectType(input, input, scratch, FIRST_FUNCTION_CLASS_TYPE);
4374
+ final_branch_condition = ge;
4375
+
4376
+ } else if (type_name->Equals(heap()->object_symbol())) {
4377
+ __ JumpIfSmi(input, false_label);
4378
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
4379
+ __ b(eq, true_label);
4380
+ __ CompareObjectType(input, input, scratch, FIRST_JS_OBJECT_TYPE);
4381
+ __ b(lo, false_label);
4382
+ __ CompareInstanceType(input, scratch, FIRST_FUNCTION_CLASS_TYPE);
4383
+ __ b(hs, false_label);
4384
+ // Check for undetectable objects => false.
4385
+ __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4386
+ __ tst(ip, Operand(1 << Map::kIsUndetectable));
4387
+ final_branch_condition = eq;
4388
+
4389
+ } else {
4390
+ final_branch_condition = ne;
4391
+ __ b(false_label);
4392
+ // A dead branch instruction will be generated after this point.
4393
+ }
4394
+
4395
+ return final_branch_condition;
4396
+ }
4397
+
4398
+
4399
+ void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
4400
+ Register result = ToRegister(instr->result());
4401
+ Label true_label;
4402
+ Label false_label;
4403
+ Label done;
4404
+
4405
+ EmitIsConstructCall(result, scratch0());
4406
+ __ b(eq, &true_label);
4407
+
4408
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
4409
+ __ b(&done);
4410
+
4411
+
4412
+ __ bind(&true_label);
4413
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
4414
+
4415
+ __ bind(&done);
4416
+ }
4417
+
4418
+
4419
+ void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4420
+ Register temp1 = ToRegister(instr->TempAt(0));
4421
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
4422
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
4423
+
4424
+ EmitIsConstructCall(temp1, scratch0());
4425
+ EmitBranch(true_block, false_block, eq);
4426
+ }
4427
+
4428
+
4429
+ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
4430
+ ASSERT(!temp1.is(temp2));
4431
+ // Get the frame pointer for the calling frame.
4432
+ __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4433
+
4434
+ // Skip the arguments adaptor frame if it exists.
4435
+ Label check_frame_marker;
4436
+ __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
4437
+ __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4438
+ __ b(ne, &check_frame_marker);
4439
+ __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
4440
+
4441
+ // Check the marker in the calling frame.
4442
+ __ bind(&check_frame_marker);
4443
+ __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
4444
+ __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
4445
+ }
4446
+
4447
+
4448
+ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
4449
+ // No code for lazy bailout instruction. Used to capture environment after a
4450
+ // call for populating the safepoint data with deoptimization data.
4451
+ }
4452
+
4453
+
4454
+ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
4455
+ DeoptimizeIf(al, instr->environment());
4456
+ }
4457
+
4458
+
4459
+ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
4460
+ Register object = ToRegister(instr->object());
4461
+ Register key = ToRegister(instr->key());
4462
+ Register strict = scratch0();
4463
+ __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
4464
+ __ Push(object, key, strict);
4465
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4466
+ LPointerMap* pointers = instr->pointer_map();
4467
+ LEnvironment* env = instr->deoptimization_environment();
4468
+ RecordPosition(pointers->position());
4469
+ RegisterEnvironmentForDeoptimization(env);
4470
+ SafepointGenerator safepoint_generator(this,
4471
+ pointers,
4472
+ env->deoptimization_index());
4473
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
4474
+ }
4475
+
4476
+
4477
+ void LCodeGen::DoIn(LIn* instr) {
4478
+ Register obj = ToRegister(instr->object());
4479
+ Register key = ToRegister(instr->key());
4480
+ __ Push(key, obj);
4481
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4482
+ LPointerMap* pointers = instr->pointer_map();
4483
+ LEnvironment* env = instr->deoptimization_environment();
4484
+ RecordPosition(pointers->position());
4485
+ RegisterEnvironmentForDeoptimization(env);
4486
+ SafepointGenerator safepoint_generator(this,
4487
+ pointers,
4488
+ env->deoptimization_index());
4489
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
4490
+ }
4491
+
4492
+
4493
+ void LCodeGen::DoStackCheck(LStackCheck* instr) {
4494
+ // Perform stack overflow check.
4495
+ Label ok;
4496
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
4497
+ __ cmp(sp, Operand(ip));
4498
+ __ b(hs, &ok);
4499
+ StackCheckStub stub;
4500
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4501
+ __ bind(&ok);
4502
+ }
4503
+
4504
+
4505
+ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4506
+ // This is a pseudo-instruction that ensures that the environment here is
4507
+ // properly registered for deoptimization and records the assembler's PC
4508
+ // offset.
4509
+ LEnvironment* environment = instr->environment();
4510
+ environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
4511
+ instr->SpilledDoubleRegisterArray());
4512
+
4513
+ // If the environment were already registered, we would have no way of
4514
+ // backpatching it with the spill slot operands.
4515
+ ASSERT(!environment->HasBeenRegistered());
4516
+ RegisterEnvironmentForDeoptimization(environment);
4517
+ ASSERT(osr_pc_offset_ == -1);
4518
+ osr_pc_offset_ = masm()->pc_offset();
4519
+ }
4520
+
4521
+
4522
+
4523
+
4524
+ #undef __
4525
+
4526
+ } } // namespace v8::internal