libv8-sgonyea 3.3.10

Sign up to get free protection for your applications and to get access to all the features.
Files changed (500) hide show
  1. data/.gitignore +8 -0
  2. data/.gitmodules +3 -0
  3. data/Gemfile +4 -0
  4. data/README.md +76 -0
  5. data/Rakefile +113 -0
  6. data/ext/libv8/extconf.rb +28 -0
  7. data/lib/libv8.rb +15 -0
  8. data/lib/libv8/Makefile +30 -0
  9. data/lib/libv8/detect_cpu.rb +27 -0
  10. data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
  11. data/lib/libv8/v8/.gitignore +35 -0
  12. data/lib/libv8/v8/AUTHORS +44 -0
  13. data/lib/libv8/v8/ChangeLog +2839 -0
  14. data/lib/libv8/v8/LICENSE +52 -0
  15. data/lib/libv8/v8/LICENSE.strongtalk +29 -0
  16. data/lib/libv8/v8/LICENSE.v8 +26 -0
  17. data/lib/libv8/v8/LICENSE.valgrind +45 -0
  18. data/lib/libv8/v8/SConstruct +1478 -0
  19. data/lib/libv8/v8/build/README.txt +49 -0
  20. data/lib/libv8/v8/build/all.gyp +18 -0
  21. data/lib/libv8/v8/build/armu.gypi +32 -0
  22. data/lib/libv8/v8/build/common.gypi +144 -0
  23. data/lib/libv8/v8/build/gyp_v8 +145 -0
  24. data/lib/libv8/v8/include/v8-debug.h +395 -0
  25. data/lib/libv8/v8/include/v8-preparser.h +117 -0
  26. data/lib/libv8/v8/include/v8-profiler.h +505 -0
  27. data/lib/libv8/v8/include/v8-testing.h +104 -0
  28. data/lib/libv8/v8/include/v8.h +4124 -0
  29. data/lib/libv8/v8/include/v8stdint.h +53 -0
  30. data/lib/libv8/v8/preparser/SConscript +38 -0
  31. data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
  32. data/lib/libv8/v8/src/SConscript +368 -0
  33. data/lib/libv8/v8/src/accessors.cc +767 -0
  34. data/lib/libv8/v8/src/accessors.h +123 -0
  35. data/lib/libv8/v8/src/allocation-inl.h +49 -0
  36. data/lib/libv8/v8/src/allocation.cc +122 -0
  37. data/lib/libv8/v8/src/allocation.h +143 -0
  38. data/lib/libv8/v8/src/api.cc +5845 -0
  39. data/lib/libv8/v8/src/api.h +574 -0
  40. data/lib/libv8/v8/src/apinatives.js +110 -0
  41. data/lib/libv8/v8/src/apiutils.h +73 -0
  42. data/lib/libv8/v8/src/arguments.h +118 -0
  43. data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
  44. data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
  45. data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
  46. data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
  47. data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
  48. data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
  49. data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
  50. data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
  51. data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
  52. data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
  53. data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
  54. data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
  55. data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
  56. data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
  57. data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
  58. data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
  59. data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
  60. data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
  61. data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
  62. data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
  63. data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
  64. data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
  65. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
  66. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
  67. data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
  68. data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
  69. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
  70. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
  71. data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
  72. data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
  73. data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
  74. data/lib/libv8/v8/src/array.js +1366 -0
  75. data/lib/libv8/v8/src/assembler.cc +1207 -0
  76. data/lib/libv8/v8/src/assembler.h +858 -0
  77. data/lib/libv8/v8/src/ast-inl.h +112 -0
  78. data/lib/libv8/v8/src/ast.cc +1146 -0
  79. data/lib/libv8/v8/src/ast.h +2188 -0
  80. data/lib/libv8/v8/src/atomicops.h +167 -0
  81. data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
  82. data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
  83. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
  84. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
  85. data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
  86. data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
  87. data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
  88. data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
  89. data/lib/libv8/v8/src/bignum.cc +768 -0
  90. data/lib/libv8/v8/src/bignum.h +140 -0
  91. data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
  92. data/lib/libv8/v8/src/bootstrapper.h +188 -0
  93. data/lib/libv8/v8/src/builtins.cc +1707 -0
  94. data/lib/libv8/v8/src/builtins.h +371 -0
  95. data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
  96. data/lib/libv8/v8/src/cached-powers.cc +177 -0
  97. data/lib/libv8/v8/src/cached-powers.h +65 -0
  98. data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
  99. data/lib/libv8/v8/src/char-predicates.h +67 -0
  100. data/lib/libv8/v8/src/checks.cc +110 -0
  101. data/lib/libv8/v8/src/checks.h +296 -0
  102. data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
  103. data/lib/libv8/v8/src/circular-queue.cc +122 -0
  104. data/lib/libv8/v8/src/circular-queue.h +103 -0
  105. data/lib/libv8/v8/src/code-stubs.cc +267 -0
  106. data/lib/libv8/v8/src/code-stubs.h +1011 -0
  107. data/lib/libv8/v8/src/code.h +70 -0
  108. data/lib/libv8/v8/src/codegen.cc +231 -0
  109. data/lib/libv8/v8/src/codegen.h +84 -0
  110. data/lib/libv8/v8/src/compilation-cache.cc +540 -0
  111. data/lib/libv8/v8/src/compilation-cache.h +287 -0
  112. data/lib/libv8/v8/src/compiler.cc +786 -0
  113. data/lib/libv8/v8/src/compiler.h +312 -0
  114. data/lib/libv8/v8/src/contexts.cc +347 -0
  115. data/lib/libv8/v8/src/contexts.h +391 -0
  116. data/lib/libv8/v8/src/conversions-inl.h +106 -0
  117. data/lib/libv8/v8/src/conversions.cc +1131 -0
  118. data/lib/libv8/v8/src/conversions.h +135 -0
  119. data/lib/libv8/v8/src/counters.cc +93 -0
  120. data/lib/libv8/v8/src/counters.h +254 -0
  121. data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
  122. data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
  123. data/lib/libv8/v8/src/cpu-profiler.h +302 -0
  124. data/lib/libv8/v8/src/cpu.h +69 -0
  125. data/lib/libv8/v8/src/d8-debug.cc +367 -0
  126. data/lib/libv8/v8/src/d8-debug.h +158 -0
  127. data/lib/libv8/v8/src/d8-posix.cc +695 -0
  128. data/lib/libv8/v8/src/d8-readline.cc +130 -0
  129. data/lib/libv8/v8/src/d8-windows.cc +42 -0
  130. data/lib/libv8/v8/src/d8.cc +803 -0
  131. data/lib/libv8/v8/src/d8.gyp +91 -0
  132. data/lib/libv8/v8/src/d8.h +235 -0
  133. data/lib/libv8/v8/src/d8.js +2798 -0
  134. data/lib/libv8/v8/src/data-flow.cc +66 -0
  135. data/lib/libv8/v8/src/data-flow.h +205 -0
  136. data/lib/libv8/v8/src/date.js +1103 -0
  137. data/lib/libv8/v8/src/dateparser-inl.h +127 -0
  138. data/lib/libv8/v8/src/dateparser.cc +178 -0
  139. data/lib/libv8/v8/src/dateparser.h +266 -0
  140. data/lib/libv8/v8/src/debug-agent.cc +447 -0
  141. data/lib/libv8/v8/src/debug-agent.h +129 -0
  142. data/lib/libv8/v8/src/debug-debugger.js +2569 -0
  143. data/lib/libv8/v8/src/debug.cc +3165 -0
  144. data/lib/libv8/v8/src/debug.h +1057 -0
  145. data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
  146. data/lib/libv8/v8/src/deoptimizer.h +602 -0
  147. data/lib/libv8/v8/src/disasm.h +80 -0
  148. data/lib/libv8/v8/src/disassembler.cc +343 -0
  149. data/lib/libv8/v8/src/disassembler.h +58 -0
  150. data/lib/libv8/v8/src/diy-fp.cc +58 -0
  151. data/lib/libv8/v8/src/diy-fp.h +117 -0
  152. data/lib/libv8/v8/src/double.h +238 -0
  153. data/lib/libv8/v8/src/dtoa.cc +103 -0
  154. data/lib/libv8/v8/src/dtoa.h +85 -0
  155. data/lib/libv8/v8/src/execution.cc +849 -0
  156. data/lib/libv8/v8/src/execution.h +297 -0
  157. data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
  158. data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
  159. data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
  160. data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
  161. data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
  162. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
  163. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
  164. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
  165. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
  166. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
  167. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
  168. data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
  169. data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
  170. data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
  171. data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
  172. data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
  173. data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
  174. data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
  175. data/lib/libv8/v8/src/factory.cc +1222 -0
  176. data/lib/libv8/v8/src/factory.h +442 -0
  177. data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
  178. data/lib/libv8/v8/src/fast-dtoa.h +83 -0
  179. data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
  180. data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
  181. data/lib/libv8/v8/src/flag-definitions.h +560 -0
  182. data/lib/libv8/v8/src/flags.cc +551 -0
  183. data/lib/libv8/v8/src/flags.h +79 -0
  184. data/lib/libv8/v8/src/frames-inl.h +247 -0
  185. data/lib/libv8/v8/src/frames.cc +1243 -0
  186. data/lib/libv8/v8/src/frames.h +870 -0
  187. data/lib/libv8/v8/src/full-codegen.cc +1374 -0
  188. data/lib/libv8/v8/src/full-codegen.h +771 -0
  189. data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
  190. data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
  191. data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
  192. data/lib/libv8/v8/src/gdb-jit.h +143 -0
  193. data/lib/libv8/v8/src/global-handles.cc +665 -0
  194. data/lib/libv8/v8/src/global-handles.h +284 -0
  195. data/lib/libv8/v8/src/globals.h +325 -0
  196. data/lib/libv8/v8/src/handles-inl.h +177 -0
  197. data/lib/libv8/v8/src/handles.cc +987 -0
  198. data/lib/libv8/v8/src/handles.h +382 -0
  199. data/lib/libv8/v8/src/hashmap.cc +230 -0
  200. data/lib/libv8/v8/src/hashmap.h +123 -0
  201. data/lib/libv8/v8/src/heap-inl.h +704 -0
  202. data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
  203. data/lib/libv8/v8/src/heap-profiler.h +397 -0
  204. data/lib/libv8/v8/src/heap.cc +5930 -0
  205. data/lib/libv8/v8/src/heap.h +2268 -0
  206. data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
  207. data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
  208. data/lib/libv8/v8/src/hydrogen.cc +6239 -0
  209. data/lib/libv8/v8/src/hydrogen.h +1202 -0
  210. data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
  211. data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
  212. data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
  213. data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
  214. data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
  215. data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
  216. data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
  217. data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
  218. data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
  219. data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
  220. data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
  221. data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
  222. data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
  223. data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
  224. data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
  225. data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
  226. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
  227. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
  228. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
  229. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
  230. data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
  231. data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
  232. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
  233. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
  234. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
  235. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
  236. data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
  237. data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
  238. data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
  239. data/lib/libv8/v8/src/ic-inl.h +130 -0
  240. data/lib/libv8/v8/src/ic.cc +2577 -0
  241. data/lib/libv8/v8/src/ic.h +736 -0
  242. data/lib/libv8/v8/src/inspector.cc +63 -0
  243. data/lib/libv8/v8/src/inspector.h +62 -0
  244. data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
  245. data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
  246. data/lib/libv8/v8/src/isolate-inl.h +50 -0
  247. data/lib/libv8/v8/src/isolate.cc +1869 -0
  248. data/lib/libv8/v8/src/isolate.h +1382 -0
  249. data/lib/libv8/v8/src/json-parser.cc +504 -0
  250. data/lib/libv8/v8/src/json-parser.h +161 -0
  251. data/lib/libv8/v8/src/json.js +342 -0
  252. data/lib/libv8/v8/src/jsregexp.cc +5385 -0
  253. data/lib/libv8/v8/src/jsregexp.h +1492 -0
  254. data/lib/libv8/v8/src/list-inl.h +212 -0
  255. data/lib/libv8/v8/src/list.h +174 -0
  256. data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
  257. data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
  258. data/lib/libv8/v8/src/lithium-allocator.h +630 -0
  259. data/lib/libv8/v8/src/lithium.cc +190 -0
  260. data/lib/libv8/v8/src/lithium.h +597 -0
  261. data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
  262. data/lib/libv8/v8/src/liveedit.cc +1691 -0
  263. data/lib/libv8/v8/src/liveedit.h +180 -0
  264. data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
  265. data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
  266. data/lib/libv8/v8/src/liveobjectlist.h +322 -0
  267. data/lib/libv8/v8/src/log-inl.h +59 -0
  268. data/lib/libv8/v8/src/log-utils.cc +428 -0
  269. data/lib/libv8/v8/src/log-utils.h +231 -0
  270. data/lib/libv8/v8/src/log.cc +1993 -0
  271. data/lib/libv8/v8/src/log.h +476 -0
  272. data/lib/libv8/v8/src/macro-assembler.h +120 -0
  273. data/lib/libv8/v8/src/macros.py +178 -0
  274. data/lib/libv8/v8/src/mark-compact.cc +3143 -0
  275. data/lib/libv8/v8/src/mark-compact.h +506 -0
  276. data/lib/libv8/v8/src/math.js +264 -0
  277. data/lib/libv8/v8/src/messages.cc +179 -0
  278. data/lib/libv8/v8/src/messages.h +113 -0
  279. data/lib/libv8/v8/src/messages.js +1096 -0
  280. data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
  281. data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
  282. data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
  283. data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
  284. data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
  285. data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
  286. data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
  287. data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
  288. data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
  289. data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
  290. data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
  291. data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
  292. data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
  293. data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
  294. data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
  295. data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
  296. data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
  297. data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
  298. data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
  299. data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
  300. data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
  301. data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
  302. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
  303. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
  304. data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
  305. data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
  306. data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
  307. data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
  308. data/lib/libv8/v8/src/mksnapshot.cc +328 -0
  309. data/lib/libv8/v8/src/natives.h +64 -0
  310. data/lib/libv8/v8/src/objects-debug.cc +738 -0
  311. data/lib/libv8/v8/src/objects-inl.h +4323 -0
  312. data/lib/libv8/v8/src/objects-printer.cc +829 -0
  313. data/lib/libv8/v8/src/objects-visiting.cc +148 -0
  314. data/lib/libv8/v8/src/objects-visiting.h +424 -0
  315. data/lib/libv8/v8/src/objects.cc +10585 -0
  316. data/lib/libv8/v8/src/objects.h +6838 -0
  317. data/lib/libv8/v8/src/parser.cc +4997 -0
  318. data/lib/libv8/v8/src/parser.h +765 -0
  319. data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
  320. data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
  321. data/lib/libv8/v8/src/platform-linux.cc +1149 -0
  322. data/lib/libv8/v8/src/platform-macos.cc +830 -0
  323. data/lib/libv8/v8/src/platform-nullos.cc +479 -0
  324. data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
  325. data/lib/libv8/v8/src/platform-posix.cc +424 -0
  326. data/lib/libv8/v8/src/platform-solaris.cc +762 -0
  327. data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
  328. data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
  329. data/lib/libv8/v8/src/platform-tls.h +50 -0
  330. data/lib/libv8/v8/src/platform-win32.cc +2021 -0
  331. data/lib/libv8/v8/src/platform.h +667 -0
  332. data/lib/libv8/v8/src/preparse-data-format.h +62 -0
  333. data/lib/libv8/v8/src/preparse-data.cc +183 -0
  334. data/lib/libv8/v8/src/preparse-data.h +225 -0
  335. data/lib/libv8/v8/src/preparser-api.cc +220 -0
  336. data/lib/libv8/v8/src/preparser.cc +1450 -0
  337. data/lib/libv8/v8/src/preparser.h +493 -0
  338. data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
  339. data/lib/libv8/v8/src/prettyprinter.h +223 -0
  340. data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
  341. data/lib/libv8/v8/src/profile-generator.cc +3098 -0
  342. data/lib/libv8/v8/src/profile-generator.h +1126 -0
  343. data/lib/libv8/v8/src/property.cc +105 -0
  344. data/lib/libv8/v8/src/property.h +365 -0
  345. data/lib/libv8/v8/src/proxy.js +83 -0
  346. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
  347. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
  348. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
  349. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
  350. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
  351. data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
  352. data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
  353. data/lib/libv8/v8/src/regexp-stack.cc +111 -0
  354. data/lib/libv8/v8/src/regexp-stack.h +147 -0
  355. data/lib/libv8/v8/src/regexp.js +483 -0
  356. data/lib/libv8/v8/src/rewriter.cc +360 -0
  357. data/lib/libv8/v8/src/rewriter.h +50 -0
  358. data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
  359. data/lib/libv8/v8/src/runtime-profiler.h +201 -0
  360. data/lib/libv8/v8/src/runtime.cc +12227 -0
  361. data/lib/libv8/v8/src/runtime.h +652 -0
  362. data/lib/libv8/v8/src/runtime.js +649 -0
  363. data/lib/libv8/v8/src/safepoint-table.cc +256 -0
  364. data/lib/libv8/v8/src/safepoint-table.h +270 -0
  365. data/lib/libv8/v8/src/scanner-base.cc +952 -0
  366. data/lib/libv8/v8/src/scanner-base.h +670 -0
  367. data/lib/libv8/v8/src/scanner.cc +345 -0
  368. data/lib/libv8/v8/src/scanner.h +146 -0
  369. data/lib/libv8/v8/src/scopeinfo.cc +646 -0
  370. data/lib/libv8/v8/src/scopeinfo.h +254 -0
  371. data/lib/libv8/v8/src/scopes.cc +1150 -0
  372. data/lib/libv8/v8/src/scopes.h +507 -0
  373. data/lib/libv8/v8/src/serialize.cc +1574 -0
  374. data/lib/libv8/v8/src/serialize.h +589 -0
  375. data/lib/libv8/v8/src/shell.h +55 -0
  376. data/lib/libv8/v8/src/simulator.h +43 -0
  377. data/lib/libv8/v8/src/small-pointer-list.h +163 -0
  378. data/lib/libv8/v8/src/smart-pointer.h +109 -0
  379. data/lib/libv8/v8/src/snapshot-common.cc +83 -0
  380. data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
  381. data/lib/libv8/v8/src/snapshot.h +91 -0
  382. data/lib/libv8/v8/src/spaces-inl.h +529 -0
  383. data/lib/libv8/v8/src/spaces.cc +3145 -0
  384. data/lib/libv8/v8/src/spaces.h +2369 -0
  385. data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
  386. data/lib/libv8/v8/src/splay-tree.h +205 -0
  387. data/lib/libv8/v8/src/string-search.cc +41 -0
  388. data/lib/libv8/v8/src/string-search.h +568 -0
  389. data/lib/libv8/v8/src/string-stream.cc +592 -0
  390. data/lib/libv8/v8/src/string-stream.h +191 -0
  391. data/lib/libv8/v8/src/string.js +994 -0
  392. data/lib/libv8/v8/src/strtod.cc +440 -0
  393. data/lib/libv8/v8/src/strtod.h +40 -0
  394. data/lib/libv8/v8/src/stub-cache.cc +1965 -0
  395. data/lib/libv8/v8/src/stub-cache.h +924 -0
  396. data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
  397. data/lib/libv8/v8/src/token.cc +63 -0
  398. data/lib/libv8/v8/src/token.h +288 -0
  399. data/lib/libv8/v8/src/type-info.cc +507 -0
  400. data/lib/libv8/v8/src/type-info.h +272 -0
  401. data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
  402. data/lib/libv8/v8/src/unbound-queue.h +69 -0
  403. data/lib/libv8/v8/src/unicode-inl.h +238 -0
  404. data/lib/libv8/v8/src/unicode.cc +1624 -0
  405. data/lib/libv8/v8/src/unicode.h +280 -0
  406. data/lib/libv8/v8/src/uri.js +408 -0
  407. data/lib/libv8/v8/src/utils-inl.h +48 -0
  408. data/lib/libv8/v8/src/utils.cc +371 -0
  409. data/lib/libv8/v8/src/utils.h +800 -0
  410. data/lib/libv8/v8/src/v8-counters.cc +62 -0
  411. data/lib/libv8/v8/src/v8-counters.h +314 -0
  412. data/lib/libv8/v8/src/v8.cc +213 -0
  413. data/lib/libv8/v8/src/v8.h +131 -0
  414. data/lib/libv8/v8/src/v8checks.h +64 -0
  415. data/lib/libv8/v8/src/v8dll-main.cc +44 -0
  416. data/lib/libv8/v8/src/v8globals.h +512 -0
  417. data/lib/libv8/v8/src/v8memory.h +82 -0
  418. data/lib/libv8/v8/src/v8natives.js +1310 -0
  419. data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
  420. data/lib/libv8/v8/src/v8threads.cc +464 -0
  421. data/lib/libv8/v8/src/v8threads.h +165 -0
  422. data/lib/libv8/v8/src/v8utils.h +319 -0
  423. data/lib/libv8/v8/src/variables.cc +114 -0
  424. data/lib/libv8/v8/src/variables.h +167 -0
  425. data/lib/libv8/v8/src/version.cc +116 -0
  426. data/lib/libv8/v8/src/version.h +68 -0
  427. data/lib/libv8/v8/src/vm-state-inl.h +138 -0
  428. data/lib/libv8/v8/src/vm-state.h +71 -0
  429. data/lib/libv8/v8/src/win32-headers.h +96 -0
  430. data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
  431. data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
  432. data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
  433. data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
  434. data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
  435. data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
  436. data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
  437. data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
  438. data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
  439. data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
  440. data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
  441. data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
  442. data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
  443. data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
  444. data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
  445. data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
  446. data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
  447. data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
  448. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
  449. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
  450. data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
  451. data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
  452. data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
  453. data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
  454. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
  455. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
  456. data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
  457. data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
  458. data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
  459. data/lib/libv8/v8/src/zone-inl.h +140 -0
  460. data/lib/libv8/v8/src/zone.cc +196 -0
  461. data/lib/libv8/v8/src/zone.h +240 -0
  462. data/lib/libv8/v8/tools/codemap.js +265 -0
  463. data/lib/libv8/v8/tools/consarray.js +93 -0
  464. data/lib/libv8/v8/tools/csvparser.js +78 -0
  465. data/lib/libv8/v8/tools/disasm.py +92 -0
  466. data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
  467. data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
  468. data/lib/libv8/v8/tools/gcmole/README +62 -0
  469. data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
  470. data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
  471. data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
  472. data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
  473. data/lib/libv8/v8/tools/grokdump.py +841 -0
  474. data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
  475. data/lib/libv8/v8/tools/js2c.py +364 -0
  476. data/lib/libv8/v8/tools/jsmin.py +280 -0
  477. data/lib/libv8/v8/tools/linux-tick-processor +35 -0
  478. data/lib/libv8/v8/tools/ll_prof.py +942 -0
  479. data/lib/libv8/v8/tools/logreader.js +185 -0
  480. data/lib/libv8/v8/tools/mac-nm +18 -0
  481. data/lib/libv8/v8/tools/mac-tick-processor +6 -0
  482. data/lib/libv8/v8/tools/oom_dump/README +31 -0
  483. data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
  484. data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
  485. data/lib/libv8/v8/tools/presubmit.py +305 -0
  486. data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
  487. data/lib/libv8/v8/tools/profile.js +751 -0
  488. data/lib/libv8/v8/tools/profile_view.js +219 -0
  489. data/lib/libv8/v8/tools/run-valgrind.py +77 -0
  490. data/lib/libv8/v8/tools/splaytree.js +316 -0
  491. data/lib/libv8/v8/tools/stats-viewer.py +468 -0
  492. data/lib/libv8/v8/tools/test.py +1510 -0
  493. data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
  494. data/lib/libv8/v8/tools/tickprocessor.js +877 -0
  495. data/lib/libv8/v8/tools/utils.py +96 -0
  496. data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
  497. data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
  498. data/lib/libv8/version.rb +5 -0
  499. data/libv8.gemspec +36 -0
  500. metadata +578 -0
@@ -0,0 +1,4461 @@
1
+ // Copyright 2011 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #include "v8.h"
29
+
30
+ #if defined(V8_TARGET_ARCH_IA32)
31
+
32
+ #include "ia32/lithium-codegen-ia32.h"
33
+ #include "code-stubs.h"
34
+ #include "deoptimizer.h"
35
+ #include "stub-cache.h"
36
+
37
+ namespace v8 {
38
+ namespace internal {
39
+
40
+
41
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public CallWrapper {
 public:
  // |codegen| and |pointers| are borrowed, not owned; they must outlive
  // this wrapper.  |deoptimization_index| is recorded with the safepoint.
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) {}
  virtual ~SafepointGenerator() { }

  // Nothing to emit before the call instruction itself.
  virtual void BeforeCall(int call_size) const {}

  // Record the safepoint right after the call so the return address
  // inside the invoke sequence maps to this pointer map.
  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};
64
+
65
+
66
+ #define __ masm()->
67
+
68
// Drives the four phases of Lithium code generation for the whole chunk.
// Returns false if any phase aborted (see Abort()).
bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  // Optimized ia32 code is only generated for SSE2-capable CPUs; enable
  // the feature for the entire function body.
  CpuFeatures::Scope scope(SSE2);
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateSafepointTable();
}
78
+
79
+
80
// Finalizes the generated Code object: stack slot count, safepoint table
// offset, deoptimization input data, and reloc space reserved for later
// lazy-deoptimization patching.
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
87
+
88
+
89
// Marks code generation as failed; the caller falls back to unoptimized
// code.  When --trace-bailout is set, prints the printf-style reason.
void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}
101
+
102
+
103
// Emits a printf-formatted comment into the generated code stream when
// --code-comments is enabled; a no-op otherwise.
void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}
119
+
120
+
121
// Emits the function prologue: optional --stop-at trap, strict-mode
// receiver patching, frame setup (ebp/esi/edi), stack-slot reservation,
// optional local context allocation, and the --trace entry call.
// Returns false if code generation aborted along the way.
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  // Strict mode functions need to replace the receiver with undefined
  // when called as functions (without an explicit receiver
  // object). ecx is zero for method calls and non-zero for function
  // calls.
  if (info_->is_strict_mode()) {
    Label ok;
    __ test(ecx, Operand(ecx));
    __ j(zero, &ok, Label::kNear);
    // +1 for return address.
    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
    __ mov(Operand(esp, receiver_offset),
           Immediate(isolate()->factory()->undefined_value()));
    __ bind(&ok);
  }

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      // Fill the reserved slots with a recognizable zap value so that
      // reads of uninitialized slots are easy to spot in a debugger.
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ mov(Operand(esp, offset), eax);
      }
#endif
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    __ push(edi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewContext, 1);
    }
    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
    // Context is returned in both eax and esi. It replaces the context
    // passed to us. It's saved in the stack and kept live in esi.
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Slot* slot = scope()->parameter(i)->AsSlot();
      if (slot != NULL && slot->type() == Slot::CONTEXT) {
        // Parameters are addressed relative to the caller's stack
        // pointer, last parameter closest to it.
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(slot->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers all involved
        // registers, so we have to use a third register to avoid
        // clobbering esi.
        __ mov(ecx, esi);
        __ RecordWrite(ecx, context_offset, eax, ebx);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}
224
+
225
+
226
// Compiles every Lithium instruction in order.  Instructions following a
// replaced label (a merged basic block) are skipped until the next label.
bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      // A label with a replacement marks a block that was merged away;
      // suppress code emission until the next live label.
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}
245
+
246
+
247
+ LInstruction* LCodeGen::GetNextInstruction() {
248
+ if (current_instruction_ < instructions_->length() - 1) {
249
+ return instructions_->at(current_instruction_ + 1);
250
+ } else {
251
+ return NULL;
252
+ }
253
+ }
254
+
255
+
256
// Emits the out-of-line (deferred) code stubs collected during the main
// body pass.  Each stub jumps back to its exit label when finished.
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    LDeferredCode* code = deferred_[i];
    __ bind(code->entry());
    code->Generate();
    __ jmp(code->exit());
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
270
+
271
+
272
// Emits the accumulated safepoint table after all code has been generated.
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
277
+
278
+
279
// Maps a register-allocator index to the corresponding general register.
Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


// Maps a register-allocator index to the corresponding XMM register.
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


// Returns the general register assigned to a register operand.
Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


// Returns the XMM register assigned to a double-register operand.
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}
299
+
300
+
301
// Returns the int32 value of a constant operand.  The operand must have
// integer32 representation, and the value must round-trip exactly
// through double (checked in debug mode).
int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}
308
+
309
+
310
// Converts a constant operand to an assembler immediate.  Integer32
// constants become raw int32 immediates, tagged constants become handle
// immediates; double constants cannot be encoded and abort compilation.
Immediate LCodeGen::ToImmediate(LOperand* op) {
  LConstantOperand* const_op = LConstantOperand::cast(op);
  Handle<Object> literal = chunk_->LookupLiteral(const_op);
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
  if (r.IsInteger32()) {
    ASSERT(literal->IsNumber());
    return Immediate(static_cast<int32_t>(literal->Number()));
  } else if (r.IsDouble()) {
    Abort("unsupported double immediate");
  }
  ASSERT(r.IsTagged());
  return Immediate(literal);
}
323
+
324
+
325
// Converts an LOperand to an assembler memory/register operand.  Stack
// slots are addressed relative to ebp: non-negative indices are spill
// slots below the fixed frame, negative indices are incoming parameters
// above it.
Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}
339
+
340
+
341
// Returns an ebp-relative operand for one word of a double stack slot,
// using the same spill/parameter offset scheme as ToOperand.
// NOTE(review): the computed offset equals ToOperand's for the same
// index — presumably the double-slot index convention makes this the
// high word; confirm against the register allocator's slot layout.
Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  int offset = (index >= 0) ? index + 3 : index - 1;
  return Operand(ebp, -offset * kPointerSize);
}
347
+
348
+
349
// Recursively appends deoptimization translation commands for
// |environment| and all of its outer environments (outermost frame
// first), one command per environment value.
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  translation->BeginFrame(environment->ast_id(), closure_id, height);
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      // A register value that also lives in a spill slot is emitted as a
      // duplicate pointing at the spill slot.
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}
386
+
387
+
388
// Appends one translation command describing where |op|'s value lives
// (stack slot, register, literal, ...) and whether it is tagged.
void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    // Pushed arguments live just past the spill slot area.
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    // Constants are materialized from the deoptimization literal table.
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
426
+
427
+
428
// Emits a call to |code|, optionally restoring esi (the context) from
// the frame first, then registers the lazy-deoptimization environment
// and safepoint for the call.
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               ContextMode context_mode,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (context_mode == RESTORE_CONTEXT) {
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  }
  __ call(code, mode);

  RegisterLazyDeoptimization(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}
451
+
452
+
453
// Convenience wrapper around CallCodeGeneric that records a simple
// (register-free) safepoint.
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        ContextMode context_mode) {
  CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
}
459
+
460
+
461
// Calls a runtime function with |argc| arguments, optionally restoring
// esi from the frame first, and registers the lazy-deopt environment.
void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           ContextMode context_mode) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (context_mode == RESTORE_CONTEXT) {
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  }
  __ CallRuntime(fun, argc);

  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}
477
+
478
+
479
// Calls a runtime function from deferred code.  Saves double registers
// across the call, always restores esi from the frame first, and records
// a with-registers safepoint.
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
}
487
+
488
+
489
+ void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
490
+ SafepointMode safepoint_mode) {
491
+ // Create the environment to bailout to. If the call has side effects
492
+ // execution has to continue after the call otherwise execution can continue
493
+ // from a previous bailout point repeating the call.
494
+ LEnvironment* deoptimization_environment;
495
+ if (instr->HasDeoptimizationEnvironment()) {
496
+ deoptimization_environment = instr->deoptimization_environment();
497
+ } else {
498
+ deoptimization_environment = instr->environment();
499
+ }
500
+
501
+ RegisterEnvironmentForDeoptimization(deoptimization_environment);
502
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
503
+ RecordSafepoint(instr->pointer_map(),
504
+ deoptimization_environment->deoptimization_index());
505
+ } else {
506
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
507
+ RecordSafepointWithRegisters(
508
+ instr->pointer_map(),
509
+ 0,
510
+ deoptimization_environment->deoptimization_index());
511
+ }
512
+ }
513
+
514
+
515
// Assigns |environment| a deoptimization index and writes its frame
// translation, unless it has already been registered.
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4 0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------ translation_size ------------<|

    // One translated frame per environment in the inlining chain.
    int frame_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
    }
    Translation translation(&translations_, frame_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    environment->Register(deoptimization_index, translation.index());
    deoptimizations_.Add(environment);
  }
}
541
+
542
+
543
// Emits a jump to the eager deoptimization entry for |environment|,
// taken when |cc| holds (or unconditionally for no_condition).  Also
// implements the --deopt-every-n-times and --trap-on-deopt debug flags.
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  ASSERT(entry != NULL);
  if (entry == NULL) {
    // Release-mode guard: abort rather than emit a jump to NULL.
    Abort("bailout was not prepared");
    return;
  }

  if (FLAG_deopt_every_n_times != 0) {
    Handle<SharedFunctionInfo> shared(info_->shared_info());
    Label no_deopt;
    // Preserve flags and scratch registers across the counter update so
    // this instrumentation is side-effect free for the surrounding code.
    __ pushfd();
    __ push(eax);
    __ push(ebx);
    __ mov(ebx, shared);
    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    // Counter reached zero: reset it and force a deoptimization.
    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    // Store the decremented counter and restore the saved state.
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
  }

  if (cc == no_condition) {
    if (FLAG_trap_on_deopt) __ int3();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    if (FLAG_trap_on_deopt) {
      // Trap into the debugger before taking the deopt jump.
      Label done;
      __ j(NegateCondition(cc), &done, Label::kNear);
      __ int3();
      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
      __ bind(&done);
    } else {
      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
}
594
+
595
+
596
// Builds the DeoptimizationInputData array — translation byte array,
// literal table, OSR info, and one (ast id, translation, stack height)
// entry per registered environment — and attaches it to |code|.
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  ASSERT(FLAG_deopt);
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}
627
+
628
+
629
+ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
630
+ int result = deoptimization_literals_.length();
631
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
632
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
633
+ }
634
+ deoptimization_literals_.Add(literal);
635
+ return result;
636
+ }
637
+
638
+
639
+ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
640
+ ASSERT(deoptimization_literals_.length() == 0);
641
+
642
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
643
+ chunk()->inlined_closures();
644
+
645
+ for (int i = 0, length = inlined_closures->length();
646
+ i < length;
647
+ i++) {
648
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
649
+ }
650
+
651
+ inlined_function_count_ = deoptimization_literals_.length();
652
+ }
653
+
654
+
655
// Records a safepoint of the given |kind| at the current pc, marking
// every stack-slot pointer (and, for with-registers safepoints, every
// register pointer) from the pointer map as live tagged values.
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    int deoptimization_index) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
}
673
+
674
+
675
// Records a simple safepoint (no registers, no pushed arguments).
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}


// Records a simple safepoint with an empty pointer map.
void LCodeGen::RecordSafepoint(int deoptimization_index) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, deoptimization_index);
}


// Records a safepoint at which registers and |arguments| pushed
// arguments are live in addition to stack slots.
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
                  deoptimization_index);
}
693
+
694
+
695
+ void LCodeGen::RecordPosition(int position) {
696
+ if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
697
+ masm()->positions_recorder()->RecordPosition(position);
698
+ }
699
+
700
+
701
// Binds the label for a basic block, emits a block comment, updates the
// current block id, and processes the label's gap moves.
void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}
711
+
712
+
713
// Emits code for one parallel move via the gap resolver.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}
716
+
717
+
718
// Emits the parallel moves attached to each inner position of a gap.
// If a lazy bailout follows, records the pc after the moves so the
// safepoint maps to the post-gap state.
void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }

  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}
733
+
734
+
735
// An instruction gap is handled exactly like a plain gap.
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


// Parameters are materialized by the caller's frame; no code is needed.
void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
743
+
744
+
745
// Calls the code stub selected by the instruction's major key.  The
// context is expected in esi and the result is returned in eax.
void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    case CodeStub::TranscendentalCache: {
      // Operates on tagged values (TAGGED mode of the cache stub).
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
      break;
    }
    default:
      UNREACHABLE();
  }
}
789
+
790
+
791
// OSR values are already in their slots when entering via on-stack
// replacement; no code is needed.
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}
794
+
795
+
796
// Computes the integer remainder left % right (truncated, C-style: the
// result takes the sign of the dividend).  Fast paths: a compile-time
// power-of-two divisor, a dividend already smaller than the divisor, a
// runtime power-of-two divisor, and a few unrolled subtractions; falls
// back to idiv otherwise.  Deoptimizes on division by zero or a -0
// result when the corresponding hydrogen flags are set.
void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->InputAt(0));

    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();

    // The divisor's sign does not affect the remainder's value.
    if (divisor < 0) divisor = -divisor;

    Label positive_dividend, done;
    __ test(dividend, Operand(dividend));
    __ j(not_sign, &positive_dividend, Label::kNear);
    // Negative dividend: negate, mask with divisor-1, negate back so the
    // remainder keeps the dividend's sign.
    __ neg(dividend);
    __ and_(dividend, divisor - 1);
    __ neg(dividend);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ j(not_zero, &done, Label::kNear);
      // Result would be -0; bail out if the caller can observe it.
      DeoptimizeIf(no_condition, instr->environment());
    }
    __ bind(&positive_dividend);
    __ and_(dividend, divisor - 1);
    __ bind(&done);
  } else {
    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
    Register left_reg = ToRegister(instr->InputAt(0));
    Register right_reg = ToRegister(instr->InputAt(1));
    Register result_reg = ToRegister(instr->result());

    // idiv requires the dividend in eax and writes the remainder to edx.
    ASSERT(left_reg.is(eax));
    ASSERT(result_reg.is(edx));
    ASSERT(!right_reg.is(eax));
    ASSERT(!right_reg.is(edx));

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      __ test(right_reg, Operand(right_reg));
      DeoptimizeIf(zero, instr->environment());
    }

    __ test(left_reg, Operand(left_reg));
    __ j(zero, &remainder_eq_dividend, Label::kNear);
    // Negative dividends go straight to idiv (sign handling is tricky).
    __ j(sign, &slow, Label::kNear);

    __ test(right_reg, Operand(right_reg));
    __ j(not_sign, &both_positive, Label::kNear);
    // The sign of the divisor doesn't matter.
    __ neg(right_reg);

    __ bind(&both_positive);
    // If the dividend is smaller than the nonnegative
    // divisor, the dividend is the result.
    __ cmp(left_reg, Operand(right_reg));
    __ j(less, &remainder_eq_dividend, Label::kNear);

    // Check if the divisor is a PowerOfTwo integer.
    Register scratch = ToRegister(instr->TempAt(0));
    __ mov(scratch, right_reg);
    __ sub(Operand(scratch), Immediate(1));
    __ test(scratch, Operand(right_reg));
    __ j(not_zero, &do_subtraction, Label::kNear);
    // Power of two: remainder is dividend & (divisor - 1).
    __ and_(left_reg, Operand(scratch));
    __ jmp(&remainder_eq_dividend, Label::kNear);

    __ bind(&do_subtraction);
    const int kUnfolds = 3;
    // Try a few subtractions of the dividend.
    __ mov(scratch, left_reg);
    for (int i = 0; i < kUnfolds; i++) {
      // Reduce the dividend by the divisor.
      __ sub(left_reg, Operand(right_reg));
      // Check if the dividend is less than the divisor.
      __ cmp(left_reg, Operand(right_reg));
      __ j(less, &remainder_eq_dividend, Label::kNear);
    }
    // Unrolled subtraction didn't finish; restore the dividend for idiv.
    __ mov(left_reg, scratch);

    // Slow case, using idiv instruction.
    __ bind(&slow);
    // Sign extend to edx.
    __ cdq();

    // Check for (0 % -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      Label done;
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &positive_left, Label::kNear);
      __ idiv(right_reg);

      // Test the remainder for 0, because then the result would be -0.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done, Label::kNear);

      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&positive_left);
      __ idiv(right_reg);
      __ bind(&done);
    } else {
      __ idiv(right_reg);
    }
    __ jmp(&done, Label::kNear);

    __ bind(&remainder_eq_dividend);
    __ mov(result_reg, left_reg);

    __ bind(&done);
  }
}
904
+
905
+
906
// Computes the integer quotient left / right with idiv, deoptimizing
// when the result is not representable as an int32: division by zero,
// a -0 result, kMinInt / -1 overflow, or a nonzero remainder (flooring
// to an inexact quotient is not allowed here).
void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->InputAt(1);
  // idiv requires the dividend in eax and writes quotient/remainder to
  // eax/edx, so the divisor must avoid both.
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  // Deoptimize if remainder is not 0.
  __ test(edx, Operand(edx));
  DeoptimizeIf(not_zero, instr->environment());
}
950
+
951
+
952
+ void LCodeGen::DoMulI(LMulI* instr) {
953
+ Register left = ToRegister(instr->InputAt(0));
954
+ LOperand* right = instr->InputAt(1);
955
+
956
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
957
+ __ mov(ToRegister(instr->TempAt(0)), left);
958
+ }
959
+
960
+ if (right->IsConstantOperand()) {
961
+ // Try strength reductions on the multiplication.
962
+ // All replacement instructions are at most as long as the imul
963
+ // and have better latency.
964
+ int constant = ToInteger32(LConstantOperand::cast(right));
965
+ if (constant == -1) {
966
+ __ neg(left);
967
+ } else if (constant == 0) {
968
+ __ xor_(left, Operand(left));
969
+ } else if (constant == 2) {
970
+ __ add(left, Operand(left));
971
+ } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
972
+ // If we know that the multiplication can't overflow, it's safe to
973
+ // use instructions that don't set the overflow flag for the
974
+ // multiplication.
975
+ switch (constant) {
976
+ case 1:
977
+ // Do nothing.
978
+ break;
979
+ case 3:
980
+ __ lea(left, Operand(left, left, times_2, 0));
981
+ break;
982
+ case 4:
983
+ __ shl(left, 2);
984
+ break;
985
+ case 5:
986
+ __ lea(left, Operand(left, left, times_4, 0));
987
+ break;
988
+ case 8:
989
+ __ shl(left, 3);
990
+ break;
991
+ case 9:
992
+ __ lea(left, Operand(left, left, times_8, 0));
993
+ break;
994
+ case 16:
995
+ __ shl(left, 4);
996
+ break;
997
+ default:
998
+ __ imul(left, left, constant);
999
+ break;
1000
+ }
1001
+ } else {
1002
+ __ imul(left, left, constant);
1003
+ }
1004
+ } else {
1005
+ __ imul(left, ToOperand(right));
1006
+ }
1007
+
1008
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1009
+ DeoptimizeIf(overflow, instr->environment());
1010
+ }
1011
+
1012
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1013
+ // Bail out if the result is supposed to be negative zero.
1014
+ Label done;
1015
+ __ test(left, Operand(left));
1016
+ __ j(not_zero, &done, Label::kNear);
1017
+ if (right->IsConstantOperand()) {
1018
+ if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
1019
+ DeoptimizeIf(no_condition, instr->environment());
1020
+ }
1021
+ } else {
1022
+ // Test the non-zero operand for negative sign.
1023
+ __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
1024
+ DeoptimizeIf(sign, instr->environment());
1025
+ }
1026
+ __ bind(&done);
1027
+ }
1028
+ }
1029
+
1030
+
1031
+ void LCodeGen::DoBitI(LBitI* instr) {
1032
+ LOperand* left = instr->InputAt(0);
1033
+ LOperand* right = instr->InputAt(1);
1034
+ ASSERT(left->Equals(instr->result()));
1035
+ ASSERT(left->IsRegister());
1036
+
1037
+ if (right->IsConstantOperand()) {
1038
+ int right_operand = ToInteger32(LConstantOperand::cast(right));
1039
+ switch (instr->op()) {
1040
+ case Token::BIT_AND:
1041
+ __ and_(ToRegister(left), right_operand);
1042
+ break;
1043
+ case Token::BIT_OR:
1044
+ __ or_(ToRegister(left), right_operand);
1045
+ break;
1046
+ case Token::BIT_XOR:
1047
+ __ xor_(ToRegister(left), right_operand);
1048
+ break;
1049
+ default:
1050
+ UNREACHABLE();
1051
+ break;
1052
+ }
1053
+ } else {
1054
+ switch (instr->op()) {
1055
+ case Token::BIT_AND:
1056
+ __ and_(ToRegister(left), ToOperand(right));
1057
+ break;
1058
+ case Token::BIT_OR:
1059
+ __ or_(ToRegister(left), ToOperand(right));
1060
+ break;
1061
+ case Token::BIT_XOR:
1062
+ __ xor_(ToRegister(left), ToOperand(right));
1063
+ break;
1064
+ default:
1065
+ UNREACHABLE();
1066
+ break;
1067
+ }
1068
+ }
1069
+ }
1070
+
1071
+
1072
+ void LCodeGen::DoShiftI(LShiftI* instr) {
1073
+ LOperand* left = instr->InputAt(0);
1074
+ LOperand* right = instr->InputAt(1);
1075
+ ASSERT(left->Equals(instr->result()));
1076
+ ASSERT(left->IsRegister());
1077
+ if (right->IsRegister()) {
1078
+ ASSERT(ToRegister(right).is(ecx));
1079
+
1080
+ switch (instr->op()) {
1081
+ case Token::SAR:
1082
+ __ sar_cl(ToRegister(left));
1083
+ break;
1084
+ case Token::SHR:
1085
+ __ shr_cl(ToRegister(left));
1086
+ if (instr->can_deopt()) {
1087
+ __ test(ToRegister(left), Immediate(0x80000000));
1088
+ DeoptimizeIf(not_zero, instr->environment());
1089
+ }
1090
+ break;
1091
+ case Token::SHL:
1092
+ __ shl_cl(ToRegister(left));
1093
+ break;
1094
+ default:
1095
+ UNREACHABLE();
1096
+ break;
1097
+ }
1098
+ } else {
1099
+ int value = ToInteger32(LConstantOperand::cast(right));
1100
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1101
+ switch (instr->op()) {
1102
+ case Token::SAR:
1103
+ if (shift_count != 0) {
1104
+ __ sar(ToRegister(left), shift_count);
1105
+ }
1106
+ break;
1107
+ case Token::SHR:
1108
+ if (shift_count == 0 && instr->can_deopt()) {
1109
+ __ test(ToRegister(left), Immediate(0x80000000));
1110
+ DeoptimizeIf(not_zero, instr->environment());
1111
+ } else {
1112
+ __ shr(ToRegister(left), shift_count);
1113
+ }
1114
+ break;
1115
+ case Token::SHL:
1116
+ if (shift_count != 0) {
1117
+ __ shl(ToRegister(left), shift_count);
1118
+ }
1119
+ break;
1120
+ default:
1121
+ UNREACHABLE();
1122
+ break;
1123
+ }
1124
+ }
1125
+ }
1126
+
1127
+
1128
+ void LCodeGen::DoSubI(LSubI* instr) {
1129
+ LOperand* left = instr->InputAt(0);
1130
+ LOperand* right = instr->InputAt(1);
1131
+ ASSERT(left->Equals(instr->result()));
1132
+
1133
+ if (right->IsConstantOperand()) {
1134
+ __ sub(ToOperand(left), ToImmediate(right));
1135
+ } else {
1136
+ __ sub(ToRegister(left), ToOperand(right));
1137
+ }
1138
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1139
+ DeoptimizeIf(overflow, instr->environment());
1140
+ }
1141
+ }
1142
+
1143
+
1144
+ void LCodeGen::DoConstantI(LConstantI* instr) {
1145
+ ASSERT(instr->result()->IsRegister());
1146
+ __ Set(ToRegister(instr->result()), Immediate(instr->value()));
1147
+ }
1148
+
1149
+
1150
+ void LCodeGen::DoConstantD(LConstantD* instr) {
1151
+ ASSERT(instr->result()->IsDoubleRegister());
1152
+ XMMRegister res = ToDoubleRegister(instr->result());
1153
+ double v = instr->value();
1154
+ // Use xor to produce +0.0 in a fast and compact way, but avoid to
1155
+ // do so if the constant is -0.0.
1156
+ if (BitCast<uint64_t, double>(v) == 0) {
1157
+ __ xorps(res, res);
1158
+ } else {
1159
+ Register temp = ToRegister(instr->TempAt(0));
1160
+ uint64_t int_val = BitCast<uint64_t, double>(v);
1161
+ int32_t lower = static_cast<int32_t>(int_val);
1162
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1163
+ if (CpuFeatures::IsSupported(SSE4_1)) {
1164
+ CpuFeatures::Scope scope(SSE4_1);
1165
+ if (lower != 0) {
1166
+ __ Set(temp, Immediate(lower));
1167
+ __ movd(res, Operand(temp));
1168
+ __ Set(temp, Immediate(upper));
1169
+ __ pinsrd(res, Operand(temp), 1);
1170
+ } else {
1171
+ __ xorps(res, res);
1172
+ __ Set(temp, Immediate(upper));
1173
+ __ pinsrd(res, Operand(temp), 1);
1174
+ }
1175
+ } else {
1176
+ __ Set(temp, Immediate(upper));
1177
+ __ movd(res, Operand(temp));
1178
+ __ psllq(res, 32);
1179
+ if (lower != 0) {
1180
+ __ Set(temp, Immediate(lower));
1181
+ __ movd(xmm0, Operand(temp));
1182
+ __ por(res, xmm0);
1183
+ }
1184
+ }
1185
+ }
1186
+ }
1187
+
1188
+
1189
+ void LCodeGen::DoConstantT(LConstantT* instr) {
1190
+ ASSERT(instr->result()->IsRegister());
1191
+ __ Set(ToRegister(instr->result()), Immediate(instr->value()));
1192
+ }
1193
+
1194
+
1195
+ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1196
+ Register result = ToRegister(instr->result());
1197
+ Register array = ToRegister(instr->InputAt(0));
1198
+ __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
1199
+ }
1200
+
1201
+
1202
+ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
1203
+ Register result = ToRegister(instr->result());
1204
+ Register array = ToRegister(instr->InputAt(0));
1205
+ __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
1206
+ }
1207
+
1208
+
1209
+ void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
1210
+ Register result = ToRegister(instr->result());
1211
+ Register array = ToRegister(instr->InputAt(0));
1212
+ __ mov(result, FieldOperand(array, ExternalArray::kLengthOffset));
1213
+ }
1214
+
1215
+
1216
+ void LCodeGen::DoValueOf(LValueOf* instr) {
1217
+ Register input = ToRegister(instr->InputAt(0));
1218
+ Register result = ToRegister(instr->result());
1219
+ Register map = ToRegister(instr->TempAt(0));
1220
+ ASSERT(input.is(result));
1221
+ Label done;
1222
+ // If the object is a smi return the object.
1223
+ __ test(input, Immediate(kSmiTagMask));
1224
+ __ j(zero, &done, Label::kNear);
1225
+
1226
+ // If the object is not a value type, return the object.
1227
+ __ CmpObjectType(input, JS_VALUE_TYPE, map);
1228
+ __ j(not_equal, &done, Label::kNear);
1229
+ __ mov(result, FieldOperand(input, JSValue::kValueOffset));
1230
+
1231
+ __ bind(&done);
1232
+ }
1233
+
1234
+
1235
+ void LCodeGen::DoBitNotI(LBitNotI* instr) {
1236
+ LOperand* input = instr->InputAt(0);
1237
+ ASSERT(input->Equals(instr->result()));
1238
+ __ not_(ToRegister(input));
1239
+ }
1240
+
1241
+
1242
+ void LCodeGen::DoThrow(LThrow* instr) {
1243
+ __ push(ToOperand(instr->InputAt(0)));
1244
+ CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);
1245
+
1246
+ if (FLAG_debug_code) {
1247
+ Comment("Unreachable code.");
1248
+ __ int3();
1249
+ }
1250
+ }
1251
+
1252
+
1253
+ void LCodeGen::DoAddI(LAddI* instr) {
1254
+ LOperand* left = instr->InputAt(0);
1255
+ LOperand* right = instr->InputAt(1);
1256
+ ASSERT(left->Equals(instr->result()));
1257
+
1258
+ if (right->IsConstantOperand()) {
1259
+ __ add(ToOperand(left), ToImmediate(right));
1260
+ } else {
1261
+ __ add(ToRegister(left), ToOperand(right));
1262
+ }
1263
+
1264
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1265
+ DeoptimizeIf(overflow, instr->environment());
1266
+ }
1267
+ }
1268
+
1269
+
1270
+ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1271
+ XMMRegister left = ToDoubleRegister(instr->InputAt(0));
1272
+ XMMRegister right = ToDoubleRegister(instr->InputAt(1));
1273
+ XMMRegister result = ToDoubleRegister(instr->result());
1274
+ // Modulo uses a fixed result register.
1275
+ ASSERT(instr->op() == Token::MOD || left.is(result));
1276
+ switch (instr->op()) {
1277
+ case Token::ADD:
1278
+ __ addsd(left, right);
1279
+ break;
1280
+ case Token::SUB:
1281
+ __ subsd(left, right);
1282
+ break;
1283
+ case Token::MUL:
1284
+ __ mulsd(left, right);
1285
+ break;
1286
+ case Token::DIV:
1287
+ __ divsd(left, right);
1288
+ break;
1289
+ case Token::MOD: {
1290
+ // Pass two doubles as arguments on the stack.
1291
+ __ PrepareCallCFunction(4, eax);
1292
+ __ movdbl(Operand(esp, 0 * kDoubleSize), left);
1293
+ __ movdbl(Operand(esp, 1 * kDoubleSize), right);
1294
+ __ CallCFunction(
1295
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
1296
+ 4);
1297
+
1298
+ // Return value is in st(0) on ia32.
1299
+ // Store it into the (fixed) result register.
1300
+ __ sub(Operand(esp), Immediate(kDoubleSize));
1301
+ __ fstp_d(Operand(esp, 0));
1302
+ __ movdbl(result, Operand(esp, 0));
1303
+ __ add(Operand(esp), Immediate(kDoubleSize));
1304
+ break;
1305
+ }
1306
+ default:
1307
+ UNREACHABLE();
1308
+ break;
1309
+ }
1310
+ }
1311
+
1312
+
1313
+ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1314
+ ASSERT(ToRegister(instr->InputAt(0)).is(edx));
1315
+ ASSERT(ToRegister(instr->InputAt(1)).is(eax));
1316
+ ASSERT(ToRegister(instr->result()).is(eax));
1317
+
1318
+ BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1319
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
1320
+ }
1321
+
1322
+
1323
+ int LCodeGen::GetNextEmittedBlock(int block) {
1324
+ for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1325
+ LLabel* label = chunk_->GetLabel(i);
1326
+ if (!label->HasReplacement()) return i;
1327
+ }
1328
+ return -1;
1329
+ }
1330
+
1331
+
1332
+ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1333
+ int next_block = GetNextEmittedBlock(current_block_);
1334
+ right_block = chunk_->LookupDestination(right_block);
1335
+ left_block = chunk_->LookupDestination(left_block);
1336
+
1337
+ if (right_block == left_block) {
1338
+ EmitGoto(left_block);
1339
+ } else if (left_block == next_block) {
1340
+ __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1341
+ } else if (right_block == next_block) {
1342
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
1343
+ } else {
1344
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
1345
+ __ jmp(chunk_->GetAssemblyLabel(right_block));
1346
+ }
1347
+ }
1348
+
1349
+
1350
+ void LCodeGen::DoBranch(LBranch* instr) {
1351
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1352
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1353
+
1354
+ Representation r = instr->hydrogen()->representation();
1355
+ if (r.IsInteger32()) {
1356
+ Register reg = ToRegister(instr->InputAt(0));
1357
+ __ test(reg, Operand(reg));
1358
+ EmitBranch(true_block, false_block, not_zero);
1359
+ } else if (r.IsDouble()) {
1360
+ XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
1361
+ __ xorps(xmm0, xmm0);
1362
+ __ ucomisd(reg, xmm0);
1363
+ EmitBranch(true_block, false_block, not_equal);
1364
+ } else {
1365
+ ASSERT(r.IsTagged());
1366
+ Register reg = ToRegister(instr->InputAt(0));
1367
+ if (instr->hydrogen()->type().IsBoolean()) {
1368
+ __ cmp(reg, factory()->true_value());
1369
+ EmitBranch(true_block, false_block, equal);
1370
+ } else {
1371
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
1372
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
1373
+
1374
+ __ cmp(reg, factory()->undefined_value());
1375
+ __ j(equal, false_label);
1376
+ __ cmp(reg, factory()->true_value());
1377
+ __ j(equal, true_label);
1378
+ __ cmp(reg, factory()->false_value());
1379
+ __ j(equal, false_label);
1380
+ __ test(reg, Operand(reg));
1381
+ __ j(equal, false_label);
1382
+ __ test(reg, Immediate(kSmiTagMask));
1383
+ __ j(zero, true_label);
1384
+
1385
+ // Test for double values. Zero is false.
1386
+ Label call_stub;
1387
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1388
+ factory()->heap_number_map());
1389
+ __ j(not_equal, &call_stub, Label::kNear);
1390
+ __ fldz();
1391
+ __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
1392
+ __ FCmp();
1393
+ __ j(zero, false_label);
1394
+ __ jmp(true_label);
1395
+
1396
+ // The conversion stub doesn't cause garbage collections so it's
1397
+ // safe to not record a safepoint after the call.
1398
+ __ bind(&call_stub);
1399
+ ToBooleanStub stub;
1400
+ __ pushad();
1401
+ __ push(reg);
1402
+ __ CallStub(&stub);
1403
+ __ test(eax, Operand(eax));
1404
+ __ popad();
1405
+ EmitBranch(true_block, false_block, not_zero);
1406
+ }
1407
+ }
1408
+ }
1409
+
1410
+
1411
+ void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
1412
+ block = chunk_->LookupDestination(block);
1413
+ int next_block = GetNextEmittedBlock(current_block_);
1414
+ if (block != next_block) {
1415
+ // Perform stack overflow check if this goto needs it before jumping.
1416
+ if (deferred_stack_check != NULL) {
1417
+ ExternalReference stack_limit =
1418
+ ExternalReference::address_of_stack_limit(isolate());
1419
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
1420
+ __ j(above_equal, chunk_->GetAssemblyLabel(block));
1421
+ __ jmp(deferred_stack_check->entry());
1422
+ deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
1423
+ } else {
1424
+ __ jmp(chunk_->GetAssemblyLabel(block));
1425
+ }
1426
+ }
1427
+ }
1428
+
1429
+
1430
+ void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
1431
+ PushSafepointRegistersScope scope(this);
1432
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
1433
+ }
1434
+
1435
+ void LCodeGen::DoGoto(LGoto* instr) {
1436
+ class DeferredStackCheck: public LDeferredCode {
1437
+ public:
1438
+ DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
1439
+ : LDeferredCode(codegen), instr_(instr) { }
1440
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
1441
+ private:
1442
+ LGoto* instr_;
1443
+ };
1444
+
1445
+ DeferredStackCheck* deferred = NULL;
1446
+ if (instr->include_stack_check()) {
1447
+ deferred = new DeferredStackCheck(this, instr);
1448
+ }
1449
+ EmitGoto(instr->block_id(), deferred);
1450
+ }
1451
+
1452
+
1453
+ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1454
+ Condition cond = no_condition;
1455
+ switch (op) {
1456
+ case Token::EQ:
1457
+ case Token::EQ_STRICT:
1458
+ cond = equal;
1459
+ break;
1460
+ case Token::LT:
1461
+ cond = is_unsigned ? below : less;
1462
+ break;
1463
+ case Token::GT:
1464
+ cond = is_unsigned ? above : greater;
1465
+ break;
1466
+ case Token::LTE:
1467
+ cond = is_unsigned ? below_equal : less_equal;
1468
+ break;
1469
+ case Token::GTE:
1470
+ cond = is_unsigned ? above_equal : greater_equal;
1471
+ break;
1472
+ case Token::IN:
1473
+ case Token::INSTANCEOF:
1474
+ default:
1475
+ UNREACHABLE();
1476
+ }
1477
+ return cond;
1478
+ }
1479
+
1480
+
1481
+ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
1482
+ if (right->IsConstantOperand()) {
1483
+ __ cmp(ToOperand(left), ToImmediate(right));
1484
+ } else {
1485
+ __ cmp(ToRegister(left), ToOperand(right));
1486
+ }
1487
+ }
1488
+
1489
+
1490
+ void LCodeGen::DoCmpID(LCmpID* instr) {
1491
+ LOperand* left = instr->InputAt(0);
1492
+ LOperand* right = instr->InputAt(1);
1493
+ LOperand* result = instr->result();
1494
+
1495
+ Label unordered;
1496
+ if (instr->is_double()) {
1497
+ // Don't base result on EFLAGS when a NaN is involved. Instead
1498
+ // jump to the unordered case, which produces a false value.
1499
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1500
+ __ j(parity_even, &unordered, Label::kNear);
1501
+ } else {
1502
+ EmitCmpI(left, right);
1503
+ }
1504
+
1505
+ Label done;
1506
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
1507
+ __ mov(ToRegister(result), factory()->true_value());
1508
+ __ j(cc, &done, Label::kNear);
1509
+
1510
+ __ bind(&unordered);
1511
+ __ mov(ToRegister(result), factory()->false_value());
1512
+ __ bind(&done);
1513
+ }
1514
+
1515
+
1516
+ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1517
+ LOperand* left = instr->InputAt(0);
1518
+ LOperand* right = instr->InputAt(1);
1519
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1520
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1521
+
1522
+ if (instr->is_double()) {
1523
+ // Don't base result on EFLAGS when a NaN is involved. Instead
1524
+ // jump to the false block.
1525
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1526
+ __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1527
+ } else {
1528
+ EmitCmpI(left, right);
1529
+ }
1530
+
1531
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
1532
+ EmitBranch(true_block, false_block, cc);
1533
+ }
1534
+
1535
+
1536
+ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
1537
+ Register left = ToRegister(instr->InputAt(0));
1538
+ Register right = ToRegister(instr->InputAt(1));
1539
+ Register result = ToRegister(instr->result());
1540
+
1541
+ __ cmp(left, Operand(right));
1542
+ __ mov(result, factory()->true_value());
1543
+ Label done;
1544
+ __ j(equal, &done, Label::kNear);
1545
+ __ mov(result, factory()->false_value());
1546
+ __ bind(&done);
1547
+ }
1548
+
1549
+
1550
+ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
1551
+ Register left = ToRegister(instr->InputAt(0));
1552
+ Register right = ToRegister(instr->InputAt(1));
1553
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1554
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1555
+
1556
+ __ cmp(left, Operand(right));
1557
+ EmitBranch(true_block, false_block, equal);
1558
+ }
1559
+
1560
+
1561
+ void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
1562
+ Register left = ToRegister(instr->InputAt(0));
1563
+ Register right = ToRegister(instr->InputAt(1));
1564
+ Register result = ToRegister(instr->result());
1565
+
1566
+ Label done;
1567
+ __ cmp(left, Operand(right));
1568
+ __ mov(result, factory()->false_value());
1569
+ __ j(not_equal, &done, Label::kNear);
1570
+ __ mov(result, factory()->true_value());
1571
+ __ bind(&done);
1572
+ }
1573
+
1574
+
1575
+ void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
1576
+ Register left = ToRegister(instr->InputAt(0));
1577
+ Register right = ToRegister(instr->InputAt(1));
1578
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1579
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1580
+
1581
+ __ cmp(left, Operand(right));
1582
+ EmitBranch(true_block, false_block, equal);
1583
+ }
1584
+
1585
+
1586
+ void LCodeGen::DoIsNull(LIsNull* instr) {
1587
+ Register reg = ToRegister(instr->InputAt(0));
1588
+ Register result = ToRegister(instr->result());
1589
+
1590
+ // TODO(fsc): If the expression is known to be a smi, then it's
1591
+ // definitely not null. Materialize false.
1592
+
1593
+ __ cmp(reg, factory()->null_value());
1594
+ if (instr->is_strict()) {
1595
+ __ mov(result, factory()->true_value());
1596
+ Label done;
1597
+ __ j(equal, &done, Label::kNear);
1598
+ __ mov(result, factory()->false_value());
1599
+ __ bind(&done);
1600
+ } else {
1601
+ Label true_value, false_value, done;
1602
+ __ j(equal, &true_value, Label::kNear);
1603
+ __ cmp(reg, factory()->undefined_value());
1604
+ __ j(equal, &true_value, Label::kNear);
1605
+ __ test(reg, Immediate(kSmiTagMask));
1606
+ __ j(zero, &false_value, Label::kNear);
1607
+ // Check for undetectable objects by looking in the bit field in
1608
+ // the map. The object has already been smi checked.
1609
+ Register scratch = result;
1610
+ __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1611
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1612
+ __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1613
+ __ j(not_zero, &true_value, Label::kNear);
1614
+ __ bind(&false_value);
1615
+ __ mov(result, factory()->false_value());
1616
+ __ jmp(&done, Label::kNear);
1617
+ __ bind(&true_value);
1618
+ __ mov(result, factory()->true_value());
1619
+ __ bind(&done);
1620
+ }
1621
+ }
1622
+
1623
+
1624
+ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
1625
+ Register reg = ToRegister(instr->InputAt(0));
1626
+
1627
+ // TODO(fsc): If the expression is known to be a smi, then it's
1628
+ // definitely not null. Jump to the false block.
1629
+
1630
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1631
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1632
+
1633
+ __ cmp(reg, factory()->null_value());
1634
+ if (instr->is_strict()) {
1635
+ EmitBranch(true_block, false_block, equal);
1636
+ } else {
1637
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
1638
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
1639
+ __ j(equal, true_label);
1640
+ __ cmp(reg, factory()->undefined_value());
1641
+ __ j(equal, true_label);
1642
+ __ test(reg, Immediate(kSmiTagMask));
1643
+ __ j(zero, false_label);
1644
+ // Check for undetectable objects by looking in the bit field in
1645
+ // the map. The object has already been smi checked.
1646
+ Register scratch = ToRegister(instr->TempAt(0));
1647
+ __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1648
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1649
+ __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1650
+ EmitBranch(true_block, false_block, not_zero);
1651
+ }
1652
+ }
1653
+
1654
+
1655
+ Condition LCodeGen::EmitIsObject(Register input,
1656
+ Register temp1,
1657
+ Register temp2,
1658
+ Label* is_not_object,
1659
+ Label* is_object) {
1660
+ ASSERT(!input.is(temp1));
1661
+ ASSERT(!input.is(temp2));
1662
+ ASSERT(!temp1.is(temp2));
1663
+
1664
+ __ test(input, Immediate(kSmiTagMask));
1665
+ __ j(equal, is_not_object);
1666
+
1667
+ __ cmp(input, isolate()->factory()->null_value());
1668
+ __ j(equal, is_object);
1669
+
1670
+ __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
1671
+ // Undetectable objects behave like undefined.
1672
+ __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
1673
+ __ test(temp2, Immediate(1 << Map::kIsUndetectable));
1674
+ __ j(not_zero, is_not_object);
1675
+
1676
+ __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
1677
+ __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
1678
+ __ j(below, is_not_object);
1679
+ __ cmp(temp2, LAST_JS_OBJECT_TYPE);
1680
+ return below_equal;
1681
+ }
1682
+
1683
+
1684
+ void LCodeGen::DoIsObject(LIsObject* instr) {
1685
+ Register reg = ToRegister(instr->InputAt(0));
1686
+ Register result = ToRegister(instr->result());
1687
+ Register temp = ToRegister(instr->TempAt(0));
1688
+ Label is_false, is_true, done;
1689
+
1690
+ Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
1691
+ __ j(true_cond, &is_true);
1692
+
1693
+ __ bind(&is_false);
1694
+ __ mov(result, factory()->false_value());
1695
+ __ jmp(&done);
1696
+
1697
+ __ bind(&is_true);
1698
+ __ mov(result, factory()->true_value());
1699
+
1700
+ __ bind(&done);
1701
+ }
1702
+
1703
+
1704
+ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1705
+ Register reg = ToRegister(instr->InputAt(0));
1706
+ Register temp = ToRegister(instr->TempAt(0));
1707
+ Register temp2 = ToRegister(instr->TempAt(1));
1708
+
1709
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1710
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1711
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
1712
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
1713
+
1714
+ Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
1715
+
1716
+ EmitBranch(true_block, false_block, true_cond);
1717
+ }
1718
+
1719
+
1720
+ void LCodeGen::DoIsSmi(LIsSmi* instr) {
1721
+ Operand input = ToOperand(instr->InputAt(0));
1722
+ Register result = ToRegister(instr->result());
1723
+
1724
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1725
+ __ test(input, Immediate(kSmiTagMask));
1726
+ __ mov(result, factory()->true_value());
1727
+ Label done;
1728
+ __ j(zero, &done, Label::kNear);
1729
+ __ mov(result, factory()->false_value());
1730
+ __ bind(&done);
1731
+ }
1732
+
1733
+
1734
+ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1735
+ Operand input = ToOperand(instr->InputAt(0));
1736
+
1737
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1738
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1739
+
1740
+ __ test(input, Immediate(kSmiTagMask));
1741
+ EmitBranch(true_block, false_block, zero);
1742
+ }
1743
+
1744
+
1745
+ void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
1746
+ Register input = ToRegister(instr->InputAt(0));
1747
+ Register result = ToRegister(instr->result());
1748
+
1749
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1750
+ Label false_label, done;
1751
+ STATIC_ASSERT(kSmiTag == 0);
1752
+ __ test(input, Immediate(kSmiTagMask));
1753
+ __ j(zero, &false_label, Label::kNear);
1754
+ __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
1755
+ __ test_b(FieldOperand(result, Map::kBitFieldOffset),
1756
+ 1 << Map::kIsUndetectable);
1757
+ __ j(zero, &false_label, Label::kNear);
1758
+ __ mov(result, factory()->true_value());
1759
+ __ jmp(&done);
1760
+ __ bind(&false_label);
1761
+ __ mov(result, factory()->false_value());
1762
+ __ bind(&done);
1763
+ }
1764
+
1765
+
1766
+ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1767
+ Register input = ToRegister(instr->InputAt(0));
1768
+ Register temp = ToRegister(instr->TempAt(0));
1769
+
1770
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1771
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1772
+
1773
+ STATIC_ASSERT(kSmiTag == 0);
1774
+ __ test(input, Immediate(kSmiTagMask));
1775
+ __ j(zero, chunk_->GetAssemblyLabel(false_block));
1776
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
1777
+ __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
1778
+ 1 << Map::kIsUndetectable);
1779
+ EmitBranch(true_block, false_block, not_zero);
1780
+ }
1781
+
1782
+
1783
+ static InstanceType TestType(HHasInstanceType* instr) {
1784
+ InstanceType from = instr->from();
1785
+ InstanceType to = instr->to();
1786
+ if (from == FIRST_TYPE) return to;
1787
+ ASSERT(from == to || to == LAST_TYPE);
1788
+ return from;
1789
+ }
1790
+
1791
+
1792
+ static Condition BranchCondition(HHasInstanceType* instr) {
1793
+ InstanceType from = instr->from();
1794
+ InstanceType to = instr->to();
1795
+ if (from == to) return equal;
1796
+ if (to == LAST_TYPE) return above_equal;
1797
+ if (from == FIRST_TYPE) return below_equal;
1798
+ UNREACHABLE();
1799
+ return equal;
1800
+ }
1801
+
1802
+
1803
+ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
1804
+ Register input = ToRegister(instr->InputAt(0));
1805
+ Register result = ToRegister(instr->result());
1806
+
1807
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1808
+ __ test(input, Immediate(kSmiTagMask));
1809
+ Label done, is_false;
1810
+ __ j(zero, &is_false, Label::kNear);
1811
+ __ CmpObjectType(input, TestType(instr->hydrogen()), result);
1812
+ __ j(NegateCondition(BranchCondition(instr->hydrogen())),
1813
+ &is_false, Label::kNear);
1814
+ __ mov(result, factory()->true_value());
1815
+ __ jmp(&done, Label::kNear);
1816
+ __ bind(&is_false);
1817
+ __ mov(result, factory()->false_value());
1818
+ __ bind(&done);
1819
+ }
1820
+
1821
+
1822
+ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1823
+ Register input = ToRegister(instr->InputAt(0));
1824
+ Register temp = ToRegister(instr->TempAt(0));
1825
+
1826
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
1827
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
1828
+
1829
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
1830
+
1831
+ __ test(input, Immediate(kSmiTagMask));
1832
+ __ j(zero, false_label);
1833
+
1834
+ __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
1835
+ EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
1836
+ }
1837
+
1838
+
1839
+ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1840
+ Register input = ToRegister(instr->InputAt(0));
1841
+ Register result = ToRegister(instr->result());
1842
+
1843
+ if (FLAG_debug_code) {
1844
+ __ AbortIfNotString(input);
1845
+ }
1846
+
1847
+ __ mov(result, FieldOperand(input, String::kHashFieldOffset));
1848
+ __ IndexFromHash(result, result);
1849
+ }
1850
+
1851
+
1852
// Materializes true/false depending on whether the string input caches an
// array index in its hash field.
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  // Optimistically load true: the index is cached iff all of the mask bits
  // in the hash field are zero, in which case we jump over the false store.
  __ mov(result, factory()->true_value());
  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  Label done;
  __ j(zero, &done, Label::kNear);
  __ mov(result, factory()->false_value());
  __ bind(&done);
}
1865
+
1866
+
1867
// Branch version of the cached-array-index test: branches to the true block
// when all kContainsCachedArrayIndexMask bits in the hash field are zero
// (the 'equal' condition reads the zero flag set by the test).
void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, equal);
}
1878
+
1879
+
1880
// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input. Only input and temp2 may alias.
// Implements the %_ClassOf test used by natives: smis and non-JS-objects
// fail, functions have class 'Function', objects with a non-function
// constructor have class 'Object', and everything else is compared against
// the constructor's instance class name by symbol identity.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String>class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
  // Smis are not JS objects and match no class.
  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, is_false);
  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
  __ j(below, is_false);

  // Map is now in temp.
  // Functions have class 'Function'.
  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
  if (class_name->IsEqualTo(CStrVector("Function"))) {
    __ j(equal, is_true);
  } else {
    __ j(equal, is_false);
  }

  // Check if the constructor in the map is a function.
  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
  // LAST_JS_OBJECT_TYPE.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are symbols it is sufficient to use an identity
  // comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}
1935
+
1936
+
1937
// Materializes the result of a class-of test as true/false. The input
// register doubles as both temp2 for EmitClassOfTest (that aliasing is
// explicitly allowed) and the result register, so it is clobbered.
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Register temp = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();
  Label done;
  Label is_true, is_false;

  EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);

  // EmitClassOfTest may fall through with the answer in the z flag.
  __ j(not_equal, &is_false, Label::kNear);

  __ bind(&is_true);
  __ mov(result, factory()->true_value());
  __ jmp(&done, Label::kNear);

  __ bind(&is_false);
  __ mov(result, factory()->false_value());
  __ bind(&done);
}
1958
+
1959
+
1960
// Branch version of the class-of test.
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));
  // EmitClassOfTest requires input != temp (input may alias temp2).
  if (input.is(temp)) {
    // Swap.
    Register swapper = temp;
    temp = temp2;
    temp2 = swapper;
  }
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  // On fall-through from EmitClassOfTest the answer is in the z flag.
  EmitBranch(true_block, false_block, equal);
}
1982
+
1983
+
1984
+ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
1985
+ Register reg = ToRegister(instr->InputAt(0));
1986
+ int true_block = instr->true_block_id();
1987
+ int false_block = instr->false_block_id();
1988
+
1989
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
1990
+ EmitBranch(true_block, false_block, equal);
1991
+ }
1992
+
1993
+
1994
// Implements 'instanceof' by calling the generic InstanceofStub; the stub
// leaves zero in eax iff the object is an instance of the function, so the
// zero test below selects the true/false heap value.
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  // Object and function are in fixed registers defined by the stub.
  ASSERT(ToRegister(instr->context()).is(esi));
  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);

  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(zero, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}
2009
+
2010
+
2011
// Branch version of 'instanceof': calls the InstanceofStub and branches to
// the true block when the stub returned zero in eax.
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
  __ test(eax, Operand(eax));
  EmitBranch(true_block, false_block, zero);
}
2021
+
2022
+
2023
// Implements 'x instanceof <known global function>' with an inlined
// map/result cache: the two hole-value constants emitted below are patched
// in place by the InstanceofStub with the last map/result pair, and cache
// misses fall through to the deferred slow path.
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
    }

    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  // A Smi is not an instance of anything.
  __ test(object, Immediate(kSmiTagMask));
  __ j(zero, &false_result);

  // This is the inlined call site instanceof cache. The two occurences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = ToRegister(instr->TempAt(0));  // Aliases temp by design.
  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  __ cmp(map, factory()->the_hole_value());  // Patched to cached map.
  __ j(not_equal, &cache_miss, Label::kNear);
  __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
  __ jmp(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ cmp(object, factory()->null_value());
  __ j(equal, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
  __ j(is_string, &false_result);

  // Go to the deferred code.
  __ jmp(deferred->entry());

  __ bind(&false_result);
  __ mov(ToRegister(instr->result()), factory()->false_value());

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}
2085
+
2086
+
2087
// Deferred (slow) path for DoInstanceOfKnownGlobal: calls the InstanceofStub
// with call-site inline-check patching enabled, passing the offset back to
// the inlined map check, and stores the resulting true/false object into the
// eax safepoint slot so it survives register restoration.
void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                Label* map_check) {
  PushSafepointRegistersScope scope(this);

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  // Get the temp register reserved by the instruction. This needs to be a
  // register which is pushed last by PushSafepointRegisters as top of the
  // stack is used to pass the offset to the location of the map check to
  // the stub.
  Register temp = ToRegister(instr->TempAt(0));
  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
  __ mov(InstanceofStub::right(), Immediate(instr->function()));
  // kAdditionalDelta accounts for the code emitted between the delta
  // computation and the stub call itself.  NOTE(review): presumably must
  // stay in sync with the emitted sequence below — confirm if it changes.
  static const int kAdditionalDelta = 16;
  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
  __ mov(temp, Immediate(delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RESTORE_CONTEXT,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  // Put the result value into the eax slot and restore all registers.
  __ StoreToSafepointRegisterSlot(eax, eax);
}
2119
+
2120
+
2121
+ static Condition ComputeCompareCondition(Token::Value op) {
2122
+ switch (op) {
2123
+ case Token::EQ_STRICT:
2124
+ case Token::EQ:
2125
+ return equal;
2126
+ case Token::LT:
2127
+ return less;
2128
+ case Token::GT:
2129
+ return greater;
2130
+ case Token::LTE:
2131
+ return less_equal;
2132
+ case Token::GTE:
2133
+ return greater_equal;
2134
+ default:
2135
+ UNREACHABLE();
2136
+ return no_condition;
2137
+ }
2138
+ }
2139
+
2140
+
2141
// Implements a generic (tagged) comparison through the CompareIC and
// materializes the result as a true/false heap value.
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);

  // The compare stub expects the condition and the input operands reversed
  // for GT and LTE.
  Condition condition = ComputeCompareCondition(op);
  if (op == Token::GT || op == Token::LTE) {
    condition = ReverseCondition(condition);
  }
  // The IC leaves its integer result in eax; test it against zero and pick
  // the boolean accordingly.
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}
2160
+
2161
+
2162
// Branch version of the generic comparison: calls the CompareIC and branches
// on the sign of its integer result in eax.
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);

  // The compare stub expects compare condition and the input operands
  // reversed for GT and LTE.
  Condition condition = ComputeCompareCondition(op);
  if (op == Token::GT || op == Token::LTE) {
    condition = ReverseCondition(condition);
  }
  __ test(eax, Operand(eax));
  EmitBranch(true_block, false_block, condition);
}
2179
+
2180
+
2181
// Tears down the frame and returns. The return value is already in eax.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register. We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(eax);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  __ mov(esp, ebp);
  __ pop(ebp);
  // The +1 pops the receiver along with the parameters; ecx is a scratch
  // register for Ret.
  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
2195
+
2196
+
2197
// Loads a global variable straight out of its property cell. If the
// hydrogen instruction requests a hole check, a hole value means the global
// was deleted and we deoptimize.
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
  if (instr->hydrogen()->check_hole_value()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}
2205
+
2206
+
2207
// Loads a global through the LoadIC. Register constraints (receiver and
// result in eax, context in esi, name in ecx) follow the IC calling
// convention. Loads for typeof use the contextual-miss-tolerant mode.
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                               RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
}
2218
+
2219
+
2220
// Stores a value directly into a global property cell, deoptimizing if the
// cell holds the hole (the property was deleted and the slow path must
// update the property dictionary instead).
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->InputAt(0));
  Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->check_hole_value()) {
    __ cmp(cell_operand, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }

  // Store the value.
  __ mov(cell_operand, value);
}
2236
+
2237
+
2238
// Stores a global through the StoreIC (strict or non-strict variant).
// Register constraints (receiver in edx, value in eax, name in ecx, context
// in esi) follow the IC calling convention.
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = instr->strict_mode()
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
}
2249
+
2250
+
2251
+ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2252
+ Register context = ToRegister(instr->context());
2253
+ Register result = ToRegister(instr->result());
2254
+ __ mov(result, ContextOperand(context, instr->slot_index()));
2255
+ }
2256
+
2257
+
2258
// Stores a value into a context slot, emitting a write barrier when the
// instruction indicates one is needed for the stored value.
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  __ mov(ContextOperand(context, instr->slot_index()), value);
  if (instr->needs_write_barrier()) {
    Register temp = ToRegister(instr->TempAt(0));
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWrite(context, offset, value, temp);
  }
}
2268
+
2269
+
2270
// Loads a named field with a statically known location: either in-object,
// or out-of-line via the object's properties array.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
    // Indirect through the properties array.
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
  }
}
2280
+
2281
+
2282
// Emits the load of a named property for one candidate map: resolves the
// property via the map's descriptors at compile time and emits either a
// direct field load or materialization of the constant function.
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name) {
  LookupResult lookup;
  type->LookupInDescriptors(NULL, *name, &lookup);
  ASSERT(lookup.IsProperty() &&
         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
  if (lookup.type() == FIELD) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ mov(result, FieldOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
      __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else {
    // Constant-function property: load the known function object directly.
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    LoadHeapObject(result, Handle<HeapObject>::cast(function));
  }
}
2307
+
2308
+
2309
// Loads a named property when hydrogen recorded a small set of candidate
// receiver maps: compares the receiver's map against each candidate and
// inlines the corresponding load. On the last candidate it either falls
// back to the generic LoadIC (need_generic) or deoptimizes on mismatch.
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());

  int map_count = instr->hydrogen()->types()->length();
  Handle<String> name = instr->hydrogen()->name();
  if (map_count == 0) {
    // No candidate maps at all: go straight to the generic IC.
    ASSERT(instr->hydrogen()->need_generic());
    __ mov(ecx, name);
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  } else {
    Label done;
    for (int i = 0; i < map_count - 1; ++i) {
      Handle<Map> map = instr->hydrogen()->types()->at(i);
      Label next;
      __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
      __ j(not_equal, &next, Label::kNear);
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ jmp(&done, Label::kNear);
      __ bind(&next);
    }
    // Last candidate map gets special handling for the miss case.
    Handle<Map> map = instr->hydrogen()->types()->last();
    __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
    if (instr->hydrogen()->need_generic()) {
      Label generic;
      __ j(not_equal, &generic, Label::kNear);
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ jmp(&done, Label::kNear);
      __ bind(&generic);
      __ mov(ecx, name);
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
    } else {
      DeoptimizeIf(not_equal, instr->environment());
      EmitLoadFieldOrConstantFunction(result, object, map, name);
    }
    __ bind(&done);
  }
}
2349
+
2350
+
2351
+ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2352
+ ASSERT(ToRegister(instr->context()).is(esi));
2353
+ ASSERT(ToRegister(instr->object()).is(eax));
2354
+ ASSERT(ToRegister(instr->result()).is(eax));
2355
+
2356
+ __ mov(ecx, instr->name());
2357
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2358
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
2359
+ }
2360
+
2361
+
2362
// Loads a function's prototype, deoptimizing if the input is not a function
// or has no prototype. Handles the initial-map indirection and the
// non-instance-prototype case (prototype stored in the map's constructor
// field).
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->TempAt(0));
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. 'result' receives the
  // function's map as a side effect of the check.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
            1 << Map::kHasNonInstancePrototype);
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ mov(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}
2402
+
2403
+
2404
// Loads a JSObject's elements backing store. In debug builds additionally
// checks that the result is a fixed array, a COW fixed array, or an
// external array.
void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done;
    __ cmp(FieldOperand(result, HeapObject::kMapOffset),
           Immediate(factory()->fixed_array_map()));
    __ j(equal, &done, Label::kNear);
    __ cmp(FieldOperand(result, HeapObject::kMapOffset),
           Immediate(factory()->fixed_cow_array_map()));
    __ j(equal, &done, Label::kNear);
    // Neither fixed-array map matched: verify the instance type lies in the
    // external array range, using a scratch register that is preserved
    // around the check.
    Register temp((result.is(eax)) ? ebx : eax);
    __ push(temp);
    __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
    __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ sub(Operand(temp), Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
    __ cmp(Operand(temp), Immediate(kExternalArrayTypeCount));
    __ pop(temp);
    __ Check(below, "Check for fast elements or pixel array failed.");
    __ bind(&done);
  }
}
2427
+
2428
+
2429
+ void LCodeGen::DoLoadExternalArrayPointer(
2430
+ LLoadExternalArrayPointer* instr) {
2431
+ Register result = ToRegister(instr->result());
2432
+ Register input = ToRegister(instr->InputAt(0));
2433
+ __ mov(result, FieldOperand(input,
2434
+ ExternalArray::kExternalPointerOffset));
2435
+ }
2436
+
2437
+
2438
// Loads arguments[index] given a pointer into the arguments frame and the
// arguments length. Deoptimizes when length - index is not positive, i.e.
// the index is out of range.
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Operand index = ToOperand(instr->index());
  Register result = ToRegister(instr->result());

  // length is clobbered: it now holds length - index.
  __ sub(length, index);
  DeoptimizeIf(below_equal, instr->environment());

  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them add one more.
  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
}
2451
+
2452
+
2453
// Loads an element from a fast-elements fixed array. The result register is
// required to alias the elements register, so the elements pointer is
// consumed by the load.
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = ToRegister(instr->key());
  Register result = ToRegister(instr->result());
  ASSERT(result.is(elements));

  // Load the result.
  __ mov(result, FieldOperand(elements,
                              key,
                              times_pointer_size,
                              FixedArray::kHeaderSize));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}
2471
+
2472
+
2473
// Builds the memory operand addressing element 'key' of an external array,
// scaling the key by the element size of 'array_type'. Constant keys are
// folded into the displacement; keys with high bits set cannot be encoded
// and abort compilation.
Operand LCodeGen::BuildExternalArrayOperand(LOperand* external_pointer,
                                            LOperand* key,
                                            ExternalArrayType array_type) {
  Register external_pointer_reg = ToRegister(external_pointer);
  int shift_size = ExternalArrayTypeToShiftSize(array_type);
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort("array index constant value too big");
    }
    return Operand(external_pointer_reg, constant_value * (1 << shift_size));
  } else {
    // Non-constant key: use a scaled-index operand.
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
  }
}
2489
+
2490
+
2491
// Loads an element from a typed external array, widening to the tagged or
// double representation the instruction's result expects. Float loads are
// converted to double; uint32 loads deoptimize when the value does not fit
// in a signed 32-bit integer.
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  ExternalArrayType array_type = instr->array_type();
  Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
                                            instr->key(), array_type));
  if (array_type == kExternalFloatArray) {
    // Load the 32-bit float and widen it to double for the result register.
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (array_type == kExternalDoubleArray) {
    __ movdbl(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (array_type) {
      case kExternalByteArray:
        __ movsx_b(result, operand);
        break;
      case kExternalUnsignedByteArray:
      case kExternalPixelArray:
        __ movzx_b(result, operand);
        break;
      case kExternalShortArray:
        __ movsx_w(result, operand);
        break;
      case kExternalUnsignedShortArray:
        __ movzx_w(result, operand);
        break;
      case kExternalIntArray:
        __ mov(result, operand);
        break;
      case kExternalUnsignedIntArray:
        __ mov(result, operand);
        __ test(result, Operand(result));
        // TODO(danno): we could be more clever here, perhaps having a special
        // version of the stub that detects if the overflow case actually
        // happens, and generate code that returns a double rather than int.
        DeoptimizeIf(negative, instr->environment());
        break;
      case kExternalFloatArray:
      case kExternalDoubleArray:
        // Handled above; cannot reach this switch.
        UNREACHABLE();
        break;
    }
  }
}
2536
+
2537
+
2538
+ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2539
+ ASSERT(ToRegister(instr->context()).is(esi));
2540
+ ASSERT(ToRegister(instr->object()).is(edx));
2541
+ ASSERT(ToRegister(instr->key()).is(eax));
2542
+
2543
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2544
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
2545
+ }
2546
+
2547
+
2548
// Computes a pointer to the frame holding this function's actual arguments:
// ebp itself when the function was called directly, or the arguments
// adaptor frame below it when one is present.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  // Check for arguments adapter frame.
  Label done, adapted;
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
  __ cmp(Operand(result),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adapted, Label::kNear);

  // No arguments adaptor frame.
  __ mov(result, Operand(ebp));
  __ jmp(&done, Label::kNear);

  // Arguments adaptor frame present.
  __ bind(&adapted);
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Result is the frame pointer for the frame if not adapted and for the real
  // frame below the adaptor frame if adapted.
  __ bind(&done);
}
2571
+
2572
+
2573
// Computes the number of actual arguments: the static parameter count when
// there is no arguments adaptor frame, otherwise the untagged length stored
// in the adaptor frame.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  // (elem == ebp means DoArgumentsElements found no adaptor frame.)
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  // The stored length is a Smi; untag it to a plain integer.
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}
2593
+
2594
+
2595
// Implements Function.prototype.apply with an arguments object: normalizes
// the receiver (null/undefined become the global object; smis and
// non-objects deoptimize), pushes up to kArgumentsLimit arguments from the
// arguments frame onto the stack, and invokes the function.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = ToRegister(instr->TempAt(0));
  ASSERT(receiver.is(eax));  // Used for parameter count.
  ASSERT(function.is(edi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(eax));

  // If the receiver is null or undefined, we have to pass the global object
  // as a receiver.
  Label global_object, receiver_ok;
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr->environment());
  __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);

  __ bind(&global_object);
  // TODO(kmillikin): We have a hydrogen value for the global object. See
  // if it's better to use it than to explicitly fetch it from the context
  // here.
  __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
  __ bind(&receiver_ok);

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr->environment());

  // receiver is pushed as the implicit first argument; afterwards receiver
  // (eax) is repurposed to carry the argument count for InvokeFunction.
  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  LEnvironment* env = instr->deoptimization_environment();
  RecordPosition(pointers->position());
  RegisterEnvironmentForDeoptimization(env);
  SafepointGenerator safepoint_generator(this,
                                         pointers,
                                         env->deoptimization_index());
  ParameterCount actual(eax);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
2661
+
2662
+
2663
+ void LCodeGen::DoPushArgument(LPushArgument* instr) {
2664
+ LOperand* argument = instr->InputAt(0);
2665
+ if (argument->IsConstantOperand()) {
2666
+ __ push(ToImmediate(argument));
2667
+ } else {
2668
+ __ push(ToOperand(argument));
2669
+ }
2670
+ }
2671
+
2672
+
2673
+ void LCodeGen::DoContext(LContext* instr) {
2674
+ Register result = ToRegister(instr->result());
2675
+ __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
2676
+ }
2677
+
2678
+
2679
+ void LCodeGen::DoOuterContext(LOuterContext* instr) {
2680
+ Register context = ToRegister(instr->context());
2681
+ Register result = ToRegister(instr->result());
2682
+ __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
2683
+ __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
2684
+ }
2685
+
2686
+
2687
+ void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2688
+ Register context = ToRegister(instr->context());
2689
+ Register result = ToRegister(instr->result());
2690
+ __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
2691
+ }
2692
+
2693
+
2694
+ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2695
+ Register global = ToRegister(instr->global());
2696
+ Register result = ToRegister(instr->result());
2697
+ __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
2698
+ }
2699
+
2700
+
2701
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind) {
  // Emits a direct call to a statically known JSFunction.  Callers have
  // already placed the function object in edi; the result arrives in
  // eax per the JS calling convention visible in the sibling Do*Call
  // handlers.
  // Change context if needed.
  bool change_context =
      (info()->closure()->context() != function->context()) ||
      scope()->contains_with() ||
      (scope()->num_heap_slots() > 0);
  if (change_context) {
    // Load the callee's context out of the function object.
    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  } else {
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  }

  // Set eax to arguments count if adaption is not needed. Assumes that eax
  // is available to write to at this point.
  if (!function->NeedsArgumentsAdaption()) {
    __ mov(eax, arity);
  }

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  // Invoke function.
  __ SetCallKind(ecx, call_kind);
  if (*function == *info()->closure()) {
    // Calling the function currently being compiled: a self-reference
    // avoids embedding a code object that does not exist yet.
    __ CallSelf();
  } else {
    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
  }

  // Setup deoptimization.
  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
}
2736
+
2737
+
2738
+ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2739
+ ASSERT(ToRegister(instr->result()).is(eax));
2740
+ __ mov(edi, instr->function());
2741
+ CallKnownFunction(instr->function(),
2742
+ instr->arity(),
2743
+ instr,
2744
+ CALL_AS_METHOD);
2745
+ }
2746
+
2747
+
2748
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  // Deferred code for Math.abs on a tagged value: deoptimize unless the
  // value is a heap number; for a negative heap number, allocate a
  // fresh heap number holding the absolute value.
  Register input_reg = ToRegister(instr->InputAt(0));
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr->environment());

  Label done;
  // Choose two scratch registers distinct from input_reg and from each
  // other (eax is also where the runtime call below returns).
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  Label negative;
  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(not_zero, &negative);
  __ jmp(&done);

  __ bind(&negative);

  Label allocated, slow;
  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);

  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);

  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  // Copy the number into the new heap number, clearing the sign bit of
  // the exponent word to negate the value.
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  // Point the result's safepoint slot at the new number so it is the
  // value restored when the safepoint registers are popped.
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}
2798
+
2799
+
2800
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  // Integer Math.abs: negate the value in place when it is negative.
  // kMinInt has no positive counterpart, so neg leaves it negative and
  // the deopt check below catches that case.
  Register input_reg = ToRegister(instr->InputAt(0));
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive);
  __ neg(input_reg);
  __ test(input_reg, Operand(input_reg));
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}
2810
+
2811
+
2812
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Emits Math.abs.  Doubles are handled with SSE bit operations,
  // integers inline, and tagged values via a deferred path for heap
  // numbers (smis are handled inline after the smi check).
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
   private:
    LUnaryMathOperation* instr_;
  };

  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    // |x| = x & -x bitwise: negation flips only the sign bit, so the
    // AND clears the sign bit and keeps all other bits.
    XMMRegister scratch = xmm0;
    XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ pand(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->InputAt(0));
    // Smi check.
    __ test(input_reg, Immediate(kSmiTagMask));
    __ j(not_zero, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
2848
+
2849
+
2850
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  // Math.floor for doubles.  Only nonnegative inputs are handled here;
  // negative inputs, NaN, and (when it must be distinguished) -0 all
  // deoptimize.
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
  __ ucomisd(input_reg, xmm_scratch);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Deopt on <= 0: -0 compares equal to +0, so it bails out here.
    DeoptimizeIf(below_equal, instr->environment());
  } else {
    DeoptimizeIf(below, instr->environment());
  }

  // Use truncating instruction (OK because input is positive).
  __ cvttsd2si(output_reg, Operand(input_reg));

  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x80000000u);
  DeoptimizeIf(equal, instr->environment());
}
2870
+
2871
+
2872
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  // Math.round: for inputs >= 0.5, add 0.5 and truncate; smaller
  // inputs round to +0, deoptimizing when a -0 result would be
  // observable.
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));

  Label below_half, done;
  // xmm_scratch = 0.5
  ExternalReference one_half = ExternalReference::address_of_one_half();
  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));

  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_half);
  // input = input + 0.5
  __ addsd(input_reg, xmm_scratch);


  // Compute Math.floor(value + 0.5).
  // Use truncating instruction (OK because input is positive).
  __ cvttsd2si(output_reg, Operand(input_reg));

  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x80000000u);
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done);

  __ bind(&below_half);

  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // If the sign is positive, we return +0.
    __ movmskpd(output_reg, input_reg);
    __ test(output_reg, Immediate(1));
    DeoptimizeIf(not_zero, instr->environment());
  } else {
    // If the input is >= -0.5, we return +0.
    // 0xBF000000 is -0.5 as a single-precision float.
    __ mov(output_reg, Immediate(0xBF000000));
    __ movd(xmm_scratch, Operand(output_reg));
    __ cvtss2sd(xmm_scratch, xmm_scratch);
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(below, instr->environment());
  }
  __ Set(output_reg, Immediate(0));
  __ bind(&done);
}
2917
+
2918
+
2919
+ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
2920
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2921
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2922
+ __ sqrtsd(input_reg, input_reg);
2923
+ }
2924
+
2925
+
2926
+ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
2927
+ XMMRegister xmm_scratch = xmm0;
2928
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2929
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2930
+ __ xorps(xmm_scratch, xmm_scratch);
2931
+ __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
2932
+ __ sqrtsd(input_reg, input_reg);
2933
+ }
2934
+
2935
+
2936
void LCodeGen::DoPower(LPower* instr) {
  // Math.pow via the C runtime.  The exponent may be a double, an
  // int32, or a tagged value (smi or heap number); a tagged exponent
  // that is not a number deoptimizes.
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
  Representation exponent_type = instr->hydrogen()->right()->representation();

  if (exponent_type.IsDouble()) {
    // It is safe to use ebx directly since the instruction is marked
    // as a call.
    __ PrepareCallCFunction(4, ebx);
    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
    __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     4);
  } else if (exponent_type.IsInteger32()) {
    // It is safe to use ebx directly since the instruction is marked
    // as a call.
    ASSERT(!ToRegister(right).is(ebx));
    __ PrepareCallCFunction(4, ebx);
    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
    __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
    __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
                     4);
  } else {
    ASSERT(exponent_type.IsTagged());
    CpuFeatures::Scope scope(SSE2);
    Register right_reg = ToRegister(right);

    // A smi exponent is untagged and converted directly; otherwise the
    // value must be a heap number whose double value is loaded.
    Label non_smi, call;
    __ test(right_reg, Immediate(kSmiTagMask));
    __ j(not_zero, &non_smi);
    __ SmiUntag(right_reg);
    __ cvtsi2sd(result_reg, Operand(right_reg));
    __ jmp(&call);

    __ bind(&non_smi);
    // It is safe to use ebx directly since the instruction is marked
    // as a call.
    ASSERT(!right_reg.is(ebx));
    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
    DeoptimizeIf(not_equal, instr->environment());
    __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));

    __ bind(&call);
    __ PrepareCallCFunction(4, ebx);
    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
    __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     4);
  }

  // Return value is in st(0) on ia32.
  // Store it into the (fixed) result register.
  __ sub(Operand(esp), Immediate(kDoubleSize));
  __ fstp_d(Operand(esp, 0));
  __ movdbl(result_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
}
2994
+
2995
+
2996
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  // Math.log computed in place: NaN for negative (or NaN) inputs,
  // -Infinity for zero, and the x87 fyl2x sequence for positive
  // inputs.
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Label positive, done, zero;
  __ xorps(xmm0, xmm0);
  __ ucomisd(input_reg, xmm0);
  __ j(above, &positive, Label::kNear);
  __ j(equal, &zero, Label::kNear);
  // Negative or unordered input: the result is NaN.
  ExternalReference nan = ExternalReference::address_of_nan();
  __ movdbl(input_reg, Operand::StaticVariable(nan));
  __ jmp(&done, Label::kNear);
  __ bind(&zero);
  // Build -Infinity (high word 0xFFF00000, low word 0) on the stack
  // and load it.
  __ push(Immediate(0xFFF00000));
  __ push(Immediate(0));
  __ movdbl(input_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
  // ln(x) = ln(2) * log2(x), computed by fldln2 followed by fyl2x.
  __ fldln2();
  __ sub(Operand(esp), Immediate(kDoubleSize));
  __ movdbl(Operand(esp, 0), input_reg);
  __ fld_d(Operand(esp, 0));
  __ fyl2x();
  __ fstp_d(Operand(esp, 0));
  __ movdbl(input_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
  __ bind(&done);
}
3024
+
3025
+
3026
+ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3027
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3028
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
3029
+ TranscendentalCacheStub::UNTAGGED);
3030
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
3031
+ }
3032
+
3033
+
3034
+ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3035
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3036
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
3037
+ TranscendentalCacheStub::UNTAGGED);
3038
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
3039
+ }
3040
+
3041
+
3042
+ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3043
+ switch (instr->op()) {
3044
+ case kMathAbs:
3045
+ DoMathAbs(instr);
3046
+ break;
3047
+ case kMathFloor:
3048
+ DoMathFloor(instr);
3049
+ break;
3050
+ case kMathRound:
3051
+ DoMathRound(instr);
3052
+ break;
3053
+ case kMathSqrt:
3054
+ DoMathSqrt(instr);
3055
+ break;
3056
+ case kMathPowHalf:
3057
+ DoMathPowHalf(instr);
3058
+ break;
3059
+ case kMathCos:
3060
+ DoMathCos(instr);
3061
+ break;
3062
+ case kMathSin:
3063
+ DoMathSin(instr);
3064
+ break;
3065
+ case kMathLog:
3066
+ DoMathLog(instr);
3067
+ break;
3068
+
3069
+ default:
3070
+ UNREACHABLE();
3071
+ }
3072
+ }
3073
+
3074
+
3075
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  // Invokes a function that is not statically known (in edi) through
  // the full invoke sequence, including arguments adaption.
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(instr->HasPointerMap());
  ASSERT(instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  LEnvironment* env = instr->deoptimization_environment();
  RecordPosition(pointers->position());
  RegisterEnvironmentForDeoptimization(env);
  // The generator records a safepoint after the call emitted by
  // InvokeFunction.
  SafepointGenerator generator(this, pointers, env->deoptimization_index());
  ParameterCount count(instr->arity());
  __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
}
3088
+
3089
+
3090
+ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3091
+ ASSERT(ToRegister(instr->context()).is(esi));
3092
+ ASSERT(ToRegister(instr->key()).is(ecx));
3093
+ ASSERT(ToRegister(instr->result()).is(eax));
3094
+
3095
+ int arity = instr->arity();
3096
+ Handle<Code> ic = isolate()->stub_cache()->
3097
+ ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
3098
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
3099
+ }
3100
+
3101
+
3102
+ void LCodeGen::DoCallNamed(LCallNamed* instr) {
3103
+ ASSERT(ToRegister(instr->context()).is(esi));
3104
+ ASSERT(ToRegister(instr->result()).is(eax));
3105
+
3106
+ int arity = instr->arity();
3107
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3108
+ Handle<Code> ic =
3109
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
3110
+ __ mov(ecx, instr->name());
3111
+ CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
3112
+ }
3113
+
3114
+
3115
+ void LCodeGen::DoCallFunction(LCallFunction* instr) {
3116
+ ASSERT(ToRegister(instr->context()).is(esi));
3117
+ ASSERT(ToRegister(instr->result()).is(eax));
3118
+
3119
+ int arity = instr->arity();
3120
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
3121
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
3122
+ __ Drop(1);
3123
+ }
3124
+
3125
+
3126
+ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3127
+ ASSERT(ToRegister(instr->context()).is(esi));
3128
+ ASSERT(ToRegister(instr->result()).is(eax));
3129
+
3130
+ int arity = instr->arity();
3131
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3132
+ Handle<Code> ic =
3133
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
3134
+ __ mov(ecx, instr->name());
3135
+ CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
3136
+ }
3137
+
3138
+
3139
+ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3140
+ ASSERT(ToRegister(instr->result()).is(eax));
3141
+ __ mov(edi, instr->target());
3142
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
3143
+ }
3144
+
3145
+
3146
+ void LCodeGen::DoCallNew(LCallNew* instr) {
3147
+ ASSERT(ToRegister(instr->context()).is(esi));
3148
+ ASSERT(ToRegister(instr->constructor()).is(edi));
3149
+ ASSERT(ToRegister(instr->result()).is(eax));
3150
+
3151
+ Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
3152
+ __ Set(eax, Immediate(instr->arity()));
3153
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
3154
+ }
3155
+
3156
+
3157
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  // Delegate to the shared runtime-call helper; RESTORE_CONTEXT reloads
  // the context from the frame after the call.
  CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT);
}
3160
+
3161
+
3162
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  // Stores a named property either in-object or in the out-of-line
  // properties array, installing a new map on a transition and emitting
  // a write barrier when required.
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  int offset = instr->offset();

  if (!instr->transition().is_null()) {
    // Install the transition map before the store.
    __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
  }

  // Do the store.
  if (instr->is_in_object()) {
    __ mov(FieldOperand(object, offset), value);
    if (instr->needs_write_barrier()) {
      Register temp = ToRegister(instr->TempAt(0));
      // Update the write barrier for the object for in-object properties.
      __ RecordWrite(object, offset, value, temp);
    }
  } else {
    Register temp = ToRegister(instr->TempAt(0));
    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
    __ mov(FieldOperand(temp, offset), value);
    if (instr->needs_write_barrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWrite(temp, offset, value, object);
    }
  }
}
3190
+
3191
+
3192
+ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3193
+ ASSERT(ToRegister(instr->context()).is(esi));
3194
+ ASSERT(ToRegister(instr->object()).is(edx));
3195
+ ASSERT(ToRegister(instr->value()).is(eax));
3196
+
3197
+ __ mov(ecx, instr->name());
3198
+ Handle<Code> ic = instr->strict_mode()
3199
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
3200
+ : isolate()->builtins()->StoreIC_Initialize();
3201
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
3202
+ }
3203
+
3204
+
3205
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  // Deoptimize unless index < length.  The unsigned above_equal compare
  // also rejects negative indices (which wrap to large unsigned
  // values).
  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
  DeoptimizeIf(above_equal, instr->environment());
}
3209
+
3210
+
3211
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {
  // Stores into an external (typed) array, narrowing the value to the
  // array's element width.
  ExternalArrayType array_type = instr->array_type();
  Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
                                            instr->key(), array_type));
  if (array_type == kExternalFloatArray) {
    // Narrow the double to single precision before storing.
    __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
    __ movss(operand, xmm0);
  } else if (array_type == kExternalDoubleArray) {
    __ movdbl(operand, ToDoubleRegister(instr->value()));
  } else {
    // Integer-typed arrays: store the low 8, 16, or 32 bits.
    Register value = ToRegister(instr->value());
    switch (array_type) {
      case kExternalPixelArray:
      case kExternalByteArray:
      case kExternalUnsignedByteArray:
        __ mov_b(operand, value);
        break;
      case kExternalShortArray:
      case kExternalUnsignedShortArray:
        __ mov_w(operand, value);
        break;
      case kExternalIntArray:
      case kExternalUnsignedIntArray:
        __ mov(operand, value);
        break;
      case kExternalFloatArray:
      case kExternalDoubleArray:
        // Handled above; listed to keep the switch exhaustive.
        UNREACHABLE();
        break;
    }
  }
}
3244
+
3245
+
3246
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  // Stores into a fast-elements backing store.  A constant key is
  // folded into the displacement; a write barrier is emitted when the
  // stored value may be a heap object.
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  // key is only a register in the non-constant case.
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset =
        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
    __ mov(FieldOperand(elements, offset), value);
  } else {
    __ mov(FieldOperand(elements,
                        key,
                        times_pointer_size,
                        FixedArray::kHeaderSize),
           value);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Compute address of modified element and store it into key
    // register (key is clobbered from here on).
    __ lea(key,
           FieldOperand(elements,
                        key,
                        times_pointer_size,
                        FixedArray::kHeaderSize));
    __ RecordWrite(elements, key, value);
  }
}
3276
+
3277
+
3278
+ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3279
+ ASSERT(ToRegister(instr->context()).is(esi));
3280
+ ASSERT(ToRegister(instr->object()).is(edx));
3281
+ ASSERT(ToRegister(instr->key()).is(ecx));
3282
+ ASSERT(ToRegister(instr->value()).is(eax));
3283
+
3284
+ Handle<Code> ic = instr->strict_mode()
3285
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3286
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
3287
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
3288
+ }
3289
+
3290
+
3291
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  // Loads the character code at a given index of a string.  Flat ASCII
  // and two-byte strings are handled inline; cons strings with a
  // non-empty second part and other non-flat strings take the deferred
  // (runtime) path.
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
   private:
    LStringCharCodeAt* instr_;
  };

  Register string = ToRegister(instr->string());
  Register index = no_reg;
  int const_index = -1;
  if (instr->index()->IsConstantOperand()) {
    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
    if (!Smi::IsValid(const_index)) {
      // Guaranteed to be out of bounds because of the assert above.
      // So the bounds check that must dominate this instruction must
      // have deoptimized already.
      if (FLAG_debug_code) {
        __ Abort("StringCharCodeAt: out of bounds index.");
      }
      // No code needs to be generated.
      return;
    }
  } else {
    index = ToRegister(instr->index());
  }
  Register result = ToRegister(instr->result());

  DeferredStringCharCodeAt* deferred =
      new DeferredStringCharCodeAt(this, instr);

  Label flat_string, ascii_string, done;

  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for non-flat strings.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &flat_string, Label::kNear);

  // Handle non-flat strings.
  __ test(result, Immediate(kIsConsStringMask));
  __ j(zero, deferred->entry());

  // ConsString.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory()->empty_string()));
  __ j(not_equal, deferred->entry());
  // Get the first of the two strings and load its instance type.
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
  // If the first cons component is also non-flat, then go to runtime.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(not_zero, deferred->entry());

  // Check for ASCII or two-byte string.
  __ bind(&flat_string);
  STATIC_ASSERT(kAsciiStringTag != 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii_string, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  if (instr->index()->IsConstantOperand()) {
    __ movzx_w(result,
               FieldOperand(string,
                            SeqTwoByteString::kHeaderSize +
                            (kUC16Size * const_index)));
  } else {
    __ movzx_w(result, FieldOperand(string,
                                    index,
                                    times_2,
                                    SeqTwoByteString::kHeaderSize));
  }
  __ jmp(&done, Label::kNear);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii_string);
  if (instr->index()->IsConstantOperand()) {
    __ movzx_b(result, FieldOperand(string,
                                    SeqAsciiString::kHeaderSize + const_index));
  } else {
    __ movzx_b(result, FieldOperand(string,
                                    index,
                                    times_1,
                                    SeqAsciiString::kHeaderSize));
  }
  __ bind(&done);
  __ bind(deferred->exit());
}
3394
+
3395
+
3396
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  // Deferred (runtime) path for DoStringCharCodeAt: calls
  // Runtime::kStringCharCodeAt with the string and a smi index.
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ push(Immediate(Smi::FromInt(const_index)));
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  if (FLAG_debug_code) {
    __ AbortIfNotSmi(eax);
  }
  // The runtime returns the character code as a smi in eax; untag it
  // and place it in the result's safepoint slot.
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}
3425
+
3426
+
3427
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  // Converts a character code into a one-character string using the
  // single-character string cache; non-ASCII codes and cache misses go
  // through the deferred runtime path.
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmp(char_code, String::kMaxAsciiCharCode);
  __ j(above, deferred->entry());
  __ Set(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  // An undefined cache entry means the string is not cached yet: take
  // the deferred path to create it.
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}
3455
+
3456
+
3457
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  // Deferred path for DoStringCharFromCode: build the string through
  // Runtime::kCharFromCode.
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // The runtime expects its argument as a smi.
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(result, eax);
}
3472
+
3473
+
3474
+ void LCodeGen::DoStringLength(LStringLength* instr) {
3475
+ Register string = ToRegister(instr->string());
3476
+ Register result = ToRegister(instr->result());
3477
+ __ mov(result, FieldOperand(string, String::kLengthOffset));
3478
+ }
3479
+
3480
+
3481
+ void LCodeGen::DoStringAdd(LStringAdd* instr) {
3482
+ if (instr->left()->IsConstantOperand()) {
3483
+ __ push(ToImmediate(instr->left()));
3484
+ } else {
3485
+ __ push(ToOperand(instr->left()));
3486
+ }
3487
+ if (instr->right()->IsConstantOperand()) {
3488
+ __ push(ToImmediate(instr->right()));
3489
+ } else {
3490
+ __ push(ToOperand(instr->right()));
3491
+ }
3492
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3493
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
3494
+ }
3495
+
3496
+
3497
+ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3498
+ LOperand* input = instr->InputAt(0);
3499
+ ASSERT(input->IsRegister() || input->IsStackSlot());
3500
+ LOperand* output = instr->result();
3501
+ ASSERT(output->IsDoubleRegister());
3502
+ __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
3503
+ }
3504
+
3505
+
3506
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  // Tags an int32 as a smi in place.  Values that do not fit in a smi
  // overflow the tag shift and are boxed in a heap number on the
  // deferred path instead.
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}
3525
+
3526
+
3527
// Slow path for DoNumberTagI: the int32 overflowed the smi range, so box it
// in a new heap number (inline allocation first, runtime call as fallback).
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
  Label slow;
  Register reg = ToRegister(instr->InputAt(0));
  // Pick a scratch register distinct from |reg| for the allocation.
  Register tmp = reg.is(eax) ? ecx : eax;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  // There was overflow, so bits 30 and 31 of the original integer
  // disagree. Try to allocate a heap number in new space and store
  // the value in there. If that fails, call the runtime system.
  Label done;
  __ SmiUntag(reg);
  // The shifted-out top bit is recovered by flipping bit 31 (the shift
  // overflow means bits 30 and 31 differed).
  __ xor_(reg, 0x80000000);
  __ cvtsi2sd(xmm0, Operand(reg));
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ StoreToSafepointRegisterSlot(reg, Immediate(0));

  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  if (!reg.is(eax)) __ mov(reg, eax);

  // Done. Put the value in xmm0 into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
  __ StoreToSafepointRegisterSlot(reg, reg);
}
3564
+
3565
+
3566
// Boxes a double (XMM register) into a freshly allocated heap number.
// Allocation is attempted inline; the deferred code calls the runtime.
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->TempAt(0));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    // Inline allocation disabled: always take the runtime path.
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  // Store the double payload into the newly allocated heap number.
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
3589
+
3590
+
3591
// Slow path for DoNumberTagD: allocates the heap number via the runtime.
// The caller stores the double payload after the deferred exit.
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Set(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  // Runtime result (the new heap number) arrives in eax.
  __ StoreToSafepointRegisterSlot(reg, eax);
}
3602
+
3603
+
3604
// Tags an int32 as a smi in place. Hydrogen guarantees no overflow here;
// the overflowing case is handled by LNumberTagI instead.
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(input));
}
3610
+
3611
+
3612
// Untags a smi to an int32 in place, optionally deoptimizing first if the
// value turns out not to be a smi.
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(ToRegister(input), Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ SmiUntag(ToRegister(input));
}
3621
+
3622
+
3623
// Converts a tagged number in |input_reg| to a double in |result_reg|.
// Smis are untagged and converted; heap numbers are loaded directly;
// undefined becomes NaN; anything else deoptimizes.
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                LEnvironment* env) {
  Label load_smi, heap_number, done;

  // Smi check.
  __ test(input_reg, Immediate(kSmiTagMask));
  __ j(zero, &load_smi, Label::kNear);

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Not a heap number: only undefined is accepted (converted to NaN).
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, env);

  // Convert undefined to NaN.
  ExternalReference nan = ExternalReference::address_of_nan();
  __ movdbl(result_reg, Operand::StaticVariable(nan));
  __ jmp(&done, Label::kNear);

  // Heap number to XMM conversion.
  __ bind(&heap_number);
  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ jmp(&done, Label::kNear);

  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiUntag(input_reg);  // Untag smi before converting to float.
  __ cvtsi2sd(result_reg, Operand(input_reg));
  __ SmiTag(input_reg);  // Retag smi so the input is left unchanged.
  __ bind(&done);
}
3657
+
3658
+
3659
// Deferred-code adapter that routes the slow path of LTaggedToI to
// LCodeGen::DoDeferredTaggedToI.
class DeferredTaggedToI: public LDeferredCode {
 public:
  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
      : LDeferredCode(codegen), instr_(instr) { }
  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
 private:
  LTaggedToI* instr_;  // The instruction whose slow path this generates.
};
3667
+
3668
+
3669
// Slow path for LTaggedToI (the input was not a smi): converts a heap number
// to int32 in place. Truncating conversions (bitwise-op semantics) also map
// undefined to 0 and use x87 fisttp when SSE3 is available; non-truncating
// conversions deoptimize on any loss of precision, NaN, or (optionally) -0.
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Label done, heap_number;
  Register input_reg = ToRegister(instr->InputAt(0));

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());

  if (instr->truncating()) {
    __ j(equal, &heap_number, Label::kNear);
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ cmp(input_reg, factory()->undefined_value());
    DeoptimizeIf(not_equal, instr->environment());
    __ mov(input_reg, 0);
    __ jmp(&done, Label::kNear);

    __ bind(&heap_number);
    if (CpuFeatures::IsSupported(SSE3)) {
      CpuFeatures::Scope scope(SSE3);
      Label convert;
      // Use more powerful conversion when sse3 is available.
      // Load x87 register with heap number.
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      // Get exponent alone and check for too-big exponent.
      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ and_(input_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
      __ j(less, &convert, Label::kNear);
      // Pop FPU stack before deoptimizing.
      __ ffree(0);
      __ fincstp();
      DeoptimizeIf(no_condition, instr->environment());

      // Reserve space for 64 bit answer.
      __ bind(&convert);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      // Do conversion, which cannot fail because we checked the exponent.
      __ fisttp_d(Operand(esp, 0));
      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
    } else {
      // No SSE3: cvttsd2si returns 0x80000000 on failure; in that case
      // accept the result only if the input really was kMinInt.
      XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ cvttsd2si(input_reg, Operand(xmm0));
      __ cmp(input_reg, 0x80000000u);
      __ j(not_equal, &done);
      // Check if the input was 0x80000000 (kMinInt).
      // If no, then we got an overflow and we deoptimize.
      ExternalReference min_int = ExternalReference::address_of_min_int();
      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
      __ ucomisd(xmm_temp, xmm0);
      DeoptimizeIf(not_equal, instr->environment());
      DeoptimizeIf(parity_even, instr->environment());  // NaN.
    }
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(not_equal, instr->environment());

    // Non-truncating: convert, then round-trip back to double and compare
    // to make sure no precision was lost.
    XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, Operand(xmm0));
    __ cvtsi2sd(xmm_temp, Operand(input_reg));
    __ ucomisd(xmm0, xmm_temp);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Result is zero: deoptimize if the input was -0 (sign bit set).
      __ test(input_reg, Operand(input_reg));
      __ j(not_zero, &done);
      __ movmskpd(input_reg, xmm0);
      __ and_(input_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
  }
  __ bind(&done);
}
3747
+
3748
+
3749
// Converts a tagged value to int32 in place: smis are untagged inline,
// everything else goes through the deferred heap-number path.
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);

  // Smi check.
  __ test(input_reg, Immediate(kSmiTagMask));
  __ j(not_zero, deferred->entry());

  // Smi to int32 conversion
  __ SmiUntag(input_reg);  // Untag smi.

  __ bind(deferred->exit());
}
3767
+
3768
+
3769
// Converts a tagged number register to a double register; the heavy
// lifting (smi / heap number / undefined dispatch) is in EmitNumberUntagD.
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg, instr->environment());
}
3780
+
3781
+
3782
// Converts a double (XMM) to int32. Truncating conversions implement JS
// bitwise-op semantics (module 2^32 truncation): cvttsd2si first, then a
// manual mantissa-shift fallback (or x87 fisttp with SSE3) when the fast
// conversion overflows. Non-truncating conversions deoptimize on precision
// loss, NaN, or (optionally) -0.
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    __ cvttsd2si(result_reg, Operand(input_reg));
    // 0x80000000 is cvttsd2si's "invalid" indicator; fall into the slow path.
    __ cmp(result_reg, 0x80000000u);
    if (CpuFeatures::IsSupported(SSE3)) {
      // This will deoptimize if the exponent of the input is out of range.
      CpuFeatures::Scope scope(SSE3);
      Label convert, done;
      __ j(not_equal, &done, Label::kNear);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ movdbl(Operand(esp, 0), input_reg);
      // Get exponent alone and check for too-big exponent.
      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
      __ and_(result_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
      __ j(less, &convert, Label::kNear);
      __ add(Operand(esp), Immediate(kDoubleSize));
      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&convert);
      // Do conversion, which cannot fail because we checked the exponent.
      __ fld_d(Operand(esp, 0));
      __ fisttp_d(Operand(esp, 0));
      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
      __ bind(&done);
    } else {
      Label done;
      Register temp_reg = ToRegister(instr->TempAt(0));
      XMMRegister xmm_scratch = xmm0;

      // If cvttsd2si succeeded, we're done. Otherwise, we attempt
      // manual conversion.
      __ j(not_equal, &done, Label::kNear);

      // Get high 32 bits of the input in result_reg and temp_reg.
      __ pshufd(xmm_scratch, input_reg, 1);
      __ movd(Operand(temp_reg), xmm_scratch);
      __ mov(result_reg, temp_reg);

      // Prepare negation mask in temp_reg.
      __ sar(temp_reg, kBitsPerInt - 1);

      // Extract the exponent from result_reg and subtract adjusted
      // bias from it. The adjustment is selected in a way such that
      // when the difference is zero, the answer is in the low 32 bits
      // of the input, otherwise a shift has to be performed.
      __ shr(result_reg, HeapNumber::kExponentShift);
      __ and_(result_reg,
              HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
      __ sub(Operand(result_reg),
             Immediate(HeapNumber::kExponentBias +
                       HeapNumber::kExponentBits +
                       HeapNumber::kMantissaBits));
      // Don't handle big (> kMantissaBits + kExponentBits == 63) or
      // special exponents.
      DeoptimizeIf(greater, instr->environment());

      // Zero out the sign and the exponent in the input (by shifting
      // it to the left) and restore the implicit mantissa bit,
      // i.e. convert the input to unsigned int64 shifted left by
      // kExponentBits.
      ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
      // Minus zero has the most significant bit set and the other
      // bits cleared.
      __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
      __ psllq(input_reg, HeapNumber::kExponentBits);
      __ por(input_reg, xmm_scratch);

      // Get the amount to shift the input right in xmm_scratch.
      __ neg(result_reg);
      __ movd(xmm_scratch, Operand(result_reg));

      // Shift the input right and extract low 32 bits.
      __ psrlq(input_reg, xmm_scratch);
      __ movd(Operand(result_reg), input_reg);

      // Use the prepared mask in temp_reg to negate the result if necessary.
      __ xor_(result_reg, Operand(temp_reg));
      __ sub(result_reg, Operand(temp_reg));
      __ bind(&done);
    }
  } else {
    // Non-truncating: convert, round-trip back to double and compare to
    // catch any loss of precision; also reject NaN.
    Label done;
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cvtsi2sd(xmm0, Operand(result_reg));
    __ ucomisd(xmm0, input_reg);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // The integer converted back is equal to the original. We
      // only have to test if we got -0 as an input.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(result_reg, input_reg);
      // Bit 0 contains the sign of the double in input_reg.
      // If input was positive, we are ok and return 0, otherwise
      // deoptimize.
      __ and_(result_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
    __ bind(&done);
  }
}
3897
+
3898
+
3899
// Deoptimizes unless the input is a smi.
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ test(ToRegister(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr->environment());
}
3904
+
3905
+
3906
// Deoptimizes if the input is a smi (i.e. requires a heap object).
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ test(ToRegister(input), Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
}
3911
+
3912
+
3913
// Deoptimizes unless the input object's instance type satisfies the check
// described by the hydrogen instruction: either an [first, last] interval
// check or a mask-and-tag check on the instance-type byte.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  // Load the object's map; the instance type lives in the map.
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      // Single-bit mask: a test instruction suffices.
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      // General case: mask the instance type and compare against the tag.
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmpb(Operand(temp), tag);
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}
3956
+
3957
+
3958
// Deoptimizes unless the input is the exact expected function object.
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  ASSERT(instr->InputAt(0)->IsRegister());
  Register reg = ToRegister(instr->InputAt(0));
  __ cmp(reg, instr->hydrogen()->target());
  DeoptimizeIf(not_equal, instr->environment());
}
3964
+
3965
+
3966
// Deoptimizes unless the input object has the exact expected map.
void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         instr->hydrogen()->map());
  DeoptimizeIf(not_equal, instr->environment());
}
3974
+
3975
+
3976
// Clamps a double to the uint8 range [0, 255] via the macro-assembler helper
// (xmm0 is used as scratch).
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
}
3981
+
3982
+
3983
// Clamps an int32 to the uint8 range [0, 255] in place.
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}
3988
+
3989
+
3990
// Clamps a tagged value to uint8 in place: smis are untagged and clamped,
// heap numbers are clamped as doubles, undefined becomes 0, and anything
// else deoptimizes.
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}
4022
+
4023
+
4024
// Loads a heap-object constant into |result|. Objects in new space may move
// during GC, so they are referenced indirectly through a global property
// cell; old-space objects are embedded directly in the code.
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(object);
    __ mov(result, Operand::Cell(cell));
  } else {
    __ mov(result, object);
  }
}
4033
+
4034
+
4035
// Walks the prototype chain from |prototype| to |holder|, deoptimizing if
// any object on the chain (including the holder) has an unexpected map.
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->TempAt(0));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Handle<Map>(current_prototype->map()));
    DeoptimizeIf(not_equal, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         Handle<Map>(current_prototype->map()));
  DeoptimizeIf(not_equal, instr->environment());
}
4060
+
4061
+
4062
// Creates an array literal. Shallow literals of bounded length go through
// the FastCloneShallowArrayStub (copy-on-write or cloning variant); nested
// or oversized literals fall back to the runtime.
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  // Setup the parameters to the stub/runtime call: literals array,
  // literal index, and the constant elements.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_elements()));

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  }
}
4088
+
4089
+
4090
// Creates an object literal via the runtime: shallow literals use the
// specialized shallow-clone entry point, nested ones the general one.
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Setup the parameters to the stub/runtime call: literals array, literal
  // index, constant properties, and the flags word.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_properties()));
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  flags |= instr->hydrogen()->has_function()
      ? ObjectLiteral::kHasFunction
      : ObjectLiteral::kNoFlags;
  __ push(Immediate(Smi::FromInt(flags)));

  // Pick the right runtime function to call.
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
  } else {
    CallRuntime(Runtime::kCreateObjectLiteralShallow,
                4,
                instr,
                CONTEXT_ADJUSTED);
  }
}
4115
+
4116
+
4117
// Transitions the object in eax to fast properties via the runtime.
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
}
4122
+
4123
+
4124
// Creates a regexp literal. The boilerplate regexp is cached in the
// function's literals array (materialized via the runtime on first use);
// every evaluation then clones it by a shallow field-by-field copy.
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // edi = JS function.
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  // An undefined slot means the boilerplate has not been materialized yet.
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  // Inline allocation failed; allocate through the runtime, preserving the
  // boilerplate (ebx) across the call.
  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  // Copy the trailing word when the size is an odd number of pointers.
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}
4174
+
4175
+
4176
// Creates a closure for a function literal. Non-pretenured closures without
// literals use the fast-path stub; otherwise the runtime is called with the
// context, shared info, and pretenure flag.
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(
        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
  } else {
    __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
                      ? factory()->true_value()
                      : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
  }
}
4195
+
4196
+
4197
// Implements the generic `typeof` operator by calling the runtime.
void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->InputAt(0);
  if (input->IsConstantOperand()) {
    __ push(ToImmediate(input));
  } else {
    __ push(ToOperand(input));
  }
  CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
}
4206
+
4207
+
4208
// Materializes `typeof x == "literal"` as a boolean value: EmitTypeofIs
// produces the comparison and the labels; this wrapper selects true/false.
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Label true_label;
  Label false_label;
  Label done;

  Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                  &false_label,
                                                  input,
                                                  instr->type_literal());
  __ j(final_branch_condition, &true_label, Label::kNear);
  __ bind(&false_label);
  __ mov(result, factory()->false_value());
  __ jmp(&done, Label::kNear);

  __ bind(&true_label);
  __ mov(result, factory()->true_value());

  __ bind(&done);
}
4229
+
4230
+
4231
// Branch form of `typeof x == "literal"`: jumps straight to the true/false
// basic blocks instead of materializing a boolean.
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}
4245
+
4246
+
4247
// Emits the comparison for `typeof input == type_name`. Jumps to
// true_label/false_label for the cases decided early and returns the
// condition the caller should branch on for the final comparison.
// May clobber |input| (it is reused as a scratch for map loads).
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_symbol())) {
    // "number": smis and heap numbers.
    __ JumpIfSmi(input, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_symbol())) {
    // "string": string types that are not undetectable.
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    // Regular expressions => 'function' (they are callable).
    __ CmpInstanceType(input, JS_REGEXP_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    // null reports "object".
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label);
    // Regular expressions => 'function', not 'object'.
    __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
    __ j(above_equal, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    // Unknown type literal: always false.
    final_branch_condition = not_equal;
    __ jmp(false_label);
    // A dead branch instruction will be generated after this point.
  }

  return final_branch_condition;
}
4312
+
4313
+
4314
// Materializes "was the current function invoked with `new`?" as a boolean.
// EmitIsConstructCall leaves the answer in the flags (equal == true).
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
  Register result = ToRegister(instr->result());
  Label true_label;
  Label done;

  EmitIsConstructCall(result);
  __ j(equal, &true_label, Label::kNear);

  __ mov(result, factory()->false_value());
  __ jmp(&done, Label::kNear);

  __ bind(&true_label);
  __ mov(result, factory()->true_value());

  __ bind(&done);
}
4330
+
4331
+
4332
// Branch form of DoIsConstructCall: branches on the flags set by
// EmitIsConstructCall instead of materializing a boolean.
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp);
  EmitBranch(true_block, false_block, equal);
}
4340
+
4341
+
4342
// Sets the condition flags so that 'equal' holds iff the frame that
// called the current function is a construct frame (marked with
// StackFrame::CONSTRUCT). Clobbers |temp|; does not branch itself --
// callers test the flags afterwards.
void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists: its context slot holds
  // the ARGUMENTS_ADAPTOR sentinel, and the real caller frame is one
  // level further up the FP chain.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame; leaves ZF set iff it is a
  // construct frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}
4358
+
4359
+
4360
// Intentionally emits no code: the instruction exists only so that the
// environment after the preceding call is captured in the safepoint /
// deoptimization data.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // No code for lazy bailout instruction. Used to capture environment after a
  // call for populating the safepoint data with deoptimization data.
}
4364
+
4365
+
4366
// Unconditional deoptimization: no_condition makes DeoptimizeIf always
// bail out to unoptimized code using this instruction's environment.
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}
4369
+
4370
+
4371
+ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
4372
+ LOperand* obj = instr->object();
4373
+ LOperand* key = instr->key();
4374
+ __ push(ToOperand(obj));
4375
+ if (key->IsConstantOperand()) {
4376
+ __ push(ToImmediate(key));
4377
+ } else {
4378
+ __ push(ToOperand(key));
4379
+ }
4380
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4381
+ LPointerMap* pointers = instr->pointer_map();
4382
+ LEnvironment* env = instr->deoptimization_environment();
4383
+ RecordPosition(pointers->position());
4384
+ RegisterEnvironmentForDeoptimization(env);
4385
+ // Create safepoint generator that will also ensure enough space in the
4386
+ // reloc info for patching in deoptimization (since this is invoking a
4387
+ // builtin)
4388
+ SafepointGenerator safepoint_generator(this,
4389
+ pointers,
4390
+ env->deoptimization_index());
4391
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4392
+ __ push(Immediate(Smi::FromInt(strict_mode_flag())));
4393
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
4394
+ }
4395
+
4396
+
4397
+ void LCodeGen::DoStackCheck(LStackCheck* instr) {
4398
+ // Perform stack overflow check.
4399
+ Label done;
4400
+ ExternalReference stack_limit =
4401
+ ExternalReference::address_of_stack_limit(isolate());
4402
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
4403
+ __ j(above_equal, &done, Label::kNear);
4404
+
4405
+ StackCheckStub stub;
4406
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
4407
+ __ bind(&done);
4408
+ }
4409
+
4410
+
4411
// On-stack-replacement entry point. Emits no code; records the current
// assembler PC offset and registers the OSR environment (with its spill
// slot assignments) for deoptimization.
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment);
  // Only one OSR entry per compiled function is expected.
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}
4426
+
4427
+
4428
+ void LCodeGen::DoIn(LIn* instr) {
4429
+ LOperand* obj = instr->object();
4430
+ LOperand* key = instr->key();
4431
+ if (key->IsConstantOperand()) {
4432
+ __ push(ToImmediate(key));
4433
+ } else {
4434
+ __ push(ToOperand(key));
4435
+ }
4436
+ if (obj->IsConstantOperand()) {
4437
+ __ push(ToImmediate(obj));
4438
+ } else {
4439
+ __ push(ToOperand(obj));
4440
+ }
4441
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4442
+ LPointerMap* pointers = instr->pointer_map();
4443
+ LEnvironment* env = instr->deoptimization_environment();
4444
+ RecordPosition(pointers->position());
4445
+ RegisterEnvironmentForDeoptimization(env);
4446
+ // Create safepoint generator that will also ensure enough space in the
4447
+ // reloc info for patching in deoptimization (since this is invoking a
4448
+ // builtin)
4449
+ SafepointGenerator safepoint_generator(this,
4450
+ pointers,
4451
+ env->deoptimization_index());
4452
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4453
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
4454
+ }
4455
+
4456
+
4457
+ #undef __
4458
+
4459
+ } } // namespace v8::internal
4460
+
4461
+ #endif // V8_TARGET_ARCH_IA32