libv8 3.11.8.17 → 3.16.14.0

Sign up to get free protection for your applications and access to all the features.
Files changed (754)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +1 -2
  3. data/Gemfile +1 -1
  4. data/Rakefile +6 -7
  5. data/lib/libv8/version.rb +1 -1
  6. data/vendor/v8/.gitignore +24 -3
  7. data/vendor/v8/AUTHORS +7 -0
  8. data/vendor/v8/ChangeLog +839 -0
  9. data/vendor/v8/DEPS +1 -1
  10. data/vendor/v8/Makefile.android +92 -0
  11. data/vendor/v8/OWNERS +11 -0
  12. data/vendor/v8/PRESUBMIT.py +71 -0
  13. data/vendor/v8/SConstruct +34 -39
  14. data/vendor/v8/build/android.gypi +56 -37
  15. data/vendor/v8/build/common.gypi +112 -30
  16. data/vendor/v8/build/gyp_v8 +1 -1
  17. data/vendor/v8/build/standalone.gypi +15 -11
  18. data/vendor/v8/include/v8-debug.h +9 -1
  19. data/vendor/v8/include/v8-preparser.h +4 -3
  20. data/vendor/v8/include/v8-profiler.h +25 -25
  21. data/vendor/v8/include/v8-testing.h +4 -3
  22. data/vendor/v8/include/v8.h +994 -540
  23. data/vendor/v8/preparser/preparser-process.cc +3 -3
  24. data/vendor/v8/samples/lineprocessor.cc +20 -27
  25. data/vendor/v8/samples/process.cc +18 -14
  26. data/vendor/v8/samples/shell.cc +16 -15
  27. data/vendor/v8/src/SConscript +15 -14
  28. data/vendor/v8/src/accessors.cc +169 -77
  29. data/vendor/v8/src/accessors.h +4 -0
  30. data/vendor/v8/src/allocation-inl.h +2 -2
  31. data/vendor/v8/src/allocation.h +7 -7
  32. data/vendor/v8/src/api.cc +810 -497
  33. data/vendor/v8/src/api.h +85 -60
  34. data/vendor/v8/src/arm/assembler-arm-inl.h +179 -22
  35. data/vendor/v8/src/arm/assembler-arm.cc +633 -264
  36. data/vendor/v8/src/arm/assembler-arm.h +264 -197
  37. data/vendor/v8/src/arm/builtins-arm.cc +117 -27
  38. data/vendor/v8/src/arm/code-stubs-arm.cc +1241 -700
  39. data/vendor/v8/src/arm/code-stubs-arm.h +35 -138
  40. data/vendor/v8/src/arm/codegen-arm.cc +285 -16
  41. data/vendor/v8/src/arm/codegen-arm.h +22 -0
  42. data/vendor/v8/src/arm/constants-arm.cc +5 -3
  43. data/vendor/v8/src/arm/constants-arm.h +24 -11
  44. data/vendor/v8/src/arm/debug-arm.cc +3 -3
  45. data/vendor/v8/src/arm/deoptimizer-arm.cc +382 -92
  46. data/vendor/v8/src/arm/disasm-arm.cc +61 -12
  47. data/vendor/v8/src/arm/frames-arm.h +0 -14
  48. data/vendor/v8/src/arm/full-codegen-arm.cc +332 -304
  49. data/vendor/v8/src/arm/ic-arm.cc +180 -259
  50. data/vendor/v8/src/arm/lithium-arm.cc +364 -316
  51. data/vendor/v8/src/arm/lithium-arm.h +512 -275
  52. data/vendor/v8/src/arm/lithium-codegen-arm.cc +1768 -809
  53. data/vendor/v8/src/arm/lithium-codegen-arm.h +97 -35
  54. data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +12 -5
  55. data/vendor/v8/src/arm/macro-assembler-arm.cc +439 -228
  56. data/vendor/v8/src/arm/macro-assembler-arm.h +116 -70
  57. data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +54 -44
  58. data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +3 -10
  59. data/vendor/v8/src/arm/simulator-arm.cc +272 -238
  60. data/vendor/v8/src/arm/simulator-arm.h +38 -8
  61. data/vendor/v8/src/arm/stub-cache-arm.cc +522 -895
  62. data/vendor/v8/src/array.js +101 -70
  63. data/vendor/v8/src/assembler.cc +270 -19
  64. data/vendor/v8/src/assembler.h +110 -15
  65. data/vendor/v8/src/ast.cc +79 -69
  66. data/vendor/v8/src/ast.h +255 -301
  67. data/vendor/v8/src/atomicops.h +7 -1
  68. data/vendor/v8/src/atomicops_internals_tsan.h +335 -0
  69. data/vendor/v8/src/bootstrapper.cc +481 -418
  70. data/vendor/v8/src/bootstrapper.h +4 -4
  71. data/vendor/v8/src/builtins.cc +498 -311
  72. data/vendor/v8/src/builtins.h +75 -47
  73. data/vendor/v8/src/checks.cc +2 -1
  74. data/vendor/v8/src/checks.h +8 -0
  75. data/vendor/v8/src/code-stubs-hydrogen.cc +253 -0
  76. data/vendor/v8/src/code-stubs.cc +249 -84
  77. data/vendor/v8/src/code-stubs.h +501 -169
  78. data/vendor/v8/src/codegen.cc +36 -18
  79. data/vendor/v8/src/codegen.h +25 -3
  80. data/vendor/v8/src/collection.js +54 -17
  81. data/vendor/v8/src/compilation-cache.cc +24 -16
  82. data/vendor/v8/src/compilation-cache.h +15 -6
  83. data/vendor/v8/src/compiler.cc +497 -195
  84. data/vendor/v8/src/compiler.h +246 -38
  85. data/vendor/v8/src/contexts.cc +64 -24
  86. data/vendor/v8/src/contexts.h +60 -29
  87. data/vendor/v8/src/conversions-inl.h +24 -14
  88. data/vendor/v8/src/conversions.h +7 -4
  89. data/vendor/v8/src/counters.cc +21 -12
  90. data/vendor/v8/src/counters.h +44 -16
  91. data/vendor/v8/src/cpu-profiler.h +1 -1
  92. data/vendor/v8/src/d8-debug.cc +2 -2
  93. data/vendor/v8/src/d8-readline.cc +13 -2
  94. data/vendor/v8/src/d8.cc +681 -273
  95. data/vendor/v8/src/d8.gyp +4 -4
  96. data/vendor/v8/src/d8.h +38 -18
  97. data/vendor/v8/src/d8.js +0 -617
  98. data/vendor/v8/src/data-flow.h +55 -0
  99. data/vendor/v8/src/date.js +1 -42
  100. data/vendor/v8/src/dateparser-inl.h +5 -1
  101. data/vendor/v8/src/debug-agent.cc +10 -15
  102. data/vendor/v8/src/debug-debugger.js +147 -149
  103. data/vendor/v8/src/debug.cc +323 -164
  104. data/vendor/v8/src/debug.h +26 -14
  105. data/vendor/v8/src/deoptimizer.cc +765 -290
  106. data/vendor/v8/src/deoptimizer.h +130 -28
  107. data/vendor/v8/src/disassembler.cc +10 -4
  108. data/vendor/v8/src/elements-kind.cc +7 -2
  109. data/vendor/v8/src/elements-kind.h +19 -0
  110. data/vendor/v8/src/elements.cc +607 -285
  111. data/vendor/v8/src/elements.h +36 -13
  112. data/vendor/v8/src/execution.cc +52 -31
  113. data/vendor/v8/src/execution.h +4 -4
  114. data/vendor/v8/src/extensions/externalize-string-extension.cc +5 -4
  115. data/vendor/v8/src/extensions/gc-extension.cc +5 -1
  116. data/vendor/v8/src/extensions/statistics-extension.cc +153 -0
  117. data/vendor/v8/src/{inspector.h → extensions/statistics-extension.h} +12 -23
  118. data/vendor/v8/src/factory.cc +101 -134
  119. data/vendor/v8/src/factory.h +36 -31
  120. data/vendor/v8/src/flag-definitions.h +102 -25
  121. data/vendor/v8/src/flags.cc +9 -5
  122. data/vendor/v8/src/frames-inl.h +10 -0
  123. data/vendor/v8/src/frames.cc +116 -26
  124. data/vendor/v8/src/frames.h +96 -12
  125. data/vendor/v8/src/full-codegen.cc +219 -74
  126. data/vendor/v8/src/full-codegen.h +63 -21
  127. data/vendor/v8/src/func-name-inferrer.cc +8 -7
  128. data/vendor/v8/src/func-name-inferrer.h +5 -3
  129. data/vendor/v8/src/gdb-jit.cc +71 -57
  130. data/vendor/v8/src/global-handles.cc +230 -101
  131. data/vendor/v8/src/global-handles.h +26 -27
  132. data/vendor/v8/src/globals.h +17 -19
  133. data/vendor/v8/src/handles-inl.h +59 -12
  134. data/vendor/v8/src/handles.cc +180 -200
  135. data/vendor/v8/src/handles.h +80 -11
  136. data/vendor/v8/src/hashmap.h +60 -40
  137. data/vendor/v8/src/heap-inl.h +107 -45
  138. data/vendor/v8/src/heap-profiler.cc +38 -19
  139. data/vendor/v8/src/heap-profiler.h +24 -14
  140. data/vendor/v8/src/heap.cc +1123 -738
  141. data/vendor/v8/src/heap.h +385 -146
  142. data/vendor/v8/src/hydrogen-instructions.cc +700 -217
  143. data/vendor/v8/src/hydrogen-instructions.h +1158 -472
  144. data/vendor/v8/src/hydrogen.cc +3319 -1662
  145. data/vendor/v8/src/hydrogen.h +411 -170
  146. data/vendor/v8/src/ia32/assembler-ia32-inl.h +46 -16
  147. data/vendor/v8/src/ia32/assembler-ia32.cc +131 -61
  148. data/vendor/v8/src/ia32/assembler-ia32.h +115 -57
  149. data/vendor/v8/src/ia32/builtins-ia32.cc +99 -5
  150. data/vendor/v8/src/ia32/code-stubs-ia32.cc +787 -495
  151. data/vendor/v8/src/ia32/code-stubs-ia32.h +10 -100
  152. data/vendor/v8/src/ia32/codegen-ia32.cc +227 -23
  153. data/vendor/v8/src/ia32/codegen-ia32.h +14 -0
  154. data/vendor/v8/src/ia32/deoptimizer-ia32.cc +428 -87
  155. data/vendor/v8/src/ia32/disasm-ia32.cc +28 -1
  156. data/vendor/v8/src/ia32/frames-ia32.h +6 -16
  157. data/vendor/v8/src/ia32/full-codegen-ia32.cc +280 -272
  158. data/vendor/v8/src/ia32/ic-ia32.cc +150 -250
  159. data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +1600 -517
  160. data/vendor/v8/src/ia32/lithium-codegen-ia32.h +90 -24
  161. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +10 -6
  162. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.h +2 -2
  163. data/vendor/v8/src/ia32/lithium-ia32.cc +405 -302
  164. data/vendor/v8/src/ia32/lithium-ia32.h +526 -271
  165. data/vendor/v8/src/ia32/macro-assembler-ia32.cc +378 -119
  166. data/vendor/v8/src/ia32/macro-assembler-ia32.h +62 -28
  167. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +43 -30
  168. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +2 -10
  169. data/vendor/v8/src/ia32/stub-cache-ia32.cc +492 -678
  170. data/vendor/v8/src/ic-inl.h +9 -4
  171. data/vendor/v8/src/ic.cc +836 -923
  172. data/vendor/v8/src/ic.h +228 -247
  173. data/vendor/v8/src/incremental-marking-inl.h +26 -30
  174. data/vendor/v8/src/incremental-marking.cc +276 -248
  175. data/vendor/v8/src/incremental-marking.h +29 -37
  176. data/vendor/v8/src/interface.cc +34 -25
  177. data/vendor/v8/src/interface.h +69 -25
  178. data/vendor/v8/src/interpreter-irregexp.cc +2 -2
  179. data/vendor/v8/src/isolate.cc +382 -76
  180. data/vendor/v8/src/isolate.h +109 -56
  181. data/vendor/v8/src/json-parser.h +217 -104
  182. data/vendor/v8/src/json-stringifier.h +745 -0
  183. data/vendor/v8/src/json.js +10 -132
  184. data/vendor/v8/src/jsregexp-inl.h +106 -0
  185. data/vendor/v8/src/jsregexp.cc +517 -285
  186. data/vendor/v8/src/jsregexp.h +145 -117
  187. data/vendor/v8/src/list-inl.h +35 -22
  188. data/vendor/v8/src/list.h +46 -19
  189. data/vendor/v8/src/lithium-allocator-inl.h +22 -2
  190. data/vendor/v8/src/lithium-allocator.cc +85 -70
  191. data/vendor/v8/src/lithium-allocator.h +21 -39
  192. data/vendor/v8/src/lithium.cc +259 -5
  193. data/vendor/v8/src/lithium.h +131 -32
  194. data/vendor/v8/src/liveedit-debugger.js +52 -3
  195. data/vendor/v8/src/liveedit.cc +393 -113
  196. data/vendor/v8/src/liveedit.h +7 -3
  197. data/vendor/v8/src/log-utils.cc +4 -2
  198. data/vendor/v8/src/log.cc +170 -140
  199. data/vendor/v8/src/log.h +62 -11
  200. data/vendor/v8/src/macro-assembler.h +17 -0
  201. data/vendor/v8/src/macros.py +2 -0
  202. data/vendor/v8/src/mark-compact-inl.h +3 -23
  203. data/vendor/v8/src/mark-compact.cc +801 -830
  204. data/vendor/v8/src/mark-compact.h +154 -47
  205. data/vendor/v8/src/marking-thread.cc +85 -0
  206. data/vendor/v8/src/{inspector.cc → marking-thread.h} +32 -24
  207. data/vendor/v8/src/math.js +12 -18
  208. data/vendor/v8/src/messages.cc +18 -8
  209. data/vendor/v8/src/messages.js +314 -261
  210. data/vendor/v8/src/mips/assembler-mips-inl.h +58 -6
  211. data/vendor/v8/src/mips/assembler-mips.cc +92 -75
  212. data/vendor/v8/src/mips/assembler-mips.h +54 -60
  213. data/vendor/v8/src/mips/builtins-mips.cc +116 -17
  214. data/vendor/v8/src/mips/code-stubs-mips.cc +919 -556
  215. data/vendor/v8/src/mips/code-stubs-mips.h +22 -131
  216. data/vendor/v8/src/mips/codegen-mips.cc +281 -6
  217. data/vendor/v8/src/mips/codegen-mips.h +22 -0
  218. data/vendor/v8/src/mips/constants-mips.cc +2 -0
  219. data/vendor/v8/src/mips/constants-mips.h +12 -2
  220. data/vendor/v8/src/mips/deoptimizer-mips.cc +286 -50
  221. data/vendor/v8/src/mips/disasm-mips.cc +13 -0
  222. data/vendor/v8/src/mips/full-codegen-mips.cc +297 -284
  223. data/vendor/v8/src/mips/ic-mips.cc +182 -263
  224. data/vendor/v8/src/mips/lithium-codegen-mips.cc +1208 -556
  225. data/vendor/v8/src/mips/lithium-codegen-mips.h +72 -19
  226. data/vendor/v8/src/mips/lithium-gap-resolver-mips.cc +9 -2
  227. data/vendor/v8/src/mips/lithium-mips.cc +290 -302
  228. data/vendor/v8/src/mips/lithium-mips.h +463 -266
  229. data/vendor/v8/src/mips/macro-assembler-mips.cc +208 -115
  230. data/vendor/v8/src/mips/macro-assembler-mips.h +67 -24
  231. data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +40 -25
  232. data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +3 -9
  233. data/vendor/v8/src/mips/simulator-mips.cc +112 -40
  234. data/vendor/v8/src/mips/simulator-mips.h +5 -0
  235. data/vendor/v8/src/mips/stub-cache-mips.cc +502 -884
  236. data/vendor/v8/src/mirror-debugger.js +157 -30
  237. data/vendor/v8/src/mksnapshot.cc +88 -14
  238. data/vendor/v8/src/object-observe.js +235 -0
  239. data/vendor/v8/src/objects-debug.cc +178 -176
  240. data/vendor/v8/src/objects-inl.h +1333 -486
  241. data/vendor/v8/src/objects-printer.cc +125 -43
  242. data/vendor/v8/src/objects-visiting-inl.h +578 -6
  243. data/vendor/v8/src/objects-visiting.cc +2 -2
  244. data/vendor/v8/src/objects-visiting.h +172 -79
  245. data/vendor/v8/src/objects.cc +3533 -2885
  246. data/vendor/v8/src/objects.h +1352 -1131
  247. data/vendor/v8/src/optimizing-compiler-thread.cc +152 -0
  248. data/vendor/v8/src/optimizing-compiler-thread.h +111 -0
  249. data/vendor/v8/src/parser.cc +390 -500
  250. data/vendor/v8/src/parser.h +45 -33
  251. data/vendor/v8/src/platform-cygwin.cc +10 -21
  252. data/vendor/v8/src/platform-freebsd.cc +36 -41
  253. data/vendor/v8/src/platform-linux.cc +160 -124
  254. data/vendor/v8/src/platform-macos.cc +30 -27
  255. data/vendor/v8/src/platform-nullos.cc +17 -1
  256. data/vendor/v8/src/platform-openbsd.cc +19 -50
  257. data/vendor/v8/src/platform-posix.cc +14 -0
  258. data/vendor/v8/src/platform-solaris.cc +20 -53
  259. data/vendor/v8/src/platform-win32.cc +49 -26
  260. data/vendor/v8/src/platform.h +40 -1
  261. data/vendor/v8/src/preparser.cc +8 -5
  262. data/vendor/v8/src/preparser.h +2 -2
  263. data/vendor/v8/src/prettyprinter.cc +16 -0
  264. data/vendor/v8/src/prettyprinter.h +2 -0
  265. data/vendor/v8/src/profile-generator-inl.h +1 -0
  266. data/vendor/v8/src/profile-generator.cc +209 -147
  267. data/vendor/v8/src/profile-generator.h +15 -12
  268. data/vendor/v8/src/property-details.h +46 -31
  269. data/vendor/v8/src/property.cc +27 -46
  270. data/vendor/v8/src/property.h +163 -83
  271. data/vendor/v8/src/proxy.js +7 -2
  272. data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +4 -13
  273. data/vendor/v8/src/regexp-macro-assembler-irregexp.h +1 -2
  274. data/vendor/v8/src/regexp-macro-assembler-tracer.cc +1 -11
  275. data/vendor/v8/src/regexp-macro-assembler-tracer.h +0 -1
  276. data/vendor/v8/src/regexp-macro-assembler.cc +31 -14
  277. data/vendor/v8/src/regexp-macro-assembler.h +14 -11
  278. data/vendor/v8/src/regexp-stack.cc +1 -0
  279. data/vendor/v8/src/regexp.js +9 -8
  280. data/vendor/v8/src/rewriter.cc +18 -7
  281. data/vendor/v8/src/runtime-profiler.cc +52 -43
  282. data/vendor/v8/src/runtime-profiler.h +0 -25
  283. data/vendor/v8/src/runtime.cc +2006 -2023
  284. data/vendor/v8/src/runtime.h +56 -49
  285. data/vendor/v8/src/safepoint-table.cc +12 -18
  286. data/vendor/v8/src/safepoint-table.h +11 -8
  287. data/vendor/v8/src/scanner.cc +1 -0
  288. data/vendor/v8/src/scanner.h +4 -10
  289. data/vendor/v8/src/scopeinfo.cc +35 -9
  290. data/vendor/v8/src/scopeinfo.h +64 -3
  291. data/vendor/v8/src/scopes.cc +251 -156
  292. data/vendor/v8/src/scopes.h +61 -27
  293. data/vendor/v8/src/serialize.cc +348 -396
  294. data/vendor/v8/src/serialize.h +125 -114
  295. data/vendor/v8/src/small-pointer-list.h +11 -11
  296. data/vendor/v8/src/{smart-array-pointer.h → smart-pointers.h} +64 -15
  297. data/vendor/v8/src/snapshot-common.cc +64 -15
  298. data/vendor/v8/src/snapshot-empty.cc +7 -1
  299. data/vendor/v8/src/snapshot.h +9 -2
  300. data/vendor/v8/src/spaces-inl.h +17 -0
  301. data/vendor/v8/src/spaces.cc +477 -183
  302. data/vendor/v8/src/spaces.h +238 -58
  303. data/vendor/v8/src/splay-tree-inl.h +8 -7
  304. data/vendor/v8/src/splay-tree.h +24 -10
  305. data/vendor/v8/src/store-buffer.cc +12 -5
  306. data/vendor/v8/src/store-buffer.h +2 -4
  307. data/vendor/v8/src/string-search.h +22 -6
  308. data/vendor/v8/src/string-stream.cc +11 -8
  309. data/vendor/v8/src/string.js +47 -15
  310. data/vendor/v8/src/stub-cache.cc +461 -224
  311. data/vendor/v8/src/stub-cache.h +164 -102
  312. data/vendor/v8/src/sweeper-thread.cc +105 -0
  313. data/vendor/v8/src/sweeper-thread.h +81 -0
  314. data/vendor/v8/src/token.h +1 -0
  315. data/vendor/v8/src/transitions-inl.h +220 -0
  316. data/vendor/v8/src/transitions.cc +160 -0
  317. data/vendor/v8/src/transitions.h +207 -0
  318. data/vendor/v8/src/type-info.cc +182 -181
  319. data/vendor/v8/src/type-info.h +31 -19
  320. data/vendor/v8/src/unicode-inl.h +62 -106
  321. data/vendor/v8/src/unicode.cc +57 -67
  322. data/vendor/v8/src/unicode.h +45 -91
  323. data/vendor/v8/src/uri.js +57 -29
  324. data/vendor/v8/src/utils.h +105 -5
  325. data/vendor/v8/src/v8-counters.cc +54 -11
  326. data/vendor/v8/src/v8-counters.h +134 -19
  327. data/vendor/v8/src/v8.cc +29 -29
  328. data/vendor/v8/src/v8.h +1 -0
  329. data/vendor/v8/src/v8conversions.cc +26 -22
  330. data/vendor/v8/src/v8globals.h +56 -43
  331. data/vendor/v8/src/v8natives.js +83 -30
  332. data/vendor/v8/src/v8threads.cc +42 -21
  333. data/vendor/v8/src/v8threads.h +4 -1
  334. data/vendor/v8/src/v8utils.cc +9 -93
  335. data/vendor/v8/src/v8utils.h +37 -33
  336. data/vendor/v8/src/variables.cc +6 -3
  337. data/vendor/v8/src/variables.h +6 -13
  338. data/vendor/v8/src/version.cc +2 -2
  339. data/vendor/v8/src/vm-state-inl.h +11 -0
  340. data/vendor/v8/src/x64/assembler-x64-inl.h +39 -8
  341. data/vendor/v8/src/x64/assembler-x64.cc +78 -64
  342. data/vendor/v8/src/x64/assembler-x64.h +38 -33
  343. data/vendor/v8/src/x64/builtins-x64.cc +105 -7
  344. data/vendor/v8/src/x64/code-stubs-x64.cc +790 -413
  345. data/vendor/v8/src/x64/code-stubs-x64.h +10 -106
  346. data/vendor/v8/src/x64/codegen-x64.cc +210 -8
  347. data/vendor/v8/src/x64/codegen-x64.h +20 -1
  348. data/vendor/v8/src/x64/deoptimizer-x64.cc +336 -75
  349. data/vendor/v8/src/x64/disasm-x64.cc +15 -0
  350. data/vendor/v8/src/x64/frames-x64.h +0 -14
  351. data/vendor/v8/src/x64/full-codegen-x64.cc +293 -270
  352. data/vendor/v8/src/x64/ic-x64.cc +153 -251
  353. data/vendor/v8/src/x64/lithium-codegen-x64.cc +1379 -531
  354. data/vendor/v8/src/x64/lithium-codegen-x64.h +67 -23
  355. data/vendor/v8/src/x64/lithium-gap-resolver-x64.cc +2 -2
  356. data/vendor/v8/src/x64/lithium-x64.cc +349 -289
  357. data/vendor/v8/src/x64/lithium-x64.h +460 -250
  358. data/vendor/v8/src/x64/macro-assembler-x64.cc +350 -177
  359. data/vendor/v8/src/x64/macro-assembler-x64.h +67 -49
  360. data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +46 -33
  361. data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +2 -3
  362. data/vendor/v8/src/x64/stub-cache-x64.cc +484 -653
  363. data/vendor/v8/src/zone-inl.h +9 -27
  364. data/vendor/v8/src/zone.cc +5 -5
  365. data/vendor/v8/src/zone.h +53 -27
  366. data/vendor/v8/test/benchmarks/testcfg.py +5 -0
  367. data/vendor/v8/test/cctest/cctest.cc +4 -0
  368. data/vendor/v8/test/cctest/cctest.gyp +3 -1
  369. data/vendor/v8/test/cctest/cctest.h +57 -9
  370. data/vendor/v8/test/cctest/cctest.status +15 -15
  371. data/vendor/v8/test/cctest/test-accessors.cc +26 -0
  372. data/vendor/v8/test/cctest/test-alloc.cc +22 -30
  373. data/vendor/v8/test/cctest/test-api.cc +1943 -314
  374. data/vendor/v8/test/cctest/test-assembler-arm.cc +133 -13
  375. data/vendor/v8/test/cctest/test-assembler-ia32.cc +1 -1
  376. data/vendor/v8/test/cctest/test-assembler-mips.cc +12 -0
  377. data/vendor/v8/test/cctest/test-ast.cc +4 -2
  378. data/vendor/v8/test/cctest/test-compiler.cc +61 -29
  379. data/vendor/v8/test/cctest/test-dataflow.cc +2 -2
  380. data/vendor/v8/test/cctest/test-debug.cc +212 -33
  381. data/vendor/v8/test/cctest/test-decls.cc +257 -11
  382. data/vendor/v8/test/cctest/test-dictionary.cc +24 -10
  383. data/vendor/v8/test/cctest/test-disasm-arm.cc +118 -1
  384. data/vendor/v8/test/cctest/test-disasm-ia32.cc +3 -2
  385. data/vendor/v8/test/cctest/test-flags.cc +14 -1
  386. data/vendor/v8/test/cctest/test-func-name-inference.cc +7 -4
  387. data/vendor/v8/test/cctest/test-global-object.cc +51 -0
  388. data/vendor/v8/test/cctest/test-hashing.cc +32 -23
  389. data/vendor/v8/test/cctest/test-heap-profiler.cc +131 -77
  390. data/vendor/v8/test/cctest/test-heap.cc +1084 -143
  391. data/vendor/v8/test/cctest/test-list.cc +1 -1
  392. data/vendor/v8/test/cctest/test-liveedit.cc +3 -2
  393. data/vendor/v8/test/cctest/test-lockers.cc +12 -13
  394. data/vendor/v8/test/cctest/test-log.cc +10 -8
  395. data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +2 -2
  396. data/vendor/v8/test/cctest/test-mark-compact.cc +44 -22
  397. data/vendor/v8/test/cctest/test-object-observe.cc +434 -0
  398. data/vendor/v8/test/cctest/test-parsing.cc +86 -39
  399. data/vendor/v8/test/cctest/test-platform-linux.cc +6 -0
  400. data/vendor/v8/test/cctest/test-platform-win32.cc +7 -0
  401. data/vendor/v8/test/cctest/test-random.cc +5 -4
  402. data/vendor/v8/test/cctest/test-regexp.cc +137 -101
  403. data/vendor/v8/test/cctest/test-serialize.cc +150 -230
  404. data/vendor/v8/test/cctest/test-sockets.cc +1 -1
  405. data/vendor/v8/test/cctest/test-spaces.cc +139 -0
  406. data/vendor/v8/test/cctest/test-strings.cc +736 -74
  407. data/vendor/v8/test/cctest/test-thread-termination.cc +10 -11
  408. data/vendor/v8/test/cctest/test-threads.cc +4 -4
  409. data/vendor/v8/test/cctest/test-utils.cc +16 -0
  410. data/vendor/v8/test/cctest/test-weakmaps.cc +7 -3
  411. data/vendor/v8/test/cctest/testcfg.py +64 -5
  412. data/vendor/v8/test/es5conform/testcfg.py +5 -0
  413. data/vendor/v8/test/message/message.status +1 -1
  414. data/vendor/v8/test/message/overwritten-builtins.out +3 -0
  415. data/vendor/v8/test/message/testcfg.py +89 -8
  416. data/vendor/v8/test/message/try-catch-finally-no-message.out +26 -26
  417. data/vendor/v8/test/mjsunit/accessor-map-sharing.js +18 -2
  418. data/vendor/v8/test/mjsunit/allocation-site-info.js +126 -0
  419. data/vendor/v8/test/mjsunit/array-bounds-check-removal.js +62 -1
  420. data/vendor/v8/test/mjsunit/array-iteration.js +1 -1
  421. data/vendor/v8/test/mjsunit/array-literal-transitions.js +2 -0
  422. data/vendor/v8/test/mjsunit/array-natives-elements.js +317 -0
  423. data/vendor/v8/test/mjsunit/array-reduce.js +8 -8
  424. data/vendor/v8/test/mjsunit/array-slice.js +12 -0
  425. data/vendor/v8/test/mjsunit/array-store-and-grow.js +4 -1
  426. data/vendor/v8/test/mjsunit/assert-opt-and-deopt.js +1 -1
  427. data/vendor/v8/test/mjsunit/bugs/bug-2337.js +53 -0
  428. data/vendor/v8/test/mjsunit/compare-known-objects-slow.js +69 -0
  429. data/vendor/v8/test/mjsunit/compiler/alloc-object-huge.js +3 -1
  430. data/vendor/v8/test/mjsunit/compiler/inline-accessors.js +368 -0
  431. data/vendor/v8/test/mjsunit/compiler/inline-arguments.js +87 -1
  432. data/vendor/v8/test/mjsunit/compiler/inline-closures.js +49 -0
  433. data/vendor/v8/test/mjsunit/compiler/inline-construct.js +55 -43
  434. data/vendor/v8/test/mjsunit/compiler/inline-literals.js +39 -0
  435. data/vendor/v8/test/mjsunit/compiler/multiply-add.js +69 -0
  436. data/vendor/v8/test/mjsunit/compiler/optimized-closures.js +57 -0
  437. data/vendor/v8/test/mjsunit/compiler/parallel-proto-change.js +44 -0
  438. data/vendor/v8/test/mjsunit/compiler/property-static.js +69 -0
  439. data/vendor/v8/test/mjsunit/compiler/proto-chain-constant.js +55 -0
  440. data/vendor/v8/test/mjsunit/compiler/proto-chain-load.js +44 -0
  441. data/vendor/v8/test/mjsunit/compiler/regress-gvn.js +3 -2
  442. data/vendor/v8/test/mjsunit/compiler/regress-or.js +6 -2
  443. data/vendor/v8/test/mjsunit/compiler/rotate.js +224 -0
  444. data/vendor/v8/test/mjsunit/compiler/uint32.js +173 -0
  445. data/vendor/v8/test/mjsunit/count-based-osr.js +2 -1
  446. data/vendor/v8/test/mjsunit/d8-os.js +3 -3
  447. data/vendor/v8/test/mjsunit/date-parse.js +3 -0
  448. data/vendor/v8/test/mjsunit/date.js +22 -0
  449. data/vendor/v8/test/mjsunit/debug-break-inline.js +1 -0
  450. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js +22 -12
  451. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized.js +21 -10
  452. data/vendor/v8/test/mjsunit/debug-liveedit-compile-error.js +60 -0
  453. data/vendor/v8/test/mjsunit/debug-liveedit-double-call.js +142 -0
  454. data/vendor/v8/test/mjsunit/debug-liveedit-literals.js +94 -0
  455. data/vendor/v8/test/mjsunit/debug-liveedit-restart-frame.js +153 -0
  456. data/vendor/v8/test/mjsunit/debug-multiple-breakpoints.js +1 -1
  457. data/vendor/v8/test/mjsunit/debug-script-breakpoints-closure.js +67 -0
  458. data/vendor/v8/test/mjsunit/debug-script-breakpoints-nested.js +82 -0
  459. data/vendor/v8/test/mjsunit/debug-script.js +4 -2
  460. data/vendor/v8/test/mjsunit/debug-set-variable-value.js +308 -0
  461. data/vendor/v8/test/mjsunit/debug-stepout-scope-part1.js +190 -0
  462. data/vendor/v8/test/mjsunit/debug-stepout-scope-part2.js +83 -0
  463. data/vendor/v8/test/mjsunit/debug-stepout-scope-part3.js +80 -0
  464. data/vendor/v8/test/mjsunit/debug-stepout-scope-part4.js +80 -0
  465. data/vendor/v8/test/mjsunit/debug-stepout-scope-part5.js +77 -0
  466. data/vendor/v8/test/mjsunit/debug-stepout-scope-part6.js +79 -0
  467. data/vendor/v8/test/mjsunit/debug-stepout-scope-part7.js +79 -0
  468. data/vendor/v8/test/mjsunit/{debug-stepout-scope.js → debug-stepout-scope-part8.js} +0 -189
  469. data/vendor/v8/test/mjsunit/delete-non-configurable.js +74 -0
  470. data/vendor/v8/test/mjsunit/deopt-minus-zero.js +56 -0
  471. data/vendor/v8/test/mjsunit/elements-kind.js +6 -4
  472. data/vendor/v8/test/mjsunit/elements-length-no-holey.js +33 -0
  473. data/vendor/v8/test/mjsunit/elements-transition-hoisting.js +46 -19
  474. data/vendor/v8/test/mjsunit/error-accessors.js +54 -0
  475. data/vendor/v8/test/mjsunit/error-constructors.js +1 -14
  476. data/vendor/v8/test/mjsunit/error-tostring.js +8 -0
  477. data/vendor/v8/test/mjsunit/eval-stack-trace.js +204 -0
  478. data/vendor/v8/test/mjsunit/external-array.js +364 -1
  479. data/vendor/v8/test/mjsunit/fast-array-length.js +37 -0
  480. data/vendor/v8/test/mjsunit/fast-non-keyed.js +113 -0
  481. data/vendor/v8/test/mjsunit/fast-prototype.js +117 -0
  482. data/vendor/v8/test/mjsunit/function-call.js +14 -18
  483. data/vendor/v8/test/mjsunit/fuzz-natives-part1.js +230 -0
  484. data/vendor/v8/test/mjsunit/fuzz-natives-part2.js +229 -0
  485. data/vendor/v8/test/mjsunit/fuzz-natives-part3.js +229 -0
  486. data/vendor/v8/test/mjsunit/{fuzz-natives.js → fuzz-natives-part4.js} +12 -2
  487. data/vendor/v8/test/mjsunit/generated-transition-stub.js +218 -0
  488. data/vendor/v8/test/mjsunit/greedy.js +1 -1
  489. data/vendor/v8/test/mjsunit/harmony/block-conflicts.js +2 -1
  490. data/vendor/v8/test/mjsunit/harmony/block-let-crankshaft.js +1 -1
  491. data/vendor/v8/test/mjsunit/harmony/collections.js +69 -11
  492. data/vendor/v8/test/mjsunit/harmony/debug-blockscopes.js +2 -2
  493. data/vendor/v8/test/mjsunit/harmony/module-linking.js +180 -3
  494. data/vendor/v8/test/mjsunit/harmony/module-parsing.js +31 -0
  495. data/vendor/v8/test/mjsunit/harmony/module-recompile.js +87 -0
  496. data/vendor/v8/test/mjsunit/harmony/module-resolution.js +15 -2
  497. data/vendor/v8/test/mjsunit/harmony/object-observe.js +1056 -0
  498. data/vendor/v8/test/mjsunit/harmony/proxies-json.js +178 -0
  499. data/vendor/v8/test/mjsunit/harmony/proxies.js +25 -10
  500. data/vendor/v8/test/mjsunit/json-parser-recursive.js +33 -0
  501. data/vendor/v8/test/mjsunit/json-stringify-recursive.js +52 -0
  502. data/vendor/v8/test/mjsunit/json.js +38 -2
  503. data/vendor/v8/test/mjsunit/json2.js +153 -0
  504. data/vendor/v8/test/mjsunit/limit-locals.js +5 -4
  505. data/vendor/v8/test/mjsunit/manual-parallel-recompile.js +79 -0
  506. data/vendor/v8/test/mjsunit/math-exp-precision.js +64 -0
  507. data/vendor/v8/test/mjsunit/math-floor-negative.js +59 -0
  508. data/vendor/v8/test/mjsunit/math-floor-of-div-minus-zero.js +41 -0
  509. data/vendor/v8/test/mjsunit/math-floor-of-div-nosudiv.js +288 -0
  510. data/vendor/v8/test/mjsunit/math-floor-of-div.js +81 -9
  511. data/vendor/v8/test/mjsunit/{math-floor.js → math-floor-part1.js} +1 -72
  512. data/vendor/v8/test/mjsunit/math-floor-part2.js +76 -0
  513. data/vendor/v8/test/mjsunit/math-floor-part3.js +78 -0
  514. data/vendor/v8/test/mjsunit/math-floor-part4.js +76 -0
  515. data/vendor/v8/test/mjsunit/mirror-object.js +43 -9
  516. data/vendor/v8/test/mjsunit/mjsunit.js +1 -1
  517. data/vendor/v8/test/mjsunit/mjsunit.status +52 -27
  518. data/vendor/v8/test/mjsunit/mul-exhaustive-part1.js +491 -0
  519. data/vendor/v8/test/mjsunit/mul-exhaustive-part10.js +470 -0
  520. data/vendor/v8/test/mjsunit/mul-exhaustive-part2.js +525 -0
  521. data/vendor/v8/test/mjsunit/mul-exhaustive-part3.js +532 -0
  522. data/vendor/v8/test/mjsunit/mul-exhaustive-part4.js +509 -0
  523. data/vendor/v8/test/mjsunit/mul-exhaustive-part5.js +505 -0
  524. data/vendor/v8/test/mjsunit/mul-exhaustive-part6.js +554 -0
  525. data/vendor/v8/test/mjsunit/mul-exhaustive-part7.js +497 -0
  526. data/vendor/v8/test/mjsunit/mul-exhaustive-part8.js +526 -0
  527. data/vendor/v8/test/mjsunit/mul-exhaustive-part9.js +533 -0
  528. data/vendor/v8/test/mjsunit/new-function.js +34 -0
  529. data/vendor/v8/test/mjsunit/numops-fuzz-part1.js +1172 -0
  530. data/vendor/v8/test/mjsunit/numops-fuzz-part2.js +1178 -0
  531. data/vendor/v8/test/mjsunit/numops-fuzz-part3.js +1178 -0
  532. data/vendor/v8/test/mjsunit/numops-fuzz-part4.js +1177 -0
  533. data/vendor/v8/test/mjsunit/object-define-property.js +107 -2
  534. data/vendor/v8/test/mjsunit/override-read-only-property.js +6 -4
  535. data/vendor/v8/test/mjsunit/packed-elements.js +2 -2
  536. data/vendor/v8/test/mjsunit/parse-int-float.js +4 -4
  537. data/vendor/v8/test/mjsunit/pixel-array-rounding.js +1 -1
  538. data/vendor/v8/test/mjsunit/readonly.js +228 -0
  539. data/vendor/v8/test/mjsunit/regexp-capture-3.js +16 -18
  540. data/vendor/v8/test/mjsunit/regexp-capture.js +2 -0
  541. data/vendor/v8/test/mjsunit/regexp-global.js +122 -0
  542. data/vendor/v8/test/mjsunit/regexp-results-cache.js +78 -0
  543. data/vendor/v8/test/mjsunit/regress/regress-1117.js +12 -3
  544. data/vendor/v8/test/mjsunit/regress/regress-1118.js +1 -1
  545. data/vendor/v8/test/mjsunit/regress/regress-115100.js +36 -0
  546. data/vendor/v8/test/mjsunit/regress/regress-1199637.js +1 -3
  547. data/vendor/v8/test/mjsunit/regress/regress-121407.js +1 -1
  548. data/vendor/v8/test/mjsunit/regress/regress-131923.js +30 -0
  549. data/vendor/v8/test/mjsunit/regress/regress-131994.js +70 -0
  550. data/vendor/v8/test/mjsunit/regress/regress-133211.js +35 -0
  551. data/vendor/v8/test/mjsunit/regress/regress-133211b.js +39 -0
  552. data/vendor/v8/test/mjsunit/regress/regress-136048.js +34 -0
  553. data/vendor/v8/test/mjsunit/regress/regress-137768.js +73 -0
  554. data/vendor/v8/test/mjsunit/regress/regress-143967.js +34 -0
  555. data/vendor/v8/test/mjsunit/regress/regress-145201.js +107 -0
  556. data/vendor/v8/test/mjsunit/regress/regress-147497.js +45 -0
  557. data/vendor/v8/test/mjsunit/regress/regress-148378.js +38 -0
  558. data/vendor/v8/test/mjsunit/regress/regress-1563.js +1 -1
  559. data/vendor/v8/test/mjsunit/regress/regress-1591.js +48 -0
  560. data/vendor/v8/test/mjsunit/regress/regress-164442.js +45 -0
  561. data/vendor/v8/test/mjsunit/regress/regress-165637.js +61 -0
  562. data/vendor/v8/test/mjsunit/regress/regress-166379.js +39 -0
  563. data/vendor/v8/test/mjsunit/regress/regress-166553.js +33 -0
  564. data/vendor/v8/test/mjsunit/regress/regress-1692.js +1 -1
  565. data/vendor/v8/test/mjsunit/regress/regress-171641.js +40 -0
  566. data/vendor/v8/test/mjsunit/regress/regress-1980.js +1 -1
  567. data/vendor/v8/test/mjsunit/regress/regress-2073.js +99 -0
  568. data/vendor/v8/test/mjsunit/regress/regress-2119.js +36 -0
  569. data/vendor/v8/test/mjsunit/regress/regress-2156.js +39 -0
  570. data/vendor/v8/test/mjsunit/regress/regress-2163.js +70 -0
  571. data/vendor/v8/test/mjsunit/regress/regress-2170.js +58 -0
  572. data/vendor/v8/test/mjsunit/regress/regress-2172.js +35 -0
  573. data/vendor/v8/test/mjsunit/regress/regress-2185-2.js +145 -0
  574. data/vendor/v8/test/mjsunit/regress/regress-2185.js +38 -0
  575. data/vendor/v8/test/mjsunit/regress/regress-2186.js +49 -0
  576. data/vendor/v8/test/mjsunit/regress/regress-2193.js +58 -0
  577. data/vendor/v8/test/mjsunit/regress/regress-2219.js +32 -0
  578. data/vendor/v8/test/mjsunit/regress/regress-2225.js +65 -0
  579. data/vendor/v8/test/mjsunit/regress/regress-2226.js +36 -0
  580. data/vendor/v8/test/mjsunit/regress/regress-2234.js +41 -0
  581. data/vendor/v8/test/mjsunit/regress/regress-2243.js +31 -0
  582. data/vendor/v8/test/mjsunit/regress/regress-2249.js +33 -0
  583. data/vendor/v8/test/mjsunit/regress/regress-2250.js +68 -0
  584. data/vendor/v8/test/mjsunit/regress/regress-2261.js +113 -0
  585. data/vendor/v8/test/mjsunit/regress/regress-2263.js +30 -0
  586. data/vendor/v8/test/mjsunit/regress/regress-2284.js +32 -0
  587. data/vendor/v8/test/mjsunit/regress/regress-2285.js +32 -0
  588. data/vendor/v8/test/mjsunit/regress/regress-2286.js +32 -0
  589. data/vendor/v8/test/mjsunit/regress/regress-2289.js +34 -0
  590. data/vendor/v8/test/mjsunit/regress/regress-2291.js +36 -0
  591. data/vendor/v8/test/mjsunit/regress/regress-2294.js +70 -0
  592. data/vendor/v8/test/mjsunit/regress/regress-2296.js +40 -0
  593. data/vendor/v8/test/mjsunit/regress/regress-2315.js +40 -0
  594. data/vendor/v8/test/mjsunit/regress/regress-2318.js +66 -0
  595. data/vendor/v8/test/mjsunit/regress/regress-2322.js +36 -0
  596. data/vendor/v8/test/mjsunit/regress/regress-2326.js +54 -0
  597. data/vendor/v8/test/mjsunit/regress/regress-2336.js +53 -0
  598. data/vendor/v8/test/mjsunit/regress/regress-2339.js +59 -0
  599. data/vendor/v8/test/mjsunit/regress/regress-2346.js +123 -0
  600. data/vendor/v8/test/mjsunit/regress/regress-2373.js +29 -0
  601. data/vendor/v8/test/mjsunit/regress/regress-2374.js +33 -0
  602. data/vendor/v8/test/mjsunit/regress/regress-2398.js +41 -0
  603. data/vendor/v8/test/mjsunit/regress/regress-2410.js +36 -0
  604. data/vendor/v8/test/mjsunit/regress/regress-2416.js +75 -0
  605. data/vendor/v8/test/mjsunit/regress/regress-2419.js +37 -0
  606. data/vendor/v8/test/mjsunit/regress/regress-2433.js +36 -0
  607. data/vendor/v8/test/mjsunit/regress/regress-2437.js +156 -0
  608. data/vendor/v8/test/mjsunit/regress/regress-2438.js +52 -0
  609. data/vendor/v8/test/mjsunit/regress/regress-2443.js +129 -0
  610. data/vendor/v8/test/mjsunit/regress/regress-2444.js +120 -0
  611. data/vendor/v8/test/mjsunit/regress/regress-2489.js +50 -0
  612. data/vendor/v8/test/mjsunit/regress/regress-2499.js +40 -0
  613. data/vendor/v8/test/mjsunit/regress/regress-334.js +1 -1
  614. data/vendor/v8/test/mjsunit/regress/regress-492.js +39 -1
  615. data/vendor/v8/test/mjsunit/regress/regress-builtin-array-op.js +38 -0
  616. data/vendor/v8/test/mjsunit/regress/regress-cnlt-elements.js +43 -0
  617. data/vendor/v8/test/mjsunit/regress/regress-cnlt-enum-indices.js +45 -0
  618. data/vendor/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js +46 -0
  619. data/vendor/v8/test/mjsunit/regress/regress-convert-enum.js +60 -0
  620. data/vendor/v8/test/mjsunit/regress/regress-convert-enum2.js +46 -0
  621. data/vendor/v8/test/mjsunit/regress/regress-convert-transition.js +40 -0
  622. data/vendor/v8/test/mjsunit/regress/regress-crbug-119926.js +3 -1
  623. data/vendor/v8/test/mjsunit/regress/regress-crbug-125148.js +90 -0
  624. data/vendor/v8/test/mjsunit/regress/regress-crbug-134055.js +63 -0
  625. data/vendor/v8/test/mjsunit/regress/regress-crbug-134609.js +59 -0
  626. data/vendor/v8/test/mjsunit/regress/regress-crbug-135008.js +45 -0
  627. data/vendor/v8/test/mjsunit/regress/regress-crbug-135066.js +55 -0
  628. data/vendor/v8/test/mjsunit/regress/regress-crbug-137689.js +47 -0
  629. data/vendor/v8/test/mjsunit/regress/regress-crbug-138887.js +48 -0
  630. data/vendor/v8/test/mjsunit/regress/regress-crbug-140083.js +44 -0
  631. data/vendor/v8/test/mjsunit/regress/regress-crbug-142087.js +38 -0
  632. data/vendor/v8/test/mjsunit/regress/regress-crbug-142218.js +44 -0
  633. data/vendor/v8/test/mjsunit/regress/regress-crbug-145961.js +39 -0
  634. data/vendor/v8/test/mjsunit/regress/regress-crbug-146910.js +33 -0
  635. data/vendor/v8/test/mjsunit/regress/regress-crbug-147475.js +48 -0
  636. data/vendor/v8/test/mjsunit/regress/regress-crbug-148376.js +35 -0
  637. data/vendor/v8/test/mjsunit/regress/regress-crbug-150545.js +53 -0
  638. data/vendor/v8/test/mjsunit/regress/regress-crbug-150729.js +39 -0
  639. data/vendor/v8/test/mjsunit/regress/regress-crbug-157019.js +54 -0
  640. data/vendor/v8/test/mjsunit/regress/regress-crbug-157520.js +38 -0
  641. data/vendor/v8/test/mjsunit/regress/regress-crbug-158185.js +39 -0
  642. data/vendor/v8/test/mjsunit/regress/regress-crbug-160010.js +35 -0
  643. data/vendor/v8/test/mjsunit/regress/regress-crbug-162085.js +71 -0
  644. data/vendor/v8/test/mjsunit/regress/regress-crbug-168545.js +34 -0
  645. data/vendor/v8/test/mjsunit/regress/regress-crbug-170856.js +33 -0
  646. data/vendor/v8/test/mjsunit/regress/regress-crbug-172345.js +34 -0
  647. data/vendor/v8/test/mjsunit/regress/regress-crbug-173974.js +36 -0
  648. data/vendor/v8/test/mjsunit/regress/regress-crbug-18639.js +9 -5
  649. data/vendor/v8/test/mjsunit/regress/regress-debug-code-recompilation.js +2 -1
  650. data/vendor/v8/test/mjsunit/regress/regress-deep-proto.js +45 -0
  651. data/vendor/v8/test/mjsunit/regress/regress-delete-empty-double.js +40 -0
  652. data/vendor/v8/test/mjsunit/regress/regress-iteration-order.js +42 -0
  653. data/vendor/v8/test/mjsunit/regress/regress-json-stringify-gc.js +41 -0
  654. data/vendor/v8/test/mjsunit/regress/regress-latin-1.js +78 -0
  655. data/vendor/v8/test/mjsunit/regress/regress-load-elements.js +49 -0
  656. data/vendor/v8/test/mjsunit/regress/regress-observe-empty-double-array.js +38 -0
  657. data/vendor/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js +37 -0
  658. data/vendor/v8/test/mjsunit/shift-for-integer-div.js +59 -0
  659. data/vendor/v8/test/mjsunit/stack-traces-gc.js +119 -0
  660. data/vendor/v8/test/mjsunit/stack-traces-overflow.js +122 -0
  661. data/vendor/v8/test/mjsunit/stack-traces.js +39 -1
  662. data/vendor/v8/test/mjsunit/str-to-num.js +7 -2
  663. data/vendor/v8/test/mjsunit/strict-mode.js +36 -11
  664. data/vendor/v8/test/mjsunit/string-charcodeat.js +3 -0
  665. data/vendor/v8/test/mjsunit/string-natives.js +72 -0
  666. data/vendor/v8/test/mjsunit/string-split.js +17 -0
  667. data/vendor/v8/test/mjsunit/testcfg.py +76 -6
  668. data/vendor/v8/test/mjsunit/tools/tickprocessor.js +4 -1
  669. data/vendor/v8/test/mjsunit/try-finally-continue.js +72 -0
  670. data/vendor/v8/test/mjsunit/typed-array-slice.js +61 -0
  671. data/vendor/v8/test/mjsunit/unbox-double-arrays.js +2 -0
  672. data/vendor/v8/test/mjsunit/uri.js +12 -0
  673. data/vendor/v8/test/mjsunit/with-readonly.js +4 -2
  674. data/vendor/v8/test/mozilla/mozilla.status +19 -113
  675. data/vendor/v8/test/mozilla/testcfg.py +122 -3
  676. data/vendor/v8/test/preparser/preparser.status +5 -0
  677. data/vendor/v8/test/preparser/strict-identifiers.pyt +1 -1
  678. data/vendor/v8/test/preparser/testcfg.py +101 -5
  679. data/vendor/v8/test/sputnik/sputnik.status +1 -1
  680. data/vendor/v8/test/sputnik/testcfg.py +5 -0
  681. data/vendor/v8/test/test262/README +2 -2
  682. data/vendor/v8/test/test262/test262.status +13 -36
  683. data/vendor/v8/test/test262/testcfg.py +102 -8
  684. data/vendor/v8/tools/android-build.sh +0 -0
  685. data/vendor/v8/tools/android-ll-prof.sh +69 -0
  686. data/vendor/v8/tools/android-run.py +109 -0
  687. data/vendor/v8/tools/android-sync.sh +105 -0
  688. data/vendor/v8/tools/bash-completion.sh +0 -0
  689. data/vendor/v8/tools/check-static-initializers.sh +0 -0
  690. data/vendor/v8/tools/common-includes.sh +15 -22
  691. data/vendor/v8/tools/disasm.py +4 -4
  692. data/vendor/v8/tools/fuzz-harness.sh +0 -0
  693. data/vendor/v8/tools/gen-postmortem-metadata.py +6 -8
  694. data/vendor/v8/tools/grokdump.py +404 -129
  695. data/vendor/v8/tools/gyp/v8.gyp +105 -43
  696. data/vendor/v8/tools/linux-tick-processor +5 -5
  697. data/vendor/v8/tools/ll_prof.py +75 -15
  698. data/vendor/v8/tools/merge-to-branch.sh +2 -2
  699. data/vendor/v8/tools/plot-timer-events +70 -0
  700. data/vendor/v8/tools/plot-timer-events.js +510 -0
  701. data/vendor/v8/tools/presubmit.py +1 -0
  702. data/vendor/v8/tools/push-to-trunk.sh +14 -4
  703. data/vendor/v8/tools/run-llprof.sh +69 -0
  704. data/vendor/v8/tools/run-tests.py +372 -0
  705. data/vendor/v8/tools/run-valgrind.py +1 -1
  706. data/vendor/v8/tools/status-file-converter.py +39 -0
  707. data/vendor/v8/tools/test-server.py +224 -0
  708. data/vendor/v8/tools/test-wrapper-gypbuild.py +13 -16
  709. data/vendor/v8/tools/test.py +10 -19
  710. data/vendor/v8/tools/testrunner/README +174 -0
  711. data/vendor/v8/tools/testrunner/__init__.py +26 -0
  712. data/vendor/v8/tools/testrunner/local/__init__.py +26 -0
  713. data/vendor/v8/tools/testrunner/local/commands.py +153 -0
  714. data/vendor/v8/tools/testrunner/local/execution.py +182 -0
  715. data/vendor/v8/tools/testrunner/local/old_statusfile.py +460 -0
  716. data/vendor/v8/tools/testrunner/local/progress.py +238 -0
  717. data/vendor/v8/tools/testrunner/local/statusfile.py +145 -0
  718. data/vendor/v8/tools/testrunner/local/testsuite.py +187 -0
  719. data/vendor/v8/tools/testrunner/local/utils.py +108 -0
  720. data/vendor/v8/tools/testrunner/local/verbose.py +99 -0
  721. data/vendor/v8/tools/testrunner/network/__init__.py +26 -0
  722. data/vendor/v8/tools/testrunner/network/distro.py +90 -0
  723. data/vendor/v8/tools/testrunner/network/endpoint.py +124 -0
  724. data/vendor/v8/tools/testrunner/network/network_execution.py +253 -0
  725. data/vendor/v8/tools/testrunner/network/perfdata.py +120 -0
  726. data/vendor/v8/tools/testrunner/objects/__init__.py +26 -0
  727. data/vendor/v8/tools/testrunner/objects/context.py +50 -0
  728. data/vendor/v8/tools/testrunner/objects/output.py +60 -0
  729. data/vendor/v8/tools/testrunner/objects/peer.py +80 -0
  730. data/vendor/v8/tools/testrunner/objects/testcase.py +83 -0
  731. data/vendor/v8/tools/testrunner/objects/workpacket.py +90 -0
  732. data/vendor/v8/tools/testrunner/server/__init__.py +26 -0
  733. data/vendor/v8/tools/testrunner/server/compression.py +111 -0
  734. data/vendor/v8/tools/testrunner/server/constants.py +51 -0
  735. data/vendor/v8/tools/testrunner/server/daemon.py +147 -0
  736. data/vendor/v8/tools/testrunner/server/local_handler.py +119 -0
  737. data/vendor/v8/tools/testrunner/server/main.py +245 -0
  738. data/vendor/v8/tools/testrunner/server/presence_handler.py +120 -0
  739. data/vendor/v8/tools/testrunner/server/signatures.py +63 -0
  740. data/vendor/v8/tools/testrunner/server/status_handler.py +112 -0
  741. data/vendor/v8/tools/testrunner/server/work_handler.py +150 -0
  742. data/vendor/v8/tools/tick-processor.html +168 -0
  743. data/vendor/v8/tools/tickprocessor-driver.js +5 -3
  744. data/vendor/v8/tools/tickprocessor.js +58 -15
  745. metadata +534 -30
  746. data/patches/add-freebsd9-and-freebsd10-to-gyp-GetFlavor.patch +0 -11
  747. data/patches/do-not-imply-vfp3-and-armv7.patch +0 -44
  748. data/patches/fPIC-on-x64.patch +0 -14
  749. data/vendor/v8/src/liveobjectlist-inl.h +0 -126
  750. data/vendor/v8/src/liveobjectlist.cc +0 -2631
  751. data/vendor/v8/src/liveobjectlist.h +0 -319
  752. data/vendor/v8/test/mjsunit/mul-exhaustive.js +0 -4629
  753. data/vendor/v8/test/mjsunit/numops-fuzz.js +0 -4609
  754. data/vendor/v8/test/mjsunit/regress/regress-1969.js +0 -5045
@@ -65,10 +65,6 @@ bool LCodeGen::GenerateCode() {
65
65
  HPhase phase("Z_Code generation", chunk());
66
66
  ASSERT(is_unused());
67
67
  status_ = GENERATING;
68
- CpuFeatures::Scope scope1(VFP3);
69
- CpuFeatures::Scope scope2(ARMv7);
70
-
71
- CodeStub::GenerateFPStubs();
72
68
 
73
69
  // Open a frame scope to indicate that there is a frame on the stack. The
74
70
  // NONE indicates that the scope shouldn't actually generate code to set up
@@ -91,17 +87,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
91
87
  }
92
88
 
93
89
 
94
- void LCodeGen::Abort(const char* format, ...) {
95
- if (FLAG_trace_bailout) {
96
- SmartArrayPointer<char> name(
97
- info()->shared_info()->DebugName()->ToCString());
98
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
99
- va_list arguments;
100
- va_start(arguments, format);
101
- OS::VPrint(format, arguments);
102
- va_end(arguments);
103
- PrintF("\n");
104
- }
90
+ void LCodeGen::Abort(const char* reason) {
91
+ info()->set_bailout_reason(reason);
105
92
  status_ = ABORTED;
106
93
  }
107
94
 
@@ -127,53 +114,96 @@ void LCodeGen::Comment(const char* format, ...) {
127
114
  bool LCodeGen::GeneratePrologue() {
128
115
  ASSERT(is_generating());
129
116
 
117
+ if (info()->IsOptimizing()) {
118
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
119
+
130
120
  #ifdef DEBUG
131
- if (strlen(FLAG_stop_at) > 0 &&
132
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
133
- __ stop("stop_at");
134
- }
121
+ if (strlen(FLAG_stop_at) > 0 &&
122
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
123
+ __ stop("stop_at");
124
+ }
135
125
  #endif
136
126
 
137
- // r1: Callee's JS function.
138
- // cp: Callee's context.
139
- // fp: Caller's frame pointer.
140
- // lr: Caller's pc.
141
-
142
- // Strict mode functions and builtins need to replace the receiver
143
- // with undefined when called as functions (without an explicit
144
- // receiver object). r5 is zero for method calls and non-zero for
145
- // function calls.
146
- if (!info_->is_classic_mode() || info_->is_native()) {
147
- Label ok;
148
- __ cmp(r5, Operand(0));
149
- __ b(eq, &ok);
150
- int receiver_offset = scope()->num_parameters() * kPointerSize;
151
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
152
- __ str(r2, MemOperand(sp, receiver_offset));
153
- __ bind(&ok);
127
+ // r1: Callee's JS function.
128
+ // cp: Callee's context.
129
+ // fp: Caller's frame pointer.
130
+ // lr: Caller's pc.
131
+
132
+ // Strict mode functions and builtins need to replace the receiver
133
+ // with undefined when called as functions (without an explicit
134
+ // receiver object). r5 is zero for method calls and non-zero for
135
+ // function calls.
136
+ if (!info_->is_classic_mode() || info_->is_native()) {
137
+ Label ok;
138
+ __ cmp(r5, Operand::Zero());
139
+ __ b(eq, &ok);
140
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
141
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
142
+ __ str(r2, MemOperand(sp, receiver_offset));
143
+ __ bind(&ok);
144
+ }
154
145
  }
155
146
 
156
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
157
- __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
147
+ info()->set_prologue_offset(masm_->pc_offset());
148
+ if (NeedsEagerFrame()) {
149
+ if (info()->IsStub()) {
150
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
151
+ __ Push(Smi::FromInt(StackFrame::STUB));
152
+ // Adjust FP to point to saved FP.
153
+ __ add(fp, sp, Operand(2 * kPointerSize));
154
+ } else {
155
+ PredictableCodeSizeScope predictible_code_size_scope(
156
+ masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
157
+ // The following three instructions must remain together and unmodified
158
+ // for code aging to work properly.
159
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
160
+ // Load undefined value here, so the value is ready for the loop
161
+ // below.
162
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
163
+ // Adjust FP to point to saved FP.
164
+ __ add(fp, sp, Operand(2 * kPointerSize));
165
+ }
166
+ frame_is_built_ = true;
167
+ }
158
168
 
159
169
  // Reserve space for the stack slots needed by the code.
160
170
  int slots = GetStackSlotCount();
161
171
  if (slots > 0) {
162
172
  if (FLAG_debug_code) {
163
- __ mov(r0, Operand(slots));
164
- __ mov(r2, Operand(kSlotsZapValue));
173
+ __ sub(sp, sp, Operand(slots * kPointerSize));
174
+ __ push(r0);
175
+ __ push(r1);
176
+ __ add(r0, sp, Operand(slots * kPointerSize));
177
+ __ mov(r1, Operand(kSlotsZapValue));
165
178
  Label loop;
166
179
  __ bind(&loop);
167
- __ push(r2);
168
- __ sub(r0, r0, Operand(1), SetCC);
180
+ __ sub(r0, r0, Operand(kPointerSize));
181
+ __ str(r1, MemOperand(r0, 2 * kPointerSize));
182
+ __ cmp(r0, sp);
169
183
  __ b(ne, &loop);
184
+ __ pop(r1);
185
+ __ pop(r0);
170
186
  } else {
171
187
  __ sub(sp, sp, Operand(slots * kPointerSize));
172
188
  }
173
189
  }
174
190
 
191
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
192
+ CpuFeatures::Scope scope(VFP2);
193
+ Comment(";;; Save clobbered callee double registers");
194
+ int count = 0;
195
+ BitVector* doubles = chunk()->allocated_double_registers();
196
+ BitVector::Iterator save_iterator(doubles);
197
+ while (!save_iterator.Done()) {
198
+ __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
199
+ MemOperand(sp, count * kDoubleSize));
200
+ save_iterator.Advance();
201
+ count++;
202
+ }
203
+ }
204
+
175
205
  // Possibly allocate a local context.
176
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
206
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
177
207
  if (heap_slots > 0) {
178
208
  Comment(";;; Allocate local context");
179
209
  // Argument to NewContext is the function, which is in r1.
@@ -209,7 +239,7 @@ bool LCodeGen::GeneratePrologue() {
209
239
  }
210
240
 
211
241
  // Trace the call.
212
- if (FLAG_trace) {
242
+ if (FLAG_trace && info()->IsOptimizing()) {
213
243
  __ CallRuntime(Runtime::kTraceEnter, 0);
214
244
  }
215
245
  return !is_aborted();
@@ -229,7 +259,30 @@ bool LCodeGen::GenerateBody() {
229
259
  }
230
260
 
231
261
  if (emit_instructions) {
232
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
262
+ if (FLAG_code_comments) {
263
+ HValue* hydrogen = instr->hydrogen_value();
264
+ if (hydrogen != NULL) {
265
+ if (hydrogen->IsChange()) {
266
+ HValue* changed_value = HChange::cast(hydrogen)->value();
267
+ int use_id = 0;
268
+ const char* use_mnemo = "dead";
269
+ if (hydrogen->UseCount() >= 1) {
270
+ HValue* use_value = hydrogen->uses().value();
271
+ use_id = use_value->id();
272
+ use_mnemo = use_value->Mnemonic();
273
+ }
274
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
275
+ current_instruction_, instr->Mnemonic(),
276
+ changed_value->id(), changed_value->Mnemonic(),
277
+ use_id, use_mnemo);
278
+ } else {
279
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
280
+ instr->Mnemonic(), hydrogen->id());
281
+ }
282
+ } else {
283
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
284
+ }
285
+ }
233
286
  instr->CompileToNative(this);
234
287
  }
235
288
  }
@@ -244,10 +297,31 @@ bool LCodeGen::GenerateDeferredCode() {
244
297
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
245
298
  LDeferredCode* code = deferred_[i];
246
299
  __ bind(code->entry());
300
+ if (NeedsDeferredFrame()) {
301
+ Comment(";;; Deferred build frame",
302
+ code->instruction_index(),
303
+ code->instr()->Mnemonic());
304
+ ASSERT(!frame_is_built_);
305
+ ASSERT(info()->IsStub());
306
+ frame_is_built_ = true;
307
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
308
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
309
+ __ push(scratch0());
310
+ __ add(fp, sp, Operand(2 * kPointerSize));
311
+ }
247
312
  Comment(";;; Deferred code @%d: %s.",
248
313
  code->instruction_index(),
249
314
  code->instr()->Mnemonic());
250
315
  code->Generate();
316
+ if (NeedsDeferredFrame()) {
317
+ Comment(";;; Deferred destroy frame",
318
+ code->instruction_index(),
319
+ code->instr()->Mnemonic());
320
+ ASSERT(frame_is_built_);
321
+ __ pop(ip);
322
+ __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
323
+ frame_is_built_ = false;
324
+ }
251
325
  __ jmp(code->exit());
252
326
  }
253
327
  }
@@ -269,24 +343,77 @@ bool LCodeGen::GenerateDeoptJumpTable() {
269
343
  // Each entry in the jump table generates one instruction and inlines one
270
344
  // 32bit data after it.
271
345
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
272
- deopt_jump_table_.length() * 2)) {
346
+ deopt_jump_table_.length() * 7)) {
273
347
  Abort("Generated code is too large");
274
348
  }
275
349
 
276
- // Block the constant pool emission during the jump table emission.
277
- __ BlockConstPoolFor(deopt_jump_table_.length());
278
350
  __ RecordComment("[ Deoptimisation jump table");
279
351
  Label table_start;
280
352
  __ bind(&table_start);
353
+ Label needs_frame_not_call;
354
+ Label needs_frame_is_call;
281
355
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
282
356
  __ bind(&deopt_jump_table_[i].label);
283
- __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
284
- __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
357
+ Address entry = deopt_jump_table_[i].address;
358
+ bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
359
+ Deoptimizer::BailoutType type =
360
+ is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
361
+ int id = Deoptimizer::GetDeoptimizationId(entry, type);
362
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
363
+ Comment(";;; jump table entry %d.", i);
364
+ } else {
365
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
366
+ }
367
+ if (deopt_jump_table_[i].needs_frame) {
368
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
369
+ if (is_lazy_deopt) {
370
+ if (needs_frame_is_call.is_bound()) {
371
+ __ b(&needs_frame_is_call);
372
+ } else {
373
+ __ bind(&needs_frame_is_call);
374
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
375
+ // This variant of deopt can only be used with stubs. Since we don't
376
+ // have a function pointer to install in the stack frame that we're
377
+ // building, install a special marker there instead.
378
+ ASSERT(info()->IsStub());
379
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
380
+ __ push(scratch0());
381
+ __ add(fp, sp, Operand(2 * kPointerSize));
382
+ __ mov(lr, Operand(pc), LeaveCC, al);
383
+ __ mov(pc, ip);
384
+ }
385
+ } else {
386
+ if (needs_frame_not_call.is_bound()) {
387
+ __ b(&needs_frame_not_call);
388
+ } else {
389
+ __ bind(&needs_frame_not_call);
390
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
391
+ // This variant of deopt can only be used with stubs. Since we don't
392
+ // have a function pointer to install in the stack frame that we're
393
+ // building, install a special marker there instead.
394
+ ASSERT(info()->IsStub());
395
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
396
+ __ push(scratch0());
397
+ __ add(fp, sp, Operand(2 * kPointerSize));
398
+ __ mov(pc, ip);
399
+ }
400
+ }
401
+ } else {
402
+ if (is_lazy_deopt) {
403
+ __ mov(lr, Operand(pc), LeaveCC, al);
404
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
405
+ } else {
406
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
407
+ }
408
+ }
409
+ masm()->CheckConstPool(false, false);
285
410
  }
286
- ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
287
- deopt_jump_table_.length() * 2);
288
411
  __ RecordComment("]");
289
412
 
413
+ // Force constant pool emission at the end of the deopt jump table to make
414
+ // sure that no constant pools are emitted after.
415
+ masm()->CheckConstPool(true, false);
416
+
290
417
  // The deoptimization jump table is the last part of the instruction
291
418
  // sequence. Mark the generated code as done unless we bailed out.
292
419
  if (!is_aborted()) status_ = DONE;
@@ -306,8 +433,8 @@ Register LCodeGen::ToRegister(int index) const {
306
433
  }
307
434
 
308
435
 
309
- DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
310
- return DoubleRegister::FromAllocationIndex(index);
436
+ DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
437
+ return DwVfpRegister::FromAllocationIndex(index);
311
438
  }
312
439
 
313
440
 
@@ -322,7 +449,8 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
322
449
  return ToRegister(op->index());
323
450
  } else if (op->IsConstantOperand()) {
324
451
  LConstantOperand* const_op = LConstantOperand::cast(op);
325
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
452
+ HConstant* constant = chunk_->LookupConstant(const_op);
453
+ Handle<Object> literal = constant->handle();
326
454
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
327
455
  if (r.IsInteger32()) {
328
456
  ASSERT(literal->IsNumber());
@@ -347,20 +475,21 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
347
475
  }
348
476
 
349
477
 
350
- DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
478
+ DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
351
479
  ASSERT(op->IsDoubleRegister());
352
480
  return ToDoubleRegister(op->index());
353
481
  }
354
482
 
355
483
 
356
- DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
357
- SwVfpRegister flt_scratch,
358
- DoubleRegister dbl_scratch) {
484
+ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
485
+ SwVfpRegister flt_scratch,
486
+ DwVfpRegister dbl_scratch) {
359
487
  if (op->IsDoubleRegister()) {
360
488
  return ToDoubleRegister(op->index());
361
489
  } else if (op->IsConstantOperand()) {
362
490
  LConstantOperand* const_op = LConstantOperand::cast(op);
363
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
491
+ HConstant* constant = chunk_->LookupConstant(const_op);
492
+ Handle<Object> literal = constant->handle();
364
493
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
365
494
  if (r.IsInteger32()) {
366
495
  ASSERT(literal->IsNumber());
@@ -386,9 +515,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
386
515
 
387
516
 
388
517
  Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
389
- Handle<Object> literal = chunk_->LookupLiteral(op);
518
+ HConstant* constant = chunk_->LookupConstant(op);
390
519
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
391
- return literal;
520
+ return constant->handle();
392
521
  }
393
522
 
394
523
 
@@ -398,42 +527,40 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
398
527
 
399
528
 
400
529
  int LCodeGen::ToInteger32(LConstantOperand* op) const {
401
- Handle<Object> value = chunk_->LookupLiteral(op);
402
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
403
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
404
- value->Number());
405
- return static_cast<int32_t>(value->Number());
530
+ HConstant* constant = chunk_->LookupConstant(op);
531
+ return constant->Integer32Value();
406
532
  }
407
533
 
408
534
 
409
535
  double LCodeGen::ToDouble(LConstantOperand* op) const {
410
- Handle<Object> value = chunk_->LookupLiteral(op);
411
- return value->Number();
536
+ HConstant* constant = chunk_->LookupConstant(op);
537
+ ASSERT(constant->HasDoubleValue());
538
+ return constant->DoubleValue();
412
539
  }
413
540
 
414
541
 
415
542
  Operand LCodeGen::ToOperand(LOperand* op) {
416
543
  if (op->IsConstantOperand()) {
417
544
  LConstantOperand* const_op = LConstantOperand::cast(op);
418
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
545
+ HConstant* constant = chunk()->LookupConstant(const_op);
419
546
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
420
547
  if (r.IsInteger32()) {
421
- ASSERT(literal->IsNumber());
422
- return Operand(static_cast<int32_t>(literal->Number()));
548
+ ASSERT(constant->HasInteger32Value());
549
+ return Operand(constant->Integer32Value());
423
550
  } else if (r.IsDouble()) {
424
551
  Abort("ToOperand Unsupported double immediate.");
425
552
  }
426
553
  ASSERT(r.IsTagged());
427
- return Operand(literal);
554
+ return Operand(constant->handle());
428
555
  } else if (op->IsRegister()) {
429
556
  return Operand(ToRegister(op));
430
557
  } else if (op->IsDoubleRegister()) {
431
558
  Abort("ToOperand IsDoubleRegister unimplemented");
432
- return Operand(0);
559
+ return Operand::Zero();
433
560
  }
434
561
  // Stack slots not implemented, use ToMemOperand instead.
435
562
  UNREACHABLE();
436
- return Operand(0);
563
+ return Operand::Zero();
437
564
  }
438
565
 
439
566
 
@@ -469,7 +596,9 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
469
596
 
470
597
 
471
598
  void LCodeGen::WriteTranslation(LEnvironment* environment,
472
- Translation* translation) {
599
+ Translation* translation,
600
+ int* arguments_index,
601
+ int* arguments_count) {
473
602
  if (environment == NULL) return;
474
603
 
475
604
  // The translation includes one command per value in the environment.
@@ -477,8 +606,23 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
477
606
  // The output frame height does not include the parameters.
478
607
  int height = translation_size - environment->parameter_count();
479
608
 
480
- WriteTranslation(environment->outer(), translation);
481
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
609
+ // Function parameters are arguments to the outermost environment. The
610
+ // arguments index points to the first element of a sequence of tagged
611
+ // values on the stack that represent the arguments. This needs to be
612
+ // kept in sync with the LArgumentsElements implementation.
613
+ *arguments_index = -environment->parameter_count();
614
+ *arguments_count = environment->parameter_count();
615
+
616
+ WriteTranslation(environment->outer(),
617
+ translation,
618
+ arguments_index,
619
+ arguments_count);
620
+ bool has_closure_id = !info()->closure().is_null() &&
621
+ *info()->closure() != *environment->closure();
622
+ int closure_id = has_closure_id
623
+ ? DefineDeoptimizationLiteral(environment->closure())
624
+ : Translation::kSelfLiteralId;
625
+
482
626
  switch (environment->frame_type()) {
483
627
  case JS_FUNCTION:
484
628
  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -486,12 +630,34 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
486
630
  case JS_CONSTRUCT:
487
631
  translation->BeginConstructStubFrame(closure_id, translation_size);
488
632
  break;
633
+ case JS_GETTER:
634
+ ASSERT(translation_size == 1);
635
+ ASSERT(height == 0);
636
+ translation->BeginGetterStubFrame(closure_id);
637
+ break;
638
+ case JS_SETTER:
639
+ ASSERT(translation_size == 2);
640
+ ASSERT(height == 0);
641
+ translation->BeginSetterStubFrame(closure_id);
642
+ break;
643
+ case STUB:
644
+ translation->BeginCompiledStubFrame();
645
+ break;
489
646
  case ARGUMENTS_ADAPTOR:
490
647
  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
491
648
  break;
492
- default:
493
- UNREACHABLE();
494
649
  }
650
+
651
+ // Inlined frames which push their arguments cause the index to be
652
+ // bumped and a new stack area to be used for materialization.
653
+ if (environment->entry() != NULL &&
654
+ environment->entry()->arguments_pushed()) {
655
+ *arguments_index = *arguments_index < 0
656
+ ? GetStackSlotCount()
657
+ : *arguments_index + *arguments_count;
658
+ *arguments_count = environment->entry()->arguments_count() + 1;
659
+ }
660
+
495
661
  for (int i = 0; i < translation_size; ++i) {
496
662
  LOperand* value = environment->values()->at(i);
497
663
  // spilled_registers_ and spilled_double_registers_ are either
@@ -502,7 +668,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
502
668
  translation->MarkDuplicate();
503
669
  AddToTranslation(translation,
504
670
  environment->spilled_registers()[value->index()],
505
- environment->HasTaggedValueAt(i));
671
+ environment->HasTaggedValueAt(i),
672
+ environment->HasUint32ValueAt(i),
673
+ *arguments_index,
674
+ *arguments_count);
506
675
  } else if (
507
676
  value->IsDoubleRegister() &&
508
677
  environment->spilled_double_registers()[value->index()] != NULL) {
@@ -510,26 +679,39 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
510
679
  AddToTranslation(
511
680
  translation,
512
681
  environment->spilled_double_registers()[value->index()],
513
- false);
682
+ false,
683
+ false,
684
+ *arguments_index,
685
+ *arguments_count);
514
686
  }
515
687
  }
516
688
 
517
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
689
+ AddToTranslation(translation,
690
+ value,
691
+ environment->HasTaggedValueAt(i),
692
+ environment->HasUint32ValueAt(i),
693
+ *arguments_index,
694
+ *arguments_count);
518
695
  }
519
696
  }
520
697
 
521
698
 
522
699
  void LCodeGen::AddToTranslation(Translation* translation,
523
700
  LOperand* op,
524
- bool is_tagged) {
701
+ bool is_tagged,
702
+ bool is_uint32,
703
+ int arguments_index,
704
+ int arguments_count) {
525
705
  if (op == NULL) {
526
706
  // TODO(twuerthinger): Introduce marker operands to indicate that this value
527
707
  // is not present and must be reconstructed from the deoptimizer. Currently
528
708
  // this is only used for the arguments object.
529
- translation->StoreArgumentsObject();
709
+ translation->StoreArgumentsObject(arguments_index, arguments_count);
530
710
  } else if (op->IsStackSlot()) {
531
711
  if (is_tagged) {
532
712
  translation->StoreStackSlot(op->index());
713
+ } else if (is_uint32) {
714
+ translation->StoreUint32StackSlot(op->index());
533
715
  } else {
534
716
  translation->StoreInt32StackSlot(op->index());
535
717
  }
@@ -543,6 +725,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
543
725
  Register reg = ToRegister(op);
544
726
  if (is_tagged) {
545
727
  translation->StoreRegister(reg);
728
+ } else if (is_uint32) {
729
+ translation->StoreUint32Register(reg);
546
730
  } else {
547
731
  translation->StoreInt32Register(reg);
548
732
  }
@@ -550,8 +734,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
550
734
  DoubleRegister reg = ToDoubleRegister(op);
551
735
  translation->StoreDoubleRegister(reg);
552
736
  } else if (op->IsConstantOperand()) {
553
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
554
- int src_index = DefineDeoptimizationLiteral(literal);
737
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
738
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
555
739
  translation->StoreLiteral(src_index);
556
740
  } else {
557
741
  UNREACHABLE();
@@ -561,19 +745,24 @@ void LCodeGen::AddToTranslation(Translation* translation,
561
745
 
562
746
  void LCodeGen::CallCode(Handle<Code> code,
563
747
  RelocInfo::Mode mode,
564
- LInstruction* instr) {
565
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
748
+ LInstruction* instr,
749
+ TargetAddressStorageMode storage_mode) {
750
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
566
751
  }
567
752
 
568
753
 
569
754
  void LCodeGen::CallCodeGeneric(Handle<Code> code,
570
755
  RelocInfo::Mode mode,
571
756
  LInstruction* instr,
572
- SafepointMode safepoint_mode) {
757
+ SafepointMode safepoint_mode,
758
+ TargetAddressStorageMode storage_mode) {
573
759
  ASSERT(instr != NULL);
760
+ // Block literal pool emission to ensure nop indicating no inlined smi code
761
+ // is in the correct position.
762
+ Assembler::BlockConstPoolScope block_const_pool(masm());
574
763
  LPointerMap* pointers = instr->pointer_map();
575
764
  RecordPosition(pointers->position());
576
- __ Call(code, mode);
765
+ __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
577
766
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
578
767
 
579
768
  // Signal that we don't inline smi code before these stubs in the
@@ -625,20 +814,22 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
625
814
 
626
815
  int frame_count = 0;
627
816
  int jsframe_count = 0;
817
+ int args_index = 0;
818
+ int args_count = 0;
628
819
  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
629
820
  ++frame_count;
630
821
  if (e->frame_type() == JS_FUNCTION) {
631
822
  ++jsframe_count;
632
823
  }
633
824
  }
634
- Translation translation(&translations_, frame_count, jsframe_count);
635
- WriteTranslation(environment, &translation);
825
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
826
+ WriteTranslation(environment, &translation, &args_index, &args_count);
636
827
  int deoptimization_index = deoptimizations_.length();
637
828
  int pc_offset = masm()->pc_offset();
638
829
  environment->Register(deoptimization_index,
639
830
  translation.index(),
640
831
  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
641
- deoptimizations_.Add(environment);
832
+ deoptimizations_.Add(environment, zone());
642
833
  }
643
834
  }
644
835
 
@@ -647,7 +838,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
647
838
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
648
839
  ASSERT(environment->HasBeenRegistered());
649
840
  int id = environment->deoptimization_index();
650
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
841
+
842
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
843
+ ? Deoptimizer::LAZY
844
+ : Deoptimizer::EAGER;
845
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
651
846
  if (entry == NULL) {
652
847
  Abort("bailout was not prepared");
653
848
  return;
@@ -655,22 +850,26 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
655
850
 
656
851
  ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
657
852
 
658
- if (FLAG_deopt_every_n_times == 1 &&
659
- info_->shared_info()->opt_count() == id) {
853
+ if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
660
854
  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
661
855
  return;
662
856
  }
663
857
 
664
858
  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
665
859
 
666
- if (cc == al) {
860
+ bool needs_lazy_deopt = info()->IsStub();
861
+ ASSERT(info()->IsStub() || frame_is_built_);
862
+ if (cc == al && !needs_lazy_deopt) {
667
863
  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
668
864
  } else {
669
865
  // We often have several deopts to the same entry, reuse the last
670
866
  // jump entry if this is the case.
671
867
  if (deopt_jump_table_.is_empty() ||
672
- (deopt_jump_table_.last().address != entry)) {
673
- deopt_jump_table_.Add(JumpTableEntry(entry));
868
+ (deopt_jump_table_.last().address != entry) ||
869
+ (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
870
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
871
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
872
+ deopt_jump_table_.Add(table_entry, zone());
674
873
  }
675
874
  __ b(cc, &deopt_jump_table_.last().label);
676
875
  }
@@ -694,13 +893,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
694
893
  }
695
894
  data->SetLiteralArray(*literals);
696
895
 
697
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
896
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
698
897
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
699
898
 
700
899
  // Populate the deoptimization entries.
701
900
  for (int i = 0; i < length; i++) {
702
901
  LEnvironment* env = deoptimizations_[i];
703
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
902
+ data->SetAstId(i, env->ast_id());
704
903
  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
705
904
  data->SetArgumentsStackHeight(i,
706
905
  Smi::FromInt(env->arguments_stack_height()));
@@ -715,7 +914,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
715
914
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
716
915
  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
717
916
  }
718
- deoptimization_literals_.Add(literal);
917
+ deoptimization_literals_.Add(literal, zone());
719
918
  return result;
720
919
  }
721
920
 
@@ -761,14 +960,14 @@ void LCodeGen::RecordSafepoint(
761
960
  for (int i = 0; i < operands->length(); i++) {
762
961
  LOperand* pointer = operands->at(i);
763
962
  if (pointer->IsStackSlot()) {
764
- safepoint.DefinePointerSlot(pointer->index());
963
+ safepoint.DefinePointerSlot(pointer->index(), zone());
765
964
  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
766
- safepoint.DefinePointerRegister(ToRegister(pointer));
965
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
767
966
  }
768
967
  }
769
968
  if (kind & Safepoint::kWithRegisters) {
770
969
  // Register cp always contains a pointer to the context.
771
- safepoint.DefinePointerRegister(cp);
970
+ safepoint.DefinePointerRegister(cp, zone());
772
971
  }
773
972
  }
774
973
 
@@ -780,7 +979,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
780
979
 
781
980
 
782
981
  void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
783
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
982
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
784
983
  RecordSafepoint(&empty_pointers, deopt_mode);
785
984
  }
786
985
 
@@ -899,7 +1098,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
899
1098
 
900
1099
  void LCodeGen::DoModI(LModI* instr) {
901
1100
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
902
- Register dividend = ToRegister(instr->InputAt(0));
1101
+ Register dividend = ToRegister(instr->left());
903
1102
  Register result = ToRegister(instr->result());
904
1103
 
905
1104
  int32_t divisor =
@@ -908,14 +1107,14 @@ void LCodeGen::DoModI(LModI* instr) {
908
1107
  if (divisor < 0) divisor = -divisor;
909
1108
 
910
1109
  Label positive_dividend, done;
911
- __ cmp(dividend, Operand(0));
1110
+ __ cmp(dividend, Operand::Zero());
912
1111
  __ b(pl, &positive_dividend);
913
- __ rsb(result, dividend, Operand(0));
1112
+ __ rsb(result, dividend, Operand::Zero());
914
1113
  __ and_(result, result, Operand(divisor - 1), SetCC);
915
1114
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
916
1115
  DeoptimizeIf(eq, instr->environment());
917
1116
  }
918
- __ rsb(result, result, Operand(0));
1117
+ __ rsb(result, result, Operand::Zero());
919
1118
  __ b(&done);
920
1119
  __ bind(&positive_dividend);
921
1120
  __ and_(result, dividend, Operand(divisor - 1));
@@ -924,112 +1123,147 @@ void LCodeGen::DoModI(LModI* instr) {
924
1123
  }
925
1124
 
926
1125
  // These registers hold untagged 32 bit values.
927
- Register left = ToRegister(instr->InputAt(0));
928
- Register right = ToRegister(instr->InputAt(1));
1126
+ Register left = ToRegister(instr->left());
1127
+ Register right = ToRegister(instr->right());
929
1128
  Register result = ToRegister(instr->result());
1129
+ Label done;
930
1130
 
931
- Register scratch = scratch0();
932
- Register scratch2 = ToRegister(instr->TempAt(0));
933
- DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
934
- DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
935
- DwVfpRegister quotient = double_scratch0();
1131
+ if (CpuFeatures::IsSupported(SUDIV)) {
1132
+ CpuFeatures::Scope scope(SUDIV);
1133
+ // Check for x % 0.
1134
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1135
+ __ cmp(right, Operand::Zero());
1136
+ DeoptimizeIf(eq, instr->environment());
1137
+ }
936
1138
 
937
- ASSERT(!dividend.is(divisor));
938
- ASSERT(!dividend.is(quotient));
939
- ASSERT(!divisor.is(quotient));
940
- ASSERT(!scratch.is(left));
941
- ASSERT(!scratch.is(right));
942
- ASSERT(!scratch.is(result));
1139
+ // Check for (kMinInt % -1).
1140
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1141
+ Label left_not_min_int;
1142
+ __ cmp(left, Operand(kMinInt));
1143
+ __ b(ne, &left_not_min_int);
1144
+ __ cmp(right, Operand(-1));
1145
+ DeoptimizeIf(eq, instr->environment());
1146
+ __ bind(&left_not_min_int);
1147
+ }
943
1148
 
944
- Label done, vfp_modulo, both_positive, right_negative;
1149
+ // For r3 = r1 % r2; we can have the following ARM code
1150
+ // sdiv r3, r1, r2
1151
+ // mls r3, r3, r2, r1
945
1152
 
946
- // Check for x % 0.
947
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
948
- __ cmp(right, Operand(0));
949
- DeoptimizeIf(eq, instr->environment());
950
- }
1153
+ __ sdiv(result, left, right);
1154
+ __ mls(result, result, right, left);
1155
+ __ cmp(result, Operand::Zero());
1156
+ __ b(ne, &done);
951
1157
 
952
- __ Move(result, left);
1158
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1159
+ __ cmp(left, Operand::Zero());
1160
+ DeoptimizeIf(lt, instr->environment());
1161
+ }
1162
+ } else {
1163
+ Register scratch = scratch0();
1164
+ Register scratch2 = ToRegister(instr->temp());
1165
+ DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
1166
+ DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
1167
+ DwVfpRegister quotient = double_scratch0();
953
1168
 
954
- // (0 % x) must yield 0 (if x is finite, which is the case here).
955
- __ cmp(left, Operand(0));
956
- __ b(eq, &done);
957
- // Preload right in a vfp register.
958
- __ vmov(divisor.low(), right);
959
- __ b(lt, &vfp_modulo);
1169
+ ASSERT(!dividend.is(divisor));
1170
+ ASSERT(!dividend.is(quotient));
1171
+ ASSERT(!divisor.is(quotient));
1172
+ ASSERT(!scratch.is(left));
1173
+ ASSERT(!scratch.is(right));
1174
+ ASSERT(!scratch.is(result));
960
1175
 
961
- __ cmp(left, Operand(right));
962
- __ b(lt, &done);
963
-
964
- // Check for (positive) power of two on the right hand side.
965
- __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
966
- scratch,
967
- &right_negative,
968
- &both_positive);
969
- // Perform modulo operation (scratch contains right - 1).
970
- __ and_(result, scratch, Operand(left));
971
- __ b(&done);
1176
+ Label vfp_modulo, both_positive, right_negative;
1177
+
1178
+ CpuFeatures::Scope scope(VFP2);
972
1179
 
973
- __ bind(&right_negative);
974
- // Negate right. The sign of the divisor does not matter.
975
- __ rsb(right, right, Operand(0));
976
-
977
- __ bind(&both_positive);
978
- const int kUnfolds = 3;
979
- // If the right hand side is smaller than the (nonnegative)
980
- // left hand side, the left hand side is the result.
981
- // Else try a few subtractions of the left hand side.
982
- __ mov(scratch, left);
983
- for (int i = 0; i < kUnfolds; i++) {
984
- // Check if the left hand side is less or equal than the
985
- // the right hand side.
986
- __ cmp(scratch, Operand(right));
987
- __ mov(result, scratch, LeaveCC, lt);
1180
+ // Check for x % 0.
1181
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1182
+ __ cmp(right, Operand::Zero());
1183
+ DeoptimizeIf(eq, instr->environment());
1184
+ }
1185
+
1186
+ __ Move(result, left);
1187
+
1188
+ // (0 % x) must yield 0 (if x is finite, which is the case here).
1189
+ __ cmp(left, Operand::Zero());
1190
+ __ b(eq, &done);
1191
+ // Preload right in a vfp register.
1192
+ __ vmov(divisor.low(), right);
1193
+ __ b(lt, &vfp_modulo);
1194
+
1195
+ __ cmp(left, Operand(right));
988
1196
  __ b(lt, &done);
989
- // If not, reduce the left hand side by the right hand
990
- // side and check again.
991
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
992
- }
993
-
994
- __ bind(&vfp_modulo);
995
- // Load the arguments in VFP registers.
996
- // The divisor value is preloaded before. Be careful that 'right' is only live
997
- // on entry.
998
- __ vmov(dividend.low(), left);
999
- // From here on don't use right as it may have been reallocated (for example
1000
- // to scratch2).
1001
- right = no_reg;
1002
-
1003
- __ vcvt_f64_s32(dividend, dividend.low());
1004
- __ vcvt_f64_s32(divisor, divisor.low());
1005
-
1006
- // We do not care about the sign of the divisor.
1007
- __ vabs(divisor, divisor);
1008
- // Compute the quotient and round it to a 32bit integer.
1009
- __ vdiv(quotient, dividend, divisor);
1010
- __ vcvt_s32_f64(quotient.low(), quotient);
1011
- __ vcvt_f64_s32(quotient, quotient.low());
1012
-
1013
- // Compute the remainder in result.
1014
- DwVfpRegister double_scratch = dividend;
1015
- __ vmul(double_scratch, divisor, quotient);
1016
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
1017
- __ vmov(scratch, double_scratch.low());
1018
-
1019
- if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1020
- __ sub(result, left, scratch);
1021
- } else {
1022
- Label ok;
1023
- // Check for -0.
1024
- __ sub(scratch2, left, scratch, SetCC);
1025
- __ b(ne, &ok);
1026
- __ cmp(left, Operand(0));
1027
- DeoptimizeIf(mi, instr->environment());
1028
- __ bind(&ok);
1029
- // Load the result and we are done.
1030
- __ mov(result, scratch2);
1031
- }
1032
1197
 
1198
+ // Check for (positive) power of two on the right hand side.
1199
+ __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
1200
+ scratch,
1201
+ &right_negative,
1202
+ &both_positive);
1203
+ // Perform modulo operation (scratch contains right - 1).
1204
+ __ and_(result, scratch, Operand(left));
1205
+ __ b(&done);
1206
+
1207
+ __ bind(&right_negative);
1208
+ // Negate right. The sign of the divisor does not matter.
1209
+ __ rsb(right, right, Operand::Zero());
1210
+
1211
+ __ bind(&both_positive);
1212
+ const int kUnfolds = 3;
1213
+ // If the right hand side is smaller than the (nonnegative)
1214
+ // left hand side, the left hand side is the result.
1215
+ // Else try a few subtractions of the left hand side.
1216
+ __ mov(scratch, left);
1217
+ for (int i = 0; i < kUnfolds; i++) {
1218
+ // Check if the left hand side is less or equal than the
1219
+ // the right hand side.
1220
+ __ cmp(scratch, Operand(right));
1221
+ __ mov(result, scratch, LeaveCC, lt);
1222
+ __ b(lt, &done);
1223
+ // If not, reduce the left hand side by the right hand
1224
+ // side and check again.
1225
+ if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
1226
+ }
1227
+
1228
+ __ bind(&vfp_modulo);
1229
+ // Load the arguments in VFP registers.
1230
+ // The divisor value is preloaded before. Be careful that 'right'
1231
+ // is only live on entry.
1232
+ __ vmov(dividend.low(), left);
1233
+ // From here on don't use right as it may have been reallocated
1234
+ // (for example to scratch2).
1235
+ right = no_reg;
1236
+
1237
+ __ vcvt_f64_s32(dividend, dividend.low());
1238
+ __ vcvt_f64_s32(divisor, divisor.low());
1239
+
1240
+ // We do not care about the sign of the divisor.
1241
+ __ vabs(divisor, divisor);
1242
+ // Compute the quotient and round it to a 32bit integer.
1243
+ __ vdiv(quotient, dividend, divisor);
1244
+ __ vcvt_s32_f64(quotient.low(), quotient);
1245
+ __ vcvt_f64_s32(quotient, quotient.low());
1246
+
1247
+ // Compute the remainder in result.
1248
+ DwVfpRegister double_scratch = dividend;
1249
+ __ vmul(double_scratch, divisor, quotient);
1250
+ __ vcvt_s32_f64(double_scratch.low(), double_scratch);
1251
+ __ vmov(scratch, double_scratch.low());
1252
+
1253
+ if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1254
+ __ sub(result, left, scratch);
1255
+ } else {
1256
+ Label ok;
1257
+ // Check for -0.
1258
+ __ sub(scratch2, left, scratch, SetCC);
1259
+ __ b(ne, &ok);
1260
+ __ cmp(left, Operand::Zero());
1261
+ DeoptimizeIf(mi, instr->environment());
1262
+ __ bind(&ok);
1263
+ // Load the result and we are done.
1264
+ __ mov(result, scratch2);
1265
+ }
1266
+ }
1033
1267
  __ bind(&done);
1034
1268
  }
1035
1269
 
@@ -1058,11 +1292,11 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
1058
1292
  if (divisor > 0) {
1059
1293
  __ Move(result, dividend);
1060
1294
  } else {
1061
- __ rsb(result, dividend, Operand(0), SetCC);
1295
+ __ rsb(result, dividend, Operand::Zero(), SetCC);
1062
1296
  DeoptimizeIf(vs, environment);
1063
1297
  }
1064
1298
  // Compute the remainder.
1065
- __ mov(remainder, Operand(0));
1299
+ __ mov(remainder, Operand::Zero());
1066
1300
  return;
1067
1301
 
1068
1302
  default:
@@ -1080,7 +1314,7 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
1080
1314
  // handled separately.
1081
1315
  if (divisor < 0) {
1082
1316
  ASSERT(divisor != -1);
1083
- __ rsb(result, result, Operand(0));
1317
+ __ rsb(result, result, Operand::Zero());
1084
1318
  }
1085
1319
  // Compute the remainder.
1086
1320
  if (divisor > 0) {
@@ -1116,7 +1350,7 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
1116
1350
  __ mov(scratch, Operand(scratch, ASR, s));
1117
1351
  }
1118
1352
  __ add(result, scratch, Operand(dividend, LSR, 31));
1119
- if (divisor < 0) __ rsb(result, result, Operand(0));
1353
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
1120
1354
  // Compute the remainder.
1121
1355
  __ mov(ip, Operand(divisor));
1122
1356
  // This sequence could be replaced with 'mls' when
@@ -1134,35 +1368,38 @@ void LCodeGen::DoDivI(LDivI* instr) {
1134
1368
  DeferredDivI(LCodeGen* codegen, LDivI* instr)
1135
1369
  : LDeferredCode(codegen), instr_(instr) { }
1136
1370
  virtual void Generate() {
1137
- codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
1371
+ codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
1372
+ instr_->left(),
1373
+ instr_->right(),
1374
+ Token::DIV);
1138
1375
  }
1139
1376
  virtual LInstruction* instr() { return instr_; }
1140
1377
  private:
1141
1378
  LDivI* instr_;
1142
1379
  };
1143
1380
 
1144
- const Register left = ToRegister(instr->InputAt(0));
1145
- const Register right = ToRegister(instr->InputAt(1));
1381
+ const Register left = ToRegister(instr->left());
1382
+ const Register right = ToRegister(instr->right());
1146
1383
  const Register scratch = scratch0();
1147
1384
  const Register result = ToRegister(instr->result());
1148
1385
 
1149
1386
  // Check for x / 0.
1150
1387
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1151
- __ cmp(right, Operand(0));
1388
+ __ cmp(right, Operand::Zero());
1152
1389
  DeoptimizeIf(eq, instr->environment());
1153
1390
  }
1154
1391
 
1155
1392
  // Check for (0 / -x) that will produce negative zero.
1156
1393
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1157
1394
  Label left_not_zero;
1158
- __ cmp(left, Operand(0));
1395
+ __ cmp(left, Operand::Zero());
1159
1396
  __ b(ne, &left_not_zero);
1160
- __ cmp(right, Operand(0));
1397
+ __ cmp(right, Operand::Zero());
1161
1398
  DeoptimizeIf(mi, instr->environment());
1162
1399
  __ bind(&left_not_zero);
1163
1400
  }
1164
1401
 
1165
- // Check for (-kMinInt / -1).
1402
+ // Check for (kMinInt / -1).
1166
1403
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1167
1404
  Label left_not_min_int;
1168
1405
  __ cmp(left, Operand(kMinInt));
@@ -1190,7 +1427,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
1190
1427
 
1191
1428
  // Call the stub. The numbers in r0 and r1 have
1192
1429
  // to be tagged to Smis. If that is not possible, deoptimize.
1193
- DeferredDivI* deferred = new DeferredDivI(this, instr);
1430
+ DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
1194
1431
 
1195
1432
  __ TrySmiTag(left, &deoptimize, scratch);
1196
1433
  __ TrySmiTag(right, &deoptimize, scratch);
@@ -1209,39 +1446,96 @@ void LCodeGen::DoDivI(LDivI* instr) {
1209
1446
  }
1210
1447
 
1211
1448
 
1449
+ void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1450
+ DwVfpRegister addend = ToDoubleRegister(instr->addend());
1451
+ DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1452
+ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1453
+
1454
+ // This is computed in-place.
1455
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
1456
+
1457
+ __ vmla(addend, multiplier, multiplicand);
1458
+ }
1459
+
1460
+
1212
1461
  void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
1213
1462
  const Register result = ToRegister(instr->result());
1214
- const Register left = ToRegister(instr->InputAt(0));
1215
- const Register remainder = ToRegister(instr->TempAt(0));
1463
+ const Register left = ToRegister(instr->left());
1464
+ const Register remainder = ToRegister(instr->temp());
1216
1465
  const Register scratch = scratch0();
1217
1466
 
1218
- // We only optimize this for division by constants, because the standard
1219
- // integer division routine is usually slower than transitionning to VFP.
1220
- // This could be optimized on processors with SDIV available.
1221
- ASSERT(instr->InputAt(1)->IsConstantOperand());
1222
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
1223
- if (divisor < 0) {
1224
- __ cmp(left, Operand(0));
1467
+ if (!CpuFeatures::IsSupported(SUDIV)) {
1468
+ // If the CPU doesn't support sdiv instruction, we only optimize when we
1469
+ // have magic numbers for the divisor. The standard integer division routine
1470
+ // is usually slower than transitionning to VFP.
1471
+ ASSERT(instr->right()->IsConstantOperand());
1472
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
1473
+ ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
1474
+ if (divisor < 0) {
1475
+ __ cmp(left, Operand::Zero());
1476
+ DeoptimizeIf(eq, instr->environment());
1477
+ }
1478
+ EmitSignedIntegerDivisionByConstant(result,
1479
+ left,
1480
+ divisor,
1481
+ remainder,
1482
+ scratch,
1483
+ instr->environment());
1484
+ // We performed a truncating division. Correct the result if necessary.
1485
+ __ cmp(remainder, Operand::Zero());
1486
+ __ teq(remainder, Operand(divisor), ne);
1487
+ __ sub(result, result, Operand(1), LeaveCC, mi);
1488
+ } else {
1489
+ CpuFeatures::Scope scope(SUDIV);
1490
+ const Register right = ToRegister(instr->right());
1491
+
1492
+ // Check for x / 0.
1493
+ __ cmp(right, Operand::Zero());
1225
1494
  DeoptimizeIf(eq, instr->environment());
1495
+
1496
+ // Check for (kMinInt / -1).
1497
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1498
+ Label left_not_min_int;
1499
+ __ cmp(left, Operand(kMinInt));
1500
+ __ b(ne, &left_not_min_int);
1501
+ __ cmp(right, Operand(-1));
1502
+ DeoptimizeIf(eq, instr->environment());
1503
+ __ bind(&left_not_min_int);
1504
+ }
1505
+
1506
+ // Check for (0 / -x) that will produce negative zero.
1507
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1508
+ __ cmp(right, Operand::Zero());
1509
+ __ cmp(left, Operand::Zero(), mi);
1510
+ // "right" can't be null because the code would have already been
1511
+ // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
1512
+ // In this case we need to deoptimize to produce a -0.
1513
+ DeoptimizeIf(eq, instr->environment());
1514
+ }
1515
+
1516
+ Label done;
1517
+ __ sdiv(result, left, right);
1518
+ // If both operands have the same sign then we are done.
1519
+ __ eor(remainder, left, Operand(right), SetCC);
1520
+ __ b(pl, &done);
1521
+
1522
+ // Check if the result needs to be corrected.
1523
+ __ mls(remainder, result, right, left);
1524
+ __ cmp(remainder, Operand::Zero());
1525
+ __ sub(result, result, Operand(1), LeaveCC, ne);
1526
+
1527
+ __ bind(&done);
1226
1528
  }
1227
- EmitSignedIntegerDivisionByConstant(result,
1228
- left,
1229
- divisor,
1230
- remainder,
1231
- scratch,
1232
- instr->environment());
1233
- // We operated a truncating division. Correct the result if necessary.
1234
- __ cmp(remainder, Operand(0));
1235
- __ teq(remainder, Operand(divisor), ne);
1236
- __ sub(result, result, Operand(1), LeaveCC, mi);
1237
1529
  }
1238
1530
 
1239
1531
 
1240
- template<int T>
1241
- void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
1532
+ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
1533
+ LOperand* left_argument,
1534
+ LOperand* right_argument,
1242
1535
  Token::Value op) {
1243
- Register left = ToRegister(instr->InputAt(0));
1244
- Register right = ToRegister(instr->InputAt(1));
1536
+ CpuFeatures::Scope vfp_scope(VFP2);
1537
+ Register left = ToRegister(left_argument);
1538
+ Register right = ToRegister(right_argument);
1245
1539
 
1246
1540
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
1247
1541
  // Move left to r1 and right to r0 for the stub call.
@@ -1260,7 +1554,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
1260
1554
  }
1261
1555
  BinaryOpStub stub(op, OVERWRITE_LEFT);
1262
1556
  __ CallStub(&stub);
1263
- RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
1557
+ RecordSafepointWithRegistersAndDoubles(pointer_map,
1264
1558
  0,
1265
1559
  Safepoint::kNoLazyDeopt);
1266
1560
  // Overwrite the stored value of r0 with the result of the stub.
@@ -1272,8 +1566,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
1272
1566
  Register scratch = scratch0();
1273
1567
  Register result = ToRegister(instr->result());
1274
1568
  // Note that result may alias left.
1275
- Register left = ToRegister(instr->InputAt(0));
1276
- LOperand* right_op = instr->InputAt(1);
1569
+ Register left = ToRegister(instr->left());
1570
+ LOperand* right_op = instr->right();
1277
1571
 
1278
1572
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1279
1573
  bool bailout_on_minus_zero =
@@ -1286,22 +1580,22 @@ void LCodeGen::DoMulI(LMulI* instr) {
1286
1580
  if (bailout_on_minus_zero && (constant < 0)) {
1287
1581
  // The case of a null constant will be handled separately.
1288
1582
  // If constant is negative and left is null, the result should be -0.
1289
- __ cmp(left, Operand(0));
1583
+ __ cmp(left, Operand::Zero());
1290
1584
  DeoptimizeIf(eq, instr->environment());
1291
1585
  }
1292
1586
 
1293
1587
  switch (constant) {
1294
1588
  case -1:
1295
- __ rsb(result, left, Operand(0));
1589
+ __ rsb(result, left, Operand::Zero());
1296
1590
  break;
1297
1591
  case 0:
1298
1592
  if (bailout_on_minus_zero) {
1299
1593
  // If left is strictly negative and the constant is null, the
1300
1594
  // result is -0. Deoptimize if required, otherwise return 0.
1301
- __ cmp(left, Operand(0));
1595
+ __ cmp(left, Operand::Zero());
1302
1596
  DeoptimizeIf(mi, instr->environment());
1303
1597
  }
1304
- __ mov(result, Operand(0));
1598
+ __ mov(result, Operand::Zero());
1305
1599
  break;
1306
1600
  case 1:
1307
1601
  __ Move(result, left);
@@ -1328,7 +1622,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
1328
1622
  }
1329
1623
 
1330
1624
  // Correct the sign of the result is the constant is negative.
1331
- if (constant < 0) __ rsb(result, result, Operand(0));
1625
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
1332
1626
 
1333
1627
  } else {
1334
1628
  // Generate standard code.
@@ -1340,7 +1634,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
1340
1634
  } else {
1341
1635
  Register right = EmitLoadRegister(right_op, scratch);
1342
1636
  if (bailout_on_minus_zero) {
1343
- __ orr(ToRegister(instr->TempAt(0)), left, right);
1637
+ __ orr(ToRegister(instr->temp()), left, right);
1344
1638
  }
1345
1639
 
1346
1640
  if (can_overflow) {
@@ -1355,9 +1649,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
1355
1649
  if (bailout_on_minus_zero) {
1356
1650
  // Bail out if the result is supposed to be negative zero.
1357
1651
  Label done;
1358
- __ cmp(result, Operand(0));
1652
+ __ cmp(result, Operand::Zero());
1359
1653
  __ b(ne, &done);
1360
- __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
1654
+ __ cmp(ToRegister(instr->temp()), Operand::Zero());
1361
1655
  DeoptimizeIf(mi, instr->environment());
1362
1656
  __ bind(&done);
1363
1657
  }
@@ -1366,8 +1660,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
1366
1660
 
1367
1661
 
1368
1662
  void LCodeGen::DoBitI(LBitI* instr) {
1369
- LOperand* left_op = instr->InputAt(0);
1370
- LOperand* right_op = instr->InputAt(1);
1663
+ LOperand* left_op = instr->left();
1664
+ LOperand* right_op = instr->right();
1371
1665
  ASSERT(left_op->IsRegister());
1372
1666
  Register left = ToRegister(left_op);
1373
1667
  Register result = ToRegister(instr->result());
@@ -1400,14 +1694,17 @@ void LCodeGen::DoBitI(LBitI* instr) {
1400
1694
  void LCodeGen::DoShiftI(LShiftI* instr) {
1401
1695
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1402
1696
  // result may alias either of them.
1403
- LOperand* right_op = instr->InputAt(1);
1404
- Register left = ToRegister(instr->InputAt(0));
1697
+ LOperand* right_op = instr->right();
1698
+ Register left = ToRegister(instr->left());
1405
1699
  Register result = ToRegister(instr->result());
1406
1700
  Register scratch = scratch0();
1407
1701
  if (right_op->IsRegister()) {
1408
1702
  // Mask the right_op operand.
1409
1703
  __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1410
1704
  switch (instr->op()) {
1705
+ case Token::ROR:
1706
+ __ mov(result, Operand(left, ROR, scratch));
1707
+ break;
1411
1708
  case Token::SAR:
1412
1709
  __ mov(result, Operand(left, ASR, scratch));
1413
1710
  break;
@@ -1431,6 +1728,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
1431
1728
  int value = ToInteger32(LConstantOperand::cast(right_op));
1432
1729
  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1433
1730
  switch (instr->op()) {
1731
+ case Token::ROR:
1732
+ if (shift_count != 0) {
1733
+ __ mov(result, Operand(left, ROR, shift_count));
1734
+ } else {
1735
+ __ Move(result, left);
1736
+ }
1737
+ break;
1434
1738
  case Token::SAR:
1435
1739
  if (shift_count != 0) {
1436
1740
  __ mov(result, Operand(left, ASR, shift_count));
@@ -1465,8 +1769,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
1465
1769
 
1466
1770
 
1467
1771
  void LCodeGen::DoSubI(LSubI* instr) {
1468
- LOperand* left = instr->InputAt(0);
1469
- LOperand* right = instr->InputAt(1);
1772
+ LOperand* left = instr->left();
1773
+ LOperand* right = instr->right();
1470
1774
  LOperand* result = instr->result();
1471
1775
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1472
1776
  SBit set_cond = can_overflow ? SetCC : LeaveCC;
@@ -1485,6 +1789,27 @@ void LCodeGen::DoSubI(LSubI* instr) {
1485
1789
  }
1486
1790
 
1487
1791
 
1792
+ void LCodeGen::DoRSubI(LRSubI* instr) {
1793
+ LOperand* left = instr->left();
1794
+ LOperand* right = instr->right();
1795
+ LOperand* result = instr->result();
1796
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1797
+ SBit set_cond = can_overflow ? SetCC : LeaveCC;
1798
+
1799
+ if (right->IsStackSlot() || right->IsArgument()) {
1800
+ Register right_reg = EmitLoadRegister(right, ip);
1801
+ __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1802
+ } else {
1803
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
1804
+ __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1805
+ }
1806
+
1807
+ if (can_overflow) {
1808
+ DeoptimizeIf(vs, instr->environment());
1809
+ }
1810
+ }
1811
+
1812
+
1488
1813
  void LCodeGen::DoConstantI(LConstantI* instr) {
1489
1814
  ASSERT(instr->result()->IsRegister());
1490
1815
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
@@ -1494,8 +1819,9 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
1494
1819
  void LCodeGen::DoConstantD(LConstantD* instr) {
1495
1820
  ASSERT(instr->result()->IsDoubleRegister());
1496
1821
  DwVfpRegister result = ToDoubleRegister(instr->result());
1822
+ CpuFeatures::Scope scope(VFP2);
1497
1823
  double v = instr->value();
1498
- __ Vmov(result, v);
1824
+ __ Vmov(result, v, scratch0());
1499
1825
  }
1500
1826
 
1501
1827
 
@@ -1512,21 +1838,28 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
1512
1838
 
1513
1839
  void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1514
1840
  Register result = ToRegister(instr->result());
1515
- Register array = ToRegister(instr->InputAt(0));
1841
+ Register array = ToRegister(instr->value());
1516
1842
  __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
1517
1843
  }
1518
1844
 
1519
1845
 
1520
1846
  void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1521
1847
  Register result = ToRegister(instr->result());
1522
- Register array = ToRegister(instr->InputAt(0));
1848
+ Register array = ToRegister(instr->value());
1523
1849
  __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1524
1850
  }
1525
1851
 
1526
1852
 
1853
+ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1854
+ Register result = ToRegister(instr->result());
1855
+ Register map = ToRegister(instr->value());
1856
+ __ EnumLength(result, map);
1857
+ }
1858
+
1859
+
1527
1860
  void LCodeGen::DoElementsKind(LElementsKind* instr) {
1528
1861
  Register result = ToRegister(instr->result());
1529
- Register input = ToRegister(instr->InputAt(0));
1862
+ Register input = ToRegister(instr->value());
1530
1863
 
1531
1864
  // Load map into |result|.
1532
1865
  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -1539,9 +1872,9 @@ void LCodeGen::DoElementsKind(LElementsKind* instr) {
1539
1872
 
1540
1873
 
1541
1874
  void LCodeGen::DoValueOf(LValueOf* instr) {
1542
- Register input = ToRegister(instr->InputAt(0));
1875
+ Register input = ToRegister(instr->value());
1543
1876
  Register result = ToRegister(instr->result());
1544
- Register map = ToRegister(instr->TempAt(0));
1877
+ Register map = ToRegister(instr->temp());
1545
1878
  Label done;
1546
1879
 
1547
1880
  // If the object is a smi return the object.
@@ -1560,9 +1893,9 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
1560
1893
 
1561
1894
 
1562
1895
  void LCodeGen::DoDateField(LDateField* instr) {
1563
- Register object = ToRegister(instr->InputAt(0));
1896
+ Register object = ToRegister(instr->date());
1564
1897
  Register result = ToRegister(instr->result());
1565
- Register scratch = ToRegister(instr->TempAt(0));
1898
+ Register scratch = ToRegister(instr->temp());
1566
1899
  Smi* index = instr->index();
1567
1900
  Label runtime, done;
1568
1901
  ASSERT(object.is(result));
@@ -1570,11 +1903,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
1570
1903
  ASSERT(!scratch.is(scratch0()));
1571
1904
  ASSERT(!scratch.is(object));
1572
1905
 
1573
- #ifdef DEBUG
1574
- __ AbortIfSmi(object);
1906
+ __ tst(object, Operand(kSmiTagMask));
1907
+ DeoptimizeIf(eq, instr->environment());
1575
1908
  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1576
- __ Assert(eq, "Trying to get date field from non-date.");
1577
- #endif
1909
+ DeoptimizeIf(ne, instr->environment());
1578
1910
 
1579
1911
  if (index->value() == 0) {
1580
1912
  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1599,15 +1931,24 @@ void LCodeGen::DoDateField(LDateField* instr) {
1599
1931
  }
1600
1932
 
1601
1933
 
1934
+ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1935
+ SeqStringSetCharGenerator::Generate(masm(),
1936
+ instr->encoding(),
1937
+ ToRegister(instr->string()),
1938
+ ToRegister(instr->index()),
1939
+ ToRegister(instr->value()));
1940
+ }
1941
+
1942
+
1602
1943
  void LCodeGen::DoBitNotI(LBitNotI* instr) {
1603
- Register input = ToRegister(instr->InputAt(0));
1944
+ Register input = ToRegister(instr->value());
1604
1945
  Register result = ToRegister(instr->result());
1605
1946
  __ mvn(result, Operand(input));
1606
1947
  }
1607
1948
 
1608
1949
 
1609
1950
  void LCodeGen::DoThrow(LThrow* instr) {
1610
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
1951
+ Register input_reg = EmitLoadRegister(instr->value(), ip);
1611
1952
  __ push(input_reg);
1612
1953
  CallRuntime(Runtime::kThrow, 1, instr);
1613
1954
 
@@ -1618,8 +1959,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
1618
1959
 
1619
1960
 
1620
1961
  void LCodeGen::DoAddI(LAddI* instr) {
1621
- LOperand* left = instr->InputAt(0);
1622
- LOperand* right = instr->InputAt(1);
1962
+ LOperand* left = instr->left();
1963
+ LOperand* right = instr->right();
1623
1964
  LOperand* result = instr->result();
1624
1965
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1625
1966
  SBit set_cond = can_overflow ? SetCC : LeaveCC;
@@ -1638,10 +1979,74 @@ void LCodeGen::DoAddI(LAddI* instr) {
1638
1979
  }
1639
1980
 
1640
1981
 
1982
+ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1983
+ LOperand* left = instr->left();
1984
+ LOperand* right = instr->right();
1985
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
1986
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1987
+ if (instr->hydrogen()->representation().IsInteger32()) {
1988
+ Register left_reg = ToRegister(left);
1989
+ Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1990
+ ? ToOperand(right)
1991
+ : Operand(EmitLoadRegister(right, ip));
1992
+ Register result_reg = ToRegister(instr->result());
1993
+ __ cmp(left_reg, right_op);
1994
+ if (!result_reg.is(left_reg)) {
1995
+ __ mov(result_reg, left_reg, LeaveCC, condition);
1996
+ }
1997
+ __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
1998
+ } else {
1999
+ ASSERT(instr->hydrogen()->representation().IsDouble());
2000
+ CpuFeatures::Scope scope(VFP2);
2001
+ DwVfpRegister left_reg = ToDoubleRegister(left);
2002
+ DwVfpRegister right_reg = ToDoubleRegister(right);
2003
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
2004
+ Label check_nan_left, check_zero, return_left, return_right, done;
2005
+ __ VFPCompareAndSetFlags(left_reg, right_reg);
2006
+ __ b(vs, &check_nan_left);
2007
+ __ b(eq, &check_zero);
2008
+ __ b(condition, &return_left);
2009
+ __ b(al, &return_right);
2010
+
2011
+ __ bind(&check_zero);
2012
+ __ VFPCompareAndSetFlags(left_reg, 0.0);
2013
+ __ b(ne, &return_left); // left == right != 0.
2014
+ // At this point, both left and right are either 0 or -0.
2015
+ if (operation == HMathMinMax::kMathMin) {
2016
+ // We could use a single 'vorr' instruction here if we had NEON support.
2017
+ __ vneg(left_reg, left_reg);
2018
+ __ vsub(result_reg, left_reg, right_reg);
2019
+ __ vneg(result_reg, result_reg);
2020
+ } else {
2021
+ // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2022
+ // the decision for vadd is easy because vand is a NEON instruction.
2023
+ __ vadd(result_reg, left_reg, right_reg);
2024
+ }
2025
+ __ b(al, &done);
2026
+
2027
+ __ bind(&check_nan_left);
2028
+ __ VFPCompareAndSetFlags(left_reg, left_reg);
2029
+ __ b(vs, &return_left); // left == NaN.
2030
+ __ bind(&return_right);
2031
+ if (!right_reg.is(result_reg)) {
2032
+ __ vmov(result_reg, right_reg);
2033
+ }
2034
+ __ b(al, &done);
2035
+
2036
+ __ bind(&return_left);
2037
+ if (!left_reg.is(result_reg)) {
2038
+ __ vmov(result_reg, left_reg);
2039
+ }
2040
+ __ bind(&done);
2041
+ }
2042
+ }
2043
+
2044
+
1641
2045
  void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1642
- DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1643
- DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1644
- DoubleRegister result = ToDoubleRegister(instr->result());
2046
+ CpuFeatures::Scope scope(VFP2);
2047
+ DwVfpRegister left = ToDoubleRegister(instr->left());
2048
+ DwVfpRegister right = ToDoubleRegister(instr->right());
2049
+ DwVfpRegister result = ToDoubleRegister(instr->result());
1645
2050
  switch (instr->op()) {
1646
2051
  case Token::ADD:
1647
2052
  __ vadd(result, left, right);
@@ -1679,11 +2084,14 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1679
2084
 
1680
2085
 
1681
2086
  void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1682
- ASSERT(ToRegister(instr->InputAt(0)).is(r1));
1683
- ASSERT(ToRegister(instr->InputAt(1)).is(r0));
2087
+ ASSERT(ToRegister(instr->left()).is(r1));
2088
+ ASSERT(ToRegister(instr->right()).is(r0));
1684
2089
  ASSERT(ToRegister(instr->result()).is(r0));
1685
2090
 
1686
2091
  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
2092
+ // Block literal pool emission to ensure nop indicating no inlined smi code
2093
+ // is in the correct position.
2094
+ Assembler::BlockConstPoolScope block_const_pool(masm());
1687
2095
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1688
2096
  __ nop(); // Signals no inlined code.
1689
2097
  }
@@ -1722,11 +2130,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
1722
2130
 
1723
2131
  Representation r = instr->hydrogen()->value()->representation();
1724
2132
  if (r.IsInteger32()) {
1725
- Register reg = ToRegister(instr->InputAt(0));
1726
- __ cmp(reg, Operand(0));
2133
+ Register reg = ToRegister(instr->value());
2134
+ __ cmp(reg, Operand::Zero());
1727
2135
  EmitBranch(true_block, false_block, ne);
1728
2136
  } else if (r.IsDouble()) {
1729
- DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
2137
+ CpuFeatures::Scope scope(VFP2);
2138
+ DwVfpRegister reg = ToDoubleRegister(instr->value());
1730
2139
  Register scratch = scratch0();
1731
2140
 
1732
2141
  // Test the double value. Zero and NaN are false.
@@ -1735,13 +2144,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
1735
2144
  EmitBranch(true_block, false_block, eq);
1736
2145
  } else {
1737
2146
  ASSERT(r.IsTagged());
1738
- Register reg = ToRegister(instr->InputAt(0));
2147
+ Register reg = ToRegister(instr->value());
1739
2148
  HType type = instr->hydrogen()->value()->type();
1740
2149
  if (type.IsBoolean()) {
1741
2150
  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1742
2151
  EmitBranch(true_block, false_block, eq);
1743
2152
  } else if (type.IsSmi()) {
1744
- __ cmp(reg, Operand(0));
2153
+ __ cmp(reg, Operand::Zero());
1745
2154
  EmitBranch(true_block, false_block, ne);
1746
2155
  } else {
1747
2156
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -1771,7 +2180,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
1771
2180
 
1772
2181
  if (expected.Contains(ToBooleanStub::SMI)) {
1773
2182
  // Smis: 0 -> false, all other -> true.
1774
- __ cmp(reg, Operand(0));
2183
+ __ cmp(reg, Operand::Zero());
1775
2184
  __ b(eq, false_label);
1776
2185
  __ JumpIfSmi(reg, true_label);
1777
2186
  } else if (expected.NeedsMap()) {
@@ -1804,15 +2213,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
1804
2213
  __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1805
2214
  __ b(ge, &not_string);
1806
2215
  __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
1807
- __ cmp(ip, Operand(0));
2216
+ __ cmp(ip, Operand::Zero());
1808
2217
  __ b(ne, true_label);
1809
2218
  __ b(false_label);
1810
2219
  __ bind(&not_string);
1811
2220
  }
1812
2221
 
1813
2222
  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2223
+ CpuFeatures::Scope scope(VFP2);
1814
2224
  // heap number -> false iff +0, -0, or NaN.
1815
- DoubleRegister dbl_scratch = double_scratch0();
2225
+ DwVfpRegister dbl_scratch = double_scratch0();
1816
2226
  Label not_heap_number;
1817
2227
  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1818
2228
  __ b(ne, &not_heap_number);
@@ -1874,8 +2284,8 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1874
2284
 
1875
2285
 
1876
2286
  void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1877
- LOperand* left = instr->InputAt(0);
1878
- LOperand* right = instr->InputAt(1);
2287
+ LOperand* left = instr->left();
2288
+ LOperand* right = instr->right();
1879
2289
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1880
2290
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1881
2291
  Condition cond = TokenToCondition(instr->op(), false);
@@ -1890,6 +2300,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1890
2300
  EmitGoto(next_block);
1891
2301
  } else {
1892
2302
  if (instr->is_double()) {
2303
+ CpuFeatures::Scope scope(VFP2);
1893
2304
  // Compare left and right operands as doubles and load the
1894
2305
  // resulting flags into the normal status register.
1895
2306
  __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@@ -1915,8 +2326,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1915
2326
 
1916
2327
 
1917
2328
  void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1918
- Register left = ToRegister(instr->InputAt(0));
1919
- Register right = ToRegister(instr->InputAt(1));
2329
+ Register left = ToRegister(instr->left());
2330
+ Register right = ToRegister(instr->right());
1920
2331
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1921
2332
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1922
2333
 
@@ -1926,7 +2337,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1926
2337
 
1927
2338
 
1928
2339
  void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1929
- Register left = ToRegister(instr->InputAt(0));
2340
+ Register left = ToRegister(instr->left());
1930
2341
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1931
2342
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1932
2343
 
@@ -1937,7 +2348,7 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1937
2348
 
1938
2349
  void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1939
2350
  Register scratch = scratch0();
1940
- Register reg = ToRegister(instr->InputAt(0));
2351
+ Register reg = ToRegister(instr->value());
1941
2352
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1942
2353
 
1943
2354
  // If the expression is known to be untagged or a smi, then it's definitely
@@ -2005,8 +2416,8 @@ Condition LCodeGen::EmitIsObject(Register input,
2005
2416
 
2006
2417
 
2007
2418
  void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2008
- Register reg = ToRegister(instr->InputAt(0));
2009
- Register temp1 = ToRegister(instr->TempAt(0));
2419
+ Register reg = ToRegister(instr->value());
2420
+ Register temp1 = ToRegister(instr->temp());
2010
2421
 
2011
2422
  int true_block = chunk_->LookupDestination(instr->true_block_id());
2012
2423
  int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2031,8 +2442,8 @@ Condition LCodeGen::EmitIsString(Register input,
2031
2442
 
2032
2443
 
2033
2444
  void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2034
- Register reg = ToRegister(instr->InputAt(0));
2035
- Register temp1 = ToRegister(instr->TempAt(0));
2445
+ Register reg = ToRegister(instr->value());
2446
+ Register temp1 = ToRegister(instr->temp());
2036
2447
 
2037
2448
  int true_block = chunk_->LookupDestination(instr->true_block_id());
2038
2449
  int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2049,15 +2460,15 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2049
2460
  int true_block = chunk_->LookupDestination(instr->true_block_id());
2050
2461
  int false_block = chunk_->LookupDestination(instr->false_block_id());
2051
2462
 
2052
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
2463
+ Register input_reg = EmitLoadRegister(instr->value(), ip);
2053
2464
  __ tst(input_reg, Operand(kSmiTagMask));
2054
2465
  EmitBranch(true_block, false_block, eq);
2055
2466
  }
2056
2467
 
2057
2468
 
2058
2469
  void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2059
- Register input = ToRegister(instr->InputAt(0));
2060
- Register temp = ToRegister(instr->TempAt(0));
2470
+ Register input = ToRegister(instr->value());
2471
+ Register temp = ToRegister(instr->temp());
2061
2472
 
2062
2473
  int true_block = chunk_->LookupDestination(instr->true_block_id());
2063
2474
  int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2097,7 +2508,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2097
2508
 
2098
2509
  Handle<Code> ic = CompareIC::GetUninitialized(op);
2099
2510
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2100
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2511
+ // This instruction also signals no smi code inlined.
2512
+ __ cmp(r0, Operand::Zero());
2101
2513
 
2102
2514
  Condition condition = ComputeCompareCondition(op);
2103
2515
 
@@ -2127,7 +2539,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2127
2539
 
2128
2540
  void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2129
2541
  Register scratch = scratch0();
2130
- Register input = ToRegister(instr->InputAt(0));
2542
+ Register input = ToRegister(instr->value());
2131
2543
 
2132
2544
  int true_block = chunk_->LookupDestination(instr->true_block_id());
2133
2545
  int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2142,12 +2554,10 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2142
2554
 
2143
2555
 
2144
2556
  void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2145
- Register input = ToRegister(instr->InputAt(0));
2557
+ Register input = ToRegister(instr->value());
2146
2558
  Register result = ToRegister(instr->result());
2147
2559
 
2148
- if (FLAG_debug_code) {
2149
- __ AbortIfNotString(input);
2150
- }
2560
+ __ AssertString(input);
2151
2561
 
2152
2562
  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2153
2563
  __ IndexFromHash(result, result);
@@ -2156,7 +2566,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2156
2566
 
2157
2567
  void LCodeGen::DoHasCachedArrayIndexAndBranch(
2158
2568
  LHasCachedArrayIndexAndBranch* instr) {
2159
- Register input = ToRegister(instr->InputAt(0));
2569
+ Register input = ToRegister(instr->value());
2160
2570
  Register scratch = scratch0();
2161
2571
 
2162
2572
  int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2183,7 +2593,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
2183
2593
 
2184
2594
  __ JumpIfSmi(input, is_false);
2185
2595
 
2186
- if (class_name->IsEqualTo(CStrVector("Function"))) {
2596
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2187
2597
  // Assuming the following assertions, we can use the same compares to test
2188
2598
  // for both being a function type and being in the object type range.
2189
2599
  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2214,7 +2624,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
2214
2624
 
2215
2625
  // Objects with a non-function constructor have class 'Object'.
2216
2626
  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2217
- if (class_name->IsEqualTo(CStrVector("Object"))) {
2627
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2218
2628
  __ b(ne, is_true);
2219
2629
  } else {
2220
2630
  __ b(ne, is_false);
@@ -2237,9 +2647,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
2237
2647
 
2238
2648
 
2239
2649
  void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2240
- Register input = ToRegister(instr->InputAt(0));
2650
+ Register input = ToRegister(instr->value());
2241
2651
  Register temp = scratch0();
2242
- Register temp2 = ToRegister(instr->TempAt(0));
2652
+ Register temp2 = ToRegister(instr->temp());
2243
2653
  Handle<String> class_name = instr->hydrogen()->class_name();
2244
2654
 
2245
2655
  int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2255,8 +2665,8 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2255
2665
 
2256
2666
 
2257
2667
  void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2258
- Register reg = ToRegister(instr->InputAt(0));
2259
- Register temp = ToRegister(instr->TempAt(0));
2668
+ Register reg = ToRegister(instr->value());
2669
+ Register temp = ToRegister(instr->temp());
2260
2670
  int true_block = instr->true_block_id();
2261
2671
  int false_block = instr->false_block_id();
2262
2672
 
@@ -2267,13 +2677,13 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2267
2677
 
2268
2678
 
2269
2679
  void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2270
- ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
2271
- ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
2680
+ ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
2681
+ ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
2272
2682
 
2273
2683
  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2274
2684
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2275
2685
 
2276
- __ cmp(r0, Operand(0));
2686
+ __ cmp(r0, Operand::Zero());
2277
2687
  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2278
2688
  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2279
2689
  }
@@ -2296,11 +2706,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2296
2706
  };
2297
2707
 
2298
2708
  DeferredInstanceOfKnownGlobal* deferred;
2299
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
2709
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2300
2710
 
2301
2711
  Label done, false_result;
2302
- Register object = ToRegister(instr->InputAt(0));
2303
- Register temp = ToRegister(instr->TempAt(0));
2712
+ Register object = ToRegister(instr->value());
2713
+ Register temp = ToRegister(instr->temp());
2304
2714
  Register result = ToRegister(instr->result());
2305
2715
 
2306
2716
  ASSERT(object.is(r0));
@@ -2315,20 +2725,26 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2315
2725
  Label cache_miss;
2316
2726
  Register map = temp;
2317
2727
  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2318
- __ bind(deferred->map_check()); // Label for calculating code patching.
2319
- // We use Factory::the_hole_value() on purpose instead of loading from the
2320
- // root array to force relocation to be able to later patch with
2321
- // the cached map.
2322
- Handle<JSGlobalPropertyCell> cell =
2323
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2324
- __ mov(ip, Operand(Handle<Object>(cell)));
2325
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2326
- __ cmp(map, Operand(ip));
2327
- __ b(ne, &cache_miss);
2328
- // We use Factory::the_hole_value() on purpose instead of loading from the
2329
- // root array to force relocation to be able to later patch
2330
- // with true or false.
2331
- __ mov(result, Operand(factory()->the_hole_value()));
2728
+ {
2729
+ // Block constant pool emission to ensure the positions of instructions are
2730
+ // as expected by the patcher. See InstanceofStub::Generate().
2731
+ Assembler::BlockConstPoolScope block_const_pool(masm());
2732
+ __ bind(deferred->map_check()); // Label for calculating code patching.
2733
+ // We use Factory::the_hole_value() on purpose instead of loading from the
2734
+ // root array to force relocation to be able to later patch with
2735
+ // the cached map.
2736
+ PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
2737
+ Handle<JSGlobalPropertyCell> cell =
2738
+ factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2739
+ __ mov(ip, Operand(Handle<Object>(cell)));
2740
+ __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2741
+ __ cmp(map, Operand(ip));
2742
+ __ b(ne, &cache_miss);
2743
+ // We use Factory::the_hole_value() on purpose instead of loading from the
2744
+ // root array to force relocation to be able to later patch
2745
+ // with true or false.
2746
+ __ mov(result, Operand(factory()->the_hole_value()));
2747
+ }
2332
2748
  __ b(&done);
2333
2749
 
2334
2750
  // The inlined call site cache did not match. Check null and string before
@@ -2375,15 +2791,24 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2375
2791
  // Get the temp register reserved by the instruction. This needs to be r4 as
2376
2792
  // its slot of the pushing of safepoint registers is used to communicate the
2377
2793
  // offset to the location of the map check.
2378
- Register temp = ToRegister(instr->TempAt(0));
2794
+ Register temp = ToRegister(instr->temp());
2379
2795
  ASSERT(temp.is(r4));
2380
2796
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2381
- static const int kAdditionalDelta = 4;
2797
+ static const int kAdditionalDelta = 5;
2798
+ // Make sure that code size is predicable, since we use specific constants
2799
+ // offsets in the code to find embedded values..
2800
+ PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
2382
2801
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2383
2802
  Label before_push_delta;
2384
2803
  __ bind(&before_push_delta);
2385
2804
  __ BlockConstPoolFor(kAdditionalDelta);
2386
2805
  __ mov(temp, Operand(delta * kPointerSize));
2806
+ // The mov above can generate one or two instructions. The delta was computed
2807
+ // for two instructions, so we need to pad here in case of one instruction.
2808
+ if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
2809
+ ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
2810
+ __ nop();
2811
+ }
2387
2812
  __ StoreToSafepointRegisterSlot(temp, temp);
2388
2813
  CallCodeGeneric(stub.GetCode(),
2389
2814
  RelocInfo::CODE_TARGET,
@@ -2402,7 +2827,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
2402
2827
 
2403
2828
  Handle<Code> ic = CompareIC::GetUninitialized(op);
2404
2829
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2405
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2830
+ // This instruction also signals no smi code inlined.
2831
+ __ cmp(r0, Operand::Zero());
2406
2832
 
2407
2833
  Condition condition = ComputeCompareCondition(op);
2408
2834
  __ LoadRoot(ToRegister(instr->result()),
@@ -2415,16 +2841,33 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
2415
2841
 
2416
2842
 
2417
2843
  void LCodeGen::DoReturn(LReturn* instr) {
2418
- if (FLAG_trace) {
2844
+ if (FLAG_trace && info()->IsOptimizing()) {
2419
2845
  // Push the return value on the stack as the parameter.
2420
2846
  // Runtime::TraceExit returns its parameter in r0.
2421
2847
  __ push(r0);
2422
2848
  __ CallRuntime(Runtime::kTraceExit, 1);
2423
2849
  }
2424
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2425
- __ mov(sp, fp);
2426
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
2427
- __ add(sp, sp, Operand(sp_delta));
2850
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
2851
+ CpuFeatures::Scope scope(VFP2);
2852
+ ASSERT(NeedsEagerFrame());
2853
+ BitVector* doubles = chunk()->allocated_double_registers();
2854
+ BitVector::Iterator save_iterator(doubles);
2855
+ int count = 0;
2856
+ while (!save_iterator.Done()) {
2857
+ __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
2858
+ MemOperand(sp, count * kDoubleSize));
2859
+ save_iterator.Advance();
2860
+ count++;
2861
+ }
2862
+ }
2863
+ if (NeedsEagerFrame()) {
2864
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2865
+ __ mov(sp, fp);
2866
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
2867
+ if (!info()->IsStub()) {
2868
+ __ add(sp, sp, Operand(sp_delta));
2869
+ }
2870
+ }
2428
2871
  __ Jump(lr);
2429
2872
  }
2430
2873
 
@@ -2466,7 +2909,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2466
2909
  // it as no longer deleted.
2467
2910
  if (instr->hydrogen()->RequiresHoleCheck()) {
2468
2911
  // We use a temp to check the payload (CompareRoot might clobber ip).
2469
- Register payload = ToRegister(instr->TempAt(0));
2912
+ Register payload = ToRegister(instr->temp());
2470
2913
  __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2471
2914
  __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
2472
2915
  DeoptimizeIf(eq, instr->environment());
@@ -2545,7 +2988,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2545
2988
 
2546
2989
 
2547
2990
  void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2548
- Register object = ToRegister(instr->InputAt(0));
2991
+ Register object = ToRegister(instr->object());
2549
2992
  Register result = ToRegister(instr->result());
2550
2993
  if (instr->hydrogen()->is_in_object()) {
2551
2994
  __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
@@ -2559,12 +3002,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2559
3002
  void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2560
3003
  Register object,
2561
3004
  Handle<Map> type,
2562
- Handle<String> name) {
3005
+ Handle<String> name,
3006
+ LEnvironment* env) {
2563
3007
  LookupResult lookup(isolate());
2564
- type->LookupInDescriptors(NULL, *name, &lookup);
2565
- ASSERT(lookup.IsFound() &&
2566
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
2567
- if (lookup.type() == FIELD) {
3008
+ type->LookupDescriptor(NULL, *name, &lookup);
3009
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
3010
+ if (lookup.IsField()) {
2568
3011
  int index = lookup.GetLocalFieldIndexFromMap(*type);
2569
3012
  int offset = index * kPointerSize;
2570
3013
  if (index < 0) {
@@ -2576,9 +3019,23 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2576
3019
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2577
3020
  __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2578
3021
  }
2579
- } else {
3022
+ } else if (lookup.IsConstantFunction()) {
2580
3023
  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2581
3024
  __ LoadHeapObject(result, function);
3025
+ } else {
3026
+ // Negative lookup.
3027
+ // Check prototypes.
3028
+ Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
3029
+ Heap* heap = type->GetHeap();
3030
+ while (*current != heap->null_value()) {
3031
+ __ LoadHeapObject(result, current);
3032
+ __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
3033
+ __ cmp(result, Operand(Handle<Map>(current->map())));
3034
+ DeoptimizeIf(ne, env);
3035
+ current =
3036
+ Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
3037
+ }
3038
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2582
3039
  }
2583
3040
  }
2584
3041
 
@@ -2586,7 +3043,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2586
3043
  void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2587
3044
  Register object = ToRegister(instr->object());
2588
3045
  Register result = ToRegister(instr->result());
2589
- Register scratch = scratch0();
3046
+ Register object_map = scratch0();
2590
3047
 
2591
3048
  int map_count = instr->hydrogen()->types()->length();
2592
3049
  bool need_generic = instr->hydrogen()->need_generic();
@@ -2597,18 +3054,24 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2597
3054
  }
2598
3055
  Handle<String> name = instr->hydrogen()->name();
2599
3056
  Label done;
2600
- __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3057
+ __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2601
3058
  for (int i = 0; i < map_count; ++i) {
2602
3059
  bool last = (i == map_count - 1);
2603
3060
  Handle<Map> map = instr->hydrogen()->types()->at(i);
2604
- __ cmp(scratch, Operand(map));
3061
+ Label check_passed;
3062
+ __ CompareMap(
3063
+ object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2605
3064
  if (last && !need_generic) {
2606
3065
  DeoptimizeIf(ne, instr->environment());
2607
- EmitLoadFieldOrConstantFunction(result, object, map, name);
3066
+ __ bind(&check_passed);
3067
+ EmitLoadFieldOrConstantFunction(
3068
+ result, object, map, name, instr->environment());
2608
3069
  } else {
2609
3070
  Label next;
2610
3071
  __ b(ne, &next);
2611
- EmitLoadFieldOrConstantFunction(result, object, map, name);
3072
+ __ bind(&check_passed);
3073
+ EmitLoadFieldOrConstantFunction(
3074
+ result, object, map, name, instr->environment());
2612
3075
  __ b(&done);
2613
3076
  __ bind(&next);
2614
3077
  }
@@ -2616,7 +3079,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2616
3079
  if (need_generic) {
2617
3080
  __ mov(r2, Operand(name));
2618
3081
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2619
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
3082
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
2620
3083
  }
2621
3084
  __ bind(&done);
2622
3085
  }
@@ -2629,7 +3092,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2629
3092
  // Name is always in r2.
2630
3093
  __ mov(r2, Operand(instr->name()));
2631
3094
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2632
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
3095
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
2633
3096
  }
2634
3097
 
2635
3098
 
@@ -2679,7 +3142,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2679
3142
 
2680
3143
  void LCodeGen::DoLoadElements(LLoadElements* instr) {
2681
3144
  Register result = ToRegister(instr->result());
2682
- Register input = ToRegister(instr->InputAt(0));
3145
+ Register input = ToRegister(instr->object());
2683
3146
  Register scratch = scratch0();
2684
3147
 
2685
3148
  __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
@@ -2714,7 +3177,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
2714
3177
  void LCodeGen::DoLoadExternalArrayPointer(
2715
3178
  LLoadExternalArrayPointer* instr) {
2716
3179
  Register to_reg = ToRegister(instr->result());
2717
- Register from_reg = ToRegister(instr->InputAt(0));
3180
+ Register from_reg = ToRegister(instr->object());
2718
3181
  __ ldr(to_reg, FieldMemOperand(from_reg,
2719
3182
  ExternalArray::kExternalPointerOffset));
2720
3183
  }
@@ -2725,84 +3188,16 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2725
3188
  Register length = ToRegister(instr->length());
2726
3189
  Register index = ToRegister(instr->index());
2727
3190
  Register result = ToRegister(instr->result());
2728
-
2729
- // Bailout index is not a valid argument index. Use unsigned check to get
2730
- // negative check for free.
2731
- __ sub(length, length, index, SetCC);
2732
- DeoptimizeIf(ls, instr->environment());
2733
-
2734
3191
  // There are two words between the frame pointer and the last argument.
2735
3192
  // Subtracting from length accounts for one of them add one more.
3193
+ __ sub(length, length, index);
2736
3194
  __ add(length, length, Operand(1));
2737
3195
  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
2738
3196
  }
2739
3197
 
2740
3198
 
2741
- void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2742
- Register elements = ToRegister(instr->elements());
2743
- Register key = EmitLoadRegister(instr->key(), scratch0());
2744
- Register result = ToRegister(instr->result());
2745
- Register scratch = scratch0();
2746
-
2747
- // Load the result.
2748
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2749
- uint32_t offset = FixedArray::kHeaderSize +
2750
- (instr->additional_index() << kPointerSizeLog2);
2751
- __ ldr(result, FieldMemOperand(scratch, offset));
2752
-
2753
- // Check for the hole value.
2754
- if (instr->hydrogen()->RequiresHoleCheck()) {
2755
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2756
- __ cmp(result, scratch);
2757
- DeoptimizeIf(eq, instr->environment());
2758
- }
2759
- }
2760
-
2761
-
2762
- void LCodeGen::DoLoadKeyedFastDoubleElement(
2763
- LLoadKeyedFastDoubleElement* instr) {
2764
- Register elements = ToRegister(instr->elements());
2765
- bool key_is_constant = instr->key()->IsConstantOperand();
2766
- Register key = no_reg;
2767
- DwVfpRegister result = ToDoubleRegister(instr->result());
2768
- Register scratch = scratch0();
2769
-
2770
- int shift_size =
2771
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2772
- int constant_key = 0;
2773
- if (key_is_constant) {
2774
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2775
- if (constant_key & 0xF0000000) {
2776
- Abort("array index constant value too big.");
2777
- }
2778
- } else {
2779
- key = ToRegister(instr->key());
2780
- }
2781
-
2782
- Operand operand = key_is_constant
2783
- ? Operand(((constant_key + instr->additional_index()) << shift_size) +
2784
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
2785
- : Operand(key, LSL, shift_size);
2786
- __ add(elements, elements, operand);
2787
- if (!key_is_constant) {
2788
- __ add(elements, elements,
2789
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
2790
- (instr->additional_index() << shift_size)));
2791
- }
2792
-
2793
- if (instr->hydrogen()->RequiresHoleCheck()) {
2794
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2795
- __ cmp(scratch, Operand(kHoleNanUpper32));
2796
- DeoptimizeIf(eq, instr->environment());
2797
- }
2798
-
2799
- __ vldr(result, elements, 0);
2800
- }
2801
-
2802
-
2803
- void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2804
- LLoadKeyedSpecializedArrayElement* instr) {
2805
- Register external_pointer = ToRegister(instr->external_pointer());
3199
+ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3200
+ Register external_pointer = ToRegister(instr->elements());
2806
3201
  Register key = no_reg;
2807
3202
  ElementsKind elements_kind = instr->elements_kind();
2808
3203
  bool key_is_constant = instr->key()->IsConstantOperand();
@@ -2815,34 +3210,77 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2815
3210
  } else {
2816
3211
  key = ToRegister(instr->key());
2817
3212
  }
2818
- int shift_size = ElementsKindToShiftSize(elements_kind);
2819
- int additional_offset = instr->additional_index() << shift_size;
3213
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
3214
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
3215
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
3216
+ int additional_offset = instr->additional_index() << element_size_shift;
2820
3217
 
2821
3218
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2822
3219
  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2823
- CpuFeatures::Scope scope(VFP3);
2824
3220
  DwVfpRegister result = ToDoubleRegister(instr->result());
2825
3221
  Operand operand = key_is_constant
2826
- ? Operand(constant_key << shift_size)
3222
+ ? Operand(constant_key << element_size_shift)
2827
3223
  : Operand(key, LSL, shift_size);
2828
3224
  __ add(scratch0(), external_pointer, operand);
2829
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2830
- __ vldr(result.low(), scratch0(), additional_offset);
2831
- __ vcvt_f64_f32(result, result.low());
2832
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2833
- __ vldr(result, scratch0(), additional_offset);
3225
+ if (CpuFeatures::IsSupported(VFP2)) {
3226
+ CpuFeatures::Scope scope(VFP2);
3227
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3228
+ __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
3229
+ __ vcvt_f64_f32(result, kScratchDoubleReg.low());
3230
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3231
+ __ vldr(result, scratch0(), additional_offset);
3232
+ }
3233
+ } else {
3234
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3235
+ Register value = external_pointer;
3236
+ __ ldr(value, MemOperand(scratch0(), additional_offset));
3237
+ __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
3238
+
3239
+ __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
3240
+ __ and_(scratch0(), scratch0(),
3241
+ Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
3242
+
3243
+ Label exponent_rebiased;
3244
+ __ teq(scratch0(), Operand(0x00));
3245
+ __ b(eq, &exponent_rebiased);
3246
+
3247
+ __ teq(scratch0(), Operand(0xff));
3248
+ __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
3249
+ __ b(eq, &exponent_rebiased);
3250
+
3251
+ // Rebias exponent.
3252
+ __ add(scratch0(),
3253
+ scratch0(),
3254
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
3255
+
3256
+ __ bind(&exponent_rebiased);
3257
+ __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
3258
+ __ orr(sfpd_hi, sfpd_hi,
3259
+ Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
3260
+
3261
+ // Shift mantissa.
3262
+ static const int kMantissaShiftForHiWord =
3263
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3264
+
3265
+ static const int kMantissaShiftForLoWord =
3266
+ kBitsPerInt - kMantissaShiftForHiWord;
3267
+
3268
+ __ orr(sfpd_hi, sfpd_hi,
3269
+ Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
3270
+ __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
3271
+
3272
+ } else {
3273
+ __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
3274
+ __ ldr(sfpd_hi, MemOperand(scratch0(),
3275
+ additional_offset + kPointerSize));
3276
+ }
2834
3277
  }
2835
3278
  } else {
2836
3279
  Register result = ToRegister(instr->result());
2837
- if (instr->additional_index() != 0 && !key_is_constant) {
2838
- __ add(scratch0(), key, Operand(instr->additional_index()));
2839
- }
2840
- MemOperand mem_operand(key_is_constant
2841
- ? MemOperand(external_pointer,
2842
- (constant_key << shift_size) + additional_offset)
2843
- : (instr->additional_index() == 0
2844
- ? MemOperand(external_pointer, key, LSL, shift_size)
2845
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
3280
+ MemOperand mem_operand = PrepareKeyedOperand(
3281
+ key, external_pointer, key_is_constant, constant_key,
3282
+ element_size_shift, shift_size,
3283
+ instr->additional_index(), additional_offset);
2846
3284
  switch (elements_kind) {
2847
3285
  case EXTERNAL_BYTE_ELEMENTS:
2848
3286
  __ ldrsb(result, mem_operand);
@@ -2862,11 +3300,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2862
3300
  break;
2863
3301
  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2864
3302
  __ ldr(result, mem_operand);
2865
- __ cmp(result, Operand(0x80000000));
2866
- // TODO(danno): we could be more clever here, perhaps having a special
2867
- // version of the stub that detects if the overflow case actually
2868
- // happens, and generate code that returns a double rather than int.
2869
- DeoptimizeIf(cs, instr->environment());
3303
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3304
+ __ cmp(result, Operand(0x80000000));
3305
+ DeoptimizeIf(cs, instr->environment());
3306
+ }
2870
3307
  break;
2871
3308
  case EXTERNAL_FLOAT_ELEMENTS:
2872
3309
  case EXTERNAL_DOUBLE_ELEMENTS:
@@ -2885,21 +3322,156 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2885
3322
  }
2886
3323
 
2887
3324
 
2888
- void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2889
- ASSERT(ToRegister(instr->object()).is(r1));
2890
- ASSERT(ToRegister(instr->key()).is(r0));
2891
-
2892
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2893
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
2894
- }
2895
-
2896
-
2897
- void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3325
+ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3326
+ Register elements = ToRegister(instr->elements());
3327
+ bool key_is_constant = instr->key()->IsConstantOperand();
3328
+ Register key = no_reg;
3329
+ DwVfpRegister result = ToDoubleRegister(instr->result());
2898
3330
  Register scratch = scratch0();
2899
- Register result = ToRegister(instr->result());
2900
3331
 
2901
- if (instr->hydrogen()->from_inlined()) {
2902
- __ sub(result, sp, Operand(2 * kPointerSize));
3332
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3333
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
3334
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
3335
+ int constant_key = 0;
3336
+ if (key_is_constant) {
3337
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3338
+ if (constant_key & 0xF0000000) {
3339
+ Abort("array index constant value too big.");
3340
+ }
3341
+ } else {
3342
+ key = ToRegister(instr->key());
3343
+ }
3344
+
3345
+ int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
3346
+ ((constant_key + instr->additional_index()) << element_size_shift);
3347
+ if (!key_is_constant) {
3348
+ __ add(elements, elements, Operand(key, LSL, shift_size));
3349
+ }
3350
+ if (CpuFeatures::IsSupported(VFP2)) {
3351
+ CpuFeatures::Scope scope(VFP2);
3352
+ __ add(elements, elements, Operand(base_offset));
3353
+ __ vldr(result, elements, 0);
3354
+ if (instr->hydrogen()->RequiresHoleCheck()) {
3355
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
3356
+ __ cmp(scratch, Operand(kHoleNanUpper32));
3357
+ DeoptimizeIf(eq, instr->environment());
3358
+ }
3359
+ } else {
3360
+ __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
3361
+ __ ldr(sfpd_lo, MemOperand(elements, base_offset));
3362
+ if (instr->hydrogen()->RequiresHoleCheck()) {
3363
+ ASSERT(kPointerSize == sizeof(kHoleNanLower32));
3364
+ __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
3365
+ DeoptimizeIf(eq, instr->environment());
3366
+ }
3367
+ }
3368
+ }
3369
+
3370
+
3371
+ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3372
+ Register elements = ToRegister(instr->elements());
3373
+ Register result = ToRegister(instr->result());
3374
+ Register scratch = scratch0();
3375
+ Register store_base = scratch;
3376
+ int offset = 0;
3377
+
3378
+ if (instr->key()->IsConstantOperand()) {
3379
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3380
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3381
+ instr->additional_index());
3382
+ store_base = elements;
3383
+ } else {
3384
+ Register key = EmitLoadRegister(instr->key(), scratch0());
3385
+ // Even though the HLoadKeyed instruction forces the input
3386
+ // representation for the key to be an integer, the input gets replaced
3387
+ // during bound check elimination with the index argument to the bounds
3388
+ // check, which can be tagged, so that case must be handled here, too.
3389
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
3390
+ __ add(scratch, elements,
3391
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
3392
+ } else {
3393
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3394
+ }
3395
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3396
+ }
3397
+ __ ldr(result, FieldMemOperand(store_base, offset));
3398
+
3399
+ // Check for the hole value.
3400
+ if (instr->hydrogen()->RequiresHoleCheck()) {
3401
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3402
+ __ tst(result, Operand(kSmiTagMask));
3403
+ DeoptimizeIf(ne, instr->environment());
3404
+ } else {
3405
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3406
+ __ cmp(result, scratch);
3407
+ DeoptimizeIf(eq, instr->environment());
3408
+ }
3409
+ }
3410
+ }
3411
+
3412
+
3413
+ void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3414
+ if (instr->is_external()) {
3415
+ DoLoadKeyedExternalArray(instr);
3416
+ } else if (instr->hydrogen()->representation().IsDouble()) {
3417
+ DoLoadKeyedFixedDoubleArray(instr);
3418
+ } else {
3419
+ DoLoadKeyedFixedArray(instr);
3420
+ }
3421
+ }
3422
+
3423
+
3424
+ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3425
+ Register base,
3426
+ bool key_is_constant,
3427
+ int constant_key,
3428
+ int element_size,
3429
+ int shift_size,
3430
+ int additional_index,
3431
+ int additional_offset) {
3432
+ if (additional_index != 0 && !key_is_constant) {
3433
+ additional_index *= 1 << (element_size - shift_size);
3434
+ __ add(scratch0(), key, Operand(additional_index));
3435
+ }
3436
+
3437
+ if (key_is_constant) {
3438
+ return MemOperand(base,
3439
+ (constant_key << element_size) + additional_offset);
3440
+ }
3441
+
3442
+ if (additional_index == 0) {
3443
+ if (shift_size >= 0) {
3444
+ return MemOperand(base, key, LSL, shift_size);
3445
+ } else {
3446
+ ASSERT_EQ(-1, shift_size);
3447
+ return MemOperand(base, key, LSR, 1);
3448
+ }
3449
+ }
3450
+
3451
+ if (shift_size >= 0) {
3452
+ return MemOperand(base, scratch0(), LSL, shift_size);
3453
+ } else {
3454
+ ASSERT_EQ(-1, shift_size);
3455
+ return MemOperand(base, scratch0(), LSR, 1);
3456
+ }
3457
+ }
3458
+
3459
+
3460
+ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3461
+ ASSERT(ToRegister(instr->object()).is(r1));
3462
+ ASSERT(ToRegister(instr->key()).is(r0));
3463
+
3464
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3465
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3466
+ }
3467
+
3468
+
3469
+ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3470
+ Register scratch = scratch0();
3471
+ Register result = ToRegister(instr->result());
3472
+
3473
+ if (instr->hydrogen()->from_inlined()) {
3474
+ __ sub(result, sp, Operand(2 * kPointerSize));
2903
3475
  } else {
2904
3476
  // Check if the calling frame is an arguments adaptor frame.
2905
3477
  Label done, adapted;
@@ -2916,7 +3488,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2916
3488
 
2917
3489
 
2918
3490
  void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2919
- Register elem = ToRegister(instr->InputAt(0));
3491
+ Register elem = ToRegister(instr->elements());
2920
3492
  Register result = ToRegister(instr->result());
2921
3493
 
2922
3494
  Label done;
@@ -3011,7 +3583,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3011
3583
  // stack.
3012
3584
  Label invoke, loop;
3013
3585
  // length is a small non-negative integer, due to the test above.
3014
- __ cmp(length, Operand(0));
3586
+ __ cmp(length, Operand::Zero());
3015
3587
  __ b(eq, &invoke);
3016
3588
  __ bind(&loop);
3017
3589
  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
@@ -3035,7 +3607,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3035
3607
 
3036
3608
 
3037
3609
  void LCodeGen::DoPushArgument(LPushArgument* instr) {
3038
- LOperand* argument = instr->InputAt(0);
3610
+ LOperand* argument = instr->value();
3039
3611
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3040
3612
  Abort("DoPushArgument not implemented for double type.");
3041
3613
  } else {
@@ -3052,13 +3624,19 @@ void LCodeGen::DoDrop(LDrop* instr) {
3052
3624
 
3053
3625
  void LCodeGen::DoThisFunction(LThisFunction* instr) {
3054
3626
  Register result = ToRegister(instr->result());
3055
- __ LoadHeapObject(result, instr->hydrogen()->closure());
3627
+ __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3056
3628
  }
3057
3629
 
3058
3630
 
3059
3631
  void LCodeGen::DoContext(LContext* instr) {
3632
+ // If there is a non-return use, the context must be moved to a register.
3060
3633
  Register result = ToRegister(instr->result());
3061
- __ mov(result, cp);
3634
+ for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
3635
+ if (!it.value()->IsReturn()) {
3636
+ __ mov(result, cp);
3637
+ return;
3638
+ }
3639
+ }
3062
3640
  }
3063
3641
 
3064
3642
 
@@ -3082,12 +3660,12 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3082
3660
 
3083
3661
  void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3084
3662
  Register result = ToRegister(instr->result());
3085
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
3663
+ __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
3086
3664
  }
3087
3665
 
3088
3666
 
3089
3667
  void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3090
- Register global = ToRegister(instr->global());
3668
+ Register global = ToRegister(instr->global_object());
3091
3669
  Register result = ToRegister(instr->result());
3092
3670
  __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
3093
3671
  }
@@ -3109,14 +3687,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3109
3687
  __ LoadHeapObject(r1, function);
3110
3688
  }
3111
3689
 
3112
- // Change context if needed.
3113
- bool change_context =
3114
- (info()->closure()->context() != function->context()) ||
3115
- scope()->contains_with() ||
3116
- (scope()->num_heap_slots() > 0);
3117
- if (change_context) {
3118
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
3119
- }
3690
+ // Change context.
3691
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
3120
3692
 
3121
3693
  // Set r0 to arguments count if adaption is not needed. Assumes that r0
3122
3694
  // is available to write to at this point.
@@ -3153,7 +3725,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3153
3725
 
3154
3726
 
3155
3727
  void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3156
- Register input = ToRegister(instr->InputAt(0));
3728
+ Register input = ToRegister(instr->value());
3157
3729
  Register result = ToRegister(instr->result());
3158
3730
  Register scratch = scratch0();
3159
3731
 
@@ -3219,20 +3791,21 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3219
3791
 
3220
3792
 
3221
3793
  void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3222
- Register input = ToRegister(instr->InputAt(0));
3794
+ Register input = ToRegister(instr->value());
3223
3795
  Register result = ToRegister(instr->result());
3224
- __ cmp(input, Operand(0));
3796
+ __ cmp(input, Operand::Zero());
3225
3797
  __ Move(result, input, pl);
3226
3798
  // We can make rsb conditional because the previous cmp instruction
3227
3799
  // will clear the V (overflow) flag and rsb won't set this flag
3228
3800
  // if input is positive.
3229
- __ rsb(result, input, Operand(0), SetCC, mi);
3801
+ __ rsb(result, input, Operand::Zero(), SetCC, mi);
3230
3802
  // Deoptimize on overflow.
3231
3803
  DeoptimizeIf(vs, instr->environment());
3232
3804
  }
3233
3805
 
3234
3806
 
3235
3807
  void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3808
+ CpuFeatures::Scope scope(VFP2);
3236
3809
  // Class for deferred case.
3237
3810
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3238
3811
  public:
@@ -3249,7 +3822,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3249
3822
 
3250
3823
  Representation r = instr->hydrogen()->value()->representation();
3251
3824
  if (r.IsDouble()) {
3252
- DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
3825
+ DwVfpRegister input = ToDoubleRegister(instr->value());
3253
3826
  DwVfpRegister result = ToDoubleRegister(instr->result());
3254
3827
  __ vabs(result, input);
3255
3828
  } else if (r.IsInteger32()) {
@@ -3257,8 +3830,8 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3257
3830
  } else {
3258
3831
  // Representation is tagged.
3259
3832
  DeferredMathAbsTaggedHeapNumber* deferred =
3260
- new DeferredMathAbsTaggedHeapNumber(this, instr);
3261
- Register input = ToRegister(instr->InputAt(0));
3833
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3834
+ Register input = ToRegister(instr->value());
3262
3835
  // Smi check.
3263
3836
  __ JumpIfNotSmi(input, deferred->entry());
3264
3837
  // If smi, handle it directly.
@@ -3269,29 +3842,25 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3269
3842
 
3270
3843
 
3271
3844
  void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3272
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3845
+ CpuFeatures::Scope scope(VFP2);
3846
+ DwVfpRegister input = ToDoubleRegister(instr->value());
3273
3847
  Register result = ToRegister(instr->result());
3274
- SwVfpRegister single_scratch = double_scratch0().low();
3275
- Register scratch1 = scratch0();
3276
- Register scratch2 = ToRegister(instr->TempAt(0));
3848
+ Register scratch = scratch0();
3277
3849
 
3278
3850
  __ EmitVFPTruncate(kRoundToMinusInf,
3279
- single_scratch,
3851
+ result,
3280
3852
  input,
3281
- scratch1,
3282
- scratch2);
3853
+ scratch,
3854
+ double_scratch0());
3283
3855
  DeoptimizeIf(ne, instr->environment());
3284
3856
 
3285
- // Move the result back to general purpose register r0.
3286
- __ vmov(result, single_scratch);
3287
-
3288
3857
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3289
3858
  // Test for -0.
3290
3859
  Label done;
3291
- __ cmp(result, Operand(0));
3860
+ __ cmp(result, Operand::Zero());
3292
3861
  __ b(ne, &done);
3293
- __ vmov(scratch1, input.high());
3294
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
3862
+ __ vmov(scratch, input.high());
3863
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
3295
3864
  DeoptimizeIf(ne, instr->environment());
3296
3865
  __ bind(&done);
3297
3866
  }
@@ -3299,8 +3868,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3299
3868
 
3300
3869
 
3301
3870
  void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3302
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3871
+ CpuFeatures::Scope scope(VFP2);
3872
+ DwVfpRegister input = ToDoubleRegister(instr->value());
3303
3873
  Register result = ToRegister(instr->result());
3874
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3304
3875
  Register scratch = scratch0();
3305
3876
  Label done, check_sign_on_zero;
3306
3877
 
@@ -3313,7 +3884,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3313
3884
 
3314
3885
  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3315
3886
  __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
3316
- __ mov(result, Operand(0), LeaveCC, le);
3887
+ __ mov(result, Operand::Zero(), LeaveCC, le);
3317
3888
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3318
3889
  __ b(le, &check_sign_on_zero);
3319
3890
  } else {
@@ -3325,12 +3896,12 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3325
3896
  __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
3326
3897
  DeoptimizeIf(ge, instr->environment());
3327
3898
 
3899
+ __ Vmov(double_scratch0(), 0.5, scratch);
3900
+ __ vadd(double_scratch0(), input, double_scratch0());
3901
+
3328
3902
  // Save the original sign for later comparison.
3329
3903
  __ and_(scratch, result, Operand(HeapNumber::kSignMask));
3330
3904
 
3331
- __ Vmov(double_scratch0(), 0.5);
3332
- __ vadd(double_scratch0(), input, double_scratch0());
3333
-
3334
3905
  // Check sign of the result: if the sign changed, the input
3335
3906
  // value was in ]0.5, 0[ and the result should be -0.
3336
3907
  __ vmov(result, double_scratch0().high());
@@ -3338,21 +3909,20 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3338
3909
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3339
3910
  DeoptimizeIf(mi, instr->environment());
3340
3911
  } else {
3341
- __ mov(result, Operand(0), LeaveCC, mi);
3912
+ __ mov(result, Operand::Zero(), LeaveCC, mi);
3342
3913
  __ b(mi, &done);
3343
3914
  }
3344
3915
 
3345
3916
  __ EmitVFPTruncate(kRoundToMinusInf,
3346
- double_scratch0().low(),
3347
- double_scratch0(),
3348
3917
  result,
3349
- scratch);
3918
+ double_scratch0(),
3919
+ scratch,
3920
+ double_scratch1);
3350
3921
  DeoptimizeIf(ne, instr->environment());
3351
- __ vmov(result, double_scratch0().low());
3352
3922
 
3353
3923
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3354
3924
  // Test for -0.
3355
- __ cmp(result, Operand(0));
3925
+ __ cmp(result, Operand::Zero());
3356
3926
  __ b(ne, &done);
3357
3927
  __ bind(&check_sign_on_zero);
3358
3928
  __ vmov(scratch, input.high());
@@ -3364,22 +3934,24 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3364
3934
 
3365
3935
 
3366
3936
  void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3367
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3368
- DoubleRegister result = ToDoubleRegister(instr->result());
3937
+ CpuFeatures::Scope scope(VFP2);
3938
+ DwVfpRegister input = ToDoubleRegister(instr->value());
3939
+ DwVfpRegister result = ToDoubleRegister(instr->result());
3369
3940
  __ vsqrt(result, input);
3370
3941
  }
3371
3942
 
3372
3943
 
3373
3944
  void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3374
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3375
- DoubleRegister result = ToDoubleRegister(instr->result());
3376
- DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
3945
+ CpuFeatures::Scope scope(VFP2);
3946
+ DwVfpRegister input = ToDoubleRegister(instr->value());
3947
+ DwVfpRegister result = ToDoubleRegister(instr->result());
3948
+ DwVfpRegister temp = ToDoubleRegister(instr->temp());
3377
3949
 
3378
3950
  // Note that according to ECMA-262 15.8.2.13:
3379
3951
  // Math.pow(-Infinity, 0.5) == Infinity
3380
3952
  // Math.sqrt(-Infinity) == NaN
3381
3953
  Label done;
3382
- __ vmov(temp, -V8_INFINITY);
3954
+ __ vmov(temp, -V8_INFINITY, scratch0());
3383
3955
  __ VFPCompareAndSetFlags(input, temp);
3384
3956
  __ vneg(result, temp, eq);
3385
3957
  __ b(&done, eq);
@@ -3392,14 +3964,15 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3392
3964
 
3393
3965
 
3394
3966
  void LCodeGen::DoPower(LPower* instr) {
3967
+ CpuFeatures::Scope scope(VFP2);
3395
3968
  Representation exponent_type = instr->hydrogen()->right()->representation();
3396
3969
  // Having marked this as a call, we can use any registers.
3397
3970
  // Just make sure that the input/output registers are the expected ones.
3398
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3399
- ToDoubleRegister(instr->InputAt(1)).is(d2));
3400
- ASSERT(!instr->InputAt(1)->IsRegister() ||
3401
- ToRegister(instr->InputAt(1)).is(r2));
3402
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
3971
+ ASSERT(!instr->right()->IsDoubleRegister() ||
3972
+ ToDoubleRegister(instr->right()).is(d2));
3973
+ ASSERT(!instr->right()->IsRegister() ||
3974
+ ToRegister(instr->right()).is(r2));
3975
+ ASSERT(ToDoubleRegister(instr->left()).is(d1));
3403
3976
  ASSERT(ToDoubleRegister(instr->result()).is(d3));
3404
3977
 
3405
3978
  if (exponent_type.IsTagged()) {
@@ -3424,6 +3997,7 @@ void LCodeGen::DoPower(LPower* instr) {
3424
3997
 
3425
3998
 
3426
3999
  void LCodeGen::DoRandom(LRandom* instr) {
4000
+ CpuFeatures::Scope scope(VFP2);
3427
4001
  class DeferredDoRandom: public LDeferredCode {
3428
4002
  public:
3429
4003
  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3434,25 +4008,25 @@ void LCodeGen::DoRandom(LRandom* instr) {
3434
4008
  LRandom* instr_;
3435
4009
  };
3436
4010
 
3437
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
4011
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3438
4012
 
3439
4013
  // Having marked this instruction as a call we can use any
3440
4014
  // registers.
3441
4015
  ASSERT(ToDoubleRegister(instr->result()).is(d7));
3442
- ASSERT(ToRegister(instr->InputAt(0)).is(r0));
4016
+ ASSERT(ToRegister(instr->global_object()).is(r0));
3443
4017
 
3444
4018
  static const int kSeedSize = sizeof(uint32_t);
3445
4019
  STATIC_ASSERT(kPointerSize == kSeedSize);
3446
4020
 
3447
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
4021
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
3448
4022
  static const int kRandomSeedOffset =
3449
4023
  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3450
4024
  __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
3451
- // r2: FixedArray of the global context's random seeds
4025
+ // r2: FixedArray of the native context's random seeds
3452
4026
 
3453
4027
  // Load state[0].
3454
4028
  __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3455
- __ cmp(r1, Operand(0));
4029
+ __ cmp(r1, Operand::Zero());
3456
4030
  __ b(eq, deferred->entry());
3457
4031
  // Load state[1].
3458
4032
  __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
@@ -3487,7 +4061,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
3487
4061
  // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
3488
4062
  __ vmov(d7, r0, r1);
3489
4063
  // Move 0x4130000000000000 to VFP.
3490
- __ mov(r0, Operand(0, RelocInfo::NONE));
4064
+ __ mov(r0, Operand::Zero());
3491
4065
  __ vmov(d8, r0, r1);
3492
4066
  // Subtract and store the result in the heap number.
3493
4067
  __ vsub(d7, d7, d8);
@@ -3501,6 +4075,21 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
3501
4075
  }
3502
4076
 
3503
4077
 
4078
+ void LCodeGen::DoMathExp(LMathExp* instr) {
4079
+ CpuFeatures::Scope scope(VFP2);
4080
+ DwVfpRegister input = ToDoubleRegister(instr->value());
4081
+ DwVfpRegister result = ToDoubleRegister(instr->result());
4082
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
4083
+ DwVfpRegister double_scratch2 = double_scratch0();
4084
+ Register temp1 = ToRegister(instr->temp1());
4085
+ Register temp2 = ToRegister(instr->temp2());
4086
+
4087
+ MathExpGenerator::EmitMathExp(
4088
+ masm(), input, result, double_scratch1, double_scratch2,
4089
+ temp1, temp2, scratch0());
4090
+ }
4091
+
4092
+
3504
4093
  void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3505
4094
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3506
4095
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -3596,7 +4185,7 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3596
4185
  int arity = instr->arity();
3597
4186
  Handle<Code> ic =
3598
4187
  isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3599
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
4188
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3600
4189
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3601
4190
  }
3602
4191
 
@@ -3609,7 +4198,7 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
3609
4198
  Handle<Code> ic =
3610
4199
  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3611
4200
  __ mov(r2, Operand(instr->name()));
3612
- CallCode(ic, mode, instr);
4201
+ CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
3613
4202
  // Restore context register.
3614
4203
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3615
4204
  }
@@ -3634,7 +4223,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3634
4223
  Handle<Code> ic =
3635
4224
  isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3636
4225
  __ mov(r2, Operand(instr->name()));
3637
- CallCode(ic, mode, instr);
4226
+ CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
3638
4227
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3639
4228
  }
3640
4229
 
@@ -3650,7 +4239,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3650
4239
 
3651
4240
 
3652
4241
  void LCodeGen::DoCallNew(LCallNew* instr) {
3653
- ASSERT(ToRegister(instr->InputAt(0)).is(r1));
4242
+ ASSERT(ToRegister(instr->constructor()).is(r1));
3654
4243
  ASSERT(ToRegister(instr->result()).is(r0));
3655
4244
 
3656
4245
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
@@ -3676,7 +4265,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3676
4265
  __ mov(scratch, Operand(instr->transition()));
3677
4266
  __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3678
4267
  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3679
- Register temp = ToRegister(instr->TempAt(0));
4268
+ Register temp = ToRegister(instr->temp());
3680
4269
  // Update the write barrier for the map field.
3681
4270
  __ RecordWriteField(object,
3682
4271
  HeapObject::kMapOffset,
@@ -3734,104 +4323,51 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3734
4323
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3735
4324
  ? isolate()->builtins()->StoreIC_Initialize_Strict()
3736
4325
  : isolate()->builtins()->StoreIC_Initialize();
3737
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
3738
- }
3739
-
3740
-
3741
- void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3742
- __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
3743
- DeoptimizeIf(hs, instr->environment());
4326
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3744
4327
  }
3745
4328
 
3746
4329
 
3747
- void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3748
- Register value = ToRegister(instr->value());
3749
- Register elements = ToRegister(instr->object());
3750
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3751
- Register scratch = scratch0();
3752
-
3753
- // Do the store.
3754
- if (instr->key()->IsConstantOperand()) {
3755
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3756
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3757
- int offset =
3758
- (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
3759
- + FixedArray::kHeaderSize;
3760
- __ str(value, FieldMemOperand(elements, offset));
3761
- } else {
3762
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3763
- if (instr->additional_index() != 0) {
3764
- __ add(scratch,
3765
- scratch,
3766
- Operand(instr->additional_index() << kPointerSizeLog2));
4330
+ void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
4331
+ HValue* value,
4332
+ LOperand* operand) {
4333
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
4334
+ if (operand->IsRegister()) {
4335
+ __ tst(ToRegister(operand), Operand(kSmiTagMask));
4336
+ } else {
4337
+ __ mov(ip, ToOperand(operand));
4338
+ __ tst(ip, Operand(kSmiTagMask));
3767
4339
  }
3768
- __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3769
- }
3770
-
3771
- if (instr->hydrogen()->NeedsWriteBarrier()) {
3772
- HType type = instr->hydrogen()->value()->type();
3773
- SmiCheck check_needed =
3774
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3775
- // Compute address of modified element and store it into key register.
3776
- __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3777
- __ RecordWrite(elements,
3778
- key,
3779
- value,
3780
- kLRHasBeenSaved,
3781
- kSaveFPRegs,
3782
- EMIT_REMEMBERED_SET,
3783
- check_needed);
4340
+ DeoptimizeIf(ne, environment);
3784
4341
  }
3785
4342
  }
3786
4343
 
3787
4344
 
3788
- void LCodeGen::DoStoreKeyedFastDoubleElement(
3789
- LStoreKeyedFastDoubleElement* instr) {
3790
- DwVfpRegister value = ToDoubleRegister(instr->value());
3791
- Register elements = ToRegister(instr->elements());
3792
- Register key = no_reg;
3793
- Register scratch = scratch0();
3794
- bool key_is_constant = instr->key()->IsConstantOperand();
3795
- int constant_key = 0;
3796
-
3797
- // Calculate the effective address of the slot in the array to store the
3798
- // double value.
3799
- if (key_is_constant) {
3800
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3801
- if (constant_key & 0xF0000000) {
3802
- Abort("array index constant value too big.");
4345
+ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4346
+ DeoptIfTaggedButNotSmi(instr->environment(),
4347
+ instr->hydrogen()->length(),
4348
+ instr->length());
4349
+ DeoptIfTaggedButNotSmi(instr->environment(),
4350
+ instr->hydrogen()->index(),
4351
+ instr->index());
4352
+ if (instr->index()->IsConstantOperand()) {
4353
+ int constant_index =
4354
+ ToInteger32(LConstantOperand::cast(instr->index()));
4355
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
4356
+ __ mov(ip, Operand(Smi::FromInt(constant_index)));
4357
+ } else {
4358
+ __ mov(ip, Operand(constant_index));
3803
4359
  }
4360
+ __ cmp(ip, ToRegister(instr->length()));
3804
4361
  } else {
3805
- key = ToRegister(instr->key());
4362
+ __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
3806
4363
  }
3807
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3808
- Operand operand = key_is_constant
3809
- ? Operand((constant_key << shift_size) +
3810
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
3811
- : Operand(key, LSL, shift_size);
3812
- __ add(scratch, elements, operand);
3813
- if (!key_is_constant) {
3814
- __ add(scratch, scratch,
3815
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3816
- }
3817
-
3818
- if (instr->NeedsCanonicalization()) {
3819
- // Check for NaN. All NaNs must be canonicalized.
3820
- __ VFPCompareAndSetFlags(value, value);
3821
- // Only load canonical NaN if the comparison above set the overflow.
3822
- __ Vmov(value,
3823
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
3824
- vs);
3825
- }
3826
-
3827
- __ vstr(value, scratch, instr->additional_index() << shift_size);
4364
+ DeoptimizeIf(hs, instr->environment());
3828
4365
  }
3829
4366
 
3830
4367
 
3831
- void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3832
- LStoreKeyedSpecializedArrayElement* instr) {
3833
-
3834
- Register external_pointer = ToRegister(instr->external_pointer());
4368
+ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4369
+ CpuFeatures::Scope scope(VFP2);
4370
+ Register external_pointer = ToRegister(instr->elements());
3835
4371
  Register key = no_reg;
3836
4372
  ElementsKind elements_kind = instr->elements_kind();
3837
4373
  bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3844,15 +4380,18 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3844
4380
  } else {
3845
4381
  key = ToRegister(instr->key());
3846
4382
  }
3847
- int shift_size = ElementsKindToShiftSize(elements_kind);
3848
- int additional_offset = instr->additional_index() << shift_size;
4383
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
4384
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
4385
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
4386
+ int additional_offset = instr->additional_index() << element_size_shift;
3849
4387
 
3850
4388
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3851
4389
  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3852
4390
  CpuFeatures::Scope scope(VFP3);
3853
4391
  DwVfpRegister value(ToDoubleRegister(instr->value()));
3854
- Operand operand(key_is_constant ? Operand(constant_key << shift_size)
3855
- : Operand(key, LSL, shift_size));
4392
+ Operand operand(key_is_constant
4393
+ ? Operand(constant_key << element_size_shift)
4394
+ : Operand(key, LSL, shift_size));
3856
4395
  __ add(scratch0(), external_pointer, operand);
3857
4396
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3858
4397
  __ vcvt_f32_f64(double_scratch0().low(), value);
@@ -3862,16 +4401,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3862
4401
  }
3863
4402
  } else {
3864
4403
  Register value(ToRegister(instr->value()));
3865
- if (instr->additional_index() != 0 && !key_is_constant) {
3866
- __ add(scratch0(), key, Operand(instr->additional_index()));
3867
- }
3868
- MemOperand mem_operand(key_is_constant
3869
- ? MemOperand(external_pointer,
3870
- ((constant_key + instr->additional_index())
3871
- << shift_size))
3872
- : (instr->additional_index() == 0
3873
- ? MemOperand(external_pointer, key, LSL, shift_size)
3874
- : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
4404
+ MemOperand mem_operand = PrepareKeyedOperand(
4405
+ key, external_pointer, key_is_constant, constant_key,
4406
+ element_size_shift, shift_size,
4407
+ instr->additional_index(), additional_offset);
3875
4408
  switch (elements_kind) {
3876
4409
  case EXTERNAL_PIXEL_ELEMENTS:
3877
4410
  case EXTERNAL_BYTE_ELEMENTS:
@@ -3903,6 +4436,111 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3903
4436
  }
3904
4437
 
3905
4438
 
4439
+ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4440
+ CpuFeatures::Scope scope(VFP2);
4441
+ DwVfpRegister value = ToDoubleRegister(instr->value());
4442
+ Register elements = ToRegister(instr->elements());
4443
+ Register key = no_reg;
4444
+ Register scratch = scratch0();
4445
+ bool key_is_constant = instr->key()->IsConstantOperand();
4446
+ int constant_key = 0;
4447
+
4448
+ // Calculate the effective address of the slot in the array to store the
4449
+ // double value.
4450
+ if (key_is_constant) {
4451
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4452
+ if (constant_key & 0xF0000000) {
4453
+ Abort("array index constant value too big.");
4454
+ }
4455
+ } else {
4456
+ key = ToRegister(instr->key());
4457
+ }
4458
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4459
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
4460
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
4461
+ Operand operand = key_is_constant
4462
+ ? Operand((constant_key << element_size_shift) +
4463
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag)
4464
+ : Operand(key, LSL, shift_size);
4465
+ __ add(scratch, elements, operand);
4466
+ if (!key_is_constant) {
4467
+ __ add(scratch, scratch,
4468
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4469
+ }
4470
+
4471
+ if (instr->NeedsCanonicalization()) {
4472
+ // Check for NaN. All NaNs must be canonicalized.
4473
+ __ VFPCompareAndSetFlags(value, value);
4474
+ // Only load canonical NaN if the comparison above set the overflow.
4475
+ __ Vmov(value,
4476
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
4477
+ no_reg, vs);
4478
+ }
4479
+
4480
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
4481
+ }
4482
+
4483
+
4484
+ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4485
+ Register value = ToRegister(instr->value());
4486
+ Register elements = ToRegister(instr->elements());
4487
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4488
+ : no_reg;
4489
+ Register scratch = scratch0();
4490
+ Register store_base = scratch;
4491
+ int offset = 0;
4492
+
4493
+ // Do the store.
4494
+ if (instr->key()->IsConstantOperand()) {
4495
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4496
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4497
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4498
+ instr->additional_index());
4499
+ store_base = elements;
4500
+ } else {
4501
+ // Even though the HLoadKeyed instruction forces the input
4502
+ // representation for the key to be an integer, the input gets replaced
4503
+ // during bound check elimination with the index argument to the bounds
4504
+ // check, which can be tagged, so that case must be handled here, too.
4505
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
4506
+ __ add(scratch, elements,
4507
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
4508
+ } else {
4509
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4510
+ }
4511
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4512
+ }
4513
+ __ str(value, FieldMemOperand(store_base, offset));
4514
+
4515
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
4516
+ HType type = instr->hydrogen()->value()->type();
4517
+ SmiCheck check_needed =
4518
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4519
+ // Compute address of modified element and store it into key register.
4520
+ __ add(key, store_base, Operand(offset - kHeapObjectTag));
4521
+ __ RecordWrite(elements,
4522
+ key,
4523
+ value,
4524
+ kLRHasBeenSaved,
4525
+ kSaveFPRegs,
4526
+ EMIT_REMEMBERED_SET,
4527
+ check_needed);
4528
+ }
4529
+ }
4530
+
4531
+
4532
+ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4533
+ // By cases: external, fast double
4534
+ if (instr->is_external()) {
4535
+ DoStoreKeyedExternalArray(instr);
4536
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4537
+ DoStoreKeyedFixedDoubleArray(instr);
4538
+ } else {
4539
+ DoStoreKeyedFixedArray(instr);
4540
+ }
4541
+ }
4542
+
4543
+
3906
4544
  void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3907
4545
  ASSERT(ToRegister(instr->object()).is(r2));
3908
4546
  ASSERT(ToRegister(instr->key()).is(r1));
@@ -3911,44 +4549,56 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3911
4549
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3912
4550
  ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3913
4551
  : isolate()->builtins()->KeyedStoreIC_Initialize();
3914
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
4552
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3915
4553
  }
3916
4554
 
3917
4555
 
3918
4556
  void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3919
4557
  Register object_reg = ToRegister(instr->object());
3920
- Register new_map_reg = ToRegister(instr->new_map_reg());
3921
4558
  Register scratch = scratch0();
3922
4559
 
3923
4560
  Handle<Map> from_map = instr->original_map();
3924
4561
  Handle<Map> to_map = instr->transitioned_map();
3925
- ElementsKind from_kind = from_map->elements_kind();
3926
- ElementsKind to_kind = to_map->elements_kind();
4562
+ ElementsKind from_kind = instr->from_kind();
4563
+ ElementsKind to_kind = instr->to_kind();
3927
4564
 
3928
4565
  Label not_applicable;
3929
4566
  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3930
4567
  __ cmp(scratch, Operand(from_map));
3931
4568
  __ b(ne, &not_applicable);
3932
- __ mov(new_map_reg, Operand(to_map));
3933
4569
 
3934
4570
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4571
+ Register new_map_reg = ToRegister(instr->new_map_temp());
4572
+ __ mov(new_map_reg, Operand(to_map));
3935
4573
  __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3936
4574
  // Write barrier.
3937
4575
  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3938
4576
  scratch, kLRHasBeenSaved, kDontSaveFPRegs);
4577
+ } else if (FLAG_compiled_transitions) {
4578
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4579
+ __ Move(r0, object_reg);
4580
+ __ Move(r1, to_map);
4581
+ TransitionElementsKindStub stub(from_kind, to_kind);
4582
+ __ CallStub(&stub);
4583
+ RecordSafepointWithRegisters(
4584
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
3939
4585
  } else if (IsFastSmiElementsKind(from_kind) &&
3940
4586
  IsFastDoubleElementsKind(to_kind)) {
3941
- Register fixed_object_reg = ToRegister(instr->temp_reg());
4587
+ Register fixed_object_reg = ToRegister(instr->temp());
3942
4588
  ASSERT(fixed_object_reg.is(r2));
4589
+ Register new_map_reg = ToRegister(instr->new_map_temp());
3943
4590
  ASSERT(new_map_reg.is(r3));
4591
+ __ mov(new_map_reg, Operand(to_map));
3944
4592
  __ mov(fixed_object_reg, object_reg);
3945
4593
  CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3946
4594
  RelocInfo::CODE_TARGET, instr);
3947
4595
  } else if (IsFastDoubleElementsKind(from_kind) &&
3948
4596
  IsFastObjectElementsKind(to_kind)) {
3949
- Register fixed_object_reg = ToRegister(instr->temp_reg());
4597
+ Register fixed_object_reg = ToRegister(instr->temp());
3950
4598
  ASSERT(fixed_object_reg.is(r2));
4599
+ Register new_map_reg = ToRegister(instr->new_map_temp());
3951
4600
  ASSERT(new_map_reg.is(r3));
4601
+ __ mov(new_map_reg, Operand(to_map));
3952
4602
  __ mov(fixed_object_reg, object_reg);
3953
4603
  CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3954
4604
  RelocInfo::CODE_TARGET, instr);
@@ -3959,6 +4609,14 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3959
4609
  }
3960
4610
 
3961
4611
 
4612
+ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4613
+ Register object = ToRegister(instr->object());
4614
+ Register temp = ToRegister(instr->temp());
4615
+ __ TestJSArrayForAllocationSiteInfo(object, temp);
4616
+ DeoptimizeIf(eq, instr->environment());
4617
+ }
4618
+
4619
+
3962
4620
  void LCodeGen::DoStringAdd(LStringAdd* instr) {
3963
4621
  __ push(ToRegister(instr->left()));
3964
4622
  __ push(ToRegister(instr->right()));
@@ -3979,7 +4637,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3979
4637
  };
3980
4638
 
3981
4639
  DeferredStringCharCodeAt* deferred =
3982
- new DeferredStringCharCodeAt(this, instr);
4640
+ new(zone()) DeferredStringCharCodeAt(this, instr);
3983
4641
 
3984
4642
  StringCharLoadGenerator::Generate(masm(),
3985
4643
  ToRegister(instr->string()),
@@ -3998,7 +4656,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3998
4656
  // TODO(3095996): Get rid of this. For now, we need to make the
3999
4657
  // result register contain a valid pointer because it is already
4000
4658
  // contained in the register pointer map.
4001
- __ mov(result, Operand(0));
4659
+ __ mov(result, Operand::Zero());
4002
4660
 
4003
4661
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4004
4662
  __ push(string);
@@ -4014,9 +4672,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4014
4672
  __ push(index);
4015
4673
  }
4016
4674
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
4017
- if (FLAG_debug_code) {
4018
- __ AbortIfNotSmi(r0);
4019
- }
4675
+ __ AssertSmi(r0);
4020
4676
  __ SmiUntag(r0);
4021
4677
  __ StoreToSafepointRegisterSlot(r0, result);
4022
4678
  }
@@ -4034,14 +4690,14 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4034
4690
  };
4035
4691
 
4036
4692
  DeferredStringCharFromCode* deferred =
4037
- new DeferredStringCharFromCode(this, instr);
4693
+ new(zone()) DeferredStringCharFromCode(this, instr);
4038
4694
 
4039
4695
  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4040
4696
  Register char_code = ToRegister(instr->char_code());
4041
4697
  Register result = ToRegister(instr->result());
4042
4698
  ASSERT(!char_code.is(result));
4043
4699
 
4044
- __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
4700
+ __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
4045
4701
  __ b(hi, deferred->entry());
4046
4702
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4047
4703
  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
@@ -4060,7 +4716,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4060
4716
  // TODO(3095996): Get rid of this. For now, we need to make the
4061
4717
  // result register contain a valid pointer because it is already
4062
4718
  // contained in the register pointer map.
4063
- __ mov(result, Operand(0));
4719
+ __ mov(result, Operand::Zero());
4064
4720
 
4065
4721
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4066
4722
  __ SmiTag(char_code);
@@ -4071,14 +4727,15 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4071
4727
 
4072
4728
 
4073
4729
  void LCodeGen::DoStringLength(LStringLength* instr) {
4074
- Register string = ToRegister(instr->InputAt(0));
4730
+ Register string = ToRegister(instr->string());
4075
4731
  Register result = ToRegister(instr->result());
4076
4732
  __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
4077
4733
  }
4078
4734
 
4079
4735
 
4080
4736
  void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4081
- LOperand* input = instr->InputAt(0);
4737
+ CpuFeatures::Scope scope(VFP2);
4738
+ LOperand* input = instr->value();
4082
4739
  ASSERT(input->IsRegister() || input->IsStackSlot());
4083
4740
  LOperand* output = instr->result();
4084
4741
  ASSERT(output->IsDoubleRegister());
@@ -4094,50 +4751,160 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4094
4751
  }
4095
4752
 
4096
4753
 
4754
+ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4755
+ CpuFeatures::Scope scope(VFP2);
4756
+ LOperand* input = instr->value();
4757
+ LOperand* output = instr->result();
4758
+
4759
+ SwVfpRegister flt_scratch = double_scratch0().low();
4760
+ __ vmov(flt_scratch, ToRegister(input));
4761
+ __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4762
+ }
4763
+
4764
+
4097
4765
  void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4098
4766
  class DeferredNumberTagI: public LDeferredCode {
4099
4767
  public:
4100
4768
  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4101
4769
  : LDeferredCode(codegen), instr_(instr) { }
4102
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
4770
+ virtual void Generate() {
4771
+ codegen()->DoDeferredNumberTagI(instr_,
4772
+ instr_->value(),
4773
+ SIGNED_INT32);
4774
+ }
4103
4775
  virtual LInstruction* instr() { return instr_; }
4104
4776
  private:
4105
4777
  LNumberTagI* instr_;
4106
4778
  };
4107
4779
 
4108
- Register src = ToRegister(instr->InputAt(0));
4780
+ Register src = ToRegister(instr->value());
4109
4781
  Register dst = ToRegister(instr->result());
4110
4782
 
4111
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
4783
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4112
4784
  __ SmiTag(dst, src, SetCC);
4113
4785
  __ b(vs, deferred->entry());
4114
4786
  __ bind(deferred->exit());
4115
4787
  }
4116
4788
 
4117
4789
 
4118
- void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
4790
+ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4791
+ class DeferredNumberTagU: public LDeferredCode {
4792
+ public:
4793
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4794
+ : LDeferredCode(codegen), instr_(instr) { }
4795
+ virtual void Generate() {
4796
+ codegen()->DoDeferredNumberTagI(instr_,
4797
+ instr_->value(),
4798
+ UNSIGNED_INT32);
4799
+ }
4800
+ virtual LInstruction* instr() { return instr_; }
4801
+ private:
4802
+ LNumberTagU* instr_;
4803
+ };
4804
+
4805
+ LOperand* input = instr->value();
4806
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
4807
+ Register reg = ToRegister(input);
4808
+
4809
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4810
+ __ cmp(reg, Operand(Smi::kMaxValue));
4811
+ __ b(hi, deferred->entry());
4812
+ __ SmiTag(reg, reg);
4813
+ __ bind(deferred->exit());
4814
+ }
4815
+
4816
+
4817
+ // Convert unsigned integer with specified number of leading zeroes in binary
4818
+ // representation to IEEE 754 double.
4819
+ // Integer to convert is passed in register hiword.
4820
+ // Resulting double is returned in registers hiword:loword.
4821
+ // This functions does not work correctly for 0.
4822
+ static void GenerateUInt2Double(MacroAssembler* masm,
4823
+ Register hiword,
4824
+ Register loword,
4825
+ Register scratch,
4826
+ int leading_zeroes) {
4827
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
4828
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
4829
+
4830
+ const int mantissa_shift_for_hi_word =
4831
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
4832
+ const int mantissa_shift_for_lo_word =
4833
+ kBitsPerInt - mantissa_shift_for_hi_word;
4834
+ masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
4835
+ if (mantissa_shift_for_hi_word > 0) {
4836
+ masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
4837
+ masm->orr(hiword, scratch,
4838
+ Operand(hiword, LSR, mantissa_shift_for_hi_word));
4839
+ } else {
4840
+ masm->mov(loword, Operand::Zero());
4841
+ masm->orr(hiword, scratch,
4842
+ Operand(hiword, LSL, -mantissa_shift_for_hi_word));
4843
+ }
4844
+
4845
+ // If least significant bit of biased exponent was not 1 it was corrupted
4846
+ // by most significant bit of mantissa so we should fix that.
4847
+ if (!(biased_exponent & 1)) {
4848
+ masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
4849
+ }
4850
+ }
4851
+
4852
+
4853
+ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4854
+ LOperand* value,
4855
+ IntegerSignedness signedness) {
4119
4856
  Label slow;
4120
- Register src = ToRegister(instr->InputAt(0));
4857
+ Register src = ToRegister(value);
4121
4858
  Register dst = ToRegister(instr->result());
4122
- DoubleRegister dbl_scratch = double_scratch0();
4859
+ DwVfpRegister dbl_scratch = double_scratch0();
4123
4860
  SwVfpRegister flt_scratch = dbl_scratch.low();
4124
4861
 
4125
4862
  // Preserve the value of all registers.
4126
4863
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4127
4864
 
4128
- // There was overflow, so bits 30 and 31 of the original integer
4129
- // disagree. Try to allocate a heap number in new space and store
4130
- // the value in there. If that fails, call the runtime system.
4131
4865
  Label done;
4132
- if (dst.is(src)) {
4133
- __ SmiUntag(src, dst);
4134
- __ eor(src, src, Operand(0x80000000));
4866
+ if (signedness == SIGNED_INT32) {
4867
+ // There was overflow, so bits 30 and 31 of the original integer
4868
+ // disagree. Try to allocate a heap number in new space and store
4869
+ // the value in there. If that fails, call the runtime system.
4870
+ if (dst.is(src)) {
4871
+ __ SmiUntag(src, dst);
4872
+ __ eor(src, src, Operand(0x80000000));
4873
+ }
4874
+ if (CpuFeatures::IsSupported(VFP2)) {
4875
+ CpuFeatures::Scope scope(VFP2);
4876
+ __ vmov(flt_scratch, src);
4877
+ __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4878
+ } else {
4879
+ FloatingPointHelper::Destination dest =
4880
+ FloatingPointHelper::kCoreRegisters;
4881
+ FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
4882
+ sfpd_lo, sfpd_hi,
4883
+ scratch0(), s0);
4884
+ }
4885
+ } else {
4886
+ if (CpuFeatures::IsSupported(VFP2)) {
4887
+ CpuFeatures::Scope scope(VFP2);
4888
+ __ vmov(flt_scratch, src);
4889
+ __ vcvt_f64_u32(dbl_scratch, flt_scratch);
4890
+ } else {
4891
+ Label no_leading_zero, done;
4892
+ __ tst(src, Operand(0x80000000));
4893
+ __ b(ne, &no_leading_zero);
4894
+
4895
+ // Integer has one leading zeros.
4896
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
4897
+ __ b(&done);
4898
+
4899
+ __ bind(&no_leading_zero);
4900
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
4901
+ __ b(&done);
4902
+ }
4135
4903
  }
4136
- __ vmov(flt_scratch, src);
4137
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4904
+
4138
4905
  if (FLAG_inline_new) {
4139
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
4140
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
4906
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
4907
+ __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
4141
4908
  __ Move(dst, r5);
4142
4909
  __ b(&done);
4143
4910
  }
@@ -4148,16 +4915,23 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
4148
4915
  // TODO(3095996): Put a valid pointer value in the stack slot where the result
4149
4916
  // register is stored, as this register is in the pointer map, but contains an
4150
4917
  // integer value.
4151
- __ mov(ip, Operand(0));
4918
+ __ mov(ip, Operand::Zero());
4152
4919
  __ StoreToSafepointRegisterSlot(ip, dst);
4153
4920
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4154
4921
  __ Move(dst, r0);
4922
+ __ sub(dst, dst, Operand(kHeapObjectTag));
4155
4923
 
4156
4924
  // Done. Put the value in dbl_scratch into the value of the allocated heap
4157
4925
  // number.
4158
4926
  __ bind(&done);
4159
- __ sub(ip, dst, Operand(kHeapObjectTag));
4160
- __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
4927
+ if (CpuFeatures::IsSupported(VFP2)) {
4928
+ CpuFeatures::Scope scope(VFP2);
4929
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4930
+ } else {
4931
+ __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
4932
+ __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
4933
+ }
4934
+ __ add(dst, dst, Operand(kHeapObjectTag));
4161
4935
  __ StoreToSafepointRegisterSlot(dst, dst);
4162
4936
  }
4163
4937
 
@@ -4173,22 +4947,84 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4173
4947
  LNumberTagD* instr_;
4174
4948
  };
4175
4949
 
4176
- DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
4950
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4177
4951
  Register scratch = scratch0();
4178
4952
  Register reg = ToRegister(instr->result());
4179
- Register temp1 = ToRegister(instr->TempAt(0));
4180
- Register temp2 = ToRegister(instr->TempAt(1));
4953
+ Register temp1 = ToRegister(instr->temp());
4954
+ Register temp2 = ToRegister(instr->temp2());
4955
+
4956
+ bool convert_hole = false;
4957
+ HValue* change_input = instr->hydrogen()->value();
4958
+ if (change_input->IsLoadKeyed()) {
4959
+ HLoadKeyed* load = HLoadKeyed::cast(change_input);
4960
+ convert_hole = load->UsesMustHandleHole();
4961
+ }
4181
4962
 
4182
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
4963
+ Label no_special_nan_handling;
4964
+ Label done;
4965
+ if (convert_hole) {
4966
+ if (CpuFeatures::IsSupported(VFP2)) {
4967
+ CpuFeatures::Scope scope(VFP2);
4968
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4969
+ __ VFPCompareAndSetFlags(input_reg, input_reg);
4970
+ __ b(vc, &no_special_nan_handling);
4971
+ __ vmov(reg, scratch0(), input_reg);
4972
+ __ cmp(scratch0(), Operand(kHoleNanUpper32));
4973
+ Label canonicalize;
4974
+ __ b(ne, &canonicalize);
4975
+ __ Move(reg, factory()->the_hole_value());
4976
+ __ b(&done);
4977
+ __ bind(&canonicalize);
4978
+ __ Vmov(input_reg,
4979
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
4980
+ no_reg);
4981
+ } else {
4982
+ Label not_hole;
4983
+ __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
4984
+ __ b(ne, &not_hole);
4985
+ __ Move(reg, factory()->the_hole_value());
4986
+ __ b(&done);
4987
+ __ bind(&not_hole);
4988
+ __ and_(scratch, sfpd_hi, Operand(0x7ff00000));
4989
+ __ cmp(scratch, Operand(0x7ff00000));
4990
+ __ b(ne, &no_special_nan_handling);
4991
+ Label special_nan_handling;
4992
+ __ tst(sfpd_hi, Operand(0x000FFFFF));
4993
+ __ b(ne, &special_nan_handling);
4994
+ __ cmp(sfpd_lo, Operand(0));
4995
+ __ b(eq, &no_special_nan_handling);
4996
+ __ bind(&special_nan_handling);
4997
+ double canonical_nan =
4998
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double();
4999
+ uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
5000
+ __ mov(sfpd_lo,
5001
+ Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
5002
+ __ mov(sfpd_hi,
5003
+ Operand(static_cast<uint32_t>(casted_nan >> 32)));
5004
+ }
5005
+ }
5006
+
5007
+ __ bind(&no_special_nan_handling);
5008
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4183
5009
  if (FLAG_inline_new) {
4184
5010
  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4185
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
5011
+ // We want the untagged address first for performance
5012
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
5013
+ DONT_TAG_RESULT);
4186
5014
  } else {
4187
5015
  __ jmp(deferred->entry());
4188
5016
  }
4189
5017
  __ bind(deferred->exit());
4190
- __ sub(ip, reg, Operand(kHeapObjectTag));
4191
- __ vstr(input_reg, ip, HeapNumber::kValueOffset);
5018
+ if (CpuFeatures::IsSupported(VFP2)) {
5019
+ CpuFeatures::Scope scope(VFP2);
5020
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
5021
+ } else {
5022
+ __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
5023
+ __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
5024
+ }
5025
+ // Now that we have finished with the object's real address tag it
5026
+ __ add(reg, reg, Operand(kHeapObjectTag));
5027
+ __ bind(&done);
4192
5028
  }
4193
5029
 
4194
5030
 
@@ -4197,22 +5033,23 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4197
5033
  // result register contain a valid pointer because it is already
4198
5034
  // contained in the register pointer map.
4199
5035
  Register reg = ToRegister(instr->result());
4200
- __ mov(reg, Operand(0));
5036
+ __ mov(reg, Operand::Zero());
4201
5037
 
4202
5038
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4203
5039
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
5040
+ __ sub(r0, r0, Operand(kHeapObjectTag));
4204
5041
  __ StoreToSafepointRegisterSlot(r0, reg);
4205
5042
  }
4206
5043
 
4207
5044
 
4208
5045
  void LCodeGen::DoSmiTag(LSmiTag* instr) {
4209
5046
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4210
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
5047
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
4211
5048
  }
4212
5049
 
4213
5050
 
4214
5051
  void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4215
- Register input = ToRegister(instr->InputAt(0));
5052
+ Register input = ToRegister(instr->value());
4216
5053
  Register result = ToRegister(instr->result());
4217
5054
  if (instr->needs_check()) {
4218
5055
  STATIC_ASSERT(kHeapObjectTag == 1);
@@ -4226,53 +5063,69 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4226
5063
 
4227
5064
 
4228
5065
  void LCodeGen::EmitNumberUntagD(Register input_reg,
4229
- DoubleRegister result_reg,
5066
+ DwVfpRegister result_reg,
4230
5067
  bool deoptimize_on_undefined,
4231
5068
  bool deoptimize_on_minus_zero,
4232
- LEnvironment* env) {
5069
+ LEnvironment* env,
5070
+ NumberUntagDMode mode) {
4233
5071
  Register scratch = scratch0();
4234
5072
  SwVfpRegister flt_scratch = double_scratch0().low();
4235
5073
  ASSERT(!result_reg.is(double_scratch0()));
5074
+ CpuFeatures::Scope scope(VFP2);
4236
5075
 
4237
5076
  Label load_smi, heap_number, done;
4238
5077
 
4239
- // Smi check.
4240
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
5078
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5079
+ // Smi check.
5080
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4241
5081
 
4242
- // Heap number map check.
4243
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4244
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4245
- __ cmp(scratch, Operand(ip));
4246
- if (deoptimize_on_undefined) {
4247
- DeoptimizeIf(ne, env);
4248
- } else {
4249
- Label heap_number;
4250
- __ b(eq, &heap_number);
5082
+ // Heap number map check.
5083
+ __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5084
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5085
+ __ cmp(scratch, Operand(ip));
5086
+ if (deoptimize_on_undefined) {
5087
+ DeoptimizeIf(ne, env);
5088
+ } else {
5089
+ Label heap_number;
5090
+ __ b(eq, &heap_number);
4251
5091
 
4252
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4253
- __ cmp(input_reg, Operand(ip));
4254
- DeoptimizeIf(ne, env);
5092
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5093
+ __ cmp(input_reg, Operand(ip));
5094
+ DeoptimizeIf(ne, env);
5095
+
5096
+ // Convert undefined to NaN.
5097
+ __ LoadRoot(ip, Heap::kNanValueRootIndex);
5098
+ __ sub(ip, ip, Operand(kHeapObjectTag));
5099
+ __ vldr(result_reg, ip, HeapNumber::kValueOffset);
5100
+ __ jmp(&done);
4255
5101
 
4256
- // Convert undefined to NaN.
4257
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
4258
- __ sub(ip, ip, Operand(kHeapObjectTag));
5102
+ __ bind(&heap_number);
5103
+ }
5104
+ // Heap number to double register conversion.
5105
+ __ sub(ip, input_reg, Operand(kHeapObjectTag));
4259
5106
  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
5107
+ if (deoptimize_on_minus_zero) {
5108
+ __ vmov(ip, result_reg.low());
5109
+ __ cmp(ip, Operand::Zero());
5110
+ __ b(ne, &done);
5111
+ __ vmov(ip, result_reg.high());
5112
+ __ cmp(ip, Operand(HeapNumber::kSignMask));
5113
+ DeoptimizeIf(eq, env);
5114
+ }
4260
5115
  __ jmp(&done);
4261
-
4262
- __ bind(&heap_number);
4263
- }
4264
- // Heap number to double register conversion.
4265
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
4266
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4267
- if (deoptimize_on_minus_zero) {
4268
- __ vmov(ip, result_reg.low());
4269
- __ cmp(ip, Operand(0));
4270
- __ b(ne, &done);
4271
- __ vmov(ip, result_reg.high());
4272
- __ cmp(ip, Operand(HeapNumber::kSignMask));
4273
- DeoptimizeIf(eq, env);
5116
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
5117
+ __ SmiUntag(scratch, input_reg, SetCC);
5118
+ DeoptimizeIf(cs, env);
5119
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
5120
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
5121
+ __ Vmov(result_reg,
5122
+ FixedDoubleArray::hole_nan_as_double(),
5123
+ no_reg);
5124
+ __ b(&done);
5125
+ } else {
5126
+ __ SmiUntag(scratch, input_reg);
5127
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4274
5128
  }
4275
- __ jmp(&done);
4276
5129
 
4277
5130
  // Smi to double register conversion
4278
5131
  __ bind(&load_smi);
@@ -4284,11 +5137,11 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
4284
5137
 
4285
5138
 
4286
5139
  void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4287
- Register input_reg = ToRegister(instr->InputAt(0));
5140
+ Register input_reg = ToRegister(instr->value());
4288
5141
  Register scratch1 = scratch0();
4289
- Register scratch2 = ToRegister(instr->TempAt(0));
5142
+ Register scratch2 = ToRegister(instr->temp());
4290
5143
  DwVfpRegister double_scratch = double_scratch0();
4291
- SwVfpRegister single_scratch = double_scratch.low();
5144
+ DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
4292
5145
 
4293
5146
  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4294
5147
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4307,8 +5160,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4307
5160
  __ cmp(scratch1, Operand(ip));
4308
5161
 
4309
5162
  if (instr->truncating()) {
4310
- Register scratch3 = ToRegister(instr->TempAt(1));
4311
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
5163
+ CpuFeatures::Scope scope(VFP2);
5164
+ Register scratch3 = ToRegister(instr->temp2());
4312
5165
  ASSERT(!scratch3.is(input_reg) &&
4313
5166
  !scratch3.is(scratch1) &&
4314
5167
  !scratch3.is(scratch2));
@@ -4321,7 +5174,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4321
5174
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4322
5175
  __ cmp(input_reg, Operand(ip));
4323
5176
  DeoptimizeIf(ne, instr->environment());
4324
- __ mov(input_reg, Operand(0));
5177
+ __ mov(input_reg, Operand::Zero());
4325
5178
  __ b(&done);
4326
5179
 
4327
5180
  __ bind(&heap_number);
@@ -4330,7 +5183,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4330
5183
 
4331
5184
  __ EmitECMATruncate(input_reg,
4332
5185
  double_scratch2,
4333
- single_scratch,
5186
+ double_scratch,
4334
5187
  scratch1,
4335
5188
  scratch2,
4336
5189
  scratch3);
@@ -4343,17 +5196,15 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4343
5196
  __ sub(ip, input_reg, Operand(kHeapObjectTag));
4344
5197
  __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
4345
5198
  __ EmitVFPTruncate(kRoundToZero,
4346
- single_scratch,
5199
+ input_reg,
4347
5200
  double_scratch,
4348
5201
  scratch1,
4349
- scratch2,
5202
+ double_scratch2,
4350
5203
  kCheckForInexactConversion);
4351
5204
  DeoptimizeIf(ne, instr->environment());
4352
- // Load the result.
4353
- __ vmov(input_reg, single_scratch);
4354
5205
 
4355
5206
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4356
- __ cmp(input_reg, Operand(0));
5207
+ __ cmp(input_reg, Operand::Zero());
4357
5208
  __ b(ne, &done);
4358
5209
  __ vmov(scratch1, double_scratch.high());
4359
5210
  __ tst(scratch1, Operand(HeapNumber::kSignMask));
@@ -4375,13 +5226,13 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4375
5226
  LTaggedToI* instr_;
4376
5227
  };
4377
5228
 
4378
- LOperand* input = instr->InputAt(0);
5229
+ LOperand* input = instr->value();
4379
5230
  ASSERT(input->IsRegister());
4380
5231
  ASSERT(input->Equals(instr->result()));
4381
5232
 
4382
5233
  Register input_reg = ToRegister(input);
4383
5234
 
4384
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
5235
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4385
5236
 
4386
5237
  // Optimistically untag the input.
4387
5238
  // If the input is a HeapObject, SmiUntag will set the carry flag.
@@ -4394,72 +5245,88 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4394
5245
 
4395
5246
 
4396
5247
  void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4397
- LOperand* input = instr->InputAt(0);
5248
+ LOperand* input = instr->value();
4398
5249
  ASSERT(input->IsRegister());
4399
5250
  LOperand* result = instr->result();
4400
5251
  ASSERT(result->IsDoubleRegister());
4401
5252
 
4402
5253
  Register input_reg = ToRegister(input);
4403
- DoubleRegister result_reg = ToDoubleRegister(result);
5254
+ DwVfpRegister result_reg = ToDoubleRegister(result);
5255
+
5256
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
5257
+ HValue* value = instr->hydrogen()->value();
5258
+ if (value->type().IsSmi()) {
5259
+ if (value->IsLoadKeyed()) {
5260
+ HLoadKeyed* load = HLoadKeyed::cast(value);
5261
+ if (load->UsesMustHandleHole()) {
5262
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
5263
+ mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
5264
+ } else {
5265
+ mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
5266
+ }
5267
+ } else {
5268
+ mode = NUMBER_CANDIDATE_IS_SMI;
5269
+ }
5270
+ }
5271
+ }
4404
5272
 
4405
5273
  EmitNumberUntagD(input_reg, result_reg,
4406
5274
  instr->hydrogen()->deoptimize_on_undefined(),
4407
5275
  instr->hydrogen()->deoptimize_on_minus_zero(),
4408
- instr->environment());
5276
+ instr->environment(),
5277
+ mode);
4409
5278
  }
4410
5279
 
4411
5280
 
4412
5281
  void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4413
5282
  Register result_reg = ToRegister(instr->result());
4414
5283
  Register scratch1 = scratch0();
4415
- Register scratch2 = ToRegister(instr->TempAt(0));
4416
- DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
4417
- SwVfpRegister single_scratch = double_scratch0().low();
5284
+ Register scratch2 = ToRegister(instr->temp());
5285
+ DwVfpRegister double_input = ToDoubleRegister(instr->value());
5286
+ DwVfpRegister double_scratch = double_scratch0();
4418
5287
 
4419
5288
  Label done;
4420
5289
 
4421
5290
  if (instr->truncating()) {
4422
- Register scratch3 = ToRegister(instr->TempAt(1));
5291
+ Register scratch3 = ToRegister(instr->temp2());
4423
5292
  __ EmitECMATruncate(result_reg,
4424
5293
  double_input,
4425
- single_scratch,
5294
+ double_scratch,
4426
5295
  scratch1,
4427
5296
  scratch2,
4428
5297
  scratch3);
4429
5298
  } else {
4430
- VFPRoundingMode rounding_mode = kRoundToMinusInf;
4431
- __ EmitVFPTruncate(rounding_mode,
4432
- single_scratch,
5299
+ __ EmitVFPTruncate(kRoundToMinusInf,
5300
+ result_reg,
4433
5301
  double_input,
4434
5302
  scratch1,
4435
- scratch2,
5303
+ double_scratch,
4436
5304
  kCheckForInexactConversion);
5305
+
4437
5306
  // Deoptimize if we had a vfp invalid exception,
4438
5307
  // including inexact operation.
4439
5308
  DeoptimizeIf(ne, instr->environment());
4440
- // Retrieve the result.
4441
- __ vmov(result_reg, single_scratch);
4442
5309
  }
4443
5310
  __ bind(&done);
4444
5311
  }
4445
5312
 
4446
5313
 
4447
5314
  void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4448
- LOperand* input = instr->InputAt(0);
5315
+ LOperand* input = instr->value();
4449
5316
  __ tst(ToRegister(input), Operand(kSmiTagMask));
4450
5317
  DeoptimizeIf(ne, instr->environment());
4451
5318
  }
4452
5319
 
4453
5320
 
4454
5321
  void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4455
- LOperand* input = instr->InputAt(0);
5322
+ LOperand* input = instr->value();
4456
5323
  __ tst(ToRegister(input), Operand(kSmiTagMask));
4457
5324
  DeoptimizeIf(eq, instr->environment());
4458
5325
  }
4459
5326
 
4460
5327
 
4461
5328
  void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4462
- Register input = ToRegister(instr->InputAt(0));
5329
+ Register input = ToRegister(instr->value());
4463
5330
  Register scratch = scratch0();
4464
5331
 
4465
5332
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -4518,46 +5385,48 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4518
5385
  }
4519
5386
 
4520
5387
 
4521
- void LCodeGen::DoCheckMapCommon(Register reg,
4522
- Register scratch,
5388
+ void LCodeGen::DoCheckMapCommon(Register map_reg,
4523
5389
  Handle<Map> map,
4524
5390
  CompareMapMode mode,
4525
5391
  LEnvironment* env) {
4526
5392
  Label success;
4527
- __ CompareMap(reg, scratch, map, &success, mode);
5393
+ __ CompareMap(map_reg, map, &success, mode);
4528
5394
  DeoptimizeIf(ne, env);
4529
5395
  __ bind(&success);
4530
5396
  }
4531
5397
 
4532
5398
 
4533
5399
  void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4534
- Register scratch = scratch0();
4535
- LOperand* input = instr->InputAt(0);
5400
+ Register map_reg = scratch0();
5401
+ LOperand* input = instr->value();
4536
5402
  ASSERT(input->IsRegister());
4537
5403
  Register reg = ToRegister(input);
4538
5404
 
4539
5405
  Label success;
4540
5406
  SmallMapList* map_set = instr->hydrogen()->map_set();
5407
+ __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
4541
5408
  for (int i = 0; i < map_set->length() - 1; i++) {
4542
5409
  Handle<Map> map = map_set->at(i);
4543
- __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
5410
+ __ CompareMap(map_reg, map, &success, REQUIRE_EXACT_MAP);
4544
5411
  __ b(eq, &success);
4545
5412
  }
4546
5413
  Handle<Map> map = map_set->last();
4547
- DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
5414
+ DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
4548
5415
  __ bind(&success);
4549
5416
  }
4550
5417
 
4551
5418
 
4552
5419
  void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4553
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5420
+ CpuFeatures::Scope vfp_scope(VFP2);
5421
+ DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
4554
5422
  Register result_reg = ToRegister(instr->result());
4555
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
5423
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
4556
5424
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4557
5425
  }
4558
5426
 
4559
5427
 
4560
5428
  void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5429
+ CpuFeatures::Scope scope(VFP2);
4561
5430
  Register unclamped_reg = ToRegister(instr->unclamped());
4562
5431
  Register result_reg = ToRegister(instr->result());
4563
5432
  __ ClampUint8(result_reg, unclamped_reg);
@@ -4565,10 +5434,11 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4565
5434
 
4566
5435
 
4567
5436
  void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5437
+ CpuFeatures::Scope scope(VFP2);
4568
5438
  Register scratch = scratch0();
4569
5439
  Register input_reg = ToRegister(instr->unclamped());
4570
5440
  Register result_reg = ToRegister(instr->result());
4571
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
5441
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
4572
5442
  Label is_smi, done, heap_number;
4573
5443
 
4574
5444
  // Both smi and heap number cases are handled.
@@ -4583,7 +5453,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4583
5453
  // conversions.
4584
5454
  __ cmp(input_reg, Operand(factory()->undefined_value()));
4585
5455
  DeoptimizeIf(ne, instr->environment());
4586
- __ mov(result_reg, Operand(0));
5456
+ __ mov(result_reg, Operand::Zero());
4587
5457
  __ jmp(&done);
4588
5458
 
4589
5459
  // Heap number
@@ -4602,31 +5472,23 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4602
5472
 
4603
5473
 
4604
5474
  void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4605
- Register temp1 = ToRegister(instr->TempAt(0));
4606
- Register temp2 = ToRegister(instr->TempAt(1));
4607
-
4608
- Handle<JSObject> holder = instr->holder();
4609
- Handle<JSObject> current_prototype = instr->prototype();
4610
-
4611
- // Load prototype object.
4612
- __ LoadHeapObject(temp1, current_prototype);
4613
-
4614
- // Check prototype maps up to the holder.
4615
- while (!current_prototype.is_identical_to(holder)) {
4616
- DoCheckMapCommon(temp1, temp2,
4617
- Handle<Map>(current_prototype->map()),
4618
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4619
- current_prototype =
4620
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4621
- // Load next prototype object.
4622
- __ LoadHeapObject(temp1, current_prototype);
4623
- }
4624
-
4625
- // Check the holder map.
4626
- DoCheckMapCommon(temp1, temp2,
4627
- Handle<Map>(current_prototype->map()),
4628
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4629
- DeoptimizeIf(ne, instr->environment());
5475
+ ASSERT(instr->temp()->Equals(instr->result()));
5476
+ Register prototype_reg = ToRegister(instr->temp());
5477
+ Register map_reg = ToRegister(instr->temp2());
5478
+
5479
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
5480
+ ZoneList<Handle<Map> >* maps = instr->maps();
5481
+
5482
+ ASSERT(prototypes->length() == maps->length());
5483
+
5484
+ for (int i = 0; i < prototypes->length(); i++) {
5485
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
5486
+ __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
5487
+ DoCheckMapCommon(map_reg,
5488
+ maps->at(i),
5489
+ ALLOW_ELEMENT_TRANSITION_MAPS,
5490
+ instr->environment());
5491
+ }
4630
5492
  }
4631
5493
 
4632
5494
 
@@ -4641,11 +5503,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4641
5503
  LAllocateObject* instr_;
4642
5504
  };
4643
5505
 
4644
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
5506
+ DeferredAllocateObject* deferred =
5507
+ new(zone()) DeferredAllocateObject(this, instr);
4645
5508
 
4646
5509
  Register result = ToRegister(instr->result());
4647
- Register scratch = ToRegister(instr->TempAt(0));
4648
- Register scratch2 = ToRegister(instr->TempAt(1));
5510
+ Register scratch = ToRegister(instr->temp());
5511
+ Register scratch2 = ToRegister(instr->temp2());
4649
5512
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4650
5513
  Handle<Map> initial_map(constructor->initial_map());
4651
5514
  int instance_size = initial_map->instance_size();
@@ -4702,7 +5565,7 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4702
5565
  // TODO(3095996): Get rid of this. For now, we need to make the
4703
5566
  // result register contain a valid pointer because it is already
4704
5567
  // contained in the register pointer map.
4705
- __ mov(result, Operand(0));
5568
+ __ mov(result, Operand::Zero());
4706
5569
 
4707
5570
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4708
5571
  __ mov(r0, Operand(Smi::FromInt(instance_size)));
@@ -4712,10 +5575,69 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4712
5575
  }
4713
5576
 
4714
5577
 
5578
+ void LCodeGen::DoAllocate(LAllocate* instr) {
5579
+ class DeferredAllocate: public LDeferredCode {
5580
+ public:
5581
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5582
+ : LDeferredCode(codegen), instr_(instr) { }
5583
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
5584
+ virtual LInstruction* instr() { return instr_; }
5585
+ private:
5586
+ LAllocate* instr_;
5587
+ };
5588
+
5589
+ DeferredAllocate* deferred =
5590
+ new(zone()) DeferredAllocate(this, instr);
5591
+
5592
+ Register size = ToRegister(instr->size());
5593
+ Register result = ToRegister(instr->result());
5594
+ Register scratch = ToRegister(instr->temp1());
5595
+ Register scratch2 = ToRegister(instr->temp2());
5596
+
5597
+ HAllocate* original_instr = instr->hydrogen();
5598
+ if (original_instr->size()->IsConstant()) {
5599
+ UNREACHABLE();
5600
+ } else {
5601
+ // Allocate memory for the object.
5602
+ AllocationFlags flags = TAG_OBJECT;
5603
+ if (original_instr->MustAllocateDoubleAligned()) {
5604
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5605
+ }
5606
+ __ AllocateInNewSpace(size,
5607
+ result,
5608
+ scratch,
5609
+ scratch2,
5610
+ deferred->entry(),
5611
+ TAG_OBJECT);
5612
+ }
5613
+
5614
+ __ bind(deferred->exit());
5615
+ }
5616
+
5617
+
5618
+ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5619
+ Register size = ToRegister(instr->size());
5620
+ Register result = ToRegister(instr->result());
5621
+
5622
+ // TODO(3095996): Get rid of this. For now, we need to make the
5623
+ // result register contain a valid pointer because it is already
5624
+ // contained in the register pointer map.
5625
+ __ mov(result, Operand(Smi::FromInt(0)));
5626
+
5627
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5628
+ __ SmiTag(size, size);
5629
+ __ push(size);
5630
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
5631
+ __ StoreToSafepointRegisterSlot(r0, result);
5632
+ }
5633
+
5634
+
4715
5635
  void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4716
- Heap* heap = isolate()->heap();
5636
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
4717
5637
  ElementsKind boilerplate_elements_kind =
4718
5638
  instr->hydrogen()->boilerplate_elements_kind();
5639
+ AllocationSiteMode allocation_site_mode =
5640
+ instr->hydrogen()->allocation_site_mode();
4719
5641
 
4720
5642
  // Deopt if the array literal boilerplate ElementsKind is of a type different
4721
5643
  // than the expected one. The check isn't necessary if the boilerplate has
@@ -4733,12 +5655,12 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4733
5655
  DeoptimizeIf(ne, instr->environment());
4734
5656
  }
4735
5657
 
4736
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4737
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
5658
+ // Set up the parameters to the stub/runtime call.
5659
+ __ LoadHeapObject(r3, literals);
4738
5660
  __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4739
5661
  // Boilerplate already exists, constant elements are never accessed.
4740
5662
  // Pass an empty fixed array.
4741
- __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
5663
+ __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
4742
5664
  __ Push(r3, r2, r1);
4743
5665
 
4744
5666
  // Pick the right runtime function or stub to call.
@@ -4747,7 +5669,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4747
5669
  ASSERT(instr->hydrogen()->depth() == 1);
4748
5670
  FastCloneShallowArrayStub::Mode mode =
4749
5671
  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4750
- FastCloneShallowArrayStub stub(mode, length);
5672
+ FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
4751
5673
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4752
5674
  } else if (instr->hydrogen()->depth() > 1) {
4753
5675
  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
@@ -4756,9 +5678,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4756
5678
  } else {
4757
5679
  FastCloneShallowArrayStub::Mode mode =
4758
5680
  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4759
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4760
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4761
- FastCloneShallowArrayStub stub(mode, length);
5681
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
5682
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
5683
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
4762
5684
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4763
5685
  }
4764
5686
  }
@@ -4767,10 +5689,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4767
5689
  void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4768
5690
  Register result,
4769
5691
  Register source,
4770
- int* offset) {
5692
+ int* offset,
5693
+ AllocationSiteMode mode) {
4771
5694
  ASSERT(!source.is(r2));
4772
5695
  ASSERT(!result.is(r2));
4773
5696
 
5697
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
5698
+ object->map()->CanTrackAllocationSite();
5699
+
4774
5700
  // Only elements backing stores for non-COW arrays need to be copied.
4775
5701
  Handle<FixedArrayBase> elements(object->elements());
4776
5702
  bool has_elements = elements->length() > 0 &&
@@ -4780,8 +5706,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4780
5706
  // this object and its backing store.
4781
5707
  int object_offset = *offset;
4782
5708
  int object_size = object->map()->instance_size();
4783
- int elements_offset = *offset + object_size;
4784
5709
  int elements_size = has_elements ? elements->Size() : 0;
5710
+ int elements_offset = *offset + object_size;
5711
+ if (create_allocation_site_info) {
5712
+ elements_offset += AllocationSiteInfo::kSize;
5713
+ *offset += AllocationSiteInfo::kSize;
5714
+ }
5715
+
4785
5716
  *offset += object_size + elements_size;
4786
5717
 
4787
5718
  // Copy object header.
@@ -4806,7 +5737,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4806
5737
  __ add(r2, result, Operand(*offset));
4807
5738
  __ str(r2, FieldMemOperand(result, total_offset));
4808
5739
  __ LoadHeapObject(source, value_object);
4809
- EmitDeepCopy(value_object, result, source, offset);
5740
+ EmitDeepCopy(value_object, result, source, offset,
5741
+ DONT_TRACK_ALLOCATION_SITE);
4810
5742
  } else if (value->IsHeapObject()) {
4811
5743
  __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4812
5744
  __ str(r2, FieldMemOperand(result, total_offset));
@@ -4816,6 +5748,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4816
5748
  }
4817
5749
  }
4818
5750
 
5751
+ // Build Allocation Site Info if desired
5752
+ if (create_allocation_site_info) {
5753
+ __ mov(r2, Operand(Handle<Map>(isolate()->heap()->
5754
+ allocation_site_info_map())));
5755
+ __ str(r2, FieldMemOperand(result, object_size));
5756
+ __ str(source, FieldMemOperand(result, object_size + kPointerSize));
5757
+ }
5758
+
4819
5759
  if (has_elements) {
4820
5760
  // Copy elements backing store header.
4821
5761
  __ LoadHeapObject(source, elements);
@@ -4832,8 +5772,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4832
5772
  for (int i = 0; i < elements_length; i++) {
4833
5773
  int64_t value = double_array->get_representation(i);
4834
5774
  // We only support little endian mode...
4835
- int32_t value_low = value & 0xFFFFFFFF;
4836
- int32_t value_high = value >> 32;
5775
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
5776
+ int32_t value_high = static_cast<int32_t>(value >> 32);
4837
5777
  int total_offset =
4838
5778
  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4839
5779
  __ mov(r2, Operand(value_low));
@@ -4851,7 +5791,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4851
5791
  __ add(r2, result, Operand(*offset));
4852
5792
  __ str(r2, FieldMemOperand(result, total_offset));
4853
5793
  __ LoadHeapObject(source, value_object);
4854
- EmitDeepCopy(value_object, result, source, offset);
5794
+ EmitDeepCopy(value_object, result, source, offset,
5795
+ DONT_TRACK_ALLOCATION_SITE);
4855
5796
  } else if (value->IsHeapObject()) {
4856
5797
  __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4857
5798
  __ str(r2, FieldMemOperand(result, total_offset));
@@ -4902,7 +5843,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4902
5843
  __ bind(&allocated);
4903
5844
  int offset = 0;
4904
5845
  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
4905
- EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
5846
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset,
5847
+ instr->hydrogen()->allocation_site_mode());
4906
5848
  ASSERT_EQ(size, offset);
4907
5849
  }
4908
5850
 
@@ -4937,7 +5879,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4937
5879
 
4938
5880
 
4939
5881
  void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4940
- ASSERT(ToRegister(instr->InputAt(0)).is(r0));
5882
+ ASSERT(ToRegister(instr->value()).is(r0));
4941
5883
  __ push(r0);
4942
5884
  CallRuntime(Runtime::kToFastProperties, 1, instr);
4943
5885
  }
@@ -4946,15 +5888,13 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4946
5888
  void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4947
5889
  Label materialized;
4948
5890
  // Registers will be used as follows:
4949
- // r3 = JS function.
4950
5891
  // r7 = literals array.
4951
5892
  // r1 = regexp literal.
4952
5893
  // r0 = regexp literal clone.
4953
5894
  // r2 and r4-r6 are used as temporaries.
4954
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4955
- __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4956
- int literal_offset = FixedArray::kHeaderSize +
4957
- instr->hydrogen()->literal_index() * kPointerSize;
5895
+ int literal_offset =
5896
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5897
+ __ LoadHeapObject(r7, instr->hydrogen()->literals());
4958
5898
  __ ldr(r1, FieldMemOperand(r7, literal_offset));
4959
5899
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4960
5900
  __ cmp(r1, ip);
@@ -5020,14 +5960,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5020
5960
 
5021
5961
 
5022
5962
  void LCodeGen::DoTypeof(LTypeof* instr) {
5023
- Register input = ToRegister(instr->InputAt(0));
5963
+ Register input = ToRegister(instr->value());
5024
5964
  __ push(input);
5025
5965
  CallRuntime(Runtime::kTypeof, 1, instr);
5026
5966
  }
5027
5967
 
5028
5968
 
5029
5969
  void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5030
- Register input = ToRegister(instr->InputAt(0));
5970
+ Register input = ToRegister(instr->value());
5031
5971
  int true_block = chunk_->LookupDestination(instr->true_block_id());
5032
5972
  int false_block = chunk_->LookupDestination(instr->false_block_id());
5033
5973
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -5117,7 +6057,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
5117
6057
 
5118
6058
 
5119
6059
  void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5120
- Register temp1 = ToRegister(instr->TempAt(0));
6060
+ Register temp1 = ToRegister(instr->temp());
5121
6061
  int true_block = chunk_->LookupDestination(instr->true_block_id());
5122
6062
  int false_block = chunk_->LookupDestination(instr->false_block_id());
5123
6063
 
@@ -5146,11 +6086,14 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5146
6086
 
5147
6087
 
5148
6088
  void LCodeGen::EnsureSpaceForLazyDeopt() {
6089
+ if (info()->IsStub()) return;
5149
6090
  // Ensure that we have enough space after the previous lazy-bailout
5150
6091
  // instruction for patching the code here.
5151
6092
  int current_pc = masm()->pc_offset();
5152
6093
  int patch_size = Deoptimizer::patch_size();
5153
6094
  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
6095
+ // Block literal pool emission for duration of padding.
6096
+ Assembler::BlockConstPoolScope block_const_pool(masm());
5154
6097
  int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5155
6098
  ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5156
6099
  while (padding_size > 0) {
@@ -5176,6 +6119,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5176
6119
  }
5177
6120
 
5178
6121
 
6122
+ void LCodeGen::DoDummyUse(LDummyUse* instr) {
6123
+ // Nothing to see here, move on!
6124
+ }
6125
+
6126
+
5179
6127
  void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5180
6128
  Register object = ToRegister(instr->object());
5181
6129
  Register key = ToRegister(instr->key());
@@ -5236,6 +6184,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
5236
6184
  __ cmp(sp, Operand(ip));
5237
6185
  __ b(hs, &done);
5238
6186
  StackCheckStub stub;
6187
+ PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
5239
6188
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5240
6189
  EnsureSpaceForLazyDeopt();
5241
6190
  __ bind(&done);
@@ -5245,7 +6194,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
5245
6194
  ASSERT(instr->hydrogen()->is_backwards_branch());
5246
6195
  // Perform stack overflow check if this goto needs it before jumping.
5247
6196
  DeferredStackCheck* deferred_stack_check =
5248
- new DeferredStackCheck(this, instr);
6197
+ new(zone()) DeferredStackCheck(this, instr);
5249
6198
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5250
6199
  __ cmp(sp, Operand(ip));
5251
6200
  __ b(lo, deferred_stack_check->entry());
@@ -5316,13 +6265,23 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5316
6265
  void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5317
6266
  Register map = ToRegister(instr->map());
5318
6267
  Register result = ToRegister(instr->result());
6268
+ Label load_cache, done;
6269
+ __ EnumLength(result, map);
6270
+ __ cmp(result, Operand(Smi::FromInt(0)));
6271
+ __ b(ne, &load_cache);
6272
+ __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
6273
+ __ jmp(&done);
6274
+
6275
+ __ bind(&load_cache);
5319
6276
  __ LoadInstanceDescriptors(map, result);
5320
6277
  __ ldr(result,
5321
- FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
6278
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5322
6279
  __ ldr(result,
5323
6280
  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5324
- __ cmp(result, Operand(0));
6281
+ __ cmp(result, Operand::Zero());
5325
6282
  DeoptimizeIf(eq, instr->environment());
6283
+
6284
+ __ bind(&done);
5326
6285
  }
5327
6286
 
5328
6287
 
@@ -5342,7 +6301,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5342
6301
  Register scratch = scratch0();
5343
6302
 
5344
6303
  Label out_of_object, done;
5345
- __ cmp(index, Operand(0));
6304
+ __ cmp(index, Operand::Zero());
5346
6305
  __ b(lt, &out_of_object);
5347
6306
 
5348
6307
  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);