libv8 gem update: version 3.11.8.17 → 3.16.14.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (754):
  1. checksums.yaml +4 -4
  2. data/.travis.yml +1 -2
  3. data/Gemfile +1 -1
  4. data/Rakefile +6 -7
  5. data/lib/libv8/version.rb +1 -1
  6. data/vendor/v8/.gitignore +24 -3
  7. data/vendor/v8/AUTHORS +7 -0
  8. data/vendor/v8/ChangeLog +839 -0
  9. data/vendor/v8/DEPS +1 -1
  10. data/vendor/v8/Makefile.android +92 -0
  11. data/vendor/v8/OWNERS +11 -0
  12. data/vendor/v8/PRESUBMIT.py +71 -0
  13. data/vendor/v8/SConstruct +34 -39
  14. data/vendor/v8/build/android.gypi +56 -37
  15. data/vendor/v8/build/common.gypi +112 -30
  16. data/vendor/v8/build/gyp_v8 +1 -1
  17. data/vendor/v8/build/standalone.gypi +15 -11
  18. data/vendor/v8/include/v8-debug.h +9 -1
  19. data/vendor/v8/include/v8-preparser.h +4 -3
  20. data/vendor/v8/include/v8-profiler.h +25 -25
  21. data/vendor/v8/include/v8-testing.h +4 -3
  22. data/vendor/v8/include/v8.h +994 -540
  23. data/vendor/v8/preparser/preparser-process.cc +3 -3
  24. data/vendor/v8/samples/lineprocessor.cc +20 -27
  25. data/vendor/v8/samples/process.cc +18 -14
  26. data/vendor/v8/samples/shell.cc +16 -15
  27. data/vendor/v8/src/SConscript +15 -14
  28. data/vendor/v8/src/accessors.cc +169 -77
  29. data/vendor/v8/src/accessors.h +4 -0
  30. data/vendor/v8/src/allocation-inl.h +2 -2
  31. data/vendor/v8/src/allocation.h +7 -7
  32. data/vendor/v8/src/api.cc +810 -497
  33. data/vendor/v8/src/api.h +85 -60
  34. data/vendor/v8/src/arm/assembler-arm-inl.h +179 -22
  35. data/vendor/v8/src/arm/assembler-arm.cc +633 -264
  36. data/vendor/v8/src/arm/assembler-arm.h +264 -197
  37. data/vendor/v8/src/arm/builtins-arm.cc +117 -27
  38. data/vendor/v8/src/arm/code-stubs-arm.cc +1241 -700
  39. data/vendor/v8/src/arm/code-stubs-arm.h +35 -138
  40. data/vendor/v8/src/arm/codegen-arm.cc +285 -16
  41. data/vendor/v8/src/arm/codegen-arm.h +22 -0
  42. data/vendor/v8/src/arm/constants-arm.cc +5 -3
  43. data/vendor/v8/src/arm/constants-arm.h +24 -11
  44. data/vendor/v8/src/arm/debug-arm.cc +3 -3
  45. data/vendor/v8/src/arm/deoptimizer-arm.cc +382 -92
  46. data/vendor/v8/src/arm/disasm-arm.cc +61 -12
  47. data/vendor/v8/src/arm/frames-arm.h +0 -14
  48. data/vendor/v8/src/arm/full-codegen-arm.cc +332 -304
  49. data/vendor/v8/src/arm/ic-arm.cc +180 -259
  50. data/vendor/v8/src/arm/lithium-arm.cc +364 -316
  51. data/vendor/v8/src/arm/lithium-arm.h +512 -275
  52. data/vendor/v8/src/arm/lithium-codegen-arm.cc +1768 -809
  53. data/vendor/v8/src/arm/lithium-codegen-arm.h +97 -35
  54. data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +12 -5
  55. data/vendor/v8/src/arm/macro-assembler-arm.cc +439 -228
  56. data/vendor/v8/src/arm/macro-assembler-arm.h +116 -70
  57. data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +54 -44
  58. data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +3 -10
  59. data/vendor/v8/src/arm/simulator-arm.cc +272 -238
  60. data/vendor/v8/src/arm/simulator-arm.h +38 -8
  61. data/vendor/v8/src/arm/stub-cache-arm.cc +522 -895
  62. data/vendor/v8/src/array.js +101 -70
  63. data/vendor/v8/src/assembler.cc +270 -19
  64. data/vendor/v8/src/assembler.h +110 -15
  65. data/vendor/v8/src/ast.cc +79 -69
  66. data/vendor/v8/src/ast.h +255 -301
  67. data/vendor/v8/src/atomicops.h +7 -1
  68. data/vendor/v8/src/atomicops_internals_tsan.h +335 -0
  69. data/vendor/v8/src/bootstrapper.cc +481 -418
  70. data/vendor/v8/src/bootstrapper.h +4 -4
  71. data/vendor/v8/src/builtins.cc +498 -311
  72. data/vendor/v8/src/builtins.h +75 -47
  73. data/vendor/v8/src/checks.cc +2 -1
  74. data/vendor/v8/src/checks.h +8 -0
  75. data/vendor/v8/src/code-stubs-hydrogen.cc +253 -0
  76. data/vendor/v8/src/code-stubs.cc +249 -84
  77. data/vendor/v8/src/code-stubs.h +501 -169
  78. data/vendor/v8/src/codegen.cc +36 -18
  79. data/vendor/v8/src/codegen.h +25 -3
  80. data/vendor/v8/src/collection.js +54 -17
  81. data/vendor/v8/src/compilation-cache.cc +24 -16
  82. data/vendor/v8/src/compilation-cache.h +15 -6
  83. data/vendor/v8/src/compiler.cc +497 -195
  84. data/vendor/v8/src/compiler.h +246 -38
  85. data/vendor/v8/src/contexts.cc +64 -24
  86. data/vendor/v8/src/contexts.h +60 -29
  87. data/vendor/v8/src/conversions-inl.h +24 -14
  88. data/vendor/v8/src/conversions.h +7 -4
  89. data/vendor/v8/src/counters.cc +21 -12
  90. data/vendor/v8/src/counters.h +44 -16
  91. data/vendor/v8/src/cpu-profiler.h +1 -1
  92. data/vendor/v8/src/d8-debug.cc +2 -2
  93. data/vendor/v8/src/d8-readline.cc +13 -2
  94. data/vendor/v8/src/d8.cc +681 -273
  95. data/vendor/v8/src/d8.gyp +4 -4
  96. data/vendor/v8/src/d8.h +38 -18
  97. data/vendor/v8/src/d8.js +0 -617
  98. data/vendor/v8/src/data-flow.h +55 -0
  99. data/vendor/v8/src/date.js +1 -42
  100. data/vendor/v8/src/dateparser-inl.h +5 -1
  101. data/vendor/v8/src/debug-agent.cc +10 -15
  102. data/vendor/v8/src/debug-debugger.js +147 -149
  103. data/vendor/v8/src/debug.cc +323 -164
  104. data/vendor/v8/src/debug.h +26 -14
  105. data/vendor/v8/src/deoptimizer.cc +765 -290
  106. data/vendor/v8/src/deoptimizer.h +130 -28
  107. data/vendor/v8/src/disassembler.cc +10 -4
  108. data/vendor/v8/src/elements-kind.cc +7 -2
  109. data/vendor/v8/src/elements-kind.h +19 -0
  110. data/vendor/v8/src/elements.cc +607 -285
  111. data/vendor/v8/src/elements.h +36 -13
  112. data/vendor/v8/src/execution.cc +52 -31
  113. data/vendor/v8/src/execution.h +4 -4
  114. data/vendor/v8/src/extensions/externalize-string-extension.cc +5 -4
  115. data/vendor/v8/src/extensions/gc-extension.cc +5 -1
  116. data/vendor/v8/src/extensions/statistics-extension.cc +153 -0
  117. data/vendor/v8/src/{inspector.h → extensions/statistics-extension.h} +12 -23
  118. data/vendor/v8/src/factory.cc +101 -134
  119. data/vendor/v8/src/factory.h +36 -31
  120. data/vendor/v8/src/flag-definitions.h +102 -25
  121. data/vendor/v8/src/flags.cc +9 -5
  122. data/vendor/v8/src/frames-inl.h +10 -0
  123. data/vendor/v8/src/frames.cc +116 -26
  124. data/vendor/v8/src/frames.h +96 -12
  125. data/vendor/v8/src/full-codegen.cc +219 -74
  126. data/vendor/v8/src/full-codegen.h +63 -21
  127. data/vendor/v8/src/func-name-inferrer.cc +8 -7
  128. data/vendor/v8/src/func-name-inferrer.h +5 -3
  129. data/vendor/v8/src/gdb-jit.cc +71 -57
  130. data/vendor/v8/src/global-handles.cc +230 -101
  131. data/vendor/v8/src/global-handles.h +26 -27
  132. data/vendor/v8/src/globals.h +17 -19
  133. data/vendor/v8/src/handles-inl.h +59 -12
  134. data/vendor/v8/src/handles.cc +180 -200
  135. data/vendor/v8/src/handles.h +80 -11
  136. data/vendor/v8/src/hashmap.h +60 -40
  137. data/vendor/v8/src/heap-inl.h +107 -45
  138. data/vendor/v8/src/heap-profiler.cc +38 -19
  139. data/vendor/v8/src/heap-profiler.h +24 -14
  140. data/vendor/v8/src/heap.cc +1123 -738
  141. data/vendor/v8/src/heap.h +385 -146
  142. data/vendor/v8/src/hydrogen-instructions.cc +700 -217
  143. data/vendor/v8/src/hydrogen-instructions.h +1158 -472
  144. data/vendor/v8/src/hydrogen.cc +3319 -1662
  145. data/vendor/v8/src/hydrogen.h +411 -170
  146. data/vendor/v8/src/ia32/assembler-ia32-inl.h +46 -16
  147. data/vendor/v8/src/ia32/assembler-ia32.cc +131 -61
  148. data/vendor/v8/src/ia32/assembler-ia32.h +115 -57
  149. data/vendor/v8/src/ia32/builtins-ia32.cc +99 -5
  150. data/vendor/v8/src/ia32/code-stubs-ia32.cc +787 -495
  151. data/vendor/v8/src/ia32/code-stubs-ia32.h +10 -100
  152. data/vendor/v8/src/ia32/codegen-ia32.cc +227 -23
  153. data/vendor/v8/src/ia32/codegen-ia32.h +14 -0
  154. data/vendor/v8/src/ia32/deoptimizer-ia32.cc +428 -87
  155. data/vendor/v8/src/ia32/disasm-ia32.cc +28 -1
  156. data/vendor/v8/src/ia32/frames-ia32.h +6 -16
  157. data/vendor/v8/src/ia32/full-codegen-ia32.cc +280 -272
  158. data/vendor/v8/src/ia32/ic-ia32.cc +150 -250
  159. data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +1600 -517
  160. data/vendor/v8/src/ia32/lithium-codegen-ia32.h +90 -24
  161. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +10 -6
  162. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.h +2 -2
  163. data/vendor/v8/src/ia32/lithium-ia32.cc +405 -302
  164. data/vendor/v8/src/ia32/lithium-ia32.h +526 -271
  165. data/vendor/v8/src/ia32/macro-assembler-ia32.cc +378 -119
  166. data/vendor/v8/src/ia32/macro-assembler-ia32.h +62 -28
  167. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +43 -30
  168. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +2 -10
  169. data/vendor/v8/src/ia32/stub-cache-ia32.cc +492 -678
  170. data/vendor/v8/src/ic-inl.h +9 -4
  171. data/vendor/v8/src/ic.cc +836 -923
  172. data/vendor/v8/src/ic.h +228 -247
  173. data/vendor/v8/src/incremental-marking-inl.h +26 -30
  174. data/vendor/v8/src/incremental-marking.cc +276 -248
  175. data/vendor/v8/src/incremental-marking.h +29 -37
  176. data/vendor/v8/src/interface.cc +34 -25
  177. data/vendor/v8/src/interface.h +69 -25
  178. data/vendor/v8/src/interpreter-irregexp.cc +2 -2
  179. data/vendor/v8/src/isolate.cc +382 -76
  180. data/vendor/v8/src/isolate.h +109 -56
  181. data/vendor/v8/src/json-parser.h +217 -104
  182. data/vendor/v8/src/json-stringifier.h +745 -0
  183. data/vendor/v8/src/json.js +10 -132
  184. data/vendor/v8/src/jsregexp-inl.h +106 -0
  185. data/vendor/v8/src/jsregexp.cc +517 -285
  186. data/vendor/v8/src/jsregexp.h +145 -117
  187. data/vendor/v8/src/list-inl.h +35 -22
  188. data/vendor/v8/src/list.h +46 -19
  189. data/vendor/v8/src/lithium-allocator-inl.h +22 -2
  190. data/vendor/v8/src/lithium-allocator.cc +85 -70
  191. data/vendor/v8/src/lithium-allocator.h +21 -39
  192. data/vendor/v8/src/lithium.cc +259 -5
  193. data/vendor/v8/src/lithium.h +131 -32
  194. data/vendor/v8/src/liveedit-debugger.js +52 -3
  195. data/vendor/v8/src/liveedit.cc +393 -113
  196. data/vendor/v8/src/liveedit.h +7 -3
  197. data/vendor/v8/src/log-utils.cc +4 -2
  198. data/vendor/v8/src/log.cc +170 -140
  199. data/vendor/v8/src/log.h +62 -11
  200. data/vendor/v8/src/macro-assembler.h +17 -0
  201. data/vendor/v8/src/macros.py +2 -0
  202. data/vendor/v8/src/mark-compact-inl.h +3 -23
  203. data/vendor/v8/src/mark-compact.cc +801 -830
  204. data/vendor/v8/src/mark-compact.h +154 -47
  205. data/vendor/v8/src/marking-thread.cc +85 -0
  206. data/vendor/v8/src/{inspector.cc → marking-thread.h} +32 -24
  207. data/vendor/v8/src/math.js +12 -18
  208. data/vendor/v8/src/messages.cc +18 -8
  209. data/vendor/v8/src/messages.js +314 -261
  210. data/vendor/v8/src/mips/assembler-mips-inl.h +58 -6
  211. data/vendor/v8/src/mips/assembler-mips.cc +92 -75
  212. data/vendor/v8/src/mips/assembler-mips.h +54 -60
  213. data/vendor/v8/src/mips/builtins-mips.cc +116 -17
  214. data/vendor/v8/src/mips/code-stubs-mips.cc +919 -556
  215. data/vendor/v8/src/mips/code-stubs-mips.h +22 -131
  216. data/vendor/v8/src/mips/codegen-mips.cc +281 -6
  217. data/vendor/v8/src/mips/codegen-mips.h +22 -0
  218. data/vendor/v8/src/mips/constants-mips.cc +2 -0
  219. data/vendor/v8/src/mips/constants-mips.h +12 -2
  220. data/vendor/v8/src/mips/deoptimizer-mips.cc +286 -50
  221. data/vendor/v8/src/mips/disasm-mips.cc +13 -0
  222. data/vendor/v8/src/mips/full-codegen-mips.cc +297 -284
  223. data/vendor/v8/src/mips/ic-mips.cc +182 -263
  224. data/vendor/v8/src/mips/lithium-codegen-mips.cc +1208 -556
  225. data/vendor/v8/src/mips/lithium-codegen-mips.h +72 -19
  226. data/vendor/v8/src/mips/lithium-gap-resolver-mips.cc +9 -2
  227. data/vendor/v8/src/mips/lithium-mips.cc +290 -302
  228. data/vendor/v8/src/mips/lithium-mips.h +463 -266
  229. data/vendor/v8/src/mips/macro-assembler-mips.cc +208 -115
  230. data/vendor/v8/src/mips/macro-assembler-mips.h +67 -24
  231. data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +40 -25
  232. data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +3 -9
  233. data/vendor/v8/src/mips/simulator-mips.cc +112 -40
  234. data/vendor/v8/src/mips/simulator-mips.h +5 -0
  235. data/vendor/v8/src/mips/stub-cache-mips.cc +502 -884
  236. data/vendor/v8/src/mirror-debugger.js +157 -30
  237. data/vendor/v8/src/mksnapshot.cc +88 -14
  238. data/vendor/v8/src/object-observe.js +235 -0
  239. data/vendor/v8/src/objects-debug.cc +178 -176
  240. data/vendor/v8/src/objects-inl.h +1333 -486
  241. data/vendor/v8/src/objects-printer.cc +125 -43
  242. data/vendor/v8/src/objects-visiting-inl.h +578 -6
  243. data/vendor/v8/src/objects-visiting.cc +2 -2
  244. data/vendor/v8/src/objects-visiting.h +172 -79
  245. data/vendor/v8/src/objects.cc +3533 -2885
  246. data/vendor/v8/src/objects.h +1352 -1131
  247. data/vendor/v8/src/optimizing-compiler-thread.cc +152 -0
  248. data/vendor/v8/src/optimizing-compiler-thread.h +111 -0
  249. data/vendor/v8/src/parser.cc +390 -500
  250. data/vendor/v8/src/parser.h +45 -33
  251. data/vendor/v8/src/platform-cygwin.cc +10 -21
  252. data/vendor/v8/src/platform-freebsd.cc +36 -41
  253. data/vendor/v8/src/platform-linux.cc +160 -124
  254. data/vendor/v8/src/platform-macos.cc +30 -27
  255. data/vendor/v8/src/platform-nullos.cc +17 -1
  256. data/vendor/v8/src/platform-openbsd.cc +19 -50
  257. data/vendor/v8/src/platform-posix.cc +14 -0
  258. data/vendor/v8/src/platform-solaris.cc +20 -53
  259. data/vendor/v8/src/platform-win32.cc +49 -26
  260. data/vendor/v8/src/platform.h +40 -1
  261. data/vendor/v8/src/preparser.cc +8 -5
  262. data/vendor/v8/src/preparser.h +2 -2
  263. data/vendor/v8/src/prettyprinter.cc +16 -0
  264. data/vendor/v8/src/prettyprinter.h +2 -0
  265. data/vendor/v8/src/profile-generator-inl.h +1 -0
  266. data/vendor/v8/src/profile-generator.cc +209 -147
  267. data/vendor/v8/src/profile-generator.h +15 -12
  268. data/vendor/v8/src/property-details.h +46 -31
  269. data/vendor/v8/src/property.cc +27 -46
  270. data/vendor/v8/src/property.h +163 -83
  271. data/vendor/v8/src/proxy.js +7 -2
  272. data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +4 -13
  273. data/vendor/v8/src/regexp-macro-assembler-irregexp.h +1 -2
  274. data/vendor/v8/src/regexp-macro-assembler-tracer.cc +1 -11
  275. data/vendor/v8/src/regexp-macro-assembler-tracer.h +0 -1
  276. data/vendor/v8/src/regexp-macro-assembler.cc +31 -14
  277. data/vendor/v8/src/regexp-macro-assembler.h +14 -11
  278. data/vendor/v8/src/regexp-stack.cc +1 -0
  279. data/vendor/v8/src/regexp.js +9 -8
  280. data/vendor/v8/src/rewriter.cc +18 -7
  281. data/vendor/v8/src/runtime-profiler.cc +52 -43
  282. data/vendor/v8/src/runtime-profiler.h +0 -25
  283. data/vendor/v8/src/runtime.cc +2006 -2023
  284. data/vendor/v8/src/runtime.h +56 -49
  285. data/vendor/v8/src/safepoint-table.cc +12 -18
  286. data/vendor/v8/src/safepoint-table.h +11 -8
  287. data/vendor/v8/src/scanner.cc +1 -0
  288. data/vendor/v8/src/scanner.h +4 -10
  289. data/vendor/v8/src/scopeinfo.cc +35 -9
  290. data/vendor/v8/src/scopeinfo.h +64 -3
  291. data/vendor/v8/src/scopes.cc +251 -156
  292. data/vendor/v8/src/scopes.h +61 -27
  293. data/vendor/v8/src/serialize.cc +348 -396
  294. data/vendor/v8/src/serialize.h +125 -114
  295. data/vendor/v8/src/small-pointer-list.h +11 -11
  296. data/vendor/v8/src/{smart-array-pointer.h → smart-pointers.h} +64 -15
  297. data/vendor/v8/src/snapshot-common.cc +64 -15
  298. data/vendor/v8/src/snapshot-empty.cc +7 -1
  299. data/vendor/v8/src/snapshot.h +9 -2
  300. data/vendor/v8/src/spaces-inl.h +17 -0
  301. data/vendor/v8/src/spaces.cc +477 -183
  302. data/vendor/v8/src/spaces.h +238 -58
  303. data/vendor/v8/src/splay-tree-inl.h +8 -7
  304. data/vendor/v8/src/splay-tree.h +24 -10
  305. data/vendor/v8/src/store-buffer.cc +12 -5
  306. data/vendor/v8/src/store-buffer.h +2 -4
  307. data/vendor/v8/src/string-search.h +22 -6
  308. data/vendor/v8/src/string-stream.cc +11 -8
  309. data/vendor/v8/src/string.js +47 -15
  310. data/vendor/v8/src/stub-cache.cc +461 -224
  311. data/vendor/v8/src/stub-cache.h +164 -102
  312. data/vendor/v8/src/sweeper-thread.cc +105 -0
  313. data/vendor/v8/src/sweeper-thread.h +81 -0
  314. data/vendor/v8/src/token.h +1 -0
  315. data/vendor/v8/src/transitions-inl.h +220 -0
  316. data/vendor/v8/src/transitions.cc +160 -0
  317. data/vendor/v8/src/transitions.h +207 -0
  318. data/vendor/v8/src/type-info.cc +182 -181
  319. data/vendor/v8/src/type-info.h +31 -19
  320. data/vendor/v8/src/unicode-inl.h +62 -106
  321. data/vendor/v8/src/unicode.cc +57 -67
  322. data/vendor/v8/src/unicode.h +45 -91
  323. data/vendor/v8/src/uri.js +57 -29
  324. data/vendor/v8/src/utils.h +105 -5
  325. data/vendor/v8/src/v8-counters.cc +54 -11
  326. data/vendor/v8/src/v8-counters.h +134 -19
  327. data/vendor/v8/src/v8.cc +29 -29
  328. data/vendor/v8/src/v8.h +1 -0
  329. data/vendor/v8/src/v8conversions.cc +26 -22
  330. data/vendor/v8/src/v8globals.h +56 -43
  331. data/vendor/v8/src/v8natives.js +83 -30
  332. data/vendor/v8/src/v8threads.cc +42 -21
  333. data/vendor/v8/src/v8threads.h +4 -1
  334. data/vendor/v8/src/v8utils.cc +9 -93
  335. data/vendor/v8/src/v8utils.h +37 -33
  336. data/vendor/v8/src/variables.cc +6 -3
  337. data/vendor/v8/src/variables.h +6 -13
  338. data/vendor/v8/src/version.cc +2 -2
  339. data/vendor/v8/src/vm-state-inl.h +11 -0
  340. data/vendor/v8/src/x64/assembler-x64-inl.h +39 -8
  341. data/vendor/v8/src/x64/assembler-x64.cc +78 -64
  342. data/vendor/v8/src/x64/assembler-x64.h +38 -33
  343. data/vendor/v8/src/x64/builtins-x64.cc +105 -7
  344. data/vendor/v8/src/x64/code-stubs-x64.cc +790 -413
  345. data/vendor/v8/src/x64/code-stubs-x64.h +10 -106
  346. data/vendor/v8/src/x64/codegen-x64.cc +210 -8
  347. data/vendor/v8/src/x64/codegen-x64.h +20 -1
  348. data/vendor/v8/src/x64/deoptimizer-x64.cc +336 -75
  349. data/vendor/v8/src/x64/disasm-x64.cc +15 -0
  350. data/vendor/v8/src/x64/frames-x64.h +0 -14
  351. data/vendor/v8/src/x64/full-codegen-x64.cc +293 -270
  352. data/vendor/v8/src/x64/ic-x64.cc +153 -251
  353. data/vendor/v8/src/x64/lithium-codegen-x64.cc +1379 -531
  354. data/vendor/v8/src/x64/lithium-codegen-x64.h +67 -23
  355. data/vendor/v8/src/x64/lithium-gap-resolver-x64.cc +2 -2
  356. data/vendor/v8/src/x64/lithium-x64.cc +349 -289
  357. data/vendor/v8/src/x64/lithium-x64.h +460 -250
  358. data/vendor/v8/src/x64/macro-assembler-x64.cc +350 -177
  359. data/vendor/v8/src/x64/macro-assembler-x64.h +67 -49
  360. data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +46 -33
  361. data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +2 -3
  362. data/vendor/v8/src/x64/stub-cache-x64.cc +484 -653
  363. data/vendor/v8/src/zone-inl.h +9 -27
  364. data/vendor/v8/src/zone.cc +5 -5
  365. data/vendor/v8/src/zone.h +53 -27
  366. data/vendor/v8/test/benchmarks/testcfg.py +5 -0
  367. data/vendor/v8/test/cctest/cctest.cc +4 -0
  368. data/vendor/v8/test/cctest/cctest.gyp +3 -1
  369. data/vendor/v8/test/cctest/cctest.h +57 -9
  370. data/vendor/v8/test/cctest/cctest.status +15 -15
  371. data/vendor/v8/test/cctest/test-accessors.cc +26 -0
  372. data/vendor/v8/test/cctest/test-alloc.cc +22 -30
  373. data/vendor/v8/test/cctest/test-api.cc +1943 -314
  374. data/vendor/v8/test/cctest/test-assembler-arm.cc +133 -13
  375. data/vendor/v8/test/cctest/test-assembler-ia32.cc +1 -1
  376. data/vendor/v8/test/cctest/test-assembler-mips.cc +12 -0
  377. data/vendor/v8/test/cctest/test-ast.cc +4 -2
  378. data/vendor/v8/test/cctest/test-compiler.cc +61 -29
  379. data/vendor/v8/test/cctest/test-dataflow.cc +2 -2
  380. data/vendor/v8/test/cctest/test-debug.cc +212 -33
  381. data/vendor/v8/test/cctest/test-decls.cc +257 -11
  382. data/vendor/v8/test/cctest/test-dictionary.cc +24 -10
  383. data/vendor/v8/test/cctest/test-disasm-arm.cc +118 -1
  384. data/vendor/v8/test/cctest/test-disasm-ia32.cc +3 -2
  385. data/vendor/v8/test/cctest/test-flags.cc +14 -1
  386. data/vendor/v8/test/cctest/test-func-name-inference.cc +7 -4
  387. data/vendor/v8/test/cctest/test-global-object.cc +51 -0
  388. data/vendor/v8/test/cctest/test-hashing.cc +32 -23
  389. data/vendor/v8/test/cctest/test-heap-profiler.cc +131 -77
  390. data/vendor/v8/test/cctest/test-heap.cc +1084 -143
  391. data/vendor/v8/test/cctest/test-list.cc +1 -1
  392. data/vendor/v8/test/cctest/test-liveedit.cc +3 -2
  393. data/vendor/v8/test/cctest/test-lockers.cc +12 -13
  394. data/vendor/v8/test/cctest/test-log.cc +10 -8
  395. data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +2 -2
  396. data/vendor/v8/test/cctest/test-mark-compact.cc +44 -22
  397. data/vendor/v8/test/cctest/test-object-observe.cc +434 -0
  398. data/vendor/v8/test/cctest/test-parsing.cc +86 -39
  399. data/vendor/v8/test/cctest/test-platform-linux.cc +6 -0
  400. data/vendor/v8/test/cctest/test-platform-win32.cc +7 -0
  401. data/vendor/v8/test/cctest/test-random.cc +5 -4
  402. data/vendor/v8/test/cctest/test-regexp.cc +137 -101
  403. data/vendor/v8/test/cctest/test-serialize.cc +150 -230
  404. data/vendor/v8/test/cctest/test-sockets.cc +1 -1
  405. data/vendor/v8/test/cctest/test-spaces.cc +139 -0
  406. data/vendor/v8/test/cctest/test-strings.cc +736 -74
  407. data/vendor/v8/test/cctest/test-thread-termination.cc +10 -11
  408. data/vendor/v8/test/cctest/test-threads.cc +4 -4
  409. data/vendor/v8/test/cctest/test-utils.cc +16 -0
  410. data/vendor/v8/test/cctest/test-weakmaps.cc +7 -3
  411. data/vendor/v8/test/cctest/testcfg.py +64 -5
  412. data/vendor/v8/test/es5conform/testcfg.py +5 -0
  413. data/vendor/v8/test/message/message.status +1 -1
  414. data/vendor/v8/test/message/overwritten-builtins.out +3 -0
  415. data/vendor/v8/test/message/testcfg.py +89 -8
  416. data/vendor/v8/test/message/try-catch-finally-no-message.out +26 -26
  417. data/vendor/v8/test/mjsunit/accessor-map-sharing.js +18 -2
  418. data/vendor/v8/test/mjsunit/allocation-site-info.js +126 -0
  419. data/vendor/v8/test/mjsunit/array-bounds-check-removal.js +62 -1
  420. data/vendor/v8/test/mjsunit/array-iteration.js +1 -1
  421. data/vendor/v8/test/mjsunit/array-literal-transitions.js +2 -0
  422. data/vendor/v8/test/mjsunit/array-natives-elements.js +317 -0
  423. data/vendor/v8/test/mjsunit/array-reduce.js +8 -8
  424. data/vendor/v8/test/mjsunit/array-slice.js +12 -0
  425. data/vendor/v8/test/mjsunit/array-store-and-grow.js +4 -1
  426. data/vendor/v8/test/mjsunit/assert-opt-and-deopt.js +1 -1
  427. data/vendor/v8/test/mjsunit/bugs/bug-2337.js +53 -0
  428. data/vendor/v8/test/mjsunit/compare-known-objects-slow.js +69 -0
  429. data/vendor/v8/test/mjsunit/compiler/alloc-object-huge.js +3 -1
  430. data/vendor/v8/test/mjsunit/compiler/inline-accessors.js +368 -0
  431. data/vendor/v8/test/mjsunit/compiler/inline-arguments.js +87 -1
  432. data/vendor/v8/test/mjsunit/compiler/inline-closures.js +49 -0
  433. data/vendor/v8/test/mjsunit/compiler/inline-construct.js +55 -43
  434. data/vendor/v8/test/mjsunit/compiler/inline-literals.js +39 -0
  435. data/vendor/v8/test/mjsunit/compiler/multiply-add.js +69 -0
  436. data/vendor/v8/test/mjsunit/compiler/optimized-closures.js +57 -0
  437. data/vendor/v8/test/mjsunit/compiler/parallel-proto-change.js +44 -0
  438. data/vendor/v8/test/mjsunit/compiler/property-static.js +69 -0
  439. data/vendor/v8/test/mjsunit/compiler/proto-chain-constant.js +55 -0
  440. data/vendor/v8/test/mjsunit/compiler/proto-chain-load.js +44 -0
  441. data/vendor/v8/test/mjsunit/compiler/regress-gvn.js +3 -2
  442. data/vendor/v8/test/mjsunit/compiler/regress-or.js +6 -2
  443. data/vendor/v8/test/mjsunit/compiler/rotate.js +224 -0
  444. data/vendor/v8/test/mjsunit/compiler/uint32.js +173 -0
  445. data/vendor/v8/test/mjsunit/count-based-osr.js +2 -1
  446. data/vendor/v8/test/mjsunit/d8-os.js +3 -3
  447. data/vendor/v8/test/mjsunit/date-parse.js +3 -0
  448. data/vendor/v8/test/mjsunit/date.js +22 -0
  449. data/vendor/v8/test/mjsunit/debug-break-inline.js +1 -0
  450. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js +22 -12
  451. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized.js +21 -10
  452. data/vendor/v8/test/mjsunit/debug-liveedit-compile-error.js +60 -0
  453. data/vendor/v8/test/mjsunit/debug-liveedit-double-call.js +142 -0
  454. data/vendor/v8/test/mjsunit/debug-liveedit-literals.js +94 -0
  455. data/vendor/v8/test/mjsunit/debug-liveedit-restart-frame.js +153 -0
  456. data/vendor/v8/test/mjsunit/debug-multiple-breakpoints.js +1 -1
  457. data/vendor/v8/test/mjsunit/debug-script-breakpoints-closure.js +67 -0
  458. data/vendor/v8/test/mjsunit/debug-script-breakpoints-nested.js +82 -0
  459. data/vendor/v8/test/mjsunit/debug-script.js +4 -2
  460. data/vendor/v8/test/mjsunit/debug-set-variable-value.js +308 -0
  461. data/vendor/v8/test/mjsunit/debug-stepout-scope-part1.js +190 -0
  462. data/vendor/v8/test/mjsunit/debug-stepout-scope-part2.js +83 -0
  463. data/vendor/v8/test/mjsunit/debug-stepout-scope-part3.js +80 -0
  464. data/vendor/v8/test/mjsunit/debug-stepout-scope-part4.js +80 -0
  465. data/vendor/v8/test/mjsunit/debug-stepout-scope-part5.js +77 -0
  466. data/vendor/v8/test/mjsunit/debug-stepout-scope-part6.js +79 -0
  467. data/vendor/v8/test/mjsunit/debug-stepout-scope-part7.js +79 -0
  468. data/vendor/v8/test/mjsunit/{debug-stepout-scope.js → debug-stepout-scope-part8.js} +0 -189
  469. data/vendor/v8/test/mjsunit/delete-non-configurable.js +74 -0
  470. data/vendor/v8/test/mjsunit/deopt-minus-zero.js +56 -0
  471. data/vendor/v8/test/mjsunit/elements-kind.js +6 -4
  472. data/vendor/v8/test/mjsunit/elements-length-no-holey.js +33 -0
  473. data/vendor/v8/test/mjsunit/elements-transition-hoisting.js +46 -19
  474. data/vendor/v8/test/mjsunit/error-accessors.js +54 -0
  475. data/vendor/v8/test/mjsunit/error-constructors.js +1 -14
  476. data/vendor/v8/test/mjsunit/error-tostring.js +8 -0
  477. data/vendor/v8/test/mjsunit/eval-stack-trace.js +204 -0
  478. data/vendor/v8/test/mjsunit/external-array.js +364 -1
  479. data/vendor/v8/test/mjsunit/fast-array-length.js +37 -0
  480. data/vendor/v8/test/mjsunit/fast-non-keyed.js +113 -0
  481. data/vendor/v8/test/mjsunit/fast-prototype.js +117 -0
  482. data/vendor/v8/test/mjsunit/function-call.js +14 -18
  483. data/vendor/v8/test/mjsunit/fuzz-natives-part1.js +230 -0
  484. data/vendor/v8/test/mjsunit/fuzz-natives-part2.js +229 -0
  485. data/vendor/v8/test/mjsunit/fuzz-natives-part3.js +229 -0
  486. data/vendor/v8/test/mjsunit/{fuzz-natives.js → fuzz-natives-part4.js} +12 -2
  487. data/vendor/v8/test/mjsunit/generated-transition-stub.js +218 -0
  488. data/vendor/v8/test/mjsunit/greedy.js +1 -1
  489. data/vendor/v8/test/mjsunit/harmony/block-conflicts.js +2 -1
  490. data/vendor/v8/test/mjsunit/harmony/block-let-crankshaft.js +1 -1
  491. data/vendor/v8/test/mjsunit/harmony/collections.js +69 -11
  492. data/vendor/v8/test/mjsunit/harmony/debug-blockscopes.js +2 -2
  493. data/vendor/v8/test/mjsunit/harmony/module-linking.js +180 -3
  494. data/vendor/v8/test/mjsunit/harmony/module-parsing.js +31 -0
  495. data/vendor/v8/test/mjsunit/harmony/module-recompile.js +87 -0
  496. data/vendor/v8/test/mjsunit/harmony/module-resolution.js +15 -2
  497. data/vendor/v8/test/mjsunit/harmony/object-observe.js +1056 -0
  498. data/vendor/v8/test/mjsunit/harmony/proxies-json.js +178 -0
  499. data/vendor/v8/test/mjsunit/harmony/proxies.js +25 -10
  500. data/vendor/v8/test/mjsunit/json-parser-recursive.js +33 -0
  501. data/vendor/v8/test/mjsunit/json-stringify-recursive.js +52 -0
  502. data/vendor/v8/test/mjsunit/json.js +38 -2
  503. data/vendor/v8/test/mjsunit/json2.js +153 -0
  504. data/vendor/v8/test/mjsunit/limit-locals.js +5 -4
  505. data/vendor/v8/test/mjsunit/manual-parallel-recompile.js +79 -0
  506. data/vendor/v8/test/mjsunit/math-exp-precision.js +64 -0
  507. data/vendor/v8/test/mjsunit/math-floor-negative.js +59 -0
  508. data/vendor/v8/test/mjsunit/math-floor-of-div-minus-zero.js +41 -0
  509. data/vendor/v8/test/mjsunit/math-floor-of-div-nosudiv.js +288 -0
  510. data/vendor/v8/test/mjsunit/math-floor-of-div.js +81 -9
  511. data/vendor/v8/test/mjsunit/{math-floor.js → math-floor-part1.js} +1 -72
  512. data/vendor/v8/test/mjsunit/math-floor-part2.js +76 -0
  513. data/vendor/v8/test/mjsunit/math-floor-part3.js +78 -0
  514. data/vendor/v8/test/mjsunit/math-floor-part4.js +76 -0
  515. data/vendor/v8/test/mjsunit/mirror-object.js +43 -9
  516. data/vendor/v8/test/mjsunit/mjsunit.js +1 -1
  517. data/vendor/v8/test/mjsunit/mjsunit.status +52 -27
  518. data/vendor/v8/test/mjsunit/mul-exhaustive-part1.js +491 -0
  519. data/vendor/v8/test/mjsunit/mul-exhaustive-part10.js +470 -0
  520. data/vendor/v8/test/mjsunit/mul-exhaustive-part2.js +525 -0
  521. data/vendor/v8/test/mjsunit/mul-exhaustive-part3.js +532 -0
  522. data/vendor/v8/test/mjsunit/mul-exhaustive-part4.js +509 -0
  523. data/vendor/v8/test/mjsunit/mul-exhaustive-part5.js +505 -0
  524. data/vendor/v8/test/mjsunit/mul-exhaustive-part6.js +554 -0
  525. data/vendor/v8/test/mjsunit/mul-exhaustive-part7.js +497 -0
  526. data/vendor/v8/test/mjsunit/mul-exhaustive-part8.js +526 -0
  527. data/vendor/v8/test/mjsunit/mul-exhaustive-part9.js +533 -0
  528. data/vendor/v8/test/mjsunit/new-function.js +34 -0
  529. data/vendor/v8/test/mjsunit/numops-fuzz-part1.js +1172 -0
  530. data/vendor/v8/test/mjsunit/numops-fuzz-part2.js +1178 -0
  531. data/vendor/v8/test/mjsunit/numops-fuzz-part3.js +1178 -0
  532. data/vendor/v8/test/mjsunit/numops-fuzz-part4.js +1177 -0
  533. data/vendor/v8/test/mjsunit/object-define-property.js +107 -2
  534. data/vendor/v8/test/mjsunit/override-read-only-property.js +6 -4
  535. data/vendor/v8/test/mjsunit/packed-elements.js +2 -2
  536. data/vendor/v8/test/mjsunit/parse-int-float.js +4 -4
  537. data/vendor/v8/test/mjsunit/pixel-array-rounding.js +1 -1
  538. data/vendor/v8/test/mjsunit/readonly.js +228 -0
  539. data/vendor/v8/test/mjsunit/regexp-capture-3.js +16 -18
  540. data/vendor/v8/test/mjsunit/regexp-capture.js +2 -0
  541. data/vendor/v8/test/mjsunit/regexp-global.js +122 -0
  542. data/vendor/v8/test/mjsunit/regexp-results-cache.js +78 -0
  543. data/vendor/v8/test/mjsunit/regress/regress-1117.js +12 -3
  544. data/vendor/v8/test/mjsunit/regress/regress-1118.js +1 -1
  545. data/vendor/v8/test/mjsunit/regress/regress-115100.js +36 -0
  546. data/vendor/v8/test/mjsunit/regress/regress-1199637.js +1 -3
  547. data/vendor/v8/test/mjsunit/regress/regress-121407.js +1 -1
  548. data/vendor/v8/test/mjsunit/regress/regress-131923.js +30 -0
  549. data/vendor/v8/test/mjsunit/regress/regress-131994.js +70 -0
  550. data/vendor/v8/test/mjsunit/regress/regress-133211.js +35 -0
  551. data/vendor/v8/test/mjsunit/regress/regress-133211b.js +39 -0
  552. data/vendor/v8/test/mjsunit/regress/regress-136048.js +34 -0
  553. data/vendor/v8/test/mjsunit/regress/regress-137768.js +73 -0
  554. data/vendor/v8/test/mjsunit/regress/regress-143967.js +34 -0
  555. data/vendor/v8/test/mjsunit/regress/regress-145201.js +107 -0
  556. data/vendor/v8/test/mjsunit/regress/regress-147497.js +45 -0
  557. data/vendor/v8/test/mjsunit/regress/regress-148378.js +38 -0
  558. data/vendor/v8/test/mjsunit/regress/regress-1563.js +1 -1
  559. data/vendor/v8/test/mjsunit/regress/regress-1591.js +48 -0
  560. data/vendor/v8/test/mjsunit/regress/regress-164442.js +45 -0
  561. data/vendor/v8/test/mjsunit/regress/regress-165637.js +61 -0
  562. data/vendor/v8/test/mjsunit/regress/regress-166379.js +39 -0
  563. data/vendor/v8/test/mjsunit/regress/regress-166553.js +33 -0
  564. data/vendor/v8/test/mjsunit/regress/regress-1692.js +1 -1
  565. data/vendor/v8/test/mjsunit/regress/regress-171641.js +40 -0
  566. data/vendor/v8/test/mjsunit/regress/regress-1980.js +1 -1
  567. data/vendor/v8/test/mjsunit/regress/regress-2073.js +99 -0
  568. data/vendor/v8/test/mjsunit/regress/regress-2119.js +36 -0
  569. data/vendor/v8/test/mjsunit/regress/regress-2156.js +39 -0
  570. data/vendor/v8/test/mjsunit/regress/regress-2163.js +70 -0
  571. data/vendor/v8/test/mjsunit/regress/regress-2170.js +58 -0
  572. data/vendor/v8/test/mjsunit/regress/regress-2172.js +35 -0
  573. data/vendor/v8/test/mjsunit/regress/regress-2185-2.js +145 -0
  574. data/vendor/v8/test/mjsunit/regress/regress-2185.js +38 -0
  575. data/vendor/v8/test/mjsunit/regress/regress-2186.js +49 -0
  576. data/vendor/v8/test/mjsunit/regress/regress-2193.js +58 -0
  577. data/vendor/v8/test/mjsunit/regress/regress-2219.js +32 -0
  578. data/vendor/v8/test/mjsunit/regress/regress-2225.js +65 -0
  579. data/vendor/v8/test/mjsunit/regress/regress-2226.js +36 -0
  580. data/vendor/v8/test/mjsunit/regress/regress-2234.js +41 -0
  581. data/vendor/v8/test/mjsunit/regress/regress-2243.js +31 -0
  582. data/vendor/v8/test/mjsunit/regress/regress-2249.js +33 -0
  583. data/vendor/v8/test/mjsunit/regress/regress-2250.js +68 -0
  584. data/vendor/v8/test/mjsunit/regress/regress-2261.js +113 -0
  585. data/vendor/v8/test/mjsunit/regress/regress-2263.js +30 -0
  586. data/vendor/v8/test/mjsunit/regress/regress-2284.js +32 -0
  587. data/vendor/v8/test/mjsunit/regress/regress-2285.js +32 -0
  588. data/vendor/v8/test/mjsunit/regress/regress-2286.js +32 -0
  589. data/vendor/v8/test/mjsunit/regress/regress-2289.js +34 -0
  590. data/vendor/v8/test/mjsunit/regress/regress-2291.js +36 -0
  591. data/vendor/v8/test/mjsunit/regress/regress-2294.js +70 -0
  592. data/vendor/v8/test/mjsunit/regress/regress-2296.js +40 -0
  593. data/vendor/v8/test/mjsunit/regress/regress-2315.js +40 -0
  594. data/vendor/v8/test/mjsunit/regress/regress-2318.js +66 -0
  595. data/vendor/v8/test/mjsunit/regress/regress-2322.js +36 -0
  596. data/vendor/v8/test/mjsunit/regress/regress-2326.js +54 -0
  597. data/vendor/v8/test/mjsunit/regress/regress-2336.js +53 -0
  598. data/vendor/v8/test/mjsunit/regress/regress-2339.js +59 -0
  599. data/vendor/v8/test/mjsunit/regress/regress-2346.js +123 -0
  600. data/vendor/v8/test/mjsunit/regress/regress-2373.js +29 -0
  601. data/vendor/v8/test/mjsunit/regress/regress-2374.js +33 -0
  602. data/vendor/v8/test/mjsunit/regress/regress-2398.js +41 -0
  603. data/vendor/v8/test/mjsunit/regress/regress-2410.js +36 -0
  604. data/vendor/v8/test/mjsunit/regress/regress-2416.js +75 -0
  605. data/vendor/v8/test/mjsunit/regress/regress-2419.js +37 -0
  606. data/vendor/v8/test/mjsunit/regress/regress-2433.js +36 -0
  607. data/vendor/v8/test/mjsunit/regress/regress-2437.js +156 -0
  608. data/vendor/v8/test/mjsunit/regress/regress-2438.js +52 -0
  609. data/vendor/v8/test/mjsunit/regress/regress-2443.js +129 -0
  610. data/vendor/v8/test/mjsunit/regress/regress-2444.js +120 -0
  611. data/vendor/v8/test/mjsunit/regress/regress-2489.js +50 -0
  612. data/vendor/v8/test/mjsunit/regress/regress-2499.js +40 -0
  613. data/vendor/v8/test/mjsunit/regress/regress-334.js +1 -1
  614. data/vendor/v8/test/mjsunit/regress/regress-492.js +39 -1
  615. data/vendor/v8/test/mjsunit/regress/regress-builtin-array-op.js +38 -0
  616. data/vendor/v8/test/mjsunit/regress/regress-cnlt-elements.js +43 -0
  617. data/vendor/v8/test/mjsunit/regress/regress-cnlt-enum-indices.js +45 -0
  618. data/vendor/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js +46 -0
  619. data/vendor/v8/test/mjsunit/regress/regress-convert-enum.js +60 -0
  620. data/vendor/v8/test/mjsunit/regress/regress-convert-enum2.js +46 -0
  621. data/vendor/v8/test/mjsunit/regress/regress-convert-transition.js +40 -0
  622. data/vendor/v8/test/mjsunit/regress/regress-crbug-119926.js +3 -1
  623. data/vendor/v8/test/mjsunit/regress/regress-crbug-125148.js +90 -0
  624. data/vendor/v8/test/mjsunit/regress/regress-crbug-134055.js +63 -0
  625. data/vendor/v8/test/mjsunit/regress/regress-crbug-134609.js +59 -0
  626. data/vendor/v8/test/mjsunit/regress/regress-crbug-135008.js +45 -0
  627. data/vendor/v8/test/mjsunit/regress/regress-crbug-135066.js +55 -0
  628. data/vendor/v8/test/mjsunit/regress/regress-crbug-137689.js +47 -0
  629. data/vendor/v8/test/mjsunit/regress/regress-crbug-138887.js +48 -0
  630. data/vendor/v8/test/mjsunit/regress/regress-crbug-140083.js +44 -0
  631. data/vendor/v8/test/mjsunit/regress/regress-crbug-142087.js +38 -0
  632. data/vendor/v8/test/mjsunit/regress/regress-crbug-142218.js +44 -0
  633. data/vendor/v8/test/mjsunit/regress/regress-crbug-145961.js +39 -0
  634. data/vendor/v8/test/mjsunit/regress/regress-crbug-146910.js +33 -0
  635. data/vendor/v8/test/mjsunit/regress/regress-crbug-147475.js +48 -0
  636. data/vendor/v8/test/mjsunit/regress/regress-crbug-148376.js +35 -0
  637. data/vendor/v8/test/mjsunit/regress/regress-crbug-150545.js +53 -0
  638. data/vendor/v8/test/mjsunit/regress/regress-crbug-150729.js +39 -0
  639. data/vendor/v8/test/mjsunit/regress/regress-crbug-157019.js +54 -0
  640. data/vendor/v8/test/mjsunit/regress/regress-crbug-157520.js +38 -0
  641. data/vendor/v8/test/mjsunit/regress/regress-crbug-158185.js +39 -0
  642. data/vendor/v8/test/mjsunit/regress/regress-crbug-160010.js +35 -0
  643. data/vendor/v8/test/mjsunit/regress/regress-crbug-162085.js +71 -0
  644. data/vendor/v8/test/mjsunit/regress/regress-crbug-168545.js +34 -0
  645. data/vendor/v8/test/mjsunit/regress/regress-crbug-170856.js +33 -0
  646. data/vendor/v8/test/mjsunit/regress/regress-crbug-172345.js +34 -0
  647. data/vendor/v8/test/mjsunit/regress/regress-crbug-173974.js +36 -0
  648. data/vendor/v8/test/mjsunit/regress/regress-crbug-18639.js +9 -5
  649. data/vendor/v8/test/mjsunit/regress/regress-debug-code-recompilation.js +2 -1
  650. data/vendor/v8/test/mjsunit/regress/regress-deep-proto.js +45 -0
  651. data/vendor/v8/test/mjsunit/regress/regress-delete-empty-double.js +40 -0
  652. data/vendor/v8/test/mjsunit/regress/regress-iteration-order.js +42 -0
  653. data/vendor/v8/test/mjsunit/regress/regress-json-stringify-gc.js +41 -0
  654. data/vendor/v8/test/mjsunit/regress/regress-latin-1.js +78 -0
  655. data/vendor/v8/test/mjsunit/regress/regress-load-elements.js +49 -0
  656. data/vendor/v8/test/mjsunit/regress/regress-observe-empty-double-array.js +38 -0
  657. data/vendor/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js +37 -0
  658. data/vendor/v8/test/mjsunit/shift-for-integer-div.js +59 -0
  659. data/vendor/v8/test/mjsunit/stack-traces-gc.js +119 -0
  660. data/vendor/v8/test/mjsunit/stack-traces-overflow.js +122 -0
  661. data/vendor/v8/test/mjsunit/stack-traces.js +39 -1
  662. data/vendor/v8/test/mjsunit/str-to-num.js +7 -2
  663. data/vendor/v8/test/mjsunit/strict-mode.js +36 -11
  664. data/vendor/v8/test/mjsunit/string-charcodeat.js +3 -0
  665. data/vendor/v8/test/mjsunit/string-natives.js +72 -0
  666. data/vendor/v8/test/mjsunit/string-split.js +17 -0
  667. data/vendor/v8/test/mjsunit/testcfg.py +76 -6
  668. data/vendor/v8/test/mjsunit/tools/tickprocessor.js +4 -1
  669. data/vendor/v8/test/mjsunit/try-finally-continue.js +72 -0
  670. data/vendor/v8/test/mjsunit/typed-array-slice.js +61 -0
  671. data/vendor/v8/test/mjsunit/unbox-double-arrays.js +2 -0
  672. data/vendor/v8/test/mjsunit/uri.js +12 -0
  673. data/vendor/v8/test/mjsunit/with-readonly.js +4 -2
  674. data/vendor/v8/test/mozilla/mozilla.status +19 -113
  675. data/vendor/v8/test/mozilla/testcfg.py +122 -3
  676. data/vendor/v8/test/preparser/preparser.status +5 -0
  677. data/vendor/v8/test/preparser/strict-identifiers.pyt +1 -1
  678. data/vendor/v8/test/preparser/testcfg.py +101 -5
  679. data/vendor/v8/test/sputnik/sputnik.status +1 -1
  680. data/vendor/v8/test/sputnik/testcfg.py +5 -0
  681. data/vendor/v8/test/test262/README +2 -2
  682. data/vendor/v8/test/test262/test262.status +13 -36
  683. data/vendor/v8/test/test262/testcfg.py +102 -8
  684. data/vendor/v8/tools/android-build.sh +0 -0
  685. data/vendor/v8/tools/android-ll-prof.sh +69 -0
  686. data/vendor/v8/tools/android-run.py +109 -0
  687. data/vendor/v8/tools/android-sync.sh +105 -0
  688. data/vendor/v8/tools/bash-completion.sh +0 -0
  689. data/vendor/v8/tools/check-static-initializers.sh +0 -0
  690. data/vendor/v8/tools/common-includes.sh +15 -22
  691. data/vendor/v8/tools/disasm.py +4 -4
  692. data/vendor/v8/tools/fuzz-harness.sh +0 -0
  693. data/vendor/v8/tools/gen-postmortem-metadata.py +6 -8
  694. data/vendor/v8/tools/grokdump.py +404 -129
  695. data/vendor/v8/tools/gyp/v8.gyp +105 -43
  696. data/vendor/v8/tools/linux-tick-processor +5 -5
  697. data/vendor/v8/tools/ll_prof.py +75 -15
  698. data/vendor/v8/tools/merge-to-branch.sh +2 -2
  699. data/vendor/v8/tools/plot-timer-events +70 -0
  700. data/vendor/v8/tools/plot-timer-events.js +510 -0
  701. data/vendor/v8/tools/presubmit.py +1 -0
  702. data/vendor/v8/tools/push-to-trunk.sh +14 -4
  703. data/vendor/v8/tools/run-llprof.sh +69 -0
  704. data/vendor/v8/tools/run-tests.py +372 -0
  705. data/vendor/v8/tools/run-valgrind.py +1 -1
  706. data/vendor/v8/tools/status-file-converter.py +39 -0
  707. data/vendor/v8/tools/test-server.py +224 -0
  708. data/vendor/v8/tools/test-wrapper-gypbuild.py +13 -16
  709. data/vendor/v8/tools/test.py +10 -19
  710. data/vendor/v8/tools/testrunner/README +174 -0
  711. data/vendor/v8/tools/testrunner/__init__.py +26 -0
  712. data/vendor/v8/tools/testrunner/local/__init__.py +26 -0
  713. data/vendor/v8/tools/testrunner/local/commands.py +153 -0
  714. data/vendor/v8/tools/testrunner/local/execution.py +182 -0
  715. data/vendor/v8/tools/testrunner/local/old_statusfile.py +460 -0
  716. data/vendor/v8/tools/testrunner/local/progress.py +238 -0
  717. data/vendor/v8/tools/testrunner/local/statusfile.py +145 -0
  718. data/vendor/v8/tools/testrunner/local/testsuite.py +187 -0
  719. data/vendor/v8/tools/testrunner/local/utils.py +108 -0
  720. data/vendor/v8/tools/testrunner/local/verbose.py +99 -0
  721. data/vendor/v8/tools/testrunner/network/__init__.py +26 -0
  722. data/vendor/v8/tools/testrunner/network/distro.py +90 -0
  723. data/vendor/v8/tools/testrunner/network/endpoint.py +124 -0
  724. data/vendor/v8/tools/testrunner/network/network_execution.py +253 -0
  725. data/vendor/v8/tools/testrunner/network/perfdata.py +120 -0
  726. data/vendor/v8/tools/testrunner/objects/__init__.py +26 -0
  727. data/vendor/v8/tools/testrunner/objects/context.py +50 -0
  728. data/vendor/v8/tools/testrunner/objects/output.py +60 -0
  729. data/vendor/v8/tools/testrunner/objects/peer.py +80 -0
  730. data/vendor/v8/tools/testrunner/objects/testcase.py +83 -0
  731. data/vendor/v8/tools/testrunner/objects/workpacket.py +90 -0
  732. data/vendor/v8/tools/testrunner/server/__init__.py +26 -0
  733. data/vendor/v8/tools/testrunner/server/compression.py +111 -0
  734. data/vendor/v8/tools/testrunner/server/constants.py +51 -0
  735. data/vendor/v8/tools/testrunner/server/daemon.py +147 -0
  736. data/vendor/v8/tools/testrunner/server/local_handler.py +119 -0
  737. data/vendor/v8/tools/testrunner/server/main.py +245 -0
  738. data/vendor/v8/tools/testrunner/server/presence_handler.py +120 -0
  739. data/vendor/v8/tools/testrunner/server/signatures.py +63 -0
  740. data/vendor/v8/tools/testrunner/server/status_handler.py +112 -0
  741. data/vendor/v8/tools/testrunner/server/work_handler.py +150 -0
  742. data/vendor/v8/tools/tick-processor.html +168 -0
  743. data/vendor/v8/tools/tickprocessor-driver.js +5 -3
  744. data/vendor/v8/tools/tickprocessor.js +58 -15
  745. metadata +534 -30
  746. data/patches/add-freebsd9-and-freebsd10-to-gyp-GetFlavor.patch +0 -11
  747. data/patches/do-not-imply-vfp3-and-armv7.patch +0 -44
  748. data/patches/fPIC-on-x64.patch +0 -14
  749. data/vendor/v8/src/liveobjectlist-inl.h +0 -126
  750. data/vendor/v8/src/liveobjectlist.cc +0 -2631
  751. data/vendor/v8/src/liveobjectlist.h +0 -319
  752. data/vendor/v8/test/mjsunit/mul-exhaustive.js +0 -4629
  753. data/vendor/v8/test/mjsunit/numops-fuzz.js +0 -4609
  754. data/vendor/v8/test/mjsunit/regress/regress-1969.js +0 -5045
@@ -67,8 +67,6 @@ bool LCodeGen::GenerateCode() {
67
67
  status_ = GENERATING;
68
68
  CpuFeatures::Scope scope(FPU);
69
69
 
70
- CodeStub::GenerateFPStubs();
71
-
72
70
  // Open a frame scope to indicate that there is a frame on the stack. The
73
71
  // NONE indicates that the scope shouldn't actually generate code to set up
74
72
  // the frame (that is done in GeneratePrologue).
@@ -77,6 +75,7 @@ bool LCodeGen::GenerateCode() {
77
75
  return GeneratePrologue() &&
78
76
  GenerateBody() &&
79
77
  GenerateDeferredCode() &&
78
+ GenerateDeoptJumpTable() &&
80
79
  GenerateSafepointTable();
81
80
  }
82
81
 
@@ -89,17 +88,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
89
88
  }
90
89
 
91
90
 
92
- void LCodeGen::Abort(const char* format, ...) {
93
- if (FLAG_trace_bailout) {
94
- SmartArrayPointer<char> name(
95
- info()->shared_info()->DebugName()->ToCString());
96
- PrintF("Aborting LCodeGen in @\"%s\": ", *name);
97
- va_list arguments;
98
- va_start(arguments, format);
99
- OS::VPrint(format, arguments);
100
- va_end(arguments);
101
- PrintF("\n");
102
- }
91
+ void LChunkBuilder::Abort(const char* reason) {
92
+ info()->set_bailout_reason(reason);
103
93
  status_ = ABORTED;
104
94
  }
105
95
 
@@ -125,34 +115,48 @@ void LCodeGen::Comment(const char* format, ...) {
125
115
  bool LCodeGen::GeneratePrologue() {
126
116
  ASSERT(is_generating());
127
117
 
118
+ if (info()->IsOptimizing()) {
119
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
120
+
128
121
  #ifdef DEBUG
129
- if (strlen(FLAG_stop_at) > 0 &&
130
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
131
- __ stop("stop_at");
132
- }
122
+ if (strlen(FLAG_stop_at) > 0 &&
123
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
124
+ __ stop("stop_at");
125
+ }
133
126
  #endif
134
127
 
135
- // a1: Callee's JS function.
136
- // cp: Callee's context.
137
- // fp: Caller's frame pointer.
138
- // lr: Caller's pc.
139
-
140
- // Strict mode functions and builtins need to replace the receiver
141
- // with undefined when called as functions (without an explicit
142
- // receiver object). r5 is zero for method calls and non-zero for
143
- // function calls.
144
- if (!info_->is_classic_mode() || info_->is_native()) {
145
- Label ok;
146
- __ Branch(&ok, eq, t1, Operand(zero_reg));
147
-
148
- int receiver_offset = scope()->num_parameters() * kPointerSize;
149
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
150
- __ sw(a2, MemOperand(sp, receiver_offset));
151
- __ bind(&ok);
128
+ // a1: Callee's JS function.
129
+ // cp: Callee's context.
130
+ // fp: Caller's frame pointer.
131
+ // lr: Caller's pc.
132
+
133
+ // Strict mode functions and builtins need to replace the receiver
134
+ // with undefined when called as functions (without an explicit
135
+ // receiver object). r5 is zero for method calls and non-zero for
136
+ // function calls.
137
+ if (!info_->is_classic_mode() || info_->is_native()) {
138
+ Label ok;
139
+ __ Branch(&ok, eq, t1, Operand(zero_reg));
140
+
141
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
142
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
143
+ __ sw(a2, MemOperand(sp, receiver_offset));
144
+ __ bind(&ok);
145
+ }
152
146
  }
153
147
 
154
- __ Push(ra, fp, cp, a1);
155
- __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
148
+ info()->set_prologue_offset(masm_->pc_offset());
149
+ if (NeedsEagerFrame()) {
150
+ // The following three instructions must remain together and unmodified for
151
+ // code aging to work properly.
152
+ __ Push(ra, fp, cp, a1);
153
+ // Add unused load of ip to ensure prologue sequence is identical for
154
+ // full-codegen and lithium-codegen.
155
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
156
+ // Adj. FP to point to saved FP.
157
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
158
+ frame_is_built_ = true;
159
+ }
156
160
 
157
161
  // Reserve space for the stack slots needed by the code.
158
162
  int slots = GetStackSlotCount();
@@ -171,7 +175,7 @@ bool LCodeGen::GeneratePrologue() {
171
175
  }
172
176
 
173
177
  // Possibly allocate a local context.
174
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
178
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
175
179
  if (heap_slots > 0) {
176
180
  Comment(";;; Allocate local context");
177
181
  // Argument to NewContext is the function, which is in a1.
@@ -207,7 +211,7 @@ bool LCodeGen::GeneratePrologue() {
207
211
  }
208
212
 
209
213
  // Trace the call.
210
- if (FLAG_trace) {
214
+ if (FLAG_trace && info()->IsOptimizing()) {
211
215
  __ CallRuntime(Runtime::kTraceEnter, 0);
212
216
  }
213
217
  EnsureSpaceForLazyDeopt();
@@ -228,7 +232,30 @@ bool LCodeGen::GenerateBody() {
228
232
  }
229
233
 
230
234
  if (emit_instructions) {
231
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
235
+ if (FLAG_code_comments) {
236
+ HValue* hydrogen = instr->hydrogen_value();
237
+ if (hydrogen != NULL) {
238
+ if (hydrogen->IsChange()) {
239
+ HValue* changed_value = HChange::cast(hydrogen)->value();
240
+ int use_id = 0;
241
+ const char* use_mnemo = "dead";
242
+ if (hydrogen->UseCount() >= 1) {
243
+ HValue* use_value = hydrogen->uses().value();
244
+ use_id = use_value->id();
245
+ use_mnemo = use_value->Mnemonic();
246
+ }
247
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
248
+ current_instruction_, instr->Mnemonic(),
249
+ changed_value->id(), changed_value->Mnemonic(),
250
+ use_id, use_mnemo);
251
+ } else {
252
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
253
+ instr->Mnemonic(), hydrogen->id());
254
+ }
255
+ } else {
256
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
257
+ }
258
+ }
232
259
  instr->CompileToNative(this);
233
260
  }
234
261
  }
@@ -242,10 +269,31 @@ bool LCodeGen::GenerateDeferredCode() {
242
269
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
243
270
  LDeferredCode* code = deferred_[i];
244
271
  __ bind(code->entry());
272
+ if (NeedsDeferredFrame()) {
273
+ Comment(";;; Deferred build frame",
274
+ code->instruction_index(),
275
+ code->instr()->Mnemonic());
276
+ ASSERT(!frame_is_built_);
277
+ ASSERT(info()->IsStub());
278
+ frame_is_built_ = true;
279
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
280
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
281
+ __ push(scratch0());
282
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
283
+ }
245
284
  Comment(";;; Deferred code @%d: %s.",
246
285
  code->instruction_index(),
247
286
  code->instr()->Mnemonic());
248
287
  code->Generate();
288
+ if (NeedsDeferredFrame()) {
289
+ Comment(";;; Deferred destroy frame",
290
+ code->instruction_index(),
291
+ code->instr()->Mnemonic());
292
+ ASSERT(frame_is_built_);
293
+ __ pop(at);
294
+ __ MultiPop(cp.bit() | fp.bit() | ra.bit());
295
+ frame_is_built_ = false;
296
+ }
249
297
  __ jmp(code->exit());
250
298
  }
251
299
  }
@@ -257,10 +305,81 @@ bool LCodeGen::GenerateDeferredCode() {
257
305
 
258
306
 
259
307
  bool LCodeGen::GenerateDeoptJumpTable() {
260
- // TODO(plind): not clear that this will have advantage for MIPS.
261
- // Skipping it for now. Raised issue #100 for this.
262
- Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
263
- return false;
308
+ // Check that the jump table is accessible from everywhere in the function
309
+ // code, i.e. that offsets to the table can be encoded in the 16bit signed
310
+ // immediate of a branch instruction.
311
+ // To simplify we consider the code size from the first instruction to the
312
+ // end of the jump table.
313
+ if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
314
+ deopt_jump_table_.length() * 12)) {
315
+ Abort("Generated code is too large");
316
+ }
317
+
318
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
319
+ __ RecordComment("[ Deoptimization jump table");
320
+ Label table_start;
321
+ __ bind(&table_start);
322
+ Label needs_frame_not_call;
323
+ Label needs_frame_is_call;
324
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
325
+ __ bind(&deopt_jump_table_[i].label);
326
+ Address entry = deopt_jump_table_[i].address;
327
+ bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
328
+ Deoptimizer::BailoutType type =
329
+ is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
330
+ int id = Deoptimizer::GetDeoptimizationId(entry, type);
331
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
332
+ Comment(";;; jump table entry %d.", i);
333
+ } else {
334
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
335
+ }
336
+ __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
337
+ if (deopt_jump_table_[i].needs_frame) {
338
+ if (is_lazy_deopt) {
339
+ if (needs_frame_is_call.is_bound()) {
340
+ __ Branch(&needs_frame_is_call);
341
+ } else {
342
+ __ bind(&needs_frame_is_call);
343
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
344
+ // This variant of deopt can only be used with stubs. Since we don't
345
+ // have a function pointer to install in the stack frame that we're
346
+ // building, install a special marker there instead.
347
+ ASSERT(info()->IsStub());
348
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
349
+ __ push(scratch0());
350
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
351
+ __ Call(t9);
352
+ }
353
+ } else {
354
+ if (needs_frame_not_call.is_bound()) {
355
+ __ Branch(&needs_frame_not_call);
356
+ } else {
357
+ __ bind(&needs_frame_not_call);
358
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
359
+ // This variant of deopt can only be used with stubs. Since we don't
360
+ // have a function pointer to install in the stack frame that we're
361
+ // building, install a special marker there instead.
362
+ ASSERT(info()->IsStub());
363
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
364
+ __ push(scratch0());
365
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
366
+ __ Jump(t9);
367
+ }
368
+ }
369
+ } else {
370
+ if (is_lazy_deopt) {
371
+ __ Call(t9);
372
+ } else {
373
+ __ Jump(t9);
374
+ }
375
+ }
376
+ }
377
+ __ RecordComment("]");
378
+
379
+ // The deoptimization jump table is the last part of the instruction
380
+ // sequence. Mark the generated code as done unless we bailed out.
381
+ if (!is_aborted()) status_ = DONE;
382
+ return !is_aborted();
264
383
  }
265
384
 
266
385
 
@@ -292,7 +411,8 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
292
411
  return ToRegister(op->index());
293
412
  } else if (op->IsConstantOperand()) {
294
413
  LConstantOperand* const_op = LConstantOperand::cast(op);
295
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
414
+ HConstant* constant = chunk_->LookupConstant(const_op);
415
+ Handle<Object> literal = constant->handle();
296
416
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
297
417
  if (r.IsInteger32()) {
298
418
  ASSERT(literal->IsNumber());
@@ -330,7 +450,8 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
330
450
  return ToDoubleRegister(op->index());
331
451
  } else if (op->IsConstantOperand()) {
332
452
  LConstantOperand* const_op = LConstantOperand::cast(op);
333
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
453
+ HConstant* constant = chunk_->LookupConstant(const_op);
454
+ Handle<Object> literal = constant->handle();
334
455
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
335
456
  if (r.IsInteger32()) {
336
457
  ASSERT(literal->IsNumber());
@@ -354,9 +475,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
354
475
 
355
476
 
356
477
  Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
357
- Handle<Object> literal = chunk_->LookupLiteral(op);
478
+ HConstant* constant = chunk_->LookupConstant(op);
358
479
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
359
- return literal;
480
+ return constant->handle();
360
481
  }
361
482
 
362
483
 
@@ -366,33 +487,31 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
366
487
 
367
488
 
368
489
  int LCodeGen::ToInteger32(LConstantOperand* op) const {
369
- Handle<Object> value = chunk_->LookupLiteral(op);
370
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
371
- ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
372
- value->Number());
373
- return static_cast<int32_t>(value->Number());
490
+ HConstant* constant = chunk_->LookupConstant(op);
491
+ return constant->Integer32Value();
374
492
  }
375
493
 
376
494
 
377
495
  double LCodeGen::ToDouble(LConstantOperand* op) const {
378
- Handle<Object> value = chunk_->LookupLiteral(op);
379
- return value->Number();
496
+ HConstant* constant = chunk_->LookupConstant(op);
497
+ ASSERT(constant->HasDoubleValue());
498
+ return constant->DoubleValue();
380
499
  }
381
500
 
382
501
 
383
502
  Operand LCodeGen::ToOperand(LOperand* op) {
384
503
  if (op->IsConstantOperand()) {
385
504
  LConstantOperand* const_op = LConstantOperand::cast(op);
386
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
505
+ HConstant* constant = chunk()->LookupConstant(const_op);
387
506
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
388
507
  if (r.IsInteger32()) {
389
- ASSERT(literal->IsNumber());
390
- return Operand(static_cast<int32_t>(literal->Number()));
508
+ ASSERT(constant->HasInteger32Value());
509
+ return Operand(constant->Integer32Value());
391
510
  } else if (r.IsDouble()) {
392
511
  Abort("ToOperand Unsupported double immediate.");
393
512
  }
394
513
  ASSERT(r.IsTagged());
395
- return Operand(literal);
514
+ return Operand(constant->handle());
396
515
  } else if (op->IsRegister()) {
397
516
  return Operand(ToRegister(op));
398
517
  } else if (op->IsDoubleRegister()) {
@@ -437,7 +556,9 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
437
556
 
438
557
 
439
558
  void LCodeGen::WriteTranslation(LEnvironment* environment,
440
- Translation* translation) {
559
+ Translation* translation,
560
+ int* arguments_index,
561
+ int* arguments_count) {
441
562
  if (environment == NULL) return;
442
563
 
443
564
  // The translation includes one command per value in the environment.
@@ -445,8 +566,23 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
445
566
  // The output frame height does not include the parameters.
446
567
  int height = translation_size - environment->parameter_count();
447
568
 
448
- WriteTranslation(environment->outer(), translation);
449
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
569
+ // Function parameters are arguments to the outermost environment. The
570
+ // arguments index points to the first element of a sequence of tagged
571
+ // values on the stack that represent the arguments. This needs to be
572
+ // kept in sync with the LArgumentsElements implementation.
573
+ *arguments_index = -environment->parameter_count();
574
+ *arguments_count = environment->parameter_count();
575
+
576
+ WriteTranslation(environment->outer(),
577
+ translation,
578
+ arguments_index,
579
+ arguments_count);
580
+ bool has_closure_id = !info()->closure().is_null() &&
581
+ *info()->closure() != *environment->closure();
582
+ int closure_id = has_closure_id
583
+ ? DefineDeoptimizationLiteral(environment->closure())
584
+ : Translation::kSelfLiteralId;
585
+
450
586
  switch (environment->frame_type()) {
451
587
  case JS_FUNCTION:
452
588
  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -454,12 +590,34 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
454
590
  case JS_CONSTRUCT:
455
591
  translation->BeginConstructStubFrame(closure_id, translation_size);
456
592
  break;
593
+ case JS_GETTER:
594
+ ASSERT(translation_size == 1);
595
+ ASSERT(height == 0);
596
+ translation->BeginGetterStubFrame(closure_id);
597
+ break;
598
+ case JS_SETTER:
599
+ ASSERT(translation_size == 2);
600
+ ASSERT(height == 0);
601
+ translation->BeginSetterStubFrame(closure_id);
602
+ break;
603
+ case STUB:
604
+ translation->BeginCompiledStubFrame();
605
+ break;
457
606
  case ARGUMENTS_ADAPTOR:
458
607
  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
459
608
  break;
460
- default:
461
- UNREACHABLE();
462
609
  }
610
+
611
+ // Inlined frames which push their arguments cause the index to be
612
+ // bumped and a new stack area to be used for materialization.
613
+ if (environment->entry() != NULL &&
614
+ environment->entry()->arguments_pushed()) {
615
+ *arguments_index = *arguments_index < 0
616
+ ? GetStackSlotCount()
617
+ : *arguments_index + *arguments_count;
618
+ *arguments_count = environment->entry()->arguments_count() + 1;
619
+ }
620
+
463
621
  for (int i = 0; i < translation_size; ++i) {
464
622
  LOperand* value = environment->values()->at(i);
465
623
  // spilled_registers_ and spilled_double_registers_ are either
@@ -470,7 +628,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
470
628
  translation->MarkDuplicate();
471
629
  AddToTranslation(translation,
472
630
  environment->spilled_registers()[value->index()],
473
- environment->HasTaggedValueAt(i));
631
+ environment->HasTaggedValueAt(i),
632
+ environment->HasUint32ValueAt(i),
633
+ *arguments_index,
634
+ *arguments_count);
474
635
  } else if (
475
636
  value->IsDoubleRegister() &&
476
637
  environment->spilled_double_registers()[value->index()] != NULL) {
@@ -478,26 +639,39 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
478
639
  AddToTranslation(
479
640
  translation,
480
641
  environment->spilled_double_registers()[value->index()],
481
- false);
642
+ false,
643
+ false,
644
+ *arguments_index,
645
+ *arguments_count);
482
646
  }
483
647
  }
484
648
 
485
- AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
649
+ AddToTranslation(translation,
650
+ value,
651
+ environment->HasTaggedValueAt(i),
652
+ environment->HasUint32ValueAt(i),
653
+ *arguments_index,
654
+ *arguments_count);
486
655
  }
487
656
  }
488
657
 
489
658
 
490
659
  void LCodeGen::AddToTranslation(Translation* translation,
491
660
  LOperand* op,
492
- bool is_tagged) {
661
+ bool is_tagged,
662
+ bool is_uint32,
663
+ int arguments_index,
664
+ int arguments_count) {
493
665
  if (op == NULL) {
494
666
  // TODO(twuerthinger): Introduce marker operands to indicate that this value
495
667
  // is not present and must be reconstructed from the deoptimizer. Currently
496
668
  // this is only used for the arguments object.
497
- translation->StoreArgumentsObject();
669
+ translation->StoreArgumentsObject(arguments_index, arguments_count);
498
670
  } else if (op->IsStackSlot()) {
499
671
  if (is_tagged) {
500
672
  translation->StoreStackSlot(op->index());
673
+ } else if (is_uint32) {
674
+ translation->StoreUint32StackSlot(op->index());
501
675
  } else {
502
676
  translation->StoreInt32StackSlot(op->index());
503
677
  }
@@ -511,6 +685,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
511
685
  Register reg = ToRegister(op);
512
686
  if (is_tagged) {
513
687
  translation->StoreRegister(reg);
688
+ } else if (is_uint32) {
689
+ translation->StoreUint32Register(reg);
514
690
  } else {
515
691
  translation->StoreInt32Register(reg);
516
692
  }
@@ -518,8 +694,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
518
694
  DoubleRegister reg = ToDoubleRegister(op);
519
695
  translation->StoreDoubleRegister(reg);
520
696
  } else if (op->IsConstantOperand()) {
521
- Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
522
- int src_index = DefineDeoptimizationLiteral(literal);
697
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
698
+ int src_index = DefineDeoptimizationLiteral(constant->handle());
523
699
  translation->StoreLiteral(src_index);
524
700
  } else {
525
701
  UNREACHABLE();
@@ -586,20 +762,22 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
586
762
 
587
763
  int frame_count = 0;
588
764
  int jsframe_count = 0;
765
+ int args_index = 0;
766
+ int args_count = 0;
589
767
  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
590
768
  ++frame_count;
591
769
  if (e->frame_type() == JS_FUNCTION) {
592
770
  ++jsframe_count;
593
771
  }
594
772
  }
595
- Translation translation(&translations_, frame_count, jsframe_count);
596
- WriteTranslation(environment, &translation);
773
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
774
+ WriteTranslation(environment, &translation, &args_index, &args_count);
597
775
  int deoptimization_index = deoptimizations_.length();
598
776
  int pc_offset = masm()->pc_offset();
599
777
  environment->Register(deoptimization_index,
600
778
  translation.index(),
601
779
  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
602
- deoptimizations_.Add(environment);
780
+ deoptimizations_.Add(environment, zone());
603
781
  }
604
782
  }
605
783
 
@@ -611,7 +789,11 @@ void LCodeGen::DeoptimizeIf(Condition cc,
611
789
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
612
790
  ASSERT(environment->HasBeenRegistered());
613
791
  int id = environment->deoptimization_index();
614
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
792
+
793
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
794
+ ? Deoptimizer::LAZY
795
+ : Deoptimizer::EAGER;
796
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
615
797
  if (entry == NULL) {
616
798
  Abort("bailout was not prepared");
617
799
  return;
@@ -619,8 +801,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
619
801
 
620
802
  ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
621
803
 
622
- if (FLAG_deopt_every_n_times == 1 &&
623
- info_->shared_info()->opt_count() == id) {
804
+ if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
624
805
  __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
625
806
  return;
626
807
  }
@@ -634,9 +815,22 @@ void LCodeGen::DeoptimizeIf(Condition cc,
634
815
  __ bind(&skip);
635
816
  }
636
817
 
637
- // TODO(plind): The Arm port is a little different here, due to their
638
- // DeOpt jump table, which is not used for Mips yet.
639
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
818
+ bool needs_lazy_deopt = info()->IsStub();
819
+ ASSERT(info()->IsStub() || frame_is_built_);
820
+ if (cc == al && !needs_lazy_deopt) {
821
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
822
+ } else {
823
+ // We often have several deopts to the same entry, reuse the last
824
+ // jump entry if this is the case.
825
+ if (deopt_jump_table_.is_empty() ||
826
+ (deopt_jump_table_.last().address != entry) ||
827
+ (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
828
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
829
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
830
+ deopt_jump_table_.Add(table_entry, zone());
831
+ }
832
+ __ Branch(&deopt_jump_table_.last().label, cc, src1, src2);
833
+ }
640
834
  }
641
835
 
642
836
 
@@ -657,13 +851,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
657
851
  }
658
852
  data->SetLiteralArray(*literals);
659
853
 
660
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
854
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
661
855
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
662
856
 
663
857
  // Populate the deoptimization entries.
664
858
  for (int i = 0; i < length; i++) {
665
859
  LEnvironment* env = deoptimizations_[i];
666
- data->SetAstId(i, Smi::FromInt(env->ast_id()));
860
+ data->SetAstId(i, env->ast_id());
667
861
  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
668
862
  data->SetArgumentsStackHeight(i,
669
863
  Smi::FromInt(env->arguments_stack_height()));
@@ -678,7 +872,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
678
872
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
679
873
  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
680
874
  }
681
- deoptimization_literals_.Add(literal);
875
+ deoptimization_literals_.Add(literal, zone());
682
876
  return result;
683
877
  }
684
878
 
@@ -724,14 +918,14 @@ void LCodeGen::RecordSafepoint(
724
918
  for (int i = 0; i < operands->length(); i++) {
725
919
  LOperand* pointer = operands->at(i);
726
920
  if (pointer->IsStackSlot()) {
727
- safepoint.DefinePointerSlot(pointer->index());
921
+ safepoint.DefinePointerSlot(pointer->index(), zone());
728
922
  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
729
- safepoint.DefinePointerRegister(ToRegister(pointer));
923
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
730
924
  }
731
925
  }
732
926
  if (kind & Safepoint::kWithRegisters) {
733
927
  // Register cp always contains a pointer to the context.
734
- safepoint.DefinePointerRegister(cp);
928
+ safepoint.DefinePointerRegister(cp, zone());
735
929
  }
736
930
  }
737
931
 
@@ -743,7 +937,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
743
937
 
744
938
 
745
939
  void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
746
- LPointerMap empty_pointers(RelocInfo::kNoPosition);
940
+ LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
747
941
  RecordSafepoint(&empty_pointers, deopt_mode);
748
942
  }
749
943
 
@@ -862,7 +1056,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
862
1056
 
863
1057
  void LCodeGen::DoModI(LModI* instr) {
864
1058
  Register scratch = scratch0();
865
- const Register left = ToRegister(instr->InputAt(0));
1059
+ const Register left = ToRegister(instr->left());
866
1060
  const Register result = ToRegister(instr->result());
867
1061
 
868
1062
  Label done;
@@ -890,7 +1084,7 @@ void LCodeGen::DoModI(LModI* instr) {
890
1084
  __ And(result, scratch, p2constant - 1);
891
1085
  } else {
892
1086
  // div runs in the background while we check for special cases.
893
- Register right = EmitLoadRegister(instr->InputAt(1), scratch);
1087
+ Register right = EmitLoadRegister(instr->right(), scratch);
894
1088
  __ div(left, right);
895
1089
 
896
1090
  // Check for x % 0.
@@ -898,6 +1092,14 @@ void LCodeGen::DoModI(LModI* instr) {
898
1092
  DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
899
1093
  }
900
1094
 
1095
+ // Check for (kMinInt % -1).
1096
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1097
+ Label left_not_min_int;
1098
+ __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
1099
+ DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
1100
+ __ bind(&left_not_min_int);
1101
+ }
1102
+
901
1103
  __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
902
1104
  __ mfhi(result);
903
1105
 
@@ -910,8 +1112,8 @@ void LCodeGen::DoModI(LModI* instr) {
910
1112
 
911
1113
 
912
1114
  void LCodeGen::DoDivI(LDivI* instr) {
913
- const Register left = ToRegister(instr->InputAt(0));
914
- const Register right = ToRegister(instr->InputAt(1));
1115
+ const Register left = ToRegister(instr->left());
1116
+ const Register right = ToRegister(instr->right());
915
1117
  const Register result = ToRegister(instr->result());
916
1118
 
917
1119
  // On MIPS div is asynchronous - it will run in the background while we
@@ -931,7 +1133,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
931
1133
  __ bind(&left_not_zero);
932
1134
  }
933
1135
 
934
- // Check for (-kMinInt / -1).
1136
+ // Check for (kMinInt / -1).
935
1137
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
936
1138
  Label left_not_min_int;
937
1139
  __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
@@ -945,12 +1147,24 @@ void LCodeGen::DoDivI(LDivI* instr) {
945
1147
  }
946
1148
 
947
1149
 
1150
+ void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1151
+ DoubleRegister addend = ToDoubleRegister(instr->addend());
1152
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1153
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1154
+
1155
+ // This is computed in-place.
1156
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
1157
+
1158
+ __ madd_d(addend, addend, multiplier, multiplicand);
1159
+ }
1160
+
1161
+
948
1162
  void LCodeGen::DoMulI(LMulI* instr) {
949
1163
  Register scratch = scratch0();
950
1164
  Register result = ToRegister(instr->result());
951
1165
  // Note that result may alias left.
952
- Register left = ToRegister(instr->InputAt(0));
953
- LOperand* right_op = instr->InputAt(1);
1166
+ Register left = ToRegister(instr->left());
1167
+ LOperand* right_op = instr->right();
954
1168
 
955
1169
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
956
1170
  bool bailout_on_minus_zero =
@@ -1020,7 +1234,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
1020
1234
  } else {
1021
1235
  Register right = EmitLoadRegister(right_op, scratch);
1022
1236
  if (bailout_on_minus_zero) {
1023
- __ Or(ToRegister(instr->TempAt(0)), left, right);
1237
+ __ Or(ToRegister(instr->temp()), left, right);
1024
1238
  }
1025
1239
 
1026
1240
  if (can_overflow) {
@@ -1040,7 +1254,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
1040
1254
  __ Branch(&done, ne, result, Operand(zero_reg));
1041
1255
  DeoptimizeIf(lt,
1042
1256
  instr->environment(),
1043
- ToRegister(instr->TempAt(0)),
1257
+ ToRegister(instr->temp()),
1044
1258
  Operand(zero_reg));
1045
1259
  __ bind(&done);
1046
1260
  }
@@ -1049,8 +1263,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
1049
1263
 
1050
1264
 
1051
1265
  void LCodeGen::DoBitI(LBitI* instr) {
1052
- LOperand* left_op = instr->InputAt(0);
1053
- LOperand* right_op = instr->InputAt(1);
1266
+ LOperand* left_op = instr->left();
1267
+ LOperand* right_op = instr->right();
1054
1268
  ASSERT(left_op->IsRegister());
1055
1269
  Register left = ToRegister(left_op);
1056
1270
  Register result = ToRegister(instr->result());
@@ -1083,14 +1297,17 @@ void LCodeGen::DoBitI(LBitI* instr) {
1083
1297
  void LCodeGen::DoShiftI(LShiftI* instr) {
1084
1298
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1085
1299
  // result may alias either of them.
1086
- LOperand* right_op = instr->InputAt(1);
1087
- Register left = ToRegister(instr->InputAt(0));
1300
+ LOperand* right_op = instr->right();
1301
+ Register left = ToRegister(instr->left());
1088
1302
  Register result = ToRegister(instr->result());
1089
1303
 
1090
1304
  if (right_op->IsRegister()) {
1091
1305
  // No need to mask the right operand on MIPS, it is built into the variable
1092
1306
  // shift instructions.
1093
1307
  switch (instr->op()) {
1308
+ case Token::ROR:
1309
+ __ Ror(result, left, Operand(ToRegister(right_op)));
1310
+ break;
1094
1311
  case Token::SAR:
1095
1312
  __ srav(result, left, ToRegister(right_op));
1096
1313
  break;
@@ -1112,6 +1329,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
1112
1329
  int value = ToInteger32(LConstantOperand::cast(right_op));
1113
1330
  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1114
1331
  switch (instr->op()) {
1332
+ case Token::ROR:
1333
+ if (shift_count != 0) {
1334
+ __ Ror(result, left, Operand(shift_count));
1335
+ } else {
1336
+ __ Move(result, left);
1337
+ }
1338
+ break;
1115
1339
  case Token::SAR:
1116
1340
  if (shift_count != 0) {
1117
1341
  __ sra(result, left, shift_count);
@@ -1146,8 +1370,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
1146
1370
 
1147
1371
 
1148
1372
  void LCodeGen::DoSubI(LSubI* instr) {
1149
- LOperand* left = instr->InputAt(0);
1150
- LOperand* right = instr->InputAt(1);
1373
+ LOperand* left = instr->left();
1374
+ LOperand* right = instr->right();
1151
1375
  LOperand* result = instr->result();
1152
1376
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1153
1377
 
@@ -1193,6 +1417,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
1193
1417
  void LCodeGen::DoConstantD(LConstantD* instr) {
1194
1418
  ASSERT(instr->result()->IsDoubleRegister());
1195
1419
  DoubleRegister result = ToDoubleRegister(instr->result());
1420
+ CpuFeatures::Scope scope(FPU);
1196
1421
  double v = instr->value();
1197
1422
  __ Move(result, v);
1198
1423
  }
@@ -1211,21 +1436,28 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
1211
1436
 
1212
1437
  void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1213
1438
  Register result = ToRegister(instr->result());
1214
- Register array = ToRegister(instr->InputAt(0));
1439
+ Register array = ToRegister(instr->value());
1215
1440
  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
1216
1441
  }
1217
1442
 
1218
1443
 
1219
1444
  void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1220
1445
  Register result = ToRegister(instr->result());
1221
- Register array = ToRegister(instr->InputAt(0));
1446
+ Register array = ToRegister(instr->value());
1222
1447
  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1223
1448
  }
1224
1449
 
1225
1450
 
1451
+ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1452
+ Register result = ToRegister(instr->result());
1453
+ Register map = ToRegister(instr->value());
1454
+ __ EnumLength(result, map);
1455
+ }
1456
+
1457
+
1226
1458
  void LCodeGen::DoElementsKind(LElementsKind* instr) {
1227
1459
  Register result = ToRegister(instr->result());
1228
- Register input = ToRegister(instr->InputAt(0));
1460
+ Register input = ToRegister(instr->value());
1229
1461
 
1230
1462
  // Load map into |result|.
1231
1463
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -1238,9 +1470,9 @@ void LCodeGen::DoElementsKind(LElementsKind* instr) {
1238
1470
 
1239
1471
 
1240
1472
  void LCodeGen::DoValueOf(LValueOf* instr) {
1241
- Register input = ToRegister(instr->InputAt(0));
1473
+ Register input = ToRegister(instr->value());
1242
1474
  Register result = ToRegister(instr->result());
1243
- Register map = ToRegister(instr->TempAt(0));
1475
+ Register map = ToRegister(instr->temp());
1244
1476
  Label done;
1245
1477
 
1246
1478
  // If the object is a smi return the object.
@@ -1257,9 +1489,9 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
1257
1489
 
1258
1490
 
1259
1491
  void LCodeGen::DoDateField(LDateField* instr) {
1260
- Register object = ToRegister(instr->InputAt(0));
1492
+ Register object = ToRegister(instr->date());
1261
1493
  Register result = ToRegister(instr->result());
1262
- Register scratch = ToRegister(instr->TempAt(0));
1494
+ Register scratch = ToRegister(instr->temp());
1263
1495
  Smi* index = instr->index();
1264
1496
  Label runtime, done;
1265
1497
  ASSERT(object.is(a0));
@@ -1267,12 +1499,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
1267
1499
  ASSERT(!scratch.is(scratch0()));
1268
1500
  ASSERT(!scratch.is(object));
1269
1501
 
1270
- #ifdef DEBUG
1271
- __ AbortIfSmi(object);
1502
+ __ And(at, object, Operand(kSmiTagMask));
1503
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1272
1504
  __ GetObjectType(object, scratch, scratch);
1273
- __ Assert(eq, "Trying to get date field from non-date.",
1274
- scratch, Operand(JS_DATE_TYPE));
1275
- #endif
1505
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
1276
1506
 
1277
1507
  if (index->value() == 0) {
1278
1508
  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1296,15 +1526,24 @@ void LCodeGen::DoDateField(LDateField* instr) {
1296
1526
  }
1297
1527
 
1298
1528
 
1529
+ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1530
+ SeqStringSetCharGenerator::Generate(masm(),
1531
+ instr->encoding(),
1532
+ ToRegister(instr->string()),
1533
+ ToRegister(instr->index()),
1534
+ ToRegister(instr->value()));
1535
+ }
1536
+
1537
+
1299
1538
  void LCodeGen::DoBitNotI(LBitNotI* instr) {
1300
- Register input = ToRegister(instr->InputAt(0));
1539
+ Register input = ToRegister(instr->value());
1301
1540
  Register result = ToRegister(instr->result());
1302
1541
  __ Nor(result, zero_reg, Operand(input));
1303
1542
  }
1304
1543
 
1305
1544
 
1306
1545
  void LCodeGen::DoThrow(LThrow* instr) {
1307
- Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
1546
+ Register input_reg = EmitLoadRegister(instr->value(), at);
1308
1547
  __ push(input_reg);
1309
1548
  CallRuntime(Runtime::kThrow, 1, instr);
1310
1549
 
@@ -1315,8 +1554,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
1315
1554
 
1316
1555
 
1317
1556
  void LCodeGen::DoAddI(LAddI* instr) {
1318
- LOperand* left = instr->InputAt(0);
1319
- LOperand* right = instr->InputAt(1);
1557
+ LOperand* left = instr->left();
1558
+ LOperand* right = instr->right();
1320
1559
  LOperand* result = instr->result();
1321
1560
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1322
1561
 
@@ -1353,9 +1592,73 @@ void LCodeGen::DoAddI(LAddI* instr) {
1353
1592
  }
1354
1593
 
1355
1594
 
1595
+ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1596
+ LOperand* left = instr->left();
1597
+ LOperand* right = instr->right();
1598
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
1599
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1600
+ if (instr->hydrogen()->representation().IsInteger32()) {
1601
+ Register left_reg = ToRegister(left);
1602
+ Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1603
+ ? ToOperand(right)
1604
+ : Operand(EmitLoadRegister(right, at));
1605
+ Register result_reg = ToRegister(instr->result());
1606
+ Label return_right, done;
1607
+ if (!result_reg.is(left_reg)) {
1608
+ __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
1609
+ __ mov(result_reg, left_reg);
1610
+ __ Branch(&done);
1611
+ }
1612
+ __ Branch(&done, condition, left_reg, right_op);
1613
+ __ bind(&return_right);
1614
+ __ Addu(result_reg, zero_reg, right_op);
1615
+ __ bind(&done);
1616
+ } else {
1617
+ ASSERT(instr->hydrogen()->representation().IsDouble());
1618
+ CpuFeatures::Scope scope(FPU);
1619
+ FPURegister left_reg = ToDoubleRegister(left);
1620
+ FPURegister right_reg = ToDoubleRegister(right);
1621
+ FPURegister result_reg = ToDoubleRegister(instr->result());
1622
+ Label check_nan_left, check_zero, return_left, return_right, done;
1623
+ __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1624
+ __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1625
+ __ Branch(&return_right);
1626
+
1627
+ __ bind(&check_zero);
1628
+ // left == right != 0.
1629
+ __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1630
+ // At this point, both left and right are either 0 or -0.
1631
+ if (operation == HMathMinMax::kMathMin) {
1632
+ __ neg_d(left_reg, left_reg);
1633
+ __ sub_d(result_reg, left_reg, right_reg);
1634
+ __ neg_d(result_reg, result_reg);
1635
+ } else {
1636
+ __ add_d(result_reg, left_reg, right_reg);
1637
+ }
1638
+ __ Branch(&done);
1639
+
1640
+ __ bind(&check_nan_left);
1641
+ // left == NaN.
1642
+ __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1643
+ __ bind(&return_right);
1644
+ if (!right_reg.is(result_reg)) {
1645
+ __ mov_d(result_reg, right_reg);
1646
+ }
1647
+ __ Branch(&done);
1648
+
1649
+ __ bind(&return_left);
1650
+ if (!left_reg.is(result_reg)) {
1651
+ __ mov_d(result_reg, left_reg);
1652
+ }
1653
+ __ bind(&done);
1654
+ }
1655
+ }
1656
+
1657
+
1356
1658
  void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1357
- DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1358
- DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1659
+ CpuFeatures::Scope scope(FPU);
1660
+ DoubleRegister left = ToDoubleRegister(instr->left());
1661
+ DoubleRegister right = ToDoubleRegister(instr->right());
1359
1662
  DoubleRegister result = ToDoubleRegister(instr->result());
1360
1663
  switch (instr->op()) {
1361
1664
  case Token::ADD:
@@ -1395,8 +1698,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1395
1698
 
1396
1699
 
1397
1700
  void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1398
- ASSERT(ToRegister(instr->InputAt(0)).is(a1));
1399
- ASSERT(ToRegister(instr->InputAt(1)).is(a0));
1701
+ ASSERT(ToRegister(instr->left()).is(a1));
1702
+ ASSERT(ToRegister(instr->right()).is(a0));
1400
1703
  ASSERT(ToRegister(instr->result()).is(v0));
1401
1704
 
1402
1705
  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
@@ -1460,15 +1763,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
1460
1763
 
1461
1764
  Representation r = instr->hydrogen()->value()->representation();
1462
1765
  if (r.IsInteger32()) {
1463
- Register reg = ToRegister(instr->InputAt(0));
1766
+ Register reg = ToRegister(instr->value());
1464
1767
  EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1465
1768
  } else if (r.IsDouble()) {
1466
- DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1769
+ CpuFeatures::Scope scope(FPU);
1770
+ DoubleRegister reg = ToDoubleRegister(instr->value());
1467
1771
  // Test the double value. Zero and NaN are false.
1468
1772
  EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
1469
1773
  } else {
1470
1774
  ASSERT(r.IsTagged());
1471
- Register reg = ToRegister(instr->InputAt(0));
1775
+ Register reg = ToRegister(instr->value());
1472
1776
  HType type = instr->hydrogen()->value()->type();
1473
1777
  if (type.IsBoolean()) {
1474
1778
  __ LoadRoot(at, Heap::kTrueValueRootIndex);
@@ -1540,6 +1844,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
1540
1844
  }
1541
1845
 
1542
1846
  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1847
+ CpuFeatures::Scope scope(FPU);
1543
1848
  // heap number -> false iff +0, -0, or NaN.
1544
1849
  DoubleRegister dbl_scratch = double_scratch0();
1545
1850
  Label not_heap_number;
@@ -1602,8 +1907,8 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1602
1907
 
1603
1908
 
1604
1909
  void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1605
- LOperand* left = instr->InputAt(0);
1606
- LOperand* right = instr->InputAt(1);
1910
+ LOperand* left = instr->left();
1911
+ LOperand* right = instr->right();
1607
1912
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1608
1913
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1609
1914
 
@@ -1619,6 +1924,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1619
1924
  EmitGoto(next_block);
1620
1925
  } else {
1621
1926
  if (instr->is_double()) {
1927
+ CpuFeatures::Scope scope(FPU);
1622
1928
  // Compare left and right as doubles and load the
1623
1929
  // resulting flags into the normal status register.
1624
1930
  FPURegister left_reg = ToDoubleRegister(left);
@@ -1654,8 +1960,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1654
1960
 
1655
1961
 
1656
1962
  void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1657
- Register left = ToRegister(instr->InputAt(0));
1658
- Register right = ToRegister(instr->InputAt(1));
1963
+ Register left = ToRegister(instr->left());
1964
+ Register right = ToRegister(instr->right());
1659
1965
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1660
1966
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1661
1967
 
@@ -1664,7 +1970,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1664
1970
 
1665
1971
 
1666
1972
  void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1667
- Register left = ToRegister(instr->InputAt(0));
1973
+ Register left = ToRegister(instr->left());
1668
1974
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1669
1975
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1670
1976
 
@@ -1676,7 +1982,7 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1676
1982
 
1677
1983
  void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1678
1984
  Register scratch = scratch0();
1679
- Register reg = ToRegister(instr->InputAt(0));
1985
+ Register reg = ToRegister(instr->value());
1680
1986
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1681
1987
 
1682
1988
  // If the expression is known to be untagged or a smi, then it's definitely
@@ -1742,8 +2048,8 @@ Condition LCodeGen::EmitIsObject(Register input,
1742
2048
 
1743
2049
 
1744
2050
  void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1745
- Register reg = ToRegister(instr->InputAt(0));
1746
- Register temp1 = ToRegister(instr->TempAt(0));
2051
+ Register reg = ToRegister(instr->value());
2052
+ Register temp1 = ToRegister(instr->temp());
1747
2053
  Register temp2 = scratch0();
1748
2054
 
1749
2055
  int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1770,8 +2076,8 @@ Condition LCodeGen::EmitIsString(Register input,
1770
2076
 
1771
2077
 
1772
2078
  void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1773
- Register reg = ToRegister(instr->InputAt(0));
1774
- Register temp1 = ToRegister(instr->TempAt(0));
2079
+ Register reg = ToRegister(instr->value());
2080
+ Register temp1 = ToRegister(instr->temp());
1775
2081
 
1776
2082
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1777
2083
  int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1789,15 +2095,15 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1789
2095
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1790
2096
  int false_block = chunk_->LookupDestination(instr->false_block_id());
1791
2097
 
1792
- Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
2098
+ Register input_reg = EmitLoadRegister(instr->value(), at);
1793
2099
  __ And(at, input_reg, kSmiTagMask);
1794
2100
  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
1795
2101
  }
1796
2102
 
1797
2103
 
1798
2104
  void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1799
- Register input = ToRegister(instr->InputAt(0));
1800
- Register temp = ToRegister(instr->TempAt(0));
2105
+ Register input = ToRegister(instr->value());
2106
+ Register temp = ToRegister(instr->temp());
1801
2107
 
1802
2108
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1803
2109
  int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1866,7 +2172,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1866
2172
 
1867
2173
  void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1868
2174
  Register scratch = scratch0();
1869
- Register input = ToRegister(instr->InputAt(0));
2175
+ Register input = ToRegister(instr->value());
1870
2176
 
1871
2177
  int true_block = chunk_->LookupDestination(instr->true_block_id());
1872
2178
  int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1885,12 +2191,10 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1885
2191
 
1886
2192
 
1887
2193
  void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1888
- Register input = ToRegister(instr->InputAt(0));
2194
+ Register input = ToRegister(instr->value());
1889
2195
  Register result = ToRegister(instr->result());
1890
2196
 
1891
- if (FLAG_debug_code) {
1892
- __ AbortIfNotString(input);
1893
- }
2197
+ __ AssertString(input);
1894
2198
 
1895
2199
  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
1896
2200
  __ IndexFromHash(result, result);
@@ -1899,7 +2203,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1899
2203
 
1900
2204
  void LCodeGen::DoHasCachedArrayIndexAndBranch(
1901
2205
  LHasCachedArrayIndexAndBranch* instr) {
1902
- Register input = ToRegister(instr->InputAt(0));
2206
+ Register input = ToRegister(instr->value());
1903
2207
  Register scratch = scratch0();
1904
2208
 
1905
2209
  int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1926,7 +2230,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
1926
2230
 
1927
2231
  __ JumpIfSmi(input, is_false);
1928
2232
 
1929
- if (class_name->IsEqualTo(CStrVector("Function"))) {
2233
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
1930
2234
  // Assuming the following assertions, we can use the same compares to test
1931
2235
  // for both being a function type and being in the object type range.
1932
2236
  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -1955,7 +2259,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
1955
2259
 
1956
2260
  // Objects with a non-function constructor have class 'Object'.
1957
2261
  __ GetObjectType(temp, temp2, temp2);
1958
- if (class_name->IsEqualTo(CStrVector("Object"))) {
2262
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
1959
2263
  __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
1960
2264
  } else {
1961
2265
  __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
@@ -1979,9 +2283,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
1979
2283
 
1980
2284
 
1981
2285
  void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1982
- Register input = ToRegister(instr->InputAt(0));
2286
+ Register input = ToRegister(instr->value());
1983
2287
  Register temp = scratch0();
1984
- Register temp2 = ToRegister(instr->TempAt(0));
2288
+ Register temp2 = ToRegister(instr->temp());
1985
2289
  Handle<String> class_name = instr->hydrogen()->class_name();
1986
2290
 
1987
2291
  int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1997,8 +2301,8 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1997
2301
 
1998
2302
 
1999
2303
  void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2000
- Register reg = ToRegister(instr->InputAt(0));
2001
- Register temp = ToRegister(instr->TempAt(0));
2304
+ Register reg = ToRegister(instr->value());
2305
+ Register temp = ToRegister(instr->temp());
2002
2306
  int true_block = instr->true_block_id();
2003
2307
  int false_block = instr->false_block_id();
2004
2308
 
@@ -2009,8 +2313,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2009
2313
 
2010
2314
  void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2011
2315
  Label true_label, done;
2012
- ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0.
2013
- ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1.
2316
+ ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
2317
+ ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
2014
2318
  Register result = ToRegister(instr->result());
2015
2319
  ASSERT(result.is(v0));
2016
2320
 
@@ -2044,11 +2348,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2044
2348
  };
2045
2349
 
2046
2350
  DeferredInstanceOfKnownGlobal* deferred;
2047
- deferred = new DeferredInstanceOfKnownGlobal(this, instr);
2351
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2048
2352
 
2049
2353
  Label done, false_result;
2050
- Register object = ToRegister(instr->InputAt(0));
2051
- Register temp = ToRegister(instr->TempAt(0));
2354
+ Register object = ToRegister(instr->value());
2355
+ Register temp = ToRegister(instr->temp());
2052
2356
  Register result = ToRegister(instr->result());
2053
2357
 
2054
2358
  ASSERT(object.is(a0));
@@ -2123,7 +2427,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2123
2427
  // Get the temp register reserved by the instruction. This needs to be t0 as
2124
2428
  // its slot of the pushing of safepoint registers is used to communicate the
2125
2429
  // offset to the location of the map check.
2126
- Register temp = ToRegister(instr->TempAt(0));
2430
+ Register temp = ToRegister(instr->temp());
2127
2431
  ASSERT(temp.is(t0));
2128
2432
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2129
2433
  static const int kAdditionalDelta = 7;
@@ -2168,16 +2472,18 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
2168
2472
 
2169
2473
 
2170
2474
  void LCodeGen::DoReturn(LReturn* instr) {
2171
- if (FLAG_trace) {
2475
+ if (FLAG_trace && info()->IsOptimizing()) {
2172
2476
  // Push the return value on the stack as the parameter.
2173
2477
  // Runtime::TraceExit returns its parameter in v0.
2174
2478
  __ push(v0);
2175
2479
  __ CallRuntime(Runtime::kTraceExit, 1);
2176
2480
  }
2177
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2178
- __ mov(sp, fp);
2179
- __ Pop(ra, fp);
2180
- __ Addu(sp, sp, Operand(sp_delta));
2481
+ if (NeedsEagerFrame()) {
2482
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2483
+ __ mov(sp, fp);
2484
+ __ Pop(ra, fp);
2485
+ __ Addu(sp, sp, Operand(sp_delta));
2486
+ }
2181
2487
  __ Jump(ra);
2182
2488
  }
2183
2489
 
@@ -2218,7 +2524,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2218
2524
  // it as no longer deleted.
2219
2525
  if (instr->hydrogen()->RequiresHoleCheck()) {
2220
2526
  // We use a temp to check the payload.
2221
- Register payload = ToRegister(instr->TempAt(0));
2527
+ Register payload = ToRegister(instr->temp());
2222
2528
  __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2223
2529
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2224
2530
  DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
@@ -2301,7 +2607,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2301
2607
 
2302
2608
 
2303
2609
  void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2304
- Register object = ToRegister(instr->InputAt(0));
2610
+ Register object = ToRegister(instr->object());
2305
2611
  Register result = ToRegister(instr->result());
2306
2612
  if (instr->hydrogen()->is_in_object()) {
2307
2613
  __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
@@ -2315,12 +2621,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2315
2621
  void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2316
2622
  Register object,
2317
2623
  Handle<Map> type,
2318
- Handle<String> name) {
2624
+ Handle<String> name,
2625
+ LEnvironment* env) {
2319
2626
  LookupResult lookup(isolate());
2320
- type->LookupInDescriptors(NULL, *name, &lookup);
2321
- ASSERT(lookup.IsFound() &&
2322
- (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
2323
- if (lookup.type() == FIELD) {
2627
+ type->LookupDescriptor(NULL, *name, &lookup);
2628
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
2629
+ if (lookup.IsField()) {
2324
2630
  int index = lookup.GetLocalFieldIndexFromMap(*type);
2325
2631
  int offset = index * kPointerSize;
2326
2632
  if (index < 0) {
@@ -2332,9 +2638,22 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2332
2638
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2333
2639
  __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2334
2640
  }
2335
- } else {
2641
+ } else if (lookup.IsConstantFunction()) {
2336
2642
  Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2337
2643
  __ LoadHeapObject(result, function);
2644
+ } else {
2645
+ // Negative lookup.
2646
+ // Check prototypes.
2647
+ Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
2648
+ Heap* heap = type->GetHeap();
2649
+ while (*current != heap->null_value()) {
2650
+ __ LoadHeapObject(result, current);
2651
+ __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
2652
+ DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
2653
+ current =
2654
+ Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
2655
+ }
2656
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2338
2657
  }
2339
2658
  }
2340
2659
 
@@ -2342,7 +2661,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2342
2661
  void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2343
2662
  Register object = ToRegister(instr->object());
2344
2663
  Register result = ToRegister(instr->result());
2345
- Register scratch = scratch0();
2664
+ Register object_map = scratch0();
2346
2665
 
2347
2666
  int map_count = instr->hydrogen()->types()->length();
2348
2667
  bool need_generic = instr->hydrogen()->need_generic();
@@ -2353,17 +2672,25 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2353
2672
  }
2354
2673
  Handle<String> name = instr->hydrogen()->name();
2355
2674
  Label done;
2356
- __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2675
+ __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2357
2676
  for (int i = 0; i < map_count; ++i) {
2358
2677
  bool last = (i == map_count - 1);
2359
2678
  Handle<Map> map = instr->hydrogen()->types()->at(i);
2679
+ Label check_passed;
2680
+ __ CompareMapAndBranch(
2681
+ object_map, map, &check_passed,
2682
+ eq, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
2360
2683
  if (last && !need_generic) {
2361
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
2362
- EmitLoadFieldOrConstantFunction(result, object, map, name);
2684
+ DeoptimizeIf(al, instr->environment());
2685
+ __ bind(&check_passed);
2686
+ EmitLoadFieldOrConstantFunction(
2687
+ result, object, map, name, instr->environment());
2363
2688
  } else {
2364
2689
  Label next;
2365
- __ Branch(&next, ne, scratch, Operand(map));
2366
- EmitLoadFieldOrConstantFunction(result, object, map, name);
2690
+ __ Branch(&next);
2691
+ __ bind(&check_passed);
2692
+ EmitLoadFieldOrConstantFunction(
2693
+ result, object, map, name, instr->environment());
2367
2694
  __ Branch(&done);
2368
2695
  __ bind(&next);
2369
2696
  }
@@ -2433,7 +2760,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2433
2760
 
2434
2761
  void LCodeGen::DoLoadElements(LLoadElements* instr) {
2435
2762
  Register result = ToRegister(instr->result());
2436
- Register input = ToRegister(instr->InputAt(0));
2763
+ Register input = ToRegister(instr->object());
2437
2764
  Register scratch = scratch0();
2438
2765
 
2439
2766
  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
@@ -2466,7 +2793,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
2466
2793
  void LCodeGen::DoLoadExternalArrayPointer(
2467
2794
  LLoadExternalArrayPointer* instr) {
2468
2795
  Register to_reg = ToRegister(instr->result());
2469
- Register from_reg = ToRegister(instr->InputAt(0));
2796
+ Register from_reg = ToRegister(instr->object());
2470
2797
  __ lw(to_reg, FieldMemOperand(from_reg,
2471
2798
  ExternalArray::kExternalPointerOffset));
2472
2799
  }
@@ -2477,14 +2804,6 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2477
2804
  Register length = ToRegister(instr->length());
2478
2805
  Register index = ToRegister(instr->index());
2479
2806
  Register result = ToRegister(instr->result());
2480
-
2481
- // Bailout index is not a valid argument index. Use unsigned check to get
2482
- // negative check for free.
2483
-
2484
- // TODO(plind): Shoud be optimized to do the sub before the DeoptimizeIf(),
2485
- // as they do in Arm. It will save us an instruction.
2486
- DeoptimizeIf(ls, instr->environment(), length, Operand(index));
2487
-
2488
2807
  // There are two words between the frame pointer and the last argument.
2489
2808
  // Subtracting from length accounts for one of them, add one more.
2490
2809
  __ subu(length, length, index);
@@ -2495,71 +2814,8 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2495
2814
  }
2496
2815
 
2497
2816
 
2498
- void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2499
- Register elements = ToRegister(instr->elements());
2500
- Register key = EmitLoadRegister(instr->key(), scratch0());
2501
- Register result = ToRegister(instr->result());
2502
- Register scratch = scratch0();
2503
-
2504
- // Load the result.
2505
- __ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
2506
- __ addu(scratch, elements, scratch);
2507
- uint32_t offset = FixedArray::kHeaderSize +
2508
- (instr->additional_index() << kPointerSizeLog2);
2509
- __ lw(result, FieldMemOperand(scratch, offset));
2510
-
2511
- // Check for the hole value.
2512
- if (instr->hydrogen()->RequiresHoleCheck()) {
2513
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2514
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
2515
- }
2516
- }
2517
-
2518
-
2519
- void LCodeGen::DoLoadKeyedFastDoubleElement(
2520
- LLoadKeyedFastDoubleElement* instr) {
2521
- Register elements = ToRegister(instr->elements());
2522
- bool key_is_constant = instr->key()->IsConstantOperand();
2523
- Register key = no_reg;
2524
- DoubleRegister result = ToDoubleRegister(instr->result());
2525
- Register scratch = scratch0();
2526
-
2527
- int shift_size =
2528
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2529
- int constant_key = 0;
2530
- if (key_is_constant) {
2531
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2532
- if (constant_key & 0xF0000000) {
2533
- Abort("array index constant value too big.");
2534
- }
2535
- } else {
2536
- key = ToRegister(instr->key());
2537
- }
2538
-
2539
- if (key_is_constant) {
2540
- __ Addu(elements, elements,
2541
- Operand(((constant_key + instr->additional_index()) << shift_size) +
2542
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2543
- } else {
2544
- __ sll(scratch, key, shift_size);
2545
- __ Addu(elements, elements, Operand(scratch));
2546
- __ Addu(elements, elements,
2547
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
2548
- (instr->additional_index() << shift_size)));
2549
- }
2550
-
2551
- if (instr->hydrogen()->RequiresHoleCheck()) {
2552
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2553
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
2554
- }
2555
-
2556
- __ ldc1(result, MemOperand(elements));
2557
- }
2558
-
2559
-
2560
- void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2561
- LLoadKeyedSpecializedArrayElement* instr) {
2562
- Register external_pointer = ToRegister(instr->external_pointer());
2817
+ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2818
+ Register external_pointer = ToRegister(instr->elements());
2563
2819
  Register key = no_reg;
2564
2820
  ElementsKind elements_kind = instr->elements_kind();
2565
2821
  bool key_is_constant = instr->key()->IsConstantOperand();
@@ -2572,45 +2828,82 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2572
2828
  } else {
2573
2829
  key = ToRegister(instr->key());
2574
2830
  }
2575
- int shift_size = ElementsKindToShiftSize(elements_kind);
2576
- int additional_offset = instr->additional_index() << shift_size;
2831
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
2832
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
2833
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
2834
+ int additional_offset = instr->additional_index() << element_size_shift;
2577
2835
 
2578
2836
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2579
2837
  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2580
2838
  FPURegister result = ToDoubleRegister(instr->result());
2581
2839
  if (key_is_constant) {
2582
- __ Addu(scratch0(), external_pointer, constant_key << shift_size);
2840
+ __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
2583
2841
  } else {
2584
2842
  __ sll(scratch0(), key, shift_size);
2585
2843
  __ Addu(scratch0(), scratch0(), external_pointer);
2586
2844
  }
2587
-
2588
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2589
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
2590
- __ cvt_d_s(result, result);
2591
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2592
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
2593
- }
2594
- } else {
2595
- Register result = ToRegister(instr->result());
2596
- Register scratch = scratch0();
2597
- if (instr->additional_index() != 0 && !key_is_constant) {
2598
- __ Addu(scratch, key, instr->additional_index());
2599
- }
2600
- MemOperand mem_operand(zero_reg);
2601
- if (key_is_constant) {
2602
- mem_operand =
2603
- MemOperand(external_pointer,
2604
- (constant_key << shift_size) + additional_offset);
2845
+ if (CpuFeatures::IsSupported(FPU)) {
2846
+ CpuFeatures::Scope scope(FPU);
2847
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2848
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
2849
+ __ cvt_d_s(result, result);
2850
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2851
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
2852
+ }
2605
2853
  } else {
2606
- if (instr->additional_index() == 0) {
2607
- __ sll(scratch, key, shift_size);
2854
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2855
+ Register value = external_pointer;
2856
+ __ lw(value, MemOperand(scratch0(), additional_offset));
2857
+ __ And(sfpd_lo, value, Operand(kBinary32MantissaMask));
2858
+
2859
+ __ srl(scratch0(), value, kBinary32MantissaBits);
2860
+ __ And(scratch0(), scratch0(),
2861
+ Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
2862
+
2863
+ Label exponent_rebiased;
2864
+ __ Xor(at, scratch0(), Operand(0x00));
2865
+ __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
2866
+
2867
+ __ Xor(at, scratch0(), Operand(0xff));
2868
+ Label skip;
2869
+ __ Branch(&skip, ne, at, Operand(zero_reg));
2870
+ __ li(scratch0(), Operand(0x7ff));
2871
+ __ bind(&skip);
2872
+ __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
2873
+
2874
+ // Rebias exponent.
2875
+ __ Addu(scratch0(),
2876
+ scratch0(),
2877
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
2878
+
2879
+ __ bind(&exponent_rebiased);
2880
+ __ And(sfpd_hi, value, Operand(kBinary32SignMask));
2881
+ __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord);
2882
+ __ Or(sfpd_hi, sfpd_hi, at);
2883
+
2884
+ // Shift mantissa.
2885
+ static const int kMantissaShiftForHiWord =
2886
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
2887
+
2888
+ static const int kMantissaShiftForLoWord =
2889
+ kBitsPerInt - kMantissaShiftForHiWord;
2890
+
2891
+ __ srl(at, sfpd_lo, kMantissaShiftForHiWord);
2892
+ __ Or(sfpd_hi, sfpd_hi, at);
2893
+ __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord);
2894
+
2608
2895
  } else {
2609
- __ sll(scratch, scratch, shift_size);
2896
+ __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset));
2897
+ __ lw(sfpd_hi, MemOperand(scratch0(),
2898
+ additional_offset + kPointerSize));
2610
2899
  }
2611
- __ Addu(scratch, scratch, external_pointer);
2612
- mem_operand = MemOperand(scratch);
2613
2900
  }
2901
+ } else {
2902
+ Register result = ToRegister(instr->result());
2903
+ MemOperand mem_operand = PrepareKeyedOperand(
2904
+ key, external_pointer, key_is_constant, constant_key,
2905
+ element_size_shift, shift_size,
2906
+ instr->additional_index(), additional_offset);
2614
2907
  switch (elements_kind) {
2615
2908
  case EXTERNAL_BYTE_ELEMENTS:
2616
2909
  __ lb(result, mem_operand);
@@ -2630,11 +2923,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2630
2923
  break;
2631
2924
  case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2632
2925
  __ lw(result, mem_operand);
2633
- // TODO(danno): we could be more clever here, perhaps having a special
2634
- // version of the stub that detects if the overflow case actually
2635
- // happens, and generate code that returns a double rather than int.
2636
- DeoptimizeIf(Ugreater_equal, instr->environment(),
2637
- result, Operand(0x80000000));
2926
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2927
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
2928
+ result, Operand(0x80000000));
2929
+ }
2638
2930
  break;
2639
2931
  case EXTERNAL_FLOAT_ELEMENTS:
2640
2932
  case EXTERNAL_DOUBLE_ELEMENTS:
@@ -2653,6 +2945,148 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2653
2945
  }
2654
2946
 
2655
2947
 
2948
+ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2949
+ Register elements = ToRegister(instr->elements());
2950
+ bool key_is_constant = instr->key()->IsConstantOperand();
2951
+ Register key = no_reg;
2952
+ DoubleRegister result = ToDoubleRegister(instr->result());
2953
+ Register scratch = scratch0();
2954
+
2955
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2956
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
2957
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
2958
+ int constant_key = 0;
2959
+ if (key_is_constant) {
2960
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2961
+ if (constant_key & 0xF0000000) {
2962
+ Abort("array index constant value too big.");
2963
+ }
2964
+ } else {
2965
+ key = ToRegister(instr->key());
2966
+ }
2967
+
2968
+ int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
2969
+ ((constant_key + instr->additional_index()) << element_size_shift);
2970
+ if (!key_is_constant) {
2971
+ __ sll(scratch, key, shift_size);
2972
+ __ Addu(elements, elements, scratch);
2973
+ }
2974
+ if (CpuFeatures::IsSupported(FPU)) {
2975
+ CpuFeatures::Scope scope(FPU);
2976
+ __ Addu(elements, elements, Operand(base_offset));
2977
+ __ ldc1(result, MemOperand(elements));
2978
+ if (instr->hydrogen()->RequiresHoleCheck()) {
2979
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2980
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
2981
+ }
2982
+ } else {
2983
+ __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
2984
+ __ lw(sfpd_lo, MemOperand(elements, base_offset));
2985
+ if (instr->hydrogen()->RequiresHoleCheck()) {
2986
+ ASSERT(kPointerSize == sizeof(kHoleNanLower32));
2987
+ DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32));
2988
+ }
2989
+ }
2990
+ }
2991
+
2992
+
2993
+ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2994
+ Register elements = ToRegister(instr->elements());
2995
+ Register result = ToRegister(instr->result());
2996
+ Register scratch = scratch0();
2997
+ Register store_base = scratch;
2998
+ int offset = 0;
2999
+
3000
+ if (instr->key()->IsConstantOperand()) {
3001
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3002
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3003
+ instr->additional_index());
3004
+ store_base = elements;
3005
+ } else {
3006
+ Register key = EmitLoadRegister(instr->key(), scratch0());
3007
+ // Even though the HLoadKeyed instruction forces the input
3008
+ // representation for the key to be an integer, the input gets replaced
3009
+ // during bound check elimination with the index argument to the bounds
3010
+ // check, which can be tagged, so that case must be handled here, too.
3011
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
3012
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
3013
+ __ addu(scratch, elements, scratch);
3014
+ } else {
3015
+ __ sll(scratch, key, kPointerSizeLog2);
3016
+ __ addu(scratch, elements, scratch);
3017
+ }
3018
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3019
+ }
3020
+ __ lw(result, FieldMemOperand(store_base, offset));
3021
+
3022
+ // Check for the hole value.
3023
+ if (instr->hydrogen()->RequiresHoleCheck()) {
3024
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3025
+ __ And(scratch, result, Operand(kSmiTagMask));
3026
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3027
+ } else {
3028
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3029
+ DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
3030
+ }
3031
+ }
3032
+ }
3033
+
3034
+
3035
+ void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3036
+ if (instr->is_external()) {
3037
+ DoLoadKeyedExternalArray(instr);
3038
+ } else if (instr->hydrogen()->representation().IsDouble()) {
3039
+ DoLoadKeyedFixedDoubleArray(instr);
3040
+ } else {
3041
+ DoLoadKeyedFixedArray(instr);
3042
+ }
3043
+ }
3044
+
3045
+
3046
+ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3047
+ Register base,
3048
+ bool key_is_constant,
3049
+ int constant_key,
3050
+ int element_size,
3051
+ int shift_size,
3052
+ int additional_index,
3053
+ int additional_offset) {
3054
+ if (additional_index != 0 && !key_is_constant) {
3055
+ additional_index *= 1 << (element_size - shift_size);
3056
+ __ Addu(scratch0(), key, Operand(additional_index));
3057
+ }
3058
+
3059
+ if (key_is_constant) {
3060
+ return MemOperand(base,
3061
+ (constant_key << element_size) + additional_offset);
3062
+ }
3063
+
3064
+ if (additional_index == 0) {
3065
+ if (shift_size >= 0) {
3066
+ __ sll(scratch0(), key, shift_size);
3067
+ __ Addu(scratch0(), base, scratch0());
3068
+ return MemOperand(scratch0());
3069
+ } else {
3070
+ ASSERT_EQ(-1, shift_size);
3071
+ __ srl(scratch0(), key, 1);
3072
+ __ Addu(scratch0(), base, scratch0());
3073
+ return MemOperand(scratch0());
3074
+ }
3075
+ }
3076
+
3077
+ if (shift_size >= 0) {
3078
+ __ sll(scratch0(), scratch0(), shift_size);
3079
+ __ Addu(scratch0(), base, scratch0());
3080
+ return MemOperand(scratch0());
3081
+ } else {
3082
+ ASSERT_EQ(-1, shift_size);
3083
+ __ srl(scratch0(), scratch0(), 1);
3084
+ __ Addu(scratch0(), base, scratch0());
3085
+ return MemOperand(scratch0());
3086
+ }
3087
+ }
3088
+
3089
+
2656
3090
  void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2657
3091
  ASSERT(ToRegister(instr->object()).is(a1));
2658
3092
  ASSERT(ToRegister(instr->key()).is(a0));
@@ -2685,7 +3119,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2685
3119
 
2686
3120
 
2687
3121
  void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2688
- Register elem = ToRegister(instr->InputAt(0));
3122
+ Register elem = ToRegister(instr->elements());
2689
3123
  Register result = ToRegister(instr->result());
2690
3124
 
2691
3125
  Label done;
@@ -2803,7 +3237,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2803
3237
 
2804
3238
 
2805
3239
  void LCodeGen::DoPushArgument(LPushArgument* instr) {
2806
- LOperand* argument = instr->InputAt(0);
3240
+ LOperand* argument = instr->value();
2807
3241
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2808
3242
  Abort("DoPushArgument not implemented for double type.");
2809
3243
  } else {
@@ -2820,7 +3254,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
2820
3254
 
2821
3255
  void LCodeGen::DoThisFunction(LThisFunction* instr) {
2822
3256
  Register result = ToRegister(instr->result());
2823
- __ LoadHeapObject(result, instr->hydrogen()->closure());
3257
+ __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2824
3258
  }
2825
3259
 
2826
3260
 
@@ -2849,12 +3283,12 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
2849
3283
 
2850
3284
  void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2851
3285
  Register result = ToRegister(instr->result());
2852
- __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
3286
+ __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
2853
3287
  }
2854
3288
 
2855
3289
 
2856
3290
  void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2857
- Register global = ToRegister(instr->global());
3291
+ Register global = ToRegister(instr->global_object());
2858
3292
  Register result = ToRegister(instr->result());
2859
3293
  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
2860
3294
  }
@@ -2876,14 +3310,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2876
3310
  __ LoadHeapObject(a1, function);
2877
3311
  }
2878
3312
 
2879
- // Change context if needed.
2880
- bool change_context =
2881
- (info()->closure()->context() != function->context()) ||
2882
- scope()->contains_with() ||
2883
- (scope()->num_heap_slots() > 0);
2884
- if (change_context) {
2885
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2886
- }
3313
+ // Change context.
3314
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2887
3315
 
2888
3316
  // Set r0 to arguments count if adaption is not needed. Assumes that r0
2889
3317
  // is available to write to at this point.
@@ -2921,7 +3349,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2921
3349
 
2922
3350
 
2923
3351
  void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2924
- Register input = ToRegister(instr->InputAt(0));
3352
+ Register input = ToRegister(instr->value());
2925
3353
  Register result = ToRegister(instr->result());
2926
3354
  Register scratch = scratch0();
2927
3355
 
@@ -2986,7 +3414,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2986
3414
 
2987
3415
 
2988
3416
  void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
2989
- Register input = ToRegister(instr->InputAt(0));
3417
+ Register input = ToRegister(instr->value());
2990
3418
  Register result = ToRegister(instr->result());
2991
3419
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2992
3420
  Label done;
@@ -3001,6 +3429,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3001
3429
 
3002
3430
 
3003
3431
  void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3432
+ CpuFeatures::Scope scope(FPU);
3004
3433
  // Class for deferred case.
3005
3434
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3006
3435
  public:
@@ -3017,7 +3446,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3017
3446
 
3018
3447
  Representation r = instr->hydrogen()->value()->representation();
3019
3448
  if (r.IsDouble()) {
3020
- FPURegister input = ToDoubleRegister(instr->InputAt(0));
3449
+ FPURegister input = ToDoubleRegister(instr->value());
3021
3450
  FPURegister result = ToDoubleRegister(instr->result());
3022
3451
  __ abs_d(result, input);
3023
3452
  } else if (r.IsInteger32()) {
@@ -3025,8 +3454,8 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3025
3454
  } else {
3026
3455
  // Representation is tagged.
3027
3456
  DeferredMathAbsTaggedHeapNumber* deferred =
3028
- new DeferredMathAbsTaggedHeapNumber(this, instr);
3029
- Register input = ToRegister(instr->InputAt(0));
3457
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3458
+ Register input = ToRegister(instr->value());
3030
3459
  // Smi check.
3031
3460
  __ JumpIfNotSmi(input, deferred->entry());
3032
3461
  // If smi, handle it directly.
@@ -3037,24 +3466,22 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3037
3466
 
3038
3467
 
3039
3468
  void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3040
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3469
+ CpuFeatures::Scope scope(FPU);
3470
+ DoubleRegister input = ToDoubleRegister(instr->value());
3041
3471
  Register result = ToRegister(instr->result());
3042
- FPURegister single_scratch = double_scratch0().low();
3043
3472
  Register scratch1 = scratch0();
3044
- Register except_flag = ToRegister(instr->TempAt(0));
3473
+ Register except_flag = ToRegister(instr->temp());
3045
3474
 
3046
3475
  __ EmitFPUTruncate(kRoundToMinusInf,
3047
- single_scratch,
3476
+ result,
3048
3477
  input,
3049
3478
  scratch1,
3479
+ double_scratch0(),
3050
3480
  except_flag);
3051
3481
 
3052
3482
  // Deopt if the operation did not succeed.
3053
3483
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3054
3484
 
3055
- // Load the result.
3056
- __ mfc1(result, single_scratch);
3057
-
3058
3485
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3059
3486
  // Test for -0.
3060
3487
  Label done;
@@ -3068,8 +3495,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3068
3495
 
3069
3496
 
3070
3497
  void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3071
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3498
+ CpuFeatures::Scope scope(FPU);
3499
+ DoubleRegister input = ToDoubleRegister(instr->value());
3072
3500
  Register result = ToRegister(instr->result());
3501
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3073
3502
  Register scratch = scratch0();
3074
3503
  Label done, check_sign_on_zero;
3075
3504
 
@@ -3121,17 +3550,15 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3121
3550
  }
3122
3551
 
3123
3552
  Register except_flag = scratch;
3124
-
3125
3553
  __ EmitFPUTruncate(kRoundToMinusInf,
3126
- double_scratch0().low(),
3127
- double_scratch0(),
3128
3554
  result,
3555
+ double_scratch0(),
3556
+ at,
3557
+ double_scratch1,
3129
3558
  except_flag);
3130
3559
 
3131
3560
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3132
3561
 
3133
- __ mfc1(result, double_scratch0().low());
3134
-
3135
3562
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3136
3563
  // Test for -0.
3137
3564
  __ Branch(&done, ne, result, Operand(zero_reg));
@@ -3145,16 +3572,18 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3145
3572
 
3146
3573
 
3147
3574
  void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3148
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3575
+ CpuFeatures::Scope scope(FPU);
3576
+ DoubleRegister input = ToDoubleRegister(instr->value());
3149
3577
  DoubleRegister result = ToDoubleRegister(instr->result());
3150
3578
  __ sqrt_d(result, input);
3151
3579
  }
3152
3580
 
3153
3581
 
3154
3582
  void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3155
- DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3583
+ CpuFeatures::Scope scope(FPU);
3584
+ DoubleRegister input = ToDoubleRegister(instr->value());
3156
3585
  DoubleRegister result = ToDoubleRegister(instr->result());
3157
- DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
3586
+ DoubleRegister temp = ToDoubleRegister(instr->temp());
3158
3587
 
3159
3588
  ASSERT(!input.is(result));
3160
3589
 
@@ -3176,14 +3605,15 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3176
3605
 
3177
3606
 
3178
3607
  void LCodeGen::DoPower(LPower* instr) {
3608
+ CpuFeatures::Scope scope(FPU);
3179
3609
  Representation exponent_type = instr->hydrogen()->right()->representation();
3180
3610
  // Having marked this as a call, we can use any registers.
3181
3611
  // Just make sure that the input/output registers are the expected ones.
3182
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3183
- ToDoubleRegister(instr->InputAt(1)).is(f4));
3184
- ASSERT(!instr->InputAt(1)->IsRegister() ||
3185
- ToRegister(instr->InputAt(1)).is(a2));
3186
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
3612
+ ASSERT(!instr->right()->IsDoubleRegister() ||
3613
+ ToDoubleRegister(instr->right()).is(f4));
3614
+ ASSERT(!instr->right()->IsRegister() ||
3615
+ ToRegister(instr->right()).is(a2));
3616
+ ASSERT(ToDoubleRegister(instr->left()).is(f2));
3187
3617
  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3188
3618
 
3189
3619
  if (exponent_type.IsTagged()) {
@@ -3206,6 +3636,7 @@ void LCodeGen::DoPower(LPower* instr) {
3206
3636
 
3207
3637
 
3208
3638
  void LCodeGen::DoRandom(LRandom* instr) {
3639
+ CpuFeatures::Scope scope(FPU);
3209
3640
  class DeferredDoRandom: public LDeferredCode {
3210
3641
  public:
3211
3642
  DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3216,20 +3647,20 @@ void LCodeGen::DoRandom(LRandom* instr) {
3216
3647
  LRandom* instr_;
3217
3648
  };
3218
3649
 
3219
- DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
3650
+ DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3220
3651
  // Having marked this instruction as a call we can use any
3221
3652
  // registers.
3222
3653
  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3223
- ASSERT(ToRegister(instr->InputAt(0)).is(a0));
3654
+ ASSERT(ToRegister(instr->global_object()).is(a0));
3224
3655
 
3225
3656
  static const int kSeedSize = sizeof(uint32_t);
3226
3657
  STATIC_ASSERT(kPointerSize == kSeedSize);
3227
3658
 
3228
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
3659
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
3229
3660
  static const int kRandomSeedOffset =
3230
3661
  FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3231
3662
  __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
3232
- // a2: FixedArray of the global context's random seeds
3663
+ // a2: FixedArray of the native context's random seeds
3233
3664
 
3234
3665
  // Load state[0].
3235
3666
  __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
@@ -3281,6 +3712,21 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
3281
3712
  }
3282
3713
 
3283
3714
 
3715
+ void LCodeGen::DoMathExp(LMathExp* instr) {
3716
+ CpuFeatures::Scope scope(FPU);
3717
+ DoubleRegister input = ToDoubleRegister(instr->value());
3718
+ DoubleRegister result = ToDoubleRegister(instr->result());
3719
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3720
+ DoubleRegister double_scratch2 = double_scratch0();
3721
+ Register temp1 = ToRegister(instr->temp1());
3722
+ Register temp2 = ToRegister(instr->temp2());
3723
+
3724
+ MathExpGenerator::EmitMathExp(
3725
+ masm(), input, result, double_scratch1, double_scratch2,
3726
+ temp1, temp2, scratch0());
3727
+ }
3728
+
3729
+
3284
3730
  void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3285
3731
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3286
3732
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -3430,7 +3876,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3430
3876
 
3431
3877
 
3432
3878
  void LCodeGen::DoCallNew(LCallNew* instr) {
3433
- ASSERT(ToRegister(instr->InputAt(0)).is(a1));
3879
+ ASSERT(ToRegister(instr->constructor()).is(a1));
3434
3880
  ASSERT(ToRegister(instr->result()).is(v0));
3435
3881
 
3436
3882
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
@@ -3456,7 +3902,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3456
3902
  __ li(scratch, Operand(instr->transition()));
3457
3903
  __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3458
3904
  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3459
- Register temp = ToRegister(instr->TempAt(0));
3905
+ Register temp = ToRegister(instr->temp());
3460
3906
  // Update the write barrier for the map field.
3461
3907
  __ RecordWriteField(object,
3462
3908
  HeapObject::kMapOffset,
@@ -3518,107 +3964,53 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3518
3964
  }
3519
3965
 
3520
3966
 
3521
- void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3522
- DeoptimizeIf(hs,
3523
- instr->environment(),
3524
- ToRegister(instr->index()),
3525
- Operand(ToRegister(instr->length())));
3526
- }
3527
-
3528
-
3529
- void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3530
- Register value = ToRegister(instr->value());
3531
- Register elements = ToRegister(instr->object());
3532
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3533
- Register scratch = scratch0();
3534
-
3535
- // Do the store.
3536
- if (instr->key()->IsConstantOperand()) {
3537
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3538
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3539
- int offset =
3540
- (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
3541
- + FixedArray::kHeaderSize;
3542
- __ sw(value, FieldMemOperand(elements, offset));
3543
- } else {
3544
- __ sll(scratch, key, kPointerSizeLog2);
3545
- __ addu(scratch, elements, scratch);
3546
- if (instr->additional_index() != 0) {
3547
- __ Addu(scratch,
3548
- scratch,
3549
- instr->additional_index() << kPointerSizeLog2);
3967
+ void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
3968
+ HValue* value,
3969
+ LOperand* operand) {
3970
+ if (value->representation().IsTagged() && !value->type().IsSmi()) {
3971
+ if (operand->IsRegister()) {
3972
+ __ And(at, ToRegister(operand), Operand(kSmiTagMask));
3973
+ DeoptimizeIf(ne, environment, at, Operand(zero_reg));
3974
+ } else {
3975
+ __ li(at, ToOperand(operand));
3976
+ __ And(at, at, Operand(kSmiTagMask));
3977
+ DeoptimizeIf(ne, environment, at, Operand(zero_reg));
3550
3978
  }
3551
- __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3552
- }
3553
-
3554
- if (instr->hydrogen()->NeedsWriteBarrier()) {
3555
- HType type = instr->hydrogen()->value()->type();
3556
- SmiCheck check_needed =
3557
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3558
- // Compute address of modified element and store it into key register.
3559
- __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3560
- __ RecordWrite(elements,
3561
- key,
3562
- value,
3563
- kRAHasBeenSaved,
3564
- kSaveFPRegs,
3565
- EMIT_REMEMBERED_SET,
3566
- check_needed);
3567
3979
  }
3568
3980
  }
3569
3981
 
3570
3982
 
3571
- void LCodeGen::DoStoreKeyedFastDoubleElement(
3572
- LStoreKeyedFastDoubleElement* instr) {
3573
- DoubleRegister value = ToDoubleRegister(instr->value());
3574
- Register elements = ToRegister(instr->elements());
3575
- Register key = no_reg;
3576
- Register scratch = scratch0();
3577
- bool key_is_constant = instr->key()->IsConstantOperand();
3578
- int constant_key = 0;
3579
- Label not_nan;
3580
-
3581
- // Calculate the effective address of the slot in the array to store the
3582
- // double value.
3583
- if (key_is_constant) {
3584
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3585
- if (constant_key & 0xF0000000) {
3586
- Abort("array index constant value too big.");
3983
+ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3984
+ DeoptIfTaggedButNotSmi(instr->environment(),
3985
+ instr->hydrogen()->length(),
3986
+ instr->length());
3987
+ DeoptIfTaggedButNotSmi(instr->environment(),
3988
+ instr->hydrogen()->index(),
3989
+ instr->index());
3990
+ if (instr->index()->IsConstantOperand()) {
3991
+ int constant_index =
3992
+ ToInteger32(LConstantOperand::cast(instr->index()));
3993
+ if (instr->hydrogen()->length()->representation().IsTagged()) {
3994
+ __ li(at, Operand(Smi::FromInt(constant_index)));
3995
+ } else {
3996
+ __ li(at, Operand(constant_index));
3587
3997
  }
3998
+ DeoptimizeIf(hs,
3999
+ instr->environment(),
4000
+ at,
4001
+ Operand(ToRegister(instr->length())));
3588
4002
  } else {
3589
- key = ToRegister(instr->key());
3590
- }
3591
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3592
- if (key_is_constant) {
3593
- __ Addu(scratch, elements, Operand((constant_key << shift_size) +
3594
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3595
- } else {
3596
- __ sll(scratch, key, shift_size);
3597
- __ Addu(scratch, elements, Operand(scratch));
3598
- __ Addu(scratch, scratch,
3599
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3600
- }
3601
-
3602
- if (instr->NeedsCanonicalization()) {
3603
- Label is_nan;
3604
- // Check for NaN. All NaNs must be canonicalized.
3605
- __ BranchF(NULL, &is_nan, eq, value, value);
3606
- __ Branch(&not_nan);
3607
-
3608
- // Only load canonical NaN if the comparison above set the overflow.
3609
- __ bind(&is_nan);
3610
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
4003
+ DeoptimizeIf(hs,
4004
+ instr->environment(),
4005
+ ToRegister(instr->index()),
4006
+ Operand(ToRegister(instr->length())));
3611
4007
  }
3612
-
3613
- __ bind(&not_nan);
3614
- __ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
3615
4008
  }
3616
4009
 
3617
4010
 
3618
- void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3619
- LStoreKeyedSpecializedArrayElement* instr) {
3620
-
3621
- Register external_pointer = ToRegister(instr->external_pointer());
4011
+ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4012
+ CpuFeatures::Scope scope(FPU);
4013
+ Register external_pointer = ToRegister(instr->elements());
3622
4014
  Register key = no_reg;
3623
4015
  ElementsKind elements_kind = instr->elements_kind();
3624
4016
  bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3631,14 +4023,17 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3631
4023
  } else {
3632
4024
  key = ToRegister(instr->key());
3633
4025
  }
3634
- int shift_size = ElementsKindToShiftSize(elements_kind);
3635
- int additional_offset = instr->additional_index() << shift_size;
4026
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
4027
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
4028
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
4029
+ int additional_offset = instr->additional_index() << element_size_shift;
3636
4030
 
3637
4031
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3638
4032
  elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3639
4033
  FPURegister value(ToDoubleRegister(instr->value()));
3640
4034
  if (key_is_constant) {
3641
- __ Addu(scratch0(), external_pointer, constant_key << shift_size);
4035
+ __ Addu(scratch0(), external_pointer, constant_key <<
4036
+ element_size_shift);
3642
4037
  } else {
3643
4038
  __ sll(scratch0(), key, shift_size);
3644
4039
  __ Addu(scratch0(), scratch0(), external_pointer);
@@ -3652,24 +4047,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3652
4047
  }
3653
4048
  } else {
3654
4049
  Register value(ToRegister(instr->value()));
3655
- Register scratch = scratch0();
3656
- if (instr->additional_index() != 0 && !key_is_constant) {
3657
- __ Addu(scratch, key, instr->additional_index());
3658
- }
3659
- MemOperand mem_operand(zero_reg);
3660
- if (key_is_constant) {
3661
- mem_operand = MemOperand(external_pointer,
3662
- ((constant_key + instr->additional_index())
3663
- << shift_size));
3664
- } else {
3665
- if (instr->additional_index() == 0) {
3666
- __ sll(scratch, key, shift_size);
3667
- } else {
3668
- __ sll(scratch, scratch, shift_size);
3669
- }
3670
- __ Addu(scratch, scratch, external_pointer);
3671
- mem_operand = MemOperand(scratch);
3672
- }
4050
+ MemOperand mem_operand = PrepareKeyedOperand(
4051
+ key, external_pointer, key_is_constant, constant_key,
4052
+ element_size_shift, shift_size,
4053
+ instr->additional_index(), additional_offset);
3673
4054
  switch (elements_kind) {
3674
4055
  case EXTERNAL_PIXEL_ELEMENTS:
3675
4056
  case EXTERNAL_BYTE_ELEMENTS:
@@ -3700,6 +4081,118 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3700
4081
  }
3701
4082
  }
3702
4083
 
4084
+
4085
+ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4086
+ CpuFeatures::Scope scope(FPU);
4087
+ DoubleRegister value = ToDoubleRegister(instr->value());
4088
+ Register elements = ToRegister(instr->elements());
4089
+ Register key = no_reg;
4090
+ Register scratch = scratch0();
4091
+ bool key_is_constant = instr->key()->IsConstantOperand();
4092
+ int constant_key = 0;
4093
+ Label not_nan;
4094
+
4095
+ // Calculate the effective address of the slot in the array to store the
4096
+ // double value.
4097
+ if (key_is_constant) {
4098
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4099
+ if (constant_key & 0xF0000000) {
4100
+ Abort("array index constant value too big.");
4101
+ }
4102
+ } else {
4103
+ key = ToRegister(instr->key());
4104
+ }
4105
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4106
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
4107
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
4108
+ if (key_is_constant) {
4109
+ __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
4110
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4111
+ } else {
4112
+ __ sll(scratch, key, shift_size);
4113
+ __ Addu(scratch, elements, Operand(scratch));
4114
+ __ Addu(scratch, scratch,
4115
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4116
+ }
4117
+
4118
+ if (instr->NeedsCanonicalization()) {
4119
+ Label is_nan;
4120
+ // Check for NaN. All NaNs must be canonicalized.
4121
+ __ BranchF(NULL, &is_nan, eq, value, value);
4122
+ __ Branch(&not_nan);
4123
+
4124
+ // Only load canonical NaN if the comparison above set the overflow.
4125
+ __ bind(&is_nan);
4126
+ __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
4127
+ }
4128
+
4129
+ __ bind(&not_nan);
4130
+ __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
4131
+ element_size_shift));
4132
+ }
4133
+
4134
+
4135
+ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4136
+ Register value = ToRegister(instr->value());
4137
+ Register elements = ToRegister(instr->elements());
4138
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4139
+ : no_reg;
4140
+ Register scratch = scratch0();
4141
+ Register store_base = scratch;
4142
+ int offset = 0;
4143
+
4144
+ // Do the store.
4145
+ if (instr->key()->IsConstantOperand()) {
4146
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4147
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4148
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4149
+ instr->additional_index());
4150
+ store_base = elements;
4151
+ } else {
4152
+ // Even though the HLoadKeyed instruction forces the input
4153
+ // representation for the key to be an integer, the input gets replaced
4154
+ // during bound check elimination with the index argument to the bounds
4155
+ // check, which can be tagged, so that case must be handled here, too.
4156
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
4157
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
4158
+ __ addu(scratch, elements, scratch);
4159
+ } else {
4160
+ __ sll(scratch, key, kPointerSizeLog2);
4161
+ __ addu(scratch, elements, scratch);
4162
+ }
4163
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4164
+ }
4165
+ __ sw(value, FieldMemOperand(store_base, offset));
4166
+
4167
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
4168
+ HType type = instr->hydrogen()->value()->type();
4169
+ SmiCheck check_needed =
4170
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4171
+ // Compute address of modified element and store it into key register.
4172
+ __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
4173
+ __ RecordWrite(elements,
4174
+ key,
4175
+ value,
4176
+ kRAHasBeenSaved,
4177
+ kSaveFPRegs,
4178
+ EMIT_REMEMBERED_SET,
4179
+ check_needed);
4180
+ }
4181
+ }
4182
+
4183
+
4184
+ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4185
+ // By cases: external, fast double
4186
+ if (instr->is_external()) {
4187
+ DoStoreKeyedExternalArray(instr);
4188
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4189
+ DoStoreKeyedFixedDoubleArray(instr);
4190
+ } else {
4191
+ DoStoreKeyedFixedArray(instr);
4192
+ }
4193
+ }
4194
+
4195
+
3703
4196
  void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3704
4197
  ASSERT(ToRegister(instr->object()).is(a2));
3705
4198
  ASSERT(ToRegister(instr->key()).is(a1));
@@ -3714,13 +4207,13 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3714
4207
 
3715
4208
  void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3716
4209
  Register object_reg = ToRegister(instr->object());
3717
- Register new_map_reg = ToRegister(instr->new_map_reg());
4210
+ Register new_map_reg = ToRegister(instr->new_map_temp());
3718
4211
  Register scratch = scratch0();
3719
4212
 
3720
4213
  Handle<Map> from_map = instr->original_map();
3721
4214
  Handle<Map> to_map = instr->transitioned_map();
3722
- ElementsKind from_kind = from_map->elements_kind();
3723
- ElementsKind to_kind = to_map->elements_kind();
4215
+ ElementsKind from_kind = instr->from_kind();
4216
+ ElementsKind to_kind = instr->to_kind();
3724
4217
 
3725
4218
  __ mov(ToRegister(instr->result()), object_reg);
3726
4219
 
@@ -3729,14 +4222,14 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3729
4222
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
3730
4223
 
3731
4224
  __ li(new_map_reg, Operand(to_map));
3732
- if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
4225
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
3733
4226
  __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3734
4227
  // Write barrier.
3735
4228
  __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3736
4229
  scratch, kRAHasBeenSaved, kDontSaveFPRegs);
3737
4230
  } else if (IsFastSmiElementsKind(from_kind) &&
3738
4231
  IsFastDoubleElementsKind(to_kind)) {
3739
- Register fixed_object_reg = ToRegister(instr->temp_reg());
4232
+ Register fixed_object_reg = ToRegister(instr->temp());
3740
4233
  ASSERT(fixed_object_reg.is(a2));
3741
4234
  ASSERT(new_map_reg.is(a3));
3742
4235
  __ mov(fixed_object_reg, object_reg);
@@ -3744,7 +4237,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3744
4237
  RelocInfo::CODE_TARGET, instr);
3745
4238
  } else if (IsFastDoubleElementsKind(from_kind) &&
3746
4239
  IsFastObjectElementsKind(to_kind)) {
3747
- Register fixed_object_reg = ToRegister(instr->temp_reg());
4240
+ Register fixed_object_reg = ToRegister(instr->temp());
3748
4241
  ASSERT(fixed_object_reg.is(a2));
3749
4242
  ASSERT(new_map_reg.is(a3));
3750
4243
  __ mov(fixed_object_reg, object_reg);
@@ -3777,7 +4270,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3777
4270
  };
3778
4271
 
3779
4272
  DeferredStringCharCodeAt* deferred =
3780
- new DeferredStringCharCodeAt(this, instr);
4273
+ new(zone()) DeferredStringCharCodeAt(this, instr);
3781
4274
  StringCharLoadGenerator::Generate(masm(),
3782
4275
  ToRegister(instr->string()),
3783
4276
  ToRegister(instr->index()),
@@ -3811,9 +4304,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3811
4304
  __ push(index);
3812
4305
  }
3813
4306
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3814
- if (FLAG_debug_code) {
3815
- __ AbortIfNotSmi(v0);
3816
- }
4307
+ __ AssertSmi(v0);
3817
4308
  __ SmiUntag(v0);
3818
4309
  __ StoreToSafepointRegisterSlot(v0, result);
3819
4310
  }
@@ -3831,7 +4322,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3831
4322
  };
3832
4323
 
3833
4324
  DeferredStringCharFromCode* deferred =
3834
- new DeferredStringCharFromCode(this, instr);
4325
+ new(zone()) DeferredStringCharFromCode(this, instr);
3835
4326
 
3836
4327
  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3837
4328
  Register char_code = ToRegister(instr->char_code());
@@ -3840,7 +4331,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3840
4331
  ASSERT(!char_code.is(result));
3841
4332
 
3842
4333
  __ Branch(deferred->entry(), hi,
3843
- char_code, Operand(String::kMaxAsciiCharCode));
4334
+ char_code, Operand(String::kMaxOneByteCharCode));
3844
4335
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
3845
4336
  __ sll(scratch, char_code, kPointerSizeLog2);
3846
4337
  __ Addu(result, result, scratch);
@@ -3869,14 +4360,15 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
3869
4360
 
3870
4361
 
3871
4362
  void LCodeGen::DoStringLength(LStringLength* instr) {
3872
- Register string = ToRegister(instr->InputAt(0));
4363
+ Register string = ToRegister(instr->string());
3873
4364
  Register result = ToRegister(instr->result());
3874
4365
  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
3875
4366
  }
3876
4367
 
3877
4368
 
3878
4369
  void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3879
- LOperand* input = instr->InputAt(0);
4370
+ CpuFeatures::Scope scope(FPU);
4371
+ LOperand* input = instr->value();
3880
4372
  ASSERT(input->IsRegister() || input->IsStackSlot());
3881
4373
  LOperand* output = instr->result();
3882
4374
  ASSERT(output->IsDoubleRegister());
@@ -3892,50 +4384,161 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3892
4384
  }
3893
4385
 
3894
4386
 
4387
+ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4388
+ CpuFeatures::Scope scope(FPU);
4389
+ LOperand* input = instr->value();
4390
+ LOperand* output = instr->result();
4391
+
4392
+ FPURegister dbl_scratch = double_scratch0();
4393
+ __ mtc1(ToRegister(input), dbl_scratch);
4394
+ __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
4395
+ }
4396
+
4397
+
3895
4398
  void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
3896
4399
  class DeferredNumberTagI: public LDeferredCode {
3897
4400
  public:
3898
4401
  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
3899
4402
  : LDeferredCode(codegen), instr_(instr) { }
3900
- virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
4403
+ virtual void Generate() {
4404
+ codegen()->DoDeferredNumberTagI(instr_,
4405
+ instr_->value(),
4406
+ SIGNED_INT32);
4407
+ }
3901
4408
  virtual LInstruction* instr() { return instr_; }
3902
4409
  private:
3903
4410
  LNumberTagI* instr_;
3904
4411
  };
3905
4412
 
3906
- Register src = ToRegister(instr->InputAt(0));
4413
+ Register src = ToRegister(instr->value());
3907
4414
  Register dst = ToRegister(instr->result());
3908
4415
  Register overflow = scratch0();
3909
4416
 
3910
- DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
4417
+ DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
3911
4418
  __ SmiTagCheckOverflow(dst, src, overflow);
3912
4419
  __ BranchOnOverflow(deferred->entry(), overflow);
3913
4420
  __ bind(deferred->exit());
3914
4421
  }
3915
4422
 
3916
4423
 
3917
- void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
4424
+ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4425
+ class DeferredNumberTagU: public LDeferredCode {
4426
+ public:
4427
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4428
+ : LDeferredCode(codegen), instr_(instr) { }
4429
+ virtual void Generate() {
4430
+ codegen()->DoDeferredNumberTagI(instr_,
4431
+ instr_->value(),
4432
+ UNSIGNED_INT32);
4433
+ }
4434
+ virtual LInstruction* instr() { return instr_; }
4435
+ private:
4436
+ LNumberTagU* instr_;
4437
+ };
4438
+
4439
+ LOperand* input = instr->value();
4440
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
4441
+ Register reg = ToRegister(input);
4442
+
4443
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4444
+ __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
4445
+ __ SmiTag(reg, reg);
4446
+ __ bind(deferred->exit());
4447
+ }
4448
+
4449
+
4450
+ // Convert unsigned integer with specified number of leading zeroes in binary
4451
+ // representation to IEEE 754 double.
4452
+ // Integer to convert is passed in register hiword.
4453
+ // Resulting double is returned in registers hiword:loword.
4454
+ // This functions does not work correctly for 0.
4455
+ static void GenerateUInt2Double(MacroAssembler* masm,
4456
+ Register hiword,
4457
+ Register loword,
4458
+ Register scratch,
4459
+ int leading_zeroes) {
4460
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
4461
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
4462
+
4463
+ const int mantissa_shift_for_hi_word =
4464
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
4465
+ const int mantissa_shift_for_lo_word =
4466
+ kBitsPerInt - mantissa_shift_for_hi_word;
4467
+ masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
4468
+ if (mantissa_shift_for_hi_word > 0) {
4469
+ masm->sll(loword, hiword, mantissa_shift_for_lo_word);
4470
+ masm->srl(hiword, hiword, mantissa_shift_for_hi_word);
4471
+ masm->Or(hiword, scratch, hiword);
4472
+ } else {
4473
+ masm->mov(loword, zero_reg);
4474
+ masm->sll(hiword, hiword, mantissa_shift_for_hi_word);
4475
+ masm->Or(hiword, scratch, hiword);
4476
+ }
4477
+
4478
+ // If least significant bit of biased exponent was not 1 it was corrupted
4479
+ // by most significant bit of mantissa so we should fix that.
4480
+ if (!(biased_exponent & 1)) {
4481
+ masm->li(scratch, 1 << HeapNumber::kExponentShift);
4482
+ masm->nor(scratch, scratch, scratch);
4483
+ masm->and_(hiword, hiword, scratch);
4484
+ }
4485
+ }
4486
+
4487
+
4488
+ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4489
+ LOperand* value,
4490
+ IntegerSignedness signedness) {
3918
4491
  Label slow;
3919
- Register src = ToRegister(instr->InputAt(0));
4492
+ Register src = ToRegister(value);
3920
4493
  Register dst = ToRegister(instr->result());
3921
- FPURegister dbl_scratch = double_scratch0();
4494
+ DoubleRegister dbl_scratch = double_scratch0();
3922
4495
 
3923
4496
  // Preserve the value of all registers.
3924
4497
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3925
4498
 
3926
- // There was overflow, so bits 30 and 31 of the original integer
3927
- // disagree. Try to allocate a heap number in new space and store
3928
- // the value in there. If that fails, call the runtime system.
3929
4499
  Label done;
3930
- if (dst.is(src)) {
3931
- __ SmiUntag(src, dst);
3932
- __ Xor(src, src, Operand(0x80000000));
4500
+ if (signedness == SIGNED_INT32) {
4501
+ // There was overflow, so bits 30 and 31 of the original integer
4502
+ // disagree. Try to allocate a heap number in new space and store
4503
+ // the value in there. If that fails, call the runtime system.
4504
+ if (dst.is(src)) {
4505
+ __ SmiUntag(src, dst);
4506
+ __ Xor(src, src, Operand(0x80000000));
4507
+ }
4508
+ if (CpuFeatures::IsSupported(FPU)) {
4509
+ CpuFeatures::Scope scope(FPU);
4510
+ __ mtc1(src, dbl_scratch);
4511
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
4512
+ } else {
4513
+ FloatingPointHelper::Destination dest =
4514
+ FloatingPointHelper::kCoreRegisters;
4515
+ FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0,
4516
+ sfpd_lo, sfpd_hi,
4517
+ scratch0(), f2);
4518
+ }
4519
+ } else {
4520
+ if (CpuFeatures::IsSupported(FPU)) {
4521
+ CpuFeatures::Scope scope(FPU);
4522
+ __ mtc1(src, dbl_scratch);
4523
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4524
+ } else {
4525
+ Label no_leading_zero, done;
4526
+ __ And(at, src, Operand(0x80000000));
4527
+ __ Branch(&no_leading_zero, ne, at, Operand(zero_reg));
4528
+
4529
+ // Integer has one leading zeros.
4530
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 1);
4531
+ __ Branch(&done);
4532
+
4533
+ __ bind(&no_leading_zero);
4534
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 0);
4535
+ __ Branch(&done);
4536
+ }
3933
4537
  }
3934
- __ mtc1(src, dbl_scratch);
3935
- __ cvt_d_w(dbl_scratch, dbl_scratch);
4538
+
3936
4539
  if (FLAG_inline_new) {
3937
- __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
3938
- __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
4540
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
4541
+ __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
3939
4542
  __ Move(dst, t1);
3940
4543
  __ Branch(&done);
3941
4544
  }
@@ -3949,11 +4552,19 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
3949
4552
  __ StoreToSafepointRegisterSlot(zero_reg, dst);
3950
4553
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3951
4554
  __ Move(dst, v0);
4555
+ __ Subu(dst, dst, kHeapObjectTag);
3952
4556
 
3953
4557
  // Done. Put the value in dbl_scratch into the value of the allocated heap
3954
4558
  // number.
3955
4559
  __ bind(&done);
3956
- __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4560
+ if (CpuFeatures::IsSupported(FPU)) {
4561
+ CpuFeatures::Scope scope(FPU);
4562
+ __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
4563
+ } else {
4564
+ __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
4565
+ __ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
4566
+ }
4567
+ __ Addu(dst, dst, kHeapObjectTag);
3957
4568
  __ StoreToSafepointRegisterSlot(dst, dst);
3958
4569
  }
3959
4570
 
@@ -3969,21 +4580,31 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3969
4580
  LNumberTagD* instr_;
3970
4581
  };
3971
4582
 
3972
- DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
4583
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
3973
4584
  Register scratch = scratch0();
3974
4585
  Register reg = ToRegister(instr->result());
3975
- Register temp1 = ToRegister(instr->TempAt(0));
3976
- Register temp2 = ToRegister(instr->TempAt(1));
4586
+ Register temp1 = ToRegister(instr->temp());
4587
+ Register temp2 = ToRegister(instr->temp2());
3977
4588
 
3978
- DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
4589
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
3979
4590
  if (FLAG_inline_new) {
3980
4591
  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
3981
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4592
+ // We want the untagged address first for performance
4593
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4594
+ DONT_TAG_RESULT);
3982
4595
  } else {
3983
4596
  __ Branch(deferred->entry());
3984
4597
  }
3985
4598
  __ bind(deferred->exit());
3986
- __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4599
+ if (CpuFeatures::IsSupported(FPU)) {
4600
+ CpuFeatures::Scope scope(FPU);
4601
+ __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
4602
+ } else {
4603
+ __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
4604
+ __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
4605
+ }
4606
+ // Now that we have finished with the object's real address tag it
4607
+ __ Addu(reg, reg, kHeapObjectTag);
3987
4608
  }
3988
4609
 
3989
4610
 
@@ -3996,19 +4617,20 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
3996
4617
 
3997
4618
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3998
4619
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4620
+ __ Subu(v0, v0, kHeapObjectTag);
3999
4621
  __ StoreToSafepointRegisterSlot(v0, reg);
4000
4622
  }
4001
4623
 
4002
4624
 
4003
4625
  void LCodeGen::DoSmiTag(LSmiTag* instr) {
4004
4626
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4005
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
4627
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
4006
4628
  }
4007
4629
 
4008
4630
 
4009
4631
  void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4010
4632
  Register scratch = scratch0();
4011
- Register input = ToRegister(instr->InputAt(0));
4633
+ Register input = ToRegister(instr->value());
4012
4634
  Register result = ToRegister(instr->result());
4013
4635
  if (instr->needs_check()) {
4014
4636
  STATIC_ASSERT(kHeapObjectTag == 1);
@@ -4028,6 +4650,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
4028
4650
  bool deoptimize_on_minus_zero,
4029
4651
  LEnvironment* env) {
4030
4652
  Register scratch = scratch0();
4653
+ CpuFeatures::Scope scope(FPU);
4031
4654
 
4032
4655
  Label load_smi, heap_number, done;
4033
4656
 
@@ -4073,11 +4696,11 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
4073
4696
 
4074
4697
 
4075
4698
  void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4076
- Register input_reg = ToRegister(instr->InputAt(0));
4699
+ Register input_reg = ToRegister(instr->value());
4077
4700
  Register scratch1 = scratch0();
4078
- Register scratch2 = ToRegister(instr->TempAt(0));
4701
+ Register scratch2 = ToRegister(instr->temp());
4079
4702
  DoubleRegister double_scratch = double_scratch0();
4080
- FPURegister single_scratch = double_scratch.low();
4703
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
4081
4704
 
4082
4705
  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4083
4706
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4092,8 +4715,9 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4092
4715
  // of the if.
4093
4716
 
4094
4717
  if (instr->truncating()) {
4095
- Register scratch3 = ToRegister(instr->TempAt(1));
4096
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
4718
+ CpuFeatures::Scope scope(FPU);
4719
+ Register scratch3 = ToRegister(instr->temp2());
4720
+ FPURegister single_scratch = double_scratch.low();
4097
4721
  ASSERT(!scratch3.is(input_reg) &&
4098
4722
  !scratch3.is(scratch1) &&
4099
4723
  !scratch3.is(scratch2));
@@ -4128,18 +4752,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4128
4752
 
4129
4753
  Register except_flag = scratch2;
4130
4754
  __ EmitFPUTruncate(kRoundToZero,
4131
- single_scratch,
4755
+ input_reg,
4132
4756
  double_scratch,
4133
4757
  scratch1,
4758
+ double_scratch2,
4134
4759
  except_flag,
4135
4760
  kCheckForInexactConversion);
4136
4761
 
4137
4762
  // Deopt if the operation did not succeed.
4138
4763
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4139
4764
 
4140
- // Load the result.
4141
- __ mfc1(input_reg, single_scratch);
4142
-
4143
4765
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4144
4766
  __ Branch(&done, ne, input_reg, Operand(zero_reg));
4145
4767
 
@@ -4163,13 +4785,13 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4163
4785
  LTaggedToI* instr_;
4164
4786
  };
4165
4787
 
4166
- LOperand* input = instr->InputAt(0);
4788
+ LOperand* input = instr->value();
4167
4789
  ASSERT(input->IsRegister());
4168
4790
  ASSERT(input->Equals(instr->result()));
4169
4791
 
4170
4792
  Register input_reg = ToRegister(input);
4171
4793
 
4172
- DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
4794
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4173
4795
 
4174
4796
  // Let the deferred code handle the HeapObject case.
4175
4797
  __ JumpIfNotSmi(input_reg, deferred->entry());
@@ -4181,7 +4803,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4181
4803
 
4182
4804
 
4183
4805
  void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4184
- LOperand* input = instr->InputAt(0);
4806
+ LOperand* input = instr->value();
4185
4807
  ASSERT(input->IsRegister());
4186
4808
  LOperand* result = instr->result();
4187
4809
  ASSERT(result->IsDoubleRegister());
@@ -4199,12 +4821,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4199
4821
  void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4200
4822
  Register result_reg = ToRegister(instr->result());
4201
4823
  Register scratch1 = scratch0();
4202
- Register scratch2 = ToRegister(instr->TempAt(0));
4203
- DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
4204
- FPURegister single_scratch = double_scratch0().low();
4824
+ Register scratch2 = ToRegister(instr->temp());
4825
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
4205
4826
 
4206
4827
  if (instr->truncating()) {
4207
- Register scratch3 = ToRegister(instr->TempAt(1));
4828
+ Register scratch3 = ToRegister(instr->temp2());
4829
+ FPURegister single_scratch = double_scratch0().low();
4208
4830
  __ EmitECMATruncate(result_reg,
4209
4831
  double_input,
4210
4832
  single_scratch,
@@ -4215,37 +4837,35 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4215
4837
  Register except_flag = scratch2;
4216
4838
 
4217
4839
  __ EmitFPUTruncate(kRoundToMinusInf,
4218
- single_scratch,
4840
+ result_reg,
4219
4841
  double_input,
4220
4842
  scratch1,
4843
+ double_scratch0(),
4221
4844
  except_flag,
4222
4845
  kCheckForInexactConversion);
4223
4846
 
4224
4847
  // Deopt if the operation did not succeed (except_flag != 0).
4225
4848
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4226
-
4227
- // Load the result.
4228
- __ mfc1(result_reg, single_scratch);
4229
4849
  }
4230
4850
  }
4231
4851
 
4232
4852
 
4233
4853
  void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4234
- LOperand* input = instr->InputAt(0);
4854
+ LOperand* input = instr->value();
4235
4855
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
4236
4856
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
4237
4857
  }
4238
4858
 
4239
4859
 
4240
4860
  void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4241
- LOperand* input = instr->InputAt(0);
4861
+ LOperand* input = instr->value();
4242
4862
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
4243
4863
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
4244
4864
  }
4245
4865
 
4246
4866
 
4247
4867
  void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4248
- Register input = ToRegister(instr->InputAt(0));
4868
+ Register input = ToRegister(instr->value());
4249
4869
  Register scratch = scratch0();
4250
4870
 
4251
4871
  __ GetObjectType(input, scratch, scratch);
@@ -4301,45 +4921,47 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4301
4921
  }
4302
4922
 
4303
4923
 
4304
- void LCodeGen::DoCheckMapCommon(Register reg,
4305
- Register scratch,
4924
+ void LCodeGen::DoCheckMapCommon(Register map_reg,
4306
4925
  Handle<Map> map,
4307
4926
  CompareMapMode mode,
4308
4927
  LEnvironment* env) {
4309
4928
  Label success;
4310
- __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
4929
+ __ CompareMapAndBranch(map_reg, map, &success, eq, &success, mode);
4311
4930
  DeoptimizeIf(al, env);
4312
4931
  __ bind(&success);
4313
4932
  }
4314
4933
 
4315
4934
 
4316
4935
  void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4317
- Register scratch = scratch0();
4318
- LOperand* input = instr->InputAt(0);
4936
+ Register map_reg = scratch0();
4937
+ LOperand* input = instr->value();
4319
4938
  ASSERT(input->IsRegister());
4320
4939
  Register reg = ToRegister(input);
4321
4940
  Label success;
4322
4941
  SmallMapList* map_set = instr->hydrogen()->map_set();
4942
+ __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
4323
4943
  for (int i = 0; i < map_set->length() - 1; i++) {
4324
4944
  Handle<Map> map = map_set->at(i);
4325
4945
  __ CompareMapAndBranch(
4326
- reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
4946
+ map_reg, map, &success, eq, &success, REQUIRE_EXACT_MAP);
4327
4947
  }
4328
4948
  Handle<Map> map = map_set->last();
4329
- DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
4949
+ DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
4330
4950
  __ bind(&success);
4331
4951
  }
4332
4952
 
4333
4953
 
4334
4954
  void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4955
+ CpuFeatures::Scope vfp_scope(FPU);
4335
4956
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4336
4957
  Register result_reg = ToRegister(instr->result());
4337
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4958
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
4338
4959
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4339
4960
  }
4340
4961
 
4341
4962
 
4342
4963
  void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4964
+ CpuFeatures::Scope vfp_scope(FPU);
4343
4965
  Register unclamped_reg = ToRegister(instr->unclamped());
4344
4966
  Register result_reg = ToRegister(instr->result());
4345
4967
  __ ClampUint8(result_reg, unclamped_reg);
@@ -4347,10 +4969,11 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4347
4969
 
4348
4970
 
4349
4971
  void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4972
+ CpuFeatures::Scope vfp_scope(FPU);
4350
4973
  Register scratch = scratch0();
4351
4974
  Register input_reg = ToRegister(instr->unclamped());
4352
4975
  Register result_reg = ToRegister(instr->result());
4353
- DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4976
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
4354
4977
  Label is_smi, done, heap_number;
4355
4978
 
4356
4979
  // Both smi and heap number cases are handled.
@@ -4382,30 +5005,23 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4382
5005
 
4383
5006
 
4384
5007
  void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4385
- Register temp1 = ToRegister(instr->TempAt(0));
4386
- Register temp2 = ToRegister(instr->TempAt(1));
5008
+ ASSERT(instr->temp()->Equals(instr->result()));
5009
+ Register prototype_reg = ToRegister(instr->temp());
5010
+ Register map_reg = ToRegister(instr->temp2());
4387
5011
 
4388
- Handle<JSObject> holder = instr->holder();
4389
- Handle<JSObject> current_prototype = instr->prototype();
5012
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
5013
+ ZoneList<Handle<Map> >* maps = instr->maps();
4390
5014
 
4391
- // Load prototype object.
4392
- __ LoadHeapObject(temp1, current_prototype);
5015
+ ASSERT(prototypes->length() == maps->length());
4393
5016
 
4394
- // Check prototype maps up to the holder.
4395
- while (!current_prototype.is_identical_to(holder)) {
4396
- DoCheckMapCommon(temp1, temp2,
4397
- Handle<Map>(current_prototype->map()),
4398
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4399
- current_prototype =
4400
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4401
- // Load next prototype object.
4402
- __ LoadHeapObject(temp1, current_prototype);
5017
+ for (int i = 0; i < prototypes->length(); i++) {
5018
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
5019
+ __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
5020
+ DoCheckMapCommon(map_reg,
5021
+ maps->at(i),
5022
+ ALLOW_ELEMENT_TRANSITION_MAPS,
5023
+ instr->environment());
4403
5024
  }
4404
-
4405
- // Check the holder map.
4406
- DoCheckMapCommon(temp1, temp2,
4407
- Handle<Map>(current_prototype->map()),
4408
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4409
5025
  }
4410
5026
 
4411
5027
 
@@ -4420,11 +5036,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4420
5036
  LAllocateObject* instr_;
4421
5037
  };
4422
5038
 
4423
- DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
5039
+ DeferredAllocateObject* deferred =
5040
+ new(zone()) DeferredAllocateObject(this, instr);
4424
5041
 
4425
5042
  Register result = ToRegister(instr->result());
4426
- Register scratch = ToRegister(instr->TempAt(0));
4427
- Register scratch2 = ToRegister(instr->TempAt(1));
5043
+ Register scratch = ToRegister(instr->temp());
5044
+ Register scratch2 = ToRegister(instr->temp2());
4428
5045
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4429
5046
  Handle<Map> initial_map(constructor->initial_map());
4430
5047
  int instance_size = initial_map->instance_size();
@@ -4492,9 +5109,11 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4492
5109
 
4493
5110
 
4494
5111
  void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4495
- Heap* heap = isolate()->heap();
5112
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
4496
5113
  ElementsKind boilerplate_elements_kind =
4497
5114
  instr->hydrogen()->boilerplate_elements_kind();
5115
+ AllocationSiteMode allocation_site_mode =
5116
+ instr->hydrogen()->allocation_site_mode();
4498
5117
 
4499
5118
  // Deopt if the array literal boilerplate ElementsKind is of a type different
4500
5119
  // than the expected one. The check isn't necessary if the boilerplate has
@@ -4513,12 +5132,13 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4513
5132
  a2,
4514
5133
  Operand(boilerplate_elements_kind));
4515
5134
  }
4516
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4517
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
5135
+
5136
+ // Set up the parameters to the stub/runtime call.
5137
+ __ LoadHeapObject(a3, literals);
4518
5138
  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4519
5139
  // Boilerplate already exists, constant elements are never accessed.
4520
5140
  // Pass an empty fixed array.
4521
- __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
5141
+ __ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
4522
5142
  __ Push(a3, a2, a1);
4523
5143
 
4524
5144
  // Pick the right runtime function or stub to call.
@@ -4527,7 +5147,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4527
5147
  ASSERT(instr->hydrogen()->depth() == 1);
4528
5148
  FastCloneShallowArrayStub::Mode mode =
4529
5149
  FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4530
- FastCloneShallowArrayStub stub(mode, length);
5150
+ FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
4531
5151
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4532
5152
  } else if (instr->hydrogen()->depth() > 1) {
4533
5153
  CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
@@ -4536,9 +5156,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4536
5156
  } else {
4537
5157
  FastCloneShallowArrayStub::Mode mode =
4538
5158
  boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4539
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4540
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4541
- FastCloneShallowArrayStub stub(mode, length);
5159
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
5160
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
5161
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
4542
5162
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4543
5163
  }
4544
5164
  }
@@ -4547,10 +5167,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4547
5167
  void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4548
5168
  Register result,
4549
5169
  Register source,
4550
- int* offset) {
5170
+ int* offset,
5171
+ AllocationSiteMode mode) {
4551
5172
  ASSERT(!source.is(a2));
4552
5173
  ASSERT(!result.is(a2));
4553
5174
 
5175
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
5176
+ object->map()->CanTrackAllocationSite();
5177
+
4554
5178
  // Only elements backing stores for non-COW arrays need to be copied.
4555
5179
  Handle<FixedArrayBase> elements(object->elements());
4556
5180
  bool has_elements = elements->length() > 0 &&
@@ -4560,8 +5184,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4560
5184
  // this object and its backing store.
4561
5185
  int object_offset = *offset;
4562
5186
  int object_size = object->map()->instance_size();
4563
- int elements_offset = *offset + object_size;
4564
5187
  int elements_size = has_elements ? elements->Size() : 0;
5188
+ int elements_offset = *offset + object_size;
5189
+ if (create_allocation_site_info) {
5190
+ elements_offset += AllocationSiteInfo::kSize;
5191
+ *offset += AllocationSiteInfo::kSize;
5192
+ }
5193
+
4565
5194
  *offset += object_size + elements_size;
4566
5195
 
4567
5196
  // Copy object header.
@@ -4586,7 +5215,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4586
5215
  __ Addu(a2, result, Operand(*offset));
4587
5216
  __ sw(a2, FieldMemOperand(result, total_offset));
4588
5217
  __ LoadHeapObject(source, value_object);
4589
- EmitDeepCopy(value_object, result, source, offset);
5218
+ EmitDeepCopy(value_object, result, source, offset,
5219
+ DONT_TRACK_ALLOCATION_SITE);
4590
5220
  } else if (value->IsHeapObject()) {
4591
5221
  __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4592
5222
  __ sw(a2, FieldMemOperand(result, total_offset));
@@ -4596,6 +5226,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4596
5226
  }
4597
5227
  }
4598
5228
 
5229
+ // Build Allocation Site Info if desired
5230
+ if (create_allocation_site_info) {
5231
+ __ li(a2, Operand(Handle<Map>(isolate()->heap()->
5232
+ allocation_site_info_map())));
5233
+ __ sw(a2, FieldMemOperand(result, object_size));
5234
+ __ sw(source, FieldMemOperand(result, object_size + kPointerSize));
5235
+ }
4599
5236
 
4600
5237
  if (has_elements) {
4601
5238
  // Copy elements backing store header.
@@ -4613,8 +5250,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4613
5250
  for (int i = 0; i < elements_length; i++) {
4614
5251
  int64_t value = double_array->get_representation(i);
4615
5252
  // We only support little endian mode...
4616
- int32_t value_low = value & 0xFFFFFFFF;
4617
- int32_t value_high = value >> 32;
5253
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
5254
+ int32_t value_high = static_cast<int32_t>(value >> 32);
4618
5255
  int total_offset =
4619
5256
  elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4620
5257
  __ li(a2, Operand(value_low));
@@ -4632,7 +5269,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4632
5269
  __ Addu(a2, result, Operand(*offset));
4633
5270
  __ sw(a2, FieldMemOperand(result, total_offset));
4634
5271
  __ LoadHeapObject(source, value_object);
4635
- EmitDeepCopy(value_object, result, source, offset);
5272
+ EmitDeepCopy(value_object, result, source, offset,
5273
+ DONT_TRACK_ALLOCATION_SITE);
4636
5274
  } else if (value->IsHeapObject()) {
4637
5275
  __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4638
5276
  __ sw(a2, FieldMemOperand(result, total_offset));
@@ -4683,7 +5321,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4683
5321
  __ bind(&allocated);
4684
5322
  int offset = 0;
4685
5323
  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
4686
- EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
5324
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset,
5325
+ instr->hydrogen()->allocation_site_mode());
4687
5326
  ASSERT_EQ(size, offset);
4688
5327
  }
4689
5328
 
@@ -4719,7 +5358,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4719
5358
 
4720
5359
 
4721
5360
  void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4722
- ASSERT(ToRegister(instr->InputAt(0)).is(a0));
5361
+ ASSERT(ToRegister(instr->value()).is(a0));
4723
5362
  ASSERT(ToRegister(instr->result()).is(v0));
4724
5363
  __ push(a0);
4725
5364
  CallRuntime(Runtime::kToFastProperties, 1, instr);
@@ -4729,15 +5368,13 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4729
5368
  void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4730
5369
  Label materialized;
4731
5370
  // Registers will be used as follows:
4732
- // a3 = JS function.
4733
5371
  // t3 = literals array.
4734
5372
  // a1 = regexp literal.
4735
5373
  // a0 = regexp literal clone.
4736
5374
  // a2 and t0-t2 are used as temporaries.
4737
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4738
- __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
4739
- int literal_offset = FixedArray::kHeaderSize +
4740
- instr->hydrogen()->literal_index() * kPointerSize;
5375
+ int literal_offset =
5376
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5377
+ __ LoadHeapObject(t3, instr->hydrogen()->literals());
4741
5378
  __ lw(a1, FieldMemOperand(t3, literal_offset));
4742
5379
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4743
5380
  __ Branch(&materialized, ne, a1, Operand(at));
@@ -4803,14 +5440,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4803
5440
 
4804
5441
  void LCodeGen::DoTypeof(LTypeof* instr) {
4805
5442
  ASSERT(ToRegister(instr->result()).is(v0));
4806
- Register input = ToRegister(instr->InputAt(0));
5443
+ Register input = ToRegister(instr->value());
4807
5444
  __ push(input);
4808
5445
  CallRuntime(Runtime::kTypeof, 1, instr);
4809
5446
  }
4810
5447
 
4811
5448
 
4812
5449
  void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4813
- Register input = ToRegister(instr->InputAt(0));
5450
+ Register input = ToRegister(instr->value());
4814
5451
  int true_block = chunk_->LookupDestination(instr->true_block_id());
4815
5452
  int false_block = chunk_->LookupDestination(instr->false_block_id());
4816
5453
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -4937,7 +5574,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
4937
5574
 
4938
5575
 
4939
5576
  void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4940
- Register temp1 = ToRegister(instr->TempAt(0));
5577
+ Register temp1 = ToRegister(instr->temp());
4941
5578
  int true_block = chunk_->LookupDestination(instr->true_block_id());
4942
5579
  int false_block = chunk_->LookupDestination(instr->false_block_id());
4943
5580
 
@@ -4967,6 +5604,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
4967
5604
 
4968
5605
 
4969
5606
  void LCodeGen::EnsureSpaceForLazyDeopt() {
5607
+ if (info()->IsStub()) return;
4970
5608
  // Ensure that we have enough space after the previous lazy-bailout
4971
5609
  // instruction for patching the code here.
4972
5610
  int current_pc = masm()->pc_offset();
@@ -4997,6 +5635,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
4997
5635
  }
4998
5636
 
4999
5637
 
5638
+ void LCodeGen::DoDummyUse(LDummyUse* instr) {
5639
+ // Nothing to see here, move on!
5640
+ }
5641
+
5642
+
5000
5643
  void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5001
5644
  Register object = ToRegister(instr->object());
5002
5645
  Register key = ToRegister(instr->key());
@@ -5065,7 +5708,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
5065
5708
  ASSERT(instr->hydrogen()->is_backwards_branch());
5066
5709
  // Perform stack overflow check if this goto needs it before jumping.
5067
5710
  DeferredStackCheck* deferred_stack_check =
5068
- new DeferredStackCheck(this, instr);
5711
+ new(zone()) DeferredStackCheck(this, instr);
5069
5712
  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5070
5713
  __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5071
5714
  EnsureSpaceForLazyDeopt();
@@ -5136,12 +5779,21 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5136
5779
  void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5137
5780
  Register map = ToRegister(instr->map());
5138
5781
  Register result = ToRegister(instr->result());
5782
+ Label load_cache, done;
5783
+ __ EnumLength(result, map);
5784
+ __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5785
+ __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5786
+ __ jmp(&done);
5787
+
5788
+ __ bind(&load_cache);
5139
5789
  __ LoadInstanceDescriptors(map, result);
5140
5790
  __ lw(result,
5141
- FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
5791
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5142
5792
  __ lw(result,
5143
5793
  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5144
5794
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
5795
+
5796
+ __ bind(&done);
5145
5797
  }
5146
5798
 
5147
5799