libv8 3.11.8.17 → 3.16.14.0

Files changed (754)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +1 -2
  3. data/Gemfile +1 -1
  4. data/Rakefile +6 -7
  5. data/lib/libv8/version.rb +1 -1
  6. data/vendor/v8/.gitignore +24 -3
  7. data/vendor/v8/AUTHORS +7 -0
  8. data/vendor/v8/ChangeLog +839 -0
  9. data/vendor/v8/DEPS +1 -1
  10. data/vendor/v8/Makefile.android +92 -0
  11. data/vendor/v8/OWNERS +11 -0
  12. data/vendor/v8/PRESUBMIT.py +71 -0
  13. data/vendor/v8/SConstruct +34 -39
  14. data/vendor/v8/build/android.gypi +56 -37
  15. data/vendor/v8/build/common.gypi +112 -30
  16. data/vendor/v8/build/gyp_v8 +1 -1
  17. data/vendor/v8/build/standalone.gypi +15 -11
  18. data/vendor/v8/include/v8-debug.h +9 -1
  19. data/vendor/v8/include/v8-preparser.h +4 -3
  20. data/vendor/v8/include/v8-profiler.h +25 -25
  21. data/vendor/v8/include/v8-testing.h +4 -3
  22. data/vendor/v8/include/v8.h +994 -540
  23. data/vendor/v8/preparser/preparser-process.cc +3 -3
  24. data/vendor/v8/samples/lineprocessor.cc +20 -27
  25. data/vendor/v8/samples/process.cc +18 -14
  26. data/vendor/v8/samples/shell.cc +16 -15
  27. data/vendor/v8/src/SConscript +15 -14
  28. data/vendor/v8/src/accessors.cc +169 -77
  29. data/vendor/v8/src/accessors.h +4 -0
  30. data/vendor/v8/src/allocation-inl.h +2 -2
  31. data/vendor/v8/src/allocation.h +7 -7
  32. data/vendor/v8/src/api.cc +810 -497
  33. data/vendor/v8/src/api.h +85 -60
  34. data/vendor/v8/src/arm/assembler-arm-inl.h +179 -22
  35. data/vendor/v8/src/arm/assembler-arm.cc +633 -264
  36. data/vendor/v8/src/arm/assembler-arm.h +264 -197
  37. data/vendor/v8/src/arm/builtins-arm.cc +117 -27
  38. data/vendor/v8/src/arm/code-stubs-arm.cc +1241 -700
  39. data/vendor/v8/src/arm/code-stubs-arm.h +35 -138
  40. data/vendor/v8/src/arm/codegen-arm.cc +285 -16
  41. data/vendor/v8/src/arm/codegen-arm.h +22 -0
  42. data/vendor/v8/src/arm/constants-arm.cc +5 -3
  43. data/vendor/v8/src/arm/constants-arm.h +24 -11
  44. data/vendor/v8/src/arm/debug-arm.cc +3 -3
  45. data/vendor/v8/src/arm/deoptimizer-arm.cc +382 -92
  46. data/vendor/v8/src/arm/disasm-arm.cc +61 -12
  47. data/vendor/v8/src/arm/frames-arm.h +0 -14
  48. data/vendor/v8/src/arm/full-codegen-arm.cc +332 -304
  49. data/vendor/v8/src/arm/ic-arm.cc +180 -259
  50. data/vendor/v8/src/arm/lithium-arm.cc +364 -316
  51. data/vendor/v8/src/arm/lithium-arm.h +512 -275
  52. data/vendor/v8/src/arm/lithium-codegen-arm.cc +1768 -809
  53. data/vendor/v8/src/arm/lithium-codegen-arm.h +97 -35
  54. data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +12 -5
  55. data/vendor/v8/src/arm/macro-assembler-arm.cc +439 -228
  56. data/vendor/v8/src/arm/macro-assembler-arm.h +116 -70
  57. data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +54 -44
  58. data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +3 -10
  59. data/vendor/v8/src/arm/simulator-arm.cc +272 -238
  60. data/vendor/v8/src/arm/simulator-arm.h +38 -8
  61. data/vendor/v8/src/arm/stub-cache-arm.cc +522 -895
  62. data/vendor/v8/src/array.js +101 -70
  63. data/vendor/v8/src/assembler.cc +270 -19
  64. data/vendor/v8/src/assembler.h +110 -15
  65. data/vendor/v8/src/ast.cc +79 -69
  66. data/vendor/v8/src/ast.h +255 -301
  67. data/vendor/v8/src/atomicops.h +7 -1
  68. data/vendor/v8/src/atomicops_internals_tsan.h +335 -0
  69. data/vendor/v8/src/bootstrapper.cc +481 -418
  70. data/vendor/v8/src/bootstrapper.h +4 -4
  71. data/vendor/v8/src/builtins.cc +498 -311
  72. data/vendor/v8/src/builtins.h +75 -47
  73. data/vendor/v8/src/checks.cc +2 -1
  74. data/vendor/v8/src/checks.h +8 -0
  75. data/vendor/v8/src/code-stubs-hydrogen.cc +253 -0
  76. data/vendor/v8/src/code-stubs.cc +249 -84
  77. data/vendor/v8/src/code-stubs.h +501 -169
  78. data/vendor/v8/src/codegen.cc +36 -18
  79. data/vendor/v8/src/codegen.h +25 -3
  80. data/vendor/v8/src/collection.js +54 -17
  81. data/vendor/v8/src/compilation-cache.cc +24 -16
  82. data/vendor/v8/src/compilation-cache.h +15 -6
  83. data/vendor/v8/src/compiler.cc +497 -195
  84. data/vendor/v8/src/compiler.h +246 -38
  85. data/vendor/v8/src/contexts.cc +64 -24
  86. data/vendor/v8/src/contexts.h +60 -29
  87. data/vendor/v8/src/conversions-inl.h +24 -14
  88. data/vendor/v8/src/conversions.h +7 -4
  89. data/vendor/v8/src/counters.cc +21 -12
  90. data/vendor/v8/src/counters.h +44 -16
  91. data/vendor/v8/src/cpu-profiler.h +1 -1
  92. data/vendor/v8/src/d8-debug.cc +2 -2
  93. data/vendor/v8/src/d8-readline.cc +13 -2
  94. data/vendor/v8/src/d8.cc +681 -273
  95. data/vendor/v8/src/d8.gyp +4 -4
  96. data/vendor/v8/src/d8.h +38 -18
  97. data/vendor/v8/src/d8.js +0 -617
  98. data/vendor/v8/src/data-flow.h +55 -0
  99. data/vendor/v8/src/date.js +1 -42
  100. data/vendor/v8/src/dateparser-inl.h +5 -1
  101. data/vendor/v8/src/debug-agent.cc +10 -15
  102. data/vendor/v8/src/debug-debugger.js +147 -149
  103. data/vendor/v8/src/debug.cc +323 -164
  104. data/vendor/v8/src/debug.h +26 -14
  105. data/vendor/v8/src/deoptimizer.cc +765 -290
  106. data/vendor/v8/src/deoptimizer.h +130 -28
  107. data/vendor/v8/src/disassembler.cc +10 -4
  108. data/vendor/v8/src/elements-kind.cc +7 -2
  109. data/vendor/v8/src/elements-kind.h +19 -0
  110. data/vendor/v8/src/elements.cc +607 -285
  111. data/vendor/v8/src/elements.h +36 -13
  112. data/vendor/v8/src/execution.cc +52 -31
  113. data/vendor/v8/src/execution.h +4 -4
  114. data/vendor/v8/src/extensions/externalize-string-extension.cc +5 -4
  115. data/vendor/v8/src/extensions/gc-extension.cc +5 -1
  116. data/vendor/v8/src/extensions/statistics-extension.cc +153 -0
  117. data/vendor/v8/src/{inspector.h → extensions/statistics-extension.h} +12 -23
  118. data/vendor/v8/src/factory.cc +101 -134
  119. data/vendor/v8/src/factory.h +36 -31
  120. data/vendor/v8/src/flag-definitions.h +102 -25
  121. data/vendor/v8/src/flags.cc +9 -5
  122. data/vendor/v8/src/frames-inl.h +10 -0
  123. data/vendor/v8/src/frames.cc +116 -26
  124. data/vendor/v8/src/frames.h +96 -12
  125. data/vendor/v8/src/full-codegen.cc +219 -74
  126. data/vendor/v8/src/full-codegen.h +63 -21
  127. data/vendor/v8/src/func-name-inferrer.cc +8 -7
  128. data/vendor/v8/src/func-name-inferrer.h +5 -3
  129. data/vendor/v8/src/gdb-jit.cc +71 -57
  130. data/vendor/v8/src/global-handles.cc +230 -101
  131. data/vendor/v8/src/global-handles.h +26 -27
  132. data/vendor/v8/src/globals.h +17 -19
  133. data/vendor/v8/src/handles-inl.h +59 -12
  134. data/vendor/v8/src/handles.cc +180 -200
  135. data/vendor/v8/src/handles.h +80 -11
  136. data/vendor/v8/src/hashmap.h +60 -40
  137. data/vendor/v8/src/heap-inl.h +107 -45
  138. data/vendor/v8/src/heap-profiler.cc +38 -19
  139. data/vendor/v8/src/heap-profiler.h +24 -14
  140. data/vendor/v8/src/heap.cc +1123 -738
  141. data/vendor/v8/src/heap.h +385 -146
  142. data/vendor/v8/src/hydrogen-instructions.cc +700 -217
  143. data/vendor/v8/src/hydrogen-instructions.h +1158 -472
  144. data/vendor/v8/src/hydrogen.cc +3319 -1662
  145. data/vendor/v8/src/hydrogen.h +411 -170
  146. data/vendor/v8/src/ia32/assembler-ia32-inl.h +46 -16
  147. data/vendor/v8/src/ia32/assembler-ia32.cc +131 -61
  148. data/vendor/v8/src/ia32/assembler-ia32.h +115 -57
  149. data/vendor/v8/src/ia32/builtins-ia32.cc +99 -5
  150. data/vendor/v8/src/ia32/code-stubs-ia32.cc +787 -495
  151. data/vendor/v8/src/ia32/code-stubs-ia32.h +10 -100
  152. data/vendor/v8/src/ia32/codegen-ia32.cc +227 -23
  153. data/vendor/v8/src/ia32/codegen-ia32.h +14 -0
  154. data/vendor/v8/src/ia32/deoptimizer-ia32.cc +428 -87
  155. data/vendor/v8/src/ia32/disasm-ia32.cc +28 -1
  156. data/vendor/v8/src/ia32/frames-ia32.h +6 -16
  157. data/vendor/v8/src/ia32/full-codegen-ia32.cc +280 -272
  158. data/vendor/v8/src/ia32/ic-ia32.cc +150 -250
  159. data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +1600 -517
  160. data/vendor/v8/src/ia32/lithium-codegen-ia32.h +90 -24
  161. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +10 -6
  162. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.h +2 -2
  163. data/vendor/v8/src/ia32/lithium-ia32.cc +405 -302
  164. data/vendor/v8/src/ia32/lithium-ia32.h +526 -271
  165. data/vendor/v8/src/ia32/macro-assembler-ia32.cc +378 -119
  166. data/vendor/v8/src/ia32/macro-assembler-ia32.h +62 -28
  167. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +43 -30
  168. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +2 -10
  169. data/vendor/v8/src/ia32/stub-cache-ia32.cc +492 -678
  170. data/vendor/v8/src/ic-inl.h +9 -4
  171. data/vendor/v8/src/ic.cc +836 -923
  172. data/vendor/v8/src/ic.h +228 -247
  173. data/vendor/v8/src/incremental-marking-inl.h +26 -30
  174. data/vendor/v8/src/incremental-marking.cc +276 -248
  175. data/vendor/v8/src/incremental-marking.h +29 -37
  176. data/vendor/v8/src/interface.cc +34 -25
  177. data/vendor/v8/src/interface.h +69 -25
  178. data/vendor/v8/src/interpreter-irregexp.cc +2 -2
  179. data/vendor/v8/src/isolate.cc +382 -76
  180. data/vendor/v8/src/isolate.h +109 -56
  181. data/vendor/v8/src/json-parser.h +217 -104
  182. data/vendor/v8/src/json-stringifier.h +745 -0
  183. data/vendor/v8/src/json.js +10 -132
  184. data/vendor/v8/src/jsregexp-inl.h +106 -0
  185. data/vendor/v8/src/jsregexp.cc +517 -285
  186. data/vendor/v8/src/jsregexp.h +145 -117
  187. data/vendor/v8/src/list-inl.h +35 -22
  188. data/vendor/v8/src/list.h +46 -19
  189. data/vendor/v8/src/lithium-allocator-inl.h +22 -2
  190. data/vendor/v8/src/lithium-allocator.cc +85 -70
  191. data/vendor/v8/src/lithium-allocator.h +21 -39
  192. data/vendor/v8/src/lithium.cc +259 -5
  193. data/vendor/v8/src/lithium.h +131 -32
  194. data/vendor/v8/src/liveedit-debugger.js +52 -3
  195. data/vendor/v8/src/liveedit.cc +393 -113
  196. data/vendor/v8/src/liveedit.h +7 -3
  197. data/vendor/v8/src/log-utils.cc +4 -2
  198. data/vendor/v8/src/log.cc +170 -140
  199. data/vendor/v8/src/log.h +62 -11
  200. data/vendor/v8/src/macro-assembler.h +17 -0
  201. data/vendor/v8/src/macros.py +2 -0
  202. data/vendor/v8/src/mark-compact-inl.h +3 -23
  203. data/vendor/v8/src/mark-compact.cc +801 -830
  204. data/vendor/v8/src/mark-compact.h +154 -47
  205. data/vendor/v8/src/marking-thread.cc +85 -0
  206. data/vendor/v8/src/{inspector.cc → marking-thread.h} +32 -24
  207. data/vendor/v8/src/math.js +12 -18
  208. data/vendor/v8/src/messages.cc +18 -8
  209. data/vendor/v8/src/messages.js +314 -261
  210. data/vendor/v8/src/mips/assembler-mips-inl.h +58 -6
  211. data/vendor/v8/src/mips/assembler-mips.cc +92 -75
  212. data/vendor/v8/src/mips/assembler-mips.h +54 -60
  213. data/vendor/v8/src/mips/builtins-mips.cc +116 -17
  214. data/vendor/v8/src/mips/code-stubs-mips.cc +919 -556
  215. data/vendor/v8/src/mips/code-stubs-mips.h +22 -131
  216. data/vendor/v8/src/mips/codegen-mips.cc +281 -6
  217. data/vendor/v8/src/mips/codegen-mips.h +22 -0
  218. data/vendor/v8/src/mips/constants-mips.cc +2 -0
  219. data/vendor/v8/src/mips/constants-mips.h +12 -2
  220. data/vendor/v8/src/mips/deoptimizer-mips.cc +286 -50
  221. data/vendor/v8/src/mips/disasm-mips.cc +13 -0
  222. data/vendor/v8/src/mips/full-codegen-mips.cc +297 -284
  223. data/vendor/v8/src/mips/ic-mips.cc +182 -263
  224. data/vendor/v8/src/mips/lithium-codegen-mips.cc +1208 -556
  225. data/vendor/v8/src/mips/lithium-codegen-mips.h +72 -19
  226. data/vendor/v8/src/mips/lithium-gap-resolver-mips.cc +9 -2
  227. data/vendor/v8/src/mips/lithium-mips.cc +290 -302
  228. data/vendor/v8/src/mips/lithium-mips.h +463 -266
  229. data/vendor/v8/src/mips/macro-assembler-mips.cc +208 -115
  230. data/vendor/v8/src/mips/macro-assembler-mips.h +67 -24
  231. data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +40 -25
  232. data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +3 -9
  233. data/vendor/v8/src/mips/simulator-mips.cc +112 -40
  234. data/vendor/v8/src/mips/simulator-mips.h +5 -0
  235. data/vendor/v8/src/mips/stub-cache-mips.cc +502 -884
  236. data/vendor/v8/src/mirror-debugger.js +157 -30
  237. data/vendor/v8/src/mksnapshot.cc +88 -14
  238. data/vendor/v8/src/object-observe.js +235 -0
  239. data/vendor/v8/src/objects-debug.cc +178 -176
  240. data/vendor/v8/src/objects-inl.h +1333 -486
  241. data/vendor/v8/src/objects-printer.cc +125 -43
  242. data/vendor/v8/src/objects-visiting-inl.h +578 -6
  243. data/vendor/v8/src/objects-visiting.cc +2 -2
  244. data/vendor/v8/src/objects-visiting.h +172 -79
  245. data/vendor/v8/src/objects.cc +3533 -2885
  246. data/vendor/v8/src/objects.h +1352 -1131
  247. data/vendor/v8/src/optimizing-compiler-thread.cc +152 -0
  248. data/vendor/v8/src/optimizing-compiler-thread.h +111 -0
  249. data/vendor/v8/src/parser.cc +390 -500
  250. data/vendor/v8/src/parser.h +45 -33
  251. data/vendor/v8/src/platform-cygwin.cc +10 -21
  252. data/vendor/v8/src/platform-freebsd.cc +36 -41
  253. data/vendor/v8/src/platform-linux.cc +160 -124
  254. data/vendor/v8/src/platform-macos.cc +30 -27
  255. data/vendor/v8/src/platform-nullos.cc +17 -1
  256. data/vendor/v8/src/platform-openbsd.cc +19 -50
  257. data/vendor/v8/src/platform-posix.cc +14 -0
  258. data/vendor/v8/src/platform-solaris.cc +20 -53
  259. data/vendor/v8/src/platform-win32.cc +49 -26
  260. data/vendor/v8/src/platform.h +40 -1
  261. data/vendor/v8/src/preparser.cc +8 -5
  262. data/vendor/v8/src/preparser.h +2 -2
  263. data/vendor/v8/src/prettyprinter.cc +16 -0
  264. data/vendor/v8/src/prettyprinter.h +2 -0
  265. data/vendor/v8/src/profile-generator-inl.h +1 -0
  266. data/vendor/v8/src/profile-generator.cc +209 -147
  267. data/vendor/v8/src/profile-generator.h +15 -12
  268. data/vendor/v8/src/property-details.h +46 -31
  269. data/vendor/v8/src/property.cc +27 -46
  270. data/vendor/v8/src/property.h +163 -83
  271. data/vendor/v8/src/proxy.js +7 -2
  272. data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +4 -13
  273. data/vendor/v8/src/regexp-macro-assembler-irregexp.h +1 -2
  274. data/vendor/v8/src/regexp-macro-assembler-tracer.cc +1 -11
  275. data/vendor/v8/src/regexp-macro-assembler-tracer.h +0 -1
  276. data/vendor/v8/src/regexp-macro-assembler.cc +31 -14
  277. data/vendor/v8/src/regexp-macro-assembler.h +14 -11
  278. data/vendor/v8/src/regexp-stack.cc +1 -0
  279. data/vendor/v8/src/regexp.js +9 -8
  280. data/vendor/v8/src/rewriter.cc +18 -7
  281. data/vendor/v8/src/runtime-profiler.cc +52 -43
  282. data/vendor/v8/src/runtime-profiler.h +0 -25
  283. data/vendor/v8/src/runtime.cc +2006 -2023
  284. data/vendor/v8/src/runtime.h +56 -49
  285. data/vendor/v8/src/safepoint-table.cc +12 -18
  286. data/vendor/v8/src/safepoint-table.h +11 -8
  287. data/vendor/v8/src/scanner.cc +1 -0
  288. data/vendor/v8/src/scanner.h +4 -10
  289. data/vendor/v8/src/scopeinfo.cc +35 -9
  290. data/vendor/v8/src/scopeinfo.h +64 -3
  291. data/vendor/v8/src/scopes.cc +251 -156
  292. data/vendor/v8/src/scopes.h +61 -27
  293. data/vendor/v8/src/serialize.cc +348 -396
  294. data/vendor/v8/src/serialize.h +125 -114
  295. data/vendor/v8/src/small-pointer-list.h +11 -11
  296. data/vendor/v8/src/{smart-array-pointer.h → smart-pointers.h} +64 -15
  297. data/vendor/v8/src/snapshot-common.cc +64 -15
  298. data/vendor/v8/src/snapshot-empty.cc +7 -1
  299. data/vendor/v8/src/snapshot.h +9 -2
  300. data/vendor/v8/src/spaces-inl.h +17 -0
  301. data/vendor/v8/src/spaces.cc +477 -183
  302. data/vendor/v8/src/spaces.h +238 -58
  303. data/vendor/v8/src/splay-tree-inl.h +8 -7
  304. data/vendor/v8/src/splay-tree.h +24 -10
  305. data/vendor/v8/src/store-buffer.cc +12 -5
  306. data/vendor/v8/src/store-buffer.h +2 -4
  307. data/vendor/v8/src/string-search.h +22 -6
  308. data/vendor/v8/src/string-stream.cc +11 -8
  309. data/vendor/v8/src/string.js +47 -15
  310. data/vendor/v8/src/stub-cache.cc +461 -224
  311. data/vendor/v8/src/stub-cache.h +164 -102
  312. data/vendor/v8/src/sweeper-thread.cc +105 -0
  313. data/vendor/v8/src/sweeper-thread.h +81 -0
  314. data/vendor/v8/src/token.h +1 -0
  315. data/vendor/v8/src/transitions-inl.h +220 -0
  316. data/vendor/v8/src/transitions.cc +160 -0
  317. data/vendor/v8/src/transitions.h +207 -0
  318. data/vendor/v8/src/type-info.cc +182 -181
  319. data/vendor/v8/src/type-info.h +31 -19
  320. data/vendor/v8/src/unicode-inl.h +62 -106
  321. data/vendor/v8/src/unicode.cc +57 -67
  322. data/vendor/v8/src/unicode.h +45 -91
  323. data/vendor/v8/src/uri.js +57 -29
  324. data/vendor/v8/src/utils.h +105 -5
  325. data/vendor/v8/src/v8-counters.cc +54 -11
  326. data/vendor/v8/src/v8-counters.h +134 -19
  327. data/vendor/v8/src/v8.cc +29 -29
  328. data/vendor/v8/src/v8.h +1 -0
  329. data/vendor/v8/src/v8conversions.cc +26 -22
  330. data/vendor/v8/src/v8globals.h +56 -43
  331. data/vendor/v8/src/v8natives.js +83 -30
  332. data/vendor/v8/src/v8threads.cc +42 -21
  333. data/vendor/v8/src/v8threads.h +4 -1
  334. data/vendor/v8/src/v8utils.cc +9 -93
  335. data/vendor/v8/src/v8utils.h +37 -33
  336. data/vendor/v8/src/variables.cc +6 -3
  337. data/vendor/v8/src/variables.h +6 -13
  338. data/vendor/v8/src/version.cc +2 -2
  339. data/vendor/v8/src/vm-state-inl.h +11 -0
  340. data/vendor/v8/src/x64/assembler-x64-inl.h +39 -8
  341. data/vendor/v8/src/x64/assembler-x64.cc +78 -64
  342. data/vendor/v8/src/x64/assembler-x64.h +38 -33
  343. data/vendor/v8/src/x64/builtins-x64.cc +105 -7
  344. data/vendor/v8/src/x64/code-stubs-x64.cc +790 -413
  345. data/vendor/v8/src/x64/code-stubs-x64.h +10 -106
  346. data/vendor/v8/src/x64/codegen-x64.cc +210 -8
  347. data/vendor/v8/src/x64/codegen-x64.h +20 -1
  348. data/vendor/v8/src/x64/deoptimizer-x64.cc +336 -75
  349. data/vendor/v8/src/x64/disasm-x64.cc +15 -0
  350. data/vendor/v8/src/x64/frames-x64.h +0 -14
  351. data/vendor/v8/src/x64/full-codegen-x64.cc +293 -270
  352. data/vendor/v8/src/x64/ic-x64.cc +153 -251
  353. data/vendor/v8/src/x64/lithium-codegen-x64.cc +1379 -531
  354. data/vendor/v8/src/x64/lithium-codegen-x64.h +67 -23
  355. data/vendor/v8/src/x64/lithium-gap-resolver-x64.cc +2 -2
  356. data/vendor/v8/src/x64/lithium-x64.cc +349 -289
  357. data/vendor/v8/src/x64/lithium-x64.h +460 -250
  358. data/vendor/v8/src/x64/macro-assembler-x64.cc +350 -177
  359. data/vendor/v8/src/x64/macro-assembler-x64.h +67 -49
  360. data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +46 -33
  361. data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +2 -3
  362. data/vendor/v8/src/x64/stub-cache-x64.cc +484 -653
  363. data/vendor/v8/src/zone-inl.h +9 -27
  364. data/vendor/v8/src/zone.cc +5 -5
  365. data/vendor/v8/src/zone.h +53 -27
  366. data/vendor/v8/test/benchmarks/testcfg.py +5 -0
  367. data/vendor/v8/test/cctest/cctest.cc +4 -0
  368. data/vendor/v8/test/cctest/cctest.gyp +3 -1
  369. data/vendor/v8/test/cctest/cctest.h +57 -9
  370. data/vendor/v8/test/cctest/cctest.status +15 -15
  371. data/vendor/v8/test/cctest/test-accessors.cc +26 -0
  372. data/vendor/v8/test/cctest/test-alloc.cc +22 -30
  373. data/vendor/v8/test/cctest/test-api.cc +1943 -314
  374. data/vendor/v8/test/cctest/test-assembler-arm.cc +133 -13
  375. data/vendor/v8/test/cctest/test-assembler-ia32.cc +1 -1
  376. data/vendor/v8/test/cctest/test-assembler-mips.cc +12 -0
  377. data/vendor/v8/test/cctest/test-ast.cc +4 -2
  378. data/vendor/v8/test/cctest/test-compiler.cc +61 -29
  379. data/vendor/v8/test/cctest/test-dataflow.cc +2 -2
  380. data/vendor/v8/test/cctest/test-debug.cc +212 -33
  381. data/vendor/v8/test/cctest/test-decls.cc +257 -11
  382. data/vendor/v8/test/cctest/test-dictionary.cc +24 -10
  383. data/vendor/v8/test/cctest/test-disasm-arm.cc +118 -1
  384. data/vendor/v8/test/cctest/test-disasm-ia32.cc +3 -2
  385. data/vendor/v8/test/cctest/test-flags.cc +14 -1
  386. data/vendor/v8/test/cctest/test-func-name-inference.cc +7 -4
  387. data/vendor/v8/test/cctest/test-global-object.cc +51 -0
  388. data/vendor/v8/test/cctest/test-hashing.cc +32 -23
  389. data/vendor/v8/test/cctest/test-heap-profiler.cc +131 -77
  390. data/vendor/v8/test/cctest/test-heap.cc +1084 -143
  391. data/vendor/v8/test/cctest/test-list.cc +1 -1
  392. data/vendor/v8/test/cctest/test-liveedit.cc +3 -2
  393. data/vendor/v8/test/cctest/test-lockers.cc +12 -13
  394. data/vendor/v8/test/cctest/test-log.cc +10 -8
  395. data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +2 -2
  396. data/vendor/v8/test/cctest/test-mark-compact.cc +44 -22
  397. data/vendor/v8/test/cctest/test-object-observe.cc +434 -0
  398. data/vendor/v8/test/cctest/test-parsing.cc +86 -39
  399. data/vendor/v8/test/cctest/test-platform-linux.cc +6 -0
  400. data/vendor/v8/test/cctest/test-platform-win32.cc +7 -0
  401. data/vendor/v8/test/cctest/test-random.cc +5 -4
  402. data/vendor/v8/test/cctest/test-regexp.cc +137 -101
  403. data/vendor/v8/test/cctest/test-serialize.cc +150 -230
  404. data/vendor/v8/test/cctest/test-sockets.cc +1 -1
  405. data/vendor/v8/test/cctest/test-spaces.cc +139 -0
  406. data/vendor/v8/test/cctest/test-strings.cc +736 -74
  407. data/vendor/v8/test/cctest/test-thread-termination.cc +10 -11
  408. data/vendor/v8/test/cctest/test-threads.cc +4 -4
  409. data/vendor/v8/test/cctest/test-utils.cc +16 -0
  410. data/vendor/v8/test/cctest/test-weakmaps.cc +7 -3
  411. data/vendor/v8/test/cctest/testcfg.py +64 -5
  412. data/vendor/v8/test/es5conform/testcfg.py +5 -0
  413. data/vendor/v8/test/message/message.status +1 -1
  414. data/vendor/v8/test/message/overwritten-builtins.out +3 -0
  415. data/vendor/v8/test/message/testcfg.py +89 -8
  416. data/vendor/v8/test/message/try-catch-finally-no-message.out +26 -26
  417. data/vendor/v8/test/mjsunit/accessor-map-sharing.js +18 -2
  418. data/vendor/v8/test/mjsunit/allocation-site-info.js +126 -0
  419. data/vendor/v8/test/mjsunit/array-bounds-check-removal.js +62 -1
  420. data/vendor/v8/test/mjsunit/array-iteration.js +1 -1
  421. data/vendor/v8/test/mjsunit/array-literal-transitions.js +2 -0
  422. data/vendor/v8/test/mjsunit/array-natives-elements.js +317 -0
  423. data/vendor/v8/test/mjsunit/array-reduce.js +8 -8
  424. data/vendor/v8/test/mjsunit/array-slice.js +12 -0
  425. data/vendor/v8/test/mjsunit/array-store-and-grow.js +4 -1
  426. data/vendor/v8/test/mjsunit/assert-opt-and-deopt.js +1 -1
  427. data/vendor/v8/test/mjsunit/bugs/bug-2337.js +53 -0
  428. data/vendor/v8/test/mjsunit/compare-known-objects-slow.js +69 -0
  429. data/vendor/v8/test/mjsunit/compiler/alloc-object-huge.js +3 -1
  430. data/vendor/v8/test/mjsunit/compiler/inline-accessors.js +368 -0
  431. data/vendor/v8/test/mjsunit/compiler/inline-arguments.js +87 -1
  432. data/vendor/v8/test/mjsunit/compiler/inline-closures.js +49 -0
  433. data/vendor/v8/test/mjsunit/compiler/inline-construct.js +55 -43
  434. data/vendor/v8/test/mjsunit/compiler/inline-literals.js +39 -0
  435. data/vendor/v8/test/mjsunit/compiler/multiply-add.js +69 -0
  436. data/vendor/v8/test/mjsunit/compiler/optimized-closures.js +57 -0
  437. data/vendor/v8/test/mjsunit/compiler/parallel-proto-change.js +44 -0
  438. data/vendor/v8/test/mjsunit/compiler/property-static.js +69 -0
  439. data/vendor/v8/test/mjsunit/compiler/proto-chain-constant.js +55 -0
  440. data/vendor/v8/test/mjsunit/compiler/proto-chain-load.js +44 -0
  441. data/vendor/v8/test/mjsunit/compiler/regress-gvn.js +3 -2
  442. data/vendor/v8/test/mjsunit/compiler/regress-or.js +6 -2
  443. data/vendor/v8/test/mjsunit/compiler/rotate.js +224 -0
  444. data/vendor/v8/test/mjsunit/compiler/uint32.js +173 -0
  445. data/vendor/v8/test/mjsunit/count-based-osr.js +2 -1
  446. data/vendor/v8/test/mjsunit/d8-os.js +3 -3
  447. data/vendor/v8/test/mjsunit/date-parse.js +3 -0
  448. data/vendor/v8/test/mjsunit/date.js +22 -0
  449. data/vendor/v8/test/mjsunit/debug-break-inline.js +1 -0
  450. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js +22 -12
  451. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized.js +21 -10
  452. data/vendor/v8/test/mjsunit/debug-liveedit-compile-error.js +60 -0
  453. data/vendor/v8/test/mjsunit/debug-liveedit-double-call.js +142 -0
  454. data/vendor/v8/test/mjsunit/debug-liveedit-literals.js +94 -0
  455. data/vendor/v8/test/mjsunit/debug-liveedit-restart-frame.js +153 -0
  456. data/vendor/v8/test/mjsunit/debug-multiple-breakpoints.js +1 -1
  457. data/vendor/v8/test/mjsunit/debug-script-breakpoints-closure.js +67 -0
  458. data/vendor/v8/test/mjsunit/debug-script-breakpoints-nested.js +82 -0
  459. data/vendor/v8/test/mjsunit/debug-script.js +4 -2
  460. data/vendor/v8/test/mjsunit/debug-set-variable-value.js +308 -0
  461. data/vendor/v8/test/mjsunit/debug-stepout-scope-part1.js +190 -0
  462. data/vendor/v8/test/mjsunit/debug-stepout-scope-part2.js +83 -0
  463. data/vendor/v8/test/mjsunit/debug-stepout-scope-part3.js +80 -0
  464. data/vendor/v8/test/mjsunit/debug-stepout-scope-part4.js +80 -0
  465. data/vendor/v8/test/mjsunit/debug-stepout-scope-part5.js +77 -0
  466. data/vendor/v8/test/mjsunit/debug-stepout-scope-part6.js +79 -0
  467. data/vendor/v8/test/mjsunit/debug-stepout-scope-part7.js +79 -0
  468. data/vendor/v8/test/mjsunit/{debug-stepout-scope.js → debug-stepout-scope-part8.js} +0 -189
  469. data/vendor/v8/test/mjsunit/delete-non-configurable.js +74 -0
  470. data/vendor/v8/test/mjsunit/deopt-minus-zero.js +56 -0
  471. data/vendor/v8/test/mjsunit/elements-kind.js +6 -4
  472. data/vendor/v8/test/mjsunit/elements-length-no-holey.js +33 -0
  473. data/vendor/v8/test/mjsunit/elements-transition-hoisting.js +46 -19
  474. data/vendor/v8/test/mjsunit/error-accessors.js +54 -0
  475. data/vendor/v8/test/mjsunit/error-constructors.js +1 -14
  476. data/vendor/v8/test/mjsunit/error-tostring.js +8 -0
  477. data/vendor/v8/test/mjsunit/eval-stack-trace.js +204 -0
  478. data/vendor/v8/test/mjsunit/external-array.js +364 -1
  479. data/vendor/v8/test/mjsunit/fast-array-length.js +37 -0
  480. data/vendor/v8/test/mjsunit/fast-non-keyed.js +113 -0
  481. data/vendor/v8/test/mjsunit/fast-prototype.js +117 -0
  482. data/vendor/v8/test/mjsunit/function-call.js +14 -18
  483. data/vendor/v8/test/mjsunit/fuzz-natives-part1.js +230 -0
  484. data/vendor/v8/test/mjsunit/fuzz-natives-part2.js +229 -0
  485. data/vendor/v8/test/mjsunit/fuzz-natives-part3.js +229 -0
  486. data/vendor/v8/test/mjsunit/{fuzz-natives.js → fuzz-natives-part4.js} +12 -2
  487. data/vendor/v8/test/mjsunit/generated-transition-stub.js +218 -0
  488. data/vendor/v8/test/mjsunit/greedy.js +1 -1
  489. data/vendor/v8/test/mjsunit/harmony/block-conflicts.js +2 -1
  490. data/vendor/v8/test/mjsunit/harmony/block-let-crankshaft.js +1 -1
  491. data/vendor/v8/test/mjsunit/harmony/collections.js +69 -11
  492. data/vendor/v8/test/mjsunit/harmony/debug-blockscopes.js +2 -2
  493. data/vendor/v8/test/mjsunit/harmony/module-linking.js +180 -3
  494. data/vendor/v8/test/mjsunit/harmony/module-parsing.js +31 -0
  495. data/vendor/v8/test/mjsunit/harmony/module-recompile.js +87 -0
  496. data/vendor/v8/test/mjsunit/harmony/module-resolution.js +15 -2
  497. data/vendor/v8/test/mjsunit/harmony/object-observe.js +1056 -0
  498. data/vendor/v8/test/mjsunit/harmony/proxies-json.js +178 -0
  499. data/vendor/v8/test/mjsunit/harmony/proxies.js +25 -10
  500. data/vendor/v8/test/mjsunit/json-parser-recursive.js +33 -0
  501. data/vendor/v8/test/mjsunit/json-stringify-recursive.js +52 -0
  502. data/vendor/v8/test/mjsunit/json.js +38 -2
  503. data/vendor/v8/test/mjsunit/json2.js +153 -0
  504. data/vendor/v8/test/mjsunit/limit-locals.js +5 -4
  505. data/vendor/v8/test/mjsunit/manual-parallel-recompile.js +79 -0
  506. data/vendor/v8/test/mjsunit/math-exp-precision.js +64 -0
  507. data/vendor/v8/test/mjsunit/math-floor-negative.js +59 -0
  508. data/vendor/v8/test/mjsunit/math-floor-of-div-minus-zero.js +41 -0
  509. data/vendor/v8/test/mjsunit/math-floor-of-div-nosudiv.js +288 -0
  510. data/vendor/v8/test/mjsunit/math-floor-of-div.js +81 -9
  511. data/vendor/v8/test/mjsunit/{math-floor.js → math-floor-part1.js} +1 -72
  512. data/vendor/v8/test/mjsunit/math-floor-part2.js +76 -0
  513. data/vendor/v8/test/mjsunit/math-floor-part3.js +78 -0
  514. data/vendor/v8/test/mjsunit/math-floor-part4.js +76 -0
  515. data/vendor/v8/test/mjsunit/mirror-object.js +43 -9
  516. data/vendor/v8/test/mjsunit/mjsunit.js +1 -1
  517. data/vendor/v8/test/mjsunit/mjsunit.status +52 -27
  518. data/vendor/v8/test/mjsunit/mul-exhaustive-part1.js +491 -0
  519. data/vendor/v8/test/mjsunit/mul-exhaustive-part10.js +470 -0
  520. data/vendor/v8/test/mjsunit/mul-exhaustive-part2.js +525 -0
  521. data/vendor/v8/test/mjsunit/mul-exhaustive-part3.js +532 -0
  522. data/vendor/v8/test/mjsunit/mul-exhaustive-part4.js +509 -0
  523. data/vendor/v8/test/mjsunit/mul-exhaustive-part5.js +505 -0
  524. data/vendor/v8/test/mjsunit/mul-exhaustive-part6.js +554 -0
  525. data/vendor/v8/test/mjsunit/mul-exhaustive-part7.js +497 -0
  526. data/vendor/v8/test/mjsunit/mul-exhaustive-part8.js +526 -0
  527. data/vendor/v8/test/mjsunit/mul-exhaustive-part9.js +533 -0
  528. data/vendor/v8/test/mjsunit/new-function.js +34 -0
  529. data/vendor/v8/test/mjsunit/numops-fuzz-part1.js +1172 -0
  530. data/vendor/v8/test/mjsunit/numops-fuzz-part2.js +1178 -0
  531. data/vendor/v8/test/mjsunit/numops-fuzz-part3.js +1178 -0
  532. data/vendor/v8/test/mjsunit/numops-fuzz-part4.js +1177 -0
  533. data/vendor/v8/test/mjsunit/object-define-property.js +107 -2
  534. data/vendor/v8/test/mjsunit/override-read-only-property.js +6 -4
  535. data/vendor/v8/test/mjsunit/packed-elements.js +2 -2
  536. data/vendor/v8/test/mjsunit/parse-int-float.js +4 -4
  537. data/vendor/v8/test/mjsunit/pixel-array-rounding.js +1 -1
  538. data/vendor/v8/test/mjsunit/readonly.js +228 -0
  539. data/vendor/v8/test/mjsunit/regexp-capture-3.js +16 -18
  540. data/vendor/v8/test/mjsunit/regexp-capture.js +2 -0
  541. data/vendor/v8/test/mjsunit/regexp-global.js +122 -0
  542. data/vendor/v8/test/mjsunit/regexp-results-cache.js +78 -0
  543. data/vendor/v8/test/mjsunit/regress/regress-1117.js +12 -3
  544. data/vendor/v8/test/mjsunit/regress/regress-1118.js +1 -1
  545. data/vendor/v8/test/mjsunit/regress/regress-115100.js +36 -0
  546. data/vendor/v8/test/mjsunit/regress/regress-1199637.js +1 -3
  547. data/vendor/v8/test/mjsunit/regress/regress-121407.js +1 -1
  548. data/vendor/v8/test/mjsunit/regress/regress-131923.js +30 -0
  549. data/vendor/v8/test/mjsunit/regress/regress-131994.js +70 -0
  550. data/vendor/v8/test/mjsunit/regress/regress-133211.js +35 -0
  551. data/vendor/v8/test/mjsunit/regress/regress-133211b.js +39 -0
  552. data/vendor/v8/test/mjsunit/regress/regress-136048.js +34 -0
  553. data/vendor/v8/test/mjsunit/regress/regress-137768.js +73 -0
  554. data/vendor/v8/test/mjsunit/regress/regress-143967.js +34 -0
  555. data/vendor/v8/test/mjsunit/regress/regress-145201.js +107 -0
  556. data/vendor/v8/test/mjsunit/regress/regress-147497.js +45 -0
  557. data/vendor/v8/test/mjsunit/regress/regress-148378.js +38 -0
  558. data/vendor/v8/test/mjsunit/regress/regress-1563.js +1 -1
  559. data/vendor/v8/test/mjsunit/regress/regress-1591.js +48 -0
  560. data/vendor/v8/test/mjsunit/regress/regress-164442.js +45 -0
  561. data/vendor/v8/test/mjsunit/regress/regress-165637.js +61 -0
  562. data/vendor/v8/test/mjsunit/regress/regress-166379.js +39 -0
  563. data/vendor/v8/test/mjsunit/regress/regress-166553.js +33 -0
  564. data/vendor/v8/test/mjsunit/regress/regress-1692.js +1 -1
  565. data/vendor/v8/test/mjsunit/regress/regress-171641.js +40 -0
  566. data/vendor/v8/test/mjsunit/regress/regress-1980.js +1 -1
  567. data/vendor/v8/test/mjsunit/regress/regress-2073.js +99 -0
  568. data/vendor/v8/test/mjsunit/regress/regress-2119.js +36 -0
  569. data/vendor/v8/test/mjsunit/regress/regress-2156.js +39 -0
  570. data/vendor/v8/test/mjsunit/regress/regress-2163.js +70 -0
  571. data/vendor/v8/test/mjsunit/regress/regress-2170.js +58 -0
  572. data/vendor/v8/test/mjsunit/regress/regress-2172.js +35 -0
  573. data/vendor/v8/test/mjsunit/regress/regress-2185-2.js +145 -0
  574. data/vendor/v8/test/mjsunit/regress/regress-2185.js +38 -0
  575. data/vendor/v8/test/mjsunit/regress/regress-2186.js +49 -0
  576. data/vendor/v8/test/mjsunit/regress/regress-2193.js +58 -0
  577. data/vendor/v8/test/mjsunit/regress/regress-2219.js +32 -0
  578. data/vendor/v8/test/mjsunit/regress/regress-2225.js +65 -0
  579. data/vendor/v8/test/mjsunit/regress/regress-2226.js +36 -0
  580. data/vendor/v8/test/mjsunit/regress/regress-2234.js +41 -0
  581. data/vendor/v8/test/mjsunit/regress/regress-2243.js +31 -0
  582. data/vendor/v8/test/mjsunit/regress/regress-2249.js +33 -0
  583. data/vendor/v8/test/mjsunit/regress/regress-2250.js +68 -0
  584. data/vendor/v8/test/mjsunit/regress/regress-2261.js +113 -0
  585. data/vendor/v8/test/mjsunit/regress/regress-2263.js +30 -0
  586. data/vendor/v8/test/mjsunit/regress/regress-2284.js +32 -0
  587. data/vendor/v8/test/mjsunit/regress/regress-2285.js +32 -0
  588. data/vendor/v8/test/mjsunit/regress/regress-2286.js +32 -0
  589. data/vendor/v8/test/mjsunit/regress/regress-2289.js +34 -0
  590. data/vendor/v8/test/mjsunit/regress/regress-2291.js +36 -0
  591. data/vendor/v8/test/mjsunit/regress/regress-2294.js +70 -0
  592. data/vendor/v8/test/mjsunit/regress/regress-2296.js +40 -0
  593. data/vendor/v8/test/mjsunit/regress/regress-2315.js +40 -0
  594. data/vendor/v8/test/mjsunit/regress/regress-2318.js +66 -0
  595. data/vendor/v8/test/mjsunit/regress/regress-2322.js +36 -0
  596. data/vendor/v8/test/mjsunit/regress/regress-2326.js +54 -0
  597. data/vendor/v8/test/mjsunit/regress/regress-2336.js +53 -0
  598. data/vendor/v8/test/mjsunit/regress/regress-2339.js +59 -0
  599. data/vendor/v8/test/mjsunit/regress/regress-2346.js +123 -0
  600. data/vendor/v8/test/mjsunit/regress/regress-2373.js +29 -0
  601. data/vendor/v8/test/mjsunit/regress/regress-2374.js +33 -0
  602. data/vendor/v8/test/mjsunit/regress/regress-2398.js +41 -0
  603. data/vendor/v8/test/mjsunit/regress/regress-2410.js +36 -0
  604. data/vendor/v8/test/mjsunit/regress/regress-2416.js +75 -0
  605. data/vendor/v8/test/mjsunit/regress/regress-2419.js +37 -0
  606. data/vendor/v8/test/mjsunit/regress/regress-2433.js +36 -0
  607. data/vendor/v8/test/mjsunit/regress/regress-2437.js +156 -0
  608. data/vendor/v8/test/mjsunit/regress/regress-2438.js +52 -0
  609. data/vendor/v8/test/mjsunit/regress/regress-2443.js +129 -0
  610. data/vendor/v8/test/mjsunit/regress/regress-2444.js +120 -0
  611. data/vendor/v8/test/mjsunit/regress/regress-2489.js +50 -0
  612. data/vendor/v8/test/mjsunit/regress/regress-2499.js +40 -0
  613. data/vendor/v8/test/mjsunit/regress/regress-334.js +1 -1
  614. data/vendor/v8/test/mjsunit/regress/regress-492.js +39 -1
  615. data/vendor/v8/test/mjsunit/regress/regress-builtin-array-op.js +38 -0
  616. data/vendor/v8/test/mjsunit/regress/regress-cnlt-elements.js +43 -0
  617. data/vendor/v8/test/mjsunit/regress/regress-cnlt-enum-indices.js +45 -0
  618. data/vendor/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js +46 -0
  619. data/vendor/v8/test/mjsunit/regress/regress-convert-enum.js +60 -0
  620. data/vendor/v8/test/mjsunit/regress/regress-convert-enum2.js +46 -0
  621. data/vendor/v8/test/mjsunit/regress/regress-convert-transition.js +40 -0
  622. data/vendor/v8/test/mjsunit/regress/regress-crbug-119926.js +3 -1
  623. data/vendor/v8/test/mjsunit/regress/regress-crbug-125148.js +90 -0
  624. data/vendor/v8/test/mjsunit/regress/regress-crbug-134055.js +63 -0
  625. data/vendor/v8/test/mjsunit/regress/regress-crbug-134609.js +59 -0
  626. data/vendor/v8/test/mjsunit/regress/regress-crbug-135008.js +45 -0
  627. data/vendor/v8/test/mjsunit/regress/regress-crbug-135066.js +55 -0
  628. data/vendor/v8/test/mjsunit/regress/regress-crbug-137689.js +47 -0
  629. data/vendor/v8/test/mjsunit/regress/regress-crbug-138887.js +48 -0
  630. data/vendor/v8/test/mjsunit/regress/regress-crbug-140083.js +44 -0
  631. data/vendor/v8/test/mjsunit/regress/regress-crbug-142087.js +38 -0
  632. data/vendor/v8/test/mjsunit/regress/regress-crbug-142218.js +44 -0
  633. data/vendor/v8/test/mjsunit/regress/regress-crbug-145961.js +39 -0
  634. data/vendor/v8/test/mjsunit/regress/regress-crbug-146910.js +33 -0
  635. data/vendor/v8/test/mjsunit/regress/regress-crbug-147475.js +48 -0
  636. data/vendor/v8/test/mjsunit/regress/regress-crbug-148376.js +35 -0
  637. data/vendor/v8/test/mjsunit/regress/regress-crbug-150545.js +53 -0
  638. data/vendor/v8/test/mjsunit/regress/regress-crbug-150729.js +39 -0
  639. data/vendor/v8/test/mjsunit/regress/regress-crbug-157019.js +54 -0
  640. data/vendor/v8/test/mjsunit/regress/regress-crbug-157520.js +38 -0
  641. data/vendor/v8/test/mjsunit/regress/regress-crbug-158185.js +39 -0
  642. data/vendor/v8/test/mjsunit/regress/regress-crbug-160010.js +35 -0
  643. data/vendor/v8/test/mjsunit/regress/regress-crbug-162085.js +71 -0
  644. data/vendor/v8/test/mjsunit/regress/regress-crbug-168545.js +34 -0
  645. data/vendor/v8/test/mjsunit/regress/regress-crbug-170856.js +33 -0
  646. data/vendor/v8/test/mjsunit/regress/regress-crbug-172345.js +34 -0
  647. data/vendor/v8/test/mjsunit/regress/regress-crbug-173974.js +36 -0
  648. data/vendor/v8/test/mjsunit/regress/regress-crbug-18639.js +9 -5
  649. data/vendor/v8/test/mjsunit/regress/regress-debug-code-recompilation.js +2 -1
  650. data/vendor/v8/test/mjsunit/regress/regress-deep-proto.js +45 -0
  651. data/vendor/v8/test/mjsunit/regress/regress-delete-empty-double.js +40 -0
  652. data/vendor/v8/test/mjsunit/regress/regress-iteration-order.js +42 -0
  653. data/vendor/v8/test/mjsunit/regress/regress-json-stringify-gc.js +41 -0
  654. data/vendor/v8/test/mjsunit/regress/regress-latin-1.js +78 -0
  655. data/vendor/v8/test/mjsunit/regress/regress-load-elements.js +49 -0
  656. data/vendor/v8/test/mjsunit/regress/regress-observe-empty-double-array.js +38 -0
  657. data/vendor/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js +37 -0
  658. data/vendor/v8/test/mjsunit/shift-for-integer-div.js +59 -0
  659. data/vendor/v8/test/mjsunit/stack-traces-gc.js +119 -0
  660. data/vendor/v8/test/mjsunit/stack-traces-overflow.js +122 -0
  661. data/vendor/v8/test/mjsunit/stack-traces.js +39 -1
  662. data/vendor/v8/test/mjsunit/str-to-num.js +7 -2
  663. data/vendor/v8/test/mjsunit/strict-mode.js +36 -11
  664. data/vendor/v8/test/mjsunit/string-charcodeat.js +3 -0
  665. data/vendor/v8/test/mjsunit/string-natives.js +72 -0
  666. data/vendor/v8/test/mjsunit/string-split.js +17 -0
  667. data/vendor/v8/test/mjsunit/testcfg.py +76 -6
  668. data/vendor/v8/test/mjsunit/tools/tickprocessor.js +4 -1
  669. data/vendor/v8/test/mjsunit/try-finally-continue.js +72 -0
  670. data/vendor/v8/test/mjsunit/typed-array-slice.js +61 -0
  671. data/vendor/v8/test/mjsunit/unbox-double-arrays.js +2 -0
  672. data/vendor/v8/test/mjsunit/uri.js +12 -0
  673. data/vendor/v8/test/mjsunit/with-readonly.js +4 -2
  674. data/vendor/v8/test/mozilla/mozilla.status +19 -113
  675. data/vendor/v8/test/mozilla/testcfg.py +122 -3
  676. data/vendor/v8/test/preparser/preparser.status +5 -0
  677. data/vendor/v8/test/preparser/strict-identifiers.pyt +1 -1
  678. data/vendor/v8/test/preparser/testcfg.py +101 -5
  679. data/vendor/v8/test/sputnik/sputnik.status +1 -1
  680. data/vendor/v8/test/sputnik/testcfg.py +5 -0
  681. data/vendor/v8/test/test262/README +2 -2
  682. data/vendor/v8/test/test262/test262.status +13 -36
  683. data/vendor/v8/test/test262/testcfg.py +102 -8
  684. data/vendor/v8/tools/android-build.sh +0 -0
  685. data/vendor/v8/tools/android-ll-prof.sh +69 -0
  686. data/vendor/v8/tools/android-run.py +109 -0
  687. data/vendor/v8/tools/android-sync.sh +105 -0
  688. data/vendor/v8/tools/bash-completion.sh +0 -0
  689. data/vendor/v8/tools/check-static-initializers.sh +0 -0
  690. data/vendor/v8/tools/common-includes.sh +15 -22
  691. data/vendor/v8/tools/disasm.py +4 -4
  692. data/vendor/v8/tools/fuzz-harness.sh +0 -0
  693. data/vendor/v8/tools/gen-postmortem-metadata.py +6 -8
  694. data/vendor/v8/tools/grokdump.py +404 -129
  695. data/vendor/v8/tools/gyp/v8.gyp +105 -43
  696. data/vendor/v8/tools/linux-tick-processor +5 -5
  697. data/vendor/v8/tools/ll_prof.py +75 -15
  698. data/vendor/v8/tools/merge-to-branch.sh +2 -2
  699. data/vendor/v8/tools/plot-timer-events +70 -0
  700. data/vendor/v8/tools/plot-timer-events.js +510 -0
  701. data/vendor/v8/tools/presubmit.py +1 -0
  702. data/vendor/v8/tools/push-to-trunk.sh +14 -4
  703. data/vendor/v8/tools/run-llprof.sh +69 -0
  704. data/vendor/v8/tools/run-tests.py +372 -0
  705. data/vendor/v8/tools/run-valgrind.py +1 -1
  706. data/vendor/v8/tools/status-file-converter.py +39 -0
  707. data/vendor/v8/tools/test-server.py +224 -0
  708. data/vendor/v8/tools/test-wrapper-gypbuild.py +13 -16
  709. data/vendor/v8/tools/test.py +10 -19
  710. data/vendor/v8/tools/testrunner/README +174 -0
  711. data/vendor/v8/tools/testrunner/__init__.py +26 -0
  712. data/vendor/v8/tools/testrunner/local/__init__.py +26 -0
  713. data/vendor/v8/tools/testrunner/local/commands.py +153 -0
  714. data/vendor/v8/tools/testrunner/local/execution.py +182 -0
  715. data/vendor/v8/tools/testrunner/local/old_statusfile.py +460 -0
  716. data/vendor/v8/tools/testrunner/local/progress.py +238 -0
  717. data/vendor/v8/tools/testrunner/local/statusfile.py +145 -0
  718. data/vendor/v8/tools/testrunner/local/testsuite.py +187 -0
  719. data/vendor/v8/tools/testrunner/local/utils.py +108 -0
  720. data/vendor/v8/tools/testrunner/local/verbose.py +99 -0
  721. data/vendor/v8/tools/testrunner/network/__init__.py +26 -0
  722. data/vendor/v8/tools/testrunner/network/distro.py +90 -0
  723. data/vendor/v8/tools/testrunner/network/endpoint.py +124 -0
  724. data/vendor/v8/tools/testrunner/network/network_execution.py +253 -0
  725. data/vendor/v8/tools/testrunner/network/perfdata.py +120 -0
  726. data/vendor/v8/tools/testrunner/objects/__init__.py +26 -0
  727. data/vendor/v8/tools/testrunner/objects/context.py +50 -0
  728. data/vendor/v8/tools/testrunner/objects/output.py +60 -0
  729. data/vendor/v8/tools/testrunner/objects/peer.py +80 -0
  730. data/vendor/v8/tools/testrunner/objects/testcase.py +83 -0
  731. data/vendor/v8/tools/testrunner/objects/workpacket.py +90 -0
  732. data/vendor/v8/tools/testrunner/server/__init__.py +26 -0
  733. data/vendor/v8/tools/testrunner/server/compression.py +111 -0
  734. data/vendor/v8/tools/testrunner/server/constants.py +51 -0
  735. data/vendor/v8/tools/testrunner/server/daemon.py +147 -0
  736. data/vendor/v8/tools/testrunner/server/local_handler.py +119 -0
  737. data/vendor/v8/tools/testrunner/server/main.py +245 -0
  738. data/vendor/v8/tools/testrunner/server/presence_handler.py +120 -0
  739. data/vendor/v8/tools/testrunner/server/signatures.py +63 -0
  740. data/vendor/v8/tools/testrunner/server/status_handler.py +112 -0
  741. data/vendor/v8/tools/testrunner/server/work_handler.py +150 -0
  742. data/vendor/v8/tools/tick-processor.html +168 -0
  743. data/vendor/v8/tools/tickprocessor-driver.js +5 -3
  744. data/vendor/v8/tools/tickprocessor.js +58 -15
  745. metadata +534 -30
  746. data/patches/add-freebsd9-and-freebsd10-to-gyp-GetFlavor.patch +0 -11
  747. data/patches/do-not-imply-vfp3-and-armv7.patch +0 -44
  748. data/patches/fPIC-on-x64.patch +0 -14
  749. data/vendor/v8/src/liveobjectlist-inl.h +0 -126
  750. data/vendor/v8/src/liveobjectlist.cc +0 -2631
  751. data/vendor/v8/src/liveobjectlist.h +0 -319
  752. data/vendor/v8/test/mjsunit/mul-exhaustive.js +0 -4629
  753. data/vendor/v8/test/mjsunit/numops-fuzz.js +0 -4609
  754. data/vendor/v8/test/mjsunit/regress/regress-1969.js +0 -5045
data/vendor/v8/src/ia32/code-stubs-ia32.cc (excerpt)

@@ -34,12 +34,37 @@
 #include "isolate.h"
 #include "jsregexp.h"
 #include "regexp-macro-assembler.h"
+#include "runtime.h"
 #include "stub-cache.h"
 #include "codegen.h"

 namespace v8 {
 namespace internal {

+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { edx, ecx };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->stack_parameter_count_ = NULL;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { eax, ebx };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+}
+
+
 #define __ ACCESS_MASM(masm)

 void ToNumberStub::Generate(MacroAssembler* masm) {
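The two InitializeInterfaceDescriptor methods added above are part of the Hydrogen-generated code stub infrastructure that landed between 3.11 and 3.16: each stub now publishes its calling convention (which registers carry its parameters) plus the handler to invoke when the stub misses and has to bail out. A rough stand-in model of that pattern, with simplified types that are assumptions rather than the real V8 classes:

```cpp
// Stand-in model of the descriptor pattern (simplified, assumed types;
// not the actual V8 3.16 classes or ia32 register definitions).
#include <cstdio>

using Register = const char*;   // real code uses machine registers
using Handler = void (*)();     // real code stores a code entry address

struct CodeStubInterfaceDescriptor {
  int register_param_count_ = 0;
  const Register* register_params_ = nullptr;
  Handler deoptimization_handler_ = nullptr;
};

void KeyedLoadMiss() { std::puts("bail out to the runtime"); }

// Mirrors KeyedLoadFastElementStub above: receiver in edx, key in ecx,
// misses routed to a runtime handler.
void InitKeyedLoadDescriptor(CodeStubInterfaceDescriptor* d) {
  static const Register regs[] = { "edx", "ecx" };
  d->register_param_count_ = 2;
  d->register_params_ = regs;
  d->deoptimization_handler_ = &KeyedLoadMiss;
}

int main() {
  CodeStubInterfaceDescriptor d;
  InitKeyedLoadDescriptor(&d);
  std::printf("%d register params, first in %s\n",
              d.register_param_count_, d.register_params_[0]);
  return 0;
}
```

The point of the indirection is that a stub's body can then be generated from its descriptor, which is what the new code-stubs-hydrogen.cc (file 75 in the list) does, rather than every stub being hand-written assembly per architecture.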
@@ -66,9 +91,13 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in esi.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
   __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);

+  __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
   // Get the function info from the stack.
   __ mov(edx, Operand(esp, 1 * kPointerSize));

@@ -76,12 +105,12 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

-  // Compute the function map in the current global context and set that
+  // Compute the function map in the current native context and set that
   // as the map of the allocated object.
-  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
-  __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
+  __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);

   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
@@ -94,11 +123,20 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
   __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
   __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
-         Immediate(factory->undefined_value()));

   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ test(ebx, ebx);
+    __ j(not_zero, &check_optimized, Label::kNear);
+  }
+  __ bind(&install_unoptimized);
+  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
+         Immediate(factory->undefined_value()));
   __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
   __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
@@ -106,6 +144,68 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);

+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+  // ecx holds native context, ebx points to fixed array of 3-element entries
+  // (native context, optimized code, literals).
+  // Map must never be empty, so check the first elements.
+  Label install_optimized;
+  // Speculatively move code object into edx.
+  __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
+  __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
+  __ j(equal, &install_optimized);
+
+  // Iterate through the rest of map backwards. edx holds an index as a Smi.
+  Label loop;
+  Label restore;
+  __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ bind(&loop);
+  // Do not double check first entry.
+  __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ j(equal, &restore);
+  __ sub(edx, Immediate(Smi::FromInt(
+      SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
+  __ j(not_equal, &loop, Label::kNear);
+  // Hit: fetch the optimized code.
+  __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+  // TODO(fschneider): Idea: store proper code pointers in the optimized code
+  // map and either unmangle them on marking or do nothing as the whole map is
+  // discarded on major GC anyway.
+  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
+
+  // Now link a function into a list of optimized functions.
+  __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
+  // No need for write barrier as JSFunction (eax) is in the new space.
+
+  __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
+  // Store JSFunction (eax) into edx before issuing write barrier as
+  // it clobbers all the registers passed.
+  __ mov(edx, eax);
+  __ RecordWriteContextSlot(
+      ecx,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      edx,
+      ebx,
+      kDontSaveFPRegs);
+
+  // Return and remove the on-stack parameter.
+  __ ret(1 * kPointerSize);
+
+  __ bind(&restore);
+  // Restore SharedFunctionInfo into edx.
+  __ mov(edx, Operand(esp, 1 * kPointerSize));
+  __ jmp(&install_unoptimized);
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ pop(ecx);  // Temporarily remove return address.
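The scan above walks the SharedFunctionInfo's optimized-code map: a FixedArray of (native context, optimized code, literals) triples, with the first entry checked directly and the rest scanned from the back. The same lookup sketched as plain C++ over stand-in types (V8's real map is a tagged array indexed with Smis, so the names below are illustrative only):

```cpp
// Sketch of the optimized-code-map scan (stand-in types, not V8 source).
#include <cstddef>

struct Entry {
  const void* native_context;  // key: context the code was optimized for
  const void* code;            // cached optimized code object
  const void* literals;        // literals array for that context
};

// Returns cached optimized code for ctx, or nullptr, which corresponds
// to the assembly's install_unoptimized path.
const void* FindOptimizedCode(const Entry* map, std::size_t count,
                              const void* ctx) {
  if (count == 0) return nullptr;
  if (map[0].native_context == ctx) return map[0].code;  // first entry
  for (std::size_t i = count; i-- > 1;) {  // then backwards, skipping 0
    if (map[i].native_context == ctx) return map[i].code;
  }
  return nullptr;
}
```

On a hit, the stub installs the cached code entry and links the closure into the context's OPTIMIZED_FUNCTIONS_LIST; only the context slot needs a write barrier, since the freshly allocated closure is still in new space.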
@@ -142,8 +242,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);

   // Copy the global object from the previous context.
-  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
+  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);

   // Initialize the rest of the slots to undefined.
   __ mov(ebx, factory->undefined_value());
@@ -186,9 +286,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ mov(FieldOperand(eax, Context::kLengthOffset),
          Immediate(Smi::FromInt(length)));

-  // If this block context is nested in the global context we get a smi
+  // If this block context is nested in the native context we get a smi
   // sentinel instead of a function. The block context should get the
-  // canonical empty function of the global context as its closure which
+  // canonical empty function of the native context as its closure which
   // we still have to look up.
   Label after_sentinel;
   __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
@@ -198,7 +298,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
     __ Assert(equal, message);
   }
   __ mov(ecx, GlobalObjectOperand());
-  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+  __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
   __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
   __ bind(&after_sentinel);

@@ -208,8 +308,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);

   // Copy the global object from the previous context.
-  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
-  __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
+  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
+  __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);

   // Initialize the rest of the slots to the hole value.
   if (slots_ == 1) {
@@ -236,6 +336,7 @@ static void GenerateFastCloneShallowArrayCommon(
     MacroAssembler* masm,
     int length,
     FastCloneShallowArrayStub::Mode mode,
+    AllocationSiteMode allocation_site_mode,
     Label* fail) {
   // Registers on entry:
   //
@@ -249,11 +350,27 @@ static void GenerateFastCloneShallowArrayCommon(
         ? FixedDoubleArray::SizeFor(length)
         : FixedArray::SizeFor(length);
   }
-  int size = JSArray::kSize + elements_size;
+  int size = JSArray::kSize;
+  int allocation_info_start = size;
+  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+    size += AllocationSiteInfo::kSize;
+  }
+  size += elements_size;

   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);
+
+  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+    __ mov(FieldOperand(eax, allocation_info_start),
+           Immediate(Handle<Map>(masm->isolate()->heap()->
+                                 allocation_site_info_map())));
+    __ mov(FieldOperand(eax, allocation_info_start + kPointerSize), ecx);
+  }

   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -267,7 +384,11 @@ static void GenerateFastCloneShallowArrayCommon(
   // Get hold of the elements array of the boilerplate and setup the
   // elements pointer in the resulting object.
   __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
-  __ lea(edx, Operand(eax, JSArray::kSize));
+  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+    __ lea(edx, Operand(eax, JSArray::kSize + AllocationSiteInfo::kSize));
+  } else {
+    __ lea(edx, Operand(eax, JSArray::kSize));
+  }
   __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);

   // Copy the elements array.
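Taken together, the last two hunks change the single-allocation layout from [JSArray | elements] to [JSArray | AllocationSiteInfo | elements] when allocation sites are tracked. The offset arithmetic restated as ordinary C++, with made-up sizes (the real constants depend on pointer width and are not reproduced here):

```cpp
// Layout sketch for the stub's single new-space allocation. The sizes
// are illustrative assumptions, not the real ia32 constants.
#include <cstdio>

int main() {
  const int kJSArraySize = 16;            // JSArray header
  const int kAllocationSiteInfoSize = 8;  // site-info map + payload
  const int elements_size = 64;           // FixedArray::SizeFor(length)
  const bool track_allocation_site = true;

  int size = kJSArraySize;
  int allocation_info_start = size;       // site info follows the header
  if (track_allocation_site) size += kAllocationSiteInfoSize;
  size += elements_size;                  // elements always come last

  // Mirrors the __ lea above: elements start after the optional record.
  int elements_start = track_allocation_site
      ? kJSArraySize + kAllocationSiteInfoSize
      : kJSArraySize;

  std::printf("allocate %d bytes; site info at %d, elements at %d\n",
              size, allocation_info_start, elements_start);
  return 0;
}
```

Keeping everything in one allocation preserves the original design goal noted in the comment: a single new-space limit check instead of one per object.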
@@ -322,15 +443,17 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
   __ CheckMap(ebx, factory->fixed_cow_array_map(),
               &check_fast_elements, DONT_DO_SMI_CHECK);
-  GenerateFastCloneShallowArrayCommon(masm, 0,
-                                      COPY_ON_WRITE_ELEMENTS, &slow_case);
+  GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
+                                      allocation_site_mode_,
+                                      &slow_case);
   __ ret(3 * kPointerSize);

   __ bind(&check_fast_elements);
   __ CheckMap(ebx, factory->fixed_array_map(),
               &double_elements, DONT_DO_SMI_CHECK);
-  GenerateFastCloneShallowArrayCommon(masm, length_,
-                                      CLONE_ELEMENTS, &slow_case);
+  GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
+                                      allocation_site_mode_,
+                                      &slow_case);
   __ ret(3 * kPointerSize);

   __ bind(&double_elements);
@@ -359,7 +482,10 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
     __ pop(ecx);
   }

-  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+                                      allocation_site_mode_,
+                                      &slow_case);
+
   // Return and remove the on-stack parameters.
   __ ret(3 * kPointerSize);

@@ -660,6 +786,12 @@ class FloatingPointHelper : public AllStatic {
660
786
  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
661
787
  Label* non_int32,
662
788
  Register scratch);
789
+
790
+ static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
791
+ Label* non_int32,
792
+ XMMRegister operand,
793
+ Register scratch,
794
+ XMMRegister xmm_scratch);
663
795
  };
664
796
 
665
797
 
@@ -680,11 +812,20 @@ static void IntegerConvert(MacroAssembler* masm,
680
812
  // Get exponent alone in scratch2.
681
813
  __ mov(scratch2, scratch);
682
814
  __ and_(scratch2, HeapNumber::kExponentMask);
815
+ __ shr(scratch2, HeapNumber::kExponentShift);
816
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
817
+ // Load ecx with zero. We use this either for the final shift or
818
+ // for the answer.
819
+ __ xor_(ecx, ecx);
820
+ // If the exponent is above 83, the number contains no significant
821
+ // bits in the range 0..2^31, so the result is zero.
822
+ static const uint32_t kResultIsZeroExponent = 83;
823
+ __ cmp(scratch2, Immediate(kResultIsZeroExponent));
824
+ __ j(above, &done);
683
825
  if (use_sse3) {
684
826
  CpuFeatures::Scope scope(SSE3);
685
827
  // Check whether the exponent is too big for a 64 bit signed integer.
686
- static const uint32_t kTooBigExponent =
687
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
828
+ static const uint32_t kTooBigExponent = 63;
688
829
  __ cmp(scratch2, Immediate(kTooBigExponent));
689
830
  __ j(greater_equal, conversion_failure);
690
831
  // Load x87 register with heap number.
@@ -696,15 +837,11 @@ static void IntegerConvert(MacroAssembler* masm,
696
837
  __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
697
838
  __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
698
839
  } else {
699
- // Load ecx with zero. We use this either for the final shift or
700
- // for the answer.
701
- __ xor_(ecx, ecx);
702
840
  // Check whether the exponent matches a 32 bit signed int that cannot be
703
841
  // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
704
842
  // exponent is 30 (biased). This is the exponent that we are fastest at and
705
843
  // also the highest exponent we can handle here.
706
- const uint32_t non_smi_exponent =
707
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
844
+ const uint32_t non_smi_exponent = 30;
708
845
  __ cmp(scratch2, Immediate(non_smi_exponent));
709
846
  // If we have a match of the int32-but-not-Smi exponent then skip some
710
847
  // logic.
@@ -716,8 +853,7 @@ static void IntegerConvert(MacroAssembler* masm,
716
853
  {
717
854
  // Handle a big exponent. The only reason we have this code is that the
718
855
  // >>> operator has a tendency to generate numbers with an exponent of 31.
719
- const uint32_t big_non_smi_exponent =
720
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
856
+ const uint32_t big_non_smi_exponent = 31;
721
857
  __ cmp(scratch2, Immediate(big_non_smi_exponent));
722
858
  __ j(not_equal, conversion_failure);
723
859
  // We have the big exponent, typically from >>>. This means the number is
@@ -746,19 +882,8 @@ static void IntegerConvert(MacroAssembler* masm,
746
882
  }
747
883
 
748
884
  __ bind(&normal_exponent);
749
- // Exponent word in scratch, exponent part of exponent word in scratch2.
750
- // Zero in ecx.
751
- // We know the exponent is smaller than 30 (biased). If it is less than
752
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
753
- // it rounds to zero.
754
- const uint32_t zero_exponent =
755
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
756
- __ sub(scratch2, Immediate(zero_exponent));
757
- // ecx already has a Smi zero.
758
- __ j(less, &done, Label::kNear);
759
-
760
- // We have a shifted exponent between 0 and 30 in scratch2.
761
- __ shr(scratch2, HeapNumber::kExponentShift);
885
+ // Exponent word in scratch, exponent in scratch2. Zero in ecx.
886
+ // We know that 0 <= exponent < 30.
762
887
  __ mov(ecx, Immediate(30));
763
888
  __ sub(ecx, scratch2);
764
889
 
@@ -793,8 +918,8 @@ static void IntegerConvert(MacroAssembler* masm,
793
918
  __ jmp(&done, Label::kNear);
794
919
  __ bind(&negative);
795
920
  __ sub(ecx, scratch2);
796
- __ bind(&done);
797
921
  }
922
+ __ bind(&done);
798
923
  }
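Note: after this rewrite, scratch2 holds the unbiased exponent throughout (one shr plus a bias subtraction instead of comparisons against pre-shifted constants), the exponent-above-83 early-out is hoisted in front of both the SSE3 and x87 paths, and the single &done binding moves so every path reaches it. A hedged C++ sketch of the same truncation done with plain bit operations on the standard IEEE-754 layout (not V8 code):

    #include <stdint.h>
    #include <string.h>

    // ECMA-style ToInt32 truncation via exponent/mantissa bits, mirroring
    // the IntegerConvert fast path. NaN and +/-Infinity fall out as 0 via
    // the exponent check (their biased exponent is maximal, > 83 unbiased).
    int32_t TruncateDoubleToInt32(double value) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      int32_t exponent = (int32_t)((bits >> 52) & 0x7FF) - 1023;  // unbias
      if (exponent > 83) return 0;  // no significant bits below 2^31 remain
      if (exponent < 0) return 0;   // magnitude below 1.0 rounds to zero
      uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
      int shift = 52 - exponent;    // negative for exponents above 52
      uint32_t low = shift >= 0 ? (uint32_t)(mantissa >> shift)
                                : (uint32_t)(mantissa << -shift);
      return (bits >> 63) ? (int32_t)(0u - low) : (int32_t)low;
    }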


@@ -1117,16 +1242,17 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
  }


+ void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
+ }
+
+
  void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx); // Save return address.
  __ push(edx);
  __ push(eax);
  // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx); // Push return address.

@@ -1135,7 +1261,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ TailCallExternalReference(
  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
  masm->isolate()),
- 5,
+ 3,
  1);
  }
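Note: the separate pushes of op_ and operands_type_ disappear because the refactored stub now round-trips all of its state through MinorKey() alone, so the runtime patch helper takes 3 arguments instead of 5. An illustrative packing of such state into one smi-sized key (a hypothetical bit layout, not V8's actual one):

    #include <cstdint>

    // Hypothetical layout: op in bits 0..5, overwrite mode in 6..7,
    // left/right/result types in 8..19, platform bit (SSE3 here) in 20.
    uint32_t EncodeBinaryOpMinorKey(int op, int mode, int left_type,
                                    int right_type, int result_type,
                                    bool platform_bit) {
      return (uint32_t)(op & 0x3F) |
             (uint32_t)(mode & 0x3) << 6 |
             (uint32_t)(left_type & 0xF) << 8 |
             (uint32_t)(right_type & 0xF) << 12 |
             (uint32_t)(result_type & 0xF) << 16 |
             (uint32_t)platform_bit << 20;
    }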

@@ -1145,11 +1271,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
  __ pop(ecx); // Save return address.
  // Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx); // Push return address.

@@ -1158,73 +1280,22 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
  __ TailCallExternalReference(
  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
  masm->isolate()),
- 5,
+ 3,
  1);
  }


- void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
- }
-
-
- void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
- }
-
-
- void BinaryOpStub::GenerateSmiCode(
+ static void BinaryOpStub_GenerateSmiCode(
  MacroAssembler* masm,
  Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division. Use eax, ebx for those.
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
+ if (op == Token::DIV || op == Token::MOD) {
  left = eax;
  right = ebx;
  __ mov(ebx, eax);
@@ -1237,7 +1308,7 @@ void BinaryOpStub::GenerateSmiCode(
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
+ switch (op) {
  case Token::BIT_OR:
  // Perform the operation into eax and smi check the result. Preserve
  // eax in case the result is not a smi.
@@ -1281,7 +1352,7 @@ void BinaryOpStub::GenerateSmiCode(
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
- switch (op_) {
+ switch (op) {
  case Token::BIT_OR:
  // Nothing to do.
  break;
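Note: the smi fast path operates directly on tagged values. Since an ia32 smi is the 31-bit payload shifted left by one, tagged addition and subtraction are ordinary machine arithmetic, and signed overflow is the only failure mode, at which point the stub falls through to use_fp_on_smis. A sketch using a GCC/Clang builtin (that builtin is an assumption; any overflow check would do):

    #include <cstdint>

    // Returns false when the result leaves the smi range, i.e. when the
    // stub would take the use_fp_on_smis / heap-number fallback.
    bool SmiAdd(int32_t left_tagged, int32_t right_tagged, int32_t* result) {
      return !__builtin_add_overflow(left_tagged, right_tagged, result);
    }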
@@ -1415,7 +1486,7 @@ void BinaryOpStub::GenerateSmiCode(
  }

  // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op_) {
+ switch (op) {
  case Token::ADD:
  case Token::SUB:
  case Token::MUL:
@@ -1438,9 +1509,9 @@ void BinaryOpStub::GenerateSmiCode(
  // 6. For some operations emit inline code to perform floating point
  // operations on known smis (e.g., if the result of the operation
  // overflowed the smi range).
- if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
+ if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
  __ bind(&use_fp_on_smis);
- switch (op_) {
+ switch (op) {
  // Undo the effects of some operations, and some register moves.
  case Token::SHL:
  // The arguments are saved on the stack, and only used from there.
@@ -1468,8 +1539,8 @@ void BinaryOpStub::GenerateSmiCode(
  }
  __ jmp(&not_smis);
  } else {
- ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
- switch (op_) {
+ ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
+ switch (op) {
  case Token::SHL:
  case Token::SHR: {
  Comment perform_float(masm, "-- Perform float operation on smis");
@@ -1480,13 +1551,13 @@ void BinaryOpStub::GenerateSmiCode(
  // Store the result in the HeapNumber and return.
  // It's OK to overwrite the arguments on the stack because we
  // are about to return.
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
  __ mov(Operand(esp, 1 * kPointerSize), left);
  __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
  __ fild_d(Operand(esp, 1 * kPointerSize));
  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
  } else {
- ASSERT_EQ(Token::SHL, op_);
+ ASSERT_EQ(Token::SHL, op);
  if (CpuFeatures::IsSupported(SSE2)) {
  CpuFeatures::Scope use_sse2(SSE2);
  __ cvtsi2sd(xmm0, left);
@@ -1508,7 +1579,7 @@ void BinaryOpStub::GenerateSmiCode(
  Comment perform_float(masm, "-- Perform float operation on smis");
  __ bind(&use_fp_on_smis);
  // Restore arguments to edx, eax.
- switch (op_) {
+ switch (op) {
  case Token::ADD:
  // Revert right = right + left.
  __ sub(right, left);
@@ -1534,7 +1605,7 @@ void BinaryOpStub::GenerateSmiCode(
  if (CpuFeatures::IsSupported(SSE2)) {
  CpuFeatures::Scope use_sse2(SSE2);
  FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
+ switch (op) {
  case Token::ADD: __ addsd(xmm0, xmm1); break;
  case Token::SUB: __ subsd(xmm0, xmm1); break;
  case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1544,7 +1615,7 @@ void BinaryOpStub::GenerateSmiCode(
  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
  } else { // SSE2 not available, use FPU.
  FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
+ switch (op) {
  case Token::ADD: __ faddp(1); break;
  case Token::SUB: __ fsubp(1); break;
  case Token::MUL: __ fmulp(1); break;
@@ -1567,7 +1638,7 @@ void BinaryOpStub::GenerateSmiCode(
  // edx and eax.
  Comment done_comment(masm, "-- Enter non-smi code");
  __ bind(&not_smis);
- switch (op_) {
+ switch (op) {
  case Token::BIT_OR:
  case Token::SHL:
  case Token::SAR:
@@ -1614,9 +1685,11 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
  result_type_ == BinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
  } else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
  }
  __ bind(&call_runtime);
  switch (op_) {
@@ -1641,19 +1714,9 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  }


- void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
- }
-
-
  void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.
@@ -1681,6 +1744,11 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  }


+ static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
  // Input:
  // edx: left operand (tagged)
  // eax: right operand (tagged)
@@ -1688,7 +1756,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  // eax: result (tagged)
  void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);

  // Floating point case.
  switch (op_) {
@@ -1701,6 +1769,18 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  Label not_int32;
  if (CpuFeatures::IsSupported(SSE2)) {
  CpuFeatures::Scope use_sse2(SSE2);
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, &not_int32);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, &not_int32);
+ }
  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
  FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
  if (op_ == Token::MOD) {
@@ -1716,13 +1796,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  }
  // Check result type if it is currently Int32.
  if (result_type_ <= BinaryOpIC::INT32) {
- __ cvttsd2si(ecx, Operand(xmm0));
- __ cvtsi2sd(xmm2, ecx);
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, &not_int32);
- __ j(carry, &not_int32);
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_int32, xmm0, ecx, xmm2);
  }
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
  __ ret(0);
  }
@@ -1748,7 +1825,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  default: UNREACHABLE();
  }
  Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ ret(0);
  __ bind(&after_alloc_failure);
@@ -1773,10 +1851,14 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  Label not_floats;
  Label not_int32;
  Label non_smi_result;
+ // We do not check the input arguments here, as any value is
+ // unconditionally truncated to an int32 anyway. To get the
+ // right optimized code, int32 type feedback is just right.
+ bool use_sse3 = platform_specific_bit_;
  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
  &not_floats);
- FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
+ FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3,
  &not_int32);
  switch (op_) {
  case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -1849,44 +1931,24 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {

  switch (op_) {
  case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
  case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
  case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
  case Token::DIV:
  GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
  break;
  case Token::MOD:
- break;
+ return; // Handled above.
  case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
  case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
  case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
  case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
  case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
  case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
  break;
  default:
  UNREACHABLE();
  }
+ GenerateCallRuntime(masm);
  }


@@ -1935,7 +1997,28 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  Label not_floats;
  if (CpuFeatures::IsSupported(SSE2)) {
  CpuFeatures::Scope use_sse2(SSE2);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, &not_floats);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, &not_floats);
+ }
  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+ if (left_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_floats, xmm0, ecx, xmm2);
+ }
+ if (right_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_floats, xmm1, ecx, xmm2);
+ }

  switch (op_) {
  case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -1944,7 +2027,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  case Token::DIV: __ divsd(xmm0, xmm1); break;
  default: UNREACHABLE();
  }
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
  __ ret(0);
  } else { // SSE2 not available, use FPU.
@@ -1961,7 +2044,8 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  default: UNREACHABLE();
  }
  Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ ret(0);
  __ bind(&after_alloc_failure);
@@ -1987,8 +2071,12 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  GenerateRegisterArgsPush(masm);
  Label not_floats;
  Label non_smi_result;
+ // We do not check the input arguments here, as any value is
+ // unconditionally truncated to an int32 anyway. To get the
+ // right optimized code, int32 type feedback is just right.
+ bool use_sse3 = platform_specific_bit_;
  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
  &not_floats);
  switch (op_) {
  case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -2060,46 +2148,23 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {

  switch (op_) {
  case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
  case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
  case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
  case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
  case Token::MOD:
  GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
  break;
  case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
  case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
  case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
  case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
  case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
  case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
  break;
  default:
  UNREACHABLE();
  }
+ GenerateCallRuntime(masm);
  }


@@ -2128,7 +2193,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  UNREACHABLE();
  }

- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);

  // Floating point case.
  switch (op_) {
@@ -2148,7 +2214,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  case Token::DIV: __ divsd(xmm0, xmm1); break;
  default: UNREACHABLE();
  }
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
  __ ret(0);
  } else { // SSE2 not available, use FPU.
@@ -2165,7 +2231,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  default: UNREACHABLE();
  }
  Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ ret(0);
  __ bind(&after_alloc_failure);
@@ -2186,8 +2253,9 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  case Token::SHL:
  case Token::SHR: {
  Label non_smi_result;
+ bool use_sse3 = platform_specific_bit_;
  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
  &call_runtime);
  switch (op_) {
  case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -2254,48 +2322,26 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  // result.
  __ bind(&call_runtime);
  switch (op_) {
- case Token::ADD: {
+ case Token::ADD:
  GenerateAddStrings(masm);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
+ // Fall through.
  case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
  case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
  case Token::DIV:
  GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
  break;
  case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
  case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
  case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
  case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
  case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
  case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
  case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
  break;
  default:
  UNREACHABLE();
  }
+ GenerateCallRuntime(masm);
  }


@@ -2331,11 +2377,10 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
  }


- void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
+ static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
  Label skip_allocation;
- OverwriteMode mode = mode_;
  switch (mode) {
  case OVERWRITE_LEFT: {
  // If the argument in edx is already an object, we skip the
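Note: turning GenerateHeapResultAllocation into a free function means the overwrite mode is now threaded through explicitly instead of read from a stub field. The mode decides whether the result HeapNumber may be recycled from an operand; an illustrative pseudo-implementation with descriptive names (not V8's helpers):

    // OverwriteMode semantics, sketched for a host-side allocator:
    //   kNoOverwrite    - always allocate a fresh box for the result.
    //   kOverwriteLeft  - reuse the left operand's box if it is a heap
    //                     number; otherwise allocate.
    //   kOverwriteRight - same, for the right operand.
    enum Mode { kNoOverwrite, kOverwriteLeft, kOverwriteRight };

    void* AllocateResultBox(Mode mode, void* left_box, void* right_box,
                            void* (*fresh)()) {
      if (mode == kOverwriteLeft && left_box != nullptr) return left_box;
      if (mode == kOverwriteRight && right_box != nullptr) return right_box;
      return fresh();
    }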
@@ -2428,6 +2473,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {

  __ bind(&loaded);
  } else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
  if (CpuFeatures::IsSupported(SSE4_1)) {
  CpuFeatures::Scope sse4_scope(SSE4_1);
  __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
@@ -2500,6 +2546,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  __ fstp(0);
  __ ret(kPointerSize);
  } else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
  __ Ret();
  }
@@ -2512,6 +2559,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  if (tagged) {
  __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
  } else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
  __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
  __ sub(esp, Immediate(kDoubleSize));
  __ movdbl(Operand(esp, 0), xmm1);
@@ -2526,6 +2574,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  if (tagged) {
  __ ret(kPointerSize);
  } else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
  __ Ret();

@@ -2558,6 +2607,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  ExternalReference(RuntimeFunction(), masm->isolate());
  __ TailCallExternalReference(runtime, 1, 1);
  } else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
  __ bind(&runtime_call_clear_stack);
  __ bind(&runtime_call);
  __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
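Note: each UNTAGGED branch now opens a CpuFeatures::Scope for SSE2 before touching xmm registers; the scope is the assembler's RAII guard that such instructions may be emitted there. A minimal sketch of the assumed pattern (in the real assembler, construction also toggles an enabled-features mask that emit-time checks consult):

    #include <cassert>

    enum CpuFeature { SSE2, SSE3, SSE4_1, CMOV };

    class FeatureScope {
     public:
      // Assumed semantics: asserting availability on entry; the feature is
      // considered usable for the lifetime of the scope object.
      explicit FeatureScope(CpuFeature feature) { assert(Supported(feature)); }
     private:
      static bool Supported(CpuFeature) { return true; }  // placeholder probe
    };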
@@ -2847,16 +2897,24 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
  void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
  Label* non_int32,
  Register scratch) {
- __ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
- __ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm1, xmm2);
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, xmm2);
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, xmm2);
+ }
+
+
+ void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register scratch,
+ XMMRegister xmm_scratch) {
+ __ cvttsd2si(scratch, Operand(operand));
+ __ cvtsi2sd(xmm_scratch, scratch);
+ __ pcmpeqd(xmm_scratch, operand);
+ __ movmskps(scratch, xmm_scratch);
+ // Two least significant bits should be both set.
+ __ not_(scratch);
+ __ test(scratch, Immediate(3));
  __ j(not_zero, non_int32);
- __ j(carry, non_int32);
  }
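Note: the old check compared doubles with ucomisd after a cvttsd2si/cvtsi2sd round trip; the new one compares the raw bit patterns with pcmpeqd and inspects both 32-bit lanes via movmskps, which additionally rejects -0.0 (whose round trip yields +0.0, a different bit pattern). The same test written with SSE2 intrinsics (a sketch, not V8 code):

    #include <emmintrin.h>  // SSE2

    // True iff the double in the low lane of 'operand' is exactly an int32.
    // The bit-pattern comparison also rejects -0.0 and out-of-range values,
    // whose truncation yields the 0x80000000 "indefinite" result.
    bool IsInt32(__m128d operand) {
      int truncated = _mm_cvttsd_si32(operand);                    // cvttsd2si
      __m128d back = _mm_cvtsi32_sd(_mm_setzero_pd(), truncated);  // cvtsi2sd
      __m128i eq = _mm_cmpeq_epi32(_mm_castpd_si128(back),
                                   _mm_castpd_si128(operand));     // pcmpeqd
      int mask = _mm_movemask_ps(_mm_castsi128_ps(eq));            // movmskps
      return (mask & 3) == 3;  // both 32-bit halves of the double match
    }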


@@ -3111,10 +3169,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
  // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
  __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
  __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
  // FSCALE calculates st(0) * 2^st(1)
  __ fscale(); // 2^X, rnd(X)
- __ fstp(1);
+ __ fstp(1); // 2^X
  // Bail out to runtime in case of exceptions in the status word.
  __ fnstsw_ax();
  __ test_b(eax, 0x5F); // We check for all but precision exception.
@@ -3138,21 +3196,28 @@ void MathPowStub::Generate(MacroAssembler* masm) {
  __ movsd(double_scratch2, double_result); // Load double_exponent with 1.

  // Get absolute value of exponent.
- Label no_neg, while_true, no_multiply;
+ Label no_neg, while_true, while_false;
  __ test(scratch, scratch);
  __ j(positive, &no_neg, Label::kNear);
  __ neg(scratch);
  __ bind(&no_neg);

- __ bind(&while_true);
+ __ j(zero, &while_false, Label::kNear);
  __ shr(scratch, 1);
- __ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ bind(&no_multiply);
+ // Above condition means CF==0 && ZF==0. This means that the
+ // bit that has been shifted out is 0 and the result is not 0.
+ __ j(above, &while_true, Label::kNear);
+ __ movsd(double_result, double_scratch);
+ __ j(zero, &while_false, Label::kNear);

+ __ bind(&while_true);
+ __ shr(scratch, 1);
  __ mulsd(double_scratch, double_scratch);
+ __ j(above, &while_true, Label::kNear);
+ __ mulsd(double_result, double_scratch);
  __ j(not_zero, &while_true);

+ __ bind(&while_false);
  // scratch has the original value of the exponent - if the exponent is
  // negative, return 1/result.
  __ test(exponent, exponent);
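Note: the rewritten loop is square-and-multiply with two refinements: it exits as soon as no exponent bits remain (instead of iterating over all bit positions) and it seeds the result from the first set bit with a move rather than a multiply. Functionally it computes the following (a C++ sketch, not a literal transcription of the register scheduling):

    double PowInt(double base, int exponent) {
      unsigned bits = exponent < 0 ? 0u - (unsigned)exponent
                                   : (unsigned)exponent;
      double result = 1.0;
      double scratch = base;
      while (bits != 0) {
        if (bits & 1) result *= scratch;    // multiply in the shifted-out bit
        bits >>= 1;
        if (bits != 0) scratch *= scratch;  // skip the final redundant square
      }
      return exponent < 0 ? 1.0 / result : result;
    }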
@@ -3207,6 +3272,128 @@ void MathPowStub::Generate(MacroAssembler* masm) {
  }


+ void ArrayLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->length_symbol()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+ }
+
+
+ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_symbol()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+ }
+
+
+ void StringLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->length_symbol()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
+ support_wrapper_);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+ }
+
+
+ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ //
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+
+ Label miss;
+
+ Register receiver = edx;
+ Register value = eax;
+ Register scratch = ebx;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->length_symbol()));
+ __ j(not_equal, &miss);
+ }
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch);
+ __ push(receiver);
+ __ push(value);
+ __ push(scratch); // return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::GenerateStoreMiss(masm, kind());
+ }
+
+
  void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in edx and the parameter count is in eax.
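Note: StoreArrayLengthStub ends with the standard tail-call shuffle: the return address is popped, the two runtime arguments pushed, and the return address pushed back, so the runtime entry finds (receiver, value) directly under it, matching TailCallExternalReference(ref, 2, 1). A host-side simulation of that stack rewrite:

    #include <cstdint>
    #include <vector>

    // Top of stack is the back of the vector, as with the push/pop above.
    void PrepareTailCall(std::vector<uintptr_t>* stack,
                         uintptr_t receiver, uintptr_t value) {
      uintptr_t return_address = stack->back();  // __ pop(scratch)
      stack->pop_back();
      stack->push_back(receiver);                // __ push(receiver)
      stack->push_back(value);                   // __ push(value)
      stack->push_back(return_address);          // __ push(scratch)
    }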

@@ -3359,10 +3546,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
  // esp[0] = mapped parameter count (tagged)
  // esp[8] = parameter count (tagged)
  // esp[12] = address of receiver argument
- // Get the arguments boilerplate from the current (global) context into edi.
+ // Get the arguments boilerplate from the current native context into edi.
  Label has_mapped_parameters, copy;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
  __ mov(ebx, Operand(esp, 0 * kPointerSize));
  __ test(ebx, ebx);
  __ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -3510,7 +3697,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
  __ bind(&runtime);
  __ pop(eax); // Remove saved parameter count.
  __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
  }


@@ -3552,9 +3739,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // Do the allocation of both objects in one go.
  __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);

- // Get the arguments boilerplate from the current (global) context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
  const int offset =
  Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
  __ mov(edi, Operand(edi, offset));
@@ -3673,7 +3860,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(edx, Immediate(2)); // edx was a smi.
  // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
+ __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize);
  __ j(above, &runtime);

  // ecx: RegExp data (FixedArray)
@@ -3877,9 +4064,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  __ test(ecx, ecx);
  __ j(zero, &setup_two_byte, Label::kNear);
  __ SmiUntag(esi);
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
  __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
  __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
  __ jmp(&setup_rest, Label::kNear);

@@ -4025,7 +4212,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  }
  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(ebx, kStringEncodingMask);
@@ -4054,8 +4241,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
  // Elements: [Map][Length][..elements..]
  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size,
- ebx, // In: Number of elements (times 2, being a smi)
+ times_pointer_size,
+ ebx, // In: Number of elements as a smi
+ REGISTER_VALUE_IS_SMI,
  eax, // Out: Start of allocation (tagged).
  ecx, // Out: End of allocation.
  edx, // Scratch register
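Note: an ia32 smi stores its value shifted left by one (tag bit zero), so an element count held as a smi can be scaled either by times_half_pointer_size on the still-tagged value or, with the new REGISTER_VALUE_IS_SMI flag, untagged first and scaled by times_pointer_size; both yield count * 4 bytes. A small self-checking sketch (constant name illustrative):

    #include <cassert>
    #include <cstdint>

    const int32_t kPointerSizeBytes = 4;  // ia32

    int32_t BytesFromTaggedLength(int32_t smi) {
      return smi * (kPointerSizeBytes / 2);   // times_half_pointer_size
    }

    int32_t BytesFromUntaggedLength(int32_t smi) {
      return (smi >> 1) * kPointerSizeBytes;  // untag, then times_pointer_size
    }

    int main() {
      int32_t tagged_seven = 7 << 1;  // the smi encoding of 7
      assert(BytesFromTaggedLength(tagged_seven) ==
             BytesFromUntaggedLength(tagged_seven));
    }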
@@ -4067,11 +4255,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  // Set empty properties FixedArray.
  // Set elements to point to FixedArray allocated right after the JSArray.
  // Interleave operations for better latency.
- __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
  Factory* factory = masm->isolate()->factory();
  __ mov(ecx, Immediate(factory->empty_fixed_array()));
  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
@@ -4095,15 +4283,15 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  Immediate(factory->fixed_array_map()));
  // Set length.
  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
- // Fill contents of fixed-array with the-hole.
+ // Fill contents of fixed-array with undefined.
  __ SmiUntag(ecx);
- __ mov(edx, Immediate(factory->the_hole_value()));
+ __ mov(edx, Immediate(factory->undefined_value()));
  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
  // eax: JSArray.
  // ecx: Number of elements to fill.
  // ebx: Start of elements in FixedArray.
- // edx: the hole.
+ // edx: undefined.
  Label loop;
  __ test(ecx, ecx);
  __ bind(&loop);
@@ -4238,30 +4426,59 @@ static int NegativeComparisonResult(Condition cc) {
  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
  }

- void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));

+ static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(not_equal, fail);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+ }
+
+
+ static void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
+ }
+
+
+ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
  Label check_unequal_objects;
+ Condition cc = GetCondition();

- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected smi operands.");
- }
+ Label miss;
+ CheckInputType(masm, edx, left_, &miss);
+ CheckInputType(masm, eax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+ __ sub(edx, eax); // Return on the result of the subtraction.
+ __ j(no_overflow, &smi_done, Label::kNear);
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
+ __ bind(&non_smi);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.
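Note: the smi fast case compares by subtracting the tagged operands. If the subtraction overflows, the computed difference has the wrong sign; NOT flips the sign bit, and because tagged smis are even the overflowed difference is never -1, so ~diff can never be zero. In C++ terms (the overflow builtin is a GCC/Clang assumption):

    #include <cstdint>

    // Returns <0, 0 or >0, like the stub's return value in eax.
    int32_t SmiCompare(int32_t left_tagged, int32_t right_tagged) {
      int32_t diff;
      if (!__builtin_sub_overflow(left_tagged, right_tagged, &diff))
        return diff;
      return ~diff;  // wrong-signed on overflow; ~ corrects the sign bit
    }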
@@ -4273,67 +4490,61 @@ void CompareStub::Generate(MacroAssembler* masm) {
  __ cmp(eax, edx);
  __ j(not_equal, &not_identical);

- if (cc_ != equal) {
+ if (cc != equal) {
  // Check for undefined. undefined OP undefined is false even though
  // undefined == undefined.
  Label check_for_nan;
  __ cmp(edx, masm->isolate()->factory()->undefined_value());
  __ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
  __ ret(0);
  __ bind(&check_for_nan);
  }

  // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
  // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ Set(eax, Immediate(0));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, edx);
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
  __ ret(0);
  } else {
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
+ Label nan;
+ __ j(above_equal, &nan, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, edx);
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
  }

  __ bind(&not_identical);
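Note: in the high 32-bit word of a double, the exponent occupies bits 30..20 (0x7FF00000) and the top mantissa "quiet" bit 51 is bit 19 (0x00080000). Shifting the sign bit out lets a single unsigned compare test "all mask bits set", because the mask is a contiguous run of top bits. A C++ rendering of the check (the mask value is the standard IEEE-754 layout, stated here as an assumption about kQuietNaNHighBitsMask):

    #include <cstdint>
    #include <cstring>

    bool IsQuietNaN(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t high = (uint32_t)(bits >> 32);
      const uint32_t kQuietNaNHighBitsMask = 0x7FF80000u;  // exponent + bit 51
      // add edx, edx ; cmp edx, mask << 1 ; j(above_equal, ...): for a
      // contiguous top-bit mask, x >= mask holds exactly when every mask
      // bit is set in x.
      return (high << 1) >= (kQuietNaNHighBitsMask << 1);
    }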
@@ -4341,7 +4552,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
4341
4552
 
4342
4553
  // Strict equality can quickly decide whether objects are equal.
4343
4554
  // Non-strict object equality is slower, so it is handled later in the stub.
4344
- if (cc_ == equal && strict_) {
4555
+ if (cc == equal && strict()) {
4345
4556
  Label slow; // Fallthrough label.
4346
4557
  Label not_smis;
4347
4558
  // If we're doing a strict equality comparison, we don't have to do
@@ -4412,70 +4623,68 @@ void CompareStub::Generate(MacroAssembler* masm) {
   }

   // Generate the number comparison code.
-  if (include_number_compare_) {
-    Label non_number_comparison;
-    Label unordered;
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope use_sse2(SSE2);
-      CpuFeatures::Scope use_cmov(CMOV);
-
-      FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
-      __ ucomisd(xmm0, xmm1);
+  Label non_number_comparison;
+  Label unordered;
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    CpuFeatures::Scope use_cmov(CMOV);

-      // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered, Label::kNear);
-      // Return a result of -1, 0, or 1, based on EFLAGS.
-      __ mov(eax, 0);  // equal
-      __ mov(ecx, Immediate(Smi::FromInt(1)));
-      __ cmov(above, eax, ecx);
-      __ mov(ecx, Immediate(Smi::FromInt(-1)));
-      __ cmov(below, eax, ecx);
-      __ ret(0);
-    } else {
-      FloatingPointHelper::CheckFloatOperands(
-          masm, &non_number_comparison, ebx);
-      FloatingPointHelper::LoadFloatOperand(masm, eax);
-      FloatingPointHelper::LoadFloatOperand(masm, edx);
-      __ FCmp();
+    FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+    __ ucomisd(xmm0, xmm1);

-      // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered, Label::kNear);
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered, Label::kNear);
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    __ mov(eax, 0);  // equal
+    __ mov(ecx, Immediate(Smi::FromInt(1)));
+    __ cmov(above, eax, ecx);
+    __ mov(ecx, Immediate(Smi::FromInt(-1)));
+    __ cmov(below, eax, ecx);
+    __ ret(0);
+  } else {
+    FloatingPointHelper::CheckFloatOperands(
+        masm, &non_number_comparison, ebx);
+    FloatingPointHelper::LoadFloatOperand(masm, eax);
+    FloatingPointHelper::LoadFloatOperand(masm, edx);
+    __ FCmp();

-      Label below_label, above_label;
-      // Return a result of -1, 0, or 1, based on EFLAGS.
-      __ j(below, &below_label, Label::kNear);
-      __ j(above, &above_label, Label::kNear);
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered, Label::kNear);

-      __ Set(eax, Immediate(0));
-      __ ret(0);
+    Label below_label, above_label;
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    __ j(below, &below_label, Label::kNear);
+    __ j(above, &above_label, Label::kNear);

-      __ bind(&below_label);
-      __ mov(eax, Immediate(Smi::FromInt(-1)));
-      __ ret(0);
+    __ Set(eax, Immediate(0));
+    __ ret(0);

-      __ bind(&above_label);
-      __ mov(eax, Immediate(Smi::FromInt(1)));
-      __ ret(0);
-    }
+    __ bind(&below_label);
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+    __ ret(0);

-    // If one of the numbers was NaN, then the result is always false.
-    // The cc is never not-equal.
-    __ bind(&unordered);
-    ASSERT(cc_ != not_equal);
-    if (cc_ == less || cc_ == less_equal) {
-      __ mov(eax, Immediate(Smi::FromInt(1)));
-    } else {
-      __ mov(eax, Immediate(Smi::FromInt(-1)));
-    }
+    __ bind(&above_label);
+    __ mov(eax, Immediate(Smi::FromInt(1)));
     __ ret(0);
+  }

-    // The number comparison code did not provide a valid result.
-    __ bind(&non_number_comparison);
+  // If one of the numbers was NaN, then the result is always false.
+  // The cc is never not-equal.
+  __ bind(&unordered);
+  ASSERT(cc != not_equal);
+  if (cc == less || cc == less_equal) {
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+  } else {
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
   }
+  __ ret(0);
+
+  // The number comparison code did not provide a valid result.
+  __ bind(&non_number_comparison);

   // Fast negative check for symbol-to-symbol equality.
   Label check_for_strings;
-  if (cc_ == equal) {
+  if (cc == equal) {
     BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
     BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);

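Note on the NaN handling in this hunk: ucomisd sets the parity flag when either operand is NaN, which is why the stub branches on parity_even before trusting EFLAGS. A minimal C++ sketch of the semantics the generated code computes (illustrative only, not V8 source; ordered_less mirrors cc == less/less_equal):

#include <cmath>

// Returns -1/0/1 like the stub; an unordered (NaN) comparison is forced to
// the value that makes the requested relational compare come out false.
static int CompareDoubles(double lhs, double rhs, bool ordered_less) {
  if (std::isnan(lhs) || std::isnan(rhs)) {
    return ordered_less ? 1 : -1;  // unordered: make <, <= (or >, >=) fail
  }
  if (lhs == rhs) return 0;
  return lhs > rhs ? 1 : -1;
}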
@@ -4491,7 +4700,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
                                          &check_unequal_objects);

   // Inline comparison of ASCII strings.
-  if (cc_ == equal) {
+  if (cc == equal) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      edx,
                                                      eax,
@@ -4510,7 +4719,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
 #endif

   __ bind(&check_unequal_objects);
-  if (cc_ == equal && !strict_) {
+  if (cc == equal && !strict()) {
     // Non-strict equality. Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
@@ -4554,11 +4763,11 @@ void CompareStub::Generate(MacroAssembler* masm) {

   // Figure out which native to call and setup the arguments.
   Builtins::JavaScript builtin;
-  if (cc_ == equal) {
-    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  if (cc == equal) {
+    builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   } else {
     builtin = Builtins::COMPARE;
-    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
   }

   // Restore return address on the stack.
@@ -4567,19 +4776,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-

-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
-                                    Label* label,
-                                    Register object,
-                                    Register scratch) {
-  __ JumpIfSmi(object, label);
-  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
-  __ cmp(scratch, kSymbolTag | kStringTag);
-  __ j(not_equal, label);
+  __ bind(&miss);
+  GenerateMiss(masm);
 }

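The removed BranchIfNonSymbol helper relied on a single mask-and-compare over the map's instance type: with kStringTag == 0, one AND against (kIsSymbolMask | kIsNotStringMask) and one CMP against kSymbolTag answer "is a string AND is a symbol" in a single branch. A rough C++ model with placeholder bit values (the real constants live in objects.h):

#include <cstdint>

static bool IsSymbolInstanceType(uint8_t instance_type) {
  const uint8_t kIsNotStringMask = 0x80;  // placeholder value
  const uint8_t kIsSymbolMask = 0x40;     // placeholder value
  const uint8_t kStringTag = 0x00;
  const uint8_t kSymbolTag = 0x40;        // placeholder value
  // Non-strings fail on the high bit; non-symbol strings fail on the symbol
  // bit; only symbol strings compare equal.
  return (instance_type & (kIsSymbolMask | kIsNotStringMask)) ==
         (kSymbolTag | kStringTag);
}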
@@ -4786,10 +4985,17 @@ void CodeStub::GenerateStubsAheadOfTime() {


 void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CEntryStub save_doubles(1, kSaveFPRegs);
+    // Stubs might already be in the snapshot, detect that and don't regenerate,
+    // which would lead to code stub initialization state being messed up.
+    Code* save_doubles_code;
+    if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+      save_doubles_code = *(save_doubles.GetCode());
+    }
+    save_doubles_code->set_is_pregenerated(true);
+    save_doubles_code->GetIsolate()->set_fp_stubs_generated(true);
+  }
 }

@@ -4800,6 +5006,19 @@ void CEntryStub::GenerateAheadOfTime() {
 }


+static void JumpIfOOM(MacroAssembler* masm,
+                      Register value,
+                      Register scratch,
+                      Label* oom_label) {
+  __ mov(scratch, value);
+  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+  STATIC_ASSERT(kFailureTag == 3);
+  __ and_(scratch, 0xf);
+  __ cmp(scratch, 0xf);
+  __ j(equal, oom_label);
+}
+
+
 void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
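The new JumpIfOOM helper leans on the two STATIC_ASSERTs above: with kFailureTag (3) in the low two bits and OUT_OF_MEMORY_EXCEPTION (3) encoded in the next two, an out-of-memory failure is precisely a value whose low nibble is 0xf. The same test in plain C++:

#include <cstdint>

static bool IsOutOfMemoryFailure(uint32_t raw_failure) {
  // 0xf == kFailureTag | (OUT_OF_MEMORY_EXCEPTION << 2), per the asserts above.
  return (raw_failure & 0xf) == 0xf;
}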
@@ -4897,8 +5116,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   __ j(zero, &retry, Label::kNear);

   // Special handling of out of memory exceptions.
-  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
-  __ j(equal, throw_out_of_memory_exception);
+  JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);

   // Retrieve the pending exception and clear the variable.
   __ mov(eax, Operand::StaticVariable(pending_exception_address));
@@ -4980,7 +5198,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   // Set pending exception and eax to out of memory exception.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       isolate);
-  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+  Label already_have_failure;
+  JumpIfOOM(masm, eax, ecx, &already_have_failure);
+  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
+  __ bind(&already_have_failure);
   __ mov(Operand::StaticVariable(pending_exception), eax);
   // Fall through to the next label.

@@ -5324,44 +5545,6 @@ Register InstanceofStub::left() { return eax; }
 Register InstanceofStub::right() { return edx; }


-int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
-  // stubs the never NaN NaN condition is only taken into account if the
-  // condition is equals.
-  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
-  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-  return ConditionField::encode(static_cast<unsigned>(cc_))
-         | RegisterField::encode(false)  // lhs_ and rhs_ are not used
-         | StrictField::encode(strict_)
-         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
-         | IncludeNumberCompareField::encode(include_number_compare_)
-         | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
-  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-  const char* cc_name;
-  switch (cc_) {
-    case less: cc_name = "LT"; break;
-    case greater: cc_name = "GT"; break;
-    case less_equal: cc_name = "LE"; break;
-    case greater_equal: cc_name = "GE"; break;
-    case equal: cc_name = "EQ"; break;
-    case not_equal: cc_name = "NE"; break;
-    default: cc_name = "UnknownCondition"; break;
-  }
-  bool is_equality = cc_ == equal || cc_ == not_equal;
-  stream->Add("CompareStub_%s", cc_name);
-  if (strict_ && is_equality) stream->Add("_STRICT");
-  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
-  if (!include_number_compare_) stream->Add("_NO_NUMBER");
-  if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
 // -------------------------------------------------------------------------
 // StringCharCodeAtGenerator

@@ -5461,10 +5644,10 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
   __ test(code_,
           Immediate(kSmiTagMask |
-                    ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+                    ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
   __ j(not_zero, &slow_case_);

   Factory* factory = masm->isolate()->factory();
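The single test above folds the smi check and the character-range check into one AND: with kSmiTag == 0 and a one-bit tag, any set bit in either the tag position or above the one-byte range makes the result non-zero. A C++ sketch assuming the ia32 smi layout and a 0xff one-byte ceiling (both assumptions, not quoted from V8 headers):

#include <cstdint>

static bool IsOneByteCharCodeSmi(int32_t tagged_value) {
  const int32_t kSmiTagMask = 1;             // low bit is the (zero) smi tag
  const int32_t kMaxOneByteCharCode = 0xff;  // assumed Latin-1 range
  return (tagged_value &
          (kSmiTagMask | (~kMaxOneByteCharCode << 1))) == 0;
}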
@@ -5596,8 +5779,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);

   // Get the two characters forming the new string.
-  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
-  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+  __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));

   // Try to lookup two character string in symbol table. If it is not found
   // just allocate a new one.
@@ -5614,8 +5797,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
   __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
   // Get the two characters forming the new string.
-  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
-  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+  __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
   __ bind(&make_two_character_string_no_reload);
   __ IncrementCounter(counters->string_add_make_two_char(), 1);
   __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
@@ -5623,7 +5806,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ shl(ecx, kBitsPerByte);
   __ or_(ebx, ecx);
   // Set the characters in the new string.
-  __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+  __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
   __ IncrementCounter(counters->string_add_native(), 1);
   __ ret(2 * kPointerSize);

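The shl/or_/mov_w sequence above packs both one-byte characters into a single 16-bit store; on little-endian ia32 the first character lands in the low byte. Equivalent C++ (illustrative helper name):

#include <cstdint>

static uint16_t PackTwoOneByteChars(uint8_t first, uint8_t second) {
  // second << kBitsPerByte, OR'd with first, then stored as one word.
  return static_cast<uint16_t>(first | (second << 8));
}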
@@ -5640,7 +5823,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ and_(ecx, edi);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(ecx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
@@ -5649,7 +5832,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
-  if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
+  __ AssertSmi(ebx);
   __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
   __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
          Immediate(String::kEmptyHashField));
@@ -5668,9 +5851,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   __ xor_(edi, ecx);
-  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
-  __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
-  __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+  STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+  __ and_(edi, kOneByteStringTag | kAsciiDataHintTag);
+  __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag);
   __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
   __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
@@ -5697,10 +5880,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ test_b(ecx, kShortExternalStringMask);
   __ j(not_zero, &call_runtime);
   __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
   __ jmp(&first_prepared, Label::kNear);
   __ bind(&first_is_sequential);
-  __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   __ bind(&first_prepared);

   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
@@ -5718,10 +5901,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ test_b(edi, kShortExternalStringMask);
   __ j(not_zero, &call_runtime);
   __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
   __ jmp(&second_prepared, Label::kNear);
   __ bind(&second_is_sequential);
-  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   __ bind(&second_prepared);

   // Push the addresses of both strings' first characters onto the stack.
@@ -5742,7 +5925,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   // Load first argument's length and first character location. Account for
   // values currently on the stack when fetching arguments from it.
   __ mov(edx, Operand(esp, 4 * kPointerSize));
@@ -6052,7 +6235,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
         temp, temp, &next_probe_pop_mask[i]);

     // Check if the two characters match.
-    __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
     __ and_(temp, 0x0000ffff);
     __ cmp(chars, temp);
     __ j(equal, &found_in_symbol_table);
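The probe above compares candidates by loading the candidate string's first 32-bit word and masking it down to the first two characters, so a single compare tests both bytes against the packed key. A rough C++ model (hypothetical helper, not V8 source):

#include <cstdint>

static bool MatchesTwoCharKey(uint32_t candidate_first_word,
                              uint32_t packed_chars) {
  return (candidate_first_word & 0x0000ffff) == packed_chars;
}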
@@ -6241,7 +6424,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   // string's encoding is wrong because we always have to recheck encoding of
   // the newly created string's parent anyways due to externalized strings.
   Label two_byte_slice, set_slice_header;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(ebx, Immediate(kStringEncodingMask));
   __ j(zero, &two_byte_slice, Label::kNear);
@@ -6280,7 +6463,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   __ j(not_zero, &runtime);
   __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
   // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

   __ bind(&sequential_string);
@@ -6288,7 +6471,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   __ push(edx);
   __ push(edi);
   __ SmiUntag(ecx);
-  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   __ test_b(ebx, kStringEncodingMask);
   __ j(zero, &two_byte_sequential);

@@ -6300,12 +6483,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
   __ pop(esi);
   __ pop(ebx);
   __ SmiUntag(ebx);
-  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));

   // eax: result string
   // ecx: result length
@@ -6430,7 +6613,12 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
   // Compare lengths - strings up to min-length are equal.
   __ bind(&compare_lengths);
   __ test(length_delta, length_delta);
+#ifndef ENABLE_LATIN_1
   __ j(not_zero, &result_not_equal, Label::kNear);
+#else
+  Label length_not_equal;
+  __ j(not_zero, &length_not_equal, Label::kNear);
+#endif

   // Result is EQUAL.
   STATIC_ASSERT(EQUAL == 0);
@@ -6439,8 +6627,19 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
   __ ret(0);

   Label result_greater;
+#ifdef ENABLE_LATIN_1
+  Label result_less;
+  __ bind(&length_not_equal);
+  __ j(greater, &result_greater, Label::kNear);
+  __ jmp(&result_less, Label::kNear);
+#endif
   __ bind(&result_not_equal);
+#ifndef ENABLE_LATIN_1
   __ j(greater, &result_greater, Label::kNear);
+#else
+  __ j(above, &result_greater, Label::kNear);
+  __ bind(&result_less);
+#endif

   // Result is LESS.
   __ Set(eax, Immediate(Smi::FromInt(LESS)));
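The ENABLE_LATIN_1 branch swaps `greater` (signed) for `above` (unsigned) on the character-mismatch path because bytes 0x80-0xff must sort after 0x00-0x7f; a signed byte compare would treat them as negative. A short C++ sketch of the distinction:

#include <cstdint>

// 0xe9 ('é' in Latin-1) must compare greater than 'z' (0x7a); reading the
// bytes as int8_t would make it -23 and invert the result.
static int CompareLatin1Chars(uint8_t a, uint8_t b) {
  return a < b ? -1 : (a > b ? 1 : 0);
}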
@@ -6466,9 +6665,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
   // doesn't need an additional compare.
   __ SmiUntag(length);
   __ lea(left,
-         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+         FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
   __ lea(right,
-         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+         FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
   __ neg(length);
   Register index = length;  // index = -length;

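The neg(length) above sets up the classic negative-index compare loop: both pointers are biased past the last character, the index counts from -length toward zero, and the increment doubles as the loop test. C++ equivalent of the pattern:

#include <cstdint>

static bool RangesEqual(const uint8_t* left_end, const uint8_t* right_end,
                        intptr_t length) {
  // left_end/right_end point one past the data; index is negative.
  for (intptr_t index = -length; index != 0; index++) {
    if (left_end[index] != right_end[index]) return false;
  }
  return true;
}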
@@ -6523,7 +6722,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {


 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SMIS);
+  ASSERT(state_ == CompareIC::SMI);
   Label miss;
   __ mov(ecx, edx);
   __ or_(ecx, eax);
@@ -6549,31 +6748,52 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {


 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+  ASSERT(state_ == CompareIC::HEAP_NUMBER);

   Label generic_stub;
   Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
-  __ mov(ecx, edx);
-  __ and_(ecx, eax);
-  __ JumpIfSmi(ecx, &generic_stub, Label::kNear);

-  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &maybe_undefined1, Label::kNear);
-  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &maybe_undefined2, Label::kNear);
+  if (left_ == CompareIC::SMI) {
+    __ JumpIfNotSmi(edx, &miss);
+  }
+  if (right_ == CompareIC::SMI) {
+    __ JumpIfNotSmi(eax, &miss);
+  }

   // Inlining the double comparison and falling back to the general compare
-  // stub if NaN is involved or SS2 or CMOV is unsupported.
+  // stub if NaN is involved or SSE2 or CMOV is unsupported.
   if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
     CpuFeatures::Scope scope1(SSE2);
     CpuFeatures::Scope scope2(CMOV);

-    // Load left and right operand
-    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    // Load left and right operand.
+    Label done, left, left_smi, right_smi;
+    __ JumpIfSmi(eax, &right_smi, Label::kNear);
+    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+           masm->isolate()->factory()->heap_number_map());
+    __ j(not_equal, &maybe_undefined1, Label::kNear);
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ jmp(&left, Label::kNear);
+    __ bind(&right_smi);
+    __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
+    __ SmiUntag(ecx);
+    __ cvtsi2sd(xmm1, ecx);
+
+    __ bind(&left);
+    __ JumpIfSmi(edx, &left_smi, Label::kNear);
+    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+           masm->isolate()->factory()->heap_number_map());
+    __ j(not_equal, &maybe_undefined2, Label::kNear);
+    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ jmp(&done);
+    __ bind(&left_smi);
+    __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
+    __ SmiUntag(ecx);
+    __ cvtsi2sd(xmm0, ecx);

-    // Compare operands
+    __ bind(&done);
+    // Compare operands.
     __ ucomisd(xmm0, xmm1);

     // Don't base result on EFLAGS when a NaN is involved.
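The new smi fast path mirrors SmiUntag followed by cvtsi2sd: an ia32 smi stores the integer in the upper 31 bits over a zero tag bit, and a 31-bit integer converts to double exactly. Sketch (plain C++, not V8 source):

#include <cstdint>

static double SmiToDouble(int32_t tagged_smi) {
  int32_t untagged = tagged_smi >> 1;    // SmiUntag: arithmetic shift
  return static_cast<double>(untagged);  // cvtsi2sd equivalent, exact
}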
@@ -6587,17 +6807,30 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
     __ mov(ecx, Immediate(Smi::FromInt(-1)));
     __ cmov(below, eax, ecx);
     __ ret(0);
+  } else {
+    __ mov(ecx, edx);
+    __ and_(ecx, eax);
+    __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
+    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+           masm->isolate()->factory()->heap_number_map());
+    __ j(not_equal, &maybe_undefined1, Label::kNear);
+    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+           masm->isolate()->factory()->heap_number_map());
+    __ j(not_equal, &maybe_undefined2, Label::kNear);
   }

   __ bind(&unordered);
-  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
   __ bind(&generic_stub);
+  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+                     CompareIC::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);

   __ bind(&maybe_undefined1);
   if (Token::IsOrderedRelationalCompareOp(op_)) {
     __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
     __ j(not_equal, &miss);
+    __ JumpIfSmi(edx, &unordered);
     __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
     __ j(not_equal, &maybe_undefined2, Label::kNear);
     __ jmp(&unordered);
@@ -6615,7 +6848,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {


 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SYMBOLS);
+  ASSERT(state_ == CompareIC::SYMBOL);
   ASSERT(GetCondition() == equal);

   // Registers containing left and right operands respectively.
@@ -6660,7 +6893,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {


 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::STRINGS);
+  ASSERT(state_ == CompareIC::STRING);
   Label miss;

   bool equality = Token::IsEqualityOp(op_);
@@ -6749,7 +6982,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {


 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::OBJECTS);
+  ASSERT(state_ == CompareIC::OBJECT);
   Label miss;
   __ mov(ecx, edx);
   __ and_(ecx, eax);
@@ -6898,8 +7131,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
   ASSERT(!name.is(r0));
   ASSERT(!name.is(r1));

-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
+  __ AssertString(name);

   __ mov(r1, FieldOperand(elements, kCapacityOffset));
   __ shr(r1, kSmiTagSize);  // convert smi to int
@@ -7073,6 +7305,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
   // StoreArrayLiteralElementStub::Generate
   { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
+  // FastNewClosureStub
+  { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
@@ -7121,6 +7355,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
 }


+bool CodeStub::CanUseFPRegisters() {
+  return CpuFeatures::IsSupported(SSE2);
+}
+
+
 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
 // the value has just been written into the object, now this stub makes sure
 // we keep the GC informed. The word in the object where the value has been
@@ -7208,13 +7447,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
   int argument_count = 3;
   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
-  if (mode == INCREMENTAL_COMPACTION) {
-    __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-    __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0());  // Value.
-  }
+  __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
   __ mov(Operand(esp, 2 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));

@@ -7241,6 +7474,17 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
     Mode mode) {
   Label object_is_black, need_incremental, need_incremental_pop_object;

+  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+  __ and_(regs_.scratch0(), regs_.object());
+  __ mov(regs_.scratch1(),
+         Operand(regs_.scratch0(),
+                 MemoryChunk::kWriteBarrierCounterOffset));
+  __ sub(regs_.scratch1(), Immediate(1));
+  __ mov(Operand(regs_.scratch0(),
+                 MemoryChunk::kWriteBarrierCounterOffset),
+         regs_.scratch1());
+  __ j(negative, &need_incremental);
+
   // Let's look at the color of the object: If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(),
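The new counter check starts by recovering the page header from the object pointer: V8 pages are power-of-two aligned, so clearing the low bits of any address inside a page yields the MemoryChunk that owns it. A C++ sketch under that alignment assumption:

#include <cstdint>

static uintptr_t OwningChunk(uintptr_t object_address, uintptr_t page_size) {
  // page_size must be a power of two; ~(page_size - 1) plays the role of
  // ~Page::kPageAlignmentMask in the stub above.
  return object_address & ~(page_size - 1);
}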
@@ -7397,6 +7641,54 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ ret(0);
 }

+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+  ASSERT(!Serializer::enabled());
+  bool save_fp_regs = CpuFeatures::IsSupported(SSE2);
+  CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+  __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ mov(ebx, MemOperand(ebp, parameter_count_offset));
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  __ pop(ecx);
+  __ lea(esp, MemOperand(esp, ebx, times_pointer_size,
+                         extra_expression_stack_count_ * kPointerSize));
+  __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+  if (entry_hook_ != NULL) {
+    ProfileEntryHookStub stub;
+    masm->CallStub(&stub);
+  }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+  // Ecx is the only volatile register we must save.
+  __ push(ecx);
+
+  // Calculate and push the original stack pointer.
+  __ lea(eax, Operand(esp, kPointerSize));
+  __ push(eax);
+
+  // Calculate and push the function address.
+  __ mov(eax, Operand(eax, 0));
+  __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+  __ push(eax);
+
+  // Call the entry hook.
+  int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
+  __ call(Operand(hook_location, RelocInfo::NONE32));
+  __ add(esp, Immediate(2 * kPointerSize));
+
+  // Restore ecx.
+  __ pop(ecx);
+  __ ret(0);
+}
+
 #undef __

 } } // namespace v8::internal
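For embedders, the ProfileEntryHookStub added above pairs with a function-entry hook registered through the public API; a minimal usage sketch, assuming the FunctionEntryHook signature exposed in this release's include/v8.h:

#include <v8.h>

// Called on entry to generated code; `function` is the code address the stub
// computed, `return_addr_location` points at the return address it pushed.
static void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
  // Record `function` for profiling. Keep this fast and reentrancy-safe.
}

// Assumed setup call; must run before V8 generates any code:
//   v8::V8::SetFunctionEntryHook(MyEntryHook);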