mustang 0.0.1 → 0.1.0

Files changed (560)
  1. data/.rspec +1 -0
  2. data/Isolate +9 -0
  3. data/README.md +6 -12
  4. data/Rakefile +30 -4
  5. data/TODO.md +9 -0
  6. data/ext/v8/extconf.rb +56 -0
  7. data/ext/v8/v8.cpp +37 -0
  8. data/ext/v8/v8_array.cpp +161 -0
  9. data/ext/v8/v8_array.h +17 -0
  10. data/ext/v8/v8_base.cpp +147 -0
  11. data/ext/v8/v8_base.h +23 -0
  12. data/ext/v8/v8_cast.cpp +151 -0
  13. data/ext/v8/v8_cast.h +64 -0
  14. data/ext/v8/v8_context.cpp +174 -0
  15. data/ext/v8/v8_context.h +12 -0
  16. data/ext/v8/v8_date.cpp +61 -0
  17. data/ext/v8/v8_date.h +16 -0
  18. data/ext/v8/v8_errors.cpp +147 -0
  19. data/ext/v8/v8_errors.h +19 -0
  20. data/ext/v8/v8_external.cpp +66 -0
  21. data/ext/v8/v8_external.h +16 -0
  22. data/ext/v8/v8_function.cpp +182 -0
  23. data/ext/v8/v8_function.h +14 -0
  24. data/ext/v8/v8_integer.cpp +70 -0
  25. data/ext/v8/v8_integer.h +16 -0
  26. data/ext/v8/v8_macros.h +30 -0
  27. data/ext/v8/v8_main.cpp +53 -0
  28. data/ext/v8/v8_main.h +13 -0
  29. data/ext/v8/v8_number.cpp +62 -0
  30. data/ext/v8/v8_number.h +16 -0
  31. data/ext/v8/v8_object.cpp +172 -0
  32. data/ext/v8/v8_object.h +17 -0
  33. data/ext/v8/v8_ref.cpp +72 -0
  34. data/ext/v8/v8_ref.h +43 -0
  35. data/ext/v8/v8_regexp.cpp +148 -0
  36. data/ext/v8/v8_regexp.h +16 -0
  37. data/ext/v8/v8_string.cpp +78 -0
  38. data/ext/v8/v8_string.h +16 -0
  39. data/ext/v8/v8_value.cpp +370 -0
  40. data/ext/v8/v8_value.h +19 -0
  41. data/gemspec.yml +2 -1
  42. data/lib/core_ext/class.rb +14 -0
  43. data/lib/core_ext/object.rb +12 -0
  44. data/lib/core_ext/symbol.rb +23 -0
  45. data/lib/mustang.rb +44 -0
  46. data/lib/mustang/context.rb +69 -0
  47. data/lib/mustang/errors.rb +36 -0
  48. data/lib/support/delegated.rb +25 -0
  49. data/lib/v8/array.rb +21 -0
  50. data/lib/v8/context.rb +13 -0
  51. data/lib/v8/date.rb +20 -0
  52. data/lib/v8/error.rb +15 -0
  53. data/lib/v8/external.rb +16 -0
  54. data/lib/v8/function.rb +11 -0
  55. data/lib/v8/integer.rb +16 -0
  56. data/lib/v8/number.rb +16 -0
  57. data/lib/v8/object.rb +66 -0
  58. data/lib/v8/regexp.rb +23 -0
  59. data/lib/v8/string.rb +27 -0
  60. data/mustang.gemspec +3 -0
  61. data/spec/core_ext/class_spec.rb +19 -0
  62. data/spec/core_ext/object_spec.rb +19 -0
  63. data/spec/core_ext/symbol_spec.rb +27 -0
  64. data/spec/fixtures/test1.js +2 -0
  65. data/spec/fixtures/test2.js +2 -0
  66. data/spec/spec_helper.rb +20 -0
  67. data/spec/v8/array_spec.rb +88 -0
  68. data/spec/v8/cast_spec.rb +151 -0
  69. data/spec/v8/context_spec.rb +78 -0
  70. data/spec/v8/data_spec.rb +39 -0
  71. data/spec/v8/date_spec.rb +45 -0
  72. data/spec/v8/empty_spec.rb +27 -0
  73. data/spec/v8/errors_spec.rb +142 -0
  74. data/spec/v8/external_spec.rb +44 -0
  75. data/spec/v8/function_spec.rb +170 -0
  76. data/spec/v8/integer_spec.rb +41 -0
  77. data/spec/v8/main_spec.rb +18 -0
  78. data/spec/v8/null_spec.rb +27 -0
  79. data/spec/v8/number_spec.rb +40 -0
  80. data/spec/v8/object_spec.rb +79 -0
  81. data/spec/v8/primitive_spec.rb +9 -0
  82. data/spec/v8/regexp_spec.rb +65 -0
  83. data/spec/v8/string_spec.rb +48 -0
  84. data/spec/v8/undefined_spec.rb +27 -0
  85. data/spec/v8/value_spec.rb +215 -0
  86. data/vendor/v8/.gitignore +2 -0
  87. data/vendor/v8/AUTHORS +3 -1
  88. data/vendor/v8/ChangeLog +117 -0
  89. data/vendor/v8/SConstruct +334 -53
  90. data/vendor/v8/include/v8-debug.h +21 -11
  91. data/vendor/v8/include/v8-preparser.h +1 -1
  92. data/vendor/v8/include/v8-profiler.h +122 -43
  93. data/vendor/v8/include/v8-testing.h +5 -0
  94. data/vendor/v8/include/v8.h +171 -17
  95. data/vendor/v8/preparser/SConscript +38 -0
  96. data/vendor/v8/preparser/preparser-process.cc +77 -114
  97. data/vendor/v8/samples/shell.cc +232 -46
  98. data/vendor/v8/src/SConscript +29 -5
  99. data/vendor/v8/src/accessors.cc +70 -211
  100. data/vendor/v8/{test/cctest/test-mips.cc → src/allocation-inl.h} +15 -18
  101. data/vendor/v8/src/allocation.cc +0 -82
  102. data/vendor/v8/src/allocation.h +9 -42
  103. data/vendor/v8/src/api.cc +1645 -1156
  104. data/vendor/v8/src/api.h +76 -12
  105. data/vendor/v8/src/apiutils.h +0 -7
  106. data/vendor/v8/src/arguments.h +15 -4
  107. data/vendor/v8/src/arm/assembler-arm-inl.h +10 -9
  108. data/vendor/v8/src/arm/assembler-arm.cc +62 -23
  109. data/vendor/v8/src/arm/assembler-arm.h +76 -11
  110. data/vendor/v8/src/arm/builtins-arm.cc +39 -33
  111. data/vendor/v8/src/arm/code-stubs-arm.cc +1182 -402
  112. data/vendor/v8/src/arm/code-stubs-arm.h +20 -54
  113. data/vendor/v8/src/arm/codegen-arm.cc +159 -106
  114. data/vendor/v8/src/arm/codegen-arm.h +6 -6
  115. data/vendor/v8/src/arm/constants-arm.h +16 -1
  116. data/vendor/v8/src/arm/cpu-arm.cc +7 -5
  117. data/vendor/v8/src/arm/debug-arm.cc +6 -4
  118. data/vendor/v8/src/arm/deoptimizer-arm.cc +51 -14
  119. data/vendor/v8/src/arm/disasm-arm.cc +47 -15
  120. data/vendor/v8/src/arm/frames-arm.h +1 -1
  121. data/vendor/v8/src/arm/full-codegen-arm.cc +724 -408
  122. data/vendor/v8/src/arm/ic-arm.cc +90 -85
  123. data/vendor/v8/src/arm/lithium-arm.cc +140 -69
  124. data/vendor/v8/src/arm/lithium-arm.h +161 -46
  125. data/vendor/v8/src/arm/lithium-codegen-arm.cc +567 -297
  126. data/vendor/v8/src/arm/lithium-codegen-arm.h +21 -9
  127. data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +2 -0
  128. data/vendor/v8/src/arm/macro-assembler-arm.cc +457 -96
  129. data/vendor/v8/src/arm/macro-assembler-arm.h +115 -18
  130. data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +20 -13
  131. data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +1 -0
  132. data/vendor/v8/src/arm/simulator-arm.cc +184 -101
  133. data/vendor/v8/src/arm/simulator-arm.h +26 -21
  134. data/vendor/v8/src/arm/stub-cache-arm.cc +450 -467
  135. data/vendor/v8/src/arm/virtual-frame-arm.cc +14 -12
  136. data/vendor/v8/src/arm/virtual-frame-arm.h +11 -8
  137. data/vendor/v8/src/array.js +35 -18
  138. data/vendor/v8/src/assembler.cc +186 -92
  139. data/vendor/v8/src/assembler.h +106 -69
  140. data/vendor/v8/src/ast-inl.h +5 -0
  141. data/vendor/v8/src/ast.cc +46 -35
  142. data/vendor/v8/src/ast.h +107 -50
  143. data/vendor/v8/src/atomicops.h +2 -0
  144. data/vendor/v8/src/atomicops_internals_mips_gcc.h +169 -0
  145. data/vendor/v8/src/bootstrapper.cc +649 -399
  146. data/vendor/v8/src/bootstrapper.h +94 -27
  147. data/vendor/v8/src/builtins.cc +359 -227
  148. data/vendor/v8/src/builtins.h +157 -123
  149. data/vendor/v8/src/checks.cc +2 -2
  150. data/vendor/v8/src/checks.h +4 -0
  151. data/vendor/v8/src/code-stubs.cc +27 -17
  152. data/vendor/v8/src/code-stubs.h +38 -17
  153. data/vendor/v8/src/codegen-inl.h +5 -1
  154. data/vendor/v8/src/codegen.cc +27 -17
  155. data/vendor/v8/src/codegen.h +9 -9
  156. data/vendor/v8/src/compilation-cache.cc +92 -206
  157. data/vendor/v8/src/compilation-cache.h +205 -30
  158. data/vendor/v8/src/compiler.cc +107 -120
  159. data/vendor/v8/src/compiler.h +17 -2
  160. data/vendor/v8/src/contexts.cc +22 -15
  161. data/vendor/v8/src/contexts.h +14 -8
  162. data/vendor/v8/src/conversions.cc +86 -30
  163. data/vendor/v8/src/counters.cc +19 -4
  164. data/vendor/v8/src/counters.h +28 -16
  165. data/vendor/v8/src/cpu-profiler-inl.h +4 -3
  166. data/vendor/v8/src/cpu-profiler.cc +123 -72
  167. data/vendor/v8/src/cpu-profiler.h +33 -19
  168. data/vendor/v8/src/cpu.h +2 -0
  169. data/vendor/v8/src/d8-debug.cc +3 -3
  170. data/vendor/v8/src/d8-debug.h +7 -6
  171. data/vendor/v8/src/d8-posix.cc +2 -0
  172. data/vendor/v8/src/d8.cc +22 -12
  173. data/vendor/v8/src/d8.gyp +3 -0
  174. data/vendor/v8/src/d8.js +618 -0
  175. data/vendor/v8/src/data-flow.h +3 -3
  176. data/vendor/v8/src/dateparser.h +4 -2
  177. data/vendor/v8/src/debug-agent.cc +10 -9
  178. data/vendor/v8/src/debug-agent.h +9 -11
  179. data/vendor/v8/src/debug-debugger.js +121 -0
  180. data/vendor/v8/src/debug.cc +331 -227
  181. data/vendor/v8/src/debug.h +248 -219
  182. data/vendor/v8/src/deoptimizer.cc +173 -62
  183. data/vendor/v8/src/deoptimizer.h +119 -19
  184. data/vendor/v8/src/disasm.h +3 -0
  185. data/vendor/v8/src/disassembler.cc +10 -9
  186. data/vendor/v8/src/execution.cc +185 -129
  187. data/vendor/v8/src/execution.h +47 -78
  188. data/vendor/v8/src/extensions/experimental/break-iterator.cc +250 -0
  189. data/vendor/v8/src/extensions/experimental/break-iterator.h +89 -0
  190. data/vendor/v8/src/extensions/experimental/experimental.gyp +2 -0
  191. data/vendor/v8/src/extensions/experimental/i18n-extension.cc +22 -2
  192. data/vendor/v8/src/extensions/externalize-string-extension.cc +2 -2
  193. data/vendor/v8/src/extensions/gc-extension.cc +1 -1
  194. data/vendor/v8/src/factory.cc +261 -154
  195. data/vendor/v8/src/factory.h +162 -158
  196. data/vendor/v8/src/flag-definitions.h +17 -11
  197. data/vendor/v8/src/frame-element.cc +0 -5
  198. data/vendor/v8/src/frame-element.h +9 -13
  199. data/vendor/v8/src/frames-inl.h +7 -0
  200. data/vendor/v8/src/frames.cc +56 -46
  201. data/vendor/v8/src/frames.h +36 -25
  202. data/vendor/v8/src/full-codegen.cc +15 -24
  203. data/vendor/v8/src/full-codegen.h +13 -41
  204. data/vendor/v8/src/func-name-inferrer.cc +7 -6
  205. data/vendor/v8/src/func-name-inferrer.h +1 -1
  206. data/vendor/v8/src/gdb-jit.cc +1 -0
  207. data/vendor/v8/src/global-handles.cc +118 -56
  208. data/vendor/v8/src/global-handles.h +98 -40
  209. data/vendor/v8/src/globals.h +2 -2
  210. data/vendor/v8/src/handles-inl.h +106 -9
  211. data/vendor/v8/src/handles.cc +220 -157
  212. data/vendor/v8/src/handles.h +38 -59
  213. data/vendor/v8/src/hashmap.h +3 -3
  214. data/vendor/v8/src/heap-inl.h +141 -25
  215. data/vendor/v8/src/heap-profiler.cc +117 -63
  216. data/vendor/v8/src/heap-profiler.h +38 -21
  217. data/vendor/v8/src/heap.cc +805 -564
  218. data/vendor/v8/src/heap.h +640 -594
  219. data/vendor/v8/src/hydrogen-instructions.cc +216 -73
  220. data/vendor/v8/src/hydrogen-instructions.h +259 -124
  221. data/vendor/v8/src/hydrogen.cc +996 -1171
  222. data/vendor/v8/src/hydrogen.h +163 -144
  223. data/vendor/v8/src/ia32/assembler-ia32-inl.h +12 -11
  224. data/vendor/v8/src/ia32/assembler-ia32.cc +85 -39
  225. data/vendor/v8/src/ia32/assembler-ia32.h +82 -16
  226. data/vendor/v8/src/ia32/builtins-ia32.cc +64 -58
  227. data/vendor/v8/src/ia32/code-stubs-ia32.cc +248 -324
  228. data/vendor/v8/src/ia32/code-stubs-ia32.h +3 -44
  229. data/vendor/v8/src/ia32/codegen-ia32.cc +217 -165
  230. data/vendor/v8/src/ia32/codegen-ia32.h +3 -0
  231. data/vendor/v8/src/ia32/cpu-ia32.cc +6 -5
  232. data/vendor/v8/src/ia32/debug-ia32.cc +8 -5
  233. data/vendor/v8/src/ia32/deoptimizer-ia32.cc +124 -14
  234. data/vendor/v8/src/ia32/disasm-ia32.cc +85 -62
  235. data/vendor/v8/src/ia32/frames-ia32.h +1 -1
  236. data/vendor/v8/src/ia32/full-codegen-ia32.cc +348 -435
  237. data/vendor/v8/src/ia32/ic-ia32.cc +91 -91
  238. data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +500 -255
  239. data/vendor/v8/src/ia32/lithium-codegen-ia32.h +13 -4
  240. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +6 -0
  241. data/vendor/v8/src/ia32/lithium-ia32.cc +122 -45
  242. data/vendor/v8/src/ia32/lithium-ia32.h +128 -41
  243. data/vendor/v8/src/ia32/macro-assembler-ia32.cc +109 -84
  244. data/vendor/v8/src/ia32/macro-assembler-ia32.h +18 -9
  245. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +26 -15
  246. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +1 -0
  247. data/vendor/v8/src/ia32/register-allocator-ia32.cc +30 -30
  248. data/vendor/v8/src/ia32/simulator-ia32.h +4 -4
  249. data/vendor/v8/src/ia32/stub-cache-ia32.cc +383 -400
  250. data/vendor/v8/src/ia32/virtual-frame-ia32.cc +36 -13
  251. data/vendor/v8/src/ia32/virtual-frame-ia32.h +11 -5
  252. data/vendor/v8/src/ic-inl.h +12 -2
  253. data/vendor/v8/src/ic.cc +304 -221
  254. data/vendor/v8/src/ic.h +115 -58
  255. data/vendor/v8/src/interpreter-irregexp.cc +25 -21
  256. data/vendor/v8/src/interpreter-irregexp.h +2 -1
  257. data/vendor/v8/src/isolate.cc +883 -0
  258. data/vendor/v8/src/isolate.h +1304 -0
  259. data/vendor/v8/src/json.js +10 -10
  260. data/vendor/v8/src/jsregexp.cc +111 -80
  261. data/vendor/v8/src/jsregexp.h +6 -7
  262. data/vendor/v8/src/jump-target-heavy.cc +5 -8
  263. data/vendor/v8/src/jump-target-heavy.h +0 -6
  264. data/vendor/v8/src/jump-target-inl.h +1 -1
  265. data/vendor/v8/src/jump-target-light.cc +3 -3
  266. data/vendor/v8/src/lithium-allocator-inl.h +2 -0
  267. data/vendor/v8/src/lithium-allocator.cc +42 -30
  268. data/vendor/v8/src/lithium-allocator.h +8 -22
  269. data/vendor/v8/src/lithium.cc +1 -0
  270. data/vendor/v8/src/liveedit.cc +141 -99
  271. data/vendor/v8/src/liveedit.h +7 -2
  272. data/vendor/v8/src/liveobjectlist-inl.h +90 -0
  273. data/vendor/v8/src/liveobjectlist.cc +2537 -1
  274. data/vendor/v8/src/liveobjectlist.h +245 -35
  275. data/vendor/v8/src/log-utils.cc +122 -35
  276. data/vendor/v8/src/log-utils.h +33 -36
  277. data/vendor/v8/src/log.cc +299 -241
  278. data/vendor/v8/src/log.h +177 -110
  279. data/vendor/v8/src/mark-compact.cc +612 -470
  280. data/vendor/v8/src/mark-compact.h +153 -80
  281. data/vendor/v8/src/messages.cc +16 -14
  282. data/vendor/v8/src/messages.js +30 -7
  283. data/vendor/v8/src/mips/assembler-mips-inl.h +155 -35
  284. data/vendor/v8/src/mips/assembler-mips.cc +1093 -219
  285. data/vendor/v8/src/mips/assembler-mips.h +552 -153
  286. data/vendor/v8/src/mips/builtins-mips.cc +43 -100
  287. data/vendor/v8/src/mips/code-stubs-mips.cc +752 -0
  288. data/vendor/v8/src/mips/code-stubs-mips.h +511 -0
  289. data/vendor/v8/src/mips/codegen-mips-inl.h +8 -14
  290. data/vendor/v8/src/mips/codegen-mips.cc +672 -896
  291. data/vendor/v8/src/mips/codegen-mips.h +271 -69
  292. data/vendor/v8/src/mips/constants-mips.cc +44 -20
  293. data/vendor/v8/src/mips/constants-mips.h +238 -40
  294. data/vendor/v8/src/mips/cpu-mips.cc +20 -3
  295. data/vendor/v8/src/mips/debug-mips.cc +35 -7
  296. data/vendor/v8/src/mips/deoptimizer-mips.cc +91 -0
  297. data/vendor/v8/src/mips/disasm-mips.cc +329 -93
  298. data/vendor/v8/src/mips/frames-mips.cc +2 -50
  299. data/vendor/v8/src/mips/frames-mips.h +24 -9
  300. data/vendor/v8/src/mips/full-codegen-mips.cc +473 -23
  301. data/vendor/v8/src/mips/ic-mips.cc +81 -45
  302. data/vendor/v8/src/mips/jump-target-mips.cc +11 -106
  303. data/vendor/v8/src/mips/lithium-codegen-mips.h +65 -0
  304. data/vendor/v8/src/mips/lithium-mips.h +304 -0
  305. data/vendor/v8/src/mips/macro-assembler-mips.cc +2391 -390
  306. data/vendor/v8/src/mips/macro-assembler-mips.h +718 -121
  307. data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +478 -0
  308. data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +250 -0
  309. data/vendor/v8/src/mips/register-allocator-mips-inl.h +0 -3
  310. data/vendor/v8/src/mips/register-allocator-mips.h +3 -2
  311. data/vendor/v8/src/mips/simulator-mips.cc +1009 -221
  312. data/vendor/v8/src/mips/simulator-mips.h +119 -36
  313. data/vendor/v8/src/mips/stub-cache-mips.cc +331 -148
  314. data/vendor/v8/src/mips/{fast-codegen-mips.cc → virtual-frame-mips-inl.h} +11 -30
  315. data/vendor/v8/src/mips/virtual-frame-mips.cc +137 -149
  316. data/vendor/v8/src/mips/virtual-frame-mips.h +294 -312
  317. data/vendor/v8/src/mirror-debugger.js +9 -8
  318. data/vendor/v8/src/mksnapshot.cc +2 -2
  319. data/vendor/v8/src/objects-debug.cc +16 -16
  320. data/vendor/v8/src/objects-inl.h +421 -195
  321. data/vendor/v8/src/objects-printer.cc +7 -7
  322. data/vendor/v8/src/objects-visiting.cc +1 -1
  323. data/vendor/v8/src/objects-visiting.h +33 -12
  324. data/vendor/v8/src/objects.cc +935 -658
  325. data/vendor/v8/src/objects.h +234 -139
  326. data/vendor/v8/src/parser.cc +484 -439
  327. data/vendor/v8/src/parser.h +35 -14
  328. data/vendor/v8/src/platform-cygwin.cc +173 -107
  329. data/vendor/v8/src/platform-freebsd.cc +224 -72
  330. data/vendor/v8/src/platform-linux.cc +234 -95
  331. data/vendor/v8/src/platform-macos.cc +215 -82
  332. data/vendor/v8/src/platform-nullos.cc +9 -3
  333. data/vendor/v8/src/platform-openbsd.cc +22 -7
  334. data/vendor/v8/src/platform-posix.cc +30 -5
  335. data/vendor/v8/src/platform-solaris.cc +120 -38
  336. data/vendor/v8/src/platform-tls-mac.h +62 -0
  337. data/vendor/v8/src/platform-tls-win32.h +62 -0
  338. data/vendor/v8/src/platform-tls.h +50 -0
  339. data/vendor/v8/src/platform-win32.cc +195 -97
  340. data/vendor/v8/src/platform.h +72 -15
  341. data/vendor/v8/src/preparse-data.cc +2 -0
  342. data/vendor/v8/src/preparser-api.cc +8 -2
  343. data/vendor/v8/src/preparser.cc +1 -1
  344. data/vendor/v8/src/prettyprinter.cc +43 -52
  345. data/vendor/v8/src/prettyprinter.h +1 -1
  346. data/vendor/v8/src/profile-generator-inl.h +0 -28
  347. data/vendor/v8/src/profile-generator.cc +942 -685
  348. data/vendor/v8/src/profile-generator.h +210 -176
  349. data/vendor/v8/src/property.cc +6 -0
  350. data/vendor/v8/src/property.h +14 -3
  351. data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +1 -1
  352. data/vendor/v8/src/regexp-macro-assembler.cc +28 -19
  353. data/vendor/v8/src/regexp-macro-assembler.h +11 -6
  354. data/vendor/v8/src/regexp-stack.cc +18 -10
  355. data/vendor/v8/src/regexp-stack.h +45 -21
  356. data/vendor/v8/src/regexp.js +3 -3
  357. data/vendor/v8/src/register-allocator-inl.h +3 -3
  358. data/vendor/v8/src/register-allocator.cc +1 -7
  359. data/vendor/v8/src/register-allocator.h +5 -15
  360. data/vendor/v8/src/rewriter.cc +2 -1
  361. data/vendor/v8/src/runtime-profiler.cc +158 -128
  362. data/vendor/v8/src/runtime-profiler.h +131 -15
  363. data/vendor/v8/src/runtime.cc +2409 -1692
  364. data/vendor/v8/src/runtime.h +93 -17
  365. data/vendor/v8/src/safepoint-table.cc +3 -0
  366. data/vendor/v8/src/safepoint-table.h +9 -3
  367. data/vendor/v8/src/scanner-base.cc +21 -28
  368. data/vendor/v8/src/scanner-base.h +22 -11
  369. data/vendor/v8/src/scanner.cc +3 -5
  370. data/vendor/v8/src/scanner.h +4 -2
  371. data/vendor/v8/src/scopeinfo.cc +11 -16
  372. data/vendor/v8/src/scopeinfo.h +26 -15
  373. data/vendor/v8/src/scopes.cc +67 -37
  374. data/vendor/v8/src/scopes.h +26 -12
  375. data/vendor/v8/src/serialize.cc +193 -154
  376. data/vendor/v8/src/serialize.h +41 -36
  377. data/vendor/v8/src/small-pointer-list.h +163 -0
  378. data/vendor/v8/src/snapshot-common.cc +1 -1
  379. data/vendor/v8/src/snapshot.h +3 -1
  380. data/vendor/v8/src/spaces-inl.h +30 -25
  381. data/vendor/v8/src/spaces.cc +263 -370
  382. data/vendor/v8/src/spaces.h +178 -166
  383. data/vendor/v8/src/string-search.cc +4 -3
  384. data/vendor/v8/src/string-search.h +21 -20
  385. data/vendor/v8/src/string-stream.cc +32 -24
  386. data/vendor/v8/src/string.js +7 -7
  387. data/vendor/v8/src/stub-cache.cc +324 -248
  388. data/vendor/v8/src/stub-cache.h +181 -155
  389. data/vendor/v8/src/token.cc +3 -3
  390. data/vendor/v8/src/token.h +3 -3
  391. data/vendor/v8/src/top.cc +218 -390
  392. data/vendor/v8/src/type-info.cc +98 -32
  393. data/vendor/v8/src/type-info.h +10 -3
  394. data/vendor/v8/src/unicode.cc +1 -1
  395. data/vendor/v8/src/unicode.h +1 -1
  396. data/vendor/v8/src/utils.h +3 -0
  397. data/vendor/v8/src/v8-counters.cc +18 -11
  398. data/vendor/v8/src/v8-counters.h +34 -13
  399. data/vendor/v8/src/v8.cc +66 -121
  400. data/vendor/v8/src/v8.h +7 -4
  401. data/vendor/v8/src/v8globals.h +18 -12
  402. data/vendor/v8/src/{memory.h → v8memory.h} +0 -0
  403. data/vendor/v8/src/v8natives.js +59 -18
  404. data/vendor/v8/src/v8threads.cc +127 -114
  405. data/vendor/v8/src/v8threads.h +42 -35
  406. data/vendor/v8/src/v8utils.h +2 -39
  407. data/vendor/v8/src/variables.h +1 -1
  408. data/vendor/v8/src/version.cc +26 -5
  409. data/vendor/v8/src/version.h +4 -0
  410. data/vendor/v8/src/virtual-frame-heavy-inl.h +2 -4
  411. data/vendor/v8/src/virtual-frame-light-inl.h +5 -4
  412. data/vendor/v8/src/vm-state-inl.h +21 -17
  413. data/vendor/v8/src/vm-state.h +7 -5
  414. data/vendor/v8/src/win32-headers.h +1 -0
  415. data/vendor/v8/src/x64/assembler-x64-inl.h +12 -11
  416. data/vendor/v8/src/x64/assembler-x64.cc +80 -40
  417. data/vendor/v8/src/x64/assembler-x64.h +67 -17
  418. data/vendor/v8/src/x64/builtins-x64.cc +34 -33
  419. data/vendor/v8/src/x64/code-stubs-x64.cc +636 -377
  420. data/vendor/v8/src/x64/code-stubs-x64.h +14 -48
  421. data/vendor/v8/src/x64/codegen-x64-inl.h +1 -1
  422. data/vendor/v8/src/x64/codegen-x64.cc +158 -136
  423. data/vendor/v8/src/x64/codegen-x64.h +4 -1
  424. data/vendor/v8/src/x64/cpu-x64.cc +7 -5
  425. data/vendor/v8/src/x64/debug-x64.cc +8 -6
  426. data/vendor/v8/src/x64/deoptimizer-x64.cc +195 -20
  427. data/vendor/v8/src/x64/disasm-x64.cc +42 -23
  428. data/vendor/v8/src/x64/frames-x64.cc +1 -1
  429. data/vendor/v8/src/x64/frames-x64.h +2 -2
  430. data/vendor/v8/src/x64/full-codegen-x64.cc +780 -218
  431. data/vendor/v8/src/x64/ic-x64.cc +77 -79
  432. data/vendor/v8/src/x64/jump-target-x64.cc +1 -1
  433. data/vendor/v8/src/x64/lithium-codegen-x64.cc +698 -181
  434. data/vendor/v8/src/x64/lithium-codegen-x64.h +31 -6
  435. data/vendor/v8/src/x64/lithium-x64.cc +136 -54
  436. data/vendor/v8/src/x64/lithium-x64.h +142 -51
  437. data/vendor/v8/src/x64/macro-assembler-x64.cc +456 -187
  438. data/vendor/v8/src/x64/macro-assembler-x64.h +166 -34
  439. data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +44 -28
  440. data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +8 -4
  441. data/vendor/v8/src/x64/register-allocator-x64-inl.h +3 -3
  442. data/vendor/v8/src/x64/register-allocator-x64.cc +12 -8
  443. data/vendor/v8/src/x64/simulator-x64.h +5 -5
  444. data/vendor/v8/src/x64/stub-cache-x64.cc +299 -344
  445. data/vendor/v8/src/x64/virtual-frame-x64.cc +37 -13
  446. data/vendor/v8/src/x64/virtual-frame-x64.h +13 -7
  447. data/vendor/v8/src/zone-inl.h +49 -3
  448. data/vendor/v8/src/zone.cc +42 -41
  449. data/vendor/v8/src/zone.h +37 -34
  450. data/vendor/v8/test/benchmarks/testcfg.py +100 -0
  451. data/vendor/v8/test/cctest/SConscript +5 -4
  452. data/vendor/v8/test/cctest/cctest.h +3 -2
  453. data/vendor/v8/test/cctest/cctest.status +6 -11
  454. data/vendor/v8/test/cctest/test-accessors.cc +3 -3
  455. data/vendor/v8/test/cctest/test-alloc.cc +39 -33
  456. data/vendor/v8/test/cctest/test-api.cc +1092 -205
  457. data/vendor/v8/test/cctest/test-assembler-arm.cc +39 -25
  458. data/vendor/v8/test/cctest/test-assembler-ia32.cc +36 -37
  459. data/vendor/v8/test/cctest/test-assembler-mips.cc +1098 -40
  460. data/vendor/v8/test/cctest/test-assembler-x64.cc +32 -25
  461. data/vendor/v8/test/cctest/test-ast.cc +1 -0
  462. data/vendor/v8/test/cctest/test-circular-queue.cc +8 -5
  463. data/vendor/v8/test/cctest/test-compiler.cc +24 -24
  464. data/vendor/v8/test/cctest/test-cpu-profiler.cc +140 -5
  465. data/vendor/v8/test/cctest/test-dataflow.cc +1 -0
  466. data/vendor/v8/test/cctest/test-debug.cc +136 -77
  467. data/vendor/v8/test/cctest/test-decls.cc +1 -1
  468. data/vendor/v8/test/cctest/test-deoptimization.cc +25 -24
  469. data/vendor/v8/test/cctest/test-disasm-arm.cc +9 -4
  470. data/vendor/v8/test/cctest/test-disasm-ia32.cc +10 -8
  471. data/vendor/v8/test/cctest/test-func-name-inference.cc +10 -4
  472. data/vendor/v8/test/cctest/test-heap-profiler.cc +226 -164
  473. data/vendor/v8/test/cctest/test-heap.cc +240 -217
  474. data/vendor/v8/test/cctest/test-liveedit.cc +1 -0
  475. data/vendor/v8/test/cctest/test-log-stack-tracer.cc +18 -20
  476. data/vendor/v8/test/cctest/test-log.cc +114 -108
  477. data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +247 -177
  478. data/vendor/v8/test/cctest/test-mark-compact.cc +129 -90
  479. data/vendor/v8/test/cctest/test-parsing.cc +15 -14
  480. data/vendor/v8/test/cctest/test-platform-linux.cc +1 -0
  481. data/vendor/v8/test/cctest/test-platform-tls.cc +66 -0
  482. data/vendor/v8/test/cctest/test-platform-win32.cc +1 -0
  483. data/vendor/v8/test/cctest/test-profile-generator.cc +1 -1
  484. data/vendor/v8/test/cctest/test-regexp.cc +53 -41
  485. data/vendor/v8/test/cctest/test-reloc-info.cc +18 -11
  486. data/vendor/v8/test/cctest/test-serialize.cc +44 -43
  487. data/vendor/v8/test/cctest/test-sockets.cc +8 -3
  488. data/vendor/v8/test/cctest/test-spaces.cc +47 -29
  489. data/vendor/v8/test/cctest/test-strings.cc +20 -20
  490. data/vendor/v8/test/cctest/test-thread-termination.cc +8 -3
  491. data/vendor/v8/test/cctest/test-threads.cc +5 -3
  492. data/vendor/v8/test/cctest/test-utils.cc +5 -4
  493. data/vendor/v8/test/cctest/testcfg.py +7 -3
  494. data/vendor/v8/test/es5conform/es5conform.status +2 -77
  495. data/vendor/v8/test/es5conform/testcfg.py +1 -1
  496. data/vendor/v8/test/message/testcfg.py +1 -1
  497. data/vendor/v8/test/mjsunit/accessors-on-global-object.js +3 -3
  498. data/vendor/v8/test/mjsunit/array-concat.js +43 -1
  499. data/vendor/v8/test/mjsunit/array-join.js +25 -0
  500. data/vendor/v8/test/mjsunit/bitops-info.js +7 -1
  501. data/vendor/v8/test/mjsunit/compiler/array-length.js +2 -2
  502. data/vendor/v8/test/mjsunit/compiler/global-accessors.js +47 -0
  503. data/vendor/v8/test/mjsunit/compiler/pic.js +1 -1
  504. data/vendor/v8/test/mjsunit/compiler/regress-loadfield.js +65 -0
  505. data/vendor/v8/test/mjsunit/math-sqrt.js +5 -1
  506. data/vendor/v8/test/mjsunit/mjsunit.js +59 -8
  507. data/vendor/v8/test/mjsunit/mjsunit.status +0 -12
  508. data/vendor/v8/test/mjsunit/mul-exhaustive.js +129 -11
  509. data/vendor/v8/test/mjsunit/negate-zero.js +1 -1
  510. data/vendor/v8/test/mjsunit/object-freeze.js +5 -13
  511. data/vendor/v8/test/mjsunit/object-prevent-extensions.js +9 -50
  512. data/vendor/v8/test/mjsunit/object-seal.js +4 -13
  513. data/vendor/v8/test/mjsunit/override-eval-with-non-function.js +36 -0
  514. data/vendor/v8/test/mjsunit/regress/regress-1145.js +54 -0
  515. data/vendor/v8/test/mjsunit/regress/regress-1172-bis.js +37 -0
  516. data/vendor/v8/test/mjsunit/regress/regress-1181.js +54 -0
  517. data/vendor/v8/test/mjsunit/regress/regress-1207.js +35 -0
  518. data/vendor/v8/test/mjsunit/regress/regress-1209.js +34 -0
  519. data/vendor/v8/test/mjsunit/regress/regress-1210.js +48 -0
  520. data/vendor/v8/test/mjsunit/regress/regress-1213.js +43 -0
  521. data/vendor/v8/test/mjsunit/regress/regress-1218.js +29 -0
  522. data/vendor/v8/test/mjsunit/regress/regress-1229.js +79 -0
  523. data/vendor/v8/test/mjsunit/regress/regress-1233.js +47 -0
  524. data/vendor/v8/test/mjsunit/regress/regress-1236.js +34 -0
  525. data/vendor/v8/test/mjsunit/regress/regress-1237.js +36 -0
  526. data/vendor/v8/test/mjsunit/regress/regress-1240.js +39 -0
  527. data/vendor/v8/test/mjsunit/regress/regress-1257.js +58 -0
  528. data/vendor/v8/test/mjsunit/regress/regress-1278.js +69 -0
  529. data/vendor/v8/test/mjsunit/regress/regress-create-exception.js +1 -0
  530. data/vendor/v8/test/mjsunit/regress/regress-lazy-deopt-reloc.js +52 -0
  531. data/vendor/v8/test/mjsunit/sin-cos.js +15 -10
  532. data/vendor/v8/test/mjsunit/smi-negative-zero.js +2 -2
  533. data/vendor/v8/test/mjsunit/str-to-num.js +1 -1
  534. data/vendor/v8/test/mjsunit/strict-mode.js +435 -0
  535. data/vendor/v8/test/mjsunit/testcfg.py +23 -6
  536. data/vendor/v8/test/mozilla/mozilla.status +0 -2
  537. data/vendor/v8/test/mozilla/testcfg.py +1 -1
  538. data/vendor/v8/test/preparser/empty.js +28 -0
  539. data/vendor/v8/test/preparser/functions-only.js +38 -0
  540. data/vendor/v8/test/preparser/non-alphanum.js +34 -0
  541. data/vendor/v8/test/preparser/symbols-only.js +49 -0
  542. data/vendor/v8/test/preparser/testcfg.py +90 -0
  543. data/vendor/v8/test/sputnik/testcfg.py +1 -1
  544. data/vendor/v8/test/test262/README +16 -0
  545. data/vendor/v8/test/test262/harness-adapt.js +80 -0
  546. data/vendor/v8/test/test262/test262.status +1506 -0
  547. data/vendor/v8/test/test262/testcfg.py +123 -0
  548. data/vendor/v8/tools/freebsd-tick-processor +10 -0
  549. data/vendor/v8/tools/gyp/v8.gyp +8 -33
  550. data/vendor/v8/tools/linux-tick-processor +5 -3
  551. data/vendor/v8/tools/test.py +37 -14
  552. data/vendor/v8/tools/tickprocessor.js +22 -8
  553. data/vendor/v8/tools/visual_studio/v8_base.vcproj +13 -1
  554. data/vendor/v8/tools/visual_studio/v8_base_arm.vcproj +5 -1
  555. data/vendor/v8/tools/visual_studio/v8_base_x64.vcproj +5 -1
  556. data/vendor/v8/tools/visual_studio/x64.vsprops +1 -0
  557. metadata +1495 -1341
  558. data/ext/extconf.rb +0 -22
  559. data/ext/mustang.cpp +0 -58
  560. data/vendor/v8/src/top.h +0 -608

data/vendor/v8/src/arm/builtins-arm.cc:

@@ -68,7 +68,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
   // JumpToExternalReference expects r0 to contain the number of arguments
   // including the receiver and the extra arguments.
   __ add(r0, r0, Operand(num_extra_args + 1));
-  __ JumpToExternalReference(ExternalReference(id));
+  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }

@@ -310,6 +310,7 @@ static void AllocateJSArray(MacroAssembler* masm,
 // construct call and normal call.
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
+  Counters* counters = masm->isolate()->counters();
   Label argc_one_or_more, argc_two_or_more;

   // Check for array construction with zero arguments or one.

@@ -325,7 +326,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                          r5,
                          JSArray::kPreallocatedArrayElements,
                          call_generic_code);
-  __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+  __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
   // Setup return value, remove receiver from stack and return.
   __ mov(r0, r2);
   __ add(sp, sp, Operand(kPointerSize));

@@ -361,7 +362,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                          r7,
                          true,
                          call_generic_code);
-  __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+  __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
   // Setup return value, remove receiver and argument from stack and return.
   __ mov(r0, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));

@@ -385,7 +386,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                          r7,
                          false,
                          call_generic_code);
-  __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+  __ IncrementCounter(counters->array_function_native(), 1, r2, r6);

   // Fill arguments as array elements. Copy from the top of the stack (last
   // element) to the array backing store filling it backwards. Note:

@@ -428,7 +429,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   GenerateLoadArrayFunction(masm, r1);

   if (FLAG_debug_code) {
-    // Initial map for the builtin Array function shoud be a map.
+    // Initial map for the builtin Array functions should be maps.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     __ tst(r2, Operand(kSmiTagMask));
     __ Assert(ne, "Unexpected initial map for Array function");

@@ -442,8 +443,9 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   // Jump to the generic array code if the specialized code cannot handle
   // the construction.
   __ bind(&generic_array_code);
-  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
-  Handle<Code> array_code(code);
+
+  Handle<Code> array_code =
+      masm->isolate()->builtins()->ArrayCodeGeneric();
   __ Jump(array_code, RelocInfo::CODE_TARGET);
 }

@@ -458,11 +460,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
   Label generic_constructor;

   if (FLAG_debug_code) {
-    // The array construct code is only set for the builtin Array function which
-    // always have a map.
-    GenerateLoadArrayFunction(masm, r2);
-    __ cmp(r1, r2);
-    __ Assert(eq, "Unexpected Array function");
+    // The array construct code is only set for the builtin and internal
+    // Array functions which always have a map.
     // Initial map for the builtin Array function should be a map.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     __ tst(r2, Operand(kSmiTagMask));

@@ -477,8 +476,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
   // Jump to the generic construct code in case the specialized code cannot
   // handle the construction.
   __ bind(&generic_constructor);
-  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
-  Handle<Code> generic_construct_stub(code);
+  Handle<Code> generic_construct_stub =
+      masm->isolate()->builtins()->JSConstructStubGeneric();
   __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
 }

@@ -491,7 +490,8 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
-  __ IncrementCounter(&Counters::string_ctor_calls, 1, r2, r3);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3);

   Register function = r1;
   if (FLAG_debug_code) {

@@ -521,7 +521,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
                          r5,  // Scratch.
                          false,  // Is it a Smi?
                          &not_cached);
-  __ IncrementCounter(&Counters::string_ctor_cached_number, 1, r3, r4);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
   __ bind(&argument_is_string);

   // ----------- S t a t e -------------

@@ -575,13 +575,13 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   __ tst(r3, Operand(kIsNotStringMask));
   __ b(ne, &convert_argument);
   __ mov(argument, r0);
-  __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
   __ b(&argument_is_string);

   // Invoke the conversion builtin and put the result into r2.
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
-  __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
   __ EnterInternalFrame();
   __ push(r0);
   __ InvokeBuiltin(Builtins::TO_STRING, CALL_JS);

@@ -600,7 +600,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   // At this point the argument is already a string. Call runtime to
   // create a string wrapper.
   __ bind(&gc_required);
-  __ IncrementCounter(&Counters::string_ctor_gc_required, 1, r3, r4);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
   __ EnterInternalFrame();
   __ push(argument);
   __ CallRuntime(Runtime::kNewStringWrapper, 1);

@@ -636,7 +636,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   // Set expected number of arguments to zero (not changing r0).
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
 }

@@ -647,6 +647,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);

+  Isolate* isolate = masm->isolate();
+
   // Enter a construct frame.
   __ EnterConstructFrame();

@@ -662,7 +664,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
   ExternalReference debug_step_in_fp =
-      ExternalReference::debug_step_in_fp_address();
+      ExternalReference::debug_step_in_fp_address(isolate);
   __ mov(r2, Operand(debug_step_in_fp));
   __ ldr(r2, MemOperand(r2));
   __ tst(r2, r2);

@@ -908,8 +910,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   // r1: constructor function
   if (is_api_function) {
     __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-    Handle<Code> code = Handle<Code>(
-        Builtins::builtin(Builtins::HandleApiCallConstruct));
+    Handle<Code> code =
+        masm->isolate()->builtins()->HandleApiCallConstruct();
     ParameterCount expected(0);
     __ InvokeCode(code, expected, expected,
                   RelocInfo::CODE_TARGET, CALL_FUNCTION);

@@ -966,7 +968,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   __ LeaveConstructFrame();
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
   __ add(sp, sp, Operand(kPointerSize));
-  __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
+  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
   __ Jump(lr);
 }

@@ -1006,7 +1008,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

   // Set up the roots register.
-  ExternalReference roots_address = ExternalReference::roots_address();
+  ExternalReference roots_address =
+      ExternalReference::roots_address(masm->isolate());
   __ mov(r10, Operand(roots_address));

   // Push the function and the receiver onto the stack.

@@ -1042,7 +1045,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // Invoke the code and pass argc as r0.
   __ mov(r0, Operand(r3));
   if (is_construct) {
-    __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+    __ Call(masm->isolate()->builtins()->JSConstructCall(),
             RelocInfo::CODE_TARGET);
   } else {
     ParameterCount actual(r0);

@@ -1170,9 +1173,11 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {


 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  // Probe the CPU to set the supported features, because this builtin
-  // may be called before the initialization performs CPU setup.
-  CpuFeatures::Probe(false);
+  CpuFeatures::TryForceFeatureScope scope(VFP3);
+  if (!CpuFeatures::IsSupported(VFP3)) {
+    __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
+    return;
+  }

   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.

@@ -1335,8 +1340,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     // Expected number of arguments is 0 for CALL_NON_FUNCTION.
     __ mov(r2, Operand(0, RelocInfo::NONE));
     __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
-    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
-            RelocInfo::CODE_TARGET);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
     __ bind(&function);
   }

@@ -1351,8 +1356,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ mov(r2, Operand(r2, ASR, kSmiTagSize));
   __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   __ cmp(r2, r0);  // Check formal and actual parameter counts.
-  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
-          RelocInfo::CODE_TARGET, ne);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET,
+          ne);

   ParameterCount expected(0);
   __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);

data/vendor/v8/src/arm/code-stubs-arm.cc:

@@ -91,11 +91,15 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
                         &gc,
                         TAG_OBJECT);

+  int map_index = strict_mode_ == kStrictMode
+      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+      : Context::FUNCTION_MAP_INDEX;
+
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
   __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
-  __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+  __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
   __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

   // Initialize the rest of the function. We don't have to update the

@@ -397,17 +401,88 @@ class FloatingPointHelper : public AllStatic {
                           Register scratch2,
                           Label* not_number);

-  // Loads the number from object into dst as a 32-bit integer if possible. If
-  // the object is not a 32-bit integer control continues at the label
-  // not_int32. If VFP is supported double_scratch is used but not scratch2.
-  static void LoadNumberAsInteger(MacroAssembler* masm,
-                                  Register object,
-                                  Register dst,
-                                  Register heap_number_map,
-                                  Register scratch1,
-                                  Register scratch2,
-                                  DwVfpRegister double_scratch,
-                                  Label* not_int32);
+  // Convert the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1.
+  static void ConvertNumberToInt32(MacroAssembler* masm,
+                                   Register object,
+                                   Register dst,
+                                   Register heap_number_map,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   DwVfpRegister double_scratch,
+                                   Label* not_int32);
+
+  // Load the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point value in the 32-bit integer range that are not exact integer
+  // won't be loaded.
+  static void LoadNumberAsInt32Double(MacroAssembler* masm,
+                                      Register object,
+                                      Destination destination,
+                                      DwVfpRegister double_dst,
+                                      Register dst1,
+                                      Register dst2,
+                                      Register heap_number_map,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      SwVfpRegister single_scratch,
+                                      Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point value in the 32-bit integer range that are not exact integer
+  // won't be converted.
+  // scratch3 is not used when VFP3 is supported.
+  static void LoadNumberAsInt32(MacroAssembler* masm,
+                                Register object,
+                                Register dst,
+                                Register heap_number_map,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                DwVfpRegister double_scratch,
+                                Label* not_int32);
+
+  // Generate non VFP3 code to check if a double can be exactly represented by a
+  // 32-bit integer. This does not check for 0 or -0, which need
+  // to be checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+  // through otherwise.
+  // src1 and src2 will be cloberred.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
+  // - src2: contains 1.
+  // - other registers are clobbered.
+  static void DoubleIs32BitInteger(MacroAssembler* masm,
+                                   Register src1,
+                                   Register src2,
+                                   Register dst,
+                                   Register scratch,
+                                   Label* not_int32);
+
+  // Generates code to call a C function to do a double operation using core
+  // registers. (Used when VFP3 is not supported.)
+  // This code never falls through, but returns with a heap number containing
+  // the result in r0.
+  // Register heapnumber_result must be a heap number in which the
+  // result of the operation will be stored.
+  // Requires the following layout on entry:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                          Token::Value op,
+                                          Register heap_number_result,
+                                          Register scratch);

  private:
   static void LoadNumber(MacroAssembler* masm,
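
A note on the ToInt32 rule cited in the ConvertNumberToInt32 comment above: the value is truncated toward zero and then wrapped modulo 2^32 into the signed 32-bit range. The following standalone C++ sketch of that rule is illustrative only; it is not part of this gem or of V8:

    #include <cmath>
    #include <cstdint>

    // Illustrative ToInt32: truncate toward zero, wrap modulo 2^32, then
    // map values >= 2^31 into the negative half of the int32 range.
    int32_t ToInt32(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double wrapped = std::fmod(std::trunc(value), 4294967296.0);
      if (wrapped < 0) wrapped += 4294967296.0;  // normalize into [0, 2^32)
      return wrapped >= 2147483648.0
                 ? static_cast<int32_t>(wrapped - 4294967296.0)
                 : static_cast<int32_t>(wrapped);
    }
    // Examples: ToInt32(2147483648.0) == -2147483648; ToInt32(-1.5) == -1.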

@@ -495,7 +570,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

   // Handle loading a double from a heap number.
-  if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
+  if (CpuFeatures::IsSupported(VFP3) &&
+      destination == kVFPRegisters) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from tagged HeapNumber to double register.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
@@ -533,33 +609,365 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
533
609
  }
534
610
 
535
611
 
536
- void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
537
- Register object,
538
- Register dst,
539
- Register heap_number_map,
540
- Register scratch1,
541
- Register scratch2,
542
- DwVfpRegister double_scratch,
543
- Label* not_int32) {
612
+ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
613
+ Register object,
614
+ Register dst,
615
+ Register heap_number_map,
616
+ Register scratch1,
617
+ Register scratch2,
618
+ Register scratch3,
619
+ DwVfpRegister double_scratch,
620
+ Label* not_number) {
544
621
  if (FLAG_debug_code) {
545
622
  __ AbortIfNotRootValue(heap_number_map,
546
623
  Heap::kHeapNumberMapRootIndex,
547
624
  "HeapNumberMap register clobbered.");
548
625
  }
549
- Label is_smi, done;
626
+ Label is_smi;
627
+ Label done;
628
+ Label not_in_int32_range;
629
+
550
630
  __ JumpIfSmi(object, &is_smi);
551
631
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
552
632
  __ cmp(scratch1, heap_number_map);
553
- __ b(ne, not_int32);
554
- __ ConvertToInt32(
555
- object, dst, scratch1, scratch2, double_scratch, not_int32);
633
+ __ b(ne, not_number);
634
+ __ ConvertToInt32(object,
635
+ dst,
636
+ scratch1,
637
+ scratch2,
638
+ double_scratch,
639
+ &not_in_int32_range);
556
640
  __ jmp(&done);
641
+
642
+ __ bind(&not_in_int32_range);
643
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
644
+ __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
645
+
646
+ __ EmitOutOfInt32RangeTruncate(dst,
647
+ scratch1,
648
+ scratch2,
649
+ scratch3);
650
+ __ jmp(&done);
651
+
557
652
  __ bind(&is_smi);
558
653
  __ SmiUntag(dst, object);
559
654
  __ bind(&done);
560
655
  }
561
656
 
562
657
 
658
+ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
659
+ Register object,
660
+ Destination destination,
661
+ DwVfpRegister double_dst,
662
+ Register dst1,
663
+ Register dst2,
664
+ Register heap_number_map,
665
+ Register scratch1,
666
+ Register scratch2,
667
+ SwVfpRegister single_scratch,
668
+ Label* not_int32) {
669
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
670
+ ASSERT(!scratch1.is(scratch2));
671
+ ASSERT(!heap_number_map.is(object) &&
672
+ !heap_number_map.is(scratch1) &&
673
+ !heap_number_map.is(scratch2));
674
+
675
+ Label done, obj_is_not_smi;
676
+
677
+ __ JumpIfNotSmi(object, &obj_is_not_smi);
678
+ __ SmiUntag(scratch1, object);
679
+ if (CpuFeatures::IsSupported(VFP3)) {
680
+ CpuFeatures::Scope scope(VFP3);
681
+ __ vmov(single_scratch, scratch1);
682
+ __ vcvt_f64_s32(double_dst, single_scratch);
683
+ if (destination == kCoreRegisters) {
684
+ __ vmov(dst1, dst2, double_dst);
685
+ }
686
+ } else {
687
+ Label fewer_than_20_useful_bits;
688
+ // Expected output:
689
+ // | dst1 | dst2 |
690
+ // | s | exp | mantissa |
691
+
692
+ // Check for zero.
693
+ __ cmp(scratch1, Operand(0));
694
+ __ mov(dst1, scratch1);
695
+ __ mov(dst2, scratch1);
696
+ __ b(eq, &done);
697
+
698
+ // Preload the sign of the value.
699
+ __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
700
+ // Get the absolute value of the object (as an unsigned integer).
701
+ __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
702
+
703
+ // Get mantisssa[51:20].
704
+
705
+ // Get the position of the first set bit.
706
+ __ CountLeadingZeros(dst2, scratch1, scratch2);
707
+ __ rsb(dst2, dst2, Operand(31));
708
+
709
+ // Set the exponent.
710
+ __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
711
+ __ Bfi(dst1, scratch2, scratch2,
712
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
713
+
714
+ // Clear the first non null bit.
715
+ __ mov(scratch2, Operand(1));
716
+ __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
717
+
718
+ __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
719
+ // Get the number of bits to set in the lower part of the mantissa.
720
+ __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
721
+ __ b(mi, &fewer_than_20_useful_bits);
722
+ // Set the higher 20 bits of the mantissa.
723
+ __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
724
+ __ rsb(scratch2, scratch2, Operand(32));
725
+ __ mov(dst2, Operand(scratch1, LSL, scratch2));
726
+ __ b(&done);
727
+
728
+ __ bind(&fewer_than_20_useful_bits);
729
+ __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
730
+ __ mov(scratch2, Operand(scratch1, LSL, scratch2));
731
+ __ orr(dst1, dst1, scratch2);
732
+ // Set dst2 to 0.
733
+ __ mov(dst2, Operand(0));
734
+ }
735
+
736
+ __ b(&done);
737
+
738
+ __ bind(&obj_is_not_smi);
739
+ if (FLAG_debug_code) {
740
+ __ AbortIfNotRootValue(heap_number_map,
741
+ Heap::kHeapNumberMapRootIndex,
742
+ "HeapNumberMap register clobbered.");
743
+ }
744
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
745
+
746
+ // Load the number.
747
+ if (CpuFeatures::IsSupported(VFP3)) {
748
+ CpuFeatures::Scope scope(VFP3);
749
+ // Load the double value.
750
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
751
+ __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
752
+
753
+ __ EmitVFPTruncate(kRoundToZero,
754
+ single_scratch,
755
+ double_dst,
756
+ scratch1,
757
+ scratch2,
758
+ kCheckForInexactConversion);
759
+
760
+ // Jump to not_int32 if the operation did not succeed.
761
+ __ b(ne, not_int32);
762
+
763
+ if (destination == kCoreRegisters) {
764
+ __ vmov(dst1, dst2, double_dst);
765
+ }
766
+
767
+ } else {
768
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
769
+ // Load the double value in the destination registers..
770
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
771
+
772
+ // Check for 0 and -0.
773
+ __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
774
+ __ orr(scratch1, scratch1, Operand(dst2));
775
+ __ cmp(scratch1, Operand(0));
776
+ __ b(eq, &done);
777
+
778
+ // Check that the value can be exactly represented by a 32-bit integer.
779
+ // Jump to not_int32 if that's not the case.
780
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
781
+
782
+ // dst1 and dst2 were trashed. Reload the double value.
783
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
784
+ }
785
+
786
+ __ bind(&done);
787
+ }
788
+
789
+
790
+ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
791
+ Register object,
792
+ Register dst,
793
+ Register heap_number_map,
794
+ Register scratch1,
795
+ Register scratch2,
796
+ Register scratch3,
797
+ DwVfpRegister double_scratch,
798
+ Label* not_int32) {
799
+ ASSERT(!dst.is(object));
800
+ ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
801
+ ASSERT(!scratch1.is(scratch2) &&
802
+ !scratch1.is(scratch3) &&
803
+ !scratch2.is(scratch3));
804
+
805
+ Label done;
806
+
807
+ // Untag the object into the destination register.
808
+ __ SmiUntag(dst, object);
809
+ // Just return if the object is a smi.
810
+ __ JumpIfSmi(object, &done);
811
+
812
+ if (FLAG_debug_code) {
813
+ __ AbortIfNotRootValue(heap_number_map,
814
+ Heap::kHeapNumberMapRootIndex,
815
+ "HeapNumberMap register clobbered.");
816
+ }
817
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ SwVfpRegister single_scratch = double_scratch.low();
+ // Load the double value.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
+
+ __ EmitVFPTruncate(kRoundToZero,
+ single_scratch,
+ double_scratch,
+ scratch1,
+ scratch2,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ b(ne, not_int32);
+ // Get the result in the destination register.
+ __ vmov(dst, single_scratch);
+
+ } else {
+ // Load the double value in the destination registers.
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
+ __ orr(dst, scratch2, Operand(dst));
+ __ cmp(dst, Operand(0));
+ __ b(eq, &done);
+
+ DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+ // Register state after DoubleIs32BitInteger.
+ // dst: mantissa[51:20].
+ // scratch2: 1
+
+ // Shift back the higher bits of the mantissa.
+ __ mov(dst, Operand(dst, LSR, scratch3));
+ // Set the implicit first bit.
+ __ rsb(scratch3, scratch3, Operand(32));
+ __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
+ // Set the sign.
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ __ rsb(dst, dst, Operand(0), LeaveCC, mi);
+ }
+
+ __ bind(&done);
+ }
+
+
+ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32) {
+ // Get exponent alone in scratch.
+ __ Ubfx(scratch,
+ src1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Subtract the bias from the exponent.
+ __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
+
+ // src1: higher (exponent) part of the double value.
+ // src2: lower (mantissa) part of the double value.
+ // scratch: unbiased exponent.
+
+ // Fast cases. Check for obvious non-32-bit-integer values.
+ // A negative exponent cannot yield a 32-bit integer.
+ __ b(mi, not_int32);
+ // An exponent greater than 31 cannot yield a 32-bit integer.
+ // Also, a positive value with an exponent equal to 31 is outside of the
+ // signed 32-bit integer range.
+ // Another way to put it is that if (exponent - signbit) > 30 then the
+ // number cannot be represented as an int32.
+ Register tmp = dst;
+ __ sub(tmp, scratch, Operand(src1, LSR, 31));
+ __ cmp(tmp, Operand(30));
+ __ b(gt, not_int32);
+ // Since the exponent is at most 30 here, bits [21:0] of the mantissa
+ // are fractional and must be zero for an int32.
+ __ tst(src2, Operand(0x3fffff));
+ __ b(ne, not_int32);
+
+ // Otherwise the exponent needs to be big enough that all the non-zero
+ // bits lie above the binary point. So we need the (30 - exponent) last
+ // bits of the 31 higher bits of the mantissa to be null.
+ // Because bits [21:0] are null, we can check instead that the
+ // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+
+ // Get the 32 higher bits of the mantissa in dst.
+ __ Ubfx(dst,
+ src2,
+ HeapNumber::kMantissaBitsInTopWord,
+ 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ orr(dst,
+ dst,
+ Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+
+ // Create the mask and test the lower bits (of the higher bits).
+ __ rsb(scratch, scratch, Operand(32));
+ __ mov(src2, Operand(1));
+ __ mov(src1, Operand(src2, LSL, scratch));
+ __ sub(src1, src1, Operand(1));
+ __ tst(dst, src1);
+ __ b(ne, not_int32);
+ }
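Both conversion paths above answer the same question. As a rough illustration (portable C++, not V8 code), the VFP path's truncate-and-check-inexact sequence is equivalent to this round trip:

    #include <cstdint>

    // Illustrative sketch only: "is this double exactly a signed 32-bit
    // integer?" The stub decides this with VFP flags (EmitVFPTruncate with
    // kCheckForInexactConversion) or, without VFP3, with the explicit
    // exponent/mantissa bit tests in DoubleIs32BitInteger above.
    bool IsExactInt32(double value) {
      // Reject out-of-range values first; an out-of-range double-to-int
      // cast is undefined behavior in C++.
      if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
      int32_t truncated = static_cast<int32_t>(value);  // kRoundToZero
      return static_cast<double>(truncated) == value;   // "inexact" check
    }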
+
+
+ void FloatingPointHelper::CallCCodeForDoubleOperation(
+ MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Using core registers:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+
+ // Assert that heap_number_result is callee-saved.
+ // We currently always use r5 to pass it.
+ ASSERT(heap_number_result.is(r5));
+
+ // Push the current return address before the C call. Return will be
+ // through pop(pc) below.
+ __ push(lr);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+ 4);
+ // Store answer in the overwritable heap number.
+ #if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor registers 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for the coprocessor, so we
+ // need to subtract the tag from heap_number_result.
+ __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
+ #else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(heap_number_result,
+ HeapNumber::kValueOffset));
+ #endif
+ // Place heap_number_result in r0 and return to the pushed return address.
+ __ mov(r0, Operand(heap_number_result));
+ __ pop(pc);
+ }
+

 // See comment for class.
  void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
@@ -622,7 +1030,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
622
1030
  // The two objects are identical. If we know that one of them isn't NaN then
623
1031
  // we now know they test equal.
624
1032
  if (cond != eq || !never_nan_nan) {
625
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
1033
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
626
1034
  // so we do the second best thing - test it ourselves.
627
1035
  // They are both equal and they are not both Smis so both of them are not
628
1036
  // Smis. If it's not a heap number, then return equal.
@@ -897,7 +1305,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
897
1305
  // Call C routine that may not cause GC or other trouble.
898
1306
  __ push(lr);
899
1307
  __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
900
- __ CallCFunction(ExternalReference::compare_doubles(), 4);
1308
+ __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
901
1309
  __ pop(pc); // Return.
902
1310
  }
903
1311
  }
@@ -1050,6 +1458,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1050
1458
  // number string cache for smis is just the smi value, and the hash for
1051
1459
  // doubles is the xor of the upper and lower words. See
1052
1460
  // Heap::GetNumberStringCache.
1461
+ Isolate* isolate = masm->isolate();
1053
1462
  Label is_smi;
1054
1463
  Label load_result_from_cache;
1055
1464
  if (!object_is_smi) {
@@ -1111,7 +1520,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1111
1520
  __ bind(&load_result_from_cache);
1112
1521
  __ ldr(result,
1113
1522
  FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1114
- __ IncrementCounter(&Counters::number_to_string_native,
1523
+ __ IncrementCounter(isolate->counters()->number_to_string_native(),
1115
1524
  1,
1116
1525
  scratch1,
1117
1526
  scratch2);
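The hash probed by this hunk is the one described in the comment above (see Heap::GetNumberStringCache): smis hash to their own value, and doubles to the xor of their two 32-bit words, masked by the cache capacity. A hedged sketch in C++ (illustrative only; the mask convention is an assumption, not the V8 implementation):

    #include <cstdint>
    #include <cstring>

    // Sketch of the number-string-cache hash for a double value.
    uint32_t NumberStringCacheHash(double d, uint32_t mask /* capacity - 1 */) {
      uint32_t halves[2];
      std::memcpy(halves, &d, sizeof(halves));  // reinterpret the double bits
      return (halves[0] ^ halves[1]) & mask;    // xor of upper and lower words
    }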
@@ -1187,6 +1596,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
1187
1596
  __ bind(&both_loaded_as_doubles);
1188
1597
  // The arguments have been converted to doubles and stored in d6 and d7, if
1189
1598
  // VFP3 is supported, or in r0, r1, r2, and r3.
1599
+ Isolate* isolate = masm->isolate();
1190
1600
  if (CpuFeatures::IsSupported(VFP3)) {
1191
1601
  __ bind(&lhs_not_nan);
1192
1602
  CpuFeatures::Scope scope(VFP3);
@@ -1257,7 +1667,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
1257
1667
 
1258
1668
  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
1259
1669
 
1260
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
1670
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
1261
1671
  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1262
1672
  lhs_,
1263
1673
  rhs_,
@@ -1296,6 +1706,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
1296
1706
  // This stub does not handle the inlined cases (Smis, Booleans, undefined).
1297
1707
  // The stub returns zero for false, and a non-zero value for true.
1298
1708
  void ToBooleanStub::Generate(MacroAssembler* masm) {
1709
+ // This stub uses VFP3 instructions.
1710
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
1711
+
1299
1712
  Label false_result;
1300
1713
  Label not_heap_number;
1301
1714
  Register scratch = r9.is(tos_) ? r7 : r9;
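For heap numbers this stub implements the ECMA-262 ToBoolean rule: a number is falsy exactly when it is +0, -0, or NaN. A minimal sketch (illustrative, not V8 code):

    #include <cmath>

    // ToBoolean for a number value: false iff +0, -0, or NaN.
    bool NumberToBoolean(double value) {
      return value != 0.0 && !std::isnan(value);  // +-0 compare equal to 0.0
    }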
@@ -1380,7 +1793,9 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
1380
1793
  Register rhs,
1381
1794
  const Builtins::JavaScript& builtin) {
1382
1795
  Label slow, slow_reverse, do_the_call;
1383
- bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
1796
+ bool use_fp_registers =
1797
+ CpuFeatures::IsSupported(VFP3) &&
1798
+ Token::MOD != op_;
1384
1799
 
1385
1800
  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
1386
1801
  Register heap_number_map = r6;
@@ -1606,7 +2021,8 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
1606
2021
  __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
1607
2022
  // Call C routine that may not cause GC or other trouble. r5 is callee
1608
2023
  // save.
1609
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2024
+ __ CallCFunction(
2025
+ ExternalReference::double_fp_operation(op_, masm->isolate()), 4);
1610
2026
  // Store answer in the overwritable heap number.
1611
2027
  #if !defined(USE_ARM_EABI)
1612
2028
  // Double returned in fp coprocessor register 0 and 1, encoded as
@@ -2410,7 +2826,7 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2410
2826
  __ Push(r2, r1, r0);
2411
2827
 
2412
2828
  __ TailCallExternalReference(
2413
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
2829
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
2414
2830
  5,
2415
2831
  1);
2416
2832
  }
@@ -2441,7 +2857,8 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2441
2857
  __ Push(r2, r1, r0);
2442
2858
 
2443
2859
  __ TailCallExternalReference(
2444
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
2860
+ ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
2861
+ masm->isolate()),
2445
2862
  5,
2446
2863
  1);
2447
2864
  }
@@ -2467,6 +2884,9 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
2467
2884
  case TRBinaryOpIC::HEAP_NUMBER:
2468
2885
  GenerateHeapNumberStub(masm);
2469
2886
  break;
2887
+ case TRBinaryOpIC::ODDBALL:
2888
+ GenerateOddballStub(masm);
2889
+ break;
2470
2890
  case TRBinaryOpIC::STRING:
2471
2891
  GenerateStringStub(masm);
2472
2892
  break;
@@ -2482,7 +2902,8 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
2482
2902
  const char* TypeRecordingBinaryOpStub::GetName() {
2483
2903
  if (name_ != NULL) return name_;
2484
2904
  const int kMaxNameLength = 100;
2485
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
2905
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
2906
+ kMaxNameLength);
2486
2907
  if (name_ == NULL) return "OOM";
2487
2908
  const char* op_name = Token::Name(op_);
2488
2909
  const char* overwrite_name;
@@ -2636,6 +3057,7 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2636
3057
  Register right = r0;
2637
3058
  Register scratch1 = r7;
2638
3059
  Register scratch2 = r9;
3060
+ Register scratch3 = r4;
2639
3061
 
2640
3062
  ASSERT(smi_operands || (not_numbers != NULL));
2641
3063
  if (smi_operands && FLAG_debug_code) {
@@ -2655,7 +3077,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2655
3077
  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
2656
3078
  // depending on whether VFP3 is available or not.
2657
3079
  FloatingPointHelper::Destination destination =
2658
- CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
3080
+ CpuFeatures::IsSupported(VFP3) &&
3081
+ op_ != Token::MOD ?
2659
3082
  FloatingPointHelper::kVFPRegisters :
2660
3083
  FloatingPointHelper::kCoreRegisters;
2661
3084
 
@@ -2704,33 +3127,11 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2704
3127
  __ add(r0, r0, Operand(kHeapObjectTag));
2705
3128
  __ Ret();
2706
3129
  } else {
2707
- // Using core registers:
2708
- // r0: Left value (least significant part of mantissa).
2709
- // r1: Left value (sign, exponent, top of mantissa).
2710
- // r2: Right value (least significant part of mantissa).
2711
- // r3: Right value (sign, exponent, top of mantissa).
2712
-
2713
- // Push the current return address before the C call. Return will be
2714
- // through pop(pc) below.
2715
- __ push(lr);
2716
- __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
2717
- // Call C routine that may not cause GC or other trouble. r5 is callee
2718
- // save.
2719
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2720
- // Store answer in the overwritable heap number.
2721
- #if !defined(USE_ARM_EABI)
2722
- // Double returned in fp coprocessor register 0 and 1, encoded as
2723
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
2724
- // need to substract the tag from r5.
2725
- __ sub(scratch1, result, Operand(kHeapObjectTag));
2726
- __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2727
- #else
2728
- // Double returned in registers 0 and 1.
2729
- __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
2730
- #endif
2731
- // Plase result in r0 and return to the pushed return address.
2732
- __ mov(r0, Operand(result));
2733
- __ pop(pc);
3130
+ // Call the C function to handle the double operation.
3131
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm,
3132
+ op_,
3133
+ result,
3134
+ scratch1);
2734
3135
  }
2735
3136
  break;
2736
3137
  }
@@ -2745,22 +3146,24 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2745
3146
  __ SmiUntag(r2, right);
2746
3147
  } else {
2747
3148
  // Convert operands to 32-bit integers. Right in r2 and left in r3.
2748
- FloatingPointHelper::LoadNumberAsInteger(masm,
2749
- left,
2750
- r3,
2751
- heap_number_map,
2752
- scratch1,
2753
- scratch2,
2754
- d0,
2755
- not_numbers);
2756
- FloatingPointHelper::LoadNumberAsInteger(masm,
2757
- right,
2758
- r2,
2759
- heap_number_map,
2760
- scratch1,
2761
- scratch2,
2762
- d0,
2763
- not_numbers);
3149
+ FloatingPointHelper::ConvertNumberToInt32(masm,
3150
+ left,
3151
+ r3,
3152
+ heap_number_map,
3153
+ scratch1,
3154
+ scratch2,
3155
+ scratch3,
3156
+ d0,
3157
+ not_numbers);
3158
+ FloatingPointHelper::ConvertNumberToInt32(masm,
3159
+ right,
3160
+ r2,
3161
+ heap_number_map,
3162
+ scratch1,
3163
+ scratch2,
3164
+ scratch3,
3165
+ d0,
3166
+ not_numbers);
2764
3167
  }
2765
3168
 
2766
3169
  Label result_not_a_smi;
@@ -2776,7 +3179,6 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2776
3179
  break;
2777
3180
  case Token::SAR:
2778
3181
  // Use only the 5 least significant bits of the shift count.
2779
- __ and_(r2, r2, Operand(0x1f));
2780
3182
  __ GetLeastBitsFromInt32(r2, r2, 5);
2781
3183
  __ mov(r2, Operand(r3, ASR, r2));
2782
3184
  break;
@@ -2921,18 +3323,339 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2921
3323
  void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2922
3324
  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
2923
3325
 
2924
- GenerateTypeTransition(masm);
3326
+ Register left = r1;
3327
+ Register right = r0;
3328
+ Register scratch1 = r7;
3329
+ Register scratch2 = r9;
3330
+ DwVfpRegister double_scratch = d0;
3331
+ SwVfpRegister single_scratch = s3;
3332
+
3333
+ Register heap_number_result = no_reg;
3334
+ Register heap_number_map = r6;
3335
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3336
+
3337
+ Label call_runtime;
3338
+ // Labels for type transition, used for wrong input or output types.
3339
+ // Both labels are currently bound to the same position. We use two
+ // different labels to differentiate the cause leading to the type transition.
3341
+ Label transition;
3342
+
3343
+ // Smi-smi fast case.
3344
+ Label skip;
3345
+ __ orr(scratch1, left, right);
3346
+ __ JumpIfNotSmi(scratch1, &skip);
3347
+ GenerateSmiSmiOperation(masm);
3348
+ // Fall through if the result is not a smi.
3349
+ __ bind(&skip);
3350
+
3351
+ switch (op_) {
3352
+ case Token::ADD:
3353
+ case Token::SUB:
3354
+ case Token::MUL:
3355
+ case Token::DIV:
3356
+ case Token::MOD: {
3357
+ // Load both operands and check that they are 32-bit integers.
3358
+ // Jump to type transition if they are not. The registers r0 and r1 (right
3359
+ // and left) are preserved for the runtime call.
3360
+ FloatingPointHelper::Destination destination =
3361
+ CpuFeatures::IsSupported(VFP3) &&
3362
+ op_ != Token::MOD ?
3363
+ FloatingPointHelper::kVFPRegisters :
3364
+ FloatingPointHelper::kCoreRegisters;
3365
+
3366
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
3367
+ right,
3368
+ destination,
3369
+ d7,
3370
+ r2,
3371
+ r3,
3372
+ heap_number_map,
3373
+ scratch1,
3374
+ scratch2,
3375
+ s0,
3376
+ &transition);
3377
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
3378
+ left,
3379
+ destination,
3380
+ d6,
3381
+ r4,
3382
+ r5,
3383
+ heap_number_map,
3384
+ scratch1,
3385
+ scratch2,
3386
+ s0,
3387
+ &transition);
3388
+
3389
+ if (destination == FloatingPointHelper::kVFPRegisters) {
3390
+ CpuFeatures::Scope scope(VFP3);
3391
+ Label return_heap_number;
3392
+ switch (op_) {
3393
+ case Token::ADD:
3394
+ __ vadd(d5, d6, d7);
3395
+ break;
3396
+ case Token::SUB:
3397
+ __ vsub(d5, d6, d7);
3398
+ break;
3399
+ case Token::MUL:
3400
+ __ vmul(d5, d6, d7);
3401
+ break;
3402
+ case Token::DIV:
3403
+ __ vdiv(d5, d6, d7);
3404
+ break;
3405
+ default:
3406
+ UNREACHABLE();
3407
+ }
3408
+
3409
+ if (op_ != Token::DIV) {
3410
+ // These operations produce an integer result.
3411
+ // Try to return a smi if we can.
3412
+ // Otherwise return a heap number if allowed, or jump to type
3413
+ // transition.
3414
+
3415
+ __ EmitVFPTruncate(kRoundToZero,
3416
+ single_scratch,
3417
+ d5,
3418
+ scratch1,
3419
+ scratch2);
3420
+
3421
+ if (result_type_ <= TRBinaryOpIC::INT32) {
3422
+ // If the ne condition is set, the result does
+ // not fit in a 32-bit integer.
3424
+ __ b(ne, &transition);
3425
+ }
3426
+
3427
+ // Check if the result fits in a smi.
3428
+ __ vmov(scratch1, single_scratch);
3429
+ __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
3430
+ // If not, try to return a heap number.
3431
+ __ b(mi, &return_heap_number);
3432
+ // Check for minus zero. Return heap number for minus zero.
3433
+ Label not_zero;
3434
+ __ cmp(scratch1, Operand(0));
3435
+ __ b(ne, &not_zero);
3436
+ __ vmov(scratch2, d5.high());
3437
+ __ tst(scratch2, Operand(HeapNumber::kSignMask));
3438
+ __ b(ne, &return_heap_number);
3439
+ __ bind(&not_zero);
3440
+
3441
+ // Tag the result and return.
3442
+ __ SmiTag(r0, scratch1);
3443
+ __ Ret();
3444
+ } else {
3445
+ // DIV just falls through to allocating a heap number.
3446
+ }
3447
+
3448
+ // Parenthesized: >= binds tighter than ?:, which would otherwise
+ // compare result_type_ against the bool (op_ == Token::DIV).
+ if (result_type_ >= ((op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
+ : TRBinaryOpIC::INT32)) {
3450
+ __ bind(&return_heap_number);
3451
+ // We are using vfp registers so r5 is available.
3452
+ heap_number_result = r5;
3453
+ GenerateHeapResultAllocation(masm,
3454
+ heap_number_result,
3455
+ heap_number_map,
3456
+ scratch1,
3457
+ scratch2,
3458
+ &call_runtime);
3459
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3460
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
3461
+ __ mov(r0, heap_number_result);
3462
+ __ Ret();
3463
+ }
3464
+
3465
+ // A DIV operation expecting an integer result falls through
3466
+ // to type transition.
3467
+
3468
+ } else {
3469
+ // We preserved r0 and r1 to be able to call runtime.
3470
+ // Save the left value on the stack.
3471
+ __ Push(r5, r4);
3472
+
3473
+ // Allocate a heap number to store the result.
3474
+ heap_number_result = r5;
3475
+ GenerateHeapResultAllocation(masm,
3476
+ heap_number_result,
3477
+ heap_number_map,
3478
+ scratch1,
3479
+ scratch2,
3480
+ &call_runtime);
3481
+
3482
+ // Load the left value from the value saved on the stack.
3483
+ __ Pop(r1, r0);
3484
+
3485
+ // Call the C function to handle the double operation.
3486
+ FloatingPointHelper::CallCCodeForDoubleOperation(
3487
+ masm, op_, heap_number_result, scratch1);
3488
+ }
3489
+
3490
+ break;
3491
+ }
3492
+
3493
+ case Token::BIT_OR:
3494
+ case Token::BIT_XOR:
3495
+ case Token::BIT_AND:
3496
+ case Token::SAR:
3497
+ case Token::SHR:
3498
+ case Token::SHL: {
3499
+ Label return_heap_number;
3500
+ Register scratch3 = r5;
3501
+ // Convert operands to 32-bit integers. Right in r2 and left in r3. The
3502
+ // registers r0 and r1 (right and left) are preserved for the runtime
3503
+ // call.
3504
+ FloatingPointHelper::LoadNumberAsInt32(masm,
3505
+ left,
3506
+ r3,
3507
+ heap_number_map,
3508
+ scratch1,
3509
+ scratch2,
3510
+ scratch3,
3511
+ d0,
3512
+ &transition);
3513
+ FloatingPointHelper::LoadNumberAsInt32(masm,
3514
+ right,
3515
+ r2,
3516
+ heap_number_map,
3517
+ scratch1,
3518
+ scratch2,
3519
+ scratch3,
3520
+ d0,
3521
+ &transition);
3522
+
3523
+ // The ECMA-262 standard specifies that, for shift operations, only the
3524
+ // 5 least significant bits of the shift value should be used.
3525
+ switch (op_) {
3526
+ case Token::BIT_OR:
3527
+ __ orr(r2, r3, Operand(r2));
3528
+ break;
3529
+ case Token::BIT_XOR:
3530
+ __ eor(r2, r3, Operand(r2));
3531
+ break;
3532
+ case Token::BIT_AND:
3533
+ __ and_(r2, r3, Operand(r2));
3534
+ break;
3535
+ case Token::SAR:
3536
+ __ and_(r2, r2, Operand(0x1f));
3537
+ __ mov(r2, Operand(r3, ASR, r2));
3538
+ break;
3539
+ case Token::SHR:
3540
+ __ and_(r2, r2, Operand(0x1f));
3541
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
3542
+ // SHR is special because it is required to produce a positive answer.
3543
+ // We only get a negative result if the shift value (r2) is 0.
3544
+ // This result cannot be represented as a signed 32-bit integer; try
+ // to return a heap number if we can.
+ // The non-VFP3 code does not support this special case, so jump to
+ // the runtime if VFP3 is unavailable.
3548
+ if (CpuFeatures::IsSupported(VFP3)) {
3549
+ __ b(mi,
3550
+ (result_type_ <= TRBinaryOpIC::INT32) ? &transition
3551
+ : &return_heap_number);
3552
+ } else {
3553
+ __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
3554
+ : &call_runtime);
3555
+ }
3556
+ break;
3557
+ case Token::SHL:
3558
+ __ and_(r2, r2, Operand(0x1f));
3559
+ __ mov(r2, Operand(r3, LSL, r2));
3560
+ break;
3561
+ default:
3562
+ UNREACHABLE();
3563
+ }
3564
+
3565
+ // Check if the result fits in a smi.
3566
+ __ add(scratch1, r2, Operand(0x40000000), SetCC);
3567
+ // If not, try to return a heap number. (We know the result is an int32.)
3568
+ __ b(mi, &return_heap_number);
3569
+ // Tag the result and return.
3570
+ __ SmiTag(r0, r2);
3571
+ __ Ret();
3572
+
3573
+ __ bind(&return_heap_number);
3574
+ if (CpuFeatures::IsSupported(VFP3)) {
3575
+ CpuFeatures::Scope scope(VFP3);
3576
+ heap_number_result = r5;
3577
+ GenerateHeapResultAllocation(masm,
3578
+ heap_number_result,
3579
+ heap_number_map,
3580
+ scratch1,
3581
+ scratch2,
3582
+ &call_runtime);
3583
+
3584
+ if (op_ != Token::SHR) {
3585
+ // Convert the result to a floating point value.
3586
+ __ vmov(double_scratch.low(), r2);
3587
+ __ vcvt_f64_s32(double_scratch, double_scratch.low());
3588
+ } else {
3589
+ // The result must be interpreted as an unsigned 32-bit integer.
3590
+ __ vmov(double_scratch.low(), r2);
3591
+ __ vcvt_f64_u32(double_scratch, double_scratch.low());
3592
+ }
3593
+
3594
+ // Store the result.
3595
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3596
+ __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
3597
+ __ mov(r0, heap_number_result);
3598
+ __ Ret();
3599
+ } else {
3600
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
3601
+ // r3 as scratch. r0 is preserved and returned.
3602
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
3603
+ __ TailCallStub(&stub);
3604
+ }
3605
+
3606
+ break;
3607
+ }
3608
+
3609
+ default:
3610
+ UNREACHABLE();
3611
+ }
3612
+
3613
+ if (transition.is_linked()) {
3614
+ __ bind(&transition);
3615
+ GenerateTypeTransition(masm);
3616
+ }
3617
+
3618
+ __ bind(&call_runtime);
3619
+ GenerateCallRuntime(masm);
2925
3620
  }
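One idiom used twice in this stub is worth spelling out: add(..., Operand(0x40000000), SetCC) followed by a branch on mi tests whether a 32-bit value fits in a 31-bit smi payload, since x + 2^30 leaves the sign bit clear exactly when -2^30 <= x < 2^30. A sketch in portable C++ (illustrative only):

    #include <cstdint>

    // The "add 0x40000000, branch on minus" smi-range test.
    bool FitsInSmi(int32_t x) {
      uint32_t biased = static_cast<uint32_t>(x) + 0x40000000u;
      return (biased & 0x80000000u) == 0;  // sign bit clear => "pl" on ARM
    }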
2926
3621
 
2927
3622
 
2928
- void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2929
- Label not_numbers, call_runtime;
2930
- ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
3623
+ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3624
+ Label call_runtime;
2931
3625
 
2932
- GenerateFPOperation(masm, false, &not_numbers, &call_runtime);
3626
+ if (op_ == Token::ADD) {
3627
+ // Handle string addition here, because it is the only operation
3628
+ // that does not do a ToNumber conversion on the operands.
3629
+ GenerateAddStrings(masm);
3630
+ }
2933
3631
 
2934
- __ bind(&not_numbers);
2935
- GenerateTypeTransition(masm);
3632
+ // Convert oddball arguments to numbers.
3633
+ Label check, done;
3634
+ __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3635
+ __ b(ne, &check);
3636
+ if (Token::IsBitOp(op_)) {
3637
+ __ mov(r1, Operand(Smi::FromInt(0)));
3638
+ } else {
3639
+ __ LoadRoot(r1, Heap::kNanValueRootIndex);
3640
+ }
3641
+ __ jmp(&done);
3642
+ __ bind(&check);
3643
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3644
+ __ b(ne, &done);
3645
+ if (Token::IsBitOp(op_)) {
3646
+ __ mov(r0, Operand(Smi::FromInt(0)));
3647
+ } else {
3648
+ __ LoadRoot(r0, Heap::kNanValueRootIndex);
3649
+ }
3650
+ __ bind(&done);
3651
+
3652
+ GenerateHeapNumberStub(masm);
3653
+ }
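The substitution above follows from the spec: ToNumber(undefined) is NaN, and ECMA-262 ToInt32(NaN) is 0, so bitwise operators can take 0 directly while arithmetic must see NaN. A small sketch (illustrative, not V8 code):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Why the stub loads Smi 0 for bit ops but the NaN root otherwise.
    int main() {
      double as_number = std::nan("");   // ToNumber(undefined)
      int32_t as_int32 = 0;              // ToInt32(NaN) per ECMA-262
      std::printf("undefined + 1 -> %f\n", as_number + 1.0);  // nan
      std::printf("undefined | 1 -> %d\n", as_int32 | 1);     // 1
      return 0;
    }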
3654
+
3655
+
3656
+ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3657
+ Label call_runtime;
3658
+ GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
2936
3659
 
2937
3660
  __ bind(&call_runtime);
2938
3661
  GenerateCallRuntime(masm);
@@ -3069,32 +3792,47 @@ void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3069
3792
 
3070
3793
 
3071
3794
  void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3072
- // Argument is a number and is on stack and in r0.
3073
- Label runtime_call;
3795
+ // Untagged case: double input in d2, double result goes
3796
+ // into d2.
3797
+ // Tagged case: tagged input on top of stack and in r0,
3798
+ // tagged result (heap number) goes into r0.
3799
+
3074
3800
  Label input_not_smi;
3075
3801
  Label loaded;
3802
+ Label calculate;
3803
+ Label invalid_cache;
3804
+ const Register scratch0 = r9;
3805
+ const Register scratch1 = r7;
3806
+ const Register cache_entry = r0;
3807
+ const bool tagged = (argument_type_ == TAGGED);
3076
3808
 
3077
3809
  if (CpuFeatures::IsSupported(VFP3)) {
3078
- // Load argument and check if it is a smi.
3079
- __ JumpIfNotSmi(r0, &input_not_smi);
3080
-
3081
3810
  CpuFeatures::Scope scope(VFP3);
3082
- // Input is a smi. Convert to double and load the low and high words
3083
- // of the double into r2, r3.
3084
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
3085
- __ b(&loaded);
3086
-
3087
- __ bind(&input_not_smi);
3088
- // Check if input is a HeapNumber.
3089
- __ CheckMap(r0,
3090
- r1,
3091
- Heap::kHeapNumberMapRootIndex,
3092
- &runtime_call,
3093
- true);
3094
- // Input is a HeapNumber. Load it to a double register and store the
3095
- // low and high words into r2, r3.
3096
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
3097
-
3811
+ if (tagged) {
3812
+ // Argument is a number and is on stack and in r0.
3813
+ // Load argument and check if it is a smi.
3814
+ __ JumpIfNotSmi(r0, &input_not_smi);
3815
+
3816
+ // Input is a smi. Convert to double and load the low and high words
3817
+ // of the double into r2, r3.
3818
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
3819
+ __ b(&loaded);
3820
+
3821
+ __ bind(&input_not_smi);
3822
+ // Check if input is a HeapNumber.
3823
+ __ CheckMap(r0,
3824
+ r1,
3825
+ Heap::kHeapNumberMapRootIndex,
3826
+ &calculate,
3827
+ true);
3828
+ // Input is a HeapNumber. Load it to a double register and store the
3829
+ // low and high words into r2, r3.
3830
+ __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
3831
+ __ vmov(r2, r3, d0);
3832
+ } else {
3833
+ // Input is untagged double in d2. Output goes to d2.
3834
+ __ vmov(r2, r3, d2);
3835
+ }
3098
3836
  __ bind(&loaded);
3099
3837
  // r2 = low 32 bits of double value
3100
3838
  // r3 = high 32 bits of double value
@@ -3103,24 +3841,28 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3103
3841
  __ eor(r1, r2, Operand(r3));
3104
3842
  __ eor(r1, r1, Operand(r1, ASR, 16));
3105
3843
  __ eor(r1, r1, Operand(r1, ASR, 8));
3106
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
3107
- __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
3844
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3845
+ __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3108
3846
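The cache index computed above xors the two halves of the double, folds in the upper bits, and masks by the power-of-two cache size; each element is 12 bytes (two input words plus a result pointer), which is where the &r0[r1*12] addressing further down comes from. A hedged C++ sketch (illustrative, not the V8 implementation):

    #include <cstdint>
    #include <cstring>

    uint32_t TranscendentalCacheHash(double input, uint32_t cache_size) {
      uint32_t halves[2];
      std::memcpy(halves, &input, sizeof(halves));
      int32_t h = static_cast<int32_t>(halves[0] ^ halves[1]);  // eor r1, r2, r3
      h ^= h >> 16;  // eor with ASR #16 (arithmetic shift, as in the stub)
      h ^= h >> 8;   // eor with ASR #8
      return static_cast<uint32_t>(h) & (cache_size - 1);  // power-of-two mask
    }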
 
3109
3847
  // r2 = low 32 bits of double value.
3110
3848
  // r3 = high 32 bits of double value.
3111
3849
  // r1 = TranscendentalCache::hash(double value).
3112
- __ mov(r0,
3113
- Operand(ExternalReference::transcendental_cache_array_address()));
3114
- // r0 points to cache array.
3115
- __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
3850
+ Isolate* isolate = masm->isolate();
3851
+ ExternalReference cache_array =
3852
+ ExternalReference::transcendental_cache_array_address(isolate);
3853
+ __ mov(cache_entry, Operand(cache_array));
3854
+ // cache_entry points to cache array.
3855
+ int cache_array_index
3856
+ = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
3857
+ __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
3116
3858
  // r0 points to the cache for the type type_.
3117
3859
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
3118
- __ cmp(r0, Operand(0, RelocInfo::NONE));
3119
- __ b(eq, &runtime_call);
3860
+ __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
3861
+ __ b(eq, &invalid_cache);
3120
3862
 
3121
3863
  #ifdef DEBUG
3122
3864
  // Check that the layout of cache elements match expectations.
3123
- { TranscendentalCache::Element test_elem[2];
3865
+ { TranscendentalCache::SubCache::Element test_elem[2];
3124
3866
  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3125
3867
  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3126
3868
  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
@@ -3135,21 +3877,113 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3135
3877
 
3136
3878
 // Find the address of the r1'th entry in the cache, i.e., &r0[r1*12].
3137
3879
  __ add(r1, r1, Operand(r1, LSL, 1));
3138
- __ add(r0, r0, Operand(r1, LSL, 2));
3880
+ __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
3139
3881
  // Check if cache matches: Double value is stored in uint32_t[2] array.
3140
- __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
3882
+ __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
3141
3883
  __ cmp(r2, r4);
3142
- __ b(ne, &runtime_call);
3884
+ __ b(ne, &calculate);
3143
3885
  __ cmp(r3, r5);
3144
- __ b(ne, &runtime_call);
3145
- // Cache hit. Load result, pop argument and return.
3146
- __ mov(r0, Operand(r6));
3147
- __ pop();
3886
+ __ b(ne, &calculate);
3887
+ // Cache hit. Load result, cleanup and return.
3888
+ if (tagged) {
3889
+ // Pop input value from stack and load result into r0.
3890
+ __ pop();
3891
+ __ mov(r0, Operand(r6));
3892
+ } else {
3893
+ // Load result into d2.
3894
+ __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3895
+ }
3896
+ __ Ret();
3897
+ } // if (CpuFeatures::IsSupported(VFP3))
3898
+
3899
+ __ bind(&calculate);
3900
+ if (tagged) {
3901
+ __ bind(&invalid_cache);
3902
+ ExternalReference runtime_function =
3903
+ ExternalReference(RuntimeFunction(), masm->isolate());
3904
+ __ TailCallExternalReference(runtime_function, 1, 1);
3905
+ } else {
3906
+ if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
3907
+ CpuFeatures::Scope scope(VFP3);
3908
+
3909
+ Label no_update;
3910
+ Label skip_cache;
3911
+ const Register heap_number_map = r5;
3912
+
3913
+ // Call C function to calculate the result and update the cache.
3914
+ // Register r0 holds precalculated cache entry address; preserve
3915
+ // it on the stack and pop it into register cache_entry after the
3916
+ // call.
3917
+ __ push(cache_entry);
3918
+ GenerateCallCFunction(masm, scratch0);
3919
+ __ GetCFunctionDoubleResult(d2);
3920
+
3921
+ // Try to update the cache. If we cannot allocate a
3922
+ // heap number, we return the result without updating.
3923
+ __ pop(cache_entry);
3924
+ __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3925
+ __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
3926
+ __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3927
+ __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
3928
+ __ Ret();
3929
+
3930
+ __ bind(&invalid_cache);
3931
+ // The cache is invalid. Call runtime which will recreate the
3932
+ // cache.
3933
+ __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3934
+ __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
3935
+ __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3936
+ __ EnterInternalFrame();
3937
+ __ push(r0);
3938
+ __ CallRuntime(RuntimeFunction(), 1);
3939
+ __ LeaveInternalFrame();
3940
+ __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3941
+ __ Ret();
3942
+
3943
+ __ bind(&skip_cache);
3944
+ // Call C function to calculate the result and answer directly
3945
+ // without updating the cache.
3946
+ GenerateCallCFunction(masm, scratch0);
3947
+ __ GetCFunctionDoubleResult(d2);
3948
+ __ bind(&no_update);
3949
+
3950
+ // We return the value in d2 without adding it to the cache, but
3951
+ // we cause a scavenging GC so that future allocations will succeed.
3952
+ __ EnterInternalFrame();
3953
+
3954
+ // Allocate an aligned object larger than a HeapNumber.
3955
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3956
+ __ mov(scratch0, Operand(4 * kPointerSize));
3957
+ __ push(scratch0);
3958
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3959
+ __ LeaveInternalFrame();
3148
3960
  __ Ret();
3149
3961
  }
3962
+ }
3963
+
3964
+
3965
+ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3966
+ Register scratch) {
3967
+ Isolate* isolate = masm->isolate();
3150
3968
 
3151
- __ bind(&runtime_call);
3152
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
3969
+ __ push(lr);
3970
+ __ PrepareCallCFunction(2, scratch);
3971
+ __ vmov(r0, r1, d2);
3972
+ switch (type_) {
3973
+ case TranscendentalCache::SIN:
3974
+ __ CallCFunction(ExternalReference::math_sin_double_function(isolate), 2);
3975
+ break;
3976
+ case TranscendentalCache::COS:
3977
+ __ CallCFunction(ExternalReference::math_cos_double_function(isolate), 2);
3978
+ break;
3979
+ case TranscendentalCache::LOG:
3980
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate), 2);
3981
+ break;
3982
+ default:
3983
+ UNIMPLEMENTED();
3984
+ break;
3985
+ }
3986
+ __ pop(lr);
3153
3987
  }
3154
3988
 
3155
3989
 
@@ -3306,6 +4140,113 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
3306
4140
  }
3307
4141
 
3308
4142
 
4143
+ void MathPowStub::Generate(MacroAssembler* masm) {
4144
+ Label call_runtime;
4145
+
4146
+ if (CpuFeatures::IsSupported(VFP3)) {
4147
+ CpuFeatures::Scope scope(VFP3);
4148
+
4149
+ Label base_not_smi;
4150
+ Label exponent_not_smi;
4151
+ Label convert_exponent;
4152
+
4153
+ const Register base = r0;
4154
+ const Register exponent = r1;
4155
+ const Register heapnumbermap = r5;
4156
+ const Register heapnumber = r6;
4157
+ const DoubleRegister double_base = d0;
4158
+ const DoubleRegister double_exponent = d1;
4159
+ const DoubleRegister double_result = d2;
4160
+ const SwVfpRegister single_scratch = s0;
4161
+ const Register scratch = r9;
4162
+ const Register scratch2 = r7;
4163
+
4164
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
4165
+ __ ldr(base, MemOperand(sp, 1 * kPointerSize));
4166
+ __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
4167
+
4168
+ // Convert base to double value and store it in d0.
4169
+ __ JumpIfNotSmi(base, &base_not_smi);
4170
+ // Base is a Smi. Untag and convert it.
4171
+ __ SmiUntag(base);
4172
+ __ vmov(single_scratch, base);
4173
+ __ vcvt_f64_s32(double_base, single_scratch);
4174
+ __ b(&convert_exponent);
4175
+
4176
+ __ bind(&base_not_smi);
4177
+ __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
4178
+ __ cmp(scratch, heapnumbermap);
4179
+ __ b(ne, &call_runtime);
4180
+ // Base is a heapnumber. Load it into double register.
4181
+ __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
4182
+
4183
+ __ bind(&convert_exponent);
4184
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
4185
+ __ SmiUntag(exponent);
4186
+
4187
+ // The base is in a double register and the exponent is
4188
+ // an untagged smi. Allocate a heap number and call a
4189
+ // C function for integer exponents. The register containing
4190
+ // the heap number is callee-saved.
4191
+ __ AllocateHeapNumber(heapnumber,
4192
+ scratch,
4193
+ scratch2,
4194
+ heapnumbermap,
4195
+ &call_runtime);
4196
+ __ push(lr);
4197
+ __ PrepareCallCFunction(3, scratch);
4198
+ __ mov(r2, exponent);
4199
+ __ vmov(r0, r1, double_base);
4200
+ __ CallCFunction(
4201
+ ExternalReference::power_double_int_function(masm->isolate()), 3);
4202
+ __ pop(lr);
4203
+ __ GetCFunctionDoubleResult(double_result);
4204
+ __ vstr(double_result,
4205
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
4206
+ __ mov(r0, heapnumber);
4207
+ __ Ret(2 * kPointerSize);
4208
+
4209
+ __ bind(&exponent_not_smi);
4210
+ __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
4211
+ __ cmp(scratch, heapnumbermap);
4212
+ __ b(ne, &call_runtime);
4213
+ // Exponent is a heapnumber. Load it into double register.
4214
+ __ vldr(double_exponent,
4215
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
4216
+
4217
+ // The base and the exponent are in double registers.
4218
+ // Allocate a heap number and call a C function for
4219
+ // double exponents. The register containing
4220
+ // the heap number is callee-saved.
4221
+ __ AllocateHeapNumber(heapnumber,
4222
+ scratch,
4223
+ scratch2,
4224
+ heapnumbermap,
4225
+ &call_runtime);
4226
+ __ push(lr);
4227
+ __ PrepareCallCFunction(4, scratch);
4228
+ __ vmov(r0, r1, double_base);
4229
+ __ vmov(r2, r3, double_exponent);
4230
+ __ CallCFunction(
4231
+ ExternalReference::power_double_double_function(masm->isolate()), 4);
4232
+ __ pop(lr);
4233
+ __ GetCFunctionDoubleResult(double_result);
4234
+ __ vstr(double_result,
4235
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
4236
+ __ mov(r0, heapnumber);
4237
+ __ Ret(2 * kPointerSize);
4238
+ }
4239
+
4240
+ __ bind(&call_runtime);
4241
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
4242
+ }
4243
+
4244
+
4245
+ bool CEntryStub::NeedsImmovableCode() {
4246
+ return true;
4247
+ }
4248
+
4249
+
3309
4250
  void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
3310
4251
  __ Throw(r0);
3311
4252
  }
@@ -3327,15 +4268,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
3327
4268
  // r4: number of arguments including receiver (C callee-saved)
3328
4269
  // r5: pointer to builtin function (C callee-saved)
3329
4270
  // r6: pointer to the first argument (C callee-saved)
4271
+ Isolate* isolate = masm->isolate();
3330
4272
 
3331
4273
  if (do_gc) {
3332
4274
  // Passing r0.
3333
4275
  __ PrepareCallCFunction(1, r1);
3334
- __ CallCFunction(ExternalReference::perform_gc_function(), 1);
4276
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1);
3335
4277
  }
3336
4278
 
3337
4279
  ExternalReference scope_depth =
3338
- ExternalReference::heap_always_allocate_scope_depth();
4280
+ ExternalReference::heap_always_allocate_scope_depth(isolate);
3339
4281
  if (always_allocate) {
3340
4282
  __ mov(r0, Operand(scope_depth));
3341
4283
  __ ldr(r1, MemOperand(r0));
@@ -3364,6 +4306,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
3364
4306
  }
3365
4307
  #endif
3366
4308
 
4309
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
4310
+
4311
+
3367
4312
  // TODO(1242173): To let the GC traverse the return address of the exit
3368
4313
  // frames, we need to know where the return address is. Right now,
3369
4314
  // we store it on the stack to be able to find it again, but we never
@@ -3417,15 +4362,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
3417
4362
  __ b(eq, throw_out_of_memory_exception);
3418
4363
 
3419
4364
  // Retrieve the pending exception and clear the variable.
3420
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
4365
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
3421
4366
  __ ldr(r3, MemOperand(ip));
3422
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
4367
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
4368
+ isolate)));
3423
4369
  __ ldr(r0, MemOperand(ip));
3424
4370
  __ str(r3, MemOperand(ip));
3425
4371
 
3426
4372
  // Special handling of termination exceptions which are uncatchable
3427
4373
  // by javascript code.
3428
- __ cmp(r0, Operand(Factory::termination_exception()));
4374
+ __ cmp(r0, Operand(isolate->factory()->termination_exception()));
3429
4375
  __ b(eq, throw_termination_exception);
3430
4376
 
3431
4377
  // Handle normal exception.
@@ -3533,11 +4479,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3533
4479
  // r2: receiver
3534
4480
  // r3: argc
3535
4481
  // r4: argv
4482
+ Isolate* isolate = masm->isolate();
3536
4483
  __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3537
4484
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3538
4485
  __ mov(r7, Operand(Smi::FromInt(marker)));
3539
4486
  __ mov(r6, Operand(Smi::FromInt(marker)));
3540
- __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
4487
+ __ mov(r5,
4488
+ Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
3541
4489
  __ ldr(r5, MemOperand(r5));
3542
4490
  __ Push(r8, r7, r6, r5);
3543
4491
 
@@ -3546,7 +4494,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3546
4494
 
3547
4495
  #ifdef ENABLE_LOGGING_AND_PROFILING
3548
4496
  // If this is the outermost JS call, set js_entry_sp value.
3549
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
4497
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
3550
4498
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
3551
4499
  __ ldr(r6, MemOperand(r5));
3552
4500
  __ cmp(r6, Operand(0, RelocInfo::NONE));
@@ -3560,7 +4508,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3560
4508
  // exception field in the JSEnv and return a failure sentinel.
3561
4509
  // Coming in here the fp will be invalid because the PushTryHandler below
3562
4510
  // sets it to 0 to signal the existence of the JSEntry frame.
3563
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
4511
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
4512
+ isolate)));
3564
4513
  __ str(r0, MemOperand(ip));
3565
4514
  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3566
4515
  __ b(&exit);
@@ -3575,9 +4524,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3575
4524
  // saved values before returning a failure to C.
3576
4525
 
3577
4526
  // Clear any pending exceptions.
3578
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
4527
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
3579
4528
  __ ldr(r5, MemOperand(ip));
3580
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
4529
+ __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address,
4530
+ isolate)));
3581
4531
  __ str(r5, MemOperand(ip));
3582
4532
 
3583
4533
  // Invoke the function by calling through JS entry trampoline builtin.
@@ -3591,10 +4541,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3591
4541
  // r3: argc
3592
4542
  // r4: argv
3593
4543
  if (is_construct) {
3594
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
4544
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4545
+ isolate);
3595
4546
  __ mov(ip, Operand(construct_entry));
3596
4547
  } else {
3597
- ExternalReference entry(Builtins::JSEntryTrampoline);
4548
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
3598
4549
  __ mov(ip, Operand(entry));
3599
4550
  }
3600
4551
  __ ldr(ip, MemOperand(ip)); // deref address
@@ -3610,7 +4561,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3610
4561
  // displacement since the current stack pointer (sp) points directly
3611
4562
  // to the stack handler.
3612
4563
  __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
3613
- __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
4564
+ __ mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate)));
3614
4565
  __ str(r3, MemOperand(ip));
3615
4566
  // No need to restore registers
3616
4567
  __ add(sp, sp, Operand(StackHandlerConstants::kSize));
@@ -3628,7 +4579,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3628
4579
  __ bind(&exit); // r0 holds result
3629
4580
  // Restore the top frame descriptors from the stack.
3630
4581
  __ pop(r3);
3631
- __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
4582
+ __ mov(ip,
4583
+ Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate)));
3632
4584
  __ str(r3, MemOperand(ip));
3633
4585
 
3634
4586
  // Reset the stack to the callee saved registers.
@@ -3785,7 +4737,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
3785
4737
  __ b(ne, &slow);
3786
4738
 
3787
4739
  // Null is not instance of anything.
3788
- __ cmp(scratch, Operand(Factory::null_value()));
4740
+ __ cmp(scratch, Operand(FACTORY->null_value()));
3789
4741
  __ b(ne, &object_not_null);
3790
4742
  __ mov(r0, Operand(Smi::FromInt(1)));
3791
4743
  __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -3912,7 +4864,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
3912
4864
  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
3913
4865
  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
3914
4866
  __ bind(&add_arguments_object);
3915
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
4867
+ __ add(r1, r1, Operand(GetArgumentsObjectSize() / kPointerSize));
3916
4868
 
3917
4869
  // Do the allocation of both objects in one go.
3918
4870
  __ AllocateInNewSpace(
@@ -3924,23 +4876,28 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
3924
4876
  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
3925
4877
 
3926
4878
  // Get the arguments boilerplate from the current (global) context.
3927
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
3928
4879
  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
3929
4880
  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
3930
- __ ldr(r4, MemOperand(r4, offset));
4881
+ __ ldr(r4, MemOperand(r4,
4882
+ Context::SlotOffset(GetArgumentsBoilerplateIndex())));
3931
4883
 
3932
4884
  // Copy the JS object part.
3933
4885
  __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
3934
4886
 
3935
- // Setup the callee in-object property.
3936
- STATIC_ASSERT(Heap::arguments_callee_index == 0);
3937
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
3938
- __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
4887
+ if (type_ == NEW_NON_STRICT) {
4888
+ // Setup the callee in-object property.
4889
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4890
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
4891
+ const int kCalleeOffset = JSObject::kHeaderSize +
4892
+ Heap::kArgumentsCalleeIndex * kPointerSize;
4893
+ __ str(r3, FieldMemOperand(r0, kCalleeOffset));
4894
+ }
3939
4895
 
3940
4896
  // Get the length (smi tagged) and set that as an in-object property too.
3941
- STATIC_ASSERT(Heap::arguments_length_index == 1);
4897
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3942
4898
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
3943
- __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
4899
+ __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
4900
+ Heap::kArgumentsLengthIndex * kPointerSize));
3944
4901
 
3945
4902
  // If there are no actual arguments, we're done.
3946
4903
  Label done;
@@ -3952,7 +4909,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
3952
4909
 
3953
4910
  // Setup the elements pointer in the allocated arguments object and
3954
4911
  // initialize the header in the elements fixed array.
3955
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
4912
+ __ add(r4, r0, Operand(GetArgumentsObjectSize()));
3956
4913
  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
3957
4914
  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
3958
4915
  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
@@ -4019,10 +4976,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
4019
4976
  Register last_match_info_elements = r6;
4020
4977
 
4021
4978
  // Ensure that a RegExp stack is allocated.
4979
+ Isolate* isolate = masm->isolate();
4022
4980
  ExternalReference address_of_regexp_stack_memory_address =
4023
- ExternalReference::address_of_regexp_stack_memory_address();
4981
+ ExternalReference::address_of_regexp_stack_memory_address(isolate);
4024
4982
  ExternalReference address_of_regexp_stack_memory_size =
4025
- ExternalReference::address_of_regexp_stack_memory_size();
4983
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
4026
4984
  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
4027
4985
  __ ldr(r0, MemOperand(r0, 0));
4028
4986
  __ tst(r0, Operand(r0));
@@ -4163,7 +5121,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
4163
5121
  __ CompareObjectType(r7, r0, r0, CODE_TYPE);
4164
5122
  __ b(ne, &runtime);
4165
5123
 
4166
- // r3: encoding of subject string (1 if ascii, 0 if two_byte);
5124
+ // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4167
5125
  // r7: code
4168
5126
  // subject: Subject string
4169
5127
  // regexp_data: RegExp data (FixedArray)
@@ -4173,20 +5131,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
4173
5131
  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
4174
5132
 
4175
5133
  // r1: previous index
4176
- // r3: encoding of subject string (1 if ascii, 0 if two_byte);
5134
+ // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4177
5135
  // r7: code
4178
5136
  // subject: Subject string
4179
5137
  // regexp_data: RegExp data (FixedArray)
4180
5138
  // All checks done. Now push arguments for native regexp code.
4181
- __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
5139
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
4182
5140
 
4183
- static const int kRegExpExecuteArguments = 7;
5141
+ // Isolates: note we add an additional parameter here (isolate pointer).
5142
+ static const int kRegExpExecuteArguments = 8;
4184
5143
  static const int kParameterRegisters = 4;
4185
5144
  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4186
5145
 
4187
5146
  // Stack pointer now points to cell where return address is to be written.
4188
5147
  // Arguments are before that on the stack or in registers.
4189
5148
 
5149
+ // Argument 8 (sp[16]): Pass current isolate address.
5150
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
5151
+ __ str(r0, MemOperand(sp, 4 * kPointerSize));
5152
+
4190
5153
  // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
4191
5154
  __ mov(r0, Operand(1));
4192
5155
  __ str(r0, MemOperand(sp, 3 * kPointerSize));
@@ -4200,7 +5163,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
4200
5163
  __ str(r0, MemOperand(sp, 2 * kPointerSize));
4201
5164
 
4202
5165
  // Argument 5 (sp[4]): static offsets vector buffer.
4203
- __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
5166
+ __ mov(r0,
5167
+ Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
4204
5168
  __ str(r0, MemOperand(sp, 1 * kPointerSize));
4205
5169
 
4206
5170
  // For arguments 4 and 3 get string length, calculate start of string data and
@@ -4248,9 +5212,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
4248
5212
  // stack overflow (on the backtrack stack) was detected in RegExp code but
4249
5213
  // haven't created the exception yet. Handle that in the runtime system.
4250
5214
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4251
- __ mov(r1, Operand(ExternalReference::the_hole_value_location()));
5215
+ __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
4252
5216
  __ ldr(r1, MemOperand(r1, 0));
4253
- __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
5217
+ __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
5218
+ isolate)));
4254
5219
  __ ldr(r0, MemOperand(r2, 0));
4255
5220
  __ cmp(r0, r1);
4256
5221
  __ b(eq, &runtime);
@@ -4270,7 +5235,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
4270
5235
 
4271
5236
  __ bind(&failure);
4272
5237
  // For failure and exception return null.
4273
- __ mov(r0, Operand(Factory::null_value()));
5238
+ __ mov(r0, Operand(FACTORY->null_value()));
4274
5239
  __ add(sp, sp, Operand(4 * kPointerSize));
4275
5240
  __ Ret();
4276
5241
 
@@ -4303,7 +5268,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
4303
5268
 
4304
5269
  // Get the static offsets vector filled by the native regexp code.
4305
5270
  ExternalReference address_of_static_offsets_vector =
4306
- ExternalReference::address_of_static_offsets_vector();
5271
+ ExternalReference::address_of_static_offsets_vector(isolate);
4307
5272
  __ mov(r2, Operand(address_of_static_offsets_vector));
4308
5273
 
4309
5274
  // r1: number of capture registers
@@ -4375,7 +5340,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4375
5340
  // Interleave operations for better latency.
4376
5341
  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
4377
5342
  __ add(r3, r0, Operand(JSRegExpResult::kSize));
4378
- __ mov(r4, Operand(Factory::empty_fixed_array()));
5343
+ __ mov(r4, Operand(FACTORY->empty_fixed_array()));
4379
5344
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
4380
5345
  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
4381
5346
  __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
@@ -4396,13 +5361,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4396
5361
  // r5: Number of elements in array, untagged.
4397
5362
 
4398
5363
  // Set map.
4399
- __ mov(r2, Operand(Factory::fixed_array_map()));
5364
+ __ mov(r2, Operand(FACTORY->fixed_array_map()));
4400
5365
  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
4401
5366
  // Set FixedArray length.
4402
5367
  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
4403
5368
  __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
4404
5369
  // Fill contents of fixed-array with the-hole.
4405
- __ mov(r2, Operand(Factory::the_hole_value()));
5370
+ __ mov(r2, Operand(FACTORY->the_hole_value()));
4406
5371
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4407
5372
  // Fill fixed array elements with hole.
4408
5373
  // r0: JSArray, tagged.
@@ -4479,7 +5444,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
4479
5444
  __ mov(r0, Operand(argc_)); // Setup the number of arguments.
4480
5445
  __ mov(r2, Operand(0, RelocInfo::NONE));
4481
5446
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
4482
- __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
5447
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
4483
5448
  RelocInfo::CODE_TARGET);
4484
5449
  }
4485
5450
 
@@ -4492,7 +5457,8 @@ const char* CompareStub::GetName() {
4492
5457
 
4493
5458
  if (name_ != NULL) return name_;
4494
5459
  const int kMaxNameLength = 100;
4495
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
5460
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
5461
+ kMaxNameLength);
4496
5462
  if (name_ == NULL) return "OOM";
4497
5463
 
4498
5464
  const char* cc_name;
@@ -4705,7 +5671,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
4705
5671
  __ b(ne, &slow_case_);
4706
5672
 
4707
5673
  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
4708
- // At this point code register contains smi tagged ascii char code.
5674
+ // At this point code register contains smi tagged ASCII char code.
4709
5675
  STATIC_ASSERT(kSmiTag == 0);
4710
5676
  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
4711
5677
  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
@@ -5037,7 +6003,6 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5037
6003
  Register symbol_table = c2;
5038
6004
  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5039
6005
 
5040
- // Load undefined value
5041
6006
  Register undefined = scratch4;
5042
6007
  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5043
6008
 
@@ -5058,6 +6023,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5058
6023
  // mask: capacity mask
5059
6024
  // first_symbol_table_element: address of the first element of
5060
6025
  // the symbol table
6026
+ // undefined: the undefined object
5061
6027
  // scratch: -
5062
6028
 
5063
6029
  // Perform a number of probes in the symbol table.
@@ -5085,20 +6051,32 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
  kPointerSizeLog2));

  // If entry is undefined no string with this hash can be found.
- __ cmp(candidate, undefined);
+ Label is_string;
+ __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
+ __ b(ne, &is_string);
+
+ __ cmp(undefined, candidate);
  __ b(eq, not_found);
+ // Must be null (deleted entry).
+ if (FLAG_debug_code) {
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(ip, candidate);
+ __ Assert(eq, "oddball in symbol table is not undefined or null");
+ }
+ __ jmp(&next_probe[i]);
+
+ __ bind(&is_string);
+
+ // Check that the candidate is a non-external ASCII string. The instance
+ // type is still in the scratch register from the CompareObjectType
+ // operation.
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);

  // If length is not 2 the string is not a candidate.
  __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
  __ cmp(scratch, Operand(Smi::FromInt(2)));
  __ b(ne, &next_probe[i]);

- // Check that the candidate is a non-external ascii string.
- __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
- &next_probe[i]);
-
  // Check if the two characters match.
  // Assumes that word load is little endian.
  __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
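The rewritten probe above now distinguishes two oddballs: undefined means the slot was never used, so the lookup can stop, while null marks a deleted entry, so probing must continue. A generic open-addressing sketch of that rule (not V8 code; assumes the table always keeps at least one empty slot, as V8's does):

    enum SlotState { kEmpty, kDeleted, kOccupied };  // undefined / null / string
    struct Slot { SlotState state; int key; };
    // Returns the slot index holding key, or -1 once an empty slot proves the
    // key is absent. Deleted slots are skipped rather than treated as misses.
    int Lookup(const Slot* table, int mask, int hash, int key) {
      for (int i = 0; ; i++) {
        int index = (hash + i) & mask;  // linear probe for simplicity
        const Slot& s = table[index];
        if (s.state == kEmpty) return -1;
        if (s.state == kDeleted) continue;
        if (s.key == key) return index;
      }
    }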
@@ -5173,7 +6151,6 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  static const int kFromOffset = 1 * kPointerSize;
  static const int kStringOffset = 2 * kPointerSize;

-
  // Check bounds and smi-ness.
  Register to = r6;
  Register from = r7;
@@ -5254,7 +6231,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  // r3: from index (untagged smi)
  // r5: string.
  // r7 (a.k.a. from): from offset (smi)
- // Check for flat ascii string.
+ // Check for flat ASCII string.
  Label non_ascii_flat;
  __ tst(r1, Operand(kStringEncodingMask));
  STATIC_ASSERT(kTwoByteStringTag == 0);
@@ -5274,7 +6251,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
  masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

@@ -5283,7 +6261,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  __ bind(&make_two_character_string);
  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

@@ -5309,7 +6287,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
  COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

@@ -5341,7 +6319,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(
  masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

@@ -5413,6 +6391,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
  void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

+ Counters* counters = masm->isolate()->counters();
+
  // Stack frame on entry.
  // sp[0]: right string
  // sp[4]: left string
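The counter hunks above and below apply one mechanical change: static Counters members (e.g. &Counters::string_compare_native) become getters on the isolate's Counters object, so stub statistics stay per-VM instead of shared process-wide. The resulting idiom, exactly as it appears in the new code:

    Counters* counters = masm->isolate()->counters();
    __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);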
@@ -5424,17 +6404,17 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+ __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&not_same);

- // Check that both objects are sequential ascii strings.
+ // Check that both objects are sequential ASCII strings.
  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);

- // Compare flat ascii strings natively. Remove arguments from stack first.
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ // Compare flat ASCII strings natively. Remove arguments from stack first.
+ __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);

@@ -5449,6 +6429,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  Label string_add_runtime, call_builtin;
  Builtins::JavaScript builtin_id = Builtins::ADD;

+ Counters* counters = masm->isolate()->counters();
+
  // Stack on entry:
  // sp[0]: second argument (right).
  // sp[4]: first argument (left).
@@ -5504,7 +6486,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ cmp(r3, Operand(Smi::FromInt(0)), ne);
  __ b(ne, &strings_not_empty); // If either string was empty, return r0.

- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

@@ -5525,12 +6507,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  // Adding two lengths can't overflow.
  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
  __ add(r6, r2, Operand(r3));
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
+ // Use the symbol table when adding two one character strings, as it
+ // helps later optimizations to return a symbol here.
  __ cmp(r6, Operand(2));
  __ b(ne, &longer_than_two);

- // Check that both strings are non-external ascii strings.
+ // Check that both strings are non-external ASCII strings.
  if (flags_ != NO_STRING_ADD_FLAGS) {
  __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
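The rewritten comment in the hunk above records a design choice: a two-character result is produced via the symbol table so the stub hands back a symbol, letting later code compare interned strings by identity rather than by content. A generic interning sketch (not V8's implementation):

    #include <string>
    #include <unordered_set>

    // Interning maps equal contents to one canonical object, so equality
    // checks on interned strings reduce to a pointer comparison.
    const std::string* Intern(std::unordered_set<std::string>* pool,
                              const std::string& s) {
      return &*pool->insert(s).first;  // existing entry is reused if present
    }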
@@ -5549,7 +6531,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
  masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

@@ -5562,7 +6544,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ mov(r6, Operand(2));
  __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
  __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

@@ -5578,7 +6560,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ b(hs, &string_add_runtime);

  // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ascii the result is an ascii cons string.
+ // If both strings are ASCII the result is an ASCII cons string.
  if (flags_ != NO_STRING_ADD_FLAGS) {
  __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -5599,13 +6581,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
  __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii);
  // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
+ // to contain only ASCII characters.
  // r4: first instance type.
  // r5: second instance type.
  __ tst(r4, Operand(kAsciiDataHintMask));
@@ -5681,7 +6663,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  // r7: result string.
  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
  __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

@@ -5722,7 +6704,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);

  __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

@@ -5786,56 +6768,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
  }


- void StringCharAtStub::Generate(MacroAssembler* masm) {
- // Expects two arguments (object, index) on the stack:
- // lr: return address
- // sp[0]: index
- // sp[4]: object
- Register object = r1;
- Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
- Register result = r0;
-
- // Get object and index from the stack.
- __ pop(index);
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ b(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result, Operand(Smi::FromInt(0)));
- __ b(&done);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm, call_helper);
-
- __ bind(&done);
- __ Ret();
- }
-
-
  void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  Label miss;
@@ -5934,7 +6866,8 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  __ push(lr);

  // Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
  __ EnterInternalFrame();
  __ Push(r1, r0);
  __ mov(ip, Operand(Smi::FromInt(op_)));
@@ -5957,11 +6890,10 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {


  void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ApiFunction *function) {
+ ExternalReference function) {
  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
  RelocInfo::CODE_TARGET));
- __ mov(r2,
- Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+ __ mov(r2, Operand(function));
  // Push return address (accessible to GC through exit frame pc).
  __ str(pc, MemOperand(sp, 0));
  __ Jump(r2); // Call the api function.
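DirectCEntryStub::GenerateCall now takes a ready-made ExternalReference instead of a raw ApiFunction*, since building the reference requires the isolate that the old signature could not supply. Call sites change roughly like this (a sketch; the exact constructor arguments at the new call sites are an assumption, not shown in this diff):

    // Old: the stub wrapped the function itself.
    //   stub.GenerateCall(masm, &api_function);
    // New: the caller builds the isolate-aware reference up front.
    //   stub.GenerateCall(masm, ExternalReference(&api_function,
    //                                             ExternalReference::DIRECT_CALL,
    //                                             masm->isolate()));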
@@ -5978,158 +6910,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
  }


- void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - set to be the receiver's elements on exit.
- //
- // elements_map - set to be the map of the receiver's elements
- // on exit.
- //
- // result - holds the result of the pixel array load on exit,
- // tagged as a smi if successful.
- //
- // Scratch registers:
- //
- // scratch1 - used a scratch register in map check, if map
- // check is successful, contains the length of the
- // pixel array, the pointer to external elements and
- // the untagged result.
- //
- // scratch2 - holds the untaged key.
-
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit check
- // to ensure the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
- }
- __ SmiUntag(scratch2, key);
-
- // Verify that the receiver has pixel array elements.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
- not_pixel_array, true);
-
- // Key must be in range of the pixel array.
- __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
- __ cmp(scratch2, scratch1);
- __ b(hs, out_of_range); // unsigned check handles negative keys.
-
- // Perform the indexed load and tag the result as a smi.
- __ ldr(scratch1,
- FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
- __ ldrb(scratch1, MemOperand(scratch1, scratch2));
- __ SmiTag(r0, scratch1);
- __ Ret();
- }
-
-
- void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register elements_map,
- Register scratch1,
- Register scratch2,
- bool load_elements_from_receiver,
- bool load_elements_map_from_elements,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged unless the
- // store succeeds.
- // key - holds the key (must be a smi) and is unchanged.
- // value - holds the value (must be a smi) and is unchanged.
- // elements - holds the element object of the receiver on entry if
- // load_elements_from_receiver is false, otherwise used
- // internally to store the pixel arrays elements and
- // external array pointer.
- // elements_map - holds the map of the element object if
- // load_elements_map_from_elements is false, otherwise
- // loaded with the element map.
- //
- Register external_pointer = elements;
- Register untagged_key = scratch1;
- Register untagged_value = scratch2;
-
- if (load_elements_from_receiver) {
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- }
-
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- if (load_elements_map_from_elements) {
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- }
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(elements_map, ip);
- __ b(ne, not_pixel_array);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(elements_map, ip);
- __ Assert(eq, "Elements isn't a pixel array");
- }
- }
-
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit check
- // to ensure the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
- }
-
- __ SmiUntag(untagged_key, key);
-
- // Perform bounds check.
- __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
- __ cmp(untagged_key, scratch2);
- __ b(hs, out_of_range); // unsigned check handles negative keys.
-
- __ JumpIfNotSmi(value, value_not_smi);
- __ SmiUntag(untagged_value, value);
-
- // Clamp the value to [0..255].
- __ Usat(untagged_value, 8, Operand(untagged_value));
- // Get the pointer to the external array. This clobbers elements.
- __ ldr(external_pointer,
- FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
- __ strb(untagged_value, MemOperand(external_pointer, untagged_key));
- __ Ret();
- }
-
-
  #undef __

  } } // namespace v8::internal