libv8 3.11.8.17 → 3.16.14.0

Files changed (754)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +1 -2
  3. data/Gemfile +1 -1
  4. data/Rakefile +6 -7
  5. data/lib/libv8/version.rb +1 -1
  6. data/vendor/v8/.gitignore +24 -3
  7. data/vendor/v8/AUTHORS +7 -0
  8. data/vendor/v8/ChangeLog +839 -0
  9. data/vendor/v8/DEPS +1 -1
  10. data/vendor/v8/Makefile.android +92 -0
  11. data/vendor/v8/OWNERS +11 -0
  12. data/vendor/v8/PRESUBMIT.py +71 -0
  13. data/vendor/v8/SConstruct +34 -39
  14. data/vendor/v8/build/android.gypi +56 -37
  15. data/vendor/v8/build/common.gypi +112 -30
  16. data/vendor/v8/build/gyp_v8 +1 -1
  17. data/vendor/v8/build/standalone.gypi +15 -11
  18. data/vendor/v8/include/v8-debug.h +9 -1
  19. data/vendor/v8/include/v8-preparser.h +4 -3
  20. data/vendor/v8/include/v8-profiler.h +25 -25
  21. data/vendor/v8/include/v8-testing.h +4 -3
  22. data/vendor/v8/include/v8.h +994 -540
  23. data/vendor/v8/preparser/preparser-process.cc +3 -3
  24. data/vendor/v8/samples/lineprocessor.cc +20 -27
  25. data/vendor/v8/samples/process.cc +18 -14
  26. data/vendor/v8/samples/shell.cc +16 -15
  27. data/vendor/v8/src/SConscript +15 -14
  28. data/vendor/v8/src/accessors.cc +169 -77
  29. data/vendor/v8/src/accessors.h +4 -0
  30. data/vendor/v8/src/allocation-inl.h +2 -2
  31. data/vendor/v8/src/allocation.h +7 -7
  32. data/vendor/v8/src/api.cc +810 -497
  33. data/vendor/v8/src/api.h +85 -60
  34. data/vendor/v8/src/arm/assembler-arm-inl.h +179 -22
  35. data/vendor/v8/src/arm/assembler-arm.cc +633 -264
  36. data/vendor/v8/src/arm/assembler-arm.h +264 -197
  37. data/vendor/v8/src/arm/builtins-arm.cc +117 -27
  38. data/vendor/v8/src/arm/code-stubs-arm.cc +1241 -700
  39. data/vendor/v8/src/arm/code-stubs-arm.h +35 -138
  40. data/vendor/v8/src/arm/codegen-arm.cc +285 -16
  41. data/vendor/v8/src/arm/codegen-arm.h +22 -0
  42. data/vendor/v8/src/arm/constants-arm.cc +5 -3
  43. data/vendor/v8/src/arm/constants-arm.h +24 -11
  44. data/vendor/v8/src/arm/debug-arm.cc +3 -3
  45. data/vendor/v8/src/arm/deoptimizer-arm.cc +382 -92
  46. data/vendor/v8/src/arm/disasm-arm.cc +61 -12
  47. data/vendor/v8/src/arm/frames-arm.h +0 -14
  48. data/vendor/v8/src/arm/full-codegen-arm.cc +332 -304
  49. data/vendor/v8/src/arm/ic-arm.cc +180 -259
  50. data/vendor/v8/src/arm/lithium-arm.cc +364 -316
  51. data/vendor/v8/src/arm/lithium-arm.h +512 -275
  52. data/vendor/v8/src/arm/lithium-codegen-arm.cc +1768 -809
  53. data/vendor/v8/src/arm/lithium-codegen-arm.h +97 -35
  54. data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +12 -5
  55. data/vendor/v8/src/arm/macro-assembler-arm.cc +439 -228
  56. data/vendor/v8/src/arm/macro-assembler-arm.h +116 -70
  57. data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +54 -44
  58. data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +3 -10
  59. data/vendor/v8/src/arm/simulator-arm.cc +272 -238
  60. data/vendor/v8/src/arm/simulator-arm.h +38 -8
  61. data/vendor/v8/src/arm/stub-cache-arm.cc +522 -895
  62. data/vendor/v8/src/array.js +101 -70
  63. data/vendor/v8/src/assembler.cc +270 -19
  64. data/vendor/v8/src/assembler.h +110 -15
  65. data/vendor/v8/src/ast.cc +79 -69
  66. data/vendor/v8/src/ast.h +255 -301
  67. data/vendor/v8/src/atomicops.h +7 -1
  68. data/vendor/v8/src/atomicops_internals_tsan.h +335 -0
  69. data/vendor/v8/src/bootstrapper.cc +481 -418
  70. data/vendor/v8/src/bootstrapper.h +4 -4
  71. data/vendor/v8/src/builtins.cc +498 -311
  72. data/vendor/v8/src/builtins.h +75 -47
  73. data/vendor/v8/src/checks.cc +2 -1
  74. data/vendor/v8/src/checks.h +8 -0
  75. data/vendor/v8/src/code-stubs-hydrogen.cc +253 -0
  76. data/vendor/v8/src/code-stubs.cc +249 -84
  77. data/vendor/v8/src/code-stubs.h +501 -169
  78. data/vendor/v8/src/codegen.cc +36 -18
  79. data/vendor/v8/src/codegen.h +25 -3
  80. data/vendor/v8/src/collection.js +54 -17
  81. data/vendor/v8/src/compilation-cache.cc +24 -16
  82. data/vendor/v8/src/compilation-cache.h +15 -6
  83. data/vendor/v8/src/compiler.cc +497 -195
  84. data/vendor/v8/src/compiler.h +246 -38
  85. data/vendor/v8/src/contexts.cc +64 -24
  86. data/vendor/v8/src/contexts.h +60 -29
  87. data/vendor/v8/src/conversions-inl.h +24 -14
  88. data/vendor/v8/src/conversions.h +7 -4
  89. data/vendor/v8/src/counters.cc +21 -12
  90. data/vendor/v8/src/counters.h +44 -16
  91. data/vendor/v8/src/cpu-profiler.h +1 -1
  92. data/vendor/v8/src/d8-debug.cc +2 -2
  93. data/vendor/v8/src/d8-readline.cc +13 -2
  94. data/vendor/v8/src/d8.cc +681 -273
  95. data/vendor/v8/src/d8.gyp +4 -4
  96. data/vendor/v8/src/d8.h +38 -18
  97. data/vendor/v8/src/d8.js +0 -617
  98. data/vendor/v8/src/data-flow.h +55 -0
  99. data/vendor/v8/src/date.js +1 -42
  100. data/vendor/v8/src/dateparser-inl.h +5 -1
  101. data/vendor/v8/src/debug-agent.cc +10 -15
  102. data/vendor/v8/src/debug-debugger.js +147 -149
  103. data/vendor/v8/src/debug.cc +323 -164
  104. data/vendor/v8/src/debug.h +26 -14
  105. data/vendor/v8/src/deoptimizer.cc +765 -290
  106. data/vendor/v8/src/deoptimizer.h +130 -28
  107. data/vendor/v8/src/disassembler.cc +10 -4
  108. data/vendor/v8/src/elements-kind.cc +7 -2
  109. data/vendor/v8/src/elements-kind.h +19 -0
  110. data/vendor/v8/src/elements.cc +607 -285
  111. data/vendor/v8/src/elements.h +36 -13
  112. data/vendor/v8/src/execution.cc +52 -31
  113. data/vendor/v8/src/execution.h +4 -4
  114. data/vendor/v8/src/extensions/externalize-string-extension.cc +5 -4
  115. data/vendor/v8/src/extensions/gc-extension.cc +5 -1
  116. data/vendor/v8/src/extensions/statistics-extension.cc +153 -0
  117. data/vendor/v8/src/{inspector.h → extensions/statistics-extension.h} +12 -23
  118. data/vendor/v8/src/factory.cc +101 -134
  119. data/vendor/v8/src/factory.h +36 -31
  120. data/vendor/v8/src/flag-definitions.h +102 -25
  121. data/vendor/v8/src/flags.cc +9 -5
  122. data/vendor/v8/src/frames-inl.h +10 -0
  123. data/vendor/v8/src/frames.cc +116 -26
  124. data/vendor/v8/src/frames.h +96 -12
  125. data/vendor/v8/src/full-codegen.cc +219 -74
  126. data/vendor/v8/src/full-codegen.h +63 -21
  127. data/vendor/v8/src/func-name-inferrer.cc +8 -7
  128. data/vendor/v8/src/func-name-inferrer.h +5 -3
  129. data/vendor/v8/src/gdb-jit.cc +71 -57
  130. data/vendor/v8/src/global-handles.cc +230 -101
  131. data/vendor/v8/src/global-handles.h +26 -27
  132. data/vendor/v8/src/globals.h +17 -19
  133. data/vendor/v8/src/handles-inl.h +59 -12
  134. data/vendor/v8/src/handles.cc +180 -200
  135. data/vendor/v8/src/handles.h +80 -11
  136. data/vendor/v8/src/hashmap.h +60 -40
  137. data/vendor/v8/src/heap-inl.h +107 -45
  138. data/vendor/v8/src/heap-profiler.cc +38 -19
  139. data/vendor/v8/src/heap-profiler.h +24 -14
  140. data/vendor/v8/src/heap.cc +1123 -738
  141. data/vendor/v8/src/heap.h +385 -146
  142. data/vendor/v8/src/hydrogen-instructions.cc +700 -217
  143. data/vendor/v8/src/hydrogen-instructions.h +1158 -472
  144. data/vendor/v8/src/hydrogen.cc +3319 -1662
  145. data/vendor/v8/src/hydrogen.h +411 -170
  146. data/vendor/v8/src/ia32/assembler-ia32-inl.h +46 -16
  147. data/vendor/v8/src/ia32/assembler-ia32.cc +131 -61
  148. data/vendor/v8/src/ia32/assembler-ia32.h +115 -57
  149. data/vendor/v8/src/ia32/builtins-ia32.cc +99 -5
  150. data/vendor/v8/src/ia32/code-stubs-ia32.cc +787 -495
  151. data/vendor/v8/src/ia32/code-stubs-ia32.h +10 -100
  152. data/vendor/v8/src/ia32/codegen-ia32.cc +227 -23
  153. data/vendor/v8/src/ia32/codegen-ia32.h +14 -0
  154. data/vendor/v8/src/ia32/deoptimizer-ia32.cc +428 -87
  155. data/vendor/v8/src/ia32/disasm-ia32.cc +28 -1
  156. data/vendor/v8/src/ia32/frames-ia32.h +6 -16
  157. data/vendor/v8/src/ia32/full-codegen-ia32.cc +280 -272
  158. data/vendor/v8/src/ia32/ic-ia32.cc +150 -250
  159. data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +1600 -517
  160. data/vendor/v8/src/ia32/lithium-codegen-ia32.h +90 -24
  161. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +10 -6
  162. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.h +2 -2
  163. data/vendor/v8/src/ia32/lithium-ia32.cc +405 -302
  164. data/vendor/v8/src/ia32/lithium-ia32.h +526 -271
  165. data/vendor/v8/src/ia32/macro-assembler-ia32.cc +378 -119
  166. data/vendor/v8/src/ia32/macro-assembler-ia32.h +62 -28
  167. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +43 -30
  168. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +2 -10
  169. data/vendor/v8/src/ia32/stub-cache-ia32.cc +492 -678
  170. data/vendor/v8/src/ic-inl.h +9 -4
  171. data/vendor/v8/src/ic.cc +836 -923
  172. data/vendor/v8/src/ic.h +228 -247
  173. data/vendor/v8/src/incremental-marking-inl.h +26 -30
  174. data/vendor/v8/src/incremental-marking.cc +276 -248
  175. data/vendor/v8/src/incremental-marking.h +29 -37
  176. data/vendor/v8/src/interface.cc +34 -25
  177. data/vendor/v8/src/interface.h +69 -25
  178. data/vendor/v8/src/interpreter-irregexp.cc +2 -2
  179. data/vendor/v8/src/isolate.cc +382 -76
  180. data/vendor/v8/src/isolate.h +109 -56
  181. data/vendor/v8/src/json-parser.h +217 -104
  182. data/vendor/v8/src/json-stringifier.h +745 -0
  183. data/vendor/v8/src/json.js +10 -132
  184. data/vendor/v8/src/jsregexp-inl.h +106 -0
  185. data/vendor/v8/src/jsregexp.cc +517 -285
  186. data/vendor/v8/src/jsregexp.h +145 -117
  187. data/vendor/v8/src/list-inl.h +35 -22
  188. data/vendor/v8/src/list.h +46 -19
  189. data/vendor/v8/src/lithium-allocator-inl.h +22 -2
  190. data/vendor/v8/src/lithium-allocator.cc +85 -70
  191. data/vendor/v8/src/lithium-allocator.h +21 -39
  192. data/vendor/v8/src/lithium.cc +259 -5
  193. data/vendor/v8/src/lithium.h +131 -32
  194. data/vendor/v8/src/liveedit-debugger.js +52 -3
  195. data/vendor/v8/src/liveedit.cc +393 -113
  196. data/vendor/v8/src/liveedit.h +7 -3
  197. data/vendor/v8/src/log-utils.cc +4 -2
  198. data/vendor/v8/src/log.cc +170 -140
  199. data/vendor/v8/src/log.h +62 -11
  200. data/vendor/v8/src/macro-assembler.h +17 -0
  201. data/vendor/v8/src/macros.py +2 -0
  202. data/vendor/v8/src/mark-compact-inl.h +3 -23
  203. data/vendor/v8/src/mark-compact.cc +801 -830
  204. data/vendor/v8/src/mark-compact.h +154 -47
  205. data/vendor/v8/src/marking-thread.cc +85 -0
  206. data/vendor/v8/src/{inspector.cc → marking-thread.h} +32 -24
  207. data/vendor/v8/src/math.js +12 -18
  208. data/vendor/v8/src/messages.cc +18 -8
  209. data/vendor/v8/src/messages.js +314 -261
  210. data/vendor/v8/src/mips/assembler-mips-inl.h +58 -6
  211. data/vendor/v8/src/mips/assembler-mips.cc +92 -75
  212. data/vendor/v8/src/mips/assembler-mips.h +54 -60
  213. data/vendor/v8/src/mips/builtins-mips.cc +116 -17
  214. data/vendor/v8/src/mips/code-stubs-mips.cc +919 -556
  215. data/vendor/v8/src/mips/code-stubs-mips.h +22 -131
  216. data/vendor/v8/src/mips/codegen-mips.cc +281 -6
  217. data/vendor/v8/src/mips/codegen-mips.h +22 -0
  218. data/vendor/v8/src/mips/constants-mips.cc +2 -0
  219. data/vendor/v8/src/mips/constants-mips.h +12 -2
  220. data/vendor/v8/src/mips/deoptimizer-mips.cc +286 -50
  221. data/vendor/v8/src/mips/disasm-mips.cc +13 -0
  222. data/vendor/v8/src/mips/full-codegen-mips.cc +297 -284
  223. data/vendor/v8/src/mips/ic-mips.cc +182 -263
  224. data/vendor/v8/src/mips/lithium-codegen-mips.cc +1208 -556
  225. data/vendor/v8/src/mips/lithium-codegen-mips.h +72 -19
  226. data/vendor/v8/src/mips/lithium-gap-resolver-mips.cc +9 -2
  227. data/vendor/v8/src/mips/lithium-mips.cc +290 -302
  228. data/vendor/v8/src/mips/lithium-mips.h +463 -266
  229. data/vendor/v8/src/mips/macro-assembler-mips.cc +208 -115
  230. data/vendor/v8/src/mips/macro-assembler-mips.h +67 -24
  231. data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +40 -25
  232. data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +3 -9
  233. data/vendor/v8/src/mips/simulator-mips.cc +112 -40
  234. data/vendor/v8/src/mips/simulator-mips.h +5 -0
  235. data/vendor/v8/src/mips/stub-cache-mips.cc +502 -884
  236. data/vendor/v8/src/mirror-debugger.js +157 -30
  237. data/vendor/v8/src/mksnapshot.cc +88 -14
  238. data/vendor/v8/src/object-observe.js +235 -0
  239. data/vendor/v8/src/objects-debug.cc +178 -176
  240. data/vendor/v8/src/objects-inl.h +1333 -486
  241. data/vendor/v8/src/objects-printer.cc +125 -43
  242. data/vendor/v8/src/objects-visiting-inl.h +578 -6
  243. data/vendor/v8/src/objects-visiting.cc +2 -2
  244. data/vendor/v8/src/objects-visiting.h +172 -79
  245. data/vendor/v8/src/objects.cc +3533 -2885
  246. data/vendor/v8/src/objects.h +1352 -1131
  247. data/vendor/v8/src/optimizing-compiler-thread.cc +152 -0
  248. data/vendor/v8/src/optimizing-compiler-thread.h +111 -0
  249. data/vendor/v8/src/parser.cc +390 -500
  250. data/vendor/v8/src/parser.h +45 -33
  251. data/vendor/v8/src/platform-cygwin.cc +10 -21
  252. data/vendor/v8/src/platform-freebsd.cc +36 -41
  253. data/vendor/v8/src/platform-linux.cc +160 -124
  254. data/vendor/v8/src/platform-macos.cc +30 -27
  255. data/vendor/v8/src/platform-nullos.cc +17 -1
  256. data/vendor/v8/src/platform-openbsd.cc +19 -50
  257. data/vendor/v8/src/platform-posix.cc +14 -0
  258. data/vendor/v8/src/platform-solaris.cc +20 -53
  259. data/vendor/v8/src/platform-win32.cc +49 -26
  260. data/vendor/v8/src/platform.h +40 -1
  261. data/vendor/v8/src/preparser.cc +8 -5
  262. data/vendor/v8/src/preparser.h +2 -2
  263. data/vendor/v8/src/prettyprinter.cc +16 -0
  264. data/vendor/v8/src/prettyprinter.h +2 -0
  265. data/vendor/v8/src/profile-generator-inl.h +1 -0
  266. data/vendor/v8/src/profile-generator.cc +209 -147
  267. data/vendor/v8/src/profile-generator.h +15 -12
  268. data/vendor/v8/src/property-details.h +46 -31
  269. data/vendor/v8/src/property.cc +27 -46
  270. data/vendor/v8/src/property.h +163 -83
  271. data/vendor/v8/src/proxy.js +7 -2
  272. data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +4 -13
  273. data/vendor/v8/src/regexp-macro-assembler-irregexp.h +1 -2
  274. data/vendor/v8/src/regexp-macro-assembler-tracer.cc +1 -11
  275. data/vendor/v8/src/regexp-macro-assembler-tracer.h +0 -1
  276. data/vendor/v8/src/regexp-macro-assembler.cc +31 -14
  277. data/vendor/v8/src/regexp-macro-assembler.h +14 -11
  278. data/vendor/v8/src/regexp-stack.cc +1 -0
  279. data/vendor/v8/src/regexp.js +9 -8
  280. data/vendor/v8/src/rewriter.cc +18 -7
  281. data/vendor/v8/src/runtime-profiler.cc +52 -43
  282. data/vendor/v8/src/runtime-profiler.h +0 -25
  283. data/vendor/v8/src/runtime.cc +2006 -2023
  284. data/vendor/v8/src/runtime.h +56 -49
  285. data/vendor/v8/src/safepoint-table.cc +12 -18
  286. data/vendor/v8/src/safepoint-table.h +11 -8
  287. data/vendor/v8/src/scanner.cc +1 -0
  288. data/vendor/v8/src/scanner.h +4 -10
  289. data/vendor/v8/src/scopeinfo.cc +35 -9
  290. data/vendor/v8/src/scopeinfo.h +64 -3
  291. data/vendor/v8/src/scopes.cc +251 -156
  292. data/vendor/v8/src/scopes.h +61 -27
  293. data/vendor/v8/src/serialize.cc +348 -396
  294. data/vendor/v8/src/serialize.h +125 -114
  295. data/vendor/v8/src/small-pointer-list.h +11 -11
  296. data/vendor/v8/src/{smart-array-pointer.h → smart-pointers.h} +64 -15
  297. data/vendor/v8/src/snapshot-common.cc +64 -15
  298. data/vendor/v8/src/snapshot-empty.cc +7 -1
  299. data/vendor/v8/src/snapshot.h +9 -2
  300. data/vendor/v8/src/spaces-inl.h +17 -0
  301. data/vendor/v8/src/spaces.cc +477 -183
  302. data/vendor/v8/src/spaces.h +238 -58
  303. data/vendor/v8/src/splay-tree-inl.h +8 -7
  304. data/vendor/v8/src/splay-tree.h +24 -10
  305. data/vendor/v8/src/store-buffer.cc +12 -5
  306. data/vendor/v8/src/store-buffer.h +2 -4
  307. data/vendor/v8/src/string-search.h +22 -6
  308. data/vendor/v8/src/string-stream.cc +11 -8
  309. data/vendor/v8/src/string.js +47 -15
  310. data/vendor/v8/src/stub-cache.cc +461 -224
  311. data/vendor/v8/src/stub-cache.h +164 -102
  312. data/vendor/v8/src/sweeper-thread.cc +105 -0
  313. data/vendor/v8/src/sweeper-thread.h +81 -0
  314. data/vendor/v8/src/token.h +1 -0
  315. data/vendor/v8/src/transitions-inl.h +220 -0
  316. data/vendor/v8/src/transitions.cc +160 -0
  317. data/vendor/v8/src/transitions.h +207 -0
  318. data/vendor/v8/src/type-info.cc +182 -181
  319. data/vendor/v8/src/type-info.h +31 -19
  320. data/vendor/v8/src/unicode-inl.h +62 -106
  321. data/vendor/v8/src/unicode.cc +57 -67
  322. data/vendor/v8/src/unicode.h +45 -91
  323. data/vendor/v8/src/uri.js +57 -29
  324. data/vendor/v8/src/utils.h +105 -5
  325. data/vendor/v8/src/v8-counters.cc +54 -11
  326. data/vendor/v8/src/v8-counters.h +134 -19
  327. data/vendor/v8/src/v8.cc +29 -29
  328. data/vendor/v8/src/v8.h +1 -0
  329. data/vendor/v8/src/v8conversions.cc +26 -22
  330. data/vendor/v8/src/v8globals.h +56 -43
  331. data/vendor/v8/src/v8natives.js +83 -30
  332. data/vendor/v8/src/v8threads.cc +42 -21
  333. data/vendor/v8/src/v8threads.h +4 -1
  334. data/vendor/v8/src/v8utils.cc +9 -93
  335. data/vendor/v8/src/v8utils.h +37 -33
  336. data/vendor/v8/src/variables.cc +6 -3
  337. data/vendor/v8/src/variables.h +6 -13
  338. data/vendor/v8/src/version.cc +2 -2
  339. data/vendor/v8/src/vm-state-inl.h +11 -0
  340. data/vendor/v8/src/x64/assembler-x64-inl.h +39 -8
  341. data/vendor/v8/src/x64/assembler-x64.cc +78 -64
  342. data/vendor/v8/src/x64/assembler-x64.h +38 -33
  343. data/vendor/v8/src/x64/builtins-x64.cc +105 -7
  344. data/vendor/v8/src/x64/code-stubs-x64.cc +790 -413
  345. data/vendor/v8/src/x64/code-stubs-x64.h +10 -106
  346. data/vendor/v8/src/x64/codegen-x64.cc +210 -8
  347. data/vendor/v8/src/x64/codegen-x64.h +20 -1
  348. data/vendor/v8/src/x64/deoptimizer-x64.cc +336 -75
  349. data/vendor/v8/src/x64/disasm-x64.cc +15 -0
  350. data/vendor/v8/src/x64/frames-x64.h +0 -14
  351. data/vendor/v8/src/x64/full-codegen-x64.cc +293 -270
  352. data/vendor/v8/src/x64/ic-x64.cc +153 -251
  353. data/vendor/v8/src/x64/lithium-codegen-x64.cc +1379 -531
  354. data/vendor/v8/src/x64/lithium-codegen-x64.h +67 -23
  355. data/vendor/v8/src/x64/lithium-gap-resolver-x64.cc +2 -2
  356. data/vendor/v8/src/x64/lithium-x64.cc +349 -289
  357. data/vendor/v8/src/x64/lithium-x64.h +460 -250
  358. data/vendor/v8/src/x64/macro-assembler-x64.cc +350 -177
  359. data/vendor/v8/src/x64/macro-assembler-x64.h +67 -49
  360. data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +46 -33
  361. data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +2 -3
  362. data/vendor/v8/src/x64/stub-cache-x64.cc +484 -653
  363. data/vendor/v8/src/zone-inl.h +9 -27
  364. data/vendor/v8/src/zone.cc +5 -5
  365. data/vendor/v8/src/zone.h +53 -27
  366. data/vendor/v8/test/benchmarks/testcfg.py +5 -0
  367. data/vendor/v8/test/cctest/cctest.cc +4 -0
  368. data/vendor/v8/test/cctest/cctest.gyp +3 -1
  369. data/vendor/v8/test/cctest/cctest.h +57 -9
  370. data/vendor/v8/test/cctest/cctest.status +15 -15
  371. data/vendor/v8/test/cctest/test-accessors.cc +26 -0
  372. data/vendor/v8/test/cctest/test-alloc.cc +22 -30
  373. data/vendor/v8/test/cctest/test-api.cc +1943 -314
  374. data/vendor/v8/test/cctest/test-assembler-arm.cc +133 -13
  375. data/vendor/v8/test/cctest/test-assembler-ia32.cc +1 -1
  376. data/vendor/v8/test/cctest/test-assembler-mips.cc +12 -0
  377. data/vendor/v8/test/cctest/test-ast.cc +4 -2
  378. data/vendor/v8/test/cctest/test-compiler.cc +61 -29
  379. data/vendor/v8/test/cctest/test-dataflow.cc +2 -2
  380. data/vendor/v8/test/cctest/test-debug.cc +212 -33
  381. data/vendor/v8/test/cctest/test-decls.cc +257 -11
  382. data/vendor/v8/test/cctest/test-dictionary.cc +24 -10
  383. data/vendor/v8/test/cctest/test-disasm-arm.cc +118 -1
  384. data/vendor/v8/test/cctest/test-disasm-ia32.cc +3 -2
  385. data/vendor/v8/test/cctest/test-flags.cc +14 -1
  386. data/vendor/v8/test/cctest/test-func-name-inference.cc +7 -4
  387. data/vendor/v8/test/cctest/test-global-object.cc +51 -0
  388. data/vendor/v8/test/cctest/test-hashing.cc +32 -23
  389. data/vendor/v8/test/cctest/test-heap-profiler.cc +131 -77
  390. data/vendor/v8/test/cctest/test-heap.cc +1084 -143
  391. data/vendor/v8/test/cctest/test-list.cc +1 -1
  392. data/vendor/v8/test/cctest/test-liveedit.cc +3 -2
  393. data/vendor/v8/test/cctest/test-lockers.cc +12 -13
  394. data/vendor/v8/test/cctest/test-log.cc +10 -8
  395. data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +2 -2
  396. data/vendor/v8/test/cctest/test-mark-compact.cc +44 -22
  397. data/vendor/v8/test/cctest/test-object-observe.cc +434 -0
  398. data/vendor/v8/test/cctest/test-parsing.cc +86 -39
  399. data/vendor/v8/test/cctest/test-platform-linux.cc +6 -0
  400. data/vendor/v8/test/cctest/test-platform-win32.cc +7 -0
  401. data/vendor/v8/test/cctest/test-random.cc +5 -4
  402. data/vendor/v8/test/cctest/test-regexp.cc +137 -101
  403. data/vendor/v8/test/cctest/test-serialize.cc +150 -230
  404. data/vendor/v8/test/cctest/test-sockets.cc +1 -1
  405. data/vendor/v8/test/cctest/test-spaces.cc +139 -0
  406. data/vendor/v8/test/cctest/test-strings.cc +736 -74
  407. data/vendor/v8/test/cctest/test-thread-termination.cc +10 -11
  408. data/vendor/v8/test/cctest/test-threads.cc +4 -4
  409. data/vendor/v8/test/cctest/test-utils.cc +16 -0
  410. data/vendor/v8/test/cctest/test-weakmaps.cc +7 -3
  411. data/vendor/v8/test/cctest/testcfg.py +64 -5
  412. data/vendor/v8/test/es5conform/testcfg.py +5 -0
  413. data/vendor/v8/test/message/message.status +1 -1
  414. data/vendor/v8/test/message/overwritten-builtins.out +3 -0
  415. data/vendor/v8/test/message/testcfg.py +89 -8
  416. data/vendor/v8/test/message/try-catch-finally-no-message.out +26 -26
  417. data/vendor/v8/test/mjsunit/accessor-map-sharing.js +18 -2
  418. data/vendor/v8/test/mjsunit/allocation-site-info.js +126 -0
  419. data/vendor/v8/test/mjsunit/array-bounds-check-removal.js +62 -1
  420. data/vendor/v8/test/mjsunit/array-iteration.js +1 -1
  421. data/vendor/v8/test/mjsunit/array-literal-transitions.js +2 -0
  422. data/vendor/v8/test/mjsunit/array-natives-elements.js +317 -0
  423. data/vendor/v8/test/mjsunit/array-reduce.js +8 -8
  424. data/vendor/v8/test/mjsunit/array-slice.js +12 -0
  425. data/vendor/v8/test/mjsunit/array-store-and-grow.js +4 -1
  426. data/vendor/v8/test/mjsunit/assert-opt-and-deopt.js +1 -1
  427. data/vendor/v8/test/mjsunit/bugs/bug-2337.js +53 -0
  428. data/vendor/v8/test/mjsunit/compare-known-objects-slow.js +69 -0
  429. data/vendor/v8/test/mjsunit/compiler/alloc-object-huge.js +3 -1
  430. data/vendor/v8/test/mjsunit/compiler/inline-accessors.js +368 -0
  431. data/vendor/v8/test/mjsunit/compiler/inline-arguments.js +87 -1
  432. data/vendor/v8/test/mjsunit/compiler/inline-closures.js +49 -0
  433. data/vendor/v8/test/mjsunit/compiler/inline-construct.js +55 -43
  434. data/vendor/v8/test/mjsunit/compiler/inline-literals.js +39 -0
  435. data/vendor/v8/test/mjsunit/compiler/multiply-add.js +69 -0
  436. data/vendor/v8/test/mjsunit/compiler/optimized-closures.js +57 -0
  437. data/vendor/v8/test/mjsunit/compiler/parallel-proto-change.js +44 -0
  438. data/vendor/v8/test/mjsunit/compiler/property-static.js +69 -0
  439. data/vendor/v8/test/mjsunit/compiler/proto-chain-constant.js +55 -0
  440. data/vendor/v8/test/mjsunit/compiler/proto-chain-load.js +44 -0
  441. data/vendor/v8/test/mjsunit/compiler/regress-gvn.js +3 -2
  442. data/vendor/v8/test/mjsunit/compiler/regress-or.js +6 -2
  443. data/vendor/v8/test/mjsunit/compiler/rotate.js +224 -0
  444. data/vendor/v8/test/mjsunit/compiler/uint32.js +173 -0
  445. data/vendor/v8/test/mjsunit/count-based-osr.js +2 -1
  446. data/vendor/v8/test/mjsunit/d8-os.js +3 -3
  447. data/vendor/v8/test/mjsunit/date-parse.js +3 -0
  448. data/vendor/v8/test/mjsunit/date.js +22 -0
  449. data/vendor/v8/test/mjsunit/debug-break-inline.js +1 -0
  450. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js +22 -12
  451. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized.js +21 -10
  452. data/vendor/v8/test/mjsunit/debug-liveedit-compile-error.js +60 -0
  453. data/vendor/v8/test/mjsunit/debug-liveedit-double-call.js +142 -0
  454. data/vendor/v8/test/mjsunit/debug-liveedit-literals.js +94 -0
  455. data/vendor/v8/test/mjsunit/debug-liveedit-restart-frame.js +153 -0
  456. data/vendor/v8/test/mjsunit/debug-multiple-breakpoints.js +1 -1
  457. data/vendor/v8/test/mjsunit/debug-script-breakpoints-closure.js +67 -0
  458. data/vendor/v8/test/mjsunit/debug-script-breakpoints-nested.js +82 -0
  459. data/vendor/v8/test/mjsunit/debug-script.js +4 -2
  460. data/vendor/v8/test/mjsunit/debug-set-variable-value.js +308 -0
  461. data/vendor/v8/test/mjsunit/debug-stepout-scope-part1.js +190 -0
  462. data/vendor/v8/test/mjsunit/debug-stepout-scope-part2.js +83 -0
  463. data/vendor/v8/test/mjsunit/debug-stepout-scope-part3.js +80 -0
  464. data/vendor/v8/test/mjsunit/debug-stepout-scope-part4.js +80 -0
  465. data/vendor/v8/test/mjsunit/debug-stepout-scope-part5.js +77 -0
  466. data/vendor/v8/test/mjsunit/debug-stepout-scope-part6.js +79 -0
  467. data/vendor/v8/test/mjsunit/debug-stepout-scope-part7.js +79 -0
  468. data/vendor/v8/test/mjsunit/{debug-stepout-scope.js → debug-stepout-scope-part8.js} +0 -189
  469. data/vendor/v8/test/mjsunit/delete-non-configurable.js +74 -0
  470. data/vendor/v8/test/mjsunit/deopt-minus-zero.js +56 -0
  471. data/vendor/v8/test/mjsunit/elements-kind.js +6 -4
  472. data/vendor/v8/test/mjsunit/elements-length-no-holey.js +33 -0
  473. data/vendor/v8/test/mjsunit/elements-transition-hoisting.js +46 -19
  474. data/vendor/v8/test/mjsunit/error-accessors.js +54 -0
  475. data/vendor/v8/test/mjsunit/error-constructors.js +1 -14
  476. data/vendor/v8/test/mjsunit/error-tostring.js +8 -0
  477. data/vendor/v8/test/mjsunit/eval-stack-trace.js +204 -0
  478. data/vendor/v8/test/mjsunit/external-array.js +364 -1
  479. data/vendor/v8/test/mjsunit/fast-array-length.js +37 -0
  480. data/vendor/v8/test/mjsunit/fast-non-keyed.js +113 -0
  481. data/vendor/v8/test/mjsunit/fast-prototype.js +117 -0
  482. data/vendor/v8/test/mjsunit/function-call.js +14 -18
  483. data/vendor/v8/test/mjsunit/fuzz-natives-part1.js +230 -0
  484. data/vendor/v8/test/mjsunit/fuzz-natives-part2.js +229 -0
  485. data/vendor/v8/test/mjsunit/fuzz-natives-part3.js +229 -0
  486. data/vendor/v8/test/mjsunit/{fuzz-natives.js → fuzz-natives-part4.js} +12 -2
  487. data/vendor/v8/test/mjsunit/generated-transition-stub.js +218 -0
  488. data/vendor/v8/test/mjsunit/greedy.js +1 -1
  489. data/vendor/v8/test/mjsunit/harmony/block-conflicts.js +2 -1
  490. data/vendor/v8/test/mjsunit/harmony/block-let-crankshaft.js +1 -1
  491. data/vendor/v8/test/mjsunit/harmony/collections.js +69 -11
  492. data/vendor/v8/test/mjsunit/harmony/debug-blockscopes.js +2 -2
  493. data/vendor/v8/test/mjsunit/harmony/module-linking.js +180 -3
  494. data/vendor/v8/test/mjsunit/harmony/module-parsing.js +31 -0
  495. data/vendor/v8/test/mjsunit/harmony/module-recompile.js +87 -0
  496. data/vendor/v8/test/mjsunit/harmony/module-resolution.js +15 -2
  497. data/vendor/v8/test/mjsunit/harmony/object-observe.js +1056 -0
  498. data/vendor/v8/test/mjsunit/harmony/proxies-json.js +178 -0
  499. data/vendor/v8/test/mjsunit/harmony/proxies.js +25 -10
  500. data/vendor/v8/test/mjsunit/json-parser-recursive.js +33 -0
  501. data/vendor/v8/test/mjsunit/json-stringify-recursive.js +52 -0
  502. data/vendor/v8/test/mjsunit/json.js +38 -2
  503. data/vendor/v8/test/mjsunit/json2.js +153 -0
  504. data/vendor/v8/test/mjsunit/limit-locals.js +5 -4
  505. data/vendor/v8/test/mjsunit/manual-parallel-recompile.js +79 -0
  506. data/vendor/v8/test/mjsunit/math-exp-precision.js +64 -0
  507. data/vendor/v8/test/mjsunit/math-floor-negative.js +59 -0
  508. data/vendor/v8/test/mjsunit/math-floor-of-div-minus-zero.js +41 -0
  509. data/vendor/v8/test/mjsunit/math-floor-of-div-nosudiv.js +288 -0
  510. data/vendor/v8/test/mjsunit/math-floor-of-div.js +81 -9
  511. data/vendor/v8/test/mjsunit/{math-floor.js → math-floor-part1.js} +1 -72
  512. data/vendor/v8/test/mjsunit/math-floor-part2.js +76 -0
  513. data/vendor/v8/test/mjsunit/math-floor-part3.js +78 -0
  514. data/vendor/v8/test/mjsunit/math-floor-part4.js +76 -0
  515. data/vendor/v8/test/mjsunit/mirror-object.js +43 -9
  516. data/vendor/v8/test/mjsunit/mjsunit.js +1 -1
  517. data/vendor/v8/test/mjsunit/mjsunit.status +52 -27
  518. data/vendor/v8/test/mjsunit/mul-exhaustive-part1.js +491 -0
  519. data/vendor/v8/test/mjsunit/mul-exhaustive-part10.js +470 -0
  520. data/vendor/v8/test/mjsunit/mul-exhaustive-part2.js +525 -0
  521. data/vendor/v8/test/mjsunit/mul-exhaustive-part3.js +532 -0
  522. data/vendor/v8/test/mjsunit/mul-exhaustive-part4.js +509 -0
  523. data/vendor/v8/test/mjsunit/mul-exhaustive-part5.js +505 -0
  524. data/vendor/v8/test/mjsunit/mul-exhaustive-part6.js +554 -0
  525. data/vendor/v8/test/mjsunit/mul-exhaustive-part7.js +497 -0
  526. data/vendor/v8/test/mjsunit/mul-exhaustive-part8.js +526 -0
  527. data/vendor/v8/test/mjsunit/mul-exhaustive-part9.js +533 -0
  528. data/vendor/v8/test/mjsunit/new-function.js +34 -0
  529. data/vendor/v8/test/mjsunit/numops-fuzz-part1.js +1172 -0
  530. data/vendor/v8/test/mjsunit/numops-fuzz-part2.js +1178 -0
  531. data/vendor/v8/test/mjsunit/numops-fuzz-part3.js +1178 -0
  532. data/vendor/v8/test/mjsunit/numops-fuzz-part4.js +1177 -0
  533. data/vendor/v8/test/mjsunit/object-define-property.js +107 -2
  534. data/vendor/v8/test/mjsunit/override-read-only-property.js +6 -4
  535. data/vendor/v8/test/mjsunit/packed-elements.js +2 -2
  536. data/vendor/v8/test/mjsunit/parse-int-float.js +4 -4
  537. data/vendor/v8/test/mjsunit/pixel-array-rounding.js +1 -1
  538. data/vendor/v8/test/mjsunit/readonly.js +228 -0
  539. data/vendor/v8/test/mjsunit/regexp-capture-3.js +16 -18
  540. data/vendor/v8/test/mjsunit/regexp-capture.js +2 -0
  541. data/vendor/v8/test/mjsunit/regexp-global.js +122 -0
  542. data/vendor/v8/test/mjsunit/regexp-results-cache.js +78 -0
  543. data/vendor/v8/test/mjsunit/regress/regress-1117.js +12 -3
  544. data/vendor/v8/test/mjsunit/regress/regress-1118.js +1 -1
  545. data/vendor/v8/test/mjsunit/regress/regress-115100.js +36 -0
  546. data/vendor/v8/test/mjsunit/regress/regress-1199637.js +1 -3
  547. data/vendor/v8/test/mjsunit/regress/regress-121407.js +1 -1
  548. data/vendor/v8/test/mjsunit/regress/regress-131923.js +30 -0
  549. data/vendor/v8/test/mjsunit/regress/regress-131994.js +70 -0
  550. data/vendor/v8/test/mjsunit/regress/regress-133211.js +35 -0
  551. data/vendor/v8/test/mjsunit/regress/regress-133211b.js +39 -0
  552. data/vendor/v8/test/mjsunit/regress/regress-136048.js +34 -0
  553. data/vendor/v8/test/mjsunit/regress/regress-137768.js +73 -0
  554. data/vendor/v8/test/mjsunit/regress/regress-143967.js +34 -0
  555. data/vendor/v8/test/mjsunit/regress/regress-145201.js +107 -0
  556. data/vendor/v8/test/mjsunit/regress/regress-147497.js +45 -0
  557. data/vendor/v8/test/mjsunit/regress/regress-148378.js +38 -0
  558. data/vendor/v8/test/mjsunit/regress/regress-1563.js +1 -1
  559. data/vendor/v8/test/mjsunit/regress/regress-1591.js +48 -0
  560. data/vendor/v8/test/mjsunit/regress/regress-164442.js +45 -0
  561. data/vendor/v8/test/mjsunit/regress/regress-165637.js +61 -0
  562. data/vendor/v8/test/mjsunit/regress/regress-166379.js +39 -0
  563. data/vendor/v8/test/mjsunit/regress/regress-166553.js +33 -0
  564. data/vendor/v8/test/mjsunit/regress/regress-1692.js +1 -1
  565. data/vendor/v8/test/mjsunit/regress/regress-171641.js +40 -0
  566. data/vendor/v8/test/mjsunit/regress/regress-1980.js +1 -1
  567. data/vendor/v8/test/mjsunit/regress/regress-2073.js +99 -0
  568. data/vendor/v8/test/mjsunit/regress/regress-2119.js +36 -0
  569. data/vendor/v8/test/mjsunit/regress/regress-2156.js +39 -0
  570. data/vendor/v8/test/mjsunit/regress/regress-2163.js +70 -0
  571. data/vendor/v8/test/mjsunit/regress/regress-2170.js +58 -0
  572. data/vendor/v8/test/mjsunit/regress/regress-2172.js +35 -0
  573. data/vendor/v8/test/mjsunit/regress/regress-2185-2.js +145 -0
  574. data/vendor/v8/test/mjsunit/regress/regress-2185.js +38 -0
  575. data/vendor/v8/test/mjsunit/regress/regress-2186.js +49 -0
  576. data/vendor/v8/test/mjsunit/regress/regress-2193.js +58 -0
  577. data/vendor/v8/test/mjsunit/regress/regress-2219.js +32 -0
  578. data/vendor/v8/test/mjsunit/regress/regress-2225.js +65 -0
  579. data/vendor/v8/test/mjsunit/regress/regress-2226.js +36 -0
  580. data/vendor/v8/test/mjsunit/regress/regress-2234.js +41 -0
  581. data/vendor/v8/test/mjsunit/regress/regress-2243.js +31 -0
  582. data/vendor/v8/test/mjsunit/regress/regress-2249.js +33 -0
  583. data/vendor/v8/test/mjsunit/regress/regress-2250.js +68 -0
  584. data/vendor/v8/test/mjsunit/regress/regress-2261.js +113 -0
  585. data/vendor/v8/test/mjsunit/regress/regress-2263.js +30 -0
  586. data/vendor/v8/test/mjsunit/regress/regress-2284.js +32 -0
  587. data/vendor/v8/test/mjsunit/regress/regress-2285.js +32 -0
  588. data/vendor/v8/test/mjsunit/regress/regress-2286.js +32 -0
  589. data/vendor/v8/test/mjsunit/regress/regress-2289.js +34 -0
  590. data/vendor/v8/test/mjsunit/regress/regress-2291.js +36 -0
  591. data/vendor/v8/test/mjsunit/regress/regress-2294.js +70 -0
  592. data/vendor/v8/test/mjsunit/regress/regress-2296.js +40 -0
  593. data/vendor/v8/test/mjsunit/regress/regress-2315.js +40 -0
  594. data/vendor/v8/test/mjsunit/regress/regress-2318.js +66 -0
  595. data/vendor/v8/test/mjsunit/regress/regress-2322.js +36 -0
  596. data/vendor/v8/test/mjsunit/regress/regress-2326.js +54 -0
  597. data/vendor/v8/test/mjsunit/regress/regress-2336.js +53 -0
  598. data/vendor/v8/test/mjsunit/regress/regress-2339.js +59 -0
  599. data/vendor/v8/test/mjsunit/regress/regress-2346.js +123 -0
  600. data/vendor/v8/test/mjsunit/regress/regress-2373.js +29 -0
  601. data/vendor/v8/test/mjsunit/regress/regress-2374.js +33 -0
  602. data/vendor/v8/test/mjsunit/regress/regress-2398.js +41 -0
  603. data/vendor/v8/test/mjsunit/regress/regress-2410.js +36 -0
  604. data/vendor/v8/test/mjsunit/regress/regress-2416.js +75 -0
  605. data/vendor/v8/test/mjsunit/regress/regress-2419.js +37 -0
  606. data/vendor/v8/test/mjsunit/regress/regress-2433.js +36 -0
  607. data/vendor/v8/test/mjsunit/regress/regress-2437.js +156 -0
  608. data/vendor/v8/test/mjsunit/regress/regress-2438.js +52 -0
  609. data/vendor/v8/test/mjsunit/regress/regress-2443.js +129 -0
  610. data/vendor/v8/test/mjsunit/regress/regress-2444.js +120 -0
  611. data/vendor/v8/test/mjsunit/regress/regress-2489.js +50 -0
  612. data/vendor/v8/test/mjsunit/regress/regress-2499.js +40 -0
  613. data/vendor/v8/test/mjsunit/regress/regress-334.js +1 -1
  614. data/vendor/v8/test/mjsunit/regress/regress-492.js +39 -1
  615. data/vendor/v8/test/mjsunit/regress/regress-builtin-array-op.js +38 -0
  616. data/vendor/v8/test/mjsunit/regress/regress-cnlt-elements.js +43 -0
  617. data/vendor/v8/test/mjsunit/regress/regress-cnlt-enum-indices.js +45 -0
  618. data/vendor/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js +46 -0
  619. data/vendor/v8/test/mjsunit/regress/regress-convert-enum.js +60 -0
  620. data/vendor/v8/test/mjsunit/regress/regress-convert-enum2.js +46 -0
  621. data/vendor/v8/test/mjsunit/regress/regress-convert-transition.js +40 -0
  622. data/vendor/v8/test/mjsunit/regress/regress-crbug-119926.js +3 -1
  623. data/vendor/v8/test/mjsunit/regress/regress-crbug-125148.js +90 -0
  624. data/vendor/v8/test/mjsunit/regress/regress-crbug-134055.js +63 -0
  625. data/vendor/v8/test/mjsunit/regress/regress-crbug-134609.js +59 -0
  626. data/vendor/v8/test/mjsunit/regress/regress-crbug-135008.js +45 -0
  627. data/vendor/v8/test/mjsunit/regress/regress-crbug-135066.js +55 -0
  628. data/vendor/v8/test/mjsunit/regress/regress-crbug-137689.js +47 -0
  629. data/vendor/v8/test/mjsunit/regress/regress-crbug-138887.js +48 -0
  630. data/vendor/v8/test/mjsunit/regress/regress-crbug-140083.js +44 -0
  631. data/vendor/v8/test/mjsunit/regress/regress-crbug-142087.js +38 -0
  632. data/vendor/v8/test/mjsunit/regress/regress-crbug-142218.js +44 -0
  633. data/vendor/v8/test/mjsunit/regress/regress-crbug-145961.js +39 -0
  634. data/vendor/v8/test/mjsunit/regress/regress-crbug-146910.js +33 -0
  635. data/vendor/v8/test/mjsunit/regress/regress-crbug-147475.js +48 -0
  636. data/vendor/v8/test/mjsunit/regress/regress-crbug-148376.js +35 -0
  637. data/vendor/v8/test/mjsunit/regress/regress-crbug-150545.js +53 -0
  638. data/vendor/v8/test/mjsunit/regress/regress-crbug-150729.js +39 -0
  639. data/vendor/v8/test/mjsunit/regress/regress-crbug-157019.js +54 -0
  640. data/vendor/v8/test/mjsunit/regress/regress-crbug-157520.js +38 -0
  641. data/vendor/v8/test/mjsunit/regress/regress-crbug-158185.js +39 -0
  642. data/vendor/v8/test/mjsunit/regress/regress-crbug-160010.js +35 -0
  643. data/vendor/v8/test/mjsunit/regress/regress-crbug-162085.js +71 -0
  644. data/vendor/v8/test/mjsunit/regress/regress-crbug-168545.js +34 -0
  645. data/vendor/v8/test/mjsunit/regress/regress-crbug-170856.js +33 -0
  646. data/vendor/v8/test/mjsunit/regress/regress-crbug-172345.js +34 -0
  647. data/vendor/v8/test/mjsunit/regress/regress-crbug-173974.js +36 -0
  648. data/vendor/v8/test/mjsunit/regress/regress-crbug-18639.js +9 -5
  649. data/vendor/v8/test/mjsunit/regress/regress-debug-code-recompilation.js +2 -1
  650. data/vendor/v8/test/mjsunit/regress/regress-deep-proto.js +45 -0
  651. data/vendor/v8/test/mjsunit/regress/regress-delete-empty-double.js +40 -0
  652. data/vendor/v8/test/mjsunit/regress/regress-iteration-order.js +42 -0
  653. data/vendor/v8/test/mjsunit/regress/regress-json-stringify-gc.js +41 -0
  654. data/vendor/v8/test/mjsunit/regress/regress-latin-1.js +78 -0
  655. data/vendor/v8/test/mjsunit/regress/regress-load-elements.js +49 -0
  656. data/vendor/v8/test/mjsunit/regress/regress-observe-empty-double-array.js +38 -0
  657. data/vendor/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js +37 -0
  658. data/vendor/v8/test/mjsunit/shift-for-integer-div.js +59 -0
  659. data/vendor/v8/test/mjsunit/stack-traces-gc.js +119 -0
  660. data/vendor/v8/test/mjsunit/stack-traces-overflow.js +122 -0
  661. data/vendor/v8/test/mjsunit/stack-traces.js +39 -1
  662. data/vendor/v8/test/mjsunit/str-to-num.js +7 -2
  663. data/vendor/v8/test/mjsunit/strict-mode.js +36 -11
  664. data/vendor/v8/test/mjsunit/string-charcodeat.js +3 -0
  665. data/vendor/v8/test/mjsunit/string-natives.js +72 -0
  666. data/vendor/v8/test/mjsunit/string-split.js +17 -0
  667. data/vendor/v8/test/mjsunit/testcfg.py +76 -6
  668. data/vendor/v8/test/mjsunit/tools/tickprocessor.js +4 -1
  669. data/vendor/v8/test/mjsunit/try-finally-continue.js +72 -0
  670. data/vendor/v8/test/mjsunit/typed-array-slice.js +61 -0
  671. data/vendor/v8/test/mjsunit/unbox-double-arrays.js +2 -0
  672. data/vendor/v8/test/mjsunit/uri.js +12 -0
  673. data/vendor/v8/test/mjsunit/with-readonly.js +4 -2
  674. data/vendor/v8/test/mozilla/mozilla.status +19 -113
  675. data/vendor/v8/test/mozilla/testcfg.py +122 -3
  676. data/vendor/v8/test/preparser/preparser.status +5 -0
  677. data/vendor/v8/test/preparser/strict-identifiers.pyt +1 -1
  678. data/vendor/v8/test/preparser/testcfg.py +101 -5
  679. data/vendor/v8/test/sputnik/sputnik.status +1 -1
  680. data/vendor/v8/test/sputnik/testcfg.py +5 -0
  681. data/vendor/v8/test/test262/README +2 -2
  682. data/vendor/v8/test/test262/test262.status +13 -36
  683. data/vendor/v8/test/test262/testcfg.py +102 -8
  684. data/vendor/v8/tools/android-build.sh +0 -0
  685. data/vendor/v8/tools/android-ll-prof.sh +69 -0
  686. data/vendor/v8/tools/android-run.py +109 -0
  687. data/vendor/v8/tools/android-sync.sh +105 -0
  688. data/vendor/v8/tools/bash-completion.sh +0 -0
  689. data/vendor/v8/tools/check-static-initializers.sh +0 -0
  690. data/vendor/v8/tools/common-includes.sh +15 -22
  691. data/vendor/v8/tools/disasm.py +4 -4
  692. data/vendor/v8/tools/fuzz-harness.sh +0 -0
  693. data/vendor/v8/tools/gen-postmortem-metadata.py +6 -8
  694. data/vendor/v8/tools/grokdump.py +404 -129
  695. data/vendor/v8/tools/gyp/v8.gyp +105 -43
  696. data/vendor/v8/tools/linux-tick-processor +5 -5
  697. data/vendor/v8/tools/ll_prof.py +75 -15
  698. data/vendor/v8/tools/merge-to-branch.sh +2 -2
  699. data/vendor/v8/tools/plot-timer-events +70 -0
  700. data/vendor/v8/tools/plot-timer-events.js +510 -0
  701. data/vendor/v8/tools/presubmit.py +1 -0
  702. data/vendor/v8/tools/push-to-trunk.sh +14 -4
  703. data/vendor/v8/tools/run-llprof.sh +69 -0
  704. data/vendor/v8/tools/run-tests.py +372 -0
  705. data/vendor/v8/tools/run-valgrind.py +1 -1
  706. data/vendor/v8/tools/status-file-converter.py +39 -0
  707. data/vendor/v8/tools/test-server.py +224 -0
  708. data/vendor/v8/tools/test-wrapper-gypbuild.py +13 -16
  709. data/vendor/v8/tools/test.py +10 -19
  710. data/vendor/v8/tools/testrunner/README +174 -0
  711. data/vendor/v8/tools/testrunner/__init__.py +26 -0
  712. data/vendor/v8/tools/testrunner/local/__init__.py +26 -0
  713. data/vendor/v8/tools/testrunner/local/commands.py +153 -0
  714. data/vendor/v8/tools/testrunner/local/execution.py +182 -0
  715. data/vendor/v8/tools/testrunner/local/old_statusfile.py +460 -0
  716. data/vendor/v8/tools/testrunner/local/progress.py +238 -0
  717. data/vendor/v8/tools/testrunner/local/statusfile.py +145 -0
  718. data/vendor/v8/tools/testrunner/local/testsuite.py +187 -0
  719. data/vendor/v8/tools/testrunner/local/utils.py +108 -0
  720. data/vendor/v8/tools/testrunner/local/verbose.py +99 -0
  721. data/vendor/v8/tools/testrunner/network/__init__.py +26 -0
  722. data/vendor/v8/tools/testrunner/network/distro.py +90 -0
  723. data/vendor/v8/tools/testrunner/network/endpoint.py +124 -0
  724. data/vendor/v8/tools/testrunner/network/network_execution.py +253 -0
  725. data/vendor/v8/tools/testrunner/network/perfdata.py +120 -0
  726. data/vendor/v8/tools/testrunner/objects/__init__.py +26 -0
  727. data/vendor/v8/tools/testrunner/objects/context.py +50 -0
  728. data/vendor/v8/tools/testrunner/objects/output.py +60 -0
  729. data/vendor/v8/tools/testrunner/objects/peer.py +80 -0
  730. data/vendor/v8/tools/testrunner/objects/testcase.py +83 -0
  731. data/vendor/v8/tools/testrunner/objects/workpacket.py +90 -0
  732. data/vendor/v8/tools/testrunner/server/__init__.py +26 -0
  733. data/vendor/v8/tools/testrunner/server/compression.py +111 -0
  734. data/vendor/v8/tools/testrunner/server/constants.py +51 -0
  735. data/vendor/v8/tools/testrunner/server/daemon.py +147 -0
  736. data/vendor/v8/tools/testrunner/server/local_handler.py +119 -0
  737. data/vendor/v8/tools/testrunner/server/main.py +245 -0
  738. data/vendor/v8/tools/testrunner/server/presence_handler.py +120 -0
  739. data/vendor/v8/tools/testrunner/server/signatures.py +63 -0
  740. data/vendor/v8/tools/testrunner/server/status_handler.py +112 -0
  741. data/vendor/v8/tools/testrunner/server/work_handler.py +150 -0
  742. data/vendor/v8/tools/tick-processor.html +168 -0
  743. data/vendor/v8/tools/tickprocessor-driver.js +5 -3
  744. data/vendor/v8/tools/tickprocessor.js +58 -15
  745. metadata +534 -30
  746. data/patches/add-freebsd9-and-freebsd10-to-gyp-GetFlavor.patch +0 -11
  747. data/patches/do-not-imply-vfp3-and-armv7.patch +0 -44
  748. data/patches/fPIC-on-x64.patch +0 -14
  749. data/vendor/v8/src/liveobjectlist-inl.h +0 -126
  750. data/vendor/v8/src/liveobjectlist.cc +0 -2631
  751. data/vendor/v8/src/liveobjectlist.h +0 -319
  752. data/vendor/v8/test/mjsunit/mul-exhaustive.js +0 -4629
  753. data/vendor/v8/test/mjsunit/numops-fuzz.js +0 -4609
  754. data/vendor/v8/test/mjsunit/regress/regress-1969.js +0 -5045
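Before the source diff below, note the gem-level changes in the listing above (data/Gemfile, data/lib/libv8/version.rb): an application that consumes libv8 through Bundler picks this release up simply by bumping its version constraint. The following Gemfile is a minimal sketch, not part of this changeset; the "~> 3.16.14.0" constraint and the prior "~> 3.11.8" constraint are illustrative, and the embedder named in the comment is only an example.

    # Gemfile of a hypothetical consumer of this release
    source "https://rubygems.org"

    # was roughly "~> 3.11.8" before this upgrade; use whatever range your
    # embedder (e.g. therubyracer) actually supports
    gem "libv8", "~> 3.16.14.0"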
data/vendor/v8/src/arm/builtins-arm.cc

@@ -75,12 +75,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
 // Load the built-in InternalArray function from the current context.
 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
                                               Register result) {
-  // Load the global context.
+  // Load the native context.
 
-  __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ ldr(result,
-         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
-  // Load the InternalArray function from the global context.
+         MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ldr(result,
+         FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the InternalArray function from the native context.
   __ ldr(result,
          MemOperand(result,
                     Context::SlotOffset(
@@ -90,12 +91,13 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
 
 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
-  // Load the global context.
+  // Load the native context.
 
-  __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ ldr(result,
-         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
-  // Load the Array function from the global context.
+         MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ldr(result,
+         FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the Array function from the native context.
   __ ldr(result,
          MemOperand(result,
                     Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
@@ -138,7 +140,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
   __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
   __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
   // Field JSArray::kElementsOffset is initialized later.
-  __ mov(scratch3, Operand(0, RelocInfo::NONE));
+  __ mov(scratch3, Operand::Zero());
   __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
 
   if (initial_capacity == 0) {
@@ -317,7 +319,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
       has_non_smi_element, finish, cant_transition_map, not_double;
 
   // Check for array construction with zero arguments or one.
-  __ cmp(r0, Operand(0, RelocInfo::NONE));
+  __ cmp(r0, Operand::Zero());
   __ b(ne, &argc_one_or_more);
 
   // Handle construction of an empty array.
@@ -345,7 +347,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ tst(r2, r2);
   __ b(ne, &not_empty_array);
   __ Drop(1);  // Adjust stack.
-  __ mov(r0, Operand(0));  // Treat this as a call with argc of zero.
+  __ mov(r0, Operand::Zero());  // Treat this as a call with argc of zero.
   __ b(&empty_array);
 
   __ bind(&not_empty_array);
@@ -588,7 +590,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
 
   // Load the first arguments in r0 and get rid of the rest.
   Label no_arguments;
-  __ cmp(r0, Operand(0, RelocInfo::NONE));
+  __ cmp(r0, Operand::Zero());
   __ b(eq, &no_arguments);
   // First args = sp[(argc - 1) * 4].
   __ sub(r0, r0, Operand(1));
@@ -632,7 +634,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
     __ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
     __ Assert(eq, "Unexpected string wrapper instance size");
     __ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
-    __ cmp(r4, Operand(0, RelocInfo::NONE));
+    __ cmp(r4, Operand::Zero());
     __ Assert(eq, "Unexpected unused properties of string wrapper");
   }
   __ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -697,6 +699,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
 }
 
 
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ mov(pc, r2);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+  GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Push a copy of the function onto the stack.
+    __ push(r1);
+    // Push call kind information.
+    __ push(r5);
+
+    __ push(r1);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+    // Restore call kind information.
+    __ pop(r5);
+    // Restore receiver.
+    __ pop(r1);
+
+    // Tear down internal frame.
+  }
+
+  GenerateTailCallToSharedCode(masm);
+}
+
+
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
@@ -1058,7 +1097,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // r5-r7, cp may be clobbered
 
   // Clear the context before we push it when entering the internal frame.
-  __ mov(cp, Operand(0, RelocInfo::NONE));
+  __ mov(cp, Operand::Zero());
 
   // Enter an internal frame.
   {
@@ -1187,6 +1226,57 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
 }
 
 
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r0 - contains return address (beginning of patch sequence)
+  //   r1 - function object
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+  __ PrepareCallCFunction(1, 0, r1);
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+  __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+  __ mov(pc, r0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                 \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking(  \
+    MacroAssembler* masm) {                                  \
+  GenerateMakeCodeYoungAgainCommon(masm);                    \
+}                                                            \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(   \
+    MacroAssembler* masm) {                                  \
+  GenerateMakeCodeYoungAgainCommon(masm);                    \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ add(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ mov(pc, lr);  // Jump to miss handler
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
@@ -1246,7 +1336,7 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   CpuFeatures::TryForceFeatureScope scope(VFP3);
-  if (!CpuFeatures::IsSupported(VFP3)) {
+  if (!CPU::SupportsCrankshaft()) {
     __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
     return;
   }
@@ -1283,7 +1373,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   // r0: actual number of arguments
   { Label done;
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand::Zero());
     __ b(ne, &done);
     __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
     __ push(r2);
@@ -1304,7 +1394,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // r0: actual number of arguments
   // r1: function
   Label shift_arguments;
-  __ mov(r4, Operand(0, RelocInfo::NONE));  // indicate regular JS_FUNCTION
+  __ mov(r4, Operand::Zero());  // indicate regular JS_FUNCTION
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -1359,16 +1449,16 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
 
     // Restore the function to r1, and the flag to r4.
     __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
-    __ mov(r4, Operand(0, RelocInfo::NONE));
+    __ mov(r4, Operand::Zero());
    __ jmp(&patch_receiver);
 
     // Use the global receiver object from the called function as the
     // receiver.
     __ bind(&use_global_receiver);
     const int kGlobalIndex =
-        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+        Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
     __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
-    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
     __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
     __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
 
@@ -1381,11 +1471,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
 
     // 3b. Check for function proxy.
     __ bind(&slow);
-    __ mov(r4, Operand(1, RelocInfo::NONE));  // indicate function proxy
+    __ mov(r4, Operand(1, RelocInfo::NONE32));  // indicate function proxy
     __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
     __ b(eq, &shift_arguments);
     __ bind(&non_function);
-    __ mov(r4, Operand(2, RelocInfo::NONE));  // indicate non-function
+    __ mov(r4, Operand(2, RelocInfo::NONE32));  // indicate non-function
 
     // 3c. Patch the first argument when calling a non-function.  The
     //     CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1429,7 +1519,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ tst(r4, r4);
   __ b(eq, &function);
   // Expected number of arguments is 0 for CALL_NON_FUNCTION.
-  __ mov(r2, Operand(0, RelocInfo::NONE));
+  __ mov(r2, Operand::Zero());
   __ SetCallKind(r5, CALL_AS_METHOD);
   __ cmp(r4, Operand(1));
   __ b(ne, &non_proxy);
@@ -1507,7 +1597,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // Push current limit and index.
     __ bind(&okay);
     __ push(r0);  // limit
-    __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
+    __ mov(r1, Operand::Zero());  // initial index
     __ push(r1);
 
     // Get the receiver.
@@ -1561,9 +1651,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // Use the current global receiver object as the receiver.
     __ bind(&use_global_receiver);
     const int kGlobalOffset =
-        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+        Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
     __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
-    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
     __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
     __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
 
@@ -1619,7 +1709,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     __ bind(&call_proxy);
    __ push(r1);  // add function proxy as last argument
    __ add(r0, r0, Operand(1));
-    __ mov(r2, Operand(0, RelocInfo::NONE));
+    __ mov(r2, Operand::Zero());
    __ SetCallKind(r5, CALL_AS_METHOD);
    __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -32,17 +32,40 @@
  #include "bootstrapper.h"
  #include "code-stubs.h"
  #include "regexp-macro-assembler.h"
+ #include "stub-cache.h"

  namespace v8 {
  namespace internal {


+ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+ }
+
+
+ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0, r1 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+ }
+
+
  #define __ ACCESS_MASM(masm)

  static void EmitIdenticalObjectComparison(MacroAssembler* masm,
  Label* slow,
- Condition cond,
- bool never_nan_nan);
+ Condition cond);
  static void EmitSmiNonsmiComparison(MacroAssembler* masm,
  Register lhs,
  Register rhs,
@@ -85,6 +108,8 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
  void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
+ Counters* counters = masm->isolate()->counters();
+
  Label gc;

  // Pop the function info from the stack.
@@ -98,32 +123,44 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
  &gc,
  TAG_OBJECT);

+ __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
+
  int map_index = (language_mode_ == CLASSIC_MODE)
  ? Context::FUNCTION_MAP_INDEX
  : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

- // Compute the function map in the current global context and set that
+ // Compute the function map in the current native context and set that
  // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
+ __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
+ __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ Label check_optimized;
+ Label install_unoptimized;
+ if (FLAG_cache_optimized_code) {
+ __ ldr(r1,
+ FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ tst(r1, r1);
+ __ b(ne, &check_optimized);
+ }
+ __ bind(&install_unoptimized);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
@@ -131,6 +168,72 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Return result. The argument function info has been popped already.
  __ Ret();

+ __ bind(&check_optimized);
+
+ __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
+
+ // r2 holds native context, r1 points to fixed array of 3-element entries
+ // (native context, optimized code, literals).
+ // The optimized code map must never be empty, so check the first elements.
+ Label install_optimized;
+ // Speculatively move code object into r4.
+ __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
+ __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+ __ cmp(r2, r5);
+ __ b(eq, &install_optimized);
+
+ // Iterate through the rest of map backwards. r4 holds an index as a Smi.
+ Label loop;
+ __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
+ __ bind(&loop);
+ // Do not double check first entry.
+
+ __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ b(eq, &install_unoptimized);
+ __ sub(r4, r4, Operand(
+ Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
+ __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r5, MemOperand(r5));
+ __ cmp(r2, r5);
+ __ b(ne, &loop);
+ // Hit: fetch the optimized code.
+ __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r5, r5, Operand(kPointerSize));
+ __ ldr(r4, MemOperand(r5));
+
+ __ bind(&install_optimized);
+ __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+ 1, r6, r7);
+
+ // TODO(fschneider): Idea: store proper code pointers in the map and either
+ // unmangle them on marking or do nothing as the whole map is discarded on
+ // major GC anyway.
+ __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+
+ // Now link a function into a list of optimized functions.
+ __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+ __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
+ // No need for write barrier as JSFunction (eax) is in the new space.
+
+ __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Store JSFunction (eax) into edx before issuing write barrier as
+ // it clobbers all the registers passed.
+ __ mov(r4, r0);
+ __ RecordWriteContextSlot(
+ r2,
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+ r4,
+ r1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
@@ -162,12 +265,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
  __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ mov(r1, Operand(Smi::FromInt(0)));
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -210,9 +313,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

- // If this block context is nested in the global context we get a smi
+ // If this block context is nested in the native context we get a smi
  // sentinel instead of a function. The block context should get the
- // canonical empty function of the global context as its closure which
+ // canonical empty function of the native context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(r3, &after_sentinel);
@@ -222,16 +325,16 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  __ Assert(eq, message);
  }
  __ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
+ __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -254,6 +357,7 @@ static void GenerateFastCloneShallowArrayCommon(
  MacroAssembler* masm,
  int length,
  FastCloneShallowArrayStub::Mode mode,
+ AllocationSiteMode allocation_site_mode,
  Label* fail) {
  // Registers on entry:
  //
@@ -267,16 +371,28 @@ static void GenerateFastCloneShallowArrayCommon(
  ? FixedDoubleArray::SizeFor(length)
  : FixedArray::SizeFor(length);
  }
- int size = JSArray::kSize + elements_size;
+
+ int size = JSArray::kSize;
+ int allocation_info_start = size;
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ size += AllocationSiteInfo::kSize;
+ }
+ size += elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- fail,
- TAG_OBJECT);
+ AllocationFlags flags = TAG_OBJECT;
+ if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+ flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+ }
+ __ AllocateInNewSpace(size, r0, r1, r2, fail, flags);
+
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()->
+ allocation_site_info_map())));
+ __ str(r2, FieldMemOperand(r0, allocation_info_start));
+ __ str(r3, FieldMemOperand(r0, allocation_info_start + kPointerSize));
+ }

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -290,7 +406,11 @@ static void GenerateFastCloneShallowArrayCommon(
  // Get hold of the elements array of the boilerplate and setup the
  // elements pointer in the resulting object.
  __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ add(r2, r0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
+ } else {
+ __ add(r2, r0, Operand(JSArray::kSize));
+ }
  __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));

  // Copy the elements array.
@@ -323,8 +443,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
  __ b(ne, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();
@@ -332,8 +453,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  __ bind(&check_fast_elements);
  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();
@@ -365,7 +487,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  __ pop(r3);
  }

- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+ allocation_site_mode_,
+ &slow_case);

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
@@ -424,7 +548,7 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
  // scratch register. Destroys the source register. No GC occurs during this
  // stub so you don't have to set up the frame.
- class ConvertToDoubleStub : public CodeStub {
+ class ConvertToDoubleStub : public PlatformCodeStub {
  public:
  ConvertToDoubleStub(Register result_reg_1,
  Register result_reg_2,
@@ -471,7 +595,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -484,7 +608,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
  HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
  // 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand(0, RelocInfo::NONE));
+ __ mov(mantissa, Operand::Zero());
  __ Ret();

  __ bind(&not_special);
@@ -519,8 +643,8 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
  FloatingPointHelper::Destination destination,
  Register scratch1,
  Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
  __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
  __ vmov(d7.high(), scratch1);
  __ vcvt_f64_s32(d7, d7.high());
@@ -547,24 +671,6 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
  }


- void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (r0) to d6 or r2/r3.
- LoadNumber(masm, destination,
- r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (r1) to d7 or r0/r1.
- LoadNumber(masm, destination,
- r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
- }
-
-
  void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
  Destination destination,
  Register object,
@@ -575,11 +681,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
  Register scratch1,
  Register scratch2,
  Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");

  Label is_smi, done;

@@ -589,9 +693,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP3) &&
+ if (CpuFeatures::IsSupported(VFP2) &&
  destination == kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
  // Load the double from tagged HeapNumber to double register.
  __ sub(scratch1, object, Operand(kHeapObjectTag));
  __ vldr(dst, scratch1, HeapNumber::kValueOffset);
@@ -604,8 +708,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,

  // Handle loading a double from a smi.
  __ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
  // Convert smi to double using VFP instructions.
  __ vmov(dst.high(), scratch1);
  __ vcvt_f64_s32(dst, dst.high());
@@ -636,11 +740,9 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
  Register scratch3,
  DwVfpRegister double_scratch,
  Label* not_number) {
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
  Label done;
  Label not_in_int32_range;

@@ -672,71 +774,72 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
  Register int_scratch,
  Destination destination,
  DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
+ Register dst_mantissa,
+ Register dst_exponent,
  Register scratch2,
  SwVfpRegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
+ ASSERT(!int_scratch.is(dst_mantissa));
+ ASSERT(!int_scratch.is(dst_exponent));

  Label done;

- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
  __ vmov(single_scratch, int_scratch);
  __ vcvt_f64_s32(double_dst, single_scratch);
  if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
  }
  } else {
  Label fewer_than_20_useful_bits;
  // Expected output:
- // | dst2 | dst1 |
+ // | dst_exponent | dst_mantissa |
  // | s | exp | mantissa |

  // Check for zero.
  __ cmp(int_scratch, Operand::Zero());
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
+ __ mov(dst_exponent, int_scratch);
+ __ mov(dst_mantissa, int_scratch);
  __ b(eq, &done);

  // Preload the sign of the value.
- __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
  // Get the absolute value of the object (as an unsigned integer).
  __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);

  // Get mantissa[51:20].

  // Get the position of the first set bit.
- __ CountLeadingZeros(dst1, int_scratch, scratch2);
- __ rsb(dst1, dst1, Operand(31));
+ __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
+ __ rsb(dst_mantissa, dst_mantissa, Operand(31));

  // Set the exponent.
- __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst2, scratch2, scratch2,
+ __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst_exponent, scratch2, scratch2,
  HeapNumber::kExponentShift, HeapNumber::kExponentBits);

  // Clear the first non null bit.
  __ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
+ __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));

- __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
  // Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
+ SetCC);
  __ b(mi, &fewer_than_20_useful_bits);
  // Set the higher 20 bits of the mantissa.
- __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
+ __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
  __ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst1, Operand(int_scratch, LSL, scratch2));
+ __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
  __ b(&done);

  __ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
  __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst2, dst2, scratch2);
+ __ orr(dst_exponent, dst_exponent, scratch2);
  // Set dst1 to 0.
- __ mov(dst1, Operand::Zero());
+ __ mov(dst_mantissa, Operand::Zero());
  }
  __ bind(&done);
  }
@@ -746,8 +849,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
  Register object,
  Destination destination,
  DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
+ DwVfpRegister double_scratch,
+ Register dst_mantissa,
+ Register dst_exponent,
  Register heap_number_map,
  Register scratch1,
  Register scratch2,
@@ -763,56 +867,80 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
+ dst_exponent, scratch2, single_scratch);
  __ b(&done);

  __ bind(&obj_is_not_smi);
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
  // Load the double value.
  __ sub(scratch1, object, Operand(kHeapObjectTag));
  __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);

  __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_dst,
  scratch1,
+ double_dst,
  scratch2,
+ double_scratch,
  kCheckForInexactConversion);

  // Jump to not_int32 if the operation did not succeed.
  __ b(ne, not_int32);

  if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
  }

  } else {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers..
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ // Load the double value in the destination registers.
+ bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
+ if (save_registers) {
+ // Save both output registers, because the other one probably holds
+ // an important value too.
+ __ Push(dst_exponent, dst_mantissa);
+ }
+ __ Ldrd(dst_mantissa, dst_exponent,
+ FieldMemOperand(object, HeapNumber::kValueOffset));

  // Check for 0 and -0.
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst2));
+ Label zero;
+ __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
+ __ orr(scratch1, scratch1, Operand(dst_mantissa));
  __ cmp(scratch1, Operand::Zero());
- __ b(eq, &done);
+ __ b(eq, &zero);

  // Check that the value can be exactly represented by a 32-bit integer.
  // Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+ Label restore_input_and_miss;
+ DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
+ &restore_input_and_miss);

- // dst1 and dst2 were trashed. Reload the double value.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ // dst_* were trashed. Reload the double value.
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ Ldrd(dst_mantissa, dst_exponent,
+ FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ b(&done);
+
+ __ bind(&restore_input_and_miss);
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ b(not_int32);
+
+ __ bind(&zero);
+ if (save_registers) {
+ __ Drop(2);
+ }
  }

  __ bind(&done);
@@ -826,7 +954,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
  Register scratch1,
  Register scratch2,
  Register scratch3,
- DwVfpRegister double_scratch,
+ DwVfpRegister double_scratch0,
+ DwVfpRegister double_scratch1,
  Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -834,38 +963,34 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
  !scratch1.is(scratch3) &&
  !scratch2.is(scratch3));

- Label done;
+ Label done, maybe_undefined;

  __ UntagAndJumpIfSmi(dst, object, &done);

- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- SwVfpRegister single_scratch = double_scratch.low();
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
  // Load the double value.
  __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
+ __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);

  __ EmitVFPTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
+ dst,
+ double_scratch0,
  scratch1,
- scratch2,
+ double_scratch1,
  kCheckForInexactConversion);

  // Jump to not_int32 if the operation did not succeed.
  __ b(ne, not_int32);
- // Get the result in the destination register.
- __ vmov(dst, single_scratch);
-
  } else {
  // Load the double value in the destination registers.
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
@@ -893,20 +1018,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
  __ tst(scratch1, Operand(HeapNumber::kSignMask));
  __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
  }
+ __ b(&done);
+
+ __ bind(&maybe_undefined);
+ __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ __ b(ne, not_int32);
+ // |undefined| is truncated to 0.
+ __ mov(dst, Operand(Smi::FromInt(0)));
+ // Fall through.

  __ bind(&done);
  }


  void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
+ Register src_exponent,
+ Register src_mantissa,
  Register dst,
  Register scratch,
  Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ubfx(scratch,
- src1,
+ src_exponent,
  HeapNumber::kExponentShift,
  HeapNumber::kExponentBits);

@@ -926,11 +1059,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
  Register tmp = dst;
- __ sub(tmp, scratch, Operand(src1, LSR, 31));
+ __ sub(tmp, scratch, Operand(src_exponent, LSR, 31));
  __ cmp(tmp, Operand(30));
  __ b(gt, not_int32);
  // - Bits [21:0] in the mantissa are not null.
- __ tst(src2, Operand(0x3fffff));
+ __ tst(src_mantissa, Operand(0x3fffff));
  __ b(ne, not_int32);

  // Otherwise the exponent needs to be big enough to shift left all the
@@ -941,19 +1074,19 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,

  // Get the 32 higher bits of the mantissa in dst.
  __ Ubfx(dst,
- src2,
+ src_mantissa,
  HeapNumber::kMantissaBitsInTopWord,
  32 - HeapNumber::kMantissaBitsInTopWord);
  __ orr(dst,
  dst,
- Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord));

  // Create the mask and test the lower bits (of the higher bits).
  __ rsb(scratch, scratch, Operand(32));
- __ mov(src2, Operand(1));
- __ mov(src1, Operand(src2, LSL, scratch));
- __ sub(src1, src1, Operand(1));
- __ tst(dst, src1);
+ __ mov(src_mantissa, Operand(1));
+ __ mov(src_exponent, Operand(src_mantissa, LSL, scratch));
+ __ sub(src_exponent, src_exponent, Operand(1));
+ __ tst(dst, src_exponent);
  __ b(ne, not_int32);
  }

@@ -978,7 +1111,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
  __ push(lr);
  __ PrepareCallCFunction(0, 2, scratch);
  if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
  __ vmov(d0, r0, r1);
  __ vmov(d1, r2, r3);
  }
@@ -990,7 +1123,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
  // Store answer in the overwritable heap number. Double returned in
  // registers r0 and r1 or in d0.
  if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
  __ vstr(d0,
  FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
@@ -1043,7 +1176,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
+ __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
  // We should be masking the implict first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
@@ -1066,7 +1199,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand(0, RelocInfo::NONE));
+ __ mov(ip, Operand::Zero());
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
  }
@@ -1077,48 +1210,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  // for "identity and not NaN".
  static void EmitIdenticalObjectComparison(MacroAssembler* masm,
  Label* slow,
- Condition cond,
- bool never_nan_nan) {
+ Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);

- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
  }
+ __ Ret();
  }
  }
  }
@@ -1133,47 +1261,45 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
  }
  __ Ret();

- if (cond != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cond == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
  }
- __ Ret();
  }
- // No fall through here.
+ __ Ret();
  }
+ // No fall through here.

  __ bind(&not_identical);
  }
@@ -1209,9 +1335,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
  }

  // Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
  // Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
  __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
  // Load the double from rhs, tagged HeapNumber r0, to d6.
  __ sub(r7, rhs, Operand(kHeapObjectTag));
@@ -1249,8 +1375,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
  }

  // Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
  // Load the double from lhs, tagged HeapNumber r1, to d7.
  __ sub(r7, lhs, Operand(kHeapObjectTag));
  __ vldr(d7, r7, HeapNumber::kValueOffset);
@@ -1289,7 +1415,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
  Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
  SetCC);
  __ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
+ __ cmp(lhs_mantissa, Operand::Zero());
  __ b(ne, &one_is_nan);

  __ bind(lhs_not_nan);
@@ -1304,7 +1430,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
  Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
  SetCC);
  __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
+ __ cmp(rhs_mantissa, Operand::Zero());
  __ b(eq, &neither_is_nan);

  __ bind(&one_is_nan);
@@ -1362,7 +1488,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
  __ push(lr);
  __ PrepareCallCFunction(0, 2, r5);
  if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
  __ vmov(d0, r0, r1);
  __ vmov(d1, r2, r3);
  }
@@ -1437,8 +1563,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
  __ sub(r7, rhs, Operand(kHeapObjectTag));
  __ vldr(d6, r7, HeapNumber::kValueOffset);
  __ sub(r7, lhs, Operand(kHeapObjectTag));
@@ -1527,8 +1653,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
  Label load_result_from_cache;
  if (!object_is_smi) {
  __ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
  __ CheckMap(object,
  scratch1,
  Heap::kHeapNumberMapRootIndex,
@@ -1607,42 +1733,60 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
  }


- // On entry lhs_ and rhs_ are the values to be compared.
+ static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+ }
+
+
+ // On entry r1 and r2 are the values to be compared.
  // On exit r0 is 0, positive or negative to indicate the result of
  // the comparison.
- void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
+ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r1;
+ Register rhs = r0;
+ Condition cc = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);

  Label slow; // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "CompareStub: unexpected smi operands.");
- }
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ JumpIfNotSmi(r2, &not_two_smis);
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
+ __ Ret();
+ __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
+ __ and_(r2, lhs, Operand(rhs));
  __ JumpIfNotSmi(r2, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
@@ -1653,15 +1797,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
  // comparison. If VFP3 is supported the double values of the numbers have
  // been loaded into d7 and d6. Otherwise, the double values have been loaded
  // into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7, if
  // VFP3 is supported, or in r0, r1, r2, and r3.
  Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
  __ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
  Label no_nan;
  // ARMv7 VFP3 instructions to implement double precision comparison.
  __ VFPCompareAndSetFlags(d7, d6);
@@ -1676,7 +1820,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
  // If one of the sides was a NaN then the v flag is set. Load r0 with
  // whatever it takes to make the comparison fail, since comparisons with NaN
  // always fail.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
  __ mov(r0, Operand(GREATER));
  } else {
  __ mov(r0, Operand(LESS));
@@ -1685,19 +1829,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
  } else {
  // Checks for NaN in the doubles we have loaded. Can return the answer or
  // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
+ EmitNanCheck(masm, &lhs_not_nan, cc);
  // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
  // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ EmitTwoNonNanDoubleComparison(masm, cc);
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
+ if (strict()) {
  // This returns non-equal for some object types, or falls through if it
  // was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_symbols;
@@ -1707,8 +1851,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
  // that case. If the inputs are not doubles then jumps to check_for_symbols.
  // In this case r2 will contain the type of rhs_. Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
  &both_loaded_as_doubles,
  &check_for_symbols,
  &flat_string_check);
@@ -1716,31 +1860,31 @@ void CompareStub::Generate(MacroAssembler* masm) {
  __ bind(&check_for_symbols);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // symbols.
- if (cc_ == eq && !strict_) {
+ if (cc == eq && !strict()) {
  // Returns an answer for two symbols or two detectable objects.
  // Otherwise jumps to string case or not both strings case.
  // Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);

  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc_ == eq) {
+ if (cc == eq) {
  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
  r2,
  r3,
  r4);
  } else {
  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
  r2,
  r3,
  r4,
@@ -1750,18 +1894,18 @@ void CompareStub::Generate(MacroAssembler* masm) {

  __ bind(&slow);

- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
  native = Builtins::COMPARE;
  int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
  ncr = GREATER;
  } else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ASSERT(cc == gt || cc == ge); // remaining cases
  ncr = LESS;
  }
  __ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1771,6 +1915,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
  }

@@ -1779,11 +1926,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
  void ToBooleanStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
- // This stub uses VFP3 instructions.
- CpuFeatures::Scope scope(VFP3);
-
  Label patch;
  const Register map = r9.is(tos_) ? r7 : r9;
+ const Register temp = map;

  // undefined -> false.
  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@@ -1812,7 +1957,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
1812
1957
  __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1813
1958
  __ tst(ip, Operand(1 << Map::kIsUndetectable));
1814
1959
  // Undetectable -> false.
1815
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1960
+ __ mov(tos_, Operand::Zero(), LeaveCC, ne);
1816
1961
  __ Ret(ne);
1817
1962
  }
1818
1963
  }
@@ -1836,13 +1981,56 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
1836
1981
  Label not_heap_number;
1837
1982
  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1838
1983
  __ b(ne, &not_heap_number);
1839
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1840
- __ VFPCompareAndSetFlags(d1, 0.0);
1841
- // "tos_" is a register, and contains a non zero value by default.
1842
- // Hence we only need to overwrite "tos_" with zero to return false for
1843
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1844
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
1845
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
1984
+
1985
+ if (CpuFeatures::IsSupported(VFP2)) {
1986
+ CpuFeatures::Scope scope(VFP2);
1987
+
1988
+ __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1989
+ __ VFPCompareAndSetFlags(d1, 0.0);
1990
+ // "tos_" is a register, and contains a non zero value by default.
1991
+ // Hence we only need to overwrite "tos_" with zero to return false for
1992
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1993
+ __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
1994
+ __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
1995
+ } else {
1996
+ Label done, not_nan, not_zero;
1997
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1998
+ // -0 maps to false:
1999
+ __ bic(
2000
+ temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC);
2001
+ __ b(ne, &not_zero);
2002
+ // If exponent word is zero then the answer depends on the mantissa word.
2003
+ __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
2004
+ __ jmp(&done);
2005
+
2006
+ // Check for NaN.
2007
+ __ bind(&not_zero);
2008
+ // We already zeroed the sign bit; now shift out the mantissa so we only
2009
+ // have the exponent left.
2010
+ __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
2011
+ unsigned int shifted_exponent_mask =
2012
+ HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
2013
+ __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32));
2014
+ __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
2015
+
2016
+ // Reload exponent word.
2017
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
2018
+ __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32));
2019
+ // If mantissa is not zero then we have a NaN, so return 0.
2020
+ __ mov(tos_, Operand::Zero(), LeaveCC, ne);
2021
+ __ b(ne, &done);
2022
+
2023
+ // Load mantissa word.
2024
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
2025
+ __ cmp(temp, Operand::Zero());
2026
+ // If mantissa is not zero then we have a NaN, so return 0.
2027
+ __ mov(tos_, Operand::Zero(), LeaveCC, ne);
2028
+ __ b(ne, &done);
2029
+
2030
+ __ bind(&not_nan);
2031
+ __ mov(tos_, Operand(1, RelocInfo::NONE32));
2032
+ __ bind(&done);
2033
+ }
1846
2034
  __ Ret();
1847
2035
  __ bind(&not_heap_number);
1848
2036
  }
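
On cores without VFP2 the new path above decides the truthiness of a heap number from the raw IEEE-754 words instead of a VFP compare. A rough stand-alone sketch of the same classification, with the bit masks written out here for illustration (the stub reads the two words from the HeapNumber's exponent and mantissa fields):

#include <cstdint>

// high_word: sign bit, 11 exponent bits, top 20 mantissa bits.
// low_word:  remaining 32 mantissa bits.
bool DoubleIsTruthy(uint32_t high_word, uint32_t low_word) {
  const uint32_t kSignMask         = 0x80000000u;
  const uint32_t kExponentMask     = 0x7ff00000u;
  const uint32_t kMantissaHighMask = 0x000fffffu;
  uint32_t unsigned_high = high_word & ~kSignMask;  // drop the sign bit
  if (unsigned_high == 0) {
    // +0, -0, or a subnormal whose high mantissa bits are all zero:
    // truthy only if the low mantissa word is non-zero.
    return low_word != 0;
  }
  bool exponent_all_ones = (unsigned_high & kExponentMask) == kExponentMask;
  bool mantissa_nonzero =
      (unsigned_high & kMantissaHighMask) != 0 || low_word != 0;
  if (exponent_all_ones && mantissa_nonzero) return false;  // NaN -> false
  return true;  // any other value, including infinities -> true
}
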
@@ -1863,7 +2051,7 @@ void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1863
2051
  // The value of a root is never NULL, so we can avoid loading a non-null
1864
2052
  // value into tos_ when we want to return 'true'.
1865
2053
  if (!result) {
1866
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
2054
+ __ mov(tos_, Operand::Zero(), LeaveCC, eq);
1867
2055
  }
1868
2056
  __ Ret(eq);
1869
2057
  }
@@ -1891,17 +2079,22 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1891
2079
  // store the registers in any particular way, but we do have to store and
1892
2080
  // restore them.
1893
2081
  __ stm(db_w, sp, kCallerSaved | lr.bit());
2082
+
2083
+ const Register scratch = r1;
2084
+
1894
2085
  if (save_doubles_ == kSaveFPRegs) {
1895
- CpuFeatures::Scope scope(VFP3);
1896
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
1897
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
2086
+ CpuFeatures::Scope scope(VFP2);
2087
+ // Check CPU flags for number of registers, setting the Z condition flag.
2088
+ __ CheckFor32DRegs(scratch);
2089
+
2090
+ __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
2091
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
1898
2092
  DwVfpRegister reg = DwVfpRegister::from_code(i);
1899
- __ vstr(reg, MemOperand(sp, i * kDoubleSize));
2093
+ __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
1900
2094
  }
1901
2095
  }
1902
2096
  const int argument_count = 1;
1903
2097
  const int fp_argument_count = 0;
1904
- const Register scratch = r1;
1905
2098
 
1906
2099
  AllowExternalCallThatCantCauseGC scope(masm);
1907
2100
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
@@ -1910,12 +2103,16 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1910
2103
  ExternalReference::store_buffer_overflow_function(masm->isolate()),
1911
2104
  argument_count);
1912
2105
  if (save_doubles_ == kSaveFPRegs) {
1913
- CpuFeatures::Scope scope(VFP3);
1914
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
2106
+ CpuFeatures::Scope scope(VFP2);
2107
+
2108
+ // Check CPU flags for number of registers, setting the Z condition flag.
2109
+ __ CheckFor32DRegs(scratch);
2110
+
2111
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
1915
2112
  DwVfpRegister reg = DwVfpRegister::from_code(i);
1916
- __ vldr(reg, MemOperand(sp, i * kDoubleSize));
2113
+ __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
1917
2114
  }
1918
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
2115
+ __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
1919
2116
  }
1920
2117
  __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
1921
2118
  }
@@ -2008,7 +2205,7 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2008
2205
  __ b(eq, slow);
2009
2206
 
2010
2207
  // Return '0 - value'.
2011
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
2208
+ __ rsb(r0, r0, Operand::Zero());
2012
2209
  __ Ret();
2013
2210
  }
2014
2211
 
@@ -2140,9 +2337,9 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2140
2337
  __ mov(r0, r2); // Move newly allocated heap number to r0.
2141
2338
  }
2142
2339
 
2143
- if (CpuFeatures::IsSupported(VFP3)) {
2340
+ if (CpuFeatures::IsSupported(VFP2)) {
2144
2341
  // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
2145
- CpuFeatures::Scope scope(VFP3);
2342
+ CpuFeatures::Scope scope(VFP2);
2146
2343
  __ vmov(s0, r1);
2147
2344
  __ vcvt_f64_s32(d0, s0);
2148
2345
  __ sub(r2, r0, Operand(kHeapObjectTag));
@@ -2213,20 +2410,23 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
2213
2410
  }
2214
2411
 
2215
2412
 
2413
+ void BinaryOpStub::Initialize() {
2414
+ platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
2415
+ }
2416
+
2417
+
2216
2418
  void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2217
2419
  Label get_result;
2218
2420
 
2219
2421
  __ Push(r1, r0);
2220
2422
 
2221
2423
  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
2222
- __ mov(r1, Operand(Smi::FromInt(op_)));
2223
- __ mov(r0, Operand(Smi::FromInt(operands_type_)));
2224
- __ Push(r2, r1, r0);
2424
+ __ push(r2);
2225
2425
 
2226
2426
  __ TailCallExternalReference(
2227
2427
  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2228
2428
  masm->isolate()),
2229
- 5,
2429
+ 3,
2230
2430
  1);
2231
2431
  }
2232
2432
 
@@ -2237,59 +2437,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2237
2437
  }
2238
2438
 
2239
2439
 
2240
- void BinaryOpStub::Generate(MacroAssembler* masm) {
2241
- // Explicitly allow generation of nested stubs. It is safe here because
2242
- // generation code does not use any raw pointers.
2243
- AllowStubCallsScope allow_stub_calls(masm, true);
2244
-
2245
- switch (operands_type_) {
2246
- case BinaryOpIC::UNINITIALIZED:
2247
- GenerateTypeTransition(masm);
2248
- break;
2249
- case BinaryOpIC::SMI:
2250
- GenerateSmiStub(masm);
2251
- break;
2252
- case BinaryOpIC::INT32:
2253
- GenerateInt32Stub(masm);
2254
- break;
2255
- case BinaryOpIC::HEAP_NUMBER:
2256
- GenerateHeapNumberStub(masm);
2257
- break;
2258
- case BinaryOpIC::ODDBALL:
2259
- GenerateOddballStub(masm);
2260
- break;
2261
- case BinaryOpIC::BOTH_STRING:
2262
- GenerateBothStringStub(masm);
2263
- break;
2264
- case BinaryOpIC::STRING:
2265
- GenerateStringStub(masm);
2266
- break;
2267
- case BinaryOpIC::GENERIC:
2268
- GenerateGeneric(masm);
2269
- break;
2270
- default:
2271
- UNREACHABLE();
2272
- }
2273
- }
2274
-
2275
-
2276
- void BinaryOpStub::PrintName(StringStream* stream) {
2277
- const char* op_name = Token::Name(op_);
2278
- const char* overwrite_name;
2279
- switch (mode_) {
2280
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2281
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2282
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2283
- default: overwrite_name = "UnknownOverwrite"; break;
2284
- }
2285
- stream->Add("BinaryOpStub_%s_%s_%s",
2286
- op_name,
2287
- overwrite_name,
2288
- BinaryOpIC::GetName(operands_type_));
2289
- }
2290
-
2291
-
2292
- void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2440
+ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
2441
+ Token::Value op) {
2293
2442
  Register left = r1;
2294
2443
  Register right = r0;
2295
2444
  Register scratch1 = r7;
@@ -2299,7 +2448,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2299
2448
  STATIC_ASSERT(kSmiTag == 0);
2300
2449
 
2301
2450
  Label not_smi_result;
2302
- switch (op_) {
2451
+ switch (op) {
2303
2452
  case Token::ADD:
2304
2453
  __ add(right, left, Operand(right), SetCC); // Add optimistically.
2305
2454
  __ Ret(vc);
@@ -2324,7 +2473,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2324
2473
  __ cmp(ip, Operand(scratch2));
2325
2474
  __ b(ne, &not_smi_result);
2326
2475
  // Go slow on zero result to handle -0.
2327
- __ cmp(scratch1, Operand(0));
2476
+ __ cmp(scratch1, Operand::Zero());
2328
2477
  __ mov(right, Operand(scratch1), LeaveCC, ne);
2329
2478
  __ Ret(ne);
2330
2479
  // We need -0 if we were multiplying a negative number with 0 to get 0.
@@ -2335,33 +2484,112 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2335
2484
  // We fall through here if we multiplied a negative number with 0, because
2336
2485
  // that would mean we should produce -0.
2337
2486
  break;
2338
- case Token::DIV:
2487
+ case Token::DIV: {
2488
+ Label div_with_sdiv;
2489
+
2490
+ // Check for 0 divisor.
2491
+ __ cmp(right, Operand::Zero());
2492
+ __ b(eq, &not_smi_result);
2493
+
2339
2494
  // Check for power of two on the right hand side.
2340
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2341
- // Check for positive and no remainder (scratch1 contains right - 1).
2342
- __ orr(scratch2, scratch1, Operand(0x80000000u));
2343
- __ tst(left, scratch2);
2344
- __ b(ne, &not_smi_result);
2495
+ __ sub(scratch1, right, Operand(1));
2496
+ __ tst(scratch1, right);
2497
+ if (CpuFeatures::IsSupported(SUDIV)) {
2498
+ __ b(ne, &div_with_sdiv);
2499
+ // Check for no remainder.
2500
+ __ tst(left, scratch1);
2501
+ __ b(ne, &not_smi_result);
2502
+ // Check for positive left hand side.
2503
+ __ cmp(left, Operand::Zero());
2504
+ __ b(mi, &div_with_sdiv);
2505
+ } else {
2506
+ __ b(ne, &not_smi_result);
2507
+ // Check for positive and no remainder.
2508
+ __ orr(scratch2, scratch1, Operand(0x80000000u));
2509
+ __ tst(left, scratch2);
2510
+ __ b(ne, &not_smi_result);
2511
+ }
2345
2512
 
2346
2513
  // Perform division by shifting.
2347
2514
  __ CountLeadingZeros(scratch1, scratch1, scratch2);
2348
2515
  __ rsb(scratch1, scratch1, Operand(31));
2349
2516
  __ mov(right, Operand(left, LSR, scratch1));
2350
2517
  __ Ret();
2518
+
2519
+ if (CpuFeatures::IsSupported(SUDIV)) {
2520
+ Label result_not_zero;
2521
+
2522
+ __ bind(&div_with_sdiv);
2523
+ // Do division.
2524
+ __ sdiv(scratch1, left, right);
2525
+ // Check that the remainder is zero.
2526
+ __ mls(scratch2, scratch1, right, left);
2527
+ __ cmp(scratch2, Operand::Zero());
2528
+ __ b(ne, &not_smi_result);
2529
+ // Check for negative zero result.
2530
+ __ cmp(scratch1, Operand::Zero());
2531
+ __ b(ne, &result_not_zero);
2532
+ __ cmp(right, Operand::Zero());
2533
+ __ b(lt, &not_smi_result);
2534
+ __ bind(&result_not_zero);
2535
+ // Check for the corner case of dividing the most negative smi by -1.
2536
+ __ cmp(scratch1, Operand(0x40000000));
2537
+ __ b(eq, &not_smi_result);
2538
+ // Tag and return the result.
2539
+ __ SmiTag(right, scratch1);
2540
+ __ Ret();
2541
+ }
2351
2542
  break;
2352
- case Token::MOD:
2353
- // Check for two positive smis.
2354
- __ orr(scratch1, left, Operand(right));
2355
- __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
2356
- __ b(ne, &not_smi_result);
2543
+ }
2544
+ case Token::MOD: {
2545
+ Label modulo_with_sdiv;
2546
+
2547
+ if (CpuFeatures::IsSupported(SUDIV)) {
2548
+ // Check for x % 0.
2549
+ __ cmp(right, Operand::Zero());
2550
+ __ b(eq, &not_smi_result);
2551
+
2552
+ // Check for two positive smis.
2553
+ __ orr(scratch1, left, Operand(right));
2554
+ __ tst(scratch1, Operand(0x80000000u));
2555
+ __ b(ne, &modulo_with_sdiv);
2556
+
2557
+ // Check for power of two on the right hand side.
2558
+ __ sub(scratch1, right, Operand(1));
2559
+ __ tst(scratch1, right);
2560
+ __ b(ne, &modulo_with_sdiv);
2561
+ } else {
2562
+ // Check for two positive smis.
2563
+ __ orr(scratch1, left, Operand(right));
2564
+ __ tst(scratch1, Operand(0x80000000u));
2565
+ __ b(ne, &not_smi_result);
2357
2566
 
2358
- // Check for power of two on the right hand side.
2359
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2567
+ // Check for power of two on the right hand side.
2568
+ __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2569
+ }
2360
2570
 
2361
- // Perform modulus by masking.
2571
+ // Perform modulus by masking (scratch1 contains right - 1).
2362
2572
  __ and_(right, left, Operand(scratch1));
2363
2573
  __ Ret();
2574
+
2575
+ if (CpuFeatures::IsSupported(SUDIV)) {
2576
+ __ bind(&modulo_with_sdiv);
2577
+ __ mov(scratch2, right);
2578
+ // Perform modulus with sdiv and mls.
2579
+ __ sdiv(scratch1, left, right);
2580
+ __ mls(right, scratch1, right, left);
2581
+ // Return if the result is not 0.
2582
+ __ cmp(right, Operand::Zero());
2583
+ __ Ret(ne);
2584
+ // The result is 0, check for -0 case.
2585
+ __ cmp(left, Operand::Zero());
2586
+ __ Ret(pl);
2587
+ // This is a -0 case, restore the value of right.
2588
+ __ mov(right, scratch2);
2589
+ // We fall through here to not_smi_result to produce -0.
2590
+ }
2364
2591
  break;
2592
+ }
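
Both smi fast paths above share one idea: a power-of-two divisor can be handled by shifting (DIV) or masking (MOD), and on cores with SUDIV everything else goes through sdiv, with mls recovering the remainder so that inexact results, unrepresentable results and -0 can be punted to the generic path. The MOD case follows the same pattern as the division sketch below, masking with right - 1 for powers of two and falling back to sdiv/mls otherwise. A C++ sketch of the DIV logic only, ignoring smi tagging (the helper name and structure are illustrative, not V8 code):

#include <cstdint>

// Returns true and stores the quotient when a JavaScript-correct smi result
// exists; returning false stands in for the jump to not_smi_result.
bool SmiDivResult(int32_t left, int32_t right, int32_t* result) {
  if (right == 0) return false;                    // x / 0 is not a smi
  int32_t quotient;
  if ((right & (right - 1)) == 0 &&                // power-of-two divisor
      left >= 0 && (left & (right - 1)) == 0) {    // positive and exact
    // Like the CLZ + rsb sequence; __builtin_clz is a GCC/Clang builtin.
    int shift = 31 - __builtin_clz(static_cast<uint32_t>(right));
    quotient = left >> shift;                      // divide by shifting
  } else {
    quotient = left / right;                       // sdiv
    if (quotient * right != left) return false;    // mls remainder is non-zero
    if (quotient == 0 && right < 0) return false;  // result would be -0
    if (quotient == 0x40000000) return false;      // -2^30 / -1 does not fit
  }
  *result = quotient;
  return true;
}
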
2365
2593
  case Token::BIT_OR:
2366
2594
  __ orr(right, left, Operand(right));
2367
2595
  __ Ret();
@@ -2414,10 +2642,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2414
2642
  }
2415
2643
 
2416
2644
 
2417
- void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2418
- bool smi_operands,
2419
- Label* not_numbers,
2420
- Label* gc_required) {
2645
+ void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
2646
+ Register result,
2647
+ Register heap_number_map,
2648
+ Register scratch1,
2649
+ Register scratch2,
2650
+ Label* gc_required,
2651
+ OverwriteMode mode);
2652
+
2653
+
2654
+ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
2655
+ BinaryOpIC::TypeInfo left_type,
2656
+ BinaryOpIC::TypeInfo right_type,
2657
+ bool smi_operands,
2658
+ Label* not_numbers,
2659
+ Label* gc_required,
2660
+ Label* miss,
2661
+ Token::Value op,
2662
+ OverwriteMode mode) {
2421
2663
  Register left = r1;
2422
2664
  Register right = r0;
2423
2665
  Register scratch1 = r7;
@@ -2425,15 +2667,21 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2425
2667
  Register scratch3 = r4;
2426
2668
 
2427
2669
  ASSERT(smi_operands || (not_numbers != NULL));
2428
- if (smi_operands && FLAG_debug_code) {
2429
- __ AbortIfNotSmi(left);
2430
- __ AbortIfNotSmi(right);
2670
+ if (smi_operands) {
2671
+ __ AssertSmi(left);
2672
+ __ AssertSmi(right);
2673
+ }
2674
+ if (left_type == BinaryOpIC::SMI) {
2675
+ __ JumpIfNotSmi(left, miss);
2676
+ }
2677
+ if (right_type == BinaryOpIC::SMI) {
2678
+ __ JumpIfNotSmi(right, miss);
2431
2679
  }
2432
2680
 
2433
2681
  Register heap_number_map = r6;
2434
2682
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2435
2683
 
2436
- switch (op_) {
2684
+ switch (op) {
2437
2685
  case Token::ADD:
2438
2686
  case Token::SUB:
2439
2687
  case Token::MUL:
@@ -2442,26 +2690,45 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2442
2690
  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
2443
2691
  // depending on whether VFP2 is available or not.
2444
2692
  FloatingPointHelper::Destination destination =
2445
- CpuFeatures::IsSupported(VFP3) &&
2446
- op_ != Token::MOD ?
2693
+ CpuFeatures::IsSupported(VFP2) &&
2694
+ op != Token::MOD ?
2447
2695
  FloatingPointHelper::kVFPRegisters :
2448
2696
  FloatingPointHelper::kCoreRegisters;
2449
2697
 
2450
2698
  // Allocate new heap number for result.
2451
2699
  Register result = r5;
2452
- GenerateHeapResultAllocation(
2453
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
2700
+ BinaryOpStub_GenerateHeapResultAllocation(
2701
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
2454
2702
 
2455
2703
  // Load the operands.
2456
2704
  if (smi_operands) {
2457
2705
  FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2458
2706
  } else {
2459
- FloatingPointHelper::LoadOperands(masm,
2460
- destination,
2461
- heap_number_map,
2462
- scratch1,
2463
- scratch2,
2464
- not_numbers);
2707
+ // Load right operand to d7 or r2/r3.
2708
+ if (right_type == BinaryOpIC::INT32) {
2709
+ FloatingPointHelper::LoadNumberAsInt32Double(
2710
+ masm, right, destination, d7, d8, r2, r3, heap_number_map,
2711
+ scratch1, scratch2, s0, miss);
2712
+ } else {
2713
+ Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss
2714
+ : not_numbers;
2715
+ FloatingPointHelper::LoadNumber(
2716
+ masm, destination, right, d7, r2, r3, heap_number_map,
2717
+ scratch1, scratch2, fail);
2718
+ }
2719
+ // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
2720
+ // jumps to |miss|.
2721
+ if (left_type == BinaryOpIC::INT32) {
2722
+ FloatingPointHelper::LoadNumberAsInt32Double(
2723
+ masm, left, destination, d6, d8, r0, r1, heap_number_map,
2724
+ scratch1, scratch2, s0, miss);
2725
+ } else {
2726
+ Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss
2727
+ : not_numbers;
2728
+ FloatingPointHelper::LoadNumber(
2729
+ masm, destination, left, d6, r0, r1, heap_number_map,
2730
+ scratch1, scratch2, fail);
2731
+ }
2465
2732
  }
2466
2733
 
2467
2734
  // Calculate the result.
@@ -2469,8 +2736,8 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2469
2736
  // Using VFP registers:
2470
2737
  // d6: Left value
2471
2738
  // d7: Right value
2472
- CpuFeatures::Scope scope(VFP3);
2473
- switch (op_) {
2739
+ CpuFeatures::Scope scope(VFP2);
2740
+ switch (op) {
2474
2741
  case Token::ADD:
2475
2742
  __ vadd(d5, d6, d7);
2476
2743
  break;
@@ -2494,7 +2761,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2494
2761
  } else {
2495
2762
  // Call the C function to handle the double operation.
2496
2763
  FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2497
- op_,
2764
+ op,
2498
2765
  result,
2499
2766
  scratch1);
2500
2767
  if (FLAG_debug_code) {
@@ -2535,7 +2802,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2535
2802
  }
2536
2803
 
2537
2804
  Label result_not_a_smi;
2538
- switch (op_) {
2805
+ switch (op) {
2539
2806
  case Token::BIT_OR:
2540
2807
  __ orr(r2, r3, Operand(r2));
2541
2808
  break;
@@ -2558,7 +2825,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2558
2825
  // The code below for writing into heap numbers isn't capable of
2559
2826
  // writing the register as an unsigned int so we go to slow case if we
2560
2827
  // hit this case.
2561
- if (CpuFeatures::IsSupported(VFP3)) {
2828
+ if (CpuFeatures::IsSupported(VFP2)) {
2562
2829
  __ b(mi, &result_not_a_smi);
2563
2830
  } else {
2564
2831
  __ b(mi, not_numbers);
@@ -2586,8 +2853,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2586
2853
  __ AllocateHeapNumber(
2587
2854
  result, scratch1, scratch2, heap_number_map, gc_required);
2588
2855
  } else {
2589
- GenerateHeapResultAllocation(
2590
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
2856
+ BinaryOpStub_GenerateHeapResultAllocation(
2857
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
2858
+ mode);
2591
2859
  }
2592
2860
 
2593
2861
  // r2: Answer as signed int32.
@@ -2597,12 +2865,12 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2597
2865
  // result.
2598
2866
  __ mov(r0, Operand(r5));
2599
2867
 
2600
- if (CpuFeatures::IsSupported(VFP3)) {
2868
+ if (CpuFeatures::IsSupported(VFP2)) {
2601
2869
  // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
2602
2870
  // mentioned above SHR needs to always produce a positive result.
2603
- CpuFeatures::Scope scope(VFP3);
2871
+ CpuFeatures::Scope scope(VFP2);
2604
2872
  __ vmov(s0, r2);
2605
- if (op_ == Token::SHR) {
2873
+ if (op == Token::SHR) {
2606
2874
  __ vcvt_f64_u32(d0, s0);
2607
2875
  } else {
2608
2876
  __ vcvt_f64_s32(d0, s0);
@@ -2627,12 +2895,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2627
2895
  // Generate the smi code. If the operation on smis is successful this return is
2628
2896
  // generated. If the result is not a smi and heap number allocation is not
2629
2897
  // requested the code falls through. If number allocation is requested but a
2630
- // heap number cannot be allocated the code jumps to the lable gc_required.
2631
- void BinaryOpStub::GenerateSmiCode(
2898
+ // heap number cannot be allocated the code jumps to the label gc_required.
2899
+ void BinaryOpStub_GenerateSmiCode(
2632
2900
  MacroAssembler* masm,
2633
2901
  Label* use_runtime,
2634
2902
  Label* gc_required,
2635
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2903
+ Token::Value op,
2904
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
2905
+ OverwriteMode mode) {
2636
2906
  Label not_smis;
2637
2907
 
2638
2908
  Register left = r1;
@@ -2645,12 +2915,14 @@ void BinaryOpStub::GenerateSmiCode(
2645
2915
  __ JumpIfNotSmi(scratch1, &not_smis);
2646
2916
 
2647
2917
  // If the smi-smi operation results in a smi return is generated.
2648
- GenerateSmiSmiOperation(masm);
2918
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op);
2649
2919
 
2650
2920
  // If heap number results are possible generate the result in an allocated
2651
2921
  // heap number.
2652
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2653
- GenerateFPOperation(masm, true, use_runtime, gc_required);
2922
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
2923
+ BinaryOpStub_GenerateFPOperation(
2924
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
2925
+ use_runtime, gc_required, &not_smis, op, mode);
2654
2926
  }
2655
2927
  __ bind(&not_smis);
2656
2928
  }
@@ -2662,14 +2934,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2662
2934
  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2663
2935
  result_type_ == BinaryOpIC::SMI) {
2664
2936
  // Only allow smi results.
2665
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2937
+ BinaryOpStub_GenerateSmiCode(
2938
+ masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
2666
2939
  } else {
2667
2940
  // Allow heap number result and don't make a transition if a heap number
2668
2941
  // cannot be allocated.
2669
- GenerateSmiCode(masm,
2670
- &call_runtime,
2671
- &call_runtime,
2672
- ALLOW_HEAPNUMBER_RESULTS);
2942
+ BinaryOpStub_GenerateSmiCode(
2943
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
2944
+ mode_);
2673
2945
  }
2674
2946
 
2675
2947
  // Code falls through if the result is not returned as either a smi or heap
@@ -2677,23 +2949,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2677
2949
  GenerateTypeTransition(masm);
2678
2950
 
2679
2951
  __ bind(&call_runtime);
2952
+ GenerateRegisterArgsPush(masm);
2680
2953
  GenerateCallRuntime(masm);
2681
2954
  }
2682
2955
 
2683
2956
 
2684
- void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2685
- ASSERT(operands_type_ == BinaryOpIC::STRING);
2686
- ASSERT(op_ == Token::ADD);
2687
- // Try to add arguments as strings, otherwise, transition to the generic
2688
- // BinaryOpIC type.
2689
- GenerateAddStrings(masm);
2690
- GenerateTypeTransition(masm);
2691
- }
2692
-
2693
-
2694
2957
  void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2695
2958
  Label call_runtime;
2696
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2959
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
2697
2960
  ASSERT(op_ == Token::ADD);
2698
2961
  // If both arguments are strings, call the string add stub.
2699
2962
  // Otherwise, do a transition.
@@ -2722,14 +2985,13 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2722
2985
 
2723
2986
 
2724
2987
  void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2725
- ASSERT(operands_type_ == BinaryOpIC::INT32);
2988
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
2726
2989
 
2727
2990
  Register left = r1;
2728
2991
  Register right = r0;
2729
2992
  Register scratch1 = r7;
2730
2993
  Register scratch2 = r9;
2731
2994
  DwVfpRegister double_scratch = d0;
2732
- SwVfpRegister single_scratch = s3;
2733
2995
 
2734
2996
  Register heap_number_result = no_reg;
2735
2997
  Register heap_number_map = r6;
@@ -2745,7 +3007,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2745
3007
  Label skip;
2746
3008
  __ orr(scratch1, left, right);
2747
3009
  __ JumpIfNotSmi(scratch1, &skip);
2748
- GenerateSmiSmiOperation(masm);
3010
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
2749
3011
  // Fall through if the result is not a smi.
2750
3012
  __ bind(&skip);
2751
3013
 
@@ -2755,11 +3017,20 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2755
3017
  case Token::MUL:
2756
3018
  case Token::DIV:
2757
3019
  case Token::MOD: {
3020
+ // It could be that only SMIs have been seen at either the left
3021
+ // or the right operand. For precise type feedback, patch the IC
3022
+ // again if this changes.
3023
+ if (left_type_ == BinaryOpIC::SMI) {
3024
+ __ JumpIfNotSmi(left, &transition);
3025
+ }
3026
+ if (right_type_ == BinaryOpIC::SMI) {
3027
+ __ JumpIfNotSmi(right, &transition);
3028
+ }
2758
3029
  // Load both operands and check that they are 32-bit integer.
2759
3030
  // Jump to type transition if they are not. The registers r0 and r1 (right
2760
3031
  // and left) are preserved for the runtime call.
2761
3032
  FloatingPointHelper::Destination destination =
2762
- (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
3033
+ (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
2763
3034
  ? FloatingPointHelper::kVFPRegisters
2764
3035
  : FloatingPointHelper::kCoreRegisters;
2765
3036
 
@@ -2767,6 +3038,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2767
3038
  right,
2768
3039
  destination,
2769
3040
  d7,
3041
+ d8,
2770
3042
  r2,
2771
3043
  r3,
2772
3044
  heap_number_map,
@@ -2778,6 +3050,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2778
3050
  left,
2779
3051
  destination,
2780
3052
  d6,
3053
+ d8,
2781
3054
  r4,
2782
3055
  r5,
2783
3056
  heap_number_map,
@@ -2787,7 +3060,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2787
3060
  &transition);
2788
3061
 
2789
3062
  if (destination == FloatingPointHelper::kVFPRegisters) {
2790
- CpuFeatures::Scope scope(VFP3);
3063
+ CpuFeatures::Scope scope(VFP2);
2791
3064
  Label return_heap_number;
2792
3065
  switch (op_) {
2793
3066
  case Token::ADD:
@@ -2813,10 +3086,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2813
3086
  // transition.
2814
3087
 
2815
3088
  __ EmitVFPTruncate(kRoundToZero,
2816
- single_scratch,
2817
- d5,
2818
3089
  scratch1,
2819
- scratch2);
3090
+ d5,
3091
+ scratch2,
3092
+ d8);
2820
3093
 
2821
3094
  if (result_type_ <= BinaryOpIC::INT32) {
2822
3095
  // If the ne condition is set, result does
@@ -2825,7 +3098,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2825
3098
  }
2826
3099
 
2827
3100
  // Check if the result fits in a smi.
2828
- __ vmov(scratch1, single_scratch);
2829
3101
  __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
2830
3102
  // If not try to return a heap number.
2831
3103
  __ b(mi, &return_heap_number);
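
The check above relies on the smi payload being 31 bits wide: adding 0x40000000 to the truncated result sets the sign flag (mi) exactly when the value lies outside [-2^30, 2^30). The same test in C++, as a sketch only:

#include <cstdint>

// Mirrors "add scratch2, scratch1, #0x40000000, SetCC; b mi, ...": the sum
// has its sign bit set exactly when value < -2^30 or value >= 2^30, i.e. when
// the value cannot be stored in a 31-bit smi payload.
bool FitsInSmi(int32_t value) {
  return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
}
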
@@ -2852,12 +3124,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2852
3124
  : BinaryOpIC::INT32)) {
2853
3125
  // We are using vfp registers so r5 is available.
2854
3126
  heap_number_result = r5;
2855
- GenerateHeapResultAllocation(masm,
2856
- heap_number_result,
2857
- heap_number_map,
2858
- scratch1,
2859
- scratch2,
2860
- &call_runtime);
3127
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
3128
+ heap_number_result,
3129
+ heap_number_map,
3130
+ scratch1,
3131
+ scratch2,
3132
+ &call_runtime,
3133
+ mode_);
2861
3134
  __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
2862
3135
  __ vstr(d5, r0, HeapNumber::kValueOffset);
2863
3136
  __ mov(r0, heap_number_result);
@@ -2876,12 +3149,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2876
3149
 
2877
3150
  // Allocate a heap number to store the result.
2878
3151
  heap_number_result = r5;
2879
- GenerateHeapResultAllocation(masm,
2880
- heap_number_result,
2881
- heap_number_map,
2882
- scratch1,
2883
- scratch2,
2884
- &pop_and_call_runtime);
3152
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
3153
+ heap_number_result,
3154
+ heap_number_map,
3155
+ scratch1,
3156
+ scratch2,
3157
+ &pop_and_call_runtime,
3158
+ mode_);
2885
3159
 
2886
3160
  // Load the left value from the value saved on the stack.
2887
3161
  __ Pop(r1, r0);
@@ -2920,6 +3194,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2920
3194
  scratch2,
2921
3195
  scratch3,
2922
3196
  d0,
3197
+ d1,
2923
3198
  &transition);
2924
3199
  FloatingPointHelper::LoadNumberAsInt32(masm,
2925
3200
  right,
@@ -2929,6 +3204,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2929
3204
  scratch2,
2930
3205
  scratch3,
2931
3206
  d0,
3207
+ d1,
2932
3208
  &transition);
2933
3209
 
2934
3210
  // The ECMA-262 standard specifies that, for shift operations, only the
@@ -2954,9 +3230,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2954
3230
  // We only get a negative result if the shift value (r2) is 0.
2955
3231
  // This result cannot be represented as a signed 32-bit integer; try
2956
3232
  // to return a heap number if we can.
2957
- // The non vfp3 code does not support this special case, so jump to
3233
+ // The non vfp2 code does not support this special case, so jump to
2958
3234
  // runtime if we don't support it.
2959
- if (CpuFeatures::IsSupported(VFP3)) {
3235
+ if (CpuFeatures::IsSupported(VFP2)) {
2960
3236
  __ b(mi, (result_type_ <= BinaryOpIC::INT32)
2961
3237
  ? &transition
2962
3238
  : &return_heap_number);
@@ -2984,15 +3260,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2984
3260
 
2985
3261
  __ bind(&return_heap_number);
2986
3262
  heap_number_result = r5;
2987
- GenerateHeapResultAllocation(masm,
2988
- heap_number_result,
2989
- heap_number_map,
2990
- scratch1,
2991
- scratch2,
2992
- &call_runtime);
2993
-
2994
- if (CpuFeatures::IsSupported(VFP3)) {
2995
- CpuFeatures::Scope scope(VFP3);
3263
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
3264
+ heap_number_result,
3265
+ heap_number_map,
3266
+ scratch1,
3267
+ scratch2,
3268
+ &call_runtime,
3269
+ mode_);
3270
+
3271
+ if (CpuFeatures::IsSupported(VFP2)) {
3272
+ CpuFeatures::Scope scope(VFP2);
2996
3273
  if (op_ != Token::SHR) {
2997
3274
  // Convert the result to a floating point value.
2998
3275
  __ vmov(double_scratch.low(), r2);
@@ -3033,6 +3310,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
3033
3310
  }
3034
3311
 
3035
3312
  __ bind(&call_runtime);
3313
+ GenerateRegisterArgsPush(masm);
3036
3314
  GenerateCallRuntime(masm);
3037
3315
  }
3038
3316
 
@@ -3071,20 +3349,32 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3071
3349
 
3072
3350
 
3073
3351
  void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3074
- Label call_runtime;
3075
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3352
+ Label call_runtime, transition;
3353
+ BinaryOpStub_GenerateFPOperation(
3354
+ masm, left_type_, right_type_, false,
3355
+ &transition, &call_runtime, &transition, op_, mode_);
3356
+
3357
+ __ bind(&transition);
3358
+ GenerateTypeTransition(masm);
3076
3359
 
3077
3360
  __ bind(&call_runtime);
3361
+ GenerateRegisterArgsPush(masm);
3078
3362
  GenerateCallRuntime(masm);
3079
3363
  }
3080
3364
 
3081
3365
 
3082
3366
  void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3083
- Label call_runtime, call_string_add_or_runtime;
3367
+ Label call_runtime, call_string_add_or_runtime, transition;
3368
+
3369
+ BinaryOpStub_GenerateSmiCode(
3370
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
3084
3371
 
3085
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3372
+ BinaryOpStub_GenerateFPOperation(
3373
+ masm, left_type_, right_type_, false,
3374
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
3086
3375
 
3087
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3376
+ __ bind(&transition);
3377
+ GenerateTypeTransition(masm);
3088
3378
 
3089
3379
  __ bind(&call_string_add_or_runtime);
3090
3380
  if (op_ == Token::ADD) {
@@ -3092,6 +3382,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3092
3382
  }
3093
3383
 
3094
3384
  __ bind(&call_runtime);
3385
+ GenerateRegisterArgsPush(masm);
3095
3386
  GenerateCallRuntime(masm);
3096
3387
  }
3097
3388
 
@@ -3127,61 +3418,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3127
3418
  }
3128
3419
 
3129
3420
 
3130
- void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3131
- GenerateRegisterArgsPush(masm);
3132
- switch (op_) {
3133
- case Token::ADD:
3134
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3135
- break;
3136
- case Token::SUB:
3137
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3138
- break;
3139
- case Token::MUL:
3140
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3141
- break;
3142
- case Token::DIV:
3143
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3144
- break;
3145
- case Token::MOD:
3146
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3147
- break;
3148
- case Token::BIT_OR:
3149
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3150
- break;
3151
- case Token::BIT_AND:
3152
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3153
- break;
3154
- case Token::BIT_XOR:
3155
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3156
- break;
3157
- case Token::SAR:
3158
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3159
- break;
3160
- case Token::SHR:
3161
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3162
- break;
3163
- case Token::SHL:
3164
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3165
- break;
3166
- default:
3167
- UNREACHABLE();
3168
- }
3169
- }
3170
-
3171
-
3172
- void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
3173
- Register result,
3174
- Register heap_number_map,
3175
- Register scratch1,
3176
- Register scratch2,
3177
- Label* gc_required) {
3421
+ void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
3422
+ Register result,
3423
+ Register heap_number_map,
3424
+ Register scratch1,
3425
+ Register scratch2,
3426
+ Label* gc_required,
3427
+ OverwriteMode mode) {
3178
3428
  // Code below will scratch result if allocation fails. To keep both arguments
3179
3429
  // intact for the runtime call result cannot be one of these.
3180
3430
  ASSERT(!result.is(r0) && !result.is(r1));
3181
3431
 
3182
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3432
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
3183
3433
  Label skip_allocation, allocated;
3184
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
3434
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
3185
3435
  // If the overwritable operand is already an object, we skip the
3186
3436
  // allocation of a heap number.
3187
3437
  __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3194,7 +3444,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
3194
3444
  __ mov(result, Operand(overwritable_operand));
3195
3445
  __ bind(&allocated);
3196
3446
  } else {
3197
- ASSERT(mode_ == NO_OVERWRITE);
3447
+ ASSERT(mode == NO_OVERWRITE);
3198
3448
  __ AllocateHeapNumber(
3199
3449
  result, scratch1, scratch2, heap_number_map, gc_required);
3200
3450
  }
@@ -3221,8 +3471,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3221
3471
  const Register cache_entry = r0;
3222
3472
  const bool tagged = (argument_type_ == TAGGED);
3223
3473
 
3224
- if (CpuFeatures::IsSupported(VFP3)) {
3225
- CpuFeatures::Scope scope(VFP3);
3474
+ if (CpuFeatures::IsSupported(VFP2)) {
3475
+ CpuFeatures::Scope scope(VFP2);
3226
3476
  if (tagged) {
3227
3477
  // Argument is a number and is on stack and in r0.
3228
3478
  // Load argument and check if it is a smi.
@@ -3272,7 +3522,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3272
3522
  __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
3273
3523
  // r0 points to the cache for the type type_.
3274
3524
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
3275
- __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
3525
+ __ cmp(cache_entry, Operand::Zero());
3276
3526
  __ b(eq, &invalid_cache);
3277
3527
 
3278
3528
  #ifdef DEBUG
@@ -3323,23 +3573,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3323
3573
  ExternalReference(RuntimeFunction(), masm->isolate());
3324
3574
  __ TailCallExternalReference(runtime_function, 1, 1);
3325
3575
  } else {
3326
- if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
3327
- CpuFeatures::Scope scope(VFP3);
3576
+ ASSERT(CpuFeatures::IsSupported(VFP2));
3577
+ CpuFeatures::Scope scope(VFP2);
3328
3578
 
3329
3579
  Label no_update;
3330
3580
  Label skip_cache;
3331
3581
 
3332
3582
  // Call C function to calculate the result and update the cache.
3333
- // Register r0 holds precalculated cache entry address; preserve
3334
- // it on the stack and pop it into register cache_entry after the
3335
- // call.
3336
- __ push(cache_entry);
3583
+ // r0: precalculated cache entry address.
3584
+ // r2 and r3: parts of the double value.
3585
+ // Store r0, r2 and r3 on stack for later before calling C function.
3586
+ __ Push(r3, r2, cache_entry);
3337
3587
  GenerateCallCFunction(masm, scratch0);
3338
3588
  __ GetCFunctionDoubleResult(d2);
3339
3589
 
3340
3590
  // Try to update the cache. If we cannot allocate a
3341
3591
  // heap number, we return the result without updating.
3342
- __ pop(cache_entry);
3592
+ __ Pop(r3, r2, cache_entry);
3343
3593
  __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3344
3594
  __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
3345
3595
  __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
@@ -3385,6 +3635,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3385
3635
 
3386
3636
  void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3387
3637
  Register scratch) {
3638
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
3388
3639
  Isolate* isolate = masm->isolate();
3389
3640
 
3390
3641
  __ push(lr);
@@ -3445,15 +3696,15 @@ void InterruptStub::Generate(MacroAssembler* masm) {
3445
3696
 
3446
3697
 
3447
3698
  void MathPowStub::Generate(MacroAssembler* masm) {
3448
- CpuFeatures::Scope vfp3_scope(VFP3);
3699
+ CpuFeatures::Scope vfp2_scope(VFP2);
3449
3700
  const Register base = r1;
3450
3701
  const Register exponent = r2;
3451
3702
  const Register heapnumbermap = r5;
3452
3703
  const Register heapnumber = r0;
3453
- const DoubleRegister double_base = d1;
3454
- const DoubleRegister double_exponent = d2;
3455
- const DoubleRegister double_result = d3;
3456
- const DoubleRegister double_scratch = d0;
3704
+ const DwVfpRegister double_base = d1;
3705
+ const DwVfpRegister double_exponent = d2;
3706
+ const DwVfpRegister double_result = d3;
3707
+ const DwVfpRegister double_scratch = d0;
3457
3708
  const SwVfpRegister single_scratch = s0;
3458
3709
  const Register scratch = r9;
3459
3710
  const Register scratch2 = r7;
@@ -3514,13 +3765,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
3514
3765
  Label not_plus_half;
3515
3766
 
3516
3767
  // Test for 0.5.
3517
- __ vmov(double_scratch, 0.5);
3768
+ __ vmov(double_scratch, 0.5, scratch);
3518
3769
  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
3519
3770
  __ b(ne, &not_plus_half);
3520
3771
 
3521
3772
  // Calculates square root of base. Check for the special case of
3522
3773
  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3523
- __ vmov(double_scratch, -V8_INFINITY);
3774
+ __ vmov(double_scratch, -V8_INFINITY, scratch);
3524
3775
  __ VFPCompareAndSetFlags(double_base, double_scratch);
3525
3776
  __ vneg(double_result, double_scratch, eq);
3526
3777
  __ b(eq, &done);
@@ -3531,20 +3782,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
3531
3782
  __ jmp(&done);
3532
3783
 
3533
3784
  __ bind(&not_plus_half);
3534
- __ vmov(double_scratch, -0.5);
3785
+ __ vmov(double_scratch, -0.5, scratch);
3535
3786
  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
3536
3787
  __ b(ne, &call_runtime);
3537
3788
 
3538
3789
  // Calculates square root of base. Check for the special case of
3539
3790
  // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3540
- __ vmov(double_scratch, -V8_INFINITY);
3791
+ __ vmov(double_scratch, -V8_INFINITY, scratch);
3541
3792
  __ VFPCompareAndSetFlags(double_base, double_scratch);
3542
3793
  __ vmov(double_result, kDoubleRegZero, eq);
3543
3794
  __ b(eq, &done);
3544
3795
 
3545
3796
  // Add +0 to convert -0 to +0.
3546
3797
  __ vadd(double_scratch, double_base, kDoubleRegZero);
3547
- __ vmov(double_result, 1);
3798
+ __ vmov(double_result, 1.0, scratch);
3548
3799
  __ vsqrt(double_scratch, double_scratch);
3549
3800
  __ vdiv(double_result, double_result, double_scratch);
3550
3801
  __ jmp(&done);
@@ -3579,11 +3830,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
3579
3830
  __ mov(exponent, scratch);
3580
3831
  }
3581
3832
  __ vmov(double_scratch, double_base); // Back up base.
3582
- __ vmov(double_result, 1.0);
3833
+ __ vmov(double_result, 1.0, scratch2);
3583
3834
 
3584
3835
  // Get absolute value of exponent.
3585
- __ cmp(scratch, Operand(0));
3586
- __ mov(scratch2, Operand(0), LeaveCC, mi);
3836
+ __ cmp(scratch, Operand::Zero());
3837
+ __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
3587
3838
  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
3588
3839
 
3589
3840
  Label while_true;
@@ -3593,9 +3844,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
3593
3844
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
3594
3845
  __ b(ne, &while_true);
3595
3846
 
3596
- __ cmp(exponent, Operand(0));
3847
+ __ cmp(exponent, Operand::Zero());
3597
3848
  __ b(ge, &done);
3598
- __ vmov(double_scratch, 1.0);
3849
+ __ vmov(double_scratch, 1.0, scratch);
3599
3850
  __ vdiv(double_result, double_scratch, double_result);
3600
3851
  // Test whether result is zero. Bail out to check for subnormal result.
3601
3852
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
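
The integer-exponent loop above is binary exponentiation (square-and-multiply) on the absolute value of the exponent, with a final reciprocal for negative exponents; as the comment notes, the stub re-checks that reciprocal and bails out to the runtime when it underflows, because 1/(x^y) is not bit-exact for subnormal results. A sketch of the equivalent scalar loop, for illustration only:

double PowIntExponent(double base, int exponent) {
  double result = 1.0;
  double scratch = base;
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  while (bits != 0) {
    if (bits & 1u) result *= scratch;  // multiply in the current exponent bit
    scratch *= scratch;                // square for the next bit
    bits >>= 1u;
  }
  if (exponent < 0) {
    result = 1.0 / result;  // the stub re-checks this value for underflow
  }
  return result;
}
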
@@ -3663,12 +3914,29 @@ void CodeStub::GenerateStubsAheadOfTime() {
3663
3914
 
3664
3915
 
3665
3916
  void CodeStub::GenerateFPStubs() {
3666
- CEntryStub save_doubles(1, kSaveFPRegs);
3667
- Handle<Code> code = save_doubles.GetCode();
3668
- code->set_is_pregenerated(true);
3669
- StoreBufferOverflowStub stub(kSaveFPRegs);
3670
- stub.GetCode()->set_is_pregenerated(true);
3671
- code->GetIsolate()->set_fp_stubs_generated(true);
3917
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
3918
+ ? kSaveFPRegs
3919
+ : kDontSaveFPRegs;
3920
+ CEntryStub save_doubles(1, mode);
3921
+ StoreBufferOverflowStub stub(mode);
3922
+ // These stubs might already be in the snapshot, detect that and don't
3923
+ // regenerate, which would lead to code stub initialization state being messed
3924
+ // up.
3925
+ Code* save_doubles_code = NULL;
3926
+ Code* store_buffer_overflow_code = NULL;
3927
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
3928
+ if (CpuFeatures::IsSupported(VFP2)) {
3929
+ CpuFeatures::Scope scope2(VFP2);
3930
+ save_doubles_code = *save_doubles.GetCode();
3931
+ store_buffer_overflow_code = *stub.GetCode();
3932
+ } else {
3933
+ save_doubles_code = *save_doubles.GetCode();
3934
+ store_buffer_overflow_code = *stub.GetCode();
3935
+ }
3936
+ save_doubles_code->set_is_pregenerated(true);
3937
+ store_buffer_overflow_code->set_is_pregenerated(true);
3938
+ }
3939
+ ISOLATE->set_fp_stubs_generated(true);
3672
3940
  }
3673
3941
 
3674
3942
 
@@ -3679,6 +3947,18 @@ void CEntryStub::GenerateAheadOfTime() {
3679
3947
  }
3680
3948
 
3681
3949
 
3950
+ static void JumpIfOOM(MacroAssembler* masm,
3951
+ Register value,
3952
+ Register scratch,
3953
+ Label* oom_label) {
3954
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
3955
+ STATIC_ASSERT(kFailureTag == 3);
3956
+ __ and_(scratch, value, Operand(0xf));
3957
+ __ cmp(scratch, Operand(0xf));
3958
+ __ b(eq, oom_label);
3959
+ }
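
JumpIfOOM replaces the old pointer comparison against a single out-of-memory sentinel. With kFailureTag occupying the two low bits (value 3) and the failure type OUT_OF_MEMORY_EXCEPTION equal to 3 in the next two bits, every out-of-memory failure carries 0xf in its low nibble regardless of the payload above it, which is exactly what the two STATIC_ASSERTs pin down. The same check in C++, with the encoding constants restated here for illustration:

#include <cstdint>

bool IsOutOfMemoryFailure(uintptr_t value) {
  const uintptr_t kFailureTag      = 3;  // low 2 bits mark a Failure pointer
  const uintptr_t kOutOfMemoryType = 3;  // Failure::OUT_OF_MEMORY_EXCEPTION
  const uintptr_t kOomNibble = (kOutOfMemoryType << 2) | kFailureTag;  // 0xf
  return (value & 0xfu) == kOomNibble;
}
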
3960
+
3961
+
3682
3962
  void CEntryStub::GenerateCore(MacroAssembler* masm,
3683
3963
  Label* throw_normal_exception,
3684
3964
  Label* throw_termination_exception,
@@ -3737,9 +4017,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
3737
4017
  // Compute the return address in lr to return to after the jump below. Pc is
3738
4018
  // already at '+ 8' from the current instruction but return is after three
3739
4019
  // instructions so add another 4 to pc to get the return address.
3740
- masm->add(lr, pc, Operand(4));
3741
- __ str(lr, MemOperand(sp, 0));
3742
- masm->Jump(r5);
4020
+ {
4021
+ // Prevent literal pool emission before return address.
4022
+ Assembler::BlockConstPoolScope block_const_pool(masm);
4023
+ masm->add(lr, pc, Operand(4));
4024
+ __ str(lr, MemOperand(sp, 0));
4025
+ masm->Jump(r5);
4026
+ }
3743
4027
 
3744
4028
  if (always_allocate) {
3745
4029
  // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@@ -3774,9 +4058,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
3774
4058
  __ b(eq, &retry);
3775
4059
 
3776
4060
  // Special handling of out of memory exceptions.
3777
- Failure* out_of_memory = Failure::OutOfMemoryException();
3778
- __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3779
- __ b(eq, throw_out_of_memory_exception);
4061
+ JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
3780
4062
 
3781
4063
  // Retrieve the pending exception and clear the variable.
3782
4064
  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
@@ -3863,13 +4145,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
3863
4145
  Isolate* isolate = masm->isolate();
3864
4146
  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
3865
4147
  isolate);
3866
- __ mov(r0, Operand(false, RelocInfo::NONE));
4148
+ __ mov(r0, Operand(false, RelocInfo::NONE32));
3867
4149
  __ mov(r2, Operand(external_caught));
3868
4150
  __ str(r0, MemOperand(r2));
3869
4151
 
3870
4152
  // Set pending exception and r0 to out of memory exception.
3871
- Failure* out_of_memory = Failure::OutOfMemoryException();
4153
+ Label already_have_failure;
4154
+ JumpIfOOM(masm, r0, ip, &already_have_failure);
4155
+ Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
3872
4156
  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4157
+ __ bind(&already_have_failure);
3873
4158
  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3874
4159
  isolate)));
3875
4160
  __ str(r0, MemOperand(r2));
@@ -3897,8 +4182,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3897
4182
  // Save callee-saved registers (incl. cp and fp), sp, and lr
3898
4183
  __ stm(db_w, sp, kCalleeSaved | lr.bit());
3899
4184
 
3900
- if (CpuFeatures::IsSupported(VFP3)) {
3901
- CpuFeatures::Scope scope(VFP3);
4185
+ if (CpuFeatures::IsSupported(VFP2)) {
4186
+ CpuFeatures::Scope scope(VFP2);
3902
4187
  // Save callee-saved vfp registers.
3903
4188
  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
3904
4189
  // Set up the reserved register for 0.0.
@@ -3913,7 +4198,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3913
4198
 
3914
4199
  // Set up argv in r4.
3915
4200
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
3916
- if (CpuFeatures::IsSupported(VFP3)) {
4201
+ if (CpuFeatures::IsSupported(VFP2)) {
3917
4202
  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
3918
4203
  }
3919
4204
  __ ldr(r4, MemOperand(sp, offset_to_argv));
@@ -3956,14 +4241,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3956
4241
  // Jump to a faked try block that does the invoke, with a faked catch
3957
4242
  // block that sets the pending exception.
3958
4243
  __ jmp(&invoke);
3959
- __ bind(&handler_entry);
3960
- handler_offset_ = handler_entry.pos();
3961
- // Caught exception: Store result (exception) in the pending exception
3962
- // field in the JSEnv and return a failure sentinel. Coming in here the
3963
- // fp will be invalid because the PushTryHandler below sets it to 0 to
3964
- // signal the existence of the JSEntry frame.
3965
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3966
- isolate)));
4244
+
4245
+ // Block literal pool emission whilst taking the position of the handler
4246
+ // entry. This avoids making the assumption that literal pools are always
4247
+ // emitted after an instruction is emitted, rather than before.
4248
+ {
4249
+ Assembler::BlockConstPoolScope block_const_pool(masm);
4250
+ __ bind(&handler_entry);
4251
+ handler_offset_ = handler_entry.pos();
4252
+ // Caught exception: Store result (exception) in the pending exception
4253
+ // field in the JSEnv and return a failure sentinel. Coming in here the
4254
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
4255
+ // signal the existence of the JSEntry frame.
4256
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4257
+ isolate)));
4258
+ }
3967
4259
  __ str(r0, MemOperand(ip));
3968
4260
  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3969
4261
  __ b(&exit);
@@ -4006,9 +4298,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4006
4298
 
4007
4299
  // Branch and link to JSEntryTrampoline. We don't use the double underscore
4008
4300
  // macro for the add instruction because we don't want the coverage tool
4009
- // inserting instructions here after we read the pc.
4010
- __ mov(lr, Operand(pc));
4011
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
4301
+ // inserting instructions here after we read the pc. We block literal pool
4302
+ // emission for the same reason.
4303
+ {
4304
+ Assembler::BlockConstPoolScope block_const_pool(masm);
4305
+ __ mov(lr, Operand(pc));
4306
+ masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
4307
+ }
4012
4308
 
4013
4309
  // Unlink this frame from the handler chain.
4014
4310
  __ PopTryHandler();
@@ -4040,8 +4336,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4040
4336
  }
4041
4337
  #endif
4042
4338
 
4043
- if (CpuFeatures::IsSupported(VFP3)) {
4044
- CpuFeatures::Scope scope(VFP3);
4339
+ if (CpuFeatures::IsSupported(VFP2)) {
4340
+ CpuFeatures::Scope scope(VFP2);
4045
4341
  // Restore callee-saved vfp registers.
4046
4342
  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
4047
4343
  }
@@ -4228,6 +4524,165 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
4228
4524
  }
4229
4525
 
4230
4526
 
4527
+ void ArrayLengthStub::Generate(MacroAssembler* masm) {
4528
+ Label miss;
4529
+ Register receiver;
4530
+ if (kind() == Code::KEYED_LOAD_IC) {
4531
+ // ----------- S t a t e -------------
4532
+ // -- lr : return address
4533
+ // -- r0 : key
4534
+ // -- r1 : receiver
4535
+ // -----------------------------------
4536
+ __ cmp(r0, Operand(masm->isolate()->factory()->length_symbol()));
4537
+ __ b(ne, &miss);
4538
+ receiver = r1;
4539
+ } else {
4540
+ ASSERT(kind() == Code::LOAD_IC);
4541
+ // ----------- S t a t e -------------
4542
+ // -- r2 : name
4543
+ // -- lr : return address
4544
+ // -- r0 : receiver
4545
+ // -- sp[0] : receiver
4546
+ // -----------------------------------
4547
+ receiver = r0;
4548
+ }
4549
+
4550
+ StubCompiler::GenerateLoadArrayLength(masm, receiver, r3, &miss);
4551
+ __ bind(&miss);
4552
+ StubCompiler::GenerateLoadMiss(masm, kind());
4553
+ }
4554
+
4555
+
4556
+ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
4557
+ Label miss;
4558
+ Register receiver;
4559
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ __ cmp(r0, Operand(masm->isolate()->factory()->prototype_symbol()));
+ __ b(ne, &miss);
+ receiver = r1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = r0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+ }
+
+
+ void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ __ cmp(r0, Operand(masm->isolate()->factory()->length_symbol()));
+ __ b(ne, &miss);
+ receiver = r1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = r0;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
+ support_wrapper_);
+
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+ }
+
+
+ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -----------------------------------
+ __ cmp(r1, Operand(masm->isolate()->factory()->length_symbol()));
+ __ b(ne, &miss);
+ receiver = r2;
+ value = r0;
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : key
+ // -----------------------------------
+ receiver = r1;
+ value = r0;
+ }
+ Register scratch = r3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
+ __ b(ne, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
+ __ b(ne, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
+ __ b(eq, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::GenerateStoreMiss(masm, kind());
+ }
+
+
  Register InstanceofStub::left() { return r0; }
 
 
@@ -4370,14 +4825,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
 
  // r0 = address of new object(s) (tagged)
  // r2 = argument count (tagged)
- // Get the arguments boilerplate from the current (global) context into r4.
+ // Get the arguments boilerplate from the current native context into r4.
  const int kNormalOffset =
  Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
  const int kAliasedOffset =
  Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
 
- __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
  __ cmp(r1, Operand::Zero());
  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@@ -4534,7 +4989,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // of the arguments object and the elements array in words.
  Label add_arguments_object;
  __ bind(&try_allocate);
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
  __ b(eq, &add_arguments_object);
  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
@@ -4550,9 +5005,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  static_cast<AllocationFlags>(TAG_OBJECT |
  SIZE_IN_WORDS));
 
- // Get the arguments boilerplate from the current (global) context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ // Get the arguments boilerplate from the current native context.
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
  __ ldr(r4, MemOperand(r4, Context::SlotOffset(
  Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
 
@@ -4567,7 +5022,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
 
  // If there are no actual arguments, we're done.
  Label done;
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
  __ b(eq, &done);
 
  // Get the parameters pointer from the stack.
@@ -4594,7 +5049,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // Post-increment r4 with kPointerSize on each iteration.
  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
  __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
  __ b(ne, &loop);
 
  // Return and remove the on-stack parameters.
@@ -4646,7 +5101,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  ExternalReference::address_of_regexp_stack_memory_size(isolate);
  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r0, MemOperand(r0, 0));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
  __ b(eq, &runtime);
 
  // Check that the first argument is a JSRegExp object.
@@ -4681,7 +5136,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(r2, r2, Operand(2)); // r2 was a smi.
  // Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+ __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
  __ b(hi, &runtime);
 
  // r2: Number of capture registers
@@ -4728,7 +5183,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  __ b(gt, &runtime);
 
  // Reset offset for possibly sliced string.
- __ mov(r9, Operand(0));
+ __ mov(r9, Operand::Zero());
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // Check the representation and encoding of the subject string.
@@ -4793,7 +5248,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // r0: Instance type of subject string
- STATIC_ASSERT(4 == kAsciiStringTag);
+ STATIC_ASSERT(4 == kOneByteStringTag);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  // Find the code object based on the assumptions above.
  __ and_(r0, r0, Operand(kStringEncodingMask));
@@ -4849,7 +5304,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 
  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand::Zero());
  __ str(r0, MemOperand(sp, 2 * kPointerSize));
 
  // Argument 5 (sp[4]): static offsets vector buffer.
@@ -5017,7 +5472,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  __ ldr(subject,
  FieldMemOperand(subject, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ sub(subject,
  subject,
  Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -5067,10 +5522,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  // Set empty properties FixedArray.
  // Set elements to point to FixedArray allocated right after the JSArray.
  // Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
  __ add(r3, r0, Operand(JSRegExpResult::kSize));
  __ mov(r4, Operand(factory->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
  __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@@ -5095,16 +5550,16 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  // Set FixedArray length.
  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
  __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(factory->the_hole_value()));
+ // Fill contents of fixed-array with undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
  // r0: JSArray, tagged.
- // r2: the hole.
+ // r2: undefined.
  // r3: Start of elements in FixedArray.
  // r5: Number of elements to fill.
  Label loop;
- __ cmp(r5, Operand(0));
+ __ cmp(r5, Operand::Zero());
  __ bind(&loop);
  __ b(le, &done); // Jump if r5 is negative or zero.
  __ sub(r5, r5, Operand(1), SetCC);
@@ -5176,7 +5631,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
  __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
  __ b(ne, &call);
  // Patch the receiver on the stack with the global receiver object.
- __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r3,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
  __ str(r3, MemOperand(sp, argc_ * kPointerSize));
  __ bind(&call);
@@ -5229,8 +5685,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
  __ b(ne, &non_function);
  __ push(r1); // put proxy as additional argument
- __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ mov(r2, Operand::Zero());
  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
  __ SetCallKind(r5, CALL_AS_METHOD);
  {
@@ -5244,7 +5700,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
  __ bind(&non_function);
  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
  __ mov(r0, Operand(argc_)); // Set up the number of arguments.
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
  __ SetCallKind(r5, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -5287,55 +5743,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
  __ SetCallKind(r5, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
  RelocInfo::CODE_TARGET);
  }
 
 
- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
- void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
- stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
- }
-
-
- int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
- }
-
-
  // StringCharCodeAtGenerator
  void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  Label flat_string;
@@ -5434,10 +5848,10 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
  __ tst(code_,
  Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
  __ b(ne, &slow_case_);
 
  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -5498,7 +5912,7 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
  if (!ascii) {
  __ add(count, count, Operand(count), SetCC);
  } else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
+ __ cmp(count, Operand::Zero());
  }
  __ b(eq, &done);
 
@@ -5553,7 +5967,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
  if (!ascii) {
  __ add(count, count, Operand(count), SetCC);
  } else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
+ __ cmp(count, Operand::Zero());
  }
  __ b(eq, &done);
 
@@ -5785,7 +6199,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
 
  // Check if the two characters match.
  // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
  __ cmp(chars, scratch);
  __ b(eq, &found_in_symbol_table);
  __ bind(&next_probe[i]);
@@ -5868,23 +6282,28 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
 
- // I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r2, ASR, 1), SetCC);
- __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then carry is set now.
- __ b(cs, &runtime); // Either "from" or "to" is not a smi.
+ // Arithmetic shift right by one un-smi-tags. In this case we rotate right
+ // instead because we bail out on non-smi values: ROR and ASR are equivalent
+ // for smis but they set the flags in a way that's easier to optimize.
+ __ mov(r2, Operand(r2, ROR, 1), SetCC);
+ __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
+ // If either to or from had the smi tag bit set, then C is set now, and N
+ // has the same value: we rotated by 1, so the bottom bit is now the top bit.
  // We want to bailout to runtime here if From is negative. In that case, the
  // next instruction is not executed and we fall through to bailing out to
- // runtime. pl is the opposite of mi.
- // Both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, pl);
- __ b(mi, &runtime); // Fail if from > to.
+ // runtime.
+ // Executed if both r2 and r3 are untagged integers.
+ __ sub(r2, r2, Operand(r3), SetCC, cc);
+ // One of the above un-smis or the above SUB could have set N==1.
+ __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
 
  // Make sure first argument is a string.
  __ ldr(r0, MemOperand(sp, kStringOffset));
  STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r0, &runtime);
- Condition is_string = masm->IsObjectStringType(r0, r1);
+ // Do a JumpIfSmi, but fold its jump into the subsequent string test.
+ __ tst(r0, Operand(kSmiTagMask));
+ Condition is_string = masm->IsObjectStringType(r0, r1, ne);
+ ASSERT(is_string == eq);
  __ b(NegateCondition(is_string), &runtime);
 
  // Short-cut for the case of trivial substring.
@@ -5955,7 +6374,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  // string's encoding is wrong because we always have to recheck encoding of
  // the newly created string's parent anyways due to externalized strings.
  Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ tst(r1, Operand(kStringEncodingMask));
  __ b(eq, &two_byte_slice);
@@ -5993,12 +6412,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
 
  __ bind(&sequential_string);
  // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
 
  __ bind(&allocate_result);
  // Sequential acii string. Allocate the result.
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ tst(r1, Operand(kStringEncodingMask));
  __ b(eq, &two_byte_sequential);
 
@@ -6008,13 +6427,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  // Locate first character of substring to copy.
  __ add(r5, r5, r3);
  // Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
 
  // r0: result string
  // r1: first character of result string
  // r2: result string length
  // r5: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
  COPY_ASCII | DEST_ALWAYS_ALIGNED);
  __ jmp(&return_r0);
@@ -6071,7 +6490,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
- __ cmp(length, Operand(0));
+ __ cmp(length, Operand::Zero());
  __ b(ne, &compare_chars);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();
@@ -6104,7 +6523,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
  __ mov(scratch1, scratch2, LeaveCC, gt);
  Register min_length = scratch1;
  STATIC_ASSERT(kSmiTag == 0);
- __ cmp(min_length, Operand(0));
+ __ cmp(min_length, Operand::Zero());
  __ b(eq, &compare_lengths);
 
  // Compare loop.
@@ -6139,7 +6558,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ add(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ add(left, left, Operand(scratch1));
  __ add(right, right, Operand(scratch1));
  __ rsb(length, length, Operand::Zero());
@@ -6292,8 +6711,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  &call_runtime);
 
  // Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
 
  // Try to lookup two character string in symbol table. If it is not found
  // just allocate a new one.
@@ -6312,7 +6731,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  // in a little endian mode)
  __ mov(r6, Operand(2));
  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();
@@ -6363,9 +6782,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ tst(r5, Operand(kAsciiDataHintMask), ne);
  __ b(ne, &ascii_data);
  __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
  __ b(eq, &ascii_data);
 
  // Allocate a two byte cons string.
@@ -6399,10 +6818,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  __ add(r7,
  r0,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
  LeaveCC,
  eq);
  __ b(eq, &first_prepared);
@@ -6415,10 +6834,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  __ add(r1,
  r1,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
  LeaveCC,
  eq);
  __ b(eq, &second_prepared);
@@ -6441,7 +6860,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ b(eq, &non_ascii_string_add_flat_result);
 
  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  // r0: result string.
  // r7: first character of first string.
  // r1: first character of second string.
@@ -6532,7 +6951,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
 
 
  void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
  Label miss;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &miss);
@@ -6553,31 +6972,53 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
 
 
  void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
 
  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &generic_stub);
 
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined1);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r0, &miss);
+ }
 
  // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP3 is unsupported.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- // Load left and right operand
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
+ // stub if NaN is involved or VFP2 is unsupported.
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r0, &right_smi);
+ __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
  __ sub(r2, r0, Operand(kHeapObjectTag));
  __ vldr(d1, r2, HeapNumber::kValueOffset);
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(r2, r0); // Can't clobber r0 yet.
+ SwVfpRegister single_scratch = d2.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d1, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(r1, &left_smi);
+ __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r1, Operand(kHeapObjectTag));
+ __ vldr(d0, r2, HeapNumber::kValueOffset);
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(r2, r1); // Can't clobber r1 yet.
+ single_scratch = d3.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d0, single_scratch);
 
- // Compare operands
+ __ bind(&done);
+ // Compare operands.
  __ VFPCompareAndSetFlags(d0, d1);
 
  // Don't base result on status bits when a NaN is involved.
@@ -6591,14 +7032,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  }
 
  __ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
  __ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
  __ b(ne, &miss);
+ __ JumpIfSmi(r1, &unordered);
  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
  __ b(ne, &maybe_undefined2);
  __ jmp(&unordered);
@@ -6616,7 +7059,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
 
 
  void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
  Label miss;
 
  // Registers containing left and right operands respectively.
@@ -6654,7 +7097,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
 
 
  void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
  Label miss;
 
  bool equality = Token::IsEqualityOp(op_);
@@ -6732,7 +7175,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
 
 
  void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
  Label miss;
  __ and_(r2, r1, Operand(r0));
  __ JumpIfSmi(r2, &miss);
@@ -6812,6 +7255,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
  Register target) {
  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
  RelocInfo::CODE_TARGET));
+
+ // Prevent literal pool emission during calculation of return address.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+
  // Push return address (accessible to GC through exit frame pc).
  // Note that using pc with str is deprecated.
  Label start;
@@ -6898,7 +7345,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
  __ mov(r1, Operand(Handle<String>(name)));
  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
  __ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
  __ ldm(ia_w, sp, spill_mask);
 
  __ b(eq, done);
@@ -6922,8 +7369,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
  ASSERT(!name.is(scratch1));
  ASSERT(!name.is(scratch2));
 
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
 
  // Compute the capacity mask.
  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
@@ -6975,7 +7421,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
  }
  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
  __ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
  __ mov(scratch2, Operand(r2));
  __ ldm(ia_w, sp, spill_mask);
 
@@ -7112,12 +7558,15 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate
  { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
+ // FastNewClosureStub::Generate
+ { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
  };
 
  #undef REG
 
+
  bool RecordWriteStub::IsPregenerated() {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
  !entry->object.is(no_reg);
@@ -7159,6 +7608,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
  }
 
 
+ bool CodeStub::CanUseFPRegisters() {
+ return CpuFeatures::IsSupported(VFP2);
+ }
+
+
  // Takes the input in 3 registers: address_ value_ and object_. A pointer to
  // the value has just been written into the object, now this stub makes sure
  // we keep the GC informed. The word in the object where the value has been
@@ -7172,8 +7626,13 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
  // forth between a compare instructions (a nop in this position) and the
  // real branch when we start and stop incremental heap marking.
  // See RecordWriteStub::Patch for details.
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
+ {
+ // Block literal pool emission, as the position of these two instructions
+ // is assumed by the patching code.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ b(&skip_to_incremental_noncompacting);
+ __ b(&skip_to_incremental_compacting);
+ }
 
  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
  __ RememberedSetHelper(object_,
@@ -7249,12 +7708,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  ASSERT(!address.is(r0));
  __ Move(address, regs_.address());
  __ Move(r0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(r1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ ldr(r1, MemOperand(address, 0));
- }
+ __ Move(r1, address);
  __ mov(r2, Operand(ExternalReference::isolate_address()));
 
  AllowExternalCallThatCantCauseGC scope(masm);
@@ -7282,6 +7736,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
  Label need_incremental;
  Label need_incremental_pop_scratch;
 
+ __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ ldr(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
+ __ str(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ b(mi, &need_incremental);
+
  // Let's look at the color of the object: If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -7402,11 +7866,88 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
+ __ StoreNumberToDoubleElements(r0, r3,
+ // Overwrites all regs after this.
+ r5, r6, r7, r9, r2,
  &slow_elements);
  __ Ret();
  }
 
+
+ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ ASSERT(!Serializer::enabled());
+ bool save_fp_regs = CpuFeatures::IsSupported(VFP2);
+ CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ ldr(r1, MemOperand(fp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
+ __ add(sp, sp, r1);
+ __ Ret();
+ }
+
+
+ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
+ ProfileEntryHookStub stub;
+ __ push(lr);
+ __ CallStub(&stub);
+ __ pop(lr);
+ }
+ }
+
+
+ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push lr" instruction, followed by a call.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ 3 * Assembler::kInstrSize;
+
+ // Save live volatile registers.
+ __ Push(lr, r5, r1);
+ const int32_t kNumSavedRegs = 3;
+
+ // Compute the function's address for the first argument.
+ __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(r5, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ and_(sp, sp, Operand(-frame_alignment));
+ }
+
+ #if defined(V8_HOST_ARCH_ARM)
+ __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+ __ ldr(ip, MemOperand(ip));
+ #else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ Address trampoline_address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(EntryHookTrampoline));
+ ApiFunction dispatcher(trampoline_address);
+ __ mov(ip, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+ #endif
+ __ Call(ip);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, r5);
+ }
+
+ __ Pop(lr, r5, r1);
+ __ Ret();
+ }
+
  #undef __
 
  } } // namespace v8::internal