libv8 3.11.8.17 → 3.16.14.0

Files changed (754)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +1 -2
  3. data/Gemfile +1 -1
  4. data/Rakefile +6 -7
  5. data/lib/libv8/version.rb +1 -1
  6. data/vendor/v8/.gitignore +24 -3
  7. data/vendor/v8/AUTHORS +7 -0
  8. data/vendor/v8/ChangeLog +839 -0
  9. data/vendor/v8/DEPS +1 -1
  10. data/vendor/v8/Makefile.android +92 -0
  11. data/vendor/v8/OWNERS +11 -0
  12. data/vendor/v8/PRESUBMIT.py +71 -0
  13. data/vendor/v8/SConstruct +34 -39
  14. data/vendor/v8/build/android.gypi +56 -37
  15. data/vendor/v8/build/common.gypi +112 -30
  16. data/vendor/v8/build/gyp_v8 +1 -1
  17. data/vendor/v8/build/standalone.gypi +15 -11
  18. data/vendor/v8/include/v8-debug.h +9 -1
  19. data/vendor/v8/include/v8-preparser.h +4 -3
  20. data/vendor/v8/include/v8-profiler.h +25 -25
  21. data/vendor/v8/include/v8-testing.h +4 -3
  22. data/vendor/v8/include/v8.h +994 -540
  23. data/vendor/v8/preparser/preparser-process.cc +3 -3
  24. data/vendor/v8/samples/lineprocessor.cc +20 -27
  25. data/vendor/v8/samples/process.cc +18 -14
  26. data/vendor/v8/samples/shell.cc +16 -15
  27. data/vendor/v8/src/SConscript +15 -14
  28. data/vendor/v8/src/accessors.cc +169 -77
  29. data/vendor/v8/src/accessors.h +4 -0
  30. data/vendor/v8/src/allocation-inl.h +2 -2
  31. data/vendor/v8/src/allocation.h +7 -7
  32. data/vendor/v8/src/api.cc +810 -497
  33. data/vendor/v8/src/api.h +85 -60
  34. data/vendor/v8/src/arm/assembler-arm-inl.h +179 -22
  35. data/vendor/v8/src/arm/assembler-arm.cc +633 -264
  36. data/vendor/v8/src/arm/assembler-arm.h +264 -197
  37. data/vendor/v8/src/arm/builtins-arm.cc +117 -27
  38. data/vendor/v8/src/arm/code-stubs-arm.cc +1241 -700
  39. data/vendor/v8/src/arm/code-stubs-arm.h +35 -138
  40. data/vendor/v8/src/arm/codegen-arm.cc +285 -16
  41. data/vendor/v8/src/arm/codegen-arm.h +22 -0
  42. data/vendor/v8/src/arm/constants-arm.cc +5 -3
  43. data/vendor/v8/src/arm/constants-arm.h +24 -11
  44. data/vendor/v8/src/arm/debug-arm.cc +3 -3
  45. data/vendor/v8/src/arm/deoptimizer-arm.cc +382 -92
  46. data/vendor/v8/src/arm/disasm-arm.cc +61 -12
  47. data/vendor/v8/src/arm/frames-arm.h +0 -14
  48. data/vendor/v8/src/arm/full-codegen-arm.cc +332 -304
  49. data/vendor/v8/src/arm/ic-arm.cc +180 -259
  50. data/vendor/v8/src/arm/lithium-arm.cc +364 -316
  51. data/vendor/v8/src/arm/lithium-arm.h +512 -275
  52. data/vendor/v8/src/arm/lithium-codegen-arm.cc +1768 -809
  53. data/vendor/v8/src/arm/lithium-codegen-arm.h +97 -35
  54. data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +12 -5
  55. data/vendor/v8/src/arm/macro-assembler-arm.cc +439 -228
  56. data/vendor/v8/src/arm/macro-assembler-arm.h +116 -70
  57. data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +54 -44
  58. data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +3 -10
  59. data/vendor/v8/src/arm/simulator-arm.cc +272 -238
  60. data/vendor/v8/src/arm/simulator-arm.h +38 -8
  61. data/vendor/v8/src/arm/stub-cache-arm.cc +522 -895
  62. data/vendor/v8/src/array.js +101 -70
  63. data/vendor/v8/src/assembler.cc +270 -19
  64. data/vendor/v8/src/assembler.h +110 -15
  65. data/vendor/v8/src/ast.cc +79 -69
  66. data/vendor/v8/src/ast.h +255 -301
  67. data/vendor/v8/src/atomicops.h +7 -1
  68. data/vendor/v8/src/atomicops_internals_tsan.h +335 -0
  69. data/vendor/v8/src/bootstrapper.cc +481 -418
  70. data/vendor/v8/src/bootstrapper.h +4 -4
  71. data/vendor/v8/src/builtins.cc +498 -311
  72. data/vendor/v8/src/builtins.h +75 -47
  73. data/vendor/v8/src/checks.cc +2 -1
  74. data/vendor/v8/src/checks.h +8 -0
  75. data/vendor/v8/src/code-stubs-hydrogen.cc +253 -0
  76. data/vendor/v8/src/code-stubs.cc +249 -84
  77. data/vendor/v8/src/code-stubs.h +501 -169
  78. data/vendor/v8/src/codegen.cc +36 -18
  79. data/vendor/v8/src/codegen.h +25 -3
  80. data/vendor/v8/src/collection.js +54 -17
  81. data/vendor/v8/src/compilation-cache.cc +24 -16
  82. data/vendor/v8/src/compilation-cache.h +15 -6
  83. data/vendor/v8/src/compiler.cc +497 -195
  84. data/vendor/v8/src/compiler.h +246 -38
  85. data/vendor/v8/src/contexts.cc +64 -24
  86. data/vendor/v8/src/contexts.h +60 -29
  87. data/vendor/v8/src/conversions-inl.h +24 -14
  88. data/vendor/v8/src/conversions.h +7 -4
  89. data/vendor/v8/src/counters.cc +21 -12
  90. data/vendor/v8/src/counters.h +44 -16
  91. data/vendor/v8/src/cpu-profiler.h +1 -1
  92. data/vendor/v8/src/d8-debug.cc +2 -2
  93. data/vendor/v8/src/d8-readline.cc +13 -2
  94. data/vendor/v8/src/d8.cc +681 -273
  95. data/vendor/v8/src/d8.gyp +4 -4
  96. data/vendor/v8/src/d8.h +38 -18
  97. data/vendor/v8/src/d8.js +0 -617
  98. data/vendor/v8/src/data-flow.h +55 -0
  99. data/vendor/v8/src/date.js +1 -42
  100. data/vendor/v8/src/dateparser-inl.h +5 -1
  101. data/vendor/v8/src/debug-agent.cc +10 -15
  102. data/vendor/v8/src/debug-debugger.js +147 -149
  103. data/vendor/v8/src/debug.cc +323 -164
  104. data/vendor/v8/src/debug.h +26 -14
  105. data/vendor/v8/src/deoptimizer.cc +765 -290
  106. data/vendor/v8/src/deoptimizer.h +130 -28
  107. data/vendor/v8/src/disassembler.cc +10 -4
  108. data/vendor/v8/src/elements-kind.cc +7 -2
  109. data/vendor/v8/src/elements-kind.h +19 -0
  110. data/vendor/v8/src/elements.cc +607 -285
  111. data/vendor/v8/src/elements.h +36 -13
  112. data/vendor/v8/src/execution.cc +52 -31
  113. data/vendor/v8/src/execution.h +4 -4
  114. data/vendor/v8/src/extensions/externalize-string-extension.cc +5 -4
  115. data/vendor/v8/src/extensions/gc-extension.cc +5 -1
  116. data/vendor/v8/src/extensions/statistics-extension.cc +153 -0
  117. data/vendor/v8/src/{inspector.h → extensions/statistics-extension.h} +12 -23
  118. data/vendor/v8/src/factory.cc +101 -134
  119. data/vendor/v8/src/factory.h +36 -31
  120. data/vendor/v8/src/flag-definitions.h +102 -25
  121. data/vendor/v8/src/flags.cc +9 -5
  122. data/vendor/v8/src/frames-inl.h +10 -0
  123. data/vendor/v8/src/frames.cc +116 -26
  124. data/vendor/v8/src/frames.h +96 -12
  125. data/vendor/v8/src/full-codegen.cc +219 -74
  126. data/vendor/v8/src/full-codegen.h +63 -21
  127. data/vendor/v8/src/func-name-inferrer.cc +8 -7
  128. data/vendor/v8/src/func-name-inferrer.h +5 -3
  129. data/vendor/v8/src/gdb-jit.cc +71 -57
  130. data/vendor/v8/src/global-handles.cc +230 -101
  131. data/vendor/v8/src/global-handles.h +26 -27
  132. data/vendor/v8/src/globals.h +17 -19
  133. data/vendor/v8/src/handles-inl.h +59 -12
  134. data/vendor/v8/src/handles.cc +180 -200
  135. data/vendor/v8/src/handles.h +80 -11
  136. data/vendor/v8/src/hashmap.h +60 -40
  137. data/vendor/v8/src/heap-inl.h +107 -45
  138. data/vendor/v8/src/heap-profiler.cc +38 -19
  139. data/vendor/v8/src/heap-profiler.h +24 -14
  140. data/vendor/v8/src/heap.cc +1123 -738
  141. data/vendor/v8/src/heap.h +385 -146
  142. data/vendor/v8/src/hydrogen-instructions.cc +700 -217
  143. data/vendor/v8/src/hydrogen-instructions.h +1158 -472
  144. data/vendor/v8/src/hydrogen.cc +3319 -1662
  145. data/vendor/v8/src/hydrogen.h +411 -170
  146. data/vendor/v8/src/ia32/assembler-ia32-inl.h +46 -16
  147. data/vendor/v8/src/ia32/assembler-ia32.cc +131 -61
  148. data/vendor/v8/src/ia32/assembler-ia32.h +115 -57
  149. data/vendor/v8/src/ia32/builtins-ia32.cc +99 -5
  150. data/vendor/v8/src/ia32/code-stubs-ia32.cc +787 -495
  151. data/vendor/v8/src/ia32/code-stubs-ia32.h +10 -100
  152. data/vendor/v8/src/ia32/codegen-ia32.cc +227 -23
  153. data/vendor/v8/src/ia32/codegen-ia32.h +14 -0
  154. data/vendor/v8/src/ia32/deoptimizer-ia32.cc +428 -87
  155. data/vendor/v8/src/ia32/disasm-ia32.cc +28 -1
  156. data/vendor/v8/src/ia32/frames-ia32.h +6 -16
  157. data/vendor/v8/src/ia32/full-codegen-ia32.cc +280 -272
  158. data/vendor/v8/src/ia32/ic-ia32.cc +150 -250
  159. data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +1600 -517
  160. data/vendor/v8/src/ia32/lithium-codegen-ia32.h +90 -24
  161. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +10 -6
  162. data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.h +2 -2
  163. data/vendor/v8/src/ia32/lithium-ia32.cc +405 -302
  164. data/vendor/v8/src/ia32/lithium-ia32.h +526 -271
  165. data/vendor/v8/src/ia32/macro-assembler-ia32.cc +378 -119
  166. data/vendor/v8/src/ia32/macro-assembler-ia32.h +62 -28
  167. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +43 -30
  168. data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +2 -10
  169. data/vendor/v8/src/ia32/stub-cache-ia32.cc +492 -678
  170. data/vendor/v8/src/ic-inl.h +9 -4
  171. data/vendor/v8/src/ic.cc +836 -923
  172. data/vendor/v8/src/ic.h +228 -247
  173. data/vendor/v8/src/incremental-marking-inl.h +26 -30
  174. data/vendor/v8/src/incremental-marking.cc +276 -248
  175. data/vendor/v8/src/incremental-marking.h +29 -37
  176. data/vendor/v8/src/interface.cc +34 -25
  177. data/vendor/v8/src/interface.h +69 -25
  178. data/vendor/v8/src/interpreter-irregexp.cc +2 -2
  179. data/vendor/v8/src/isolate.cc +382 -76
  180. data/vendor/v8/src/isolate.h +109 -56
  181. data/vendor/v8/src/json-parser.h +217 -104
  182. data/vendor/v8/src/json-stringifier.h +745 -0
  183. data/vendor/v8/src/json.js +10 -132
  184. data/vendor/v8/src/jsregexp-inl.h +106 -0
  185. data/vendor/v8/src/jsregexp.cc +517 -285
  186. data/vendor/v8/src/jsregexp.h +145 -117
  187. data/vendor/v8/src/list-inl.h +35 -22
  188. data/vendor/v8/src/list.h +46 -19
  189. data/vendor/v8/src/lithium-allocator-inl.h +22 -2
  190. data/vendor/v8/src/lithium-allocator.cc +85 -70
  191. data/vendor/v8/src/lithium-allocator.h +21 -39
  192. data/vendor/v8/src/lithium.cc +259 -5
  193. data/vendor/v8/src/lithium.h +131 -32
  194. data/vendor/v8/src/liveedit-debugger.js +52 -3
  195. data/vendor/v8/src/liveedit.cc +393 -113
  196. data/vendor/v8/src/liveedit.h +7 -3
  197. data/vendor/v8/src/log-utils.cc +4 -2
  198. data/vendor/v8/src/log.cc +170 -140
  199. data/vendor/v8/src/log.h +62 -11
  200. data/vendor/v8/src/macro-assembler.h +17 -0
  201. data/vendor/v8/src/macros.py +2 -0
  202. data/vendor/v8/src/mark-compact-inl.h +3 -23
  203. data/vendor/v8/src/mark-compact.cc +801 -830
  204. data/vendor/v8/src/mark-compact.h +154 -47
  205. data/vendor/v8/src/marking-thread.cc +85 -0
  206. data/vendor/v8/src/{inspector.cc → marking-thread.h} +32 -24
  207. data/vendor/v8/src/math.js +12 -18
  208. data/vendor/v8/src/messages.cc +18 -8
  209. data/vendor/v8/src/messages.js +314 -261
  210. data/vendor/v8/src/mips/assembler-mips-inl.h +58 -6
  211. data/vendor/v8/src/mips/assembler-mips.cc +92 -75
  212. data/vendor/v8/src/mips/assembler-mips.h +54 -60
  213. data/vendor/v8/src/mips/builtins-mips.cc +116 -17
  214. data/vendor/v8/src/mips/code-stubs-mips.cc +919 -556
  215. data/vendor/v8/src/mips/code-stubs-mips.h +22 -131
  216. data/vendor/v8/src/mips/codegen-mips.cc +281 -6
  217. data/vendor/v8/src/mips/codegen-mips.h +22 -0
  218. data/vendor/v8/src/mips/constants-mips.cc +2 -0
  219. data/vendor/v8/src/mips/constants-mips.h +12 -2
  220. data/vendor/v8/src/mips/deoptimizer-mips.cc +286 -50
  221. data/vendor/v8/src/mips/disasm-mips.cc +13 -0
  222. data/vendor/v8/src/mips/full-codegen-mips.cc +297 -284
  223. data/vendor/v8/src/mips/ic-mips.cc +182 -263
  224. data/vendor/v8/src/mips/lithium-codegen-mips.cc +1208 -556
  225. data/vendor/v8/src/mips/lithium-codegen-mips.h +72 -19
  226. data/vendor/v8/src/mips/lithium-gap-resolver-mips.cc +9 -2
  227. data/vendor/v8/src/mips/lithium-mips.cc +290 -302
  228. data/vendor/v8/src/mips/lithium-mips.h +463 -266
  229. data/vendor/v8/src/mips/macro-assembler-mips.cc +208 -115
  230. data/vendor/v8/src/mips/macro-assembler-mips.h +67 -24
  231. data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +40 -25
  232. data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +3 -9
  233. data/vendor/v8/src/mips/simulator-mips.cc +112 -40
  234. data/vendor/v8/src/mips/simulator-mips.h +5 -0
  235. data/vendor/v8/src/mips/stub-cache-mips.cc +502 -884
  236. data/vendor/v8/src/mirror-debugger.js +157 -30
  237. data/vendor/v8/src/mksnapshot.cc +88 -14
  238. data/vendor/v8/src/object-observe.js +235 -0
  239. data/vendor/v8/src/objects-debug.cc +178 -176
  240. data/vendor/v8/src/objects-inl.h +1333 -486
  241. data/vendor/v8/src/objects-printer.cc +125 -43
  242. data/vendor/v8/src/objects-visiting-inl.h +578 -6
  243. data/vendor/v8/src/objects-visiting.cc +2 -2
  244. data/vendor/v8/src/objects-visiting.h +172 -79
  245. data/vendor/v8/src/objects.cc +3533 -2885
  246. data/vendor/v8/src/objects.h +1352 -1131
  247. data/vendor/v8/src/optimizing-compiler-thread.cc +152 -0
  248. data/vendor/v8/src/optimizing-compiler-thread.h +111 -0
  249. data/vendor/v8/src/parser.cc +390 -500
  250. data/vendor/v8/src/parser.h +45 -33
  251. data/vendor/v8/src/platform-cygwin.cc +10 -21
  252. data/vendor/v8/src/platform-freebsd.cc +36 -41
  253. data/vendor/v8/src/platform-linux.cc +160 -124
  254. data/vendor/v8/src/platform-macos.cc +30 -27
  255. data/vendor/v8/src/platform-nullos.cc +17 -1
  256. data/vendor/v8/src/platform-openbsd.cc +19 -50
  257. data/vendor/v8/src/platform-posix.cc +14 -0
  258. data/vendor/v8/src/platform-solaris.cc +20 -53
  259. data/vendor/v8/src/platform-win32.cc +49 -26
  260. data/vendor/v8/src/platform.h +40 -1
  261. data/vendor/v8/src/preparser.cc +8 -5
  262. data/vendor/v8/src/preparser.h +2 -2
  263. data/vendor/v8/src/prettyprinter.cc +16 -0
  264. data/vendor/v8/src/prettyprinter.h +2 -0
  265. data/vendor/v8/src/profile-generator-inl.h +1 -0
  266. data/vendor/v8/src/profile-generator.cc +209 -147
  267. data/vendor/v8/src/profile-generator.h +15 -12
  268. data/vendor/v8/src/property-details.h +46 -31
  269. data/vendor/v8/src/property.cc +27 -46
  270. data/vendor/v8/src/property.h +163 -83
  271. data/vendor/v8/src/proxy.js +7 -2
  272. data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +4 -13
  273. data/vendor/v8/src/regexp-macro-assembler-irregexp.h +1 -2
  274. data/vendor/v8/src/regexp-macro-assembler-tracer.cc +1 -11
  275. data/vendor/v8/src/regexp-macro-assembler-tracer.h +0 -1
  276. data/vendor/v8/src/regexp-macro-assembler.cc +31 -14
  277. data/vendor/v8/src/regexp-macro-assembler.h +14 -11
  278. data/vendor/v8/src/regexp-stack.cc +1 -0
  279. data/vendor/v8/src/regexp.js +9 -8
  280. data/vendor/v8/src/rewriter.cc +18 -7
  281. data/vendor/v8/src/runtime-profiler.cc +52 -43
  282. data/vendor/v8/src/runtime-profiler.h +0 -25
  283. data/vendor/v8/src/runtime.cc +2006 -2023
  284. data/vendor/v8/src/runtime.h +56 -49
  285. data/vendor/v8/src/safepoint-table.cc +12 -18
  286. data/vendor/v8/src/safepoint-table.h +11 -8
  287. data/vendor/v8/src/scanner.cc +1 -0
  288. data/vendor/v8/src/scanner.h +4 -10
  289. data/vendor/v8/src/scopeinfo.cc +35 -9
  290. data/vendor/v8/src/scopeinfo.h +64 -3
  291. data/vendor/v8/src/scopes.cc +251 -156
  292. data/vendor/v8/src/scopes.h +61 -27
  293. data/vendor/v8/src/serialize.cc +348 -396
  294. data/vendor/v8/src/serialize.h +125 -114
  295. data/vendor/v8/src/small-pointer-list.h +11 -11
  296. data/vendor/v8/src/{smart-array-pointer.h → smart-pointers.h} +64 -15
  297. data/vendor/v8/src/snapshot-common.cc +64 -15
  298. data/vendor/v8/src/snapshot-empty.cc +7 -1
  299. data/vendor/v8/src/snapshot.h +9 -2
  300. data/vendor/v8/src/spaces-inl.h +17 -0
  301. data/vendor/v8/src/spaces.cc +477 -183
  302. data/vendor/v8/src/spaces.h +238 -58
  303. data/vendor/v8/src/splay-tree-inl.h +8 -7
  304. data/vendor/v8/src/splay-tree.h +24 -10
  305. data/vendor/v8/src/store-buffer.cc +12 -5
  306. data/vendor/v8/src/store-buffer.h +2 -4
  307. data/vendor/v8/src/string-search.h +22 -6
  308. data/vendor/v8/src/string-stream.cc +11 -8
  309. data/vendor/v8/src/string.js +47 -15
  310. data/vendor/v8/src/stub-cache.cc +461 -224
  311. data/vendor/v8/src/stub-cache.h +164 -102
  312. data/vendor/v8/src/sweeper-thread.cc +105 -0
  313. data/vendor/v8/src/sweeper-thread.h +81 -0
  314. data/vendor/v8/src/token.h +1 -0
  315. data/vendor/v8/src/transitions-inl.h +220 -0
  316. data/vendor/v8/src/transitions.cc +160 -0
  317. data/vendor/v8/src/transitions.h +207 -0
  318. data/vendor/v8/src/type-info.cc +182 -181
  319. data/vendor/v8/src/type-info.h +31 -19
  320. data/vendor/v8/src/unicode-inl.h +62 -106
  321. data/vendor/v8/src/unicode.cc +57 -67
  322. data/vendor/v8/src/unicode.h +45 -91
  323. data/vendor/v8/src/uri.js +57 -29
  324. data/vendor/v8/src/utils.h +105 -5
  325. data/vendor/v8/src/v8-counters.cc +54 -11
  326. data/vendor/v8/src/v8-counters.h +134 -19
  327. data/vendor/v8/src/v8.cc +29 -29
  328. data/vendor/v8/src/v8.h +1 -0
  329. data/vendor/v8/src/v8conversions.cc +26 -22
  330. data/vendor/v8/src/v8globals.h +56 -43
  331. data/vendor/v8/src/v8natives.js +83 -30
  332. data/vendor/v8/src/v8threads.cc +42 -21
  333. data/vendor/v8/src/v8threads.h +4 -1
  334. data/vendor/v8/src/v8utils.cc +9 -93
  335. data/vendor/v8/src/v8utils.h +37 -33
  336. data/vendor/v8/src/variables.cc +6 -3
  337. data/vendor/v8/src/variables.h +6 -13
  338. data/vendor/v8/src/version.cc +2 -2
  339. data/vendor/v8/src/vm-state-inl.h +11 -0
  340. data/vendor/v8/src/x64/assembler-x64-inl.h +39 -8
  341. data/vendor/v8/src/x64/assembler-x64.cc +78 -64
  342. data/vendor/v8/src/x64/assembler-x64.h +38 -33
  343. data/vendor/v8/src/x64/builtins-x64.cc +105 -7
  344. data/vendor/v8/src/x64/code-stubs-x64.cc +790 -413
  345. data/vendor/v8/src/x64/code-stubs-x64.h +10 -106
  346. data/vendor/v8/src/x64/codegen-x64.cc +210 -8
  347. data/vendor/v8/src/x64/codegen-x64.h +20 -1
  348. data/vendor/v8/src/x64/deoptimizer-x64.cc +336 -75
  349. data/vendor/v8/src/x64/disasm-x64.cc +15 -0
  350. data/vendor/v8/src/x64/frames-x64.h +0 -14
  351. data/vendor/v8/src/x64/full-codegen-x64.cc +293 -270
  352. data/vendor/v8/src/x64/ic-x64.cc +153 -251
  353. data/vendor/v8/src/x64/lithium-codegen-x64.cc +1379 -531
  354. data/vendor/v8/src/x64/lithium-codegen-x64.h +67 -23
  355. data/vendor/v8/src/x64/lithium-gap-resolver-x64.cc +2 -2
  356. data/vendor/v8/src/x64/lithium-x64.cc +349 -289
  357. data/vendor/v8/src/x64/lithium-x64.h +460 -250
  358. data/vendor/v8/src/x64/macro-assembler-x64.cc +350 -177
  359. data/vendor/v8/src/x64/macro-assembler-x64.h +67 -49
  360. data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +46 -33
  361. data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +2 -3
  362. data/vendor/v8/src/x64/stub-cache-x64.cc +484 -653
  363. data/vendor/v8/src/zone-inl.h +9 -27
  364. data/vendor/v8/src/zone.cc +5 -5
  365. data/vendor/v8/src/zone.h +53 -27
  366. data/vendor/v8/test/benchmarks/testcfg.py +5 -0
  367. data/vendor/v8/test/cctest/cctest.cc +4 -0
  368. data/vendor/v8/test/cctest/cctest.gyp +3 -1
  369. data/vendor/v8/test/cctest/cctest.h +57 -9
  370. data/vendor/v8/test/cctest/cctest.status +15 -15
  371. data/vendor/v8/test/cctest/test-accessors.cc +26 -0
  372. data/vendor/v8/test/cctest/test-alloc.cc +22 -30
  373. data/vendor/v8/test/cctest/test-api.cc +1943 -314
  374. data/vendor/v8/test/cctest/test-assembler-arm.cc +133 -13
  375. data/vendor/v8/test/cctest/test-assembler-ia32.cc +1 -1
  376. data/vendor/v8/test/cctest/test-assembler-mips.cc +12 -0
  377. data/vendor/v8/test/cctest/test-ast.cc +4 -2
  378. data/vendor/v8/test/cctest/test-compiler.cc +61 -29
  379. data/vendor/v8/test/cctest/test-dataflow.cc +2 -2
  380. data/vendor/v8/test/cctest/test-debug.cc +212 -33
  381. data/vendor/v8/test/cctest/test-decls.cc +257 -11
  382. data/vendor/v8/test/cctest/test-dictionary.cc +24 -10
  383. data/vendor/v8/test/cctest/test-disasm-arm.cc +118 -1
  384. data/vendor/v8/test/cctest/test-disasm-ia32.cc +3 -2
  385. data/vendor/v8/test/cctest/test-flags.cc +14 -1
  386. data/vendor/v8/test/cctest/test-func-name-inference.cc +7 -4
  387. data/vendor/v8/test/cctest/test-global-object.cc +51 -0
  388. data/vendor/v8/test/cctest/test-hashing.cc +32 -23
  389. data/vendor/v8/test/cctest/test-heap-profiler.cc +131 -77
  390. data/vendor/v8/test/cctest/test-heap.cc +1084 -143
  391. data/vendor/v8/test/cctest/test-list.cc +1 -1
  392. data/vendor/v8/test/cctest/test-liveedit.cc +3 -2
  393. data/vendor/v8/test/cctest/test-lockers.cc +12 -13
  394. data/vendor/v8/test/cctest/test-log.cc +10 -8
  395. data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +2 -2
  396. data/vendor/v8/test/cctest/test-mark-compact.cc +44 -22
  397. data/vendor/v8/test/cctest/test-object-observe.cc +434 -0
  398. data/vendor/v8/test/cctest/test-parsing.cc +86 -39
  399. data/vendor/v8/test/cctest/test-platform-linux.cc +6 -0
  400. data/vendor/v8/test/cctest/test-platform-win32.cc +7 -0
  401. data/vendor/v8/test/cctest/test-random.cc +5 -4
  402. data/vendor/v8/test/cctest/test-regexp.cc +137 -101
  403. data/vendor/v8/test/cctest/test-serialize.cc +150 -230
  404. data/vendor/v8/test/cctest/test-sockets.cc +1 -1
  405. data/vendor/v8/test/cctest/test-spaces.cc +139 -0
  406. data/vendor/v8/test/cctest/test-strings.cc +736 -74
  407. data/vendor/v8/test/cctest/test-thread-termination.cc +10 -11
  408. data/vendor/v8/test/cctest/test-threads.cc +4 -4
  409. data/vendor/v8/test/cctest/test-utils.cc +16 -0
  410. data/vendor/v8/test/cctest/test-weakmaps.cc +7 -3
  411. data/vendor/v8/test/cctest/testcfg.py +64 -5
  412. data/vendor/v8/test/es5conform/testcfg.py +5 -0
  413. data/vendor/v8/test/message/message.status +1 -1
  414. data/vendor/v8/test/message/overwritten-builtins.out +3 -0
  415. data/vendor/v8/test/message/testcfg.py +89 -8
  416. data/vendor/v8/test/message/try-catch-finally-no-message.out +26 -26
  417. data/vendor/v8/test/mjsunit/accessor-map-sharing.js +18 -2
  418. data/vendor/v8/test/mjsunit/allocation-site-info.js +126 -0
  419. data/vendor/v8/test/mjsunit/array-bounds-check-removal.js +62 -1
  420. data/vendor/v8/test/mjsunit/array-iteration.js +1 -1
  421. data/vendor/v8/test/mjsunit/array-literal-transitions.js +2 -0
  422. data/vendor/v8/test/mjsunit/array-natives-elements.js +317 -0
  423. data/vendor/v8/test/mjsunit/array-reduce.js +8 -8
  424. data/vendor/v8/test/mjsunit/array-slice.js +12 -0
  425. data/vendor/v8/test/mjsunit/array-store-and-grow.js +4 -1
  426. data/vendor/v8/test/mjsunit/assert-opt-and-deopt.js +1 -1
  427. data/vendor/v8/test/mjsunit/bugs/bug-2337.js +53 -0
  428. data/vendor/v8/test/mjsunit/compare-known-objects-slow.js +69 -0
  429. data/vendor/v8/test/mjsunit/compiler/alloc-object-huge.js +3 -1
  430. data/vendor/v8/test/mjsunit/compiler/inline-accessors.js +368 -0
  431. data/vendor/v8/test/mjsunit/compiler/inline-arguments.js +87 -1
  432. data/vendor/v8/test/mjsunit/compiler/inline-closures.js +49 -0
  433. data/vendor/v8/test/mjsunit/compiler/inline-construct.js +55 -43
  434. data/vendor/v8/test/mjsunit/compiler/inline-literals.js +39 -0
  435. data/vendor/v8/test/mjsunit/compiler/multiply-add.js +69 -0
  436. data/vendor/v8/test/mjsunit/compiler/optimized-closures.js +57 -0
  437. data/vendor/v8/test/mjsunit/compiler/parallel-proto-change.js +44 -0
  438. data/vendor/v8/test/mjsunit/compiler/property-static.js +69 -0
  439. data/vendor/v8/test/mjsunit/compiler/proto-chain-constant.js +55 -0
  440. data/vendor/v8/test/mjsunit/compiler/proto-chain-load.js +44 -0
  441. data/vendor/v8/test/mjsunit/compiler/regress-gvn.js +3 -2
  442. data/vendor/v8/test/mjsunit/compiler/regress-or.js +6 -2
  443. data/vendor/v8/test/mjsunit/compiler/rotate.js +224 -0
  444. data/vendor/v8/test/mjsunit/compiler/uint32.js +173 -0
  445. data/vendor/v8/test/mjsunit/count-based-osr.js +2 -1
  446. data/vendor/v8/test/mjsunit/d8-os.js +3 -3
  447. data/vendor/v8/test/mjsunit/date-parse.js +3 -0
  448. data/vendor/v8/test/mjsunit/date.js +22 -0
  449. data/vendor/v8/test/mjsunit/debug-break-inline.js +1 -0
  450. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js +22 -12
  451. data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized.js +21 -10
  452. data/vendor/v8/test/mjsunit/debug-liveedit-compile-error.js +60 -0
  453. data/vendor/v8/test/mjsunit/debug-liveedit-double-call.js +142 -0
  454. data/vendor/v8/test/mjsunit/debug-liveedit-literals.js +94 -0
  455. data/vendor/v8/test/mjsunit/debug-liveedit-restart-frame.js +153 -0
  456. data/vendor/v8/test/mjsunit/debug-multiple-breakpoints.js +1 -1
  457. data/vendor/v8/test/mjsunit/debug-script-breakpoints-closure.js +67 -0
  458. data/vendor/v8/test/mjsunit/debug-script-breakpoints-nested.js +82 -0
  459. data/vendor/v8/test/mjsunit/debug-script.js +4 -2
  460. data/vendor/v8/test/mjsunit/debug-set-variable-value.js +308 -0
  461. data/vendor/v8/test/mjsunit/debug-stepout-scope-part1.js +190 -0
  462. data/vendor/v8/test/mjsunit/debug-stepout-scope-part2.js +83 -0
  463. data/vendor/v8/test/mjsunit/debug-stepout-scope-part3.js +80 -0
  464. data/vendor/v8/test/mjsunit/debug-stepout-scope-part4.js +80 -0
  465. data/vendor/v8/test/mjsunit/debug-stepout-scope-part5.js +77 -0
  466. data/vendor/v8/test/mjsunit/debug-stepout-scope-part6.js +79 -0
  467. data/vendor/v8/test/mjsunit/debug-stepout-scope-part7.js +79 -0
  468. data/vendor/v8/test/mjsunit/{debug-stepout-scope.js → debug-stepout-scope-part8.js} +0 -189
  469. data/vendor/v8/test/mjsunit/delete-non-configurable.js +74 -0
  470. data/vendor/v8/test/mjsunit/deopt-minus-zero.js +56 -0
  471. data/vendor/v8/test/mjsunit/elements-kind.js +6 -4
  472. data/vendor/v8/test/mjsunit/elements-length-no-holey.js +33 -0
  473. data/vendor/v8/test/mjsunit/elements-transition-hoisting.js +46 -19
  474. data/vendor/v8/test/mjsunit/error-accessors.js +54 -0
  475. data/vendor/v8/test/mjsunit/error-constructors.js +1 -14
  476. data/vendor/v8/test/mjsunit/error-tostring.js +8 -0
  477. data/vendor/v8/test/mjsunit/eval-stack-trace.js +204 -0
  478. data/vendor/v8/test/mjsunit/external-array.js +364 -1
  479. data/vendor/v8/test/mjsunit/fast-array-length.js +37 -0
  480. data/vendor/v8/test/mjsunit/fast-non-keyed.js +113 -0
  481. data/vendor/v8/test/mjsunit/fast-prototype.js +117 -0
  482. data/vendor/v8/test/mjsunit/function-call.js +14 -18
  483. data/vendor/v8/test/mjsunit/fuzz-natives-part1.js +230 -0
  484. data/vendor/v8/test/mjsunit/fuzz-natives-part2.js +229 -0
  485. data/vendor/v8/test/mjsunit/fuzz-natives-part3.js +229 -0
  486. data/vendor/v8/test/mjsunit/{fuzz-natives.js → fuzz-natives-part4.js} +12 -2
  487. data/vendor/v8/test/mjsunit/generated-transition-stub.js +218 -0
  488. data/vendor/v8/test/mjsunit/greedy.js +1 -1
  489. data/vendor/v8/test/mjsunit/harmony/block-conflicts.js +2 -1
  490. data/vendor/v8/test/mjsunit/harmony/block-let-crankshaft.js +1 -1
  491. data/vendor/v8/test/mjsunit/harmony/collections.js +69 -11
  492. data/vendor/v8/test/mjsunit/harmony/debug-blockscopes.js +2 -2
  493. data/vendor/v8/test/mjsunit/harmony/module-linking.js +180 -3
  494. data/vendor/v8/test/mjsunit/harmony/module-parsing.js +31 -0
  495. data/vendor/v8/test/mjsunit/harmony/module-recompile.js +87 -0
  496. data/vendor/v8/test/mjsunit/harmony/module-resolution.js +15 -2
  497. data/vendor/v8/test/mjsunit/harmony/object-observe.js +1056 -0
  498. data/vendor/v8/test/mjsunit/harmony/proxies-json.js +178 -0
  499. data/vendor/v8/test/mjsunit/harmony/proxies.js +25 -10
  500. data/vendor/v8/test/mjsunit/json-parser-recursive.js +33 -0
  501. data/vendor/v8/test/mjsunit/json-stringify-recursive.js +52 -0
  502. data/vendor/v8/test/mjsunit/json.js +38 -2
  503. data/vendor/v8/test/mjsunit/json2.js +153 -0
  504. data/vendor/v8/test/mjsunit/limit-locals.js +5 -4
  505. data/vendor/v8/test/mjsunit/manual-parallel-recompile.js +79 -0
  506. data/vendor/v8/test/mjsunit/math-exp-precision.js +64 -0
  507. data/vendor/v8/test/mjsunit/math-floor-negative.js +59 -0
  508. data/vendor/v8/test/mjsunit/math-floor-of-div-minus-zero.js +41 -0
  509. data/vendor/v8/test/mjsunit/math-floor-of-div-nosudiv.js +288 -0
  510. data/vendor/v8/test/mjsunit/math-floor-of-div.js +81 -9
  511. data/vendor/v8/test/mjsunit/{math-floor.js → math-floor-part1.js} +1 -72
  512. data/vendor/v8/test/mjsunit/math-floor-part2.js +76 -0
  513. data/vendor/v8/test/mjsunit/math-floor-part3.js +78 -0
  514. data/vendor/v8/test/mjsunit/math-floor-part4.js +76 -0
  515. data/vendor/v8/test/mjsunit/mirror-object.js +43 -9
  516. data/vendor/v8/test/mjsunit/mjsunit.js +1 -1
  517. data/vendor/v8/test/mjsunit/mjsunit.status +52 -27
  518. data/vendor/v8/test/mjsunit/mul-exhaustive-part1.js +491 -0
  519. data/vendor/v8/test/mjsunit/mul-exhaustive-part10.js +470 -0
  520. data/vendor/v8/test/mjsunit/mul-exhaustive-part2.js +525 -0
  521. data/vendor/v8/test/mjsunit/mul-exhaustive-part3.js +532 -0
  522. data/vendor/v8/test/mjsunit/mul-exhaustive-part4.js +509 -0
  523. data/vendor/v8/test/mjsunit/mul-exhaustive-part5.js +505 -0
  524. data/vendor/v8/test/mjsunit/mul-exhaustive-part6.js +554 -0
  525. data/vendor/v8/test/mjsunit/mul-exhaustive-part7.js +497 -0
  526. data/vendor/v8/test/mjsunit/mul-exhaustive-part8.js +526 -0
  527. data/vendor/v8/test/mjsunit/mul-exhaustive-part9.js +533 -0
  528. data/vendor/v8/test/mjsunit/new-function.js +34 -0
  529. data/vendor/v8/test/mjsunit/numops-fuzz-part1.js +1172 -0
  530. data/vendor/v8/test/mjsunit/numops-fuzz-part2.js +1178 -0
  531. data/vendor/v8/test/mjsunit/numops-fuzz-part3.js +1178 -0
  532. data/vendor/v8/test/mjsunit/numops-fuzz-part4.js +1177 -0
  533. data/vendor/v8/test/mjsunit/object-define-property.js +107 -2
  534. data/vendor/v8/test/mjsunit/override-read-only-property.js +6 -4
  535. data/vendor/v8/test/mjsunit/packed-elements.js +2 -2
  536. data/vendor/v8/test/mjsunit/parse-int-float.js +4 -4
  537. data/vendor/v8/test/mjsunit/pixel-array-rounding.js +1 -1
  538. data/vendor/v8/test/mjsunit/readonly.js +228 -0
  539. data/vendor/v8/test/mjsunit/regexp-capture-3.js +16 -18
  540. data/vendor/v8/test/mjsunit/regexp-capture.js +2 -0
  541. data/vendor/v8/test/mjsunit/regexp-global.js +122 -0
  542. data/vendor/v8/test/mjsunit/regexp-results-cache.js +78 -0
  543. data/vendor/v8/test/mjsunit/regress/regress-1117.js +12 -3
  544. data/vendor/v8/test/mjsunit/regress/regress-1118.js +1 -1
  545. data/vendor/v8/test/mjsunit/regress/regress-115100.js +36 -0
  546. data/vendor/v8/test/mjsunit/regress/regress-1199637.js +1 -3
  547. data/vendor/v8/test/mjsunit/regress/regress-121407.js +1 -1
  548. data/vendor/v8/test/mjsunit/regress/regress-131923.js +30 -0
  549. data/vendor/v8/test/mjsunit/regress/regress-131994.js +70 -0
  550. data/vendor/v8/test/mjsunit/regress/regress-133211.js +35 -0
  551. data/vendor/v8/test/mjsunit/regress/regress-133211b.js +39 -0
  552. data/vendor/v8/test/mjsunit/regress/regress-136048.js +34 -0
  553. data/vendor/v8/test/mjsunit/regress/regress-137768.js +73 -0
  554. data/vendor/v8/test/mjsunit/regress/regress-143967.js +34 -0
  555. data/vendor/v8/test/mjsunit/regress/regress-145201.js +107 -0
  556. data/vendor/v8/test/mjsunit/regress/regress-147497.js +45 -0
  557. data/vendor/v8/test/mjsunit/regress/regress-148378.js +38 -0
  558. data/vendor/v8/test/mjsunit/regress/regress-1563.js +1 -1
  559. data/vendor/v8/test/mjsunit/regress/regress-1591.js +48 -0
  560. data/vendor/v8/test/mjsunit/regress/regress-164442.js +45 -0
  561. data/vendor/v8/test/mjsunit/regress/regress-165637.js +61 -0
  562. data/vendor/v8/test/mjsunit/regress/regress-166379.js +39 -0
  563. data/vendor/v8/test/mjsunit/regress/regress-166553.js +33 -0
  564. data/vendor/v8/test/mjsunit/regress/regress-1692.js +1 -1
  565. data/vendor/v8/test/mjsunit/regress/regress-171641.js +40 -0
  566. data/vendor/v8/test/mjsunit/regress/regress-1980.js +1 -1
  567. data/vendor/v8/test/mjsunit/regress/regress-2073.js +99 -0
  568. data/vendor/v8/test/mjsunit/regress/regress-2119.js +36 -0
  569. data/vendor/v8/test/mjsunit/regress/regress-2156.js +39 -0
  570. data/vendor/v8/test/mjsunit/regress/regress-2163.js +70 -0
  571. data/vendor/v8/test/mjsunit/regress/regress-2170.js +58 -0
  572. data/vendor/v8/test/mjsunit/regress/regress-2172.js +35 -0
  573. data/vendor/v8/test/mjsunit/regress/regress-2185-2.js +145 -0
  574. data/vendor/v8/test/mjsunit/regress/regress-2185.js +38 -0
  575. data/vendor/v8/test/mjsunit/regress/regress-2186.js +49 -0
  576. data/vendor/v8/test/mjsunit/regress/regress-2193.js +58 -0
  577. data/vendor/v8/test/mjsunit/regress/regress-2219.js +32 -0
  578. data/vendor/v8/test/mjsunit/regress/regress-2225.js +65 -0
  579. data/vendor/v8/test/mjsunit/regress/regress-2226.js +36 -0
  580. data/vendor/v8/test/mjsunit/regress/regress-2234.js +41 -0
  581. data/vendor/v8/test/mjsunit/regress/regress-2243.js +31 -0
  582. data/vendor/v8/test/mjsunit/regress/regress-2249.js +33 -0
  583. data/vendor/v8/test/mjsunit/regress/regress-2250.js +68 -0
  584. data/vendor/v8/test/mjsunit/regress/regress-2261.js +113 -0
  585. data/vendor/v8/test/mjsunit/regress/regress-2263.js +30 -0
  586. data/vendor/v8/test/mjsunit/regress/regress-2284.js +32 -0
  587. data/vendor/v8/test/mjsunit/regress/regress-2285.js +32 -0
  588. data/vendor/v8/test/mjsunit/regress/regress-2286.js +32 -0
  589. data/vendor/v8/test/mjsunit/regress/regress-2289.js +34 -0
  590. data/vendor/v8/test/mjsunit/regress/regress-2291.js +36 -0
  591. data/vendor/v8/test/mjsunit/regress/regress-2294.js +70 -0
  592. data/vendor/v8/test/mjsunit/regress/regress-2296.js +40 -0
  593. data/vendor/v8/test/mjsunit/regress/regress-2315.js +40 -0
  594. data/vendor/v8/test/mjsunit/regress/regress-2318.js +66 -0
  595. data/vendor/v8/test/mjsunit/regress/regress-2322.js +36 -0
  596. data/vendor/v8/test/mjsunit/regress/regress-2326.js +54 -0
  597. data/vendor/v8/test/mjsunit/regress/regress-2336.js +53 -0
  598. data/vendor/v8/test/mjsunit/regress/regress-2339.js +59 -0
  599. data/vendor/v8/test/mjsunit/regress/regress-2346.js +123 -0
  600. data/vendor/v8/test/mjsunit/regress/regress-2373.js +29 -0
  601. data/vendor/v8/test/mjsunit/regress/regress-2374.js +33 -0
  602. data/vendor/v8/test/mjsunit/regress/regress-2398.js +41 -0
  603. data/vendor/v8/test/mjsunit/regress/regress-2410.js +36 -0
  604. data/vendor/v8/test/mjsunit/regress/regress-2416.js +75 -0
  605. data/vendor/v8/test/mjsunit/regress/regress-2419.js +37 -0
  606. data/vendor/v8/test/mjsunit/regress/regress-2433.js +36 -0
  607. data/vendor/v8/test/mjsunit/regress/regress-2437.js +156 -0
  608. data/vendor/v8/test/mjsunit/regress/regress-2438.js +52 -0
  609. data/vendor/v8/test/mjsunit/regress/regress-2443.js +129 -0
  610. data/vendor/v8/test/mjsunit/regress/regress-2444.js +120 -0
  611. data/vendor/v8/test/mjsunit/regress/regress-2489.js +50 -0
  612. data/vendor/v8/test/mjsunit/regress/regress-2499.js +40 -0
  613. data/vendor/v8/test/mjsunit/regress/regress-334.js +1 -1
  614. data/vendor/v8/test/mjsunit/regress/regress-492.js +39 -1
  615. data/vendor/v8/test/mjsunit/regress/regress-builtin-array-op.js +38 -0
  616. data/vendor/v8/test/mjsunit/regress/regress-cnlt-elements.js +43 -0
  617. data/vendor/v8/test/mjsunit/regress/regress-cnlt-enum-indices.js +45 -0
  618. data/vendor/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js +46 -0
  619. data/vendor/v8/test/mjsunit/regress/regress-convert-enum.js +60 -0
  620. data/vendor/v8/test/mjsunit/regress/regress-convert-enum2.js +46 -0
  621. data/vendor/v8/test/mjsunit/regress/regress-convert-transition.js +40 -0
  622. data/vendor/v8/test/mjsunit/regress/regress-crbug-119926.js +3 -1
  623. data/vendor/v8/test/mjsunit/regress/regress-crbug-125148.js +90 -0
  624. data/vendor/v8/test/mjsunit/regress/regress-crbug-134055.js +63 -0
  625. data/vendor/v8/test/mjsunit/regress/regress-crbug-134609.js +59 -0
  626. data/vendor/v8/test/mjsunit/regress/regress-crbug-135008.js +45 -0
  627. data/vendor/v8/test/mjsunit/regress/regress-crbug-135066.js +55 -0
  628. data/vendor/v8/test/mjsunit/regress/regress-crbug-137689.js +47 -0
  629. data/vendor/v8/test/mjsunit/regress/regress-crbug-138887.js +48 -0
  630. data/vendor/v8/test/mjsunit/regress/regress-crbug-140083.js +44 -0
  631. data/vendor/v8/test/mjsunit/regress/regress-crbug-142087.js +38 -0
  632. data/vendor/v8/test/mjsunit/regress/regress-crbug-142218.js +44 -0
  633. data/vendor/v8/test/mjsunit/regress/regress-crbug-145961.js +39 -0
  634. data/vendor/v8/test/mjsunit/regress/regress-crbug-146910.js +33 -0
  635. data/vendor/v8/test/mjsunit/regress/regress-crbug-147475.js +48 -0
  636. data/vendor/v8/test/mjsunit/regress/regress-crbug-148376.js +35 -0
  637. data/vendor/v8/test/mjsunit/regress/regress-crbug-150545.js +53 -0
  638. data/vendor/v8/test/mjsunit/regress/regress-crbug-150729.js +39 -0
  639. data/vendor/v8/test/mjsunit/regress/regress-crbug-157019.js +54 -0
  640. data/vendor/v8/test/mjsunit/regress/regress-crbug-157520.js +38 -0
  641. data/vendor/v8/test/mjsunit/regress/regress-crbug-158185.js +39 -0
  642. data/vendor/v8/test/mjsunit/regress/regress-crbug-160010.js +35 -0
  643. data/vendor/v8/test/mjsunit/regress/regress-crbug-162085.js +71 -0
  644. data/vendor/v8/test/mjsunit/regress/regress-crbug-168545.js +34 -0
  645. data/vendor/v8/test/mjsunit/regress/regress-crbug-170856.js +33 -0
  646. data/vendor/v8/test/mjsunit/regress/regress-crbug-172345.js +34 -0
  647. data/vendor/v8/test/mjsunit/regress/regress-crbug-173974.js +36 -0
  648. data/vendor/v8/test/mjsunit/regress/regress-crbug-18639.js +9 -5
  649. data/vendor/v8/test/mjsunit/regress/regress-debug-code-recompilation.js +2 -1
  650. data/vendor/v8/test/mjsunit/regress/regress-deep-proto.js +45 -0
  651. data/vendor/v8/test/mjsunit/regress/regress-delete-empty-double.js +40 -0
  652. data/vendor/v8/test/mjsunit/regress/regress-iteration-order.js +42 -0
  653. data/vendor/v8/test/mjsunit/regress/regress-json-stringify-gc.js +41 -0
  654. data/vendor/v8/test/mjsunit/regress/regress-latin-1.js +78 -0
  655. data/vendor/v8/test/mjsunit/regress/regress-load-elements.js +49 -0
  656. data/vendor/v8/test/mjsunit/regress/regress-observe-empty-double-array.js +38 -0
  657. data/vendor/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js +37 -0
  658. data/vendor/v8/test/mjsunit/shift-for-integer-div.js +59 -0
  659. data/vendor/v8/test/mjsunit/stack-traces-gc.js +119 -0
  660. data/vendor/v8/test/mjsunit/stack-traces-overflow.js +122 -0
  661. data/vendor/v8/test/mjsunit/stack-traces.js +39 -1
  662. data/vendor/v8/test/mjsunit/str-to-num.js +7 -2
  663. data/vendor/v8/test/mjsunit/strict-mode.js +36 -11
  664. data/vendor/v8/test/mjsunit/string-charcodeat.js +3 -0
  665. data/vendor/v8/test/mjsunit/string-natives.js +72 -0
  666. data/vendor/v8/test/mjsunit/string-split.js +17 -0
  667. data/vendor/v8/test/mjsunit/testcfg.py +76 -6
  668. data/vendor/v8/test/mjsunit/tools/tickprocessor.js +4 -1
  669. data/vendor/v8/test/mjsunit/try-finally-continue.js +72 -0
  670. data/vendor/v8/test/mjsunit/typed-array-slice.js +61 -0
  671. data/vendor/v8/test/mjsunit/unbox-double-arrays.js +2 -0
  672. data/vendor/v8/test/mjsunit/uri.js +12 -0
  673. data/vendor/v8/test/mjsunit/with-readonly.js +4 -2
  674. data/vendor/v8/test/mozilla/mozilla.status +19 -113
  675. data/vendor/v8/test/mozilla/testcfg.py +122 -3
  676. data/vendor/v8/test/preparser/preparser.status +5 -0
  677. data/vendor/v8/test/preparser/strict-identifiers.pyt +1 -1
  678. data/vendor/v8/test/preparser/testcfg.py +101 -5
  679. data/vendor/v8/test/sputnik/sputnik.status +1 -1
  680. data/vendor/v8/test/sputnik/testcfg.py +5 -0
  681. data/vendor/v8/test/test262/README +2 -2
  682. data/vendor/v8/test/test262/test262.status +13 -36
  683. data/vendor/v8/test/test262/testcfg.py +102 -8
  684. data/vendor/v8/tools/android-build.sh +0 -0
  685. data/vendor/v8/tools/android-ll-prof.sh +69 -0
  686. data/vendor/v8/tools/android-run.py +109 -0
  687. data/vendor/v8/tools/android-sync.sh +105 -0
  688. data/vendor/v8/tools/bash-completion.sh +0 -0
  689. data/vendor/v8/tools/check-static-initializers.sh +0 -0
  690. data/vendor/v8/tools/common-includes.sh +15 -22
  691. data/vendor/v8/tools/disasm.py +4 -4
  692. data/vendor/v8/tools/fuzz-harness.sh +0 -0
  693. data/vendor/v8/tools/gen-postmortem-metadata.py +6 -8
  694. data/vendor/v8/tools/grokdump.py +404 -129
  695. data/vendor/v8/tools/gyp/v8.gyp +105 -43
  696. data/vendor/v8/tools/linux-tick-processor +5 -5
  697. data/vendor/v8/tools/ll_prof.py +75 -15
  698. data/vendor/v8/tools/merge-to-branch.sh +2 -2
  699. data/vendor/v8/tools/plot-timer-events +70 -0
  700. data/vendor/v8/tools/plot-timer-events.js +510 -0
  701. data/vendor/v8/tools/presubmit.py +1 -0
  702. data/vendor/v8/tools/push-to-trunk.sh +14 -4
  703. data/vendor/v8/tools/run-llprof.sh +69 -0
  704. data/vendor/v8/tools/run-tests.py +372 -0
  705. data/vendor/v8/tools/run-valgrind.py +1 -1
  706. data/vendor/v8/tools/status-file-converter.py +39 -0
  707. data/vendor/v8/tools/test-server.py +224 -0
  708. data/vendor/v8/tools/test-wrapper-gypbuild.py +13 -16
  709. data/vendor/v8/tools/test.py +10 -19
  710. data/vendor/v8/tools/testrunner/README +174 -0
  711. data/vendor/v8/tools/testrunner/__init__.py +26 -0
  712. data/vendor/v8/tools/testrunner/local/__init__.py +26 -0
  713. data/vendor/v8/tools/testrunner/local/commands.py +153 -0
  714. data/vendor/v8/tools/testrunner/local/execution.py +182 -0
  715. data/vendor/v8/tools/testrunner/local/old_statusfile.py +460 -0
  716. data/vendor/v8/tools/testrunner/local/progress.py +238 -0
  717. data/vendor/v8/tools/testrunner/local/statusfile.py +145 -0
  718. data/vendor/v8/tools/testrunner/local/testsuite.py +187 -0
  719. data/vendor/v8/tools/testrunner/local/utils.py +108 -0
  720. data/vendor/v8/tools/testrunner/local/verbose.py +99 -0
  721. data/vendor/v8/tools/testrunner/network/__init__.py +26 -0
  722. data/vendor/v8/tools/testrunner/network/distro.py +90 -0
  723. data/vendor/v8/tools/testrunner/network/endpoint.py +124 -0
  724. data/vendor/v8/tools/testrunner/network/network_execution.py +253 -0
  725. data/vendor/v8/tools/testrunner/network/perfdata.py +120 -0
  726. data/vendor/v8/tools/testrunner/objects/__init__.py +26 -0
  727. data/vendor/v8/tools/testrunner/objects/context.py +50 -0
  728. data/vendor/v8/tools/testrunner/objects/output.py +60 -0
  729. data/vendor/v8/tools/testrunner/objects/peer.py +80 -0
  730. data/vendor/v8/tools/testrunner/objects/testcase.py +83 -0
  731. data/vendor/v8/tools/testrunner/objects/workpacket.py +90 -0
  732. data/vendor/v8/tools/testrunner/server/__init__.py +26 -0
  733. data/vendor/v8/tools/testrunner/server/compression.py +111 -0
  734. data/vendor/v8/tools/testrunner/server/constants.py +51 -0
  735. data/vendor/v8/tools/testrunner/server/daemon.py +147 -0
  736. data/vendor/v8/tools/testrunner/server/local_handler.py +119 -0
  737. data/vendor/v8/tools/testrunner/server/main.py +245 -0
  738. data/vendor/v8/tools/testrunner/server/presence_handler.py +120 -0
  739. data/vendor/v8/tools/testrunner/server/signatures.py +63 -0
  740. data/vendor/v8/tools/testrunner/server/status_handler.py +112 -0
  741. data/vendor/v8/tools/testrunner/server/work_handler.py +150 -0
  742. data/vendor/v8/tools/tick-processor.html +168 -0
  743. data/vendor/v8/tools/tickprocessor-driver.js +5 -3
  744. data/vendor/v8/tools/tickprocessor.js +58 -15
  745. metadata +534 -30
  746. data/patches/add-freebsd9-and-freebsd10-to-gyp-GetFlavor.patch +0 -11
  747. data/patches/do-not-imply-vfp3-and-armv7.patch +0 -44
  748. data/patches/fPIC-on-x64.patch +0 -14
  749. data/vendor/v8/src/liveobjectlist-inl.h +0 -126
  750. data/vendor/v8/src/liveobjectlist.cc +0 -2631
  751. data/vendor/v8/src/liveobjectlist.h +0 -319
  752. data/vendor/v8/test/mjsunit/mul-exhaustive.js +0 -4629
  753. data/vendor/v8/test/mjsunit/numops-fuzz.js +0 -4609
  754. data/vendor/v8/test/mjsunit/regress/regress-1969.js +0 -5045
data/vendor/v8/src/arm/lithium-codegen-arm.h

@@ -44,21 +44,26 @@ class SafepointGenerator;
 class LCodeGen BASE_EMBEDDED {
  public:
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
-      : chunk_(chunk),
+      : zone_(info->zone()),
+        chunk_(static_cast<LPlatformChunk*>(chunk)),
         masm_(assembler),
         info_(info),
         current_block_(-1),
         current_instruction_(-1),
         instructions_(chunk->instructions()),
-        deoptimizations_(4),
-        deopt_jump_table_(4),
-        deoptimization_literals_(8),
+        deoptimizations_(4, info->zone()),
+        deopt_jump_table_(4, info->zone()),
+        deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
-        deferred_(8),
+        translations_(info->zone()),
+        deferred_(8, info->zone()),
+        support_aligned_spilled_doubles_(false),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
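The recurring change in this constructor (and throughout the rest of the diff) is that zone-allocated containers now take an explicit Zone* both at construction time and on every growing Add(). A minimal self-contained sketch of the pattern follows; Zone and ZoneList here are invented stand-ins for V8's classes, not the real implementation, and handle only trivially copyable element types.

    // Sketch only: an arena ("zone") owns every allocation and frees them in
    // one shot, so a container cannot silently fall back to the global heap;
    // it must be told which zone to carve memory from each time it grows.
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <vector>

    class Zone {                       // stand-in for v8::internal::Zone
     public:
      ~Zone() { for (void* p : blocks_) std::free(p); }
      void* Allocate(std::size_t size) {
        void* p = std::malloc(size);
        blocks_.push_back(p);
        return p;
      }
     private:
      std::vector<void*> blocks_;
    };

    template <typename T>              // stand-in for v8::internal::ZoneList
    class ZoneList {
     public:
      ZoneList(int capacity, Zone* zone)
          : data_(static_cast<T*>(zone->Allocate(capacity * sizeof(T)))),
            capacity_(capacity), length_(0) {}
      void Add(const T& value, Zone* zone) {
        if (length_ == capacity_) {    // grow from the same zone, never the heap
          capacity_ *= 2;
          T* grown = static_cast<T*>(zone->Allocate(capacity_ * sizeof(T)));
          std::memcpy(grown, data_, length_ * sizeof(T));
          data_ = grown;
        }
        data_[length_++] = value;
      }
     private:
      T* data_;
      int capacity_;
      int length_;
    };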
@@ -71,6 +76,16 @@ class LCodeGen BASE_EMBEDDED {
   Isolate* isolate() const { return info_->isolate(); }
   Factory* factory() const { return isolate()->factory(); }
   Heap* heap() const { return isolate()->heap(); }
+  Zone* zone() const { return zone_; }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }

   // Support for converting LOperands to assembler types.
   // LOperand must be a register.
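NeedsEagerFrame() and NeedsDeferredFrame() are new predicates: ordinary functions always build a stack frame in the prologue, while stubs that only call out on a slow path can defer frame construction until that path is taken. A hedged restatement of the logic with invented names (CodeInfo and its fields approximate the CompilationInfo accessors used above):

    struct CodeInfo {
      bool is_stub;
      bool calls_on_fast_path;  // approximates info()->is_non_deferred_calling()
      bool calls_on_slow_path;  // approximates info()->is_deferred_calling()
      int spill_slots;          // approximates GetStackSlotCount()
    };

    // A frame is needed up front if there are spill slots, if calls happen on
    // the main (non-deferred) path, or if this is a full function, not a stub.
    bool NeedsEagerFrame(const CodeInfo& info) {
      return info.spill_slots > 0 || info.calls_on_fast_path || !info.is_stub;
    }

    // Otherwise, a stub that calls out only from deferred code builds its
    // frame lazily, on entry to that deferred code.
    bool NeedsDeferredFrame(const CodeInfo& info) {
      return !NeedsEagerFrame(info) && info.calls_on_slow_path;
    }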
@@ -80,12 +95,12 @@ class LCodeGen BASE_EMBEDDED {
   Register EmitLoadRegister(LOperand* op, Register scratch);

   // LOperand must be a double register.
-  DoubleRegister ToDoubleRegister(LOperand* op) const;
+  DwVfpRegister ToDoubleRegister(LOperand* op) const;

   // LOperand is loaded into dbl_scratch, unless already a double register.
-  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
-                                        SwVfpRegister flt_scratch,
-                                        DoubleRegister dbl_scratch);
+  DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+                                       SwVfpRegister flt_scratch,
+                                       DwVfpRegister dbl_scratch);
   int ToInteger32(LConstantOperand* op) const;
   double ToDouble(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op);
@@ -106,11 +121,17 @@ class LCodeGen BASE_EMBEDDED {
   void FinishCode(Handle<Code> code);

   // Deferred code support.
-  template<int T>
-  void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+  void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
+                              LOperand* left_argument,
+                              LOperand* right_argument,
                               Token::Value op);
   void DoDeferredNumberTagD(LNumberTagD* instr);
-  void DoDeferredNumberTagI(LNumberTagI* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagI(LInstruction* instr,
+                            LOperand* value,
+                            IntegerSignedness signedness);
+
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
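DoDeferredNumberTagI now takes a generic LInstruction plus an IntegerSignedness tag instead of being hard-wired to LNumberTagI, so a single deferred path can box both signed and unsigned 32-bit values. The subtlety the enum captures is that one bit pattern means two different numbers; a self-contained sketch (ToHeapNumberValue is a name invented for illustration):

    #include <cstdint>

    enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };

    // The same 32 bits box to different doubles depending on signedness:
    // 0xFFFFFFFF is -1.0 as a signed int32 but 4294967295.0 as a uint32.
    double ToHeapNumberValue(uint32_t raw_bits, IntegerSignedness signedness) {
      return signedness == SIGNED_INT32
                 ? static_cast<double>(static_cast<int32_t>(raw_bits))
                 : static_cast<double>(raw_bits);
    }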
@@ -118,18 +139,31 @@ class LCodeGen BASE_EMBEDDED {
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredAllocateObject(LAllocateObject* instr);
+  void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);

-  void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+  void DoCheckMapCommon(Register map_reg, Handle<Map> map,
                         CompareMapMode mode, LEnvironment* env);

   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
   void DoGap(LGap* instr);

+  MemOperand PrepareKeyedOperand(Register key,
+                                 Register base,
+                                 bool key_is_constant,
+                                 int constant_key,
+                                 int element_size,
+                                 int shift_size,
+                                 int additional_index,
+                                 int additional_offset);
+
   // Emit frame translation commands for an environment.
-  void WriteTranslation(LEnvironment* environment, Translation* translation);
+  void WriteTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        int* arguments_index,
+                        int* arguments_count);

   // Declare methods that deal with the individual node types.
 #define DECLARE_DO(type) void Do##type(L##type* node);
@@ -153,7 +187,7 @@ class LCodeGen BASE_EMBEDDED {
     return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
   }

-  LChunk* chunk() const { return chunk_; }
+  LPlatformChunk* chunk() const { return chunk_; }
   Scope* scope() const { return scope_; }
   HGraph* graph() const { return chunk_->graph(); }

@@ -171,12 +205,12 @@ class LCodeGen BASE_EMBEDDED {
                                    Register temporary2);

   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
+  int GetParameterCount() const { return info()->num_parameters(); }

-  void Abort(const char* format, ...);
+  void Abort(const char* reason);
   void Comment(const char* format, ...);

-  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

   // Code generation passes. Returns true if code generation should
   // continue.
@@ -191,14 +225,18 @@ class LCodeGen BASE_EMBEDDED {
     RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
   };

-  void CallCode(Handle<Code> code,
-                RelocInfo::Mode mode,
-                LInstruction* instr);
+  void CallCode(
+      Handle<Code> code,
+      RelocInfo::Mode mode,
+      LInstruction* instr,
+      TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);

-  void CallCodeGeneric(Handle<Code> code,
-                       RelocInfo::Mode mode,
-                       LInstruction* instr,
-                       SafepointMode safepoint_mode);
+  void CallCodeGeneric(
+      Handle<Code> code,
+      RelocInfo::Mode mode,
+      LInstruction* instr,
+      SafepointMode safepoint_mode,
+      TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);

   void CallRuntime(const Runtime::Function* function,
                    int num_arguments,
@@ -239,14 +277,17 @@ class LCodeGen BASE_EMBEDDED {

   void AddToTranslation(Translation* translation,
                         LOperand* op,
-                        bool is_tagged);
+                        bool is_tagged,
+                        bool is_uint32,
+                        int arguments_index,
+                        int arguments_count);
   void PopulateDeoptimizationData(Handle<Code> code);
   int DefineDeoptimizationLiteral(Handle<Object> literal);

   void PopulateDeoptimizationLiteralsWithInlinedFunctions();

   Register ToRegister(int index) const;
-  DoubleRegister ToDoubleRegister(int index) const;
+  DwVfpRegister ToDoubleRegister(int index) const;

   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -279,10 +320,15 @@ class LCodeGen BASE_EMBEDDED {
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
   void EmitNumberUntagD(Register input,
-                        DoubleRegister result,
+                        DwVfpRegister result,
                         bool deoptimize_on_undefined,
                         bool deoptimize_on_minus_zero,
-                        LEnvironment* env);
+                        LEnvironment* env,
+                        NumberUntagDMode mode);
+
+  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
+                              HValue* value,
+                              LOperand* operand);

   // Emits optimized code for typeof x == "y". Modifies input register.
   // Returns the condition on which a final split to
@@ -314,14 +360,16 @@ class LCodeGen BASE_EMBEDDED {
   void EmitLoadFieldOrConstantFunction(Register result,
                                        Register object,
                                        Handle<Map> type,
-                                       Handle<String> name);
+                                       Handle<String> name,
+                                       LEnvironment* env);

   // Emits optimized code to deep-copy the contents of statically known
   // object graphs (e.g. object literal boilerplate).
   void EmitDeepCopy(Handle<JSObject> object,
                     Register result,
                     Register source,
-                    int* offset);
+                    int* offset,
+                    AllocationSiteMode mode);

   // Emit optimized code for integer division.
   // Inputs are signed.
@@ -335,16 +383,27 @@ class LCodeGen BASE_EMBEDDED {
                         LEnvironment* environment);

   struct JumpTableEntry {
-    explicit inline JumpTableEntry(Address entry)
+    inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
         : label(),
-          address(entry) { }
+          address(entry),
+          needs_frame(frame),
+          is_lazy_deopt(is_lazy) { }
     Label label;
     Address address;
+    bool needs_frame;
+    bool is_lazy_deopt;
   };

   void EnsureSpaceForLazyDeopt();
-
-  LChunk* const chunk_;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  Zone* zone_;
+  LPlatformChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;

@@ -359,8 +418,10 @@ class LCodeGen BASE_EMBEDDED {
   Status status_;
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
+  bool support_aligned_spilled_doubles_;
   int osr_pc_offset_;
   int last_lazy_deopt_pc_;
+  bool frame_is_built_;

   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -376,6 +437,7 @@ class LCodeGen BASE_EMBEDDED {
     PushSafepointRegistersScope(LCodeGen* codegen,
                                 Safepoint::Kind kind)
         : codegen_(codegen) {
+      ASSERT(codegen_->info()->is_calling());
       ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->expected_safepoint_kind_ = kind;

data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc

@@ -36,7 +36,7 @@ namespace internal {
 static const Register kSavedValueRegister = { 9 };

 LGapResolver::LGapResolver(LCodeGen* owner)
-    : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
+    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
       saved_destination_(NULL) { }

@@ -79,7 +79,7 @@ void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
   const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
   for (int i = 0; i < moves->length(); ++i) {
     LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) moves_.Add(move);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
   }
   Verify();
 }
@@ -171,8 +171,10 @@ void LGapResolver::BreakCycle(int index) {
   } else if (source->IsStackSlot()) {
     __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
@@ -192,8 +194,10 @@ void LGapResolver::RestoreValue() {
   } else if (saved_destination_->IsStackSlot()) {
     __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
   } else if (saved_destination_->IsDoubleRegister()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
   } else if (saved_destination_->IsDoubleStackSlot()) {
+    CpuFeatures::Scope scope(VFP2);
     __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
@@ -229,7 +233,8 @@ void LGapResolver::EmitMove(int index) {
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
         if (!destination_operand.OffsetIsUint12Encodable()) {
-          // ip is overwritten while saving the value to the destination.
+          CpuFeatures::Scope scope(VFP2);
+          // ip is overwritten while saving the value to the destination.
           // Therefore we can't use ip. It is OK if the read from the source
           // destroys ip, since that happens before the value is read.
           __ vldr(kScratchDoubleReg.low(), source_operand);
@@ -267,7 +272,8 @@ void LGapResolver::EmitMove(int index) {
267
272
  }
268
273
 
269
274
  } else if (source->IsDoubleRegister()) {
270
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
275
+ CpuFeatures::Scope scope(VFP2);
276
+ DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
271
277
  if (destination->IsDoubleRegister()) {
272
278
  __ vmov(cgen_->ToDoubleRegister(destination), source_register);
273
279
  } else {
@@ -276,7 +282,8 @@ void LGapResolver::EmitMove(int index) {
276
282
  }
277
283
 
278
284
  } else if (source->IsDoubleStackSlot()) {
279
- MemOperand source_operand = cgen_->ToMemOperand(source);
285
+ CpuFeatures::Scope scope(VFP2);
286
+ MemOperand source_operand = cgen_->ToMemOperand(source);
280
287
  if (destination->IsDoubleRegister()) {
281
288
  __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
282
289
  } else {
@@ -108,7 +108,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,


  int MacroAssembler::CallSize(Register target, Condition cond) {
- #if USE_BLX
+ #ifdef USE_BLX
  return kInstrSize;
  #else
  return 2 * kInstrSize;
@@ -121,7 +121,7 @@ void MacroAssembler::Call(Register target, Condition cond) {
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
- #if USE_BLX
+ #ifdef USE_BLX
  blx(target, cond);
  #else
  // set lr for return at current pc + 8
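
The `#if USE_BLX` to `#ifdef USE_BLX` change is worth pausing on: `#if` substitutes the macro and evaluates it arithmetically, silently treating an undefined macro as 0, while `#ifdef` tests only whether the macro is defined, which is what a feature flag wants. In V8 the flag is defined as 1 when present, so this reads as a robustness cleanup; a small C illustration of the difference:

    /* A flag defined with no usable value ("#define USE_BLX") makes
       "#if USE_BLX" a preprocessor error, and an undefined flag makes it
       silently false. "#ifdef" asks the intended question: is it defined? */
    #define USE_BLX 1

    #ifdef USE_BLX
    int use_blx_path(void) { return 1; }   /* taken whenever the flag exists */
    #else
    int use_blx_path(void) { return 0; }
    #endif
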
@@ -137,7 +137,19 @@ int MacroAssembler::CallSize(
  int size = 2 * kInstrSize;
  Instr mov_instr = cond | MOV | LeaveCC;
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
+ if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+ }
+
+
+ int MacroAssembler::CallSizeNotPredictableCodeSize(
+ Address target, RelocInfo::Mode rmode, Condition cond) {
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ intptr_t immediate = reinterpret_cast<intptr_t>(target);
+ if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
  size += kInstrSize;
  }
  return size;
@@ -146,15 +158,29 @@ int MacroAssembler::CallSize(

  void MacroAssembler::Call(Address target,
  RelocInfo::Mode rmode,
- Condition cond) {
+ Condition cond,
+ TargetAddressStorageMode mode) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
- #if USE_BLX
- // On ARMv5 and after the recommended call sequence is:
- // ldr ip, [pc, #...]
- // blx ip
+
+ bool old_predictable_code_size = predictable_code_size();
+ if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+ set_predictable_code_size(true);
+ }
+
+ #ifdef USE_BLX
+ // Call sequence on V7 or later may be :
+ // movw ip, #... @ call address low 16
+ // movt ip, #... @ call address high 16
+ // blx ip
+ // @ return address
+ // Or for pre-V7 or values that may be back-patched
+ // to avoid ICache flushes:
+ // ldr ip, [pc, #...] @ call address
+ // blx ip
+ // @ return address

  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method will automatically record
@@ -165,21 +191,22 @@ void MacroAssembler::Call(Address target,
  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  blx(ip, cond);

- ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
  #else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
- ASSERT(kCallTargetAddressOffset == kInstrSize);
  #endif
  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+ set_predictable_code_size(old_predictable_code_size);
+ }
  }


  int MacroAssembler::CallSize(Handle<Code> code,
  RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
  Condition cond) {
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
  }
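
NEVER_INLINE_TARGET_ADDRESS works by flipping the assembler's predictable_code_size flag around the call and restoring it afterwards, forcing the constant-pool form whose length does not depend on the immediate (and whose target can be back-patched without an ICache flush). A hedged RAII sketch of the same save/set/restore pattern; the helper class is an illustration, not taken from this diff:

    // Illustration only: an RAII guard equivalent to the manual
    // old_predictable_code_size bookkeeping inside Call().
    class PredictableCodeSizeScope {
     public:
      explicit PredictableCodeSizeScope(MacroAssembler* masm)
          : masm_(masm), old_value_(masm->predictable_code_size()) {
        masm_->set_predictable_code_size(true);  // force fixed-length sequences
      }
      ~PredictableCodeSizeScope() {
        masm_->set_predictable_code_size(old_value_);  // restore on every path
      }
     private:
      MacroAssembler* masm_;
      bool old_value_;
    };
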
@@ -187,19 +214,18 @@ int MacroAssembler::CallSize(Handle<Code> code,
187
214
 
188
215
  void MacroAssembler::Call(Handle<Code> code,
189
216
  RelocInfo::Mode rmode,
190
- unsigned ast_id,
191
- Condition cond) {
217
+ TypeFeedbackId ast_id,
218
+ Condition cond,
219
+ TargetAddressStorageMode mode) {
192
220
  Label start;
193
221
  bind(&start);
194
222
  ASSERT(RelocInfo::IsCodeTarget(rmode));
195
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
223
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
196
224
  SetRecordedAstId(ast_id);
197
225
  rmode = RelocInfo::CODE_TARGET_WITH_ID;
198
226
  }
199
227
  // 'code' is always generated ARM code, never THUMB code
200
- Call(reinterpret_cast<Address>(code.location()), rmode, cond);
201
- ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
202
- SizeOfCodeGeneratedSince(&start));
228
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
203
229
  }
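
Replacing the raw `unsigned ast_id` with TypeFeedbackId trades an easily transposed integer for a distinct type whose `IsNone()` replaces the `kNoASTId` sentinel comparison. A minimal sketch of such an id wrapper; only `IsNone()` appears in the diff, the field and constant names here are assumptions:

    class TypeFeedbackId {
     public:
      explicit TypeFeedbackId(int raw) : raw_(raw) {}
      static TypeFeedbackId None() { return TypeFeedbackId(kNone); }
      bool IsNone() const { return raw_ == kNone; }  // replaces ast_id != kNoASTId
      int ToInt() const { return raw_; }
     private:
      static const int kNone = -1;  // sentinel; assumed value
      int raw_;
    };
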
@@ -264,9 +290,9 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  }


- void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
+ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
  if (!dst.is(src)) {
  vmov(dst, src);
  }
@@ -276,17 +302,15 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
  Condition cond) {
  if (!src2.is_reg() &&
- !src2.must_use_constant_pool() &&
+ !src2.must_output_reloc_info(this) &&
  src2.immediate() == 0) {
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
-
- } else if (!src2.is_single_instruction() &&
- !src2.must_use_constant_pool() &&
+ mov(dst, Operand::Zero(), LeaveCC, cond);
+ } else if (!src2.is_single_instruction(this) &&
+ !src2.must_output_reloc_info(this) &&
  CpuFeatures::IsSupported(ARMv7) &&
  IsPowerOf2(src2.immediate() + 1)) {
  ubfx(dst, src1, 0,
  WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
-
  } else {
  and_(dst, src1, src2, LeaveCC, cond);
  }
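
And() keeps a fast path for masks of the form 2^n - 1: when `IsPowerOf2(src2.immediate() + 1)` holds, the mask is a contiguous run of low bits and a single `ubfx` (unsigned bit-field extract) replaces a potentially multi-instruction AND. Equivalent C for the test and the extraction:

    #include <stdint.h>

    /* ubfx dst, src, #0, #width extracts bits [0, width); for a mask m with
       m + 1 a power of two this equals src & m. */
    uint32_t and_via_ubfx(uint32_t src, unsigned width) {
      return src & ((width < 32) ? ((1u << width) - 1) : 0xffffffffu);
    }

    /* The guard used by the fast path: is mask + 1 a power of two? */
    int is_low_bit_mask(uint32_t mask) {
      return ((mask + 1) & mask) == 0;  /* e.g. 0xFF + 1 = 0x100 */
    }
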
@@ -296,7 +320,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
  void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
  Condition cond) {
  ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
  and_(dst, src1, Operand(mask), LeaveCC, cond);
  if (lsb != 0) {
@@ -311,7 +335,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
  void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
  Condition cond) {
  ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
  and_(dst, src1, Operand(mask), LeaveCC, cond);
  int shift_up = 32 - lsb - width;
@@ -339,7 +363,7 @@ void MacroAssembler::Bfi(Register dst,
  ASSERT(lsb + width < 32);
  ASSERT(!scratch.is(dst));
  if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
  bic(dst, dst, Operand(mask));
  and_(scratch, src, Operand((1 << width) - 1));
@@ -351,12 +375,14 @@ void MacroAssembler::Bfi(Register dst,
  }


- void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
+ void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
+ Condition cond) {
  ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, dst, Operand(mask));
+ bic(dst, src, Operand(mask));
  } else {
+ Move(dst, src, cond);
  bfc(dst, lsb, width, cond);
  }
  }
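
On pre-ARMv7 cores, or whenever predictable code size is required, Bfc falls back to clearing the field with a computed mask; the new `src` parameter also turns it into a non-destructive copy-and-clear. The mask arithmetic, spelled out in C (it assumes lsb + width < 32, matching the ASSERTs in the surrounding code):

    #include <stdint.h>

    /* Clears bits [lsb, lsb + width) of src, as bic(dst, src, Operand(mask))
       does in the fallback. For lsb = 4, width = 8 the mask is 0x00000FF0. */
    uint32_t bfc_fallback(uint32_t src, int lsb, int width) {
      uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
      return src & ~mask;
    }
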
@@ -364,7 +390,7 @@ void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {

  void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
  Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
  ASSERT(!dst.is(pc) && !src.rm().is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));

@@ -384,7 +410,7 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
  }
  tst(dst, Operand(~satval));
  b(eq, &done);
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
+ mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
  mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
  bind(&done);
  } else {
@@ -396,6 +422,17 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
  void MacroAssembler::LoadRoot(Register destination,
  Heap::RootListIndex index,
  Condition cond) {
+ if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ !Heap::RootCanBeWrittenAfterInitialization(index) &&
+ !predictable_code_size()) {
+ Handle<Object> root(isolate()->heap()->roots_array_start()[index]);
+ if (!isolate()->heap()->InNewSpace(*root)) {
+ // The CPU supports fast immediate values, and this root will never
+ // change. We will load it as a relocatable immediate value.
+ mov(destination, Operand(root), LeaveCC, cond);
+ return;
+ }
+ }
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
  }

@@ -605,20 +642,24 @@ void MacroAssembler::PopSafepointRegisters() {


  void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
  PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
  kDoubleSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
  vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
  }


  void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
  vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
- add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
  kDoubleSize));
  PopSafepointRegisters();
  }
@@ -653,8 +694,10 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {


  MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
  // General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
  }
@@ -672,7 +715,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));

  // Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
  CpuFeatures::Scope scope(ARMv7);
  ldrd(dst1, dst2, src, cond);
  } else {
@@ -714,7 +757,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

  // Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
  CpuFeatures::Scope scope(ARMv7);
  strd(src1, src2, dst, cond);
  } else {
@@ -733,15 +776,6 @@ void MacroAssembler::Strd(Register src1, Register src2,
  }


- void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond) {
- vmrs(scratch, cond);
- bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
- vmsr(scratch, cond);
- }
-
-
  void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
  const DwVfpRegister src2,
  const Condition cond) {
@@ -777,8 +811,9 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,

  void MacroAssembler::Vmov(const DwVfpRegister dst,
  const double imm,
+ const Register scratch,
  const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value(imm);
@@ -788,7 +823,7 @@ void MacroAssembler::Vmov(const DwVfpRegister dst,
  } else if (value.bits == minus_zero.bits) {
  vneg(dst, kDoubleRegZero, cond);
  } else {
- vmov(dst, imm, cond);
+ vmov(dst, imm, scratch, cond);
  }
  }
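
Vmov special-cases 0.0 and -0.0 by comparing bit patterns (`value.bits`), since an ordinary floating-point compare cannot tell the two zeros apart. A portable sketch of that bit-level comparison:

    #include <cstdint>
    #include <cstring>

    // memcpy is the strict-aliasing-safe way to read a double's bit pattern.
    static uint64_t DoubleBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits;
    }
    // 0.0 == -0.0 is true, yet the encodings differ in the sign bit:
    // DoubleBits(0.0) != DoubleBits(-0.0).
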
794
829
 
@@ -826,7 +861,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Reserve room for saved entry sp and code object.
  sub(sp, sp, Operand(2 * kPointerSize));
  if (emit_debug_code()) {
- mov(ip, Operand(0));
+ mov(ip, Operand::Zero());
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  mov(ip, Operand(CodeObject()));
@@ -840,12 +875,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {

  // Optionally save all double registers.
  if (save_doubles) {
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vstm(db_w, sp, first, last);
+ CpuFeatures::Scope scope(VFP2);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ CheckFor32DRegs(ip);
+
+ // Push registers d0-d15, and possibly d16-d31, on the stack.
+ // If d16-d31 are not pushed, decrease the stack pointer instead.
+ vstm(db_w, sp, d16, d31, ne);
+ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ vstm(db_w, sp, d0, d15);
  // Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+ // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
  // since the sp slot and code slot were pushed after the fp.
  }

@@ -900,17 +940,24 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
  Register argument_count) {
  // Optionally restore all double registers.
  if (save_doubles) {
+ CpuFeatures::Scope scope(VFP2);
  // Calculate the stack location of the saved doubles and restore them.
  const int offset = 2 * kPointerSize;
- sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vldm(ia, r3, first, last);
+ sub(r3, fp,
+ Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ CheckFor32DRegs(ip);
+
+ // Pop registers d0-d15, and possibly d16-d31, from r3.
+ // If d16-d31 are not popped, increase r3 instead.
+ vldm(ia_w, r3, d0, d15);
+ vldm(ia_w, r3, d16, d31, ne);
+ add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq);
  }

  // Clear top frame.
- mov(r3, Operand(0, RelocInfo::NONE));
+ mov(r3, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(r3, MemOperand(ip));

@@ -929,7 +976,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
  }
  }

- void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+ void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
  if (use_eabi_hardfloat()) {
  Move(dst, d0);
  } else {
@@ -1179,7 +1227,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,

  #ifdef ENABLE_DEBUGGER_SUPPORT
  void MacroAssembler::DebugBreak() {
- mov(r0, Operand(0, RelocInfo::NONE));
+ mov(r0, Operand::Zero());
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  ASSERT(AllowThisStubCall(&ces));
@@ -1210,7 +1258,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
  // Push the frame pointer, context, state, and code object.
  if (kind == StackHandler::JS_ENTRY) {
  mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
+ mov(ip, Operand::Zero()); // NULL frame pointer.
  stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
  } else {
  stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
@@ -1334,35 +1382,36 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
  #ifdef DEBUG
- cmp(scratch, Operand(0, RelocInfo::NONE));
+ cmp(scratch, Operand::Zero());
  Check(ne, "we should not have an empty lexical context");
  #endif

- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

- // Check the context is a global context.
+ // Check the context is a native context.
  if (emit_debug_code()) {
  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
  // Cannot use ip as a temporary in this verification code. Due to the fact
  // that ip is clobbered as part of cmp with an object Operand.
  push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
+ // Read the first word and compare to the native_context_map.
  ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
  cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
+ Check(eq, "JSGlobalObject::native_context should be a native context.");
  pop(holder_reg); // Restore holder.
  }

  // Check if both contexts are the same.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

- // Check the context is a global context.
+ // Check the context is a native context.
  if (emit_debug_code()) {
  // TODO(119): avoid push(holder_reg)/pop(holder_reg)
  // Cannot use ip as a temporary in this verification code. Due to the fact
@@ -1374,13 +1423,13 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
  Check(ne, "JSGlobalProxy::context() should not be null.");

  ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
  cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::global_context should be a global context.");
+ Check(eq, "JSGlobalObject::native_context should be a native context.");
  // Restore ip is not needed. ip is reloaded below.
  pop(holder_reg); // Restore holder.
  // Restore ip to holder's context.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  }

  // Check that the security token in the calling global object is
@@ -1553,7 +1602,11 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
  Register topaddr = scratch1;
  Register obj_size_reg = scratch2;
  mov(topaddr, Operand(new_space_allocation_top));
- mov(obj_size_reg, Operand(object_size));
+ Operand obj_size_operand = Operand(object_size);
+ if (!obj_size_operand.is_single_instruction(this)) {
+ // We are about to steal IP, so we need to load this value first
+ mov(obj_size_reg, obj_size_operand);
+ }

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
@@ -1573,9 +1626,27 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
  ldr(ip, MemOperand(topaddr, limit - top));
  }

+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
- add(scratch2, result, Operand(obj_size_reg), SetCC);
+ if (obj_size_operand.is_single_instruction(this)) {
+ // We can add the size as an immediate
+ add(scratch2, result, obj_size_operand, SetCC);
+ } else {
+ // Doesn't fit in an immediate, we have to use the register
+ add(scratch2, result, obj_size_reg, SetCC);
+ }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);
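
The DOUBLE_ALIGNMENT path aligns the bump pointer before the real allocation: if `result` is only pointer-aligned, a one-word filler object is written and the pointer advanced past it, so the heap stays walkable and the object lands on a double boundary. A hedged C++ sketch of the same idea; the filler helper is a simplified stand-in, not V8's API:

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in: the real code stores one_pointer_filler_map here
    // so the GC can still iterate over the padding word.
    void WriteOnePointerFiller(char* at) {
      *reinterpret_cast<std::uintptr_t*>(at) = 0;
    }

    char* AllocateDoubleAligned(char*& top, std::size_t size) {
      const std::uintptr_t kDoubleAlignmentMask = sizeof(double) - 1;
      if (reinterpret_cast<std::uintptr_t>(top) & kDoubleAlignmentMask) {
        WriteOnePointerFiller(top);  // safe: the heap limit is itself aligned
        top += sizeof(void*);        // kDoubleSize / 2, as in the hunk
      }
      char* result = top;
      top += size;
      return result;
    }
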
@@ -1652,6 +1723,18 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
  ldr(ip, MemOperand(topaddr, limit - top));
  }

+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
@@ -1737,10 +1820,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
  Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kCharSize == 1);
  add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+ Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
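
The size computation above is the standard round-up-to-alignment idiom: add the alignment mask, then clear the low bits. In C (the 8-byte alignment is an assumption for illustration; V8 uses kObjectAlignment):

    #include <stddef.h>

    size_t aligned_string_size(size_t length, size_t header_size) {
      const size_t kObjectAlignmentMask = 8 - 1;
      return (length + header_size + kObjectAlignmentMask)
             & ~kObjectAlignmentMask;
    }
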
@@ -1906,13 +1989,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,

  void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
  Register key_reg,
- Register receiver_reg,
  Register elements_reg,
  Register scratch1,
  Register scratch2,
  Register scratch3,
  Register scratch4,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
  Label smi_value, maybe_nan, have_double_value, is_nan, done;
  Register mantissa_reg = scratch2;
  Register exponent_reg = scratch3;
@@ -1939,8 +2022,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
  bind(&have_double_value);
  add(scratch1, elements_reg,
  Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ str(mantissa_reg, FieldMemOperand(
+ scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+ uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+ sizeof(kHoleNanLower32);
  str(exponent_reg, FieldMemOperand(scratch1, offset));
  jmp(&done);

@@ -1949,7 +2034,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
  // it's an Infinity, and the non-NaN code path applies.
  b(gt, &is_nan);
  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- cmp(mantissa_reg, Operand(0));
+ cmp(mantissa_reg, Operand::Zero());
  b(eq, &have_double_value);
  bind(&is_nan);
  // Load canonical NaN for storing into the double array.
@@ -1961,19 +2046,20 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,

  bind(&smi_value);
  add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+ elements_offset));
  add(scratch1, scratch1,
  Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
  // scratch1 is now effective address of the double element

  FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
  destination = FloatingPointHelper::kVFPRegisters;
  } else {
  destination = FloatingPointHelper::kCoreRegisters;
  }

- Register untagged_value = receiver_reg;
+ Register untagged_value = elements_reg;
  SmiUntag(untagged_value, value_reg);
  FloatingPointHelper::ConvertIntToDouble(this,
  untagged_value,
@@ -1984,7 +2070,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
  scratch4,
  s2);
  if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
  vstr(d0, scratch1, 0);
  } else {
  str(mantissa_reg, MemOperand(scratch1, 0));
@@ -2000,7 +2086,15 @@ void MacroAssembler::CompareMap(Register obj,
  Label* early_success,
  CompareMapMode mode) {
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- cmp(scratch, Operand(map));
+ CompareMap(scratch, map, early_success, mode);
+ }
+
+
+ void MacroAssembler::CompareMap(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode) {
+ cmp(obj_map, Operand(map));
  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
  ElementsKind kind = map->elements_kind();
  if (IsFastElementsKind(kind)) {
@@ -2008,10 +2102,10 @@ void MacroAssembler::CompareMap(Register obj,
  Map* current_map = *map;
  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
  kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind, NULL);
+ current_map = current_map->LookupElementsTransitionMap(kind);
  if (!current_map) break;
  b(eq, early_success);
- cmp(scratch, Operand(Handle<Map>(current_map)));
+ cmp(obj_map, Operand(Handle<Map>(current_map)));
  }
  }
  }
@@ -2125,9 +2219,11 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
  }


- void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
+ void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
+ Condition cond) {
  ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
  }

@@ -2162,19 +2258,35 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
  add(r6, r6, Operand(1));
  str(r6, MemOperand(r7, kLevelOffset));

+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, r0);
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub;
  stub.GenerateCall(this, function);

+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, r0);
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  // If result is non-zero, dereference to get the result value
  // otherwise set it to undefined.
- cmp(r0, Operand(0));
+ cmp(r0, Operand::Zero());
  LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
  ldr(r0, MemOperand(r0), ne);

@@ -2323,8 +2435,8 @@ void MacroAssembler::ConvertToInt32(Register source,
  Register scratch2,
  DwVfpRegister double_scratch,
  Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
  sub(scratch, source, Operand(kHeapObjectTag));
  vldr(double_scratch, scratch, HeapNumber::kValueOffset);
  vcvt_s32_f64(double_scratch.low(), double_scratch);
@@ -2353,7 +2465,7 @@ void MacroAssembler::ConvertToInt32(Register source,
  HeapNumber::kExponentBits);
  // Load dest with zero. We use this either for the final shift or
  // for the answer.
- mov(dest, Operand(0, RelocInfo::NONE));
+ mov(dest, Operand::Zero());
  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
  // the exponent that we are fastest at and also the highest exponent we can
@@ -2407,23 +2519,44 @@ void MacroAssembler::ConvertToInt32(Register source,
  // Move down according to the exponent.
  mov(dest, Operand(scratch, LSR, dest));
  // Fix sign if sign bit was set.
- rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ rsb(dest, dest, Operand::Zero(), LeaveCC, ne);
  bind(&done);
  }
  }

+ void MacroAssembler::TryFastDoubleToInt32(Register result,
+ DwVfpRegister double_input,
+ DwVfpRegister double_scratch,
+ Label* done) {
+ ASSERT(!double_input.is(double_scratch));
+
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vmov(result, double_scratch.low());
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ VFPCompareAndSetFlags(double_input, double_scratch);
+ b(eq, done);
+ }
+
+
  void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- SwVfpRegister result,
+ Register result,
  DwVfpRegister double_input,
- Register scratch1,
- Register scratch2,
+ Register scratch,
+ DwVfpRegister double_scratch,
  CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
- CpuFeatures::Scope scope(VFP3);
- Register prev_fpscr = scratch1;
- Register scratch = scratch2;
+ ASSERT(!result.is(scratch));
+ ASSERT(!double_input.is(double_scratch));
+
+ ASSERT(CpuFeatures::IsSupported(VFP2));
+ CpuFeatures::Scope scope(VFP2);
+ Register prev_fpscr = result;
+ Label done;
+
+ // Test for values that can be exactly represented as a signed 32-bit integer.
+ TryFastDoubleToInt32(result, double_input, double_scratch, &done);

+ // Convert to integer, respecting rounding mode.
  int32_t check_inexact_conversion =
  (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;

@@ -2445,7 +2578,7 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
  vmsr(scratch);

  // Convert the argument to an integer.
- vcvt_s32_f64(result,
+ vcvt_s32_f64(double_scratch.low(),
  double_input,
  (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
  : kFPSCRRounding);
@@ -2454,8 +2587,12 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
  vmrs(scratch);
  // Restore FPSCR.
  vmsr(prev_fpscr);
+ // Move the converted value into the result register.
+ vmov(result, double_scratch.low());
  // Check for vfp exceptions.
  tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
+
+ bind(&done);
  }
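
TryFastDoubleToInt32 implements the classic exactness test: convert double to int32, convert back, and compare; only when the round trip is inexact (fractional values, out-of-range values, NaN) does EmitVFPTruncate fall through to the slower FPSCR-driven path. The same test in portable C++, with the caveat that an out-of-range cast is undefined behavior in C++ where the VFP instruction saturates, so the sketch assumes in-range input:

    #include <cstdint>

    bool TryFastDoubleToInt32(double input, int32_t* result) {
      int32_t converted = static_cast<int32_t>(input);  // like vcvt_s32_f64
      if (static_cast<double>(converted) == input) {    // like the VFP compare
        *result = converted;
        return true;  // exact; also rejects NaN, since NaN compares unequal
      }
      return false;   // inexact: take the slow path
    }
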
@@ -2473,7 +2610,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,

  // Check for Infinity and NaNs, which should return 0.
  cmp(result, Operand(HeapNumber::kExponentMask));
- mov(result, Operand(0), LeaveCC, eq);
+ mov(result, Operand::Zero(), LeaveCC, eq);
  b(eq, &done);

  // Express exponent as delta to (number of mantissa bits + 31).
@@ -2485,7 +2622,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  b(le, &normal_exponent);
- mov(result, Operand(0));
+ mov(result, Operand::Zero());
  b(&done);

  bind(&normal_exponent);
@@ -2513,7 +2650,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
  b(&pos_shift, ge);

  // Negate scratch.
- rsb(scratch, scratch, Operand(0));
+ rsb(scratch, scratch, Operand::Zero());
  mov(input_low, Operand(input_low, LSL, scratch));
  b(&shift_done);

@@ -2523,10 +2660,10 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
  bind(&shift_done);
  orr(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
- cmp(sign, Operand(0));
+ cmp(sign, Operand::Zero());
  result = sign;
  sign = no_reg;
- rsb(result, input_high, Operand(0), LeaveCC, ne);
+ rsb(result, input_high, Operand::Zero(), LeaveCC, ne);
  mov(result, input_high, LeaveCC, eq);
  bind(&done);
  }
@@ -2534,28 +2671,44 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,

  void MacroAssembler::EmitECMATruncate(Register result,
  DwVfpRegister double_input,
- SwVfpRegister single_scratch,
+ DwVfpRegister double_scratch,
  Register scratch,
  Register input_high,
  Register input_low) {
- CpuFeatures::Scope scope(VFP3);
+ CpuFeatures::Scope scope(VFP2);
  ASSERT(!input_high.is(result));
  ASSERT(!input_low.is(result));
  ASSERT(!input_low.is(input_high));
  ASSERT(!scratch.is(result) &&
  !scratch.is(input_high) &&
  !scratch.is(input_low));
- ASSERT(!single_scratch.is(double_input.low()) &&
- !single_scratch.is(double_input.high()));
+ ASSERT(!double_input.is(double_scratch));

  Label done;

+ // Test if the value can be exactly represented as a signed integer.
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vmov(result, double_scratch.low());
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ // Note: this comparison is cheaper than reading the FPSCR exception bits.
+ VFPCompareAndSetFlags(double_input, double_scratch);
+ b(eq, &done);
+
+ // Check the exception flags. If they are not set, we are done.
+ // If they are set, it could be because of the conversion above, or because
+ // they were set before this code.
+ vmrs(scratch);
+ tst(scratch, Operand(kVFPOverflowExceptionBit |
+ kVFPUnderflowExceptionBit |
+ kVFPInvalidOpExceptionBit));
+ b(eq, &done);
+
  // Clear cumulative exception flags.
- ClearFPSCRBits(kVFPExceptionMask, scratch);
+ bic(scratch, scratch, Operand(kVFPExceptionMask));
+ vmsr(scratch);
  // Try a conversion to a signed integer.
- vcvt_s32_f64(single_scratch, double_input);
- vmov(result, single_scratch);
- // Retrieve he FPSCR.
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ // Retrieve the FPSCR.
  vmrs(scratch);
  // Check for overflow and NaNs.
  tst(scratch, Operand(kVFPOverflowExceptionBit |
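
EmitECMATruncate now applies the same exact-conversion fast path and only consults the FPSCR exception bits when the compare fails. The semantics it ultimately implements are ECMA-262 ToInt32, which wraps modulo 2^32 instead of saturating. A portable approximation, assuming |d| < 2^63 so the intermediate cast is defined (the uint32-to-int32 narrowing is implementation-defined before C++20 but wraps on two's-complement targets):

    #include <cmath>
    #include <cstdint>

    int32_t EcmaToInt32(double d) {
      if (std::isnan(d) || std::isinf(d) || d == 0.0) return 0;
      // Truncate toward zero, then wrap modulo 2^32.
      return static_cast<int32_t>(
          static_cast<uint32_t>(static_cast<int64_t>(d)));
    }
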
@@ -2577,7 +2730,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
  void MacroAssembler::GetLeastBitsFromSmi(Register dst,
  Register src,
  int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
  ubfx(dst, src, kSmiTagSize, num_least_bits);
  } else {
  mov(dst, Operand(src, ASR, kSmiTagSize));
@@ -2625,7 +2778,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  mov(r0, Operand(function->nargs));
  mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub stub(1, mode);
  CallStub(&stub);
  }

@@ -2695,7 +2851,8 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
  void MacroAssembler::GetBuiltinFunction(Register target,
  Builtins::JavaScript id) {
  // Load the builtins object into target register.
- ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ldr(target,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  ldr(target, FieldMemOperand(target,
@@ -2861,8 +3018,9 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
  Register scratch,
  Label* no_map_match) {
  // Load the global or builtins object from the current context.
- ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ ldr(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  ldr(scratch,
@@ -2870,7 +3028,8 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  size_t offset = expected_kind * kPointerSize +
  FixedArrayBase::kHeaderSize;
- cmp(map_in_out, scratch);
+ ldr(ip, FieldMemOperand(scratch, offset));
+ cmp(map_in_out, ip);
  b(ne, no_map_match);

  // Use the transitioned cached map.
@@ -2907,11 +3066,12 @@ void MacroAssembler::LoadInitialArrayMap(

  void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
- ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ ldr(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
  ldr(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
  ldr(function, MemOperand(function, Context::SlotOffset(index)));
  }

@@ -2991,38 +3151,46 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
  }


- void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is a smi");
+ void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi");
+ }
  }


- void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(eq, "Operand is not smi");
+ void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(eq, "Operand is not smi");
+ }
  }


- void MacroAssembler::AbortIfNotString(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Assert(ne, "Operand is not a string");
- push(object);
- ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(lo, "Operand is not a string");
+ void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, "Operand is a smi and not a string");
+ push(object);
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(lo, "Operand is not a string");
+ }
  }



- void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- CompareRoot(src, root_value_index);
- Assert(eq, message);
+ void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ if (emit_debug_code()) {
+ CompareRoot(src, root_value_index);
+ Check(eq, message);
+ }
  }
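
The Abort* helpers become Assert* and gain an `emit_debug_code()` guard, so the checks cost nothing in release code while `Check()` remains unconditional. A hedged sketch of the division of labor; Check's body here is paraphrased from the surrounding sources, not from this diff:

    // Check: always emitted; branches over an Abort when the condition holds.
    void MacroAssembler::Check(Condition cond, const char* msg) {
      Label ok;
      b(cond, &ok);  // condition holds: skip the abort
      Abort(msg);    // unconditional failure path
      bind(&ok);
    }
    // Assert* wraps the same test in `if (emit_debug_code())`,
    // so release builds emit no verification code at all.
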
@@ -3080,7 +3248,8 @@ void MacroAssembler::AllocateHeapNumber(Register result,
  Register scratch1,
  Register scratch2,
  Register heap_number_map,
- Label* gc_required) {
+ Label* gc_required,
+ TaggingMode tagging_mode) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  AllocateInNewSpace(HeapNumber::kSize,
@@ -3088,11 +3257,16 @@ void MacroAssembler::AllocateHeapNumber(Register result,
  scratch1,
  scratch2,
  gc_required,
- TAG_OBJECT);
+ tagging_mode == TAG_RESULT ? TAG_OBJECT :
+ NO_ALLOCATION_FLAGS);

  // Store heap number map in the allocated object.
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ if (tagging_mode == TAG_RESULT) {
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
  }

@@ -3144,7 +3318,7 @@ void MacroAssembler::CopyBytes(Register src,

  // Align src before copying in word size chunks.
  bind(&align_loop);
- cmp(length, Operand(0));
+ cmp(length, Operand::Zero());
  b(eq, &done);
  bind(&align_loop_1);
  tst(src, Operand(kPointerSize - 1));
@@ -3163,23 +3337,23 @@ void MacroAssembler::CopyBytes(Register src,
  cmp(length, Operand(kPointerSize));
  b(lt, &byte_loop);
  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
- #if CAN_USE_UNALIGNED_ACCESSES
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
- #else
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- #endif
+ if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
+ } else {
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ }
  sub(length, length, Operand(kPointerSize));
  b(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
- cmp(length, Operand(0));
+ cmp(length, Operand::Zero());
  b(eq, &done);
  bind(&byte_loop_1);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
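
CopyBytes trades the compile-time `#if CAN_USE_UNALIGNED_ACCESSES` for a runtime `CpuFeatures::IsSupported(UNALIGNED_ACCESSES)` test, so one binary and one snapshot can pick the fast word store or the byte-by-byte fallback per CPU. The two store strategies in plain C++ (little-endian byte order, matching the LSR-by-8 sequence):

    #include <cstdint>
    #include <cstring>

    void StoreWord(uint8_t* dst, uint32_t word, bool unaligned_ok) {
      if (unaligned_ok) {
        std::memcpy(dst, &word, sizeof word);  // one word store
      } else {
        for (int i = 0; i < 4; ++i) {          // four byte stores, LSB first
          dst[i] = static_cast<uint8_t>(word >> (8 * i));
        }
      }
    }
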
@@ -3217,7 +3391,7 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
  // Order of the next two lines is important: zeros register
  // can be the same as source register.
  Move(scratch, source);
- mov(zeros, Operand(0, RelocInfo::NONE));
+ mov(zeros, Operand::Zero());
  // Top 16.
  tst(scratch, Operand(0xffff0000));
  add(zeros, zeros, Operand(16), LeaveCC, eq);
@@ -3241,6 +3415,13 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
  }


+ void MacroAssembler::CheckFor32DRegs(Register scratch) {
+ mov(scratch, Operand(ExternalReference::cpu_features()));
+ ldr(scratch, MemOperand(scratch));
+ tst(scratch, Operand(1u << VFP32DREGS));
+ }
+
+
  void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
  Register first,
  Register second,
@@ -3279,9 +3460,9 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
  if (use_eabi_hardfloat()) {
  // In the hard floating point calling convention, we can use
  // all double registers to pass doubles.
- if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ if (num_double_arguments > DoubleRegister::NumRegisters()) {
  stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ 2 * (num_double_arguments - DoubleRegister::NumRegisters());
  }
  } else {
  // In the soft floating point calling convention, every double
@@ -3322,7 +3503,8 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
  }


- void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
  if (use_eabi_hardfloat()) {
  Move(d0, dreg);
  } else {
@@ -3331,8 +3513,9 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
  }


- void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
+ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
+ DwVfpRegister dreg2) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
  if (use_eabi_hardfloat()) {
  if (dreg2.is(d0)) {
  ASSERT(!dreg1.is(d1));
@@ -3349,8 +3532,9 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
  }


- void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
  Register reg) {
+ ASSERT(CpuFeatures::IsSupported(VFP2));
  if (use_eabi_hardfloat()) {
  Move(d0, dreg);
  Move(r0, reg);
@@ -3452,7 +3636,7 @@ void MacroAssembler::CheckPageFlag(
  int mask,
  Condition cc,
  Label* condition_met) {
- and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+ Bfc(scratch, object, 0, kPageSizeBits);
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);
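
CheckPageFlag finds the page header for any heap pointer by clearing the pointer's low bits; using `Bfc(scratch, object, 0, kPageSizeBits)` instead of an AND with `~kPageAlignmentMask` expresses the page size directly as a bit count. The address arithmetic in C++ (the 20-bit page size is an assumption for illustration):

    #include <cstdint>

    std::uintptr_t PageBase(std::uintptr_t addr) {
      const int kPageSizeBits = 20;  // assumed 1 MB pages, for illustration
      return addr & ~((std::uintptr_t(1) << kPageSizeBits) - 1);
    }
    // The flags word then lives at a fixed offset from this base
    // (MemoryChunk::kFlagsOffset in the hunk above).
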
@@ -3601,7 +3785,7 @@ void MacroAssembler::EnsureNotWhite(
  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
  tst(instance_type, Operand(kStringEncodingMask));
@@ -3631,8 +3815,8 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {


  void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg) {
+ DwVfpRegister input_reg,
+ DwVfpRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;
@@ -3642,12 +3826,12 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
  b(gt, &above_zero);

  // Double value is less than zero, NaN or Inf, return 0.
- mov(result_reg, Operand(0));
+ mov(result_reg, Operand::Zero());
  b(al, &done);

  // Double value is >= 255, return 255.
  bind(&above_zero);
- Vmov(temp_double_reg, 255.0);
+ Vmov(temp_double_reg, 255.0, result_reg);
  VFPCompareAndSetFlags(input_reg, temp_double_reg);
  b(le, &in_bounds);
  mov(result_reg, Operand(255));
  mov(result_reg, Operand(255));
@@ -3655,71 +3839,99 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3655
3839
 
3656
3840
  // In 0-255 range, round and truncate.
3657
3841
  bind(&in_bounds);
3658
- Vmov(temp_double_reg, 0.5);
3659
- vadd(temp_double_reg, input_reg, temp_double_reg);
3660
- vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
3661
- vmov(result_reg, temp_double_reg.low());
3842
+ // Save FPSCR.
3843
+ vmrs(ip);
3844
+ // Set rounding mode to round to the nearest integer by clearing bits[23:22].
3845
+ bic(result_reg, ip, Operand(kVFPRoundingModeMask));
3846
+ vmsr(result_reg);
3847
+ vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
3848
+ vmov(result_reg, input_reg.low());
3849
+ // Restore FPSCR.
3850
+ vmsr(ip);
3662
3851
  bind(&done);
3663
3852
  }
3664
3853
 
3665
3854
 
3666
3855
  void MacroAssembler::LoadInstanceDescriptors(Register map,
3667
3856
  Register descriptors) {
3668
- ldr(descriptors,
3669
- FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
3670
- Label not_smi;
3671
- JumpIfNotSmi(descriptors, &not_smi);
3672
- mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
3673
- bind(&not_smi);
3857
+ ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3858
+ }
3859
+
3860
+
3861
+ void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3862
+ ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3863
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3864
+ }
3865
+
3866
+
3867
+ void MacroAssembler::EnumLength(Register dst, Register map) {
3868
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3869
+ ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3870
+ and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
3674
3871
  }
3675
3872
 
3676
3873
 
3677
3874
  void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3678
- Label next;
3679
- // Preload a couple of values used in the loop.
3680
3875
  Register empty_fixed_array_value = r6;
3681
3876
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3682
- Register empty_descriptor_array_value = r7;
3683
- LoadRoot(empty_descriptor_array_value,
3684
- Heap::kEmptyDescriptorArrayRootIndex);
3685
- mov(r1, r0);
3686
- bind(&next);
3877
+ Label next, start;
3878
+ mov(r2, r0);
3687
3879
 
3688
- // Check that there are no elements. Register r1 contains the
3689
- // current JS object we've reached through the prototype chain.
3690
- ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
3691
- cmp(r2, empty_fixed_array_value);
3692
- b(ne, call_runtime);
3880
+ // Check if the enum length field is properly initialized, indicating that
3881
+ // there is an enum cache.
3882
+ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3693
3883
 
3694
- // Check that instance descriptors are not empty so that we can
3695
- // check for an enum cache. Leave the map in r2 for the subsequent
3696
- // prototype load.
3697
- ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
3698
- ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
3699
- JumpIfSmi(r3, call_runtime);
3884
+ EnumLength(r3, r1);
3885
+ cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
3886
+ b(eq, call_runtime);
3700
3887
 
3701
- // Check that there is an enum cache in the non-empty instance
3702
- // descriptors (r3). This is the case if the next enumeration
3703
- // index field does not contain a smi.
3704
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
3705
- JumpIfSmi(r3, call_runtime);
3888
+ jmp(&start);
3889
+
3890
+ bind(&next);
3891
+ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3706
3892
 
3707
3893
  // For all objects but the receiver, check that the cache is empty.
3708
- Label check_prototype;
3709
- cmp(r1, r0);
3710
- b(eq, &check_prototype);
3711
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
3712
- cmp(r3, empty_fixed_array_value);
3894
+ EnumLength(r3, r1);
3895
+ cmp(r3, Operand(Smi::FromInt(0)));
3896
+ b(ne, call_runtime);
3897
+
3898
+ bind(&start);
3899
+
3900
+ // Check that there are no elements. Register r2 contains the current JS
3901
+ // object we've reached through the prototype chain.
3902
+ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3903
+ cmp(r2, empty_fixed_array_value);
3713
3904
  b(ne, call_runtime);
3714
3905
 
3715
- // Load the prototype from the map and loop if non-null.
3716
- bind(&check_prototype);
3717
- ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
3718
- cmp(r1, null_value);
3906
+ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3907
+ cmp(r2, null_value);
3719
3908
  b(ne, &next);
3720
3909
  }
3721
3910
 
3722
3911
 
3912
+ void MacroAssembler::TestJSArrayForAllocationSiteInfo(
3913
+ Register receiver_reg,
3914
+ Register scratch_reg) {
3915
+ Label no_info_available;
3916
+ ExternalReference new_space_start =
3917
+ ExternalReference::new_space_start(isolate());
3918
+ ExternalReference new_space_allocation_top =
3919
+ ExternalReference::new_space_allocation_top_address(isolate());
3920
+ add(scratch_reg, receiver_reg,
3921
+ Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
3922
+ cmp(scratch_reg, Operand(new_space_start));
3923
+ b(lt, &no_info_available);
3924
+ mov(ip, Operand(new_space_allocation_top));
3925
+ ldr(ip, MemOperand(ip));
3926
+ cmp(scratch_reg, ip);
3927
+ b(gt, &no_info_available);
3928
+ ldr(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
3929
+ cmp(scratch_reg,
3930
+ Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
3931
+ bind(&no_info_available);
3932
+ }
3933
+
3934
+
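Taken together, the rewritten `CheckEnumCache` walks the prototype chain using only the per-map enum-length bit field: the receiver just needs an initialized field, every prototype needs an empty cache, and no object on the chain may carry elements. The control flow restated as a hypothetical C++ walk (the types and the sentinel value are invented stand-ins for the V8 internals):

    // Hypothetical stand-ins for V8-internal state; illustration only.
    struct JSObject {
      int enum_length;            // decoded Map::EnumLengthBits of the map
      bool has_empty_elements;    // elements == empty_fixed_array?
      const JSObject* prototype;  // nullptr plays the role of null_value
    };

    // Returns false exactly where the assembly branches to call_runtime.
    bool CheckEnumCache(const JSObject* receiver) {
      const int kInvalidEnumCache = -1;  // assumed sentinel value
      if (receiver->enum_length == kInvalidEnumCache) return false;
      for (const JSObject* current = receiver;;) {
        if (!current->has_empty_elements) return false;  // b(ne, call_runtime)
        current = current->prototype;
        if (current == nullptr) return true;             // chain fully checked
        if (current->enum_length != 0) return false;     // non-empty cache
      }
    }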
 #ifdef DEBUG
 bool AreAliased(Register reg1,
                 Register reg2,
@@ -3746,7 +3958,6 @@ bool AreAliased(Register reg1,

 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
-      instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
       masm_(NULL, address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
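The `instructions_` field dropped in this hunk was never read; `size_` already carries the byte count. For context, a hypothetical sketch of the CodePatcher pattern: point an emitter at existing code, overwrite a fixed number of instructions, and check on destruction that the patch filled exactly the reserved space (the real class additionally flushes the instruction cache):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    // Hypothetical sketch of the CodePatcher idea, not the V8 class itself.
    class CodePatcherSketch {
     public:
      static const size_t kInstrSize = 4;  // fixed-width ARM instructions
      CodePatcherSketch(unsigned char* address, int instructions)
          : address_(address), size_(instructions * kInstrSize), cursor_(0) {}
      ~CodePatcherSketch() { assert(cursor_ == size_); }  // patch fully written

      void Emit(unsigned int instr) {
        assert(cursor_ + kInstrSize <= size_);  // stay inside the patch region
        std::memcpy(address_ + cursor_, &instr, kInstrSize);
        cursor_ += kInstrSize;
      }

     private:
      unsigned char* address_;
      size_t size_;
      size_t cursor_;
    };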