libv8 3.11.8.17 → 3.16.14.0
- checksums.yaml +4 -4
- data/.travis.yml +1 -2
- data/Gemfile +1 -1
- data/Rakefile +6 -7
- data/lib/libv8/version.rb +1 -1
- data/vendor/v8/.gitignore +24 -3
- data/vendor/v8/AUTHORS +7 -0
- data/vendor/v8/ChangeLog +839 -0
- data/vendor/v8/DEPS +1 -1
- data/vendor/v8/Makefile.android +92 -0
- data/vendor/v8/OWNERS +11 -0
- data/vendor/v8/PRESUBMIT.py +71 -0
- data/vendor/v8/SConstruct +34 -39
- data/vendor/v8/build/android.gypi +56 -37
- data/vendor/v8/build/common.gypi +112 -30
- data/vendor/v8/build/gyp_v8 +1 -1
- data/vendor/v8/build/standalone.gypi +15 -11
- data/vendor/v8/include/v8-debug.h +9 -1
- data/vendor/v8/include/v8-preparser.h +4 -3
- data/vendor/v8/include/v8-profiler.h +25 -25
- data/vendor/v8/include/v8-testing.h +4 -3
- data/vendor/v8/include/v8.h +994 -540
- data/vendor/v8/preparser/preparser-process.cc +3 -3
- data/vendor/v8/samples/lineprocessor.cc +20 -27
- data/vendor/v8/samples/process.cc +18 -14
- data/vendor/v8/samples/shell.cc +16 -15
- data/vendor/v8/src/SConscript +15 -14
- data/vendor/v8/src/accessors.cc +169 -77
- data/vendor/v8/src/accessors.h +4 -0
- data/vendor/v8/src/allocation-inl.h +2 -2
- data/vendor/v8/src/allocation.h +7 -7
- data/vendor/v8/src/api.cc +810 -497
- data/vendor/v8/src/api.h +85 -60
- data/vendor/v8/src/arm/assembler-arm-inl.h +179 -22
- data/vendor/v8/src/arm/assembler-arm.cc +633 -264
- data/vendor/v8/src/arm/assembler-arm.h +264 -197
- data/vendor/v8/src/arm/builtins-arm.cc +117 -27
- data/vendor/v8/src/arm/code-stubs-arm.cc +1241 -700
- data/vendor/v8/src/arm/code-stubs-arm.h +35 -138
- data/vendor/v8/src/arm/codegen-arm.cc +285 -16
- data/vendor/v8/src/arm/codegen-arm.h +22 -0
- data/vendor/v8/src/arm/constants-arm.cc +5 -3
- data/vendor/v8/src/arm/constants-arm.h +24 -11
- data/vendor/v8/src/arm/debug-arm.cc +3 -3
- data/vendor/v8/src/arm/deoptimizer-arm.cc +382 -92
- data/vendor/v8/src/arm/disasm-arm.cc +61 -12
- data/vendor/v8/src/arm/frames-arm.h +0 -14
- data/vendor/v8/src/arm/full-codegen-arm.cc +332 -304
- data/vendor/v8/src/arm/ic-arm.cc +180 -259
- data/vendor/v8/src/arm/lithium-arm.cc +364 -316
- data/vendor/v8/src/arm/lithium-arm.h +512 -275
- data/vendor/v8/src/arm/lithium-codegen-arm.cc +1768 -809
- data/vendor/v8/src/arm/lithium-codegen-arm.h +97 -35
- data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +12 -5
- data/vendor/v8/src/arm/macro-assembler-arm.cc +439 -228
- data/vendor/v8/src/arm/macro-assembler-arm.h +116 -70
- data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +54 -44
- data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +3 -10
- data/vendor/v8/src/arm/simulator-arm.cc +272 -238
- data/vendor/v8/src/arm/simulator-arm.h +38 -8
- data/vendor/v8/src/arm/stub-cache-arm.cc +522 -895
- data/vendor/v8/src/array.js +101 -70
- data/vendor/v8/src/assembler.cc +270 -19
- data/vendor/v8/src/assembler.h +110 -15
- data/vendor/v8/src/ast.cc +79 -69
- data/vendor/v8/src/ast.h +255 -301
- data/vendor/v8/src/atomicops.h +7 -1
- data/vendor/v8/src/atomicops_internals_tsan.h +335 -0
- data/vendor/v8/src/bootstrapper.cc +481 -418
- data/vendor/v8/src/bootstrapper.h +4 -4
- data/vendor/v8/src/builtins.cc +498 -311
- data/vendor/v8/src/builtins.h +75 -47
- data/vendor/v8/src/checks.cc +2 -1
- data/vendor/v8/src/checks.h +8 -0
- data/vendor/v8/src/code-stubs-hydrogen.cc +253 -0
- data/vendor/v8/src/code-stubs.cc +249 -84
- data/vendor/v8/src/code-stubs.h +501 -169
- data/vendor/v8/src/codegen.cc +36 -18
- data/vendor/v8/src/codegen.h +25 -3
- data/vendor/v8/src/collection.js +54 -17
- data/vendor/v8/src/compilation-cache.cc +24 -16
- data/vendor/v8/src/compilation-cache.h +15 -6
- data/vendor/v8/src/compiler.cc +497 -195
- data/vendor/v8/src/compiler.h +246 -38
- data/vendor/v8/src/contexts.cc +64 -24
- data/vendor/v8/src/contexts.h +60 -29
- data/vendor/v8/src/conversions-inl.h +24 -14
- data/vendor/v8/src/conversions.h +7 -4
- data/vendor/v8/src/counters.cc +21 -12
- data/vendor/v8/src/counters.h +44 -16
- data/vendor/v8/src/cpu-profiler.h +1 -1
- data/vendor/v8/src/d8-debug.cc +2 -2
- data/vendor/v8/src/d8-readline.cc +13 -2
- data/vendor/v8/src/d8.cc +681 -273
- data/vendor/v8/src/d8.gyp +4 -4
- data/vendor/v8/src/d8.h +38 -18
- data/vendor/v8/src/d8.js +0 -617
- data/vendor/v8/src/data-flow.h +55 -0
- data/vendor/v8/src/date.js +1 -42
- data/vendor/v8/src/dateparser-inl.h +5 -1
- data/vendor/v8/src/debug-agent.cc +10 -15
- data/vendor/v8/src/debug-debugger.js +147 -149
- data/vendor/v8/src/debug.cc +323 -164
- data/vendor/v8/src/debug.h +26 -14
- data/vendor/v8/src/deoptimizer.cc +765 -290
- data/vendor/v8/src/deoptimizer.h +130 -28
- data/vendor/v8/src/disassembler.cc +10 -4
- data/vendor/v8/src/elements-kind.cc +7 -2
- data/vendor/v8/src/elements-kind.h +19 -0
- data/vendor/v8/src/elements.cc +607 -285
- data/vendor/v8/src/elements.h +36 -13
- data/vendor/v8/src/execution.cc +52 -31
- data/vendor/v8/src/execution.h +4 -4
- data/vendor/v8/src/extensions/externalize-string-extension.cc +5 -4
- data/vendor/v8/src/extensions/gc-extension.cc +5 -1
- data/vendor/v8/src/extensions/statistics-extension.cc +153 -0
- data/vendor/v8/src/{inspector.h → extensions/statistics-extension.h} +12 -23
- data/vendor/v8/src/factory.cc +101 -134
- data/vendor/v8/src/factory.h +36 -31
- data/vendor/v8/src/flag-definitions.h +102 -25
- data/vendor/v8/src/flags.cc +9 -5
- data/vendor/v8/src/frames-inl.h +10 -0
- data/vendor/v8/src/frames.cc +116 -26
- data/vendor/v8/src/frames.h +96 -12
- data/vendor/v8/src/full-codegen.cc +219 -74
- data/vendor/v8/src/full-codegen.h +63 -21
- data/vendor/v8/src/func-name-inferrer.cc +8 -7
- data/vendor/v8/src/func-name-inferrer.h +5 -3
- data/vendor/v8/src/gdb-jit.cc +71 -57
- data/vendor/v8/src/global-handles.cc +230 -101
- data/vendor/v8/src/global-handles.h +26 -27
- data/vendor/v8/src/globals.h +17 -19
- data/vendor/v8/src/handles-inl.h +59 -12
- data/vendor/v8/src/handles.cc +180 -200
- data/vendor/v8/src/handles.h +80 -11
- data/vendor/v8/src/hashmap.h +60 -40
- data/vendor/v8/src/heap-inl.h +107 -45
- data/vendor/v8/src/heap-profiler.cc +38 -19
- data/vendor/v8/src/heap-profiler.h +24 -14
- data/vendor/v8/src/heap.cc +1123 -738
- data/vendor/v8/src/heap.h +385 -146
- data/vendor/v8/src/hydrogen-instructions.cc +700 -217
- data/vendor/v8/src/hydrogen-instructions.h +1158 -472
- data/vendor/v8/src/hydrogen.cc +3319 -1662
- data/vendor/v8/src/hydrogen.h +411 -170
- data/vendor/v8/src/ia32/assembler-ia32-inl.h +46 -16
- data/vendor/v8/src/ia32/assembler-ia32.cc +131 -61
- data/vendor/v8/src/ia32/assembler-ia32.h +115 -57
- data/vendor/v8/src/ia32/builtins-ia32.cc +99 -5
- data/vendor/v8/src/ia32/code-stubs-ia32.cc +787 -495
- data/vendor/v8/src/ia32/code-stubs-ia32.h +10 -100
- data/vendor/v8/src/ia32/codegen-ia32.cc +227 -23
- data/vendor/v8/src/ia32/codegen-ia32.h +14 -0
- data/vendor/v8/src/ia32/deoptimizer-ia32.cc +428 -87
- data/vendor/v8/src/ia32/disasm-ia32.cc +28 -1
- data/vendor/v8/src/ia32/frames-ia32.h +6 -16
- data/vendor/v8/src/ia32/full-codegen-ia32.cc +280 -272
- data/vendor/v8/src/ia32/ic-ia32.cc +150 -250
- data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +1600 -517
- data/vendor/v8/src/ia32/lithium-codegen-ia32.h +90 -24
- data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +10 -6
- data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.h +2 -2
- data/vendor/v8/src/ia32/lithium-ia32.cc +405 -302
- data/vendor/v8/src/ia32/lithium-ia32.h +526 -271
- data/vendor/v8/src/ia32/macro-assembler-ia32.cc +378 -119
- data/vendor/v8/src/ia32/macro-assembler-ia32.h +62 -28
- data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +43 -30
- data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +2 -10
- data/vendor/v8/src/ia32/stub-cache-ia32.cc +492 -678
- data/vendor/v8/src/ic-inl.h +9 -4
- data/vendor/v8/src/ic.cc +836 -923
- data/vendor/v8/src/ic.h +228 -247
- data/vendor/v8/src/incremental-marking-inl.h +26 -30
- data/vendor/v8/src/incremental-marking.cc +276 -248
- data/vendor/v8/src/incremental-marking.h +29 -37
- data/vendor/v8/src/interface.cc +34 -25
- data/vendor/v8/src/interface.h +69 -25
- data/vendor/v8/src/interpreter-irregexp.cc +2 -2
- data/vendor/v8/src/isolate.cc +382 -76
- data/vendor/v8/src/isolate.h +109 -56
- data/vendor/v8/src/json-parser.h +217 -104
- data/vendor/v8/src/json-stringifier.h +745 -0
- data/vendor/v8/src/json.js +10 -132
- data/vendor/v8/src/jsregexp-inl.h +106 -0
- data/vendor/v8/src/jsregexp.cc +517 -285
- data/vendor/v8/src/jsregexp.h +145 -117
- data/vendor/v8/src/list-inl.h +35 -22
- data/vendor/v8/src/list.h +46 -19
- data/vendor/v8/src/lithium-allocator-inl.h +22 -2
- data/vendor/v8/src/lithium-allocator.cc +85 -70
- data/vendor/v8/src/lithium-allocator.h +21 -39
- data/vendor/v8/src/lithium.cc +259 -5
- data/vendor/v8/src/lithium.h +131 -32
- data/vendor/v8/src/liveedit-debugger.js +52 -3
- data/vendor/v8/src/liveedit.cc +393 -113
- data/vendor/v8/src/liveedit.h +7 -3
- data/vendor/v8/src/log-utils.cc +4 -2
- data/vendor/v8/src/log.cc +170 -140
- data/vendor/v8/src/log.h +62 -11
- data/vendor/v8/src/macro-assembler.h +17 -0
- data/vendor/v8/src/macros.py +2 -0
- data/vendor/v8/src/mark-compact-inl.h +3 -23
- data/vendor/v8/src/mark-compact.cc +801 -830
- data/vendor/v8/src/mark-compact.h +154 -47
- data/vendor/v8/src/marking-thread.cc +85 -0
- data/vendor/v8/src/{inspector.cc → marking-thread.h} +32 -24
- data/vendor/v8/src/math.js +12 -18
- data/vendor/v8/src/messages.cc +18 -8
- data/vendor/v8/src/messages.js +314 -261
- data/vendor/v8/src/mips/assembler-mips-inl.h +58 -6
- data/vendor/v8/src/mips/assembler-mips.cc +92 -75
- data/vendor/v8/src/mips/assembler-mips.h +54 -60
- data/vendor/v8/src/mips/builtins-mips.cc +116 -17
- data/vendor/v8/src/mips/code-stubs-mips.cc +919 -556
- data/vendor/v8/src/mips/code-stubs-mips.h +22 -131
- data/vendor/v8/src/mips/codegen-mips.cc +281 -6
- data/vendor/v8/src/mips/codegen-mips.h +22 -0
- data/vendor/v8/src/mips/constants-mips.cc +2 -0
- data/vendor/v8/src/mips/constants-mips.h +12 -2
- data/vendor/v8/src/mips/deoptimizer-mips.cc +286 -50
- data/vendor/v8/src/mips/disasm-mips.cc +13 -0
- data/vendor/v8/src/mips/full-codegen-mips.cc +297 -284
- data/vendor/v8/src/mips/ic-mips.cc +182 -263
- data/vendor/v8/src/mips/lithium-codegen-mips.cc +1208 -556
- data/vendor/v8/src/mips/lithium-codegen-mips.h +72 -19
- data/vendor/v8/src/mips/lithium-gap-resolver-mips.cc +9 -2
- data/vendor/v8/src/mips/lithium-mips.cc +290 -302
- data/vendor/v8/src/mips/lithium-mips.h +463 -266
- data/vendor/v8/src/mips/macro-assembler-mips.cc +208 -115
- data/vendor/v8/src/mips/macro-assembler-mips.h +67 -24
- data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +40 -25
- data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +3 -9
- data/vendor/v8/src/mips/simulator-mips.cc +112 -40
- data/vendor/v8/src/mips/simulator-mips.h +5 -0
- data/vendor/v8/src/mips/stub-cache-mips.cc +502 -884
- data/vendor/v8/src/mirror-debugger.js +157 -30
- data/vendor/v8/src/mksnapshot.cc +88 -14
- data/vendor/v8/src/object-observe.js +235 -0
- data/vendor/v8/src/objects-debug.cc +178 -176
- data/vendor/v8/src/objects-inl.h +1333 -486
- data/vendor/v8/src/objects-printer.cc +125 -43
- data/vendor/v8/src/objects-visiting-inl.h +578 -6
- data/vendor/v8/src/objects-visiting.cc +2 -2
- data/vendor/v8/src/objects-visiting.h +172 -79
- data/vendor/v8/src/objects.cc +3533 -2885
- data/vendor/v8/src/objects.h +1352 -1131
- data/vendor/v8/src/optimizing-compiler-thread.cc +152 -0
- data/vendor/v8/src/optimizing-compiler-thread.h +111 -0
- data/vendor/v8/src/parser.cc +390 -500
- data/vendor/v8/src/parser.h +45 -33
- data/vendor/v8/src/platform-cygwin.cc +10 -21
- data/vendor/v8/src/platform-freebsd.cc +36 -41
- data/vendor/v8/src/platform-linux.cc +160 -124
- data/vendor/v8/src/platform-macos.cc +30 -27
- data/vendor/v8/src/platform-nullos.cc +17 -1
- data/vendor/v8/src/platform-openbsd.cc +19 -50
- data/vendor/v8/src/platform-posix.cc +14 -0
- data/vendor/v8/src/platform-solaris.cc +20 -53
- data/vendor/v8/src/platform-win32.cc +49 -26
- data/vendor/v8/src/platform.h +40 -1
- data/vendor/v8/src/preparser.cc +8 -5
- data/vendor/v8/src/preparser.h +2 -2
- data/vendor/v8/src/prettyprinter.cc +16 -0
- data/vendor/v8/src/prettyprinter.h +2 -0
- data/vendor/v8/src/profile-generator-inl.h +1 -0
- data/vendor/v8/src/profile-generator.cc +209 -147
- data/vendor/v8/src/profile-generator.h +15 -12
- data/vendor/v8/src/property-details.h +46 -31
- data/vendor/v8/src/property.cc +27 -46
- data/vendor/v8/src/property.h +163 -83
- data/vendor/v8/src/proxy.js +7 -2
- data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +4 -13
- data/vendor/v8/src/regexp-macro-assembler-irregexp.h +1 -2
- data/vendor/v8/src/regexp-macro-assembler-tracer.cc +1 -11
- data/vendor/v8/src/regexp-macro-assembler-tracer.h +0 -1
- data/vendor/v8/src/regexp-macro-assembler.cc +31 -14
- data/vendor/v8/src/regexp-macro-assembler.h +14 -11
- data/vendor/v8/src/regexp-stack.cc +1 -0
- data/vendor/v8/src/regexp.js +9 -8
- data/vendor/v8/src/rewriter.cc +18 -7
- data/vendor/v8/src/runtime-profiler.cc +52 -43
- data/vendor/v8/src/runtime-profiler.h +0 -25
- data/vendor/v8/src/runtime.cc +2006 -2023
- data/vendor/v8/src/runtime.h +56 -49
- data/vendor/v8/src/safepoint-table.cc +12 -18
- data/vendor/v8/src/safepoint-table.h +11 -8
- data/vendor/v8/src/scanner.cc +1 -0
- data/vendor/v8/src/scanner.h +4 -10
- data/vendor/v8/src/scopeinfo.cc +35 -9
- data/vendor/v8/src/scopeinfo.h +64 -3
- data/vendor/v8/src/scopes.cc +251 -156
- data/vendor/v8/src/scopes.h +61 -27
- data/vendor/v8/src/serialize.cc +348 -396
- data/vendor/v8/src/serialize.h +125 -114
- data/vendor/v8/src/small-pointer-list.h +11 -11
- data/vendor/v8/src/{smart-array-pointer.h → smart-pointers.h} +64 -15
- data/vendor/v8/src/snapshot-common.cc +64 -15
- data/vendor/v8/src/snapshot-empty.cc +7 -1
- data/vendor/v8/src/snapshot.h +9 -2
- data/vendor/v8/src/spaces-inl.h +17 -0
- data/vendor/v8/src/spaces.cc +477 -183
- data/vendor/v8/src/spaces.h +238 -58
- data/vendor/v8/src/splay-tree-inl.h +8 -7
- data/vendor/v8/src/splay-tree.h +24 -10
- data/vendor/v8/src/store-buffer.cc +12 -5
- data/vendor/v8/src/store-buffer.h +2 -4
- data/vendor/v8/src/string-search.h +22 -6
- data/vendor/v8/src/string-stream.cc +11 -8
- data/vendor/v8/src/string.js +47 -15
- data/vendor/v8/src/stub-cache.cc +461 -224
- data/vendor/v8/src/stub-cache.h +164 -102
- data/vendor/v8/src/sweeper-thread.cc +105 -0
- data/vendor/v8/src/sweeper-thread.h +81 -0
- data/vendor/v8/src/token.h +1 -0
- data/vendor/v8/src/transitions-inl.h +220 -0
- data/vendor/v8/src/transitions.cc +160 -0
- data/vendor/v8/src/transitions.h +207 -0
- data/vendor/v8/src/type-info.cc +182 -181
- data/vendor/v8/src/type-info.h +31 -19
- data/vendor/v8/src/unicode-inl.h +62 -106
- data/vendor/v8/src/unicode.cc +57 -67
- data/vendor/v8/src/unicode.h +45 -91
- data/vendor/v8/src/uri.js +57 -29
- data/vendor/v8/src/utils.h +105 -5
- data/vendor/v8/src/v8-counters.cc +54 -11
- data/vendor/v8/src/v8-counters.h +134 -19
- data/vendor/v8/src/v8.cc +29 -29
- data/vendor/v8/src/v8.h +1 -0
- data/vendor/v8/src/v8conversions.cc +26 -22
- data/vendor/v8/src/v8globals.h +56 -43
- data/vendor/v8/src/v8natives.js +83 -30
- data/vendor/v8/src/v8threads.cc +42 -21
- data/vendor/v8/src/v8threads.h +4 -1
- data/vendor/v8/src/v8utils.cc +9 -93
- data/vendor/v8/src/v8utils.h +37 -33
- data/vendor/v8/src/variables.cc +6 -3
- data/vendor/v8/src/variables.h +6 -13
- data/vendor/v8/src/version.cc +2 -2
- data/vendor/v8/src/vm-state-inl.h +11 -0
- data/vendor/v8/src/x64/assembler-x64-inl.h +39 -8
- data/vendor/v8/src/x64/assembler-x64.cc +78 -64
- data/vendor/v8/src/x64/assembler-x64.h +38 -33
- data/vendor/v8/src/x64/builtins-x64.cc +105 -7
- data/vendor/v8/src/x64/code-stubs-x64.cc +790 -413
- data/vendor/v8/src/x64/code-stubs-x64.h +10 -106
- data/vendor/v8/src/x64/codegen-x64.cc +210 -8
- data/vendor/v8/src/x64/codegen-x64.h +20 -1
- data/vendor/v8/src/x64/deoptimizer-x64.cc +336 -75
- data/vendor/v8/src/x64/disasm-x64.cc +15 -0
- data/vendor/v8/src/x64/frames-x64.h +0 -14
- data/vendor/v8/src/x64/full-codegen-x64.cc +293 -270
- data/vendor/v8/src/x64/ic-x64.cc +153 -251
- data/vendor/v8/src/x64/lithium-codegen-x64.cc +1379 -531
- data/vendor/v8/src/x64/lithium-codegen-x64.h +67 -23
- data/vendor/v8/src/x64/lithium-gap-resolver-x64.cc +2 -2
- data/vendor/v8/src/x64/lithium-x64.cc +349 -289
- data/vendor/v8/src/x64/lithium-x64.h +460 -250
- data/vendor/v8/src/x64/macro-assembler-x64.cc +350 -177
- data/vendor/v8/src/x64/macro-assembler-x64.h +67 -49
- data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +46 -33
- data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +2 -3
- data/vendor/v8/src/x64/stub-cache-x64.cc +484 -653
- data/vendor/v8/src/zone-inl.h +9 -27
- data/vendor/v8/src/zone.cc +5 -5
- data/vendor/v8/src/zone.h +53 -27
- data/vendor/v8/test/benchmarks/testcfg.py +5 -0
- data/vendor/v8/test/cctest/cctest.cc +4 -0
- data/vendor/v8/test/cctest/cctest.gyp +3 -1
- data/vendor/v8/test/cctest/cctest.h +57 -9
- data/vendor/v8/test/cctest/cctest.status +15 -15
- data/vendor/v8/test/cctest/test-accessors.cc +26 -0
- data/vendor/v8/test/cctest/test-alloc.cc +22 -30
- data/vendor/v8/test/cctest/test-api.cc +1943 -314
- data/vendor/v8/test/cctest/test-assembler-arm.cc +133 -13
- data/vendor/v8/test/cctest/test-assembler-ia32.cc +1 -1
- data/vendor/v8/test/cctest/test-assembler-mips.cc +12 -0
- data/vendor/v8/test/cctest/test-ast.cc +4 -2
- data/vendor/v8/test/cctest/test-compiler.cc +61 -29
- data/vendor/v8/test/cctest/test-dataflow.cc +2 -2
- data/vendor/v8/test/cctest/test-debug.cc +212 -33
- data/vendor/v8/test/cctest/test-decls.cc +257 -11
- data/vendor/v8/test/cctest/test-dictionary.cc +24 -10
- data/vendor/v8/test/cctest/test-disasm-arm.cc +118 -1
- data/vendor/v8/test/cctest/test-disasm-ia32.cc +3 -2
- data/vendor/v8/test/cctest/test-flags.cc +14 -1
- data/vendor/v8/test/cctest/test-func-name-inference.cc +7 -4
- data/vendor/v8/test/cctest/test-global-object.cc +51 -0
- data/vendor/v8/test/cctest/test-hashing.cc +32 -23
- data/vendor/v8/test/cctest/test-heap-profiler.cc +131 -77
- data/vendor/v8/test/cctest/test-heap.cc +1084 -143
- data/vendor/v8/test/cctest/test-list.cc +1 -1
- data/vendor/v8/test/cctest/test-liveedit.cc +3 -2
- data/vendor/v8/test/cctest/test-lockers.cc +12 -13
- data/vendor/v8/test/cctest/test-log.cc +10 -8
- data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +2 -2
- data/vendor/v8/test/cctest/test-mark-compact.cc +44 -22
- data/vendor/v8/test/cctest/test-object-observe.cc +434 -0
- data/vendor/v8/test/cctest/test-parsing.cc +86 -39
- data/vendor/v8/test/cctest/test-platform-linux.cc +6 -0
- data/vendor/v8/test/cctest/test-platform-win32.cc +7 -0
- data/vendor/v8/test/cctest/test-random.cc +5 -4
- data/vendor/v8/test/cctest/test-regexp.cc +137 -101
- data/vendor/v8/test/cctest/test-serialize.cc +150 -230
- data/vendor/v8/test/cctest/test-sockets.cc +1 -1
- data/vendor/v8/test/cctest/test-spaces.cc +139 -0
- data/vendor/v8/test/cctest/test-strings.cc +736 -74
- data/vendor/v8/test/cctest/test-thread-termination.cc +10 -11
- data/vendor/v8/test/cctest/test-threads.cc +4 -4
- data/vendor/v8/test/cctest/test-utils.cc +16 -0
- data/vendor/v8/test/cctest/test-weakmaps.cc +7 -3
- data/vendor/v8/test/cctest/testcfg.py +64 -5
- data/vendor/v8/test/es5conform/testcfg.py +5 -0
- data/vendor/v8/test/message/message.status +1 -1
- data/vendor/v8/test/message/overwritten-builtins.out +3 -0
- data/vendor/v8/test/message/testcfg.py +89 -8
- data/vendor/v8/test/message/try-catch-finally-no-message.out +26 -26
- data/vendor/v8/test/mjsunit/accessor-map-sharing.js +18 -2
- data/vendor/v8/test/mjsunit/allocation-site-info.js +126 -0
- data/vendor/v8/test/mjsunit/array-bounds-check-removal.js +62 -1
- data/vendor/v8/test/mjsunit/array-iteration.js +1 -1
- data/vendor/v8/test/mjsunit/array-literal-transitions.js +2 -0
- data/vendor/v8/test/mjsunit/array-natives-elements.js +317 -0
- data/vendor/v8/test/mjsunit/array-reduce.js +8 -8
- data/vendor/v8/test/mjsunit/array-slice.js +12 -0
- data/vendor/v8/test/mjsunit/array-store-and-grow.js +4 -1
- data/vendor/v8/test/mjsunit/assert-opt-and-deopt.js +1 -1
- data/vendor/v8/test/mjsunit/bugs/bug-2337.js +53 -0
- data/vendor/v8/test/mjsunit/compare-known-objects-slow.js +69 -0
- data/vendor/v8/test/mjsunit/compiler/alloc-object-huge.js +3 -1
- data/vendor/v8/test/mjsunit/compiler/inline-accessors.js +368 -0
- data/vendor/v8/test/mjsunit/compiler/inline-arguments.js +87 -1
- data/vendor/v8/test/mjsunit/compiler/inline-closures.js +49 -0
- data/vendor/v8/test/mjsunit/compiler/inline-construct.js +55 -43
- data/vendor/v8/test/mjsunit/compiler/inline-literals.js +39 -0
- data/vendor/v8/test/mjsunit/compiler/multiply-add.js +69 -0
- data/vendor/v8/test/mjsunit/compiler/optimized-closures.js +57 -0
- data/vendor/v8/test/mjsunit/compiler/parallel-proto-change.js +44 -0
- data/vendor/v8/test/mjsunit/compiler/property-static.js +69 -0
- data/vendor/v8/test/mjsunit/compiler/proto-chain-constant.js +55 -0
- data/vendor/v8/test/mjsunit/compiler/proto-chain-load.js +44 -0
- data/vendor/v8/test/mjsunit/compiler/regress-gvn.js +3 -2
- data/vendor/v8/test/mjsunit/compiler/regress-or.js +6 -2
- data/vendor/v8/test/mjsunit/compiler/rotate.js +224 -0
- data/vendor/v8/test/mjsunit/compiler/uint32.js +173 -0
- data/vendor/v8/test/mjsunit/count-based-osr.js +2 -1
- data/vendor/v8/test/mjsunit/d8-os.js +3 -3
- data/vendor/v8/test/mjsunit/date-parse.js +3 -0
- data/vendor/v8/test/mjsunit/date.js +22 -0
- data/vendor/v8/test/mjsunit/debug-break-inline.js +1 -0
- data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js +22 -12
- data/vendor/v8/test/mjsunit/debug-evaluate-locals-optimized.js +21 -10
- data/vendor/v8/test/mjsunit/debug-liveedit-compile-error.js +60 -0
- data/vendor/v8/test/mjsunit/debug-liveedit-double-call.js +142 -0
- data/vendor/v8/test/mjsunit/debug-liveedit-literals.js +94 -0
- data/vendor/v8/test/mjsunit/debug-liveedit-restart-frame.js +153 -0
- data/vendor/v8/test/mjsunit/debug-multiple-breakpoints.js +1 -1
- data/vendor/v8/test/mjsunit/debug-script-breakpoints-closure.js +67 -0
- data/vendor/v8/test/mjsunit/debug-script-breakpoints-nested.js +82 -0
- data/vendor/v8/test/mjsunit/debug-script.js +4 -2
- data/vendor/v8/test/mjsunit/debug-set-variable-value.js +308 -0
- data/vendor/v8/test/mjsunit/debug-stepout-scope-part1.js +190 -0
- data/vendor/v8/test/mjsunit/debug-stepout-scope-part2.js +83 -0
- data/vendor/v8/test/mjsunit/debug-stepout-scope-part3.js +80 -0
- data/vendor/v8/test/mjsunit/debug-stepout-scope-part4.js +80 -0
- data/vendor/v8/test/mjsunit/debug-stepout-scope-part5.js +77 -0
- data/vendor/v8/test/mjsunit/debug-stepout-scope-part6.js +79 -0
- data/vendor/v8/test/mjsunit/debug-stepout-scope-part7.js +79 -0
- data/vendor/v8/test/mjsunit/{debug-stepout-scope.js → debug-stepout-scope-part8.js} +0 -189
- data/vendor/v8/test/mjsunit/delete-non-configurable.js +74 -0
- data/vendor/v8/test/mjsunit/deopt-minus-zero.js +56 -0
- data/vendor/v8/test/mjsunit/elements-kind.js +6 -4
- data/vendor/v8/test/mjsunit/elements-length-no-holey.js +33 -0
- data/vendor/v8/test/mjsunit/elements-transition-hoisting.js +46 -19
- data/vendor/v8/test/mjsunit/error-accessors.js +54 -0
- data/vendor/v8/test/mjsunit/error-constructors.js +1 -14
- data/vendor/v8/test/mjsunit/error-tostring.js +8 -0
- data/vendor/v8/test/mjsunit/eval-stack-trace.js +204 -0
- data/vendor/v8/test/mjsunit/external-array.js +364 -1
- data/vendor/v8/test/mjsunit/fast-array-length.js +37 -0
- data/vendor/v8/test/mjsunit/fast-non-keyed.js +113 -0
- data/vendor/v8/test/mjsunit/fast-prototype.js +117 -0
- data/vendor/v8/test/mjsunit/function-call.js +14 -18
- data/vendor/v8/test/mjsunit/fuzz-natives-part1.js +230 -0
- data/vendor/v8/test/mjsunit/fuzz-natives-part2.js +229 -0
- data/vendor/v8/test/mjsunit/fuzz-natives-part3.js +229 -0
- data/vendor/v8/test/mjsunit/{fuzz-natives.js → fuzz-natives-part4.js} +12 -2
- data/vendor/v8/test/mjsunit/generated-transition-stub.js +218 -0
- data/vendor/v8/test/mjsunit/greedy.js +1 -1
- data/vendor/v8/test/mjsunit/harmony/block-conflicts.js +2 -1
- data/vendor/v8/test/mjsunit/harmony/block-let-crankshaft.js +1 -1
- data/vendor/v8/test/mjsunit/harmony/collections.js +69 -11
- data/vendor/v8/test/mjsunit/harmony/debug-blockscopes.js +2 -2
- data/vendor/v8/test/mjsunit/harmony/module-linking.js +180 -3
- data/vendor/v8/test/mjsunit/harmony/module-parsing.js +31 -0
- data/vendor/v8/test/mjsunit/harmony/module-recompile.js +87 -0
- data/vendor/v8/test/mjsunit/harmony/module-resolution.js +15 -2
- data/vendor/v8/test/mjsunit/harmony/object-observe.js +1056 -0
- data/vendor/v8/test/mjsunit/harmony/proxies-json.js +178 -0
- data/vendor/v8/test/mjsunit/harmony/proxies.js +25 -10
- data/vendor/v8/test/mjsunit/json-parser-recursive.js +33 -0
- data/vendor/v8/test/mjsunit/json-stringify-recursive.js +52 -0
- data/vendor/v8/test/mjsunit/json.js +38 -2
- data/vendor/v8/test/mjsunit/json2.js +153 -0
- data/vendor/v8/test/mjsunit/limit-locals.js +5 -4
- data/vendor/v8/test/mjsunit/manual-parallel-recompile.js +79 -0
- data/vendor/v8/test/mjsunit/math-exp-precision.js +64 -0
- data/vendor/v8/test/mjsunit/math-floor-negative.js +59 -0
- data/vendor/v8/test/mjsunit/math-floor-of-div-minus-zero.js +41 -0
- data/vendor/v8/test/mjsunit/math-floor-of-div-nosudiv.js +288 -0
- data/vendor/v8/test/mjsunit/math-floor-of-div.js +81 -9
- data/vendor/v8/test/mjsunit/{math-floor.js → math-floor-part1.js} +1 -72
- data/vendor/v8/test/mjsunit/math-floor-part2.js +76 -0
- data/vendor/v8/test/mjsunit/math-floor-part3.js +78 -0
- data/vendor/v8/test/mjsunit/math-floor-part4.js +76 -0
- data/vendor/v8/test/mjsunit/mirror-object.js +43 -9
- data/vendor/v8/test/mjsunit/mjsunit.js +1 -1
- data/vendor/v8/test/mjsunit/mjsunit.status +52 -27
- data/vendor/v8/test/mjsunit/mul-exhaustive-part1.js +491 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part10.js +470 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part2.js +525 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part3.js +532 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part4.js +509 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part5.js +505 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part6.js +554 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part7.js +497 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part8.js +526 -0
- data/vendor/v8/test/mjsunit/mul-exhaustive-part9.js +533 -0
- data/vendor/v8/test/mjsunit/new-function.js +34 -0
- data/vendor/v8/test/mjsunit/numops-fuzz-part1.js +1172 -0
- data/vendor/v8/test/mjsunit/numops-fuzz-part2.js +1178 -0
- data/vendor/v8/test/mjsunit/numops-fuzz-part3.js +1178 -0
- data/vendor/v8/test/mjsunit/numops-fuzz-part4.js +1177 -0
- data/vendor/v8/test/mjsunit/object-define-property.js +107 -2
- data/vendor/v8/test/mjsunit/override-read-only-property.js +6 -4
- data/vendor/v8/test/mjsunit/packed-elements.js +2 -2
- data/vendor/v8/test/mjsunit/parse-int-float.js +4 -4
- data/vendor/v8/test/mjsunit/pixel-array-rounding.js +1 -1
- data/vendor/v8/test/mjsunit/readonly.js +228 -0
- data/vendor/v8/test/mjsunit/regexp-capture-3.js +16 -18
- data/vendor/v8/test/mjsunit/regexp-capture.js +2 -0
- data/vendor/v8/test/mjsunit/regexp-global.js +122 -0
- data/vendor/v8/test/mjsunit/regexp-results-cache.js +78 -0
- data/vendor/v8/test/mjsunit/regress/regress-1117.js +12 -3
- data/vendor/v8/test/mjsunit/regress/regress-1118.js +1 -1
- data/vendor/v8/test/mjsunit/regress/regress-115100.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-1199637.js +1 -3
- data/vendor/v8/test/mjsunit/regress/regress-121407.js +1 -1
- data/vendor/v8/test/mjsunit/regress/regress-131923.js +30 -0
- data/vendor/v8/test/mjsunit/regress/regress-131994.js +70 -0
- data/vendor/v8/test/mjsunit/regress/regress-133211.js +35 -0
- data/vendor/v8/test/mjsunit/regress/regress-133211b.js +39 -0
- data/vendor/v8/test/mjsunit/regress/regress-136048.js +34 -0
- data/vendor/v8/test/mjsunit/regress/regress-137768.js +73 -0
- data/vendor/v8/test/mjsunit/regress/regress-143967.js +34 -0
- data/vendor/v8/test/mjsunit/regress/regress-145201.js +107 -0
- data/vendor/v8/test/mjsunit/regress/regress-147497.js +45 -0
- data/vendor/v8/test/mjsunit/regress/regress-148378.js +38 -0
- data/vendor/v8/test/mjsunit/regress/regress-1563.js +1 -1
- data/vendor/v8/test/mjsunit/regress/regress-1591.js +48 -0
- data/vendor/v8/test/mjsunit/regress/regress-164442.js +45 -0
- data/vendor/v8/test/mjsunit/regress/regress-165637.js +61 -0
- data/vendor/v8/test/mjsunit/regress/regress-166379.js +39 -0
- data/vendor/v8/test/mjsunit/regress/regress-166553.js +33 -0
- data/vendor/v8/test/mjsunit/regress/regress-1692.js +1 -1
- data/vendor/v8/test/mjsunit/regress/regress-171641.js +40 -0
- data/vendor/v8/test/mjsunit/regress/regress-1980.js +1 -1
- data/vendor/v8/test/mjsunit/regress/regress-2073.js +99 -0
- data/vendor/v8/test/mjsunit/regress/regress-2119.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-2156.js +39 -0
- data/vendor/v8/test/mjsunit/regress/regress-2163.js +70 -0
- data/vendor/v8/test/mjsunit/regress/regress-2170.js +58 -0
- data/vendor/v8/test/mjsunit/regress/regress-2172.js +35 -0
- data/vendor/v8/test/mjsunit/regress/regress-2185-2.js +145 -0
- data/vendor/v8/test/mjsunit/regress/regress-2185.js +38 -0
- data/vendor/v8/test/mjsunit/regress/regress-2186.js +49 -0
- data/vendor/v8/test/mjsunit/regress/regress-2193.js +58 -0
- data/vendor/v8/test/mjsunit/regress/regress-2219.js +32 -0
- data/vendor/v8/test/mjsunit/regress/regress-2225.js +65 -0
- data/vendor/v8/test/mjsunit/regress/regress-2226.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-2234.js +41 -0
- data/vendor/v8/test/mjsunit/regress/regress-2243.js +31 -0
- data/vendor/v8/test/mjsunit/regress/regress-2249.js +33 -0
- data/vendor/v8/test/mjsunit/regress/regress-2250.js +68 -0
- data/vendor/v8/test/mjsunit/regress/regress-2261.js +113 -0
- data/vendor/v8/test/mjsunit/regress/regress-2263.js +30 -0
- data/vendor/v8/test/mjsunit/regress/regress-2284.js +32 -0
- data/vendor/v8/test/mjsunit/regress/regress-2285.js +32 -0
- data/vendor/v8/test/mjsunit/regress/regress-2286.js +32 -0
- data/vendor/v8/test/mjsunit/regress/regress-2289.js +34 -0
- data/vendor/v8/test/mjsunit/regress/regress-2291.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-2294.js +70 -0
- data/vendor/v8/test/mjsunit/regress/regress-2296.js +40 -0
- data/vendor/v8/test/mjsunit/regress/regress-2315.js +40 -0
- data/vendor/v8/test/mjsunit/regress/regress-2318.js +66 -0
- data/vendor/v8/test/mjsunit/regress/regress-2322.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-2326.js +54 -0
- data/vendor/v8/test/mjsunit/regress/regress-2336.js +53 -0
- data/vendor/v8/test/mjsunit/regress/regress-2339.js +59 -0
- data/vendor/v8/test/mjsunit/regress/regress-2346.js +123 -0
- data/vendor/v8/test/mjsunit/regress/regress-2373.js +29 -0
- data/vendor/v8/test/mjsunit/regress/regress-2374.js +33 -0
- data/vendor/v8/test/mjsunit/regress/regress-2398.js +41 -0
- data/vendor/v8/test/mjsunit/regress/regress-2410.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-2416.js +75 -0
- data/vendor/v8/test/mjsunit/regress/regress-2419.js +37 -0
- data/vendor/v8/test/mjsunit/regress/regress-2433.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-2437.js +156 -0
- data/vendor/v8/test/mjsunit/regress/regress-2438.js +52 -0
- data/vendor/v8/test/mjsunit/regress/regress-2443.js +129 -0
- data/vendor/v8/test/mjsunit/regress/regress-2444.js +120 -0
- data/vendor/v8/test/mjsunit/regress/regress-2489.js +50 -0
- data/vendor/v8/test/mjsunit/regress/regress-2499.js +40 -0
- data/vendor/v8/test/mjsunit/regress/regress-334.js +1 -1
- data/vendor/v8/test/mjsunit/regress/regress-492.js +39 -1
- data/vendor/v8/test/mjsunit/regress/regress-builtin-array-op.js +38 -0
- data/vendor/v8/test/mjsunit/regress/regress-cnlt-elements.js +43 -0
- data/vendor/v8/test/mjsunit/regress/regress-cnlt-enum-indices.js +45 -0
- data/vendor/v8/test/mjsunit/regress/regress-cntl-descriptors-enum.js +46 -0
- data/vendor/v8/test/mjsunit/regress/regress-convert-enum.js +60 -0
- data/vendor/v8/test/mjsunit/regress/regress-convert-enum2.js +46 -0
- data/vendor/v8/test/mjsunit/regress/regress-convert-transition.js +40 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-119926.js +3 -1
- data/vendor/v8/test/mjsunit/regress/regress-crbug-125148.js +90 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-134055.js +63 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-134609.js +59 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-135008.js +45 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-135066.js +55 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-137689.js +47 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-138887.js +48 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-140083.js +44 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-142087.js +38 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-142218.js +44 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-145961.js +39 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-146910.js +33 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-147475.js +48 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-148376.js +35 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-150545.js +53 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-150729.js +39 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-157019.js +54 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-157520.js +38 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-158185.js +39 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-160010.js +35 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-162085.js +71 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-168545.js +34 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-170856.js +33 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-172345.js +34 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-173974.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-crbug-18639.js +9 -5
- data/vendor/v8/test/mjsunit/regress/regress-debug-code-recompilation.js +2 -1
- data/vendor/v8/test/mjsunit/regress/regress-deep-proto.js +45 -0
- data/vendor/v8/test/mjsunit/regress/regress-delete-empty-double.js +40 -0
- data/vendor/v8/test/mjsunit/regress/regress-iteration-order.js +42 -0
- data/vendor/v8/test/mjsunit/regress/regress-json-stringify-gc.js +41 -0
- data/vendor/v8/test/mjsunit/regress/regress-latin-1.js +78 -0
- data/vendor/v8/test/mjsunit/regress/regress-load-elements.js +49 -0
- data/vendor/v8/test/mjsunit/regress/regress-observe-empty-double-array.js +38 -0
- data/vendor/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js +37 -0
- data/vendor/v8/test/mjsunit/shift-for-integer-div.js +59 -0
- data/vendor/v8/test/mjsunit/stack-traces-gc.js +119 -0
- data/vendor/v8/test/mjsunit/stack-traces-overflow.js +122 -0
- data/vendor/v8/test/mjsunit/stack-traces.js +39 -1
- data/vendor/v8/test/mjsunit/str-to-num.js +7 -2
- data/vendor/v8/test/mjsunit/strict-mode.js +36 -11
- data/vendor/v8/test/mjsunit/string-charcodeat.js +3 -0
- data/vendor/v8/test/mjsunit/string-natives.js +72 -0
- data/vendor/v8/test/mjsunit/string-split.js +17 -0
- data/vendor/v8/test/mjsunit/testcfg.py +76 -6
- data/vendor/v8/test/mjsunit/tools/tickprocessor.js +4 -1
- data/vendor/v8/test/mjsunit/try-finally-continue.js +72 -0
- data/vendor/v8/test/mjsunit/typed-array-slice.js +61 -0
- data/vendor/v8/test/mjsunit/unbox-double-arrays.js +2 -0
- data/vendor/v8/test/mjsunit/uri.js +12 -0
- data/vendor/v8/test/mjsunit/with-readonly.js +4 -2
- data/vendor/v8/test/mozilla/mozilla.status +19 -113
- data/vendor/v8/test/mozilla/testcfg.py +122 -3
- data/vendor/v8/test/preparser/preparser.status +5 -0
- data/vendor/v8/test/preparser/strict-identifiers.pyt +1 -1
- data/vendor/v8/test/preparser/testcfg.py +101 -5
- data/vendor/v8/test/sputnik/sputnik.status +1 -1
- data/vendor/v8/test/sputnik/testcfg.py +5 -0
- data/vendor/v8/test/test262/README +2 -2
- data/vendor/v8/test/test262/test262.status +13 -36
- data/vendor/v8/test/test262/testcfg.py +102 -8
- data/vendor/v8/tools/android-build.sh +0 -0
- data/vendor/v8/tools/android-ll-prof.sh +69 -0
- data/vendor/v8/tools/android-run.py +109 -0
- data/vendor/v8/tools/android-sync.sh +105 -0
- data/vendor/v8/tools/bash-completion.sh +0 -0
- data/vendor/v8/tools/check-static-initializers.sh +0 -0
- data/vendor/v8/tools/common-includes.sh +15 -22
- data/vendor/v8/tools/disasm.py +4 -4
- data/vendor/v8/tools/fuzz-harness.sh +0 -0
- data/vendor/v8/tools/gen-postmortem-metadata.py +6 -8
- data/vendor/v8/tools/grokdump.py +404 -129
- data/vendor/v8/tools/gyp/v8.gyp +105 -43
- data/vendor/v8/tools/linux-tick-processor +5 -5
- data/vendor/v8/tools/ll_prof.py +75 -15
- data/vendor/v8/tools/merge-to-branch.sh +2 -2
- data/vendor/v8/tools/plot-timer-events +70 -0
- data/vendor/v8/tools/plot-timer-events.js +510 -0
- data/vendor/v8/tools/presubmit.py +1 -0
- data/vendor/v8/tools/push-to-trunk.sh +14 -4
- data/vendor/v8/tools/run-llprof.sh +69 -0
- data/vendor/v8/tools/run-tests.py +372 -0
- data/vendor/v8/tools/run-valgrind.py +1 -1
- data/vendor/v8/tools/status-file-converter.py +39 -0
- data/vendor/v8/tools/test-server.py +224 -0
- data/vendor/v8/tools/test-wrapper-gypbuild.py +13 -16
- data/vendor/v8/tools/test.py +10 -19
- data/vendor/v8/tools/testrunner/README +174 -0
- data/vendor/v8/tools/testrunner/__init__.py +26 -0
- data/vendor/v8/tools/testrunner/local/__init__.py +26 -0
- data/vendor/v8/tools/testrunner/local/commands.py +153 -0
- data/vendor/v8/tools/testrunner/local/execution.py +182 -0
- data/vendor/v8/tools/testrunner/local/old_statusfile.py +460 -0
- data/vendor/v8/tools/testrunner/local/progress.py +238 -0
- data/vendor/v8/tools/testrunner/local/statusfile.py +145 -0
- data/vendor/v8/tools/testrunner/local/testsuite.py +187 -0
- data/vendor/v8/tools/testrunner/local/utils.py +108 -0
- data/vendor/v8/tools/testrunner/local/verbose.py +99 -0
- data/vendor/v8/tools/testrunner/network/__init__.py +26 -0
- data/vendor/v8/tools/testrunner/network/distro.py +90 -0
- data/vendor/v8/tools/testrunner/network/endpoint.py +124 -0
- data/vendor/v8/tools/testrunner/network/network_execution.py +253 -0
- data/vendor/v8/tools/testrunner/network/perfdata.py +120 -0
- data/vendor/v8/tools/testrunner/objects/__init__.py +26 -0
- data/vendor/v8/tools/testrunner/objects/context.py +50 -0
- data/vendor/v8/tools/testrunner/objects/output.py +60 -0
- data/vendor/v8/tools/testrunner/objects/peer.py +80 -0
- data/vendor/v8/tools/testrunner/objects/testcase.py +83 -0
- data/vendor/v8/tools/testrunner/objects/workpacket.py +90 -0
- data/vendor/v8/tools/testrunner/server/__init__.py +26 -0
- data/vendor/v8/tools/testrunner/server/compression.py +111 -0
- data/vendor/v8/tools/testrunner/server/constants.py +51 -0
- data/vendor/v8/tools/testrunner/server/daemon.py +147 -0
- data/vendor/v8/tools/testrunner/server/local_handler.py +119 -0
- data/vendor/v8/tools/testrunner/server/main.py +245 -0
- data/vendor/v8/tools/testrunner/server/presence_handler.py +120 -0
- data/vendor/v8/tools/testrunner/server/signatures.py +63 -0
- data/vendor/v8/tools/testrunner/server/status_handler.py +112 -0
- data/vendor/v8/tools/testrunner/server/work_handler.py +150 -0
- data/vendor/v8/tools/tick-processor.html +168 -0
- data/vendor/v8/tools/tickprocessor-driver.js +5 -3
- data/vendor/v8/tools/tickprocessor.js +58 -15
- metadata +534 -30
- data/patches/add-freebsd9-and-freebsd10-to-gyp-GetFlavor.patch +0 -11
- data/patches/do-not-imply-vfp3-and-armv7.patch +0 -44
- data/patches/fPIC-on-x64.patch +0 -14
- data/vendor/v8/src/liveobjectlist-inl.h +0 -126
- data/vendor/v8/src/liveobjectlist.cc +0 -2631
- data/vendor/v8/src/liveobjectlist.h +0 -319
- data/vendor/v8/test/mjsunit/mul-exhaustive.js +0 -4629
- data/vendor/v8/test/mjsunit/numops-fuzz.js +0 -4609
- data/vendor/v8/test/mjsunit/regress/regress-1969.js +0 -5045

data/vendor/v8/src/mips/builtins-mips.cc

@@ -79,12 +79,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
 // Load the built-in InternalArray function from the current context.
 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
                                               Register result) {
-  // Load the
+  // Load the native context.
 
-  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ lw(result,
-
-
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ lw(result,
+        FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the InternalArray function from the native context.
   __ lw(result,
         MemOperand(result,
                    Context::SlotOffset(
@@ -94,12 +95,13 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
 
 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
-  // Load the
+  // Load the native context.
 
-  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ lw(result,
-
-
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ lw(result,
+        FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the Array function from the native context.
   __ lw(result,
         MemOperand(result,
                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
@@ -713,6 +715,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
 }
 
 
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+  GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Push a copy of the function onto the stack.
+    __ push(a1);
+    // Push call kind information.
+    __ push(t1);
+
+    __ push(a1);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+    // Restore call kind information.
+    __ pop(t1);
+    // Restore receiver.
+    __ pop(a1);
+
+    // Tear down internal frame.
+  }
+
+  GenerateTailCallToSharedCode(masm);
+}
+
+
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
@@ -1216,6 +1255,66 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
 }
 
 
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  __ mov(a0, ra);
+  // Adjust a0 to point to the head of the PlatformCodeAge sequence
+  __ Subu(a0, a0,
+      Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+  // Restore the original return address of the function
+  __ mov(ra, at);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  // a0 - contains return address (beginning of patch sequence)
+  // a1 - function object
+  RegList saved_regs =
+      (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(saved_regs);
+  __ PrepareCallCFunction(1, 0, a1);
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+  __ MultiPop(saved_regs);
+  __ Jump(a0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+    MacroAssembler* masm) {                                 \
+  GenerateMakeCodeYoungAgainCommon(masm);                   \
+}                                                           \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+    MacroAssembler* masm) {                                 \
+  GenerateMakeCodeYoungAgainCommon(masm);                   \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ Addu(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ Jump(ra);  // Jump to miss handler
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
@@ -1332,7 +1431,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // a0: actual number of arguments
   // a1: function
   Label shift_arguments;
-  __ li(t0, Operand(0, RelocInfo::
+  __ li(t0, Operand(0, RelocInfo::NONE32));  // Indicate regular JS_FUNCTION.
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1386,16 +1485,16 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ sll(at, a0, kPointerSizeLog2);
     __ addu(at, sp, at);
     __ lw(a1, MemOperand(at));
-    __ li(t0, Operand(0, RelocInfo::
+    __ li(t0, Operand(0, RelocInfo::NONE32));
     __ Branch(&patch_receiver);
 
     // Use the global receiver object from the called function as the
     // receiver.
     __ bind(&use_global_receiver);
     const int kGlobalIndex =
-        Context::kHeaderSize + Context::
+        Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
     __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
-    __ lw(a2, FieldMemOperand(a2, GlobalObject::
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
     __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
     __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
 
@@ -1409,11 +1508,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
 
   // 3b. Check for function proxy.
   __ bind(&slow);
-  __ li(t0, Operand(1, RelocInfo::
+  __ li(t0, Operand(1, RelocInfo::NONE32));  // Indicate function proxy.
   __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
 
   __ bind(&non_function);
-  __ li(t0, Operand(2, RelocInfo::
+  __ li(t0, Operand(2, RelocInfo::NONE32));  // Indicate non-function.
 
   // 3c. Patch the first argument when calling a non-function. The
   // CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1586,9 +1685,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // Use the current global receiver object as the receiver.
     __ bind(&use_global_receiver);
     const int kGlobalOffset =
-        Context::kHeaderSize + Context::
+        Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
     __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
-    __ lw(a0, FieldMemOperand(a0, GlobalObject::
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
     __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
     __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
 
@@ -1644,7 +1743,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   __ bind(&call_proxy);
   __ push(a1);  // Add function proxy as last argument.
   __ Addu(a0, a0, Operand(1));
-  __ li(a2, Operand(0, RelocInfo::
+  __ li(a2, Operand(0, RelocInfo::NONE32));
   __ SetCallKind(t1, CALL_AS_METHOD);
   __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
   __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),

data/vendor/v8/src/mips/code-stubs-mips.cc

@@ -33,17 +33,28 @@
 #include "code-stubs.h"
 #include "codegen.h"
 #include "regexp-macro-assembler.h"
+#include "stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a1, a0 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
-                                          Condition cc
-                                          bool never_nan_nan);
+                                          Condition cc);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
                                     Register rhs,
@@ -87,6 +98,8 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
 
   // Pop the function info from the stack.
@@ -100,32 +113,44 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
                         &gc,
                         TAG_OBJECT);
 
+  __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
+
   int map_index = (language_mode_ == CLASSIC_MODE)
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
-  // Compute the function map in the current
+  // Compute the function map in the current native context and set that
   // as the map of the allocated object.
-  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::
-  __ lw(a2, FieldMemOperand(a2, GlobalObject::
-  __ lw(
-  __ sw(
+  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
+  __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
+  __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
-  __ sw(
+  __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
   __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
   __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
   __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
-  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ lw(a1,
+          FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ And(at, a1, a1);
+    __ Branch(&check_optimized, ne, at, Operand(zero_reg));
+  }
+  __ bind(&install_unoptimized);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
   __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
   __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
 
@@ -133,6 +158,72 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
   __ Ret();
 
+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
+
+  // a2 holds native context, a1 points to fixed array of 3-element entries
+  // (native context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first elements.
+  Label install_optimized;
+  // Speculatively move code object into t0.
+  __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
+  __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
+  __ Branch(&install_optimized, eq, a2, Operand(t1));
+
+  // Iterate through the rest of map backwards. t0 holds an index as a Smi.
+  Label loop;
+  __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
+  __ bind(&loop);
+  // Do not double check first entry.
+
+  __ Branch(&install_unoptimized, eq, t0,
+            Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ Subu(t0, t0, Operand(
+      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t1, t1, Operand(at));
+  __ lw(t1, MemOperand(t1));
+  __ Branch(&loop, ne, a2, Operand(t1));
+  // Hit: fetch the optimized code.
+  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t1, t1, Operand(at));
+  __ Addu(t1, t1, Operand(kPointerSize));
+  __ lw(t0, MemOperand(t1));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+                      1, t2, t3);
+
+  // TODO(fschneider): Idea: store proper code pointers in the map and either
+  // unmangle them on marking or do nothing as the whole map is discarded on
+  // major GC anyway.
+  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+  // Now link a function into a list of optimized functions.
+  __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+  // No need for write barrier as JSFunction (eax) is in the new space.
+
+  __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Store JSFunction (eax) into edx before issuing write barrier as
+  // it clobbers all the registers passed.
+  __ mov(t0, v0);
+  __ RecordWriteContextSlot(
+      a2,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      t0,
+      a1,
+      kRAHasNotBeenSaved,
+      kDontSaveFPRegs);
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ LoadRoot(t0, Heap::kFalseValueRootIndex);
@@ -164,12 +255,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
 
   // Set up the fixed slots, copy the global object from the previous context.
-  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::
+  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ li(a1, Operand(Smi::FromInt(0)));
   __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::
+  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
 
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
@@ -211,9 +302,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ li(a2, Operand(Smi::FromInt(length)));
   __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
 
-  // If this block context is nested in the
+  // If this block context is nested in the native context we get a smi
   // sentinel instead of a function. The block context should get the
-  // canonical empty function of the
+  // canonical empty function of the native context as its closure which
   // we still have to look up.
   Label after_sentinel;
   __ JumpIfNotSmi(a3, &after_sentinel);
@@ -222,16 +313,16 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
     __ Assert(eq, message, a3, Operand(zero_reg));
   }
   __ lw(a3, GlobalObjectOperand());
-  __ lw(a3, FieldMemOperand(a3, GlobalObject::
+  __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
   __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
   __ bind(&after_sentinel);
 
   // Set up the fixed slots, copy the global object from the previous context.
-  __ lw(a2, ContextOperand(cp, Context::
+  __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
   __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
   __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
   __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
-  __ sw(a2, ContextOperand(v0, Context::
+  __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
 
   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
@@ -253,6 +344,7 @@ static void GenerateFastCloneShallowArrayCommon(
     MacroAssembler* masm,
     int length,
     FastCloneShallowArrayStub::Mode mode,
+    AllocationSiteMode allocation_site_mode,
     Label* fail) {
   // Registers on entry:
   // a3: boilerplate literal array.
@@ -265,7 +357,13 @@ static void GenerateFastCloneShallowArrayCommon(
         ? FixedDoubleArray::SizeFor(length)
         : FixedArray::SizeFor(length);
   }
-
+
+  int size = JSArray::kSize;
+  int allocation_info_start = size;
+  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+    size += AllocationSiteInfo::kSize;
+  }
+  size += elements_size;
 
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
@@ -276,6 +374,13 @@ static void GenerateFastCloneShallowArrayCommon(
                 fail,
                 TAG_OBJECT);
 
+  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+    __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()->
+                                  allocation_site_info_map())));
+    __ sw(a2, FieldMemOperand(v0, allocation_info_start));
+    __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize));
+  }
+
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
     if ((i != JSArray::kElementsOffset) || (length == 0)) {
@@ -288,7 +393,11 @@ static void GenerateFastCloneShallowArrayCommon(
   // Get hold of the elements array of the boilerplate and setup the
   // elements pointer in the resulting object.
   __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
-
+  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+    __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
+  } else {
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+  }
   __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
 
   // Copy the elements array.
@@ -323,16 +432,18 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
     __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
     __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
     __ Branch(&check_fast_elements, ne, v0, Operand(t1));
-    GenerateFastCloneShallowArrayCommon(masm, 0,
-
+    GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
+                                        allocation_site_mode_,
+                                        &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);
 
    __ bind(&check_fast_elements);
    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
    __ Branch(&double_elements, ne, v0, Operand(t1));
-    GenerateFastCloneShallowArrayCommon(masm, length_,
-
+    GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
+                                        allocation_site_mode_,
+                                        &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);
 
@@ -363,7 +474,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
     __ pop(a3);
   }
 
-  GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+                                      allocation_site_mode_,
+                                      &slow_case);
 
   // Return and remove the on-stack parameters.
   __ DropAndRet(3);
@@ -421,7 +534,7 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
 // scratch register. Destroys the source register. No GC occurs during this
 // stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public
+class ConvertToDoubleStub : public PlatformCodeStub {
  public:
   ConvertToDoubleStub(Register result_reg_1,
                       Register result_reg_2,
@@ -547,24 +660,6 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
 }
 
 
-void FloatingPointHelper::LoadOperands(
-    MacroAssembler* masm,
-    FloatingPointHelper::Destination destination,
-    Register heap_number_map,
-    Register scratch1,
-    Register scratch2,
-    Label* slow) {
-
-  // Load right operand (a0) to f12 or a2/a3.
-  LoadNumber(masm, destination,
-             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
-
-  // Load left operand (a1) to f14 or a0/a1.
-  LoadNumber(masm, destination,
-             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                      Destination destination,
                                      Register object,
@@ -575,11 +670,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                      Register scratch1,
                                      Register scratch2,
                                      Label* not_number) {
-
-
-
-        "HeapNumberMap register clobbered.");
-  }
+  __ AssertRootValue(heap_number_map,
+                     Heap::kHeapNumberMapRootIndex,
+                     "HeapNumberMap register clobbered.");
 
   Label is_smi, done;
 
@@ -641,11 +734,9 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                                Register scratch3,
                                                FPURegister double_scratch,
                                                Label* not_number) {
-
-
-
-        "HeapNumberMap register clobbered.");
-  }
+  __ AssertRootValue(heap_number_map,
+                     Heap::kHeapNumberMapRootIndex,
+                     "HeapNumberMap register clobbered.");
   Label done;
   Label not_in_int32_range;
 
@@ -677,13 +768,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                              Register int_scratch,
                                              Destination destination,
                                              FPURegister double_dst,
-                                             Register
-                                             Register
+                                             Register dst_mantissa,
+                                             Register dst_exponent,
                                              Register scratch2,
                                              FPURegister single_scratch) {
   ASSERT(!int_scratch.is(scratch2));
-  ASSERT(!int_scratch.is(
-  ASSERT(!int_scratch.is(
+  ASSERT(!int_scratch.is(dst_mantissa));
+  ASSERT(!int_scratch.is(dst_exponent));
 
   Label done;
 
@@ -692,64 +783,65 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
     __ mtc1(int_scratch, single_scratch);
     __ cvt_d_w(double_dst, single_scratch);
     if (destination == kCoreRegisters) {
-      __ Move(
+      __ Move(dst_mantissa, dst_exponent, double_dst);
     }
   } else {
     Label fewer_than_20_useful_bits;
     // Expected output:
-    // |
+    // | dst_exponent | dst_mantissa |
     // | s | exp | mantissa |
 
     // Check for zero.
-    __ mov(
-    __ mov(
+    __ mov(dst_exponent, int_scratch);
+    __ mov(dst_mantissa, int_scratch);
     __ Branch(&done, eq, int_scratch, Operand(zero_reg));
 
     // Preload the sign of the value.
-    __ And(
+    __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask));
    // Get the absolute value of the object (as an unsigned integer).
    Label skip_sub;
-    __ Branch(&skip_sub, ge,
+    __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg));
    __ Subu(int_scratch, zero_reg, int_scratch);
    __ bind(&skip_sub);
 
    // Get mantissa[51:20].
 
    // Get the position of the first set bit.
-    __ Clz(
+    __ Clz(dst_mantissa, int_scratch);
    __ li(scratch2, 31);
-    __ Subu(
+    __ Subu(dst_mantissa, scratch2, dst_mantissa);
 
    // Set the exponent.
-    __ Addu(scratch2,
-    __ Ins(
+    __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
+    __ Ins(dst_exponent, scratch2,
           HeapNumber::kExponentShift, HeapNumber::kExponentBits);
 
    // Clear the first non null bit.
    __ li(scratch2, Operand(1));
-    __ sllv(scratch2, scratch2,
+    __ sllv(scratch2, scratch2, dst_mantissa);
    __ li(at, -1);
    __ Xor(scratch2, scratch2, at);
    __ And(int_scratch, int_scratch, scratch2);
 
    // Get the number of bits to set in the lower part of the mantissa.
-    __ Subu(scratch2,
+    __ Subu(scratch2, dst_mantissa,
+            Operand(HeapNumber::kMantissaBitsInTopWord));
    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
    // Set the higher 20 bits of the mantissa.
    __ srlv(at, int_scratch, scratch2);
-    __ or_(
+    __ or_(dst_exponent, dst_exponent, at);
    __ li(at, 32);
    __ subu(scratch2, at, scratch2);
-    __ sllv(
+    __ sllv(dst_mantissa, int_scratch, scratch2);
    __ Branch(&done);
 
    __ bind(&fewer_than_20_useful_bits);
    __ li(at, HeapNumber::kMantissaBitsInTopWord);
-    __ subu(scratch2, at,
+    __ subu(scratch2, at, dst_mantissa);
    __ sllv(scratch2, int_scratch, scratch2);
-    __ Or(
-    // Set
-    __ mov(
+    __ Or(dst_exponent, dst_exponent, scratch2);
+    // Set dst_mantissa to 0.
+    __ mov(dst_mantissa, zero_reg);
  }
  __ bind(&done);
 }
@@ -759,8 +851,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
                                                   DoubleRegister double_dst,
-
-                                                  Register
+                                                  DoubleRegister double_scratch,
+                                                  Register dst_mantissa,
+                                                  Register dst_exponent,
                                                   Register heap_number_map,
                                                   Register scratch1,
                                                   Register scratch2,
@@ -776,16 +869,14 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
 
   __ JumpIfNotSmi(object, &obj_is_not_smi);
   __ SmiUntag(scratch1, object);
-  ConvertIntToDouble(masm, scratch1, destination, double_dst,
-                     scratch2, single_scratch);
+  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
+                     dst_exponent, scratch2, single_scratch);
   __ Branch(&done);
 
   __ bind(&obj_is_not_smi);
-
-
-
-        "HeapNumberMap register clobbered.");
-  }
+  __ AssertRootValue(heap_number_map,
+                     Heap::kHeapNumberMapRootIndex,
+                     "HeapNumberMap register clobbered.");
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
 
   // Load the number.
@@ -796,9 +887,10 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
 
     Register except_flag = scratch2;
     __ EmitFPUTruncate(kRoundToZero,
-                       single_scratch,
-                       double_dst,
                        scratch1,
+                       double_dst,
+                       at,
+                       double_scratch,
                        except_flag,
                        kCheckForInexactConversion);
 
@@ -806,27 +898,51 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
     __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
 
     if (destination == kCoreRegisters) {
-      __ Move(
+      __ Move(dst_mantissa, dst_exponent, double_dst);
     }
 
   } else {
     ASSERT(!scratch1.is(object) && !scratch2.is(object));
     // Load the double value in the destination registers.
-
-
+    bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
+    if (save_registers) {
+      // Save both output registers, because the other one probably holds
+      // an important value too.
+      __ Push(dst_exponent, dst_mantissa);
+    }
+    __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
 
     // Check for 0 and -0.
-
-    __
-    __
+    Label zero;
+    __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
+    __ Or(scratch1, scratch1, Operand(dst_mantissa));
+    __ Branch(&zero, eq, scratch1, Operand(zero_reg));
 
     // Check that the value can be exactly represented by a 32-bit integer.
     // Jump to not_int32 if that's not the case.
-
+    Label restore_input_and_miss;
+    DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
+                         &restore_input_and_miss);
 
-    //
-
-
+    // dst_* were trashed. Reload the double value.
+    if (save_registers) {
+      __ Pop(dst_exponent, dst_mantissa);
+    }
+    __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+    __ Branch(&done);
+
+    __ bind(&restore_input_and_miss);
+    if (save_registers) {
+      __ Pop(dst_exponent, dst_mantissa);
+    }
+    __ Branch(not_int32);
+
+    __ bind(&zero);
+    if (save_registers) {
+      __ Drop(2);
+    }
   }
 
   __ bind(&done);
@@ -840,7 +956,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
-                                            DoubleRegister
+                                            DoubleRegister double_scratch0,
+                                            DoubleRegister double_scratch1,
                                             Label* not_int32) {
   ASSERT(!dst.is(object));
   ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -848,38 +965,34 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
          !scratch1.is(scratch3) &&
          !scratch2.is(scratch3));
 
-  Label done;
+  Label done, maybe_undefined;
 
   __ UntagAndJumpIfSmi(dst, object, &done);
 
-
-
-
-
-
-  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+  __ AssertRootValue(heap_number_map,
+                     Heap::kHeapNumberMapRootIndex,
+                     "HeapNumberMap register clobbered.");
+
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
 
   // Object is a heap number.
   // Convert the floating point value to a 32-bit integer.
   if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // Load the double value.
-    __ ldc1(
+    __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    FPURegister single_scratch = double_scratch.low();
     Register except_flag = scratch2;
     __ EmitFPUTruncate(kRoundToZero,
-
-
+                       dst,
+                       double_scratch0,
                        scratch1,
+                       double_scratch1,
                        except_flag,
                        kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
     __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
-    // Get the result in the destination register.
-    __ mfc1(dst, single_scratch);
-
   } else {
     // Load the double value in the destination registers.
     __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
@@ -911,20 +1024,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
       __ Subu(dst, zero_reg, dst);
     __ bind(&skip_sub);
   }
+  __ Branch(&done);
+
+  __ bind(&maybe_undefined);
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(not_int32, ne, object, Operand(at));
+  // |undefined| is truncated to 0.
+  __ li(dst, Operand(Smi::FromInt(0)));
+  // Fall through.
 
   __ bind(&done);
 }
 
 
 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
-                                               Register
-                                               Register
+                                               Register src_exponent,
+                                               Register src_mantissa,
                                                Register dst,
                                                Register scratch,
                                                Label* not_int32) {
   // Get exponent alone in scratch.
   __ Ext(scratch,
-
+         src_exponent,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
 
@@ -944,11 +1065,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
   // Another way to put it is that if (exponent - signbit) > 30 then the
   // number cannot be represented as an int32.
   Register tmp = dst;
-  __ srl(at,
+  __ srl(at, src_exponent, 31);
   __ subu(tmp, scratch, at);
   __ Branch(not_int32, gt, tmp, Operand(30));
   // - Bits [21:0] in the mantissa are not null.
-  __ And(tmp,
+  __ And(tmp, src_mantissa, 0x3fffff);
   __ Branch(not_int32, ne, tmp, Operand(zero_reg));
 
   // Otherwise the exponent needs to be big enough to shift left all the
@@ -959,20 +1080,20 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
 
   // Get the 32 higher bits of the mantissa in dst.
   __ Ext(dst,
-
+         src_mantissa,
          HeapNumber::kMantissaBitsInTopWord,
          32 - HeapNumber::kMantissaBitsInTopWord);
-  __ sll(at,
+  __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord);
   __ or_(dst, dst, at);
 
   // Create the mask and test the lower bits (of the higher bits).
   __ li(at, 32);
   __ subu(scratch, at, scratch);
-  __ li(
-  __ sllv(
-  __ Subu(
-  __ And(
-  __ Branch(not_int32, ne,
+  __ li(src_mantissa, 1);
+  __ sllv(src_exponent, src_mantissa, scratch);
+  __ Subu(src_exponent, src_exponent, Operand(1));
+  __ And(src_exponent, dst, src_exponent);
+  __ Branch(not_int32, ne, src_exponent, Operand(zero_reg));
 }
 
 
@@ -1111,48 +1232,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
 // for "identity and not NaN".
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
-                                          Condition cc
-                                          bool never_nan_nan) {
+                                          Condition cc) {
   Label not_identical;
   Label heap_number, return_equal;
   Register exp_mask_reg = t5;
 
   __ Branch(&not_identical, ne, a0, Operand(a1));
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    //
-
-
-
-
-
-
-
-    __
-
-
-
-        } else {
-          // undefined >= undefined should fail.
-          __ li(v0, Operand(LESS));
-        }
-        __ Ret();
+  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+  // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+  // so we do the second best thing - test it ourselves.
+  // They are both equal and they are not both Smis so both of them are not
+  // Smis. If it's not a heap number, then return equal.
+  if (cc == less || cc == greater) {
+    __ GetObjectType(a0, t4, t4);
+    __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+  } else {
+    __ GetObjectType(a0, t4, t4);
+    __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+    // Comparing JS objects with <=, >= is complicated.
+    if (cc != eq) {
+      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+      // Normally here we fall through to return_equal, but undefined is
+      // special: (undefined == undefined) == true, but
+      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+      if (cc == less_equal || cc == greater_equal) {
+        __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+        __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+        __ Branch(&return_equal, ne, a0, Operand(t2));
+        if (cc == le) {
+          // undefined <= undefined should fail.
+          __ li(v0, Operand(GREATER));
+        } else {
+          // undefined >= undefined should fail.
+          __ li(v0, Operand(LESS));
        }
+        __ Ret();
      }
    }
  }
@@ -1168,46 +1284,44 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
     }
     __ Ret();
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
-      }
+  // For less and greater we don't have to check for NaN since the result of
+  // x < x is false regardless. For the others here is some code to check
+  // for NaN.
+  if (cc != lt && cc != gt) {
+    __ bind(&heap_number);
+    // It is a heap number, so return non-equal if it's NaN and equal if it's
+    // not NaN.
+
+    // The representation of NaN values has all exponent bits (52..62) set,
+    // and not all mantissa bits (0..51) clear.
+    // Read top bits of double representation (second word of value).
+    __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+    // Test that exponent bits are all set.
+    __ And(t3, t2, Operand(exp_mask_reg));
+    // If all bits not set (ne cond), then not a NaN, objects are equal.
+    __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+    // Shift out flag and all exponent bits, retaining only mantissa.
+    __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+    // Or with all low-bits of mantissa.
+    __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+    __ Or(v0, t3, Operand(t2));
+    // For equal we already have the right value in v0: Return zero (equal)
+    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+    // not (it's a NaN). For <= and >= we need to load v0 with the failing
+    // value if it's a NaN.
+    if (cc != eq) {
+      // All-zero means Infinity means equal.
+      __ Ret(eq, v0, Operand(zero_reg));
+      if (cc == le) {
+        __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+      } else {
+        __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
      }
-      __ Ret();
    }
-
+      __ Ret();
  }
+    // No fall through here.
 
   __ bind(&not_identical);
 }
@@ -1680,43 +1794,61 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
 }
 
 
-
-
-
-
-
-  Label
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+                                         Register input,
+                                         Register scratch,
+                                         CompareIC::State expected,
+                                         Label* fail) {
+  Label ok;
+  if (expected == CompareIC::SMI) {
+    __ JumpIfNotSmi(input, fail);
+  } else if (expected == CompareIC::HEAP_NUMBER) {
+    __ JumpIfSmi(input, &ok);
+    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+                DONT_DO_SMI_CHECK);
+  }
+  // We could be strict about symbol/string here, but as long as
+  // hydrogen doesn't care, the stub doesn't have to care either.
+  __ bind(&ok);
+}
 
 
-
-
-
-
-
-
-
-    __ subu(v0, a1, a0);
-    __ bind(&not_two_smis);
-  } else if (FLAG_debug_code) {
-    __ Or(a2, a1, a0);
-    __ And(a2, a2, kSmiTagMask);
-    __ Assert(ne, "CompareStub: unexpected smi operands.",
-              a2, Operand(zero_reg));
-  }
+// On entry a1 and a2 are the values to be compared.
+// On exit a0 is 0, positive or negative to indicate the result of
+// the comparison.
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+  Register lhs = a1;
+  Register rhs = a0;
+  Condition cc = GetCondition();
 
+  Label miss;
+  ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
+  ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
+
+  Label slow; // Call builtin.
+  Label not_smis, both_loaded_as_doubles;
+
+  Label not_two_smis, smi_done;
+  __ Or(a2, a1, a0);
+  __ JumpIfNotSmi(a2, &not_two_smis);
+  __ sra(a1, a1, 1);
+  __ sra(a0, a0, 1);
+  __ Ret(USE_DELAY_SLOT);
+  __ subu(v0, a1, a0);
+  __ bind(&not_two_smis);
 
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
   // Handle the case where the objects are identical. Either returns the answer
   // or goes to slow. Only falls through if the objects were not identical.
-  EmitIdenticalObjectComparison(masm, &slow,
+  EmitIdenticalObjectComparison(masm, &slow, cc);
 
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(0, Smi::FromInt(0));
-  __ And(t2,
+  __ And(t2, lhs, Operand(rhs));
   __ JumpIfNotSmi(t2, &not_smis, t0);
   // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
   // 1) Return the answer.
@@ -1726,8 +1858,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // In cases 3 and 4 we have found out we were dealing with a number-number
   // comparison and the numbers have been loaded into f12 and f14 as doubles,
   // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
-  EmitSmiNonsmiComparison(masm,
-                          &both_loaded_as_doubles, &slow,
+  EmitSmiNonsmiComparison(masm, lhs, rhs,
+                          &both_loaded_as_doubles, &slow, strict());
 
   __ bind(&both_loaded_as_doubles);
   // f12, f14 are the double representations of the left hand side
@@ -1763,7 +1895,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
     __ bind(&nan);
     // NaN comparisons always fail.
     // Load whatever we need in v0 to make the comparison fail.
-    if (
+    if (cc == lt || cc == le) {
       __ li(v0, Operand(GREATER));
     } else {
       __ li(v0, Operand(LESS));
@@ -1772,20 +1904,20 @@ void CompareStub::Generate(MacroAssembler* masm) {
   } else {
     // Checks for NaN in the doubles we have loaded. Can return the answer or
     // fall through if neither is a NaN. Also binds rhs_not_nan.
-    EmitNanCheck(masm,
+    EmitNanCheck(masm, cc);
 
     // Compares two doubles that are not NaNs. Returns the answer.
     // Never falls through.
-    EmitTwoNonNanDoubleComparison(masm,
+    EmitTwoNonNanDoubleComparison(masm, cc);
   }
 
   __ bind(&not_smis);
   // At this point we know we are dealing with two different objects,
   // and neither of them is a Smi. The objects are in lhs_ and rhs_.
-  if (
+  if (strict()) {
     // This returns non-equal for some object types, or falls through if it
     // was not lucky.
-    EmitStrictTwoHeapObjectCompare(masm,
+    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
   }
 
   Label check_for_symbols;
@@ -1795,38 +1927,38 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // that case. If the inputs are not doubles then jumps to check_for_symbols.
   // In this case a2 will contain the type of lhs_.
   EmitCheckForTwoHeapNumbers(masm,
-
-
+                             lhs,
+                             rhs,
                              &both_loaded_as_doubles,
                              &check_for_symbols,
                              &flat_string_check);
 
   __ bind(&check_for_symbols);
-  if (
+  if (cc == eq && !strict()) {
     // Returns an answer for two symbols or two detectable objects.
     // Otherwise jumps to string case or not both strings case.
     // Assumes that a2 is the type of lhs_ on entry.
-    EmitCheckForSymbolsOrObjects(masm,
+    EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow);
   }
 
   // Check for both being sequential ASCII strings, and inline if that is the
   // case.
   __ bind(&flat_string_check);
 
-  __ JumpIfNonSmisNotBothSequentialAsciiStrings(
+  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
 
   __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
-  if (
+  if (cc == eq) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
-
-
+                                                     lhs,
+                                                     rhs,
                                                      a2,
                                                      a3,
                                                      t0);
   } else {
     StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
-
-
+                                                       lhs,
+                                                       rhs,
                                                        a2,
                                                        a3,
                                                        t0,
@@ -1837,18 +1969,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
   __ bind(&slow);
   // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
   // a1 (rhs) second.
-  __ Push(
+  __ Push(lhs, rhs);
   // Figure out which native to call and setup the arguments.
   Builtins::JavaScript native;
-  if (
-    native =
+  if (cc == eq) {
+    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   } else {
     native = Builtins::COMPARE;
     int ncr; // NaN compare result.
-    if (
+    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
-      ASSERT(
+      ASSERT(cc == gt || cc == ge); // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
@@ -1858,6 +1990,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
@@ -2298,20 +2433,23 @@ void UnaryOpStub::GenerateGenericCodeFallback(
 }
 
 
+void BinaryOpStub::Initialize() {
+  platform_specific_bit_ = CpuFeatures::IsSupported(FPU);
+}
+
+
 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   Label get_result;
 
   __ Push(a1, a0);
 
   __ li(a2, Operand(Smi::FromInt(MinorKey())));
-  __
-  __ li(a0, Operand(Smi::FromInt(operands_type_)));
-  __ Push(a2, a1, a0);
+  __ push(a2);
 
   __ TailCallExternalReference(
       ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
-
+      3,
       1);
 }
 
@@ -2322,59 +2460,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
 }
 
 
-void
-
-  // generation code does not use any raw pointers.
-  AllowStubCallsScope allow_stub_calls(masm, true);
-  switch (operands_type_) {
-    case BinaryOpIC::UNINITIALIZED:
-      GenerateTypeTransition(masm);
-      break;
-    case BinaryOpIC::SMI:
-      GenerateSmiStub(masm);
-      break;
-    case BinaryOpIC::INT32:
-      GenerateInt32Stub(masm);
-      break;
-    case BinaryOpIC::HEAP_NUMBER:
-      GenerateHeapNumberStub(masm);
-      break;
-    case BinaryOpIC::ODDBALL:
-      GenerateOddballStub(masm);
-      break;
-    case BinaryOpIC::BOTH_STRING:
-      GenerateBothStringStub(masm);
-      break;
-    case BinaryOpIC::STRING:
-      GenerateStringStub(masm);
-      break;
-    case BinaryOpIC::GENERIC:
-      GenerateGeneric(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-  stream->Add("BinaryOpStub_%s_%s_%s",
-              op_name,
-              overwrite_name,
-              BinaryOpIC::GetName(operands_type_));
-}
-
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+                                          Token::Value op) {
   Register left = a1;
   Register right = a0;
 
@@ -2385,7 +2472,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
   STATIC_ASSERT(kSmiTag == 0);
 
   Label not_smi_result;
-  switch (
+  switch (op) {
     case Token::ADD:
       __ AdduAndCheckForOverflow(v0, left, right, scratch1);
       __ RetOnNoOverflow(scratch1);
@@ -2528,10 +2615,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
 }
 
 
-void
-
-
-
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+                                               Register result,
+                                               Register heap_number_map,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required,
+                                               OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+                                      BinaryOpIC::TypeInfo left_type,
+                                      BinaryOpIC::TypeInfo right_type,
+                                      bool smi_operands,
+                                      Label* not_numbers,
+                                      Label* gc_required,
+                                      Label* miss,
+                                      Token::Value op,
+                                      OverwriteMode mode) {
   Register left = a1;
   Register right = a0;
   Register scratch1 = t3;
@@ -2539,15 +2640,21 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
   Register scratch3 = t0;
 
   ASSERT(smi_operands || (not_numbers != NULL));
-  if (smi_operands
-    __
-    __
+  if (smi_operands) {
+    __ AssertSmi(left);
+    __ AssertSmi(right);
+  }
+  if (left_type == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(left, miss);
+  }
+  if (right_type == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(right, miss);
   }
 
   Register heap_number_map = t2;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
-  switch (
+  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
@@ -2557,25 +2664,44 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
      // depending on whether FPU is available or not.
      FloatingPointHelper::Destination destination =
          CpuFeatures::IsSupported(FPU) &&
-
+          op != Token::MOD ?
              FloatingPointHelper::kFPURegisters :
              FloatingPointHelper::kCoreRegisters;
 
      // Allocate new heap number for result.
      Register result = s0;
-
-          masm, result, heap_number_map, scratch1, scratch2, gc_required);
+      BinaryOpStub_GenerateHeapResultAllocation(
+          masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
 
      // Load the operands.
      if (smi_operands) {
        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
      } else {
-
-
-
-
-
-
+        // Load right operand to f14 or a2/a3.
+        if (right_type == BinaryOpIC::INT32) {
+          FloatingPointHelper::LoadNumberAsInt32Double(
+              masm, right, destination, f14, f16, a2, a3, heap_number_map,
+              scratch1, scratch2, f2, miss);
+        } else {
+          Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss
+                                                                : not_numbers;
+          FloatingPointHelper::LoadNumber(
+              masm, destination, right, f14, a2, a3, heap_number_map,
+              scratch1, scratch2, fail);
+        }
+        // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
+        // jumps to |miss|.
+        if (left_type == BinaryOpIC::INT32) {
+          FloatingPointHelper::LoadNumberAsInt32Double(
+              masm, left, destination, f12, f16, a0, a1, heap_number_map,
+              scratch1, scratch2, f2, miss);
+        } else {
+          Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss
+                                                               : not_numbers;
+          FloatingPointHelper::LoadNumber(
+              masm, destination, left, f12, a0, a1, heap_number_map,
+              scratch1, scratch2, fail);
+        }
      }
 
      // Calculate the result.
@@ -2584,7 +2710,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
        // f12: Left value.
        // f14: Right value.
        CpuFeatures::Scope scope(FPU);
-        switch (
+        switch (op) {
          case Token::ADD:
            __ add_d(f10, f12, f14);
            break;
@@ -2610,7 +2736,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
      } else {
        // Call the C function to handle the double operation.
        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
-
+                                                         op,
                                                          result,
                                                          scratch1);
        if (FLAG_debug_code) {
@@ -2650,7 +2776,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
                                                 not_numbers);
      }
      Label result_not_a_smi;
-      switch (
+      switch (op) {
        case Token::BIT_OR:
          __ Or(a2, a3, Operand(a2));
          break;
@@ -2700,8 +2826,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
        __ AllocateHeapNumber(
            result, scratch1, scratch2, heap_number_map, gc_required);
      } else {
-
-            masm, result, heap_number_map, scratch1, scratch2, gc_required
+        BinaryOpStub_GenerateHeapResultAllocation(
+            masm, result, heap_number_map, scratch1, scratch2, gc_required,
+            mode);
      }
 
      // a2: Answer as signed int32.
@@ -2716,7 +2843,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
        // mentioned above SHR needs to always produce a positive result.
        CpuFeatures::Scope scope(FPU);
        __ mtc1(a2, f0);
-        if (
+        if (op == Token::SHR) {
          __ Cvt_d_uw(f0, f0, f22);
        } else {
          __ cvt_d_w(f0, f0);
@@ -2743,12 +2870,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
 // Generate the smi code. If the operation on smis are successful this return is
 // generated. If the result is not a smi and heap number allocation is not
 // requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the
-void
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub_GenerateSmiCode(
     MacroAssembler* masm,
     Label* use_runtime,
     Label* gc_required,
-
+    Token::Value op,
+    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+    OverwriteMode mode) {
   Label not_smis;
 
   Register left = a1;
@@ -2761,12 +2890,14 @@ void BinaryOpStub::GenerateSmiCode(
   __ JumpIfNotSmi(scratch1, &not_smis);
 
   // If the smi-smi operation results in a smi return is generated.
-
+  BinaryOpStub_GenerateSmiSmiOperation(masm, op);
 
   // If heap number results are possible generate the result in an allocated
   // heap number.
-  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
-
+  if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+    BinaryOpStub_GenerateFPOperation(
+        masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+        use_runtime, gc_required, &not_smis, op, mode);
  }
  __ bind(&not_smis);
 }
@@ -2778,14 +2909,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   if (result_type_ == BinaryOpIC::UNINITIALIZED ||
       result_type_ == BinaryOpIC::SMI) {
     // Only allow smi results.
-
+    BinaryOpStub_GenerateSmiCode(
+        masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
   } else {
     // Allow heap number result and don't make a transition if a heap number
     // cannot be allocated.
-
-
-
-        ALLOW_HEAPNUMBER_RESULTS);
+    BinaryOpStub_GenerateSmiCode(
+        masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
+        mode_);
   }
 
   // Code falls through if the result is not returned as either a smi or heap
@@ -2793,22 +2924,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   GenerateTypeTransition(masm);
 
   __ bind(&call_runtime);
+  GenerateRegisterArgsPush(masm);
   GenerateCallRuntime(masm);
 }
 
 
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == BinaryOpIC::STRING);
-  // Try to add arguments as strings, otherwise, transition to the generic
-  // BinaryOpIC type.
-  GenerateAddStrings(masm);
-  GenerateTypeTransition(masm);
-}
-
-
 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
   Label call_runtime;
-  ASSERT(
+  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   // If both arguments are strings, call the string add stub.
   // Otherwise, do a transition.
@@ -2837,7 +2960,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
 
 
 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  ASSERT(
+  ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
 
   Register left = a1;
   Register right = a0;
@@ -2860,7 +2983,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   Label skip;
   __ Or(scratch1, left, right);
   __ JumpIfNotSmi(scratch1, &skip);
-
+  BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
   // Fall through if the result is not a smi.
   __ bind(&skip);
 
@@ -2870,6 +2993,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
     case Token::MUL:
     case Token::DIV:
     case Token::MOD: {
+      // It could be that only SMIs have been seen at either the left
+      // or the right operand. For precise type feedback, patch the IC
+      // again if this changes.
+      if (left_type_ == BinaryOpIC::SMI) {
+        __ JumpIfNotSmi(left, &transition);
+      }
+      if (right_type_ == BinaryOpIC::SMI) {
+        __ JumpIfNotSmi(right, &transition);
+      }
      // Load both operands and check that they are 32-bit integer.
      // Jump to type transition if they are not. The registers a0 and a1 (right
      // and left) are preserved for the runtime call.
@@ -2882,6 +3014,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                              right,
                                              destination,
                                              f14,
+                                             f16,
                                              a2,
                                              a3,
                                              heap_number_map,
@@ -2893,6 +3026,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                              left,
                                              destination,
                                              f12,
+                                             f16,
                                              t0,
                                              t1,
                                              heap_number_map,
@@ -2929,9 +3063,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
 
        Register except_flag = scratch2;
        __ EmitFPUTruncate(kRoundToZero,
-                           single_scratch,
-                           f10,
                            scratch1,
+                           f10,
+                           at,
+                           f16,
                            except_flag);
 
        if (result_type_ <= BinaryOpIC::INT32) {
@@ -2940,7 +3075,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
        }
 
        // Check if the result fits in a smi.
-        __ mfc1(scratch1, single_scratch);
        __ Addu(scratch2, scratch1, Operand(0x40000000));
        // If not try to return a heap number.
        __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
@@ -2966,12 +3100,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                : BinaryOpIC::INT32)) {
          // We are using FPU registers so s0 is available.
          heap_number_result = s0;
-
-
-
-
-
-
+          BinaryOpStub_GenerateHeapResultAllocation(masm,
+                                                    heap_number_result,
+                                                    heap_number_map,
+                                                    scratch1,
+                                                    scratch2,
+                                                    &call_runtime,
+                                                    mode_);
          __ mov(v0, heap_number_result);
          __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
          __ Ret();
@@ -2989,12 +3124,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
 
        // Allocate a heap number to store the result.
        heap_number_result = s0;
-
-
-
-
-
-
+        BinaryOpStub_GenerateHeapResultAllocation(masm,
+                                                  heap_number_result,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  &pop_and_call_runtime,
+                                                  mode_);
 
        // Load the left value from the value saved on the stack.
        __ Pop(a1, a0);
@@ -3033,6 +3169,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                             scratch2,
                                             scratch3,
                                             f0,
+                                             f2,
                                             &transition);
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             right,
@@ -3042,6 +3179,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
                                             scratch2,
                                             scratch3,
                                             f0,
+                                             f2,
                                             &transition);
 
      // The ECMA-262 standard specifies that, for shift operations, only the
@@ -3103,12 +3241,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
 
      __ bind(&return_heap_number);
      heap_number_result = t1;
-
-
-
-
-
-
+      BinaryOpStub_GenerateHeapResultAllocation(masm,
+                                                heap_number_result,
+                                                heap_number_map,
+                                                scratch1,
+                                                scratch2,
+                                                &call_runtime,
+                                                mode_);
 
      if (CpuFeatures::IsSupported(FPU)) {
        CpuFeatures::Scope scope(FPU);
@@ -3130,7 +3269,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
      } else {
        // Tail call that writes the int32 in a2 to the heap number in v0, using
        // a3 and a0 as scratch. v0 is preserved and returned.
-        __ mov(
+        __ mov(v0, t1);
        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
        __ TailCallStub(&stub);
      }
@@ -3152,6 +3291,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   }
 
   __ bind(&call_runtime);
+  GenerateRegisterArgsPush(masm);
   GenerateCallRuntime(masm);
 }
 
@@ -3190,20 +3330,32 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
|
|
3190
3330
|
|
3191
3331
|
|
3192
3332
|
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
|
3193
|
-
Label call_runtime;
|
3194
|
-
|
3333
|
+
Label call_runtime, transition;
|
3334
|
+
BinaryOpStub_GenerateFPOperation(
|
3335
|
+
masm, left_type_, right_type_, false,
|
3336
|
+
&transition, &call_runtime, &transition, op_, mode_);
|
3337
|
+
|
3338
|
+
__ bind(&transition);
|
3339
|
+
GenerateTypeTransition(masm);
|
3195
3340
|
|
3196
3341
|
__ bind(&call_runtime);
|
3342
|
+
GenerateRegisterArgsPush(masm);
|
3197
3343
|
GenerateCallRuntime(masm);
|
3198
3344
|
}
|
3199
3345
|
|
3200
3346
|
|
3201
3347
|
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
|
3202
|
-
Label call_runtime, call_string_add_or_runtime;
|
3348
|
+
Label call_runtime, call_string_add_or_runtime, transition;
|
3203
3349
|
|
3204
|
-
|
3350
|
+
BinaryOpStub_GenerateSmiCode(
|
3351
|
+
masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
|
3205
3352
|
|
3206
|
-
|
3353
|
+
BinaryOpStub_GenerateFPOperation(
|
3354
|
+
masm, left_type_, right_type_, false,
|
3355
|
+
&call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
|
3356
|
+
|
3357
|
+
__ bind(&transition);
|
3358
|
+
GenerateTypeTransition(masm);
|
3207
3359
|
|
3208
3360
|
__ bind(&call_string_add_or_runtime);
|
3209
3361
|
if (op_ == Token::ADD) {
|
@@ -3211,6 +3363,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
|
|
3211
3363
|
}
|
3212
3364
|
|
3213
3365
|
__ bind(&call_runtime);
|
3366
|
+
GenerateRegisterArgsPush(masm);
|
3214
3367
|
GenerateCallRuntime(masm);
|
3215
3368
|
}
|
3216
3369
|
|
@@ -3246,63 +3399,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}


- void
-
-
-
-
-
-
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
- }
-
-
- void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-
+ void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(a0) && !result.is(a1));

- if (
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand =
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3315,7 +3425,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(
__ mov(result, overwritable_operand);
__ bind(&allocated);
} else {
- ASSERT(
+ ASSERT(mode == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
@@ -3453,23 +3563,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1,
1);
} else {
-
+ ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU);

Label no_update;
Label skip_cache;

// Call C function to calculate the result and update the cache.
- //
- //
- //
- __ Push(
+ // a0: precalculated cache entry address.
+ // a2 and a3: parts of the double value.
+ // Store a0, a2 and a3 on stack for later before calling C function.
+ __ Push(a3, a2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);

// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ Pop(
+ __ Pop(a3, a2, cache_entry);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3636,9 +3746,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
- double_exponent,
scratch,
+ double_exponent,
+ at,
+ double_scratch,
scratch2,
kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
@@ -3696,7 +3807,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2,
+ __ PrepareCallCFunction(0, 2, scratch2);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
@@ -3707,7 +3818,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);

__ bind(&int_exponent_convert);
- __ mfc1(scratch, single_scratch);
}

// Calculate power with integer exponent.
@@ -3817,12 +3927,29 @@ void CodeStub::GenerateStubsAheadOfTime() {


void CodeStub::GenerateFPStubs() {
-
-
-
-
- stub
-
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+ // These stubs might already be in the snapshot, detect that and don't
+ // regenerate, which would lead to code stub initialization state being messed
+ // up.
+ Code* save_doubles_code = NULL;
+ Code* store_buffer_overflow_code = NULL;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope2(FPU);
+ save_doubles_code = *save_doubles.GetCode();
+ store_buffer_overflow_code = *stub.GetCode();
+ } else {
+ save_doubles_code = *save_doubles.GetCode();
+ store_buffer_overflow_code = *stub.GetCode();
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ }
+ ISOLATE->set_fp_stubs_generated(true);
}


@@ -3833,6 +3960,17 @@ void CEntryStub::GenerateAheadOfTime() {
}


+ static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ andi(scratch, value, 0xf);
+ __ Branch(oom_label, eq, scratch, Operand(0xf));
+ }
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -3939,14 +4077,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ Branch(&retry, eq, t0, Operand(zero_reg));

// Special handling of out of memory exceptions.
-
- __ Branch(USE_DELAY_SLOT,
- throw_out_of_memory_exception,
- eq,
- v0,
- Operand(reinterpret_cast<int32_t>(out_of_memory)));
- // If we throw the OOM exception, the value of a3 doesn't matter.
- // Any instruction can be in the delay slot that's not a jump.
+ JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);

// Retrieve the pending exception and clear the variable.
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
@@ -4033,13 +4164,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
isolate);
- __ li(a0, Operand(false, RelocInfo::
+ __ li(a0, Operand(false, RelocInfo::NONE32));
__ li(a2, Operand(external_caught));
__ sw(a0, MemOperand(a2));

// Set pending exception and v0 to out of memory exception.
-
+ Label already_have_failure;
+ JumpIfOOM(masm, v0, t0, &already_have_failure);
+ Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
__ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ bind(&already_have_failure);
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(a2));
@@ -4410,6 +4544,165 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}


+ void ArrayLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->length_symbol()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadArrayLength(masm, receiver, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+ }
+
+
+ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->prototype_symbol()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+ }
+
+
+ void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->length_symbol()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
+ support_wrapper_);
+
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+ }
+
+
+ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a1,
+ Operand(masm->isolate()->factory()->length_symbol()));
+ receiver = a2;
+ value = a0;
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : key
+ // -----------------------------------
+ receiver = a1;
+ value = a0;
+ }
+ Register scratch = a3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ GetObjectType(scratch, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&miss, eq, scratch, Operand(at));
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::GenerateStoreMiss(masm, kind());
+ }
+
+
Register InstanceofStub::left() { return a0; }

@@ -4566,14 +4859,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {

// v0 = address of new object(s) (tagged)
// a2 = argument count (tagged)
- // Get the arguments boilerplate from the current
+ // Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);

- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::
- __ lw(t0, FieldMemOperand(t0, GlobalObject::
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
@@ -4761,9 +5054,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT |
SIZE_IN_WORDS));

- // Get the arguments boilerplate from the current
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::
- __ lw(t0, FieldMemOperand(t0, GlobalObject::
+ // Get the arguments boilerplate from the current native context.
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(t0, Context::SlotOffset(
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));

@@ -4897,7 +5190,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ Addu(a2, a2, Operand(2)); // a2 was a smi.
// Check that the static offsets vector buffer is large enough.
- __ Branch(
+ __ Branch(
+ &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));

// a2: Number of capture registers
// regexp_data: RegExp data (FixedArray)
@@ -5010,7 +5304,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// a0: Instance type of subject string
STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(
+ STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
@@ -5249,7 +5543,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize ==
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -5296,10 +5590,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
- __ lw(a2, ContextOperand(cp, Context::
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ Addu(a3, v0, Operand(JSRegExpResult::kSize));
__ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
__ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
__ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
__ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
@@ -5324,12 +5618,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set FixedArray length.
__ sll(t2, t1, kSmiTagSize);
__ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with
- __
+ // Fill contents of fixed-array with undefined.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // Fill fixed array elements with
+ // Fill fixed array elements with undefined.
// v0: JSArray, tagged.
- // a2:
+ // a2: undefined.
// a3: Start of elements in FixedArray.
// t1: Number of elements to fill.
Label loop;
@@ -5408,7 +5702,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&call, ne, t0, Operand(at));
// Patch the receiver on the stack with the global receiver object.
- __ lw(a3,
+ __ lw(a3,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
__ sw(a3, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
@@ -5460,8 +5755,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Check for function proxy.
__ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
__ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::
- __ li(a2, Operand(0, RelocInfo::
+ __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
__ SetCallKind(t1, CALL_AS_METHOD);
{
@@ -5518,52 +5813,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
- __ li(a2, Operand(0, RelocInfo::
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}


- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
- void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
- stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
- }
-
-
- int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(lhs_.is(a0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeSmiCompareField::encode(include_smi_compare_);
- }
-
-
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5674,11 +5930,11 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {

STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ And(t0,
code_,
Operand(kSmiTagMask |
- ((~String::
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ Branch(&slow_case_, ne, t0, Operand(zero_reg));

__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -5997,7 +6253,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,

// Check if the two characters match.
// Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate,
+ __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
__ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
__ bind(&next_probe[i]);
}
@@ -6176,7 +6432,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask &
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
@@ -6214,12 +6470,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {

__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize ==
- __ Addu(t1, t1, Operand(
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

__ bind(&allocate_result);
// Sequential acii string. Allocate the result.
- STATIC_ASSERT((
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));

@@ -6230,13 +6486,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Addu(t1, t1, a3);

// Locate first character of result.
- __ Addu(a1, v0, Operand(
+ __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

// v0: result string
// a1: first character of result string
// a2: result string length
// t1: first character of substring to copy
- STATIC_ASSERT((
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_v0);
@@ -6368,7 +6624,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ Addu(scratch1, length,
- Operand(
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ Addu(left, left, Operand(scratch1));
__ Addu(right, right, Operand(scratch1));
__ Subu(length, zero_reg, length);
@@ -6523,8 +6779,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);

// Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0,
- __ lbu(a3, FieldMemOperand(a1,
+ __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));

// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -6542,7 +6798,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode).
__ li(t2, Operand(2));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0,
+ __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);

@@ -6589,11 +6845,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ And(at, t0, Operand(kAsciiDataHintMask));
__ and_(at, at, t1);
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
-
-
-
- __
-
+ __ Xor(t0, t0, Operand(t1));
+ STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
+ __ And(t0, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
+ __ Branch(&ascii_data, eq, t0,
+ Operand(kOneByteStringTag | kAsciiDataHintTag));

// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
@@ -6626,11 +6882,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t0, Operand(kStringRepresentationMask));

- STATIC_ASSERT(
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_first_add;
__ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0,
+ __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_first_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6641,11 +6897,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {

STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_second_add;
__ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1,
+ __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_second_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6666,7 +6922,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));

__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(
+ __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string.
// t3: first character of first string.
// a1: first character of second string
@@ -6754,7 +7010,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
@@ -6776,18 +7032,18 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {


void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);

Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ And(a2, a1, Operand(a0));
- __ JumpIfSmi(a2, &generic_stub);

-
-
-
-
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a0, &miss);
+ }

// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or FPU is unsupported.
@@ -6795,10 +7051,33 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
CpuFeatures::Scope scope(FPU);

// Load left and right operand.
-
- __
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(a0, &right_smi);
+ __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
__ Subu(a2, a0, Operand(kHeapObjectTag));
__ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(a2, a0); // Can't clobber a0 yet.
+ FPURegister single_scratch = f6;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f2, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(a1, &left_smi);
+ __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(a2, a1); // Can't clobber a1 yet.
+ single_scratch = f8;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f0, single_scratch);
+
+ __ bind(&done);

// Return a result of -1, 0, or 1, or use CompareStub for NaNs.
Label fpu_eq, fpu_lt;
@@ -6822,15 +7101,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}

__ bind(&unordered);
-
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
+ __ JumpIfSmi(a1, &unordered);
__ GetObjectType(a1, a2, a2);
__ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ jmp(&unordered);
@@ -6848,7 +7128,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {


void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::
+ ASSERT(state_ == CompareIC::SYMBOL);
Label miss;

// Registers containing left and right operands respectively.
@@ -6886,7 +7166,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::
+ ASSERT(state_ == CompareIC::STRING);
Label miss;

bool equality = Token::IsEqualityOp(op_);
@@ -6971,7 +7251,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
@@ -7183,8 +7463,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(!name.is(scratch1));
ASSERT(!name.is(scratch2));

-
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);

// Compute the capacity mask.
__ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
@@ -7380,6 +7659,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
{ REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
+ // FastNewClosureStub::Generate
+ { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7428,6 +7709,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
}


+ bool CodeStub::CanUseFPRegisters() {
+ return CpuFeatures::IsSupported(FPU);
+ }
+
+
// Takes the input in 3 registers: address_ value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
@@ -7520,12 +7806,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(a0));
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
-
- __ Move(a1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ lw(a1, MemOperand(address, 0));
- }
+ __ Move(a1, address);
__ li(a2, Operand(ExternalReference::isolate_address()));

AllowExternalCallThatCantCauseGC scope(masm);
@@ -7553,6 +7834,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;

+ __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ lw(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
+ __ sw(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
+
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -7677,13 +7968,85 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3,
+ __ StoreNumberToDoubleElements(a0, a3,
+ // Overwrites all regs after this.
+ t1, t2, t3, t5, a2,
&slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}


+ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ ASSERT(!Serializer::enabled());
+ bool save_fp_regs = CpuFeatures::IsSupported(FPU);
+ CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ Ret();
+ }
+
+
+ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ __ push(ra);
+ __ CallStub(&stub);
+ __ pop(ra);
+ }
+ }
+
+
+ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push ra" instruction, followed by a call.
+ // Note: on MIPS "push" is 2 instruction
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+
+ // Save live volatile registers.
+ __ Push(ra, t1, a1);
+ const int32_t kNumSavedRegs = 3;
+
+ // Compute the function's address for the first argument.
+ __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(t1, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ And(sp, sp, Operand(-frame_alignment));
+ }
+
+ #if defined(V8_HOST_ARCH_MIPS)
+ __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+ __ lw(at, MemOperand(at));
+ #else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ Address trampoline_address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(EntryHookTrampoline));
+ ApiFunction dispatcher(trampoline_address);
+ __ li(at, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+ #endif
+ __ Call(at);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, t1);
+ }
+
+ __ Pop(ra, t1, a1);
+ __ Ret();
+ }
+
+
#undef __

} } // namespace v8::internal