mustang 0.0.1 → 0.1.0
- data/.rspec +1 -0
- data/Isolate +9 -0
- data/README.md +6 -12
- data/Rakefile +30 -4
- data/TODO.md +9 -0
- data/ext/v8/extconf.rb +56 -0
- data/ext/v8/v8.cpp +37 -0
- data/ext/v8/v8_array.cpp +161 -0
- data/ext/v8/v8_array.h +17 -0
- data/ext/v8/v8_base.cpp +147 -0
- data/ext/v8/v8_base.h +23 -0
- data/ext/v8/v8_cast.cpp +151 -0
- data/ext/v8/v8_cast.h +64 -0
- data/ext/v8/v8_context.cpp +174 -0
- data/ext/v8/v8_context.h +12 -0
- data/ext/v8/v8_date.cpp +61 -0
- data/ext/v8/v8_date.h +16 -0
- data/ext/v8/v8_errors.cpp +147 -0
- data/ext/v8/v8_errors.h +19 -0
- data/ext/v8/v8_external.cpp +66 -0
- data/ext/v8/v8_external.h +16 -0
- data/ext/v8/v8_function.cpp +182 -0
- data/ext/v8/v8_function.h +14 -0
- data/ext/v8/v8_integer.cpp +70 -0
- data/ext/v8/v8_integer.h +16 -0
- data/ext/v8/v8_macros.h +30 -0
- data/ext/v8/v8_main.cpp +53 -0
- data/ext/v8/v8_main.h +13 -0
- data/ext/v8/v8_number.cpp +62 -0
- data/ext/v8/v8_number.h +16 -0
- data/ext/v8/v8_object.cpp +172 -0
- data/ext/v8/v8_object.h +17 -0
- data/ext/v8/v8_ref.cpp +72 -0
- data/ext/v8/v8_ref.h +43 -0
- data/ext/v8/v8_regexp.cpp +148 -0
- data/ext/v8/v8_regexp.h +16 -0
- data/ext/v8/v8_string.cpp +78 -0
- data/ext/v8/v8_string.h +16 -0
- data/ext/v8/v8_value.cpp +370 -0
- data/ext/v8/v8_value.h +19 -0
- data/gemspec.yml +2 -1
- data/lib/core_ext/class.rb +14 -0
- data/lib/core_ext/object.rb +12 -0
- data/lib/core_ext/symbol.rb +23 -0
- data/lib/mustang.rb +44 -0
- data/lib/mustang/context.rb +69 -0
- data/lib/mustang/errors.rb +36 -0
- data/lib/support/delegated.rb +25 -0
- data/lib/v8/array.rb +21 -0
- data/lib/v8/context.rb +13 -0
- data/lib/v8/date.rb +20 -0
- data/lib/v8/error.rb +15 -0
- data/lib/v8/external.rb +16 -0
- data/lib/v8/function.rb +11 -0
- data/lib/v8/integer.rb +16 -0
- data/lib/v8/number.rb +16 -0
- data/lib/v8/object.rb +66 -0
- data/lib/v8/regexp.rb +23 -0
- data/lib/v8/string.rb +27 -0
- data/mustang.gemspec +3 -0
- data/spec/core_ext/class_spec.rb +19 -0
- data/spec/core_ext/object_spec.rb +19 -0
- data/spec/core_ext/symbol_spec.rb +27 -0
- data/spec/fixtures/test1.js +2 -0
- data/spec/fixtures/test2.js +2 -0
- data/spec/spec_helper.rb +20 -0
- data/spec/v8/array_spec.rb +88 -0
- data/spec/v8/cast_spec.rb +151 -0
- data/spec/v8/context_spec.rb +78 -0
- data/spec/v8/data_spec.rb +39 -0
- data/spec/v8/date_spec.rb +45 -0
- data/spec/v8/empty_spec.rb +27 -0
- data/spec/v8/errors_spec.rb +142 -0
- data/spec/v8/external_spec.rb +44 -0
- data/spec/v8/function_spec.rb +170 -0
- data/spec/v8/integer_spec.rb +41 -0
- data/spec/v8/main_spec.rb +18 -0
- data/spec/v8/null_spec.rb +27 -0
- data/spec/v8/number_spec.rb +40 -0
- data/spec/v8/object_spec.rb +79 -0
- data/spec/v8/primitive_spec.rb +9 -0
- data/spec/v8/regexp_spec.rb +65 -0
- data/spec/v8/string_spec.rb +48 -0
- data/spec/v8/undefined_spec.rb +27 -0
- data/spec/v8/value_spec.rb +215 -0
- data/vendor/v8/.gitignore +2 -0
- data/vendor/v8/AUTHORS +3 -1
- data/vendor/v8/ChangeLog +117 -0
- data/vendor/v8/SConstruct +334 -53
- data/vendor/v8/include/v8-debug.h +21 -11
- data/vendor/v8/include/v8-preparser.h +1 -1
- data/vendor/v8/include/v8-profiler.h +122 -43
- data/vendor/v8/include/v8-testing.h +5 -0
- data/vendor/v8/include/v8.h +171 -17
- data/vendor/v8/preparser/SConscript +38 -0
- data/vendor/v8/preparser/preparser-process.cc +77 -114
- data/vendor/v8/samples/shell.cc +232 -46
- data/vendor/v8/src/SConscript +29 -5
- data/vendor/v8/src/accessors.cc +70 -211
- data/vendor/v8/{test/cctest/test-mips.cc → src/allocation-inl.h} +15 -18
- data/vendor/v8/src/allocation.cc +0 -82
- data/vendor/v8/src/allocation.h +9 -42
- data/vendor/v8/src/api.cc +1645 -1156
- data/vendor/v8/src/api.h +76 -12
- data/vendor/v8/src/apiutils.h +0 -7
- data/vendor/v8/src/arguments.h +15 -4
- data/vendor/v8/src/arm/assembler-arm-inl.h +10 -9
- data/vendor/v8/src/arm/assembler-arm.cc +62 -23
- data/vendor/v8/src/arm/assembler-arm.h +76 -11
- data/vendor/v8/src/arm/builtins-arm.cc +39 -33
- data/vendor/v8/src/arm/code-stubs-arm.cc +1182 -402
- data/vendor/v8/src/arm/code-stubs-arm.h +20 -54
- data/vendor/v8/src/arm/codegen-arm.cc +159 -106
- data/vendor/v8/src/arm/codegen-arm.h +6 -6
- data/vendor/v8/src/arm/constants-arm.h +16 -1
- data/vendor/v8/src/arm/cpu-arm.cc +7 -5
- data/vendor/v8/src/arm/debug-arm.cc +6 -4
- data/vendor/v8/src/arm/deoptimizer-arm.cc +51 -14
- data/vendor/v8/src/arm/disasm-arm.cc +47 -15
- data/vendor/v8/src/arm/frames-arm.h +1 -1
- data/vendor/v8/src/arm/full-codegen-arm.cc +724 -408
- data/vendor/v8/src/arm/ic-arm.cc +90 -85
- data/vendor/v8/src/arm/lithium-arm.cc +140 -69
- data/vendor/v8/src/arm/lithium-arm.h +161 -46
- data/vendor/v8/src/arm/lithium-codegen-arm.cc +567 -297
- data/vendor/v8/src/arm/lithium-codegen-arm.h +21 -9
- data/vendor/v8/src/arm/lithium-gap-resolver-arm.cc +2 -0
- data/vendor/v8/src/arm/macro-assembler-arm.cc +457 -96
- data/vendor/v8/src/arm/macro-assembler-arm.h +115 -18
- data/vendor/v8/src/arm/regexp-macro-assembler-arm.cc +20 -13
- data/vendor/v8/src/arm/regexp-macro-assembler-arm.h +1 -0
- data/vendor/v8/src/arm/simulator-arm.cc +184 -101
- data/vendor/v8/src/arm/simulator-arm.h +26 -21
- data/vendor/v8/src/arm/stub-cache-arm.cc +450 -467
- data/vendor/v8/src/arm/virtual-frame-arm.cc +14 -12
- data/vendor/v8/src/arm/virtual-frame-arm.h +11 -8
- data/vendor/v8/src/array.js +35 -18
- data/vendor/v8/src/assembler.cc +186 -92
- data/vendor/v8/src/assembler.h +106 -69
- data/vendor/v8/src/ast-inl.h +5 -0
- data/vendor/v8/src/ast.cc +46 -35
- data/vendor/v8/src/ast.h +107 -50
- data/vendor/v8/src/atomicops.h +2 -0
- data/vendor/v8/src/atomicops_internals_mips_gcc.h +169 -0
- data/vendor/v8/src/bootstrapper.cc +649 -399
- data/vendor/v8/src/bootstrapper.h +94 -27
- data/vendor/v8/src/builtins.cc +359 -227
- data/vendor/v8/src/builtins.h +157 -123
- data/vendor/v8/src/checks.cc +2 -2
- data/vendor/v8/src/checks.h +4 -0
- data/vendor/v8/src/code-stubs.cc +27 -17
- data/vendor/v8/src/code-stubs.h +38 -17
- data/vendor/v8/src/codegen-inl.h +5 -1
- data/vendor/v8/src/codegen.cc +27 -17
- data/vendor/v8/src/codegen.h +9 -9
- data/vendor/v8/src/compilation-cache.cc +92 -206
- data/vendor/v8/src/compilation-cache.h +205 -30
- data/vendor/v8/src/compiler.cc +107 -120
- data/vendor/v8/src/compiler.h +17 -2
- data/vendor/v8/src/contexts.cc +22 -15
- data/vendor/v8/src/contexts.h +14 -8
- data/vendor/v8/src/conversions.cc +86 -30
- data/vendor/v8/src/counters.cc +19 -4
- data/vendor/v8/src/counters.h +28 -16
- data/vendor/v8/src/cpu-profiler-inl.h +4 -3
- data/vendor/v8/src/cpu-profiler.cc +123 -72
- data/vendor/v8/src/cpu-profiler.h +33 -19
- data/vendor/v8/src/cpu.h +2 -0
- data/vendor/v8/src/d8-debug.cc +3 -3
- data/vendor/v8/src/d8-debug.h +7 -6
- data/vendor/v8/src/d8-posix.cc +2 -0
- data/vendor/v8/src/d8.cc +22 -12
- data/vendor/v8/src/d8.gyp +3 -0
- data/vendor/v8/src/d8.js +618 -0
- data/vendor/v8/src/data-flow.h +3 -3
- data/vendor/v8/src/dateparser.h +4 -2
- data/vendor/v8/src/debug-agent.cc +10 -9
- data/vendor/v8/src/debug-agent.h +9 -11
- data/vendor/v8/src/debug-debugger.js +121 -0
- data/vendor/v8/src/debug.cc +331 -227
- data/vendor/v8/src/debug.h +248 -219
- data/vendor/v8/src/deoptimizer.cc +173 -62
- data/vendor/v8/src/deoptimizer.h +119 -19
- data/vendor/v8/src/disasm.h +3 -0
- data/vendor/v8/src/disassembler.cc +10 -9
- data/vendor/v8/src/execution.cc +185 -129
- data/vendor/v8/src/execution.h +47 -78
- data/vendor/v8/src/extensions/experimental/break-iterator.cc +250 -0
- data/vendor/v8/src/extensions/experimental/break-iterator.h +89 -0
- data/vendor/v8/src/extensions/experimental/experimental.gyp +2 -0
- data/vendor/v8/src/extensions/experimental/i18n-extension.cc +22 -2
- data/vendor/v8/src/extensions/externalize-string-extension.cc +2 -2
- data/vendor/v8/src/extensions/gc-extension.cc +1 -1
- data/vendor/v8/src/factory.cc +261 -154
- data/vendor/v8/src/factory.h +162 -158
- data/vendor/v8/src/flag-definitions.h +17 -11
- data/vendor/v8/src/frame-element.cc +0 -5
- data/vendor/v8/src/frame-element.h +9 -13
- data/vendor/v8/src/frames-inl.h +7 -0
- data/vendor/v8/src/frames.cc +56 -46
- data/vendor/v8/src/frames.h +36 -25
- data/vendor/v8/src/full-codegen.cc +15 -24
- data/vendor/v8/src/full-codegen.h +13 -41
- data/vendor/v8/src/func-name-inferrer.cc +7 -6
- data/vendor/v8/src/func-name-inferrer.h +1 -1
- data/vendor/v8/src/gdb-jit.cc +1 -0
- data/vendor/v8/src/global-handles.cc +118 -56
- data/vendor/v8/src/global-handles.h +98 -40
- data/vendor/v8/src/globals.h +2 -2
- data/vendor/v8/src/handles-inl.h +106 -9
- data/vendor/v8/src/handles.cc +220 -157
- data/vendor/v8/src/handles.h +38 -59
- data/vendor/v8/src/hashmap.h +3 -3
- data/vendor/v8/src/heap-inl.h +141 -25
- data/vendor/v8/src/heap-profiler.cc +117 -63
- data/vendor/v8/src/heap-profiler.h +38 -21
- data/vendor/v8/src/heap.cc +805 -564
- data/vendor/v8/src/heap.h +640 -594
- data/vendor/v8/src/hydrogen-instructions.cc +216 -73
- data/vendor/v8/src/hydrogen-instructions.h +259 -124
- data/vendor/v8/src/hydrogen.cc +996 -1171
- data/vendor/v8/src/hydrogen.h +163 -144
- data/vendor/v8/src/ia32/assembler-ia32-inl.h +12 -11
- data/vendor/v8/src/ia32/assembler-ia32.cc +85 -39
- data/vendor/v8/src/ia32/assembler-ia32.h +82 -16
- data/vendor/v8/src/ia32/builtins-ia32.cc +64 -58
- data/vendor/v8/src/ia32/code-stubs-ia32.cc +248 -324
- data/vendor/v8/src/ia32/code-stubs-ia32.h +3 -44
- data/vendor/v8/src/ia32/codegen-ia32.cc +217 -165
- data/vendor/v8/src/ia32/codegen-ia32.h +3 -0
- data/vendor/v8/src/ia32/cpu-ia32.cc +6 -5
- data/vendor/v8/src/ia32/debug-ia32.cc +8 -5
- data/vendor/v8/src/ia32/deoptimizer-ia32.cc +124 -14
- data/vendor/v8/src/ia32/disasm-ia32.cc +85 -62
- data/vendor/v8/src/ia32/frames-ia32.h +1 -1
- data/vendor/v8/src/ia32/full-codegen-ia32.cc +348 -435
- data/vendor/v8/src/ia32/ic-ia32.cc +91 -91
- data/vendor/v8/src/ia32/lithium-codegen-ia32.cc +500 -255
- data/vendor/v8/src/ia32/lithium-codegen-ia32.h +13 -4
- data/vendor/v8/src/ia32/lithium-gap-resolver-ia32.cc +6 -0
- data/vendor/v8/src/ia32/lithium-ia32.cc +122 -45
- data/vendor/v8/src/ia32/lithium-ia32.h +128 -41
- data/vendor/v8/src/ia32/macro-assembler-ia32.cc +109 -84
- data/vendor/v8/src/ia32/macro-assembler-ia32.h +18 -9
- data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.cc +26 -15
- data/vendor/v8/src/ia32/regexp-macro-assembler-ia32.h +1 -0
- data/vendor/v8/src/ia32/register-allocator-ia32.cc +30 -30
- data/vendor/v8/src/ia32/simulator-ia32.h +4 -4
- data/vendor/v8/src/ia32/stub-cache-ia32.cc +383 -400
- data/vendor/v8/src/ia32/virtual-frame-ia32.cc +36 -13
- data/vendor/v8/src/ia32/virtual-frame-ia32.h +11 -5
- data/vendor/v8/src/ic-inl.h +12 -2
- data/vendor/v8/src/ic.cc +304 -221
- data/vendor/v8/src/ic.h +115 -58
- data/vendor/v8/src/interpreter-irregexp.cc +25 -21
- data/vendor/v8/src/interpreter-irregexp.h +2 -1
- data/vendor/v8/src/isolate.cc +883 -0
- data/vendor/v8/src/isolate.h +1304 -0
- data/vendor/v8/src/json.js +10 -10
- data/vendor/v8/src/jsregexp.cc +111 -80
- data/vendor/v8/src/jsregexp.h +6 -7
- data/vendor/v8/src/jump-target-heavy.cc +5 -8
- data/vendor/v8/src/jump-target-heavy.h +0 -6
- data/vendor/v8/src/jump-target-inl.h +1 -1
- data/vendor/v8/src/jump-target-light.cc +3 -3
- data/vendor/v8/src/lithium-allocator-inl.h +2 -0
- data/vendor/v8/src/lithium-allocator.cc +42 -30
- data/vendor/v8/src/lithium-allocator.h +8 -22
- data/vendor/v8/src/lithium.cc +1 -0
- data/vendor/v8/src/liveedit.cc +141 -99
- data/vendor/v8/src/liveedit.h +7 -2
- data/vendor/v8/src/liveobjectlist-inl.h +90 -0
- data/vendor/v8/src/liveobjectlist.cc +2537 -1
- data/vendor/v8/src/liveobjectlist.h +245 -35
- data/vendor/v8/src/log-utils.cc +122 -35
- data/vendor/v8/src/log-utils.h +33 -36
- data/vendor/v8/src/log.cc +299 -241
- data/vendor/v8/src/log.h +177 -110
- data/vendor/v8/src/mark-compact.cc +612 -470
- data/vendor/v8/src/mark-compact.h +153 -80
- data/vendor/v8/src/messages.cc +16 -14
- data/vendor/v8/src/messages.js +30 -7
- data/vendor/v8/src/mips/assembler-mips-inl.h +155 -35
- data/vendor/v8/src/mips/assembler-mips.cc +1093 -219
- data/vendor/v8/src/mips/assembler-mips.h +552 -153
- data/vendor/v8/src/mips/builtins-mips.cc +43 -100
- data/vendor/v8/src/mips/code-stubs-mips.cc +752 -0
- data/vendor/v8/src/mips/code-stubs-mips.h +511 -0
- data/vendor/v8/src/mips/codegen-mips-inl.h +8 -14
- data/vendor/v8/src/mips/codegen-mips.cc +672 -896
- data/vendor/v8/src/mips/codegen-mips.h +271 -69
- data/vendor/v8/src/mips/constants-mips.cc +44 -20
- data/vendor/v8/src/mips/constants-mips.h +238 -40
- data/vendor/v8/src/mips/cpu-mips.cc +20 -3
- data/vendor/v8/src/mips/debug-mips.cc +35 -7
- data/vendor/v8/src/mips/deoptimizer-mips.cc +91 -0
- data/vendor/v8/src/mips/disasm-mips.cc +329 -93
- data/vendor/v8/src/mips/frames-mips.cc +2 -50
- data/vendor/v8/src/mips/frames-mips.h +24 -9
- data/vendor/v8/src/mips/full-codegen-mips.cc +473 -23
- data/vendor/v8/src/mips/ic-mips.cc +81 -45
- data/vendor/v8/src/mips/jump-target-mips.cc +11 -106
- data/vendor/v8/src/mips/lithium-codegen-mips.h +65 -0
- data/vendor/v8/src/mips/lithium-mips.h +304 -0
- data/vendor/v8/src/mips/macro-assembler-mips.cc +2391 -390
- data/vendor/v8/src/mips/macro-assembler-mips.h +718 -121
- data/vendor/v8/src/mips/regexp-macro-assembler-mips.cc +478 -0
- data/vendor/v8/src/mips/regexp-macro-assembler-mips.h +250 -0
- data/vendor/v8/src/mips/register-allocator-mips-inl.h +0 -3
- data/vendor/v8/src/mips/register-allocator-mips.h +3 -2
- data/vendor/v8/src/mips/simulator-mips.cc +1009 -221
- data/vendor/v8/src/mips/simulator-mips.h +119 -36
- data/vendor/v8/src/mips/stub-cache-mips.cc +331 -148
- data/vendor/v8/src/mips/{fast-codegen-mips.cc → virtual-frame-mips-inl.h} +11 -30
- data/vendor/v8/src/mips/virtual-frame-mips.cc +137 -149
- data/vendor/v8/src/mips/virtual-frame-mips.h +294 -312
- data/vendor/v8/src/mirror-debugger.js +9 -8
- data/vendor/v8/src/mksnapshot.cc +2 -2
- data/vendor/v8/src/objects-debug.cc +16 -16
- data/vendor/v8/src/objects-inl.h +421 -195
- data/vendor/v8/src/objects-printer.cc +7 -7
- data/vendor/v8/src/objects-visiting.cc +1 -1
- data/vendor/v8/src/objects-visiting.h +33 -12
- data/vendor/v8/src/objects.cc +935 -658
- data/vendor/v8/src/objects.h +234 -139
- data/vendor/v8/src/parser.cc +484 -439
- data/vendor/v8/src/parser.h +35 -14
- data/vendor/v8/src/platform-cygwin.cc +173 -107
- data/vendor/v8/src/platform-freebsd.cc +224 -72
- data/vendor/v8/src/platform-linux.cc +234 -95
- data/vendor/v8/src/platform-macos.cc +215 -82
- data/vendor/v8/src/platform-nullos.cc +9 -3
- data/vendor/v8/src/platform-openbsd.cc +22 -7
- data/vendor/v8/src/platform-posix.cc +30 -5
- data/vendor/v8/src/platform-solaris.cc +120 -38
- data/vendor/v8/src/platform-tls-mac.h +62 -0
- data/vendor/v8/src/platform-tls-win32.h +62 -0
- data/vendor/v8/src/platform-tls.h +50 -0
- data/vendor/v8/src/platform-win32.cc +195 -97
- data/vendor/v8/src/platform.h +72 -15
- data/vendor/v8/src/preparse-data.cc +2 -0
- data/vendor/v8/src/preparser-api.cc +8 -2
- data/vendor/v8/src/preparser.cc +1 -1
- data/vendor/v8/src/prettyprinter.cc +43 -52
- data/vendor/v8/src/prettyprinter.h +1 -1
- data/vendor/v8/src/profile-generator-inl.h +0 -28
- data/vendor/v8/src/profile-generator.cc +942 -685
- data/vendor/v8/src/profile-generator.h +210 -176
- data/vendor/v8/src/property.cc +6 -0
- data/vendor/v8/src/property.h +14 -3
- data/vendor/v8/src/regexp-macro-assembler-irregexp.cc +1 -1
- data/vendor/v8/src/regexp-macro-assembler.cc +28 -19
- data/vendor/v8/src/regexp-macro-assembler.h +11 -6
- data/vendor/v8/src/regexp-stack.cc +18 -10
- data/vendor/v8/src/regexp-stack.h +45 -21
- data/vendor/v8/src/regexp.js +3 -3
- data/vendor/v8/src/register-allocator-inl.h +3 -3
- data/vendor/v8/src/register-allocator.cc +1 -7
- data/vendor/v8/src/register-allocator.h +5 -15
- data/vendor/v8/src/rewriter.cc +2 -1
- data/vendor/v8/src/runtime-profiler.cc +158 -128
- data/vendor/v8/src/runtime-profiler.h +131 -15
- data/vendor/v8/src/runtime.cc +2409 -1692
- data/vendor/v8/src/runtime.h +93 -17
- data/vendor/v8/src/safepoint-table.cc +3 -0
- data/vendor/v8/src/safepoint-table.h +9 -3
- data/vendor/v8/src/scanner-base.cc +21 -28
- data/vendor/v8/src/scanner-base.h +22 -11
- data/vendor/v8/src/scanner.cc +3 -5
- data/vendor/v8/src/scanner.h +4 -2
- data/vendor/v8/src/scopeinfo.cc +11 -16
- data/vendor/v8/src/scopeinfo.h +26 -15
- data/vendor/v8/src/scopes.cc +67 -37
- data/vendor/v8/src/scopes.h +26 -12
- data/vendor/v8/src/serialize.cc +193 -154
- data/vendor/v8/src/serialize.h +41 -36
- data/vendor/v8/src/small-pointer-list.h +163 -0
- data/vendor/v8/src/snapshot-common.cc +1 -1
- data/vendor/v8/src/snapshot.h +3 -1
- data/vendor/v8/src/spaces-inl.h +30 -25
- data/vendor/v8/src/spaces.cc +263 -370
- data/vendor/v8/src/spaces.h +178 -166
- data/vendor/v8/src/string-search.cc +4 -3
- data/vendor/v8/src/string-search.h +21 -20
- data/vendor/v8/src/string-stream.cc +32 -24
- data/vendor/v8/src/string.js +7 -7
- data/vendor/v8/src/stub-cache.cc +324 -248
- data/vendor/v8/src/stub-cache.h +181 -155
- data/vendor/v8/src/token.cc +3 -3
- data/vendor/v8/src/token.h +3 -3
- data/vendor/v8/src/top.cc +218 -390
- data/vendor/v8/src/type-info.cc +98 -32
- data/vendor/v8/src/type-info.h +10 -3
- data/vendor/v8/src/unicode.cc +1 -1
- data/vendor/v8/src/unicode.h +1 -1
- data/vendor/v8/src/utils.h +3 -0
- data/vendor/v8/src/v8-counters.cc +18 -11
- data/vendor/v8/src/v8-counters.h +34 -13
- data/vendor/v8/src/v8.cc +66 -121
- data/vendor/v8/src/v8.h +7 -4
- data/vendor/v8/src/v8globals.h +18 -12
- data/vendor/v8/src/{memory.h → v8memory.h} +0 -0
- data/vendor/v8/src/v8natives.js +59 -18
- data/vendor/v8/src/v8threads.cc +127 -114
- data/vendor/v8/src/v8threads.h +42 -35
- data/vendor/v8/src/v8utils.h +2 -39
- data/vendor/v8/src/variables.h +1 -1
- data/vendor/v8/src/version.cc +26 -5
- data/vendor/v8/src/version.h +4 -0
- data/vendor/v8/src/virtual-frame-heavy-inl.h +2 -4
- data/vendor/v8/src/virtual-frame-light-inl.h +5 -4
- data/vendor/v8/src/vm-state-inl.h +21 -17
- data/vendor/v8/src/vm-state.h +7 -5
- data/vendor/v8/src/win32-headers.h +1 -0
- data/vendor/v8/src/x64/assembler-x64-inl.h +12 -11
- data/vendor/v8/src/x64/assembler-x64.cc +80 -40
- data/vendor/v8/src/x64/assembler-x64.h +67 -17
- data/vendor/v8/src/x64/builtins-x64.cc +34 -33
- data/vendor/v8/src/x64/code-stubs-x64.cc +636 -377
- data/vendor/v8/src/x64/code-stubs-x64.h +14 -48
- data/vendor/v8/src/x64/codegen-x64-inl.h +1 -1
- data/vendor/v8/src/x64/codegen-x64.cc +158 -136
- data/vendor/v8/src/x64/codegen-x64.h +4 -1
- data/vendor/v8/src/x64/cpu-x64.cc +7 -5
- data/vendor/v8/src/x64/debug-x64.cc +8 -6
- data/vendor/v8/src/x64/deoptimizer-x64.cc +195 -20
- data/vendor/v8/src/x64/disasm-x64.cc +42 -23
- data/vendor/v8/src/x64/frames-x64.cc +1 -1
- data/vendor/v8/src/x64/frames-x64.h +2 -2
- data/vendor/v8/src/x64/full-codegen-x64.cc +780 -218
- data/vendor/v8/src/x64/ic-x64.cc +77 -79
- data/vendor/v8/src/x64/jump-target-x64.cc +1 -1
- data/vendor/v8/src/x64/lithium-codegen-x64.cc +698 -181
- data/vendor/v8/src/x64/lithium-codegen-x64.h +31 -6
- data/vendor/v8/src/x64/lithium-x64.cc +136 -54
- data/vendor/v8/src/x64/lithium-x64.h +142 -51
- data/vendor/v8/src/x64/macro-assembler-x64.cc +456 -187
- data/vendor/v8/src/x64/macro-assembler-x64.h +166 -34
- data/vendor/v8/src/x64/regexp-macro-assembler-x64.cc +44 -28
- data/vendor/v8/src/x64/regexp-macro-assembler-x64.h +8 -4
- data/vendor/v8/src/x64/register-allocator-x64-inl.h +3 -3
- data/vendor/v8/src/x64/register-allocator-x64.cc +12 -8
- data/vendor/v8/src/x64/simulator-x64.h +5 -5
- data/vendor/v8/src/x64/stub-cache-x64.cc +299 -344
- data/vendor/v8/src/x64/virtual-frame-x64.cc +37 -13
- data/vendor/v8/src/x64/virtual-frame-x64.h +13 -7
- data/vendor/v8/src/zone-inl.h +49 -3
- data/vendor/v8/src/zone.cc +42 -41
- data/vendor/v8/src/zone.h +37 -34
- data/vendor/v8/test/benchmarks/testcfg.py +100 -0
- data/vendor/v8/test/cctest/SConscript +5 -4
- data/vendor/v8/test/cctest/cctest.h +3 -2
- data/vendor/v8/test/cctest/cctest.status +6 -11
- data/vendor/v8/test/cctest/test-accessors.cc +3 -3
- data/vendor/v8/test/cctest/test-alloc.cc +39 -33
- data/vendor/v8/test/cctest/test-api.cc +1092 -205
- data/vendor/v8/test/cctest/test-assembler-arm.cc +39 -25
- data/vendor/v8/test/cctest/test-assembler-ia32.cc +36 -37
- data/vendor/v8/test/cctest/test-assembler-mips.cc +1098 -40
- data/vendor/v8/test/cctest/test-assembler-x64.cc +32 -25
- data/vendor/v8/test/cctest/test-ast.cc +1 -0
- data/vendor/v8/test/cctest/test-circular-queue.cc +8 -5
- data/vendor/v8/test/cctest/test-compiler.cc +24 -24
- data/vendor/v8/test/cctest/test-cpu-profiler.cc +140 -5
- data/vendor/v8/test/cctest/test-dataflow.cc +1 -0
- data/vendor/v8/test/cctest/test-debug.cc +136 -77
- data/vendor/v8/test/cctest/test-decls.cc +1 -1
- data/vendor/v8/test/cctest/test-deoptimization.cc +25 -24
- data/vendor/v8/test/cctest/test-disasm-arm.cc +9 -4
- data/vendor/v8/test/cctest/test-disasm-ia32.cc +10 -8
- data/vendor/v8/test/cctest/test-func-name-inference.cc +10 -4
- data/vendor/v8/test/cctest/test-heap-profiler.cc +226 -164
- data/vendor/v8/test/cctest/test-heap.cc +240 -217
- data/vendor/v8/test/cctest/test-liveedit.cc +1 -0
- data/vendor/v8/test/cctest/test-log-stack-tracer.cc +18 -20
- data/vendor/v8/test/cctest/test-log.cc +114 -108
- data/vendor/v8/test/cctest/test-macro-assembler-x64.cc +247 -177
- data/vendor/v8/test/cctest/test-mark-compact.cc +129 -90
- data/vendor/v8/test/cctest/test-parsing.cc +15 -14
- data/vendor/v8/test/cctest/test-platform-linux.cc +1 -0
- data/vendor/v8/test/cctest/test-platform-tls.cc +66 -0
- data/vendor/v8/test/cctest/test-platform-win32.cc +1 -0
- data/vendor/v8/test/cctest/test-profile-generator.cc +1 -1
- data/vendor/v8/test/cctest/test-regexp.cc +53 -41
- data/vendor/v8/test/cctest/test-reloc-info.cc +18 -11
- data/vendor/v8/test/cctest/test-serialize.cc +44 -43
- data/vendor/v8/test/cctest/test-sockets.cc +8 -3
- data/vendor/v8/test/cctest/test-spaces.cc +47 -29
- data/vendor/v8/test/cctest/test-strings.cc +20 -20
- data/vendor/v8/test/cctest/test-thread-termination.cc +8 -3
- data/vendor/v8/test/cctest/test-threads.cc +5 -3
- data/vendor/v8/test/cctest/test-utils.cc +5 -4
- data/vendor/v8/test/cctest/testcfg.py +7 -3
- data/vendor/v8/test/es5conform/es5conform.status +2 -77
- data/vendor/v8/test/es5conform/testcfg.py +1 -1
- data/vendor/v8/test/message/testcfg.py +1 -1
- data/vendor/v8/test/mjsunit/accessors-on-global-object.js +3 -3
- data/vendor/v8/test/mjsunit/array-concat.js +43 -1
- data/vendor/v8/test/mjsunit/array-join.js +25 -0
- data/vendor/v8/test/mjsunit/bitops-info.js +7 -1
- data/vendor/v8/test/mjsunit/compiler/array-length.js +2 -2
- data/vendor/v8/test/mjsunit/compiler/global-accessors.js +47 -0
- data/vendor/v8/test/mjsunit/compiler/pic.js +1 -1
- data/vendor/v8/test/mjsunit/compiler/regress-loadfield.js +65 -0
- data/vendor/v8/test/mjsunit/math-sqrt.js +5 -1
- data/vendor/v8/test/mjsunit/mjsunit.js +59 -8
- data/vendor/v8/test/mjsunit/mjsunit.status +0 -12
- data/vendor/v8/test/mjsunit/mul-exhaustive.js +129 -11
- data/vendor/v8/test/mjsunit/negate-zero.js +1 -1
- data/vendor/v8/test/mjsunit/object-freeze.js +5 -13
- data/vendor/v8/test/mjsunit/object-prevent-extensions.js +9 -50
- data/vendor/v8/test/mjsunit/object-seal.js +4 -13
- data/vendor/v8/test/mjsunit/override-eval-with-non-function.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-1145.js +54 -0
- data/vendor/v8/test/mjsunit/regress/regress-1172-bis.js +37 -0
- data/vendor/v8/test/mjsunit/regress/regress-1181.js +54 -0
- data/vendor/v8/test/mjsunit/regress/regress-1207.js +35 -0
- data/vendor/v8/test/mjsunit/regress/regress-1209.js +34 -0
- data/vendor/v8/test/mjsunit/regress/regress-1210.js +48 -0
- data/vendor/v8/test/mjsunit/regress/regress-1213.js +43 -0
- data/vendor/v8/test/mjsunit/regress/regress-1218.js +29 -0
- data/vendor/v8/test/mjsunit/regress/regress-1229.js +79 -0
- data/vendor/v8/test/mjsunit/regress/regress-1233.js +47 -0
- data/vendor/v8/test/mjsunit/regress/regress-1236.js +34 -0
- data/vendor/v8/test/mjsunit/regress/regress-1237.js +36 -0
- data/vendor/v8/test/mjsunit/regress/regress-1240.js +39 -0
- data/vendor/v8/test/mjsunit/regress/regress-1257.js +58 -0
- data/vendor/v8/test/mjsunit/regress/regress-1278.js +69 -0
- data/vendor/v8/test/mjsunit/regress/regress-create-exception.js +1 -0
- data/vendor/v8/test/mjsunit/regress/regress-lazy-deopt-reloc.js +52 -0
- data/vendor/v8/test/mjsunit/sin-cos.js +15 -10
- data/vendor/v8/test/mjsunit/smi-negative-zero.js +2 -2
- data/vendor/v8/test/mjsunit/str-to-num.js +1 -1
- data/vendor/v8/test/mjsunit/strict-mode.js +435 -0
- data/vendor/v8/test/mjsunit/testcfg.py +23 -6
- data/vendor/v8/test/mozilla/mozilla.status +0 -2
- data/vendor/v8/test/mozilla/testcfg.py +1 -1
- data/vendor/v8/test/preparser/empty.js +28 -0
- data/vendor/v8/test/preparser/functions-only.js +38 -0
- data/vendor/v8/test/preparser/non-alphanum.js +34 -0
- data/vendor/v8/test/preparser/symbols-only.js +49 -0
- data/vendor/v8/test/preparser/testcfg.py +90 -0
- data/vendor/v8/test/sputnik/testcfg.py +1 -1
- data/vendor/v8/test/test262/README +16 -0
- data/vendor/v8/test/test262/harness-adapt.js +80 -0
- data/vendor/v8/test/test262/test262.status +1506 -0
- data/vendor/v8/test/test262/testcfg.py +123 -0
- data/vendor/v8/tools/freebsd-tick-processor +10 -0
- data/vendor/v8/tools/gyp/v8.gyp +8 -33
- data/vendor/v8/tools/linux-tick-processor +5 -3
- data/vendor/v8/tools/test.py +37 -14
- data/vendor/v8/tools/tickprocessor.js +22 -8
- data/vendor/v8/tools/visual_studio/v8_base.vcproj +13 -1
- data/vendor/v8/tools/visual_studio/v8_base_arm.vcproj +5 -1
- data/vendor/v8/tools/visual_studio/v8_base_x64.vcproj +5 -1
- data/vendor/v8/tools/visual_studio/x64.vsprops +1 -0
- metadata +1495 -1341
- data/ext/extconf.rb +0 -22
- data/ext/mustang.cpp +0 -58
- data/vendor/v8/src/top.h +0 -608
data/vendor/v8/src/mips/macro-assembler-mips.cc

@@ -1,4 +1,4 @@
-// Copyright
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-
+#include <limits.h>  // For LONG_MIN, LONG_MAX

 #include "v8.h"

@@ -41,68 +41,90 @@ namespace internal {

 MacroAssembler::MacroAssembler(void* buffer, int size)
     : Assembler(buffer, size),
-      unresolved_(0),
       generating_stub_(false),
       allow_stub_calls_(true),
-      code_object_(
+      code_object_(HEAP->undefined_value()) {
 }


+// Arguments macros
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2

-
-
-
-
-
-
-
-
-  Jump(Operand(target, rmode), cond, r1, r2);
+#define REGISTER_TARGET_BODY(Name) \
+void MacroAssembler::Name(Register target, \
+                          BranchDelaySlot bd) { \
+  Name(Operand(target), bd); \
+} \
+void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
+                          BranchDelaySlot bd) { \
+  Name(Operand(target), COND_ARGS, bd); \
 }


-
-
-
-
+#define INT_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
+                          BranchDelaySlot bd) { \
+  Name(Operand(target, rmode), bd); \
+} \
+void MacroAssembler::Name(intptr_t target, \
+                          RelocInfo::Mode rmode, \
+                          COND_TYPED_ARGS, \
+                          BranchDelaySlot bd) { \
+  Name(Operand(target, rmode), COND_ARGS, bd); \
 }


-
-
-
-
+#define BYTE_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
+                          BranchDelaySlot bd) { \
+  Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
+} \
+void MacroAssembler::Name(byte* target, \
+                          RelocInfo::Mode rmode, \
+                          COND_TYPED_ARGS, \
+                          BranchDelaySlot bd) { \
+  Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
 }


-
-
-
+#define CODE_TARGET_BODY(Name) \
+void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
+                          BranchDelaySlot bd) { \
+  Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
+} \
+void MacroAssembler::Name(Handle<Code> target, \
+                          RelocInfo::Mode rmode, \
+                          COND_TYPED_ARGS, \
+                          BranchDelaySlot bd) { \
+  Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
 }


-
-
-
-
-
+REGISTER_TARGET_BODY(Jump)
+REGISTER_TARGET_BODY(Call)
+INT_PTR_TARGET_BODY(Jump)
+INT_PTR_TARGET_BODY(Call)
+BYTE_PTR_TARGET_BODY(Jump)
+BYTE_PTR_TARGET_BODY(Call)
+CODE_TARGET_BODY(Jump)
+CODE_TARGET_BODY(Call)

-
-
-
-
-
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef REGISTER_TARGET_BODY
+#undef BYTE_PTR_TARGET_BODY
+#undef CODE_TARGET_BODY


-void MacroAssembler::
-
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
-  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+void MacroAssembler::Ret(BranchDelaySlot bd) {
+  Jump(Operand(ra), bd);
 }


-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2
-
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
+                         BranchDelaySlot bd) {
+  Jump(Operand(ra), cond, r1, r2, bd);
 }

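The *_TARGET_BODY macros in the hunk above replace eight hand-written Jump/Call overloads: each macro stamps out an unconditional and a conditional forwarding overload for one target type (Register, intptr_t, byte*, Handle<Code>), all funneling into a single Operand-based implementation while threading the new BranchDelaySlot argument through. A minimal standalone sketch of the same stamping pattern, using simplified stand-in types rather than V8's real Operand, Condition, and BranchDelaySlot:

    #include <cstdio>

    // Simplified stand-ins for the sketch; V8's real types are richer.
    struct Operand { long value; };
    enum Condition { cc_always, eq, ne };
    enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };

    struct MacroAssembler {
      // The single Operand-based implementation every overload forwards to.
      void Jump(Operand target, BranchDelaySlot bd) {
        std::printf("jump %ld (bd=%d)\n", target.value, bd);
      }
      void Jump(Operand target, Condition cond, BranchDelaySlot bd) {
        std::printf("jump %ld if %d (bd=%d)\n", target.value, cond, bd);
      }

    // One macro stamps out both overloads for a given target type,
    // mirroring REGISTER_TARGET_BODY and friends in the hunk.
    #define INT_TARGET_BODY(Name)                                  \
      void Name(long target, BranchDelaySlot bd) {                 \
        Name(Operand{target}, bd);                                 \
      }                                                            \
      void Name(long target, Condition cond, BranchDelaySlot bd) { \
        Name(Operand{target}, cond, bd);                           \
      }

      INT_TARGET_BODY(Jump)
    #undef INT_TARGET_BODY
    };

    int main() {
      MacroAssembler masm;
      masm.Jump(42L, PROTECT);      // expands to Jump(Operand{42}, PROTECT)
      masm.Jump(42L, eq, PROTECT);  // conditional variant from the same macro
    }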
@@ -111,51 +133,248 @@ void MacroAssembler::LoadRoot(Register destination,
   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
 }

+
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
-  Branch(NegateCondition(cond),
+  Branch(2, NegateCondition(cond), src1, src2);
   lw(destination, MemOperand(s6, index << kPointerSizeLog2));
 }


-void MacroAssembler::
+void MacroAssembler::StoreRoot(Register source,
+                               Heap::RootListIndex index) {
+  sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+                               Heap::RootListIndex index,
+                               Condition cond,
+                               Register src1, const Operand& src2) {
+  Branch(2, NegateCondition(cond), src1, src2);
+  sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register address,
+                                       Register scratch) {
+  if (FLAG_debug_code) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, ne, &not_in_new_space);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
+  }
+
+  // Calculate page address: Clear bits from 0 to kPageSizeBits.
+  if (mips32r2) {
+    Ins(object, zero_reg, 0, kPageSizeBits);
+  } else {
+    // The Ins macro is slow on r1, so use shifts instead.
+    srl(object, object, kPageSizeBits);
+    sll(object, object, kPageSizeBits);
+  }
+
+  // Calculate region number.
+  Ext(address, address, Page::kRegionSizeLog2,
+      kPageSizeBits - Page::kRegionSizeLog2);
+
+  // Mark region dirty.
+  lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+  li(at, Operand(1));
+  sllv(at, at, address);
+  or_(scratch, scratch, at);
+  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch) {
+  ASSERT(cc == eq || cc == ne);
+  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+  Branch(branch, cc, scratch,
+         Operand(ExternalReference::new_space_start(isolate())));
+}
+
+
+// Will clobber 4 registers: object, scratch0, scratch1, at. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+                                 Operand offset,
+                                 Register scratch0,
+                                 Register scratch1) {
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are cp.
+  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
+
+  Label done;
+
+  // First, test that the object is not in the new space. We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch0, eq, &done);
+
+  // Add offset into the object.
+  Addu(scratch0, object, offset);
+
+  // Record the actual write.
+  RecordWriteHelper(object, scratch0, scratch1);
+
+  bind(&done);
+
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (FLAG_debug_code) {
+    li(object, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+  }
+}
+
+
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+                                 Register address,
                                  Register scratch) {
-
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are cp.
+  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+  Label done;
+
+  // First, test that the object is not in the new space. We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch, eq, &done);
+
+  // Record the actual write.
+  RecordWriteHelper(object, address, scratch);
+
+  bind(&done);
+
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (FLAG_debug_code) {
+    li(object, Operand(BitCast<int32_t>(kZapValue)));
+    li(address, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Allocation support
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch,
+                                            Label* miss) {
+  Label same_contexts;
+
+  ASSERT(!holder_reg.is(scratch));
+  ASSERT(!holder_reg.is(at));
+  ASSERT(!scratch.is(at));
+
+  // Load current lexical context from the stack frame.
+  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+  Check(ne, "we should not have an empty lexical context",
+        scratch, Operand(zero_reg));
+#endif
+
+  // Load the global context of the current context.
+  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  lw(scratch, FieldMemOperand(scratch, offset));
+  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+    Push(holder_reg);  // Temporarily save holder on the stack.
+    // Read the first word and compare to the global_context_map.
+    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+    Check(eq, "JSGlobalObject::global_context should be a global context.",
+          holder_reg, Operand(at));
+    Pop(holder_reg);  // Restore holder.
+  }
+
+  // Check if both contexts are the same.
+  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  Branch(&same_contexts, eq, scratch, Operand(at));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+    Push(holder_reg);  // Temporarily save holder on the stack.
+    mov(holder_reg, at);  // Move at to its holding place.
+    LoadRoot(at, Heap::kNullValueRootIndex);
+    Check(ne, "JSGlobalProxy::context() should not be null.",
+          holder_reg, Operand(at));
+
+    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+    Check(eq, "JSGlobalObject::global_context should be a global context.",
+          holder_reg, Operand(at));
+    // Restore at is not needed. at is reloaded below.
+    Pop(holder_reg);  // Restore holder.
+    // Restore at to holder's context.
+    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  }
+
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  int token_offset = Context::kHeaderSize +
+                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+  lw(scratch, FieldMemOperand(scratch, token_offset));
+  lw(at, FieldMemOperand(at, token_offset));
+  Branch(miss, ne, scratch, Operand(at));
+
+  bind(&same_contexts);
 }


 // ---------------------------------------------------------------------------
 // Instruction macros

-void MacroAssembler::
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
-
+    addu(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !
-
+    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+      addiu(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
       ASSERT(!rs.is(at));
       li(at, rt);
-
+      addu(rd, rs, at);
     }
   }
 }


-void MacroAssembler::
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
-
+    subu(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !
-    addiu(rd, rs, rt.imm32_);
+    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
     } else {
       // li handles the relocation.
       ASSERT(!rs.is(at));
       li(at, rt);
-
+      subu(rd, rs, at);
     }
   }
 }
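RecordWriteHelper above is the write barrier's region marking: mask the object pointer down to its page start (the srl/sll pair, or a single Ins on R2), extract the region index from the written address with Ext, and OR one bit into the page's dirty-flag word. The same arithmetic in plain C++; the two constants are illustrative stand-ins, not V8's actual Page layout:

    #include <cassert>
    #include <cstdint>

    // Illustrative values only (assumed for the sketch): 8 KB pages split
    // into 256-byte regions, i.e. 32 regions and one 32-bit flag word.
    constexpr uint32_t kPageSizeBits = 13;
    constexpr uint32_t kRegionSizeLog2 = 8;

    uint32_t MarkRegionDirty(uint32_t object, uint32_t address,
                             uint32_t dirty_flags) {
      // srl/sll pair: clear bits 0..kPageSizeBits-1, leaving the page start.
      uint32_t page = (object >> kPageSizeBits) << kPageSizeBits;
      // The write must land on the object's own page.
      assert(((address >> kPageSizeBits) << kPageSizeBits) == page);

      // Ext(address, address, kRegionSizeLog2, kPageSizeBits - kRegionSizeLog2):
      // the region index is the page-internal part of the address divided
      // by the region size.
      uint32_t region = (address >> kRegionSizeLog2) &
                        ((1u << (kPageSizeBits - kRegionSizeLog2)) - 1);

      // li(at, 1); sllv(at, at, region); or_(...): set bit 'region'.
      return dirty_flags | (1u << region);
    }

    int main() {
      // Address 0x2104 is region 1 of the page at 0x2000.
      assert(MarkRegionDirty(0x2000, 0x2104, 0) == 0x2);
      return 0;
    }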
@@ -225,7 +444,7 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     and_(rd, rs, rt.rm());
   } else {
-    if (
+    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       andi(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -241,7 +460,7 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     or_(rd, rs, rt.rm());
   } else {
-    if (
+    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       ori(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -257,7 +476,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     xor_(rd, rs, rt.rm());
   } else {
-    if (
+    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       xori(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -285,7 +504,7 @@ void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     slt(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm32_) && !
+    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       slti(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
@@ -301,7 +520,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     sltu(rd, rs, rt.rm());
   } else {
-    if (
+    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
       sltiu(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
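The And/Or/Xor/Slt/Sltu hunks all complete the same guard: the immediate form is only usable when the operand fits the instruction's 16-bit field and carries no relocation mode (the MustUseReg check). The fit test differs because MIPS sign-extends the immediate of addiu/slti but zero-extends the immediate of andi/ori/xori. A small self-contained illustration of why the two ranges diverge:

    #include <cstdint>
    #include <cstdio>

    bool is_int16(int32_t v) { return v >= -32768 && v <= 32767; }
    bool is_uint16(int32_t v) { return v >= 0 && v <= 65535; }

    int main() {
      int32_t imm = 0xFFFF;  // 65535
      // ori can encode this directly: its field is zero-extended.
      std::printf("ori-encodable:   %d\n", is_uint16(imm));
      // addiu cannot: a sign-extended 0xFFFF would mean -1.
      std::printf("addiu-encodable: %d\n", is_int16(imm));
      return 0;
    }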
@@ -313,31 +532,51 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
 }


-
-
-
-
-
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+  if (mips32r2) {
+    if (rt.is_reg()) {
+      rotrv(rd, rs, rt.rm());
+    } else {
+      rotr(rd, rs, rt.imm32_);
+    }
+  } else {
+    if (rt.is_reg()) {
+      subu(at, zero_reg, rt.rm());
+      sllv(at, rs, at);
+      srlv(rd, rs, rt.rm());
+      or_(rd, rd, at);
+    } else {
+      if (rt.imm32_ == 0) {
+        srl(rd, rs, 0);
+      } else {
+        srl(at, rs, rt.imm32_);
+        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
+        or_(rd, rd, at);
+      }
+    }
+  }
 }


+//------------Pseudo-instructions-------------
+
 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
   ASSERT(!j.is_reg());
-
-  if (!
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (!MustUseReg(j.rmode_) && !gen2instr) {
     // Normal load of an immediate value which does not need Relocation Info.
     if (is_int16(j.imm32_)) {
       addiu(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ &
+    } else if (!(j.imm32_ & kHiMask)) {
       ori(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ &
-      lui(rd, (
+    } else if (!(j.imm32_ & kImm16Mask)) {
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
     } else {
-      lui(rd, (
-      ori(rd, rd, (
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+      ori(rd, rd, (j.imm32_ & kImm16Mask));
     }
-  } else if (
-    if (
+  } else if (MustUseReg(j.rmode_) || gen2instr) {
+    if (MustUseReg(j.rmode_)) {
       RecordRelocInfo(j.rmode_, j.imm32_);
     }
     // We need always the same number of instructions as we may need to patch
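The pre-R2 fallback in the new Ror has no rotate instruction to lean on, so it composes one from two shifts and an OR; the (0x20 - imm) & 0x1f mask keeps the left-shift amount in the legal 0..31 range when the rotate count is zero. The same computation in portable C++ (a hypothetical helper, not V8 code):

    #include <cstdint>
    #include <cstdio>

    uint32_t RotateRight(uint32_t x, uint32_t n) {
      if (n == 0) return x;                      // srl(rd, rs, 0)
      uint32_t low  = x >> n;                    // srl(at, rs, imm)
      uint32_t high = x << ((0x20 - n) & 0x1f);  // sll(rd, rs, (0x20-imm)&0x1f)
      return high | low;                         // or_(rd, rd, at)
    }

    int main() {
      std::printf("%08x\n", RotateRight(0x80000001u, 1));  // prints c0000000
      return 0;
    }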
@@ -345,15 +584,15 @@ void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
     if (is_int16(j.imm32_)) {
       nop();
       addiu(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ &
+    } else if (!(j.imm32_ & kHiMask)) {
       nop();
       ori(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ &
+    } else if (!(j.imm32_ & kImm16Mask)) {
       nop();
-      lui(rd, (
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
     } else {
-      lui(rd, (
-      ori(rd, rd, (
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+      ori(rd, rd, (j.imm32_ & kImm16Mask));
     }
   }
 }
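Both li hunks preserve the invariant the surrounding comment states: when the value carries relocation info (or gen2instr is set), li always emits exactly two instructions, padding with a nop where one would do, so a later patch of the constant never changes code size. kHiMask and kImm16Mask split the 32-bit value into the lui and ori halfwords. A sketch of that fixed-shape encoding; the Emit* helpers are stand-ins, not V8 API:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kImm16Mask = 0x0000FFFF;
    constexpr uint32_t kHiMask    = 0xFFFF0000;
    constexpr int kLuiShift = 16;  // assumed value of kLuiShift on MIPS32

    void EmitNop()            { std::printf("nop\n"); }
    void EmitLui(uint32_t hi) { std::printf("lui  rd, 0x%04x\n", hi); }
    void EmitOri(uint32_t lo) { std::printf("ori  rd, rd, 0x%04x\n", lo); }

    // Always two instructions, so the pair can be rewritten in place when
    // relocation moves the referenced address.
    void LoadImmediatePatchable(uint32_t imm) {
      if (!(imm & kHiMask)) {
        EmitNop();
        EmitOri(imm & kImm16Mask);
      } else if (!(imm & kImm16Mask)) {
        EmitNop();
        EmitLui((imm & kHiMask) >> kLuiShift);
      } else {
        EmitLui((imm & kHiMask) >> kLuiShift);
        EmitOri(imm & kImm16Mask);
      }
    }

    int main() {
      LoadImmediatePatchable(0x12345678);  // lui 0x1234; ori 0x5678
      LoadImmediatePatchable(0x00000042);  // nop; ori 0x0042 (same size)
      return 0;
    }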
@@ -417,153 +656,772 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
|
|
417
656
|
}
|
418
657
|
|
419
658
|
|
420
|
-
|
659
|
+
void MacroAssembler::Ext(Register rt,
|
660
|
+
Register rs,
|
661
|
+
uint16_t pos,
|
662
|
+
uint16_t size) {
|
663
|
+
ASSERT(pos < 32);
|
664
|
+
ASSERT(pos + size < 32);
|
421
665
|
|
422
|
-
|
423
|
-
|
424
|
-
|
425
|
-
|
426
|
-
|
427
|
-
|
428
|
-
|
429
|
-
r2 = rt.rm_;
|
430
|
-
} else if (cond != cc_always) {
|
431
|
-
// We don't want any other register but scratch clobbered.
|
432
|
-
ASSERT(!scratch.is(rs));
|
433
|
-
r2 = scratch;
|
434
|
-
li(r2, rt);
|
666
|
+
if (mips32r2) {
|
667
|
+
ext_(rt, rs, pos, size);
|
668
|
+
} else {
|
669
|
+
// Move rs to rt and shift it left then right to get the
|
670
|
+
// desired bitfield on the right side and zeroes on the left.
|
671
|
+
sll(rt, rs, 32 - (pos + size));
|
672
|
+
srl(rt, rt, 32 - size);
|
435
673
|
}
|
674
|
+
}
|
436
675
|
|
437
|
-
switch (cond) {
|
438
|
-
case cc_always:
|
439
|
-
b(offset);
|
440
|
-
break;
|
441
|
-
case eq:
|
442
|
-
beq(rs, r2, offset);
|
443
|
-
break;
|
444
|
-
case ne:
|
445
|
-
bne(rs, r2, offset);
|
446
|
-
break;
|
447
676
|
|
448
|
-
|
449
|
-
|
450
|
-
|
451
|
-
|
452
|
-
|
453
|
-
|
454
|
-
slt(scratch, rs, r2);
|
455
|
-
beq(scratch, zero_reg, offset);
|
456
|
-
break;
|
457
|
-
case less:
|
458
|
-
slt(scratch, rs, r2);
|
459
|
-
bne(scratch, zero_reg, offset);
|
460
|
-
break;
|
461
|
-
case less_equal:
|
462
|
-
slt(scratch, r2, rs);
|
463
|
-
beq(scratch, zero_reg, offset);
|
464
|
-
break;
|
677
|
+
void MacroAssembler::Ins(Register rt,
|
678
|
+
Register rs,
|
679
|
+
uint16_t pos,
|
680
|
+
uint16_t size) {
|
681
|
+
ASSERT(pos < 32);
|
682
|
+
ASSERT(pos + size < 32);
|
465
683
|
|
466
|
-
|
467
|
-
|
468
|
-
|
469
|
-
|
470
|
-
|
471
|
-
|
472
|
-
|
473
|
-
|
474
|
-
|
475
|
-
|
476
|
-
|
477
|
-
|
478
|
-
|
479
|
-
|
480
|
-
|
481
|
-
|
482
|
-
|
684
|
+
if (mips32r2) {
|
685
|
+
ins_(rt, rs, pos, size);
|
686
|
+
} else {
|
687
|
+
ASSERT(!rt.is(t8) && !rs.is(t8));
|
688
|
+
|
689
|
+
srl(t8, rt, pos + size);
|
690
|
+
// The left chunk from rt that needs to
|
691
|
+
// be saved is on the right side of t8.
|
692
|
+
sll(at, t8, pos + size);
|
693
|
+
// The 'at' register now contains the left chunk on
|
694
|
+
// the left (proper position) and zeroes.
|
695
|
+
sll(t8, rt, 32 - pos);
|
696
|
+
// t8 now contains the right chunk on the left and zeroes.
|
697
|
+
srl(t8, t8, 32 - pos);
|
698
|
+
// t8 now contains the right chunk on
|
699
|
+
// the right (proper position) and zeroes.
|
700
|
+
or_(rt, at, t8);
|
701
|
+
// rt now contains the left and right chunks from the original rt
|
702
|
+
// in their proper position and zeroes in the middle.
|
703
|
+
sll(t8, rs, 32 - size);
|
704
|
+
// t8 now contains the chunk from rs on the left and zeroes.
|
705
|
+
srl(t8, t8, 32 - size - pos);
|
706
|
+
// t8 now contains the original chunk from rs in
|
707
|
+
// the middle (proper position).
|
708
|
+
or_(rt, rt, t8);
|
709
|
+
// rt now contains the result of the ins instruction in R2 mode.
|
710
|
+
}
|
711
|
+
}
|
483
712
|
|
484
|
-
|
485
|
-
|
713
|
+
|
714
|
+
void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
|
715
|
+
// Move the data from fs to t4.
|
716
|
+
mfc1(t4, fs);
|
717
|
+
return Cvt_d_uw(fd, t4);
|
718
|
+
}
|
719
|
+
|
720
|
+
|
721
|
+
void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
|
722
|
+
// Convert rs to a FP value in fd (and fd + 1).
|
723
|
+
// We do this by converting rs minus the MSB to avoid sign conversion,
|
724
|
+
// then adding 2^31-1 and 1 to the result.
|
725
|
+
|
726
|
+
ASSERT(!fd.is(f20));
|
727
|
+
ASSERT(!rs.is(t9));
|
728
|
+
ASSERT(!rs.is(t8));
|
729
|
+
|
730
|
+
// Save rs's MSB to t8
|
731
|
+
And(t8, rs, 0x80000000);
|
732
|
+
// Remove rs's MSB.
|
733
|
+
And(t9, rs, 0x7FFFFFFF);
|
734
|
+
// Move t9 to fd
|
735
|
+
mtc1(t9, fd);
|
736
|
+
|
737
|
+
// Convert fd to a real FP value.
|
738
|
+
cvt_d_w(fd, fd);
|
739
|
+
|
740
|
+
Label conversion_done;
|
741
|
+
|
742
|
+
// If rs's MSB was 0, it's done.
|
743
|
+
// Otherwise we need to add that to the FP register.
|
744
|
+
Branch(&conversion_done, eq, t8, Operand(zero_reg));
|
745
|
+
|
746
|
+
// First load 2^31 - 1 into f20.
|
747
|
+
Or(t9, zero_reg, 0x7FFFFFFF);
|
748
|
+
mtc1(t9, f20);
|
749
|
+
|
750
|
+
// Convert it to FP and add it to fd.
|
751
|
+
cvt_d_w(f20, f20);
|
752
|
+
add_d(fd, fd, f20);
|
753
|
+
// Now add 1.
|
754
|
+
Or(t9, zero_reg, 1);
|
755
|
+
mtc1(t9, f20);
|
756
|
+
|
757
|
+
cvt_d_w(f20, f20);
|
758
|
+
add_d(fd, fd, f20);
|
759
|
+
bind(&conversion_done);
|
760
|
+
}
|
761
|
+
|
762
|
+
|
763
|
+
void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
|
764
|
+
Trunc_uw_d(fs, t4);
|
765
|
+
mtc1(t4, fd);
|
766
|
+
}
|
767
|
+
|
768
|
+
|
769
|
+
void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
|
770
|
+
ASSERT(!fd.is(f22));
|
771
|
+
ASSERT(!rs.is(t6));
|
772
|
+
|
773
|
+
// Load 2^31 into f22.
|
774
|
+
Or(t6, zero_reg, 0x80000000);
|
775
|
+
Cvt_d_uw(f22, t6);
|
776
|
+
|
777
|
+
// Test if f22 > fd.
|
778
|
+
c(OLT, D, fd, f22);
|
779
|
+
|
780
|
+
Label simple_convert;
|
781
|
+
// If fd < 2^31 we can convert it normally.
|
782
|
+
bc1t(&simple_convert);
|
783
|
+
|
784
|
+
// First we subtract 2^31 from fd, then trunc it to rs
|
785
|
+
// and add 2^31 to rs.
|
786
|
+
|
787
|
+
sub_d(f22, fd, f22);
|
788
|
+
trunc_w_d(f22, f22);
|
789
|
+
mfc1(rs, f22);
|
790
|
+
or_(rs, rs, t6);
|
791
|
+
|
792
|
+
Label done;
|
793
|
+
Branch(&done);
|
794
|
+
// Simple conversion.
|
795
|
+
bind(&simple_convert);
|
796
|
+
trunc_w_d(f22, fd);
|
797
|
+
mfc1(rs, f22);
|
798
|
+
|
799
|
+
bind(&done);
|
800
|
+
}
|
801
|
+
|
802
|
+
|
803
|
+
// Tries to get a signed int32 out of a double precision floating point heap
|
804
|
+
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
|
805
|
+
// 32bits signed integer range.
|
806
|
+
// This method implementation differs from the ARM version for performance
|
807
|
+
// reasons.
|
808
|
+
void MacroAssembler::ConvertToInt32(Register source,
|
809
|
+
Register dest,
|
810
|
+
Register scratch,
|
811
|
+
Register scratch2,
|
812
|
+
FPURegister double_scratch,
|
813
|
+
Label *not_int32) {
|
814
|
+
Label right_exponent, done;
|
815
|
+
// Get exponent word (ENDIAN issues).
|
816
|
+
lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
|
817
|
+
// Get exponent alone in scratch2.
|
818
|
+
And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
|
819
|
+
// Load dest with zero. We use this either for the final shift or
|
820
|
+
// for the answer.
|
821
|
+
mov(dest, zero_reg);
|
822
|
+
// Check whether the exponent matches a 32 bit signed int that is not a Smi.
|
823
|
+
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
|
824
|
+
// the exponent that we are fastest at and also the highest exponent we can
|
825
|
+
// handle here.
|
826
|
+
const uint32_t non_smi_exponent =
|
827
|
+
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
|
828
|
+
// If we have a match of the int32-but-not-Smi exponent then skip some logic.
|
829
|
+
Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
|
830
|
+
// If the exponent is higher than that then go to not_int32 case. This
|
831
|
+
// catches numbers that don't fit in a signed int32, infinities and NaNs.
|
832
|
+
Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
|
833
|
+
|
834
|
+
// We know the exponent is smaller than 30 (biased). If it is less than
|
835
|
+
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
|
836
|
+
// it rounds to zero.
|
837
|
+
const uint32_t zero_exponent =
|
838
|
+
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
|
839
|
+
Subu(scratch2, scratch2, Operand(zero_exponent));
|
840
|
+
// Dest already has a Smi zero.
|
841
|
+
Branch(&done, lt, scratch2, Operand(zero_reg));
|
842
|
+
if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
|
843
|
+
// We have a shifted exponent between 0 and 30 in scratch2.
|
844
|
+
srl(dest, scratch2, HeapNumber::kExponentShift);
|
845
|
+
// We now have the exponent in dest. Subtract from 30 to get
|
846
|
+
// how much to shift down.
|
847
|
+
li(at, Operand(30));
|
848
|
+
subu(dest, at, dest);
|
486
849
|
}
|
487
|
-
|
488
|
-
|
850
|
+
bind(&right_exponent);
|
851
|
+
if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
|
852
|
+
CpuFeatures::Scope scope(FPU);
|
853
|
+
// MIPS FPU instructions implementing double precision to integer
|
854
|
+
// conversion using round to zero. Since the FP value was qualified
|
855
|
+
// above, the resulting integer should be a legal int32.
|
856
|
+
// The original 'Exponent' word is still in scratch.
|
857
|
+
lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
|
858
|
+
mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
|
859
|
+
trunc_w_d(double_scratch, double_scratch);
|
860
|
+
mfc1(dest, double_scratch);
|
861
|
+
} else {
|
862
|
+
// On entry, dest has final downshift, scratch has original sign/exp/mant.
|
863
|
+
// Save sign bit in top bit of dest.
|
864
|
+
And(scratch2, scratch, Operand(0x80000000));
|
865
|
+
Or(dest, dest, Operand(scratch2));
|
866
|
+
// Put back the implicit 1, just above mantissa field.
|
867
|
+
Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
|
868
|
+
|
869
|
+
// Shift up the mantissa bits to take up the space the exponent used to
|
870
|
+
// take. We just orred in the implicit bit so that took care of one and
|
871
|
+
// we want to leave the sign bit 0 so we subtract 2 bits from the shift
|
872
|
+
// distance. But we want to clear the sign-bit so shift one more bit
|
873
|
+
// left, then shift right one bit.
|
874
|
+
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
|
875
|
+
sll(scratch, scratch, shift_distance + 1);
|
876
|
+
srl(scratch, scratch, 1);
|
877
|
+
|
878
|
+
// Get the second half of the double. For some exponents we don't
|
879
|
+
// actually need this because the bits get shifted out again, but
|
880
|
+
// it's probably slower to test than just to do it.
|
881
|
+
lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
|
882
|
+
// Extract the top 10 bits, and insert those bottom 10 bits of scratch.
|
883
|
+
// The width of the field here is the same as the shift amount above.
|
884
|
+
const int field_width = shift_distance;
|
885
|
+
Ext(scratch2, scratch2, 32-shift_distance, field_width);
|
886
|
+
Ins(scratch, scratch2, 0, field_width);
|
887
|
+
// Move down according to the exponent.
|
888
|
+
srlv(scratch, scratch, dest);
|
889
|
+
// Prepare the negative version of our integer.
|
890
|
+
subu(scratch2, zero_reg, scratch);
|
891
|
+
// Trick to check sign bit (msb) held in dest, count leading zero.
|
892
|
+
// 0 indicates negative, save negative version with conditional move.
|
893
|
+
clz(dest, dest);
|
894
|
+
movz(scratch, scratch2, dest);
|
895
|
+
mov(dest, scratch);
|
896
|
+
}
|
897
|
+
bind(&done);
|
898
|
+
}
|
899
|
+
|
900
|
+
|
901
|
+
+// Emulated conditional branches do not emit a nop in the branch delay slot.
+//
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
+    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
+    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+
+
+void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+  b(offset);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
-void MacroAssembler::Branch(Condition cond,
-                            const Operand& rt,
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+                            const Operand& rt,
+                            BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+  ASSERT(!rs.is(zero_reg));
   Register r2 = no_reg;
+  Register scratch = at;
+
   if (rt.is_reg()) {
+    // We don't want any other register but scratch clobbered.
+    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
     r2 = rt.rm_;
-
-
-
+    switch (cond) {
+      case cc_always:
+        b(offset);
+        break;
+      case eq:
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison
+      case greater:
+        if (r2.is(zero_reg)) {
+          bgtz(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (r2.is(zero_reg)) {
+          bgez(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (r2.is(zero_reg)) {
+          bltz(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (r2.is(zero_reg)) {
+          blez(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (r2.is(zero_reg)) {
+          bgtz(rs, offset);
+        } else {
+          sltu(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (r2.is(zero_reg)) {
+          bgez(rs, offset);
+        } else {
+          sltu(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (r2.is(zero_reg)) {
+          b(offset);
+        } else {
+          sltu(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (r2.is(zero_reg)) {
+          b(offset);
+        } else {
+          sltu(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+    // target.
+    switch (cond) {
+      case cc_always:
+        b(offset);
+        break;
+      case eq:
+        // We don't want any other register but scratch clobbered.
+        ASSERT(!scratch.is(rs));
+        r2 = scratch;
+        li(r2, rt);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        // We don't want any other register but scratch clobbered.
+        ASSERT(!scratch.is(rs));
+        r2 = scratch;
+        li(r2, rt);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison
+      case greater:
+        if (rt.imm32_ == 0) {
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (rt.imm32_ == 0) {
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          slti(scratch, rs, rt.imm32_);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (rt.imm32_ == 0) {
+          bltz(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          slti(scratch, rs, rt.imm32_);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (rt.imm32_ == 0) {
+          blez(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (rt.imm32_ == 0) {
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (rt.imm32_ == 0) {
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          sltiu(scratch, rs, rt.imm32_);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (rt.imm32_ == 0) {
+          b(offset);
+        } else if (is_int16(rt.imm32_)) {
+          sltiu(scratch, rs, rt.imm32_);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (rt.imm32_ == 0) {
+          b(offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
 
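MIPS can compare-and-branch directly only against zero (and for register equality), so every other condition above is synthesized: slt/sltu writes a 0-or-1 into the scratch register at, and a beq/bne against zero_reg consumes it. A minimal C++ model of one such pairing, for "branch if rs > rt" signed (a hypothetical helper for illustration, not part of the source):

    #include <cstdint>

    // Models the emitted pair:  slt at, rt, rs  ;  bne at, zero_reg, target
    inline bool GreaterTaken(int32_t rs, int32_t rt) {
      int32_t at = (rt < rs) ? 1 : 0;  // slt: set-on-less-than into scratch
      return at != 0;                  // bne at, zero_reg: branch taken when set
    }

The unsigned cases are identical with sltu, and the >= / <= cases flip the operand order and branch with beq instead of bne.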
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
   // We use branch_offset as an argument for the branch instructions to be sure
   // it is called just before generating the branch instruction, as needed.
 
-
-    case cc_always:
-      b(shifted_branch_offset(L, false));
-      break;
-    case eq:
-      beq(rs, r2, shifted_branch_offset(L, false));
-      break;
-    case ne:
-      bne(rs, r2, shifted_branch_offset(L, false));
-      break;
+  b(shifted_branch_offset(L, false));
 
-
-
-
-
-      break;
-    case greater_equal:
-      slt(scratch, rs, r2);
-      beq(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case less:
-      slt(scratch, rs, r2);
-      bne(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case less_equal:
-      slt(scratch, r2, rs);
-      beq(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
 
-    // Unsigned comparison.
-    case Ugreater:
-      sltu(scratch, r2, rs);
-      bne(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case Ugreater_equal:
-      sltu(scratch, rs, r2);
-      beq(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case Uless:
-      sltu(scratch, rs, r2);
-      bne(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
-    case Uless_equal:
-      sltu(scratch, r2, rs);
-      beq(scratch, zero_reg, shifted_branch_offset(L, false));
-      break;
 
-
-
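The Label-taking emitter below computes each branch offset with shifted_branch_offset and then asserts is_int16(offset): a MIPS I-type branch encodes its target as a signed 16-bit offset counted in instruction words, which gives roughly +/-128 KB of reach. A sketch of that range check under the same assumption (illustrative, not the V8 helper itself):

    #include <cstdint>

    inline bool FitsBranchOffset(int32_t word_offset) {
      return word_offset >= -32768 && word_offset <= 32767;  // signed 16-bit field
    }
    // 32767 instruction slots * 4 bytes per instruction is about 128 KB forward.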
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+                            const Operand& rt,
+                            BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+
+  int32_t offset;
+  Register r2 = no_reg;
+  Register scratch = at;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+    // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+    // target.
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        b(offset);
+        break;
+      case eq:
+        offset = shifted_branch_offset(L, false);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        offset = shifted_branch_offset(L, false);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison
+      case greater:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bltz(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          blez(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else {
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else {
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else {
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+    // target.
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        b(offset);
+        break;
+      case eq:
+        r2 = scratch;
+        li(r2, rt);
+        offset = shifted_branch_offset(L, false);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        r2 = scratch;
+        li(r2, rt);
+        offset = shifted_branch_offset(L, false);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison
+      case greater:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          slti(scratch, rs, rt.imm32_);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bltz(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          slti(scratch, rs, rt.imm32_);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          blez(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm32_)) {
+          sltiu(scratch, rs, rt.imm32_);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else if (is_int16(rt.imm32_)) {
+          sltiu(scratch, rs, rt.imm32_);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
-  //
-
+  // Check that offset could actually hold in an int16_t.
+  ASSERT(is_int16(offset));
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
-// Trashes the at register if no scratch register is provided.
 // We need to use a bgezal or bltzal, but they can't be used directly with the
 // slt instructions. We could use sub or add instead but we would miss overflow
 // cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLink(
-
+void MacroAssembler::BranchAndLink(int16_t offset,
+                                   BranchDelaySlot bdslot) {
+  bal(offset);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+                                   const Operand& rt,
+                                   BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
   Register r2 = no_reg;
+  Register scratch = at;
+
   if (rt.is_reg()) {
     r2 = rt.rm_;
   } else if (cond != cc_always) {
@@ -633,14 +1491,29 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
     default:
       UNREACHABLE();
   }
-  // Emit a nop in the branch delay slot.
-
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+  bal(shifted_branch_offset(L, false));
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
-void MacroAssembler::BranchAndLink(
-                                   const Operand& rt,
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+                                   const Operand& rt,
+                                   BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+
+  int32_t offset;
   Register r2 = no_reg;
+  Register scratch = at;
   if (rt.is_reg()) {
     r2 = rt.rm_;
   } else if (cond != cc_always) {
@@ -650,157 +1523,280 @@ void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
 
   switch (cond) {
     case cc_always:
-
+      offset = shifted_branch_offset(L, false);
+      bal(offset);
       break;
     case eq:
       bne(rs, r2, 2);
       nop();
-
+      offset = shifted_branch_offset(L, false);
+      bal(offset);
       break;
     case ne:
       beq(rs, r2, 2);
       nop();
-
+      offset = shifted_branch_offset(L, false);
+      bal(offset);
       break;
 
     // Signed comparison
     case greater:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
-
+      offset = shifted_branch_offset(L, false);
+      bgezal(scratch, offset);
       break;
     case greater_equal:
       slt(scratch, rs, r2);
       addiu(scratch, scratch, -1);
-
+      offset = shifted_branch_offset(L, false);
+      bltzal(scratch, offset);
       break;
     case less:
       slt(scratch, rs, r2);
       addiu(scratch, scratch, -1);
-
+      offset = shifted_branch_offset(L, false);
+      bgezal(scratch, offset);
       break;
     case less_equal:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
-
+      offset = shifted_branch_offset(L, false);
+      bltzal(scratch, offset);
       break;
 
     // Unsigned comparison.
     case Ugreater:
       sltu(scratch, r2, rs);
       addiu(scratch, scratch, -1);
-
+      offset = shifted_branch_offset(L, false);
+      bgezal(scratch, offset);
       break;
     case Ugreater_equal:
       sltu(scratch, rs, r2);
       addiu(scratch, scratch, -1);
-
+      offset = shifted_branch_offset(L, false);
+      bltzal(scratch, offset);
       break;
     case Uless:
       sltu(scratch, rs, r2);
       addiu(scratch, scratch, -1);
-
+      offset = shifted_branch_offset(L, false);
+      bgezal(scratch, offset);
       break;
     case Uless_equal:
       sltu(scratch, r2, rs);
       addiu(scratch, scratch, -1);
-
+      offset = shifted_branch_offset(L, false);
+      bltzal(scratch, offset);
       break;
 
     default:
       UNREACHABLE();
   }
-
-
+
+  // Check that offset could actually hold in an int16_t.
+  ASSERT(is_int16(offset));
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
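Every emitter above ends by optionally plugging the branch delay slot: on MIPS the instruction that follows a branch or jump executes regardless of whether the branch is taken, so a caller that cannot schedule something useful there passes PROTECT and gets a nop. A condensed model of the convention follows (hypothetical names and a template stand-in for the assembler type; the real enum lives in the MIPS port headers):

    #include <cstdint>

    // kUseDelaySlot: the caller promises to emit the slot instruction itself.
    // kProtect: the macro assembler fills the slot with a nop.
    enum BranchDelaySlotModel { kUseDelaySlot, kProtect };

    template <typename Masm>
    void EmitBranch(Masm* masm, int16_t offset, BranchDelaySlotModel slot) {
      masm->b(offset);                    // the branch issues here...
      if (slot == kProtect) masm->nop();  // ...and the following slot runs either way
    }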
+void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (target.is_reg()) {
+    jr(target.rm());
+  } else {
+    if (!MustUseReg(target.rmode_)) {
+      j(target.imm32_);
+    } else {
+      li(t9, target);
+      jr(t9);
+    }
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
 void MacroAssembler::Jump(const Operand& target,
-                          Condition cond, Register rs, const Operand& rt
+                          Condition cond, Register rs, const Operand& rt,
+                          BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  BRANCH_ARGS_CHECK(cond, rs, rt);
   if (target.is_reg()) {
     if (cond == cc_always) {
       jr(target.rm());
     } else {
-      Branch(NegateCondition(cond),
+      Branch(2, NegateCondition(cond), rs, rt);
       jr(target.rm());
     }
-  } else {
-    if (!
+  } else {  // Not register target.
+    if (!MustUseReg(target.rmode_)) {
       if (cond == cc_always) {
         j(target.imm32_);
       } else {
-        Branch(NegateCondition(cond),
+        Branch(2, NegateCondition(cond), rs, rt);
         j(target.imm32_);  // Will generate only one instruction.
       }
-    } else { //
-      li(
+    } else {  // MustUseReg(target)
+      li(t9, target);
       if (cond == cc_always) {
-        jr(
+        jr(t9);
       } else {
-        Branch(NegateCondition(cond),
-        jr(
+        Branch(2, NegateCondition(cond), rs, rt);
+        jr(t9);  // Will generate only one instruction.
       }
     }
   }
-  // Emit a nop in the branch delay slot.
-
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+// Note: To call gcc-compiled C code on mips, you must call thru t9.
+void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (target.is_reg()) {
+    jalr(target.rm());
+  } else {  // !target.is_reg()
+    if (!MustUseReg(target.rmode_)) {
+      jal(target.imm32_);
+    } else {  // MustUseReg(target)
+      li(t9, target);
+      jalr(t9);
+    }
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
 
+// Note: To call gcc-compiled C code on mips, you must call thru t9.
 void MacroAssembler::Call(const Operand& target,
-                          Condition cond, Register rs, const Operand& rt
+                          Condition cond, Register rs, const Operand& rt,
+                          BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  BRANCH_ARGS_CHECK(cond, rs, rt);
   if (target.is_reg()) {
     if (cond == cc_always) {
       jalr(target.rm());
     } else {
-      Branch(NegateCondition(cond),
+      Branch(2, NegateCondition(cond), rs, rt);
       jalr(target.rm());
     }
   } else {  // !target.is_reg()
-    if (!
+    if (!MustUseReg(target.rmode_)) {
      if (cond == cc_always) {
         jal(target.imm32_);
       } else {
-        Branch(NegateCondition(cond),
+        Branch(2, NegateCondition(cond), rs, rt);
         jal(target.imm32_);  // Will generate only one instruction.
       }
-    } else { //
-      li(
+    } else {  // MustUseReg(target)
+      li(t9, target);
       if (cond == cc_always) {
-        jalr(
+        jalr(t9);
       } else {
-        Branch(NegateCondition(cond),
-        jalr(
+        Branch(2, NegateCondition(cond), rs, rt);
+        jalr(t9);  // Will generate only one instruction.
       }
     }
   }
-  // Emit a nop in the branch delay slot.
-
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
 }
 
-
-
+
+void MacroAssembler::Drop(int count,
+                          Condition cond,
+                          Register reg,
+                          const Operand& op) {
+  if (count <= 0) {
+    return;
+  }
+
+  Label skip;
+
+  if (cond != al) {
+    Branch(&skip, NegateCondition(cond), reg, op);
+  }
+
+  if (count > 0) {
+    addiu(sp, sp, count * kPointerSize);
+  }
+
+  if (cond != al) {
+    bind(&skip);
+  }
 }
 
 
-void MacroAssembler::
-
+void MacroAssembler::DropAndRet(int drop,
+                                Condition cond,
+                                Register r1,
+                                const Operand& r2) {
+  // This is a workaround to make sure only one branch instruction is
+  // generated. It relies on Drop and Ret not creating branches if
+  // cond == cc_always.
+  Label skip;
+  if (cond != cc_always) {
+    Branch(&skip, NegateCondition(cond), r1, r2);
+  }
+
+  Drop(drop);
+  Ret();
+
+  if (cond != cc_always) {
+    bind(&skip);
+  }
+}
+
+
+void MacroAssembler::Swap(Register reg1,
+                          Register reg2,
+                          Register scratch) {
+  if (scratch.is(no_reg)) {
+    Xor(reg1, reg1, Operand(reg2));
+    Xor(reg2, reg2, Operand(reg1));
+    Xor(reg1, reg1, Operand(reg2));
+  } else {
+    mov(scratch, reg1);
+    mov(reg1, reg2);
+    mov(reg2, scratch);
+  }
 }
 
 
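When Swap() is given no scratch register it falls back on the XOR exchange, trading three moves for three exclusive-ors. The same three steps in plain C++ (note the classic caveat: XOR-swapping a location with itself zeroes it, so the two registers must be distinct):

    inline void XorSwap(unsigned& a, unsigned& b) {
      a ^= b;  // a now holds a ^ b
      b ^= a;  // b = b ^ (a ^ b) = original a
      a ^= b;  // a = (a ^ b) ^ original a = original b
    }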
 void MacroAssembler::Call(Label* target) {
-
+  BranchAndLink(target);
+}
+
+
+void MacroAssembler::Move(Register dst, Register src) {
+  if (!dst.is(src)) {
+    mov(dst, src);
+  }
 }
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-// ---------------------------------------------------------------------------
-// Debugger Support
 
-
-
-
-
+void MacroAssembler::DebugBreak() {
+  ASSERT(allow_stub_calls());
+  mov(a0, zero_reg);
+  li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+  CEntryStub ces(1);
+  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
 // ---------------------------------------------------------------------------
@@ -822,7 +1818,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
            && StackHandlerConstants::kPCOffset == 3 * kPointerSize
            && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
     // Save the current handler as the next handler.
-
+    li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
     lw(t1, MemOperand(t2));
 
     addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -848,7 +1844,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
     li(t0, Operand(StackHandler::ENTRY));
 
     // Save the current handler as the next handler.
-
+    li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
     lw(t1, MemOperand(t2));
 
     addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -864,45 +1860,377 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
 
 
 void MacroAssembler::PopTryHandler() {
-
+  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  pop(a1);
+  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+  li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  sw(a1, MemOperand(at));
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  if (!FLAG_inline_new) {
+    if (FLAG_debug_code) {
+      // Trash the registers to simulate an allocation failure.
+      li(result, 0x7091);
+      li(scratch1, 0x7191);
+      li(scratch2, 0x7291);
+    }
+    jmp(gc_required);
+    return;
+  }
+
+  ASSERT(!result.is(scratch1));
+  ASSERT(!result.is(scratch2));
+  ASSERT(!scratch1.is(scratch2));
+  ASSERT(!scratch1.is(t9));
+  ASSERT(!scratch2.is(t9));
+  ASSERT(!result.is(t9));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
+
+  // Check relative positions of allocation top and limit addresses.
+  // ARM adds additional checks to make sure the ldm instruction can be
+  // used. On MIPS we don't have ldm so we don't need additional checks either.
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address(isolate());
+  intptr_t top =
+      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+  intptr_t limit =
+      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+  ASSERT((limit - top) == kPointerSize);
+
+  // Set up allocation top address and object size registers.
+  Register topaddr = scratch1;
+  Register obj_size_reg = scratch2;
+  li(topaddr, Operand(new_space_allocation_top));
+  li(obj_size_reg, Operand(object_size));
+
+  // This code stores a temporary value in t9.
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into t9.
+    lw(result, MemOperand(topaddr));
+    lw(t9, MemOperand(topaddr, kPointerSize));
+  } else {
+    if (FLAG_debug_code) {
+      // Assert that result actually contains top on entry. t9 is used
+      // immediately below so this use of t9 does not cause difference with
+      // respect to register content between debug and release mode.
+      lw(t9, MemOperand(topaddr));
+      Check(eq, "Unexpected allocation top", result, Operand(t9));
+    }
+    // Load allocation limit into t9. Result already contains allocation top.
+    lw(t9, MemOperand(topaddr, limit - top));
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top.
+  Addu(scratch2, result, Operand(obj_size_reg));
+  Branch(gc_required, Ugreater, scratch2, Operand(t9));
+  sw(scratch2, MemOperand(topaddr));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    Addu(result, result, Operand(kHeapObjectTag));
+  }
 }
 
 
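AllocateInNewSpace is bump-pointer allocation: new space keeps its allocation "top" and "limit" in two adjacent words (hence the (limit - top) == kPointerSize assert on their addresses), and allocating is just advancing top, with a jump to gc_required when the limit would be crossed. A compact C++ rendering of that fast path (illustrative structure only, not V8's actual data layout):

    #include <cstddef>

    struct NewSpaceModel {
      char* top;    // next free byte
      char* limit;  // end of the allocatable region
    };

    void* TryBumpAllocate(NewSpaceModel* space, size_t object_size) {
      char* result = space->top;
      char* new_top = result + object_size;
      if (new_top > space->limit) return nullptr;  // caller branches to gc_required
      space->top = new_top;                        // sw(scratch2, MemOperand(topaddr))
      return result;  // caller may add kHeapObjectTag when TAG_OBJECT is set
    }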
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
+  if (!FLAG_inline_new) {
+    if (FLAG_debug_code) {
+      // Trash the registers to simulate an allocation failure.
+      li(result, 0x7091);
+      li(scratch1, 0x7191);
+      li(scratch2, 0x7291);
+    }
+    jmp(gc_required);
+    return;
+  }
 
-
-
+  ASSERT(!result.is(scratch1));
+  ASSERT(!result.is(scratch2));
+  ASSERT(!scratch1.is(scratch2));
+  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+
+  // Check relative positions of allocation top and limit addresses.
+  // ARM adds additional checks to make sure the ldm instruction can be
+  // used. On MIPS we don't have ldm so we don't need additional checks either.
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address(isolate());
+  intptr_t top =
+      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+  intptr_t limit =
+      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+  ASSERT((limit - top) == kPointerSize);
+
+  // Set up allocation top address and object size registers.
+  Register topaddr = scratch1;
+  li(topaddr, Operand(new_space_allocation_top));
+
+  // This code stores a temporary value in t9.
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into t9.
+    lw(result, MemOperand(topaddr));
+    lw(t9, MemOperand(topaddr, kPointerSize));
+  } else {
+    if (FLAG_debug_code) {
+      // Assert that result actually contains top on entry. t9 is used
+      // immediately below so this use of t9 does not cause difference with
+      // respect to register content between debug and release mode.
+      lw(t9, MemOperand(topaddr));
+      Check(eq, "Unexpected allocation top", result, Operand(t9));
+    }
+    // Load allocation limit into t9. Result already contains allocation top.
+    lw(t9, MemOperand(topaddr, limit - top));
+  }
 
-
-
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top. Object size may be in words so a shift is
+  // required to get the number of bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    sll(scratch2, object_size, kPointerSizeLog2);
+    Addu(scratch2, result, scratch2);
+  } else {
+    Addu(scratch2, result, Operand(object_size));
+  }
+  Branch(gc_required, Ugreater, scratch2, Operand(t9));
+
+  // Update allocation top. result temporarily holds the new top.
+  if (FLAG_debug_code) {
+    And(t9, scratch2, Operand(kObjectAlignmentMask));
+    Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
+  }
+  sw(scratch2, MemOperand(topaddr));
 
-
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    Addu(result, result, Operand(kHeapObjectTag));
+  }
+}
 
-
-
-
-
-
-
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+                                              Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+
+  // Make sure the object has no tag before resetting top.
+  And(object, object, Operand(~kHeapObjectTagMask));
+#ifdef DEBUG
+  // Check that the object un-allocated is below the current top.
+  li(scratch, Operand(new_space_allocation_top));
+  lw(scratch, MemOperand(scratch));
+  Check(less, "Undo allocation of non allocated memory",
+      object, Operand(scratch));
+#endif
+  // Write the address of the object to un-allocate as the current top.
+  li(scratch, Operand(new_space_allocation_top));
+  sw(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+                                           Register length,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  sll(scratch1, length, 1);  // Length in bytes, not chars.
+  addiu(scratch1, scratch1,
+      kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+  // Allocate two-byte string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result,
+                      length,
+                      Heap::kStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
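The instruction triple at the top of AllocateTwoByteString() rounds the requested byte count up to V8's object alignment: two bytes per character, plus the header, then mask arithmetic. The same computation in C++, with the constants as stand-ins for SeqTwoByteString::kHeaderSize and kObjectAlignmentMask:

    inline int TwoByteStringAllocationSize(int length,
                                           int header_size,
                                           int alignment_mask) {
      int size = 2 * length + header_size + alignment_mask;  // sll + addiu
      return size & ~alignment_mask;                         // And with ~mask rounds down to alignment
    }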
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         Register length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  ASSERT(kCharSize == 1);
+  addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+  // Allocate ASCII string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result,
+                      length,
+                      Heap::kAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+                                             Register length,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register heap_number_map,
+                                        Label* need_gc) {
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  AllocateInNewSpace(HeapNumber::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     need_gc,
+                     TAG_OBJECT);
+
+  // Store heap number map in the allocated object.
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+                                                 FPURegister value,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+  AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
+  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
+void MacroAssembler::CopyFields(Register dst,
+                                Register src,
+                                RegList temps,
+                                int field_count) {
+  ASSERT((temps & dst.bit()) == 0);
+  ASSERT((temps & src.bit()) == 0);
+  // Primitive implementation using only one temporary register.
+
+  Register tmp = no_reg;
+  // Find a temp register in temps list.
+  for (int i = 0; i < kNumRegisters; i++) {
+    if ((temps & (1 << i)) != 0) {
+      tmp.code_ = i;
+      break;
+    }
   }
+  ASSERT(!tmp.is(no_reg));
 
-
-
-
-
+  for (int i = 0; i < field_count; i++) {
+    lw(tmp, FieldMemOperand(src, i * kPointerSize));
+    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
+  }
+}
 
-  // Align before saving sp on the stack.
-  bind(&extra_push);
-  mov(scratch, sp);
-  addiu(sp, sp, -8);
-  sw(scratch, MemOperand(sp));
 
-
-
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    JumpIfSmi(obj, fail);
+  }
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  li(at, Operand(map));
+  Branch(fail, ne, scratch, Operand(at));
 }
 
 
-void MacroAssembler::
-
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Heap::RootListIndex index,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    JumpIfSmi(obj, fail);
+  }
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  LoadRoot(at, index);
+  Branch(fail, ne, scratch, Operand(at));
 }
 
 
@@ -914,7 +2242,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     Handle<Code> code_constant,
                                     Register code_reg,
                                     Label* done,
-                                    InvokeFlag flag
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   bool definitely_matches = false;
   Label regular_invoke;
 
@@ -949,11 +2278,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
       li(a2, Operand(expected.immediate()));
     }
   }
-  } else if (actual.is_immediate()) {
-    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
-    li(a0, Operand(actual.immediate()));
   } else {
-
+    if (actual.is_immediate()) {
+      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+      li(a0, Operand(actual.immediate()));
+    } else {
+      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
+    }
   }
 
   if (!definitely_matches) {
@@ -962,25 +2293,29 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
       addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
     }
 
-
+    Handle<Code> adaptor =
+        isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (flag == CALL_FUNCTION) {
-
-
-
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      if (post_call_generator != NULL) post_call_generator->Generate();
+      jmp(done);
     } else {
-
+      Jump(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&regular_invoke);
   }
 }
 
+
 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
-                                InvokeFlag flag
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
 
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+                 post_call_generator);
   if (flag == CALL_FUNCTION) {
     Call(code);
   } else {
@@ -1014,7 +2349,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
 
 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -1025,68 +2361,120 @@ void MacroAssembler::InvokeFunction(Register function,
   lw(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
-
-
-  addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+  sra(expected_reg, expected_reg, kSmiTagSize);
+  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(expected_reg);
-  InvokeCode(code_reg, expected, actual, flag);
+  InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+}
+
+
+void MacroAssembler::InvokeFunction(JSFunction* function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(function->is_compiled());
+
+  // Get the function and setup the context.
+  li(a1, Operand(Handle<JSFunction>(function)));
+  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Invoke the cached code.
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  if (V8::UseCrankshaft()) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  }
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+                                          Register map,
+                                          Register scratch,
+                                          Label* fail) {
+  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+  IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+                                            Register scratch,
+                                            Label* fail) {
+  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
+  Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+                                          Register scratch,
+                                          Label* fail) {
+  ASSERT(kNotStringTag != 0);
+
+  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  And(scratch, scratch, Operand(kIsNotStringMask));
+  Branch(fail, ne, scratch, Operand(zero_reg));
 }
 
 
 // ---------------------------------------------------------------------------
 // Support functions.
 
-void MacroAssembler::GetObjectType(Register function,
-                                   Register map,
-                                   Register type_reg) {
-  lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
-  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
-}
 
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Register scratch,
+                                             Label* miss) {
+  // Check that the receiver isn't a smi.
+  JumpIfSmi(function, miss);
 
-
-
-
-  lw(t9, MemOperand(t9));  // Deref address.
-  addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
-  // Call and allocate arguments slots.
-  jalr(t9);
-  // Use the branch delay slot to allocated argument slots.
-  addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
-  addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
-}
+  // Check that the function really is a function. Load map into result reg.
+  GetObjectType(function, result, scratch);
+  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
 
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+  Branch(&non_instance, ne, scratch, Operand(zero_reg));
 
-
-
-
-  jalr(target);
-  // Use the branch delay slot to allocated argument slots.
-  addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
-  addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
-}
+  // Get the prototype or initial map from the function.
+  lw(result,
+     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
 
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
+  Branch(miss, eq, result, Operand(t8));
 
-
-
-
-
-  addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
-  // Call and allocate arguments slots.
-  jr(t9);
-  // Use the branch delay slot to allocated argument slots.
-  addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
-}
+  // If the function does not have an initial map, we're done.
+  Label done;
+  GetObjectType(result, scratch, scratch);
+  Branch(&done, ne, scratch, Operand(MAP_TYPE));
 
+  // Get the prototype from the initial map.
+  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+  jmp(&done);
 
-
-
-
-
-
-
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  bind(&non_instance);
+  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  bind(&done);
+}
+
+
+void MacroAssembler::GetObjectType(Register object,
+                                   Register map,
+                                   Register type_reg) {
+  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
 
 
 // -----------------------------------------------------------------------------
@@ -1099,8 +2487,9 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
 }
 
 
-void MacroAssembler::
-
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
@@ -1112,7 +2501,71 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
 }
 
 
-void MacroAssembler::
+void MacroAssembler::IndexFromHash(Register hash,
+                                   Register index) {
+  // If the hash field contains an array index pick it out. The assert checks
+  // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it does not
+  // conflict.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+  // the low kHashShift bits.
+  STATIC_ASSERT(kSmiTag == 0);
+  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+  sll(index, hash, kSmiTagSize);
+}
+
+
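IndexFromHash() pulls the cached array index out of a string's hash field with a single Ext (bit-field extract) and smi-tags it with a one-bit shift. The equivalent C++, with the shift and width as parameters standing in for String::kHashShift and String::kArrayIndexValueBits:

    #include <cstdint>

    inline uint32_t SmiTaggedIndexFromHash(uint32_t hash,
                                           int hash_shift,
                                           int value_bits) {
      uint32_t index = (hash >> hash_shift) & ((1u << value_bits) - 1);  // Ext
      return index << 1;  // sll by kSmiTagSize (== 1): smi-tag the index
    }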
void MacroAssembler::ObjectToDoubleFPURegister(Register object,
|
2521
|
+
FPURegister result,
|
2522
|
+
Register scratch1,
|
2523
|
+
Register scratch2,
|
2524
|
+
Register heap_number_map,
|
2525
|
+
Label* not_number,
|
2526
|
+
ObjectToDoubleFlags flags) {
|
2527
|
+
Label done;
|
2528
|
+
if ((flags & OBJECT_NOT_SMI) == 0) {
|
2529
|
+
Label not_smi;
|
2530
|
+
JumpIfNotSmi(object, ¬_smi);
|
2531
|
+
// Remove smi tag and convert to double.
|
2532
|
+
sra(scratch1, object, kSmiTagSize);
|
2533
|
+
mtc1(scratch1, result);
|
2534
|
+
cvt_d_w(result, result);
|
2535
|
+
Branch(&done);
|
2536
|
+
bind(¬_smi);
|
2537
|
+
}
|
2538
|
+
// Check for heap number and load double value from it.
|
2539
|
+
lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
|
2540
|
+
Branch(not_number, ne, scratch1, Operand(heap_number_map));
|
2541
|
+
|
2542
|
+
if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
|
2543
|
+
// If exponent is all ones the number is either a NaN or +/-Infinity.
|
2544
|
+
Register exponent = scratch1;
|
2545
|
+
Register mask_reg = scratch2;
|
2546
|
+
lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
|
2547
|
+
li(mask_reg, HeapNumber::kExponentMask);
|
2548
|
+
|
2549
|
+
And(exponent, exponent, mask_reg);
|
2550
|
+
Branch(not_number, eq, exponent, Operand(mask_reg));
|
2551
|
+
}
|
2552
|
+
ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
|
2553
|
+
bind(&done);
|
2554
|
+
}
|
2555
|
+
|
2556
|
+
|
2557
|
+
|
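Editor's note: the AVOID_NANS_AND_INFINITIES path rejects a heap number whose exponent bits are all ones. The same predicate in portable C++, as a sketch (0x7FF00000 is the IEEE-754 double exponent mask in the high word, which is what HeapNumber::kExponentMask denotes):

```cpp
#include <cstdint>
#include <cstring>

// A double is NaN or +/-Infinity exactly when its 11 exponent bits are all
// ones; ObjectToDoubleFPURegister tests the high word of the heap number.
bool IsNanOrInfinity(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);     // type-pun without UB
  const uint32_t kExponentMask = 0x7FF00000;   // high-word exponent mask
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  return (high & kExponentMask) == kExponentMask;
}
```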
+void MacroAssembler::SmiToDoubleFPURegister(Register smi,
+                                            FPURegister value,
+                                            Register scratch1) {
+  sra(scratch1, smi, kSmiTagSize);
+  mtc1(scratch1, value);
+  cvt_d_w(value, value);
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+                                 int num_arguments) {
   // All parameters are on the stack. v0 has the return value after call.
 
   // If the expected number of arguments of the runtime function is
@@ -1128,69 +2581,129 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
   // should remove this need and make the runtime routine entry code
   // smarter.
   li(a0, num_arguments);
-
+  li(a1, Operand(ExternalReference(f, isolate())));
   CEntryStub stub(1);
   CallStub(&stub);
 }
 
 
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+  const Runtime::Function* function = Runtime::FunctionForId(id);
+  li(a0, Operand(function->nargs));
+  li(a1, Operand(ExternalReference(function, isolate())));
+  CEntryStub stub(1);
+  stub.SaveDoubles();
+  CallStub(&stub);
+}
+
+
 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
   CallRuntime(Runtime::FunctionForId(fid), num_arguments);
 }
 
 
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments) {
+  li(a0, Operand(num_arguments));
+  li(a1, Operand(ext));
+
+  CEntryStub stub(1);
+  CallStub(&stub);
+}
+
+
 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                                int num_arguments,
                                                int result_size) {
-
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  li(a0, Operand(num_arguments));
+  JumpToExternalReference(ext);
 }
 
 
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
-  TailCallExternalReference(ExternalReference(fid
+  TailCallExternalReference(ExternalReference(fid, isolate()),
+                            num_arguments,
+                            result_size);
 }
 
 
 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
-
+  li(a1, Operand(builtin));
+  CEntryStub stub(1);
+  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-
-
-
-
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeJSFlags flags,
+                                   PostCallGenerator* post_call_generator) {
+  GetBuiltinEntry(t9, id);
+  if (flags == CALL_JS) {
+    Call(t9);
+    if (post_call_generator != NULL) post_call_generator->Generate();
+  } else {
+    ASSERT(flags == JUMP_JS);
+    Jump(t9);
+  }
 }
 
 
-void MacroAssembler::
-
-
+void MacroAssembler::GetBuiltinFunction(Register target,
+                                        Builtins::JavaScript id) {
+  // Load the builtins object into target register.
+  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+  // Load the JavaScript builtin function from the builtins object.
+  lw(target, FieldMemOperand(target,
+                             JSBuiltinsObject::OffsetOfFunctionWithId(id)));
 }
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-
+  ASSERT(!target.is(a1));
+  GetBuiltinFunction(a1, id);
+  // Load the code entry point from the builtins object.
+  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
 }
 
 
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
-
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch1, Operand(value));
+    li(scratch2, Operand(ExternalReference(counter)));
+    sw(scratch1, MemOperand(scratch2));
+  }
 }
 
 
 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch2, Operand(ExternalReference(counter)));
+    lw(scratch1, MemOperand(scratch2));
+    Addu(scratch1, scratch1, Operand(value));
+    sw(scratch1, MemOperand(scratch2));
+  }
 }
 
 
 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch2, Operand(ExternalReference(counter)));
+    lw(scratch1, MemOperand(scratch2));
+    Subu(scratch1, scratch1, Operand(value));
+    sw(scratch1, MemOperand(scratch2));
+  }
 }
 
 
@@ -1199,30 +2712,144 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
 
 void MacroAssembler::Assert(Condition cc, const char* msg,
                             Register rs, Operand rt) {
-
+  if (FLAG_debug_code)
+    Check(cc, msg, rs, rt);
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+                                          Heap::RootListIndex index) {
+  if (FLAG_debug_code) {
+    LoadRoot(at, index);
+    Check(eq, "Register did not match expected root", reg, Operand(at));
+  }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+  if (FLAG_debug_code) {
+    ASSERT(!elements.is(at));
+    Label ok;
+    Push(elements);
+    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    Branch(&ok, eq, elements, Operand(at));
+    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
+    Branch(&ok, eq, elements, Operand(at));
+    Abort("JSObject with fast elements map has slow elements");
+    bind(&ok);
+    Pop(elements);
+  }
 }
 
 
 void MacroAssembler::Check(Condition cc, const char* msg,
                            Register rs, Operand rt) {
-
+  Label L;
+  Branch(&L, cc, rs, rt);
+  Abort(msg);
+  // will not return here
+  bind(&L);
 }
 
 
 void MacroAssembler::Abort(const char* msg) {
-
+  Label abort_start;
+  bind(&abort_start);
+  // We want to pass the msg string like a smi to avoid GC
+  // problems, however msg is not guaranteed to be aligned
+  // properly. Instead, we pass an aligned pointer that is
+  // a proper v8 smi, but also pass the alignment difference
+  // from the real pointer as a smi.
+  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+#endif
+  // Disable stub call restrictions to always allow calls to abort.
+  AllowStubCallsScope allow_scope(this, true);
+
+  li(a0, Operand(p0));
+  Push(a0);
+  li(a0, Operand(Smi::FromInt(p1 - p0)));
+  Push(a0);
+  CallRuntime(Runtime::kAbort, 2);
+  // will not return here
+  if (is_trampoline_pool_blocked()) {
+    // If the calling code cares about the exact number of
+    // instructions generated, we insert padding here to keep the size
+    // of the Abort macro constant.
+    // Currently in debug mode with debug_code enabled the number of
+    // generated instructions is 14, so we use this as a maximum value.
+    static const int kExpectedAbortInstructions = 14;
+    int abort_instructions = InstructionsGeneratedSince(&abort_start);
+    ASSERT(abort_instructions <= kExpectedAbortInstructions);
+    while (abort_instructions++ < kExpectedAbortInstructions) {
+      nop();
+    }
+  }
+}
+
+
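Editor's note: the pointer-splitting trick in Abort can be checked in isolation. A minimal sketch assuming the usual tag layout (kSmiTag == 0, kSmiTagMask == 1); the runtime side re-adds the delta to recover the original message pointer:

```cpp
#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;
const intptr_t kSmiTagMask = 1;

// Split msg into a smi-valid aligned pointer plus a smi-encodable delta,
// exactly as Abort does before pushing both values for Runtime::kAbort.
void SplitForAbort(const char* msg, intptr_t* p0, intptr_t* delta) {
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  *p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // clear the tag bit -> valid smi
  *delta = p1 - *p0;                    // 0 or 1, re-added on the other side
  assert((*p0 & kSmiTagMask) == kSmiTag);
  assert(*p0 + *delta == p1);
}
```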
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+  if (context_chain_length > 0) {
+    // Move up the chain of contexts to the context containing the slot.
+    lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
+    // Load the function context (which is the incoming, outer context).
+    lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+    for (int i = 1; i < context_chain_length; i++) {
+      lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+      lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+    }
+    // The context may be an intermediate context, not a function context.
+    lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {  // Slot is in the current function context.
+    // The context may be an intermediate context, not a function context.
+    lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  }
+}
+
+
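Editor's note: the two loads per hop in LoadContext (closure slot, then that closure's context) read more clearly as a pointer walk. A purely illustrative sketch; these structs are not V8's real layout:

```cpp
// Hypothetical stand-ins: each context can name the closure that created
// it, and each closure carries the context it was created in.
struct Closure;
struct Context {
  Closure* closure;   // Context::CLOSURE_INDEX slot
  Context* fcontext;  // Context::FCONTEXT_INDEX slot
};
struct Closure {
  Context* context;   // JSFunction::kContextOffset
};

// Mirror of the emitted loop: hop out chain_length links, then load the
// function context (the current context may be an intermediate one).
Context* LoadContext(Context* cp, int chain_length) {
  Context* dst = cp;
  for (int i = 0; i < chain_length; i++) dst = dst->closure->context;
  return dst->fcontext;
}
```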
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+  // Load the global or builtins object from the current context.
+  lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  lw(function, FieldMemOperand(function,
+                               GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  lw(function, MemOperand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+                                                  Register map,
+                                                  Register scratch) {
+  // Load the initial map. The global functions all have initial maps.
+  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  if (FLAG_debug_code) {
+    Label ok, fail;
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+    Branch(&ok);
+    bind(&fail);
+    Abort("Global functions must have initial map");
+    bind(&ok);
+  }
 }
 
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   addiu(sp, sp, -5 * kPointerSize);
-  li(
-  li(
+  li(t8, Operand(Smi::FromInt(type)));
+  li(t9, Operand(CodeObject()));
   sw(ra, MemOperand(sp, 4 * kPointerSize));
   sw(fp, MemOperand(sp, 3 * kPointerSize));
   sw(cp, MemOperand(sp, 2 * kPointerSize));
-  sw(
-  sw(
+  sw(t8, MemOperand(sp, 1 * kPointerSize));
+  sw(t9, MemOperand(sp, 0 * kPointerSize));
   addiu(fp, sp, 3 * kPointerSize);
 }
 
@@ -1235,62 +2862,98 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
 }
 
 
-void MacroAssembler::EnterExitFrame(
-                                    Register hold_argc,
+void MacroAssembler::EnterExitFrame(Register hold_argc,
                                     Register hold_argv,
-                                    Register hold_function
-
+                                    Register hold_function,
+                                    bool save_doubles) {
   // a0 is argc.
-  sll(
-
-
+  sll(t8, a0, kPointerSizeLog2);
+  addu(hold_argv, sp, t8);
+  addiu(hold_argv, hold_argv, -kPointerSize);
 
   // Compute callee's stack pointer before making changes and save it as
-  //
+  // t9 register so that it is restored as sp register on exit, thereby
   // popping the args.
-  //
-
+  // t9 = sp + kPointerSize * #args
+  addu(t9, sp, t8);
+
+  // Compute the argv pointer and keep it in a callee-saved register.
+  // This only seems to be needed for crankshaft and may cause problems
+  // so it's disabled for now.
+  // Subu(s6, t9, Operand(kPointerSize));
 
   // Align the stack at this point.
   AlignStack(0);
 
   // Save registers.
   addiu(sp, sp, -12);
-  sw(
+  sw(t9, MemOperand(sp, 8));
   sw(ra, MemOperand(sp, 4));
   sw(fp, MemOperand(sp, 0));
   mov(fp, sp);  // Setup new frame pointer.
 
-
-
-    Push(zero_reg);
-  } else {
-    li(t0, Operand(CodeObject()));
-    Push(t0);
-  }
+  li(t8, Operand(CodeObject()));
+  Push(t8);  // Accessed from ExitFrame::code_slot.
 
   // Save the frame pointer and the context in top.
-
-  sw(fp, MemOperand(
-
-  sw(cp, MemOperand(
+  li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+  sw(fp, MemOperand(t8));
+  li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+  sw(cp, MemOperand(t8));
 
   // Setup argc and the builtin function in callee-saved registers.
   mov(hold_argc, a0);
   mov(hold_function, a1);
+
+  // Optionally save all double registers.
+  if (save_doubles) {
+#ifdef DEBUG
+    int frame_alignment = ActivationFrameAlignment();
+#endif
+    // The stack alignment code above made sp unaligned, so add space for one
+    // more double register and use aligned addresses.
+    ASSERT(kDoubleSize == frame_alignment);
+    // Mark the frame as containing doubles by pushing a non-valid return
+    // address, i.e. 0.
+    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
+    push(zero_reg);  // Marker and alignment word.
+    int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
+    Subu(sp, sp, Operand(space));
+    // Remember: we only need to save every 2nd double FPU value.
+    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
+      FPURegister reg = FPURegister::from_code(i);
+      sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
+    }
+    // Note that f0 will be accessible at fp - 2*kPointerSize -
+    // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
+    // alignment word were pushed after the fp.
+  }
 }
 
 
-void MacroAssembler::LeaveExitFrame(
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore all double registers.
+  if (save_doubles) {
+    // TODO(regis): Use vldrm instruction.
+    // Remember: we only need to restore every 2nd double FPU value.
+    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
+      FPURegister reg = FPURegister::from_code(i);
+      // Register f30-f31 is just below the marker.
+      const int offset = ExitFrameConstants::kMarkerOffset;
+      ldc1(reg, MemOperand(fp,
+          (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
+    }
+  }
+
   // Clear top frame.
-
-  sw(zero_reg, MemOperand(
+  li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+  sw(zero_reg, MemOperand(t8));
 
   // Restore current context from top and clear it in debug mode.
-
-  lw(cp, MemOperand(
+  li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+  lw(cp, MemOperand(t8));
 #ifdef DEBUG
-  sw(a3, MemOperand(
+  sw(a3, MemOperand(t8));
 #endif
 
   // Pop the arguments, restore registers, and return.
@@ -1303,24 +2966,362 @@ void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
 }
 
 
+void MacroAssembler::InitializeNewString(Register string,
+                                         Register length,
+                                         Heap::RootListIndex map_index,
+                                         Register scratch1,
+                                         Register scratch2) {
+  sll(scratch1, length, kSmiTagSize);
+  LoadRoot(scratch2, map_index);
+  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
+  li(scratch1, Operand(String::kEmptyHashField));
+  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if defined(V8_HOST_ARCH_MIPS)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one Mips
+  // platform for another Mips platform with a different alignment.
+  return OS::ActivationFrameAlignment();
+#else  // defined(V8_HOST_ARCH_MIPS)
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so this is controlled from a
+  // flag.
+  return FLAG_sim_stack_alignment;
+#endif  // defined(V8_HOST_ARCH_MIPS)
+}
+
+
 void MacroAssembler::AlignStack(int offset) {
   // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
   // and an offset of 1 aligns to 4 modulo 8 bytes.
+#if defined(V8_HOST_ARCH_MIPS)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one MIPS
+  // platform for another MIPS platform with a different alignment.
   int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else  // defined(V8_HOST_ARCH_MIPS)
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so we will always align at
+  // this point here.
+  int activation_frame_alignment = 2 * kPointerSize;
+#endif  // defined(V8_HOST_ARCH_MIPS)
   if (activation_frame_alignment != kPointerSize) {
     // This code needs to be made more general if this assert doesn't hold.
     ASSERT(activation_frame_alignment == 2 * kPointerSize);
     if (offset == 0) {
-      andi(
-      Push(zero_reg, eq,
+      andi(t8, sp, activation_frame_alignment - 1);
+      Push(zero_reg, eq, t8, zero_reg);
     } else {
-      andi(
-      addiu(
-      Push(zero_reg, eq,
+      andi(t8, sp, activation_frame_alignment - 1);
+      addiu(t8, t8, -4);
+      Push(zero_reg, eq, t8, zero_reg);
+    }
+  }
+}
+
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+    Register reg,
+    Register scratch,
+    Label* not_power_of_two_or_zero) {
+  Subu(scratch, reg, Operand(1));
+  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
+         scratch, Operand(zero_reg));
+  and_(at, scratch, reg);  // In the delay slot.
+  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
+}
+
+
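Editor's note: the delay-slot sequence in JumpIfNotPowerOfTwoOrZero is the classic reg & (reg - 1) test. Its scalar equivalent, as a sketch:

```cpp
#include <cstdint>

// Equivalent predicate: reject when reg is zero (reg - 1 underflows to a
// negative signed value) or when reg has more than one bit set.
bool IsPowerOfTwoNonZero(uint32_t reg) {
  uint32_t scratch = reg - 1;               // Subu(scratch, reg, 1)
  if (static_cast<int32_t>(scratch) < 0) {  // Branch(..., lt, scratch, 0)
    return false;                           // reg was 0
  }
  return (scratch & reg) == 0;              // and_(at, scratch, reg)
}
```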
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+                                      Register reg2,
+                                      Label* on_not_both_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT_EQ(1, kSmiTagMask);
+  or_(at, reg1, reg2);
+  andi(at, at, kSmiTagMask);
+  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+                                     Register reg2,
+                                     Label* on_either_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT_EQ(1, kSmiTagMask);
+  // Both Smi tags must be 1 (not Smi).
+  and_(at, reg1, reg2);
+  andi(at, at, kSmiTagMask);
+  Branch(on_either_smi, eq, at, Operand(zero_reg));
+}
+
+
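Editor's note: the OR/AND asymmetry between JumpIfNotBothSmi and JumpIfEitherSmi follows directly from the tag layout (kSmiTag == 0, kSmiTagMask == 1). A scalar sketch:

```cpp
#include <cstdint>

const uint32_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

// OR keeps a set tag bit if either word is a non-smi, so a zero result
// proves both are smis (JumpIfNotBothSmi branches on the opposite case).
bool BothSmi(uint32_t reg1, uint32_t reg2) {
  return ((reg1 | reg2) & kSmiTagMask) == 0;
}

// AND keeps a set tag bit only if both are non-smis, so a zero result
// proves at least one is a smi (JumpIfEitherSmi branches on this case).
bool EitherSmi(uint32_t reg1, uint32_t reg2) {
  return ((reg1 & reg2) & kSmiTagMask) == 0;
}
```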
+void MacroAssembler::AbortIfSmi(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  andi(at, object, kSmiTagMask);
+  Assert(ne, "Operand is a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotSmi(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  andi(at, object, kSmiTagMask);
+  Assert(eq, "Operand is a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotRootValue(Register src,
+                                         Heap::RootListIndex root_value_index,
+                                         const char* message) {
+  ASSERT(!src.is(at));
+  LoadRoot(at, root_value_index);
+  Assert(eq, message, src, Operand(at));
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
+}
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  // Test that both first and second are sequential ASCII strings.
+  // Assume that they are non-smis.
+  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
+                                               scratch2,
+                                               scratch1,
+                                               scratch2,
+                                               failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+                                                         Register second,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         Label* failure) {
+  // Check that neither is a smi.
+  STATIC_ASSERT(kSmiTag == 0);
+  And(scratch1, first, Operand(second));
+  And(scratch1, scratch1, Operand(kSmiTagMask));
+  Branch(failure, eq, scratch1, Operand(zero_reg));
+  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+                                             second,
+                                             scratch1,
+                                             scratch2,
+                                             failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
+  andi(scratch1, first, kFlatAsciiStringMask);
+  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
+  andi(scratch2, second, kFlatAsciiStringMask);
+  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+                                                            Register scratch,
+                                                            Label* failure) {
+  int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  And(scratch, type, Operand(kFlatAsciiStringMask));
+  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+}
+
+
+static const int kRegisterPassedArguments = 4;
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+  int frame_alignment = ActivationFrameAlignment();
+
+  // Reserve space for Isolate address which is always passed as last parameter
+  num_arguments += 1;
+
+  // Up to four simple arguments are passed in registers a0..a3.
+  // Those four arguments must have reserved argument slots on the stack for
+  // mips, even though those argument slots are not normally used.
+  // Remaining arguments are pushed on the stack, above (higher address than)
+  // the argument slots.
+  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+                                0 : num_arguments - kRegisterPassedArguments) +
+                               (StandardFrameConstants::kCArgsSlotsSize /
+                                kPointerSize);
+  if (frame_alignment > kPointerSize) {
+    // Make stack end at alignment and make room for num_arguments - 4 words
+    // and the original value of sp.
+    mov(scratch, sp);
+    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+    ASSERT(IsPowerOf2(frame_alignment));
+    And(sp, sp, Operand(-frame_alignment));
+    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+  }
+}
+
+
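Editor's note: the stack-space arithmetic in PrepareCallCFunction is worth checking in scalar form. A sketch under the o32-style assumption of four register arguments with always-reserved backing slots; kCArgsSlotsSize below is an illustrative stand-in for StandardFrameConstants::kCArgsSlotsSize:

```cpp
const int kRegisterPassedArguments = 4;
const int kPointerSize = 4;                    // MIPS32 word size
const int kCArgsSlotsSize = 4 * kPointerSize;  // assumption: 4 backing slots

// Words reserved below sp: the four always-present argument slots plus one
// word per argument that does not fit in a0..a3. The +1 Isolate argument is
// assumed to have been folded into num_arguments already, as above.
int StackPassedWords(int num_arguments) {
  int spilled = (num_arguments <= kRegisterPassedArguments)
                    ? 0
                    : num_arguments - kRegisterPassedArguments;
  return spilled + kCArgsSlotsSize / kPointerSize;
}

// Example: 6 C arguments + 1 Isolate => num_arguments = 7, so three spill
// words plus four argument slots = 7 words reserved.
```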
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_arguments) {
+  CallCFunctionHelper(no_reg, function, at, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   Register scratch,
+                                   int num_arguments) {
+  CallCFunctionHelper(function,
+                      ExternalReference::the_hole_value_location(isolate()),
+                      scratch,
+                      num_arguments);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+                                         ExternalReference function_reference,
+                                         Register scratch,
+                                         int num_arguments) {
+  // Push Isolate address as the last argument.
+  if (num_arguments < kRegisterPassedArguments) {
+    Register arg_to_reg[] = {a0, a1, a2, a3};
+    Register r = arg_to_reg[num_arguments];
+    li(r, Operand(ExternalReference::isolate_address()));
+  } else {
+    int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
+                                 (StandardFrameConstants::kCArgsSlotsSize /
+                                  kPointerSize);
+    // Push Isolate address on the stack after the arguments.
+    li(scratch, Operand(ExternalReference::isolate_address()));
+    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  }
+  num_arguments += 1;
+
+  // Make sure that the stack is aligned before calling a C function unless
+  // running in the simulator. The simulator has its own alignment check which
+  // provides more information.
+  // The argument slots are presumed to have been set up by
+  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
+
+#if defined(V8_HOST_ARCH_MIPS)
+  if (emit_debug_code()) {
+    int frame_alignment = OS::ActivationFrameAlignment();
+    int frame_alignment_mask = frame_alignment - 1;
+    if (frame_alignment > kPointerSize) {
+      ASSERT(IsPowerOf2(frame_alignment));
+      Label alignment_as_expected;
+      And(at, sp, Operand(frame_alignment_mask));
+      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+      // Don't use Check here, as it will call Runtime_Abort possibly
+      // re-entering here.
+      stop("Unexpected alignment in CallCFunction");
+      bind(&alignment_as_expected);
     }
   }
+#endif  // V8_HOST_ARCH_MIPS
+
+  // Just call directly. The function called cannot cause a GC, or
+  // allow preemption, so the return address in the link register
+  // stays correct.
+  if (!function.is(t9)) {
+    mov(t9, function);
+    function = t9;
+  }
+
+  if (function.is(no_reg)) {
+    li(t9, Operand(function_reference));
+    function = t9;
+  }
+
+  Call(function);
+
+  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+                                0 : num_arguments - kRegisterPassedArguments) +
+                               (StandardFrameConstants::kCArgsSlotsSize /
+                                kPointerSize);
+
+  if (OS::ActivationFrameAlignment() > kPointerSize) {
+    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
+  }
 }
 
+
+#undef BRANCH_ARGS_CHECK
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+CodePatcher::CodePatcher(byte* address, int instructions)
+    : address_(address),
+      instructions_(instructions),
+      size_(instructions * Assembler::kInstrSize),
+      masm_(address, size_ + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr x) {
+  masm()->emit(x);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+  masm()->emit(reinterpret_cast<Instr>(addr));
+}
+
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS