libv8-sgonyea 3.3.10
- data/.gitignore +8 -0
- data/.gitmodules +3 -0
- data/Gemfile +4 -0
- data/README.md +76 -0
- data/Rakefile +113 -0
- data/ext/libv8/extconf.rb +28 -0
- data/lib/libv8.rb +15 -0
- data/lib/libv8/Makefile +30 -0
- data/lib/libv8/detect_cpu.rb +27 -0
- data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
- data/lib/libv8/v8/.gitignore +35 -0
- data/lib/libv8/v8/AUTHORS +44 -0
- data/lib/libv8/v8/ChangeLog +2839 -0
- data/lib/libv8/v8/LICENSE +52 -0
- data/lib/libv8/v8/LICENSE.strongtalk +29 -0
- data/lib/libv8/v8/LICENSE.v8 +26 -0
- data/lib/libv8/v8/LICENSE.valgrind +45 -0
- data/lib/libv8/v8/SConstruct +1478 -0
- data/lib/libv8/v8/build/README.txt +49 -0
- data/lib/libv8/v8/build/all.gyp +18 -0
- data/lib/libv8/v8/build/armu.gypi +32 -0
- data/lib/libv8/v8/build/common.gypi +144 -0
- data/lib/libv8/v8/build/gyp_v8 +145 -0
- data/lib/libv8/v8/include/v8-debug.h +395 -0
- data/lib/libv8/v8/include/v8-preparser.h +117 -0
- data/lib/libv8/v8/include/v8-profiler.h +505 -0
- data/lib/libv8/v8/include/v8-testing.h +104 -0
- data/lib/libv8/v8/include/v8.h +4124 -0
- data/lib/libv8/v8/include/v8stdint.h +53 -0
- data/lib/libv8/v8/preparser/SConscript +38 -0
- data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
- data/lib/libv8/v8/src/SConscript +368 -0
- data/lib/libv8/v8/src/accessors.cc +767 -0
- data/lib/libv8/v8/src/accessors.h +123 -0
- data/lib/libv8/v8/src/allocation-inl.h +49 -0
- data/lib/libv8/v8/src/allocation.cc +122 -0
- data/lib/libv8/v8/src/allocation.h +143 -0
- data/lib/libv8/v8/src/api.cc +5845 -0
- data/lib/libv8/v8/src/api.h +574 -0
- data/lib/libv8/v8/src/apinatives.js +110 -0
- data/lib/libv8/v8/src/apiutils.h +73 -0
- data/lib/libv8/v8/src/arguments.h +118 -0
- data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
- data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
- data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
- data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
- data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
- data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
- data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
- data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
- data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
- data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
- data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
- data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
- data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
- data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
- data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
- data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
- data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
- data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
- data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
- data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
- data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
- data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
- data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
- data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
- data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
- data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
- data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
- data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
- data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
- data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
- data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
- data/lib/libv8/v8/src/array.js +1366 -0
- data/lib/libv8/v8/src/assembler.cc +1207 -0
- data/lib/libv8/v8/src/assembler.h +858 -0
- data/lib/libv8/v8/src/ast-inl.h +112 -0
- data/lib/libv8/v8/src/ast.cc +1146 -0
- data/lib/libv8/v8/src/ast.h +2188 -0
- data/lib/libv8/v8/src/atomicops.h +167 -0
- data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
- data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
- data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
- data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
- data/lib/libv8/v8/src/bignum.cc +768 -0
- data/lib/libv8/v8/src/bignum.h +140 -0
- data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
- data/lib/libv8/v8/src/bootstrapper.h +188 -0
- data/lib/libv8/v8/src/builtins.cc +1707 -0
- data/lib/libv8/v8/src/builtins.h +371 -0
- data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
- data/lib/libv8/v8/src/cached-powers.cc +177 -0
- data/lib/libv8/v8/src/cached-powers.h +65 -0
- data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
- data/lib/libv8/v8/src/char-predicates.h +67 -0
- data/lib/libv8/v8/src/checks.cc +110 -0
- data/lib/libv8/v8/src/checks.h +296 -0
- data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
- data/lib/libv8/v8/src/circular-queue.cc +122 -0
- data/lib/libv8/v8/src/circular-queue.h +103 -0
- data/lib/libv8/v8/src/code-stubs.cc +267 -0
- data/lib/libv8/v8/src/code-stubs.h +1011 -0
- data/lib/libv8/v8/src/code.h +70 -0
- data/lib/libv8/v8/src/codegen.cc +231 -0
- data/lib/libv8/v8/src/codegen.h +84 -0
- data/lib/libv8/v8/src/compilation-cache.cc +540 -0
- data/lib/libv8/v8/src/compilation-cache.h +287 -0
- data/lib/libv8/v8/src/compiler.cc +786 -0
- data/lib/libv8/v8/src/compiler.h +312 -0
- data/lib/libv8/v8/src/contexts.cc +347 -0
- data/lib/libv8/v8/src/contexts.h +391 -0
- data/lib/libv8/v8/src/conversions-inl.h +106 -0
- data/lib/libv8/v8/src/conversions.cc +1131 -0
- data/lib/libv8/v8/src/conversions.h +135 -0
- data/lib/libv8/v8/src/counters.cc +93 -0
- data/lib/libv8/v8/src/counters.h +254 -0
- data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
- data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
- data/lib/libv8/v8/src/cpu-profiler.h +302 -0
- data/lib/libv8/v8/src/cpu.h +69 -0
- data/lib/libv8/v8/src/d8-debug.cc +367 -0
- data/lib/libv8/v8/src/d8-debug.h +158 -0
- data/lib/libv8/v8/src/d8-posix.cc +695 -0
- data/lib/libv8/v8/src/d8-readline.cc +130 -0
- data/lib/libv8/v8/src/d8-windows.cc +42 -0
- data/lib/libv8/v8/src/d8.cc +803 -0
- data/lib/libv8/v8/src/d8.gyp +91 -0
- data/lib/libv8/v8/src/d8.h +235 -0
- data/lib/libv8/v8/src/d8.js +2798 -0
- data/lib/libv8/v8/src/data-flow.cc +66 -0
- data/lib/libv8/v8/src/data-flow.h +205 -0
- data/lib/libv8/v8/src/date.js +1103 -0
- data/lib/libv8/v8/src/dateparser-inl.h +127 -0
- data/lib/libv8/v8/src/dateparser.cc +178 -0
- data/lib/libv8/v8/src/dateparser.h +266 -0
- data/lib/libv8/v8/src/debug-agent.cc +447 -0
- data/lib/libv8/v8/src/debug-agent.h +129 -0
- data/lib/libv8/v8/src/debug-debugger.js +2569 -0
- data/lib/libv8/v8/src/debug.cc +3165 -0
- data/lib/libv8/v8/src/debug.h +1057 -0
- data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
- data/lib/libv8/v8/src/deoptimizer.h +602 -0
- data/lib/libv8/v8/src/disasm.h +80 -0
- data/lib/libv8/v8/src/disassembler.cc +343 -0
- data/lib/libv8/v8/src/disassembler.h +58 -0
- data/lib/libv8/v8/src/diy-fp.cc +58 -0
- data/lib/libv8/v8/src/diy-fp.h +117 -0
- data/lib/libv8/v8/src/double.h +238 -0
- data/lib/libv8/v8/src/dtoa.cc +103 -0
- data/lib/libv8/v8/src/dtoa.h +85 -0
- data/lib/libv8/v8/src/execution.cc +849 -0
- data/lib/libv8/v8/src/execution.h +297 -0
- data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
- data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
- data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
- data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
- data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
- data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
- data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
- data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
- data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
- data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
- data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
- data/lib/libv8/v8/src/factory.cc +1222 -0
- data/lib/libv8/v8/src/factory.h +442 -0
- data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
- data/lib/libv8/v8/src/fast-dtoa.h +83 -0
- data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
- data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
- data/lib/libv8/v8/src/flag-definitions.h +560 -0
- data/lib/libv8/v8/src/flags.cc +551 -0
- data/lib/libv8/v8/src/flags.h +79 -0
- data/lib/libv8/v8/src/frames-inl.h +247 -0
- data/lib/libv8/v8/src/frames.cc +1243 -0
- data/lib/libv8/v8/src/frames.h +870 -0
- data/lib/libv8/v8/src/full-codegen.cc +1374 -0
- data/lib/libv8/v8/src/full-codegen.h +771 -0
- data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
- data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
- data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
- data/lib/libv8/v8/src/gdb-jit.h +143 -0
- data/lib/libv8/v8/src/global-handles.cc +665 -0
- data/lib/libv8/v8/src/global-handles.h +284 -0
- data/lib/libv8/v8/src/globals.h +325 -0
- data/lib/libv8/v8/src/handles-inl.h +177 -0
- data/lib/libv8/v8/src/handles.cc +987 -0
- data/lib/libv8/v8/src/handles.h +382 -0
- data/lib/libv8/v8/src/hashmap.cc +230 -0
- data/lib/libv8/v8/src/hashmap.h +123 -0
- data/lib/libv8/v8/src/heap-inl.h +704 -0
- data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
- data/lib/libv8/v8/src/heap-profiler.h +397 -0
- data/lib/libv8/v8/src/heap.cc +5930 -0
- data/lib/libv8/v8/src/heap.h +2268 -0
- data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
- data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
- data/lib/libv8/v8/src/hydrogen.cc +6239 -0
- data/lib/libv8/v8/src/hydrogen.h +1202 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
- data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
- data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
- data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
- data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
- data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
- data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
- data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
- data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
- data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
- data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
- data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
- data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
- data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
- data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
- data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
- data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
- data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
- data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
- data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
- data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
- data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
- data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
- data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
- data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
- data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
- data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
- data/lib/libv8/v8/src/ic-inl.h +130 -0
- data/lib/libv8/v8/src/ic.cc +2577 -0
- data/lib/libv8/v8/src/ic.h +736 -0
- data/lib/libv8/v8/src/inspector.cc +63 -0
- data/lib/libv8/v8/src/inspector.h +62 -0
- data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
- data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
- data/lib/libv8/v8/src/isolate-inl.h +50 -0
- data/lib/libv8/v8/src/isolate.cc +1869 -0
- data/lib/libv8/v8/src/isolate.h +1382 -0
- data/lib/libv8/v8/src/json-parser.cc +504 -0
- data/lib/libv8/v8/src/json-parser.h +161 -0
- data/lib/libv8/v8/src/json.js +342 -0
- data/lib/libv8/v8/src/jsregexp.cc +5385 -0
- data/lib/libv8/v8/src/jsregexp.h +1492 -0
- data/lib/libv8/v8/src/list-inl.h +212 -0
- data/lib/libv8/v8/src/list.h +174 -0
- data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
- data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
- data/lib/libv8/v8/src/lithium-allocator.h +630 -0
- data/lib/libv8/v8/src/lithium.cc +190 -0
- data/lib/libv8/v8/src/lithium.h +597 -0
- data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
- data/lib/libv8/v8/src/liveedit.cc +1691 -0
- data/lib/libv8/v8/src/liveedit.h +180 -0
- data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
- data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
- data/lib/libv8/v8/src/liveobjectlist.h +322 -0
- data/lib/libv8/v8/src/log-inl.h +59 -0
- data/lib/libv8/v8/src/log-utils.cc +428 -0
- data/lib/libv8/v8/src/log-utils.h +231 -0
- data/lib/libv8/v8/src/log.cc +1993 -0
- data/lib/libv8/v8/src/log.h +476 -0
- data/lib/libv8/v8/src/macro-assembler.h +120 -0
- data/lib/libv8/v8/src/macros.py +178 -0
- data/lib/libv8/v8/src/mark-compact.cc +3143 -0
- data/lib/libv8/v8/src/mark-compact.h +506 -0
- data/lib/libv8/v8/src/math.js +264 -0
- data/lib/libv8/v8/src/messages.cc +179 -0
- data/lib/libv8/v8/src/messages.h +113 -0
- data/lib/libv8/v8/src/messages.js +1096 -0
- data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
- data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
- data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
- data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
- data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
- data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
- data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
- data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
- data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
- data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
- data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
- data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
- data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
- data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
- data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
- data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
- data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
- data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
- data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
- data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
- data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
- data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
- data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
- data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
- data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
- data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
- data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
- data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
- data/lib/libv8/v8/src/mksnapshot.cc +328 -0
- data/lib/libv8/v8/src/natives.h +64 -0
- data/lib/libv8/v8/src/objects-debug.cc +738 -0
- data/lib/libv8/v8/src/objects-inl.h +4323 -0
- data/lib/libv8/v8/src/objects-printer.cc +829 -0
- data/lib/libv8/v8/src/objects-visiting.cc +148 -0
- data/lib/libv8/v8/src/objects-visiting.h +424 -0
- data/lib/libv8/v8/src/objects.cc +10585 -0
- data/lib/libv8/v8/src/objects.h +6838 -0
- data/lib/libv8/v8/src/parser.cc +4997 -0
- data/lib/libv8/v8/src/parser.h +765 -0
- data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
- data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
- data/lib/libv8/v8/src/platform-linux.cc +1149 -0
- data/lib/libv8/v8/src/platform-macos.cc +830 -0
- data/lib/libv8/v8/src/platform-nullos.cc +479 -0
- data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
- data/lib/libv8/v8/src/platform-posix.cc +424 -0
- data/lib/libv8/v8/src/platform-solaris.cc +762 -0
- data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
- data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
- data/lib/libv8/v8/src/platform-tls.h +50 -0
- data/lib/libv8/v8/src/platform-win32.cc +2021 -0
- data/lib/libv8/v8/src/platform.h +667 -0
- data/lib/libv8/v8/src/preparse-data-format.h +62 -0
- data/lib/libv8/v8/src/preparse-data.cc +183 -0
- data/lib/libv8/v8/src/preparse-data.h +225 -0
- data/lib/libv8/v8/src/preparser-api.cc +220 -0
- data/lib/libv8/v8/src/preparser.cc +1450 -0
- data/lib/libv8/v8/src/preparser.h +493 -0
- data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
- data/lib/libv8/v8/src/prettyprinter.h +223 -0
- data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
- data/lib/libv8/v8/src/profile-generator.cc +3098 -0
- data/lib/libv8/v8/src/profile-generator.h +1126 -0
- data/lib/libv8/v8/src/property.cc +105 -0
- data/lib/libv8/v8/src/property.h +365 -0
- data/lib/libv8/v8/src/proxy.js +83 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
- data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
- data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
- data/lib/libv8/v8/src/regexp-stack.cc +111 -0
- data/lib/libv8/v8/src/regexp-stack.h +147 -0
- data/lib/libv8/v8/src/regexp.js +483 -0
- data/lib/libv8/v8/src/rewriter.cc +360 -0
- data/lib/libv8/v8/src/rewriter.h +50 -0
- data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
- data/lib/libv8/v8/src/runtime-profiler.h +201 -0
- data/lib/libv8/v8/src/runtime.cc +12227 -0
- data/lib/libv8/v8/src/runtime.h +652 -0
- data/lib/libv8/v8/src/runtime.js +649 -0
- data/lib/libv8/v8/src/safepoint-table.cc +256 -0
- data/lib/libv8/v8/src/safepoint-table.h +270 -0
- data/lib/libv8/v8/src/scanner-base.cc +952 -0
- data/lib/libv8/v8/src/scanner-base.h +670 -0
- data/lib/libv8/v8/src/scanner.cc +345 -0
- data/lib/libv8/v8/src/scanner.h +146 -0
- data/lib/libv8/v8/src/scopeinfo.cc +646 -0
- data/lib/libv8/v8/src/scopeinfo.h +254 -0
- data/lib/libv8/v8/src/scopes.cc +1150 -0
- data/lib/libv8/v8/src/scopes.h +507 -0
- data/lib/libv8/v8/src/serialize.cc +1574 -0
- data/lib/libv8/v8/src/serialize.h +589 -0
- data/lib/libv8/v8/src/shell.h +55 -0
- data/lib/libv8/v8/src/simulator.h +43 -0
- data/lib/libv8/v8/src/small-pointer-list.h +163 -0
- data/lib/libv8/v8/src/smart-pointer.h +109 -0
- data/lib/libv8/v8/src/snapshot-common.cc +83 -0
- data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
- data/lib/libv8/v8/src/snapshot.h +91 -0
- data/lib/libv8/v8/src/spaces-inl.h +529 -0
- data/lib/libv8/v8/src/spaces.cc +3145 -0
- data/lib/libv8/v8/src/spaces.h +2369 -0
- data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
- data/lib/libv8/v8/src/splay-tree.h +205 -0
- data/lib/libv8/v8/src/string-search.cc +41 -0
- data/lib/libv8/v8/src/string-search.h +568 -0
- data/lib/libv8/v8/src/string-stream.cc +592 -0
- data/lib/libv8/v8/src/string-stream.h +191 -0
- data/lib/libv8/v8/src/string.js +994 -0
- data/lib/libv8/v8/src/strtod.cc +440 -0
- data/lib/libv8/v8/src/strtod.h +40 -0
- data/lib/libv8/v8/src/stub-cache.cc +1965 -0
- data/lib/libv8/v8/src/stub-cache.h +924 -0
- data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
- data/lib/libv8/v8/src/token.cc +63 -0
- data/lib/libv8/v8/src/token.h +288 -0
- data/lib/libv8/v8/src/type-info.cc +507 -0
- data/lib/libv8/v8/src/type-info.h +272 -0
- data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
- data/lib/libv8/v8/src/unbound-queue.h +69 -0
- data/lib/libv8/v8/src/unicode-inl.h +238 -0
- data/lib/libv8/v8/src/unicode.cc +1624 -0
- data/lib/libv8/v8/src/unicode.h +280 -0
- data/lib/libv8/v8/src/uri.js +408 -0
- data/lib/libv8/v8/src/utils-inl.h +48 -0
- data/lib/libv8/v8/src/utils.cc +371 -0
- data/lib/libv8/v8/src/utils.h +800 -0
- data/lib/libv8/v8/src/v8-counters.cc +62 -0
- data/lib/libv8/v8/src/v8-counters.h +314 -0
- data/lib/libv8/v8/src/v8.cc +213 -0
- data/lib/libv8/v8/src/v8.h +131 -0
- data/lib/libv8/v8/src/v8checks.h +64 -0
- data/lib/libv8/v8/src/v8dll-main.cc +44 -0
- data/lib/libv8/v8/src/v8globals.h +512 -0
- data/lib/libv8/v8/src/v8memory.h +82 -0
- data/lib/libv8/v8/src/v8natives.js +1310 -0
- data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
- data/lib/libv8/v8/src/v8threads.cc +464 -0
- data/lib/libv8/v8/src/v8threads.h +165 -0
- data/lib/libv8/v8/src/v8utils.h +319 -0
- data/lib/libv8/v8/src/variables.cc +114 -0
- data/lib/libv8/v8/src/variables.h +167 -0
- data/lib/libv8/v8/src/version.cc +116 -0
- data/lib/libv8/v8/src/version.h +68 -0
- data/lib/libv8/v8/src/vm-state-inl.h +138 -0
- data/lib/libv8/v8/src/vm-state.h +71 -0
- data/lib/libv8/v8/src/win32-headers.h +96 -0
- data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
- data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
- data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
- data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
- data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
- data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
- data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
- data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
- data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
- data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
- data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
- data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
- data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
- data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
- data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
- data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
- data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
- data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
- data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
- data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
- data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
- data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
- data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
- data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
- data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
- data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
- data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
- data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
- data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
- data/lib/libv8/v8/src/zone-inl.h +140 -0
- data/lib/libv8/v8/src/zone.cc +196 -0
- data/lib/libv8/v8/src/zone.h +240 -0
- data/lib/libv8/v8/tools/codemap.js +265 -0
- data/lib/libv8/v8/tools/consarray.js +93 -0
- data/lib/libv8/v8/tools/csvparser.js +78 -0
- data/lib/libv8/v8/tools/disasm.py +92 -0
- data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
- data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
- data/lib/libv8/v8/tools/gcmole/README +62 -0
- data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
- data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
- data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
- data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
- data/lib/libv8/v8/tools/grokdump.py +841 -0
- data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
- data/lib/libv8/v8/tools/js2c.py +364 -0
- data/lib/libv8/v8/tools/jsmin.py +280 -0
- data/lib/libv8/v8/tools/linux-tick-processor +35 -0
- data/lib/libv8/v8/tools/ll_prof.py +942 -0
- data/lib/libv8/v8/tools/logreader.js +185 -0
- data/lib/libv8/v8/tools/mac-nm +18 -0
- data/lib/libv8/v8/tools/mac-tick-processor +6 -0
- data/lib/libv8/v8/tools/oom_dump/README +31 -0
- data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
- data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
- data/lib/libv8/v8/tools/presubmit.py +305 -0
- data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
- data/lib/libv8/v8/tools/profile.js +751 -0
- data/lib/libv8/v8/tools/profile_view.js +219 -0
- data/lib/libv8/v8/tools/run-valgrind.py +77 -0
- data/lib/libv8/v8/tools/splaytree.js +316 -0
- data/lib/libv8/v8/tools/stats-viewer.py +468 -0
- data/lib/libv8/v8/tools/test.py +1510 -0
- data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
- data/lib/libv8/v8/tools/tickprocessor.js +877 -0
- data/lib/libv8/v8/tools/utils.py +96 -0
- data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
- data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
- data/lib/libv8/version.rb +5 -0
- data/libv8.gemspec +36 -0
- metadata +578 -0
data/lib/libv8/v8/src/x64/macro-assembler-x64.cc
@@ -0,0 +1,3745 @@
|
|
1
|
+
// Copyright 2011 the V8 project authors. All rights reserved.
|
2
|
+
// Redistribution and use in source and binary forms, with or without
|
3
|
+
// modification, are permitted provided that the following conditions are
|
4
|
+
// met:
|
5
|
+
//
|
6
|
+
// * Redistributions of source code must retain the above copyright
|
7
|
+
// notice, this list of conditions and the following disclaimer.
|
8
|
+
// * Redistributions in binary form must reproduce the above
|
9
|
+
// copyright notice, this list of conditions and the following
|
10
|
+
// disclaimer in the documentation and/or other materials provided
|
11
|
+
// with the distribution.
|
12
|
+
// * Neither the name of Google Inc. nor the names of its
|
13
|
+
// contributors may be used to endorse or promote products derived
|
14
|
+
// from this software without specific prior written permission.
|
15
|
+
//
|
16
|
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
17
|
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
18
|
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
19
|
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
20
|
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21
|
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22
|
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
23
|
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
24
|
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
25
|
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
26
|
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
27
|
+
|
28
|
+
#include "v8.h"
|
29
|
+
|
30
|
+
#if defined(V8_TARGET_ARCH_X64)
|
31
|
+
|
32
|
+
#include "bootstrapper.h"
|
33
|
+
#include "codegen.h"
|
34
|
+
#include "assembler-x64.h"
|
35
|
+
#include "macro-assembler-x64.h"
|
36
|
+
#include "serialize.h"
|
37
|
+
#include "debug.h"
|
38
|
+
#include "heap.h"
|
39
|
+
|
40
|
+
namespace v8 {
|
41
|
+
namespace internal {
|
42
|
+
|
43
|
+
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
|
44
|
+
: Assembler(arg_isolate, buffer, size),
|
45
|
+
generating_stub_(false),
|
46
|
+
allow_stub_calls_(true),
|
47
|
+
root_array_available_(true) {
|
48
|
+
if (isolate() != NULL) {
|
49
|
+
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
|
50
|
+
isolate());
|
51
|
+
}
|
52
|
+
}
|
53
|
+
|
54
|
+
|
55
|
+
static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
|
56
|
+
Address roots_register_value = kRootRegisterBias +
|
57
|
+
reinterpret_cast<Address>(isolate->heap()->roots_address());
|
58
|
+
intptr_t delta = other.address() - roots_register_value;
|
59
|
+
return delta;
|
60
|
+
}
|
61
|
+
|
62
|
+
|
63
|
+
Operand MacroAssembler::ExternalOperand(ExternalReference target,
|
64
|
+
Register scratch) {
|
65
|
+
if (root_array_available_ && !Serializer::enabled()) {
|
66
|
+
intptr_t delta = RootRegisterDelta(target, isolate());
|
67
|
+
if (is_int32(delta)) {
|
68
|
+
Serializer::TooLateToEnableNow();
|
69
|
+
return Operand(kRootRegister, static_cast<int32_t>(delta));
|
70
|
+
}
|
71
|
+
}
|
72
|
+
movq(scratch, target);
|
73
|
+
return Operand(scratch, 0);
|
74
|
+
}
|
75
|
+
|
76
|
+
|
77
|
+
void MacroAssembler::Load(Register destination, ExternalReference source) {
|
78
|
+
if (root_array_available_ && !Serializer::enabled()) {
|
79
|
+
intptr_t delta = RootRegisterDelta(source, isolate());
|
80
|
+
if (is_int32(delta)) {
|
81
|
+
Serializer::TooLateToEnableNow();
|
82
|
+
movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
|
83
|
+
return;
|
84
|
+
}
|
85
|
+
}
|
86
|
+
// Safe code.
|
87
|
+
if (destination.is(rax)) {
|
88
|
+
load_rax(source);
|
89
|
+
} else {
|
90
|
+
movq(kScratchRegister, source);
|
91
|
+
movq(destination, Operand(kScratchRegister, 0));
|
92
|
+
}
|
93
|
+
}
|
94
|
+
|
95
|
+
|
96
|
+
void MacroAssembler::Store(ExternalReference destination, Register source) {
|
97
|
+
if (root_array_available_ && !Serializer::enabled()) {
|
98
|
+
intptr_t delta = RootRegisterDelta(destination, isolate());
|
99
|
+
if (is_int32(delta)) {
|
100
|
+
Serializer::TooLateToEnableNow();
|
101
|
+
movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
|
102
|
+
return;
|
103
|
+
}
|
104
|
+
}
|
105
|
+
// Safe code.
|
106
|
+
if (source.is(rax)) {
|
107
|
+
store_rax(destination);
|
108
|
+
} else {
|
109
|
+
movq(kScratchRegister, destination);
|
110
|
+
movq(Operand(kScratchRegister, 0), source);
|
111
|
+
}
|
112
|
+
}
|
113
|
+
|
114
|
+
|
115
|
+
void MacroAssembler::LoadAddress(Register destination,
|
116
|
+
ExternalReference source) {
|
117
|
+
if (root_array_available_ && !Serializer::enabled()) {
|
118
|
+
intptr_t delta = RootRegisterDelta(source, isolate());
|
119
|
+
if (is_int32(delta)) {
|
120
|
+
Serializer::TooLateToEnableNow();
|
121
|
+
lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
|
122
|
+
return;
|
123
|
+
}
|
124
|
+
}
|
125
|
+
// Safe code.
|
126
|
+
movq(destination, source);
|
127
|
+
}
|
128
|
+
|
129
|
+
|
130
|
+
int MacroAssembler::LoadAddressSize(ExternalReference source) {
|
131
|
+
if (root_array_available_ && !Serializer::enabled()) {
|
132
|
+
// This calculation depends on the internals of LoadAddress.
|
133
|
+
// It's correctness is ensured by the asserts in the Call
|
134
|
+
// instruction below.
|
135
|
+
intptr_t delta = RootRegisterDelta(source, isolate());
|
136
|
+
if (is_int32(delta)) {
|
137
|
+
Serializer::TooLateToEnableNow();
|
138
|
+
// Operand is lea(scratch, Operand(kRootRegister, delta));
|
139
|
+
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
|
140
|
+
int size = 4;
|
141
|
+
if (!is_int8(static_cast<int32_t>(delta))) {
|
142
|
+
size += 3; // Need full four-byte displacement in lea.
|
143
|
+
}
|
144
|
+
return size;
|
145
|
+
}
|
146
|
+
}
|
147
|
+
// Size of movq(destination, src);
|
148
|
+
return 10;
|
149
|
+
}
|
150
|
+
|
151
|
+
|
152
|
+
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
|
153
|
+
ASSERT(root_array_available_);
|
154
|
+
movq(destination, Operand(kRootRegister,
|
155
|
+
(index << kPointerSizeLog2) - kRootRegisterBias));
|
156
|
+
}
|
157
|
+
|
158
|
+
|
159
|
+
void MacroAssembler::LoadRootIndexed(Register destination,
|
160
|
+
Register variable_offset,
|
161
|
+
int fixed_offset) {
|
162
|
+
ASSERT(root_array_available_);
|
163
|
+
movq(destination,
|
164
|
+
Operand(kRootRegister,
|
165
|
+
variable_offset, times_pointer_size,
|
166
|
+
(fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
|
167
|
+
}
|
168
|
+
|
169
|
+
|
170
|
+
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
|
171
|
+
ASSERT(root_array_available_);
|
172
|
+
movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
|
173
|
+
source);
|
174
|
+
}
|
175
|
+
|
176
|
+
|
177
|
+
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
|
178
|
+
ASSERT(root_array_available_);
|
179
|
+
push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
|
180
|
+
}
|
181
|
+
|
182
|
+
|
183
|
+
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
|
184
|
+
ASSERT(root_array_available_);
|
185
|
+
cmpq(with, Operand(kRootRegister,
|
186
|
+
(index << kPointerSizeLog2) - kRootRegisterBias));
|
187
|
+
}
|
188
|
+
|
189
|
+
|
190
|
+
void MacroAssembler::CompareRoot(const Operand& with,
|
191
|
+
Heap::RootListIndex index) {
|
192
|
+
ASSERT(root_array_available_);
|
193
|
+
ASSERT(!with.AddressUsesRegister(kScratchRegister));
|
194
|
+
LoadRoot(kScratchRegister, index);
|
195
|
+
cmpq(with, kScratchRegister);
|
196
|
+
}
|
197
|
+
|
198
|
+
|
199
|
+
void MacroAssembler::RecordWriteHelper(Register object,
|
200
|
+
Register addr,
|
201
|
+
Register scratch) {
|
202
|
+
if (emit_debug_code()) {
|
203
|
+
// Check that the object is not in new space.
|
204
|
+
Label not_in_new_space;
|
205
|
+
InNewSpace(object, scratch, not_equal, ¬_in_new_space, Label::kNear);
|
206
|
+
Abort("new-space object passed to RecordWriteHelper");
|
207
|
+
bind(¬_in_new_space);
|
208
|
+
}
|
209
|
+
|
210
|
+
// Compute the page start address from the heap object pointer, and reuse
|
211
|
+
// the 'object' register for it.
|
212
|
+
and_(object, Immediate(~Page::kPageAlignmentMask));
|
213
|
+
|
214
|
+
// Compute number of region covering addr. See Page::GetRegionNumberForAddress
|
215
|
+
// method for more details.
|
216
|
+
shrl(addr, Immediate(Page::kRegionSizeLog2));
|
217
|
+
andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
|
218
|
+
|
219
|
+
// Set dirty mark for region.
|
220
|
+
bts(Operand(object, Page::kDirtyFlagOffset), addr);
|
221
|
+
}
|
222
|
+
|
223
|
+
|
224
|
+
void MacroAssembler::InNewSpace(Register object,
|
225
|
+
Register scratch,
|
226
|
+
Condition cc,
|
227
|
+
Label* branch,
|
228
|
+
Label::Distance near_jump) {
|
229
|
+
if (Serializer::enabled()) {
|
230
|
+
// Can't do arithmetic on external references if it might get serialized.
|
231
|
+
// The mask isn't really an address. We load it as an external reference in
|
232
|
+
// case the size of the new space is different between the snapshot maker
|
233
|
+
// and the running system.
|
234
|
+
if (scratch.is(object)) {
|
235
|
+
movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
|
236
|
+
and_(scratch, kScratchRegister);
|
237
|
+
} else {
|
238
|
+
movq(scratch, ExternalReference::new_space_mask(isolate()));
|
239
|
+
and_(scratch, object);
|
240
|
+
}
|
241
|
+
movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
|
242
|
+
cmpq(scratch, kScratchRegister);
|
243
|
+
j(cc, branch, near_jump);
|
244
|
+
} else {
|
245
|
+
ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
|
246
|
+
intptr_t new_space_start =
|
247
|
+
reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
|
248
|
+
movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
|
249
|
+
if (scratch.is(object)) {
|
250
|
+
addq(scratch, kScratchRegister);
|
251
|
+
} else {
|
252
|
+
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
|
253
|
+
}
|
254
|
+
and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
|
255
|
+
j(cc, branch, near_jump);
|
256
|
+
}
|
257
|
+
}
|
258
|
+
|
259
|
+
|
260
|
+
void MacroAssembler::RecordWrite(Register object,
|
261
|
+
int offset,
|
262
|
+
Register value,
|
263
|
+
Register index) {
|
264
|
+
// The compiled code assumes that record write doesn't change the
|
265
|
+
// context register, so we check that none of the clobbered
|
266
|
+
// registers are rsi.
|
267
|
+
ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
|
268
|
+
|
269
|
+
// First, check if a write barrier is even needed. The tests below
|
270
|
+
// catch stores of smis and stores into the young generation.
|
271
|
+
Label done;
|
272
|
+
JumpIfSmi(value, &done);
|
273
|
+
|
274
|
+
RecordWriteNonSmi(object, offset, value, index);
|
275
|
+
bind(&done);
|
276
|
+
|
277
|
+
// Clobber all input registers when running with the debug-code flag
|
278
|
+
// turned on to provoke errors. This clobbering repeats the
|
279
|
+
// clobbering done inside RecordWriteNonSmi but it's necessary to
|
280
|
+
// avoid having the fast case for smis leave the registers
|
281
|
+
// unchanged.
|
282
|
+
if (emit_debug_code()) {
|
283
|
+
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
284
|
+
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
285
|
+
movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
286
|
+
}
|
287
|
+
}
|
288
|
+
|
289
|
+
|
290
|
+
void MacroAssembler::RecordWrite(Register object,
|
291
|
+
Register address,
|
292
|
+
Register value) {
|
293
|
+
// The compiled code assumes that record write doesn't change the
|
294
|
+
// context register, so we check that none of the clobbered
|
295
|
+
// registers are rsi.
|
296
|
+
ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
|
297
|
+
|
298
|
+
// First, check if a write barrier is even needed. The tests below
|
299
|
+
// catch stores of smis and stores into the young generation.
|
300
|
+
Label done;
|
301
|
+
JumpIfSmi(value, &done);
|
302
|
+
|
303
|
+
InNewSpace(object, value, equal, &done);
|
304
|
+
|
305
|
+
RecordWriteHelper(object, address, value);
|
306
|
+
|
307
|
+
bind(&done);
|
308
|
+
|
309
|
+
// Clobber all input registers when running with the debug-code flag
|
310
|
+
// turned on to provoke errors.
|
311
|
+
if (emit_debug_code()) {
|
312
|
+
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
313
|
+
movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
314
|
+
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
315
|
+
}
|
316
|
+
}
|
317
|
+
|
318
|
+
|
319
|
+
void MacroAssembler::RecordWriteNonSmi(Register object,
|
320
|
+
int offset,
|
321
|
+
Register scratch,
|
322
|
+
Register index) {
|
323
|
+
Label done;
|
324
|
+
|
325
|
+
if (emit_debug_code()) {
|
326
|
+
Label okay;
|
327
|
+
JumpIfNotSmi(object, &okay, Label::kNear);
|
328
|
+
Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
|
329
|
+
bind(&okay);
|
330
|
+
|
331
|
+
if (offset == 0) {
|
332
|
+
// index must be int32.
|
333
|
+
Register tmp = index.is(rax) ? rbx : rax;
|
334
|
+
push(tmp);
|
335
|
+
movl(tmp, index);
|
336
|
+
cmpq(tmp, index);
|
337
|
+
Check(equal, "Index register for RecordWrite must be untagged int32.");
|
338
|
+
pop(tmp);
|
339
|
+
}
|
340
|
+
}
|
341
|
+
|
342
|
+
// Test that the object address is not in the new space. We cannot
|
343
|
+
// update page dirty marks for new space pages.
|
344
|
+
InNewSpace(object, scratch, equal, &done);
|
345
|
+
|
346
|
+
// The offset is relative to a tagged or untagged HeapObject pointer,
|
347
|
+
// so either offset or offset + kHeapObjectTag must be a
|
348
|
+
// multiple of kPointerSize.
|
349
|
+
ASSERT(IsAligned(offset, kPointerSize) ||
|
350
|
+
IsAligned(offset + kHeapObjectTag, kPointerSize));
|
351
|
+
|
352
|
+
Register dst = index;
|
353
|
+
if (offset != 0) {
|
354
|
+
lea(dst, Operand(object, offset));
|
355
|
+
} else {
|
356
|
+
// array access: calculate the destination address in the same manner as
|
357
|
+
// KeyedStoreIC::GenerateGeneric.
|
358
|
+
lea(dst, FieldOperand(object,
|
359
|
+
index,
|
360
|
+
times_pointer_size,
|
361
|
+
FixedArray::kHeaderSize));
|
362
|
+
}
|
363
|
+
RecordWriteHelper(object, dst, scratch);
|
364
|
+
|
365
|
+
bind(&done);
|
366
|
+
|
367
|
+
// Clobber all input registers when running with the debug-code flag
|
368
|
+
// turned on to provoke errors.
|
369
|
+
if (emit_debug_code()) {
|
370
|
+
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
371
|
+
movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
372
|
+
movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
|
373
|
+
}
|
374
|
+
}
|
375
|
+
|
376
|
+
void MacroAssembler::Assert(Condition cc, const char* msg) {
|
377
|
+
if (emit_debug_code()) Check(cc, msg);
|
378
|
+
}
|
379
|
+
|
380
|
+
|
381
|
+
void MacroAssembler::AssertFastElements(Register elements) {
|
382
|
+
if (emit_debug_code()) {
|
383
|
+
Label ok;
|
384
|
+
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
|
385
|
+
Heap::kFixedArrayMapRootIndex);
|
386
|
+
j(equal, &ok, Label::kNear);
|
387
|
+
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
|
388
|
+
Heap::kFixedCOWArrayMapRootIndex);
|
389
|
+
j(equal, &ok, Label::kNear);
|
390
|
+
Abort("JSObject with fast elements map has slow elements");
|
391
|
+
bind(&ok);
|
392
|
+
}
|
393
|
+
}
|
394
|
+
|
395
|
+
|
396
|
+
void MacroAssembler::Check(Condition cc, const char* msg) {
|
397
|
+
Label L;
|
398
|
+
j(cc, &L, Label::kNear);
|
399
|
+
Abort(msg);
|
400
|
+
// will not return here
|
401
|
+
bind(&L);
|
402
|
+
}
|
403
|
+
|
404
|
+
|
405
|
+
void MacroAssembler::CheckStackAlignment() {
|
406
|
+
int frame_alignment = OS::ActivationFrameAlignment();
|
407
|
+
int frame_alignment_mask = frame_alignment - 1;
|
408
|
+
if (frame_alignment > kPointerSize) {
|
409
|
+
ASSERT(IsPowerOf2(frame_alignment));
|
410
|
+
Label alignment_as_expected;
|
411
|
+
testq(rsp, Immediate(frame_alignment_mask));
|
412
|
+
j(zero, &alignment_as_expected, Label::kNear);
|
413
|
+
// Abort if stack is not aligned.
|
414
|
+
int3();
|
415
|
+
bind(&alignment_as_expected);
|
416
|
+
}
|
417
|
+
}
|
418
|
+
|
419
|
+
|
420
|
+
void MacroAssembler::NegativeZeroTest(Register result,
|
421
|
+
Register op,
|
422
|
+
Label* then_label) {
|
423
|
+
Label ok;
|
424
|
+
testl(result, result);
|
425
|
+
j(not_zero, &ok, Label::kNear);
|
426
|
+
testl(op, op);
|
427
|
+
j(sign, then_label);
|
428
|
+
bind(&ok);
|
429
|
+
}
|
430
|
+
|
431
|
+
|
432
|
+
void MacroAssembler::Abort(const char* msg) {
|
433
|
+
// We want to pass the msg string like a smi to avoid GC
|
434
|
+
// problems, however msg is not guaranteed to be aligned
|
435
|
+
// properly. Instead, we pass an aligned pointer that is
|
436
|
+
// a proper v8 smi, but also pass the alignment difference
|
437
|
+
// from the real pointer as a smi.
|
438
|
+
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
|
439
|
+
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
|
440
|
+
// Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
|
441
|
+
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
|
442
|
+
#ifdef DEBUG
|
443
|
+
if (msg != NULL) {
|
444
|
+
RecordComment("Abort message: ");
|
445
|
+
RecordComment(msg);
|
446
|
+
}
|
447
|
+
#endif
|
448
|
+
// Disable stub call restrictions to always allow calls to abort.
|
449
|
+
AllowStubCallsScope allow_scope(this, true);
|
450
|
+
|
451
|
+
push(rax);
|
452
|
+
movq(kScratchRegister, p0, RelocInfo::NONE);
|
453
|
+
push(kScratchRegister);
|
454
|
+
movq(kScratchRegister,
|
455
|
+
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
|
456
|
+
RelocInfo::NONE);
|
457
|
+
push(kScratchRegister);
|
458
|
+
CallRuntime(Runtime::kAbort, 2);
|
459
|
+
// will not return here
|
460
|
+
int3();
|
461
|
+
}
|
462
|
+
|
463
|
+
|
464
|
+
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
|
465
|
+
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
|
466
|
+
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
|
467
|
+
}
|
468
|
+
|
469
|
+
|
470
|
+
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
|
471
|
+
ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
|
472
|
+
MaybeObject* result = stub->TryGetCode();
|
473
|
+
if (!result->IsFailure()) {
|
474
|
+
call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
|
475
|
+
RelocInfo::CODE_TARGET);
|
476
|
+
}
|
477
|
+
return result;
|
478
|
+
}
|
479
|
+
|
480
|
+
|
481
|
+
void MacroAssembler::TailCallStub(CodeStub* stub) {
|
482
|
+
ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
|
483
|
+
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
|
484
|
+
}
|
485
|
+
|
486
|
+
|
487
|
+
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
|
488
|
+
ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
|
489
|
+
MaybeObject* result = stub->TryGetCode();
|
490
|
+
if (!result->IsFailure()) {
|
491
|
+
jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
|
492
|
+
RelocInfo::CODE_TARGET);
|
493
|
+
}
|
494
|
+
return result;
|
495
|
+
}
|
496
|
+
|
497
|
+
|
498
|
+
void MacroAssembler::StubReturn(int argc) {
|
499
|
+
ASSERT(argc >= 1 && generating_stub());
|
500
|
+
ret((argc - 1) * kPointerSize);
|
501
|
+
}
|
502
|
+
|
503
|
+
|
504
|
+
void MacroAssembler::IllegalOperation(int num_arguments) {
|
505
|
+
if (num_arguments > 0) {
|
506
|
+
addq(rsp, Immediate(num_arguments * kPointerSize));
|
507
|
+
}
|
508
|
+
LoadRoot(rax, Heap::kUndefinedValueRootIndex);
|
509
|
+
}
|
510
|
+
|
511
|
+
|
512
|
+
void MacroAssembler::IndexFromHash(Register hash, Register index) {
|
513
|
+
// The assert checks that the constants for the maximum number of digits
|
514
|
+
// for an array index cached in the hash field and the number of bits
|
515
|
+
// reserved for it does not conflict.
|
516
|
+
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
|
517
|
+
(1 << String::kArrayIndexValueBits));
|
518
|
+
// We want the smi-tagged index in key. Even if we subsequently go to
|
519
|
+
// the slow case, converting the key to a smi is always valid.
|
520
|
+
// key: string key
|
521
|
+
// hash: key's hash field, including its array index value.
|
522
|
+
and_(hash, Immediate(String::kArrayIndexValueMask));
|
523
|
+
shr(hash, Immediate(String::kHashShift));
|
524
|
+
// Here we actually clobber the key which will be used if calling into
|
525
|
+
// runtime later. However as the new key is the numeric value of a string key
|
526
|
+
// there is no difference in using either key.
|
527
|
+
Integer32ToSmi(index, hash);
|
528
|
+
}
|
529
|
+
|
530
|
+
|
531
|
+
void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
|
532
|
+
CallRuntime(Runtime::FunctionForId(id), num_arguments);
|
533
|
+
}
|
534
|
+
|
535
|
+
|
536
|
+
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
|
537
|
+
const Runtime::Function* function = Runtime::FunctionForId(id);
|
538
|
+
Set(rax, function->nargs);
|
539
|
+
LoadAddress(rbx, ExternalReference(function, isolate()));
|
540
|
+
CEntryStub ces(1);
|
541
|
+
ces.SaveDoubles();
|
542
|
+
CallStub(&ces);
|
543
|
+
}
|
544
|
+
|
545
|
+
|
546
|
+
MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
|
547
|
+
int num_arguments) {
|
548
|
+
return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
|
549
|
+
}
|
550
|
+
|
551
|
+
|
552
|
+
void MacroAssembler::CallRuntime(const Runtime::Function* f,
|
553
|
+
int num_arguments) {
|
554
|
+
// If the expected number of arguments of the runtime function is
|
555
|
+
// constant, we check that the actual number of arguments match the
|
556
|
+
// expectation.
|
557
|
+
if (f->nargs >= 0 && f->nargs != num_arguments) {
|
558
|
+
IllegalOperation(num_arguments);
|
559
|
+
return;
|
560
|
+
}
|
561
|
+
|
562
|
+
// TODO(1236192): Most runtime routines don't need the number of
|
563
|
+
// arguments passed in because it is constant. At some point we
|
564
|
+
// should remove this need and make the runtime routine entry code
|
565
|
+
// smarter.
|
566
|
+
Set(rax, num_arguments);
|
567
|
+
LoadAddress(rbx, ExternalReference(f, isolate()));
|
568
|
+
CEntryStub ces(f->result_size);
|
569
|
+
CallStub(&ces);
|
570
|
+
}
|
571
|
+
|
572
|
+
|
573
|
+
MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
|
574
|
+
int num_arguments) {
|
575
|
+
if (f->nargs >= 0 && f->nargs != num_arguments) {
|
576
|
+
IllegalOperation(num_arguments);
|
577
|
+
// Since we did not call the stub, there was no allocation failure.
|
578
|
+
// Return some non-failure object.
|
579
|
+
return HEAP->undefined_value();
|
580
|
+
}
|
581
|
+
|
582
|
+
// TODO(1236192): Most runtime routines don't need the number of
|
583
|
+
// arguments passed in because it is constant. At some point we
|
584
|
+
// should remove this need and make the runtime routine entry code
|
585
|
+
// smarter.
|
586
|
+
Set(rax, num_arguments);
|
587
|
+
LoadAddress(rbx, ExternalReference(f, isolate()));
|
588
|
+
CEntryStub ces(f->result_size);
|
589
|
+
return TryCallStub(&ces);
|
590
|
+
}
|
591
|
+
|
592
|
+
|
593
|
+
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
|
594
|
+
int num_arguments) {
|
595
|
+
Set(rax, num_arguments);
|
596
|
+
LoadAddress(rbx, ext);
|
597
|
+
|
598
|
+
CEntryStub stub(1);
|
599
|
+
CallStub(&stub);
|
600
|
+
}
|
601
|
+
|
602
|
+
|
603
|
+
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
|
604
|
+
int num_arguments,
|
605
|
+
int result_size) {
|
606
|
+
// ----------- S t a t e -------------
|
607
|
+
// -- rsp[0] : return address
|
608
|
+
// -- rsp[8] : argument num_arguments - 1
|
609
|
+
// ...
|
610
|
+
// -- rsp[8 * num_arguments] : argument 0 (receiver)
|
611
|
+
// -----------------------------------
|
612
|
+
|
613
|
+
// TODO(1236192): Most runtime routines don't need the number of
|
614
|
+
// arguments passed in because it is constant. At some point we
|
615
|
+
// should remove this need and make the runtime routine entry code
|
616
|
+
// smarter.
|
617
|
+
Set(rax, num_arguments);
|
618
|
+
JumpToExternalReference(ext, result_size);
|
619
|
+
}
|
620
|
+
|
621
|
+
|
622
|
+
MaybeObject* MacroAssembler::TryTailCallExternalReference(
|
623
|
+
const ExternalReference& ext, int num_arguments, int result_size) {
|
624
|
+
// ----------- S t a t e -------------
|
625
|
+
// -- rsp[0] : return address
|
626
|
+
// -- rsp[8] : argument num_arguments - 1
|
627
|
+
// ...
|
628
|
+
// -- rsp[8 * num_arguments] : argument 0 (receiver)
|
629
|
+
// -----------------------------------
|
630
|
+
|
631
|
+
// TODO(1236192): Most runtime routines don't need the number of
|
632
|
+
// arguments passed in because it is constant. At some point we
|
633
|
+
// should remove this need and make the runtime routine entry code
|
634
|
+
// smarter.
|
635
|
+
Set(rax, num_arguments);
|
636
|
+
return TryJumpToExternalReference(ext, result_size);
|
637
|
+
}
|
638
|
+
|
639
|
+
|
640
|
+
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
|
641
|
+
int num_arguments,
|
642
|
+
int result_size) {
|
643
|
+
TailCallExternalReference(ExternalReference(fid, isolate()),
|
644
|
+
num_arguments,
|
645
|
+
result_size);
|
646
|
+
}
|
647
|
+
|
648
|
+
|
649
|
+
MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
|
650
|
+
int num_arguments,
|
651
|
+
int result_size) {
|
652
|
+
return TryTailCallExternalReference(ExternalReference(fid, isolate()),
|
653
|
+
num_arguments,
|
654
|
+
result_size);
|
655
|
+
}
|
656
|
+
|
657
|
+
|
658
|
+
static int Offset(ExternalReference ref0, ExternalReference ref1) {
|
659
|
+
int64_t offset = (ref0.address() - ref1.address());
|
660
|
+
// Check that fits into int.
|
661
|
+
ASSERT(static_cast<int>(offset) == offset);
|
662
|
+
return static_cast<int>(offset);
|
663
|
+
}
|
664
|
+
|
665
|
+
|
666
|
+
void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
|
667
|
+
#ifdef _WIN64
|
668
|
+
// We need to prepare a slot for result handle on stack and put
|
669
|
+
// a pointer to it into 1st arg register.
|
670
|
+
EnterApiExitFrame(arg_stack_space + 1);
|
671
|
+
|
672
|
+
// rcx must be used to pass the pointer to the return value slot.
|
673
|
+
lea(rcx, StackSpaceOperand(arg_stack_space));
|
674
|
+
#else
|
675
|
+
EnterApiExitFrame(arg_stack_space);
|
676
|
+
#endif
|
677
|
+
}
|
678
|
+
|
679
|
+
|
680
|
+
MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
|
681
|
+
ApiFunction* function, int stack_space) {
|
682
|
+
Label empty_result;
|
683
|
+
Label prologue;
|
684
|
+
Label promote_scheduled_exception;
|
685
|
+
Label delete_allocated_handles;
|
686
|
+
Label leave_exit_frame;
|
687
|
+
Label write_back;
|
688
|
+
|
689
|
+
Factory* factory = isolate()->factory();
|
690
|
+
ExternalReference next_address =
|
691
|
+
ExternalReference::handle_scope_next_address();
|
692
|
+
const int kNextOffset = 0;
|
693
|
+
const int kLimitOffset = Offset(
|
694
|
+
ExternalReference::handle_scope_limit_address(),
|
695
|
+
next_address);
|
696
|
+
const int kLevelOffset = Offset(
|
697
|
+
ExternalReference::handle_scope_level_address(),
|
698
|
+
next_address);
|
699
|
+
ExternalReference scheduled_exception_address =
|
700
|
+
ExternalReference::scheduled_exception_address(isolate());
|
701
|
+
|
702
|
+
// Allocate HandleScope in callee-save registers.
|
703
|
+
Register prev_next_address_reg = r14;
|
704
|
+
Register prev_limit_reg = rbx;
|
705
|
+
Register base_reg = r15;
|
706
|
+
movq(base_reg, next_address);
|
707
|
+
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
|
708
|
+
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
|
709
|
+
addl(Operand(base_reg, kLevelOffset), Immediate(1));
|
710
|
+
// Call the api function!
|
711
|
+
movq(rax,
|
712
|
+
reinterpret_cast<int64_t>(function->address()),
|
713
|
+
RelocInfo::RUNTIME_ENTRY);
|
714
|
+
call(rax);
|
715
|
+
|
716
|
+
#ifdef _WIN64
|
717
|
+
// rax keeps a pointer to v8::Handle, unpack it.
|
718
|
+
movq(rax, Operand(rax, 0));
|
719
|
+
#endif
|
720
|
+
// Check if the result handle holds 0.
|
721
|
+
testq(rax, rax);
|
722
|
+
j(zero, &empty_result);
|
723
|
+
// It was non-zero. Dereference to get the result value.
|
724
|
+
movq(rax, Operand(rax, 0));
|
725
|
+
bind(&prologue);
|
726
|
+
|
727
|
+
// No more valid handles (the result handle was the last one). Restore
|
728
|
+
// previous handle scope.
|
729
|
+
subl(Operand(base_reg, kLevelOffset), Immediate(1));
|
730
|
+
movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
|
731
|
+
cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
|
732
|
+
j(not_equal, &delete_allocated_handles);
|
733
|
+
bind(&leave_exit_frame);
|
734
|
+
|
735
|
+
// Check if the function scheduled an exception.
|
736
|
+
movq(rsi, scheduled_exception_address);
|
737
|
+
Cmp(Operand(rsi, 0), factory->the_hole_value());
|
738
|
+
j(not_equal, &promote_scheduled_exception);
|
739
|
+
|
740
|
+
LeaveApiExitFrame();
|
741
|
+
ret(stack_space * kPointerSize);
|
742
|
+
|
743
|
+
bind(&promote_scheduled_exception);
|
744
|
+
MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
|
745
|
+
0, 1);
|
746
|
+
if (result->IsFailure()) {
|
747
|
+
return result;
|
748
|
+
}
|
749
|
+
|
750
|
+
bind(&empty_result);
|
751
|
+
// It was zero; the result is undefined.
|
752
|
+
Move(rax, factory->undefined_value());
|
753
|
+
jmp(&prologue);
|
754
|
+
|
755
|
+
// HandleScope limit has changed. Delete allocated extensions.
|
756
|
+
bind(&delete_allocated_handles);
|
757
|
+
movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
|
758
|
+
movq(prev_limit_reg, rax);
|
759
|
+
#ifdef _WIN64
|
760
|
+
LoadAddress(rcx, ExternalReference::isolate_address());
|
761
|
+
#else
|
762
|
+
LoadAddress(rdi, ExternalReference::isolate_address());
|
763
|
+
#endif
|
764
|
+
LoadAddress(rax,
|
765
|
+
ExternalReference::delete_handle_scope_extensions(isolate()));
|
766
|
+
call(rax);
|
767
|
+
movq(rax, prev_limit_reg);
|
768
|
+
jmp(&leave_exit_frame);
|
769
|
+
|
770
|
+
return result;
|
771
|
+
}
|
772
|
+
|
773
|
+
|
774
|
+
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}


MaybeObject* MacroAssembler::TryJumpToExternalReference(
    const ExternalReference& ext, int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  LoadAddress(rbx, ext);
  CEntryStub ces(result_size);
  return TryTailCallStub(&ces);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // Calls are not allowed in some stubs.
  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag, call_wrapper);
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into the target register.
  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  movq(target, FieldOperand(target,
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x, RelocInfo::NONE);
  }
}

void MacroAssembler::Set(const Operand& dst, int64_t x) {
  if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    Set(kScratchRegister, x);
    movq(dst, kScratchRegister);
  }
}

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

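// A note on the representation used below: on this x64 port kSmiTag is 0 and
// kSmiShift is 32, so a smi carries its 32-bit payload in the upper half of
// the 64-bit word and its low 32 bits are all zero.  For example, the smi
// for the integer 5 is the bit pattern 0x0000000500000000.  Tagging is thus
// a 32-bit move followed by a shift left by 32 (Integer32ToSmi below), and
// untagging is a shift right by 32 (SmiToInteger32/64); the field variants
// simply address the upper four bytes directly via kSmiShift / kBitsPerByte.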
Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  if (value == 1) {
    return kSmiConstantRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}

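// LoadSmiConstant below avoids the 10-byte movq-with-64-bit-immediate where
// it can: kSmiConstantRegister permanently holds Smi::FromInt(1), i.e. the
// bit pattern 1 << 32, and an lea with a scaled index can synthesize small
// multiples of it in a single instruction.  For the smi 9, for instance, it
// emits lea dst, [kSmiConstantRegister + kSmiConstantRegister * 8], which
// computes 9 << 32; negative constants build the positive pattern first and
// then apply neg.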
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (emit_debug_code()) {
    movq(dst,
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
         RelocInfo::NONE);
    cmpq(dst, kSmiConstantRegister);
    if (allow_stub_calls()) {
      Assert(equal, "Uninitialized kSmiConstantRegister");
    } else {
      Label ok;
      j(equal, &ok, Label::kNear);
      int3();
      bind(&ok);
    }
  }
  int value = source->value();
  if (value == 0) {
    xorl(dst, dst);
    return;
  }
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

  switch (uvalue) {
    case 9:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      break;
    case 8:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      break;
    case 4:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
      break;
    case 5:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
    case 3:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
      break;
    case 2:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      break;
    case 1:
      movq(dst, kSmiConstantRegister);
      break;
    case 0:
      UNREACHABLE();
      return;
    default:
      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
      return;
  }
  if (negative) {
    neg(dst);
  }
}


void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shl(dst, Immediate(kSmiShift));
}


void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok, Label::kNear);
    if (allow_stub_calls()) {
      Abort("Integer32ToSmiField writing to non-smi location");
    } else {
      int3();
    }
    bind(&ok);
  }
  ASSERT(kSmiShift % kBitsPerByte == 0);
  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addl(dst, Immediate(constant));
  } else {
    leal(dst, Operand(src, constant));
  }
  shl(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  shr(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  sar(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiTest(Register src) {
  testq(src, src);
}


void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
  if (emit_debug_code()) {
    AbortIfNotSmi(smi1);
    AbortIfNotSmi(smi2);
  }
  cmpq(smi1, smi2);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
  }
  Cmp(dst, src);
}


void MacroAssembler::Cmp(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testq(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpq(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
    AbortIfNotSmi(src);
  }
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
    AbortIfNotSmi(src);
  }
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  if (emit_debug_code()) {
    AbortIfNotSmi(dst);
  }
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}


void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
  // The Operand cannot use the smi register.
  Register smi_reg = GetSmiConstant(src);
  ASSERT(!dst.AddressUsesRegister(smi_reg));
  cmpq(dst, smi_reg);
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  ASSERT(power >= 0);
  ASSERT(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (power < kSmiShift) {
    sar(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shl(dst, Immediate(power - kSmiShift));
  }
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  ASSERT((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shr(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}


void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 Label* on_not_smis,
                                 Label::Distance near_jump) {
  if (dst.is(src1) || dst.is(src2)) {
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    movq(kScratchRegister, src1);
    or_(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    or_(dst, src2);
    JumpIfNotSmi(dst, on_not_smis, near_jump);
  }
}


Condition MacroAssembler::CheckSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckSmi(const Operand& src) {
  ASSERT_EQ(0, kSmiTag);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


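// CheckNonNegativeSmi needs to inspect two far-apart bits at once: bit 0
// (the smi tag) and bit 63 (the sign).  Rotating the value left by one
// moves the sign bit into bit 0 and the tag bit into bit 1, so a single
// testb against 3 covers both; the condition is zero exactly when the value
// is a non-negative smi (mask 0x8000000000000001 entirely clear).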
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  // Test that both bits of the mask 0x8000000000000001 are zero.
  movq(kScratchRegister, src);
  rol(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}


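// CheckBothSmi classifies two registers with one addition.  With kSmiTag ==
// 0 and kHeapObjectTag == 1, the low two bits are 00 for a smi and 01 for a
// heap pointer, so for the sum computed by leal:
//   smi + smi -> low bits 00,  smi + object -> 01,  object + object -> 10.
// Testing the sum against 0x03 is therefore zero only when both operands
// are smis.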
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  leal(kScratchRegister, Operand(first, second, times_1, 0));
  testb(kScratchRegister, Immediate(0x03));
  return zero;
}


Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                  Register second) {
  if (first.is(second)) {
    return CheckNonNegativeSmi(first);
  }
  movq(kScratchRegister, first);
  or_(kScratchRegister, second);
  rol(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(3));
  return zero;
}


Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  ASSERT(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpq(src, kSmiConstantRegister);
  return overflow;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  // A 32-bit integer value can always be converted to a smi.
  return always;
}


Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  // An unsigned 32-bit integer value is valid as long as the high bit
  // is not set.
  testl(src, src);
  return positive;
}


void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
  if (dst.is(src)) {
    andl(dst, Immediate(kSmiTagMask));
  } else {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  }
}


void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
  if (!(src.AddressUsesRegister(dst))) {
    movl(dst, Immediate(kSmiTagMask));
    andl(dst, src);
  } else {
    movl(dst, src);
    andl(dst, Immediate(kSmiTagMask));
  }
}


void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            Label* on_invalid,
                                            Label::Distance near_jump) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                Label* on_invalid,
                                                Label::Distance near_jump) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid, near_jump);
}


void MacroAssembler::JumpIfSmi(Register src,
                               Label* on_smi,
                               Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(smi, on_smi, near_jump);
}


void MacroAssembler::JumpIfNotSmi(Register src,
                                  Label* on_not_smi,
                                  Label::Distance near_jump) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi, near_jump);
}


void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, Label* on_not_smi_or_negative,
    Label::Distance near_jump) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
}


void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             Label* on_equals,
                                             Label::Distance near_jump) {
  SmiCompare(src, constant);
  j(equal, on_equals, near_jump);
}


void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi,
                                      Label::Distance near_jump) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  Label* on_not_both_smi,
                                                  Label::Distance near_jump) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
}


void MacroAssembler::SmiTryAddConstant(Register dst,
                                       Register src,
                                       Smi* constant,
                                       Label* on_not_smi_result,
                                       Label::Distance near_jump) {
  // Does not assume that src is a smi.
  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
  ASSERT_EQ(0, kSmiTag);
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));

  JumpIfNotSmi(src, on_not_smi_result, near_jump);
  Register tmp = (dst.is(src) ? kScratchRegister : dst);
  LoadSmiConstant(tmp, constant);
  addq(tmp, src);
  j(overflow, on_not_smi_result, near_jump);
  if (dst.is(src)) {
    movq(dst, tmp);
  }
}


void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
    return;
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    switch (constant->value()) {
      case 1:
        addq(dst, kSmiConstantRegister);
        return;
      case 2:
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        Register constant_reg = GetSmiConstant(constant);
        addq(dst, constant_reg);
        return;
    }
  } else {
    switch (constant->value()) {
      case 1:
        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
        return;
      case 2:
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        LoadSmiConstant(dst, constant);
        addq(dst, src);
        return;
    }
  }
}


void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
  }
}


void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));

    LoadSmiConstant(kScratchRegister, constant);
    addq(kScratchRegister, src);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    LoadSmiConstant(dst, constant);
    addq(dst, src);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subq(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result; it only
      // differs on the overflow bit, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addq(dst, src);
    }
  }
}


void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result, near_jump);
      LoadSmiConstant(kScratchRegister, constant);
      subq(dst, kScratchRegister);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
      addq(kScratchRegister, dst);
      j(overflow, on_not_smi_result, near_jump);
      movq(dst, kScratchRegister);
    }
  } else {
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result, near_jump);
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result; it only
      // differs on the overflow bit, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addq(dst, src);
      j(overflow, on_not_smi_result, near_jump);
    }
  }
}


void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    movq(kScratchRegister, src);
    neg(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpq(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movq(src, kScratchRegister);
  } else {
    movq(dst, src);
    neg(dst);
    cmpq(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result, near_jump);
    movq(dst, kScratchRegister);
  } else {
    ASSERT(!src2.AddressUsesRegister(dst));
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movq(kScratchRegister, src1);
      addq(kScratchRegister, src2);
      Check(no_overflow, "Smi addition overflow");
    }
    lea(dst, Operand(src1, src2, times_1, 0));
  } else {
    addq(dst, src2);
    Assert(no_overflow, "Smi addition overflow");
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  if (dst.is(src1)) {
    cmpq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    subq(dst, src2);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  subq(dst, src2);
  Assert(no_overflow, "Smi subtraction overflow");
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    movq(kScratchRegister, src2);
    cmpq(src1, kScratchRegister);
    j(overflow, on_not_smi_result, near_jump);
    subq(src1, kScratchRegister);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  subq(dst, src2);
  Assert(no_overflow, "Smi subtraction overflow");
}


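// SmiMul's extra zero checks below exist because JavaScript distinguishes
// -0 from +0: a product like -1 * 0 must evaluate to -0, which is only
// representable as a heap number, never as a smi.  A zero product whose
// operands differ in sign (detected by xor-ing them and testing the sign
// bit) therefore has to bail out to on_not_smi_result.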
void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movq(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    movq(dst, kScratchRegister);
    xor_(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result. If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero, so check whether the other is
    // negative.
    movq(kScratchRegister, src1);
    xor_(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}


void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testq(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  testl(rax, Immediate(0x7fffffff));
  j(not_zero, &safe_div, Label::kNear);
  testq(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div, Label::kNear);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}


void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));
  ASSERT(!src1.is(src2));

  testq(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  jmp(on_not_smi_result, near_jump);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  // Check for a negative zero result. If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result, Label::kNear);
  testq(src1, src1);
  j(negative, on_not_smi_result, near_jump);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}


void MacroAssembler::SmiNot(Register dst, Register src) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));
  // Set tag and padding bits before negating, so that they are zero afterwards.
  movl(kScratchRegister, Immediate(~0));
  if (dst.is(src)) {
    xor_(dst, kScratchRegister);
  } else {
    lea(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  not_(dst);
}


void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  and_(dst, src2);
}


void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    Set(dst, 0);
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    and_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    and_(dst, src);
  }
}


void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    ASSERT(!src1.is(src2));
    movq(dst, src1);
  }
  or_(dst, src2);
}


void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    or_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    or_(dst, src);
  }
}


void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    ASSERT(!src1.is(src2));
    movq(dst, src1);
  }
  xor_(dst, src2);
}


void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xor_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    xor_(dst, src);
  }
}


void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  ASSERT(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      sar(dst, Immediate(shift_value + kSmiShift));
      shl(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}


void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value) {
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (shift_value > 0) {
    shl(dst, Immediate(shift_value));
  }
}


void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // A logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movq(dst, src);
    if (shift_value == 0) {
      testq(dst, dst);
      j(negative, on_not_smi_result, near_jump);
    }
    shr(dst, Immediate(shift_value + kSmiShift));
    shl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2) {
  ASSERT(!dst.is(rcx));
  // Untag the shift amount.
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  // The shift amount is specified by the lower 5 bits, not six as for the
  // 64-bit shl opcode.
  and_(rcx, Immediate(0x1f));
  shl_cl(dst);
}


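// The two variable-shift helpers below fold untagging into the shift
// itself: orl(rcx, Immediate(kSmiShift)) sets bit 5 of the count, turning a
// requested shift of n (0..31) into a 64-bit shift by 32 + n.  A single
// shr_cl/sar_cl then both discards the 32 tag bits and applies the
// smi-level shift, and the following shl by kSmiShift re-tags the result.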
void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  // dst and src1 can be the same, because the one case that bails out
  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  shr_cl(dst);  // Shift is (rcx & 0x1f) + 32.
  shl(dst, Immediate(kSmiShift));
  testq(dst, dst);
  if (src1.is(rcx) || src2.is(rcx)) {
    Label positive_result;
    j(positive, &positive_result, Label::kNear);
    if (src1.is(rcx)) {
      movq(src1, kScratchRegister);
    } else {
      movq(src2, kScratchRegister);
    }
    jmp(on_not_smi_result, near_jump);
    bind(&positive_result);
  } else {
    // src2 was zero and src1 negative.
    j(negative, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  if (src1.is(rcx)) {
    movq(kScratchRegister, src1);
  } else if (src2.is(rcx)) {
    movq(kScratchRegister, src2);
  }
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  sar_cl(dst);  // Shift is 32 + (original rcx & 0x1f).
  shl(dst, Immediate(kSmiShift));
  if (src1.is(rcx)) {
    movq(src1, kScratchRegister);
  } else if (src2.is(rcx)) {
    movq(src2, kScratchRegister);
  }
}


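// SelectNonSmi below is a branchless select.  After the smi check,
// kScratchRegister = (src1 & kSmiTagMask) - 1 is all ones when src1 is the
// smi and all zeros when src1 is the heap object; masking src1 ^ src2 with
// it and xor-ing src1 back in yields src2 in the first case and src1 in the
// second, i.e. always the operand that is not a smi.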
void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(src1));
  ASSERT(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  if (allow_stub_calls()) {  // Check contains a stub call.
    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
  }
#endif
  ASSERT_EQ(0, kSmiTag);
  ASSERT_EQ(0, Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  and_(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero, then neither operand is a smi (both are heap objects).
  j(not_zero, on_not_smis, near_jump);

  // Exactly one operand is a smi.
  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subq(kScratchRegister, Immediate(1));
  // If src1 is a smi, then the scratch register is all 1s, else it is all 0s.
  movq(dst, src1);
  xor_(dst, src2);
  and_(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xor_(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}


SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  ASSERT(is_uint6(shift));
  // There is a possible optimization if shift is in the range 60-63, but that
  // will (and must) never happen.
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (shift < kSmiShift) {
    sar(dst, Immediate(kSmiShift - shift));
  } else {
    shl(dst, Immediate(shift - kSmiShift));
  }
  return SmiIndex(dst, times_1);
}

SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  // Register src holds a positive smi.
  ASSERT(is_uint6(shift));
  if (!dst.is(src)) {
    movq(dst, src);
  }
  neg(dst);
  if (shift < kSmiShift) {
    sar(dst, Immediate(kSmiShift - shift));
  } else {
    shl(dst, Immediate(shift - kSmiShift));
  }
  return SmiIndex(dst, times_1);
}


void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  ASSERT_EQ(0, kSmiShift % kBitsPerByte);
  addl(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::JumpIfNotString(Register object,
                                     Register object_map,
                                     Label* not_string,
                                     Label::Distance near_jump) {
  Condition is_smi = CheckSmi(object);
  j(is_smi, not_string, near_jump);
  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
  j(above_equal, not_string, near_jump);
}


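// Two of the string checks below share a packing trick: the ASSERT that
// kFlatAsciiStringMask & (kFlatAsciiStringMask << 3) == 0 guarantees the
// mask and the mask shifted by three do not overlap, so
// lea(scratch1, Operand(scratch1, scratch2, times_8, 0)) places both masked
// instance types in disjoint bit ranges of one register, and a single cmpl
// then tests both strings for the flat-ASCII pattern at once.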
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
    Register first_object,
    Register second_object,
    Register scratch1,
    Register scratch2,
    Label* on_fail,
    Label::Distance near_jump) {
  // Check that both objects are not smis.
  Condition either_smi = CheckEitherSmi(first_object, second_object);
  j(either_smi, on_fail, near_jump);

  // Load instance type for both strings.
  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat ascii strings.
  ASSERT(kNotStringTag != 0);
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;

  andl(scratch1, Immediate(kFlatAsciiStringMask));
  andl(scratch2, Immediate(kFlatAsciiStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
    Register instance_type,
    Register scratch,
    Label* failure,
    Label::Distance near_jump) {
  if (!scratch.is(instance_type)) {
    movl(scratch, instance_type);
  }

  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;

  andl(scratch, Immediate(kFlatAsciiStringMask));
  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
  j(not_equal, failure, near_jump);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first_object_instance_type,
    Register second_object_instance_type,
    Register scratch1,
    Register scratch2,
    Label* on_fail,
    Label::Distance near_jump) {
  // Load instance type for both strings.
  movq(scratch1, first_object_instance_type);
  movq(scratch2, second_object_instance_type);

  // Check that both are flat ascii strings.
  ASSERT(kNotStringTag != 0);
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;

  andl(scratch1, Immediate(kFlatAsciiStringMask));
  andl(scratch2, Immediate(kFlatAsciiStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
  j(not_equal, on_fail, near_jump);
}



void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    movq(dst, src);
  }
}


void MacroAssembler::Move(Register dst, Handle<Object> source) {
  ASSERT(!source->IsFailure());
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
  }
}


void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
  ASSERT(!source->IsFailure());
  if (source->IsSmi()) {
    Move(dst, Smi::cast(*source));
  } else {
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    movq(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    Move(kScratchRegister, source);
    cmpq(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
  if (source->IsSmi()) {
    Cmp(dst, Smi::cast(*source));
  } else {
    ASSERT(source->IsHeapObject());
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    cmpq(dst, kScratchRegister);
  }
}


void MacroAssembler::Push(Handle<Object> source) {
  if (source->IsSmi()) {
    Push(Smi::cast(*source));
  } else {
    ASSERT(source->IsHeapObject());
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    push(kScratchRegister);
  }
}


void MacroAssembler::Push(Smi* source) {
  intptr_t smi = reinterpret_cast<intptr_t>(source);
  if (is_int32(smi)) {
    push(Immediate(static_cast<int32_t>(smi)));
  } else {
    Register constant = GetSmiConstant(source);
    push(constant);
  }
}


void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    addq(rsp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::Test(const Operand& src, Smi* source) {
  testl(Operand(src, kIntSize), Immediate(source->value()));
}


void MacroAssembler::Jump(ExternalReference ext) {
  LoadAddress(kScratchRegister, ext);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
  movq(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
  // TODO(X64): Inline this
  jmp(code_object, rmode);
}


int MacroAssembler::CallSize(ExternalReference ext) {
  // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
  const int kCallInstructionSize = 3;
  return LoadAddressSize(ext) + kCallInstructionSize;
}


void MacroAssembler::Call(ExternalReference ext) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(ext);
#endif
  LoadAddress(kScratchRegister, ext);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}


void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(destination, rmode);
#endif
  movq(kScratchRegister, destination, rmode);
  call(kScratchRegister);
#ifdef DEBUG
  CHECK_EQ(pc_offset(), end_position);
#endif
}


void MacroAssembler::Call(Handle<Code> code_object,
                          RelocInfo::Mode rmode,
                          unsigned ast_id) {
#ifdef DEBUG
  int end_position = pc_offset() + CallSize(code_object);
#endif
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  call(code_object, rmode, ast_id);
#ifdef DEBUG
  CHECK_EQ(end_position, pc_offset());
#endif
}


void MacroAssembler::Pushad() {
  push(rax);
  push(rcx);
  push(rdx);
  push(rbx);
  // Not pushing rsp or rbp.
  push(rsi);
  push(rdi);
  push(r8);
  push(r9);
  // r10 is kScratchRegister.
  push(r11);
  // r12 is kSmiConstantRegister.
  // r13 is kRootRegister.
  push(r14);
  push(r15);
  STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
  // Use lea for symmetry with Popad.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  lea(rsp, Operand(rsp, -sp_delta));
}


void MacroAssembler::Popad() {
  // Popad must not change the flags, so use lea instead of addq.
  int sp_delta =
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
  lea(rsp, Operand(rsp, sp_delta));
  pop(r15);
  pop(r14);
  pop(r11);
  pop(r9);
  pop(r8);
  pop(rdi);
  pop(rsi);
  pop(rbx);
  pop(rdx);
  pop(rcx);
  pop(rax);
}


void MacroAssembler::Dropad() {
  addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}


// Order in which general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
    0,
    1,
    2,
    3,
    -1,
    -1,
    4,
    5,
    6,
    7,
    -1,
    8,
    -1,
    -1,
    9,
    10
};


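// kSafepointPushRegisterIndices above maps each register code (rax == 0
// through r15 == 15) to its position in the Pushad push order; -1 marks the
// registers Pushad deliberately skips (rsp, rbp, r10 = kScratchRegister,
// r12 = kSmiConstantRegister and r13 = kRootRegister).
// SafepointRegisterSlot below turns a register code into a stack operand
// via SafepointRegisterStackIndex, which is expected to be derived from
// this table.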
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  movq(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  movq(dst, SafepointRegisterSlot(src));
}


Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


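// A try-handler record occupies StackHandlerConstants::kSize bytes laid out,
// from the top of the stack: next-handler pointer, frame pointer (0 for JS
// entry frames), handler state, return address.  The isolate-level handler
// address always points at the topmost record, so PushTryHandler links a
// new record in O(1), and PopTryHandler/Throw unlink by popping the saved
// next pointer back into that slot.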
void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // The pc (return address) is already on TOS. This code pushes state,
  // frame pointer and current handler. Check that they are expected
  // next on the stack, in that order.
  ASSERT_EQ(StackHandlerConstants::kStateOffset,
            StackHandlerConstants::kPCOffset - kPointerSize);
  ASSERT_EQ(StackHandlerConstants::kFPOffset,
            StackHandlerConstants::kStateOffset - kPointerSize);
  ASSERT_EQ(StackHandlerConstants::kNextOffset,
            StackHandlerConstants::kFPOffset - kPointerSize);

  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      push(Immediate(StackHandler::TRY_CATCH));
    } else {
      push(Immediate(StackHandler::TRY_FINALLY));
    }
    push(rbp);
  } else {
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame so we save NULL
    // for rbp. We expect the code throwing an exception to check rbp
    // before dereferencing it to restore the context.
    push(Immediate(StackHandler::ENTRY));
    push(Immediate(0));  // NULL frame pointer.
  }
  // Save the current handler.
  Operand handler_operand =
      ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
  push(handler_operand);
  // Link this handler.
  movq(handler_operand, rsp);
}


void MacroAssembler::PopTryHandler() {
|
2424
|
+
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
|
2425
|
+
// Unlink this handler.
|
2426
|
+
Operand handler_operand =
|
2427
|
+
ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
|
2428
|
+
pop(handler_operand);
|
2429
|
+
// Remove the remaining fields.
|
2430
|
+
addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
|
2431
|
+
}
|
2432
|
+
|
2433
|
+
|
2434
|
+
void MacroAssembler::Throw(Register value) {
  // Check that the stack contains next handler, frame pointer, state and
  // return address, in that order.
  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
                StackHandlerConstants::kStateOffset);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
                StackHandlerConstants::kPCOffset);
  // Keep thrown value in rax.
  if (!value.is(rax)) {
    movq(rax, value);
  }

  ExternalReference handler_address(Isolate::k_handler_address, isolate());
  Operand handler_operand = ExternalOperand(handler_address);
  movq(rsp, handler_operand);
  // Get the next handler in the chain.
  pop(handler_operand);
  pop(rbp);  // Pop the frame pointer.
  pop(rdx);  // Remove the state.

  // Before returning we restore the context from the frame pointer if not
  // NULL.  The frame pointer is NULL in the exception handler of a JS entry
  // frame.
  Set(rsi, 0);  // Tentatively set context pointer to NULL.
  Label skip;
  cmpq(rbp, Immediate(0));
  j(equal, &skip, Label::kNear);
  movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  bind(&skip);
  ret(0);
}


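// ---------------------------------------------------------------------------
// Illustrative note (editor's addition, not part of the original V8 source):
// Throw() unwinds in three steps rather than looping:
//
//   1. rsp is pointed at the topmost StackHandler record;
//   2. its 'next' field is popped back into the isolate's handler slot,
//      unlinking the record;
//   3. fp and state are popped, and ret(0) transfers control to the saved pc
//      with the thrown value in rax.
//
// ThrowUncatchable() below differs only in that it first walks the chain
// until it finds an ENTRY handler.
// ---------------------------------------------------------------------------

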
void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Keep thrown value in rax.
  if (!value.is(rax)) {
    movq(rax, value);
  }
  // Fetch top stack handler.
  ExternalReference handler_address(Isolate::k_handler_address, isolate());
  Load(rsp, handler_address);

  // Unwind the handlers until the ENTRY handler is found.
  Label loop, done;
  bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kStateOffset;
  cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
  j(equal, &done, Label::kNear);
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kNextOffset;
  movq(rsp, Operand(rsp, kNextOffset));
  jmp(&loop);
  bind(&done);

  // Set the top handler address to next handler past the current ENTRY
  // handler.
  Operand handler_operand = ExternalOperand(handler_address);
  pop(handler_operand);

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(
        Isolate::k_external_caught_exception_address, isolate());
    Set(rax, static_cast<int64_t>(false));
    Store(external_caught, rax);

    // Set pending exception and rax to out of memory exception.
    ExternalReference pending_exception(Isolate::k_pending_exception_address,
                                        isolate());
    movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
    Store(pending_exception, rax);
  }

  // Clear the context pointer.
  Set(rsi, 0);

  // Restore registers from handler.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
                StackHandlerConstants::kFPOffset);
  pop(rbp);  // FP
  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
                StackHandlerConstants::kStateOffset);
  pop(rdx);  // State

  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
                StackHandlerConstants::kPCOffset);
  ret(0);
}


void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    pop(scratch);
    addq(rsp, Immediate(bytes_dropped));
    push(scratch);
    ret(0);
  }
}


void MacroAssembler::FCmp() {
  fucomip();
  fstp(0);
}


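// ---------------------------------------------------------------------------
// Illustrative note (editor's addition, not part of the original V8 source):
// the x64 'ret imm16' instruction encodes only a 16-bit byte count, so
// Ret(bytes_dropped, scratch) falls back to popping the return address into
// a scratch register, adjusting rsp by hand, and re-pushing the address
// before a plain ret(0) whenever the count does not fit in 16 bits.
// ---------------------------------------------------------------------------

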
void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       Immediate(static_cast<int8_t>(type)));
}


void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
  j(not_equal, fail);
}


void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  testl(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  decb(reg);  // 0 if negative, 255 if positive.
  bind(&done);
}


void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister temp_xmm_reg,
                                        Register result_reg,
                                        Register temp_reg) {
  Label done;
  Set(result_reg, 0);
  xorps(temp_xmm_reg, temp_xmm_reg);
  ucomisd(input_reg, temp_xmm_reg);
  j(below, &done, Label::kNear);
  uint64_t one_half = BitCast<uint64_t, double>(0.5);
  Set(temp_reg, one_half);
  movq(temp_xmm_reg, temp_reg);
  addsd(temp_xmm_reg, input_reg);
  cvttsd2si(result_reg, temp_xmm_reg);
  testl(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  Set(result_reg, 255);
  bind(&done);
}


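// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8
// source): for finite inputs the two clamps above behave like this portable
// C, assuming a 32-bit int:
//
//   static inline int ClampUint8(int value) {
//     if ((value & 0xFFFFFF00) == 0) return value;  // Already in [0, 255].
//     return value < 0 ? 0 : 255;                   // Saturate.
//   }
//
//   static inline int ClampDoubleToUint8(double value) {
//     if (!(value >= 0.0)) return 0;       // Negative or NaN.
//     double rounded = value + 0.5;        // Round half up...
//     if (rounded >= 256.0) return 255;    // ...saturating at 255.
//     return (int)rounded;                 // Truncate.
//   }
//
// The branchless tail of ClampUint8 works because setcc(negative, reg)
// leaves 1 for a negative value and 0 otherwise, and decb maps 1 -> 0 and
// 0 -> 0xFF, which are exactly the two saturated results.
// ---------------------------------------------------------------------------

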
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  movq(descriptors, FieldOperand(map,
                                 Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
  Move(descriptors, isolate()->factory()->empty_descriptor_array());
  bind(&not_smi);
}


void MacroAssembler::DispatchMap(Register obj,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
  j(equal, success, RelocInfo::CODE_TARGET);

  bind(&fail);
}


void MacroAssembler::AbortIfNotNumber(Register object) {
  Label ok;
  Condition is_smi = CheckSmi(object);
  j(is_smi, &ok, Label::kNear);
  Cmp(FieldOperand(object, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  Assert(equal, "Operand not a number");
  bind(&ok);
}


void MacroAssembler::AbortIfSmi(Register object) {
  Condition is_smi = CheckSmi(object);
  Assert(NegateCondition(is_smi), "Operand is a smi");
}


void MacroAssembler::AbortIfNotSmi(Register object) {
  Condition is_smi = CheckSmi(object);
  Assert(is_smi, "Operand is not a smi");
}


void MacroAssembler::AbortIfNotSmi(const Operand& object) {
  Condition is_smi = CheckSmi(object);
  Assert(is_smi, "Operand is not a smi");
}


void MacroAssembler::AbortIfNotString(Register object) {
  testb(object, Immediate(kSmiTagMask));
  Assert(not_equal, "Operand is not a string");
  push(object);
  movq(object, FieldOperand(object, HeapObject::kMapOffset));
  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
  pop(object);
  Assert(below, "Operand is not a string");
}


void MacroAssembler::AbortIfNotRootValue(Register src,
                                         Heap::RootListIndex root_value_index,
                                         const char* message) {
  ASSERT(!src.is(kScratchRegister));
  LoadRoot(kScratchRegister, root_value_index);
  cmpq(src, kScratchRegister);
  Check(equal, message);
}


Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  ASSERT(kNotStringTag != 0);
  testb(instance_type, Immediate(kIsNotStringMask));
  return zero;
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Label* miss) {
  // Check that the receiver isn't a smi.
  testl(function, Immediate(kSmiTagMask));
  j(zero, miss);

  // Check that the function really is a function.
  CmpObjectType(function, JS_FUNCTION_TYPE, result);
  j(not_equal, miss);

  // Make sure that the function has an instance prototype.
  Label non_instance;
  testb(FieldOperand(result, Map::kBitFieldOffset),
        Immediate(1 << Map::kHasNonInstancePrototype));
  j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  movq(result,
       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead.  This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  CompareRoot(result, Heap::kTheHoleValueRootIndex);
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, kScratchRegister);
  j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  movq(result, FieldOperand(result, Map::kPrototypeOffset));
  jmp(&done, Label::kNear);

  // Non-instance prototype: fetch the prototype from the constructor field
  // in the initial map.
  bind(&non_instance);
  movq(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    movl(counter_operand, Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    if (value == 1) {
      incl(counter_operand);
    } else {
      addl(counter_operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
    if (value == 1) {
      decl(counter_operand);
    } else {
      subl(counter_operand, Immediate(value));
    }
  }
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  ASSERT(allow_stub_calls());
  Set(rax, 0);  // No arguments.
  LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
  CEntryStub ces(1);
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif  // ENABLE_DEBUGGER_SUPPORT


void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // This macro takes the dst register to make the code more readable
  // at the call sites.  However, the dst register has to be rcx to
  // follow the calling convention which requires the call type to be
  // in rcx.
  ASSERT(dst.is(rcx));
  if (call_kind == CALL_AS_FUNCTION) {
    LoadSmiConstant(dst, Smi::FromInt(1));
  } else {
    LoadSmiConstant(dst, Smi::FromInt(0));
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  Label done;
  InvokePrologue(expected,
                 actual,
                 Handle<Code>::null(),
                 code,
                 &done,
                 flag,
                 Label::kNear,
                 call_wrapper,
                 call_kind);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(code));
    SetCallKind(rcx, call_kind);
    call(code);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(rcx, call_kind);
    jmp(code);
  }
  bind(&done);
}


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  Label done;
  Register dummy = rax;
  InvokePrologue(expected,
                 actual,
                 code,
                 dummy,
                 &done,
                 flag,
                 Label::kNear,
                 call_wrapper,
                 call_kind);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(code));
    SetCallKind(rcx, call_kind);
    Call(code, rmode);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(rcx, call_kind);
    Jump(code, rmode);
  }
  bind(&done);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  ASSERT(function.is(rdi));
  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
  movsxlq(rbx,
          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
  // Advance rdx to the end of the Code object header, to the start of
  // the executable code.
  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));

  ParameterCount expected(rbx);
  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::InvokeFunction(JSFunction* function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  ASSERT(function->is_compiled());
  // Get the function and set up the context.
  Move(rdi, Handle<JSFunction>(function));
  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

  if (V8::UseCrankshaft()) {
    // Since Crankshaft can recompile a function, we need to load
    // the Code object every time we call the function.
    movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
    ParameterCount expected(function->shared()->formal_parameter_count());
    InvokeCode(rdx, expected, actual, flag, call_wrapper);
  } else {
    // Invoke the cached code.
    Handle<Code> code(function->code());
    ParameterCount expected(function->shared()->formal_parameter_count());
    InvokeCode(code,
               expected,
               actual,
               RelocInfo::CODE_TARGET,
               flag,
               call_wrapper);
  }
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_register,
                                    Label* done,
                                    InvokeFlag flag,
                                    Label::Distance near_jump,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  Label invoke;
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      Set(rax, actual.immediate());
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done.  Skip the adaptation code by making
        // it look like we have a match between the expected and actual
        // number of arguments.
        definitely_matches = true;
      } else {
        Set(rbx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in a register, actual is an immediate.  This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      cmpq(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke, Label::kNear);
      ASSERT(expected.reg().is(rbx));
      Set(rax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers.  This
      // is the case when we invoke functions using call and apply.
      cmpq(expected.reg(), actual.reg());
      j(equal, &invoke, Label::kNear);
      ASSERT(actual.reg().is(rax));
      ASSERT(expected.reg().is(rbx));
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (!code_constant.is_null()) {
      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_register.is(rdx)) {
      movq(rdx, code_register);
    }

    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      SetCallKind(rcx, call_kind);
      Call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      jmp(done, near_jump);
    } else {
      SetCallKind(rcx, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}


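// ---------------------------------------------------------------------------
// Illustrative note (editor's addition, not part of the original V8 source):
// the decision logic in InvokePrologue above boils down to:
//
//   if (expected == actual) invoke directly;
//   else if (expected == kDontAdaptArgumentsSentinel) invoke directly,
//       letting the callee cope with any argument count;
//   else call (or tail-jump to) the ArgumentsAdaptorTrampoline with the
//       actual count in rax and the expected count in rbx.
//
// The rax/rbx assignments are the adaptor's inputs, which is why the ASSERTs
// above pin expected.reg() to rbx and actual.reg() to rax.
// ---------------------------------------------------------------------------

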
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(rbp);
  movq(rbp, rsp);
  push(rsi);  // Context.
  Push(Smi::FromInt(type));
  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  push(kScratchRegister);
  if (emit_debug_code()) {
    movq(kScratchRegister,
         isolate()->factory()->undefined_value(),
         RelocInfo::EMBEDDED_OBJECT);
    cmpq(Operand(rsp, 0), kScratchRegister);
    Check(not_equal, "code object not properly patched");
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    Move(kScratchRegister, Smi::FromInt(type));
    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
    Check(equal, "stack frame types must match");
  }
  movq(rsp, rbp);
  pop(rbp);
}


void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
  // Set up the frame structure on the stack.
  // All constants are relative to the frame pointer of the exit frame.
  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
  push(rbp);
  movq(rbp, rsp);

  // Reserve room for the entry stack pointer and push the code object.
  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
  push(Immediate(0));  // Saved entry sp, patched before call.
  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  if (save_rax) {
    movq(r14, rax);  // Back up rax in a callee-saved register.
  }

  Store(ExternalReference(Isolate::k_c_entry_fp_address, isolate()), rbp);
  Store(ExternalReference(Isolate::k_context_address, isolate()), rsi);
}


void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
                                            bool save_doubles) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  arg_stack_space += kShadowSpace;
#endif
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space = XMMRegister::kNumRegisters * kDoubleSize +
                arg_stack_space * kPointerSize;
    subq(rsp, Immediate(space));
    int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
      movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else if (arg_stack_space > 0) {
    subq(rsp, Immediate(arg_stack_space * kPointerSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    ASSERT(IsPowerOf2(kFrameAlignment));
    ASSERT(is_int8(kFrameAlignment));
    and_(rsp, Immediate(-kFrameAlignment));
  }

  // Patch the saved entry sp.
  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}


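// ---------------------------------------------------------------------------
// Illustrative note (editor's addition, not part of the original V8 source):
// given the ExitFrameConstants asserted in the prologue above and
// kPointerSize == 8, an exit frame is laid out relative to rbp as:
//
//   rbp + 16  caller SP displacement
//   rbp + 8   caller pc
//   rbp + 0   caller rbp
//   rbp - 8   saved entry sp (patched at the end of the epilogue)
//   rbp - 16  code object
//   rbp - 24  first saved XMM register, when save_doubles is requested
//
// LeaveExitFrame() restores the XMM registers from the same rbp-relative
// offsets, which is why both loops compute offset - (i + 1) * kDoubleSize.
// ---------------------------------------------------------------------------

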
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
  EnterExitFramePrologue(true);

  // Set up argv in callee-saved register r15.  It is reused in
  // LeaveExitFrame, so it must be retained across the C call.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  lea(r15, Operand(rbp, r14, times_pointer_size, offset));

  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}


void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
  EnterExitFramePrologue(false);
  EnterExitFrameEpilogue(arg_stack_space, false);
}


void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Registers:
  //   r15 : argv
  if (save_doubles) {
    int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
      movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
    }
  }
  // Get the return address from the stack and restore the frame pointer.
  movq(rcx, Operand(rbp, 1 * kPointerSize));
  movq(rbp, Operand(rbp, 0 * kPointerSize));

  // Drop everything up to and including the arguments and the receiver
  // from the caller stack.
  lea(rsp, Operand(r15, 1 * kPointerSize));

  // Push the return address to get ready to return.
  push(rcx);

  LeaveExitFrameEpilogue();
}


void MacroAssembler::LeaveApiExitFrame() {
  movq(rsp, rbp);
  pop(rbp);

  LeaveExitFrameEpilogue();
}


void MacroAssembler::LeaveExitFrameEpilogue() {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::k_context_address, isolate());
  Operand context_operand = ExternalOperand(context_address);
  movq(rsi, context_operand);
#ifdef DEBUG
  movq(context_operand, Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
                                       isolate());
  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
  movq(c_entry_fp_operand, Immediate(0));
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!scratch.is(kScratchRegister));
  // Load the current lexical context from the stack frame.
  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmpq(scratch, Immediate(0));
    Check(not_equal, "we should not have an empty lexical context");
  }
  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  movq(scratch, FieldOperand(scratch, offset));
  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check that the context is a global context.
  if (emit_debug_code()) {
    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
        isolate()->factory()->global_context_map());
    Check(equal, "JSGlobalObject::global_context should be a global context.");
  }

  // Check if both contexts are the same.
  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens.
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.

  // Check that the context is a global context.
  if (emit_debug_code()) {
    // Preserve the original value of holder_reg.
    push(holder_reg);
    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
    Check(not_equal, "JSGlobalProxy::context() should not be null.");

    // Read the first word and compare to global_context_map().
    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
    Check(equal, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);
  }

  movq(kScratchRegister,
       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
  int token_offset =
      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
  movq(scratch, FieldOperand(scratch, token_offset));
  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}


void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    ASSERT(!scratch.is_valid());
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    Operand top_operand = ExternalOperand(new_space_allocation_top);
    cmpq(result, top_operand);
    Check(equal, "Unexpected allocation top");
#endif
    return;
  }

  // Move address of new object to result.  Use scratch register if available,
  // and keep address in scratch until call to UpdateAllocationTopHelper.
  if (scratch.is_valid()) {
    LoadAddress(scratch, new_space_allocation_top);
    movq(result, Operand(scratch, 0));
  } else {
    Load(result, new_space_allocation_top);
  }
}


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch) {
  if (emit_debug_code()) {
    testq(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, "Unaligned allocation in new space");
  }

  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Update new top.
  if (scratch.is_valid()) {
    // Scratch already contains address of allocation top.
    movq(Operand(scratch, 0), result_end);
  } else {
    Store(new_space_allocation_top, result_end);
  }
}


void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register result_end,
                                        Register scratch,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        movl(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());

  Register top_reg = result_end.is_valid() ? result_end : result;

  if (!top_reg.is(result)) {
    movq(top_reg, result);
  }
  addq(top_reg, Immediate(object_size));
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
  cmpq(top_reg, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch);

  if (top_reg.is(result)) {
    if ((flags & TAG_OBJECT) != 0) {
      subq(result, Immediate(object_size - kHeapObjectTag));
    } else {
      subq(result, Immediate(object_size));
    }
  } else if ((flags & TAG_OBJECT) != 0) {
    // Tag the result if requested.
    addq(result, Immediate(kHeapObjectTag));
  }
}


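// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8
// source): the three AllocateInNewSpace overloads are bump-pointer
// allocators.  In pseudo-C the fast path is:
//
//   char* result = *new_space_allocation_top;
//   char* new_top = result + size;                 // May set the carry flag.
//   if (carried || new_top > *new_space_allocation_limit) goto gc_required;
//   *new_space_allocation_top = new_top;
//   return result + (tag_object ? kHeapObjectTag : 0);
//
// The carry check catches address-space wraparound, and the limit check
// bails out to the caller's gc_required label when new space is full.
// ---------------------------------------------------------------------------

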
void MacroAssembler::AllocateInNewSpace(int header_size,
                                        ScaleFactor element_size,
                                        Register element_count,
                                        Register result,
                                        Register result_end,
                                        Register scratch,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // Register element_count is not modified by the function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());

  // We assume that element_count * element_size + header_size does not
  // overflow.
  lea(result_end, Operand(element_count, element_size, header_size));
  addq(result_end, result);
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
  cmpq(result_end, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addq(result, Immediate(kHeapObjectTag));
  }
}


void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register result_end,
                                        Register scratch,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      movl(result, Immediate(0x7091));
      movl(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        movl(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  if (!object_size.is(result_end)) {
    movq(result_end, object_size);
  }
  addq(result_end, result);
  j(carry, gc_required);
  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
  cmpq(result_end, limit_operand);
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addq(result, Immediate(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, Immediate(~kHeapObjectTagMask));
  Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
  cmpq(object, top_operand);
  Check(below, "Undo allocation of non allocated memory");
#endif
  movq(top_operand, object);
}


void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch,
                                        Label* gc_required) {
  // Allocate heap number in new space.
  AllocateInNewSpace(HeapNumber::kSize,
                     result,
                     scratch,
                     no_reg,
                     gc_required,
                     TAG_OBJECT);

  // Set the map.
  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                               kObjectAlignmentMask;
  ASSERT(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                kHeaderAlignment));
  and_(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subq(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate two byte string in new space.
  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
                     times_1,
                     scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movq(FieldOperand(result, String::kLengthOffset), scratch1);
  movq(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


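// ---------------------------------------------------------------------------
// Illustrative note (editor's addition, not part of the original V8 source):
// the size computation at the top of AllocateTwoByteString reduces to
//
//   payload = RoundUp(2 * length + kHeaderAlignment, kObjectAlignment)
//             - kHeaderAlignment;
//
// which makes SeqTwoByteString::kHeaderSize + payload, the total object
// size, a multiple of kObjectAlignment.  AllocateAsciiString below performs
// the same computation with 1 byte per character instead of 2.
// ---------------------------------------------------------------------------

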
void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
                               kObjectAlignmentMask;
  movl(scratch1, length);
  ASSERT(kCharSize == 1);
  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
  and_(scratch1, Immediate(~kObjectAlignmentMask));
  if (kHeaderAlignment > 0) {
    subq(scratch1, Immediate(kHeaderAlignment));
  }

  // Allocate ascii string in new space.
  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
                     times_1,
                     scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
  Integer32ToSmi(scratch1, length);
  movq(FieldOperand(result, String::kLengthOffset), scratch1);
  movq(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
}


void MacroAssembler::AllocateConsString(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required) {
  // Allocate a cons string object in new space.
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  // Set the map.  The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  // Allocate an ascii cons string object in new space.
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  // Set the map.  The other fields are left uninitialized.
  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}


// Copy memory, byte-by-byte, from source to destination.  Not optimized for
// long or aligned copies.  The contents of scratch and length are destroyed.
// Destination is incremented by length; source, length and scratch are
// clobbered.
// A simpler loop is faster on small copies, but slower on large ones.
// The cld() instruction must have been emitted, to clear the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register destination,
                               Register source,
                               Register length,
                               int min_length,
                               Register scratch) {
  ASSERT(min_length >= 0);
  if (FLAG_debug_code) {
    cmpl(length, Immediate(min_length));
    Assert(greater_equal, "Invalid min_length");
  }
  Label loop, done, short_string, short_loop;

  const int kLongStringLimit = 20;
  if (min_length <= kLongStringLimit) {
    cmpl(length, Immediate(kLongStringLimit));
    j(less_equal, &short_string);
  }

  ASSERT(source.is(rsi));
  ASSERT(destination.is(rdi));
  ASSERT(length.is(rcx));

  // Because source is 8-byte aligned in our uses of this function,
  // we keep source aligned for the rep movs operation by copying the odd
  // bytes at the end of the ranges.
  movq(scratch, length);
  shrl(length, Immediate(3));
  repmovsq();
  // Move remaining bytes of length.
  andl(scratch, Immediate(0x7));
  movq(length, Operand(source, scratch, times_1, -8));
  movq(Operand(destination, scratch, times_1, -8), length);
  addq(destination, scratch);

  if (min_length <= kLongStringLimit) {
    jmp(&done);

    bind(&short_string);
    if (min_length == 0) {
      testl(length, length);
      j(zero, &done);
    }
    lea(scratch, Operand(destination, length, times_1, 0));

    bind(&short_loop);
    movb(length, Operand(source, 0));
    movb(Operand(destination, 0), length);
    incq(source);
    incq(destination);
    cmpq(destination, scratch);
    j(not_equal, &short_loop);

    bind(&done);
  }
}


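// ---------------------------------------------------------------------------
// Illustrative note (editor's addition, not part of the original V8 source):
// on the long path, CopyBytes moves length / 8 quadwords with rep movsq and
// then handles the 0..7 leftover bytes with a single unaligned 8-byte
// load/store that ends exactly at the end of the range.  That tail re-copies
// up to 7 bytes rep movsq already moved, which is harmless for
// non-overlapping buffers and cheaper than a byte loop; it assumes the total
// length is at least 8, which the kLongStringLimit guard guarantees.
// ---------------------------------------------------------------------------

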
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
    // Load the function context (which is the incoming, outer context).
    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
    for (int i = 1; i < context_chain_length; i++) {
      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
    }
    // The context may be an intermediate context, not a function context.
    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in rsi).
    movq(dst, rsi);
  }

  // We should not have found a 'with' context by walking the context chain
  // (i.e., the static scope chain and runtime context chain do not agree).
  // A variable occurring in such a scope should have slot type LOOKUP and
  // not CONTEXT.
  if (emit_debug_code()) {
    cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
    Check(equal, "Yo dawg, I heard you liked function contexts "
                 "so I put function contexts in all your contexts");
  }
}

#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  movq(function, Operand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map.  The global functions all have initial maps.
  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);
  }
}


int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
  ASSERT(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}


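// ---------------------------------------------------------------------------
// Illustrative note (editor's addition, not part of the original V8 source):
// worked examples of the slot computation above:
//
//   num_arguments | Win64 slots (max(n, 4)) | AMD64 SysV slots (max(n-6, 0))
//   --------------+-------------------------+-------------------------------
//         0       |            4            |               0
//         4       |            4            |               0
//         6       |            6            |               0
//         8       |            8            |               2
// ---------------------------------------------------------------------------

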
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = OS::ActivationFrameAlignment();
  ASSERT(frame_alignment != 0);
  ASSERT(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movq(kScratchRegister, rsp);
  ASSERT(IsPowerOf2(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
  and_(rsp, Immediate(-frame_alignment));
  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
}


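// ---------------------------------------------------------------------------
// Illustrative note (editor's addition, not part of the original V8 source):
// PrepareCallCFunction saves the original rsp in the extra slot it allocates
// just above the outgoing argument slots, then aligns rsp downward.
// CallCFunction below undoes this by reloading rsp from that same slot,
// Operand(rsp, argument_slots_on_stack * kPointerSize), so the two must
// always be paired and called with the same num_arguments value.
// ---------------------------------------------------------------------------

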
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  ASSERT(OS::ActivationFrameAlignment() != 0);
  ASSERT(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
}


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(Isolate::Current(), address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to
  // patch.  The size is adjusted with kGap in order for the assembler to
  // generate size bytes of instructions without failing with buffer size
  // constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64