therubyracer 0.4.9-x86-linux
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of therubyracer might be problematic. Click here for more details.
- data/.gitignore +11 -0
- data/.gitmodules +3 -0
- data/Doxyfile +1514 -0
- data/History.txt +43 -0
- data/README.rdoc +157 -0
- data/Rakefile +51 -0
- data/docs/data_conversion.txt +18 -0
- data/ext/v8/callbacks.cpp +160 -0
- data/ext/v8/callbacks.h +14 -0
- data/ext/v8/convert_ruby.cpp +8 -0
- data/ext/v8/convert_ruby.h +99 -0
- data/ext/v8/convert_string.cpp +10 -0
- data/ext/v8/convert_string.h +73 -0
- data/ext/v8/convert_v8.cpp +9 -0
- data/ext/v8/convert_v8.h +121 -0
- data/ext/v8/converters.cpp +83 -0
- data/ext/v8/converters.h +23 -0
- data/ext/v8/extconf.rb +36 -0
- data/ext/v8/upstream/2.0.6/.gitignore +26 -0
- data/ext/v8/upstream/2.0.6/AUTHORS +23 -0
- data/ext/v8/upstream/2.0.6/ChangeLog +1479 -0
- data/ext/v8/upstream/2.0.6/LICENSE +55 -0
- data/ext/v8/upstream/2.0.6/SConstruct +1028 -0
- data/ext/v8/upstream/2.0.6/include/v8-debug.h +275 -0
- data/ext/v8/upstream/2.0.6/include/v8.h +3236 -0
- data/ext/v8/upstream/2.0.6/src/SConscript +283 -0
- data/ext/v8/upstream/2.0.6/src/accessors.cc +695 -0
- data/ext/v8/upstream/2.0.6/src/accessors.h +114 -0
- data/ext/v8/upstream/2.0.6/src/allocation.cc +198 -0
- data/ext/v8/upstream/2.0.6/src/allocation.h +169 -0
- data/ext/v8/upstream/2.0.6/src/api.cc +3831 -0
- data/ext/v8/upstream/2.0.6/src/api.h +479 -0
- data/ext/v8/upstream/2.0.6/src/apinatives.js +110 -0
- data/ext/v8/upstream/2.0.6/src/apiutils.h +69 -0
- data/ext/v8/upstream/2.0.6/src/arguments.h +97 -0
- data/ext/v8/upstream/2.0.6/src/arm/assembler-arm-inl.h +277 -0
- data/ext/v8/upstream/2.0.6/src/arm/assembler-arm.cc +1821 -0
- data/ext/v8/upstream/2.0.6/src/arm/assembler-arm.h +1027 -0
- data/ext/v8/upstream/2.0.6/src/arm/assembler-thumb2-inl.h +267 -0
- data/ext/v8/upstream/2.0.6/src/arm/assembler-thumb2.cc +1821 -0
- data/ext/v8/upstream/2.0.6/src/arm/assembler-thumb2.h +1027 -0
- data/ext/v8/upstream/2.0.6/src/arm/builtins-arm.cc +1271 -0
- data/ext/v8/upstream/2.0.6/src/arm/codegen-arm-inl.h +74 -0
- data/ext/v8/upstream/2.0.6/src/arm/codegen-arm.cc +6682 -0
- data/ext/v8/upstream/2.0.6/src/arm/codegen-arm.h +535 -0
- data/ext/v8/upstream/2.0.6/src/arm/constants-arm.cc +112 -0
- data/ext/v8/upstream/2.0.6/src/arm/constants-arm.h +347 -0
- data/ext/v8/upstream/2.0.6/src/arm/cpu-arm.cc +132 -0
- data/ext/v8/upstream/2.0.6/src/arm/debug-arm.cc +213 -0
- data/ext/v8/upstream/2.0.6/src/arm/disasm-arm.cc +1166 -0
- data/ext/v8/upstream/2.0.6/src/arm/fast-codegen-arm.cc +1698 -0
- data/ext/v8/upstream/2.0.6/src/arm/frames-arm.cc +123 -0
- data/ext/v8/upstream/2.0.6/src/arm/frames-arm.h +162 -0
- data/ext/v8/upstream/2.0.6/src/arm/ic-arm.cc +849 -0
- data/ext/v8/upstream/2.0.6/src/arm/jump-target-arm.cc +238 -0
- data/ext/v8/upstream/2.0.6/src/arm/macro-assembler-arm.cc +1259 -0
- data/ext/v8/upstream/2.0.6/src/arm/macro-assembler-arm.h +423 -0
- data/ext/v8/upstream/2.0.6/src/arm/regexp-macro-assembler-arm.cc +1266 -0
- data/ext/v8/upstream/2.0.6/src/arm/regexp-macro-assembler-arm.h +282 -0
- data/ext/v8/upstream/2.0.6/src/arm/register-allocator-arm-inl.h +103 -0
- data/ext/v8/upstream/2.0.6/src/arm/register-allocator-arm.cc +59 -0
- data/ext/v8/upstream/2.0.6/src/arm/register-allocator-arm.h +43 -0
- data/ext/v8/upstream/2.0.6/src/arm/simulator-arm.cc +2264 -0
- data/ext/v8/upstream/2.0.6/src/arm/simulator-arm.h +306 -0
- data/ext/v8/upstream/2.0.6/src/arm/stub-cache-arm.cc +1516 -0
- data/ext/v8/upstream/2.0.6/src/arm/virtual-frame-arm.cc +412 -0
- data/ext/v8/upstream/2.0.6/src/arm/virtual-frame-arm.h +532 -0
- data/ext/v8/upstream/2.0.6/src/array.js +1154 -0
- data/ext/v8/upstream/2.0.6/src/assembler.cc +772 -0
- data/ext/v8/upstream/2.0.6/src/assembler.h +525 -0
- data/ext/v8/upstream/2.0.6/src/ast.cc +512 -0
- data/ext/v8/upstream/2.0.6/src/ast.h +1820 -0
- data/ext/v8/upstream/2.0.6/src/bootstrapper.cc +1680 -0
- data/ext/v8/upstream/2.0.6/src/bootstrapper.h +103 -0
- data/ext/v8/upstream/2.0.6/src/builtins.cc +851 -0
- data/ext/v8/upstream/2.0.6/src/builtins.h +245 -0
- data/ext/v8/upstream/2.0.6/src/bytecodes-irregexp.h +104 -0
- data/ext/v8/upstream/2.0.6/src/char-predicates-inl.h +86 -0
- data/ext/v8/upstream/2.0.6/src/char-predicates.h +65 -0
- data/ext/v8/upstream/2.0.6/src/checks.cc +100 -0
- data/ext/v8/upstream/2.0.6/src/checks.h +284 -0
- data/ext/v8/upstream/2.0.6/src/code-stubs.cc +164 -0
- data/ext/v8/upstream/2.0.6/src/code-stubs.h +164 -0
- data/ext/v8/upstream/2.0.6/src/code.h +68 -0
- data/ext/v8/upstream/2.0.6/src/codegen-inl.h +88 -0
- data/ext/v8/upstream/2.0.6/src/codegen.cc +504 -0
- data/ext/v8/upstream/2.0.6/src/codegen.h +522 -0
- data/ext/v8/upstream/2.0.6/src/compilation-cache.cc +490 -0
- data/ext/v8/upstream/2.0.6/src/compilation-cache.h +98 -0
- data/ext/v8/upstream/2.0.6/src/compiler.cc +1132 -0
- data/ext/v8/upstream/2.0.6/src/compiler.h +107 -0
- data/ext/v8/upstream/2.0.6/src/contexts.cc +256 -0
- data/ext/v8/upstream/2.0.6/src/contexts.h +345 -0
- data/ext/v8/upstream/2.0.6/src/conversions-inl.h +95 -0
- data/ext/v8/upstream/2.0.6/src/conversions.cc +709 -0
- data/ext/v8/upstream/2.0.6/src/conversions.h +118 -0
- data/ext/v8/upstream/2.0.6/src/counters.cc +78 -0
- data/ext/v8/upstream/2.0.6/src/counters.h +239 -0
- data/ext/v8/upstream/2.0.6/src/cpu.h +65 -0
- data/ext/v8/upstream/2.0.6/src/d8-debug.cc +345 -0
- data/ext/v8/upstream/2.0.6/src/d8-debug.h +155 -0
- data/ext/v8/upstream/2.0.6/src/d8-posix.cc +675 -0
- data/ext/v8/upstream/2.0.6/src/d8-readline.cc +128 -0
- data/ext/v8/upstream/2.0.6/src/d8-windows.cc +42 -0
- data/ext/v8/upstream/2.0.6/src/d8.cc +776 -0
- data/ext/v8/upstream/2.0.6/src/d8.h +225 -0
- data/ext/v8/upstream/2.0.6/src/d8.js +1625 -0
- data/ext/v8/upstream/2.0.6/src/date-delay.js +1138 -0
- data/ext/v8/upstream/2.0.6/src/dateparser-inl.h +114 -0
- data/ext/v8/upstream/2.0.6/src/dateparser.cc +186 -0
- data/ext/v8/upstream/2.0.6/src/dateparser.h +240 -0
- data/ext/v8/upstream/2.0.6/src/debug-agent.cc +425 -0
- data/ext/v8/upstream/2.0.6/src/debug-agent.h +129 -0
- data/ext/v8/upstream/2.0.6/src/debug-delay.js +2073 -0
- data/ext/v8/upstream/2.0.6/src/debug.cc +2751 -0
- data/ext/v8/upstream/2.0.6/src/debug.h +866 -0
- data/ext/v8/upstream/2.0.6/src/disasm.h +77 -0
- data/ext/v8/upstream/2.0.6/src/disassembler.cc +318 -0
- data/ext/v8/upstream/2.0.6/src/disassembler.h +56 -0
- data/ext/v8/upstream/2.0.6/src/dtoa-config.c +91 -0
- data/ext/v8/upstream/2.0.6/src/execution.cc +701 -0
- data/ext/v8/upstream/2.0.6/src/execution.h +312 -0
- data/ext/v8/upstream/2.0.6/src/factory.cc +957 -0
- data/ext/v8/upstream/2.0.6/src/factory.h +393 -0
- data/ext/v8/upstream/2.0.6/src/fast-codegen.cc +725 -0
- data/ext/v8/upstream/2.0.6/src/fast-codegen.h +371 -0
- data/ext/v8/upstream/2.0.6/src/flag-definitions.h +426 -0
- data/ext/v8/upstream/2.0.6/src/flags.cc +555 -0
- data/ext/v8/upstream/2.0.6/src/flags.h +81 -0
- data/ext/v8/upstream/2.0.6/src/frame-element.cc +45 -0
- data/ext/v8/upstream/2.0.6/src/frame-element.h +235 -0
- data/ext/v8/upstream/2.0.6/src/frames-inl.h +215 -0
- data/ext/v8/upstream/2.0.6/src/frames.cc +749 -0
- data/ext/v8/upstream/2.0.6/src/frames.h +659 -0
- data/ext/v8/upstream/2.0.6/src/func-name-inferrer.cc +76 -0
- data/ext/v8/upstream/2.0.6/src/func-name-inferrer.h +135 -0
- data/ext/v8/upstream/2.0.6/src/global-handles.cc +516 -0
- data/ext/v8/upstream/2.0.6/src/global-handles.h +180 -0
- data/ext/v8/upstream/2.0.6/src/globals.h +608 -0
- data/ext/v8/upstream/2.0.6/src/handles-inl.h +76 -0
- data/ext/v8/upstream/2.0.6/src/handles.cc +811 -0
- data/ext/v8/upstream/2.0.6/src/handles.h +367 -0
- data/ext/v8/upstream/2.0.6/src/hashmap.cc +226 -0
- data/ext/v8/upstream/2.0.6/src/hashmap.h +120 -0
- data/ext/v8/upstream/2.0.6/src/heap-inl.h +407 -0
- data/ext/v8/upstream/2.0.6/src/heap-profiler.cc +695 -0
- data/ext/v8/upstream/2.0.6/src/heap-profiler.h +277 -0
- data/ext/v8/upstream/2.0.6/src/heap.cc +4204 -0
- data/ext/v8/upstream/2.0.6/src/heap.h +1704 -0
- data/ext/v8/upstream/2.0.6/src/ia32/assembler-ia32-inl.h +325 -0
- data/ext/v8/upstream/2.0.6/src/ia32/assembler-ia32.cc +2375 -0
- data/ext/v8/upstream/2.0.6/src/ia32/assembler-ia32.h +914 -0
- data/ext/v8/upstream/2.0.6/src/ia32/builtins-ia32.cc +1222 -0
- data/ext/v8/upstream/2.0.6/src/ia32/codegen-ia32-inl.h +46 -0
- data/ext/v8/upstream/2.0.6/src/ia32/codegen-ia32.cc +9770 -0
- data/ext/v8/upstream/2.0.6/src/ia32/codegen-ia32.h +834 -0
- data/ext/v8/upstream/2.0.6/src/ia32/cpu-ia32.cc +79 -0
- data/ext/v8/upstream/2.0.6/src/ia32/debug-ia32.cc +208 -0
- data/ext/v8/upstream/2.0.6/src/ia32/disasm-ia32.cc +1357 -0
- data/ext/v8/upstream/2.0.6/src/ia32/fast-codegen-ia32.cc +1813 -0
- data/ext/v8/upstream/2.0.6/src/ia32/frames-ia32.cc +111 -0
- data/ext/v8/upstream/2.0.6/src/ia32/frames-ia32.h +135 -0
- data/ext/v8/upstream/2.0.6/src/ia32/ic-ia32.cc +1490 -0
- data/ext/v8/upstream/2.0.6/src/ia32/jump-target-ia32.cc +432 -0
- data/ext/v8/upstream/2.0.6/src/ia32/macro-assembler-ia32.cc +1517 -0
- data/ext/v8/upstream/2.0.6/src/ia32/macro-assembler-ia32.h +528 -0
- data/ext/v8/upstream/2.0.6/src/ia32/regexp-macro-assembler-ia32.cc +1219 -0
- data/ext/v8/upstream/2.0.6/src/ia32/regexp-macro-assembler-ia32.h +230 -0
- data/ext/v8/upstream/2.0.6/src/ia32/register-allocator-ia32-inl.h +82 -0
- data/ext/v8/upstream/2.0.6/src/ia32/register-allocator-ia32.cc +99 -0
- data/ext/v8/upstream/2.0.6/src/ia32/register-allocator-ia32.h +43 -0
- data/ext/v8/upstream/2.0.6/src/ia32/simulator-ia32.cc +30 -0
- data/ext/v8/upstream/2.0.6/src/ia32/simulator-ia32.h +62 -0
- data/ext/v8/upstream/2.0.6/src/ia32/stub-cache-ia32.cc +1961 -0
- data/ext/v8/upstream/2.0.6/src/ia32/virtual-frame-ia32.cc +1105 -0
- data/ext/v8/upstream/2.0.6/src/ia32/virtual-frame-ia32.h +580 -0
- data/ext/v8/upstream/2.0.6/src/ic-inl.h +93 -0
- data/ext/v8/upstream/2.0.6/src/ic.cc +1426 -0
- data/ext/v8/upstream/2.0.6/src/ic.h +443 -0
- data/ext/v8/upstream/2.0.6/src/interpreter-irregexp.cc +646 -0
- data/ext/v8/upstream/2.0.6/src/interpreter-irregexp.h +48 -0
- data/ext/v8/upstream/2.0.6/src/json-delay.js +254 -0
- data/ext/v8/upstream/2.0.6/src/jsregexp.cc +5234 -0
- data/ext/v8/upstream/2.0.6/src/jsregexp.h +1439 -0
- data/ext/v8/upstream/2.0.6/src/jump-target-inl.h +49 -0
- data/ext/v8/upstream/2.0.6/src/jump-target.cc +383 -0
- data/ext/v8/upstream/2.0.6/src/jump-target.h +280 -0
- data/ext/v8/upstream/2.0.6/src/list-inl.h +166 -0
- data/ext/v8/upstream/2.0.6/src/list.h +158 -0
- data/ext/v8/upstream/2.0.6/src/log-inl.h +126 -0
- data/ext/v8/upstream/2.0.6/src/log-utils.cc +503 -0
- data/ext/v8/upstream/2.0.6/src/log-utils.h +292 -0
- data/ext/v8/upstream/2.0.6/src/log.cc +1457 -0
- data/ext/v8/upstream/2.0.6/src/log.h +371 -0
- data/ext/v8/upstream/2.0.6/src/macro-assembler.h +93 -0
- data/ext/v8/upstream/2.0.6/src/macros.py +137 -0
- data/ext/v8/upstream/2.0.6/src/mark-compact.cc +2007 -0
- data/ext/v8/upstream/2.0.6/src/mark-compact.h +442 -0
- data/ext/v8/upstream/2.0.6/src/math.js +263 -0
- data/ext/v8/upstream/2.0.6/src/memory.h +74 -0
- data/ext/v8/upstream/2.0.6/src/messages.cc +177 -0
- data/ext/v8/upstream/2.0.6/src/messages.h +112 -0
- data/ext/v8/upstream/2.0.6/src/messages.js +937 -0
- data/ext/v8/upstream/2.0.6/src/mirror-delay.js +2332 -0
- data/ext/v8/upstream/2.0.6/src/mksnapshot.cc +169 -0
- data/ext/v8/upstream/2.0.6/src/natives.h +63 -0
- data/ext/v8/upstream/2.0.6/src/objects-debug.cc +1317 -0
- data/ext/v8/upstream/2.0.6/src/objects-inl.h +3044 -0
- data/ext/v8/upstream/2.0.6/src/objects.cc +8306 -0
- data/ext/v8/upstream/2.0.6/src/objects.h +4960 -0
- data/ext/v8/upstream/2.0.6/src/oprofile-agent.cc +116 -0
- data/ext/v8/upstream/2.0.6/src/oprofile-agent.h +69 -0
- data/ext/v8/upstream/2.0.6/src/parser.cc +4810 -0
- data/ext/v8/upstream/2.0.6/src/parser.h +195 -0
- data/ext/v8/upstream/2.0.6/src/platform-freebsd.cc +645 -0
- data/ext/v8/upstream/2.0.6/src/platform-linux.cc +808 -0
- data/ext/v8/upstream/2.0.6/src/platform-macos.cc +643 -0
- data/ext/v8/upstream/2.0.6/src/platform-nullos.cc +454 -0
- data/ext/v8/upstream/2.0.6/src/platform-openbsd.cc +597 -0
- data/ext/v8/upstream/2.0.6/src/platform-posix.cc +380 -0
- data/ext/v8/upstream/2.0.6/src/platform-win32.cc +1908 -0
- data/ext/v8/upstream/2.0.6/src/platform.h +556 -0
- data/ext/v8/upstream/2.0.6/src/prettyprinter.cc +1511 -0
- data/ext/v8/upstream/2.0.6/src/prettyprinter.h +219 -0
- data/ext/v8/upstream/2.0.6/src/property.cc +96 -0
- data/ext/v8/upstream/2.0.6/src/property.h +327 -0
- data/ext/v8/upstream/2.0.6/src/regexp-delay.js +406 -0
- data/ext/v8/upstream/2.0.6/src/regexp-macro-assembler-irregexp-inl.h +78 -0
- data/ext/v8/upstream/2.0.6/src/regexp-macro-assembler-irregexp.cc +464 -0
- data/ext/v8/upstream/2.0.6/src/regexp-macro-assembler-irregexp.h +141 -0
- data/ext/v8/upstream/2.0.6/src/regexp-macro-assembler-tracer.cc +356 -0
- data/ext/v8/upstream/2.0.6/src/regexp-macro-assembler-tracer.h +103 -0
- data/ext/v8/upstream/2.0.6/src/regexp-macro-assembler.cc +240 -0
- data/ext/v8/upstream/2.0.6/src/regexp-macro-assembler.h +220 -0
- data/ext/v8/upstream/2.0.6/src/regexp-stack.cc +103 -0
- data/ext/v8/upstream/2.0.6/src/regexp-stack.h +123 -0
- data/ext/v8/upstream/2.0.6/src/register-allocator-inl.h +74 -0
- data/ext/v8/upstream/2.0.6/src/register-allocator.cc +100 -0
- data/ext/v8/upstream/2.0.6/src/register-allocator.h +295 -0
- data/ext/v8/upstream/2.0.6/src/rewriter.cc +855 -0
- data/ext/v8/upstream/2.0.6/src/rewriter.h +54 -0
- data/ext/v8/upstream/2.0.6/src/runtime.cc +8163 -0
- data/ext/v8/upstream/2.0.6/src/runtime.h +432 -0
- data/ext/v8/upstream/2.0.6/src/runtime.js +626 -0
- data/ext/v8/upstream/2.0.6/src/scanner.cc +1098 -0
- data/ext/v8/upstream/2.0.6/src/scanner.h +425 -0
- data/ext/v8/upstream/2.0.6/src/scopeinfo.cc +649 -0
- data/ext/v8/upstream/2.0.6/src/scopeinfo.h +236 -0
- data/ext/v8/upstream/2.0.6/src/scopes.cc +963 -0
- data/ext/v8/upstream/2.0.6/src/scopes.h +401 -0
- data/ext/v8/upstream/2.0.6/src/serialize.cc +1260 -0
- data/ext/v8/upstream/2.0.6/src/serialize.h +404 -0
- data/ext/v8/upstream/2.0.6/src/shell.h +55 -0
- data/ext/v8/upstream/2.0.6/src/simulator.h +41 -0
- data/ext/v8/upstream/2.0.6/src/smart-pointer.h +109 -0
- data/ext/v8/upstream/2.0.6/src/snapshot-common.cc +97 -0
- data/ext/v8/upstream/2.0.6/src/snapshot-empty.cc +40 -0
- data/ext/v8/upstream/2.0.6/src/snapshot.h +59 -0
- data/ext/v8/upstream/2.0.6/src/spaces-inl.h +372 -0
- data/ext/v8/upstream/2.0.6/src/spaces.cc +2864 -0
- data/ext/v8/upstream/2.0.6/src/spaces.h +2072 -0
- data/ext/v8/upstream/2.0.6/src/string-stream.cc +584 -0
- data/ext/v8/upstream/2.0.6/src/string-stream.h +189 -0
- data/ext/v8/upstream/2.0.6/src/string.js +901 -0
- data/ext/v8/upstream/2.0.6/src/stub-cache.cc +1108 -0
- data/ext/v8/upstream/2.0.6/src/stub-cache.h +578 -0
- data/ext/v8/upstream/2.0.6/src/third_party/dtoa/COPYING +15 -0
- data/ext/v8/upstream/2.0.6/src/third_party/dtoa/dtoa.c +3330 -0
- data/ext/v8/upstream/2.0.6/src/third_party/valgrind/valgrind.h +3925 -0
- data/ext/v8/upstream/2.0.6/src/token.cc +56 -0
- data/ext/v8/upstream/2.0.6/src/token.h +270 -0
- data/ext/v8/upstream/2.0.6/src/top.cc +991 -0
- data/ext/v8/upstream/2.0.6/src/top.h +459 -0
- data/ext/v8/upstream/2.0.6/src/unicode-inl.h +238 -0
- data/ext/v8/upstream/2.0.6/src/unicode.cc +749 -0
- data/ext/v8/upstream/2.0.6/src/unicode.h +279 -0
- data/ext/v8/upstream/2.0.6/src/uri.js +415 -0
- data/ext/v8/upstream/2.0.6/src/usage-analyzer.cc +426 -0
- data/ext/v8/upstream/2.0.6/src/usage-analyzer.h +40 -0
- data/ext/v8/upstream/2.0.6/src/utils.cc +322 -0
- data/ext/v8/upstream/2.0.6/src/utils.h +592 -0
- data/ext/v8/upstream/2.0.6/src/v8-counters.cc +55 -0
- data/ext/v8/upstream/2.0.6/src/v8-counters.h +198 -0
- data/ext/v8/upstream/2.0.6/src/v8.cc +193 -0
- data/ext/v8/upstream/2.0.6/src/v8.h +119 -0
- data/ext/v8/upstream/2.0.6/src/v8natives.js +846 -0
- data/ext/v8/upstream/2.0.6/src/v8threads.cc +450 -0
- data/ext/v8/upstream/2.0.6/src/v8threads.h +144 -0
- data/ext/v8/upstream/2.0.6/src/variables.cc +163 -0
- data/ext/v8/upstream/2.0.6/src/variables.h +235 -0
- data/ext/v8/upstream/2.0.6/src/version.cc +88 -0
- data/ext/v8/upstream/2.0.6/src/version.h +64 -0
- data/ext/v8/upstream/2.0.6/src/virtual-frame.cc +381 -0
- data/ext/v8/upstream/2.0.6/src/virtual-frame.h +44 -0
- data/ext/v8/upstream/2.0.6/src/x64/assembler-x64-inl.h +352 -0
- data/ext/v8/upstream/2.0.6/src/x64/assembler-x64.cc +2539 -0
- data/ext/v8/upstream/2.0.6/src/x64/assembler-x64.h +1399 -0
- data/ext/v8/upstream/2.0.6/src/x64/builtins-x64.cc +1255 -0
- data/ext/v8/upstream/2.0.6/src/x64/codegen-x64-inl.h +46 -0
- data/ext/v8/upstream/2.0.6/src/x64/codegen-x64.cc +8223 -0
- data/ext/v8/upstream/2.0.6/src/x64/codegen-x64.h +785 -0
- data/ext/v8/upstream/2.0.6/src/x64/cpu-x64.cc +79 -0
- data/ext/v8/upstream/2.0.6/src/x64/debug-x64.cc +202 -0
- data/ext/v8/upstream/2.0.6/src/x64/disasm-x64.cc +1596 -0
- data/ext/v8/upstream/2.0.6/src/x64/fast-codegen-x64.cc +1820 -0
- data/ext/v8/upstream/2.0.6/src/x64/frames-x64.cc +109 -0
- data/ext/v8/upstream/2.0.6/src/x64/frames-x64.h +121 -0
- data/ext/v8/upstream/2.0.6/src/x64/ic-x64.cc +1392 -0
- data/ext/v8/upstream/2.0.6/src/x64/jump-target-x64.cc +432 -0
- data/ext/v8/upstream/2.0.6/src/x64/macro-assembler-x64.cc +2409 -0
- data/ext/v8/upstream/2.0.6/src/x64/macro-assembler-x64.h +765 -0
- data/ext/v8/upstream/2.0.6/src/x64/regexp-macro-assembler-x64.cc +1337 -0
- data/ext/v8/upstream/2.0.6/src/x64/regexp-macro-assembler-x64.h +295 -0
- data/ext/v8/upstream/2.0.6/src/x64/register-allocator-x64-inl.h +86 -0
- data/ext/v8/upstream/2.0.6/src/x64/register-allocator-x64.cc +84 -0
- data/ext/v8/upstream/2.0.6/src/x64/register-allocator-x64.h +43 -0
- data/ext/v8/upstream/2.0.6/src/x64/simulator-x64.cc +27 -0
- data/ext/v8/upstream/2.0.6/src/x64/simulator-x64.h +63 -0
- data/ext/v8/upstream/2.0.6/src/x64/stub-cache-x64.cc +1884 -0
- data/ext/v8/upstream/2.0.6/src/x64/virtual-frame-x64.cc +1089 -0
- data/ext/v8/upstream/2.0.6/src/x64/virtual-frame-x64.h +560 -0
- data/ext/v8/upstream/2.0.6/src/zone-inl.h +297 -0
- data/ext/v8/upstream/2.0.6/src/zone.cc +193 -0
- data/ext/v8/upstream/2.0.6/src/zone.h +305 -0
- data/ext/v8/upstream/2.0.6/tools/codemap.js +258 -0
- data/ext/v8/upstream/2.0.6/tools/consarray.js +93 -0
- data/ext/v8/upstream/2.0.6/tools/csvparser.js +98 -0
- data/ext/v8/upstream/2.0.6/tools/gyp/v8.gyp +620 -0
- data/ext/v8/upstream/2.0.6/tools/js2c.py +376 -0
- data/ext/v8/upstream/2.0.6/tools/jsmin.py +280 -0
- data/ext/v8/upstream/2.0.6/tools/linux-tick-processor +24 -0
- data/ext/v8/upstream/2.0.6/tools/linux-tick-processor.py +78 -0
- data/ext/v8/upstream/2.0.6/tools/logreader.js +320 -0
- data/ext/v8/upstream/2.0.6/tools/mac-nm +18 -0
- data/ext/v8/upstream/2.0.6/tools/mac-tick-processor +6 -0
- data/ext/v8/upstream/2.0.6/tools/oprofile/annotate +7 -0
- data/ext/v8/upstream/2.0.6/tools/oprofile/common +19 -0
- data/ext/v8/upstream/2.0.6/tools/oprofile/dump +7 -0
- data/ext/v8/upstream/2.0.6/tools/oprofile/report +7 -0
- data/ext/v8/upstream/2.0.6/tools/oprofile/reset +7 -0
- data/ext/v8/upstream/2.0.6/tools/oprofile/run +14 -0
- data/ext/v8/upstream/2.0.6/tools/oprofile/shutdown +7 -0
- data/ext/v8/upstream/2.0.6/tools/oprofile/start +7 -0
- data/ext/v8/upstream/2.0.6/tools/presubmit.py +299 -0
- data/ext/v8/upstream/2.0.6/tools/process-heap-prof.py +120 -0
- data/ext/v8/upstream/2.0.6/tools/profile.js +621 -0
- data/ext/v8/upstream/2.0.6/tools/profile_view.js +224 -0
- data/ext/v8/upstream/2.0.6/tools/run-valgrind.py +77 -0
- data/ext/v8/upstream/2.0.6/tools/splaytree.js +322 -0
- data/ext/v8/upstream/2.0.6/tools/splaytree.py +226 -0
- data/ext/v8/upstream/2.0.6/tools/stats-viewer.py +456 -0
- data/ext/v8/upstream/2.0.6/tools/test.py +1370 -0
- data/ext/v8/upstream/2.0.6/tools/tickprocessor-driver.js +53 -0
- data/ext/v8/upstream/2.0.6/tools/tickprocessor.js +731 -0
- data/ext/v8/upstream/2.0.6/tools/tickprocessor.py +535 -0
- data/ext/v8/upstream/2.0.6/tools/utils.py +82 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/README.txt +71 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/arm.vsprops +14 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/common.vsprops +35 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/d8.vcproj +199 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/d8_arm.vcproj +199 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/d8_x64.vcproj +201 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/d8js2c.cmd +6 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/debug.vsprops +17 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/ia32.vsprops +13 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/js2c.cmd +6 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/release.vsprops +24 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8.sln +101 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8.vcproj +223 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_arm.sln +74 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_arm.vcproj +223 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_base.vcproj +971 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_base_arm.vcproj +983 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_base_x64.vcproj +959 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_cctest.vcproj +255 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_cctest_arm.vcproj +243 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_cctest_x64.vcproj +257 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_mksnapshot.vcproj +151 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_mksnapshot_x64.vcproj +151 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_process_sample.vcproj +151 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_process_sample_arm.vcproj +151 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_process_sample_x64.vcproj +151 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_shell_sample.vcproj +151 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_shell_sample_arm.vcproj +151 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_shell_sample_x64.vcproj +153 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_snapshot.vcproj +142 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_snapshot_cc.vcproj +92 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_snapshot_cc_x64.vcproj +92 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_snapshot_x64.vcproj +142 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_x64.sln +101 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/v8_x64.vcproj +223 -0
- data/ext/v8/upstream/2.0.6/tools/visual_studio/x64.vsprops +13 -0
- data/ext/v8/upstream/2.0.6/tools/windows-tick-processor.bat +5 -0
- data/ext/v8/upstream/2.0.6/tools/windows-tick-processor.py +137 -0
- data/ext/v8/upstream/Makefile +31 -0
- data/ext/v8/upstream/no-strict-aliasing.patch +13 -0
- data/ext/v8/upstream/scons/CHANGES.txt +5183 -0
- data/ext/v8/upstream/scons/LICENSE.txt +20 -0
- data/ext/v8/upstream/scons/MANIFEST +202 -0
- data/ext/v8/upstream/scons/PKG-INFO +13 -0
- data/ext/v8/upstream/scons/README.txt +273 -0
- data/ext/v8/upstream/scons/RELEASE.txt +1040 -0
- data/ext/v8/upstream/scons/engine/SCons/Action.py +1256 -0
- data/ext/v8/upstream/scons/engine/SCons/Builder.py +868 -0
- data/ext/v8/upstream/scons/engine/SCons/CacheDir.py +217 -0
- data/ext/v8/upstream/scons/engine/SCons/Conftest.py +794 -0
- data/ext/v8/upstream/scons/engine/SCons/Debug.py +237 -0
- data/ext/v8/upstream/scons/engine/SCons/Defaults.py +485 -0
- data/ext/v8/upstream/scons/engine/SCons/Environment.py +2327 -0
- data/ext/v8/upstream/scons/engine/SCons/Errors.py +207 -0
- data/ext/v8/upstream/scons/engine/SCons/Executor.py +636 -0
- data/ext/v8/upstream/scons/engine/SCons/Job.py +435 -0
- data/ext/v8/upstream/scons/engine/SCons/Memoize.py +292 -0
- data/ext/v8/upstream/scons/engine/SCons/Node/Alias.py +153 -0
- data/ext/v8/upstream/scons/engine/SCons/Node/FS.py +3220 -0
- data/ext/v8/upstream/scons/engine/SCons/Node/Python.py +128 -0
- data/ext/v8/upstream/scons/engine/SCons/Node/__init__.py +1341 -0
- data/ext/v8/upstream/scons/engine/SCons/Options/BoolOption.py +50 -0
- data/ext/v8/upstream/scons/engine/SCons/Options/EnumOption.py +50 -0
- data/ext/v8/upstream/scons/engine/SCons/Options/ListOption.py +50 -0
- data/ext/v8/upstream/scons/engine/SCons/Options/PackageOption.py +50 -0
- data/ext/v8/upstream/scons/engine/SCons/Options/PathOption.py +76 -0
- data/ext/v8/upstream/scons/engine/SCons/Options/__init__.py +74 -0
- data/ext/v8/upstream/scons/engine/SCons/PathList.py +232 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/__init__.py +236 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/aix.py +70 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/cygwin.py +55 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/darwin.py +46 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/hpux.py +46 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/irix.py +44 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/os2.py +58 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/posix.py +264 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/sunos.py +50 -0
- data/ext/v8/upstream/scons/engine/SCons/Platform/win32.py +386 -0
- data/ext/v8/upstream/scons/engine/SCons/SConf.py +1038 -0
- data/ext/v8/upstream/scons/engine/SCons/SConsign.py +381 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/C.py +132 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/D.py +74 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/Dir.py +111 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/Fortran.py +320 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/IDL.py +48 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/LaTeX.py +378 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/Prog.py +103 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/RC.py +55 -0
- data/ext/v8/upstream/scons/engine/SCons/Scanner/__init__.py +415 -0
- data/ext/v8/upstream/scons/engine/SCons/Script/Interactive.py +386 -0
- data/ext/v8/upstream/scons/engine/SCons/Script/Main.py +1360 -0
- data/ext/v8/upstream/scons/engine/SCons/Script/SConsOptions.py +944 -0
- data/ext/v8/upstream/scons/engine/SCons/Script/SConscript.py +642 -0
- data/ext/v8/upstream/scons/engine/SCons/Script/__init__.py +414 -0
- data/ext/v8/upstream/scons/engine/SCons/Sig.py +63 -0
- data/ext/v8/upstream/scons/engine/SCons/Subst.py +911 -0
- data/ext/v8/upstream/scons/engine/SCons/Taskmaster.py +1030 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/386asm.py +61 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/BitKeeper.py +65 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/CVS.py +73 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/FortranCommon.py +247 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/JavaCommon.py +324 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/__init__.py +56 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/arch.py +61 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/common.py +210 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/netframework.py +84 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/sdk.py +321 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/vc.py +367 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/vs.py +497 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/Perforce.py +104 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/PharLapCommon.py +138 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/RCS.py +64 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/SCCS.py +64 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/Subversion.py +71 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/__init__.py +675 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/aixc++.py +82 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/aixcc.py +74 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/aixf77.py +80 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/aixlink.py +76 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/applelink.py +71 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/ar.py +63 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/as.py +78 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/bcc32.py +82 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/c++.py +99 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/cc.py +114 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/cvf.py +58 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/default.py +50 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/dmd.py +224 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/dvi.py +64 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/dvipdf.py +125 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/dvips.py +94 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/f77.py +62 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/f90.py +62 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/f95.py +63 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/filesystem.py +98 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/fortran.py +63 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/g++.py +90 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/g77.py +73 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/gas.py +53 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/gcc.py +80 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/gfortran.py +64 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/gnulink.py +63 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/gs.py +81 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/hpc++.py +85 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/hpcc.py +53 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/hplink.py +77 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/icc.py +59 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/icl.py +52 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/ifl.py +72 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/ifort.py +90 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/ilink.py +59 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/ilink32.py +60 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/install.py +229 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/intelc.py +490 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/ipkg.py +71 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/jar.py +110 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/javac.py +234 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/javah.py +138 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/latex.py +79 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/lex.py +99 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/link.py +121 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/linkloc.py +112 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/m4.py +63 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/masm.py +77 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/midl.py +90 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/mingw.py +159 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/mslib.py +64 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/mslink.py +266 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/mssdk.py +50 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/msvc.py +269 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/msvs.py +1439 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/mwcc.py +208 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/mwld.py +107 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/nasm.py +72 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/__init__.py +314 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/ipk.py +185 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/msi.py +526 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/rpm.py +367 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/src_tarbz2.py +43 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/src_targz.py +43 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/src_zip.py +43 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/tarbz2.py +44 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/targz.py +44 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/zip.py +44 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/pdf.py +78 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/pdflatex.py +83 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/pdftex.py +108 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/qt.py +336 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/rmic.py +121 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/rpcgen.py +70 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/rpm.py +132 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sgiar.py +68 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sgic++.py +58 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sgicc.py +53 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sgilink.py +63 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sunar.py +67 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sunc++.py +142 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/suncc.py +58 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sunf77.py +63 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sunf90.py +64 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sunf95.py +64 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/sunlink.py +77 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/swig.py +186 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/tar.py +73 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/tex.py +805 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/textfile.py +175 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/tlib.py +53 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/wix.py +100 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/yacc.py +131 -0
- data/ext/v8/upstream/scons/engine/SCons/Tool/zip.py +100 -0
- data/ext/v8/upstream/scons/engine/SCons/Util.py +1645 -0
- data/ext/v8/upstream/scons/engine/SCons/Variables/BoolVariable.py +91 -0
- data/ext/v8/upstream/scons/engine/SCons/Variables/EnumVariable.py +107 -0
- data/ext/v8/upstream/scons/engine/SCons/Variables/ListVariable.py +139 -0
- data/ext/v8/upstream/scons/engine/SCons/Variables/PackageVariable.py +109 -0
- data/ext/v8/upstream/scons/engine/SCons/Variables/PathVariable.py +147 -0
- data/ext/v8/upstream/scons/engine/SCons/Variables/__init__.py +317 -0
- data/ext/v8/upstream/scons/engine/SCons/Warnings.py +228 -0
- data/ext/v8/upstream/scons/engine/SCons/__init__.py +49 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/__init__.py +302 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_UserString.py +98 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_hashlib.py +91 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_itertools.py +124 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_optparse.py +1725 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_sets.py +583 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_sets15.py +176 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_shlex.py +325 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_subprocess.py +1296 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/_scons_textwrap.py +382 -0
- data/ext/v8/upstream/scons/engine/SCons/compat/builtins.py +187 -0
- data/ext/v8/upstream/scons/engine/SCons/cpp.py +598 -0
- data/ext/v8/upstream/scons/engine/SCons/dblite.py +248 -0
- data/ext/v8/upstream/scons/engine/SCons/exitfuncs.py +77 -0
- data/ext/v8/upstream/scons/os_spawnv_fix.diff +83 -0
- data/ext/v8/upstream/scons/scons-time.1 +1017 -0
- data/ext/v8/upstream/scons/scons.1 +15179 -0
- data/ext/v8/upstream/scons/sconsign.1 +208 -0
- data/ext/v8/upstream/scons/script/scons +184 -0
- data/ext/v8/upstream/scons/script/scons-time +1529 -0
- data/ext/v8/upstream/scons/script/scons.bat +31 -0
- data/ext/v8/upstream/scons/script/sconsign +508 -0
- data/ext/v8/upstream/scons/setup.cfg +6 -0
- data/ext/v8/upstream/scons/setup.py +427 -0
- data/ext/v8/v8.cpp +89 -0
- data/ext/v8/v8_cxt.cpp +92 -0
- data/ext/v8/v8_cxt.h +20 -0
- data/ext/v8/v8_func.cpp +10 -0
- data/ext/v8/v8_func.h +11 -0
- data/ext/v8/v8_msg.cpp +54 -0
- data/ext/v8/v8_msg.h +18 -0
- data/ext/v8/v8_obj.cpp +52 -0
- data/ext/v8/v8_obj.h +13 -0
- data/ext/v8/v8_ref.cpp +26 -0
- data/ext/v8/v8_ref.h +31 -0
- data/ext/v8/v8_script.cpp +20 -0
- data/ext/v8/v8_script.h +8 -0
- data/ext/v8/v8_standalone.cpp +69 -0
- data/ext/v8/v8_standalone.h +31 -0
- data/ext/v8/v8_str.cpp +17 -0
- data/ext/v8/v8_str.h +9 -0
- data/ext/v8/v8_template.cpp +53 -0
- data/ext/v8/v8_template.h +13 -0
- data/lib/v8.rb +10 -0
- data/lib/v8/context.rb +101 -0
- data/lib/v8/object.rb +38 -0
- data/lib/v8/to.rb +33 -0
- data/lib/v8/v8.so +0 -0
- data/script/console +10 -0
- data/script/destroy +14 -0
- data/script/generate +14 -0
- data/spec/ext/cxt_spec.rb +25 -0
- data/spec/ext/obj_spec.rb +13 -0
- data/spec/redjs/jsapi_spec.rb +405 -0
- data/spec/redjs/tap.rb +8 -0
- data/spec/redjs_helper.rb +3 -0
- data/spec/spec.opts +1 -0
- data/spec/spec_helper.rb +14 -0
- data/spec/v8/to_spec.rb +15 -0
- data/tasks/rspec.rake +21 -0
- data/therubyracer.gemspec +680 -0
- metadata +697 -0
@@ -0,0 +1,2864 @@
|
|
1
|
+
// Copyright 2006-2008 the V8 project authors. All rights reserved.
|
2
|
+
// Redistribution and use in source and binary forms, with or without
|
3
|
+
// modification, are permitted provided that the following conditions are
|
4
|
+
// met:
|
5
|
+
//
|
6
|
+
// * Redistributions of source code must retain the above copyright
|
7
|
+
// notice, this list of conditions and the following disclaimer.
|
8
|
+
// * Redistributions in binary form must reproduce the above
|
9
|
+
// copyright notice, this list of conditions and the following
|
10
|
+
// disclaimer in the documentation and/or other materials provided
|
11
|
+
// with the distribution.
|
12
|
+
// * Neither the name of Google Inc. nor the names of its
|
13
|
+
// contributors may be used to endorse or promote products derived
|
14
|
+
// from this software without specific prior written permission.
|
15
|
+
//
|
16
|
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
17
|
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
18
|
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
19
|
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
20
|
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21
|
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22
|
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
23
|
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
24
|
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
25
|
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
26
|
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
27
|
+
|
28
|
+
#include "v8.h"
|
29
|
+
|
30
|
+
#include "macro-assembler.h"
|
31
|
+
#include "mark-compact.h"
|
32
|
+
#include "platform.h"
|
33
|
+
|
34
|
+
namespace v8 {
|
35
|
+
namespace internal {
|
36
|
+
|
37
|
+
// For contiguous spaces, top should be in the space (or at the end) and limit
|
38
|
+
// should be the end of the space.
|
39
|
+
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
|
40
|
+
ASSERT((space).low() <= (info).top \
|
41
|
+
&& (info).top <= (space).high() \
|
42
|
+
&& (info).limit == (space).high())
|
43
|
+
|
44
|
+
|
45
|
+
// ----------------------------------------------------------------------------
|
46
|
+
// HeapObjectIterator
|
47
|
+
|
48
|
+
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
|
49
|
+
Initialize(space->bottom(), space->top(), NULL);
|
50
|
+
}
|
51
|
+
|
52
|
+
|
53
|
+
HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
|
54
|
+
HeapObjectCallback size_func) {
|
55
|
+
Initialize(space->bottom(), space->top(), size_func);
|
56
|
+
}
|
57
|
+
|
58
|
+
|
59
|
+
HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
|
60
|
+
Initialize(start, space->top(), NULL);
|
61
|
+
}
|
62
|
+
|
63
|
+
|
64
|
+
HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
|
65
|
+
HeapObjectCallback size_func) {
|
66
|
+
Initialize(start, space->top(), size_func);
|
67
|
+
}
|
68
|
+
|
69
|
+
|
70
|
+
void HeapObjectIterator::Initialize(Address cur, Address end,
|
71
|
+
HeapObjectCallback size_f) {
|
72
|
+
cur_addr_ = cur;
|
73
|
+
end_addr_ = end;
|
74
|
+
end_page_ = Page::FromAllocationTop(end);
|
75
|
+
size_func_ = size_f;
|
76
|
+
Page* p = Page::FromAllocationTop(cur_addr_);
|
77
|
+
cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
|
78
|
+
|
79
|
+
#ifdef DEBUG
|
80
|
+
Verify();
|
81
|
+
#endif
|
82
|
+
}
|
83
|
+
|
84
|
+
|
85
|
+
bool HeapObjectIterator::HasNextInNextPage() {
|
86
|
+
if (cur_addr_ == end_addr_) return false;
|
87
|
+
|
88
|
+
Page* cur_page = Page::FromAllocationTop(cur_addr_);
|
89
|
+
cur_page = cur_page->next_page();
|
90
|
+
ASSERT(cur_page->is_valid());
|
91
|
+
|
92
|
+
cur_addr_ = cur_page->ObjectAreaStart();
|
93
|
+
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
|
94
|
+
|
95
|
+
if (cur_addr_ == end_addr_) return false;
|
96
|
+
ASSERT(cur_addr_ < cur_limit_);
|
97
|
+
#ifdef DEBUG
|
98
|
+
Verify();
|
99
|
+
#endif
|
100
|
+
return true;
|
101
|
+
}
|
102
|
+
|
103
|
+
|
104
|
+
#ifdef DEBUG
|
105
|
+
void HeapObjectIterator::Verify() {
|
106
|
+
Page* p = Page::FromAllocationTop(cur_addr_);
|
107
|
+
ASSERT(p == Page::FromAllocationTop(cur_limit_));
|
108
|
+
ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
|
109
|
+
}
|
110
|
+
#endif
|
111
|
+
|
112
|
+
|
113
|
+
// -----------------------------------------------------------------------------
|
114
|
+
// PageIterator
|
115
|
+
|
116
|
+
PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
|
117
|
+
prev_page_ = NULL;
|
118
|
+
switch (mode) {
|
119
|
+
case PAGES_IN_USE:
|
120
|
+
stop_page_ = space->AllocationTopPage();
|
121
|
+
break;
|
122
|
+
case PAGES_USED_BY_MC:
|
123
|
+
stop_page_ = space->MCRelocationTopPage();
|
124
|
+
break;
|
125
|
+
case ALL_PAGES:
|
126
|
+
#ifdef DEBUG
|
127
|
+
// Verify that the cached last page in the space is actually the
|
128
|
+
// last page.
|
129
|
+
for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
|
130
|
+
if (!p->next_page()->is_valid()) {
|
131
|
+
ASSERT(space->last_page_ == p);
|
132
|
+
}
|
133
|
+
}
|
134
|
+
#endif
|
135
|
+
stop_page_ = space->last_page_;
|
136
|
+
break;
|
137
|
+
}
|
138
|
+
}
|
139
|
+
|
140
|
+
|
141
|
+
// -----------------------------------------------------------------------------
|
142
|
+
// Page
|
143
|
+
|
144
|
+
#ifdef DEBUG
|
145
|
+
Page::RSetState Page::rset_state_ = Page::IN_USE;
|
146
|
+
#endif
|
147
|
+
|
148
|
+
// -----------------------------------------------------------------------------
|
149
|
+
// CodeRange
|
150
|
+
|
151
|
+
List<CodeRange::FreeBlock> CodeRange::free_list_(0);
|
152
|
+
List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
|
153
|
+
int CodeRange::current_allocation_block_index_ = 0;
|
154
|
+
VirtualMemory* CodeRange::code_range_ = NULL;
|
155
|
+
|
156
|
+
|
157
|
+
bool CodeRange::Setup(const size_t requested) {
|
158
|
+
ASSERT(code_range_ == NULL);
|
159
|
+
|
160
|
+
code_range_ = new VirtualMemory(requested);
|
161
|
+
CHECK(code_range_ != NULL);
|
162
|
+
if (!code_range_->IsReserved()) {
|
163
|
+
delete code_range_;
|
164
|
+
code_range_ = NULL;
|
165
|
+
return false;
|
166
|
+
}
|
167
|
+
|
168
|
+
// We are sure that we have mapped a block of requested addresses.
|
169
|
+
ASSERT(code_range_->size() == requested);
|
170
|
+
LOG(NewEvent("CodeRange", code_range_->address(), requested));
|
171
|
+
allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
|
172
|
+
current_allocation_block_index_ = 0;
|
173
|
+
return true;
|
174
|
+
}
|
175
|
+
|
176
|
+
|
177
|
+
int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
|
178
|
+
const FreeBlock* right) {
|
179
|
+
// The entire point of CodeRange is that the difference between two
|
180
|
+
// addresses in the range can be represented as a signed 32-bit int,
|
181
|
+
// so the cast is semantically correct.
|
182
|
+
return static_cast<int>(left->start - right->start);
|
183
|
+
}
|
184
|
+
|
185
|
+
|
186
|
+
void CodeRange::GetNextAllocationBlock(size_t requested) {
|
187
|
+
for (current_allocation_block_index_++;
|
188
|
+
current_allocation_block_index_ < allocation_list_.length();
|
189
|
+
current_allocation_block_index_++) {
|
190
|
+
if (requested <= allocation_list_[current_allocation_block_index_].size) {
|
191
|
+
return; // Found a large enough allocation block.
|
192
|
+
}
|
193
|
+
}
|
194
|
+
|
195
|
+
// Sort and merge the free blocks on the free list and the allocation list.
|
196
|
+
free_list_.AddAll(allocation_list_);
|
197
|
+
allocation_list_.Clear();
|
198
|
+
free_list_.Sort(&CompareFreeBlockAddress);
|
199
|
+
for (int i = 0; i < free_list_.length();) {
|
200
|
+
FreeBlock merged = free_list_[i];
|
201
|
+
i++;
|
202
|
+
// Add adjacent free blocks to the current merged block.
|
203
|
+
while (i < free_list_.length() &&
|
204
|
+
free_list_[i].start == merged.start + merged.size) {
|
205
|
+
merged.size += free_list_[i].size;
|
206
|
+
i++;
|
207
|
+
}
|
208
|
+
if (merged.size > 0) {
|
209
|
+
allocation_list_.Add(merged);
|
210
|
+
}
|
211
|
+
}
|
212
|
+
free_list_.Clear();
|
213
|
+
|
214
|
+
for (current_allocation_block_index_ = 0;
|
215
|
+
current_allocation_block_index_ < allocation_list_.length();
|
216
|
+
current_allocation_block_index_++) {
|
217
|
+
if (requested <= allocation_list_[current_allocation_block_index_].size) {
|
218
|
+
return; // Found a large enough allocation block.
|
219
|
+
}
|
220
|
+
}
|
221
|
+
|
222
|
+
// Code range is full or too fragmented.
|
223
|
+
V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
|
224
|
+
}
|
225
|
+
|
226
|
+
|
227
|
+
|
228
|
+
void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
|
229
|
+
ASSERT(current_allocation_block_index_ < allocation_list_.length());
|
230
|
+
if (requested > allocation_list_[current_allocation_block_index_].size) {
|
231
|
+
// Find an allocation block large enough. This function call may
|
232
|
+
// call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
|
233
|
+
GetNextAllocationBlock(requested);
|
234
|
+
}
|
235
|
+
// Commit the requested memory at the start of the current allocation block.
|
236
|
+
*allocated = RoundUp(requested, Page::kPageSize);
|
237
|
+
FreeBlock current = allocation_list_[current_allocation_block_index_];
|
238
|
+
if (*allocated >= current.size - Page::kPageSize) {
|
239
|
+
// Don't leave a small free block, useless for a large object or chunk.
|
240
|
+
*allocated = current.size;
|
241
|
+
}
|
242
|
+
ASSERT(*allocated <= current.size);
|
243
|
+
if (!code_range_->Commit(current.start, *allocated, true)) {
|
244
|
+
*allocated = 0;
|
245
|
+
return NULL;
|
246
|
+
}
|
247
|
+
allocation_list_[current_allocation_block_index_].start += *allocated;
|
248
|
+
allocation_list_[current_allocation_block_index_].size -= *allocated;
|
249
|
+
if (*allocated == current.size) {
|
250
|
+
GetNextAllocationBlock(0); // This block is used up, get the next one.
|
251
|
+
}
|
252
|
+
return current.start;
|
253
|
+
}
|
254
|
+
|
255
|
+
|
256
|
+
void CodeRange::FreeRawMemory(void* address, size_t length) {
|
257
|
+
free_list_.Add(FreeBlock(address, length));
|
258
|
+
code_range_->Uncommit(address, length);
|
259
|
+
}
|
260
|
+
|
261
|
+
|
262
|
+
void CodeRange::TearDown() {
|
263
|
+
delete code_range_; // Frees all memory in the virtual memory range.
|
264
|
+
code_range_ = NULL;
|
265
|
+
free_list_.Free();
|
266
|
+
allocation_list_.Free();
|
267
|
+
}
|
268
|
+
|
269
|
+
|
270
|
+
// -----------------------------------------------------------------------------
|
271
|
+
// MemoryAllocator
|
272
|
+
//
|
273
|
+
int MemoryAllocator::capacity_ = 0;
|
274
|
+
int MemoryAllocator::size_ = 0;
|
275
|
+
|
276
|
+
VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
|
277
|
+
|
278
|
+
// 270 is an estimate based on the static default heap size of a pair of 256K
|
279
|
+
// semispaces and a 64M old generation.
|
280
|
+
const int kEstimatedNumberOfChunks = 270;
|
281
|
+
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
|
282
|
+
kEstimatedNumberOfChunks);
|
283
|
+
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
|
284
|
+
int MemoryAllocator::max_nof_chunks_ = 0;
|
285
|
+
int MemoryAllocator::top_ = 0;
|
286
|
+
|
287
|
+
|
288
|
+
void MemoryAllocator::Push(int free_chunk_id) {
|
289
|
+
ASSERT(max_nof_chunks_ > 0);
|
290
|
+
ASSERT(top_ < max_nof_chunks_);
|
291
|
+
free_chunk_ids_[top_++] = free_chunk_id;
|
292
|
+
}
|
293
|
+
|
294
|
+
|
295
|
+
int MemoryAllocator::Pop() {
|
296
|
+
ASSERT(top_ > 0);
|
297
|
+
return free_chunk_ids_[--top_];
|
298
|
+
}
|
299
|
+
|
300
|
+
|
301
|
+
bool MemoryAllocator::Setup(int capacity) {
|
302
|
+
capacity_ = RoundUp(capacity, Page::kPageSize);
|
303
|
+
|
304
|
+
// Over-estimate the size of chunks_ array. It assumes the expansion of old
|
305
|
+
// space is always in the unit of a chunk (kChunkSize) except the last
|
306
|
+
// expansion.
|
307
|
+
//
|
308
|
+
// Due to alignment, allocated space might be one page less than required
|
309
|
+
// number (kPagesPerChunk) of pages for old spaces.
|
310
|
+
//
|
311
|
+
// Reserve two chunk ids for semispaces, one for map space, one for old
|
312
|
+
// space, and one for code space.
|
313
|
+
max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
|
314
|
+
if (max_nof_chunks_ > kMaxNofChunks) return false;
|
315
|
+
|
316
|
+
size_ = 0;
|
317
|
+
ChunkInfo info; // uninitialized element.
|
318
|
+
for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
|
319
|
+
chunks_.Add(info);
|
320
|
+
free_chunk_ids_.Add(i);
|
321
|
+
}
|
322
|
+
top_ = max_nof_chunks_;
|
323
|
+
return true;
|
324
|
+
}
|
325
|
+
|
326
|
+
|
327
|
+
void MemoryAllocator::TearDown() {
|
328
|
+
for (int i = 0; i < max_nof_chunks_; i++) {
|
329
|
+
if (chunks_[i].address() != NULL) DeleteChunk(i);
|
330
|
+
}
|
331
|
+
chunks_.Clear();
|
332
|
+
free_chunk_ids_.Clear();
|
333
|
+
|
334
|
+
if (initial_chunk_ != NULL) {
|
335
|
+
LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
|
336
|
+
delete initial_chunk_;
|
337
|
+
initial_chunk_ = NULL;
|
338
|
+
}
|
339
|
+
|
340
|
+
ASSERT(top_ == max_nof_chunks_); // all chunks are free
|
341
|
+
top_ = 0;
|
342
|
+
capacity_ = 0;
|
343
|
+
size_ = 0;
|
344
|
+
max_nof_chunks_ = 0;
|
345
|
+
}
|
346
|
+
|
347
|
+
|
348
|
+
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
|
349
|
+
size_t* allocated,
|
350
|
+
Executability executable) {
|
351
|
+
if (size_ + static_cast<int>(requested) > capacity_) return NULL;
|
352
|
+
void* mem;
|
353
|
+
if (executable == EXECUTABLE && CodeRange::exists()) {
|
354
|
+
mem = CodeRange::AllocateRawMemory(requested, allocated);
|
355
|
+
} else {
|
356
|
+
mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
|
357
|
+
}
|
358
|
+
int alloced = static_cast<int>(*allocated);
|
359
|
+
size_ += alloced;
|
360
|
+
Counters::memory_allocated.Increment(alloced);
|
361
|
+
return mem;
|
362
|
+
}
|
363
|
+
|
364
|
+
|
365
|
+
void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
|
366
|
+
if (CodeRange::contains(static_cast<Address>(mem))) {
|
367
|
+
CodeRange::FreeRawMemory(mem, length);
|
368
|
+
} else {
|
369
|
+
OS::Free(mem, length);
|
370
|
+
}
|
371
|
+
Counters::memory_allocated.Decrement(static_cast<int>(length));
|
372
|
+
size_ -= static_cast<int>(length);
|
373
|
+
ASSERT(size_ >= 0);
|
374
|
+
}
|
375
|
+
|
376
|
+
|
377
|
+
void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
|
378
|
+
ASSERT(initial_chunk_ == NULL);
|
379
|
+
|
380
|
+
initial_chunk_ = new VirtualMemory(requested);
|
381
|
+
CHECK(initial_chunk_ != NULL);
|
382
|
+
if (!initial_chunk_->IsReserved()) {
|
383
|
+
delete initial_chunk_;
|
384
|
+
initial_chunk_ = NULL;
|
385
|
+
return NULL;
|
386
|
+
}
|
387
|
+
|
388
|
+
// We are sure that we have mapped a block of requested addresses.
|
389
|
+
ASSERT(initial_chunk_->size() == requested);
|
390
|
+
LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
|
391
|
+
size_ += static_cast<int>(requested);
|
392
|
+
return initial_chunk_->address();
|
393
|
+
}
|
394
|
+
|
395
|
+
|
396
|
+
static int PagesInChunk(Address start, size_t size) {
|
397
|
+
// The first page starts on the first page-aligned address from start onward
|
398
|
+
// and the last page ends on the last page-aligned address before
|
399
|
+
// start+size. Page::kPageSize is a power of two so we can divide by
|
400
|
+
// shifting.
|
401
|
+
return static_cast<int>((RoundDown(start + size, Page::kPageSize)
|
402
|
+
- RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
|
403
|
+
}
|
404
|
+
|
405
|
+
|
406
|
+
Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
|
407
|
+
PagedSpace* owner) {
|
408
|
+
if (requested_pages <= 0) return Page::FromAddress(NULL);
|
409
|
+
size_t chunk_size = requested_pages * Page::kPageSize;
|
410
|
+
|
411
|
+
// There is not enough space to guarantee the desired number pages can be
|
412
|
+
// allocated.
|
413
|
+
if (size_ + static_cast<int>(chunk_size) > capacity_) {
|
414
|
+
// Request as many pages as we can.
|
415
|
+
chunk_size = capacity_ - size_;
|
416
|
+
requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
|
417
|
+
|
418
|
+
if (requested_pages <= 0) return Page::FromAddress(NULL);
|
419
|
+
}
|
420
|
+
void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
|
421
|
+
if (chunk == NULL) return Page::FromAddress(NULL);
|
422
|
+
LOG(NewEvent("PagedChunk", chunk, chunk_size));
|
423
|
+
|
424
|
+
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
|
425
|
+
if (*allocated_pages == 0) {
|
426
|
+
FreeRawMemory(chunk, chunk_size);
|
427
|
+
LOG(DeleteEvent("PagedChunk", chunk));
|
428
|
+
return Page::FromAddress(NULL);
|
429
|
+
}
|
430
|
+
|
431
|
+
int chunk_id = Pop();
|
432
|
+
chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
|
433
|
+
|
434
|
+
return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
|
435
|
+
}
|
436
|
+
|
437
|
+
|
438
|
+
Page* MemoryAllocator::CommitPages(Address start, size_t size,
|
439
|
+
PagedSpace* owner, int* num_pages) {
|
440
|
+
ASSERT(start != NULL);
|
441
|
+
*num_pages = PagesInChunk(start, size);
|
442
|
+
ASSERT(*num_pages > 0);
|
443
|
+
ASSERT(initial_chunk_ != NULL);
|
444
|
+
ASSERT(InInitialChunk(start));
|
445
|
+
ASSERT(InInitialChunk(start + size - 1));
|
446
|
+
if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
|
447
|
+
return Page::FromAddress(NULL);
|
448
|
+
}
|
449
|
+
Counters::memory_allocated.Increment(static_cast<int>(size));
|
450
|
+
|
451
|
+
// So long as we correctly overestimated the number of chunks we should not
|
452
|
+
// run out of chunk ids.
|
453
|
+
CHECK(!OutOfChunkIds());
|
454
|
+
int chunk_id = Pop();
|
455
|
+
chunks_[chunk_id].init(start, size, owner);
|
456
|
+
return InitializePagesInChunk(chunk_id, *num_pages, owner);
|
457
|
+
}
|
458
|
+
|
459
|
+
|
460
|
+
bool MemoryAllocator::CommitBlock(Address start,
|
461
|
+
size_t size,
|
462
|
+
Executability executable) {
|
463
|
+
ASSERT(start != NULL);
|
464
|
+
ASSERT(size > 0);
|
465
|
+
ASSERT(initial_chunk_ != NULL);
|
466
|
+
ASSERT(InInitialChunk(start));
|
467
|
+
ASSERT(InInitialChunk(start + size - 1));
|
468
|
+
|
469
|
+
if (!initial_chunk_->Commit(start, size, executable)) return false;
|
470
|
+
Counters::memory_allocated.Increment(static_cast<int>(size));
|
471
|
+
return true;
|
472
|
+
}
|
473
|
+
|
474
|
+
bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
|
475
|
+
ASSERT(start != NULL);
|
476
|
+
ASSERT(size > 0);
|
477
|
+
ASSERT(initial_chunk_ != NULL);
|
478
|
+
ASSERT(InInitialChunk(start));
|
479
|
+
ASSERT(InInitialChunk(start + size - 1));
|
480
|
+
|
481
|
+
if (!initial_chunk_->Uncommit(start, size)) return false;
|
482
|
+
Counters::memory_allocated.Decrement(static_cast<int>(size));
|
483
|
+
return true;
|
484
|
+
}
|
485
|
+
|
486
|
+
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
|
487
|
+
PagedSpace* owner) {
|
488
|
+
ASSERT(IsValidChunk(chunk_id));
|
489
|
+
ASSERT(pages_in_chunk > 0);
|
490
|
+
|
491
|
+
Address chunk_start = chunks_[chunk_id].address();
|
492
|
+
|
493
|
+
Address low = RoundUp(chunk_start, Page::kPageSize);
|
494
|
+
|
495
|
+
#ifdef DEBUG
|
496
|
+
size_t chunk_size = chunks_[chunk_id].size();
|
497
|
+
Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
|
498
|
+
ASSERT(pages_in_chunk <=
|
499
|
+
((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
|
500
|
+
#endif
|
501
|
+
|
502
|
+
Address page_addr = low;
|
503
|
+
for (int i = 0; i < pages_in_chunk; i++) {
|
504
|
+
Page* p = Page::FromAddress(page_addr);
|
505
|
+
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
|
506
|
+
p->is_normal_page = 1;
|
507
|
+
page_addr += Page::kPageSize;
|
508
|
+
}
|
509
|
+
|
510
|
+
// Set the next page of the last page to 0.
|
511
|
+
Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
|
512
|
+
last_page->opaque_header = OffsetFrom(0) | chunk_id;
|
513
|
+
|
514
|
+
return Page::FromAddress(low);
|
515
|
+
}
|
516
|
+
|
517
|
+
|
518
|
+
Page* MemoryAllocator::FreePages(Page* p) {
|
519
|
+
if (!p->is_valid()) return p;
|
520
|
+
|
521
|
+
// Find the first page in the same chunk as 'p'
|
522
|
+
Page* first_page = FindFirstPageInSameChunk(p);
|
523
|
+
Page* page_to_return = Page::FromAddress(NULL);
|
524
|
+
|
525
|
+
if (p != first_page) {
|
526
|
+
// Find the last page in the same chunk as 'prev'.
|
527
|
+
Page* last_page = FindLastPageInSameChunk(p);
|
528
|
+
first_page = GetNextPage(last_page); // first page in next chunk
|
529
|
+
|
530
|
+
// set the next_page of last_page to NULL
|
531
|
+
SetNextPage(last_page, Page::FromAddress(NULL));
|
532
|
+
page_to_return = p; // return 'p' when exiting
|
533
|
+
}
|
534
|
+
|
535
|
+
while (first_page->is_valid()) {
|
536
|
+
int chunk_id = GetChunkId(first_page);
|
537
|
+
ASSERT(IsValidChunk(chunk_id));
|
538
|
+
|
539
|
+
// Find the first page of the next chunk before deleting this chunk.
|
540
|
+
first_page = GetNextPage(FindLastPageInSameChunk(first_page));
|
541
|
+
|
542
|
+
// Free the current chunk.
|
543
|
+
DeleteChunk(chunk_id);
|
544
|
+
}
|
545
|
+
|
546
|
+
return page_to_return;
|
547
|
+
}
|
548
|
+
|
549
|
+
|
550
|
+
void MemoryAllocator::DeleteChunk(int chunk_id) {
|
551
|
+
ASSERT(IsValidChunk(chunk_id));
|
552
|
+
|
553
|
+
ChunkInfo& c = chunks_[chunk_id];
|
554
|
+
|
555
|
+
// We cannot free a chunk contained in the initial chunk because it was not
|
556
|
+
// allocated with AllocateRawMemory. Instead we uncommit the virtual
|
557
|
+
// memory.
|
558
|
+
if (InInitialChunk(c.address())) {
|
559
|
+
// TODO(1240712): VirtualMemory::Uncommit has a return value which
|
560
|
+
// is ignored here.
|
561
|
+
initial_chunk_->Uncommit(c.address(), c.size());
|
562
|
+
Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
|
563
|
+
} else {
|
564
|
+
LOG(DeleteEvent("PagedChunk", c.address()));
|
565
|
+
FreeRawMemory(c.address(), c.size());
|
566
|
+
}
|
567
|
+
c.init(NULL, 0, NULL);
|
568
|
+
Push(chunk_id);
|
569
|
+
}
|
570
|
+
|
571
|
+
|
572
|
+
Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
|
573
|
+
int chunk_id = GetChunkId(p);
|
574
|
+
ASSERT(IsValidChunk(chunk_id));
|
575
|
+
|
576
|
+
Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
|
577
|
+
return Page::FromAddress(low);
|
578
|
+
}
|
579
|
+
|
580
|
+
|
581
|
+
Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
|
582
|
+
int chunk_id = GetChunkId(p);
|
583
|
+
ASSERT(IsValidChunk(chunk_id));
|
584
|
+
|
585
|
+
Address chunk_start = chunks_[chunk_id].address();
|
586
|
+
size_t chunk_size = chunks_[chunk_id].size();
|
587
|
+
|
588
|
+
Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
|
589
|
+
ASSERT(chunk_start <= p->address() && p->address() < high);
|
590
|
+
|
591
|
+
return Page::FromAddress(high - Page::kPageSize);
|
592
|
+
}
|
593
|
+
|
594
|
+
|
595
|
+
#ifdef DEBUG
|
596
|
+
void MemoryAllocator::ReportStatistics() {
|
597
|
+
float pct = static_cast<float>(capacity_ - size_) / capacity_;
|
598
|
+
PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
|
599
|
+
capacity_, size_, static_cast<int>(pct*100));
|
600
|
+
}
|
601
|
+
#endif
|
602
|
+
|
603
|
+
|
604
|
+
// -----------------------------------------------------------------------------
|
605
|
+
// PagedSpace implementation
|
606
|
+
|
607
|
+
PagedSpace::PagedSpace(int max_capacity,
|
608
|
+
AllocationSpace id,
|
609
|
+
Executability executable)
|
610
|
+
: Space(id, executable) {
|
611
|
+
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
|
612
|
+
* Page::kObjectAreaSize;
|
613
|
+
accounting_stats_.Clear();
|
614
|
+
|
615
|
+
allocation_info_.top = NULL;
|
616
|
+
allocation_info_.limit = NULL;
|
617
|
+
|
618
|
+
mc_forwarding_info_.top = NULL;
|
619
|
+
mc_forwarding_info_.limit = NULL;
|
620
|
+
}
|
621
|
+
|
622
|
+
|
623
|
+
bool PagedSpace::Setup(Address start, size_t size) {
|
624
|
+
if (HasBeenSetup()) return false;
|
625
|
+
|
626
|
+
int num_pages = 0;
|
627
|
+
// Try to use the virtual memory range passed to us. If it is too small to
|
628
|
+
// contain at least one page, ignore it and allocate instead.
|
629
|
+
int pages_in_chunk = PagesInChunk(start, size);
|
630
|
+
if (pages_in_chunk > 0) {
|
631
|
+
first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
|
632
|
+
Page::kPageSize * pages_in_chunk,
|
633
|
+
this, &num_pages);
|
634
|
+
} else {
|
635
|
+
int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
|
636
|
+
max_capacity_ / Page::kObjectAreaSize);
|
637
|
+
first_page_ =
|
638
|
+
MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
|
639
|
+
if (!first_page_->is_valid()) return false;
|
640
|
+
}
|
641
|
+
|
642
|
+
// We are sure that the first page is valid and that we have at least one
|
643
|
+
// page.
|
644
|
+
ASSERT(first_page_->is_valid());
|
645
|
+
ASSERT(num_pages > 0);
|
646
|
+
accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
|
647
|
+
ASSERT(Capacity() <= max_capacity_);
|
648
|
+
|
649
|
+
// Sequentially initialize remembered sets in the newly allocated
|
650
|
+
// pages and cache the current last page in the space.
|
651
|
+
for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
|
652
|
+
p->ClearRSet();
|
653
|
+
last_page_ = p;
|
654
|
+
}
|
655
|
+
|
656
|
+
// Use first_page_ for allocation.
|
657
|
+
SetAllocationInfo(&allocation_info_, first_page_);
|
658
|
+
|
659
|
+
return true;
|
660
|
+
}
|
661
|
+
|
662
|
+
|
663
|
+
bool PagedSpace::HasBeenSetup() {
|
664
|
+
return (Capacity() > 0);
|
665
|
+
}
|
666
|
+
|
667
|
+
|
668
|
+
void PagedSpace::TearDown() {
|
669
|
+
first_page_ = MemoryAllocator::FreePages(first_page_);
|
670
|
+
ASSERT(!first_page_->is_valid());
|
671
|
+
|
672
|
+
accounting_stats_.Clear();
|
673
|
+
}
|
674
|
+
|
675
|
+
|
676
|
+
#ifdef ENABLE_HEAP_PROTECTION

// Write-protects the memory backing this space.  Pages of one chunk are
// contiguous, so protection is applied per chunk and the loop then skips
// directly past the last page of that chunk.
void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::ProtectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}


// Undoes Protect(): restores normal access to every chunk of the space,
// walking chunk-by-chunk exactly like Protect() does.
void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    MemoryAllocator::UnprotectChunkFromPage(page);
    page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
  }
}

#endif
// Clears the remembered set of every page in the space (used and unused).
void PagedSpace::ClearRSet() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->ClearRSet();
  }
}
// Finds the heap object containing 'addr' by linearly scanning the objects
// on addr's page from its object-area start.  Returns Failure::Exception()
// when addr is outside the space; hits UNREACHABLE() if addr lies inside a
// used page but above its allocation top.
Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!MarkCompactCollector::in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    // Object bodies are packed back-to-back, so the first object whose
    // extent covers addr is the answer.
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}
// Returns true if 'page' is one of the in-use pages of this space.
// Linear search over PAGES_IN_USE; intended as a (debug-time) helper,
// not for hot paths.
bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}
// Points 'alloc_info' at the full object area of page 'p' so that linear
// allocation continues from the beginning of that page.
void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}
// Prepares the space for mark-compact relocation: numbers the pages (the
// indexes are used by MCSpaceOffsetForAddress to encode forwarding
// addresses), resets the forwarding allocation pointer to the first page,
// and clears the accounting so it can be rebuilt during relocation.
void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}
// Maps 'addr' to a linear byte offset within the space, as if the pages
// were laid out contiguously in mc_page_index order.  Used during
// mark-compact to encode forwarding addresses compactly.
int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}
// Slow case for reallocating and promoting objects during a compacting
// collection. This function is not space-specific.
// Called when the fast-path forwarding allocation on the current page is
// exhausted: advances mc_forwarding_info_ to the next page (expanding the
// space if needed) and retries the linear allocation there.  Returns NULL
// when the space cannot be expanded.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->mc_relocation_top = mc_forwarding_info_.top;
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}
// Grows the space by allocating up to one chunk's worth of new pages and
// linking them after 'last_page'.  Returns false when the space is already
// at max_capacity_ or the allocation fails.
bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  // AllocatePages updates desired_pages to the number actually obtained.
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  // Sequentially clear remembered set of new pages and and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->ClearRSet();
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}
#ifdef DEBUG
// Debug-only: counts every page in the space by walking the page list.
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif
// Returns unused pages after the allocation top to the memory allocator.
// (The "Release half of free pages" comment is upstream's; the code frees
// the pages after top_page that FreePages is able to release — pages can
// only be freed in whole chunks, so some may remain.)
void PagedSpace::Shrink() {
  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = MemoryAllocator::FreePages(top_page->next_page());
  MemoryAllocator::SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}
// Grows the space until Capacity() >= capacity, expanding chunk by chunk
// from the current end of the page list.  Returns false if an expansion
// fails before the target capacity is reached.
bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}
#ifdef DEBUG
// Debug-only: intentionally a no-op for the generic paged space
// (subclasses/other debug paths do the actual printing).
void PagedSpace::Print() { }
#endif
#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
// Checks, for every page up to the allocation top, that objects are packed
// back-to-back, each has a valid map in map space, passes space-specific
// and object-level verification, and that interior pointers satisfy the
// given visitor.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      // Unless this is the last page in the space containing allocated
      // objects, the allocation top should be at a constant offset from the
      // object area end.
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      } else {
        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(Heap::map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap and
        // have their remembered set bits set if required as determined
        // by the visitor.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif
+
|
976
|
+
// -----------------------------------------------------------------------------
// NewSpace implementation

// Sets up the new space over the preallocated block [start, start + size),
// splitting it into to/from semispaces and initializing the masks used for
// fast containment tests.  Returns false if either semispace fails to set up.
bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  // The from space occupies the second half of the reserved block.
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  // size is a power of two, so ~(size - 1) masks addresses down to the
  // aligned start of the new-space block (fast containment test).
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}
// Releases the histogram arrays (when built with them), nulls out the
// allocation/forwarding pointers, and tears down both semispaces.
void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}
#ifdef ENABLE_HEAP_PROTECTION

// Write-protects both semispaces of the new space.
void NewSpace::Protect() {
  MemoryAllocator::Protect(ToSpaceLow(), Capacity());
  MemoryAllocator::Protect(FromSpaceLow(), Capacity());
}


// Restores access to both semispaces, preserving each semispace's
// executable permission.
void NewSpace::Unprotect() {
  MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
                             to_space_.executable());
  MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
                             from_space_.executable());
}

#endif
// Swaps the roles of the semispaces after a scavenge: the old to-space
// becomes from-space and vice versa.
void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}
// Grows both semispaces.  The semispaces must end up with equal capacity,
// so if only to-space could grow it is shrunk back; failure to restore
// that invariant is fatal.
void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  // Allocation continues in (possibly larger) to-space.
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
// Shrinks both semispaces toward max(InitialCapacity(), 2 * Size()),
// rounded up to the OS allocation alignment.  As in Grow(), the equal-
// capacity invariant is restored on partial failure, fatally if needed.
void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * Size());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
// Resets linear allocation to the beginning of (the current) to-space.
void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
// Points the mark-compact forwarding allocation pointer at the start of
// from-space (objects are relocated there before the spaces are flipped).
void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}
// Adopts the forwarding allocation pointer as the real allocation pointer
// once relocation is done.
void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
// Walks to-space from its low address to the allocation top, checking that
// objects are packed, have valid maps in map space, are neither maps nor
// code (those never live in new space), verify cleanly, and contain only
// in-heap interior pointers.
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif
// Commits the semispace's reserved memory so it is usable.  Returns false
// (leaving committed_ unset) if the OS-level commit fails.
bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}
// Returns the semispace's memory to reserved-only state.  Returns false
// (leaving committed_ set) if the OS-level uncommit fails.
bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}
// -----------------------------------------------------------------------------
// SemiSpace implementation

// Initializes the semispace over the reserved range starting at 'start'
// and commits its initial capacity.  Returns the result of Commit().
bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  // maximum_capacity is a power of two (asserted by NewSpace::Setup), so
  // this mask supports the fast containment/object tests.
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}
// Forgets the semispace's memory range.  Note: does not uncommit; the
// underlying reservation is managed by the owning NewSpace/allocator.
void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}
// Doubles the committed capacity (rounded to the OS allocation alignment),
// clamped to maximum_capacity_.  Returns false if committing the extra
// block fails.
bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                  maximum_extra);
  if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}
// Commits memory to raise the capacity to exactly 'new_capacity'
// (which must be aligned, larger than the current capacity, and within
// the maximum).  Returns false if the commit fails.
bool SemiSpace::GrowTo(int new_capacity) {
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  size_t delta = new_capacity - capacity_;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}
// Uncommits memory from the high end to lower the capacity to exactly
// 'new_capacity' (aligned, below current capacity, at least the initial
// capacity).  Returns false if the uncommit fails.
bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  size_t delta = capacity_ - new_capacity;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}
#ifdef DEBUG
// Debug-only stubs: semispaces have nothing useful to print, and their
// verification is done by NewSpace::Verify over to-space.
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif
// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.

// Iterate all objects currently allocated in to-space.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


// Same, but object sizes come from 'size_func' rather than the object
// headers (used when headers may hold forwarding information).
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


// Iterate objects from 'start' (which must lie in to-space) to the top.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}
// Shared constructor body: record the to-space being walked, the cursor,
// the end address, and the optional size callback (NULL means use
// HeapObject::Size()).
void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}
#ifdef DEBUG
// A static array of histogram info for each type.
static HistogramInfo heap_histograms[LAST_TYPE+1];
static JSObject::SpillInformation js_spill_information;

// heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


// Byte counts per code kind, filled in elsewhere and reported below.
static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


// Prints the per-code-kind byte counts.  The switch exists only to build
// the kind-name table via the CASE macro (and to get a compiler warning if
// a kind is added without updating this list).
static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS];

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
    }
  }

#undef CASE

  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


// Records 'obj' in the shared per-instance-type histogram (and, when
// requested by the flag, its spill statistics).  Returns the object size
// so callers can advance their scan cursor.
static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


// Prints the shared histogram: one line per instance type seen, a combined
// line for all string types, and optionally the JSObject spill statistics.
static void ReportHistogram(bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF(" %-33s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)      \
    string_number += heap_histograms[type].number(); \
    string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG
|
+
|
1423
|
+
// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Zeroes the per-instance-type allocation and promotion histograms.
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}
// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
// flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  while (it.has_next()) RecordAllocation(it.next());
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// Emits one heap-sample log record for the given histogram array:
// all string types lumped into a single STRING_TYPE item, then one item
// per non-string type that was observed.
static void DoReportStatistics(HistogramInfo* info, const char* description) {
  LOG(HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
    string_number += info[type].number();       \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING
// Reports the collected new-space statistics: to stdout under
// DEBUG + --heap-stats, and to the GC log under
// ENABLE_LOGGING_AND_PROFILING + --log-gc.
void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF("  capacity: %d, available: %d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-33s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    DoReportStatistics(allocated_histogram_, "allocated");
    DoReportStatistics(promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}
// Tallies 'obj' (count and bytes) into the allocation histogram for its
// instance type.
void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}
// Tallies 'obj' (count and bytes) into the promotion histogram for its
// instance type.
void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
|
+
|
1516
|
+
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

// Disguises this free block as a real heap object of exactly
// 'size_in_bytes' so heap iteration can skip over it.
void FreeListNode::set_size(int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block.  If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (eg, one or two words), to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > ByteArray::kAlignedSize) {
    set_map(Heap::raw_unchecked_byte_array_map());
    // Can't use ByteArray::cast because it fails during deserialization.
    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(Heap::raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(Heap::raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
  // deserialization because the byte array map is not done yet.
}
Address FreeListNode::next() {
|
1548
|
+
ASSERT(IsFreeListNode(this));
|
1549
|
+
if (map() == Heap::raw_unchecked_byte_array_map()) {
|
1550
|
+
ASSERT(Size() >= kNextOffset + kPointerSize);
|
1551
|
+
return Memory::Address_at(address() + kNextOffset);
|
1552
|
+
} else {
|
1553
|
+
return Memory::Address_at(address() + kPointerSize);
|
1554
|
+
}
|
1555
|
+
}
|
1556
|
+
|
1557
|
+
|
1558
|
+
void FreeListNode::set_next(Address next) {
|
1559
|
+
ASSERT(IsFreeListNode(this));
|
1560
|
+
if (map() == Heap::raw_unchecked_byte_array_map()) {
|
1561
|
+
ASSERT(Size() >= kNextOffset + kPointerSize);
|
1562
|
+
Memory::Address_at(address() + kNextOffset) = next;
|
1563
|
+
} else {
|
1564
|
+
Memory::Address_at(address() + kPointerSize) = next;
|
1565
|
+
}
|
1566
|
+
}
|
1567
|
+
|
1568
|
+
|
1569
|
+
OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
|
1570
|
+
Reset();
|
1571
|
+
}
|
1572
|
+
|
1573
|
+
|
1574
|
+
void OldSpaceFreeList::Reset() {
|
1575
|
+
available_ = 0;
|
1576
|
+
for (int i = 0; i < kFreeListsLength; i++) {
|
1577
|
+
free_[i].head_node_ = NULL;
|
1578
|
+
}
|
1579
|
+
needs_rebuild_ = false;
|
1580
|
+
finger_ = kHead;
|
1581
|
+
free_[kHead].next_size_ = kEnd;
|
1582
|
+
}
|
1583
|
+
|
1584
|
+
|
1585
|
+
void OldSpaceFreeList::RebuildSizeList() {
|
1586
|
+
ASSERT(needs_rebuild_);
|
1587
|
+
int cur = kHead;
|
1588
|
+
for (int i = cur + 1; i < kFreeListsLength; i++) {
|
1589
|
+
if (free_[i].head_node_ != NULL) {
|
1590
|
+
free_[cur].next_size_ = i;
|
1591
|
+
cur = i;
|
1592
|
+
}
|
1593
|
+
}
|
1594
|
+
free_[cur].next_size_ = kEnd;
|
1595
|
+
needs_rebuild_ = false;
|
1596
|
+
}
|
1597
|
+
|
1598
|
+
|
1599
|
+
int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
|
1600
|
+
#ifdef DEBUG
|
1601
|
+
for (int i = 0; i < size_in_bytes; i += kPointerSize) {
|
1602
|
+
Memory::Address_at(start + i) = kZapValue;
|
1603
|
+
}
|
1604
|
+
#endif
|
1605
|
+
FreeListNode* node = FreeListNode::FromAddress(start);
|
1606
|
+
node->set_size(size_in_bytes);
|
1607
|
+
|
1608
|
+
// We don't use the freelists in compacting mode. This makes it more like a
|
1609
|
+
// GC that only has mark-sweep-compact and doesn't have a mark-sweep
|
1610
|
+
// collector.
|
1611
|
+
if (FLAG_always_compact) {
|
1612
|
+
return size_in_bytes;
|
1613
|
+
}
|
1614
|
+
|
1615
|
+
// Early return to drop too-small blocks on the floor (one or two word
|
1616
|
+
// blocks cannot hold a map pointer, a size field, and a pointer to the
|
1617
|
+
// next block in the free list).
|
1618
|
+
if (size_in_bytes < kMinBlockSize) {
|
1619
|
+
return size_in_bytes;
|
1620
|
+
}
|
1621
|
+
|
1622
|
+
// Insert other blocks at the head of an exact free list.
|
1623
|
+
int index = size_in_bytes >> kPointerSizeLog2;
|
1624
|
+
node->set_next(free_[index].head_node_);
|
1625
|
+
free_[index].head_node_ = node->address();
|
1626
|
+
available_ += size_in_bytes;
|
1627
|
+
needs_rebuild_ = true;
|
1628
|
+
return 0;
|
1629
|
+
}
|
1630
|
+
|
1631
|
+
|
1632
|
+
Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
|
1633
|
+
ASSERT(0 < size_in_bytes);
|
1634
|
+
ASSERT(size_in_bytes <= kMaxBlockSize);
|
1635
|
+
ASSERT(IsAligned(size_in_bytes, kPointerSize));
|
1636
|
+
|
1637
|
+
if (needs_rebuild_) RebuildSizeList();
|
1638
|
+
int index = size_in_bytes >> kPointerSizeLog2;
|
1639
|
+
// Check for a perfect fit.
|
1640
|
+
if (free_[index].head_node_ != NULL) {
|
1641
|
+
FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
|
1642
|
+
// If this was the last block of its size, remove the size.
|
1643
|
+
if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
|
1644
|
+
available_ -= size_in_bytes;
|
1645
|
+
*wasted_bytes = 0;
|
1646
|
+
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
|
1647
|
+
return node;
|
1648
|
+
}
|
1649
|
+
// Search the size list for the best fit.
|
1650
|
+
int prev = finger_ < index ? finger_ : kHead;
|
1651
|
+
int cur = FindSize(index, &prev);
|
1652
|
+
ASSERT(index < cur);
|
1653
|
+
if (cur == kEnd) {
|
1654
|
+
// No large enough size in list.
|
1655
|
+
*wasted_bytes = 0;
|
1656
|
+
return Failure::RetryAfterGC(size_in_bytes, owner_);
|
1657
|
+
}
|
1658
|
+
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
|
1659
|
+
int rem = cur - index;
|
1660
|
+
int rem_bytes = rem << kPointerSizeLog2;
|
1661
|
+
FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
|
1662
|
+
ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
|
1663
|
+
FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
|
1664
|
+
size_in_bytes);
|
1665
|
+
// Distinguish the cases prev < rem < cur and rem <= prev < cur
|
1666
|
+
// to avoid many redundant tests and calls to Insert/RemoveSize.
|
1667
|
+
if (prev < rem) {
|
1668
|
+
// Simple case: insert rem between prev and cur.
|
1669
|
+
finger_ = prev;
|
1670
|
+
free_[prev].next_size_ = rem;
|
1671
|
+
// If this was the last block of size cur, remove the size.
|
1672
|
+
if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
|
1673
|
+
free_[rem].next_size_ = free_[cur].next_size_;
|
1674
|
+
} else {
|
1675
|
+
free_[rem].next_size_ = cur;
|
1676
|
+
}
|
1677
|
+
// Add the remainder block.
|
1678
|
+
rem_node->set_size(rem_bytes);
|
1679
|
+
rem_node->set_next(free_[rem].head_node_);
|
1680
|
+
free_[rem].head_node_ = rem_node->address();
|
1681
|
+
} else {
|
1682
|
+
// If this was the last block of size cur, remove the size.
|
1683
|
+
if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
|
1684
|
+
finger_ = prev;
|
1685
|
+
free_[prev].next_size_ = free_[cur].next_size_;
|
1686
|
+
}
|
1687
|
+
if (rem_bytes < kMinBlockSize) {
|
1688
|
+
// Too-small remainder is wasted.
|
1689
|
+
rem_node->set_size(rem_bytes);
|
1690
|
+
available_ -= size_in_bytes + rem_bytes;
|
1691
|
+
*wasted_bytes = rem_bytes;
|
1692
|
+
return cur_node;
|
1693
|
+
}
|
1694
|
+
// Add the remainder block and, if needed, insert its size.
|
1695
|
+
rem_node->set_size(rem_bytes);
|
1696
|
+
rem_node->set_next(free_[rem].head_node_);
|
1697
|
+
free_[rem].head_node_ = rem_node->address();
|
1698
|
+
if (rem_node->next() == NULL) InsertSize(rem);
|
1699
|
+
}
|
1700
|
+
available_ -= size_in_bytes;
|
1701
|
+
*wasted_bytes = 0;
|
1702
|
+
return cur_node;
|
1703
|
+
}
|
1704
|
+
|
1705
|
+
|
1706
|
+
#ifdef DEBUG
|
1707
|
+
bool OldSpaceFreeList::Contains(FreeListNode* node) {
|
1708
|
+
for (int i = 0; i < kFreeListsLength; i++) {
|
1709
|
+
Address cur_addr = free_[i].head_node_;
|
1710
|
+
while (cur_addr != NULL) {
|
1711
|
+
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
|
1712
|
+
if (cur_node == node) return true;
|
1713
|
+
cur_addr = cur_node->next();
|
1714
|
+
}
|
1715
|
+
}
|
1716
|
+
return false;
|
1717
|
+
}
|
1718
|
+
#endif
|
1719
|
+
|
1720
|
+
|
1721
|
+
FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
|
1722
|
+
: owner_(owner), object_size_(object_size) {
|
1723
|
+
Reset();
|
1724
|
+
}
|
1725
|
+
|
1726
|
+
|
1727
|
+
void FixedSizeFreeList::Reset() {
|
1728
|
+
available_ = 0;
|
1729
|
+
head_ = NULL;
|
1730
|
+
}
|
1731
|
+
|
1732
|
+
|
1733
|
+
void FixedSizeFreeList::Free(Address start) {
|
1734
|
+
#ifdef DEBUG
|
1735
|
+
for (int i = 0; i < object_size_; i += kPointerSize) {
|
1736
|
+
Memory::Address_at(start + i) = kZapValue;
|
1737
|
+
}
|
1738
|
+
#endif
|
1739
|
+
// We only use the freelists with mark-sweep.
|
1740
|
+
ASSERT(!MarkCompactCollector::IsCompacting());
|
1741
|
+
FreeListNode* node = FreeListNode::FromAddress(start);
|
1742
|
+
node->set_size(object_size_);
|
1743
|
+
node->set_next(head_);
|
1744
|
+
head_ = node->address();
|
1745
|
+
available_ += object_size_;
|
1746
|
+
}
|
1747
|
+
|
1748
|
+
|
1749
|
+
Object* FixedSizeFreeList::Allocate() {
|
1750
|
+
if (head_ == NULL) {
|
1751
|
+
return Failure::RetryAfterGC(object_size_, owner_);
|
1752
|
+
}
|
1753
|
+
|
1754
|
+
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
|
1755
|
+
FreeListNode* node = FreeListNode::FromAddress(head_);
|
1756
|
+
head_ = node->next();
|
1757
|
+
available_ -= object_size_;
|
1758
|
+
return node;
|
1759
|
+
}
|
1760
|
+
|
1761
|
+
|
1762
|
+
// -----------------------------------------------------------------------------
|
1763
|
+
// OldSpace implementation
|
1764
|
+
|
1765
|
+
void OldSpace::PrepareForMarkCompact(bool will_compact) {
|
1766
|
+
if (will_compact) {
|
1767
|
+
// Reset relocation info. During a compacting collection, everything in
|
1768
|
+
// the space is considered 'available' and we will rediscover live data
|
1769
|
+
// and waste during the collection.
|
1770
|
+
MCResetRelocationInfo();
|
1771
|
+
ASSERT(Available() == Capacity());
|
1772
|
+
} else {
|
1773
|
+
// During a non-compacting collection, everything below the linear
|
1774
|
+
// allocation pointer is considered allocated (everything above is
|
1775
|
+
// available) and we will rediscover available and wasted bytes during
|
1776
|
+
// the collection.
|
1777
|
+
accounting_stats_.AllocateBytes(free_list_.available());
|
1778
|
+
accounting_stats_.FillWastedBytes(Waste());
|
1779
|
+
}
|
1780
|
+
|
1781
|
+
// Clear the free list before a full GC---it will be rebuilt afterward.
|
1782
|
+
free_list_.Reset();
|
1783
|
+
}
|
1784
|
+
|
1785
|
+
|
1786
|
+
void OldSpace::MCCommitRelocationInfo() {
|
1787
|
+
// Update fast allocation info.
|
1788
|
+
allocation_info_.top = mc_forwarding_info_.top;
|
1789
|
+
allocation_info_.limit = mc_forwarding_info_.limit;
|
1790
|
+
ASSERT(allocation_info_.VerifyPagedAllocation());
|
1791
|
+
|
1792
|
+
// The space is compacted and we haven't yet built free lists or
|
1793
|
+
// wasted any space.
|
1794
|
+
ASSERT(Waste() == 0);
|
1795
|
+
ASSERT(AvailableFree() == 0);
|
1796
|
+
|
1797
|
+
// Build the free list for the space.
|
1798
|
+
int computed_size = 0;
|
1799
|
+
PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
|
1800
|
+
while (it.has_next()) {
|
1801
|
+
Page* p = it.next();
|
1802
|
+
// Space below the relocation pointer is allocated.
|
1803
|
+
computed_size +=
|
1804
|
+
static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
|
1805
|
+
if (it.has_next()) {
|
1806
|
+
// Free the space at the top of the page. We cannot use
|
1807
|
+
// p->mc_relocation_top after the call to Free (because Free will clear
|
1808
|
+
// remembered set bits).
|
1809
|
+
int extra_size =
|
1810
|
+
static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
|
1811
|
+
if (extra_size > 0) {
|
1812
|
+
int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
|
1813
|
+
// The bytes we have just "freed" to add to the free list were
|
1814
|
+
// already accounted as available.
|
1815
|
+
accounting_stats_.WasteBytes(wasted_bytes);
|
1816
|
+
}
|
1817
|
+
}
|
1818
|
+
}
|
1819
|
+
|
1820
|
+
// Make sure the computed size - based on the used portion of the pages in
|
1821
|
+
// use - matches the size obtained while computing forwarding addresses.
|
1822
|
+
ASSERT(computed_size == Size());
|
1823
|
+
}
|
1824
|
+
|
1825
|
+
|
1826
|
+
bool NewSpace::ReserveSpace(int bytes) {
|
1827
|
+
// We can't reliably unpack a partial snapshot that needs more new space
|
1828
|
+
// space than the minimum NewSpace size.
|
1829
|
+
ASSERT(bytes <= InitialCapacity());
|
1830
|
+
Address limit = allocation_info_.limit;
|
1831
|
+
Address top = allocation_info_.top;
|
1832
|
+
return limit - top >= bytes;
|
1833
|
+
}
|
1834
|
+
|
1835
|
+
|
1836
|
+
bool PagedSpace::ReserveSpace(int bytes) {
|
1837
|
+
Address limit = allocation_info_.limit;
|
1838
|
+
Address top = allocation_info_.top;
|
1839
|
+
if (limit - top >= bytes) return true;
|
1840
|
+
|
1841
|
+
// There wasn't enough space in the current page. Lets put the rest
|
1842
|
+
// of the page on the free list and start a fresh page.
|
1843
|
+
PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
|
1844
|
+
|
1845
|
+
Page* reserved_page = TopPageOf(allocation_info_);
|
1846
|
+
int bytes_left_to_reserve = bytes;
|
1847
|
+
while (bytes_left_to_reserve > 0) {
|
1848
|
+
if (!reserved_page->next_page()->is_valid()) {
|
1849
|
+
if (Heap::OldGenerationAllocationLimitReached()) return false;
|
1850
|
+
Expand(reserved_page);
|
1851
|
+
}
|
1852
|
+
bytes_left_to_reserve -= Page::kPageSize;
|
1853
|
+
reserved_page = reserved_page->next_page();
|
1854
|
+
if (!reserved_page->is_valid()) return false;
|
1855
|
+
}
|
1856
|
+
ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
|
1857
|
+
SetAllocationInfo(&allocation_info_,
|
1858
|
+
TopPageOf(allocation_info_)->next_page());
|
1859
|
+
return true;
|
1860
|
+
}
|
1861
|
+
|
1862
|
+
|
1863
|
+
// You have to call this last, since the implementation from PagedSpace
|
1864
|
+
// doesn't know that memory was 'promised' to large object space.
|
1865
|
+
bool LargeObjectSpace::ReserveSpace(int bytes) {
|
1866
|
+
return Heap::OldGenerationSpaceAvailable() >= bytes;
|
1867
|
+
}
|
1868
|
+
|
1869
|
+
|
1870
|
+
// Slow case for normal allocation. Try in order: (1) allocate in the next
|
1871
|
+
// page in the space, (2) allocate off the space's free list, (3) expand the
|
1872
|
+
// space, (4) fail.
|
1873
|
+
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
|
1874
|
+
// Linear allocation in this space has failed. If there is another page
|
1875
|
+
// in the space, move to that page and allocate there. This allocation
|
1876
|
+
// should succeed (size_in_bytes should not be greater than a page's
|
1877
|
+
// object area size).
|
1878
|
+
Page* current_page = TopPageOf(allocation_info_);
|
1879
|
+
if (current_page->next_page()->is_valid()) {
|
1880
|
+
return AllocateInNextPage(current_page, size_in_bytes);
|
1881
|
+
}
|
1882
|
+
|
1883
|
+
// There is no next page in this space. Try free list allocation unless that
|
1884
|
+
// is currently forbidden.
|
1885
|
+
if (!Heap::linear_allocation()) {
|
1886
|
+
int wasted_bytes;
|
1887
|
+
Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
|
1888
|
+
accounting_stats_.WasteBytes(wasted_bytes);
|
1889
|
+
if (!result->IsFailure()) {
|
1890
|
+
accounting_stats_.AllocateBytes(size_in_bytes);
|
1891
|
+
return HeapObject::cast(result);
|
1892
|
+
}
|
1893
|
+
}
|
1894
|
+
|
1895
|
+
// Free list allocation failed and there is no next page. Fail if we have
|
1896
|
+
// hit the old generation size limit that should cause a garbage
|
1897
|
+
// collection.
|
1898
|
+
if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
|
1899
|
+
return NULL;
|
1900
|
+
}
|
1901
|
+
|
1902
|
+
// Try to expand the space and allocate in the new next page.
|
1903
|
+
ASSERT(!current_page->next_page()->is_valid());
|
1904
|
+
if (Expand(current_page)) {
|
1905
|
+
return AllocateInNextPage(current_page, size_in_bytes);
|
1906
|
+
}
|
1907
|
+
|
1908
|
+
// Finally, fail.
|
1909
|
+
return NULL;
|
1910
|
+
}
|
1911
|
+
|
1912
|
+
|
1913
|
+
void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
|
1914
|
+
int free_size =
|
1915
|
+
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
|
1916
|
+
if (free_size > 0) {
|
1917
|
+
int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
|
1918
|
+
accounting_stats_.WasteBytes(wasted_bytes);
|
1919
|
+
}
|
1920
|
+
}
|
1921
|
+
|
1922
|
+
|
1923
|
+
void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
|
1924
|
+
int free_size =
|
1925
|
+
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
|
1926
|
+
// In the fixed space free list all the free list items have the right size.
|
1927
|
+
// We use up the rest of the page while preserving this invariant.
|
1928
|
+
while (free_size >= object_size_in_bytes_) {
|
1929
|
+
free_list_.Free(allocation_info_.top);
|
1930
|
+
allocation_info_.top += object_size_in_bytes_;
|
1931
|
+
free_size -= object_size_in_bytes_;
|
1932
|
+
accounting_stats_.WasteBytes(object_size_in_bytes_);
|
1933
|
+
}
|
1934
|
+
}
|
1935
|
+
|
1936
|
+
|
1937
|
+
// Add the block at the top of the page to the space's free list, set the
|
1938
|
+
// allocation info to the next page (assumed to be one), and allocate
|
1939
|
+
// linearly there.
|
1940
|
+
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
|
1941
|
+
int size_in_bytes) {
|
1942
|
+
ASSERT(current_page->next_page()->is_valid());
|
1943
|
+
PutRestOfCurrentPageOnFreeList(current_page);
|
1944
|
+
SetAllocationInfo(&allocation_info_, current_page->next_page());
|
1945
|
+
return AllocateLinearly(&allocation_info_, size_in_bytes);
|
1946
|
+
}
|
1947
|
+
|
1948
|
+
|
1949
|
+
#ifdef DEBUG
|
1950
|
+
struct CommentStatistic {
|
1951
|
+
const char* comment;
|
1952
|
+
int size;
|
1953
|
+
int count;
|
1954
|
+
void Clear() {
|
1955
|
+
comment = NULL;
|
1956
|
+
size = 0;
|
1957
|
+
count = 0;
|
1958
|
+
}
|
1959
|
+
};
|
1960
|
+
|
1961
|
+
|
1962
|
+
// must be small, since an iteration is used for lookup
|
1963
|
+
const int kMaxComments = 64;
|
1964
|
+
static CommentStatistic comments_statistics[kMaxComments+1];
|
1965
|
+
|
1966
|
+
|
1967
|
+
void PagedSpace::ReportCodeStatistics() {
|
1968
|
+
ReportCodeKindStatistics();
|
1969
|
+
PrintF("Code comment statistics (\" [ comment-txt : size/ "
|
1970
|
+
"count (average)\"):\n");
|
1971
|
+
for (int i = 0; i <= kMaxComments; i++) {
|
1972
|
+
const CommentStatistic& cs = comments_statistics[i];
|
1973
|
+
if (cs.size > 0) {
|
1974
|
+
PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
|
1975
|
+
cs.size/cs.count);
|
1976
|
+
}
|
1977
|
+
}
|
1978
|
+
PrintF("\n");
|
1979
|
+
}
|
1980
|
+
|
1981
|
+
|
1982
|
+
void PagedSpace::ResetCodeStatistics() {
|
1983
|
+
ClearCodeKindStatistics();
|
1984
|
+
for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
|
1985
|
+
comments_statistics[kMaxComments].comment = "Unknown";
|
1986
|
+
comments_statistics[kMaxComments].size = 0;
|
1987
|
+
comments_statistics[kMaxComments].count = 0;
|
1988
|
+
}
|
1989
|
+
|
1990
|
+
|
1991
|
+
// Adds comment to 'comment_statistics' table. Performance OK sa long as
|
1992
|
+
// 'kMaxComments' is small
|
1993
|
+
static void EnterComment(const char* comment, int delta) {
|
1994
|
+
// Do not count empty comments
|
1995
|
+
if (delta <= 0) return;
|
1996
|
+
CommentStatistic* cs = &comments_statistics[kMaxComments];
|
1997
|
+
// Search for a free or matching entry in 'comments_statistics': 'cs'
|
1998
|
+
// points to result.
|
1999
|
+
for (int i = 0; i < kMaxComments; i++) {
|
2000
|
+
if (comments_statistics[i].comment == NULL) {
|
2001
|
+
cs = &comments_statistics[i];
|
2002
|
+
cs->comment = comment;
|
2003
|
+
break;
|
2004
|
+
} else if (strcmp(comments_statistics[i].comment, comment) == 0) {
|
2005
|
+
cs = &comments_statistics[i];
|
2006
|
+
break;
|
2007
|
+
}
|
2008
|
+
}
|
2009
|
+
// Update entry for 'comment'
|
2010
|
+
cs->size += delta;
|
2011
|
+
cs->count += 1;
|
2012
|
+
}
|
2013
|
+
|
2014
|
+
|
2015
|
+
// Call for each nested comment start (start marked with '[ xxx', end marked
|
2016
|
+
// with ']'. RelocIterator 'it' must point to a comment reloc info.
|
2017
|
+
static void CollectCommentStatistics(RelocIterator* it) {
|
2018
|
+
ASSERT(!it->done());
|
2019
|
+
ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
|
2020
|
+
const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
|
2021
|
+
if (tmp[0] != '[') {
|
2022
|
+
// Not a nested comment; skip
|
2023
|
+
return;
|
2024
|
+
}
|
2025
|
+
|
2026
|
+
// Search for end of nested comment or a new nested comment
|
2027
|
+
const char* const comment_txt =
|
2028
|
+
reinterpret_cast<const char*>(it->rinfo()->data());
|
2029
|
+
const byte* prev_pc = it->rinfo()->pc();
|
2030
|
+
int flat_delta = 0;
|
2031
|
+
it->next();
|
2032
|
+
while (true) {
|
2033
|
+
// All nested comments must be terminated properly, and therefore exit
|
2034
|
+
// from loop.
|
2035
|
+
ASSERT(!it->done());
|
2036
|
+
if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
|
2037
|
+
const char* const txt =
|
2038
|
+
reinterpret_cast<const char*>(it->rinfo()->data());
|
2039
|
+
flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
|
2040
|
+
if (txt[0] == ']') break; // End of nested comment
|
2041
|
+
// A new comment
|
2042
|
+
CollectCommentStatistics(it);
|
2043
|
+
// Skip code that was covered with previous comment
|
2044
|
+
prev_pc = it->rinfo()->pc();
|
2045
|
+
}
|
2046
|
+
it->next();
|
2047
|
+
}
|
2048
|
+
EnterComment(comment_txt, flat_delta);
|
2049
|
+
}
|
2050
|
+
|
2051
|
+
|
2052
|
+
// Collects code size statistics:
|
2053
|
+
// - by code kind
|
2054
|
+
// - by code comment
|
2055
|
+
void PagedSpace::CollectCodeStatistics() {
|
2056
|
+
HeapObjectIterator obj_it(this);
|
2057
|
+
while (obj_it.has_next()) {
|
2058
|
+
HeapObject* obj = obj_it.next();
|
2059
|
+
if (obj->IsCode()) {
|
2060
|
+
Code* code = Code::cast(obj);
|
2061
|
+
code_kind_statistics[code->kind()] += code->Size();
|
2062
|
+
RelocIterator it(code);
|
2063
|
+
int delta = 0;
|
2064
|
+
const byte* prev_pc = code->instruction_start();
|
2065
|
+
while (!it.done()) {
|
2066
|
+
if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
|
2067
|
+
delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
|
2068
|
+
CollectCommentStatistics(&it);
|
2069
|
+
prev_pc = it.rinfo()->pc();
|
2070
|
+
}
|
2071
|
+
it.next();
|
2072
|
+
}
|
2073
|
+
|
2074
|
+
ASSERT(code->instruction_start() <= prev_pc &&
|
2075
|
+
prev_pc <= code->relocation_start());
|
2076
|
+
delta += static_cast<int>(code->relocation_start() - prev_pc);
|
2077
|
+
EnterComment("NoComment", delta);
|
2078
|
+
}
|
2079
|
+
}
|
2080
|
+
}
|
2081
|
+
|
2082
|
+
|
2083
|
+
void OldSpace::ReportStatistics() {
|
2084
|
+
int pct = Available() * 100 / Capacity();
|
2085
|
+
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
|
2086
|
+
Capacity(), Waste(), Available(), pct);
|
2087
|
+
|
2088
|
+
// Report remembered set statistics.
|
2089
|
+
int rset_marked_pointers = 0;
|
2090
|
+
int rset_marked_arrays = 0;
|
2091
|
+
int rset_marked_array_elements = 0;
|
2092
|
+
int cross_gen_pointers = 0;
|
2093
|
+
int cross_gen_array_elements = 0;
|
2094
|
+
|
2095
|
+
PageIterator page_it(this, PageIterator::PAGES_IN_USE);
|
2096
|
+
while (page_it.has_next()) {
|
2097
|
+
Page* p = page_it.next();
|
2098
|
+
|
2099
|
+
for (Address rset_addr = p->RSetStart();
|
2100
|
+
rset_addr < p->RSetEnd();
|
2101
|
+
rset_addr += kIntSize) {
|
2102
|
+
int rset = Memory::int_at(rset_addr);
|
2103
|
+
if (rset != 0) {
|
2104
|
+
// Bits were set
|
2105
|
+
int intoff =
|
2106
|
+
static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
|
2107
|
+
int bitoff = 0;
|
2108
|
+
for (; bitoff < kBitsPerInt; ++bitoff) {
|
2109
|
+
if ((rset & (1 << bitoff)) != 0) {
|
2110
|
+
int bitpos = intoff*kBitsPerByte + bitoff;
|
2111
|
+
Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
|
2112
|
+
Object** obj = reinterpret_cast<Object**>(slot);
|
2113
|
+
if (*obj == Heap::raw_unchecked_fixed_array_map()) {
|
2114
|
+
rset_marked_arrays++;
|
2115
|
+
FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
|
2116
|
+
|
2117
|
+
rset_marked_array_elements += fa->length();
|
2118
|
+
// Manually inline FixedArray::IterateBody
|
2119
|
+
Address elm_start = slot + FixedArray::kHeaderSize;
|
2120
|
+
Address elm_stop = elm_start + fa->length() * kPointerSize;
|
2121
|
+
for (Address elm_addr = elm_start;
|
2122
|
+
elm_addr < elm_stop; elm_addr += kPointerSize) {
|
2123
|
+
// Filter non-heap-object pointers
|
2124
|
+
Object** elm_p = reinterpret_cast<Object**>(elm_addr);
|
2125
|
+
if (Heap::InNewSpace(*elm_p))
|
2126
|
+
cross_gen_array_elements++;
|
2127
|
+
}
|
2128
|
+
} else {
|
2129
|
+
rset_marked_pointers++;
|
2130
|
+
if (Heap::InNewSpace(*obj))
|
2131
|
+
cross_gen_pointers++;
|
2132
|
+
}
|
2133
|
+
}
|
2134
|
+
}
|
2135
|
+
}
|
2136
|
+
}
|
2137
|
+
}
|
2138
|
+
|
2139
|
+
pct = rset_marked_pointers == 0 ?
|
2140
|
+
0 : cross_gen_pointers * 100 / rset_marked_pointers;
|
2141
|
+
PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
|
2142
|
+
rset_marked_pointers, cross_gen_pointers, pct);
|
2143
|
+
PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
|
2144
|
+
PrintF(" elements %d, ", rset_marked_array_elements);
|
2145
|
+
pct = rset_marked_array_elements == 0 ? 0
|
2146
|
+
: cross_gen_array_elements * 100 / rset_marked_array_elements;
|
2147
|
+
PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
|
2148
|
+
PrintF(" total rset-marked bits %d\n",
|
2149
|
+
(rset_marked_pointers + rset_marked_arrays));
|
2150
|
+
pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
|
2151
|
+
: (cross_gen_pointers + cross_gen_array_elements) * 100 /
|
2152
|
+
(rset_marked_pointers + rset_marked_array_elements);
|
2153
|
+
PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
|
2154
|
+
(rset_marked_pointers + rset_marked_array_elements),
|
2155
|
+
(cross_gen_pointers + cross_gen_array_elements),
|
2156
|
+
pct);
|
2157
|
+
|
2158
|
+
ClearHistograms();
|
2159
|
+
HeapObjectIterator obj_it(this);
|
2160
|
+
while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
|
2161
|
+
ReportHistogram(true);
|
2162
|
+
}
|
2163
|
+
|
2164
|
+
|
2165
|
+
// Dump the range of remembered set words between [start, end) corresponding
|
2166
|
+
// to the pointers starting at object_p. The allocation_top is an object
|
2167
|
+
// pointer which should not be read past. This is important for large object
|
2168
|
+
// pages, where some bits in the remembered set range do not correspond to
|
2169
|
+
// allocated addresses.
|
2170
|
+
static void PrintRSetRange(Address start, Address end, Object** object_p,
|
2171
|
+
Address allocation_top) {
|
2172
|
+
Address rset_address = start;
|
2173
|
+
|
2174
|
+
// If the range starts on on odd numbered word (eg, for large object extra
|
2175
|
+
// remembered set ranges), print some spaces.
|
2176
|
+
if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
|
2177
|
+
PrintF(" ");
|
2178
|
+
}
|
2179
|
+
|
2180
|
+
// Loop over all the words in the range.
|
2181
|
+
while (rset_address < end) {
|
2182
|
+
uint32_t rset_word = Memory::uint32_at(rset_address);
|
2183
|
+
int bit_position = 0;
|
2184
|
+
|
2185
|
+
// Loop over all the bits in the word.
|
2186
|
+
while (bit_position < kBitsPerInt) {
|
2187
|
+
if (object_p == reinterpret_cast<Object**>(allocation_top)) {
|
2188
|
+
// Print a bar at the allocation pointer.
|
2189
|
+
PrintF("|");
|
2190
|
+
} else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
|
2191
|
+
// Do not dereference object_p past the allocation pointer.
|
2192
|
+
PrintF("#");
|
2193
|
+
} else if ((rset_word & (1 << bit_position)) == 0) {
|
2194
|
+
// Print a dot for zero bits.
|
2195
|
+
PrintF(".");
|
2196
|
+
} else if (Heap::InNewSpace(*object_p)) {
|
2197
|
+
// Print an X for one bits for pointers to new space.
|
2198
|
+
PrintF("X");
|
2199
|
+
} else {
|
2200
|
+
// Print a circle for one bits for pointers to old space.
|
2201
|
+
PrintF("o");
|
2202
|
+
}
|
2203
|
+
|
2204
|
+
// Print a space after every 8th bit except the last.
|
2205
|
+
if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
|
2206
|
+
PrintF(" ");
|
2207
|
+
}
|
2208
|
+
|
2209
|
+
// Advance to next bit.
|
2210
|
+
bit_position++;
|
2211
|
+
object_p++;
|
2212
|
+
}
|
2213
|
+
|
2214
|
+
// Print a newline after every odd numbered word, otherwise a space.
|
2215
|
+
if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
|
2216
|
+
PrintF("\n");
|
2217
|
+
} else {
|
2218
|
+
PrintF(" ");
|
2219
|
+
}
|
2220
|
+
|
2221
|
+
// Advance to next remembered set word.
|
2222
|
+
rset_address += kIntSize;
|
2223
|
+
}
|
2224
|
+
}
|
2225
|
+
|
2226
|
+
|
2227
|
+
void PagedSpace::DoPrintRSet(const char* space_name) {
|
2228
|
+
PageIterator it(this, PageIterator::PAGES_IN_USE);
|
2229
|
+
while (it.has_next()) {
|
2230
|
+
Page* p = it.next();
|
2231
|
+
PrintF("%s page 0x%x:\n", space_name, p);
|
2232
|
+
PrintRSetRange(p->RSetStart(), p->RSetEnd(),
|
2233
|
+
reinterpret_cast<Object**>(p->ObjectAreaStart()),
|
2234
|
+
p->AllocationTop());
|
2235
|
+
PrintF("\n");
|
2236
|
+
}
|
2237
|
+
}
|
2238
|
+
|
2239
|
+
|
2240
|
+
void OldSpace::PrintRSet() { DoPrintRSet("old"); }
|
2241
|
+
#endif
|
2242
|
+
|
2243
|
+
// -----------------------------------------------------------------------------
|
2244
|
+
// FixedSpace implementation
|
2245
|
+
|
2246
|
+
void FixedSpace::PrepareForMarkCompact(bool will_compact) {
|
2247
|
+
if (will_compact) {
|
2248
|
+
// Reset relocation info.
|
2249
|
+
MCResetRelocationInfo();
|
2250
|
+
|
2251
|
+
// During a compacting collection, everything in the space is considered
|
2252
|
+
// 'available' (set by the call to MCResetRelocationInfo) and we will
|
2253
|
+
// rediscover live and wasted bytes during the collection.
|
2254
|
+
ASSERT(Available() == Capacity());
|
2255
|
+
} else {
|
2256
|
+
// During a non-compacting collection, everything below the linear
|
2257
|
+
// allocation pointer except wasted top-of-page blocks is considered
|
2258
|
+
// allocated and we will rediscover available bytes during the
|
2259
|
+
// collection.
|
2260
|
+
accounting_stats_.AllocateBytes(free_list_.available());
|
2261
|
+
}
|
2262
|
+
|
2263
|
+
// Clear the free list before a full GC---it will be rebuilt afterward.
|
2264
|
+
free_list_.Reset();
|
2265
|
+
}
|
2266
|
+
|
2267
|
+
|
2268
|
+
// Commits the mark-compact forwarding state as this space's live allocation
// state and recomputes per-page waste for every page used by the collector.
void FixedSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet wasted any space.
  ASSERT(Waste() == 0);

  // Update allocation_top of each page in use and compute waste.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* page = it.next();
    Address page_top = page->AllocationTop();
    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
    if (it.has_next()) {
      // Only non-final pages charge their unusable tail as waste; the last
      // page keeps that gap as linear allocation room.
      accounting_stats_.WasteBytes(
          static_cast<int>(page->ObjectAreaEnd() - page_top));
    }
  }

  // Make sure the computed size - based on the used portion of the
  // pages in use - matches the size we adjust during allocation.
  ASSERT(computed_size == Size());
}
|
2294
|
+
|
2295
|
+
|
2296
|
+
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
// Returns NULL when the caller should trigger a garbage collection.
HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
  // This space only ever hands out objects of one fixed size.
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed.
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation unless
  // that is currently forbidden. The fixed space free list implicitly assumes
  // that all free blocks are of the fixed size.
  if (!Heap::linear_allocation()) {
    Object* result = free_list_.Allocate();
    if (!result->IsFailure()) {
      accounting_stats_.AllocateBytes(size_in_bytes);
      return HeapObject::cast(result);
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}
|
2336
|
+
|
2337
|
+
|
2338
|
+
// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold a
// map.
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                           int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  // The abandoned page must have been filled down to exactly its unusable
  // tail (page_extra_) before we move on.
  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  accounting_stats_.WasteBytes(page_extra_);
  SetAllocationInfo(&allocation_info_, current_page->next_page());
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}
|
2350
|
+
|
2351
|
+
|
2352
|
+
#ifdef DEBUG
|
2353
|
+
// Debug-only: prints capacity/waste/availability figures, scans the
// remembered set of every in-use page to count old-to-new pointers, and
// reports an object-type histogram for this space.
void FixedSpace::ReportStatistics() {
  int pct = Available() * 100 / Capacity();
  PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  // Report remembered set statistics.
  int rset_marked_pointers = 0;
  int cross_gen_pointers = 0;

  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
  while (page_it.has_next()) {
    Page* p = page_it.next();

    // Walk the page's remembered set one int-sized word at a time.
    for (Address rset_addr = p->RSetStart();
         rset_addr < p->RSetEnd();
         rset_addr += kIntSize) {
      int rset = Memory::int_at(rset_addr);
      if (rset != 0) {
        // Bits were set
        int intoff =
            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
        int bitoff = 0;
        for (; bitoff < kBitsPerInt; ++bitoff) {
          if ((rset & (1 << bitoff)) != 0) {
            // Each set bit maps back to a word-aligned slot in the page.
            int bitpos = intoff*kBitsPerByte + bitoff;
            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
            Object** obj = reinterpret_cast<Object**>(slot);
            rset_marked_pointers++;
            if (Heap::InNewSpace(*obj))
              cross_gen_pointers++;
          }
        }
      }
    }
  }

  pct = rset_marked_pointers == 0 ?
        0 : cross_gen_pointers * 100 / rset_marked_pointers;
  PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
         rset_marked_pointers, cross_gen_pointers, pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
  ReportHistogram(false);
}
|
2399
|
+
|
2400
|
+
|
2401
|
+
// Debug-only: dumps this space's remembered set, labeled with its name.
void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
|
2402
|
+
#endif
|
2403
|
+
|
2404
|
+
|
2405
|
+
// -----------------------------------------------------------------------------
|
2406
|
+
// MapSpace implementation
|
2407
|
+
|
2408
|
+
// Prepares the map space for mark-compact. On top of the FixedSpace
// bookkeeping, a compacting collection rebuilds the page-address table that
// is used to encode forwarding addresses of maps.
void MapSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the super class.
  FixedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Initialize map index entry.
    int page_count = 0;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (it.has_next()) {
      ASSERT_MAP_PAGE_INDEX(page_count);

      Page* p = it.next();
      ASSERT(p->mc_page_index == page_count);

      page_addresses_[page_count++] = p->address();
    }
  }
}
|
2426
|
+
|
2427
|
+
|
2428
|
+
#ifdef DEBUG
|
2429
|
+
void MapSpace::VerifyObject(HeapObject* object) {
|
2430
|
+
// The object should be a map or a free-list node.
|
2431
|
+
ASSERT(object->IsMap() || object->IsByteArray());
|
2432
|
+
}
|
2433
|
+
#endif
|
2434
|
+
|
2435
|
+
|
2436
|
+
// -----------------------------------------------------------------------------
|
2437
|
+
// GlobalPropertyCellSpace implementation
|
2438
|
+
|
2439
|
+
#ifdef DEBUG
|
2440
|
+
void CellSpace::VerifyObject(HeapObject* object) {
|
2441
|
+
// The object should be a global object property cell or a free-list node.
|
2442
|
+
ASSERT(object->IsJSGlobalPropertyCell() ||
|
2443
|
+
object->map() == Heap::two_pointer_filler_map());
|
2444
|
+
}
|
2445
|
+
#endif
|
2446
|
+
|
2447
|
+
|
2448
|
+
// -----------------------------------------------------------------------------
|
2449
|
+
// LargeObjectIterator
|
2450
|
+
|
2451
|
+
// Iterates every chunk in the given large object space; no size callback.
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space)
    : current_(space->first_chunk_),
      size_func_(NULL) {
}
|
2455
|
+
|
2456
|
+
|
2457
|
+
// Iterates every chunk in the given large object space, using size_func to
// report each object's size.
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func)
    : current_(space->first_chunk_),
      size_func_(size_func) {
}
|
2462
|
+
|
2463
|
+
|
2464
|
+
// Returns the object in the current chunk and advances to the next chunk.
// Must only be called while has_next() is true.
HeapObject* LargeObjectIterator::next() {
  ASSERT(has_next());
  HeapObject* object = current_->GetObject();
  current_ = current_->next();
  return object;
}
|
2470
|
+
|
2471
|
+
|
2472
|
+
// -----------------------------------------------------------------------------
|
2473
|
+
// LargeObjectChunk
|
2474
|
+
|
2475
|
+
// Allocates raw memory for one chunk large enough to hold an object of
// size_in_bytes plus the page header. The actual committed size is returned
// through chunk_size. Returns NULL on failure, including when the OS hands
// back less memory than was requested.
LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                        size_t* chunk_size,
                                        Executability executable) {
  size_t requested = ChunkSizeFor(size_in_bytes);
  void* mem = MemoryAllocator::AllocateRawMemory(requested,
                                                 chunk_size,
                                                 executable);
  if (mem == NULL) return NULL;
  LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
  if (*chunk_size < requested) {
    // A short allocation cannot hold the object; release it and fail.
    MemoryAllocator::FreeRawMemory(mem, *chunk_size);
    LOG(DeleteEvent("LargeObjectChunk", mem));
    return NULL;
  }
  return reinterpret_cast<LargeObjectChunk*>(mem);
}
|
2491
|
+
|
2492
|
+
|
2493
|
+
// Computes the raw-memory size needed for a large object of size_in_bytes:
// the object itself, the page header, and -- when the OS allocation
// granularity is finer than a page -- extra slack so the page start can be
// rounded up to a page boundary.
int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
  int os_alignment = static_cast<int>(OS::AllocateAlignment());
  if (os_alignment < Page::kPageSize)
    size_in_bytes += (Page::kPageSize - os_alignment);
  return size_in_bytes + Page::kObjectStartOffset;
}
|
2499
|
+
|
2500
|
+
// -----------------------------------------------------------------------------
|
2501
|
+
// LargeObjectSpace
|
2502
|
+
|
2503
|
+
// Large object space keeps its chunks in a singly linked list. Executable
// protection is handled per allocation, so the space itself is registered
// as NOT_EXECUTABLE.
LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
    : Space(id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_chunk_(NULL),
      size_(0),
      page_count_(0) {}
|
2508
|
+
|
2509
|
+
|
2510
|
+
// Resets the space to its initial empty state. Cannot fail.
bool LargeObjectSpace::Setup() {
  first_chunk_ = NULL;
  page_count_ = 0;
  size_ = 0;
  return true;
}
|
2516
|
+
|
2517
|
+
|
2518
|
+
// Frees every chunk owned by the space and zeroes the accounting.
void LargeObjectSpace::TearDown() {
  while (first_chunk_ != NULL) {
    LargeObjectChunk* chunk = first_chunk_;
    // Unlink first so the list head never points at freed memory.
    first_chunk_ = first_chunk_->next();
    LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
    MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
  }

  size_ = 0;
  page_count_ = 0;
}
|
2529
|
+
|
2530
|
+
|
2531
|
+
#ifdef ENABLE_HEAP_PROTECTION
|
2532
|
+
|
2533
|
+
void LargeObjectSpace::Protect() {
|
2534
|
+
LargeObjectChunk* chunk = first_chunk_;
|
2535
|
+
while (chunk != NULL) {
|
2536
|
+
MemoryAllocator::Protect(chunk->address(), chunk->size());
|
2537
|
+
chunk = chunk->next();
|
2538
|
+
}
|
2539
|
+
}
|
2540
|
+
|
2541
|
+
|
2542
|
+
void LargeObjectSpace::Unprotect() {
|
2543
|
+
LargeObjectChunk* chunk = first_chunk_;
|
2544
|
+
while (chunk != NULL) {
|
2545
|
+
bool is_code = chunk->GetObject()->IsCode();
|
2546
|
+
MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
|
2547
|
+
is_code ? EXECUTABLE : NOT_EXECUTABLE);
|
2548
|
+
chunk = chunk->next();
|
2549
|
+
}
|
2550
|
+
}
|
2551
|
+
|
2552
|
+
#endif
|
2553
|
+
|
2554
|
+
|
2555
|
+
// Shared allocation path for the large object space. requested_size may
// exceed object_size to leave room for the extra remembered set of a large
// fixed array; that surplus is zeroed. Returns a retry-after-GC failure when
// the old-generation limit is hit or the chunk allocation fails.
Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                              int object_size,
                                              Executability executable) {
  ASSERT(0 < object_size && object_size <= requested_size);

  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(requested_size, identity());
  }

  size_t chunk_size;
  LargeObjectChunk* chunk =
      LargeObjectChunk::New(requested_size, &chunk_size, executable);
  if (chunk == NULL) {
    return Failure::RetryAfterGC(requested_size, identity());
  }

  // Account for the new chunk and link it at the head of the chunk list.
  size_ += static_cast<int>(chunk_size);
  page_count_++;
  chunk->set_next(first_chunk_);
  chunk->set_size(chunk_size);
  first_chunk_ = chunk;

  // Set the object address and size in the page header and clear its
  // remembered set.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  Address object_address = page->ObjectAreaStart();
  // Clear the low order bit of the second word in the page to flag it as a
  // large object page. If the chunk_size happened to be written there, its
  // low order bit should already be clear.
  ASSERT((chunk_size & 0x1) == 0);
  page->is_normal_page &= ~0x1;
  page->ClearRSet();
  int extra_bytes = requested_size - object_size;
  if (extra_bytes > 0) {
    // The extra memory for the remembered set should be cleared.
    memset(object_address + object_size, 0, extra_bytes);
  }

  return HeapObject::FromAddress(object_address);
}
|
2597
|
+
|
2598
|
+
|
2599
|
+
// Allocates a large executable object. Code objects carry no extra
// remembered set, so the requested size equals the object size.
Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes, size_in_bytes, EXECUTABLE);
}
|
2605
|
+
|
2606
|
+
|
2607
|
+
// Allocates a large fixed array, reserving zeroed space after the object
// for its extra remembered set.
Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  const int requested_size = size_in_bytes + ExtraRSetBytesFor(size_in_bytes);
  return AllocateRawInternal(requested_size, size_in_bytes, NOT_EXECUTABLE);
}
|
2614
|
+
|
2615
|
+
|
2616
|
+
// Allocates a large non-executable object with no remembered-set tail.
Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes, size_in_bytes, NOT_EXECUTABLE);
}
|
2622
|
+
|
2623
|
+
|
2624
|
+
// GC support
// Returns the large object whose chunk contains address a, or a plain
// (non-retry) failure if a does not fall inside any chunk of this space.
Object* LargeObjectSpace::FindObject(Address a) {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= a && a < chunk_address + chunk->size()) {
      return chunk->GetObject();
    }
  }
  return Failure::Exception();
}
|
2636
|
+
|
2637
|
+
|
2638
|
+
// Clears the remembered sets of all large fixed arrays, including the extra
// remembered-set area that follows an array extending past its first page.
void LargeObjectSpace::ClearRSet() {
  ASSERT(Page::is_rset_in_use());

  LargeObjectIterator it(this);
  while (it.has_next()) {
    HeapObject* object = it.next();
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays need remembered set support.
    if (object->IsFixedArray()) {
      // Clear the normal remembered set region of the page;
      Page* page = Page::FromAddress(object->address());
      page->ClearRSet();

      // Clear the extra remembered set.
      int size = object->Size();
      int extra_rset_bytes = ExtraRSetBytesFor(size);
      memset(object->address() + size, 0, extra_rset_bytes);
    }
  }
}
|
2658
|
+
|
2659
|
+
|
2660
|
+
// Visits every remembered-set-marked slot of the large fixed arrays in this
// space, invoking copy_object_func on each. Covers both the page's normal
// remembered-set region and the extra region for elements past the page.
void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
  ASSERT(Page::is_rset_in_use());

  // Created once on first use; NULL when the stats table is disabled.
  static void* lo_rset_histogram = StatsTable::CreateHistogram(
      "V8.RSetLO",
      0,
      // Keeping this histogram's buckets the same as the paged space histogram.
      Page::kObjectAreaSize / kPointerSize,
      30);

  LargeObjectIterator it(this);
  while (it.has_next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    HeapObject* object = it.next();
    if (object->IsFixedArray()) {
      // Iterate the normal page remembered set range.
      Page* page = Page::FromAddress(object->address());
      Address object_end = object->address() + object->Size();
      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
                                         Min(page->ObjectAreaEnd(), object_end),
                                         page->RSetStart(),
                                         copy_object_func);

      // Iterate the extra array elements.
      if (object_end > page->ObjectAreaEnd()) {
        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
                                        object_end, copy_object_func);
      }
      if (lo_rset_histogram != NULL) {
        StatsTable::AddHistogramSample(lo_rset_histogram, count);
      }
    }
  }
}
|
2696
|
+
|
2697
|
+
|
2698
|
+
// Sweeps the large object space: clears the mark of surviving objects and
// frees the chunks of unmarked (dead) objects, unlinking them from the
// chunk list as it goes.
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      // Capture address/size before the chunk memory is released.
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      if (object->IsCode()) {
        LOG(CodeDeleteEvent(object->address()));
      }
      size_ -= static_cast<int>(chunk_size);
      page_count_--;
      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}
|
2731
|
+
|
2732
|
+
|
2733
|
+
// Returns true if object lives in this space, judged by its page's
// large-object flag. The slow assert cross-checks against FindObject.
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}
|
2742
|
+
|
2743
|
+
|
2744
|
+
#ifdef DEBUG
|
2745
|
+
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
// Debug-only consistency check: walks the chunk list directly and validates
// each object's placement, map, allowed type, and remembered-set bits.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather then using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRSetSet.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(Heap::Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (Heap::InNewSpace(element_object)) {
            // A pointer into new space must have its remembered-set bit set.
            ASSERT(Page::IsRSetSet(object->address(),
                                   FixedArray::kHeaderSize + j * kPointerSize));
          }
        }
      }
    }
  }
}
|
2799
|
+
|
2800
|
+
|
2801
|
+
void LargeObjectSpace::Print() {
|
2802
|
+
LargeObjectIterator it(this);
|
2803
|
+
while (it.has_next()) {
|
2804
|
+
it.next()->Print();
|
2805
|
+
}
|
2806
|
+
}
|
2807
|
+
|
2808
|
+
|
2809
|
+
// Debug-only: prints the total byte size, the object count, and a per-type
// histogram for the large object space.
void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  while (it.has_next()) {
    num_objects++;
    CollectHistogramInfo(it.next());
  }

  PrintF(" number of objects %d\n", num_objects);
  if (num_objects > 0) ReportHistogram(false);
}
|
2822
|
+
|
2823
|
+
|
2824
|
+
// Debug-only: adds the size of each large Code object into the global
// per-kind code statistics table.
void LargeObjectSpace::CollectCodeStatistics() {
  LargeObjectIterator obj_it(this);
  while (obj_it.has_next()) {
    HeapObject* obj = obj_it.next();
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}
|
2834
|
+
|
2835
|
+
|
2836
|
+
void LargeObjectSpace::PrintRSet() {
|
2837
|
+
LargeObjectIterator it(this);
|
2838
|
+
while (it.has_next()) {
|
2839
|
+
HeapObject* object = it.next();
|
2840
|
+
if (object->IsFixedArray()) {
|
2841
|
+
Page* page = Page::FromAddress(object->address());
|
2842
|
+
|
2843
|
+
Address allocation_top = object->address() + object->Size();
|
2844
|
+
PrintF("large page 0x%x:\n", page);
|
2845
|
+
PrintRSetRange(page->RSetStart(), page->RSetEnd(),
|
2846
|
+
reinterpret_cast<Object**>(object->address()),
|
2847
|
+
allocation_top);
|
2848
|
+
int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
|
2849
|
+
int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
|
2850
|
+
kBitsPerInt);
|
2851
|
+
PrintF("------------------------------------------------------------"
|
2852
|
+
"-----------\n");
|
2853
|
+
PrintRSetRange(allocation_top,
|
2854
|
+
allocation_top + extra_rset_bits / kBitsPerByte,
|
2855
|
+
reinterpret_cast<Object**>(object->address()
|
2856
|
+
+ Page::kObjectAreaSize),
|
2857
|
+
allocation_top);
|
2858
|
+
PrintF("\n");
|
2859
|
+
}
|
2860
|
+
}
|
2861
|
+
}
|
2862
|
+
#endif // DEBUG
|
2863
|
+
|
2864
|
+
} } // namespace v8::internal
|