libv8-freebsd 3.3.10.4
- data/.gitignore +9 -0
- data/.gitmodules +3 -0
- data/Gemfile +4 -0
- data/README.md +75 -0
- data/Rakefile +115 -0
- data/ext/libv8/extconf.rb +27 -0
- data/lib/libv8.rb +15 -0
- data/lib/libv8/Makefile +39 -0
- data/lib/libv8/detect_cpu.rb +27 -0
- data/lib/libv8/fpic-on-freebsd-amd64.patch +16 -0
- data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
- data/lib/libv8/scons/CHANGES.txt +5541 -0
- data/lib/libv8/scons/LICENSE.txt +20 -0
- data/lib/libv8/scons/MANIFEST +200 -0
- data/lib/libv8/scons/PKG-INFO +13 -0
- data/lib/libv8/scons/README.txt +243 -0
- data/lib/libv8/scons/RELEASE.txt +100 -0
- data/lib/libv8/scons/engine/SCons/Action.py +1257 -0
- data/lib/libv8/scons/engine/SCons/Builder.py +877 -0
- data/lib/libv8/scons/engine/SCons/CacheDir.py +216 -0
- data/lib/libv8/scons/engine/SCons/Conftest.py +793 -0
- data/lib/libv8/scons/engine/SCons/Debug.py +220 -0
- data/lib/libv8/scons/engine/SCons/Defaults.py +494 -0
- data/lib/libv8/scons/engine/SCons/Environment.py +2417 -0
- data/lib/libv8/scons/engine/SCons/Errors.py +205 -0
- data/lib/libv8/scons/engine/SCons/Executor.py +633 -0
- data/lib/libv8/scons/engine/SCons/Job.py +435 -0
- data/lib/libv8/scons/engine/SCons/Memoize.py +244 -0
- data/lib/libv8/scons/engine/SCons/Node/Alias.py +152 -0
- data/lib/libv8/scons/engine/SCons/Node/FS.py +3302 -0
- data/lib/libv8/scons/engine/SCons/Node/Python.py +128 -0
- data/lib/libv8/scons/engine/SCons/Node/__init__.py +1329 -0
- data/lib/libv8/scons/engine/SCons/Options/BoolOption.py +50 -0
- data/lib/libv8/scons/engine/SCons/Options/EnumOption.py +50 -0
- data/lib/libv8/scons/engine/SCons/Options/ListOption.py +50 -0
- data/lib/libv8/scons/engine/SCons/Options/PackageOption.py +50 -0
- data/lib/libv8/scons/engine/SCons/Options/PathOption.py +76 -0
- data/lib/libv8/scons/engine/SCons/Options/__init__.py +67 -0
- data/lib/libv8/scons/engine/SCons/PathList.py +231 -0
- data/lib/libv8/scons/engine/SCons/Platform/__init__.py +241 -0
- data/lib/libv8/scons/engine/SCons/Platform/aix.py +69 -0
- data/lib/libv8/scons/engine/SCons/Platform/cygwin.py +55 -0
- data/lib/libv8/scons/engine/SCons/Platform/darwin.py +70 -0
- data/lib/libv8/scons/engine/SCons/Platform/hpux.py +46 -0
- data/lib/libv8/scons/engine/SCons/Platform/irix.py +44 -0
- data/lib/libv8/scons/engine/SCons/Platform/os2.py +58 -0
- data/lib/libv8/scons/engine/SCons/Platform/posix.py +263 -0
- data/lib/libv8/scons/engine/SCons/Platform/sunos.py +50 -0
- data/lib/libv8/scons/engine/SCons/Platform/win32.py +385 -0
- data/lib/libv8/scons/engine/SCons/SConf.py +1030 -0
- data/lib/libv8/scons/engine/SCons/SConsign.py +389 -0
- data/lib/libv8/scons/engine/SCons/Scanner/C.py +132 -0
- data/lib/libv8/scons/engine/SCons/Scanner/D.py +73 -0
- data/lib/libv8/scons/engine/SCons/Scanner/Dir.py +109 -0
- data/lib/libv8/scons/engine/SCons/Scanner/Fortran.py +316 -0
- data/lib/libv8/scons/engine/SCons/Scanner/IDL.py +48 -0
- data/lib/libv8/scons/engine/SCons/Scanner/LaTeX.py +387 -0
- data/lib/libv8/scons/engine/SCons/Scanner/Prog.py +101 -0
- data/lib/libv8/scons/engine/SCons/Scanner/RC.py +55 -0
- data/lib/libv8/scons/engine/SCons/Scanner/__init__.py +413 -0
- data/lib/libv8/scons/engine/SCons/Script/Interactive.py +384 -0
- data/lib/libv8/scons/engine/SCons/Script/Main.py +1405 -0
- data/lib/libv8/scons/engine/SCons/Script/SConsOptions.py +939 -0
- data/lib/libv8/scons/engine/SCons/Script/SConscript.py +640 -0
- data/lib/libv8/scons/engine/SCons/Script/__init__.py +412 -0
- data/lib/libv8/scons/engine/SCons/Sig.py +63 -0
- data/lib/libv8/scons/engine/SCons/Subst.py +904 -0
- data/lib/libv8/scons/engine/SCons/Taskmaster.py +1025 -0
- data/lib/libv8/scons/engine/SCons/Tool/386asm.py +61 -0
- data/lib/libv8/scons/engine/SCons/Tool/BitKeeper.py +67 -0
- data/lib/libv8/scons/engine/SCons/Tool/CVS.py +73 -0
- data/lib/libv8/scons/engine/SCons/Tool/FortranCommon.py +263 -0
- data/lib/libv8/scons/engine/SCons/Tool/JavaCommon.py +323 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/__init__.py +56 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/arch.py +61 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/common.py +240 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/netframework.py +82 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/sdk.py +391 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vc.py +459 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vs.py +526 -0
- data/lib/libv8/scons/engine/SCons/Tool/Perforce.py +103 -0
- data/lib/libv8/scons/engine/SCons/Tool/PharLapCommon.py +137 -0
- data/lib/libv8/scons/engine/SCons/Tool/RCS.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/SCCS.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/Subversion.py +71 -0
- data/lib/libv8/scons/engine/SCons/Tool/__init__.py +681 -0
- data/lib/libv8/scons/engine/SCons/Tool/aixc++.py +82 -0
- data/lib/libv8/scons/engine/SCons/Tool/aixcc.py +74 -0
- data/lib/libv8/scons/engine/SCons/Tool/aixf77.py +80 -0
- data/lib/libv8/scons/engine/SCons/Tool/aixlink.py +76 -0
- data/lib/libv8/scons/engine/SCons/Tool/applelink.py +71 -0
- data/lib/libv8/scons/engine/SCons/Tool/ar.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/as.py +78 -0
- data/lib/libv8/scons/engine/SCons/Tool/bcc32.py +81 -0
- data/lib/libv8/scons/engine/SCons/Tool/c++.py +99 -0
- data/lib/libv8/scons/engine/SCons/Tool/cc.py +102 -0
- data/lib/libv8/scons/engine/SCons/Tool/cvf.py +58 -0
- data/lib/libv8/scons/engine/SCons/Tool/default.py +50 -0
- data/lib/libv8/scons/engine/SCons/Tool/dmd.py +240 -0
- data/lib/libv8/scons/engine/SCons/Tool/dvi.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/dvipdf.py +125 -0
- data/lib/libv8/scons/engine/SCons/Tool/dvips.py +95 -0
- data/lib/libv8/scons/engine/SCons/Tool/f03.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/f77.py +62 -0
- data/lib/libv8/scons/engine/SCons/Tool/f90.py +62 -0
- data/lib/libv8/scons/engine/SCons/Tool/f95.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/filesystem.py +98 -0
- data/lib/libv8/scons/engine/SCons/Tool/fortran.py +62 -0
- data/lib/libv8/scons/engine/SCons/Tool/g++.py +90 -0
- data/lib/libv8/scons/engine/SCons/Tool/g77.py +73 -0
- data/lib/libv8/scons/engine/SCons/Tool/gas.py +53 -0
- data/lib/libv8/scons/engine/SCons/Tool/gcc.py +80 -0
- data/lib/libv8/scons/engine/SCons/Tool/gfortran.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/gnulink.py +62 -0
- data/lib/libv8/scons/engine/SCons/Tool/gs.py +81 -0
- data/lib/libv8/scons/engine/SCons/Tool/hpc++.py +84 -0
- data/lib/libv8/scons/engine/SCons/Tool/hpcc.py +53 -0
- data/lib/libv8/scons/engine/SCons/Tool/hplink.py +77 -0
- data/lib/libv8/scons/engine/SCons/Tool/icc.py +59 -0
- data/lib/libv8/scons/engine/SCons/Tool/icl.py +52 -0
- data/lib/libv8/scons/engine/SCons/Tool/ifl.py +72 -0
- data/lib/libv8/scons/engine/SCons/Tool/ifort.py +88 -0
- data/lib/libv8/scons/engine/SCons/Tool/ilink.py +59 -0
- data/lib/libv8/scons/engine/SCons/Tool/ilink32.py +60 -0
- data/lib/libv8/scons/engine/SCons/Tool/install.py +283 -0
- data/lib/libv8/scons/engine/SCons/Tool/intelc.py +522 -0
- data/lib/libv8/scons/engine/SCons/Tool/ipkg.py +67 -0
- data/lib/libv8/scons/engine/SCons/Tool/jar.py +116 -0
- data/lib/libv8/scons/engine/SCons/Tool/javac.py +230 -0
- data/lib/libv8/scons/engine/SCons/Tool/javah.py +137 -0
- data/lib/libv8/scons/engine/SCons/Tool/latex.py +80 -0
- data/lib/libv8/scons/engine/SCons/Tool/lex.py +97 -0
- data/lib/libv8/scons/engine/SCons/Tool/link.py +122 -0
- data/lib/libv8/scons/engine/SCons/Tool/linkloc.py +112 -0
- data/lib/libv8/scons/engine/SCons/Tool/m4.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/masm.py +77 -0
- data/lib/libv8/scons/engine/SCons/Tool/midl.py +88 -0
- data/lib/libv8/scons/engine/SCons/Tool/mingw.py +179 -0
- data/lib/libv8/scons/engine/SCons/Tool/mslib.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/mslink.py +318 -0
- data/lib/libv8/scons/engine/SCons/Tool/mssdk.py +50 -0
- data/lib/libv8/scons/engine/SCons/Tool/msvc.py +278 -0
- data/lib/libv8/scons/engine/SCons/Tool/msvs.py +1806 -0
- data/lib/libv8/scons/engine/SCons/Tool/mwcc.py +207 -0
- data/lib/libv8/scons/engine/SCons/Tool/mwld.py +107 -0
- data/lib/libv8/scons/engine/SCons/Tool/nasm.py +72 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/__init__.py +312 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/ipk.py +185 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/msi.py +527 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/rpm.py +365 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/src_tarbz2.py +43 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/src_targz.py +43 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/src_zip.py +43 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/tarbz2.py +44 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/targz.py +44 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/zip.py +44 -0
- data/lib/libv8/scons/engine/SCons/Tool/pdf.py +78 -0
- data/lib/libv8/scons/engine/SCons/Tool/pdflatex.py +84 -0
- data/lib/libv8/scons/engine/SCons/Tool/pdftex.py +109 -0
- data/lib/libv8/scons/engine/SCons/Tool/qt.py +336 -0
- data/lib/libv8/scons/engine/SCons/Tool/rmic.py +126 -0
- data/lib/libv8/scons/engine/SCons/Tool/rpcgen.py +70 -0
- data/lib/libv8/scons/engine/SCons/Tool/rpm.py +132 -0
- data/lib/libv8/scons/engine/SCons/Tool/sgiar.py +68 -0
- data/lib/libv8/scons/engine/SCons/Tool/sgic++.py +58 -0
- data/lib/libv8/scons/engine/SCons/Tool/sgicc.py +53 -0
- data/lib/libv8/scons/engine/SCons/Tool/sgilink.py +62 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunar.py +67 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunc++.py +142 -0
- data/lib/libv8/scons/engine/SCons/Tool/suncc.py +58 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunf77.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunf90.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunf95.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunlink.py +76 -0
- data/lib/libv8/scons/engine/SCons/Tool/swig.py +183 -0
- data/lib/libv8/scons/engine/SCons/Tool/tar.py +73 -0
- data/lib/libv8/scons/engine/SCons/Tool/tex.py +866 -0
- data/lib/libv8/scons/engine/SCons/Tool/textfile.py +175 -0
- data/lib/libv8/scons/engine/SCons/Tool/tlib.py +53 -0
- data/lib/libv8/scons/engine/SCons/Tool/wix.py +99 -0
- data/lib/libv8/scons/engine/SCons/Tool/yacc.py +140 -0
- data/lib/libv8/scons/engine/SCons/Tool/zip.py +99 -0
- data/lib/libv8/scons/engine/SCons/Util.py +1492 -0
- data/lib/libv8/scons/engine/SCons/Variables/BoolVariable.py +89 -0
- data/lib/libv8/scons/engine/SCons/Variables/EnumVariable.py +103 -0
- data/lib/libv8/scons/engine/SCons/Variables/ListVariable.py +135 -0
- data/lib/libv8/scons/engine/SCons/Variables/PackageVariable.py +106 -0
- data/lib/libv8/scons/engine/SCons/Variables/PathVariable.py +147 -0
- data/lib/libv8/scons/engine/SCons/Variables/__init__.py +312 -0
- data/lib/libv8/scons/engine/SCons/Warnings.py +246 -0
- data/lib/libv8/scons/engine/SCons/__init__.py +49 -0
- data/lib/libv8/scons/engine/SCons/compat/__init__.py +237 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_builtins.py +150 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_collections.py +45 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_dbm.py +45 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_hashlib.py +76 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_io.py +45 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_sets.py +563 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_subprocess.py +1281 -0
- data/lib/libv8/scons/engine/SCons/cpp.py +589 -0
- data/lib/libv8/scons/engine/SCons/dblite.py +254 -0
- data/lib/libv8/scons/engine/SCons/exitfuncs.py +77 -0
- data/lib/libv8/scons/os_spawnv_fix.diff +83 -0
- data/lib/libv8/scons/scons-time.1 +1017 -0
- data/lib/libv8/scons/scons.1 +15225 -0
- data/lib/libv8/scons/sconsign.1 +208 -0
- data/lib/libv8/scons/script/scons +196 -0
- data/lib/libv8/scons/script/scons-time +1544 -0
- data/lib/libv8/scons/script/scons.bat +34 -0
- data/lib/libv8/scons/script/sconsign +514 -0
- data/lib/libv8/scons/setup.cfg +5 -0
- data/lib/libv8/scons/setup.py +423 -0
- data/lib/libv8/v8/.gitignore +35 -0
- data/lib/libv8/v8/AUTHORS +44 -0
- data/lib/libv8/v8/ChangeLog +2839 -0
- data/lib/libv8/v8/LICENSE +52 -0
- data/lib/libv8/v8/LICENSE.strongtalk +29 -0
- data/lib/libv8/v8/LICENSE.v8 +26 -0
- data/lib/libv8/v8/LICENSE.valgrind +45 -0
- data/lib/libv8/v8/SConstruct +1478 -0
- data/lib/libv8/v8/build/README.txt +49 -0
- data/lib/libv8/v8/build/all.gyp +18 -0
- data/lib/libv8/v8/build/armu.gypi +32 -0
- data/lib/libv8/v8/build/common.gypi +144 -0
- data/lib/libv8/v8/build/gyp_v8 +145 -0
- data/lib/libv8/v8/include/v8-debug.h +395 -0
- data/lib/libv8/v8/include/v8-preparser.h +117 -0
- data/lib/libv8/v8/include/v8-profiler.h +505 -0
- data/lib/libv8/v8/include/v8-testing.h +104 -0
- data/lib/libv8/v8/include/v8.h +4124 -0
- data/lib/libv8/v8/include/v8stdint.h +53 -0
- data/lib/libv8/v8/preparser/SConscript +38 -0
- data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
- data/lib/libv8/v8/src/SConscript +368 -0
- data/lib/libv8/v8/src/accessors.cc +767 -0
- data/lib/libv8/v8/src/accessors.h +123 -0
- data/lib/libv8/v8/src/allocation-inl.h +49 -0
- data/lib/libv8/v8/src/allocation.cc +122 -0
- data/lib/libv8/v8/src/allocation.h +143 -0
- data/lib/libv8/v8/src/api.cc +5845 -0
- data/lib/libv8/v8/src/api.h +574 -0
- data/lib/libv8/v8/src/apinatives.js +110 -0
- data/lib/libv8/v8/src/apiutils.h +73 -0
- data/lib/libv8/v8/src/arguments.h +118 -0
- data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
- data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
- data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
- data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
- data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
- data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
- data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
- data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
- data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
- data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
- data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
- data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
- data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
- data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
- data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
- data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
- data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
- data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
- data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
- data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
- data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
- data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
- data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
- data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
- data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
- data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
- data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
- data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
- data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
- data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
- data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
- data/lib/libv8/v8/src/array.js +1366 -0
- data/lib/libv8/v8/src/assembler.cc +1207 -0
- data/lib/libv8/v8/src/assembler.h +858 -0
- data/lib/libv8/v8/src/ast-inl.h +112 -0
- data/lib/libv8/v8/src/ast.cc +1146 -0
- data/lib/libv8/v8/src/ast.h +2188 -0
- data/lib/libv8/v8/src/atomicops.h +167 -0
- data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
- data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
- data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
- data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
- data/lib/libv8/v8/src/bignum.cc +768 -0
- data/lib/libv8/v8/src/bignum.h +140 -0
- data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
- data/lib/libv8/v8/src/bootstrapper.h +188 -0
- data/lib/libv8/v8/src/builtins.cc +1707 -0
- data/lib/libv8/v8/src/builtins.h +371 -0
- data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
- data/lib/libv8/v8/src/cached-powers.cc +177 -0
- data/lib/libv8/v8/src/cached-powers.h +65 -0
- data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
- data/lib/libv8/v8/src/char-predicates.h +67 -0
- data/lib/libv8/v8/src/checks.cc +110 -0
- data/lib/libv8/v8/src/checks.h +296 -0
- data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
- data/lib/libv8/v8/src/circular-queue.cc +122 -0
- data/lib/libv8/v8/src/circular-queue.h +103 -0
- data/lib/libv8/v8/src/code-stubs.cc +267 -0
- data/lib/libv8/v8/src/code-stubs.h +1011 -0
- data/lib/libv8/v8/src/code.h +70 -0
- data/lib/libv8/v8/src/codegen.cc +231 -0
- data/lib/libv8/v8/src/codegen.h +84 -0
- data/lib/libv8/v8/src/compilation-cache.cc +540 -0
- data/lib/libv8/v8/src/compilation-cache.h +287 -0
- data/lib/libv8/v8/src/compiler.cc +786 -0
- data/lib/libv8/v8/src/compiler.h +312 -0
- data/lib/libv8/v8/src/contexts.cc +347 -0
- data/lib/libv8/v8/src/contexts.h +391 -0
- data/lib/libv8/v8/src/conversions-inl.h +106 -0
- data/lib/libv8/v8/src/conversions.cc +1131 -0
- data/lib/libv8/v8/src/conversions.h +135 -0
- data/lib/libv8/v8/src/counters.cc +93 -0
- data/lib/libv8/v8/src/counters.h +254 -0
- data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
- data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
- data/lib/libv8/v8/src/cpu-profiler.h +302 -0
- data/lib/libv8/v8/src/cpu.h +69 -0
- data/lib/libv8/v8/src/d8-debug.cc +367 -0
- data/lib/libv8/v8/src/d8-debug.h +158 -0
- data/lib/libv8/v8/src/d8-posix.cc +695 -0
- data/lib/libv8/v8/src/d8-readline.cc +130 -0
- data/lib/libv8/v8/src/d8-windows.cc +42 -0
- data/lib/libv8/v8/src/d8.cc +803 -0
- data/lib/libv8/v8/src/d8.gyp +91 -0
- data/lib/libv8/v8/src/d8.h +235 -0
- data/lib/libv8/v8/src/d8.js +2798 -0
- data/lib/libv8/v8/src/data-flow.cc +66 -0
- data/lib/libv8/v8/src/data-flow.h +205 -0
- data/lib/libv8/v8/src/date.js +1103 -0
- data/lib/libv8/v8/src/dateparser-inl.h +127 -0
- data/lib/libv8/v8/src/dateparser.cc +178 -0
- data/lib/libv8/v8/src/dateparser.h +266 -0
- data/lib/libv8/v8/src/debug-agent.cc +447 -0
- data/lib/libv8/v8/src/debug-agent.h +129 -0
- data/lib/libv8/v8/src/debug-debugger.js +2569 -0
- data/lib/libv8/v8/src/debug.cc +3165 -0
- data/lib/libv8/v8/src/debug.h +1057 -0
- data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
- data/lib/libv8/v8/src/deoptimizer.h +602 -0
- data/lib/libv8/v8/src/disasm.h +80 -0
- data/lib/libv8/v8/src/disassembler.cc +343 -0
- data/lib/libv8/v8/src/disassembler.h +58 -0
- data/lib/libv8/v8/src/diy-fp.cc +58 -0
- data/lib/libv8/v8/src/diy-fp.h +117 -0
- data/lib/libv8/v8/src/double.h +238 -0
- data/lib/libv8/v8/src/dtoa.cc +103 -0
- data/lib/libv8/v8/src/dtoa.h +85 -0
- data/lib/libv8/v8/src/execution.cc +849 -0
- data/lib/libv8/v8/src/execution.h +297 -0
- data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
- data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
- data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
- data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
- data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
- data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
- data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
- data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
- data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
- data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
- data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
- data/lib/libv8/v8/src/factory.cc +1222 -0
- data/lib/libv8/v8/src/factory.h +442 -0
- data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
- data/lib/libv8/v8/src/fast-dtoa.h +83 -0
- data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
- data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
- data/lib/libv8/v8/src/flag-definitions.h +560 -0
- data/lib/libv8/v8/src/flags.cc +551 -0
- data/lib/libv8/v8/src/flags.h +79 -0
- data/lib/libv8/v8/src/frames-inl.h +247 -0
- data/lib/libv8/v8/src/frames.cc +1243 -0
- data/lib/libv8/v8/src/frames.h +870 -0
- data/lib/libv8/v8/src/full-codegen.cc +1374 -0
- data/lib/libv8/v8/src/full-codegen.h +771 -0
- data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
- data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
- data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
- data/lib/libv8/v8/src/gdb-jit.h +143 -0
- data/lib/libv8/v8/src/global-handles.cc +665 -0
- data/lib/libv8/v8/src/global-handles.h +284 -0
- data/lib/libv8/v8/src/globals.h +325 -0
- data/lib/libv8/v8/src/handles-inl.h +177 -0
- data/lib/libv8/v8/src/handles.cc +987 -0
- data/lib/libv8/v8/src/handles.h +382 -0
- data/lib/libv8/v8/src/hashmap.cc +230 -0
- data/lib/libv8/v8/src/hashmap.h +123 -0
- data/lib/libv8/v8/src/heap-inl.h +704 -0
- data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
- data/lib/libv8/v8/src/heap-profiler.h +397 -0
- data/lib/libv8/v8/src/heap.cc +5930 -0
- data/lib/libv8/v8/src/heap.h +2268 -0
- data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
- data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
- data/lib/libv8/v8/src/hydrogen.cc +6239 -0
- data/lib/libv8/v8/src/hydrogen.h +1202 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
- data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
- data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
- data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
- data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
- data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
- data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
- data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
- data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
- data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
- data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
- data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
- data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
- data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
- data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
- data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
- data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
- data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
- data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
- data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
- data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
- data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
- data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
- data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
- data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
- data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
- data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
- data/lib/libv8/v8/src/ic-inl.h +130 -0
- data/lib/libv8/v8/src/ic.cc +2577 -0
- data/lib/libv8/v8/src/ic.h +736 -0
- data/lib/libv8/v8/src/inspector.cc +63 -0
- data/lib/libv8/v8/src/inspector.h +62 -0
- data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
- data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
- data/lib/libv8/v8/src/isolate-inl.h +50 -0
- data/lib/libv8/v8/src/isolate.cc +1869 -0
- data/lib/libv8/v8/src/isolate.h +1382 -0
- data/lib/libv8/v8/src/json-parser.cc +504 -0
- data/lib/libv8/v8/src/json-parser.h +161 -0
- data/lib/libv8/v8/src/json.js +342 -0
- data/lib/libv8/v8/src/jsregexp.cc +5385 -0
- data/lib/libv8/v8/src/jsregexp.h +1492 -0
- data/lib/libv8/v8/src/list-inl.h +212 -0
- data/lib/libv8/v8/src/list.h +174 -0
- data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
- data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
- data/lib/libv8/v8/src/lithium-allocator.h +630 -0
- data/lib/libv8/v8/src/lithium.cc +190 -0
- data/lib/libv8/v8/src/lithium.h +597 -0
- data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
- data/lib/libv8/v8/src/liveedit.cc +1691 -0
- data/lib/libv8/v8/src/liveedit.h +180 -0
- data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
- data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
- data/lib/libv8/v8/src/liveobjectlist.h +322 -0
- data/lib/libv8/v8/src/log-inl.h +59 -0
- data/lib/libv8/v8/src/log-utils.cc +428 -0
- data/lib/libv8/v8/src/log-utils.h +231 -0
- data/lib/libv8/v8/src/log.cc +1993 -0
- data/lib/libv8/v8/src/log.h +476 -0
- data/lib/libv8/v8/src/macro-assembler.h +120 -0
- data/lib/libv8/v8/src/macros.py +178 -0
- data/lib/libv8/v8/src/mark-compact.cc +3143 -0
- data/lib/libv8/v8/src/mark-compact.h +506 -0
- data/lib/libv8/v8/src/math.js +264 -0
- data/lib/libv8/v8/src/messages.cc +179 -0
- data/lib/libv8/v8/src/messages.h +113 -0
- data/lib/libv8/v8/src/messages.js +1096 -0
- data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
- data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
- data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
- data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
- data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
- data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
- data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
- data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
- data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
- data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
- data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
- data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
- data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
- data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
- data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
- data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
- data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
- data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
- data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
- data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
- data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
- data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
- data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
- data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
- data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
- data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
- data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
- data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
- data/lib/libv8/v8/src/mksnapshot.cc +328 -0
- data/lib/libv8/v8/src/natives.h +64 -0
- data/lib/libv8/v8/src/objects-debug.cc +738 -0
- data/lib/libv8/v8/src/objects-inl.h +4323 -0
- data/lib/libv8/v8/src/objects-printer.cc +829 -0
- data/lib/libv8/v8/src/objects-visiting.cc +148 -0
- data/lib/libv8/v8/src/objects-visiting.h +424 -0
- data/lib/libv8/v8/src/objects.cc +10585 -0
- data/lib/libv8/v8/src/objects.h +6838 -0
- data/lib/libv8/v8/src/parser.cc +4997 -0
- data/lib/libv8/v8/src/parser.h +765 -0
- data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
- data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
- data/lib/libv8/v8/src/platform-linux.cc +1149 -0
- data/lib/libv8/v8/src/platform-macos.cc +830 -0
- data/lib/libv8/v8/src/platform-nullos.cc +479 -0
- data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
- data/lib/libv8/v8/src/platform-posix.cc +424 -0
- data/lib/libv8/v8/src/platform-solaris.cc +762 -0
- data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
- data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
- data/lib/libv8/v8/src/platform-tls.h +50 -0
- data/lib/libv8/v8/src/platform-win32.cc +2021 -0
- data/lib/libv8/v8/src/platform.h +667 -0
- data/lib/libv8/v8/src/preparse-data-format.h +62 -0
- data/lib/libv8/v8/src/preparse-data.cc +183 -0
- data/lib/libv8/v8/src/preparse-data.h +225 -0
- data/lib/libv8/v8/src/preparser-api.cc +220 -0
- data/lib/libv8/v8/src/preparser.cc +1450 -0
- data/lib/libv8/v8/src/preparser.h +493 -0
- data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
- data/lib/libv8/v8/src/prettyprinter.h +223 -0
- data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
- data/lib/libv8/v8/src/profile-generator.cc +3098 -0
- data/lib/libv8/v8/src/profile-generator.h +1126 -0
- data/lib/libv8/v8/src/property.cc +105 -0
- data/lib/libv8/v8/src/property.h +365 -0
- data/lib/libv8/v8/src/proxy.js +83 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
- data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
- data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
- data/lib/libv8/v8/src/regexp-stack.cc +111 -0
- data/lib/libv8/v8/src/regexp-stack.h +147 -0
- data/lib/libv8/v8/src/regexp.js +483 -0
- data/lib/libv8/v8/src/rewriter.cc +360 -0
- data/lib/libv8/v8/src/rewriter.h +50 -0
- data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
- data/lib/libv8/v8/src/runtime-profiler.h +201 -0
- data/lib/libv8/v8/src/runtime.cc +12227 -0
- data/lib/libv8/v8/src/runtime.h +652 -0
- data/lib/libv8/v8/src/runtime.js +649 -0
- data/lib/libv8/v8/src/safepoint-table.cc +256 -0
- data/lib/libv8/v8/src/safepoint-table.h +270 -0
- data/lib/libv8/v8/src/scanner-base.cc +952 -0
- data/lib/libv8/v8/src/scanner-base.h +670 -0
- data/lib/libv8/v8/src/scanner.cc +345 -0
- data/lib/libv8/v8/src/scanner.h +146 -0
- data/lib/libv8/v8/src/scopeinfo.cc +646 -0
- data/lib/libv8/v8/src/scopeinfo.h +254 -0
- data/lib/libv8/v8/src/scopes.cc +1150 -0
- data/lib/libv8/v8/src/scopes.h +507 -0
- data/lib/libv8/v8/src/serialize.cc +1574 -0
- data/lib/libv8/v8/src/serialize.h +589 -0
- data/lib/libv8/v8/src/shell.h +55 -0
- data/lib/libv8/v8/src/simulator.h +43 -0
- data/lib/libv8/v8/src/small-pointer-list.h +163 -0
- data/lib/libv8/v8/src/smart-pointer.h +109 -0
- data/lib/libv8/v8/src/snapshot-common.cc +83 -0
- data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
- data/lib/libv8/v8/src/snapshot.h +91 -0
- data/lib/libv8/v8/src/spaces-inl.h +529 -0
- data/lib/libv8/v8/src/spaces.cc +3145 -0
- data/lib/libv8/v8/src/spaces.h +2369 -0
- data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
- data/lib/libv8/v8/src/splay-tree.h +205 -0
- data/lib/libv8/v8/src/string-search.cc +41 -0
- data/lib/libv8/v8/src/string-search.h +568 -0
- data/lib/libv8/v8/src/string-stream.cc +592 -0
- data/lib/libv8/v8/src/string-stream.h +191 -0
- data/lib/libv8/v8/src/string.js +994 -0
- data/lib/libv8/v8/src/strtod.cc +440 -0
- data/lib/libv8/v8/src/strtod.h +40 -0
- data/lib/libv8/v8/src/stub-cache.cc +1965 -0
- data/lib/libv8/v8/src/stub-cache.h +924 -0
- data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
- data/lib/libv8/v8/src/token.cc +63 -0
- data/lib/libv8/v8/src/token.h +288 -0
- data/lib/libv8/v8/src/type-info.cc +507 -0
- data/lib/libv8/v8/src/type-info.h +272 -0
- data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
- data/lib/libv8/v8/src/unbound-queue.h +69 -0
- data/lib/libv8/v8/src/unicode-inl.h +238 -0
- data/lib/libv8/v8/src/unicode.cc +1624 -0
- data/lib/libv8/v8/src/unicode.h +280 -0
- data/lib/libv8/v8/src/uri.js +408 -0
- data/lib/libv8/v8/src/utils-inl.h +48 -0
- data/lib/libv8/v8/src/utils.cc +371 -0
- data/lib/libv8/v8/src/utils.h +800 -0
- data/lib/libv8/v8/src/v8-counters.cc +62 -0
- data/lib/libv8/v8/src/v8-counters.h +314 -0
- data/lib/libv8/v8/src/v8.cc +213 -0
- data/lib/libv8/v8/src/v8.h +131 -0
- data/lib/libv8/v8/src/v8checks.h +64 -0
- data/lib/libv8/v8/src/v8dll-main.cc +44 -0
- data/lib/libv8/v8/src/v8globals.h +512 -0
- data/lib/libv8/v8/src/v8memory.h +82 -0
- data/lib/libv8/v8/src/v8natives.js +1310 -0
- data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
- data/lib/libv8/v8/src/v8threads.cc +464 -0
- data/lib/libv8/v8/src/v8threads.h +165 -0
- data/lib/libv8/v8/src/v8utils.h +319 -0
- data/lib/libv8/v8/src/variables.cc +114 -0
- data/lib/libv8/v8/src/variables.h +167 -0
- data/lib/libv8/v8/src/version.cc +116 -0
- data/lib/libv8/v8/src/version.h +68 -0
- data/lib/libv8/v8/src/vm-state-inl.h +138 -0
- data/lib/libv8/v8/src/vm-state.h +71 -0
- data/lib/libv8/v8/src/win32-headers.h +96 -0
- data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
- data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
- data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
- data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
- data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
- data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
- data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
- data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
- data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
- data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
- data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
- data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
- data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
- data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
- data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
- data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
- data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
- data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
- data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
- data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
- data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
- data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
- data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
- data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
- data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
- data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
- data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
- data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
- data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
- data/lib/libv8/v8/src/zone-inl.h +140 -0
- data/lib/libv8/v8/src/zone.cc +196 -0
- data/lib/libv8/v8/src/zone.h +240 -0
- data/lib/libv8/v8/tools/codemap.js +265 -0
- data/lib/libv8/v8/tools/consarray.js +93 -0
- data/lib/libv8/v8/tools/csvparser.js +78 -0
- data/lib/libv8/v8/tools/disasm.py +92 -0
- data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
- data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
- data/lib/libv8/v8/tools/gcmole/README +62 -0
- data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
- data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
- data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
- data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
- data/lib/libv8/v8/tools/grokdump.py +841 -0
- data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
- data/lib/libv8/v8/tools/js2c.py +364 -0
- data/lib/libv8/v8/tools/jsmin.py +280 -0
- data/lib/libv8/v8/tools/linux-tick-processor +35 -0
- data/lib/libv8/v8/tools/ll_prof.py +942 -0
- data/lib/libv8/v8/tools/logreader.js +185 -0
- data/lib/libv8/v8/tools/mac-nm +18 -0
- data/lib/libv8/v8/tools/mac-tick-processor +6 -0
- data/lib/libv8/v8/tools/oom_dump/README +31 -0
- data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
- data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
- data/lib/libv8/v8/tools/presubmit.py +305 -0
- data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
- data/lib/libv8/v8/tools/profile.js +751 -0
- data/lib/libv8/v8/tools/profile_view.js +219 -0
- data/lib/libv8/v8/tools/run-valgrind.py +77 -0
- data/lib/libv8/v8/tools/splaytree.js +316 -0
- data/lib/libv8/v8/tools/stats-viewer.py +468 -0
- data/lib/libv8/v8/tools/test.py +1510 -0
- data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
- data/lib/libv8/v8/tools/tickprocessor.js +877 -0
- data/lib/libv8/v8/tools/utils.py +96 -0
- data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
- data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
- data/lib/libv8/version.rb +6 -0
- data/libv8.gemspec +36 -0
- data/thefrontside.png +0 -0
- metadata +776 -0
data/lib/libv8/v8/src/spaces.cc
@@ -0,0 +1,3145 @@
|
|
1
|
+
// Copyright 2011 the V8 project authors. All rights reserved.
|
2
|
+
// Redistribution and use in source and binary forms, with or without
|
3
|
+
// modification, are permitted provided that the following conditions are
|
4
|
+
// met:
|
5
|
+
//
|
6
|
+
// * Redistributions of source code must retain the above copyright
|
7
|
+
// notice, this list of conditions and the following disclaimer.
|
8
|
+
// * Redistributions in binary form must reproduce the above
|
9
|
+
// copyright notice, this list of conditions and the following
|
10
|
+
// disclaimer in the documentation and/or other materials provided
|
11
|
+
// with the distribution.
|
12
|
+
// * Neither the name of Google Inc. nor the names of its
|
13
|
+
// contributors may be used to endorse or promote products derived
|
14
|
+
// from this software without specific prior written permission.
|
15
|
+
//
|
16
|
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
17
|
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
18
|
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
19
|
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
20
|
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21
|
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22
|
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
23
|
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
24
|
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
25
|
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
26
|
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
27
|
+
|
28
|
+
#include "v8.h"
|
29
|
+
|
30
|
+
#include "liveobjectlist-inl.h"
|
31
|
+
#include "macro-assembler.h"
|
32
|
+
#include "mark-compact.h"
|
33
|
+
#include "platform.h"
|
34
|
+
|
35
|
+
namespace v8 {
|
36
|
+
namespace internal {
|
37
|
+
|
38
|
+
// For contiguous spaces, top should be in the space (or at the end) and limit
|
39
|
+
// should be the end of the space.
|
40
|
+
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
|
41
|
+
ASSERT((space).low() <= (info).top \
|
42
|
+
&& (info).top <= (space).high() \
|
43
|
+
&& (info).limit == (space).high())
|
44
|
+
|
45
|
+
// ----------------------------------------------------------------------------
|
46
|
+
// HeapObjectIterator
|
47
|
+
|
48
|
+
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
|
49
|
+
Initialize(space->bottom(), space->top(), NULL);
|
50
|
+
}
|
51
|
+
|
52
|
+
|
53
|
+
HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
|
54
|
+
HeapObjectCallback size_func) {
|
55
|
+
Initialize(space->bottom(), space->top(), size_func);
|
56
|
+
}
|
57
|
+
|
58
|
+
|
59
|
+
HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
|
60
|
+
Initialize(start, space->top(), NULL);
|
61
|
+
}
|
62
|
+
|
63
|
+
|
64
|
+
HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
|
65
|
+
HeapObjectCallback size_func) {
|
66
|
+
Initialize(start, space->top(), size_func);
|
67
|
+
}
|
68
|
+
|
69
|
+
|
70
|
+
HeapObjectIterator::HeapObjectIterator(Page* page,
|
71
|
+
HeapObjectCallback size_func) {
|
72
|
+
Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
|
73
|
+
}
|
74
|
+
|
75
|
+
|
76
|
+
void HeapObjectIterator::Initialize(Address cur, Address end,
|
77
|
+
HeapObjectCallback size_f) {
|
78
|
+
cur_addr_ = cur;
|
79
|
+
end_addr_ = end;
|
80
|
+
end_page_ = Page::FromAllocationTop(end);
|
81
|
+
size_func_ = size_f;
|
82
|
+
Page* p = Page::FromAllocationTop(cur_addr_);
|
83
|
+
cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
|
84
|
+
|
85
|
+
#ifdef DEBUG
|
86
|
+
Verify();
|
87
|
+
#endif
|
88
|
+
}
|
89
|
+
|
90
|
+
|
91
|
+
HeapObject* HeapObjectIterator::FromNextPage() {
|
92
|
+
if (cur_addr_ == end_addr_) return NULL;
|
93
|
+
|
94
|
+
Page* cur_page = Page::FromAllocationTop(cur_addr_);
|
95
|
+
cur_page = cur_page->next_page();
|
96
|
+
ASSERT(cur_page->is_valid());
|
97
|
+
|
98
|
+
cur_addr_ = cur_page->ObjectAreaStart();
|
99
|
+
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
|
100
|
+
|
101
|
+
if (cur_addr_ == end_addr_) return NULL;
|
102
|
+
ASSERT(cur_addr_ < cur_limit_);
|
103
|
+
#ifdef DEBUG
|
104
|
+
Verify();
|
105
|
+
#endif
|
106
|
+
return FromCurrentPage();
|
107
|
+
}
|
108
|
+
|
109
|
+
|
110
|
+
#ifdef DEBUG
|
111
|
+
void HeapObjectIterator::Verify() {
|
112
|
+
Page* p = Page::FromAllocationTop(cur_addr_);
|
113
|
+
ASSERT(p == Page::FromAllocationTop(cur_limit_));
|
114
|
+
ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
|
115
|
+
}
|
116
|
+
#endif
|
117
|
+
|
118
|
+
|
119
|
+
// -----------------------------------------------------------------------------
|
120
|
+
// PageIterator
|
121
|
+
|
122
|
+
PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
|
123
|
+
prev_page_ = NULL;
|
124
|
+
switch (mode) {
|
125
|
+
case PAGES_IN_USE:
|
126
|
+
stop_page_ = space->AllocationTopPage();
|
127
|
+
break;
|
128
|
+
case PAGES_USED_BY_MC:
|
129
|
+
stop_page_ = space->MCRelocationTopPage();
|
130
|
+
break;
|
131
|
+
case ALL_PAGES:
|
132
|
+
#ifdef DEBUG
|
133
|
+
// Verify that the cached last page in the space is actually the
|
134
|
+
// last page.
|
135
|
+
for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
|
136
|
+
if (!p->next_page()->is_valid()) {
|
137
|
+
ASSERT(space->last_page_ == p);
|
138
|
+
}
|
139
|
+
}
|
140
|
+
#endif
|
141
|
+
stop_page_ = space->last_page_;
|
142
|
+
break;
|
143
|
+
}
|
144
|
+
}
|
145
|
+
|
146
|
+
|
147
|
+
// -----------------------------------------------------------------------------
|
148
|
+
// CodeRange
|
149
|
+
|
150
|
+
|
151
|
+
CodeRange::CodeRange()
|
152
|
+
: code_range_(NULL),
|
153
|
+
free_list_(0),
|
154
|
+
allocation_list_(0),
|
155
|
+
current_allocation_block_index_(0),
|
156
|
+
isolate_(NULL) {
|
157
|
+
}
|
158
|
+
|
159
|
+
|
160
|
+
bool CodeRange::Setup(const size_t requested) {
|
161
|
+
ASSERT(code_range_ == NULL);
|
162
|
+
|
163
|
+
code_range_ = new VirtualMemory(requested);
|
164
|
+
CHECK(code_range_ != NULL);
|
165
|
+
if (!code_range_->IsReserved()) {
|
166
|
+
delete code_range_;
|
167
|
+
code_range_ = NULL;
|
168
|
+
return false;
|
169
|
+
}
|
170
|
+
|
171
|
+
// We are sure that we have mapped a block of requested addresses.
|
172
|
+
ASSERT(code_range_->size() == requested);
|
173
|
+
LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
|
174
|
+
allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
|
175
|
+
current_allocation_block_index_ = 0;
|
176
|
+
return true;
|
177
|
+
}
|
178
|
+
|
179
|
+
|
180
|
+
int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
|
181
|
+
const FreeBlock* right) {
|
182
|
+
// The entire point of CodeRange is that the difference between two
|
183
|
+
// addresses in the range can be represented as a signed 32-bit int,
|
184
|
+
// so the cast is semantically correct.
|
185
|
+
return static_cast<int>(left->start - right->start);
|
186
|
+
}
|
187
|
+
|
188
|
+
|
189
|
+
void CodeRange::GetNextAllocationBlock(size_t requested) {
|
190
|
+
for (current_allocation_block_index_++;
|
191
|
+
current_allocation_block_index_ < allocation_list_.length();
|
192
|
+
current_allocation_block_index_++) {
|
193
|
+
if (requested <= allocation_list_[current_allocation_block_index_].size) {
|
194
|
+
return; // Found a large enough allocation block.
|
195
|
+
}
|
196
|
+
}
|
197
|
+
|
198
|
+
// Sort and merge the free blocks on the free list and the allocation list.
|
199
|
+
free_list_.AddAll(allocation_list_);
|
200
|
+
allocation_list_.Clear();
|
201
|
+
free_list_.Sort(&CompareFreeBlockAddress);
|
202
|
+
for (int i = 0; i < free_list_.length();) {
|
203
|
+
FreeBlock merged = free_list_[i];
|
204
|
+
i++;
|
205
|
+
// Add adjacent free blocks to the current merged block.
|
206
|
+
while (i < free_list_.length() &&
|
207
|
+
free_list_[i].start == merged.start + merged.size) {
|
208
|
+
merged.size += free_list_[i].size;
|
209
|
+
i++;
|
210
|
+
}
|
211
|
+
if (merged.size > 0) {
|
212
|
+
allocation_list_.Add(merged);
|
213
|
+
}
|
214
|
+
}
|
215
|
+
free_list_.Clear();
|
216
|
+
|
217
|
+
for (current_allocation_block_index_ = 0;
|
218
|
+
current_allocation_block_index_ < allocation_list_.length();
|
219
|
+
current_allocation_block_index_++) {
|
220
|
+
if (requested <= allocation_list_[current_allocation_block_index_].size) {
|
221
|
+
return; // Found a large enough allocation block.
|
222
|
+
}
|
223
|
+
}
|
224
|
+
|
225
|
+
// Code range is full or too fragmented.
|
226
|
+
V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
|
227
|
+
}
|
228
|
+
|
229
|
+
|
230
|
+
|
231
|
+
void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
|
232
|
+
ASSERT(current_allocation_block_index_ < allocation_list_.length());
|
233
|
+
if (requested > allocation_list_[current_allocation_block_index_].size) {
|
234
|
+
// Find an allocation block large enough. This function call may
|
235
|
+
// call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
|
236
|
+
GetNextAllocationBlock(requested);
|
237
|
+
}
|
238
|
+
// Commit the requested memory at the start of the current allocation block.
|
239
|
+
*allocated = RoundUp(requested, Page::kPageSize);
|
240
|
+
FreeBlock current = allocation_list_[current_allocation_block_index_];
|
241
|
+
if (*allocated >= current.size - Page::kPageSize) {
|
242
|
+
// Don't leave a small free block, useless for a large object or chunk.
|
243
|
+
*allocated = current.size;
|
244
|
+
}
|
245
|
+
ASSERT(*allocated <= current.size);
|
246
|
+
if (!code_range_->Commit(current.start, *allocated, true)) {
|
247
|
+
*allocated = 0;
|
248
|
+
return NULL;
|
249
|
+
}
|
250
|
+
allocation_list_[current_allocation_block_index_].start += *allocated;
|
251
|
+
allocation_list_[current_allocation_block_index_].size -= *allocated;
|
252
|
+
if (*allocated == current.size) {
|
253
|
+
GetNextAllocationBlock(0); // This block is used up, get the next one.
|
254
|
+
}
|
255
|
+
return current.start;
|
256
|
+
}
|
257
|
+
|
258
|
+
|
259
|
+
void CodeRange::FreeRawMemory(void* address, size_t length) {
|
260
|
+
free_list_.Add(FreeBlock(address, length));
|
261
|
+
code_range_->Uncommit(address, length);
|
262
|
+
}
|
263
|
+
|
264
|
+
|
265
|
+
void CodeRange::TearDown() {
|
266
|
+
delete code_range_; // Frees all memory in the virtual memory range.
|
267
|
+
code_range_ = NULL;
|
268
|
+
free_list_.Free();
|
269
|
+
allocation_list_.Free();
|
270
|
+
}
|
271
|
+
|
272
|
+
|
273
|
+
// -----------------------------------------------------------------------------
|
274
|
+
// MemoryAllocator
|
275
|
+
//
|
276
|
+
|
277
|
+
// 270 is an estimate based on the static default heap size of a pair of 256K
|
278
|
+
// semispaces and a 64M old generation.
|
279
|
+
const int kEstimatedNumberOfChunks = 270;
|
280
|
+
|
281
|
+
|
282
|
+
MemoryAllocator::MemoryAllocator()
|
283
|
+
: capacity_(0),
|
284
|
+
capacity_executable_(0),
|
285
|
+
size_(0),
|
286
|
+
size_executable_(0),
|
287
|
+
initial_chunk_(NULL),
|
288
|
+
chunks_(kEstimatedNumberOfChunks),
|
289
|
+
free_chunk_ids_(kEstimatedNumberOfChunks),
|
290
|
+
max_nof_chunks_(0),
|
291
|
+
top_(0),
|
292
|
+
isolate_(NULL) {
|
293
|
+
}
|
294
|
+
|
295
|
+
|
296
|
+
void MemoryAllocator::Push(int free_chunk_id) {
|
297
|
+
ASSERT(max_nof_chunks_ > 0);
|
298
|
+
ASSERT(top_ < max_nof_chunks_);
|
299
|
+
free_chunk_ids_[top_++] = free_chunk_id;
|
300
|
+
}
|
301
|
+
|
302
|
+
|
303
|
+
int MemoryAllocator::Pop() {
|
304
|
+
ASSERT(top_ > 0);
|
305
|
+
return free_chunk_ids_[--top_];
|
306
|
+
}
|
307
|
+
|
308
|
+
|
309
|
+
bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
|
310
|
+
capacity_ = RoundUp(capacity, Page::kPageSize);
|
311
|
+
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
|
312
|
+
ASSERT_GE(capacity_, capacity_executable_);
|
313
|
+
|
314
|
+
// Over-estimate the size of chunks_ array. It assumes the expansion of old
|
315
|
+
// space is always in the unit of a chunk (kChunkSize) except the last
|
316
|
+
// expansion.
|
317
|
+
//
|
318
|
+
// Due to alignment, allocated space might be one page less than required
|
319
|
+
// number (kPagesPerChunk) of pages for old spaces.
|
320
|
+
//
|
321
|
+
// Reserve two chunk ids for semispaces, one for map space, one for old
|
322
|
+
// space, and one for code space.
|
323
|
+
max_nof_chunks_ =
|
324
|
+
static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
|
325
|
+
if (max_nof_chunks_ > kMaxNofChunks) return false;
|
326
|
+
|
327
|
+
size_ = 0;
|
328
|
+
size_executable_ = 0;
|
329
|
+
ChunkInfo info; // uninitialized element.
|
330
|
+
for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
|
331
|
+
chunks_.Add(info);
|
332
|
+
free_chunk_ids_.Add(i);
|
333
|
+
}
|
334
|
+
top_ = max_nof_chunks_;
|
335
|
+
return true;
|
336
|
+
}
|
337
|
+
|
338
|
+
|
339
|
+
void MemoryAllocator::TearDown() {
|
340
|
+
for (int i = 0; i < max_nof_chunks_; i++) {
|
341
|
+
if (chunks_[i].address() != NULL) DeleteChunk(i);
|
342
|
+
}
|
343
|
+
chunks_.Clear();
|
344
|
+
free_chunk_ids_.Clear();
|
345
|
+
|
346
|
+
if (initial_chunk_ != NULL) {
|
347
|
+
LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
|
348
|
+
delete initial_chunk_;
|
349
|
+
initial_chunk_ = NULL;
|
350
|
+
}
|
351
|
+
|
352
|
+
ASSERT(top_ == max_nof_chunks_); // all chunks are free
|
353
|
+
top_ = 0;
|
354
|
+
capacity_ = 0;
|
355
|
+
capacity_executable_ = 0;
|
356
|
+
size_ = 0;
|
357
|
+
max_nof_chunks_ = 0;
|
358
|
+
}
|
359
|
+
|
360
|
+
|
361
|
+
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
|
362
|
+
size_t* allocated,
|
363
|
+
Executability executable) {
|
364
|
+
if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
|
365
|
+
return NULL;
|
366
|
+
}
|
367
|
+
|
368
|
+
void* mem;
|
369
|
+
if (executable == EXECUTABLE) {
|
370
|
+
// Check executable memory limit.
|
371
|
+
if (size_executable_ + requested >
|
372
|
+
static_cast<size_t>(capacity_executable_)) {
|
373
|
+
LOG(isolate_,
|
374
|
+
StringEvent("MemoryAllocator::AllocateRawMemory",
|
375
|
+
"V8 Executable Allocation capacity exceeded"));
|
376
|
+
return NULL;
|
377
|
+
}
|
378
|
+
// Allocate executable memory either from code range or from the
|
379
|
+
// OS.
|
380
|
+
if (isolate_->code_range()->exists()) {
|
381
|
+
mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
|
382
|
+
} else {
|
383
|
+
mem = OS::Allocate(requested, allocated, true);
|
384
|
+
}
|
385
|
+
// Update executable memory size.
|
386
|
+
size_executable_ += static_cast<int>(*allocated);
|
387
|
+
} else {
|
388
|
+
mem = OS::Allocate(requested, allocated, false);
|
389
|
+
}
|
390
|
+
int alloced = static_cast<int>(*allocated);
|
391
|
+
size_ += alloced;
|
392
|
+
|
393
|
+
#ifdef DEBUG
|
394
|
+
ZapBlock(reinterpret_cast<Address>(mem), alloced);
|
395
|
+
#endif
|
396
|
+
isolate_->counters()->memory_allocated()->Increment(alloced);
|
397
|
+
return mem;
|
398
|
+
}
|
399
|
+
|
400
|
+
|
401
|
+
void MemoryAllocator::FreeRawMemory(void* mem,
|
402
|
+
size_t length,
|
403
|
+
Executability executable) {
|
404
|
+
#ifdef DEBUG
|
405
|
+
ZapBlock(reinterpret_cast<Address>(mem), length);
|
406
|
+
#endif
|
407
|
+
if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
|
408
|
+
isolate_->code_range()->FreeRawMemory(mem, length);
|
409
|
+
} else {
|
410
|
+
OS::Free(mem, length);
|
411
|
+
}
|
412
|
+
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
|
413
|
+
size_ -= static_cast<int>(length);
|
414
|
+
if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
|
415
|
+
|
416
|
+
ASSERT(size_ >= 0);
|
417
|
+
ASSERT(size_executable_ >= 0);
|
418
|
+
}
|
419
|
+
|
420
|
+
|
421
|
+
void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
|
422
|
+
AllocationAction action,
|
423
|
+
size_t size) {
|
424
|
+
for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
|
425
|
+
MemoryAllocationCallbackRegistration registration =
|
426
|
+
memory_allocation_callbacks_[i];
|
427
|
+
if ((registration.space & space) == space &&
|
428
|
+
(registration.action & action) == action)
|
429
|
+
registration.callback(space, action, static_cast<int>(size));
|
430
|
+
}
|
431
|
+
}
|
432
|
+
|
433
|
+
|
434
|
+
bool MemoryAllocator::MemoryAllocationCallbackRegistered(
|
435
|
+
MemoryAllocationCallback callback) {
|
436
|
+
for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
|
437
|
+
if (memory_allocation_callbacks_[i].callback == callback) return true;
|
438
|
+
}
|
439
|
+
return false;
|
440
|
+
}
|
441
|
+
|
442
|
+
|
443
|
+
void MemoryAllocator::AddMemoryAllocationCallback(
|
444
|
+
MemoryAllocationCallback callback,
|
445
|
+
ObjectSpace space,
|
446
|
+
AllocationAction action) {
|
447
|
+
ASSERT(callback != NULL);
|
448
|
+
MemoryAllocationCallbackRegistration registration(callback, space, action);
|
449
|
+
ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
|
450
|
+
return memory_allocation_callbacks_.Add(registration);
|
451
|
+
}
|
452
|
+
|
453
|
+
|
454
|
+
void MemoryAllocator::RemoveMemoryAllocationCallback(
|
455
|
+
MemoryAllocationCallback callback) {
|
456
|
+
ASSERT(callback != NULL);
|
457
|
+
for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
|
458
|
+
if (memory_allocation_callbacks_[i].callback == callback) {
|
459
|
+
memory_allocation_callbacks_.Remove(i);
|
460
|
+
return;
|
461
|
+
}
|
462
|
+
}
|
463
|
+
UNREACHABLE();
|
464
|
+
}
|
465
|
+
|
466
|
+
void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
|
467
|
+
ASSERT(initial_chunk_ == NULL);
|
468
|
+
|
469
|
+
initial_chunk_ = new VirtualMemory(requested);
|
470
|
+
CHECK(initial_chunk_ != NULL);
|
471
|
+
if (!initial_chunk_->IsReserved()) {
|
472
|
+
delete initial_chunk_;
|
473
|
+
initial_chunk_ = NULL;
|
474
|
+
return NULL;
|
475
|
+
}
|
476
|
+
|
477
|
+
// We are sure that we have mapped a block of requested addresses.
|
478
|
+
ASSERT(initial_chunk_->size() == requested);
|
479
|
+
LOG(isolate_,
|
480
|
+
NewEvent("InitialChunk", initial_chunk_->address(), requested));
|
481
|
+
size_ += static_cast<int>(requested);
|
482
|
+
return initial_chunk_->address();
|
483
|
+
}
|
484
|
+
|
485
|
+
|
486
|
+
static int PagesInChunk(Address start, size_t size) {
|
487
|
+
// The first page starts on the first page-aligned address from start onward
|
488
|
+
// and the last page ends on the last page-aligned address before
|
489
|
+
// start+size. Page::kPageSize is a power of two so we can divide by
|
490
|
+
// shifting.
|
491
|
+
return static_cast<int>((RoundDown(start + size, Page::kPageSize)
|
492
|
+
- RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
|
493
|
+
}
|
494
|
+
|
495
|
+
|
496
|
+
Page* MemoryAllocator::AllocatePages(int requested_pages,
|
497
|
+
int* allocated_pages,
|
498
|
+
PagedSpace* owner) {
|
499
|
+
if (requested_pages <= 0) return Page::FromAddress(NULL);
|
500
|
+
size_t chunk_size = requested_pages * Page::kPageSize;
|
501
|
+
|
502
|
+
void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
|
503
|
+
if (chunk == NULL) return Page::FromAddress(NULL);
|
504
|
+
LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
|
505
|
+
|
506
|
+
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
|
507
|
+
// We may 'lose' a page due to alignment.
|
508
|
+
ASSERT(*allocated_pages >= kPagesPerChunk - 1);
|
509
|
+
if (*allocated_pages == 0) {
|
510
|
+
FreeRawMemory(chunk, chunk_size, owner->executable());
|
511
|
+
LOG(isolate_, DeleteEvent("PagedChunk", chunk));
|
512
|
+
return Page::FromAddress(NULL);
|
513
|
+
}
|
514
|
+
|
515
|
+
int chunk_id = Pop();
|
516
|
+
chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
|
517
|
+
|
518
|
+
ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
|
519
|
+
PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
|
520
|
+
Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
|
521
|
+
|
522
|
+
return new_pages;
|
523
|
+
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));

  // So long as we correctly overestimated the number of chunks we should not
  // run out of chunk ids.
  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Commit(start, size, executable)) return false;
#ifdef DEBUG
  ZapBlock(start, size);
#endif
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(InInitialChunk(start));
  ASSERT(InInitialChunk(start + size - 1));

  if (!initial_chunk_->Uncommit(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}
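
// In debug builds ZapBlock overwrites every pointer-sized word of a block
// with kZapValue so that a stale read of freed or freshly (un)committed
// memory fails loudly. Illustrative sketch (assumed values): after
// ZapBlock(start, 4 * kPointerSize), the words at start, start + kPointerSize,
// start + 2 * kPointerSize and start + 3 * kPointerSize all read back as
// kZapValue.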


Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->heap_ = owner->heap();
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->InvalidateWatermark(true);
    p->SetIsLargeObjectPage(false);
    p->SetAllocationWatermark(p->ObjectAreaStart());
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
    page_addr += Page::kPageSize;
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}
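
// The opaque_header encoding relies on page alignment: the high bits hold the
// address of the next page and the low kPageSizeBits hold the chunk id.
// Illustrative decoding sketch (a paraphrase of the accessors in spaces.h,
// not a quote of them):
//   next page address == opaque_header with its low kPageSizeBits cleared;
//   chunk id          == opaque_header & (Page::kPageSize - 1).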


Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  // Find the first page in the same chunk as 'p'
  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    // Find the last page in the same chunk as 'prev'.
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);  // first page in next chunk

    // set the next_page of last_page to NULL
    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;  // return 'p' when exiting
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    // Find the first page of the next chunk before deleting this chunk.
    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    // Free the current chunk.
    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::FreeAllPages(PagedSpace* space) {
  for (int i = 0, length = chunks_.length(); i < length; i++) {
    if (chunks_[i].owner() == space) {
      DeleteChunk(i);
    }
  }
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  // We cannot free a chunk contained in the initial chunk because it was not
  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
  // memory.
  if (InInitialChunk(c.address())) {
    // TODO(1240712): VirtualMemory::Uncommit has a return value which
    // is ignored here.
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters* counters = isolate_->counters();
    counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
  } else {
    LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
    size_t size = c.size();
    FreeRawMemory(c.address(), size, c.executable());
    PerformAllocationCallback(space, kAllocationActionFree, size);
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
                                                 Page** first_page,
                                                 Page** last_page,
                                                 Page** last_page_in_use) {
  Page* first = NULL;
  Page* last = NULL;

  for (int i = 0, length = chunks_.length(); i < length; i++) {
    ChunkInfo& chunk = chunks_[i];

    if (chunk.owner() == space) {
      if (first == NULL) {
        Address low = RoundUp(chunk.address(), Page::kPageSize);
        first = Page::FromAddress(low);
      }
      last = RelinkPagesInChunk(i,
                                chunk.address(),
                                chunk.size(),
                                last,
                                last_page_in_use);
    }
  }

  if (first_page != NULL) {
    *first_page = first;
  }

  if (last_page != NULL) {
    *last_page = last;
  }
}


Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
                                          Address chunk_start,
                                          size_t chunk_size,
                                          Page* prev,
                                          Page** last_page_in_use) {
  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);

  if (prev->is_valid()) {
    SetNextPage(prev, Page::FromAddress(page_addr));
  }

  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    page_addr += Page::kPageSize;

    p->InvalidateWatermark(true);
    if (p->WasInUseBeforeMC()) {
      *last_page_in_use = p;
    }
  }

  // Set the next page of the last page to 0.
  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  if (last_page->WasInUseBeforeMC()) {
    *last_page_in_use = last_page;
  }

  return last_page;
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;
  // Try to use the virtual memory range passed to us.  If it is too small to
  // contain at least one page, ignore it and allocate instead.
  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
        RoundUp(start, Page::kPageSize),
        Page::kPageSize * pages_in_chunk,
        this, &num_pages);
  } else {
    int requested_pages =
        Min(MemoryAllocator::kPagesPerChunk,
            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
    first_page_ =
        Isolate::Current()->memory_allocator()->AllocatePages(
            requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  // We are sure that the first page is valid and that we have at least one
  // page.
  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  // Sequentially clear region marks in the newly allocated
  // pages and cache the current last page in the space.
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
  }

  // Use first_page_ for allocation.
  SetAllocationInfo(&allocation_info_, first_page_);

  page_list_is_chunk_ordered_ = true;

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  Isolate::Current()->memory_allocator()->FreeAllPages(this);
  first_page_ = NULL;
  accounting_stats_.Clear();
}


#ifdef ENABLE_HEAP_PROTECTION

void PagedSpace::Protect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
    page = Isolate::Current()->memory_allocator()->
        FindLastPageInSameChunk(page)->next_page();
  }
}


void PagedSpace::Unprotect() {
  Page* page = first_page_;
  while (page->is_valid()) {
    Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
    page = Isolate::Current()->memory_allocator()->
        FindLastPageInSameChunk(page)->next_page();
  }
}

#endif


void PagedSpace::MarkAllPagesClean() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
  }
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called before or after mark-compact GC
  // because it accesses map pointers.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  // Set page indexes.
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  // Set mc_forwarding_info_ to the first page in the space.
  SetAllocationInfo(&mc_forwarding_info_, first_page_);
  // All the bytes in the space are 'available'.  We will rediscover
  // allocated and wasted bytes during GC.
  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  // The Contains function considers the address at the beginning of a
  // page in the page, MCSpaceOffsetForAddress considers it is in the
  // previous page.
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  // If addr is at the end of a page, it belongs to previous page
  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}
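
// Worked example (illustrative, assuming an 8KB page size): for an address
// that sits 0x220 bytes past the start of the page whose mc_page_index is 3,
// the encoded offset is 3 * 8192 + 0x220. A page-aligned address is
// deliberately attributed to the previous page: it can only be that page's
// allocation top, never the start of an object.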


// Slow case for reallocating and promoting objects during a compacting
// collection.  This function is not space-specific.
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  // There are surely more pages in the space now.
  ASSERT(current_page->next_page()->is_valid());
  // We do not add the top of page block for current page to the space's
  // free list---the block may contain live objects so we cannot write
  // bookkeeping information to it.  Instead, we will recover top of page
  // blocks when we move objects to their new locations.
  //
  // We do however write the allocation pointer to the page.  The encoding
  // of forwarding addresses is as an offset in terms of live bytes, so we
  // need quick access to the allocation top of each page to decode
  // forwarding addresses.
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
  current_page->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);
  // Last page must be valid and its next page is invalid.
  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages =
      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
  // We don't want to have to handle small chunks near the end so if there are
  // not kPagesPerChunk pages available without exceeding the max capacity then
  // act as if memory has run out.
  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
      desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);

  // Sequentially clear region marks of new pages and cache the
  // new last page in the space.
  while (p->is_valid()) {
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
    last_page_ = p;
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


void PagedSpace::Shrink() {
  if (!page_list_is_chunk_ordered_) {
    // We can't shrink the space if the page list is not chunk-ordered
    // (see comment for class MemoryAllocator for definition).
    return;
  }

  // Release half of free pages.
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  // Count the number of pages we would like to free.
  int pages_to_free = 0;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free++;
  }

  // Free pages after top_page.
  Page* p = heap()->isolate()->memory_allocator()->
      FreePages(top_page->next_page());
  heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);

  // Find out how many pages we failed to free and update last_page_.
  // Please note pages can only be freed in whole chunks.
  last_page_ = top_page;
  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
    pages_to_free--;
    last_page_ = p;
  }

  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  // Start from the allocation top and loop to the last page in the space.
  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = heap()->isolate()->memory_allocator()->
        FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  // Expand the space until it has the required capacity or expansion fails.
  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
            last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // The allocation pointer should be valid, and it should be in a page in the
  // space.
  ASSERT(allocation_info_.VerifyPagedAllocation());
  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
  ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));

  // Loop over all the pages.
  bool above_allocation_top = false;
  Page* current_page = first_page_;
  while (current_page->is_valid()) {
    if (above_allocation_top) {
      // We don't care what's above the allocation top.
    } else {
      Address top = current_page->AllocationTop();
      if (current_page == top_page) {
        ASSERT(top == allocation_info_.top);
        // The next page will be above the allocation top.
        above_allocation_top = true;
      }

      // It should be packed with objects from the bottom to the top.
      Address current = current_page->ObjectAreaStart();
      while (current < top) {
        HeapObject* object = HeapObject::FromAddress(current);

        // The first word should be a map, and we expect all map pointers to
        // be in map space.
        Map* map = object->map();
        ASSERT(map->IsMap());
        ASSERT(heap()->map_space()->Contains(map));

        // Perform space-specific object verification.
        VerifyObject(object);

        // The object itself should look OK.
        object->Verify();

        // All the interior pointers should be contained in the heap, and
        // page regions covering intergenerational references should be
        // marked dirty.
        int size = object->Size();
        object->IterateBody(map->instance_type(), size, visitor);

        current += size;
      }

      // The allocation pointer should not be in the middle of an object.
      ASSERT(current == top);
    }

    current_page = current_page->next_page();
  }
}
#endif


// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::Setup(Address start, int size) {
  // Setup new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
  int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start,
                       initial_semispace_capacity,
                       maximum_semispace_capacity)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_semispace_capacity,
                         initial_semispace_capacity,
                         maximum_semispace_capacity)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}
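
// The mask setup above enables O(1) containment checks. Illustrative sketch
// (assumed values): if the reserved block is 4MB (size == 1 << 22) and
// aligned to its size, then address_mask_ == ~0x3FFFFF, and an address a lies
// in new space exactly when its bits masked with address_mask_ equal the bits
// of start_; object_mask_ and object_expected_ fold the heap-object tag into
// the same single-compare test.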


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


#ifdef ENABLE_HEAP_PROTECTION

void NewSpace::Protect() {
  heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
  heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
}


void NewSpace::Unprotect() {
  heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
                                                   to_space_.executable());
  heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
                                                   from_space_.executable());
}

#endif


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


void NewSpace::Grow() {
  ASSERT(Capacity() < MaximumCapacity());
  if (to_space_.Grow()) {
    // Only grow from space if we managed to grow to space.
    if (!from_space_.Grow()) {
      // If we managed to grow to space but couldn't grow from space,
      // attempt to shrink to space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity =
      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from space if we managed to shrink to space.
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to space but couldn't shrink from
      // space, attempt to grow to space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
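
// Grow() and Shrink() maintain the invariant that both semispaces have the
// same committed capacity: the second semispace is resized to match the
// first, and if that fails the first resize is rolled back; a failed rollback
// is fatal. For instance (illustrative), if to_space_ grows from 1MB to 2MB
// but from_space_ cannot commit its extra 1MB, to_space_ is shrunk back to
// 1MB, leaving both at their old size.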


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
  // valid allocation info for the to space.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
// We do not use the SemispaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    // The first word should be a map, and we expect all map pointers to
    // be in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(heap()->map_space()->Contains(map));

    // The object should not be code or a map.
    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    // The object itself should look OK.
    object->Verify();

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  // The allocation pointer should not be in the middle of an object.
  ASSERT(current == top());
}
#endif


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      start_, capacity_, executable())) {
    return false;
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
      start_, capacity_)) {
    return false;
  }
  committed_ = false;
  return true;
}


// -----------------------------------------------------------------------------
// SemiSpace implementation

bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise. In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  initial_capacity_ = initial_capacity;
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;
  committed_ = false;

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;

  return Commit();
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  int maximum_extra = maximum_capacity_ - capacity_;
  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
                  maximum_extra);
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      high(), extra, executable())) {
    return false;
  }
  capacity_ += extra;
  return true;
}
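
// Worked example for Grow() (illustrative values): with capacity_ == 512KB
// and maximum_capacity_ == 2MB, extra == Min(512KB, 1536KB) == 512KB, so the
// semispace commits another 512KB starting at high() and doubles to 1MB; near
// the cap the same code commits only the remaining headroom.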


bool SemiSpace::GrowTo(int new_capacity) {
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  size_t delta = new_capacity - capacity_;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      high(), delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  size_t delta = capacity_ - new_capacity;
  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
      high() - delta, delta)) {
    return false;
  }
  capacity_ = new_capacity;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
static void ClearHistograms() {
  Isolate* isolate = Isolate::Current();
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}


static void ClearCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    isolate->code_kind_statistics()[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();
  const char* table[Code::NUMBER_OF_KINDS] = { NULL };

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(OPTIMIZED_FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
      CASE(KEYED_CALL_IC);
      CASE(UNARY_OP_IC);
      CASE(BINARY_OP_IC);
      CASE(COMPARE_IC);
    }
  }

#undef CASE

  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (isolate->code_kind_statistics()[i] > 0) {
      PrintF("     %-20s: %10d bytes\n", table[i],
             isolate->code_kind_statistics()[i]);
    }
  }
  PrintF("\n");
}
1588
|
+
|
1589
|
+
|
1590
|
+
static int CollectHistogramInfo(HeapObject* obj) {
|
1591
|
+
Isolate* isolate = Isolate::Current();
|
1592
|
+
InstanceType type = obj->map()->instance_type();
|
1593
|
+
ASSERT(0 <= type && type <= LAST_TYPE);
|
1594
|
+
ASSERT(isolate->heap_histograms()[type].name() != NULL);
|
1595
|
+
isolate->heap_histograms()[type].increment_number(1);
|
1596
|
+
isolate->heap_histograms()[type].increment_bytes(obj->Size());
|
1597
|
+
|
1598
|
+
if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
|
1599
|
+
JSObject::cast(obj)->IncrementSpillStatistics(
|
1600
|
+
isolate->js_spill_information());
|
1601
|
+
}
|
1602
|
+
|
1603
|
+
return obj->Size();
|
1604
|
+
}
|
1605
|
+
|
1606
|
+
|
1607
|
+
static void ReportHistogram(bool print_spill) {
|
1608
|
+
Isolate* isolate = Isolate::Current();
|
1609
|
+
PrintF("\n Object Histogram:\n");
|
1610
|
+
for (int i = 0; i <= LAST_TYPE; i++) {
|
1611
|
+
if (isolate->heap_histograms()[i].number() > 0) {
|
1612
|
+
PrintF(" %-34s%10d (%10d bytes)\n",
|
1613
|
+
isolate->heap_histograms()[i].name(),
|
1614
|
+
isolate->heap_histograms()[i].number(),
|
1615
|
+
isolate->heap_histograms()[i].bytes());
|
1616
|
+
}
|
1617
|
+
}
|
1618
|
+
PrintF("\n");
|
1619
|
+
|
1620
|
+
// Summarize string types.
|
1621
|
+
int string_number = 0;
|
1622
|
+
int string_bytes = 0;
|
1623
|
+
#define INCREMENT(type, size, name, camel_name) \
|
1624
|
+
string_number += isolate->heap_histograms()[type].number(); \
|
1625
|
+
string_bytes += isolate->heap_histograms()[type].bytes();
|
1626
|
+
STRING_TYPE_LIST(INCREMENT)
|
1627
|
+
#undef INCREMENT
|
1628
|
+
if (string_number > 0) {
|
1629
|
+
PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
|
1630
|
+
string_bytes);
|
1631
|
+
}
|
1632
|
+
|
1633
|
+
if (FLAG_collect_heap_spill_statistics && print_spill) {
|
1634
|
+
isolate->js_spill_information()->Print();
|
1635
|
+
}
|
1636
|
+
}
|
1637
|
+
#endif // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
// set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
// flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    RecordAllocation(obj);
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(Isolate* isolate,
                               HistogramInfo* info, const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)    \
    string_number += info[type].number();          \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(isolate,
          HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF("  capacity: %" V8_PTR_PREFIX "d"
               ", available: %" V8_PTR_PREFIX "d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n  Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF("    %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    Isolate* isolate = ISOLATE;
    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block.  If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > ByteArray::kHeaderSize) {
    set_map(heap->raw_unchecked_byte_array_map());
    // Can't use ByteArray::cast because it fails during deserialization.
    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(heap->raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(heap->raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
  // deserialization because the byte array map is not done yet.
}


Address FreeListNode::next(Heap* heap) {
  ASSERT(IsFreeListNode(this));
  if (map() == heap->raw_unchecked_byte_array_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    return Memory::Address_at(address() + kNextOffset);
  } else {
    return Memory::Address_at(address() + kPointerSize);
  }
}
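
// Free regions masquerade as ordinary heap objects (byte arrays or fillers)
// so that heap iteration still works across freed gaps. Illustrative layout
// sketch (assuming 32-bit words): a 16-byte block becomes a byte array whose
// map and length encode its size, with the free-list next pointer stored at
// kNextOffset; an 8-byte block becomes a two-pointer filler carrying the next
// pointer in its second word; a lone 4-byte block gets the one-pointer filler
// map and is never linked into a list (see kMinBlockSize below).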


void FreeListNode::set_next(Heap* heap, Address next) {
  ASSERT(IsFreeListNode(this));
  if (map() == heap->raw_unchecked_byte_array_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    Memory::Address_at(address() + kNextOffset) = next;
  } else {
    Memory::Address_at(address() + kPointerSize) = next;
  }
}


OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
  : heap_(heap),
    owner_(owner) {
  Reset();
}


void OldSpaceFreeList::Reset() {
  available_ = 0;
  for (int i = 0; i < kFreeListsLength; i++) {
    free_[i].head_node_ = NULL;
  }
  needs_rebuild_ = false;
  finger_ = kHead;
  free_[kHead].next_size_ = kEnd;
}


void OldSpaceFreeList::RebuildSizeList() {
  ASSERT(needs_rebuild_);
  int cur = kHead;
  for (int i = cur + 1; i < kFreeListsLength; i++) {
    if (free_[i].head_node_ != NULL) {
      free_[cur].next_size_ = i;
      cur = i;
    }
  }
  free_[cur].next_size_ = kEnd;
  needs_rebuild_ = false;
}
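
// RebuildSizeList threads the non-empty exact-size lists into a singly linked
// "size list" that Allocate uses for its best-fit search. Illustrative
// example (assumed state): if only free_[4] and free_[9] currently have head
// nodes, the pass above leaves free_[kHead].next_size_ == 4,
// free_[4].next_size_ == 9 and free_[9].next_size_ == kEnd.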


int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
  Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, size_in_bytes);

  // We don't use the freelists in compacting mode.  This makes it more like a
  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
  // collector.
  if (FLAG_always_compact) {
    return size_in_bytes;
  }

  // Early return to drop too-small blocks on the floor (one or two word
  // blocks cannot hold a map pointer, a size field, and a pointer to the
  // next block in the free list).
  if (size_in_bytes < kMinBlockSize) {
    return size_in_bytes;
  }

  // Insert other blocks at the head of an exact free list.
  int index = size_in_bytes >> kPointerSizeLog2;
  node->set_next(heap_, free_[index].head_node_);
  free_[index].head_node_ = node->address();
  available_ += size_in_bytes;
  needs_rebuild_ = true;
  return 0;
}
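
// The exact-size index is just the block size in words. Worked example
// (illustrative, assuming 4-byte pointers, kPointerSizeLog2 == 2): freeing a
// 40-byte block files it at the head of free_[40 >> 2] == free_[10], and a
// later 40-byte request pops it back off in constant time.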


MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  if (needs_rebuild_) RebuildSizeList();
  int index = size_in_bytes >> kPointerSizeLog2;
  // Check for a perfect fit.
  if (free_[index].head_node_ != NULL) {
    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
    // If this was the last block of its size, remove the size.
    if ((free_[index].head_node_ = node->next(heap_)) == NULL)
      RemoveSize(index);
    available_ -= size_in_bytes;
    *wasted_bytes = 0;
    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
    return node;
  }
  // Search the size list for the best fit.
  int prev = finger_ < index ? finger_ : kHead;
  int cur = FindSize(index, &prev);
  ASSERT(index < cur);
  if (cur == kEnd) {
    // No large enough size in list.
    *wasted_bytes = 0;
    return Failure::RetryAfterGC(owner_);
  }
  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
  int rem = cur - index;
  int rem_bytes = rem << kPointerSizeLog2;
  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
                                                     size_in_bytes);
  // Distinguish the cases prev < rem < cur and rem <= prev < cur
  // to avoid many redundant tests and calls to Insert/RemoveSize.
  if (prev < rem) {
    // Simple case: insert rem between prev and cur.
    finger_ = prev;
    free_[prev].next_size_ = rem;
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
      free_[rem].next_size_ = free_[cur].next_size_;
    } else {
      free_[rem].next_size_ = cur;
    }
    // Add the remainder block.
    rem_node->set_size(heap_, rem_bytes);
    rem_node->set_next(heap_, free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
  } else {
    // If this was the last block of size cur, remove the size.
    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
      finger_ = prev;
      free_[prev].next_size_ = free_[cur].next_size_;
    }
    if (rem_bytes < kMinBlockSize) {
      // Too-small remainder is wasted.
      rem_node->set_size(heap_, rem_bytes);
      available_ -= size_in_bytes + rem_bytes;
      *wasted_bytes = rem_bytes;
      return cur_node;
    }
    // Add the remainder block and, if needed, insert its size.
    rem_node->set_size(heap_, rem_bytes);
    rem_node->set_next(heap_, free_[rem].head_node_);
    free_[rem].head_node_ = rem_node->address();
    if (rem_node->next(heap_) == NULL) InsertSize(rem);
  }
  available_ -= size_in_bytes;
  *wasted_bytes = 0;
  return cur_node;
}
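
// Worked example of the split path (illustrative, 4-byte words): a request
// for 24 bytes (index 6) when only a 40-byte block exists (cur == 10) takes
// that block, returns its first 24 bytes, and re-files the 16-byte remainder
// under free_[rem] with rem == 10 - 6 == 4; a remainder smaller than
// kMinBlockSize would instead be reported through *wasted_bytes and dropped.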


void OldSpaceFreeList::MarkNodes() {
  for (int i = 0; i < kFreeListsLength; i++) {
    Address cur_addr = free_[i].head_node_;
    while (cur_addr != NULL) {
      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
      cur_addr = cur_node->next(heap_);
      cur_node->SetMark();
    }
  }
}


#ifdef DEBUG
bool OldSpaceFreeList::Contains(FreeListNode* node) {
  for (int i = 0; i < kFreeListsLength; i++) {
    Address cur_addr = free_[i].head_node_;
    while (cur_addr != NULL) {
      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
      if (cur_node == node) return true;
      cur_addr = cur_node->next(heap_);
    }
  }
  return false;
}
#endif


FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
                                     AllocationSpace owner,
                                     int object_size)
    : heap_(heap), owner_(owner), object_size_(object_size) {
  Reset();
}


void FixedSizeFreeList::Reset() {
  available_ = 0;
  head_ = tail_ = NULL;
}


void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
  Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
#endif
  // We only use the freelists with mark-sweep.
  ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, object_size_);
  node->set_next(heap_, NULL);
  if (head_ == NULL) {
    tail_ = head_ = node->address();
  } else {
    FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
    tail_ = node->address();
  }
  available_ += object_size_;
}


MaybeObject* FixedSizeFreeList::Allocate() {
  if (head_ == NULL) {
    return Failure::RetryAfterGC(owner_);
  }

  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
  FreeListNode* node = FreeListNode::FromAddress(head_);
  head_ = node->next(heap_);
  available_ -= object_size_;
  return node;
}


void FixedSizeFreeList::MarkNodes() {
  Address cur_addr = head_;
  while (cur_addr != NULL && cur_addr != tail_) {
    FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
    cur_addr = cur_node->next(heap_);
    cur_node->SetMark();
  }
}
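
Because every cell in a fixed-size space has the same size, the free list above needs no size classes at all: Free() appends at the tail, Allocate() pops from the head, both O(1), with a running availability counter. A minimal sketch of that shape (toy types, names assumed for illustration):

#include <cstddef>
#include <deque>

class ToyFixedFreeList {
 public:
  explicit ToyFixedFreeList(size_t cell_size)
      : cell_size_(cell_size), available_(0) {}

  void Free(void* cell) {
    cells_.push_back(cell);       // FIFO: new cells go to the tail.
    available_ += cell_size_;
  }

  void* Allocate() {
    if (cells_.empty()) return NULL;  // The caller would retry after GC.
    void* cell = cells_.front();
    cells_.pop_front();
    available_ -= cell_size_;
    return cell;
  }

  size_t available() const { return available_; }

 private:
  size_t cell_size_;
  size_t available_;
  std::deque<void*> cells_;
};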


// -----------------------------------------------------------------------------
// OldSpace implementation

void OldSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the super class.
  PagedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Reset relocation info.  During a compacting collection, everything in
    // the space is considered 'available' and we will rediscover live data
    // and waste during the collection.
    MCResetRelocationInfo();
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer is considered allocated (everything above is
    // available) and we will rediscover available and wasted bytes during
    // the collection.
    accounting_stats_.AllocateBytes(free_list_.available());
    accounting_stats_.FillWastedBytes(Waste());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


void OldSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet built free lists or
  // wasted any space.
  ASSERT(Waste() == 0);
  ASSERT(AvailableFree() == 0);

  // Build the free list for the space.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* p = it.next();
    // Space below the relocation pointer is allocated.
    computed_size +=
        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
    if (it.has_next()) {
      // Free the space at the top of the page.
      int extra_size =
          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
      if (extra_size > 0) {
        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
                                           extra_size);
        // The bytes we have just "freed" to add to the free list were
        // already accounted as available.
        accounting_stats_.WasteBytes(wasted_bytes);
      }
    }
  }

  // Make sure the computed size - based on the used portion of the pages in
  // use - matches the size obtained while computing forwarding addresses.
  ASSERT(computed_size == Size());
}


bool NewSpace::ReserveSpace(int bytes) {
  // We can't reliably unpack a partial snapshot that needs more new space
  // than the minimum NewSpace size.
  ASSERT(bytes <= InitialCapacity());
  Address limit = allocation_info_.limit;
  Address top = allocation_info_.top;
  return limit - top >= bytes;
}
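
The new-space check above reduces to pointer arithmetic on the linear allocation window: there is room for 'bytes' exactly when top + bytes does not pass limit. A toy restatement with plain integers standing in for Address values (names here are illustrative assumptions):

#include <cstddef>
#include <cstdint>

struct ToyAllocationInfo {
  uintptr_t top;    // Next free byte.
  uintptr_t limit;  // One past the last usable byte.
};

inline bool CanReserve(const ToyAllocationInfo& info, size_t bytes) {
  // Written as a subtraction to avoid overflow of top + bytes.
  return info.limit - info.top >= bytes;
}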


void PagedSpace::FreePages(Page* prev, Page* last) {
  if (last == AllocationTopPage()) {
    // Pages are already at the end of used pages.
    return;
  }

  Page* first = NULL;

  // Remove pages from the list.
  if (prev == NULL) {
    first = first_page_;
    first_page_ = last->next_page();
  } else {
    first = prev->next_page();
    heap()->isolate()->memory_allocator()->SetNextPage(
        prev, last->next_page());
  }

  // Attach the removed run after the last page.
  heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
  last_page_ = last;
  heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);

  // Clean them up.
  do {
    first->InvalidateWatermark(true);
    first->SetAllocationWatermark(first->ObjectAreaStart());
    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
    first = first->next_page();
  } while (first != NULL);

  // Order of pages in this space might no longer be consistent with
  // order of pages in chunks.
  page_list_is_chunk_ordered_ = false;
}
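
FreePages is a splice on a singly linked page list: unlink the run (prev, last] and reattach it at the tail. The same move on a toy list, stripped of the memory-allocator plumbing (ToyPage and the parameter conventions are stand-ins, not the V8 types):

#include <cstddef>

struct ToyPage {
  ToyPage* next;
};

// 'prev' is the node just before the run (NULL if the run starts at the
// head); 'last' is the final node of the run.
void SpliceRunToTail(ToyPage** head, ToyPage** tail,
                     ToyPage* prev, ToyPage* last) {
  if (last == *tail) return;  // Run is already at the end.
  ToyPage* first = (prev == NULL) ? *head : prev->next;
  // Unlink the run.
  if (prev == NULL) {
    *head = last->next;
  } else {
    prev->next = last->next;
  }
  // Re-attach it after the old tail.
  (*tail)->next = first;
  *tail = last;
  last->next = NULL;
}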


void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
  const bool add_to_freelist = true;

  // Mark used and unused pages to properly fill unused pages
  // after reordering.
  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
  Page* last_in_use = AllocationTopPage();
  bool in_use = true;

  while (all_pages_iterator.has_next()) {
    Page* p = all_pages_iterator.next();
    p->SetWasInUseBeforeMC(in_use);
    if (p == last_in_use) {
      // We passed the page containing the allocation top. All subsequent
      // pages are not used.
      in_use = false;
    }
  }

  if (page_list_is_chunk_ordered_) return;

  Page* new_last_in_use = Page::FromAddress(NULL);
  heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
      this, &first_page_, &last_page_, &new_last_in_use);
  ASSERT(new_last_in_use->is_valid());

  if (new_last_in_use != last_in_use) {
    // Current allocation top points to a page which is now in the middle
    // of the page list. We should move the allocation top forward to the
    // new last used page so various object iterators will continue to work
    // properly.
    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
                                         last_in_use->AllocationTop());

    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
    if (size_in_bytes > 0) {
      Address start = last_in_use->AllocationTop();
      if (deallocate_blocks) {
        accounting_stats_.AllocateBytes(size_in_bytes);
        DeallocateBlock(start, size_in_bytes, add_to_freelist);
      } else {
        heap()->CreateFillerObjectAt(start, size_in_bytes);
      }
    }

    // The new last-in-use page was in the middle of the list before
    // sorting, so it is full.
    SetTop(new_last_in_use->AllocationTop());

    ASSERT(AllocationTopPage() == new_last_in_use);
    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
  }

  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
  while (pages_in_use_iterator.has_next()) {
    Page* p = pages_in_use_iterator.next();
    if (!p->WasInUseBeforeMC()) {
      // An empty page is in the middle of a sequence of used pages.
      // Allocate it as a whole and deallocate it immediately.
      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
                                           p->ObjectAreaStart());

      p->SetAllocationWatermark(p->ObjectAreaStart());
      Address start = p->ObjectAreaStart();
      if (deallocate_blocks) {
        accounting_stats_.AllocateBytes(size_in_bytes);
        DeallocateBlock(start, size_in_bytes, add_to_freelist);
      } else {
        heap()->CreateFillerObjectAt(start, size_in_bytes);
      }
    }
  }

  page_list_is_chunk_ordered_ = true;
}


void PagedSpace::PrepareForMarkCompact(bool will_compact) {
  if (will_compact) {
    RelinkPageListInChunkOrder(false);
  }
}


bool PagedSpace::ReserveSpace(int bytes) {
  Address limit = allocation_info_.limit;
  Address top = allocation_info_.top;
  if (limit - top >= bytes) return true;

  // There wasn't enough space in the current page. Let's put the rest
  // of the page on the free list and start a fresh page.
  PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));

  Page* reserved_page = TopPageOf(allocation_info_);
  int bytes_left_to_reserve = bytes;
  while (bytes_left_to_reserve > 0) {
    if (!reserved_page->next_page()->is_valid()) {
      if (heap()->OldGenerationAllocationLimitReached()) return false;
      Expand(reserved_page);
    }
    bytes_left_to_reserve -= Page::kPageSize;
    reserved_page = reserved_page->next_page();
    if (!reserved_page->is_valid()) return false;
  }
  ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
  SetAllocationInfo(&allocation_info_,
                    TopPageOf(allocation_info_)->next_page());
  return true;
}
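
The reservation loop above walks forward one page at a time, expanding the space on demand, until the requested byte count is covered. Its shape, with the V8 page machinery replaced by counters (constant and names are illustrative assumptions):

const int kToyPageSize = 8 * 1024;

// 'pages_left' stands in for the chain of valid next_page() links;
// 'can_expand' stands in for the old-generation limit check.
bool ToyReserve(int bytes, int pages_left, bool can_expand) {
  int bytes_left = bytes;
  while (bytes_left > 0) {
    if (pages_left == 0) {
      if (!can_expand) return false;  // Would fail and force a GC instead.
      ++pages_left;                   // Expand() grows the space by a page.
    }
    bytes_left -= kToyPageSize;
    --pages_left;
  }
  return true;  // E.g. reserving 20 KB walks across three 8 KB pages.
}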


// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
  return heap()->OldGenerationSpaceAvailable() >= bytes;
}


// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed (size_in_bytes should not be greater than a page's
  // object area size).
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation unless that
  // is currently forbidden.
  if (!heap()->linear_allocation()) {
    int wasted_bytes;
    Object* result;
    MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
    accounting_stats_.WasteBytes(wasted_bytes);
    if (maybe->ToObject(&result)) {
      accounting_stats_.AllocateBytes(size_in_bytes);

      HeapObject* obj = HeapObject::cast(result);
      Page* p = Page::FromAddress(obj->address());

      if (obj->address() >= p->AllocationWatermark()) {
        // There should be no hole between the allocation watermark
        // and allocated object address.
        // Memory above the allocation watermark was not swept and
        // might contain garbage pointers to new space.
        ASSERT(obj->address() == p->AllocationWatermark());
        p->SetAllocationWatermark(obj->address() + size_in_bytes);
      }

      return obj;
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}
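
The slow path above is a fixed-order cascade. Expressed as a small decision function over stand-in predicates (everything here is a hypothetical name, not the V8 API), the order is: next page, then free list, then expand, then fail:

struct ToySpaceState {
  bool has_next_page;   // current_page->next_page()->is_valid()
  bool free_list_hit;   // free_list_.Allocate(...) succeeded
  bool may_expand;      // GC limit not reached and Expand() would succeed
};

enum ToyResult { USE_NEXT_PAGE, USE_FREE_LIST, EXPAND_AND_USE, FAIL };

ToyResult SlowAllocatePath(const ToySpaceState& s, bool linear_only) {
  if (s.has_next_page) return USE_NEXT_PAGE;
  if (!linear_only && s.free_list_hit) return USE_FREE_LIST;
  if (!s.may_expand) return FAIL;  // Failing here lets a GC run.
  return EXPAND_AND_USE;
}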


void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
  current_page->SetAllocationWatermark(allocation_info_.top);
  int free_size =
      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
  if (free_size > 0) {
    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
    accounting_stats_.WasteBytes(wasted_bytes);
  }
}


void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
  current_page->SetAllocationWatermark(allocation_info_.top);
  int free_size =
      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
  // In the fixed space free list all the free list items have the right size.
  // We use up the rest of the page while preserving this invariant.
  while (free_size >= object_size_in_bytes_) {
    free_list_.Free(allocation_info_.top);
    allocation_info_.top += object_size_in_bytes_;
    free_size -= object_size_in_bytes_;
    accounting_stats_.WasteBytes(object_size_in_bytes_);
  }
}


// Add the block at the top of the page to the space's free list, set the
// allocation info to the next page (there is assumed to be one), and
// allocate linearly there.
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  Page* next_page = current_page->next_page();
  next_page->ClearGCFields();
  PutRestOfCurrentPageOnFreeList(current_page);
  SetAllocationInfo(&allocation_info_, next_page);
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}


void OldSpace::DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) {
  Free(start, size_in_bytes, add_to_freelist);
}


#ifdef DEBUG
void PagedSpace::ReportCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\" [ comment-txt : size/ "
         "count (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics();
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}


// Adds a comment to the 'comment_statistics' table. Performance is OK as
// long as 'kMaxComments' is small.
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to the result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update the entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}


// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // from the loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new comment.
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered by the previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}
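
The recursion above credits each span of code bytes to its innermost enclosing '[ ... ]' comment (the "flat" delta); bytes covered by an inner comment are subtracted out by the recursive call. A toy version of the same accounting over a bracket string (the function and its 16-level depth cap are illustrative assumptions; input is assumed balanced, matching the ASSERTs in the real code):

#include <cstdio>

// flat[d] = number of ordinary characters whose innermost region is at
// nesting depth d. Depth 0 corresponds to the "NoComment" bucket above.
void CountFlatSizes(const char* s) {
  int flat[16] = {0};  // Assumes nesting depth stays below 16.
  int depth = 0;
  for (const char* p = s; *p != '\0'; ++p) {
    if (*p == '[') {
      ++depth;
    } else if (*p == ']') {
      --depth;
    } else {
      ++flat[depth];
    }
  }
  for (int d = 0; d < 16; ++d) {
    if (flat[d] > 0) std::printf("depth %d: %d bytes\n", d, flat[d]);
  }
}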


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


void OldSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", waste: %" V8_PTR_PREFIX "d"
             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
    CollectHistogramInfo(obj);
  ReportHistogram(true);
}
#endif

// -----------------------------------------------------------------------------
// FixedSpace implementation

void FixedSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the super class.
  PagedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Reset relocation info.
    MCResetRelocationInfo();

    // During a compacting collection, everything in the space is considered
    // 'available' (set by the call to MCResetRelocationInfo) and we will
    // rediscover live and wasted bytes during the collection.
    ASSERT(Available() == Capacity());
  } else {
    // During a non-compacting collection, everything below the linear
    // allocation pointer except wasted top-of-page blocks is considered
    // allocated and we will rediscover available bytes during the
    // collection.
    accounting_stats_.AllocateBytes(free_list_.available());
  }

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


void FixedSpace::MCCommitRelocationInfo() {
  // Update fast allocation info.
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = mc_forwarding_info_.limit;
  ASSERT(allocation_info_.VerifyPagedAllocation());

  // The space is compacted and we haven't yet wasted any space.
  ASSERT(Waste() == 0);

  // Update allocation_top of each page in use and compute waste.
  int computed_size = 0;
  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
  while (it.has_next()) {
    Page* page = it.next();
    Address page_top = page->AllocationTop();
    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
    if (it.has_next()) {
      accounting_stats_.WasteBytes(
          static_cast<int>(page->ObjectAreaEnd() - page_top));
      page->SetAllocationWatermark(page_top);
    }
  }

  // Make sure the computed size - based on the used portion of the
  // pages in use - matches the size we adjust during allocation.
  ASSERT(computed_size == Size());
}


// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed.
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation unless
  // that is currently forbidden. The fixed space free list implicitly assumes
  // that all free blocks are of the fixed size.
  if (!heap()->linear_allocation()) {
    Object* result;
    MaybeObject* maybe = free_list_.Allocate();
    if (maybe->ToObject(&result)) {
      accounting_stats_.AllocateBytes(size_in_bytes);
      HeapObject* obj = HeapObject::cast(result);
      Page* p = Page::FromAddress(obj->address());

      if (obj->address() >= p->AllocationWatermark()) {
        // There should be no hole between the allocation watermark
        // and allocated object address.
        // Memory above the allocation watermark was not swept and
        // might contain garbage pointers to new space.
        ASSERT(obj->address() == p->AllocationWatermark());
        p->SetAllocationWatermark(obj->address() + size_in_bytes);
      }

      return obj;
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  ASSERT(!current_page->next_page()->is_valid());
  if (Expand(current_page)) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // Finally, fail.
  return NULL;
}


// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold a
// map.
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                           int size_in_bytes) {
  ASSERT(current_page->next_page()->is_valid());
  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  Page* next_page = current_page->next_page();
  next_page->ClearGCFields();
  current_page->SetAllocationWatermark(allocation_info_.top);
  accounting_stats_.WasteBytes(page_extra_);
  SetAllocationInfo(&allocation_info_, next_page);
  return AllocateLinearly(&allocation_info_, size_in_bytes);
}


void FixedSpace::DeallocateBlock(Address start,
                                 int size_in_bytes,
                                 bool add_to_freelist) {
  // Free-list elements in fixed space are assumed to have a fixed size.
  // We break the free block into chunks and add them to the free list
  // individually.
  int size = object_size_in_bytes();
  ASSERT(size_in_bytes % size == 0);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += size) {
    Free(a, add_to_freelist);
  }
}
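
Carving a freed block into equal cells, as DeallocateBlock does above, is just a strided loop over offsets, gated by the divisibility invariant. A self-contained sketch (the callback parameter is an illustrative assumption):

#include <cassert>
#include <cstddef>

void CarveIntoCells(size_t start, size_t size_in_bytes, size_t cell_size,
                    void (*free_cell)(size_t offset)) {
  // Same invariant as the ASSERT above: a whole number of cells.
  assert(size_in_bytes % cell_size == 0);
  for (size_t a = start; a < start + size_in_bytes; a += cell_size) {
    free_cell(a);  // Each cell is returned to the free list separately.
  }
}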


#ifdef DEBUG
void FixedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", waste: %" V8_PTR_PREFIX "d"
             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  ClearHistograms();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
    CollectHistogramInfo(obj);
  ReportHistogram(false);
}
#endif


// -----------------------------------------------------------------------------
// MapSpace implementation

void MapSpace::PrepareForMarkCompact(bool will_compact) {
  // Call prepare of the super class.
  FixedSpace::PrepareForMarkCompact(will_compact);

  if (will_compact) {
    // Initialize map index entry.
    int page_count = 0;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (it.has_next()) {
      ASSERT_MAP_PAGE_INDEX(page_count);

      Page* p = it.next();
      ASSERT(p->mc_page_index == page_count);

      page_addresses_[page_count++] = p->address();
    }
  }
}


#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
  // The object should be a map or a free-list node.
  ASSERT(object->IsMap() || object->IsByteArray());
}
#endif


// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation

#ifdef DEBUG
void CellSpace::VerifyObject(HeapObject* object) {
  // The object should be a global object property cell or a free-list node.
  ASSERT(object->IsJSGlobalPropertyCell() ||
         object->map() == heap()->two_pointer_filler_map());
}
#endif


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_chunk_;
  size_func_ = NULL;
}


LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_chunk_;
  size_func_ = size_func;
}


HeapObject* LargeObjectIterator::next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                        Executability executable) {
  size_t requested = ChunkSizeFor(size_in_bytes);
  size_t size;
  Isolate* isolate = Isolate::Current();
  void* mem = isolate->memory_allocator()->AllocateRawMemory(
      requested, &size, executable);
  if (mem == NULL) return NULL;

  // The start of the chunk may be overlaid with a page so we have to
  // make sure that the page flags fit in the size field.
  ASSERT((size & Page::kPageFlagMask) == 0);

  LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
  if (size < requested) {
    isolate->memory_allocator()->FreeRawMemory(
        mem, size, executable);
    LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
    return NULL;
  }

  ObjectSpace space = (executable == EXECUTABLE)
      ? kObjectSpaceCodeSpace
      : kObjectSpaceLoSpace;
  isolate->memory_allocator()->PerformAllocationCallback(
      space, kAllocationActionAllocate, size);

  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
  chunk->size_ = size;
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  page->heap_ = isolate->heap();
  return chunk;
}


int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
  int os_alignment = static_cast<int>(OS::AllocateAlignment());
  if (os_alignment < Page::kPageSize) {
    size_in_bytes += (Page::kPageSize - os_alignment);
  }
  return size_in_bytes + Page::kObjectStartOffset;
}

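The arithmetic in ChunkSizeFor pads the request so a page boundary can be found inside the chunk whenever the OS allocation alignment is coarser than a V8 page, then adds room for the page header. A worked restatement with illustrative constants (not the real platform values):

const int kChunkPageSize = 8 * 1024;     // Stand-in for Page::kPageSize.
const int kChunkHeaderSize = 256;        // Stand-in for kObjectStartOffset.

int ToyChunkSizeFor(int size_in_bytes, int os_alignment) {
  if (os_alignment < kChunkPageSize) {
    size_in_bytes += (kChunkPageSize - os_alignment);
  }
  return size_in_bytes + kChunkHeaderSize;
  // E.g. a 10000-byte object with 4096-byte OS alignment requests
  // 10000 + (8192 - 4096) + 256 = 14352 bytes.
}
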
// -----------------------------------------------------------------------------
// LargeObjectSpace

LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_chunk_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0) {}


bool LargeObjectSpace::Setup() {
  first_chunk_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_chunk_ != NULL) {
    LargeObjectChunk* chunk = first_chunk_;
    first_chunk_ = first_chunk_->next();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
    Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
    Executability executable =
        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
    ObjectSpace space = kObjectSpaceLoSpace;
    if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
    size_t size = chunk->size();
    heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
                                                         size,
                                                         executable);
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, size);
  }

  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
}


#ifdef ENABLE_HEAP_PROTECTION

void LargeObjectSpace::Protect() {
  LargeObjectChunk* chunk = first_chunk_;
  while (chunk != NULL) {
    heap()->isolate()->memory_allocator()->Protect(chunk->address(),
                                                   chunk->size());
    chunk = chunk->next();
  }
}


void LargeObjectSpace::Unprotect() {
  LargeObjectChunk* chunk = first_chunk_;
  while (chunk != NULL) {
    bool is_code = chunk->GetObject()->IsCode();
    heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
        chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
    chunk = chunk->next();
  }
}

#endif


MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                                   int object_size,
                                                   Executability executable) {
  ASSERT(0 < object_size && object_size <= requested_size);

  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
  if (chunk == NULL) {
    return Failure::RetryAfterGC(identity());
  }

  size_ += static_cast<int>(chunk->size());
  objects_size_ += requested_size;
  page_count_++;
  chunk->set_next(first_chunk_);
  first_chunk_ = chunk;

  // Initialize page header.
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
  Address object_address = page->ObjectAreaStart();

  // Clear the low order bit of the second word in the page to flag it as a
  // large object page. If the chunk_size happened to be written there, its
  // low order bit should already be clear.
  page->SetIsLargeObjectPage(true);
  page->SetIsPageExecutable(executable);
  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
  return HeapObject::FromAddress(object_address);
}


MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             EXECUTABLE);
}


MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  return AllocateRawInternal(size_in_bytes,
                             size_in_bytes,
                             NOT_EXECUTABLE);
}


// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= a && a < chunk_address + chunk->size()) {
      return chunk->GetObject();
    }
  }
  return Failure::Exception();
}


LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
  // TODO(853): Change this implementation to only find executable
  // chunks and use some kind of hash-based approach to speed it up.
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    Address chunk_address = chunk->address();
    if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
      return chunk;
    }
  }
  return NULL;
}


void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
  LargeObjectIterator it(this);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    // We only have code, sequential strings, or fixed arrays in large
    // object space, and only fixed arrays can possibly contain pointers to
    // the young generation.
    if (object->IsFixedArray()) {
      Page* page = Page::FromAddress(object->address());
      uint32_t marks = page->GetRegionMarks();
      uint32_t newmarks = Page::kAllRegionsCleanMarks;

      if (marks != Page::kAllRegionsCleanMarks) {
        // For a large page a single dirty mark corresponds to several
        // regions (modulo 32). So we treat a large page as a sequence of
        // normal pages of size Page::kPageSize having same dirty marks
        // and subsequently iterate dirty regions on each of these pages.
        Address start = object->address();
        Address end = page->ObjectAreaEnd();
        Address object_end = start + object->Size();

        // Iterate regions of the first normal page covering object.
        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
        newmarks |=
            heap()->IterateDirtyRegions(marks >> first_region_number,
                                        start,
                                        end,
                                        &Heap::IteratePointersInDirtyRegion,
                                        copy_object) << first_region_number;

        start = end;
        end = start + Page::kPageSize;
        while (end <= object_end) {
          // Iterate next 32 regions.
          newmarks |=
              heap()->IterateDirtyRegions(marks,
                                          start,
                                          end,
                                          &Heap::IteratePointersInDirtyRegion,
                                          copy_object);
          start = end;
          end = start + Page::kPageSize;
        }

        if (start != object_end) {
          // Iterate the last piece of an object which is less than
          // Page::kPageSize.
          newmarks |=
              heap()->IterateDirtyRegions(marks,
                                          start,
                                          object_end,
                                          &Heap::IteratePointersInDirtyRegion,
                                          copy_object);
        }

        page->SetRegionMarks(newmarks);
      }
    }
  }
}
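
The region marks used above are a 32-bit bitmap, one bit per region of a page; iterating dirty regions amounts to walking the set bits and recomputing which ones must stay dirty. A toy walk with a visitor callback (signature is an illustrative assumption, not the Heap API):

#include <cstdint>

// Calls 'visit' for each dirty region index; the visitor returns true if
// the region still holds pointers to new space and must remain dirty.
uint32_t VisitDirtyRegions(uint32_t marks, bool (*visit)(int region_index)) {
  uint32_t newmarks = 0;
  for (int i = 0; i < 32; ++i) {
    uint32_t bit = 1u << i;
    if ((marks & bit) != 0 && visit(i)) {
      newmarks |= bit;  // Keep the region dirty for the next scan.
    }
  }
  return newmarks;
}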


void LargeObjectSpace::FreeUnmarkedObjects() {
  LargeObjectChunk* previous = NULL;
  LargeObjectChunk* current = first_chunk_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    if (object->IsMarked()) {
      object->ClearMark();
      heap()->mark_compact_collector()->tracer()->decrement_marked_count();
      previous = current;
      current = current->next();
    } else {
      Page* page = Page::FromAddress(RoundUp(current->address(),
                                             Page::kPageSize));
      Executability executable =
          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
      Address chunk_address = current->address();
      size_t chunk_size = current->size();

      // Cut the chunk out from the chunk list.
      current = current->next();
      if (previous == NULL) {
        first_chunk_ = current;
      } else {
        previous->set_next(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
          object, heap()->isolate());
      LiveObjectList::ProcessNonLive(object);

      size_ -= static_cast<int>(chunk_size);
      objects_size_ -= object->Size();
      page_count_--;
      ObjectSpace space = kObjectSpaceLoSpace;
      if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
      heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
                                                           chunk_size,
                                                           executable);
      heap()->isolate()->memory_allocator()->PerformAllocationCallback(
          space, kAllocationActionFree, size_);
      LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
    }
  }
}
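
Stripped of the accounting and callbacks, FreeUnmarkedObjects is the classic sweep of a singly linked list with a trailing 'previous' pointer: keep marked nodes (clearing the mark for the next cycle), unlink and release dead ones. A minimal sketch with stand-in types (ToyChunk and the release callback are assumptions, not the V8 types):

#include <cstddef>

struct ToyChunk {
  ToyChunk* next;
  bool marked;
};

void SweepList(ToyChunk** first, void (*release)(ToyChunk*)) {
  ToyChunk* previous = NULL;
  ToyChunk* current = *first;
  while (current != NULL) {
    if (current->marked) {
      current->marked = false;  // Clear the mark for the next GC cycle.
      previous = current;
      current = current->next;
    } else {
      ToyChunk* dead = current;
      current = current->next;
      if (previous == NULL) {   // Dead node was the list head.
        *first = current;
      } else {
        previous->next = current;
      }
      release(dead);
    }
  }
}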


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  if (heap()->new_space()->Contains(address)) {
    return false;
  }
  Page* page = Page::FromAddress(address);

  SLOW_ASSERT(!page->IsLargeObjectPage()
              || !FindObject(address)->IsFailure());

  return page->IsLargeObjectPage();
}


#ifdef DEBUG
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargeObjectChunk* chunk = first_chunk_;
       chunk != NULL;
       chunk = chunk->next()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    ASSERT(object->address() == page->ObjectAreaStart());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      // We loop over fixed arrays ourselves, rather than using the visitor,
      // because the visitor doesn't support the start/offset iteration
      // needed for IsRegionDirty.
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          ASSERT(heap()->Contains(element_object));
          ASSERT(element_object->map()->IsMap());
          if (heap()->InNewSpace(element_object)) {
            Address array_addr = object->address();
            Address element_addr = array_addr + FixedArray::kHeaderSize +
                j * kPointerSize;

            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
          }
        }
      }
    }
  }
}


void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal