libv8 3.3.10.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +8 -0
- data/.gitmodules +3 -0
- data/Gemfile +4 -0
- data/README.md +44 -0
- data/Rakefile +73 -0
- data/ext/libv8/extconf.rb +9 -0
- data/lib/libv8.rb +15 -0
- data/lib/libv8/Makefile +38 -0
- data/lib/libv8/detect_cpu.rb +27 -0
- data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
- data/lib/libv8/scons/CHANGES.txt +5334 -0
- data/lib/libv8/scons/LICENSE.txt +20 -0
- data/lib/libv8/scons/MANIFEST +199 -0
- data/lib/libv8/scons/PKG-INFO +13 -0
- data/lib/libv8/scons/README.txt +243 -0
- data/lib/libv8/scons/RELEASE.txt +98 -0
- data/lib/libv8/scons/engine/SCons/Action.py +1241 -0
- data/lib/libv8/scons/engine/SCons/Builder.py +877 -0
- data/lib/libv8/scons/engine/SCons/CacheDir.py +216 -0
- data/lib/libv8/scons/engine/SCons/Conftest.py +793 -0
- data/lib/libv8/scons/engine/SCons/Debug.py +220 -0
- data/lib/libv8/scons/engine/SCons/Defaults.py +480 -0
- data/lib/libv8/scons/engine/SCons/Environment.py +2318 -0
- data/lib/libv8/scons/engine/SCons/Errors.py +205 -0
- data/lib/libv8/scons/engine/SCons/Executor.py +633 -0
- data/lib/libv8/scons/engine/SCons/Job.py +435 -0
- data/lib/libv8/scons/engine/SCons/Memoize.py +244 -0
- data/lib/libv8/scons/engine/SCons/Node/Alias.py +152 -0
- data/lib/libv8/scons/engine/SCons/Node/FS.py +3142 -0
- data/lib/libv8/scons/engine/SCons/Node/Python.py +128 -0
- data/lib/libv8/scons/engine/SCons/Node/__init__.py +1328 -0
- data/lib/libv8/scons/engine/SCons/Options/BoolOption.py +50 -0
- data/lib/libv8/scons/engine/SCons/Options/EnumOption.py +50 -0
- data/lib/libv8/scons/engine/SCons/Options/ListOption.py +50 -0
- data/lib/libv8/scons/engine/SCons/Options/PackageOption.py +50 -0
- data/lib/libv8/scons/engine/SCons/Options/PathOption.py +76 -0
- data/lib/libv8/scons/engine/SCons/Options/__init__.py +67 -0
- data/lib/libv8/scons/engine/SCons/PathList.py +231 -0
- data/lib/libv8/scons/engine/SCons/Platform/__init__.py +241 -0
- data/lib/libv8/scons/engine/SCons/Platform/aix.py +69 -0
- data/lib/libv8/scons/engine/SCons/Platform/cygwin.py +55 -0
- data/lib/libv8/scons/engine/SCons/Platform/darwin.py +46 -0
- data/lib/libv8/scons/engine/SCons/Platform/hpux.py +46 -0
- data/lib/libv8/scons/engine/SCons/Platform/irix.py +44 -0
- data/lib/libv8/scons/engine/SCons/Platform/os2.py +58 -0
- data/lib/libv8/scons/engine/SCons/Platform/posix.py +263 -0
- data/lib/libv8/scons/engine/SCons/Platform/sunos.py +50 -0
- data/lib/libv8/scons/engine/SCons/Platform/win32.py +385 -0
- data/lib/libv8/scons/engine/SCons/SConf.py +1030 -0
- data/lib/libv8/scons/engine/SCons/SConsign.py +383 -0
- data/lib/libv8/scons/engine/SCons/Scanner/C.py +132 -0
- data/lib/libv8/scons/engine/SCons/Scanner/D.py +73 -0
- data/lib/libv8/scons/engine/SCons/Scanner/Dir.py +109 -0
- data/lib/libv8/scons/engine/SCons/Scanner/Fortran.py +316 -0
- data/lib/libv8/scons/engine/SCons/Scanner/IDL.py +48 -0
- data/lib/libv8/scons/engine/SCons/Scanner/LaTeX.py +384 -0
- data/lib/libv8/scons/engine/SCons/Scanner/Prog.py +101 -0
- data/lib/libv8/scons/engine/SCons/Scanner/RC.py +55 -0
- data/lib/libv8/scons/engine/SCons/Scanner/__init__.py +413 -0
- data/lib/libv8/scons/engine/SCons/Script/Interactive.py +384 -0
- data/lib/libv8/scons/engine/SCons/Script/Main.py +1334 -0
- data/lib/libv8/scons/engine/SCons/Script/SConsOptions.py +939 -0
- data/lib/libv8/scons/engine/SCons/Script/SConscript.py +640 -0
- data/lib/libv8/scons/engine/SCons/Script/__init__.py +412 -0
- data/lib/libv8/scons/engine/SCons/Sig.py +63 -0
- data/lib/libv8/scons/engine/SCons/Subst.py +904 -0
- data/lib/libv8/scons/engine/SCons/Taskmaster.py +1017 -0
- data/lib/libv8/scons/engine/SCons/Tool/386asm.py +61 -0
- data/lib/libv8/scons/engine/SCons/Tool/BitKeeper.py +67 -0
- data/lib/libv8/scons/engine/SCons/Tool/CVS.py +73 -0
- data/lib/libv8/scons/engine/SCons/Tool/FortranCommon.py +246 -0
- data/lib/libv8/scons/engine/SCons/Tool/JavaCommon.py +323 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/__init__.py +56 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/arch.py +61 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/common.py +240 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/netframework.py +82 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/sdk.py +391 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vc.py +456 -0
- data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vs.py +499 -0
- data/lib/libv8/scons/engine/SCons/Tool/Perforce.py +103 -0
- data/lib/libv8/scons/engine/SCons/Tool/PharLapCommon.py +137 -0
- data/lib/libv8/scons/engine/SCons/Tool/RCS.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/SCCS.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/Subversion.py +71 -0
- data/lib/libv8/scons/engine/SCons/Tool/__init__.py +681 -0
- data/lib/libv8/scons/engine/SCons/Tool/aixc++.py +82 -0
- data/lib/libv8/scons/engine/SCons/Tool/aixcc.py +74 -0
- data/lib/libv8/scons/engine/SCons/Tool/aixf77.py +80 -0
- data/lib/libv8/scons/engine/SCons/Tool/aixlink.py +76 -0
- data/lib/libv8/scons/engine/SCons/Tool/applelink.py +71 -0
- data/lib/libv8/scons/engine/SCons/Tool/ar.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/as.py +78 -0
- data/lib/libv8/scons/engine/SCons/Tool/bcc32.py +81 -0
- data/lib/libv8/scons/engine/SCons/Tool/c++.py +99 -0
- data/lib/libv8/scons/engine/SCons/Tool/cc.py +102 -0
- data/lib/libv8/scons/engine/SCons/Tool/cvf.py +58 -0
- data/lib/libv8/scons/engine/SCons/Tool/default.py +50 -0
- data/lib/libv8/scons/engine/SCons/Tool/dmd.py +223 -0
- data/lib/libv8/scons/engine/SCons/Tool/dvi.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/dvipdf.py +124 -0
- data/lib/libv8/scons/engine/SCons/Tool/dvips.py +94 -0
- data/lib/libv8/scons/engine/SCons/Tool/f77.py +62 -0
- data/lib/libv8/scons/engine/SCons/Tool/f90.py +62 -0
- data/lib/libv8/scons/engine/SCons/Tool/f95.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/filesystem.py +98 -0
- data/lib/libv8/scons/engine/SCons/Tool/fortran.py +62 -0
- data/lib/libv8/scons/engine/SCons/Tool/g++.py +90 -0
- data/lib/libv8/scons/engine/SCons/Tool/g77.py +73 -0
- data/lib/libv8/scons/engine/SCons/Tool/gas.py +53 -0
- data/lib/libv8/scons/engine/SCons/Tool/gcc.py +80 -0
- data/lib/libv8/scons/engine/SCons/Tool/gfortran.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/gnulink.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/gs.py +81 -0
- data/lib/libv8/scons/engine/SCons/Tool/hpc++.py +84 -0
- data/lib/libv8/scons/engine/SCons/Tool/hpcc.py +53 -0
- data/lib/libv8/scons/engine/SCons/Tool/hplink.py +77 -0
- data/lib/libv8/scons/engine/SCons/Tool/icc.py +59 -0
- data/lib/libv8/scons/engine/SCons/Tool/icl.py +52 -0
- data/lib/libv8/scons/engine/SCons/Tool/ifl.py +72 -0
- data/lib/libv8/scons/engine/SCons/Tool/ifort.py +88 -0
- data/lib/libv8/scons/engine/SCons/Tool/ilink.py +59 -0
- data/lib/libv8/scons/engine/SCons/Tool/ilink32.py +60 -0
- data/lib/libv8/scons/engine/SCons/Tool/install.py +229 -0
- data/lib/libv8/scons/engine/SCons/Tool/intelc.py +482 -0
- data/lib/libv8/scons/engine/SCons/Tool/ipkg.py +67 -0
- data/lib/libv8/scons/engine/SCons/Tool/jar.py +110 -0
- data/lib/libv8/scons/engine/SCons/Tool/javac.py +230 -0
- data/lib/libv8/scons/engine/SCons/Tool/javah.py +137 -0
- data/lib/libv8/scons/engine/SCons/Tool/latex.py +79 -0
- data/lib/libv8/scons/engine/SCons/Tool/lex.py +97 -0
- data/lib/libv8/scons/engine/SCons/Tool/link.py +121 -0
- data/lib/libv8/scons/engine/SCons/Tool/linkloc.py +112 -0
- data/lib/libv8/scons/engine/SCons/Tool/m4.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/masm.py +77 -0
- data/lib/libv8/scons/engine/SCons/Tool/midl.py +88 -0
- data/lib/libv8/scons/engine/SCons/Tool/mingw.py +158 -0
- data/lib/libv8/scons/engine/SCons/Tool/mslib.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/mslink.py +266 -0
- data/lib/libv8/scons/engine/SCons/Tool/mssdk.py +50 -0
- data/lib/libv8/scons/engine/SCons/Tool/msvc.py +268 -0
- data/lib/libv8/scons/engine/SCons/Tool/msvs.py +1388 -0
- data/lib/libv8/scons/engine/SCons/Tool/mwcc.py +207 -0
- data/lib/libv8/scons/engine/SCons/Tool/mwld.py +107 -0
- data/lib/libv8/scons/engine/SCons/Tool/nasm.py +72 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/__init__.py +312 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/ipk.py +185 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/msi.py +527 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/rpm.py +365 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/src_tarbz2.py +43 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/src_targz.py +43 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/src_zip.py +43 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/tarbz2.py +44 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/targz.py +44 -0
- data/lib/libv8/scons/engine/SCons/Tool/packaging/zip.py +44 -0
- data/lib/libv8/scons/engine/SCons/Tool/pdf.py +78 -0
- data/lib/libv8/scons/engine/SCons/Tool/pdflatex.py +83 -0
- data/lib/libv8/scons/engine/SCons/Tool/pdftex.py +108 -0
- data/lib/libv8/scons/engine/SCons/Tool/qt.py +336 -0
- data/lib/libv8/scons/engine/SCons/Tool/rmic.py +120 -0
- data/lib/libv8/scons/engine/SCons/Tool/rpcgen.py +70 -0
- data/lib/libv8/scons/engine/SCons/Tool/rpm.py +132 -0
- data/lib/libv8/scons/engine/SCons/Tool/sgiar.py +68 -0
- data/lib/libv8/scons/engine/SCons/Tool/sgic++.py +58 -0
- data/lib/libv8/scons/engine/SCons/Tool/sgicc.py +53 -0
- data/lib/libv8/scons/engine/SCons/Tool/sgilink.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunar.py +67 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunc++.py +142 -0
- data/lib/libv8/scons/engine/SCons/Tool/suncc.py +58 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunf77.py +63 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunf90.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunf95.py +64 -0
- data/lib/libv8/scons/engine/SCons/Tool/sunlink.py +77 -0
- data/lib/libv8/scons/engine/SCons/Tool/swig.py +182 -0
- data/lib/libv8/scons/engine/SCons/Tool/tar.py +73 -0
- data/lib/libv8/scons/engine/SCons/Tool/tex.py +813 -0
- data/lib/libv8/scons/engine/SCons/Tool/textfile.py +175 -0
- data/lib/libv8/scons/engine/SCons/Tool/tlib.py +53 -0
- data/lib/libv8/scons/engine/SCons/Tool/wix.py +99 -0
- data/lib/libv8/scons/engine/SCons/Tool/yacc.py +130 -0
- data/lib/libv8/scons/engine/SCons/Tool/zip.py +99 -0
- data/lib/libv8/scons/engine/SCons/Util.py +1492 -0
- data/lib/libv8/scons/engine/SCons/Variables/BoolVariable.py +89 -0
- data/lib/libv8/scons/engine/SCons/Variables/EnumVariable.py +103 -0
- data/lib/libv8/scons/engine/SCons/Variables/ListVariable.py +135 -0
- data/lib/libv8/scons/engine/SCons/Variables/PackageVariable.py +106 -0
- data/lib/libv8/scons/engine/SCons/Variables/PathVariable.py +147 -0
- data/lib/libv8/scons/engine/SCons/Variables/__init__.py +312 -0
- data/lib/libv8/scons/engine/SCons/Warnings.py +246 -0
- data/lib/libv8/scons/engine/SCons/__init__.py +49 -0
- data/lib/libv8/scons/engine/SCons/compat/__init__.py +237 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_builtins.py +150 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_collections.py +45 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_dbm.py +45 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_hashlib.py +76 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_io.py +45 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_sets.py +563 -0
- data/lib/libv8/scons/engine/SCons/compat/_scons_subprocess.py +1281 -0
- data/lib/libv8/scons/engine/SCons/cpp.py +589 -0
- data/lib/libv8/scons/engine/SCons/dblite.py +251 -0
- data/lib/libv8/scons/engine/SCons/exitfuncs.py +77 -0
- data/lib/libv8/scons/os_spawnv_fix.diff +83 -0
- data/lib/libv8/scons/scons-time.1 +1017 -0
- data/lib/libv8/scons/scons.1 +15219 -0
- data/lib/libv8/scons/sconsign.1 +208 -0
- data/lib/libv8/scons/script/scons +196 -0
- data/lib/libv8/scons/script/scons-time +1544 -0
- data/lib/libv8/scons/script/scons.bat +31 -0
- data/lib/libv8/scons/script/sconsign +513 -0
- data/lib/libv8/scons/setup.cfg +6 -0
- data/lib/libv8/scons/setup.py +425 -0
- data/lib/libv8/v8/.gitignore +35 -0
- data/lib/libv8/v8/AUTHORS +44 -0
- data/lib/libv8/v8/ChangeLog +2839 -0
- data/lib/libv8/v8/LICENSE +52 -0
- data/lib/libv8/v8/LICENSE.strongtalk +29 -0
- data/lib/libv8/v8/LICENSE.v8 +26 -0
- data/lib/libv8/v8/LICENSE.valgrind +45 -0
- data/lib/libv8/v8/SConstruct +1478 -0
- data/lib/libv8/v8/build/README.txt +49 -0
- data/lib/libv8/v8/build/all.gyp +18 -0
- data/lib/libv8/v8/build/armu.gypi +32 -0
- data/lib/libv8/v8/build/common.gypi +144 -0
- data/lib/libv8/v8/build/gyp_v8 +145 -0
- data/lib/libv8/v8/include/v8-debug.h +395 -0
- data/lib/libv8/v8/include/v8-preparser.h +117 -0
- data/lib/libv8/v8/include/v8-profiler.h +505 -0
- data/lib/libv8/v8/include/v8-testing.h +104 -0
- data/lib/libv8/v8/include/v8.h +4124 -0
- data/lib/libv8/v8/include/v8stdint.h +53 -0
- data/lib/libv8/v8/preparser/SConscript +38 -0
- data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
- data/lib/libv8/v8/src/SConscript +368 -0
- data/lib/libv8/v8/src/accessors.cc +767 -0
- data/lib/libv8/v8/src/accessors.h +123 -0
- data/lib/libv8/v8/src/allocation-inl.h +49 -0
- data/lib/libv8/v8/src/allocation.cc +122 -0
- data/lib/libv8/v8/src/allocation.h +143 -0
- data/lib/libv8/v8/src/api.cc +5845 -0
- data/lib/libv8/v8/src/api.h +574 -0
- data/lib/libv8/v8/src/apinatives.js +110 -0
- data/lib/libv8/v8/src/apiutils.h +73 -0
- data/lib/libv8/v8/src/arguments.h +118 -0
- data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
- data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
- data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
- data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
- data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
- data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
- data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
- data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
- data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
- data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
- data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
- data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
- data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
- data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
- data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
- data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
- data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
- data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
- data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
- data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
- data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
- data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
- data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
- data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
- data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
- data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
- data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
- data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
- data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
- data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
- data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
- data/lib/libv8/v8/src/array.js +1366 -0
- data/lib/libv8/v8/src/assembler.cc +1207 -0
- data/lib/libv8/v8/src/assembler.h +858 -0
- data/lib/libv8/v8/src/ast-inl.h +112 -0
- data/lib/libv8/v8/src/ast.cc +1146 -0
- data/lib/libv8/v8/src/ast.h +2188 -0
- data/lib/libv8/v8/src/atomicops.h +167 -0
- data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
- data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
- data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
- data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
- data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
- data/lib/libv8/v8/src/bignum.cc +768 -0
- data/lib/libv8/v8/src/bignum.h +140 -0
- data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
- data/lib/libv8/v8/src/bootstrapper.h +188 -0
- data/lib/libv8/v8/src/builtins.cc +1707 -0
- data/lib/libv8/v8/src/builtins.h +371 -0
- data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
- data/lib/libv8/v8/src/cached-powers.cc +177 -0
- data/lib/libv8/v8/src/cached-powers.h +65 -0
- data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
- data/lib/libv8/v8/src/char-predicates.h +67 -0
- data/lib/libv8/v8/src/checks.cc +110 -0
- data/lib/libv8/v8/src/checks.h +296 -0
- data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
- data/lib/libv8/v8/src/circular-queue.cc +122 -0
- data/lib/libv8/v8/src/circular-queue.h +103 -0
- data/lib/libv8/v8/src/code-stubs.cc +267 -0
- data/lib/libv8/v8/src/code-stubs.h +1011 -0
- data/lib/libv8/v8/src/code.h +70 -0
- data/lib/libv8/v8/src/codegen.cc +231 -0
- data/lib/libv8/v8/src/codegen.h +84 -0
- data/lib/libv8/v8/src/compilation-cache.cc +540 -0
- data/lib/libv8/v8/src/compilation-cache.h +287 -0
- data/lib/libv8/v8/src/compiler.cc +786 -0
- data/lib/libv8/v8/src/compiler.h +312 -0
- data/lib/libv8/v8/src/contexts.cc +347 -0
- data/lib/libv8/v8/src/contexts.h +391 -0
- data/lib/libv8/v8/src/conversions-inl.h +106 -0
- data/lib/libv8/v8/src/conversions.cc +1131 -0
- data/lib/libv8/v8/src/conversions.h +135 -0
- data/lib/libv8/v8/src/counters.cc +93 -0
- data/lib/libv8/v8/src/counters.h +254 -0
- data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
- data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
- data/lib/libv8/v8/src/cpu-profiler.h +302 -0
- data/lib/libv8/v8/src/cpu.h +69 -0
- data/lib/libv8/v8/src/d8-debug.cc +367 -0
- data/lib/libv8/v8/src/d8-debug.h +158 -0
- data/lib/libv8/v8/src/d8-posix.cc +695 -0
- data/lib/libv8/v8/src/d8-readline.cc +130 -0
- data/lib/libv8/v8/src/d8-windows.cc +42 -0
- data/lib/libv8/v8/src/d8.cc +803 -0
- data/lib/libv8/v8/src/d8.gyp +91 -0
- data/lib/libv8/v8/src/d8.h +235 -0
- data/lib/libv8/v8/src/d8.js +2798 -0
- data/lib/libv8/v8/src/data-flow.cc +66 -0
- data/lib/libv8/v8/src/data-flow.h +205 -0
- data/lib/libv8/v8/src/date.js +1103 -0
- data/lib/libv8/v8/src/dateparser-inl.h +127 -0
- data/lib/libv8/v8/src/dateparser.cc +178 -0
- data/lib/libv8/v8/src/dateparser.h +266 -0
- data/lib/libv8/v8/src/debug-agent.cc +447 -0
- data/lib/libv8/v8/src/debug-agent.h +129 -0
- data/lib/libv8/v8/src/debug-debugger.js +2569 -0
- data/lib/libv8/v8/src/debug.cc +3165 -0
- data/lib/libv8/v8/src/debug.h +1057 -0
- data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
- data/lib/libv8/v8/src/deoptimizer.h +602 -0
- data/lib/libv8/v8/src/disasm.h +80 -0
- data/lib/libv8/v8/src/disassembler.cc +343 -0
- data/lib/libv8/v8/src/disassembler.h +58 -0
- data/lib/libv8/v8/src/diy-fp.cc +58 -0
- data/lib/libv8/v8/src/diy-fp.h +117 -0
- data/lib/libv8/v8/src/double.h +238 -0
- data/lib/libv8/v8/src/dtoa.cc +103 -0
- data/lib/libv8/v8/src/dtoa.h +85 -0
- data/lib/libv8/v8/src/execution.cc +849 -0
- data/lib/libv8/v8/src/execution.h +297 -0
- data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
- data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
- data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
- data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
- data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
- data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
- data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
- data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
- data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
- data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
- data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
- data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
- data/lib/libv8/v8/src/factory.cc +1222 -0
- data/lib/libv8/v8/src/factory.h +442 -0
- data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
- data/lib/libv8/v8/src/fast-dtoa.h +83 -0
- data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
- data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
- data/lib/libv8/v8/src/flag-definitions.h +560 -0
- data/lib/libv8/v8/src/flags.cc +551 -0
- data/lib/libv8/v8/src/flags.h +79 -0
- data/lib/libv8/v8/src/frames-inl.h +247 -0
- data/lib/libv8/v8/src/frames.cc +1243 -0
- data/lib/libv8/v8/src/frames.h +870 -0
- data/lib/libv8/v8/src/full-codegen.cc +1374 -0
- data/lib/libv8/v8/src/full-codegen.h +771 -0
- data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
- data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
- data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
- data/lib/libv8/v8/src/gdb-jit.h +143 -0
- data/lib/libv8/v8/src/global-handles.cc +665 -0
- data/lib/libv8/v8/src/global-handles.h +284 -0
- data/lib/libv8/v8/src/globals.h +325 -0
- data/lib/libv8/v8/src/handles-inl.h +177 -0
- data/lib/libv8/v8/src/handles.cc +987 -0
- data/lib/libv8/v8/src/handles.h +382 -0
- data/lib/libv8/v8/src/hashmap.cc +230 -0
- data/lib/libv8/v8/src/hashmap.h +123 -0
- data/lib/libv8/v8/src/heap-inl.h +704 -0
- data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
- data/lib/libv8/v8/src/heap-profiler.h +397 -0
- data/lib/libv8/v8/src/heap.cc +5930 -0
- data/lib/libv8/v8/src/heap.h +2268 -0
- data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
- data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
- data/lib/libv8/v8/src/hydrogen.cc +6239 -0
- data/lib/libv8/v8/src/hydrogen.h +1202 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
- data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
- data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
- data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
- data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
- data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
- data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
- data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
- data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
- data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
- data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
- data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
- data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
- data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
- data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
- data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
- data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
- data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
- data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
- data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
- data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
- data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
- data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
- data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
- data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
- data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
- data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
- data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
- data/lib/libv8/v8/src/ic-inl.h +130 -0
- data/lib/libv8/v8/src/ic.cc +2577 -0
- data/lib/libv8/v8/src/ic.h +736 -0
- data/lib/libv8/v8/src/inspector.cc +63 -0
- data/lib/libv8/v8/src/inspector.h +62 -0
- data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
- data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
- data/lib/libv8/v8/src/isolate-inl.h +50 -0
- data/lib/libv8/v8/src/isolate.cc +1869 -0
- data/lib/libv8/v8/src/isolate.h +1382 -0
- data/lib/libv8/v8/src/json-parser.cc +504 -0
- data/lib/libv8/v8/src/json-parser.h +161 -0
- data/lib/libv8/v8/src/json.js +342 -0
- data/lib/libv8/v8/src/jsregexp.cc +5385 -0
- data/lib/libv8/v8/src/jsregexp.h +1492 -0
- data/lib/libv8/v8/src/list-inl.h +212 -0
- data/lib/libv8/v8/src/list.h +174 -0
- data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
- data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
- data/lib/libv8/v8/src/lithium-allocator.h +630 -0
- data/lib/libv8/v8/src/lithium.cc +190 -0
- data/lib/libv8/v8/src/lithium.h +597 -0
- data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
- data/lib/libv8/v8/src/liveedit.cc +1691 -0
- data/lib/libv8/v8/src/liveedit.h +180 -0
- data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
- data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
- data/lib/libv8/v8/src/liveobjectlist.h +322 -0
- data/lib/libv8/v8/src/log-inl.h +59 -0
- data/lib/libv8/v8/src/log-utils.cc +428 -0
- data/lib/libv8/v8/src/log-utils.h +231 -0
- data/lib/libv8/v8/src/log.cc +1993 -0
- data/lib/libv8/v8/src/log.h +476 -0
- data/lib/libv8/v8/src/macro-assembler.h +120 -0
- data/lib/libv8/v8/src/macros.py +178 -0
- data/lib/libv8/v8/src/mark-compact.cc +3143 -0
- data/lib/libv8/v8/src/mark-compact.h +506 -0
- data/lib/libv8/v8/src/math.js +264 -0
- data/lib/libv8/v8/src/messages.cc +179 -0
- data/lib/libv8/v8/src/messages.h +113 -0
- data/lib/libv8/v8/src/messages.js +1096 -0
- data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
- data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
- data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
- data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
- data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
- data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
- data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
- data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
- data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
- data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
- data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
- data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
- data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
- data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
- data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
- data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
- data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
- data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
- data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
- data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
- data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
- data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
- data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
- data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
- data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
- data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
- data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
- data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
- data/lib/libv8/v8/src/mksnapshot.cc +328 -0
- data/lib/libv8/v8/src/natives.h +64 -0
- data/lib/libv8/v8/src/objects-debug.cc +738 -0
- data/lib/libv8/v8/src/objects-inl.h +4323 -0
- data/lib/libv8/v8/src/objects-printer.cc +829 -0
- data/lib/libv8/v8/src/objects-visiting.cc +148 -0
- data/lib/libv8/v8/src/objects-visiting.h +424 -0
- data/lib/libv8/v8/src/objects.cc +10585 -0
- data/lib/libv8/v8/src/objects.h +6838 -0
- data/lib/libv8/v8/src/parser.cc +4997 -0
- data/lib/libv8/v8/src/parser.h +765 -0
- data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
- data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
- data/lib/libv8/v8/src/platform-linux.cc +1149 -0
- data/lib/libv8/v8/src/platform-macos.cc +830 -0
- data/lib/libv8/v8/src/platform-nullos.cc +479 -0
- data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
- data/lib/libv8/v8/src/platform-posix.cc +424 -0
- data/lib/libv8/v8/src/platform-solaris.cc +762 -0
- data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
- data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
- data/lib/libv8/v8/src/platform-tls.h +50 -0
- data/lib/libv8/v8/src/platform-win32.cc +2021 -0
- data/lib/libv8/v8/src/platform.h +667 -0
- data/lib/libv8/v8/src/preparse-data-format.h +62 -0
- data/lib/libv8/v8/src/preparse-data.cc +183 -0
- data/lib/libv8/v8/src/preparse-data.h +225 -0
- data/lib/libv8/v8/src/preparser-api.cc +220 -0
- data/lib/libv8/v8/src/preparser.cc +1450 -0
- data/lib/libv8/v8/src/preparser.h +493 -0
- data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
- data/lib/libv8/v8/src/prettyprinter.h +223 -0
- data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
- data/lib/libv8/v8/src/profile-generator.cc +3098 -0
- data/lib/libv8/v8/src/profile-generator.h +1126 -0
- data/lib/libv8/v8/src/property.cc +105 -0
- data/lib/libv8/v8/src/property.h +365 -0
- data/lib/libv8/v8/src/proxy.js +83 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
- data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
- data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
- data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
- data/lib/libv8/v8/src/regexp-stack.cc +111 -0
- data/lib/libv8/v8/src/regexp-stack.h +147 -0
- data/lib/libv8/v8/src/regexp.js +483 -0
- data/lib/libv8/v8/src/rewriter.cc +360 -0
- data/lib/libv8/v8/src/rewriter.h +50 -0
- data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
- data/lib/libv8/v8/src/runtime-profiler.h +201 -0
- data/lib/libv8/v8/src/runtime.cc +12227 -0
- data/lib/libv8/v8/src/runtime.h +652 -0
- data/lib/libv8/v8/src/runtime.js +649 -0
- data/lib/libv8/v8/src/safepoint-table.cc +256 -0
- data/lib/libv8/v8/src/safepoint-table.h +270 -0
- data/lib/libv8/v8/src/scanner-base.cc +952 -0
- data/lib/libv8/v8/src/scanner-base.h +670 -0
- data/lib/libv8/v8/src/scanner.cc +345 -0
- data/lib/libv8/v8/src/scanner.h +146 -0
- data/lib/libv8/v8/src/scopeinfo.cc +646 -0
- data/lib/libv8/v8/src/scopeinfo.h +254 -0
- data/lib/libv8/v8/src/scopes.cc +1150 -0
- data/lib/libv8/v8/src/scopes.h +507 -0
- data/lib/libv8/v8/src/serialize.cc +1574 -0
- data/lib/libv8/v8/src/serialize.h +589 -0
- data/lib/libv8/v8/src/shell.h +55 -0
- data/lib/libv8/v8/src/simulator.h +43 -0
- data/lib/libv8/v8/src/small-pointer-list.h +163 -0
- data/lib/libv8/v8/src/smart-pointer.h +109 -0
- data/lib/libv8/v8/src/snapshot-common.cc +83 -0
- data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
- data/lib/libv8/v8/src/snapshot.h +91 -0
- data/lib/libv8/v8/src/spaces-inl.h +529 -0
- data/lib/libv8/v8/src/spaces.cc +3145 -0
- data/lib/libv8/v8/src/spaces.h +2369 -0
- data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
- data/lib/libv8/v8/src/splay-tree.h +205 -0
- data/lib/libv8/v8/src/string-search.cc +41 -0
- data/lib/libv8/v8/src/string-search.h +568 -0
- data/lib/libv8/v8/src/string-stream.cc +592 -0
- data/lib/libv8/v8/src/string-stream.h +191 -0
- data/lib/libv8/v8/src/string.js +994 -0
- data/lib/libv8/v8/src/strtod.cc +440 -0
- data/lib/libv8/v8/src/strtod.h +40 -0
- data/lib/libv8/v8/src/stub-cache.cc +1965 -0
- data/lib/libv8/v8/src/stub-cache.h +924 -0
- data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
- data/lib/libv8/v8/src/token.cc +63 -0
- data/lib/libv8/v8/src/token.h +288 -0
- data/lib/libv8/v8/src/type-info.cc +507 -0
- data/lib/libv8/v8/src/type-info.h +272 -0
- data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
- data/lib/libv8/v8/src/unbound-queue.h +69 -0
- data/lib/libv8/v8/src/unicode-inl.h +238 -0
- data/lib/libv8/v8/src/unicode.cc +1624 -0
- data/lib/libv8/v8/src/unicode.h +280 -0
- data/lib/libv8/v8/src/uri.js +408 -0
- data/lib/libv8/v8/src/utils-inl.h +48 -0
- data/lib/libv8/v8/src/utils.cc +371 -0
- data/lib/libv8/v8/src/utils.h +800 -0
- data/lib/libv8/v8/src/v8-counters.cc +62 -0
- data/lib/libv8/v8/src/v8-counters.h +314 -0
- data/lib/libv8/v8/src/v8.cc +213 -0
- data/lib/libv8/v8/src/v8.h +131 -0
- data/lib/libv8/v8/src/v8checks.h +64 -0
- data/lib/libv8/v8/src/v8dll-main.cc +44 -0
- data/lib/libv8/v8/src/v8globals.h +512 -0
- data/lib/libv8/v8/src/v8memory.h +82 -0
- data/lib/libv8/v8/src/v8natives.js +1310 -0
- data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
- data/lib/libv8/v8/src/v8threads.cc +464 -0
- data/lib/libv8/v8/src/v8threads.h +165 -0
- data/lib/libv8/v8/src/v8utils.h +319 -0
- data/lib/libv8/v8/src/variables.cc +114 -0
- data/lib/libv8/v8/src/variables.h +167 -0
- data/lib/libv8/v8/src/version.cc +116 -0
- data/lib/libv8/v8/src/version.h +68 -0
- data/lib/libv8/v8/src/vm-state-inl.h +138 -0
- data/lib/libv8/v8/src/vm-state.h +71 -0
- data/lib/libv8/v8/src/win32-headers.h +96 -0
- data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
- data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
- data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
- data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
- data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
- data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
- data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
- data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
- data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
- data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
- data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
- data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
- data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
- data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
- data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
- data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
- data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
- data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
- data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
- data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
- data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
- data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
- data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
- data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
- data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
- data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
- data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
- data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
- data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
- data/lib/libv8/v8/src/zone-inl.h +140 -0
- data/lib/libv8/v8/src/zone.cc +196 -0
- data/lib/libv8/v8/src/zone.h +240 -0
- data/lib/libv8/v8/tools/codemap.js +265 -0
- data/lib/libv8/v8/tools/consarray.js +93 -0
- data/lib/libv8/v8/tools/csvparser.js +78 -0
- data/lib/libv8/v8/tools/disasm.py +92 -0
- data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
- data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
- data/lib/libv8/v8/tools/gcmole/README +62 -0
- data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
- data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
- data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
- data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
- data/lib/libv8/v8/tools/grokdump.py +841 -0
- data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
- data/lib/libv8/v8/tools/js2c.py +364 -0
- data/lib/libv8/v8/tools/jsmin.py +280 -0
- data/lib/libv8/v8/tools/linux-tick-processor +35 -0
- data/lib/libv8/v8/tools/ll_prof.py +942 -0
- data/lib/libv8/v8/tools/logreader.js +185 -0
- data/lib/libv8/v8/tools/mac-nm +18 -0
- data/lib/libv8/v8/tools/mac-tick-processor +6 -0
- data/lib/libv8/v8/tools/oom_dump/README +31 -0
- data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
- data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
- data/lib/libv8/v8/tools/presubmit.py +305 -0
- data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
- data/lib/libv8/v8/tools/profile.js +751 -0
- data/lib/libv8/v8/tools/profile_view.js +219 -0
- data/lib/libv8/v8/tools/run-valgrind.py +77 -0
- data/lib/libv8/v8/tools/splaytree.js +316 -0
- data/lib/libv8/v8/tools/stats-viewer.py +468 -0
- data/lib/libv8/v8/tools/test.py +1510 -0
- data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
- data/lib/libv8/v8/tools/tickprocessor.js +877 -0
- data/lib/libv8/v8/tools/utils.py +96 -0
- data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
- data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
- data/lib/libv8/version.rb +4 -0
- data/libv8.gemspec +31 -0
- metadata +800 -0
@@ -0,0 +1,65 @@
|
|
1
|
+
// Copyright 2011 the V8 project authors. All rights reserved.
|
2
|
+
// Redistribution and use in source and binary forms, with or without
|
3
|
+
// modification, are permitted provided that the following conditions are
|
4
|
+
// met:
|
5
|
+
//
|
6
|
+
// * Redistributions of source code must retain the above copyright
|
7
|
+
// notice, this list of conditions and the following disclaimer.
|
8
|
+
// * Redistributions in binary form must reproduce the above
|
9
|
+
// copyright notice, this list of conditions and the following
|
10
|
+
// disclaimer in the documentation and/or other materials provided
|
11
|
+
// with the distribution.
|
12
|
+
// * Neither the name of Google Inc. nor the names of its
|
13
|
+
// contributors may be used to endorse or promote products derived
|
14
|
+
// from this software without specific prior written permission.
|
15
|
+
//
|
16
|
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
17
|
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
18
|
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
19
|
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
20
|
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21
|
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22
|
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
23
|
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
24
|
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
25
|
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
26
|
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
27
|
+
|
28
|
+
#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
|
29
|
+
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
|
30
|
+
|
31
|
+
#include "mips/lithium-mips.h"
|
32
|
+
|
33
|
+
#include "deoptimizer.h"
|
34
|
+
#include "safepoint-table.h"
|
35
|
+
#include "scopes.h"
|
36
|
+
|
37
|
+
// Note: this file was taken from the X64 version. ARM has a partially working
|
38
|
+
// lithium implementation, but for now it is not ported to mips.
|
39
|
+
|
40
|
+
namespace v8 {
|
41
|
+
namespace internal {
|
42
|
+
|
43
|
+
// Forward declarations.
|
44
|
+
class LDeferredCode;
|
45
|
+
|
46
|
+
class LCodeGen BASE_EMBEDDED {
|
47
|
+
public:
|
48
|
+
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
|
49
|
+
|
50
|
+
// Try to generate code for the entire chunk, but it may fail if the
|
51
|
+
// chunk contains constructs we cannot handle. Returns true if the
|
52
|
+
// code generation attempt succeeded.
|
53
|
+
bool GenerateCode() {
|
54
|
+
UNIMPLEMENTED();
|
55
|
+
return false;
|
56
|
+
}
|
57
|
+
|
58
|
+
// Finish the code by setting stack height, safepoint, and bailout
|
59
|
+
// information on it.
|
60
|
+
void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
|
61
|
+
};
|
62
|
+
|
63
|
+
} } // namespace v8::internal
|
64
|
+
|
65
|
+
#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
|
@@ -0,0 +1,307 @@
|
|
1
|
+
// Copyright 2011 the V8 project authors. All rights reserved.
|
2
|
+
// Redistribution and use in source and binary forms, with or without
|
3
|
+
// modification, are permitted provided that the following conditions are
|
4
|
+
// met:
|
5
|
+
//
|
6
|
+
// * Redistributions of source code must retain the above copyright
|
7
|
+
// notice, this list of conditions and the following disclaimer.
|
8
|
+
// * Redistributions in binary form must reproduce the above
|
9
|
+
// copyright notice, this list of conditions and the following
|
10
|
+
// disclaimer in the documentation and/or other materials provided
|
11
|
+
// with the distribution.
|
12
|
+
// * Neither the name of Google Inc. nor the names of its
|
13
|
+
// contributors may be used to endorse or promote products derived
|
14
|
+
// from this software without specific prior written permission.
|
15
|
+
//
|
16
|
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
17
|
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
18
|
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
19
|
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
20
|
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21
|
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22
|
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
23
|
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
24
|
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
25
|
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
26
|
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
27
|
+
|
28
|
+
#ifndef V8_MIPS_LITHIUM_MIPS_H_
|
29
|
+
#define V8_MIPS_LITHIUM_MIPS_H_
|
30
|
+
|
31
|
+
#include "hydrogen.h"
|
32
|
+
#include "lithium-allocator.h"
|
33
|
+
#include "lithium.h"
|
34
|
+
#include "safepoint-table.h"
|
35
|
+
|
36
|
+
// Note: this file was taken from the X64 version. ARM has a partially working
|
37
|
+
// lithium implementation, but for now it is not ported to mips.
|
38
|
+
|
39
|
+
namespace v8 {
|
40
|
+
namespace internal {
|
41
|
+
|
42
|
+
// Forward declarations.
|
43
|
+
class LCodeGen;
|
44
|
+
class LEnvironment;
|
45
|
+
class Translation;
|
46
|
+
|
47
|
+
class LInstruction: public ZoneObject {
|
48
|
+
public:
|
49
|
+
LInstruction() { }
|
50
|
+
virtual ~LInstruction() { }
|
51
|
+
|
52
|
+
// Predicates should be generated by macro as in lithium-ia32.h.
|
53
|
+
virtual bool IsLabel() const {
|
54
|
+
UNIMPLEMENTED();
|
55
|
+
return false;
|
56
|
+
}
|
57
|
+
virtual bool IsOsrEntry() const {
|
58
|
+
UNIMPLEMENTED();
|
59
|
+
return false;
|
60
|
+
}
|
61
|
+
|
62
|
+
LPointerMap* pointer_map() const {
|
63
|
+
UNIMPLEMENTED();
|
64
|
+
return NULL;
|
65
|
+
}
|
66
|
+
|
67
|
+
bool HasPointerMap() const {
|
68
|
+
UNIMPLEMENTED();
|
69
|
+
return false;
|
70
|
+
}
|
71
|
+
|
72
|
+
void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
|
73
|
+
|
74
|
+
LEnvironment* environment() const {
|
75
|
+
UNIMPLEMENTED();
|
76
|
+
return NULL;
|
77
|
+
}
|
78
|
+
|
79
|
+
bool HasEnvironment() const {
|
80
|
+
UNIMPLEMENTED();
|
81
|
+
return false;
|
82
|
+
}
|
83
|
+
|
84
|
+
virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
|
85
|
+
|
86
|
+
virtual bool IsControl() const {
|
87
|
+
UNIMPLEMENTED();
|
88
|
+
return false;
|
89
|
+
}
|
90
|
+
|
91
|
+
void MarkAsCall() { UNIMPLEMENTED(); }
|
92
|
+
void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
|
93
|
+
|
94
|
+
// Interface to the register allocator and iterators.
|
95
|
+
bool IsMarkedAsCall() const {
|
96
|
+
UNIMPLEMENTED();
|
97
|
+
return false;
|
98
|
+
}
|
99
|
+
|
100
|
+
bool IsMarkedAsSaveDoubles() const {
|
101
|
+
UNIMPLEMENTED();
|
102
|
+
return false;
|
103
|
+
}
|
104
|
+
|
105
|
+
virtual bool HasResult() const {
|
106
|
+
UNIMPLEMENTED();
|
107
|
+
return false;
|
108
|
+
}
|
109
|
+
|
110
|
+
virtual LOperand* result() {
|
111
|
+
UNIMPLEMENTED();
|
112
|
+
return NULL;
|
113
|
+
}
|
114
|
+
|
115
|
+
virtual int InputCount() {
|
116
|
+
UNIMPLEMENTED();
|
117
|
+
return 0;
|
118
|
+
}
|
119
|
+
|
120
|
+
virtual LOperand* InputAt(int i) {
|
121
|
+
UNIMPLEMENTED();
|
122
|
+
return NULL;
|
123
|
+
}
|
124
|
+
|
125
|
+
virtual int TempCount() {
|
126
|
+
UNIMPLEMENTED();
|
127
|
+
return 0;
|
128
|
+
}
|
129
|
+
|
130
|
+
virtual LOperand* TempAt(int i) {
|
131
|
+
UNIMPLEMENTED();
|
132
|
+
return NULL;
|
133
|
+
}
|
134
|
+
|
135
|
+
LOperand* FirstInput() {
|
136
|
+
UNIMPLEMENTED();
|
137
|
+
return NULL;
|
138
|
+
}
|
139
|
+
|
140
|
+
LOperand* Output() {
|
141
|
+
UNIMPLEMENTED();
|
142
|
+
return NULL;
|
143
|
+
}
|
144
|
+
|
145
|
+
#ifdef DEBUG
|
146
|
+
void VerifyCall() { UNIMPLEMENTED(); }
|
147
|
+
#endif
|
148
|
+
};
|
149
|
+
|
150
|
+
|
151
|
+
class LGap: public LInstruction {
|
152
|
+
public:
|
153
|
+
explicit LGap(HBasicBlock* block) { }
|
154
|
+
|
155
|
+
HBasicBlock* block() const {
|
156
|
+
UNIMPLEMENTED();
|
157
|
+
return NULL;
|
158
|
+
}
|
159
|
+
|
160
|
+
enum InnerPosition {
|
161
|
+
BEFORE,
|
162
|
+
START,
|
163
|
+
END,
|
164
|
+
AFTER,
|
165
|
+
FIRST_INNER_POSITION = BEFORE,
|
166
|
+
LAST_INNER_POSITION = AFTER
|
167
|
+
};
|
168
|
+
|
169
|
+
LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
|
170
|
+
UNIMPLEMENTED();
|
171
|
+
return NULL;
|
172
|
+
}
|
173
|
+
|
174
|
+
LParallelMove* GetParallelMove(InnerPosition pos) {
|
175
|
+
UNIMPLEMENTED();
|
176
|
+
return NULL;
|
177
|
+
}
|
178
|
+
};
|
179
|
+
|
180
|
+
|
181
|
+
class LLabel: public LGap {
|
182
|
+
public:
|
183
|
+
explicit LLabel(HBasicBlock* block) : LGap(block) { }
|
184
|
+
};
|
185
|
+
|
186
|
+
|
187
|
+
class LOsrEntry: public LInstruction {
|
188
|
+
public:
|
189
|
+
// Function could be generated by a macro as in lithium-ia32.h.
|
190
|
+
static LOsrEntry* cast(LInstruction* instr) {
|
191
|
+
UNIMPLEMENTED();
|
192
|
+
return NULL;
|
193
|
+
}
|
194
|
+
|
195
|
+
LOperand** SpilledRegisterArray() {
|
196
|
+
UNIMPLEMENTED();
|
197
|
+
return NULL;
|
198
|
+
}
|
199
|
+
LOperand** SpilledDoubleRegisterArray() {
|
200
|
+
UNIMPLEMENTED();
|
201
|
+
return NULL;
|
202
|
+
}
|
203
|
+
|
204
|
+
void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
|
205
|
+
UNIMPLEMENTED();
|
206
|
+
}
|
207
|
+
void MarkSpilledDoubleRegister(int allocation_index,
|
208
|
+
LOperand* spill_operand) {
|
209
|
+
UNIMPLEMENTED();
|
210
|
+
}
|
211
|
+
};
|
212
|
+
|
213
|
+
|
214
|
+
class LChunk: public ZoneObject {
|
215
|
+
public:
|
216
|
+
explicit LChunk(HGraph* graph) { }
|
217
|
+
|
218
|
+
HGraph* graph() const {
|
219
|
+
UNIMPLEMENTED();
|
220
|
+
return NULL;
|
221
|
+
}
|
222
|
+
|
223
|
+
const ZoneList<LPointerMap*>* pointer_maps() const {
|
224
|
+
UNIMPLEMENTED();
|
225
|
+
return NULL;
|
226
|
+
}
|
227
|
+
|
228
|
+
LOperand* GetNextSpillSlot(bool double_slot) {
|
229
|
+
UNIMPLEMENTED();
|
230
|
+
return NULL;
|
231
|
+
}
|
232
|
+
|
233
|
+
LConstantOperand* DefineConstantOperand(HConstant* constant) {
|
234
|
+
UNIMPLEMENTED();
|
235
|
+
return NULL;
|
236
|
+
}
|
237
|
+
|
238
|
+
LLabel* GetLabel(int block_id) const {
|
239
|
+
UNIMPLEMENTED();
|
240
|
+
return NULL;
|
241
|
+
}
|
242
|
+
|
243
|
+
const ZoneList<LInstruction*>* instructions() const {
|
244
|
+
UNIMPLEMENTED();
|
245
|
+
return NULL;
|
246
|
+
}
|
247
|
+
|
248
|
+
int GetParameterStackSlot(int index) const {
|
249
|
+
UNIMPLEMENTED();
|
250
|
+
return 0;
|
251
|
+
}
|
252
|
+
|
253
|
+
void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
|
254
|
+
|
255
|
+
LGap* GetGapAt(int index) const {
|
256
|
+
UNIMPLEMENTED();
|
257
|
+
return NULL;
|
258
|
+
}
|
259
|
+
|
260
|
+
bool IsGapAt(int index) const {
|
261
|
+
UNIMPLEMENTED();
|
262
|
+
return false;
|
263
|
+
}
|
264
|
+
|
265
|
+
int NearestGapPos(int index) const {
|
266
|
+
UNIMPLEMENTED();
|
267
|
+
return 0;
|
268
|
+
}
|
269
|
+
|
270
|
+
void MarkEmptyBlocks() { UNIMPLEMENTED(); }
|
271
|
+
|
272
|
+
CompilationInfo* info() const {
|
273
|
+
UNIMPLEMENTED();
|
274
|
+
return NULL;
|
275
|
+
}
|
276
|
+
|
277
|
+
#ifdef DEBUG
|
278
|
+
void Verify() { UNIMPLEMENTED(); }
|
279
|
+
#endif
|
280
|
+
};
|
281
|
+
|
282
|
+
|
283
|
+
class LChunkBuilder BASE_EMBEDDED {
|
284
|
+
public:
|
285
|
+
LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
|
286
|
+
|
287
|
+
// Build the sequence for the graph.
|
288
|
+
LChunk* Build() {
|
289
|
+
UNIMPLEMENTED();
|
290
|
+
return NULL;
|
291
|
+
};
|
292
|
+
|
293
|
+
// Declare methods that deal with the individual node types.
|
294
|
+
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
|
295
|
+
UNIMPLEMENTED(); \
|
296
|
+
return NULL; \
|
297
|
+
}
|
298
|
+
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
|
299
|
+
#undef DECLARE_DO
|
300
|
+
|
301
|
+
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
|
302
|
+
};
|
303
|
+
|
304
|
+
|
305
|
+
} } // namespace v8::internal
|
306
|
+
|
307
|
+
#endif // V8_MIPS_LITHIUM_MIPS_H_
|
@@ -0,0 +1,4056 @@
|
|
1
|
+
// Copyright 2011 the V8 project authors. All rights reserved.
|
2
|
+
// Redistribution and use in source and binary forms, with or without
|
3
|
+
// modification, are permitted provided that the following conditions are
|
4
|
+
// met:
|
5
|
+
//
|
6
|
+
// * Redistributions of source code must retain the above copyright
|
7
|
+
// notice, this list of conditions and the following disclaimer.
|
8
|
+
// * Redistributions in binary form must reproduce the above
|
9
|
+
// copyright notice, this list of conditions and the following
|
10
|
+
// disclaimer in the documentation and/or other materials provided
|
11
|
+
// with the distribution.
|
12
|
+
// * Neither the name of Google Inc. nor the names of its
|
13
|
+
// contributors may be used to endorse or promote products derived
|
14
|
+
// from this software without specific prior written permission.
|
15
|
+
//
|
16
|
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
17
|
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
18
|
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
19
|
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
20
|
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21
|
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22
|
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
23
|
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
24
|
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
25
|
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
26
|
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
27
|
+
|
28
|
+
#include <limits.h> // For LONG_MIN, LONG_MAX.
|
29
|
+
|
30
|
+
#include "v8.h"
|
31
|
+
|
32
|
+
#if defined(V8_TARGET_ARCH_MIPS)
|
33
|
+
|
34
|
+
#include "bootstrapper.h"
|
35
|
+
#include "codegen.h"
|
36
|
+
#include "debug.h"
|
37
|
+
#include "runtime.h"
|
38
|
+
|
39
|
+
namespace v8 {
|
40
|
+
namespace internal {
|
41
|
+
|
42
|
+
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
|
43
|
+
: Assembler(arg_isolate, buffer, size),
|
44
|
+
generating_stub_(false),
|
45
|
+
allow_stub_calls_(true) {
|
46
|
+
if (isolate() != NULL) {
|
47
|
+
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
|
48
|
+
isolate());
|
49
|
+
}
|
50
|
+
}
|
51
|
+
|
52
|
+
|
53
|
+
// Arguments macros.
|
54
|
+
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
|
55
|
+
#define COND_ARGS cond, r1, r2
|
56
|
+
|
57
|
+
#define REGISTER_TARGET_BODY(Name) \
|
58
|
+
void MacroAssembler::Name(Register target, \
|
59
|
+
BranchDelaySlot bd) { \
|
60
|
+
Name(Operand(target), bd); \
|
61
|
+
} \
|
62
|
+
void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
|
63
|
+
BranchDelaySlot bd) { \
|
64
|
+
Name(Operand(target), COND_ARGS, bd); \
|
65
|
+
}
|
66
|
+
|
67
|
+
|
68
|
+
#define INT_PTR_TARGET_BODY(Name) \
|
69
|
+
void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
|
70
|
+
BranchDelaySlot bd) { \
|
71
|
+
Name(Operand(target, rmode), bd); \
|
72
|
+
} \
|
73
|
+
void MacroAssembler::Name(intptr_t target, \
|
74
|
+
RelocInfo::Mode rmode, \
|
75
|
+
COND_TYPED_ARGS, \
|
76
|
+
BranchDelaySlot bd) { \
|
77
|
+
Name(Operand(target, rmode), COND_ARGS, bd); \
|
78
|
+
}
|
79
|
+
|
80
|
+
|
81
|
+
#define BYTE_PTR_TARGET_BODY(Name) \
|
82
|
+
void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
|
83
|
+
BranchDelaySlot bd) { \
|
84
|
+
Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
|
85
|
+
} \
|
86
|
+
void MacroAssembler::Name(byte* target, \
|
87
|
+
RelocInfo::Mode rmode, \
|
88
|
+
COND_TYPED_ARGS, \
|
89
|
+
BranchDelaySlot bd) { \
|
90
|
+
Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
|
91
|
+
}
|
92
|
+
|
93
|
+
|
94
|
+
#define CODE_TARGET_BODY(Name) \
|
95
|
+
void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
|
96
|
+
BranchDelaySlot bd) { \
|
97
|
+
Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
|
98
|
+
} \
|
99
|
+
void MacroAssembler::Name(Handle<Code> target, \
|
100
|
+
RelocInfo::Mode rmode, \
|
101
|
+
COND_TYPED_ARGS, \
|
102
|
+
BranchDelaySlot bd) { \
|
103
|
+
Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
|
104
|
+
}
|
105
|
+
|
106
|
+
|
107
|
+
REGISTER_TARGET_BODY(Jump)
|
108
|
+
REGISTER_TARGET_BODY(Call)
|
109
|
+
INT_PTR_TARGET_BODY(Jump)
|
110
|
+
INT_PTR_TARGET_BODY(Call)
|
111
|
+
BYTE_PTR_TARGET_BODY(Jump)
|
112
|
+
BYTE_PTR_TARGET_BODY(Call)
|
113
|
+
CODE_TARGET_BODY(Jump)
|
114
|
+
CODE_TARGET_BODY(Call)
|
115
|
+
|
116
|
+
#undef COND_TYPED_ARGS
|
117
|
+
#undef COND_ARGS
|
118
|
+
#undef REGISTER_TARGET_BODY
|
119
|
+
#undef BYTE_PTR_TARGET_BODY
|
120
|
+
#undef CODE_TARGET_BODY
|
121
|
+
|
122
|
+
|
123
|
+
void MacroAssembler::Ret(BranchDelaySlot bd) {
|
124
|
+
Jump(Operand(ra), bd);
|
125
|
+
}
|
126
|
+
|
127
|
+
|
128
|
+
void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
|
129
|
+
BranchDelaySlot bd) {
|
130
|
+
Jump(Operand(ra), cond, r1, r2, bd);
|
131
|
+
}
|
132
|
+
|
133
|
+
|
134
|
+
void MacroAssembler::LoadRoot(Register destination,
|
135
|
+
Heap::RootListIndex index) {
|
136
|
+
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
|
137
|
+
}
|
138
|
+
|
139
|
+
|
140
|
+
void MacroAssembler::LoadRoot(Register destination,
|
141
|
+
Heap::RootListIndex index,
|
142
|
+
Condition cond,
|
143
|
+
Register src1, const Operand& src2) {
|
144
|
+
Branch(2, NegateCondition(cond), src1, src2);
|
145
|
+
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
|
146
|
+
}
|
147
|
+
|
148
|
+
|
149
|
+
void MacroAssembler::StoreRoot(Register source,
|
150
|
+
Heap::RootListIndex index) {
|
151
|
+
sw(source, MemOperand(s6, index << kPointerSizeLog2));
|
152
|
+
}
|
153
|
+
|
154
|
+
|
155
|
+
void MacroAssembler::StoreRoot(Register source,
|
156
|
+
Heap::RootListIndex index,
|
157
|
+
Condition cond,
|
158
|
+
Register src1, const Operand& src2) {
|
159
|
+
Branch(2, NegateCondition(cond), src1, src2);
|
160
|
+
sw(source, MemOperand(s6, index << kPointerSizeLog2));
|
161
|
+
}
|
162
|
+
|
163
|
+
|
164
|
+
void MacroAssembler::RecordWriteHelper(Register object,
|
165
|
+
Register address,
|
166
|
+
Register scratch) {
|
167
|
+
if (emit_debug_code()) {
|
168
|
+
// Check that the object is not in new space.
|
169
|
+
Label not_in_new_space;
|
170
|
+
InNewSpace(object, scratch, ne, ¬_in_new_space);
|
171
|
+
Abort("new-space object passed to RecordWriteHelper");
|
172
|
+
bind(¬_in_new_space);
|
173
|
+
}
|
174
|
+
|
175
|
+
// Calculate page address: Clear bits from 0 to kPageSizeBits.
|
176
|
+
if (mips32r2) {
|
177
|
+
Ins(object, zero_reg, 0, kPageSizeBits);
|
178
|
+
} else {
|
179
|
+
// The Ins macro is slow on r1, so use shifts instead.
|
180
|
+
srl(object, object, kPageSizeBits);
|
181
|
+
sll(object, object, kPageSizeBits);
|
182
|
+
}
|
183
|
+
|
184
|
+
// Calculate region number.
|
185
|
+
Ext(address, address, Page::kRegionSizeLog2,
|
186
|
+
kPageSizeBits - Page::kRegionSizeLog2);
|
187
|
+
|
188
|
+
// Mark region dirty.
|
189
|
+
lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
|
190
|
+
li(at, Operand(1));
|
191
|
+
sllv(at, at, address);
|
192
|
+
or_(scratch, scratch, at);
|
193
|
+
sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
|
194
|
+
}
|
195
|
+
|
196
|
+
// Push and pop all registers that can hold pointers.
|
197
|
+
void MacroAssembler::PushSafepointRegisters() {
|
198
|
+
// Safepoints expect a block of kNumSafepointRegisters values on the
|
199
|
+
// stack, so adjust the stack for unsaved registers.
|
200
|
+
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
|
201
|
+
ASSERT(num_unsaved >= 0);
|
202
|
+
Subu(sp, sp, Operand(num_unsaved * kPointerSize));
|
203
|
+
MultiPush(kSafepointSavedRegisters);
|
204
|
+
}
|
205
|
+
|
206
|
+
void MacroAssembler::PopSafepointRegisters() {
|
207
|
+
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
|
208
|
+
MultiPop(kSafepointSavedRegisters);
|
209
|
+
Addu(sp, sp, Operand(num_unsaved * kPointerSize));
|
210
|
+
}
|
211
|
+
|
212
|
+
void MacroAssembler::PushSafepointRegistersAndDoubles() {
|
213
|
+
PushSafepointRegisters();
|
214
|
+
Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
|
215
|
+
for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
|
216
|
+
FPURegister reg = FPURegister::FromAllocationIndex(i);
|
217
|
+
sdc1(reg, MemOperand(sp, i * kDoubleSize));
|
218
|
+
}
|
219
|
+
}
|
220
|
+
|
221
|
+
void MacroAssembler::PopSafepointRegistersAndDoubles() {
|
222
|
+
for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
|
223
|
+
FPURegister reg = FPURegister::FromAllocationIndex(i);
|
224
|
+
ldc1(reg, MemOperand(sp, i * kDoubleSize));
|
225
|
+
}
|
226
|
+
Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
|
227
|
+
PopSafepointRegisters();
|
228
|
+
}
|
229
|
+
|
230
|
+
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
|
231
|
+
Register dst) {
|
232
|
+
sw(src, SafepointRegistersAndDoublesSlot(dst));
|
233
|
+
}
|
234
|
+
|
235
|
+
|
236
|
+
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
|
237
|
+
sw(src, SafepointRegisterSlot(dst));
|
238
|
+
}
|
239
|
+
|
240
|
+
|
241
|
+
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
|
242
|
+
lw(dst, SafepointRegisterSlot(src));
|
243
|
+
}
|
244
|
+
|
245
|
+
|
246
|
+
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
|
247
|
+
// The registers are pushed starting with the highest encoding,
|
248
|
+
// which means that lowest encodings are closest to the stack pointer.
|
249
|
+
return kSafepointRegisterStackIndexMap[reg_code];
|
250
|
+
}
|
251
|
+
|
252
|
+
|
253
|
+
MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
|
254
|
+
return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
|
255
|
+
}
|
256
|
+
|
257
|
+
|
258
|
+
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
|
259
|
+
// General purpose registers are pushed last on the stack.
|
260
|
+
int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
|
261
|
+
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
|
262
|
+
return MemOperand(sp, doubles_size + register_offset);
|
263
|
+
}
|
264
|
+
|
265
|
+
|
266
|
+
|
267
|
+
|
268
|
+
void MacroAssembler::InNewSpace(Register object,
|
269
|
+
Register scratch,
|
270
|
+
Condition cc,
|
271
|
+
Label* branch) {
|
272
|
+
ASSERT(cc == eq || cc == ne);
|
273
|
+
And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
|
274
|
+
Branch(branch, cc, scratch,
|
275
|
+
Operand(ExternalReference::new_space_start(isolate())));
|
276
|
+
}
|
277
|
+
|
278
|
+
|
279
|
+
// Will clobber 4 registers: object, scratch0, scratch1, at. The
|
280
|
+
// register 'object' contains a heap object pointer. The heap object
|
281
|
+
// tag is shifted away.
|
282
|
+
void MacroAssembler::RecordWrite(Register object,
|
283
|
+
Operand offset,
|
284
|
+
Register scratch0,
|
285
|
+
Register scratch1) {
|
286
|
+
// The compiled code assumes that record write doesn't change the
|
287
|
+
// context register, so we check that none of the clobbered
|
288
|
+
// registers are cp.
|
289
|
+
ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
|
290
|
+
|
291
|
+
Label done;
|
292
|
+
|
293
|
+
// First, test that the object is not in the new space. We cannot set
|
294
|
+
// region marks for new space pages.
|
295
|
+
InNewSpace(object, scratch0, eq, &done);
|
296
|
+
|
297
|
+
// Add offset into the object.
|
298
|
+
Addu(scratch0, object, offset);
|
299
|
+
|
300
|
+
// Record the actual write.
|
301
|
+
RecordWriteHelper(object, scratch0, scratch1);
|
302
|
+
|
303
|
+
bind(&done);
|
304
|
+
|
305
|
+
// Clobber all input registers when running with the debug-code flag
|
306
|
+
// turned on to provoke errors.
|
307
|
+
if (emit_debug_code()) {
|
308
|
+
li(object, Operand(BitCast<int32_t>(kZapValue)));
|
309
|
+
li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
|
310
|
+
li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
|
311
|
+
}
|
312
|
+
}
|
313
|
+
|
314
|
+
|
315
|
+
// Will clobber 4 registers: object, address, scratch, ip. The
|
316
|
+
// register 'object' contains a heap object pointer. The heap object
|
317
|
+
// tag is shifted away.
|
318
|
+
void MacroAssembler::RecordWrite(Register object,
|
319
|
+
Register address,
|
320
|
+
Register scratch) {
|
321
|
+
// The compiled code assumes that record write doesn't change the
|
322
|
+
// context register, so we check that none of the clobbered
|
323
|
+
// registers are cp.
|
324
|
+
ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
|
325
|
+
|
326
|
+
Label done;
|
327
|
+
|
328
|
+
// First, test that the object is not in the new space. We cannot set
|
329
|
+
// region marks for new space pages.
|
330
|
+
InNewSpace(object, scratch, eq, &done);
|
331
|
+
|
332
|
+
// Record the actual write.
|
333
|
+
RecordWriteHelper(object, address, scratch);
|
334
|
+
|
335
|
+
bind(&done);
|
336
|
+
|
337
|
+
// Clobber all input registers when running with the debug-code flag
|
338
|
+
// turned on to provoke errors.
|
339
|
+
if (emit_debug_code()) {
|
340
|
+
li(object, Operand(BitCast<int32_t>(kZapValue)));
|
341
|
+
li(address, Operand(BitCast<int32_t>(kZapValue)));
|
342
|
+
li(scratch, Operand(BitCast<int32_t>(kZapValue)));
|
343
|
+
}
|
344
|
+
}
|
345
|
+
|
346
|
+
|
347
|
+
// -----------------------------------------------------------------------------
|
348
|
+
// Allocation support.
|
349
|
+
|
350
|
+
|
351
|
+
// Verify that the holder's global proxy may be accessed from the current
// lexical context; falls through on success, jumps to 'miss' on a security
// token mismatch. Clobbers 'scratch' and 'at'; 'holder_reg' is preserved
// (saved/restored around debug-only checks).
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  ASSERT(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, "we should not have an empty lexical context",
      scratch, Operand(zero_reg));
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, "JSGlobalProxy::context() should not be null.",
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    // Restore at is not needed. at is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore at to holder's context.
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}
|
420
|
+
|
421
|
+
|
422
|
+
// ---------------------------------------------------------------------------
|
423
|
+
// Instruction macros.
|
424
|
+
|
425
|
+
// Add: rd = rs + rt, where rt is either a register or an immediate.
// A signed 16-bit immediate with no relocation needs folds into a single
// addiu; anything else is materialized into 'at' first.
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (!rt.is_reg()) {
    const bool fits_imm16 = is_int16(rt.imm32_) && !MustUseReg(rt.rmode_);
    if (fits_imm16) {
      addiu(rd, rs, rt.imm32_);
    } else {
      ASSERT(!rs.is(at));  // 'at' is about to be clobbered.
      li(at, rt);          // li handles the relocation.
      addu(rd, rs, at);
    }
  } else {
    addu(rd, rs, rt.rm());
  }
}
|
439
|
+
|
440
|
+
|
441
|
+
// Subtract: rd = rs - rt. MIPS has no subtract-immediate, so a small
// immediate is folded by adding its negation; larger or relocated operands
// are materialized into 'at'.
void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (!rt.is_reg()) {
    const bool fits_imm16 = is_int16(rt.imm32_) && !MustUseReg(rt.rmode_);
    if (fits_imm16) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      ASSERT(!rs.is(at));  // 'at' is about to be clobbered.
      li(at, rt);          // li handles the relocation.
      subu(rd, rs, at);
    }
  } else {
    subu(rd, rs, rt.rm());
  }
}
|
455
|
+
|
456
|
+
|
457
|
+
// Multiply: rd = low 32 bits of rs * rt. An immediate operand is
// materialized into 'at' first (there is no multiply-immediate).
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}
|
467
|
+
|
468
|
+
|
469
|
+
// Signed multiply of rs by rt into the HI/LO register pair. Immediate
// operands go through 'at'.
void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}
|
479
|
+
|
480
|
+
|
481
|
+
// Unsigned multiply of rs by rt into the HI/LO register pair. Immediate
// operands go through 'at'.
void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}
|
491
|
+
|
492
|
+
|
493
|
+
// Signed divide: quotient goes to LO, remainder to HI. A non-register
// operand is materialized into 'at' before the divide.
void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (!rt.is_reg()) {
    ASSERT(!rs.is(at));  // 'at' is about to be clobbered.
    li(at, rt);          // li handles the relocation.
    div(rs, at);
  } else {
    div(rs, rt.rm());
  }
}
|
503
|
+
|
504
|
+
|
505
|
+
// Unsigned divide: quotient goes to LO, remainder to HI. Immediate
// operands go through 'at'.
void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}
|
515
|
+
|
516
|
+
|
517
|
+
// Bitwise AND: rd = rs & rt. An unsigned 16-bit immediate with no
// relocation needs maps directly onto andi (which zero-extends);
// otherwise the operand is materialized into 'at'.
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (!rt.is_reg()) {
    const bool fits_uimm16 = is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_);
    if (fits_uimm16) {
      andi(rd, rs, rt.imm32_);
    } else {
      ASSERT(!rs.is(at));  // 'at' is about to be clobbered.
      li(at, rt);          // li handles the relocation.
      and_(rd, rs, at);
    }
  } else {
    and_(rd, rs, rt.rm());
  }
}
|
531
|
+
|
532
|
+
|
533
|
+
// Bitwise OR: rd = rs | rt. An unsigned 16-bit immediate with no
// relocation needs maps directly onto ori (which zero-extends);
// otherwise the operand is materialized into 'at'.
void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (!rt.is_reg()) {
    const bool fits_uimm16 = is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_);
    if (fits_uimm16) {
      ori(rd, rs, rt.imm32_);
    } else {
      ASSERT(!rs.is(at));  // 'at' is about to be clobbered.
      li(at, rt);          // li handles the relocation.
      or_(rd, rs, at);
    }
  } else {
    or_(rd, rs, rt.rm());
  }
}
|
547
|
+
|
548
|
+
|
549
|
+
// Bitwise XOR: rd = rs ^ rt. Unsigned 16-bit immediates fold into xori;
// anything else is materialized into 'at'.
void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}
|
563
|
+
|
564
|
+
|
565
|
+
// Bitwise NOR: rd = ~(rs | rt). There is no nor-immediate, so immediate
// operands always go through 'at'.
void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}
|
575
|
+
|
576
|
+
|
577
|
+
// "Neg": rs = rt.rm() ^ -1, i.e. the bitwise complement of rt.
// NOTE(review): XOR with -1 yields ~x (one's complement), not the
// two's-complement arithmetic negation one might expect from the name
// (that would be subu(rs, zero_reg, rt.rm())) — confirm intended semantics
// against the callers.
void MacroAssembler::Neg(Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  ASSERT(!at.is(rs));
  ASSERT(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}
|
584
|
+
|
585
|
+
|
586
|
+
// Set-on-less-than (signed): rd = (rs < rt) ? 1 : 0. Signed 16-bit
// immediates fold into slti; anything else goes through 'at'.
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}
|
600
|
+
|
601
|
+
|
602
|
+
// Set-on-less-than (unsigned): rd = (rs < rt) ? 1 : 0 unsigned.
// NOTE(review): the immediate path gates on is_uint16 but sltiu
// sign-extends its 16-bit immediate before the unsigned compare —
// confirm the guard matches the intended immediate range.
void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}
|
616
|
+
|
617
|
+
|
618
|
+
// Rotate right: rd = rs rotated right by rt (register or immediate).
// On MIPS32R2 this is a single rotr/rotrv; otherwise it is emulated with
// a shift-left of the wrapped bits, a shift-right, and an OR, using 'at'
// as scratch.
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (mips32r2) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_);
    }
  } else {
    if (rt.is_reg()) {
      // Emulation: (rs >> rt) | (rs << (32 - rt)). sllv/srlv use only the
      // low 5 bits of the shift amount, so the negation wraps correctly.
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        // Rotate by zero degenerates to a plain move (srl by 0).
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}
|
642
|
+
|
643
|
+
|
644
|
+
//------------Pseudo-instructions-------------
|
645
|
+
|
646
|
+
// Load immediate: materialize the 32-bit value (and any relocation info)
// of 'j' into rd. Without relocation needs the shortest encoding is used
// (1 or 2 instructions); with relocation, or when gen2instr is set, a
// fixed lui/ori pair is always emitted so the code can later be patched
// with any other 32-bit value.
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
  ASSERT(!j.is_reg());
  // Keep the trampoline pool from splitting the (possibly patched) pair.
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && !gen2instr) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    } else {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else if (MustUseReg(j.rmode_) || gen2instr) {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We need always the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.
    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}
|
671
|
+
|
672
|
+
|
673
|
+
// Exception-generating instructions and debugging support.
|
674
|
+
// Exception-generating instructions and debugging support.
// Emit a breakpoint that halts execution; 'msg' is currently unused.
void MacroAssembler::stop(const char* msg) {
  // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
  // We use the 0x54321 value to be able to find it easily when reading memory.
  break_(0x54321);
}
|
679
|
+
|
680
|
+
|
681
|
+
// Push every register whose bit is set in 'regs' onto the stack, with the
// highest-numbered register stored at the highest address. The stack is
// pre-decremented once for the whole set.
// NOTE(review): the loop runs i = kNumRegisters down to 1, so bit 0 is
// never tested and bit kNumRegisters is — confirm this matches the RegList
// bit encoding (MultiPushReversed below scans 0..kNumRegisters-1 instead).
void MacroAssembler::MultiPush(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = kNumRegisters; i > 0; i--) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}
|
692
|
+
|
693
|
+
|
694
|
+
// Push every register whose bit is set in 'regs', in the opposite
// memory order from MultiPush (lowest-numbered register at the highest
// address). The stack is pre-decremented once for the whole set.
void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}
|
705
|
+
|
706
|
+
|
707
|
+
// Pop the registers in 'regs' in the layout written by MultiPush, then
// release the stack space in one post-increment.
void MacroAssembler::MultiPop(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}
|
717
|
+
|
718
|
+
|
719
|
+
// Pop the registers in 'regs' in the layout written by MultiPushReversed,
// then release the stack space in one post-increment.
// NOTE(review): like MultiPush, this descending loop tests bits
// kNumRegisters..1 and never bit 0 — confirm against the RegList encoding.
void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = kNumRegisters; i > 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}
|
729
|
+
|
730
|
+
|
731
|
+
// Extract bitfield: rt = zero-extended bits [pos, pos+size) of rs.
// Uses the R2 ext instruction when available, otherwise a shift-left /
// shift-right pair.
// NOTE(review): the ASSERT rejects pos + size == 32 (a full-width field),
// although the non-R2 emulation's sll by 32-(pos+size) would handle it —
// confirm whether the exclusive bound is intentional.
void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 32);

  if (mips32r2) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the
    // desired bitfield on the right side and zeroes on the left.
    sll(rt, rs, 32 - (pos + size));
    srl(rt, rt, 32 - size);
  }
}
|
747
|
+
|
748
|
+
|
749
|
+
// Insert bitfield: replace bits [pos, pos+size) of rt with the low 'size'
// bits of rs; the other bits of rt are preserved. Uses the R2 ins
// instruction when available; the emulation clobbers t8 and 'at'.
void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 32);

  if (mips32r2) {
    ins_(rt, rs, pos, size);
  } else {
    ASSERT(!rt.is(t8) && !rs.is(t8));

    srl(t8, rt, pos + size);
    // The left chunk from rt that needs to
    // be saved is on the right side of t8.
    sll(at, t8, pos + size);
    // The 'at' register now contains the left chunk on
    // the left (proper position) and zeroes.
    sll(t8, rt, 32 - pos);
    // t8 now contains the right chunk on the left and zeroes.
    srl(t8, t8, 32 - pos);
    // t8 now contains the right chunk on
    // the right (proper position) and zeroes.
    or_(rt, at, t8);
    // rt now contains the left and right chunks from the original rt
    // in their proper position and zeroes in the middle.
    sll(t8, rs, 32 - size);
    // t8 now contains the chunk from rs on the left and zeroes.
    srl(t8, t8, 32 - size - pos);
    // t8 now contains the original chunk from rs in
    // the middle (proper position).
    or_(rt, rt, t8);
    // rt now contains the result of the ins instruction in R2 mode.
  }
}
|
784
|
+
|
785
|
+
|
786
|
+
// Convert the unsigned 32-bit integer held in FPU register fs into a
// double in fd. Stages the integer through GPR t4 and delegates to the
// Register overload below.
void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t4.
  mfc1(t4, fs);
  return Cvt_d_uw(fd, t4);
}
|
791
|
+
|
792
|
+
|
793
|
+
// Convert the unsigned 32-bit integer in rs into a double in fd.
// cvt_d_w is a *signed* conversion, so the MSB is stripped first and, if
// it was set, 2^31 is added back in FP arithmetic (as 2^31-1 plus 1,
// since 2^31 itself does not fit in a positive int32).
// Clobbers t8, t9 and f20.
void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31-1 and 1 to the result.

  ASSERT(!fd.is(f20));
  ASSERT(!rs.is(t9));
  ASSERT(!rs.is(t8));

  // Save rs's MSB to t8.
  And(t8, rs, 0x80000000);
  // Remove rs's MSB.
  And(t9, rs, 0x7FFFFFFF);
  // Move t9 to fd.
  mtc1(t9, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t8, Operand(zero_reg));

  // First load 2^31 - 1 into f20.
  Or(t9, zero_reg, 0x7FFFFFFF);
  mtc1(t9, f20);

  // Convert it to FP and add it to fd.
  cvt_d_w(f20, f20);
  add_d(fd, fd, f20);
  // Now add 1.
  Or(t9, zero_reg, 1);
  mtc1(t9, f20);

  cvt_d_w(f20, f20);
  add_d(fd, fd, f20);
  bind(&conversion_done);
}
|
833
|
+
|
834
|
+
|
835
|
+
// Truncate the double in fs to an unsigned 32-bit integer and leave the
// result in FPU register fd. Delegates to the Register overload with t4
// as the intermediate GPR.
// NOTE(review): the delegate is called as Trunc_uw_d(fs, t4), i.e. the
// first argument of the overload below is the *source* double — the
// parameter naming (fd/rs) there is misleading; verify call sites.
void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
  Trunc_uw_d(fs, t4);
  mtc1(t4, fd);
}
|
839
|
+
|
840
|
+
|
841
|
+
// Truncate the double in 'fd' (the SOURCE, despite the name) to an
// unsigned 32-bit integer in GPR 'rs'. Values >= 2^31 are handled by
// subtracting 2^31 before the signed truncation and OR-ing the MSB back
// in afterwards. Clobbers t6 and f22.
void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
  ASSERT(!fd.is(f22));
  ASSERT(!rs.is(t6));

  // Load 2^31 into f22.
  Or(t6, zero_reg, 0x80000000);
  Cvt_d_uw(f22, t6);

  // Test if f22 > fd.
  c(OLT, D, fd, f22);

  Label simple_convert;
  // If fd < 2^31 we can convert it normally.
  bc1t(&simple_convert);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.

  sub_d(f22, fd, f22);
  trunc_w_d(f22, f22);
  mfc1(rs, f22);
  // t6 still holds 0x80000000: restore the stripped MSB.
  or_(rs, rs, t6);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(f22, fd);
  mfc1(rs, f22);

  bind(&done);
}
|
873
|
+
|
874
|
+
|
875
|
+
// Tries to get a signed int32 out of a double precision floating point heap
|
876
|
+
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
|
877
|
+
// 32bits signed integer range.
|
878
|
+
// This method implementation differs from the ARM version for performance
|
879
|
+
// reasons.
|
880
|
+
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32bits signed integer range.
// This method implementation differs from the ARM version for performance
// reasons.
// 'source' holds the HeapNumber; the result lands in 'dest'. 'scratch',
// 'scratch2' (and 'double_scratch' on FPU builds) are clobbered.
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    FPURegister double_scratch,
                                    Label *not_int32) {
  Label right_exponent, done;
  // Get exponent word (ENDIAN issues).
  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
  // Load dest with zero. We use this either for the final shift or
  // for the answer.
  mov(dest, zero_reg);
  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
  // the exponent that we are fastest at and also the highest exponent we can
  // handle here.
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
  // If the exponent is higher than that then go to not_int32 case. This
  // catches numbers that don't fit in a signed int32, infinities and NaNs.
  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));

  // We know the exponent is smaller than 30 (biased). If it is less than
  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
  // it rounds to zero.
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  Subu(scratch2, scratch2, Operand(zero_exponent));
  // Dest already has a Smi zero.
  Branch(&done, lt, scratch2, Operand(zero_reg));
  if (!CpuFeatures::IsSupported(FPU)) {
    // We have a shifted exponent between 0 and 30 in scratch2.
    srl(dest, scratch2, HeapNumber::kExponentShift);
    // We now have the exponent in dest. Subtract from 30 to get
    // how much to shift down.
    li(at, Operand(30));
    subu(dest, at, dest);
  }
  bind(&right_exponent);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // MIPS FPU instructions implementing double precision to integer
    // conversion using round to zero. Since the FP value was qualified
    // above, the resulting integer should be a legal int32.
    // The original 'Exponent' word is still in scratch.
    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
    trunc_w_d(double_scratch, double_scratch);
    mfc1(dest, double_scratch);
  } else {
    // On entry, dest has final downshift, scratch has original sign/exp/mant.
    // Save sign bit in top bit of dest.
    And(scratch2, scratch, Operand(0x80000000));
    Or(dest, dest, Operand(scratch2));
    // Put back the implicit 1, just above mantissa field.
    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));

    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance. But we want to clear the sign-bit so shift one more bit
    // left, then shift right one bit.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    sll(scratch, scratch, shift_distance + 1);
    srl(scratch, scratch, 1);

    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
    // The width of the field here is the same as the shift amount above.
    const int field_width = shift_distance;
    Ext(scratch2, scratch2, 32-shift_distance, field_width);
    Ins(scratch, scratch2, 0, field_width);
    // Move down according to the exponent.
    srlv(scratch, scratch, dest);
    // Prepare the negative version of our integer.
    subu(scratch2, zero_reg, scratch);
    // Trick to check sign bit (msb) held in dest, count leading zero.
    // 0 indicates negative, save negative version with conditional move.
    clz(dest, dest);
    movz(scratch, scratch2, dest);
    mov(dest, scratch);
  }
  bind(&done);
}
|
971
|
+
|
972
|
+
|
973
|
+
// Truncate a double (given as its high word in 'input_high' and low word
// in 'input_low') to an int32 in 'result' with out-of-range wrapping
// semantics; Infinity and NaN yield 0. Clobbers input_high, input_low,
// scratch and 'at'.
// NOTE(review): the 'restore_sign' label is declared but never bound or
// branched to.
void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
                                                 Register input_low,
                                                 Register scratch) {
  Label done, normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  Ext(result,
      input_high,
      HeapNumber::kExponentShift,
      HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  Subu(scratch, result, HeapNumber::kExponentMask);
  movz(result, zero_reg, scratch);
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  Subu(result,
       result,
       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  Branch(&normal_exponent, le, result, Operand(zero_reg));
  mov(result, zero_reg);
  Branch(&done);

  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result;
  result = no_reg;
  And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  Branch(&high_shift_needed, lt, scratch, Operand(32));
  mov(input_high, zero_reg);
  Branch(&high_shift_done);
  bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  Or(input_high,
     input_high,
     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  sllv(input_high, input_high, scratch);

  bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  li(at, 32);
  subu(scratch, at, scratch);
  Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  Subu(scratch, zero_reg, scratch);
  sllv(input_low, input_low, scratch);
  Branch(&shift_done);

  bind(&pos_shift);
  srlv(input_low, input_low, scratch);

  bind(&shift_done);
  Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  mov(scratch, sign);
  result = sign;
  sign = no_reg;
  Subu(result, zero_reg, input_high);
  // scratch == 0 means positive: keep the unnegated value.
  movz(result, input_high, scratch);
  bind(&done);
}
|
1053
|
+
|
1054
|
+
|
1055
|
+
// Extract the low 'num_least_bits' bits of the integer encoded in the Smi
// 'src' into 'dst' (the extract starts above the Smi tag bits).
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}
|
1060
|
+
|
1061
|
+
|
1062
|
+
// Mask the untagged int32 in 'src' down to its low 'num_least_bits' bits.
void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}
|
1067
|
+
|
1068
|
+
|
1069
|
+
// Emulated conditional branches do not emit a nop in the branch delay slot.
|
1070
|
+
//
|
1071
|
+
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
|
1072
|
+
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct:
// cc_always must be paired with the dummy zero_reg/zero_reg operands, and
// any real condition must involve at least one non-zero operand.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
|
1075
|
+
|
1076
|
+
|
1077
|
+
// Unconditional PC-relative branch by a pre-shifted 16-bit offset.
// When bdslot == PROTECT, a nop fills the branch delay slot so that no
// stray instruction is executed there.
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);
  // Keep the delay slot harmless unless the caller intends to fill it.
  if (bdslot == PROTECT) {
    nop();
  }
}
|
1084
|
+
|
1085
|
+
|
1086
|
+
// Conditional PC-relative branch by a pre-shifted 16-bit offset.
// Emulates the full set of signed/unsigned conditions using native MIPS
// branches plus slt/sltu into the 'at' scratch register. Immediate
// comparands are folded into slti/sltiu where they fit, otherwise loaded
// into 'at'. With bdslot == PROTECT a nop fills the delay slot.
// NOTE(review): several cases look suspect and deserve confirmation
// against later upstream fixes:
//  - the signed greater_equal immediate fallback emits sltu (unsigned)
//    where slt would be expected;
//  - Uless with a zero comparand emits an unconditional b(offset), but
//    "unsigned < 0" is never true;
//  - Uless_equal with zero emits b(offset), though "unsigned <= 0" only
//    holds when rs == 0;
//  - Ugreater/Ugreater_equal with zero use the signed bgtz/bgez.
void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching
    // the target.
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        beq(rs, r2, offset);
        break;
      case ne:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          // NOTE(review): sltu in a signed case — expected slt; confirm.
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          b(offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          b(offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
|
1301
|
+
|
1302
|
+
|
1303
|
+
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  // Unconditional branch to a label. The offset must be computed
  // immediately before the branch is emitted, because the current
  // location is recorded for later patching of the target.
  b(shifted_branch_offset(L, false));

  // Fill the delay slot with a nop unless the caller manages it.
  if (bdslot == PROTECT) {
    nop();
  }
}
|
1313
|
+
|
1314
|
+
|
1315
|
+
// Conditional branch to label L, comparing rs against rt (register or
// immediate). Uses register 'at' as scratch for materialized immediates
// and slt/sltu results. Emits a delay-slot nop when bdslot == PROTECT.
//
// Fixes applied (review):
//  - greater_equal with a non-int16 immediate used sltu (unsigned) in a
//    signed comparison; it must use slt, matching the slti/register paths.
//  - Unsigned comparisons against zero used signed branch instructions:
//    Ugreater(0) is rs != 0, Ugreater_equal(0) is always taken,
//    Uless(0) is never taken (no code emitted), Uless_equal(0) is rs == 0.
void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  // Initialized so the final range ASSERT is well-defined even for
  // conditions that emit no branch.
  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching
    // the target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          // rs >u 0  <=>  rs != 0. bgtz would mis-handle values with the
          // sign bit set.
          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          // rs >=u 0 always holds: branch unconditionally.
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // rs <u 0 can never hold: no code needs to be emitted (and no
          // delay-slot nop either, since there is no branch).
          return;
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          // rs <=u 0  <=>  rs == 0.
          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Immediate operand: materialize it into 'at' where needed.
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching
    // the target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          // Signed comparison must use slt, not sltu; sltu mis-compares
          // operands of differing sign.
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          // rs >u 0  <=>  rs != 0.
          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          // rs >=u 0 always holds: branch unconditionally.
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // rs <u 0 can never hold: no code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          // rs <=u 0  <=>  rs == 0.
          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Check that offset could actually hold on an int16_t.
  ASSERT(is_int16(offset));
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
|
1571
|
+
|
1572
|
+
|
1573
|
+
// We need to use a bgezal or bltzal, but they can't be used directly with the
|
1574
|
+
// slt instructions. We could use sub or add instead but we would miss overflow
|
1575
|
+
// cases, so we keep slt and add an intermediate third instruction.
|
1576
|
+
void MacroAssembler::BranchAndLink(int16_t offset,
                                   BranchDelaySlot bdslot) {
  // Unconditional branch-and-link by a raw 16-bit offset.
  bal(offset);

  // Protect the delay slot with a nop when it is not managed by the caller.
  if (bdslot == PROTECT) {
    nop();
  }
}
|
1584
|
+
|
1585
|
+
|
1586
|
+
// Conditional branch-and-link by a raw 16-bit offset. Since bgezal/bltzal
// are the only linking conditional branches, each comparison is lowered to
// slt/sltu (producing 0 or 1 in scratch), then addiu -1 maps that to
// -1 or 0, which bgezal/bltzal can test against zero. eq/ne instead skip
// over the bal with an inverted two-instruction branch.
// Clobbers the 'at' register (scratch). Emits a delay-slot nop for PROTECT.
void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register r2 = no_reg;
  Register scratch = at;

  // Materialize an immediate right operand into scratch when a comparison
  // actually needs it (cc_always compares nothing).
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      bal(offset);
      break;
    case eq:
      // Skip the bal (and its delay-slot nop) when rs != r2.
      bne(rs, r2, 2);
      nop();
      bal(offset);
      break;
    case ne:
      // Skip the bal (and its delay-slot nop) when rs == r2.
      beq(rs, r2, 2);
      nop();
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      // scratch = (r2 < rs) ? 1 : 0; minus one gives 0 (taken) or -1.
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      // scratch = (rs < r2) ? 1 : 0; minus one gives 0 (not taken) or -1.
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
|
1666
|
+
|
1667
|
+
|
1668
|
+
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  // Unconditional branch-and-link to a label; the offset is computed
  // right at the bal so the patch location is recorded correctly.
  bal(shifted_branch_offset(L, false));

  // Fill the delay slot with a nop unless the caller handles it.
  if (bdslot == PROTECT) {
    nop();
  }
}
|
1675
|
+
|
1676
|
+
|
1677
|
+
// Conditional branch-and-link to a label. Same lowering as the int16_t
// overload: slt/sltu yields 0/1, addiu -1 maps it to -1/0 for the linking
// bgezal/bltzal branches; eq/ne skip over the bal with an inverted branch.
// shifted_branch_offset is always called immediately before the branch
// instruction because the current location is recorded for patching.
// Clobbers the 'at' register (scratch).
void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset;
  Register r2 = no_reg;
  Register scratch = at;
  // Materialize an immediate right operand into scratch when needed.
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;
    case eq:
      // Skip the bal (and its delay-slot nop) when rs != r2.
      bne(rs, r2, 2);
      nop();
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;
    case ne:
      // Skip the bal (and its delay-slot nop) when rs == r2.
      beq(rs, r2, 2);
      nop();
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      // scratch = (r2 < rs) ? 1 : 0; minus one gives 0 (taken) or -1.
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }

  // Check that offset could actually hold on an int16_t.
  ASSERT(is_int16(offset));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
|
1773
|
+
|
1774
|
+
|
1775
|
+
// Unconditional jump to a register or immediate target. Immediate targets
// that need relocation (MustUseReg) are loaded into t9 and jumped through;
// plain immediates use a direct j. Trampoline-pool emission is blocked for
// the whole sequence so the jump is not split.
void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (target.is_reg()) {
    jr(target.rm());
  } else {
    if (!MustUseReg(target.rmode_)) {
      // Absolute jump; no relocation needed.
      j(target.imm32_);
    } else {
      // Relocatable target: load through t9 and jump indirect.
      li(t9, target);
      jr(t9);
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
|
1791
|
+
|
1792
|
+
|
1793
|
+
// Conditional jump: when cond does not hold, a two-instruction inverted
// branch (Branch(2, ...)) skips over the jump. The skipped jump must be a
// single instruction for the offset of 2 to be correct — hence the
// "Will generate only one instruction" notes below.
void MacroAssembler::Jump(const Operand& target,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  BRANCH_ARGS_CHECK(cond, rs, rt);
  if (target.is_reg()) {
    if (cond == cc_always) {
      jr(target.rm());
    } else {
      // Skip the jr when the (negated) condition holds.
      Branch(2, NegateCondition(cond), rs, rt);
      jr(target.rm());
    }
  } else {  // Not register target.
    if (!MustUseReg(target.rmode_)) {
      if (cond == cc_always) {
        j(target.imm32_);
      } else {
        Branch(2, NegateCondition(cond), rs, rt);
        j(target.imm32_);  // Will generate only one instruction.
      }
    } else {  // MustUseReg(target).
      // Load the target before the guard branch so only the single jr is
      // inside the skipped region.
      li(t9, target);
      if (cond == cc_always) {
        jr(t9);
      } else {
        Branch(2, NegateCondition(cond), rs, rt);
        jr(t9);  // Will generate only one instruction.
      }
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
|
1827
|
+
|
1828
|
+
|
1829
|
+
int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
  // A call to a code object is always emitted as a fixed four-instruction
  // sequence, regardless of the code handle or relocation mode.
  const int kCallSequenceInstructions = 4;
  return kCallSequenceInstructions * kInstrSize;
}
|
1832
|
+
|
1833
|
+
|
1834
|
+
int MacroAssembler::CallSize(Register reg) {
  // A register call is a fixed two-instruction sequence for any register.
  const int kCallSequenceInstructions = 2;
  return kCallSequenceInstructions * kInstrSize;
}
|
1837
|
+
|
1838
|
+
|
1839
|
+
// Note: To call gcc-compiled C code on mips, you must call thru t9.
|
1840
|
+
// Unconditional call. Register targets use jalr directly; relocatable
// immediates are loaded into t9 and called through it (required by the
// MIPS ABI for gcc-compiled C code — see note above in the file).
void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (target.is_reg()) {
    jalr(target.rm());
  } else {  // !target.is_reg().
    if (!MustUseReg(target.rmode_)) {
      jal(target.imm32_);
    } else {  // MustUseReg(target).
      // Must record previous source positions before the
      // li() generates a new code target.
      positions_recorder()->WriteRecordedPositions();
      li(t9, target);
      jalr(t9);
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
|
1859
|
+
|
1860
|
+
|
1861
|
+
// Note: To call gcc-compiled C code on mips, you must call thru t9.
|
1862
|
+
// Conditional call: when cond does not hold, a two-instruction inverted
// branch skips over the (single-instruction) call. Relocatable targets are
// loaded into t9 before the guard so only the jalr is skipped.
void MacroAssembler::Call(const Operand& target,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  BRANCH_ARGS_CHECK(cond, rs, rt);
  if (target.is_reg()) {
    if (cond == cc_always) {
      jalr(target.rm());
    } else {
      // Skip the jalr when the (negated) condition holds.
      Branch(2, NegateCondition(cond), rs, rt);
      jalr(target.rm());
    }
  } else {  // !target.is_reg().
    if (!MustUseReg(target.rmode_)) {
      if (cond == cc_always) {
        jal(target.imm32_);
      } else {
        Branch(2, NegateCondition(cond), rs, rt);
        jal(target.imm32_);  // Will generate only one instruction.
      }
    } else {  // MustUseReg(target)
      li(t9, target);
      if (cond == cc_always) {
        jalr(t9);
      } else {
        Branch(2, NegateCondition(cond), rs, rt);
        jalr(t9);  // Will generate only one instruction.
      }
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
|
1896
|
+
|
1897
|
+
|
1898
|
+
// Calls a code object while tagging the call site with an AST id for the
// relocation info. The id is parked in ast_id_for_reloc_info_ (which must
// be empty) and is presumably consumed when the reloc entry is written —
// TODO(review): confirm the consumer resets it.
void MacroAssembler::CallWithAstId(Handle<Code> code,
                                   RelocInfo::Mode rmode,
                                   unsigned ast_id,
                                   Condition cond,
                                   Register r1,
                                   const Operand& r2) {
  ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
  ASSERT(ast_id != kNoASTId);
  // No pending id may be in flight from a previous call.
  ASSERT(ast_id_for_reloc_info_ == kNoASTId);
  ast_id_for_reloc_info_ = ast_id;
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
|
1910
|
+
|
1911
|
+
|
1912
|
+
void MacroAssembler::Drop(int count,
|
1913
|
+
Condition cond,
|
1914
|
+
Register reg,
|
1915
|
+
const Operand& op) {
|
1916
|
+
if (count <= 0) {
|
1917
|
+
return;
|
1918
|
+
}
|
1919
|
+
|
1920
|
+
Label skip;
|
1921
|
+
|
1922
|
+
if (cond != al) {
|
1923
|
+
Branch(&skip, NegateCondition(cond), reg, op);
|
1924
|
+
}
|
1925
|
+
|
1926
|
+
if (count > 0) {
|
1927
|
+
addiu(sp, sp, count * kPointerSize);
|
1928
|
+
}
|
1929
|
+
|
1930
|
+
if (cond != al) {
|
1931
|
+
bind(&skip);
|
1932
|
+
}
|
1933
|
+
}
|
1934
|
+
|
1935
|
+
|
1936
|
+
// Conditionally drops 'drop' stack slots and returns. The guard is a single
// inverted branch over both operations.
void MacroAssembler::DropAndRet(int drop,
                                Condition cond,
                                Register r1,
                                const Operand& r2) {
  // This is a workaround to make sure only one branch instruction is
  // generated. It relies on Drop and Ret not creating branches if
  // cond == cc_always.
  Label skip;
  if (cond != cc_always) {
    Branch(&skip, NegateCondition(cond), r1, r2);
  }

  Drop(drop);
  Ret();

  if (cond != cc_always) {
    bind(&skip);
  }
}
|
1955
|
+
|
1956
|
+
|
1957
|
+
void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch) {
  // Exchange the contents of reg1 and reg2.
  if (!scratch.is(no_reg)) {
    // Three moves through the scratch register.
    mov(scratch, reg1);
    mov(reg1, reg2);
    mov(reg2, scratch);
  } else {
    // No scratch register available: use the XOR-swap trick.
    Xor(reg1, reg1, Operand(reg2));
    Xor(reg2, reg2, Operand(reg1));
    Xor(reg1, reg1, Operand(reg2));
  }
}
|
1970
|
+
|
1971
|
+
|
1972
|
+
void MacroAssembler::Call(Label* target) {
  // Calling a label is simply a branch-and-link to its position.
  BranchAndLink(target);
}
|
1975
|
+
|
1976
|
+
|
1977
|
+
#ifdef ENABLE_DEBUGGER_SUPPORT
|
1978
|
+
|
1979
|
+
// Emits a call into the runtime's debug-break handler: zero arguments
// (a0 = 0) and the Runtime::kDebugBreak entry in a1, invoked through a
// one-register-argument CEntryStub.
void MacroAssembler::DebugBreak() {
  ASSERT(allow_stub_calls());
  mov(a0, zero_reg);
  li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
|
1986
|
+
|
1987
|
+
#endif // ENABLE_DEBUGGER_SUPPORT
|
1988
|
+
|
1989
|
+
|
1990
|
+
// ---------------------------------------------------------------------------
|
1991
|
+
// Exception handling.
|
1992
|
+
|
1993
|
+
// Pushes a new try handler frame (next, state, fp, pc — 4 words) on the
// stack and links it as the current handler at Isolate::k_handler_address.
// JS handlers record the real fp; JS-entry handlers store NULL for fp so
// unwinders know there is no JS frame.
void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
  // The return address is passed in register ra.
  if (try_location == IN_JAVASCRIPT) {
    // Select the handler state word for the frame.
    if (type == TRY_CATCH_HANDLER) {
      li(t0, Operand(StackHandler::TRY_CATCH));
    } else {
      li(t0, Operand(StackHandler::TRY_FINALLY));
    }
    // The stores below hard-code the 0/4/8/12 slot layout; keep in sync.
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize
           && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
    // Save the current handler as the next handler.
    li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
    lw(t1, MemOperand(t2));

    addiu(sp, sp, -StackHandlerConstants::kSize);
    sw(ra, MemOperand(sp, 12));   // pc
    sw(fp, MemOperand(sp, 8));    // fp
    sw(t0, MemOperand(sp, 4));    // state
    sw(t1, MemOperand(sp, 0));    // next

    // Link this handler as the new current one.
    sw(sp, MemOperand(t2));

  } else {
    // Must preserve a0-a3, and s0 (argv).
    ASSERT(try_location == IN_JS_ENTRY);
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize
           && StackHandlerConstants::kNextOffset == 0 * kPointerSize);

    // The frame pointer does not point to a JS frame so we save NULL
    // for fp. We expect the code throwing an exception to check fp
    // before dereferencing it to restore the context.
    li(t0, Operand(StackHandler::ENTRY));

    // Save the current handler as the next handler.
    li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
    lw(t1, MemOperand(t2));

    addiu(sp, sp, -StackHandlerConstants::kSize);
    sw(ra, MemOperand(sp, 12));        // pc
    sw(zero_reg, MemOperand(sp, 8));   // fp: NULL marks a JS-entry handler.
    sw(t0, MemOperand(sp, 4));         // state
    sw(t1, MemOperand(sp, 0));         // next

    // Link this handler as the new current one.
    sw(sp, MemOperand(t2));
  }
}
|
2048
|
+
|
2049
|
+
|
2050
|
+
// Pops the current try handler frame: unlinks it by storing its 'next'
// pointer back to Isolate::k_handler_address and discards the remaining
// handler words from the stack.
void MacroAssembler::PopTryHandler() {
  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
  // a1 <- next handler (slot 0); then drop the rest of the frame.
  pop(a1);
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
  sw(a1, MemOperand(at));
}
|
2057
|
+
|
2058
|
+
|
2059
|
+
// Throws 'value': unwinds to the topmost stack handler, unlinks it,
// restores fp (and cp when fp is non-NULL), and jumps to the handler pc.
// The exception is passed in v0 per this port's convention.
void MacroAssembler::Throw(Register value) {
  // v0 is expected to hold the exception.
  Move(v0, value);

  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // Drop the sp to the top of the handler.
  li(a3, Operand(ExternalReference(Isolate::k_handler_address,
                                   isolate())));
  lw(sp, MemOperand(a3));

  // Restore the next handler and frame pointer, discard handler state.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(a2);
  sw(a2, MemOperand(a3));
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  // a3 receives the (discarded) state word; fp is restored.
  MultiPop(a3.bit() | fp.bit());

  // Before returning we restore the context from the frame pointer if
  // not NULL. The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  // Set cp to NULL if fp is NULL.
  Label done;
  Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
  mov(cp, zero_reg);  // In branch delay slot.
  lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&done);

#ifdef DEBUG
  // When emitting debug_code, set ra as return address for the jump.
  // 5 instructions: add: 1, pop: 2, jump: 2.
  const int kOffsetRaInstructions = 5;
  Label find_ra;

  if (emit_debug_code()) {
    // Compute ra for the Jump(t9).
    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;

    // This branch-and-link sequence is needed to get the current PC on mips,
    // saved to the ra register. Then adjusted for instruction count.
    bal(&find_ra);  // bal exposes branch-delay.
    nop();  // Branch delay slot nop.
    bind(&find_ra);
    addiu(ra, ra, kOffsetRaBytes);
  }
#endif

  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  pop(t9);  // 2 instructions: lw, add sp.
  Jump(t9);  // 2 instructions: jr, nop (in delay slot).

  // ASSERT_EQ compiles away in release builds, where find_ra does not exist.
  if (emit_debug_code()) {
    // Make sure that the expected number of instructions were generated.
    ASSERT_EQ(kOffsetRaInstructions,
              InstructionsGeneratedSince(&find_ra));
  }
}
|
2117
|
+
|
2118
|
+
|
2119
|
+
// Throws an uncatchable exception: walks the handler chain until the
// JS-entry (ENTRY) handler, unlinks it, optionally records an out-of-memory
// pending exception, then restores fp/cp and jumps to the entry handler pc.
// The exception value travels in v0.
void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // v0 is expected to hold the exception.
  Move(v0, value);

  // Drop sp to the top stack handler.
  li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
  lw(sp, MemOperand(a3));

  // Unwind the handlers until the ENTRY handler is found.
  Label loop, done;
  bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kStateOffset;
  lw(a2, MemOperand(sp, kStateOffset));
  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kNextOffset;
  lw(sp, MemOperand(sp, kNextOffset));
  jmp(&loop);
  bind(&done);

  // Set the top handler address to next handler past the current ENTRY handler.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(a2);
  sw(a2, MemOperand(a3));

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(
        Isolate::k_external_caught_exception_address, isolate());
    li(a0, Operand(false, RelocInfo::NONE));
    li(a2, Operand(external_caught));
    sw(a0, MemOperand(a2));

    // Set pending exception and v0 to out of memory exception.
    Failure* out_of_memory = Failure::OutOfMemoryException();
    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
    li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
                                     isolate())));
    sw(v0, MemOperand(a2));
  }

  // Stack layout at this point. See also StackHandlerConstants.
  // sp ->  state (ENTRY)
  //        fp
  //        ra

  // Discard handler state (a2 is not used) and restore frame pointer.
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  MultiPop(a2.bit() | fp.bit());  // a2: discarded state.
  // Before returning we restore the context from the frame pointer if
  // not NULL. The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  Label cp_null;
  Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
  mov(cp, zero_reg);  // In the branch delay slot.
  lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&cp_null);

#ifdef DEBUG
  // When emitting debug_code, set ra as return address for the jump.
  // 5 instructions: add: 1, pop: 2, jump: 2.
  const int kOffsetRaInstructions = 5;
  Label find_ra;

  if (emit_debug_code()) {
    // Compute ra for the Jump(t9).
    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;

    // This branch-and-link sequence is needed to get the current PC on mips,
    // saved to the ra register. Then adjusted for instruction count.
    bal(&find_ra);  // bal exposes branch-delay slot.
    nop();  // Branch delay slot nop.
    bind(&find_ra);
    addiu(ra, ra, kOffsetRaBytes);
  }
#endif
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  pop(t9);  // 2 instructions: lw, add sp.
  Jump(t9);  // 2 instructions: jr, nop (in delay slot).

  // ASSERT_EQ compiles away in release builds, where find_ra does not exist.
  if (emit_debug_code()) {
    // Make sure that the expected number of instructions were generated.
    ASSERT_EQ(kOffsetRaInstructions,
              InstructionsGeneratedSince(&find_ra));
  }
}
|
2210
|
+
|
2211
|
+
|
2212
|
+
// Allocate a fixed-size object in new space. |object_size| is in bytes,
// or in words when SIZE_IN_WORDS is set in |flags|. On success |result|
// holds the object start (tagged when TAG_OBJECT is set); on exhaustion
// control jumps to |gc_required|. Clobbers scratch1, scratch2, and t9.
void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    // Inline allocation disabled: always take the runtime/GC path.
    jmp(gc_required);
    return;
  }

  // All working registers must be distinct; t9 is used as an extra scratch.
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(t9));
  ASSERT(!scratch2.is(t9));
  ASSERT(!result.is(t9));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  // Compile-time size must already be object-aligned.
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);

  // Check relative positions of allocation top and limit addresses.
  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm so we don't need additional checks either.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  // Top and limit cells are adjacent, so one base register can reach both.
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  Register obj_size_reg = scratch2;
  li(topaddr, Operand(new_space_allocation_top));
  li(obj_size_reg, Operand(object_size));

  // This code stores a temporary value in t9.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into t9.
    lw(result, MemOperand(topaddr));
    lw(t9, MemOperand(topaddr, kPointerSize));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. t9 is used
      // immediately below so this use of t9 does not cause difference with
      // respect to register content between debug and release mode.
      lw(t9, MemOperand(topaddr));
      Check(eq, "Unexpected allocation top", result, Operand(t9));
    }
    // Load allocation limit into t9. Result already contains allocation top.
    lw(t9, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  Addu(scratch2, result, Operand(obj_size_reg));
  // Unsigned compare: new top past the limit means out of new space.
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
  sw(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    Addu(result, result, Operand(kHeapObjectTag));
  }
}
|
2289
|
+
|
2290
|
+
|
2291
|
+
// Allocate a variable-size object in new space. |object_size| is a register
// holding the size in bytes, or in words when SIZE_IN_WORDS is set. On
// success |result| holds the object start (tagged when TAG_OBJECT is set);
// on exhaustion control jumps to |gc_required|. Clobbers scratch1, scratch2
// and t9. Unlike the immediate-size overload, alignment of the size can only
// be checked at run time (done below under emit_debug_code()).
void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }

  // All working registers must be distinct; t9 is used as an extra scratch.
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));

  // Check relative positions of allocation top and limit addresses.
  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm so we don't need additional checks either.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  // Top and limit cells are adjacent, so one base register can reach both.
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  li(topaddr, Operand(new_space_allocation_top));

  // This code stores a temporary value in t9.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into t9.
    lw(result, MemOperand(topaddr));
    lw(t9, MemOperand(topaddr, kPointerSize));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. t9 is used
      // immediately below so this use of t9 does not cause difference with
      // respect to register content between debug and release mode.
      lw(t9, MemOperand(topaddr));
      Check(eq, "Unexpected allocation top", result, Operand(t9));
    }
    // Load allocation limit into t9. Result already contains allocation top.
    lw(t9, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    sll(scratch2, object_size, kPointerSizeLog2);
    Addu(scratch2, result, scratch2);
  } else {
    Addu(scratch2, result, Operand(object_size));
  }
  // Unsigned compare: new top past the limit means out of new space.
  Branch(gc_required, Ugreater, scratch2, Operand(t9));

  // Update allocation top. result temporarily holds the new top.
  if (emit_debug_code()) {
    // Runtime alignment check for the register-supplied size.
    And(t9, scratch2, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
  }
  sw(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    Addu(result, result, Operand(kHeapObjectTag));
  }
}
|
2370
|
+
|
2371
|
+
|
2372
|
+
// Undo the most recent new-space allocation by resetting the allocation top
// to |object|'s (untagged) address. Only valid if |object| was the last
// object allocated and nothing else was allocated since. Clobbers |object|
// (tag stripped in place) and |scratch|.
void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  And(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  li(scratch, Operand(new_space_allocation_top));
  lw(scratch, MemOperand(scratch));
  Check(less, "Undo allocation of non allocated memory",
      object, Operand(scratch));
#endif
  // Write the address of the object to un-allocate as the current top.
  li(scratch, Operand(new_space_allocation_top));
  sw(object, MemOperand(scratch));
}
|
2390
|
+
|
2391
|
+
|
2392
|
+
// Allocate a sequential two-byte string of |length| characters in new
// space, initializing its map, length, and hash field. Jumps to
// |gc_required| if allocation fails. Clobbers the three scratch registers.
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  sll(scratch1, length, 1);  // Length in bytes, not chars.
  // Add header plus alignment slop, then round down to alignment.
  addiu(scratch1, scratch1,
       kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate two-byte string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}
|
2421
|
+
|
2422
|
+
|
2423
|
+
// Allocate a sequential ASCII (one-byte) string of |length| characters in
// new space, initializing its map, length, and hash field. Jumps to
// |gc_required| if allocation fails. Clobbers the three scratch registers.
void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kCharSize == 1);  // One byte per character: length == byte count.
  // Add header plus alignment slop, then round down to alignment.
  addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}
|
2451
|
+
|
2452
|
+
|
2453
|
+
// Allocate a two-byte cons string (fixed ConsString::kSize) in new space
// and initialize its map, length, and hash field. Jumps to |gc_required|
// on allocation failure. Clobbers scratch1 and scratch2.
void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);
  InitializeNewString(result,
                      length,
                      Heap::kConsStringMapRootIndex,
                      scratch1,
                      scratch2);
}
|
2470
|
+
|
2471
|
+
|
2472
|
+
// Allocate an ASCII cons string (fixed ConsString::kSize) in new space
// and initialize its map, length, and hash field. Jumps to |gc_required|
// on allocation failure. Clobbers scratch1 and scratch2.
void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register length,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);
  InitializeNewString(result,
                      length,
                      Heap::kConsAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}
|
2489
|
+
|
2490
|
+
|
2491
|
+
// Allocates a heap number or jumps to the label if the young space is full and
// a scavenge is needed. |heap_number_map| must already hold the heap number
// map root (checked under emit_debug_code()). Clobbers scratch1 and scratch2.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* need_gc) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  AllocateInNewSpace(HeapNumber::kSize,
                     result,
                     scratch1,
                     scratch2,
                     need_gc,
                     TAG_OBJECT);

  // Store heap number map in the allocated object.
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
|
2511
|
+
|
2512
|
+
|
2513
|
+
// Allocate a heap number and store the FPU double |value| into it.
// Jumps to |gc_required| on allocation failure. Clobbers scratch1,
// scratch2, and t6 (used to hold the heap number map).
void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 FPURegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
  AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}
|
2522
|
+
|
2523
|
+
|
2524
|
+
// Copies a fixed number of fields of heap objects from src to dst.
// |temps| is a register list from which one temporary is picked; it must
// not contain src or dst. Fields are copied pointer-by-pointer.
void MacroAssembler::CopyFields(Register dst,
                                Register src,
                                RegList temps,
                                int field_count) {
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
  // Primitive implementation using only one temporary register.

  Register tmp = no_reg;
  // Find a temp register in temps list.
  for (int i = 0; i < kNumRegisters; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.code_ = i;
      break;
    }
  }
  // At least one temporary must have been supplied.
  ASSERT(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    lw(tmp, FieldMemOperand(src, i * kPointerSize));
    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}
|
2548
|
+
|
2549
|
+
|
2550
|
+
// Copy |length| bytes from src to dst. Copies byte-by-byte until src is
// word-aligned, then in word-sized chunks (dst may still be unaligned, so
// each word is stored as four byte stores), then the trailing remainder
// byte-by-byte. Clobbers src, dst, length, and scratch.
void MacroAssembler::CopyBytes(Register src,
                               Register dst,
                               Register length,
                               Register scratch) {
  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word size chunks.
  bind(&align_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&align_loop_1);
  And(scratch, src, kPointerSize - 1);
  Branch(&word_loop, eq, scratch, Operand(zero_reg));
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  // NOTE(review): this jumps to the plain byte loop rather than back to
  // align_loop_1, so once src starts unaligned the word loop below is never
  // reached — presumably a performance (not correctness) issue; confirm
  // against upstream before changing.
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));

  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    And(scratch, src, kPointerSize - 1);
    Assert(eq, "Expecting alignment for CopyBytes",
        scratch, Operand(zero_reg));
  }
  Branch(&byte_loop, lt, length, Operand(kPointerSize));
  lw(scratch, MemOperand(src));
  Addu(src, src, kPointerSize);

  // TODO(kalmard) check if this can be optimized to use sw in most cases.
  // Can't use unaligned access - copy byte by byte.
  sb(scratch, MemOperand(dst, 0));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 1));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 2));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 3));
  Addu(dst, dst, 4);

  Subu(length, length, Operand(kPointerSize));
  Branch(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&byte_loop_1);
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
  bind(&done);
}
|
2606
|
+
|
2607
|
+
|
2608
|
+
// Branch to |fail| unless |obj|'s map equals the given |map| handle.
// Optionally smi-checks |obj| first (a smi has no map). Clobbers scratch
// and at.
void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  li(at, Operand(map));
  Branch(fail, ne, scratch, Operand(at));
}
|
2620
|
+
|
2621
|
+
|
2622
|
+
// Jump to the |success| code object if |obj|'s map equals |map|; otherwise
// fall through (to &fail). Optionally smi-checks |obj| first. Clobbers
// scratch.
void MacroAssembler::DispatchMap(Register obj,
                                 Register scratch,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
  bind(&fail);
}
|
2635
|
+
|
2636
|
+
|
2637
|
+
// Branch to |fail| unless |obj|'s map equals the root-list map at |index|.
// Optionally smi-checks |obj| first. Clobbers scratch and at.
void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(at, index);
  Branch(fail, ne, scratch, Operand(at));
}
|
2649
|
+
|
2650
|
+
|
2651
|
+
// Move a C function's double return value into |dst|, honoring the
// active float ABI: soft-float returns in the v0/v1 GPR pair, hard-float
// (o32) returns in f0.
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
  CpuFeatures::Scope scope(FPU);
  if (IsMipsSoftFloatABI) {
    Move(dst, v0, v1);
  } else {
    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
  }
}
|
2659
|
+
|
2660
|
+
|
2661
|
+
// Place a single double C-call argument per the active float ABI:
// hard-float passes it in f12, soft-float in the a0/a1 GPR pair.
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
  CpuFeatures::Scope scope(FPU);
  if (!IsMipsSoftFloatABI) {
    Move(f12, dreg);
  } else {
    Move(a0, a1, dreg);
  }
}
|
2669
|
+
|
2670
|
+
|
2671
|
+
// Place two double C-call arguments per the active float ABI: hard-float
// uses f12/f14 (ordering the moves so dreg2 == f12 is not clobbered before
// it is read), soft-float uses the a0/a1 and a2/a3 GPR pairs.
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
                                             DoubleRegister dreg2) {
  CpuFeatures::Scope scope(FPU);
  if (!IsMipsSoftFloatABI) {
    if (dreg2.is(f12)) {
      // Writing f12 first would destroy dreg2; move it out of the way first.
      ASSERT(!dreg1.is(f14));
      Move(f14, dreg2);
      Move(f12, dreg1);
    } else {
      Move(f12, dreg1);
      Move(f14, dreg2);
    }
  } else {
    Move(a0, a1, dreg1);
    Move(a2, a3, dreg2);
  }
}
|
2688
|
+
|
2689
|
+
|
2690
|
+
// Place a (double, int) C-call argument pair per the active float ABI:
// hard-float puts the double in f12 and the integer in a2; soft-float puts
// the integer in a2 first, then the double in a0/a1.
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
                                             Register reg) {
  CpuFeatures::Scope scope(FPU);
  if (!IsMipsSoftFloatABI) {
    Move(f12, dreg);
    Move(a2, reg);
  } else {
    Move(a2, reg);
    Move(a0, a1, dreg);
  }
}
|
2701
|
+
|
2702
|
+
|
2703
|
+
// Load the call-kind marker (a smi: 1 for CALL_AS_FUNCTION, 0 otherwise)
// into |dst|. The dst parameter exists purely for readability at call
// sites; the calling convention requires the marker to live in t1.
void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  ASSERT(dst.is(t1));
  const int marker = (call_kind == CALL_AS_FUNCTION) ? 1 : 0;
  li(dst, Operand(Smi::FromInt(marker)));
}
|
2715
|
+
|
2716
|
+
|
2717
|
+
// -----------------------------------------------------------------------------
|
2718
|
+
// JavaScript invokes.
|
2719
|
+
|
2720
|
+
// Shared prologue for JS invocations: compare expected vs. actual argument
// counts and, on mismatch, route the call through the arguments adaptor
// trampoline (calling or tail-jumping per |flag|). On a match, falls
// through (or binds &regular_invoke) so the caller can invoke directly;
// |done| is jumped to after an adaptor *call* returns.
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  a0: actual arguments count
  //  a1: function (passed through to callee)
  //  a2: expected arguments count
  //  a3: callee code entry

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(a0));
  ASSERT(expected.is_immediate() || expected.reg().is(a2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      li(a0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        li(a2, Operand(expected.immediate()));
      }
    }
  } else if (actual.is_immediate()) {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
    li(a0, Operand(actual.immediate()));
  } else {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      // Materialize the code entry from the handle (skip the Code header).
      li(a3, Operand(code_constant));
      addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
      SetCallKind(t1, call_kind);
      Call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      // Adaptor call returns here; skip the direct invocation in the caller.
      jmp(done);
    } else {
      // Tail call: the adaptor never returns here.
      SetCallKind(t1, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}
|
2790
|
+
|
2791
|
+
|
2792
|
+
// Invoke the code entry in |code| with argument-count adaptation handled
// by InvokePrologue. Calls or tail-jumps per |flag|; the call-kind marker
// is placed in t1 immediately before the transfer.
void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  Label done;

  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                 call_wrapper, call_kind);
  if (flag == CALL_FUNCTION) {
    SetCallKind(t1, call_kind);
    Call(code);
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(t1, call_kind);
    Jump(code);
  }
  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}
|
2814
|
+
|
2815
|
+
|
2816
|
+
// Invoke a code object given as a handle, with argument-count adaptation
// handled by InvokePrologue. Calls or tail-jumps per |flag| using the
// supplied relocation mode.
void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                CallKind call_kind) {
  Label done;

  InvokePrologue(expected, actual, code, no_reg, &done, flag,
                 NullCallWrapper(), call_kind);
  if (flag == CALL_FUNCTION) {
    SetCallKind(t1, call_kind);
    Call(code, rmode);
  } else {
    SetCallKind(t1, call_kind);
    Jump(code, rmode);
  }
  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}
|
2837
|
+
|
2838
|
+
|
2839
|
+
// Invoke the JS function in a1: load its context into cp, read the formal
// parameter count from the SharedFunctionInfo, then dispatch through
// InvokeCode with the function's code entry.
void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // Contract with called JS functions requires that function is passed in a1.
  ASSERT(function.is(a1));
  Register expected_reg = a2;
  Register code_reg = a3;

  // code_reg first holds the SharedFunctionInfo, then is reloaded with the
  // actual code entry below.
  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
  lw(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  // The parameter count is stored as a smi; untag it.
  sra(expected_reg, expected_reg, kSmiTagSize);
  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
}
|
2860
|
+
|
2861
|
+
|
2862
|
+
// Invoke a compile-time-known (already compiled) JSFunction: materialize
// it in a1, set up cp from its context, and call its cached code with the
// statically-known formal parameter count.
void MacroAssembler::InvokeFunction(JSFunction* function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  ASSERT(function->is_compiled());

  // Get the function and setup the context.
  li(a1, Operand(Handle<JSFunction>(function)));
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

  // Invoke the cached code.
  Handle<Code> code(function->code());
  ParameterCount expected(function->shared()->formal_parameter_count());
  if (V8::UseCrankshaft()) {
    // Crankshaft support is not implemented on MIPS at this revision.
    UNIMPLEMENTED_MIPS();
  } else {
    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
  }
}
|
2880
|
+
|
2881
|
+
|
2882
|
+
// Branch to |fail| unless |heap_object| is a JS object. Loads the object's
// map into |map| as a side effect; clobbers |scratch|.
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}
|
2889
|
+
|
2890
|
+
|
2891
|
+
// Branch to |fail| unless the instance type in |map| lies in the
// [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE] range. Clobbers |scratch|.
void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
  Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
}
|
2898
|
+
|
2899
|
+
|
2900
|
+
// Branch to |fail| unless |object| is a string (its instance type has the
// string bit clear in kIsNotStringMask). Clobbers |scratch|.
void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  // The mask test below relies on the non-string tag being non-zero.
  ASSERT(kNotStringTag != 0);

  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(scratch, scratch, Operand(kIsNotStringMask));
  Branch(fail, ne, scratch, Operand(zero_reg));
}
|
2910
|
+
|
2911
|
+
|
2912
|
+
// ---------------------------------------------------------------------------
|
2913
|
+
// Support functions.
|
2914
|
+
|
2915
|
+
|
2916
|
+
// Load |function|'s prototype into |result|, jumping to |miss| when it
// cannot be determined without running JS (non-function receiver, or a
// hole prototype that must be lazily allocated). Clobbers |scratch| and t8.
void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss) {
  // Check that the receiver isn't a smi.
  JumpIfSmi(function, miss);

  // Check that the function really is a function.  Load map into result reg.
  GetObjectType(function, result, scratch);
  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));

  // Make sure that the function has an instance prototype.
  Label non_instance;
  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  lw(result,
     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
  Branch(miss, eq, result, Operand(t8));

  // If the function does not have an initial map, we're done.
  Label done;
  GetObjectType(result, scratch, scratch);
  Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  lw(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}
|
2960
|
+
|
2961
|
+
|
2962
|
+
// Load |object|'s map into |map| and its instance type byte into
// |type_reg|. |object| must be a heap object (not a smi).
void MacroAssembler::GetObjectType(Register object,
                                   Register map,
                                   Register type_reg) {
  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
|
2968
|
+
|
2969
|
+
|
2970
|
+
// -----------------------------------------------------------------------------
|
2971
|
+
// Runtime calls.
|
2972
|
+
|
2973
|
+
// Call a code stub, optionally conditionally (cond/r1/r2 form the branch
// condition). GetCode() may allocate, so this is disallowed inside stubs
// that forbid nested stub calls.
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                              Register r1, const Operand& r2) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
}
|
2978
|
+
|
2979
|
+
|
2980
|
+
// Like CallStub, but uses TryGetCode so a failed code allocation is
// propagated as a MaybeObject failure instead of aborting.
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
                                         Register r1, const Operand& r2) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Object* result;
  { MaybeObject* maybe_result = stub->TryGetCode();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
  return result;
}
|
2990
|
+
|
2991
|
+
|
2992
|
+
|
2993
|
+
// Tail-call (jump to) a code stub; control does not return here.
void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
|
2997
|
+
|
2998
|
+
// Like TailCallStub, but uses TryGetCode so a failed code allocation is
// propagated as a MaybeObject failure instead of aborting. The jump may be
// conditional on (cond, r1, r2).
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
                                             Condition cond,
                                             Register r1,
                                             const Operand& r2) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Object* result;
  { MaybeObject* maybe_result = stub->TryGetCode();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
  return result;
}
|
3010
|
+
|
3011
|
+
|
3012
|
+
// Byte distance between two external reference addresses (ref0 - ref1).
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return static_cast<int>(ref0.address() - ref1.address());
}
|
3015
|
+
|
3016
|
+
|
3017
|
+
// Call an API function via the DirectCEntry stub and return to JS.
// Manages a HandleScope around the call (next/limit/level kept in the
// callee-saved registers s0/s1/s2, scope base address in s3), checks for a
// scheduled exception afterwards, and tears down the exit frame on the
// normal path. Returns a failure object if tail-calling the runtime for
// exception promotion could not generate code.
MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
    ExternalReference function, int stack_space) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address();
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(),
      next_address);

  // Allocate HandleScope in callee-save registers.
  li(s3, Operand(next_address));
  lw(s0, MemOperand(s3, kNextOffset));
  lw(s1, MemOperand(s3, kLimitOffset));
  lw(s2, MemOperand(s3, kLevelOffset));
  Addu(s2, s2, Operand(1));
  sw(s2, MemOperand(s3, kLevelOffset));

  // The O32 ABI requires us to pass a pointer in a0 where the returned struct
  // (4 bytes) will be placed. This is also built into the Simulator.
  // Set up the pointer to the returned value (a0). It was allocated in
  // EnterExitFrame.
  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub;
  stub.GenerateCall(this, function);

  // As mentioned above, on MIPS a pointer is returned - we need to dereference
  // it to get the actual return value (which is also a pointer).
  lw(v0, MemOperand(v0));

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  // If result is non-zero, dereference to get the result value
  // otherwise set it to undefined.
  Label skip;
  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
  Branch(&skip, eq, v0, Operand(zero_reg));
  lw(a0, MemOperand(v0));
  bind(&skip);
  mov(v0, a0);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  sw(s0, MemOperand(s3, kNextOffset));
  if (emit_debug_code()) {
    lw(a1, MemOperand(s3, kLevelOffset));
    Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
  }
  Subu(s2, s2, Operand(1));
  sw(s2, MemOperand(s3, kLevelOffset));
  lw(at, MemOperand(s3, kLimitOffset));
  // Limit moved => extensions were allocated inside the call; delete them.
  Branch(&delete_allocated_handles, ne, s1, Operand(at));

  // Check if the function scheduled an exception.
  bind(&leave_exit_frame);
  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
  lw(t1, MemOperand(at));
  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
  li(s0, Operand(stack_space));
  LeaveExitFrame(false, s0);
  Ret();

  bind(&promote_scheduled_exception);
  MaybeObject* result = TryTailCallExternalReference(
      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
  if (result->IsFailure()) {
    return result;
  }

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  sw(s1, MemOperand(s3, kLimitOffset));
  mov(s0, v0);  // Preserve the result across the C call.
  // NOTE(review): a0 is overwritten by the li() below before it is read —
  // this mov looks redundant; kept to preserve the emitted sequence exactly.
  mov(a0, v0);
  PrepareCallCFunction(1, s1);
  li(a0, Operand(ExternalReference::isolate_address()));
  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
                1);
  mov(v0, s0);
  jmp(&leave_exit_frame);

  return result;
}
|
3109
|
+
|
3110
|
+
|
3111
|
+
void MacroAssembler::IllegalOperation(int num_arguments) {
|
3112
|
+
if (num_arguments > 0) {
|
3113
|
+
addiu(sp, sp, num_arguments * kPointerSize);
|
3114
|
+
}
|
3115
|
+
LoadRoot(v0, Heap::kUndefinedValueRootIndex);
|
3116
|
+
}
|
3117
|
+
|
3118
|
+
|
3119
|
+
// Extract the cached array index from a string hash field and smi-tag it
// into |index|. |hash| is clobbered.
void MacroAssembler::IndexFromHash(Register hash,
                                   Register index) {
  // If the hash field contains an array index pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it does not
  // conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
  // the low kHashShift bits.
  STATIC_ASSERT(kSmiTag == 0);
  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
  sll(index, hash, kSmiTagSize);
}
|
3133
|
+
|
3134
|
+
|
3135
|
+
// Convert |object| (smi or heap number) to a double in |result|.
// Jumps to |not_number| if it is neither, or (with
// AVOID_NANS_AND_INFINITIES) if the value is NaN or +/-Infinity.
// |scratch1| and |scratch2| are clobbered.
void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                               FPURegister result,
                                               Register scratch1,
                                               Register scratch2,
                                               Register heap_number_map,
                                               Label* not_number,
                                               ObjectToDoubleFlags flags) {
  Label done;
  if ((flags & OBJECT_NOT_SMI) == 0) {
    Label not_smi;
    JumpIfNotSmi(object, &not_smi);
    // Remove smi tag and convert to double.
    sra(scratch1, object, kSmiTagSize);
    mtc1(scratch1, result);
    cvt_d_w(result, result);
    Branch(&done);
    bind(&not_smi);
  }
  // Check for heap number and load double value from it.
  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
  Branch(not_number, ne, scratch1, Operand(heap_number_map));

  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
    // If exponent is all ones the number is either a NaN or +/-Infinity.
    Register exponent = scratch1;
    Register mask_reg = scratch2;
    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
    li(mask_reg, HeapNumber::kExponentMask);

    And(exponent, exponent, mask_reg);
    Branch(not_number, eq, exponent, Operand(mask_reg));
  }
  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
  bind(&done);
}
|
3170
|
+
|
3171
|
+
|
3172
|
+
// Untag a smi and convert it to a double in |value|; |scratch1| is
// clobbered with the untagged integer.
void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                            FPURegister value,
                                            Register scratch1) {
  sra(scratch1, smi, kSmiTagSize);
  mtc1(scratch1, value);
  cvt_d_w(value, value);
}
|
3179
|
+
|
3180
|
+
|
3181
|
+
// dst = left + right, with signed-overflow detection: on exit the sign bit
// of |overflow_dst| is set iff the addition overflowed. Uses the classic
// identity: overflow iff the operands have the same sign and the sum's
// sign differs from both, computed as (sum^left) & (sum^right).
// NOTE(review): unlike SubuAndCheckForOverflow there is no ASSERT that
// scratch aliases neither input — presumably callers never pass such
// aliases; confirm before tightening.
void MacroAssembler::AdduAndCheckForOverflow(Register dst,
                                             Register left,
                                             Register right,
                                             Register overflow_dst,
                                             Register scratch) {
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  ASSERT(!left.is(right));

  // TODO(kalmard) There must be a way to optimize dst == left and dst == right
  // cases.

  if (dst.is(left)) {
    // Compute the sum into overflow_dst first so |left| survives long
    // enough for the overflow computation, then shuffle into place.
    addu(overflow_dst, left, right);
    xor_(dst, overflow_dst, left);
    xor_(scratch, overflow_dst, right);
    and_(scratch, scratch, dst);
    mov(dst, overflow_dst);
    mov(overflow_dst, scratch);
  } else if (dst.is(right)) {
    // Mirror of the case above with the roles of left/right swapped.
    addu(overflow_dst, left, right);
    xor_(dst, overflow_dst, right);
    xor_(scratch, overflow_dst, left);
    and_(scratch, scratch, dst);
    mov(dst, overflow_dst);
    mov(overflow_dst, scratch);
  } else {
    // No aliasing: compute directly.
    addu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, dst, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
|
3217
|
+
|
3218
|
+
|
3219
|
+
// dst = left - right, with signed-overflow detection: on exit the sign bit
// of |overflow_dst| is set iff the subtraction overflowed. Overflow iff
// the operands have different signs and the result's sign differs from
// |left|, computed as (diff^left) & (left^right).
void MacroAssembler::SubuAndCheckForOverflow(Register dst,
                                             Register left,
                                             Register right,
                                             Register overflow_dst,
                                             Register scratch) {
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  ASSERT(!left.is(right));
  ASSERT(!scratch.is(left));
  ASSERT(!scratch.is(right));

  // TODO(kalmard) There must be a way to optimize dst == left and dst == right
  // cases.

  if (dst.is(left)) {
    // Compute the difference into overflow_dst first so |left| survives
    // for the overflow computation, then shuffle into place.
    subu(overflow_dst, left, right);
    xor_(scratch, overflow_dst, left);
    xor_(dst, left, right);
    and_(scratch, scratch, dst);
    mov(dst, overflow_dst);
    mov(overflow_dst, scratch);
  } else if (dst.is(right)) {
    // Same idea, but consume |right| (== dst) before overwriting it.
    subu(overflow_dst, left, right);
    xor_(dst, left, right);
    xor_(scratch, overflow_dst, left);
    and_(scratch, scratch, dst);
    mov(dst, overflow_dst);
    mov(overflow_dst, scratch);
  } else {
    // No aliasing: compute directly.
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
|
3257
|
+
|
3258
|
+
|
3259
|
+
// Call runtime function |f| with |num_arguments| already pushed on the
// stack. The return value arrives in v0.
void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments) {
  // All parameters are on the stack. v0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, num_arguments);
  li(a1, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(1);
  CallStub(&stub);
}
|
3280
|
+
|
3281
|
+
|
3282
|
+
// Like CallRuntime, but the CEntry stub preserves the double registers
// across the call.
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  li(a0, Operand(function->nargs));
  li(a1, Operand(ExternalReference(function, isolate())));
  CEntryStub stub(1);
  stub.SaveDoubles();
  CallStub(&stub);
}
|
3290
|
+
|
3291
|
+
|
3292
|
+
// Convenience overload: resolve the function id, then delegate.
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
|
3295
|
+
|
3296
|
+
|
3297
|
+
// Call a C function through the CEntry stub: a0 = argc, a1 = target.
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  li(a0, Operand(num_arguments));
  li(a1, Operand(ext));

  CEntryStub stub(1);
  CallStub(&stub);
}
|
3305
|
+
|
3306
|
+
|
3307
|
+
// Tail-call an external (runtime) function; the arguments are already on
// the stack. |result_size| is unused on this port.
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, Operand(num_arguments));
  JumpToExternalReference(ext);
}
|
3317
|
+
|
3318
|
+
// GC-safe variant of TailCallExternalReference: propagates a failure from
// stub code generation instead of crashing. |result_size| is unused here.
MaybeObject* MacroAssembler::TryTailCallExternalReference(
    const ExternalReference& ext, int num_arguments, int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, num_arguments);
  return TryJumpToExternalReference(ext);
}
|
3327
|
+
|
3328
|
+
|
3329
|
+
// Tail-call a runtime function by id.
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}
|
3336
|
+
|
3337
|
+
|
3338
|
+
// Jump to a C function through the CEntry stub; a1 carries the target.
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  li(a1, Operand(builtin));
  CEntryStub stub(1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
|
3343
|
+
|
3344
|
+
|
3345
|
+
// GC-safe variant of JumpToExternalReference: returns a failure object if
// the CEntry stub could not be generated.
MaybeObject* MacroAssembler::TryJumpToExternalReference(
    const ExternalReference& builtin) {
  li(a1, Operand(builtin));
  CEntryStub stub(1);
  return TryTailCallStub(&stub);
}
|
3351
|
+
|
3352
|
+
|
3353
|
+
// Invoke a JavaScript builtin, either as a call (with the call-wrapper
// notifications around it) or as a tail jump.
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  GetBuiltinEntry(t9, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(t9));
    Call(t9);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(t9);
  }
}
|
3366
|
+
|
3367
|
+
|
3368
|
+
// Load the JSFunction for builtin |id| into |target|:
// global object -> builtins object -> function slot.
void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  lw(target, FieldMemOperand(target,
                             JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
|
3377
|
+
|
3378
|
+
|
3379
|
+
// Load the code entry of builtin |id| into |target|. The builtin function
// itself is left in a1 (hence target must not be a1).
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(a1));
  GetBuiltinFunction(a1, id);
  // Load the code entry point from the builtins object.
  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}
|
3385
|
+
|
3386
|
+
|
3387
|
+
// Store |value| into a stats counter, if native-code counters are enabled.
// Both scratch registers are clobbered.
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch1, Operand(value));
    li(scratch2, Operand(ExternalReference(counter)));
    sw(scratch1, MemOperand(scratch2));
  }
}
|
3395
|
+
|
3396
|
+
|
3397
|
+
// Add a positive |value| to a stats counter (read-modify-write), if
// native-code counters are enabled. Both scratch registers are clobbered.
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Addu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}
|
3407
|
+
|
3408
|
+
|
3409
|
+
// Subtract a positive |value| from a stats counter (read-modify-write),
// if native-code counters are enabled. Both scratch registers are clobbered.
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Subu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}
|
3419
|
+
|
3420
|
+
|
3421
|
+
// -----------------------------------------------------------------------------
|
3422
|
+
// Debugging.
|
3423
|
+
|
3424
|
+
// Emit a debug-only runtime check: in debug-code builds this is Check();
// in release builds it emits nothing.
void MacroAssembler::Assert(Condition cc, const char* msg,
                            Register rs, Operand rt) {
  if (emit_debug_code())
    Check(cc, msg, rs, rt);
}
|
3429
|
+
|
3430
|
+
|
3431
|
+
// Debug-only check that |reg| holds the root value at |index|.
// Clobbers at.
void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index) {
  if (emit_debug_code()) {
    LoadRoot(at, index);
    Check(eq, "Register did not match expected root", reg, Operand(at));
  }
}
|
3438
|
+
|
3439
|
+
|
3440
|
+
// Debug-only check that |elements| has a fast-elements map (FixedArray or
// copy-on-write FixedArray). The register is preserved via push/pop.
void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    ASSERT(!elements.is(at));
    Label ok;
    push(elements);
    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
    pop(elements);
  }
}
|
3455
|
+
|
3456
|
+
|
3457
|
+
// Emit an unconditional runtime check: abort with |msg| unless the
// condition (cc, rs, rt) holds.
void MacroAssembler::Check(Condition cc, const char* msg,
                           Register rs, Operand rt) {
  Label L;
  Branch(&L, cc, rs, rt);
  Abort(msg);
  // Will not return here.
  bind(&L);
}
|
3465
|
+
|
3466
|
+
|
3467
|
+
void MacroAssembler::Abort(const char* msg) {
|
3468
|
+
Label abort_start;
|
3469
|
+
bind(&abort_start);
|
3470
|
+
// We want to pass the msg string like a smi to avoid GC
|
3471
|
+
// problems, however msg is not guaranteed to be aligned
|
3472
|
+
// properly. Instead, we pass an aligned pointer that is
|
3473
|
+
// a proper v8 smi, but also pass the alignment difference
|
3474
|
+
// from the real pointer as a smi.
|
3475
|
+
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
|
3476
|
+
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
|
3477
|
+
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
|
3478
|
+
#ifdef DEBUG
|
3479
|
+
if (msg != NULL) {
|
3480
|
+
RecordComment("Abort message: ");
|
3481
|
+
RecordComment(msg);
|
3482
|
+
}
|
3483
|
+
#endif
|
3484
|
+
// Disable stub call restrictions to always allow calls to abort.
|
3485
|
+
AllowStubCallsScope allow_scope(this, true);
|
3486
|
+
|
3487
|
+
li(a0, Operand(p0));
|
3488
|
+
push(a0);
|
3489
|
+
li(a0, Operand(Smi::FromInt(p1 - p0)));
|
3490
|
+
push(a0);
|
3491
|
+
CallRuntime(Runtime::kAbort, 2);
|
3492
|
+
// Will not return here.
|
3493
|
+
if (is_trampoline_pool_blocked()) {
|
3494
|
+
// If the calling code cares about the exact number of
|
3495
|
+
// instructions generated, we insert padding here to keep the size
|
3496
|
+
// of the Abort macro constant.
|
3497
|
+
// Currently in debug mode with debug_code enabled the number of
|
3498
|
+
// generated instructions is 14, so we use this as a maximum value.
|
3499
|
+
static const int kExpectedAbortInstructions = 14;
|
3500
|
+
int abort_instructions = InstructionsGeneratedSince(&abort_start);
|
3501
|
+
ASSERT(abort_instructions <= kExpectedAbortInstructions);
|
3502
|
+
while (abort_instructions++ < kExpectedAbortInstructions) {
|
3503
|
+
nop();
|
3504
|
+
}
|
3505
|
+
}
|
3506
|
+
}
|
3507
|
+
|
3508
|
+
|
3509
|
+
// Walk |context_chain_length| levels up the context chain from cp into
// |dst|; with length 0, just copy cp (so a later store cannot clobber the
// live context register).
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
    // Load the function context (which is the incoming, outer context).
    lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
    for (int i = 1; i < context_chain_length; i++) {
      lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
      lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in esi).
    Move(dst, cp);
  }

  // We should not have found a 'with' context by walking the context chain
  // (i.e., the static scope chain and runtime context chain do not agree).
  // A variable occurring in such a scope should have slot type LOOKUP and
  // not CONTEXT.
  if (emit_debug_code()) {
    lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
    Check(eq, "Yo dawg, I heard you liked function contexts "
              "so I put function contexts in all your contexts",
          dst, Operand(t9));
  }
}
|
3537
|
+
|
3538
|
+
|
3539
|
+
// Load the global-context function at slot |index| into |function|.
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  lw(function, FieldMemOperand(function,
                               GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  lw(function, MemOperand(function, Context::SlotOffset(index)));
}
|
3548
|
+
|
3549
|
+
|
3550
|
+
// Load a global function's initial map into |map|. In debug-code builds,
// verifies that the loaded value really is a map.
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    Branch(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);
  }
}
|
3564
|
+
|
3565
|
+
|
3566
|
+
// Build a standard stack frame of the given |type|:
// [ra, caller fp, cp, type (smi), code object], with fp pointing at the
// saved caller fp slot.
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  addiu(sp, sp, -5 * kPointerSize);
  li(t8, Operand(Smi::FromInt(type)));
  li(t9, Operand(CodeObject()));
  sw(ra, MemOperand(sp, 4 * kPointerSize));
  sw(fp, MemOperand(sp, 3 * kPointerSize));
  sw(cp, MemOperand(sp, 2 * kPointerSize));
  sw(t8, MemOperand(sp, 1 * kPointerSize));
  sw(t9, MemOperand(sp, 0 * kPointerSize));
  addiu(fp, sp, 3 * kPointerSize);
}
|
3577
|
+
|
3578
|
+
|
3579
|
+
// Tear down a frame built by EnterFrame: restore fp and ra, then drop the
// two restored slots. |type| is unused here; the layout is uniform.
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  mov(sp, fp);
  lw(fp, MemOperand(sp, 0 * kPointerSize));
  lw(ra, MemOperand(sp, 1 * kPointerSize));
  addiu(sp, sp, 2 * kPointerSize);
}
|
3585
|
+
|
3586
|
+
|
3587
|
+
void MacroAssembler::EnterExitFrame(bool save_doubles,
|
3588
|
+
int stack_space) {
|
3589
|
+
// Setup the frame structure on the stack.
|
3590
|
+
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
|
3591
|
+
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
|
3592
|
+
STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
|
3593
|
+
|
3594
|
+
// This is how the stack will look:
|
3595
|
+
// fp + 2 (==kCallerSPDisplacement) - old stack's end
|
3596
|
+
// [fp + 1 (==kCallerPCOffset)] - saved old ra
|
3597
|
+
// [fp + 0 (==kCallerFPOffset)] - saved old fp
|
3598
|
+
// [fp - 1 (==kSPOffset)] - sp of the called function
|
3599
|
+
// [fp - 2 (==kCodeOffset)] - CodeObject
|
3600
|
+
// fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
|
3601
|
+
// new stack (will contain saved ra)
|
3602
|
+
|
3603
|
+
// Save registers.
|
3604
|
+
addiu(sp, sp, -4 * kPointerSize);
|
3605
|
+
sw(ra, MemOperand(sp, 3 * kPointerSize));
|
3606
|
+
sw(fp, MemOperand(sp, 2 * kPointerSize));
|
3607
|
+
addiu(fp, sp, 2 * kPointerSize); // Setup new frame pointer.
|
3608
|
+
|
3609
|
+
if (emit_debug_code()) {
|
3610
|
+
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
|
3611
|
+
}
|
3612
|
+
|
3613
|
+
li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
|
3614
|
+
sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
|
3615
|
+
|
3616
|
+
// Save the frame pointer and the context in top.
|
3617
|
+
li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
|
3618
|
+
sw(fp, MemOperand(t8));
|
3619
|
+
li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
|
3620
|
+
sw(cp, MemOperand(t8));
|
3621
|
+
|
3622
|
+
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
|
3623
|
+
if (save_doubles) {
|
3624
|
+
// The stack must be allign to 0 modulo 8 for stores with sdc1.
|
3625
|
+
ASSERT(kDoubleSize == frame_alignment);
|
3626
|
+
if (frame_alignment > 0) {
|
3627
|
+
ASSERT(IsPowerOf2(frame_alignment));
|
3628
|
+
And(sp, sp, Operand(-frame_alignment)); // Align stack.
|
3629
|
+
}
|
3630
|
+
int space = FPURegister::kNumRegisters * kDoubleSize;
|
3631
|
+
Subu(sp, sp, Operand(space));
|
3632
|
+
// Remember: we only need to save every 2nd double FPU value.
|
3633
|
+
for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
|
3634
|
+
FPURegister reg = FPURegister::from_code(i);
|
3635
|
+
sdc1(reg, MemOperand(sp, i * kDoubleSize));
|
3636
|
+
}
|
3637
|
+
}
|
3638
|
+
|
3639
|
+
// Reserve place for the return address, stack space and an optional slot
|
3640
|
+
// (used by the DirectCEntryStub to hold the return value if a struct is
|
3641
|
+
// returned) and align the frame preparing for calling the runtime function.
|
3642
|
+
ASSERT(stack_space >= 0);
|
3643
|
+
Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
|
3644
|
+
if (frame_alignment > 0) {
|
3645
|
+
ASSERT(IsPowerOf2(frame_alignment));
|
3646
|
+
And(sp, sp, Operand(-frame_alignment)); // Align stack.
|
3647
|
+
}
|
3648
|
+
|
3649
|
+
// Set the exit frame sp value to point just before the return address
|
3650
|
+
// location.
|
3651
|
+
addiu(at, sp, kPointerSize);
|
3652
|
+
sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
|
3653
|
+
}
|
3654
|
+
|
3655
|
+
|
3656
|
+
void MacroAssembler::LeaveExitFrame(bool save_doubles,
|
3657
|
+
Register argument_count) {
|
3658
|
+
// Optionally restore all double registers.
|
3659
|
+
if (save_doubles) {
|
3660
|
+
// Remember: we only need to restore every 2nd double FPU value.
|
3661
|
+
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
|
3662
|
+
for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
|
3663
|
+
FPURegister reg = FPURegister::from_code(i);
|
3664
|
+
ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
|
3665
|
+
}
|
3666
|
+
}
|
3667
|
+
|
3668
|
+
// Clear top frame.
|
3669
|
+
li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
|
3670
|
+
sw(zero_reg, MemOperand(t8));
|
3671
|
+
|
3672
|
+
// Restore current context from top and clear it in debug mode.
|
3673
|
+
li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
|
3674
|
+
lw(cp, MemOperand(t8));
|
3675
|
+
#ifdef DEBUG
|
3676
|
+
sw(a3, MemOperand(t8));
|
3677
|
+
#endif
|
3678
|
+
|
3679
|
+
// Pop the arguments, restore registers, and return.
|
3680
|
+
mov(sp, fp); // Respect ABI stack constraint.
|
3681
|
+
lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
|
3682
|
+
lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
|
3683
|
+
addiu(sp, sp, 8);
|
3684
|
+
if (argument_count.is_valid()) {
|
3685
|
+
sll(t8, argument_count, kPointerSizeLog2);
|
3686
|
+
addu(sp, sp, t8);
|
3687
|
+
}
|
3688
|
+
}
|
3689
|
+
|
3690
|
+
|
3691
|
+
// Initialize the header of a freshly allocated string: smi-tagged length,
// empty hash field, and the map given by |map_index|. Both scratch
// registers are clobbered.
void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  sll(scratch1, length, kSmiTagSize);
  LoadRoot(scratch2, map_index);
  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
  li(scratch1, Operand(String::kEmptyHashField));
  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}
|
3703
|
+
|
3704
|
+
|
3705
|
+
int MacroAssembler::ActivationFrameAlignment() {
|
3706
|
+
#if defined(V8_HOST_ARCH_MIPS)
|
3707
|
+
// Running on the real platform. Use the alignment as mandated by the local
|
3708
|
+
// environment.
|
3709
|
+
// Note: This will break if we ever start generating snapshots on one Mips
|
3710
|
+
// platform for another Mips platform with a different alignment.
|
3711
|
+
return OS::ActivationFrameAlignment();
|
3712
|
+
#else // defined(V8_HOST_ARCH_MIPS)
|
3713
|
+
// If we are using the simulator then we should always align to the expected
|
3714
|
+
// alignment. As the simulator is used to generate snapshots we do not know
|
3715
|
+
// if the target platform will need alignment, so this is controlled from a
|
3716
|
+
// flag.
|
3717
|
+
return FLAG_sim_stack_alignment;
|
3718
|
+
#endif // defined(V8_HOST_ARCH_MIPS)
|
3719
|
+
}
|
3720
|
+
|
3721
|
+
void MacroAssembler::AssertStackIsAligned() {
|
3722
|
+
if (emit_debug_code()) {
|
3723
|
+
const int frame_alignment = ActivationFrameAlignment();
|
3724
|
+
const int frame_alignment_mask = frame_alignment - 1;
|
3725
|
+
|
3726
|
+
if (frame_alignment > kPointerSize) {
|
3727
|
+
Label alignment_as_expected;
|
3728
|
+
ASSERT(IsPowerOf2(frame_alignment));
|
3729
|
+
andi(at, sp, frame_alignment_mask);
|
3730
|
+
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
|
3731
|
+
// Don't use Check here, as it will call Runtime_Abort re-entering here.
|
3732
|
+
stop("Unexpected stack alignment");
|
3733
|
+
bind(&alignment_as_expected);
|
3734
|
+
}
|
3735
|
+
}
|
3736
|
+
}
|
3737
|
+
|
3738
|
+
|
3739
|
+
// Branch to |not_power_of_two_or_zero| unless |reg| holds a power of two.
// Uses the (x & (x-1)) == 0 test; the x-1 underflow also catches zero and
// negative values via the first (signed lt) branch. Clobbers scratch, at.
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  Subu(scratch, reg, Operand(1));
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
         scratch, Operand(zero_reg));
  and_(at, scratch, reg);  // In the delay slot.
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
}
|
3749
|
+
|
3750
|
+
|
3751
|
+
// Branch to |on_not_both_smi| unless both registers hold smis.
// The OR of two smis has a zero tag bit iff both tags are zero. Clobbers at.
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  or_(at, reg1, reg2);
  andi(at, at, kSmiTagMask);
  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
}
|
3760
|
+
|
3761
|
+
|
3762
|
+
// Branch to |on_either_smi| if at least one register holds a smi.
// The AND of the tag bits is zero unless both are non-smis. Clobbers at.
void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  // Both Smi tags must be 1 (not Smi).
  and_(at, reg1, reg2);
  andi(at, at, kSmiTagMask);
  Branch(on_either_smi, eq, at, Operand(zero_reg));
}
|
3772
|
+
|
3773
|
+
|
3774
|
+
// Debug-only check that |object| is NOT a smi. Clobbers at.
void MacroAssembler::AbortIfSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  andi(at, object, kSmiTagMask);
  Assert(ne, "Operand is a smi", at, Operand(zero_reg));
}
|
3779
|
+
|
3780
|
+
|
3781
|
+
// Debug-only check that |object| IS a smi. Clobbers at.
// Fix: the abort message previously read "Operand is a smi", but this
// assertion fires precisely when the operand is NOT a smi (tag bit set),
// which made failure output misleading. The message now states the actual
// failure condition.
void MacroAssembler::AbortIfNotSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  andi(at, object, kSmiTagMask);
  Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
}
|
3786
|
+
|
3787
|
+
|
3788
|
+
// Debug-only check that |object| is a string: first that it is not a smi,
// then that its instance type is below FIRST_NONSTRING_TYPE. |object| is
// preserved via push/pop; t0 is clobbered.
void MacroAssembler::AbortIfNotString(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  And(t0, object, Operand(kSmiTagMask));
  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
  push(object);
  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
  pop(object);
}
|
3798
|
+
|
3799
|
+
|
3800
|
+
// Debug-only check that |src| equals the root value at |root_value_index|,
// aborting with |message| otherwise. Clobbers at.
void MacroAssembler::AbortIfNotRootValue(Register src,
                                         Heap::RootListIndex root_value_index,
                                         const char* message) {
  ASSERT(!src.is(at));
  LoadRoot(at, root_value_index);
  Assert(eq, message, src, Operand(at));
}
|
3807
|
+
|
3808
|
+
|
3809
|
+
// Jump to 'on_not_heap_number' if 'object''s map differs from the heap-number
// map. 'heap_number_map' must already hold kHeapNumberMapRootIndex (verified
// in debug code). Clobbers 'scratch'. NOTE: does not check for smis — the
// lw would read off a smi-tagged value, so callers must exclude smis first.
void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}
|
3817
|
+
|
3818
|
+
|
3819
|
+
// Jump to 'failure' unless 'first' and 'second' are both sequential ASCII
// strings. Callers must have already excluded smis (hence "NonSmis").
// Clobbers scratch1 and scratch2; 'first'/'second' are preserved.
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  // Delegate the instance-type check; scratch1/scratch2 double as both
  // inputs and scratches since the types are no longer needed afterwards.
  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}
|
3838
|
+
|
3839
|
+
|
3840
|
+
// Jump to 'failure' unless 'first' and 'second' are both non-smi sequential
// ASCII strings. Clobbers scratch1 and scratch2.
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi: ANDing the two values yields a 0 smi tag
  // bit if either one is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  And(scratch1, scratch1, Operand(kSmiTagMask));
  Branch(failure, eq, scratch1, Operand(zero_reg));
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}
|
3856
|
+
|
3857
|
+
|
3858
|
+
// Given two raw instance-type values in 'first' and 'second', jump to
// 'failure' unless both describe sequential ASCII strings. Clobbers
// scratch1 and scratch2 (which may alias the inputs).
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Mask of the instance-type bits that distinguish a flat (sequential)
  // ASCII string; the masked value must equal ASCII_STRING_TYPE exactly.
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
  andi(scratch1, first, kFlatAsciiStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
  andi(scratch2, second, kFlatAsciiStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
}
|
3873
|
+
|
3874
|
+
|
3875
|
+
// Single-operand variant of the check above: jump to 'failure' unless the
// raw instance-type value in 'type' describes a sequential ASCII string.
// Clobbers 'scratch'.
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  And(scratch, type, Operand(kFlatAsciiStringMask));
  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}
|
3884
|
+
|
3885
|
+
|
3886
|
+
// Number of C-call arguments passed in registers (a0..a3) by the MIPS ABI;
// any further arguments go on the stack.
static const int kRegisterPassedArguments = 4;
|
3887
|
+
|
3888
|
+
// Reserve and align stack space for a C call with 'num_arguments' arguments
// (must be paired with CallCFunction). When extra alignment is needed, the
// original sp is saved just above the argument area so CallCFunctionHelper
// can restore it after the call. Clobbers 'scratch'.
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Remaining arguments are pushed on the stack, above (higher address than)
  // the argument slots.
  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
  // Words of stack needed: stack-passed arguments plus the mandatory
  // register-argument home slots.
  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
                                0 : num_arguments - kRegisterPassedArguments) +
                               (StandardFrameConstants::kCArgsSlotsSize /
                                kPointerSize);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));  // Round sp down to alignment.
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    // Frame alignment is pointer-sized already; no need to save sp.
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
|
3913
|
+
|
3914
|
+
|
3915
|
+
// Call a C function identified by an ExternalReference, with
// 'num_arguments' arguments. t8 is passed as the helper's scratch register;
// the call itself goes through t9 as the MIPS ABI requires (see
// CallCFunctionHelper).
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunctionHelper(no_reg, function, t8, num_arguments);
}
|
3919
|
+
|
3920
|
+
|
3921
|
+
// Call the C function whose address is already in 'function'. The hole-value
// location is passed as a placeholder ExternalReference — the helper only
// consults it when 'function' is no_reg, which callers of this overload
// presumably avoid (TODO: confirm no caller passes no_reg here).
void MacroAssembler::CallCFunction(Register function,
                                   Register scratch,
                                   int num_arguments) {
  CallCFunctionHelper(function,
                      ExternalReference::the_hole_value_location(isolate()),
                      scratch,
                      num_arguments);
}
|
3929
|
+
|
3930
|
+
|
3931
|
+
// Shared implementation of the CallCFunction overloads: optionally checks
// stack alignment (debug, native MIPS only), moves the target into t9 as the
// MIPS ABI requires, calls it, and pops the argument area set up by
// PrepareCallCFunction. If 'function' is no_reg the target address is
// materialized from 'function_reference' instead.
void MacroAssembler::CallCFunctionHelper(Register function,
                                         ExternalReference function_reference,
                                         Register scratch,
                                         int num_arguments) {
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.

#if defined(V8_HOST_ARCH_MIPS)
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (function.is(no_reg)) {
    function = t9;
    li(function, Operand(function_reference));
  } else if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
  // Must mirror the computation in PrepareCallCFunction so exactly the
  // reserved words are released.
  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
                                0 : num_arguments - kRegisterPassedArguments) +
                               (StandardFrameConstants::kCArgsSlotsSize /
                                kPointerSize);

  if (OS::ActivationFrameAlignment() > kPointerSize) {
    // PrepareCallCFunction stored the pre-alignment sp above the arguments.
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    // Fixed: was 'stack_passed_arguments * sizeof(kPointerSize)', i.e.
    // sizeof(int) — correct on 32-bit only by coincidence. Use kPointerSize
    // directly, matching PrepareCallCFunction.
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
|
3984
|
+
|
3985
|
+
|
3986
|
+
#undef BRANCH_ARGS_CHECK
|
3987
|
+
|
3988
|
+
|
3989
|
+
// Load the instance descriptors of 'map' into 'descriptors'. The map field
// holds either a DescriptorArray or (as a space optimization) a smi-encoded
// bit field 3; in the smi case substitute the canonical empty descriptor
// array so callers always see a DescriptorArray.
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors,
     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  // Fixed: '&not_smi' had been garbled to '¬_smi' (HTML '&not' entity
  // mangling during extraction), which does not compile.
  JumpIfNotSmi(descriptors, &not_smi);
  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
  bind(&not_smi);
}
|
3998
|
+
|
3999
|
+
|
4000
|
+
// Construct a patcher for 'instructions' instructions starting at 'address'.
// The embedded macro assembler writes directly over the existing code.
CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
|
4010
|
+
|
4011
|
+
|
4012
|
+
// Flush the instruction cache for the patched region and verify (debug mode)
// that exactly 'size_' bytes were emitted.
CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
|
4020
|
+
|
4021
|
+
|
4022
|
+
// Emit one raw instruction word at the current patch position.
void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}
|
4025
|
+
|
4026
|
+
|
4027
|
+
// Emit an address as instruction-sized data at the current patch position.
// The reinterpret_cast assumes Address and Instr have the same width
// (32 bits on this MIPS target — TODO confirm for any other configuration).
void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}
|
4030
|
+
|
4031
|
+
|
4032
|
+
// Rewrite the branch instruction at the current patch position so that it
// branches on 'cond'. Only 'eq' and 'ne' are supported as the new condition.
void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  ASSERT(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only the 'eq' and 'ne' cond values are supported and the simple
  // branch instructions (with opcode being the branch type).
  // There are some special cases (see Assembler::IsBranch()) so extending this
  // would be tricky.
  ASSERT(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
  opcode = (cond == eq) ? BEQ : BNE;
  // Keep the register/offset fields, replace only the opcode, and re-emit
  // over the original instruction.
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}
|
4052
|
+
|
4053
|
+
|
4054
|
+
} } // namespace v8::internal
|
4055
|
+
|
4056
|
+
#endif // V8_TARGET_ARCH_MIPS
|