libv8 3.3.10.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (700)
  1. data/.gitignore +8 -0
  2. data/.gitmodules +3 -0
  3. data/Gemfile +4 -0
  4. data/README.md +44 -0
  5. data/Rakefile +73 -0
  6. data/ext/libv8/extconf.rb +9 -0
  7. data/lib/libv8.rb +15 -0
  8. data/lib/libv8/Makefile +38 -0
  9. data/lib/libv8/detect_cpu.rb +27 -0
  10. data/lib/libv8/fpic-on-linux-amd64.patch +13 -0
  11. data/lib/libv8/scons/CHANGES.txt +5334 -0
  12. data/lib/libv8/scons/LICENSE.txt +20 -0
  13. data/lib/libv8/scons/MANIFEST +199 -0
  14. data/lib/libv8/scons/PKG-INFO +13 -0
  15. data/lib/libv8/scons/README.txt +243 -0
  16. data/lib/libv8/scons/RELEASE.txt +98 -0
  17. data/lib/libv8/scons/engine/SCons/Action.py +1241 -0
  18. data/lib/libv8/scons/engine/SCons/Builder.py +877 -0
  19. data/lib/libv8/scons/engine/SCons/CacheDir.py +216 -0
  20. data/lib/libv8/scons/engine/SCons/Conftest.py +793 -0
  21. data/lib/libv8/scons/engine/SCons/Debug.py +220 -0
  22. data/lib/libv8/scons/engine/SCons/Defaults.py +480 -0
  23. data/lib/libv8/scons/engine/SCons/Environment.py +2318 -0
  24. data/lib/libv8/scons/engine/SCons/Errors.py +205 -0
  25. data/lib/libv8/scons/engine/SCons/Executor.py +633 -0
  26. data/lib/libv8/scons/engine/SCons/Job.py +435 -0
  27. data/lib/libv8/scons/engine/SCons/Memoize.py +244 -0
  28. data/lib/libv8/scons/engine/SCons/Node/Alias.py +152 -0
  29. data/lib/libv8/scons/engine/SCons/Node/FS.py +3142 -0
  30. data/lib/libv8/scons/engine/SCons/Node/Python.py +128 -0
  31. data/lib/libv8/scons/engine/SCons/Node/__init__.py +1328 -0
  32. data/lib/libv8/scons/engine/SCons/Options/BoolOption.py +50 -0
  33. data/lib/libv8/scons/engine/SCons/Options/EnumOption.py +50 -0
  34. data/lib/libv8/scons/engine/SCons/Options/ListOption.py +50 -0
  35. data/lib/libv8/scons/engine/SCons/Options/PackageOption.py +50 -0
  36. data/lib/libv8/scons/engine/SCons/Options/PathOption.py +76 -0
  37. data/lib/libv8/scons/engine/SCons/Options/__init__.py +67 -0
  38. data/lib/libv8/scons/engine/SCons/PathList.py +231 -0
  39. data/lib/libv8/scons/engine/SCons/Platform/__init__.py +241 -0
  40. data/lib/libv8/scons/engine/SCons/Platform/aix.py +69 -0
  41. data/lib/libv8/scons/engine/SCons/Platform/cygwin.py +55 -0
  42. data/lib/libv8/scons/engine/SCons/Platform/darwin.py +46 -0
  43. data/lib/libv8/scons/engine/SCons/Platform/hpux.py +46 -0
  44. data/lib/libv8/scons/engine/SCons/Platform/irix.py +44 -0
  45. data/lib/libv8/scons/engine/SCons/Platform/os2.py +58 -0
  46. data/lib/libv8/scons/engine/SCons/Platform/posix.py +263 -0
  47. data/lib/libv8/scons/engine/SCons/Platform/sunos.py +50 -0
  48. data/lib/libv8/scons/engine/SCons/Platform/win32.py +385 -0
  49. data/lib/libv8/scons/engine/SCons/SConf.py +1030 -0
  50. data/lib/libv8/scons/engine/SCons/SConsign.py +383 -0
  51. data/lib/libv8/scons/engine/SCons/Scanner/C.py +132 -0
  52. data/lib/libv8/scons/engine/SCons/Scanner/D.py +73 -0
  53. data/lib/libv8/scons/engine/SCons/Scanner/Dir.py +109 -0
  54. data/lib/libv8/scons/engine/SCons/Scanner/Fortran.py +316 -0
  55. data/lib/libv8/scons/engine/SCons/Scanner/IDL.py +48 -0
  56. data/lib/libv8/scons/engine/SCons/Scanner/LaTeX.py +384 -0
  57. data/lib/libv8/scons/engine/SCons/Scanner/Prog.py +101 -0
  58. data/lib/libv8/scons/engine/SCons/Scanner/RC.py +55 -0
  59. data/lib/libv8/scons/engine/SCons/Scanner/__init__.py +413 -0
  60. data/lib/libv8/scons/engine/SCons/Script/Interactive.py +384 -0
  61. data/lib/libv8/scons/engine/SCons/Script/Main.py +1334 -0
  62. data/lib/libv8/scons/engine/SCons/Script/SConsOptions.py +939 -0
  63. data/lib/libv8/scons/engine/SCons/Script/SConscript.py +640 -0
  64. data/lib/libv8/scons/engine/SCons/Script/__init__.py +412 -0
  65. data/lib/libv8/scons/engine/SCons/Sig.py +63 -0
  66. data/lib/libv8/scons/engine/SCons/Subst.py +904 -0
  67. data/lib/libv8/scons/engine/SCons/Taskmaster.py +1017 -0
  68. data/lib/libv8/scons/engine/SCons/Tool/386asm.py +61 -0
  69. data/lib/libv8/scons/engine/SCons/Tool/BitKeeper.py +67 -0
  70. data/lib/libv8/scons/engine/SCons/Tool/CVS.py +73 -0
  71. data/lib/libv8/scons/engine/SCons/Tool/FortranCommon.py +246 -0
  72. data/lib/libv8/scons/engine/SCons/Tool/JavaCommon.py +323 -0
  73. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/__init__.py +56 -0
  74. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/arch.py +61 -0
  75. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/common.py +240 -0
  76. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/netframework.py +82 -0
  77. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/sdk.py +391 -0
  78. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vc.py +456 -0
  79. data/lib/libv8/scons/engine/SCons/Tool/MSCommon/vs.py +499 -0
  80. data/lib/libv8/scons/engine/SCons/Tool/Perforce.py +103 -0
  81. data/lib/libv8/scons/engine/SCons/Tool/PharLapCommon.py +137 -0
  82. data/lib/libv8/scons/engine/SCons/Tool/RCS.py +64 -0
  83. data/lib/libv8/scons/engine/SCons/Tool/SCCS.py +64 -0
  84. data/lib/libv8/scons/engine/SCons/Tool/Subversion.py +71 -0
  85. data/lib/libv8/scons/engine/SCons/Tool/__init__.py +681 -0
  86. data/lib/libv8/scons/engine/SCons/Tool/aixc++.py +82 -0
  87. data/lib/libv8/scons/engine/SCons/Tool/aixcc.py +74 -0
  88. data/lib/libv8/scons/engine/SCons/Tool/aixf77.py +80 -0
  89. data/lib/libv8/scons/engine/SCons/Tool/aixlink.py +76 -0
  90. data/lib/libv8/scons/engine/SCons/Tool/applelink.py +71 -0
  91. data/lib/libv8/scons/engine/SCons/Tool/ar.py +63 -0
  92. data/lib/libv8/scons/engine/SCons/Tool/as.py +78 -0
  93. data/lib/libv8/scons/engine/SCons/Tool/bcc32.py +81 -0
  94. data/lib/libv8/scons/engine/SCons/Tool/c++.py +99 -0
  95. data/lib/libv8/scons/engine/SCons/Tool/cc.py +102 -0
  96. data/lib/libv8/scons/engine/SCons/Tool/cvf.py +58 -0
  97. data/lib/libv8/scons/engine/SCons/Tool/default.py +50 -0
  98. data/lib/libv8/scons/engine/SCons/Tool/dmd.py +223 -0
  99. data/lib/libv8/scons/engine/SCons/Tool/dvi.py +64 -0
  100. data/lib/libv8/scons/engine/SCons/Tool/dvipdf.py +124 -0
  101. data/lib/libv8/scons/engine/SCons/Tool/dvips.py +94 -0
  102. data/lib/libv8/scons/engine/SCons/Tool/f77.py +62 -0
  103. data/lib/libv8/scons/engine/SCons/Tool/f90.py +62 -0
  104. data/lib/libv8/scons/engine/SCons/Tool/f95.py +63 -0
  105. data/lib/libv8/scons/engine/SCons/Tool/filesystem.py +98 -0
  106. data/lib/libv8/scons/engine/SCons/Tool/fortran.py +62 -0
  107. data/lib/libv8/scons/engine/SCons/Tool/g++.py +90 -0
  108. data/lib/libv8/scons/engine/SCons/Tool/g77.py +73 -0
  109. data/lib/libv8/scons/engine/SCons/Tool/gas.py +53 -0
  110. data/lib/libv8/scons/engine/SCons/Tool/gcc.py +80 -0
  111. data/lib/libv8/scons/engine/SCons/Tool/gfortran.py +64 -0
  112. data/lib/libv8/scons/engine/SCons/Tool/gnulink.py +63 -0
  113. data/lib/libv8/scons/engine/SCons/Tool/gs.py +81 -0
  114. data/lib/libv8/scons/engine/SCons/Tool/hpc++.py +84 -0
  115. data/lib/libv8/scons/engine/SCons/Tool/hpcc.py +53 -0
  116. data/lib/libv8/scons/engine/SCons/Tool/hplink.py +77 -0
  117. data/lib/libv8/scons/engine/SCons/Tool/icc.py +59 -0
  118. data/lib/libv8/scons/engine/SCons/Tool/icl.py +52 -0
  119. data/lib/libv8/scons/engine/SCons/Tool/ifl.py +72 -0
  120. data/lib/libv8/scons/engine/SCons/Tool/ifort.py +88 -0
  121. data/lib/libv8/scons/engine/SCons/Tool/ilink.py +59 -0
  122. data/lib/libv8/scons/engine/SCons/Tool/ilink32.py +60 -0
  123. data/lib/libv8/scons/engine/SCons/Tool/install.py +229 -0
  124. data/lib/libv8/scons/engine/SCons/Tool/intelc.py +482 -0
  125. data/lib/libv8/scons/engine/SCons/Tool/ipkg.py +67 -0
  126. data/lib/libv8/scons/engine/SCons/Tool/jar.py +110 -0
  127. data/lib/libv8/scons/engine/SCons/Tool/javac.py +230 -0
  128. data/lib/libv8/scons/engine/SCons/Tool/javah.py +137 -0
  129. data/lib/libv8/scons/engine/SCons/Tool/latex.py +79 -0
  130. data/lib/libv8/scons/engine/SCons/Tool/lex.py +97 -0
  131. data/lib/libv8/scons/engine/SCons/Tool/link.py +121 -0
  132. data/lib/libv8/scons/engine/SCons/Tool/linkloc.py +112 -0
  133. data/lib/libv8/scons/engine/SCons/Tool/m4.py +63 -0
  134. data/lib/libv8/scons/engine/SCons/Tool/masm.py +77 -0
  135. data/lib/libv8/scons/engine/SCons/Tool/midl.py +88 -0
  136. data/lib/libv8/scons/engine/SCons/Tool/mingw.py +158 -0
  137. data/lib/libv8/scons/engine/SCons/Tool/mslib.py +64 -0
  138. data/lib/libv8/scons/engine/SCons/Tool/mslink.py +266 -0
  139. data/lib/libv8/scons/engine/SCons/Tool/mssdk.py +50 -0
  140. data/lib/libv8/scons/engine/SCons/Tool/msvc.py +268 -0
  141. data/lib/libv8/scons/engine/SCons/Tool/msvs.py +1388 -0
  142. data/lib/libv8/scons/engine/SCons/Tool/mwcc.py +207 -0
  143. data/lib/libv8/scons/engine/SCons/Tool/mwld.py +107 -0
  144. data/lib/libv8/scons/engine/SCons/Tool/nasm.py +72 -0
  145. data/lib/libv8/scons/engine/SCons/Tool/packaging/__init__.py +312 -0
  146. data/lib/libv8/scons/engine/SCons/Tool/packaging/ipk.py +185 -0
  147. data/lib/libv8/scons/engine/SCons/Tool/packaging/msi.py +527 -0
  148. data/lib/libv8/scons/engine/SCons/Tool/packaging/rpm.py +365 -0
  149. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_tarbz2.py +43 -0
  150. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_targz.py +43 -0
  151. data/lib/libv8/scons/engine/SCons/Tool/packaging/src_zip.py +43 -0
  152. data/lib/libv8/scons/engine/SCons/Tool/packaging/tarbz2.py +44 -0
  153. data/lib/libv8/scons/engine/SCons/Tool/packaging/targz.py +44 -0
  154. data/lib/libv8/scons/engine/SCons/Tool/packaging/zip.py +44 -0
  155. data/lib/libv8/scons/engine/SCons/Tool/pdf.py +78 -0
  156. data/lib/libv8/scons/engine/SCons/Tool/pdflatex.py +83 -0
  157. data/lib/libv8/scons/engine/SCons/Tool/pdftex.py +108 -0
  158. data/lib/libv8/scons/engine/SCons/Tool/qt.py +336 -0
  159. data/lib/libv8/scons/engine/SCons/Tool/rmic.py +120 -0
  160. data/lib/libv8/scons/engine/SCons/Tool/rpcgen.py +70 -0
  161. data/lib/libv8/scons/engine/SCons/Tool/rpm.py +132 -0
  162. data/lib/libv8/scons/engine/SCons/Tool/sgiar.py +68 -0
  163. data/lib/libv8/scons/engine/SCons/Tool/sgic++.py +58 -0
  164. data/lib/libv8/scons/engine/SCons/Tool/sgicc.py +53 -0
  165. data/lib/libv8/scons/engine/SCons/Tool/sgilink.py +63 -0
  166. data/lib/libv8/scons/engine/SCons/Tool/sunar.py +67 -0
  167. data/lib/libv8/scons/engine/SCons/Tool/sunc++.py +142 -0
  168. data/lib/libv8/scons/engine/SCons/Tool/suncc.py +58 -0
  169. data/lib/libv8/scons/engine/SCons/Tool/sunf77.py +63 -0
  170. data/lib/libv8/scons/engine/SCons/Tool/sunf90.py +64 -0
  171. data/lib/libv8/scons/engine/SCons/Tool/sunf95.py +64 -0
  172. data/lib/libv8/scons/engine/SCons/Tool/sunlink.py +77 -0
  173. data/lib/libv8/scons/engine/SCons/Tool/swig.py +182 -0
  174. data/lib/libv8/scons/engine/SCons/Tool/tar.py +73 -0
  175. data/lib/libv8/scons/engine/SCons/Tool/tex.py +813 -0
  176. data/lib/libv8/scons/engine/SCons/Tool/textfile.py +175 -0
  177. data/lib/libv8/scons/engine/SCons/Tool/tlib.py +53 -0
  178. data/lib/libv8/scons/engine/SCons/Tool/wix.py +99 -0
  179. data/lib/libv8/scons/engine/SCons/Tool/yacc.py +130 -0
  180. data/lib/libv8/scons/engine/SCons/Tool/zip.py +99 -0
  181. data/lib/libv8/scons/engine/SCons/Util.py +1492 -0
  182. data/lib/libv8/scons/engine/SCons/Variables/BoolVariable.py +89 -0
  183. data/lib/libv8/scons/engine/SCons/Variables/EnumVariable.py +103 -0
  184. data/lib/libv8/scons/engine/SCons/Variables/ListVariable.py +135 -0
  185. data/lib/libv8/scons/engine/SCons/Variables/PackageVariable.py +106 -0
  186. data/lib/libv8/scons/engine/SCons/Variables/PathVariable.py +147 -0
  187. data/lib/libv8/scons/engine/SCons/Variables/__init__.py +312 -0
  188. data/lib/libv8/scons/engine/SCons/Warnings.py +246 -0
  189. data/lib/libv8/scons/engine/SCons/__init__.py +49 -0
  190. data/lib/libv8/scons/engine/SCons/compat/__init__.py +237 -0
  191. data/lib/libv8/scons/engine/SCons/compat/_scons_builtins.py +150 -0
  192. data/lib/libv8/scons/engine/SCons/compat/_scons_collections.py +45 -0
  193. data/lib/libv8/scons/engine/SCons/compat/_scons_dbm.py +45 -0
  194. data/lib/libv8/scons/engine/SCons/compat/_scons_hashlib.py +76 -0
  195. data/lib/libv8/scons/engine/SCons/compat/_scons_io.py +45 -0
  196. data/lib/libv8/scons/engine/SCons/compat/_scons_sets.py +563 -0
  197. data/lib/libv8/scons/engine/SCons/compat/_scons_subprocess.py +1281 -0
  198. data/lib/libv8/scons/engine/SCons/cpp.py +589 -0
  199. data/lib/libv8/scons/engine/SCons/dblite.py +251 -0
  200. data/lib/libv8/scons/engine/SCons/exitfuncs.py +77 -0
  201. data/lib/libv8/scons/os_spawnv_fix.diff +83 -0
  202. data/lib/libv8/scons/scons-time.1 +1017 -0
  203. data/lib/libv8/scons/scons.1 +15219 -0
  204. data/lib/libv8/scons/sconsign.1 +208 -0
  205. data/lib/libv8/scons/script/scons +196 -0
  206. data/lib/libv8/scons/script/scons-time +1544 -0
  207. data/lib/libv8/scons/script/scons.bat +31 -0
  208. data/lib/libv8/scons/script/sconsign +513 -0
  209. data/lib/libv8/scons/setup.cfg +6 -0
  210. data/lib/libv8/scons/setup.py +425 -0
  211. data/lib/libv8/v8/.gitignore +35 -0
  212. data/lib/libv8/v8/AUTHORS +44 -0
  213. data/lib/libv8/v8/ChangeLog +2839 -0
  214. data/lib/libv8/v8/LICENSE +52 -0
  215. data/lib/libv8/v8/LICENSE.strongtalk +29 -0
  216. data/lib/libv8/v8/LICENSE.v8 +26 -0
  217. data/lib/libv8/v8/LICENSE.valgrind +45 -0
  218. data/lib/libv8/v8/SConstruct +1478 -0
  219. data/lib/libv8/v8/build/README.txt +49 -0
  220. data/lib/libv8/v8/build/all.gyp +18 -0
  221. data/lib/libv8/v8/build/armu.gypi +32 -0
  222. data/lib/libv8/v8/build/common.gypi +144 -0
  223. data/lib/libv8/v8/build/gyp_v8 +145 -0
  224. data/lib/libv8/v8/include/v8-debug.h +395 -0
  225. data/lib/libv8/v8/include/v8-preparser.h +117 -0
  226. data/lib/libv8/v8/include/v8-profiler.h +505 -0
  227. data/lib/libv8/v8/include/v8-testing.h +104 -0
  228. data/lib/libv8/v8/include/v8.h +4124 -0
  229. data/lib/libv8/v8/include/v8stdint.h +53 -0
  230. data/lib/libv8/v8/preparser/SConscript +38 -0
  231. data/lib/libv8/v8/preparser/preparser-process.cc +379 -0
  232. data/lib/libv8/v8/src/SConscript +368 -0
  233. data/lib/libv8/v8/src/accessors.cc +767 -0
  234. data/lib/libv8/v8/src/accessors.h +123 -0
  235. data/lib/libv8/v8/src/allocation-inl.h +49 -0
  236. data/lib/libv8/v8/src/allocation.cc +122 -0
  237. data/lib/libv8/v8/src/allocation.h +143 -0
  238. data/lib/libv8/v8/src/api.cc +5845 -0
  239. data/lib/libv8/v8/src/api.h +574 -0
  240. data/lib/libv8/v8/src/apinatives.js +110 -0
  241. data/lib/libv8/v8/src/apiutils.h +73 -0
  242. data/lib/libv8/v8/src/arguments.h +118 -0
  243. data/lib/libv8/v8/src/arm/assembler-arm-inl.h +353 -0
  244. data/lib/libv8/v8/src/arm/assembler-arm.cc +2661 -0
  245. data/lib/libv8/v8/src/arm/assembler-arm.h +1375 -0
  246. data/lib/libv8/v8/src/arm/builtins-arm.cc +1658 -0
  247. data/lib/libv8/v8/src/arm/code-stubs-arm.cc +6398 -0
  248. data/lib/libv8/v8/src/arm/code-stubs-arm.h +673 -0
  249. data/lib/libv8/v8/src/arm/codegen-arm.cc +52 -0
  250. data/lib/libv8/v8/src/arm/codegen-arm.h +91 -0
  251. data/lib/libv8/v8/src/arm/constants-arm.cc +152 -0
  252. data/lib/libv8/v8/src/arm/constants-arm.h +775 -0
  253. data/lib/libv8/v8/src/arm/cpu-arm.cc +120 -0
  254. data/lib/libv8/v8/src/arm/debug-arm.cc +317 -0
  255. data/lib/libv8/v8/src/arm/deoptimizer-arm.cc +754 -0
  256. data/lib/libv8/v8/src/arm/disasm-arm.cc +1506 -0
  257. data/lib/libv8/v8/src/arm/frames-arm.cc +45 -0
  258. data/lib/libv8/v8/src/arm/frames-arm.h +168 -0
  259. data/lib/libv8/v8/src/arm/full-codegen-arm.cc +4375 -0
  260. data/lib/libv8/v8/src/arm/ic-arm.cc +1562 -0
  261. data/lib/libv8/v8/src/arm/lithium-arm.cc +2206 -0
  262. data/lib/libv8/v8/src/arm/lithium-arm.h +2348 -0
  263. data/lib/libv8/v8/src/arm/lithium-codegen-arm.cc +4526 -0
  264. data/lib/libv8/v8/src/arm/lithium-codegen-arm.h +403 -0
  265. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
  266. data/lib/libv8/v8/src/arm/lithium-gap-resolver-arm.h +84 -0
  267. data/lib/libv8/v8/src/arm/macro-assembler-arm.cc +3163 -0
  268. data/lib/libv8/v8/src/arm/macro-assembler-arm.h +1126 -0
  269. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
  270. data/lib/libv8/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
  271. data/lib/libv8/v8/src/arm/simulator-arm.cc +3424 -0
  272. data/lib/libv8/v8/src/arm/simulator-arm.h +431 -0
  273. data/lib/libv8/v8/src/arm/stub-cache-arm.cc +4243 -0
  274. data/lib/libv8/v8/src/array.js +1366 -0
  275. data/lib/libv8/v8/src/assembler.cc +1207 -0
  276. data/lib/libv8/v8/src/assembler.h +858 -0
  277. data/lib/libv8/v8/src/ast-inl.h +112 -0
  278. data/lib/libv8/v8/src/ast.cc +1146 -0
  279. data/lib/libv8/v8/src/ast.h +2188 -0
  280. data/lib/libv8/v8/src/atomicops.h +167 -0
  281. data/lib/libv8/v8/src/atomicops_internals_arm_gcc.h +145 -0
  282. data/lib/libv8/v8/src/atomicops_internals_mips_gcc.h +169 -0
  283. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.cc +133 -0
  284. data/lib/libv8/v8/src/atomicops_internals_x86_gcc.h +287 -0
  285. data/lib/libv8/v8/src/atomicops_internals_x86_macosx.h +301 -0
  286. data/lib/libv8/v8/src/atomicops_internals_x86_msvc.h +203 -0
  287. data/lib/libv8/v8/src/bignum-dtoa.cc +655 -0
  288. data/lib/libv8/v8/src/bignum-dtoa.h +81 -0
  289. data/lib/libv8/v8/src/bignum.cc +768 -0
  290. data/lib/libv8/v8/src/bignum.h +140 -0
  291. data/lib/libv8/v8/src/bootstrapper.cc +2184 -0
  292. data/lib/libv8/v8/src/bootstrapper.h +188 -0
  293. data/lib/libv8/v8/src/builtins.cc +1707 -0
  294. data/lib/libv8/v8/src/builtins.h +371 -0
  295. data/lib/libv8/v8/src/bytecodes-irregexp.h +105 -0
  296. data/lib/libv8/v8/src/cached-powers.cc +177 -0
  297. data/lib/libv8/v8/src/cached-powers.h +65 -0
  298. data/lib/libv8/v8/src/char-predicates-inl.h +94 -0
  299. data/lib/libv8/v8/src/char-predicates.h +67 -0
  300. data/lib/libv8/v8/src/checks.cc +110 -0
  301. data/lib/libv8/v8/src/checks.h +296 -0
  302. data/lib/libv8/v8/src/circular-queue-inl.h +53 -0
  303. data/lib/libv8/v8/src/circular-queue.cc +122 -0
  304. data/lib/libv8/v8/src/circular-queue.h +103 -0
  305. data/lib/libv8/v8/src/code-stubs.cc +267 -0
  306. data/lib/libv8/v8/src/code-stubs.h +1011 -0
  307. data/lib/libv8/v8/src/code.h +70 -0
  308. data/lib/libv8/v8/src/codegen.cc +231 -0
  309. data/lib/libv8/v8/src/codegen.h +84 -0
  310. data/lib/libv8/v8/src/compilation-cache.cc +540 -0
  311. data/lib/libv8/v8/src/compilation-cache.h +287 -0
  312. data/lib/libv8/v8/src/compiler.cc +786 -0
  313. data/lib/libv8/v8/src/compiler.h +312 -0
  314. data/lib/libv8/v8/src/contexts.cc +347 -0
  315. data/lib/libv8/v8/src/contexts.h +391 -0
  316. data/lib/libv8/v8/src/conversions-inl.h +106 -0
  317. data/lib/libv8/v8/src/conversions.cc +1131 -0
  318. data/lib/libv8/v8/src/conversions.h +135 -0
  319. data/lib/libv8/v8/src/counters.cc +93 -0
  320. data/lib/libv8/v8/src/counters.h +254 -0
  321. data/lib/libv8/v8/src/cpu-profiler-inl.h +101 -0
  322. data/lib/libv8/v8/src/cpu-profiler.cc +609 -0
  323. data/lib/libv8/v8/src/cpu-profiler.h +302 -0
  324. data/lib/libv8/v8/src/cpu.h +69 -0
  325. data/lib/libv8/v8/src/d8-debug.cc +367 -0
  326. data/lib/libv8/v8/src/d8-debug.h +158 -0
  327. data/lib/libv8/v8/src/d8-posix.cc +695 -0
  328. data/lib/libv8/v8/src/d8-readline.cc +130 -0
  329. data/lib/libv8/v8/src/d8-windows.cc +42 -0
  330. data/lib/libv8/v8/src/d8.cc +803 -0
  331. data/lib/libv8/v8/src/d8.gyp +91 -0
  332. data/lib/libv8/v8/src/d8.h +235 -0
  333. data/lib/libv8/v8/src/d8.js +2798 -0
  334. data/lib/libv8/v8/src/data-flow.cc +66 -0
  335. data/lib/libv8/v8/src/data-flow.h +205 -0
  336. data/lib/libv8/v8/src/date.js +1103 -0
  337. data/lib/libv8/v8/src/dateparser-inl.h +127 -0
  338. data/lib/libv8/v8/src/dateparser.cc +178 -0
  339. data/lib/libv8/v8/src/dateparser.h +266 -0
  340. data/lib/libv8/v8/src/debug-agent.cc +447 -0
  341. data/lib/libv8/v8/src/debug-agent.h +129 -0
  342. data/lib/libv8/v8/src/debug-debugger.js +2569 -0
  343. data/lib/libv8/v8/src/debug.cc +3165 -0
  344. data/lib/libv8/v8/src/debug.h +1057 -0
  345. data/lib/libv8/v8/src/deoptimizer.cc +1256 -0
  346. data/lib/libv8/v8/src/deoptimizer.h +602 -0
  347. data/lib/libv8/v8/src/disasm.h +80 -0
  348. data/lib/libv8/v8/src/disassembler.cc +343 -0
  349. data/lib/libv8/v8/src/disassembler.h +58 -0
  350. data/lib/libv8/v8/src/diy-fp.cc +58 -0
  351. data/lib/libv8/v8/src/diy-fp.h +117 -0
  352. data/lib/libv8/v8/src/double.h +238 -0
  353. data/lib/libv8/v8/src/dtoa.cc +103 -0
  354. data/lib/libv8/v8/src/dtoa.h +85 -0
  355. data/lib/libv8/v8/src/execution.cc +849 -0
  356. data/lib/libv8/v8/src/execution.h +297 -0
  357. data/lib/libv8/v8/src/extensions/experimental/break-iterator.cc +250 -0
  358. data/lib/libv8/v8/src/extensions/experimental/break-iterator.h +89 -0
  359. data/lib/libv8/v8/src/extensions/experimental/collator.cc +218 -0
  360. data/lib/libv8/v8/src/extensions/experimental/collator.h +69 -0
  361. data/lib/libv8/v8/src/extensions/experimental/experimental.gyp +94 -0
  362. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.cc +78 -0
  363. data/lib/libv8/v8/src/extensions/experimental/i18n-extension.h +54 -0
  364. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.cc +112 -0
  365. data/lib/libv8/v8/src/extensions/experimental/i18n-locale.h +60 -0
  366. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.cc +43 -0
  367. data/lib/libv8/v8/src/extensions/experimental/i18n-utils.h +49 -0
  368. data/lib/libv8/v8/src/extensions/experimental/i18n.js +180 -0
  369. data/lib/libv8/v8/src/extensions/experimental/language-matcher.cc +251 -0
  370. data/lib/libv8/v8/src/extensions/experimental/language-matcher.h +95 -0
  371. data/lib/libv8/v8/src/extensions/externalize-string-extension.cc +141 -0
  372. data/lib/libv8/v8/src/extensions/externalize-string-extension.h +50 -0
  373. data/lib/libv8/v8/src/extensions/gc-extension.cc +58 -0
  374. data/lib/libv8/v8/src/extensions/gc-extension.h +49 -0
  375. data/lib/libv8/v8/src/factory.cc +1222 -0
  376. data/lib/libv8/v8/src/factory.h +442 -0
  377. data/lib/libv8/v8/src/fast-dtoa.cc +736 -0
  378. data/lib/libv8/v8/src/fast-dtoa.h +83 -0
  379. data/lib/libv8/v8/src/fixed-dtoa.cc +405 -0
  380. data/lib/libv8/v8/src/fixed-dtoa.h +55 -0
  381. data/lib/libv8/v8/src/flag-definitions.h +560 -0
  382. data/lib/libv8/v8/src/flags.cc +551 -0
  383. data/lib/libv8/v8/src/flags.h +79 -0
  384. data/lib/libv8/v8/src/frames-inl.h +247 -0
  385. data/lib/libv8/v8/src/frames.cc +1243 -0
  386. data/lib/libv8/v8/src/frames.h +870 -0
  387. data/lib/libv8/v8/src/full-codegen.cc +1374 -0
  388. data/lib/libv8/v8/src/full-codegen.h +771 -0
  389. data/lib/libv8/v8/src/func-name-inferrer.cc +92 -0
  390. data/lib/libv8/v8/src/func-name-inferrer.h +111 -0
  391. data/lib/libv8/v8/src/gdb-jit.cc +1555 -0
  392. data/lib/libv8/v8/src/gdb-jit.h +143 -0
  393. data/lib/libv8/v8/src/global-handles.cc +665 -0
  394. data/lib/libv8/v8/src/global-handles.h +284 -0
  395. data/lib/libv8/v8/src/globals.h +325 -0
  396. data/lib/libv8/v8/src/handles-inl.h +177 -0
  397. data/lib/libv8/v8/src/handles.cc +987 -0
  398. data/lib/libv8/v8/src/handles.h +382 -0
  399. data/lib/libv8/v8/src/hashmap.cc +230 -0
  400. data/lib/libv8/v8/src/hashmap.h +123 -0
  401. data/lib/libv8/v8/src/heap-inl.h +704 -0
  402. data/lib/libv8/v8/src/heap-profiler.cc +1173 -0
  403. data/lib/libv8/v8/src/heap-profiler.h +397 -0
  404. data/lib/libv8/v8/src/heap.cc +5930 -0
  405. data/lib/libv8/v8/src/heap.h +2268 -0
  406. data/lib/libv8/v8/src/hydrogen-instructions.cc +1769 -0
  407. data/lib/libv8/v8/src/hydrogen-instructions.h +3971 -0
  408. data/lib/libv8/v8/src/hydrogen.cc +6239 -0
  409. data/lib/libv8/v8/src/hydrogen.h +1202 -0
  410. data/lib/libv8/v8/src/ia32/assembler-ia32-inl.h +446 -0
  411. data/lib/libv8/v8/src/ia32/assembler-ia32.cc +2487 -0
  412. data/lib/libv8/v8/src/ia32/assembler-ia32.h +1144 -0
  413. data/lib/libv8/v8/src/ia32/builtins-ia32.cc +1621 -0
  414. data/lib/libv8/v8/src/ia32/code-stubs-ia32.cc +6198 -0
  415. data/lib/libv8/v8/src/ia32/code-stubs-ia32.h +517 -0
  416. data/lib/libv8/v8/src/ia32/codegen-ia32.cc +265 -0
  417. data/lib/libv8/v8/src/ia32/codegen-ia32.h +79 -0
  418. data/lib/libv8/v8/src/ia32/cpu-ia32.cc +88 -0
  419. data/lib/libv8/v8/src/ia32/debug-ia32.cc +312 -0
  420. data/lib/libv8/v8/src/ia32/deoptimizer-ia32.cc +774 -0
  421. data/lib/libv8/v8/src/ia32/disasm-ia32.cc +1628 -0
  422. data/lib/libv8/v8/src/ia32/frames-ia32.cc +45 -0
  423. data/lib/libv8/v8/src/ia32/frames-ia32.h +142 -0
  424. data/lib/libv8/v8/src/ia32/full-codegen-ia32.cc +4338 -0
  425. data/lib/libv8/v8/src/ia32/ic-ia32.cc +1597 -0
  426. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.cc +4461 -0
  427. data/lib/libv8/v8/src/ia32/lithium-codegen-ia32.h +375 -0
  428. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.cc +475 -0
  429. data/lib/libv8/v8/src/ia32/lithium-gap-resolver-ia32.h +110 -0
  430. data/lib/libv8/v8/src/ia32/lithium-ia32.cc +2261 -0
  431. data/lib/libv8/v8/src/ia32/lithium-ia32.h +2396 -0
  432. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.cc +2136 -0
  433. data/lib/libv8/v8/src/ia32/macro-assembler-ia32.h +775 -0
  434. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.cc +1263 -0
  435. data/lib/libv8/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
  436. data/lib/libv8/v8/src/ia32/simulator-ia32.cc +30 -0
  437. data/lib/libv8/v8/src/ia32/simulator-ia32.h +74 -0
  438. data/lib/libv8/v8/src/ia32/stub-cache-ia32.cc +3847 -0
  439. data/lib/libv8/v8/src/ic-inl.h +130 -0
  440. data/lib/libv8/v8/src/ic.cc +2577 -0
  441. data/lib/libv8/v8/src/ic.h +736 -0
  442. data/lib/libv8/v8/src/inspector.cc +63 -0
  443. data/lib/libv8/v8/src/inspector.h +62 -0
  444. data/lib/libv8/v8/src/interpreter-irregexp.cc +659 -0
  445. data/lib/libv8/v8/src/interpreter-irregexp.h +49 -0
  446. data/lib/libv8/v8/src/isolate-inl.h +50 -0
  447. data/lib/libv8/v8/src/isolate.cc +1869 -0
  448. data/lib/libv8/v8/src/isolate.h +1382 -0
  449. data/lib/libv8/v8/src/json-parser.cc +504 -0
  450. data/lib/libv8/v8/src/json-parser.h +161 -0
  451. data/lib/libv8/v8/src/json.js +342 -0
  452. data/lib/libv8/v8/src/jsregexp.cc +5385 -0
  453. data/lib/libv8/v8/src/jsregexp.h +1492 -0
  454. data/lib/libv8/v8/src/list-inl.h +212 -0
  455. data/lib/libv8/v8/src/list.h +174 -0
  456. data/lib/libv8/v8/src/lithium-allocator-inl.h +142 -0
  457. data/lib/libv8/v8/src/lithium-allocator.cc +2123 -0
  458. data/lib/libv8/v8/src/lithium-allocator.h +630 -0
  459. data/lib/libv8/v8/src/lithium.cc +190 -0
  460. data/lib/libv8/v8/src/lithium.h +597 -0
  461. data/lib/libv8/v8/src/liveedit-debugger.js +1082 -0
  462. data/lib/libv8/v8/src/liveedit.cc +1691 -0
  463. data/lib/libv8/v8/src/liveedit.h +180 -0
  464. data/lib/libv8/v8/src/liveobjectlist-inl.h +126 -0
  465. data/lib/libv8/v8/src/liveobjectlist.cc +2589 -0
  466. data/lib/libv8/v8/src/liveobjectlist.h +322 -0
  467. data/lib/libv8/v8/src/log-inl.h +59 -0
  468. data/lib/libv8/v8/src/log-utils.cc +428 -0
  469. data/lib/libv8/v8/src/log-utils.h +231 -0
  470. data/lib/libv8/v8/src/log.cc +1993 -0
  471. data/lib/libv8/v8/src/log.h +476 -0
  472. data/lib/libv8/v8/src/macro-assembler.h +120 -0
  473. data/lib/libv8/v8/src/macros.py +178 -0
  474. data/lib/libv8/v8/src/mark-compact.cc +3143 -0
  475. data/lib/libv8/v8/src/mark-compact.h +506 -0
  476. data/lib/libv8/v8/src/math.js +264 -0
  477. data/lib/libv8/v8/src/messages.cc +179 -0
  478. data/lib/libv8/v8/src/messages.h +113 -0
  479. data/lib/libv8/v8/src/messages.js +1096 -0
  480. data/lib/libv8/v8/src/mips/assembler-mips-inl.h +312 -0
  481. data/lib/libv8/v8/src/mips/assembler-mips.cc +1960 -0
  482. data/lib/libv8/v8/src/mips/assembler-mips.h +1138 -0
  483. data/lib/libv8/v8/src/mips/builtins-mips.cc +1628 -0
  484. data/lib/libv8/v8/src/mips/code-stubs-mips.cc +6656 -0
  485. data/lib/libv8/v8/src/mips/code-stubs-mips.h +682 -0
  486. data/lib/libv8/v8/src/mips/codegen-mips.cc +52 -0
  487. data/lib/libv8/v8/src/mips/codegen-mips.h +98 -0
  488. data/lib/libv8/v8/src/mips/constants-mips.cc +352 -0
  489. data/lib/libv8/v8/src/mips/constants-mips.h +739 -0
  490. data/lib/libv8/v8/src/mips/cpu-mips.cc +96 -0
  491. data/lib/libv8/v8/src/mips/debug-mips.cc +308 -0
  492. data/lib/libv8/v8/src/mips/deoptimizer-mips.cc +91 -0
  493. data/lib/libv8/v8/src/mips/disasm-mips.cc +1050 -0
  494. data/lib/libv8/v8/src/mips/frames-mips.cc +47 -0
  495. data/lib/libv8/v8/src/mips/frames-mips.h +219 -0
  496. data/lib/libv8/v8/src/mips/full-codegen-mips.cc +4388 -0
  497. data/lib/libv8/v8/src/mips/ic-mips.cc +1580 -0
  498. data/lib/libv8/v8/src/mips/lithium-codegen-mips.h +65 -0
  499. data/lib/libv8/v8/src/mips/lithium-mips.h +307 -0
  500. data/lib/libv8/v8/src/mips/macro-assembler-mips.cc +4056 -0
  501. data/lib/libv8/v8/src/mips/macro-assembler-mips.h +1214 -0
  502. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.cc +1251 -0
  503. data/lib/libv8/v8/src/mips/regexp-macro-assembler-mips.h +252 -0
  504. data/lib/libv8/v8/src/mips/simulator-mips.cc +2621 -0
  505. data/lib/libv8/v8/src/mips/simulator-mips.h +401 -0
  506. data/lib/libv8/v8/src/mips/stub-cache-mips.cc +4285 -0
  507. data/lib/libv8/v8/src/mirror-debugger.js +2382 -0
  508. data/lib/libv8/v8/src/mksnapshot.cc +328 -0
  509. data/lib/libv8/v8/src/natives.h +64 -0
  510. data/lib/libv8/v8/src/objects-debug.cc +738 -0
  511. data/lib/libv8/v8/src/objects-inl.h +4323 -0
  512. data/lib/libv8/v8/src/objects-printer.cc +829 -0
  513. data/lib/libv8/v8/src/objects-visiting.cc +148 -0
  514. data/lib/libv8/v8/src/objects-visiting.h +424 -0
  515. data/lib/libv8/v8/src/objects.cc +10585 -0
  516. data/lib/libv8/v8/src/objects.h +6838 -0
  517. data/lib/libv8/v8/src/parser.cc +4997 -0
  518. data/lib/libv8/v8/src/parser.h +765 -0
  519. data/lib/libv8/v8/src/platform-cygwin.cc +779 -0
  520. data/lib/libv8/v8/src/platform-freebsd.cc +826 -0
  521. data/lib/libv8/v8/src/platform-linux.cc +1149 -0
  522. data/lib/libv8/v8/src/platform-macos.cc +830 -0
  523. data/lib/libv8/v8/src/platform-nullos.cc +479 -0
  524. data/lib/libv8/v8/src/platform-openbsd.cc +640 -0
  525. data/lib/libv8/v8/src/platform-posix.cc +424 -0
  526. data/lib/libv8/v8/src/platform-solaris.cc +762 -0
  527. data/lib/libv8/v8/src/platform-tls-mac.h +62 -0
  528. data/lib/libv8/v8/src/platform-tls-win32.h +62 -0
  529. data/lib/libv8/v8/src/platform-tls.h +50 -0
  530. data/lib/libv8/v8/src/platform-win32.cc +2021 -0
  531. data/lib/libv8/v8/src/platform.h +667 -0
  532. data/lib/libv8/v8/src/preparse-data-format.h +62 -0
  533. data/lib/libv8/v8/src/preparse-data.cc +183 -0
  534. data/lib/libv8/v8/src/preparse-data.h +225 -0
  535. data/lib/libv8/v8/src/preparser-api.cc +220 -0
  536. data/lib/libv8/v8/src/preparser.cc +1450 -0
  537. data/lib/libv8/v8/src/preparser.h +493 -0
  538. data/lib/libv8/v8/src/prettyprinter.cc +1493 -0
  539. data/lib/libv8/v8/src/prettyprinter.h +223 -0
  540. data/lib/libv8/v8/src/profile-generator-inl.h +128 -0
  541. data/lib/libv8/v8/src/profile-generator.cc +3098 -0
  542. data/lib/libv8/v8/src/profile-generator.h +1126 -0
  543. data/lib/libv8/v8/src/property.cc +105 -0
  544. data/lib/libv8/v8/src/property.h +365 -0
  545. data/lib/libv8/v8/src/proxy.js +83 -0
  546. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp-inl.h +78 -0
  547. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.cc +471 -0
  548. data/lib/libv8/v8/src/regexp-macro-assembler-irregexp.h +142 -0
  549. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.cc +373 -0
  550. data/lib/libv8/v8/src/regexp-macro-assembler-tracer.h +104 -0
  551. data/lib/libv8/v8/src/regexp-macro-assembler.cc +267 -0
  552. data/lib/libv8/v8/src/regexp-macro-assembler.h +243 -0
  553. data/lib/libv8/v8/src/regexp-stack.cc +111 -0
  554. data/lib/libv8/v8/src/regexp-stack.h +147 -0
  555. data/lib/libv8/v8/src/regexp.js +483 -0
  556. data/lib/libv8/v8/src/rewriter.cc +360 -0
  557. data/lib/libv8/v8/src/rewriter.h +50 -0
  558. data/lib/libv8/v8/src/runtime-profiler.cc +489 -0
  559. data/lib/libv8/v8/src/runtime-profiler.h +201 -0
  560. data/lib/libv8/v8/src/runtime.cc +12227 -0
  561. data/lib/libv8/v8/src/runtime.h +652 -0
  562. data/lib/libv8/v8/src/runtime.js +649 -0
  563. data/lib/libv8/v8/src/safepoint-table.cc +256 -0
  564. data/lib/libv8/v8/src/safepoint-table.h +270 -0
  565. data/lib/libv8/v8/src/scanner-base.cc +952 -0
  566. data/lib/libv8/v8/src/scanner-base.h +670 -0
  567. data/lib/libv8/v8/src/scanner.cc +345 -0
  568. data/lib/libv8/v8/src/scanner.h +146 -0
  569. data/lib/libv8/v8/src/scopeinfo.cc +646 -0
  570. data/lib/libv8/v8/src/scopeinfo.h +254 -0
  571. data/lib/libv8/v8/src/scopes.cc +1150 -0
  572. data/lib/libv8/v8/src/scopes.h +507 -0
  573. data/lib/libv8/v8/src/serialize.cc +1574 -0
  574. data/lib/libv8/v8/src/serialize.h +589 -0
  575. data/lib/libv8/v8/src/shell.h +55 -0
  576. data/lib/libv8/v8/src/simulator.h +43 -0
  577. data/lib/libv8/v8/src/small-pointer-list.h +163 -0
  578. data/lib/libv8/v8/src/smart-pointer.h +109 -0
  579. data/lib/libv8/v8/src/snapshot-common.cc +83 -0
  580. data/lib/libv8/v8/src/snapshot-empty.cc +54 -0
  581. data/lib/libv8/v8/src/snapshot.h +91 -0
  582. data/lib/libv8/v8/src/spaces-inl.h +529 -0
  583. data/lib/libv8/v8/src/spaces.cc +3145 -0
  584. data/lib/libv8/v8/src/spaces.h +2369 -0
  585. data/lib/libv8/v8/src/splay-tree-inl.h +310 -0
  586. data/lib/libv8/v8/src/splay-tree.h +205 -0
  587. data/lib/libv8/v8/src/string-search.cc +41 -0
  588. data/lib/libv8/v8/src/string-search.h +568 -0
  589. data/lib/libv8/v8/src/string-stream.cc +592 -0
  590. data/lib/libv8/v8/src/string-stream.h +191 -0
  591. data/lib/libv8/v8/src/string.js +994 -0
  592. data/lib/libv8/v8/src/strtod.cc +440 -0
  593. data/lib/libv8/v8/src/strtod.h +40 -0
  594. data/lib/libv8/v8/src/stub-cache.cc +1965 -0
  595. data/lib/libv8/v8/src/stub-cache.h +924 -0
  596. data/lib/libv8/v8/src/third_party/valgrind/valgrind.h +3925 -0
  597. data/lib/libv8/v8/src/token.cc +63 -0
  598. data/lib/libv8/v8/src/token.h +288 -0
  599. data/lib/libv8/v8/src/type-info.cc +507 -0
  600. data/lib/libv8/v8/src/type-info.h +272 -0
  601. data/lib/libv8/v8/src/unbound-queue-inl.h +95 -0
  602. data/lib/libv8/v8/src/unbound-queue.h +69 -0
  603. data/lib/libv8/v8/src/unicode-inl.h +238 -0
  604. data/lib/libv8/v8/src/unicode.cc +1624 -0
  605. data/lib/libv8/v8/src/unicode.h +280 -0
  606. data/lib/libv8/v8/src/uri.js +408 -0
  607. data/lib/libv8/v8/src/utils-inl.h +48 -0
  608. data/lib/libv8/v8/src/utils.cc +371 -0
  609. data/lib/libv8/v8/src/utils.h +800 -0
  610. data/lib/libv8/v8/src/v8-counters.cc +62 -0
  611. data/lib/libv8/v8/src/v8-counters.h +314 -0
  612. data/lib/libv8/v8/src/v8.cc +213 -0
  613. data/lib/libv8/v8/src/v8.h +131 -0
  614. data/lib/libv8/v8/src/v8checks.h +64 -0
  615. data/lib/libv8/v8/src/v8dll-main.cc +44 -0
  616. data/lib/libv8/v8/src/v8globals.h +512 -0
  617. data/lib/libv8/v8/src/v8memory.h +82 -0
  618. data/lib/libv8/v8/src/v8natives.js +1310 -0
  619. data/lib/libv8/v8/src/v8preparserdll-main.cc +39 -0
  620. data/lib/libv8/v8/src/v8threads.cc +464 -0
  621. data/lib/libv8/v8/src/v8threads.h +165 -0
  622. data/lib/libv8/v8/src/v8utils.h +319 -0
  623. data/lib/libv8/v8/src/variables.cc +114 -0
  624. data/lib/libv8/v8/src/variables.h +167 -0
  625. data/lib/libv8/v8/src/version.cc +116 -0
  626. data/lib/libv8/v8/src/version.h +68 -0
  627. data/lib/libv8/v8/src/vm-state-inl.h +138 -0
  628. data/lib/libv8/v8/src/vm-state.h +71 -0
  629. data/lib/libv8/v8/src/win32-headers.h +96 -0
  630. data/lib/libv8/v8/src/x64/assembler-x64-inl.h +462 -0
  631. data/lib/libv8/v8/src/x64/assembler-x64.cc +3027 -0
  632. data/lib/libv8/v8/src/x64/assembler-x64.h +1633 -0
  633. data/lib/libv8/v8/src/x64/builtins-x64.cc +1520 -0
  634. data/lib/libv8/v8/src/x64/code-stubs-x64.cc +5132 -0
  635. data/lib/libv8/v8/src/x64/code-stubs-x64.h +514 -0
  636. data/lib/libv8/v8/src/x64/codegen-x64.cc +146 -0
  637. data/lib/libv8/v8/src/x64/codegen-x64.h +76 -0
  638. data/lib/libv8/v8/src/x64/cpu-x64.cc +88 -0
  639. data/lib/libv8/v8/src/x64/debug-x64.cc +319 -0
  640. data/lib/libv8/v8/src/x64/deoptimizer-x64.cc +815 -0
  641. data/lib/libv8/v8/src/x64/disasm-x64.cc +1832 -0
  642. data/lib/libv8/v8/src/x64/frames-x64.cc +45 -0
  643. data/lib/libv8/v8/src/x64/frames-x64.h +130 -0
  644. data/lib/libv8/v8/src/x64/full-codegen-x64.cc +4318 -0
  645. data/lib/libv8/v8/src/x64/ic-x64.cc +1608 -0
  646. data/lib/libv8/v8/src/x64/lithium-codegen-x64.cc +4267 -0
  647. data/lib/libv8/v8/src/x64/lithium-codegen-x64.h +367 -0
  648. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.cc +320 -0
  649. data/lib/libv8/v8/src/x64/lithium-gap-resolver-x64.h +74 -0
  650. data/lib/libv8/v8/src/x64/lithium-x64.cc +2202 -0
  651. data/lib/libv8/v8/src/x64/lithium-x64.h +2333 -0
  652. data/lib/libv8/v8/src/x64/macro-assembler-x64.cc +3745 -0
  653. data/lib/libv8/v8/src/x64/macro-assembler-x64.h +1290 -0
  654. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
  655. data/lib/libv8/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
  656. data/lib/libv8/v8/src/x64/simulator-x64.cc +27 -0
  657. data/lib/libv8/v8/src/x64/simulator-x64.h +72 -0
  658. data/lib/libv8/v8/src/x64/stub-cache-x64.cc +3610 -0
  659. data/lib/libv8/v8/src/zone-inl.h +140 -0
  660. data/lib/libv8/v8/src/zone.cc +196 -0
  661. data/lib/libv8/v8/src/zone.h +240 -0
  662. data/lib/libv8/v8/tools/codemap.js +265 -0
  663. data/lib/libv8/v8/tools/consarray.js +93 -0
  664. data/lib/libv8/v8/tools/csvparser.js +78 -0
  665. data/lib/libv8/v8/tools/disasm.py +92 -0
  666. data/lib/libv8/v8/tools/freebsd-tick-processor +10 -0
  667. data/lib/libv8/v8/tools/gc-nvp-trace-processor.py +342 -0
  668. data/lib/libv8/v8/tools/gcmole/README +62 -0
  669. data/lib/libv8/v8/tools/gcmole/gccause.lua +60 -0
  670. data/lib/libv8/v8/tools/gcmole/gcmole.cc +1261 -0
  671. data/lib/libv8/v8/tools/gcmole/gcmole.lua +378 -0
  672. data/lib/libv8/v8/tools/generate-ten-powers.scm +286 -0
  673. data/lib/libv8/v8/tools/grokdump.py +841 -0
  674. data/lib/libv8/v8/tools/gyp/v8.gyp +995 -0
  675. data/lib/libv8/v8/tools/js2c.py +364 -0
  676. data/lib/libv8/v8/tools/jsmin.py +280 -0
  677. data/lib/libv8/v8/tools/linux-tick-processor +35 -0
  678. data/lib/libv8/v8/tools/ll_prof.py +942 -0
  679. data/lib/libv8/v8/tools/logreader.js +185 -0
  680. data/lib/libv8/v8/tools/mac-nm +18 -0
  681. data/lib/libv8/v8/tools/mac-tick-processor +6 -0
  682. data/lib/libv8/v8/tools/oom_dump/README +31 -0
  683. data/lib/libv8/v8/tools/oom_dump/SConstruct +42 -0
  684. data/lib/libv8/v8/tools/oom_dump/oom_dump.cc +288 -0
  685. data/lib/libv8/v8/tools/presubmit.py +305 -0
  686. data/lib/libv8/v8/tools/process-heap-prof.py +120 -0
  687. data/lib/libv8/v8/tools/profile.js +751 -0
  688. data/lib/libv8/v8/tools/profile_view.js +219 -0
  689. data/lib/libv8/v8/tools/run-valgrind.py +77 -0
  690. data/lib/libv8/v8/tools/splaytree.js +316 -0
  691. data/lib/libv8/v8/tools/stats-viewer.py +468 -0
  692. data/lib/libv8/v8/tools/test.py +1510 -0
  693. data/lib/libv8/v8/tools/tickprocessor-driver.js +59 -0
  694. data/lib/libv8/v8/tools/tickprocessor.js +877 -0
  695. data/lib/libv8/v8/tools/utils.py +96 -0
  696. data/lib/libv8/v8/tools/visual_studio/README.txt +12 -0
  697. data/lib/libv8/v8/tools/windows-tick-processor.bat +30 -0
  698. data/lib/libv8/version.rb +4 -0
  699. data/libv8.gemspec +31 -0
  700. metadata +800 -0
@@ -0,0 +1,2369 @@
1
+ // Copyright 2011 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #ifndef V8_SPACES_H_
29
+ #define V8_SPACES_H_
30
+
31
+ #include "allocation.h"
32
+ #include "list.h"
33
+ #include "log.h"
34
+
35
+ namespace v8 {
36
+ namespace internal {
37
+
38
+ class Isolate;
39
+
40
+ // -----------------------------------------------------------------------------
41
+ // Heap structures:
42
+ //
43
+ // A JS heap consists of a young generation, an old generation, and a large
44
+ // object space. The young generation is divided into two semispaces. A
45
+ // scavenger implements Cheney's copying algorithm. The old generation is
46
+ // separated into a map space and an old object space. The map space contains
47
+ // all (and only) map objects, the rest of old objects go into the old space.
48
+ // The old generation is collected by a mark-sweep-compact collector.
49
+ //
50
+ // The semispaces of the young generation are contiguous. The old and map
51
+ // spaces consists of a list of pages. A page has a page header and an object
52
+ // area. A page size is deliberately chosen as 8K bytes.
53
+ // The first word of a page is an opaque page header that has the
54
+ // address of the next page and its ownership information. The second word may
55
+ // have the allocation top address of this page. Heap objects are aligned to the
56
+ // pointer size.
57
+ //
58
+ // There is a separate large object space for objects larger than
59
+ // Page::kMaxHeapObjectSize, so that they do not have to move during
60
+ // collection. The large object space is paged. Pages in large object space
61
+ // may be larger than 8K.
62
+ //
63
+ // A card marking write barrier is used to keep track of intergenerational
64
+ // references. Old space pages are divided into regions of Page::kRegionSize
65
+ // size. Each region has a corresponding dirty bit in the page header which is
66
+ // set if the region might contain pointers to new space. For details about
67
+ // dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
68
+ // method body.
69
+ //
70
+ // During scavenges and mark-sweep collections we iterate intergenerational
71
+ // pointers without decoding heap object maps so if the page belongs to old
72
+ // pointer space or large object space it is essential to guarantee that
73
+ // the page does not contain any garbage pointers to new space: every pointer
74
+ // aligned word which satisfies the Heap::InNewSpace() predicate must be a
75
+ // pointer to a live heap object in new space. Thus objects in old pointer
76
+ // and large object spaces should have a special layout (e.g. no bare integer
77
+ // fields). This requirement does not apply to map space which is iterated in
78
+ // a special fashion. However we still require pointer fields of dead maps to
79
+ // be cleaned.
80
+ //
81
+ // To enable lazy cleaning of old space pages we use a notion of allocation
82
+ // watermark. Every pointer under watermark is considered to be well formed.
83
+ // Page allocation watermark is not necessarily equal to page allocation top but
84
+ // all alive objects on page should reside under allocation watermark.
85
+ // During scavenge allocation watermark might be bumped and invalid pointers
86
+ // might appear below it. To avoid following them we store a valid watermark
87
+ // into special field in the page header and set a page WATERMARK_INVALIDATED
88
+ // flag. For details see comments in the Page::SetAllocationWatermark() method
89
+ // body.
90
+ //
91
+
92
+ // Some assertion macros used in the debugging mode.
93
+
94
+ #define ASSERT_PAGE_ALIGNED(address) \
95
+ ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
96
+
97
+ #define ASSERT_OBJECT_ALIGNED(address) \
98
+ ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
99
+
100
+ #define ASSERT_MAP_ALIGNED(address) \
101
+ ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
102
+
103
+ #define ASSERT_OBJECT_SIZE(size) \
104
+ ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
105
+
106
+ #define ASSERT_PAGE_OFFSET(offset) \
107
+ ASSERT((Page::kObjectStartOffset <= offset) \
108
+ && (offset <= Page::kPageSize))
109
+
110
+ #define ASSERT_MAP_PAGE_INDEX(index) \
111
+ ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
112
+
113
+
114
+ class PagedSpace;
115
+ class MemoryAllocator;
116
+ class AllocationInfo;
117
+
118
+ // -----------------------------------------------------------------------------
119
+ // A page normally has 8K bytes. Large object pages may be larger. A page
120
+ // address is always aligned to the 8K page size.
121
+ //
122
+ // Each page starts with a header of Page::kPageHeaderSize size which contains
123
+ // bookkeeping data.
124
+ //
125
+ // The mark-compact collector transforms a map pointer into a page index and a
126
+ // page offset. The exact encoding is described in the comments for
127
+ // class MapWord in objects.h.
128
+ //
129
+ // The only way to get a page pointer is by calling factory methods:
130
+ // Page* p = Page::FromAddress(addr); or
131
+ // Page* p = Page::FromAllocationTop(top);
132
+ class Page {
133
+ public:
134
+ // Returns the page containing a given address. The address ranges
135
+ // from [page_addr .. page_addr + kPageSize[
136
+ //
137
+ // Note that this function only works for addresses in normal paged
138
+ // spaces and addresses in the first 8K of large object pages (i.e.,
139
+ // the start of large objects but not necessarily derived pointers
140
+ // within them).
141
+ INLINE(static Page* FromAddress(Address a)) {
142
+ return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
143
+ }
144
+
145
+ // Returns the page containing an allocation top. Because an allocation
146
+ // top address can be the upper bound of the page, we need to subtract
147
+ // it with kPointerSize first. The address ranges from
148
+ // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
149
+ INLINE(static Page* FromAllocationTop(Address top)) {
150
+ Page* p = FromAddress(top - kPointerSize);
151
+ ASSERT_PAGE_OFFSET(p->Offset(top));
152
+ return p;
153
+ }
154
+
155
+ // Returns the start address of this page.
156
+ Address address() { return reinterpret_cast<Address>(this); }
157
+
158
+ // Checks whether this is a valid page address.
159
+ bool is_valid() { return address() != NULL; }
160
+
161
+ // Returns the next page of this page.
162
+ inline Page* next_page();
163
+
164
+ // Return the end of allocation in this page. Undefined for unused pages.
165
+ inline Address AllocationTop();
166
+
167
+ // Return the allocation watermark for the page.
168
+ // For old space pages it is guaranteed that the area under the watermark
169
+ // does not contain any garbage pointers to new space.
170
+ inline Address AllocationWatermark();
171
+
172
+ // Return the allocation watermark offset from the beginning of the page.
173
+ inline uint32_t AllocationWatermarkOffset();
174
+
175
+ inline void SetAllocationWatermark(Address allocation_watermark);
176
+
177
+ inline void SetCachedAllocationWatermark(Address allocation_watermark);
178
+ inline Address CachedAllocationWatermark();
179
+
180
+ // Returns the start address of the object area in this page.
181
+ Address ObjectAreaStart() { return address() + kObjectStartOffset; }
182
+
183
+ // Returns the end address (exclusive) of the object area in this page.
184
+ Address ObjectAreaEnd() { return address() + Page::kPageSize; }
185
+
186
+ // Checks whether an address is page aligned.
187
+ static bool IsAlignedToPageSize(Address a) {
188
+ return 0 == (OffsetFrom(a) & kPageAlignmentMask);
189
+ }
190
+
191
+ // True if this page was in use before current compaction started.
192
+ // Result is valid only for pages owned by paged spaces and
193
+ // only after PagedSpace::PrepareForMarkCompact was called.
194
+ inline bool WasInUseBeforeMC();
195
+
196
+ inline void SetWasInUseBeforeMC(bool was_in_use);
197
+
198
+ // True if this page is a large object page.
199
+ inline bool IsLargeObjectPage();
200
+
201
+ inline void SetIsLargeObjectPage(bool is_large_object_page);
202
+
203
+ inline bool IsPageExecutable();
204
+
205
+ inline void SetIsPageExecutable(bool is_page_executable);
206
+
207
+ // Returns the offset of a given address to this page.
208
+ INLINE(int Offset(Address a)) {
209
+ int offset = static_cast<int>(a - address());
210
+ ASSERT_PAGE_OFFSET(offset);
211
+ return offset;
212
+ }
213
+
214
+ // Returns the address for a given offset to the this page.
215
+ Address OffsetToAddress(int offset) {
216
+ ASSERT_PAGE_OFFSET(offset);
217
+ return address() + offset;
218
+ }
219
+
220
+ // ---------------------------------------------------------------------
221
+ // Card marking support
222
+
223
+ static const uint32_t kAllRegionsCleanMarks = 0x0;
224
+ static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
225
+
226
+ inline uint32_t GetRegionMarks();
227
+ inline void SetRegionMarks(uint32_t dirty);
228
+
229
+ inline uint32_t GetRegionMaskForAddress(Address addr);
230
+ inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
231
+ inline int GetRegionNumberForAddress(Address addr);
232
+
233
+ inline void MarkRegionDirty(Address addr);
234
+ inline bool IsRegionDirty(Address addr);
235
+
236
+ inline void ClearRegionMarks(Address start,
237
+ Address end,
238
+ bool reaches_limit);
239
+
240
+ // Page size in bytes. This must be a multiple of the OS page size.
241
+ static const int kPageSize = 1 << kPageSizeBits;
242
+
243
+ // Page size mask.
244
+ static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
245
+
246
+ static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
247
+ kIntSize + kPointerSize + kPointerSize;
248
+
249
+ // The start offset of the object area in a page. Aligned to both maps and
250
+ // code alignment to be suitable for both.
251
+ static const int kObjectStartOffset =
252
+ CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
253
+
254
+ // Object area size in bytes.
255
+ static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
256
+
257
+ // Maximum object size that fits in a page.
258
+ static const int kMaxHeapObjectSize = kObjectAreaSize;
259
+
260
+ static const int kDirtyFlagOffset = 2 * kPointerSize;
261
+ static const int kRegionSizeLog2 = 8;
262
+ static const int kRegionSize = 1 << kRegionSizeLog2;
263
+ static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
264
+
265
+ STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
266
+
267
+ enum PageFlag {
268
+ IS_NORMAL_PAGE = 0,
269
+ WAS_IN_USE_BEFORE_MC,
270
+
271
+ // Page allocation watermark was bumped by preallocation during scavenge.
272
+ // Correct watermark can be retrieved by CachedAllocationWatermark() method
273
+ WATERMARK_INVALIDATED,
274
+ IS_EXECUTABLE,
275
+ NUM_PAGE_FLAGS // Must be last
276
+ };
277
+ static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
278
+
279
+ // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
280
+ // scavenge we just invalidate the watermark on each old space page after
281
+ // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
282
+ // flag at the beginning of the next scavenge and each page becomes marked as
283
+ // having a valid watermark.
284
+ //
285
+ // The following invariant must hold for pages in old pointer and map spaces:
286
+ // If page is in use then page is marked as having invalid watermark at
287
+ // the beginning and at the end of any GC.
288
+ //
289
+ // This invariant guarantees that after flipping flag meaning at the
290
+ // beginning of scavenge all pages in use will be marked as having valid
291
+ // watermark.
292
+ static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
293
+
294
+ // Returns true if the page allocation watermark was not altered during
295
+ // scavenge.
296
+ inline bool IsWatermarkValid();
297
+
298
+ inline void InvalidateWatermark(bool value);
299
+
300
+ inline bool GetPageFlag(PageFlag flag);
301
+ inline void SetPageFlag(PageFlag flag, bool value);
302
+ inline void ClearPageFlags();
303
+
304
+ inline void ClearGCFields();
305
+
306
+ static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
307
+ static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
308
+ static const uint32_t kAllocationWatermarkOffsetMask =
309
+ ((1 << kAllocationWatermarkOffsetBits) - 1) <<
310
+ kAllocationWatermarkOffsetShift;
311
+
312
+ static const uint32_t kFlagsMask =
313
+ ((1 << kAllocationWatermarkOffsetShift) - 1);
314
+
315
+ STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
316
+ kAllocationWatermarkOffsetBits);
317
+
318
+ //---------------------------------------------------------------------------
319
+ // Page header description.
320
+ //
321
+ // If a page is not in the large object space, the first word,
322
+ // opaque_header, encodes the next page address (aligned to kPageSize 8K)
323
+ // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
324
+ // opaque_header. The value range of the opaque_header is [0..kPageSize[,
325
+ // or [next_page_start, next_page_end[. It cannot point to a valid address
326
+ // in the current page. If a page is in the large object space, the first
327
+ // word *may* (if the page start and large object chunk start are the
328
+ // same) contain the address of the next large object chunk.
329
+ intptr_t opaque_header;
330
+
331
+ // If the page is not in the large object space, the low-order bit of the
332
+ // second word is set. If the page is in the large object space, the
333
+ // second word *may* (if the page start and large object chunk start are
334
+ // the same) contain the large object chunk size. In either case, the
335
+ // low-order bit for large object pages will be cleared.
336
+ // For normal pages this word is used to store page flags and
337
+ // offset of allocation top.
338
+ intptr_t flags_;
339
+
340
+ // This field contains dirty marks for regions covering the page. Only dirty
341
+ // regions might contain intergenerational references.
342
+ // Only 32 dirty marks are supported so for large object pages several regions
343
+ // might be mapped to a single dirty mark.
344
+ uint32_t dirty_regions_;
345
+
346
+ // The index of the page in its owner space.
347
+ int mc_page_index;
348
+
349
+ // During mark-compact collections this field contains the forwarding address
350
+ // of the first live object in this page.
351
+ // During scavenge collection this field is used to store allocation watermark
352
+ // if it is altered during scavenge.
353
+ Address mc_first_forwarded;
354
+
355
+ Heap* heap_;
356
+ };
357
+
358
+
359
+ // ----------------------------------------------------------------------------
360
+ // Space is the abstract superclass for all allocation spaces.
361
+ class Space : public Malloced {
362
+ public:
363
+ Space(Heap* heap, AllocationSpace id, Executability executable)
364
+ : heap_(heap), id_(id), executable_(executable) {}
365
+
366
+ virtual ~Space() {}
367
+
368
+ Heap* heap() const { return heap_; }
369
+
370
+ // Does the space need executable memory?
371
+ Executability executable() { return executable_; }
372
+
373
+ // Identity used in error reporting.
374
+ AllocationSpace identity() { return id_; }
375
+
376
+ // Returns allocated size.
377
+ virtual intptr_t Size() = 0;
378
+
379
+ // Returns size of objects. Can differ from the allocated size
380
+ // (e.g. see LargeObjectSpace).
381
+ virtual intptr_t SizeOfObjects() { return Size(); }
382
+
383
+ #ifdef ENABLE_HEAP_PROTECTION
384
+ // Protect/unprotect the space by marking it read-only/writable.
385
+ virtual void Protect() = 0;
386
+ virtual void Unprotect() = 0;
387
+ #endif
388
+
389
+ #ifdef DEBUG
390
+ virtual void Print() = 0;
391
+ #endif
392
+
393
+ // After calling this we can allocate a certain number of bytes using only
394
+ // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
395
+ // without using freelists or causing a GC. This is used by partial
396
+ // snapshots. It returns true of space was reserved or false if a GC is
397
+ // needed. For paged spaces the space requested must include the space wasted
398
+ // at the end of each when allocating linearly.
399
+ virtual bool ReserveSpace(int bytes) = 0;
400
+
401
+ private:
402
+ Heap* heap_;
403
+ AllocationSpace id_;
404
+ Executability executable_;
405
+ };
406
+
407
+
408
+ // ----------------------------------------------------------------------------
409
+ // All heap objects containing executable code (code objects) must be allocated
410
+ // from a 2 GB range of memory, so that they can call each other using 32-bit
411
+ // displacements. This happens automatically on 32-bit platforms, where 32-bit
412
+ // displacements cover the entire 4GB virtual address space. On 64-bit
413
+ // platforms, we support this using the CodeRange object, which reserves and
414
+ // manages a range of virtual memory.
415
+ class CodeRange {
416
+ public:
417
+ // Reserves a range of virtual memory, but does not commit any of it.
418
+ // Can only be called once, at heap initialization time.
419
+ // Returns false on failure.
420
+ bool Setup(const size_t requested_size);
421
+
422
+ // Frees the range of virtual memory, and frees the data structures used to
423
+ // manage it.
424
+ void TearDown();
425
+
426
+ bool exists() { return code_range_ != NULL; }
427
+ bool contains(Address address) {
428
+ if (code_range_ == NULL) return false;
429
+ Address start = static_cast<Address>(code_range_->address());
430
+ return start <= address && address < start + code_range_->size();
431
+ }
432
+
433
+ // Allocates a chunk of memory from the large-object portion of
434
+ // the code range. On platforms with no separate code range, should
435
+ // not be called.
436
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
437
+ size_t* allocated);
438
+ void FreeRawMemory(void* buf, size_t length);
439
+
440
+ private:
441
+ CodeRange();
442
+
443
+ // The reserved range of virtual memory that all code objects are put in.
444
+ VirtualMemory* code_range_;
445
+ // Plain old data class, just a struct plus a constructor.
446
+ class FreeBlock {
447
+ public:
448
+ FreeBlock(Address start_arg, size_t size_arg)
449
+ : start(start_arg), size(size_arg) {}
450
+ FreeBlock(void* start_arg, size_t size_arg)
451
+ : start(static_cast<Address>(start_arg)), size(size_arg) {}
452
+
453
+ Address start;
454
+ size_t size;
455
+ };
456
+
457
+ // Freed blocks of memory are added to the free list. When the allocation
458
+ // list is exhausted, the free list is sorted and merged to make the new
459
+ // allocation list.
460
+ List<FreeBlock> free_list_;
461
+ // Memory is allocated from the free blocks on the allocation list.
462
+ // The block at current_allocation_block_index_ is the current block.
463
+ List<FreeBlock> allocation_list_;
464
+ int current_allocation_block_index_;
465
+
466
+ // Finds a block on the allocation list that contains at least the
467
+ // requested amount of memory. If none is found, sorts and merges
468
+ // the existing free memory blocks, and searches again.
469
+ // If none can be found, terminates V8 with FatalProcessOutOfMemory.
470
+ void GetNextAllocationBlock(size_t requested);
471
+ // Compares the start addresses of two free blocks.
472
+ static int CompareFreeBlockAddress(const FreeBlock* left,
473
+ const FreeBlock* right);
474
+
475
+ friend class Isolate;
476
+
477
+ Isolate* isolate_;
478
+
479
+ DISALLOW_COPY_AND_ASSIGN(CodeRange);
480
+ };
481
+
482
+
483
+ // ----------------------------------------------------------------------------
484
+ // A space acquires chunks of memory from the operating system. The memory
485
+ // allocator manages chunks for the paged heap spaces (old space and map
486
+ // space). A paged chunk consists of pages. Pages in a chunk have contiguous
487
+ // addresses and are linked as a list.
488
+ //
489
+ // The allocator keeps an initial chunk which is used for the new space. The
490
+ // leftover regions of the initial chunk are used for the initial chunks of
491
+ // old space and map space if they are big enough to hold at least one page.
492
+ // The allocator assumes that there is one old space and one map space, each
493
+ // expands the space by allocating kPagesPerChunk pages except the last
494
+ // expansion (before running out of space). The first chunk may contain fewer
495
+ // than kPagesPerChunk pages as well.
496
+ //
497
+ // The memory allocator also allocates chunks for the large object space, but
498
+ // they are managed by the space itself. The new space does not expand.
499
+ //
500
+ // The fact that pages for paged spaces are allocated and deallocated in chunks
501
+ // induces a constraint on the order of pages in a linked lists. We say that
502
+ // pages are linked in the chunk-order if and only if every two consecutive
503
+ // pages from the same chunk are consecutive in the linked list.
504
+ //
505
+
506
+
507
+ class MemoryAllocator {
508
+ public:
509
+ // Initializes its internal bookkeeping structures.
510
+ // Max capacity of the total space and executable memory limit.
511
+ bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
512
+
513
+ // Deletes valid chunks.
514
+ void TearDown();
515
+
516
+ // Reserves an initial address range of virtual memory to be split between
517
+ // the two new space semispaces, the old space, and the map space. The
518
+ // memory is not yet committed or assigned to spaces and split into pages.
519
+ // The initial chunk is unmapped when the memory allocator is torn down.
520
+ // This function should only be called when there is not already a reserved
521
+ // initial chunk (initial_chunk_ should be NULL). It returns the start
522
+ // address of the initial chunk if successful, with the side effect of
523
+ // setting the initial chunk, or else NULL if unsuccessful and leaves the
524
+ // initial chunk NULL.
525
+ void* ReserveInitialChunk(const size_t requested);
526
+
527
+ // Commits pages from an as-yet-unmanaged block of virtual memory into a
528
+ // paged space. The block should be part of the initial chunk reserved via
529
+ // a call to ReserveInitialChunk. The number of pages is always returned in
530
+ // the output parameter num_pages. This function assumes that the start
531
+ // address is non-null and that it is big enough to hold at least one
532
+ // page-aligned page. The call always succeeds, and num_pages is always
533
+ // greater than zero.
534
+ Page* CommitPages(Address start, size_t size, PagedSpace* owner,
535
+ int* num_pages);
536
+
537
+ // Commit a contiguous block of memory from the initial chunk. Assumes that
538
+ // the address is not NULL, the size is greater than zero, and that the
539
+ // block is contained in the initial chunk. Returns true if it succeeded
540
+ // and false otherwise.
541
+ bool CommitBlock(Address start, size_t size, Executability executable);
542
+
543
+ // Uncommit a contiguous block of memory [start..(start+size)[.
544
+ // start is not NULL, the size is greater than zero, and the
545
+ // block is contained in the initial chunk. Returns true if it succeeded
546
+ // and false otherwise.
547
+ bool UncommitBlock(Address start, size_t size);
548
+
549
+ // Zaps a contiguous block of memory [start..(start+size)[ thus
550
+ // filling it up with a recognizable non-NULL bit pattern.
551
+ void ZapBlock(Address start, size_t size);
552
+
553
+ // Attempts to allocate the requested (non-zero) number of pages from the
554
+ // OS. Fewer pages might be allocated than requested. If it fails to
555
+ // allocate memory for the OS or cannot allocate a single page, this
556
+ // function returns an invalid page pointer (NULL). The caller must check
557
+ // whether the returned page is valid (by calling Page::is_valid()). It is
558
+ // guaranteed that allocated pages have contiguous addresses. The actual
559
+ // number of allocated pages is returned in the output parameter
560
+ // allocated_pages. If the PagedSpace owner is executable and there is
561
+ // a code range, the pages are allocated from the code range.
562
+ Page* AllocatePages(int requested_pages, int* allocated_pages,
563
+ PagedSpace* owner);
564
+
565
+ // Frees pages from a given page and after. Requires pages to be
566
+ // linked in chunk-order (see comment for class).
567
+ // If 'p' is the first page of a chunk, pages from 'p' are freed
568
+ // and this function returns an invalid page pointer.
569
+ // Otherwise, the function searches a page after 'p' that is
570
+ // the first page of a chunk. Pages after the found page
571
+ // are freed and the function returns 'p'.
572
+ Page* FreePages(Page* p);
573
+
574
+ // Frees all pages owned by given space.
575
+ void FreeAllPages(PagedSpace* space);
576
+
577
+ // Allocates and frees raw memory of certain size.
578
+ // These are just thin wrappers around OS::Allocate and OS::Free,
579
+ // but keep track of allocated bytes as part of heap.
580
+ // If the flag is EXECUTABLE and a code range exists, the requested
581
+ // memory is allocated from the code range. If a code range exists
582
+ // and the freed memory is in it, the code range manages the freed memory.
583
+ MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
584
+ size_t* allocated,
585
+ Executability executable);
586
+ void FreeRawMemory(void* buf,
587
+ size_t length,
588
+ Executability executable);
589
+ void PerformAllocationCallback(ObjectSpace space,
590
+ AllocationAction action,
591
+ size_t size);
592
+
593
+ void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
594
+ ObjectSpace space,
595
+ AllocationAction action);
596
+ void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
597
+ bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
598
+
599
+ // Returns the maximum available bytes of heaps.
600
+ intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
601
+
602
+ // Returns allocated spaces in bytes.
603
+ intptr_t Size() { return size_; }
604
+
605
+ // Returns the maximum available executable bytes of heaps.
606
+ intptr_t AvailableExecutable() {
607
+ if (capacity_executable_ < size_executable_) return 0;
608
+ return capacity_executable_ - size_executable_;
609
+ }
610
+
611
+ // Returns allocated executable spaces in bytes.
612
+ intptr_t SizeExecutable() { return size_executable_; }
613
+
614
+ // Returns maximum available bytes that the old space can have.
615
+ intptr_t MaxAvailable() {
616
+ return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
617
+ }
618
+
619
+ // Links two pages.
620
+ inline void SetNextPage(Page* prev, Page* next);
621
+
622
+ // Returns the next page of a given page.
623
+ inline Page* GetNextPage(Page* p);
624
+
625
+ // Checks whether a page belongs to a space.
626
+ inline bool IsPageInSpace(Page* p, PagedSpace* space);
627
+
628
+ // Returns the space that owns the given page.
629
+ inline PagedSpace* PageOwner(Page* page);
630
+
631
+ // Finds the first/last page in the same chunk as a given page.
632
+ Page* FindFirstPageInSameChunk(Page* p);
633
+ Page* FindLastPageInSameChunk(Page* p);
634
+
635
+ // Relinks list of pages owned by space to make it chunk-ordered.
636
+ // Returns new first and last pages of space.
637
+ // Also returns last page in relinked list which has WasInUsedBeforeMC
638
+ // flag set.
639
+ void RelinkPageListInChunkOrder(PagedSpace* space,
640
+ Page** first_page,
641
+ Page** last_page,
642
+ Page** last_page_in_use);
643
+
644
+ #ifdef ENABLE_HEAP_PROTECTION
645
+ // Protect/unprotect a block of memory by marking it read-only/writable.
646
+ inline void Protect(Address start, size_t size);
647
+ inline void Unprotect(Address start, size_t size,
648
+ Executability executable);
649
+
650
+ // Protect/unprotect a chunk given a page in the chunk.
651
+ inline void ProtectChunkFromPage(Page* page);
652
+ inline void UnprotectChunkFromPage(Page* page);
653
+ #endif
654
+
655
+ #ifdef DEBUG
656
+ // Reports statistic info of the space.
657
+ void ReportStatistics();
658
+ #endif
659
+
660
+ // Due to encoding limitation, we can only have 8K chunks.
661
+ static const int kMaxNofChunks = 1 << kPageSizeBits;
662
+ // If a chunk has at least 16 pages, the maximum heap size is about
663
+ // 8K * 8K * 16 = 1G bytes.
664
+ #ifdef V8_TARGET_ARCH_X64
665
+ static const int kPagesPerChunk = 32;
666
+ // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
667
+ static const int kPagesPerChunkLog2 = 5;
668
+ static const int kChunkTableLevels = 4;
669
+ static const int kChunkTableBitsPerLevel = 12;
670
+ #else
671
+ static const int kPagesPerChunk = 16;
672
+ // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
673
+ static const int kPagesPerChunkLog2 = 4;
674
+ static const int kChunkTableLevels = 2;
675
+ static const int kChunkTableBitsPerLevel = 8;
676
+ #endif
677
+
678
+ private:
679
+ MemoryAllocator();
680
+
681
+ static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
682
+ static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
683
+
684
+ // Maximum space size in bytes.
685
+ intptr_t capacity_;
686
+ // Maximum subset of capacity_ that can be executable
687
+ intptr_t capacity_executable_;
688
+
689
+ // Allocated space size in bytes.
690
+ intptr_t size_;
691
+
692
+ // Allocated executable space size in bytes.
693
+ intptr_t size_executable_;
694
+
695
+ struct MemoryAllocationCallbackRegistration {
696
+ MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
697
+ ObjectSpace space,
698
+ AllocationAction action)
699
+ : callback(callback), space(space), action(action) {
700
+ }
701
+ MemoryAllocationCallback callback;
702
+ ObjectSpace space;
703
+ AllocationAction action;
704
+ };
705
+ // A List of callback that are triggered when memory is allocated or free'd
706
+ List<MemoryAllocationCallbackRegistration>
707
+ memory_allocation_callbacks_;
708
+
709
+ // The initial chunk of virtual memory.
710
+ VirtualMemory* initial_chunk_;
711
+
712
+ // Allocated chunk info: chunk start address, chunk size, and owning space.
713
+ class ChunkInfo BASE_EMBEDDED {
714
+ public:
715
+ ChunkInfo() : address_(NULL),
716
+ size_(0),
717
+ owner_(NULL),
718
+ executable_(NOT_EXECUTABLE),
719
+ owner_identity_(FIRST_SPACE) {}
720
+ inline void init(Address a, size_t s, PagedSpace* o);
721
+ Address address() { return address_; }
722
+ size_t size() { return size_; }
723
+ PagedSpace* owner() { return owner_; }
724
+ // We save executability of the owner to allow using it
725
+ // when collecting stats after the owner has been destroyed.
726
+ Executability executable() const { return executable_; }
727
+ AllocationSpace owner_identity() const { return owner_identity_; }
728
+
729
+ private:
730
+ Address address_;
731
+ size_t size_;
732
+ PagedSpace* owner_;
733
+ Executability executable_;
734
+ AllocationSpace owner_identity_;
735
+ };
736
+
737
+ // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
738
+ List<ChunkInfo> chunks_;
739
+ List<int> free_chunk_ids_;
740
+ int max_nof_chunks_;
741
+ int top_;
742
+
743
+ // Push/pop a free chunk id onto/from the stack.
744
+ void Push(int free_chunk_id);
745
+ int Pop();
746
+ bool OutOfChunkIds() { return top_ == 0; }
747
+
748
+ // Frees a chunk.
749
+ void DeleteChunk(int chunk_id);
750
+
751
+ // Basic check whether a chunk id is in the valid range.
752
+ inline bool IsValidChunkId(int chunk_id);
753
+
754
+ // Checks whether a chunk id identifies an allocated chunk.
755
+ inline bool IsValidChunk(int chunk_id);
756
+
757
+ // Returns the chunk id that a page belongs to.
758
+ inline int GetChunkId(Page* p);
759
+
760
+ // True if the address lies in the initial chunk.
761
+ inline bool InInitialChunk(Address address);
762
+
763
+ // Initializes pages in a chunk. Returns the first page address.
764
+ // This function and GetChunkId() are provided for the mark-compact
765
+ // collector to rebuild page headers in the from space, which is
766
+ // used as a marking stack and its page headers are destroyed.
767
+ Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
768
+ PagedSpace* owner);
769
+
770
+ Page* RelinkPagesInChunk(int chunk_id,
771
+ Address chunk_start,
772
+ size_t chunk_size,
773
+ Page* prev,
774
+ Page** last_page_in_use);
775
+
776
+ friend class Isolate;
777
+
778
+ Isolate* isolate_;
779
+
780
+ DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
781
+ };
782
+
783
+
784
+ // -----------------------------------------------------------------------------
785
+ // Interface for heap object iterator to be implemented by all object space
786
+ // object iterators.
787
+ //
788
+ // NOTE: The space specific object iterators also implements the own next()
789
+ // method which is used to avoid using virtual functions
790
+ // iterating a specific space.
791
+
792
+ class ObjectIterator : public Malloced {
793
+ public:
794
+ virtual ~ObjectIterator() { }
795
+
796
+ virtual HeapObject* next_object() = 0;
797
+ };
798
+
799
+
800
+ // -----------------------------------------------------------------------------
801
+ // Heap object iterator in new/old/map spaces.
802
+ //
803
+ // A HeapObjectIterator iterates objects from a given address to the
804
+ // top of a space. The given address must be below the current
805
+ // allocation pointer (space top). There are some caveats.
806
+ //
807
+ // (1) If the space top changes upward during iteration (because of
808
+ // allocating new objects), the iterator does not iterate objects
809
+ // above the original space top. The caller must create a new
810
+ // iterator starting from the old top in order to visit these new
811
+ // objects.
812
+ //
813
+ // (2) If new objects are allocated below the original allocation top
814
+ // (e.g., free-list allocation in paged spaces), the new objects
815
+ // may or may not be iterated depending on their position with
816
+ // respect to the current point of iteration.
817
+ //
818
+ // (3) The space top should not change downward during iteration,
819
+ // otherwise the iterator will return not-necessarily-valid
820
+ // objects.
821
+
822
+ class HeapObjectIterator: public ObjectIterator {
823
+ public:
824
+ // Creates a new object iterator in a given space. If a start
825
+ // address is not given, the iterator starts from the space bottom.
826
+ // If the size function is not given, the iterator calls the default
827
+ // Object::Size().
828
+ explicit HeapObjectIterator(PagedSpace* space);
829
+ HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
830
+ HeapObjectIterator(PagedSpace* space, Address start);
831
+ HeapObjectIterator(PagedSpace* space,
832
+ Address start,
833
+ HeapObjectCallback size_func);
834
+ HeapObjectIterator(Page* page, HeapObjectCallback size_func);
835
+
836
+ inline HeapObject* next() {
837
+ return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
838
+ }
839
+
840
+ // implementation of ObjectIterator.
841
+ virtual HeapObject* next_object() { return next(); }
842
+
843
+ private:
844
+ Address cur_addr_; // current iteration point
845
+ Address end_addr_; // end iteration point
846
+ Address cur_limit_; // current page limit
847
+ HeapObjectCallback size_func_; // size function
848
+ Page* end_page_; // caches the page of the end address
849
+
850
+ HeapObject* FromCurrentPage() {
851
+ ASSERT(cur_addr_ < cur_limit_);
852
+
853
+ HeapObject* obj = HeapObject::FromAddress(cur_addr_);
854
+ int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
855
+ ASSERT_OBJECT_SIZE(obj_size);
856
+
857
+ cur_addr_ += obj_size;
858
+ ASSERT(cur_addr_ <= cur_limit_);
859
+
860
+ return obj;
861
+ }
862
+
863
+ // Slow path of next, goes into the next page.
864
+ HeapObject* FromNextPage();
865
+
866
+ // Initializes fields.
867
+ void Initialize(Address start, Address end, HeapObjectCallback size_func);
868
+
869
+ #ifdef DEBUG
870
+ // Verifies whether fields have valid values.
871
+ void Verify();
872
+ #endif
873
+ };
874
+
875
+
876
+ // -----------------------------------------------------------------------------
877
+ // A PageIterator iterates the pages in a paged space.
878
+ //
879
+ // The PageIterator class provides three modes for iterating pages in a space:
880
+ // PAGES_IN_USE iterates pages containing allocated objects.
881
+ // PAGES_USED_BY_MC iterates pages that hold relocated objects during a
882
+ // mark-compact collection.
883
+ // ALL_PAGES iterates all pages in the space.
884
+ //
885
+ // There are some caveats.
886
+ //
887
+ // (1) If the space expands during iteration, new pages will not be
888
+ // returned by the iterator in any mode.
889
+ //
890
+ // (2) If new objects are allocated during iteration, they will appear
891
+ // in pages returned by the iterator. Allocation may cause the
892
+ // allocation pointer or MC allocation pointer in the last page to
893
+ // change between constructing the iterator and iterating the last
894
+ // page.
895
+ //
896
+ // (3) The space should not shrink during iteration, otherwise the
897
+ // iterator will return deallocated pages.
898
+
899
+ class PageIterator BASE_EMBEDDED {
900
+ public:
901
+ enum Mode {
902
+ PAGES_IN_USE,
903
+ PAGES_USED_BY_MC,
904
+ ALL_PAGES
905
+ };
906
+
907
+ PageIterator(PagedSpace* space, Mode mode);
908
+
909
+ inline bool has_next();
910
+ inline Page* next();
911
+
912
+ private:
913
+ PagedSpace* space_;
914
+ Page* prev_page_; // Previous page returned.
915
+ Page* stop_page_; // Page to stop at (last page returned by the iterator).
916
+ };
917
+
918
+
919
+ // -----------------------------------------------------------------------------
920
+ // A space has a list of pages. The next page can be accessed via
921
+ // Page::next_page() call. The next page of the last page is an
922
+ // invalid page pointer. A space can expand and shrink dynamically.
923
+
924
+ // An abstraction of allocation and relocation pointers in a page-structured
925
+ // space.
926
+ class AllocationInfo {
927
+ public:
928
+ Address top; // current allocation top
929
+ Address limit; // current allocation limit
930
+
931
+ #ifdef DEBUG
932
+ bool VerifyPagedAllocation() {
933
+ return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
934
+ && (top <= limit);
935
+ }
936
+ #endif
937
+ };
938
+
939
+
940
+ // An abstraction of the accounting statistics of a page-structured space.
941
+ // The 'capacity' of a space is the number of object-area bytes (ie, not
942
+ // including page bookkeeping structures) currently in the space. The 'size'
943
+ // of a space is the number of allocated bytes, the 'waste' in the space is
944
+ // the number of bytes that are not allocated and not available to
945
+ // allocation without reorganizing the space via a GC (eg, small blocks due
946
+ // to internal fragmentation, top of page areas in map space), and the bytes
947
+ // 'available' is the number of unallocated bytes that are not waste. The
948
+ // capacity is the sum of size, waste, and available.
949
+ //
950
+ // The stats are only set by functions that ensure they stay balanced. These
951
+ // functions increase or decrease one of the non-capacity stats in
952
+ // conjunction with capacity, or else they always balance increases and
953
+ // decreases to the non-capacity stats.
954
+ class AllocationStats BASE_EMBEDDED {
955
+ public:
956
+ AllocationStats() { Clear(); }
957
+
958
+ // Zero out all the allocation statistics (ie, no capacity).
959
+ void Clear() {
960
+ capacity_ = 0;
961
+ available_ = 0;
962
+ size_ = 0;
963
+ waste_ = 0;
964
+ }
965
+
966
+ // Reset the allocation statistics (ie, available = capacity with no
967
+ // wasted or allocated bytes).
968
+ void Reset() {
969
+ available_ = capacity_;
970
+ size_ = 0;
971
+ waste_ = 0;
972
+ }
973
+
974
+ // Accessors for the allocation statistics.
975
+ intptr_t Capacity() { return capacity_; }
976
+ intptr_t Available() { return available_; }
977
+ intptr_t Size() { return size_; }
978
+ intptr_t Waste() { return waste_; }
979
+
980
+ // Grow the space by adding available bytes.
981
+ void ExpandSpace(int size_in_bytes) {
982
+ capacity_ += size_in_bytes;
983
+ available_ += size_in_bytes;
984
+ }
985
+
986
+ // Shrink the space by removing available bytes.
987
+ void ShrinkSpace(int size_in_bytes) {
988
+ capacity_ -= size_in_bytes;
989
+ available_ -= size_in_bytes;
990
+ }
991
+
992
+ // Allocate from available bytes (available -> size).
993
+ void AllocateBytes(intptr_t size_in_bytes) {
994
+ available_ -= size_in_bytes;
995
+ size_ += size_in_bytes;
996
+ }
997
+
998
+ // Free allocated bytes, making them available (size -> available).
999
+ void DeallocateBytes(intptr_t size_in_bytes) {
1000
+ size_ -= size_in_bytes;
1001
+ available_ += size_in_bytes;
1002
+ }
1003
+
1004
+ // Waste free bytes (available -> waste).
1005
+ void WasteBytes(int size_in_bytes) {
1006
+ available_ -= size_in_bytes;
1007
+ waste_ += size_in_bytes;
1008
+ }
1009
+
1010
+ // Consider the wasted bytes to be allocated, as they contain filler
1011
+ // objects (waste -> size).
1012
+ void FillWastedBytes(intptr_t size_in_bytes) {
1013
+ waste_ -= size_in_bytes;
1014
+ size_ += size_in_bytes;
1015
+ }
1016
+
1017
+ private:
1018
+ intptr_t capacity_;
1019
+ intptr_t available_;
1020
+ intptr_t size_;
1021
+ intptr_t waste_;
1022
+ };
1023
+
1024
+
1025
+ class PagedSpace : public Space {
1026
+ public:
1027
+ // Creates a space with a maximum capacity, and an id.
1028
+ PagedSpace(Heap* heap,
1029
+ intptr_t max_capacity,
1030
+ AllocationSpace id,
1031
+ Executability executable);
1032
+
1033
+ virtual ~PagedSpace() {}
1034
+
1035
+ // Set up the space using the given address range of virtual memory (from
1036
+ // the memory allocator's initial chunk) if possible. If the block of
1037
+ // addresses is not big enough to contain a single page-aligned page, a
1038
+ // fresh chunk will be allocated.
1039
+ bool Setup(Address start, size_t size);
1040
+
1041
+ // Returns true if the space has been successfully set up and not
1042
+ // subsequently torn down.
1043
+ bool HasBeenSetup();
1044
+
1045
+ // Cleans up the space, frees all pages in this space except those belonging
1046
+ // to the initial chunk, uncommits addresses in the initial chunk.
1047
+ void TearDown();
1048
+
1049
+ // Checks whether an object/address is in this space.
1050
+ inline bool Contains(Address a);
1051
+ bool Contains(HeapObject* o) { return Contains(o->address()); }
1052
+ // Never crashes even if a is not a valid pointer.
1053
+ inline bool SafeContains(Address a);
1054
+
1055
+ // Given an address occupied by a live object, return that object if it is
1056
+ // in this space, or Failure::Exception() if it is not. The implementation
1057
+ // iterates over objects in the page containing the address, the cost is
1058
+ // linear in the number of objects in the page. It may be slow.
1059
+ MUST_USE_RESULT MaybeObject* FindObject(Address addr);
1060
+
1061
+ // Checks whether page is currently in use by this space.
1062
+ bool IsUsed(Page* page);
1063
+
1064
+ void MarkAllPagesClean();
1065
+
1066
+ // Prepares for a mark-compact GC.
1067
+ virtual void PrepareForMarkCompact(bool will_compact);
1068
+
1069
+ // The top of allocation in a page in this space. Undefined if page is unused.
1070
+ Address PageAllocationTop(Page* page) {
1071
+ return page == TopPageOf(allocation_info_) ? top()
1072
+ : PageAllocationLimit(page);
1073
+ }
1074
+
1075
+ // The limit of allocation for a page in this space.
1076
+ virtual Address PageAllocationLimit(Page* page) = 0;
1077
+
1078
+ void FlushTopPageWatermark() {
1079
+ AllocationTopPage()->SetCachedAllocationWatermark(top());
1080
+ AllocationTopPage()->InvalidateWatermark(true);
1081
+ }
1082
+
1083
+ // Current capacity without growing (Size() + Available() + Waste()).
1084
+ intptr_t Capacity() { return accounting_stats_.Capacity(); }
1085
+
1086
+ // Total amount of memory committed for this space. For paged
1087
+ // spaces this equals the capacity.
1088
+ intptr_t CommittedMemory() { return Capacity(); }
1089
+
1090
+ // Available bytes without growing.
1091
+ intptr_t Available() { return accounting_stats_.Available(); }
1092
+
1093
+ // Allocated bytes in this space.
1094
+ virtual intptr_t Size() { return accounting_stats_.Size(); }
1095
+
1096
+ // Wasted bytes due to fragmentation and not recoverable until the
1097
+ // next GC of this space.
1098
+ intptr_t Waste() { return accounting_stats_.Waste(); }
1099
+
1100
+ // Returns the address of the first object in this space.
1101
+ Address bottom() { return first_page_->ObjectAreaStart(); }
1102
+
1103
+ // Returns the allocation pointer in this space.
1104
+ Address top() { return allocation_info_.top; }
1105
+
1106
+ // Allocate the requested number of bytes in the space if possible, return a
1107
+ // failure object if not.
1108
+ MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
1109
+
1110
+ // Allocate the requested number of bytes for relocation during mark-compact
1111
+ // collection.
1112
+ MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
1113
+
1114
+ virtual bool ReserveSpace(int bytes);
1115
+
1116
+ // Used by ReserveSpace.
1117
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
1118
+
1119
+ // Free all pages in range from prev (exclusive) to last (inclusive).
1120
+ // Freed pages are moved to the end of page list.
1121
+ void FreePages(Page* prev, Page* last);
1122
+
1123
+ // Deallocates a block.
1124
+ virtual void DeallocateBlock(Address start,
1125
+ int size_in_bytes,
1126
+ bool add_to_freelist) = 0;
1127
+
1128
+ // Set space allocation info.
1129
+ void SetTop(Address top) {
1130
+ allocation_info_.top = top;
1131
+ allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
1132
+ }
1133
+
1134
+ // ---------------------------------------------------------------------------
1135
+ // Mark-compact collection support functions
1136
+
1137
+ // Set the relocation point to the beginning of the space.
1138
+ void MCResetRelocationInfo();
1139
+
1140
+ // Writes relocation info to the top page.
1141
+ void MCWriteRelocationInfoToPage() {
1142
+ TopPageOf(mc_forwarding_info_)->
1143
+ SetAllocationWatermark(mc_forwarding_info_.top);
1144
+ }
1145
+
1146
+ // Computes the offset of a given address in this space to the beginning
1147
+ // of the space.
1148
+ int MCSpaceOffsetForAddress(Address addr);
1149
+
1150
+ // Updates the allocation pointer to the relocation top after a mark-compact
1151
+ // collection.
1152
+ virtual void MCCommitRelocationInfo() = 0;
1153
+
1154
+ // Releases half of unused pages.
1155
+ void Shrink();
1156
+
1157
+ // Ensures that the capacity is at least 'capacity'. Returns false on failure.
1158
+ bool EnsureCapacity(int capacity);
1159
+
1160
+ #ifdef ENABLE_HEAP_PROTECTION
1161
+ // Protect/unprotect the space by marking it read-only/writable.
1162
+ void Protect();
1163
+ void Unprotect();
1164
+ #endif
1165
+
1166
+ #ifdef DEBUG
1167
+ // Print meta info and objects in this space.
1168
+ virtual void Print();
1169
+
1170
+ // Verify integrity of this space.
1171
+ virtual void Verify(ObjectVisitor* visitor);
1172
+
1173
+ // Overridden by subclasses to verify space-specific object
1174
+ // properties (e.g., only maps or free-list nodes are in map space).
1175
+ virtual void VerifyObject(HeapObject* obj) {}
1176
+
1177
+ // Report code object related statistics
1178
+ void CollectCodeStatistics();
1179
+ static void ReportCodeStatistics();
1180
+ static void ResetCodeStatistics();
1181
+ #endif
1182
+
1183
+ // Returns the page of the allocation pointer.
1184
+ Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1185
+
1186
+ void RelinkPageListInChunkOrder(bool deallocate_blocks);
1187
+
1188
+ protected:
1189
+ // Maximum capacity of this space.
1190
+ intptr_t max_capacity_;
1191
+
1192
+ // Accounting information for this space.
1193
+ AllocationStats accounting_stats_;
1194
+
1195
+ // The first page in this space.
1196
+ Page* first_page_;
1197
+
1198
+ // The last page in this space. Initially set in Setup, updated in
1199
+ // Expand and Shrink.
1200
+ Page* last_page_;
1201
+
1202
+ // True if pages owned by this space are linked in chunk-order.
1203
+ // See comment for class MemoryAllocator for definition of chunk-order.
1204
+ bool page_list_is_chunk_ordered_;
1205
+
1206
+ // Normal allocation information.
1207
+ AllocationInfo allocation_info_;
1208
+
1209
+ // Relocation information during mark-compact collections.
1210
+ AllocationInfo mc_forwarding_info_;
1211
+
1212
+ // Bytes of each page that cannot be allocated. Possibly non-zero
1213
+ // for pages in spaces with only fixed-size objects. Always zero
1214
+ // for pages in spaces with variable sized objects (those pages are
1215
+ // padded with free-list nodes).
1216
+ int page_extra_;
1217
+
1218
+ // Sets allocation pointer to a page bottom.
1219
+ static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
1220
+
1221
+ // Returns the top page specified by an allocation info structure.
1222
+ static Page* TopPageOf(AllocationInfo alloc_info) {
1223
+ return Page::FromAllocationTop(alloc_info.limit);
1224
+ }
1225
+
1226
+ int CountPagesToTop() {
1227
+ Page* p = Page::FromAllocationTop(allocation_info_.top);
1228
+ PageIterator it(this, PageIterator::ALL_PAGES);
1229
+ int counter = 1;
1230
+ while (it.has_next()) {
1231
+ if (it.next() == p) return counter;
1232
+ counter++;
1233
+ }
1234
+ UNREACHABLE();
1235
+ return -1;
1236
+ }
1237
+
1238
+ // Expands the space by allocating a fixed number of pages. Returns false if
1239
+ // it cannot allocate requested number of pages from OS. Newly allocated
1240
+ // pages are append to the last_page;
1241
+ bool Expand(Page* last_page);
1242
+
1243
+ // Generic fast case allocation function that tries linear allocation in
1244
+ // the top page of 'alloc_info'. Returns NULL on failure.
1245
+ inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1246
+ int size_in_bytes);
1247
+
1248
+ // During normal allocation or deserialization, roll to the next page in
1249
+ // the space (there is assumed to be one) and allocate there. This
1250
+ // function is space-dependent.
1251
+ virtual HeapObject* AllocateInNextPage(Page* current_page,
1252
+ int size_in_bytes) = 0;
1253
+
1254
+ // Slow path of AllocateRaw. This function is space-dependent.
1255
+ MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
1256
+
1257
+ // Slow path of MCAllocateRaw.
1258
+ MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
1259
+
1260
+ #ifdef DEBUG
1261
+ // Returns the number of total pages in this space.
1262
+ int CountTotalPages();
1263
+ #endif
1264
+ private:
1265
+
1266
+ // Returns a pointer to the page of the relocation pointer.
1267
+ Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1268
+
1269
+ friend class PageIterator;
1270
+ };
1271
+
1272
+
1273
+ #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1274
+ class NumberAndSizeInfo BASE_EMBEDDED {
1275
+ public:
1276
+ NumberAndSizeInfo() : number_(0), bytes_(0) {}
1277
+
1278
+ int number() const { return number_; }
1279
+ void increment_number(int num) { number_ += num; }
1280
+
1281
+ int bytes() const { return bytes_; }
1282
+ void increment_bytes(int size) { bytes_ += size; }
1283
+
1284
+ void clear() {
1285
+ number_ = 0;
1286
+ bytes_ = 0;
1287
+ }
1288
+
1289
+ private:
1290
+ int number_;
1291
+ int bytes_;
1292
+ };
1293
+
1294
+
1295
+ // HistogramInfo class for recording a single "bar" of a histogram. This
1296
+ // class is used for collecting statistics to print to stdout (when compiled
1297
+ // with DEBUG) or to the log file (when compiled with
1298
+ // ENABLE_LOGGING_AND_PROFILING).
1299
+ class HistogramInfo: public NumberAndSizeInfo {
1300
+ public:
1301
+ HistogramInfo() : NumberAndSizeInfo() {}
1302
+
1303
+ const char* name() { return name_; }
1304
+ void set_name(const char* name) { name_ = name; }
1305
+
1306
+ private:
1307
+ const char* name_;
1308
+ };
1309
+ #endif
1310
+
1311
+
1312
+ // -----------------------------------------------------------------------------
1313
+ // SemiSpace in young generation
1314
+ //
1315
+ // A semispace is a contiguous chunk of memory. The mark-compact collector
1316
+ // uses the memory in the from space as a marking stack when tracing live
1317
+ // objects.
1318
+
1319
// One half of the new space: a contiguous, flippable chunk of memory.
// The mark-compact collector reuses the from-space memory as a marking stack.
class SemiSpace : public Space {
 public:
  // Constructor.  start_ stays NULL until Setup() is called, which is what
  // HasBeenSetup() tests.
  explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
    start_ = NULL;
    age_mark_ = NULL;
  }

  // Sets up the semispace using the given chunk.
  bool Setup(Address start, int initial_capacity, int maximum_capacity);

  // Tear down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() { return start_ != NULL; }

  // Grow the size of the semispace by committing extra virtual memory.
  // Assumes that the caller has checked that the semispace has not reached
  // its maximum capacity (and thus there is space available in the reserved
  // address range to grow).
  bool Grow();

  // Grow the semispace to the new capacity.  The new capacity
  // requested must be larger than the current capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity.  The new capacity
  // requested must be more than the amount of used memory in the
  // semispace and less than the current capacity.
  bool ShrinkTo(int new_capacity);

  // Returns the start address of the space.
  Address low() { return start_; }
  // Returns one past the end address of the space.
  Address high() { return low() + capacity_; }

  // Age mark accessors.  The age mark separates objects that have survived
  // one scavenge from newly allocated ones (see NewSpace).
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark) { age_mark_ = mark; }

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).  Containment is tested with
  // a mask, which requires the space to be suitably aligned and sized.
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);
  }

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // The offset of an address from the beginning of the space.
  int SpaceOffsetForAddress(Address addr) {
    return static_cast<int>(addr - low());
  }

  // If we don't have these here then SemiSpace will be abstract.  However
  // they should never be called.
  virtual intptr_t Size() {
    UNREACHABLE();
    return 0;
  }

  virtual bool ReserveSpace(int bytes) {
    UNREACHABLE();
    return false;
  }

  // Commit/uncommit the backing virtual memory for this semispace.
  bool is_committed() { return committed_; }
  bool Commit();
  bool Uncommit();

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() {}
  virtual void Unprotect() {}
#endif

#ifdef DEBUG
  virtual void Print();
  virtual void Verify();
#endif

  // Returns the current capacity of the semi space.
  int Capacity() { return capacity_; }

  // Returns the maximum capacity of the semi space.
  int MaximumCapacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semi space.
  int InitialCapacity() { return initial_capacity_; }

 private:
  // The current, maximum and initial capacity of the space, in bytes.
  int capacity_;
  int maximum_capacity_;
  int initial_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Whether the backing memory is currently committed (see Commit/Uncommit).
  bool committed_;

 public:
  TRACK_MEMORY("SemiSpace")
};
1436
+
1437
+
1438
+ // A SemiSpaceIterator is an ObjectIterator that iterates over the active
1439
+ // semispace of the heap's new space. It iterates over the objects in the
1440
+ // semispace from a given start address (defaulting to the bottom of the
1441
+ // semispace) to the top of the semispace. New objects allocated after the
1442
+ // iterator is created are not iterated.
1443
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space.  If no start
  // address is given, the iterator starts from the bottom of the space.  If
  // no size function is given, the iterator calls Object::Size().
  explicit SemiSpaceIterator(NewSpace* space);
  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
  SemiSpaceIterator(NewSpace* space, Address start);

  // Returns the next object in the iteration, or NULL when the limit
  // (captured at iterator-creation time) is reached.
  HeapObject* next() {
    if (current_ == limit_) return NULL;

    HeapObject* object = HeapObject::FromAddress(current_);
    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

    // Objects are laid out contiguously, so advancing by the object's size
    // lands exactly on the next object's address.
    current_ += size;
    return object;
  }

  // Implementation of the ObjectIterator functions.
  virtual HeapObject* next_object() { return next(); }

 private:
  void Initialize(NewSpace* space, Address start, Address end,
                  HeapObjectCallback size_func);

  // The semispace.
  SemiSpace* space_;
  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function, or NULL to use Object::Size().
  HeapObjectCallback size_func_;
};
1478
+
1479
+
1480
+ // -----------------------------------------------------------------------------
1481
+ // The young generation space.
1482
+ //
1483
+ // The new space consists of a contiguous pair of semispaces. It simply
1484
+ // forwards most functions to the appropriate semispace.
1485
+
1486
class NewSpace : public Space {
 public:
  // Constructor.  The semispaces are unusable until Setup() is called.
  explicit NewSpace(Heap* heap)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        to_space_(heap),
        from_space_(heap) {}

  // Sets up the new space using the given chunk.
  bool Setup(Address start, int size);

  // Tears down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() {
    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
  }

  // Flip the pair of spaces (swap the roles of to-space and from-space).
  void Flip();

  // Grow the capacity of the semispaces.  Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);
  }
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
  // The same, but returning an int.  We have to have the one that returns
  // intptr_t because it is inherited, but if we know we are dealing with the
  // new space, which can't get as big as the other spaces then this is useful:
  int SizeAsInt() { return static_cast<int>(Size()); }

  // Return the current capacity of a semispace.
  intptr_t Capacity() {
    ASSERT(to_space_.Capacity() == from_space_.Capacity());
    return to_space_.Capacity();
  }

  // Return the total amount of memory committed for new space.
  intptr_t CommittedMemory() {
    if (from_space_.is_committed()) return 2 * Capacity();
    return Capacity();
  }

  // Return the available bytes without growing in the active semispace.
  intptr_t Available() { return Capacity() - Size(); }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
    return to_space_.MaximumCapacity();
  }

  // Returns the initial capacity of a semispace.
  int InitialCapacity() {
    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
    return to_space_.InitialCapacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() { return allocation_info_.top; }
  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.low(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask.  Anding an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }

  // The allocation top and limit addresses.  Exposed as pointers so that
  // generated code can load/store the allocation pointer directly.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  // Allocate the requested number of bytes in the active semispace.
  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Update the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Get the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Get the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

  virtual bool ReserveSpace(int bytes);

  // Resizes a sequential string which must be the most recent thing that was
  // allocated in new space.
  template <typename StringType>
  inline void ShrinkStringAtAllocationBoundary(String* string, int len);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect();
  virtual void Unprotect();
#endif

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object.  Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);
#endif

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

 private:
  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
#endif

  // Implementation of AllocateRaw and MCAllocateRaw.
  MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
      int size_in_bytes,
      AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};
1701
+
1702
+
1703
+ // -----------------------------------------------------------------------------
1704
+ // Free lists for old object spaces
1705
+ //
1706
+ // Free-list nodes are free blocks in the heap. They look like heap objects
1707
+ // (free-list node pointers have the heap object tag, and they have a map like
1708
+ // a heap object). They have a size and a next pointer. The next pointer is
1709
+ // the raw address of the next free list node (or NULL).
1710
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address.  This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size().  This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field (the raw address of the next free-list
  // node, or NULL).
  inline Address next(Heap* heap);
  inline void set_next(Heap* heap, Address next);

 private:
  // The next pointer is stored after the byte-array header, pointer-aligned.
  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
1736
+
1737
+
1738
+ // The free list for the old space.
1739
class OldSpaceFreeList BASE_EMBEDDED {
 public:
  OldSpaceFreeList(Heap* heap, AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() { return available_; }

  // Place a node on the free list.  The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list.  The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block.  Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed.  The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list.  The block
  // is uninitialized.  A failure is returned if no block is available.  The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
  MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);

  void MarkNodes();

 private:
  // The size range of blocks, in bytes.  (Smaller allocations are allowed, but
  // will always result in waste.)
  static const int kMinBlockSize = 2 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

  Heap* heap_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // Total available bytes in all blocks on this free list.
  int available_;

  // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in an increasingly ordered list.  Entries
  // corresponding to sizes < kMinBlockSize always have an empty free list
  // (but index kHead is used for the head of the size list).
  struct SizeNode {
    // Address of the head FreeListNode of the implied block size or NULL.
    Address head_node_;
    // Size (words) of the next larger available size if head_node_ != NULL.
    int next_size_;
  };
  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
  SizeNode free_[kFreeListsLength];

  // Sentinel elements for the size list.  Real elements are strictly between
  // kHead and kEnd, i.e. in the open interval ]kHead..kEnd[.
  static const int kHead = kMinBlockSize / kPointerSize - 1;
  static const int kEnd = kMaxInt;

  // We keep a "finger" in the size list to speed up a common pattern:
  // repeated requests for the same or increasing sizes.
  int finger_;

  // Starting from *prev, find and return the smallest size >= index (words),
  // or kEnd.  Update *prev to be the largest size < index, or kHead.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) {
      *prev = cur;
      cur = free_[cur].next_size_;
    }
    return cur;
  }

  // Remove an existing element from the size list.
  void RemoveSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur == index);
    free_[prev].next_size_ = free_[cur].next_size_;
    // Leave the finger on the predecessor so nearby lookups stay fast.
    finger_ = prev;
  }

  // Insert a new element into the size list.
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur != index);
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }

  // The size list is not updated during a sequence of calls to Free, but is
  // rebuilt before the next allocation.
  void RebuildSizeList();
  bool needs_rebuild_;

#ifdef DEBUG
  // Does this free list contain a free block located at the address of 'node'?
  bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};
1842
+
1843
+
1844
+ // The free list for the map space.
1845
class FixedSizeFreeList BASE_EMBEDDED {
 public:
  FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() { return available_; }

  // Place a node on the free list.  The block starting at 'start' (assumed to
  // have size object_size_) is placed on the free list.  Bookkeeping
  // information will be written to the block, i.e., its contents will be
  // destroyed.  The start address should be word aligned.
  void Free(Address start);

  // Allocate a fixed sized block from the free list.  The block is
  // uninitialized.  A failure is returned if no block is available.
  MUST_USE_RESULT MaybeObject* Allocate();

  void MarkNodes();

 private:

  Heap* heap_;

  // Available bytes on the free list.
  intptr_t available_;

  // The head of the free list.
  Address head_;

  // The tail of the free list.
  Address tail_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // The size of the objects in this space.
  int object_size_;

  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};
1889
+
1890
+
1891
+ // -----------------------------------------------------------------------------
1892
+ // Old object space (excluding map objects)
1893
+
1894
class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from OS.
  OldSpace(Heap* heap,
           intptr_t max_capacity,
           AllocationSpace id,
           Executability executable)
      : PagedSpace(heap, max_capacity, id, executable),
        free_list_(heap, id) {
    // Old-space objects can end anywhere on a page, so no per-page slack.
    page_extra_ = 0;
  }

  // The bytes available on the free list (i.e., not above the linear
  // allocation pointer).
  intptr_t AvailableFree() { return free_list_.available(); }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.
  // If add_to_freelist is false then just accounting stats are updated and
  // no attempt to add area to free list is made.
  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
    accounting_stats_.DeallocateBytes(size_in_bytes);

    if (add_to_freelist) {
      int wasted_bytes = free_list_.Free(start, size_in_bytes);
      accounting_stats_.WasteBytes(wasted_bytes);
    }
  }

  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist);

  // Prepare for full garbage collection.  Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);

  void MarkFreeListNodes() { free_list_.MarkNodes(); }

#ifdef DEBUG
  // Reports statistics for the space
  void ReportStatistics();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

 public:
  TRACK_MEMORY("OldSpace")
};
1965
+
1966
+
1967
+ // -----------------------------------------------------------------------------
1968
+ // Old space for objects of a fixed size
1969
+
1970
class FixedSpace : public PagedSpace {
 public:
  FixedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             int object_size_in_bytes,
             const char* name)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
        object_size_in_bytes_(object_size_in_bytes),
        name_(name),
        free_list_(heap, id, object_size_in_bytes) {
    // The tail of a page that cannot hold a whole fixed-size object is
    // unusable; exclude it from the per-page allocation limit.
    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->ObjectAreaEnd() - page_extra_;
  }

  int object_size_in_bytes() { return object_size_in_bytes_; }

  // Give a fixed sized block of memory to the space's free list.
  // If add_to_freelist is false then just accounting stats are updated and
  // no attempt to add area to free list is made.
  void Free(Address start, bool add_to_freelist) {
    if (add_to_freelist) {
      free_list_.Free(start);
    }
    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);

  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist);

  void MarkFreeListNodes() { free_list_.MarkNodes(); }

#ifdef DEBUG
  // Reports statistic info of the space
  void ReportStatistics();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

  void ResetFreeList() {
    free_list_.Reset();
  }

 private:
  // The size of objects in this space.
  int object_size_in_bytes_;

  // The name of this space.
  const char* name_;

  // The space's free list.
  FixedSizeFreeList free_list_;
};
2043
+
2044
+
2045
+ // -----------------------------------------------------------------------------
2046
+ // Old space for all map objects
2047
+
2048
class MapSpace : public FixedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(Heap* heap,
           intptr_t max_capacity,
           int max_map_space_pages,
           AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
        max_map_space_pages_(max_map_space_pages) {
    ASSERT(max_map_space_pages < kMaxMapPageIndex);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;

  // Are map pointers encodable into map word?
  bool MapPointersEncodable() {
    if (!FLAG_use_big_map_space) {
      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
      return true;
    }
    return CountPagesToTop() <= max_map_space_pages_;
  }

  // Should be called after forced sweep to find out if map space needs
  // compaction.
  bool NeedsCompaction(int live_maps) {
    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
  }

  // Computes where the allocation top will be after compacting the given
  // number of live maps into the lowest pages of the space.
  Address TopAfterCompaction(int live_maps) {
    ASSERT(NeedsCompaction(live_maps));

    // Full pages worth of maps, then a partial page holding the remainder.
    int pages_left = live_maps / kMapsPerPage;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (pages_left-- > 0) {
      ASSERT(it.has_next());
      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
    }
    ASSERT(it.has_next());
    Page* top_page = it.next();
    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
    ASSERT(top_page->is_valid());

    int offset = live_maps % kMapsPerPage * Map::kSize;
    Address top = top_page->ObjectAreaStart() + offset;
    ASSERT(top < top_page->ObjectAreaEnd());
    ASSERT(Contains(top));

    return top;
  }

  // Commits the result of a compaction: updates allocation info, accounting
  // stats and allocation watermarks, then releases unused pages.
  void FinishCompaction(Address new_top, int live_maps) {
    Page* top_page = Page::FromAddress(new_top);
    ASSERT(top_page->is_valid());

    SetAllocationInfo(&allocation_info_, top_page);
    allocation_info_.top = new_top;

    int new_size = live_maps * Map::kSize;
    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
    accounting_stats_.AllocateBytes(new_size);

    // Flush allocation watermarks.
    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
      p->SetAllocationWatermark(p->AllocationTop());
    }
    top_page->SetAllocationWatermark(new_top);

#ifdef DEBUG
    if (FLAG_enable_slow_asserts) {
      // Recompute the space size from the pages and compare with the stats.
      intptr_t actual_size = 0;
      for (Page* p = first_page_; p != top_page; p = p->next_page())
        actual_size += kMapsPerPage * Map::kSize;
      actual_size += (new_top - top_page->ObjectAreaStart());
      ASSERT(accounting_stats_.Size() == actual_size);
    }
#endif

    Shrink();
    ResetFreeList();
  }

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 private:
  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;

  // An array of page start address in a map space.
  Address page_addresses_[kMaxMapPageIndex];

 public:
  TRACK_MEMORY("MapSpace")
};
2157
+
2158
+
2159
+ // -----------------------------------------------------------------------------
2160
+ // Old space for all global object property cell objects
2161
+
2162
class CellSpace : public FixedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
  {}

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 public:
  TRACK_MEMORY("CellSpace")
};
2177
+
2178
+
2179
+ // -----------------------------------------------------------------------------
2180
+ // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2181
+ // the large object space. A large object is allocated from OS heap with
2182
+ // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2183
+ // A large object always starts at offset Page::kObjectStartOffset within its page.
2184
+ // Large objects do not move during garbage collections.
2185
+
2186
+ // A LargeObjectChunk holds exactly one large object page with exactly one
2187
+ // large object.
2188
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk that contains a large object page
  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
  // object) bytes after the object area start of that page.
  static LargeObjectChunk* New(int size_in_bytes, Executability executable);

  // Free the memory associated with the chunk.
  inline void Free(Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
  // The low bits of size_ double as page flags, so mask them off here.
  size_t size() { return size_ & ~Page::kPageFlagMask; }

  // Compute the start address in the chunk.
  inline Address GetStartAddress();

  // Returns the object in this chunk.
  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }

  // Given a requested size returns the physical size of a chunk to be
  // allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate.  Used by
  // LargeObjectSpace::Available.
  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The total size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};
2238
+
2239
+
2240
// The space that manages large objects (> Page::kMaxHeapObjectSize, per the
// file comment above).  Objects live in a linked list of LargeObjectChunks,
// one object per chunk; objects in this space never move during GC.
class LargeObjectSpace : public Space {
 public:
  LargeObjectSpace(Heap* heap, AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.  Returns false on failure.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);

  // Available bytes for objects in this space.
  inline intptr_t Available();

  // Total bytes allocated for this space, including per-chunk overhead.
  virtual intptr_t Size() {
    return size_;
  }

  // Bytes occupied by the objects themselves (excludes chunk overhead).
  virtual intptr_t SizeOfObjects() {
    return objects_size_;
  }

  // Number of chunks (== number of large objects) in this space.
  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space, may be slow.
  MaybeObject* FindObject(Address a);

  // Finds a large object page containing the given pc, returns NULL
  // if such a page doesn't exist.
  LargeObjectChunk* FindChunkContainingPc(Address pc);

  // Iterates objects covered by dirty regions.
  void IterateDirtyRegions(ObjectSlotCallback func);

  // Frees unmarked objects.  Called by the garbage collector after marking
  // -- NOTE(review): inferred from the name; implementation is in the .cc.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty (no chunks in the list).
  bool IsEmpty() { return first_chunk_ == NULL; }

  // See the comments for ReserveSpace in the Space class.  This has to be
  // called after ReserveSpace has been called on the paged spaces, since they
  // may use some memory, leaving less for large objects.
  virtual bool ReserveSpace(int bytes);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
#endif
  // Checks whether an address is in the object area in this space.  It
  // iterates all objects in the space.  May be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  intptr_t size_;          // allocated bytes
  int page_count_;         // number of chunks
  intptr_t objects_size_;  // size of objects

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
                                                   int object_size,
                                                   Executability executable);

  // The iterator walks first_chunk_ directly.
  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};
2333
+
2334
+
2335
// Iterates over the objects of a LargeObjectSpace by following its chunk
// list (it is a friend of LargeObjectSpace).
class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  // The optional size_func callback presumably overrides how each object's
  // size is obtained during iteration -- NOTE(review): confirm in the .cc.
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  // Returns the next object, or NULL when the space is exhausted
  // -- NOTE(review): termination value inferred; confirm in the .cc.
  HeapObject* next();

  // implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  // Chunk whose object next() will return.
  LargeObjectChunk* current_;
  // Optional size callback; see the two-argument constructor.
  HeapObjectCallback size_func_;
};
2349
+
2350
+
2351
+ #ifdef DEBUG
2352
// DEBUG-only record that accumulates statistics for one kind of code
// comment: the comment text, the total size attributed to it, and how many
// times it occurred.
struct CommentStatistic {
  const char* comment;  // identifying comment text (NULL when slot unused)
  int size;             // accumulated size in bytes
  int count;            // number of occurrences
  // Resets the record to the unused state.
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
2364
+ #endif
2365
+
2366
+
2367
+ } } // namespace v8::internal
2368
+
2369
+ #endif // V8_SPACES_H_