therubyracer 0.9.0beta4 → 0.9.0beta5

Potentially problematic release.


This version of therubyracer might be problematic.
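
If you still want to try this prerelease despite the warning, a minimal Gemfile sketch follows. It assumes Bundler manages the application's gems and that the version string in the heading above ("0.9.0beta5") is the exact published gem version; adjust or drop the pin as needed.

  # Gemfile: minimal sketch, assuming Bundler
  source 'https://rubygems.org'

  # Pin the exact prerelease named on this page; swap in a stable
  # release instead if this version turns out to be problematic.
  gem 'therubyracer', '0.9.0beta5'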

Files changed (771)
  1. data/.gitignore +1 -0
  2. data/Rakefile +0 -11
  3. data/ext/v8/extconf.rb +9 -9
  4. data/ext/v8/v8_external.cpp +40 -10
  5. data/lib/v8/cli.rb +2 -1
  6. data/lib/v8/version.rb +1 -1
  7. data/spec/redjs/.gitignore +1 -0
  8. data/therubyracer.gemspec +2 -3
  9. metadata +33 -779
  10. data/ext/v8/upstream/Makefile +0 -38
  11. data/ext/v8/upstream/detect_cpu.rb +0 -27
  12. data/ext/v8/upstream/fpic-on-linux-amd64.patch +0 -13
  13. data/ext/v8/upstream/scons/CHANGES.txt +0 -5183
  14. data/ext/v8/upstream/scons/LICENSE.txt +0 -20
  15. data/ext/v8/upstream/scons/MANIFEST +0 -202
  16. data/ext/v8/upstream/scons/PKG-INFO +0 -13
  17. data/ext/v8/upstream/scons/README.txt +0 -273
  18. data/ext/v8/upstream/scons/RELEASE.txt +0 -1040
  19. data/ext/v8/upstream/scons/engine/SCons/Action.py +0 -1256
  20. data/ext/v8/upstream/scons/engine/SCons/Builder.py +0 -868
  21. data/ext/v8/upstream/scons/engine/SCons/CacheDir.py +0 -217
  22. data/ext/v8/upstream/scons/engine/SCons/Conftest.py +0 -794
  23. data/ext/v8/upstream/scons/engine/SCons/Debug.py +0 -237
  24. data/ext/v8/upstream/scons/engine/SCons/Defaults.py +0 -485
  25. data/ext/v8/upstream/scons/engine/SCons/Environment.py +0 -2327
  26. data/ext/v8/upstream/scons/engine/SCons/Errors.py +0 -207
  27. data/ext/v8/upstream/scons/engine/SCons/Executor.py +0 -636
  28. data/ext/v8/upstream/scons/engine/SCons/Job.py +0 -435
  29. data/ext/v8/upstream/scons/engine/SCons/Memoize.py +0 -292
  30. data/ext/v8/upstream/scons/engine/SCons/Node/Alias.py +0 -153
  31. data/ext/v8/upstream/scons/engine/SCons/Node/FS.py +0 -3220
  32. data/ext/v8/upstream/scons/engine/SCons/Node/Python.py +0 -128
  33. data/ext/v8/upstream/scons/engine/SCons/Node/__init__.py +0 -1341
  34. data/ext/v8/upstream/scons/engine/SCons/Options/BoolOption.py +0 -50
  35. data/ext/v8/upstream/scons/engine/SCons/Options/EnumOption.py +0 -50
  36. data/ext/v8/upstream/scons/engine/SCons/Options/ListOption.py +0 -50
  37. data/ext/v8/upstream/scons/engine/SCons/Options/PackageOption.py +0 -50
  38. data/ext/v8/upstream/scons/engine/SCons/Options/PathOption.py +0 -76
  39. data/ext/v8/upstream/scons/engine/SCons/Options/__init__.py +0 -74
  40. data/ext/v8/upstream/scons/engine/SCons/PathList.py +0 -232
  41. data/ext/v8/upstream/scons/engine/SCons/Platform/__init__.py +0 -236
  42. data/ext/v8/upstream/scons/engine/SCons/Platform/aix.py +0 -70
  43. data/ext/v8/upstream/scons/engine/SCons/Platform/cygwin.py +0 -55
  44. data/ext/v8/upstream/scons/engine/SCons/Platform/darwin.py +0 -46
  45. data/ext/v8/upstream/scons/engine/SCons/Platform/hpux.py +0 -46
  46. data/ext/v8/upstream/scons/engine/SCons/Platform/irix.py +0 -44
  47. data/ext/v8/upstream/scons/engine/SCons/Platform/os2.py +0 -58
  48. data/ext/v8/upstream/scons/engine/SCons/Platform/posix.py +0 -264
  49. data/ext/v8/upstream/scons/engine/SCons/Platform/sunos.py +0 -50
  50. data/ext/v8/upstream/scons/engine/SCons/Platform/win32.py +0 -386
  51. data/ext/v8/upstream/scons/engine/SCons/SConf.py +0 -1038
  52. data/ext/v8/upstream/scons/engine/SCons/SConsign.py +0 -381
  53. data/ext/v8/upstream/scons/engine/SCons/Scanner/C.py +0 -132
  54. data/ext/v8/upstream/scons/engine/SCons/Scanner/D.py +0 -74
  55. data/ext/v8/upstream/scons/engine/SCons/Scanner/Dir.py +0 -111
  56. data/ext/v8/upstream/scons/engine/SCons/Scanner/Fortran.py +0 -320
  57. data/ext/v8/upstream/scons/engine/SCons/Scanner/IDL.py +0 -48
  58. data/ext/v8/upstream/scons/engine/SCons/Scanner/LaTeX.py +0 -378
  59. data/ext/v8/upstream/scons/engine/SCons/Scanner/Prog.py +0 -103
  60. data/ext/v8/upstream/scons/engine/SCons/Scanner/RC.py +0 -55
  61. data/ext/v8/upstream/scons/engine/SCons/Scanner/__init__.py +0 -415
  62. data/ext/v8/upstream/scons/engine/SCons/Script/Interactive.py +0 -386
  63. data/ext/v8/upstream/scons/engine/SCons/Script/Main.py +0 -1360
  64. data/ext/v8/upstream/scons/engine/SCons/Script/SConsOptions.py +0 -944
  65. data/ext/v8/upstream/scons/engine/SCons/Script/SConscript.py +0 -642
  66. data/ext/v8/upstream/scons/engine/SCons/Script/__init__.py +0 -414
  67. data/ext/v8/upstream/scons/engine/SCons/Sig.py +0 -63
  68. data/ext/v8/upstream/scons/engine/SCons/Subst.py +0 -911
  69. data/ext/v8/upstream/scons/engine/SCons/Taskmaster.py +0 -1030
  70. data/ext/v8/upstream/scons/engine/SCons/Tool/386asm.py +0 -61
  71. data/ext/v8/upstream/scons/engine/SCons/Tool/BitKeeper.py +0 -65
  72. data/ext/v8/upstream/scons/engine/SCons/Tool/CVS.py +0 -73
  73. data/ext/v8/upstream/scons/engine/SCons/Tool/FortranCommon.py +0 -247
  74. data/ext/v8/upstream/scons/engine/SCons/Tool/JavaCommon.py +0 -324
  75. data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/__init__.py +0 -56
  76. data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/arch.py +0 -61
  77. data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/common.py +0 -210
  78. data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/netframework.py +0 -84
  79. data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/sdk.py +0 -321
  80. data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/vc.py +0 -367
  81. data/ext/v8/upstream/scons/engine/SCons/Tool/MSCommon/vs.py +0 -497
  82. data/ext/v8/upstream/scons/engine/SCons/Tool/Perforce.py +0 -104
  83. data/ext/v8/upstream/scons/engine/SCons/Tool/PharLapCommon.py +0 -138
  84. data/ext/v8/upstream/scons/engine/SCons/Tool/RCS.py +0 -64
  85. data/ext/v8/upstream/scons/engine/SCons/Tool/SCCS.py +0 -64
  86. data/ext/v8/upstream/scons/engine/SCons/Tool/Subversion.py +0 -71
  87. data/ext/v8/upstream/scons/engine/SCons/Tool/__init__.py +0 -675
  88. data/ext/v8/upstream/scons/engine/SCons/Tool/aixc++.py +0 -82
  89. data/ext/v8/upstream/scons/engine/SCons/Tool/aixcc.py +0 -74
  90. data/ext/v8/upstream/scons/engine/SCons/Tool/aixf77.py +0 -80
  91. data/ext/v8/upstream/scons/engine/SCons/Tool/aixlink.py +0 -76
  92. data/ext/v8/upstream/scons/engine/SCons/Tool/applelink.py +0 -71
  93. data/ext/v8/upstream/scons/engine/SCons/Tool/ar.py +0 -63
  94. data/ext/v8/upstream/scons/engine/SCons/Tool/as.py +0 -78
  95. data/ext/v8/upstream/scons/engine/SCons/Tool/bcc32.py +0 -82
  96. data/ext/v8/upstream/scons/engine/SCons/Tool/c++.py +0 -99
  97. data/ext/v8/upstream/scons/engine/SCons/Tool/cc.py +0 -114
  98. data/ext/v8/upstream/scons/engine/SCons/Tool/cvf.py +0 -58
  99. data/ext/v8/upstream/scons/engine/SCons/Tool/default.py +0 -50
  100. data/ext/v8/upstream/scons/engine/SCons/Tool/dmd.py +0 -224
  101. data/ext/v8/upstream/scons/engine/SCons/Tool/dvi.py +0 -64
  102. data/ext/v8/upstream/scons/engine/SCons/Tool/dvipdf.py +0 -125
  103. data/ext/v8/upstream/scons/engine/SCons/Tool/dvips.py +0 -94
  104. data/ext/v8/upstream/scons/engine/SCons/Tool/f77.py +0 -62
  105. data/ext/v8/upstream/scons/engine/SCons/Tool/f90.py +0 -62
  106. data/ext/v8/upstream/scons/engine/SCons/Tool/f95.py +0 -63
  107. data/ext/v8/upstream/scons/engine/SCons/Tool/filesystem.py +0 -98
  108. data/ext/v8/upstream/scons/engine/SCons/Tool/fortran.py +0 -63
  109. data/ext/v8/upstream/scons/engine/SCons/Tool/g++.py +0 -90
  110. data/ext/v8/upstream/scons/engine/SCons/Tool/g77.py +0 -73
  111. data/ext/v8/upstream/scons/engine/SCons/Tool/gas.py +0 -53
  112. data/ext/v8/upstream/scons/engine/SCons/Tool/gcc.py +0 -80
  113. data/ext/v8/upstream/scons/engine/SCons/Tool/gfortran.py +0 -64
  114. data/ext/v8/upstream/scons/engine/SCons/Tool/gnulink.py +0 -63
  115. data/ext/v8/upstream/scons/engine/SCons/Tool/gs.py +0 -81
  116. data/ext/v8/upstream/scons/engine/SCons/Tool/hpc++.py +0 -85
  117. data/ext/v8/upstream/scons/engine/SCons/Tool/hpcc.py +0 -53
  118. data/ext/v8/upstream/scons/engine/SCons/Tool/hplink.py +0 -77
  119. data/ext/v8/upstream/scons/engine/SCons/Tool/icc.py +0 -59
  120. data/ext/v8/upstream/scons/engine/SCons/Tool/icl.py +0 -52
  121. data/ext/v8/upstream/scons/engine/SCons/Tool/ifl.py +0 -72
  122. data/ext/v8/upstream/scons/engine/SCons/Tool/ifort.py +0 -90
  123. data/ext/v8/upstream/scons/engine/SCons/Tool/ilink.py +0 -59
  124. data/ext/v8/upstream/scons/engine/SCons/Tool/ilink32.py +0 -60
  125. data/ext/v8/upstream/scons/engine/SCons/Tool/install.py +0 -229
  126. data/ext/v8/upstream/scons/engine/SCons/Tool/intelc.py +0 -490
  127. data/ext/v8/upstream/scons/engine/SCons/Tool/ipkg.py +0 -71
  128. data/ext/v8/upstream/scons/engine/SCons/Tool/jar.py +0 -110
  129. data/ext/v8/upstream/scons/engine/SCons/Tool/javac.py +0 -234
  130. data/ext/v8/upstream/scons/engine/SCons/Tool/javah.py +0 -138
  131. data/ext/v8/upstream/scons/engine/SCons/Tool/latex.py +0 -79
  132. data/ext/v8/upstream/scons/engine/SCons/Tool/lex.py +0 -99
  133. data/ext/v8/upstream/scons/engine/SCons/Tool/link.py +0 -121
  134. data/ext/v8/upstream/scons/engine/SCons/Tool/linkloc.py +0 -112
  135. data/ext/v8/upstream/scons/engine/SCons/Tool/m4.py +0 -63
  136. data/ext/v8/upstream/scons/engine/SCons/Tool/masm.py +0 -77
  137. data/ext/v8/upstream/scons/engine/SCons/Tool/midl.py +0 -90
  138. data/ext/v8/upstream/scons/engine/SCons/Tool/mingw.py +0 -159
  139. data/ext/v8/upstream/scons/engine/SCons/Tool/mslib.py +0 -64
  140. data/ext/v8/upstream/scons/engine/SCons/Tool/mslink.py +0 -266
  141. data/ext/v8/upstream/scons/engine/SCons/Tool/mssdk.py +0 -50
  142. data/ext/v8/upstream/scons/engine/SCons/Tool/msvc.py +0 -269
  143. data/ext/v8/upstream/scons/engine/SCons/Tool/msvs.py +0 -1439
  144. data/ext/v8/upstream/scons/engine/SCons/Tool/mwcc.py +0 -208
  145. data/ext/v8/upstream/scons/engine/SCons/Tool/mwld.py +0 -107
  146. data/ext/v8/upstream/scons/engine/SCons/Tool/nasm.py +0 -72
  147. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/__init__.py +0 -314
  148. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/ipk.py +0 -185
  149. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/msi.py +0 -526
  150. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/rpm.py +0 -367
  151. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/src_tarbz2.py +0 -43
  152. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/src_targz.py +0 -43
  153. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/src_zip.py +0 -43
  154. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/tarbz2.py +0 -44
  155. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/targz.py +0 -44
  156. data/ext/v8/upstream/scons/engine/SCons/Tool/packaging/zip.py +0 -44
  157. data/ext/v8/upstream/scons/engine/SCons/Tool/pdf.py +0 -78
  158. data/ext/v8/upstream/scons/engine/SCons/Tool/pdflatex.py +0 -83
  159. data/ext/v8/upstream/scons/engine/SCons/Tool/pdftex.py +0 -108
  160. data/ext/v8/upstream/scons/engine/SCons/Tool/qt.py +0 -336
  161. data/ext/v8/upstream/scons/engine/SCons/Tool/rmic.py +0 -121
  162. data/ext/v8/upstream/scons/engine/SCons/Tool/rpcgen.py +0 -70
  163. data/ext/v8/upstream/scons/engine/SCons/Tool/rpm.py +0 -132
  164. data/ext/v8/upstream/scons/engine/SCons/Tool/sgiar.py +0 -68
  165. data/ext/v8/upstream/scons/engine/SCons/Tool/sgic++.py +0 -58
  166. data/ext/v8/upstream/scons/engine/SCons/Tool/sgicc.py +0 -53
  167. data/ext/v8/upstream/scons/engine/SCons/Tool/sgilink.py +0 -63
  168. data/ext/v8/upstream/scons/engine/SCons/Tool/sunar.py +0 -67
  169. data/ext/v8/upstream/scons/engine/SCons/Tool/sunc++.py +0 -142
  170. data/ext/v8/upstream/scons/engine/SCons/Tool/suncc.py +0 -58
  171. data/ext/v8/upstream/scons/engine/SCons/Tool/sunf77.py +0 -63
  172. data/ext/v8/upstream/scons/engine/SCons/Tool/sunf90.py +0 -64
  173. data/ext/v8/upstream/scons/engine/SCons/Tool/sunf95.py +0 -64
  174. data/ext/v8/upstream/scons/engine/SCons/Tool/sunlink.py +0 -77
  175. data/ext/v8/upstream/scons/engine/SCons/Tool/swig.py +0 -186
  176. data/ext/v8/upstream/scons/engine/SCons/Tool/tar.py +0 -73
  177. data/ext/v8/upstream/scons/engine/SCons/Tool/tex.py +0 -805
  178. data/ext/v8/upstream/scons/engine/SCons/Tool/textfile.py +0 -175
  179. data/ext/v8/upstream/scons/engine/SCons/Tool/tlib.py +0 -53
  180. data/ext/v8/upstream/scons/engine/SCons/Tool/wix.py +0 -100
  181. data/ext/v8/upstream/scons/engine/SCons/Tool/yacc.py +0 -131
  182. data/ext/v8/upstream/scons/engine/SCons/Tool/zip.py +0 -100
  183. data/ext/v8/upstream/scons/engine/SCons/Util.py +0 -1645
  184. data/ext/v8/upstream/scons/engine/SCons/Variables/BoolVariable.py +0 -91
  185. data/ext/v8/upstream/scons/engine/SCons/Variables/EnumVariable.py +0 -107
  186. data/ext/v8/upstream/scons/engine/SCons/Variables/ListVariable.py +0 -139
  187. data/ext/v8/upstream/scons/engine/SCons/Variables/PackageVariable.py +0 -109
  188. data/ext/v8/upstream/scons/engine/SCons/Variables/PathVariable.py +0 -147
  189. data/ext/v8/upstream/scons/engine/SCons/Variables/__init__.py +0 -317
  190. data/ext/v8/upstream/scons/engine/SCons/Warnings.py +0 -228
  191. data/ext/v8/upstream/scons/engine/SCons/__init__.py +0 -49
  192. data/ext/v8/upstream/scons/engine/SCons/compat/__init__.py +0 -302
  193. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_UserString.py +0 -98
  194. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_hashlib.py +0 -91
  195. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_itertools.py +0 -124
  196. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_optparse.py +0 -1725
  197. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_sets.py +0 -583
  198. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_sets15.py +0 -176
  199. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_shlex.py +0 -325
  200. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_subprocess.py +0 -1296
  201. data/ext/v8/upstream/scons/engine/SCons/compat/_scons_textwrap.py +0 -382
  202. data/ext/v8/upstream/scons/engine/SCons/compat/builtins.py +0 -187
  203. data/ext/v8/upstream/scons/engine/SCons/cpp.py +0 -598
  204. data/ext/v8/upstream/scons/engine/SCons/dblite.py +0 -248
  205. data/ext/v8/upstream/scons/engine/SCons/exitfuncs.py +0 -77
  206. data/ext/v8/upstream/scons/os_spawnv_fix.diff +0 -83
  207. data/ext/v8/upstream/scons/scons-time.1 +0 -1017
  208. data/ext/v8/upstream/scons/scons.1 +0 -15179
  209. data/ext/v8/upstream/scons/sconsign.1 +0 -208
  210. data/ext/v8/upstream/scons/script/scons +0 -184
  211. data/ext/v8/upstream/scons/script/scons-time +0 -1529
  212. data/ext/v8/upstream/scons/script/scons.bat +0 -31
  213. data/ext/v8/upstream/scons/script/sconsign +0 -508
  214. data/ext/v8/upstream/scons/setup.cfg +0 -6
  215. data/ext/v8/upstream/scons/setup.py +0 -427
  216. data/ext/v8/upstream/v8/.gitignore +0 -33
  217. data/ext/v8/upstream/v8/AUTHORS +0 -42
  218. data/ext/v8/upstream/v8/ChangeLog +0 -2663
  219. data/ext/v8/upstream/v8/LICENSE +0 -52
  220. data/ext/v8/upstream/v8/LICENSE.strongtalk +0 -29
  221. data/ext/v8/upstream/v8/LICENSE.v8 +0 -26
  222. data/ext/v8/upstream/v8/LICENSE.valgrind +0 -45
  223. data/ext/v8/upstream/v8/SConstruct +0 -1473
  224. data/ext/v8/upstream/v8/build/README.txt +0 -25
  225. data/ext/v8/upstream/v8/build/all.gyp +0 -18
  226. data/ext/v8/upstream/v8/build/armu.gypi +0 -32
  227. data/ext/v8/upstream/v8/build/common.gypi +0 -82
  228. data/ext/v8/upstream/v8/build/gyp_v8 +0 -145
  229. data/ext/v8/upstream/v8/include/v8-debug.h +0 -394
  230. data/ext/v8/upstream/v8/include/v8-preparser.h +0 -116
  231. data/ext/v8/upstream/v8/include/v8-profiler.h +0 -505
  232. data/ext/v8/upstream/v8/include/v8-testing.h +0 -104
  233. data/ext/v8/upstream/v8/include/v8.h +0 -4000
  234. data/ext/v8/upstream/v8/include/v8stdint.h +0 -53
  235. data/ext/v8/upstream/v8/preparser/SConscript +0 -38
  236. data/ext/v8/upstream/v8/preparser/preparser-process.cc +0 -169
  237. data/ext/v8/upstream/v8/src/SConscript +0 -380
  238. data/ext/v8/upstream/v8/src/accessors.cc +0 -766
  239. data/ext/v8/upstream/v8/src/accessors.h +0 -121
  240. data/ext/v8/upstream/v8/src/allocation-inl.h +0 -49
  241. data/ext/v8/upstream/v8/src/allocation.cc +0 -122
  242. data/ext/v8/upstream/v8/src/allocation.h +0 -143
  243. data/ext/v8/upstream/v8/src/api.cc +0 -5678
  244. data/ext/v8/upstream/v8/src/api.h +0 -572
  245. data/ext/v8/upstream/v8/src/apinatives.js +0 -110
  246. data/ext/v8/upstream/v8/src/apiutils.h +0 -73
  247. data/ext/v8/upstream/v8/src/arguments.h +0 -116
  248. data/ext/v8/upstream/v8/src/arm/assembler-arm-inl.h +0 -353
  249. data/ext/v8/upstream/v8/src/arm/assembler-arm.cc +0 -2877
  250. data/ext/v8/upstream/v8/src/arm/assembler-arm.h +0 -1382
  251. data/ext/v8/upstream/v8/src/arm/builtins-arm.cc +0 -1634
  252. data/ext/v8/upstream/v8/src/arm/code-stubs-arm.cc +0 -6917
  253. data/ext/v8/upstream/v8/src/arm/code-stubs-arm.h +0 -623
  254. data/ext/v8/upstream/v8/src/arm/codegen-arm-inl.h +0 -48
  255. data/ext/v8/upstream/v8/src/arm/codegen-arm.cc +0 -7437
  256. data/ext/v8/upstream/v8/src/arm/codegen-arm.h +0 -595
  257. data/ext/v8/upstream/v8/src/arm/constants-arm.cc +0 -152
  258. data/ext/v8/upstream/v8/src/arm/constants-arm.h +0 -778
  259. data/ext/v8/upstream/v8/src/arm/cpu-arm.cc +0 -149
  260. data/ext/v8/upstream/v8/src/arm/debug-arm.cc +0 -317
  261. data/ext/v8/upstream/v8/src/arm/deoptimizer-arm.cc +0 -737
  262. data/ext/v8/upstream/v8/src/arm/disasm-arm.cc +0 -1503
  263. data/ext/v8/upstream/v8/src/arm/frames-arm.cc +0 -45
  264. data/ext/v8/upstream/v8/src/arm/frames-arm.h +0 -168
  265. data/ext/v8/upstream/v8/src/arm/full-codegen-arm.cc +0 -4374
  266. data/ext/v8/upstream/v8/src/arm/ic-arm.cc +0 -1793
  267. data/ext/v8/upstream/v8/src/arm/jump-target-arm.cc +0 -174
  268. data/ext/v8/upstream/v8/src/arm/lithium-arm.cc +0 -2120
  269. data/ext/v8/upstream/v8/src/arm/lithium-arm.h +0 -2179
  270. data/ext/v8/upstream/v8/src/arm/lithium-codegen-arm.cc +0 -4132
  271. data/ext/v8/upstream/v8/src/arm/lithium-codegen-arm.h +0 -329
  272. data/ext/v8/upstream/v8/src/arm/lithium-gap-resolver-arm.cc +0 -305
  273. data/ext/v8/upstream/v8/src/arm/lithium-gap-resolver-arm.h +0 -84
  274. data/ext/v8/upstream/v8/src/arm/macro-assembler-arm.cc +0 -2939
  275. data/ext/v8/upstream/v8/src/arm/macro-assembler-arm.h +0 -1071
  276. data/ext/v8/upstream/v8/src/arm/regexp-macro-assembler-arm.cc +0 -1287
  277. data/ext/v8/upstream/v8/src/arm/regexp-macro-assembler-arm.h +0 -253
  278. data/ext/v8/upstream/v8/src/arm/register-allocator-arm-inl.h +0 -100
  279. data/ext/v8/upstream/v8/src/arm/register-allocator-arm.cc +0 -63
  280. data/ext/v8/upstream/v8/src/arm/register-allocator-arm.h +0 -44
  281. data/ext/v8/upstream/v8/src/arm/simulator-arm.cc +0 -3288
  282. data/ext/v8/upstream/v8/src/arm/simulator-arm.h +0 -413
  283. data/ext/v8/upstream/v8/src/arm/stub-cache-arm.cc +0 -4034
  284. data/ext/v8/upstream/v8/src/arm/virtual-frame-arm-inl.h +0 -59
  285. data/ext/v8/upstream/v8/src/arm/virtual-frame-arm.cc +0 -843
  286. data/ext/v8/upstream/v8/src/arm/virtual-frame-arm.h +0 -523
  287. data/ext/v8/upstream/v8/src/array.js +0 -1249
  288. data/ext/v8/upstream/v8/src/assembler.cc +0 -1067
  289. data/ext/v8/upstream/v8/src/assembler.h +0 -823
  290. data/ext/v8/upstream/v8/src/ast-inl.h +0 -112
  291. data/ext/v8/upstream/v8/src/ast.cc +0 -1078
  292. data/ext/v8/upstream/v8/src/ast.h +0 -2234
  293. data/ext/v8/upstream/v8/src/atomicops.h +0 -167
  294. data/ext/v8/upstream/v8/src/atomicops_internals_arm_gcc.h +0 -145
  295. data/ext/v8/upstream/v8/src/atomicops_internals_mips_gcc.h +0 -169
  296. data/ext/v8/upstream/v8/src/atomicops_internals_x86_gcc.cc +0 -126
  297. data/ext/v8/upstream/v8/src/atomicops_internals_x86_gcc.h +0 -287
  298. data/ext/v8/upstream/v8/src/atomicops_internals_x86_macosx.h +0 -301
  299. data/ext/v8/upstream/v8/src/atomicops_internals_x86_msvc.h +0 -203
  300. data/ext/v8/upstream/v8/src/bignum-dtoa.cc +0 -655
  301. data/ext/v8/upstream/v8/src/bignum-dtoa.h +0 -81
  302. data/ext/v8/upstream/v8/src/bignum.cc +0 -768
  303. data/ext/v8/upstream/v8/src/bignum.h +0 -140
  304. data/ext/v8/upstream/v8/src/bootstrapper.cc +0 -2138
  305. data/ext/v8/upstream/v8/src/bootstrapper.h +0 -185
  306. data/ext/v8/upstream/v8/src/builtins.cc +0 -1708
  307. data/ext/v8/upstream/v8/src/builtins.h +0 -368
  308. data/ext/v8/upstream/v8/src/bytecodes-irregexp.h +0 -105
  309. data/ext/v8/upstream/v8/src/cached-powers.cc +0 -177
  310. data/ext/v8/upstream/v8/src/cached-powers.h +0 -65
  311. data/ext/v8/upstream/v8/src/char-predicates-inl.h +0 -94
  312. data/ext/v8/upstream/v8/src/char-predicates.h +0 -65
  313. data/ext/v8/upstream/v8/src/checks.cc +0 -110
  314. data/ext/v8/upstream/v8/src/checks.h +0 -296
  315. data/ext/v8/upstream/v8/src/circular-queue-inl.h +0 -53
  316. data/ext/v8/upstream/v8/src/circular-queue.cc +0 -122
  317. data/ext/v8/upstream/v8/src/circular-queue.h +0 -103
  318. data/ext/v8/upstream/v8/src/code-stubs.cc +0 -240
  319. data/ext/v8/upstream/v8/src/code-stubs.h +0 -971
  320. data/ext/v8/upstream/v8/src/code.h +0 -68
  321. data/ext/v8/upstream/v8/src/codegen-inl.h +0 -68
  322. data/ext/v8/upstream/v8/src/codegen.cc +0 -505
  323. data/ext/v8/upstream/v8/src/codegen.h +0 -245
  324. data/ext/v8/upstream/v8/src/compilation-cache.cc +0 -540
  325. data/ext/v8/upstream/v8/src/compilation-cache.h +0 -287
  326. data/ext/v8/upstream/v8/src/compiler.cc +0 -792
  327. data/ext/v8/upstream/v8/src/compiler.h +0 -307
  328. data/ext/v8/upstream/v8/src/contexts.cc +0 -327
  329. data/ext/v8/upstream/v8/src/contexts.h +0 -382
  330. data/ext/v8/upstream/v8/src/conversions-inl.h +0 -110
  331. data/ext/v8/upstream/v8/src/conversions.cc +0 -1125
  332. data/ext/v8/upstream/v8/src/conversions.h +0 -122
  333. data/ext/v8/upstream/v8/src/counters.cc +0 -93
  334. data/ext/v8/upstream/v8/src/counters.h +0 -254
  335. data/ext/v8/upstream/v8/src/cpu-profiler-inl.h +0 -101
  336. data/ext/v8/upstream/v8/src/cpu-profiler.cc +0 -606
  337. data/ext/v8/upstream/v8/src/cpu-profiler.h +0 -305
  338. data/ext/v8/upstream/v8/src/cpu.h +0 -67
  339. data/ext/v8/upstream/v8/src/d8-debug.cc +0 -367
  340. data/ext/v8/upstream/v8/src/d8-debug.h +0 -158
  341. data/ext/v8/upstream/v8/src/d8-posix.cc +0 -695
  342. data/ext/v8/upstream/v8/src/d8-readline.cc +0 -128
  343. data/ext/v8/upstream/v8/src/d8-windows.cc +0 -42
  344. data/ext/v8/upstream/v8/src/d8.cc +0 -796
  345. data/ext/v8/upstream/v8/src/d8.gyp +0 -88
  346. data/ext/v8/upstream/v8/src/d8.h +0 -231
  347. data/ext/v8/upstream/v8/src/d8.js +0 -2798
  348. data/ext/v8/upstream/v8/src/data-flow.cc +0 -545
  349. data/ext/v8/upstream/v8/src/data-flow.h +0 -379
  350. data/ext/v8/upstream/v8/src/date.js +0 -1103
  351. data/ext/v8/upstream/v8/src/dateparser-inl.h +0 -125
  352. data/ext/v8/upstream/v8/src/dateparser.cc +0 -178
  353. data/ext/v8/upstream/v8/src/dateparser.h +0 -265
  354. data/ext/v8/upstream/v8/src/debug-agent.cc +0 -447
  355. data/ext/v8/upstream/v8/src/debug-agent.h +0 -129
  356. data/ext/v8/upstream/v8/src/debug-debugger.js +0 -2569
  357. data/ext/v8/upstream/v8/src/debug.cc +0 -3188
  358. data/ext/v8/upstream/v8/src/debug.h +0 -1055
  359. data/ext/v8/upstream/v8/src/deoptimizer.cc +0 -1296
  360. data/ext/v8/upstream/v8/src/deoptimizer.h +0 -629
  361. data/ext/v8/upstream/v8/src/disasm.h +0 -80
  362. data/ext/v8/upstream/v8/src/disassembler.cc +0 -339
  363. data/ext/v8/upstream/v8/src/disassembler.h +0 -56
  364. data/ext/v8/upstream/v8/src/diy-fp.cc +0 -58
  365. data/ext/v8/upstream/v8/src/diy-fp.h +0 -117
  366. data/ext/v8/upstream/v8/src/double.h +0 -238
  367. data/ext/v8/upstream/v8/src/dtoa.cc +0 -103
  368. data/ext/v8/upstream/v8/src/dtoa.h +0 -85
  369. data/ext/v8/upstream/v8/src/execution.cc +0 -791
  370. data/ext/v8/upstream/v8/src/execution.h +0 -291
  371. data/ext/v8/upstream/v8/src/extensions/experimental/break-iterator.cc +0 -250
  372. data/ext/v8/upstream/v8/src/extensions/experimental/break-iterator.h +0 -89
  373. data/ext/v8/upstream/v8/src/extensions/experimental/experimental.gyp +0 -55
  374. data/ext/v8/upstream/v8/src/extensions/experimental/i18n-extension.cc +0 -284
  375. data/ext/v8/upstream/v8/src/extensions/experimental/i18n-extension.h +0 -64
  376. data/ext/v8/upstream/v8/src/extensions/externalize-string-extension.cc +0 -141
  377. data/ext/v8/upstream/v8/src/extensions/externalize-string-extension.h +0 -50
  378. data/ext/v8/upstream/v8/src/extensions/gc-extension.cc +0 -58
  379. data/ext/v8/upstream/v8/src/extensions/gc-extension.h +0 -49
  380. data/ext/v8/upstream/v8/src/factory.cc +0 -1194
  381. data/ext/v8/upstream/v8/src/factory.h +0 -436
  382. data/ext/v8/upstream/v8/src/fast-dtoa.cc +0 -736
  383. data/ext/v8/upstream/v8/src/fast-dtoa.h +0 -83
  384. data/ext/v8/upstream/v8/src/fixed-dtoa.cc +0 -405
  385. data/ext/v8/upstream/v8/src/fixed-dtoa.h +0 -55
  386. data/ext/v8/upstream/v8/src/flag-definitions.h +0 -556
  387. data/ext/v8/upstream/v8/src/flags.cc +0 -551
  388. data/ext/v8/upstream/v8/src/flags.h +0 -79
  389. data/ext/v8/upstream/v8/src/frame-element.cc +0 -37
  390. data/ext/v8/upstream/v8/src/frame-element.h +0 -269
  391. data/ext/v8/upstream/v8/src/frames-inl.h +0 -236
  392. data/ext/v8/upstream/v8/src/frames.cc +0 -1273
  393. data/ext/v8/upstream/v8/src/frames.h +0 -854
  394. data/ext/v8/upstream/v8/src/full-codegen.cc +0 -1385
  395. data/ext/v8/upstream/v8/src/full-codegen.h +0 -753
  396. data/ext/v8/upstream/v8/src/func-name-inferrer.cc +0 -91
  397. data/ext/v8/upstream/v8/src/func-name-inferrer.h +0 -111
  398. data/ext/v8/upstream/v8/src/gdb-jit.cc +0 -1548
  399. data/ext/v8/upstream/v8/src/gdb-jit.h +0 -138
  400. data/ext/v8/upstream/v8/src/global-handles.cc +0 -596
  401. data/ext/v8/upstream/v8/src/global-handles.h +0 -239
  402. data/ext/v8/upstream/v8/src/globals.h +0 -325
  403. data/ext/v8/upstream/v8/src/handles-inl.h +0 -177
  404. data/ext/v8/upstream/v8/src/handles.cc +0 -965
  405. data/ext/v8/upstream/v8/src/handles.h +0 -372
  406. data/ext/v8/upstream/v8/src/hashmap.cc +0 -230
  407. data/ext/v8/upstream/v8/src/hashmap.h +0 -121
  408. data/ext/v8/upstream/v8/src/heap-inl.h +0 -703
  409. data/ext/v8/upstream/v8/src/heap-profiler.cc +0 -1173
  410. data/ext/v8/upstream/v8/src/heap-profiler.h +0 -396
  411. data/ext/v8/upstream/v8/src/heap.cc +0 -5856
  412. data/ext/v8/upstream/v8/src/heap.h +0 -2264
  413. data/ext/v8/upstream/v8/src/hydrogen-instructions.cc +0 -1639
  414. data/ext/v8/upstream/v8/src/hydrogen-instructions.h +0 -3657
  415. data/ext/v8/upstream/v8/src/hydrogen.cc +0 -6011
  416. data/ext/v8/upstream/v8/src/hydrogen.h +0 -1137
  417. data/ext/v8/upstream/v8/src/ia32/assembler-ia32-inl.h +0 -430
  418. data/ext/v8/upstream/v8/src/ia32/assembler-ia32.cc +0 -2846
  419. data/ext/v8/upstream/v8/src/ia32/assembler-ia32.h +0 -1159
  420. data/ext/v8/upstream/v8/src/ia32/builtins-ia32.cc +0 -1596
  421. data/ext/v8/upstream/v8/src/ia32/code-stubs-ia32.cc +0 -6549
  422. data/ext/v8/upstream/v8/src/ia32/code-stubs-ia32.h +0 -495
  423. data/ext/v8/upstream/v8/src/ia32/codegen-ia32-inl.h +0 -46
  424. data/ext/v8/upstream/v8/src/ia32/codegen-ia32.cc +0 -10385
  425. data/ext/v8/upstream/v8/src/ia32/codegen-ia32.h +0 -801
  426. data/ext/v8/upstream/v8/src/ia32/cpu-ia32.cc +0 -88
  427. data/ext/v8/upstream/v8/src/ia32/debug-ia32.cc +0 -312
  428. data/ext/v8/upstream/v8/src/ia32/deoptimizer-ia32.cc +0 -774
  429. data/ext/v8/upstream/v8/src/ia32/disasm-ia32.cc +0 -1620
  430. data/ext/v8/upstream/v8/src/ia32/frames-ia32.cc +0 -45
  431. data/ext/v8/upstream/v8/src/ia32/frames-ia32.h +0 -140
  432. data/ext/v8/upstream/v8/src/ia32/full-codegen-ia32.cc +0 -4357
  433. data/ext/v8/upstream/v8/src/ia32/ic-ia32.cc +0 -1779
  434. data/ext/v8/upstream/v8/src/ia32/jump-target-ia32.cc +0 -437
  435. data/ext/v8/upstream/v8/src/ia32/lithium-codegen-ia32.cc +0 -4158
  436. data/ext/v8/upstream/v8/src/ia32/lithium-codegen-ia32.h +0 -318
  437. data/ext/v8/upstream/v8/src/ia32/lithium-gap-resolver-ia32.cc +0 -466
  438. data/ext/v8/upstream/v8/src/ia32/lithium-gap-resolver-ia32.h +0 -110
  439. data/ext/v8/upstream/v8/src/ia32/lithium-ia32.cc +0 -2181
  440. data/ext/v8/upstream/v8/src/ia32/lithium-ia32.h +0 -2235
  441. data/ext/v8/upstream/v8/src/ia32/macro-assembler-ia32.cc +0 -2056
  442. data/ext/v8/upstream/v8/src/ia32/macro-assembler-ia32.h +0 -807
  443. data/ext/v8/upstream/v8/src/ia32/regexp-macro-assembler-ia32.cc +0 -1264
  444. data/ext/v8/upstream/v8/src/ia32/regexp-macro-assembler-ia32.h +0 -216
  445. data/ext/v8/upstream/v8/src/ia32/register-allocator-ia32-inl.h +0 -82
  446. data/ext/v8/upstream/v8/src/ia32/register-allocator-ia32.cc +0 -157
  447. data/ext/v8/upstream/v8/src/ia32/register-allocator-ia32.h +0 -43
  448. data/ext/v8/upstream/v8/src/ia32/simulator-ia32.cc +0 -30
  449. data/ext/v8/upstream/v8/src/ia32/simulator-ia32.h +0 -72
  450. data/ext/v8/upstream/v8/src/ia32/stub-cache-ia32.cc +0 -3711
  451. data/ext/v8/upstream/v8/src/ia32/virtual-frame-ia32.cc +0 -1366
  452. data/ext/v8/upstream/v8/src/ia32/virtual-frame-ia32.h +0 -650
  453. data/ext/v8/upstream/v8/src/ic-inl.h +0 -130
  454. data/ext/v8/upstream/v8/src/ic.cc +0 -2389
  455. data/ext/v8/upstream/v8/src/ic.h +0 -675
  456. data/ext/v8/upstream/v8/src/inspector.cc +0 -63
  457. data/ext/v8/upstream/v8/src/inspector.h +0 -62
  458. data/ext/v8/upstream/v8/src/interpreter-irregexp.cc +0 -659
  459. data/ext/v8/upstream/v8/src/interpreter-irregexp.h +0 -49
  460. data/ext/v8/upstream/v8/src/isolate.cc +0 -883
  461. data/ext/v8/upstream/v8/src/isolate.h +0 -1306
  462. data/ext/v8/upstream/v8/src/json.js +0 -342
  463. data/ext/v8/upstream/v8/src/jsregexp.cc +0 -5371
  464. data/ext/v8/upstream/v8/src/jsregexp.h +0 -1483
  465. data/ext/v8/upstream/v8/src/jump-target-heavy-inl.h +0 -51
  466. data/ext/v8/upstream/v8/src/jump-target-heavy.cc +0 -427
  467. data/ext/v8/upstream/v8/src/jump-target-heavy.h +0 -238
  468. data/ext/v8/upstream/v8/src/jump-target-inl.h +0 -48
  469. data/ext/v8/upstream/v8/src/jump-target-light-inl.h +0 -56
  470. data/ext/v8/upstream/v8/src/jump-target-light.cc +0 -111
  471. data/ext/v8/upstream/v8/src/jump-target-light.h +0 -193
  472. data/ext/v8/upstream/v8/src/jump-target.cc +0 -91
  473. data/ext/v8/upstream/v8/src/jump-target.h +0 -90
  474. data/ext/v8/upstream/v8/src/list-inl.h +0 -206
  475. data/ext/v8/upstream/v8/src/list.h +0 -164
  476. data/ext/v8/upstream/v8/src/lithium-allocator-inl.h +0 -142
  477. data/ext/v8/upstream/v8/src/lithium-allocator.cc +0 -2105
  478. data/ext/v8/upstream/v8/src/lithium-allocator.h +0 -630
  479. data/ext/v8/upstream/v8/src/lithium.cc +0 -169
  480. data/ext/v8/upstream/v8/src/lithium.h +0 -592
  481. data/ext/v8/upstream/v8/src/liveedit-debugger.js +0 -1082
  482. data/ext/v8/upstream/v8/src/liveedit.cc +0 -1693
  483. data/ext/v8/upstream/v8/src/liveedit.h +0 -179
  484. data/ext/v8/upstream/v8/src/liveobjectlist-inl.h +0 -126
  485. data/ext/v8/upstream/v8/src/liveobjectlist.cc +0 -2589
  486. data/ext/v8/upstream/v8/src/liveobjectlist.h +0 -322
  487. data/ext/v8/upstream/v8/src/log-inl.h +0 -59
  488. data/ext/v8/upstream/v8/src/log-utils.cc +0 -423
  489. data/ext/v8/upstream/v8/src/log-utils.h +0 -229
  490. data/ext/v8/upstream/v8/src/log.cc +0 -1666
  491. data/ext/v8/upstream/v8/src/log.h +0 -446
  492. data/ext/v8/upstream/v8/src/macro-assembler.h +0 -120
  493. data/ext/v8/upstream/v8/src/macros.py +0 -178
  494. data/ext/v8/upstream/v8/src/mark-compact.cc +0 -3092
  495. data/ext/v8/upstream/v8/src/mark-compact.h +0 -506
  496. data/ext/v8/upstream/v8/src/math.js +0 -264
  497. data/ext/v8/upstream/v8/src/messages.cc +0 -166
  498. data/ext/v8/upstream/v8/src/messages.h +0 -114
  499. data/ext/v8/upstream/v8/src/messages.js +0 -1090
  500. data/ext/v8/upstream/v8/src/mips/assembler-mips-inl.h +0 -335
  501. data/ext/v8/upstream/v8/src/mips/assembler-mips.cc +0 -2093
  502. data/ext/v8/upstream/v8/src/mips/assembler-mips.h +0 -1066
  503. data/ext/v8/upstream/v8/src/mips/builtins-mips.cc +0 -148
  504. data/ext/v8/upstream/v8/src/mips/code-stubs-mips.cc +0 -752
  505. data/ext/v8/upstream/v8/src/mips/code-stubs-mips.h +0 -511
  506. data/ext/v8/upstream/v8/src/mips/codegen-mips-inl.h +0 -64
  507. data/ext/v8/upstream/v8/src/mips/codegen-mips.cc +0 -1213
  508. data/ext/v8/upstream/v8/src/mips/codegen-mips.h +0 -633
  509. data/ext/v8/upstream/v8/src/mips/constants-mips.cc +0 -352
  510. data/ext/v8/upstream/v8/src/mips/constants-mips.h +0 -723
  511. data/ext/v8/upstream/v8/src/mips/cpu-mips.cc +0 -90
  512. data/ext/v8/upstream/v8/src/mips/debug-mips.cc +0 -155
  513. data/ext/v8/upstream/v8/src/mips/deoptimizer-mips.cc +0 -91
  514. data/ext/v8/upstream/v8/src/mips/disasm-mips.cc +0 -1023
  515. data/ext/v8/upstream/v8/src/mips/frames-mips.cc +0 -48
  516. data/ext/v8/upstream/v8/src/mips/frames-mips.h +0 -179
  517. data/ext/v8/upstream/v8/src/mips/full-codegen-mips.cc +0 -727
  518. data/ext/v8/upstream/v8/src/mips/ic-mips.cc +0 -244
  519. data/ext/v8/upstream/v8/src/mips/jump-target-mips.cc +0 -80
  520. data/ext/v8/upstream/v8/src/mips/lithium-codegen-mips.h +0 -65
  521. data/ext/v8/upstream/v8/src/mips/lithium-mips.h +0 -304
  522. data/ext/v8/upstream/v8/src/mips/macro-assembler-mips.cc +0 -3327
  523. data/ext/v8/upstream/v8/src/mips/macro-assembler-mips.h +0 -1058
  524. data/ext/v8/upstream/v8/src/mips/regexp-macro-assembler-mips.cc +0 -478
  525. data/ext/v8/upstream/v8/src/mips/regexp-macro-assembler-mips.h +0 -250
  526. data/ext/v8/upstream/v8/src/mips/register-allocator-mips-inl.h +0 -134
  527. data/ext/v8/upstream/v8/src/mips/register-allocator-mips.cc +0 -63
  528. data/ext/v8/upstream/v8/src/mips/register-allocator-mips.h +0 -47
  529. data/ext/v8/upstream/v8/src/mips/simulator-mips.cc +0 -2438
  530. data/ext/v8/upstream/v8/src/mips/simulator-mips.h +0 -394
  531. data/ext/v8/upstream/v8/src/mips/stub-cache-mips.cc +0 -601
  532. data/ext/v8/upstream/v8/src/mips/virtual-frame-mips-inl.h +0 -58
  533. data/ext/v8/upstream/v8/src/mips/virtual-frame-mips.cc +0 -307
  534. data/ext/v8/upstream/v8/src/mips/virtual-frame-mips.h +0 -530
  535. data/ext/v8/upstream/v8/src/mirror-debugger.js +0 -2381
  536. data/ext/v8/upstream/v8/src/mksnapshot.cc +0 -256
  537. data/ext/v8/upstream/v8/src/natives.h +0 -63
  538. data/ext/v8/upstream/v8/src/objects-debug.cc +0 -722
  539. data/ext/v8/upstream/v8/src/objects-inl.h +0 -4166
  540. data/ext/v8/upstream/v8/src/objects-printer.cc +0 -801
  541. data/ext/v8/upstream/v8/src/objects-visiting.cc +0 -142
  542. data/ext/v8/upstream/v8/src/objects-visiting.h +0 -422
  543. data/ext/v8/upstream/v8/src/objects.cc +0 -10296
  544. data/ext/v8/upstream/v8/src/objects.h +0 -6662
  545. data/ext/v8/upstream/v8/src/parser.cc +0 -5168
  546. data/ext/v8/upstream/v8/src/parser.h +0 -823
  547. data/ext/v8/upstream/v8/src/platform-cygwin.cc +0 -811
  548. data/ext/v8/upstream/v8/src/platform-freebsd.cc +0 -854
  549. data/ext/v8/upstream/v8/src/platform-linux.cc +0 -1120
  550. data/ext/v8/upstream/v8/src/platform-macos.cc +0 -865
  551. data/ext/v8/upstream/v8/src/platform-nullos.cc +0 -504
  552. data/ext/v8/upstream/v8/src/platform-openbsd.cc +0 -672
  553. data/ext/v8/upstream/v8/src/platform-posix.cc +0 -424
  554. data/ext/v8/upstream/v8/src/platform-solaris.cc +0 -796
  555. data/ext/v8/upstream/v8/src/platform-tls-mac.h +0 -62
  556. data/ext/v8/upstream/v8/src/platform-tls-win32.h +0 -62
  557. data/ext/v8/upstream/v8/src/platform-tls.h +0 -50
  558. data/ext/v8/upstream/v8/src/platform-win32.cc +0 -2072
  559. data/ext/v8/upstream/v8/src/platform.h +0 -693
  560. data/ext/v8/upstream/v8/src/preparse-data.cc +0 -185
  561. data/ext/v8/upstream/v8/src/preparse-data.h +0 -249
  562. data/ext/v8/upstream/v8/src/preparser-api.cc +0 -219
  563. data/ext/v8/upstream/v8/src/preparser.cc +0 -1205
  564. data/ext/v8/upstream/v8/src/preparser.h +0 -278
  565. data/ext/v8/upstream/v8/src/prettyprinter.cc +0 -1530
  566. data/ext/v8/upstream/v8/src/prettyprinter.h +0 -223
  567. data/ext/v8/upstream/v8/src/profile-generator-inl.h +0 -128
  568. data/ext/v8/upstream/v8/src/profile-generator.cc +0 -3095
  569. data/ext/v8/upstream/v8/src/profile-generator.h +0 -1125
  570. data/ext/v8/upstream/v8/src/property.cc +0 -102
  571. data/ext/v8/upstream/v8/src/property.h +0 -348
  572. data/ext/v8/upstream/v8/src/regexp-macro-assembler-irregexp-inl.h +0 -78
  573. data/ext/v8/upstream/v8/src/regexp-macro-assembler-irregexp.cc +0 -470
  574. data/ext/v8/upstream/v8/src/regexp-macro-assembler-irregexp.h +0 -142
  575. data/ext/v8/upstream/v8/src/regexp-macro-assembler-tracer.cc +0 -373
  576. data/ext/v8/upstream/v8/src/regexp-macro-assembler-tracer.h +0 -104
  577. data/ext/v8/upstream/v8/src/regexp-macro-assembler.cc +0 -266
  578. data/ext/v8/upstream/v8/src/regexp-macro-assembler.h +0 -236
  579. data/ext/v8/upstream/v8/src/regexp-stack.cc +0 -111
  580. data/ext/v8/upstream/v8/src/regexp-stack.h +0 -147
  581. data/ext/v8/upstream/v8/src/regexp.js +0 -483
  582. data/ext/v8/upstream/v8/src/register-allocator-inl.h +0 -141
  583. data/ext/v8/upstream/v8/src/register-allocator.cc +0 -98
  584. data/ext/v8/upstream/v8/src/register-allocator.h +0 -310
  585. data/ext/v8/upstream/v8/src/rewriter.cc +0 -1024
  586. data/ext/v8/upstream/v8/src/rewriter.h +0 -59
  587. data/ext/v8/upstream/v8/src/runtime-profiler.cc +0 -478
  588. data/ext/v8/upstream/v8/src/runtime-profiler.h +0 -192
  589. data/ext/v8/upstream/v8/src/runtime.cc +0 -11949
  590. data/ext/v8/upstream/v8/src/runtime.h +0 -643
  591. data/ext/v8/upstream/v8/src/runtime.js +0 -643
  592. data/ext/v8/upstream/v8/src/safepoint-table.cc +0 -256
  593. data/ext/v8/upstream/v8/src/safepoint-table.h +0 -269
  594. data/ext/v8/upstream/v8/src/scanner-base.cc +0 -964
  595. data/ext/v8/upstream/v8/src/scanner-base.h +0 -664
  596. data/ext/v8/upstream/v8/src/scanner.cc +0 -584
  597. data/ext/v8/upstream/v8/src/scanner.h +0 -196
  598. data/ext/v8/upstream/v8/src/scopeinfo.cc +0 -631
  599. data/ext/v8/upstream/v8/src/scopeinfo.h +0 -249
  600. data/ext/v8/upstream/v8/src/scopes.cc +0 -1093
  601. data/ext/v8/upstream/v8/src/scopes.h +0 -508
  602. data/ext/v8/upstream/v8/src/serialize.cc +0 -1574
  603. data/ext/v8/upstream/v8/src/serialize.h +0 -589
  604. data/ext/v8/upstream/v8/src/shell.h +0 -55
  605. data/ext/v8/upstream/v8/src/simulator.h +0 -43
  606. data/ext/v8/upstream/v8/src/small-pointer-list.h +0 -163
  607. data/ext/v8/upstream/v8/src/smart-pointer.h +0 -109
  608. data/ext/v8/upstream/v8/src/snapshot-common.cc +0 -82
  609. data/ext/v8/upstream/v8/src/snapshot-empty.cc +0 -50
  610. data/ext/v8/upstream/v8/src/snapshot.h +0 -73
  611. data/ext/v8/upstream/v8/src/spaces-inl.h +0 -529
  612. data/ext/v8/upstream/v8/src/spaces.cc +0 -3147
  613. data/ext/v8/upstream/v8/src/spaces.h +0 -2368
  614. data/ext/v8/upstream/v8/src/splay-tree-inl.h +0 -310
  615. data/ext/v8/upstream/v8/src/splay-tree.h +0 -203
  616. data/ext/v8/upstream/v8/src/string-search.cc +0 -41
  617. data/ext/v8/upstream/v8/src/string-search.h +0 -568
  618. data/ext/v8/upstream/v8/src/string-stream.cc +0 -592
  619. data/ext/v8/upstream/v8/src/string-stream.h +0 -191
  620. data/ext/v8/upstream/v8/src/string.js +0 -915
  621. data/ext/v8/upstream/v8/src/strtod.cc +0 -440
  622. data/ext/v8/upstream/v8/src/strtod.h +0 -40
  623. data/ext/v8/upstream/v8/src/stub-cache.cc +0 -1940
  624. data/ext/v8/upstream/v8/src/stub-cache.h +0 -866
  625. data/ext/v8/upstream/v8/src/third_party/valgrind/valgrind.h +0 -3925
  626. data/ext/v8/upstream/v8/src/token.cc +0 -63
  627. data/ext/v8/upstream/v8/src/token.h +0 -288
  628. data/ext/v8/upstream/v8/src/top.cc +0 -983
  629. data/ext/v8/upstream/v8/src/type-info.cc +0 -472
  630. data/ext/v8/upstream/v8/src/type-info.h +0 -290
  631. data/ext/v8/upstream/v8/src/unbound-queue-inl.h +0 -95
  632. data/ext/v8/upstream/v8/src/unbound-queue.h +0 -67
  633. data/ext/v8/upstream/v8/src/unicode-inl.h +0 -238
  634. data/ext/v8/upstream/v8/src/unicode.cc +0 -1624
  635. data/ext/v8/upstream/v8/src/unicode.h +0 -280
  636. data/ext/v8/upstream/v8/src/uri.js +0 -402
  637. data/ext/v8/upstream/v8/src/utils.cc +0 -371
  638. data/ext/v8/upstream/v8/src/utils.h +0 -796
  639. data/ext/v8/upstream/v8/src/v8-counters.cc +0 -62
  640. data/ext/v8/upstream/v8/src/v8-counters.h +0 -311
  641. data/ext/v8/upstream/v8/src/v8.cc +0 -215
  642. data/ext/v8/upstream/v8/src/v8.h +0 -130
  643. data/ext/v8/upstream/v8/src/v8checks.h +0 -64
  644. data/ext/v8/upstream/v8/src/v8dll-main.cc +0 -39
  645. data/ext/v8/upstream/v8/src/v8globals.h +0 -486
  646. data/ext/v8/upstream/v8/src/v8memory.h +0 -82
  647. data/ext/v8/upstream/v8/src/v8natives.js +0 -1293
  648. data/ext/v8/upstream/v8/src/v8preparserdll-main.cc +0 -39
  649. data/ext/v8/upstream/v8/src/v8threads.cc +0 -453
  650. data/ext/v8/upstream/v8/src/v8threads.h +0 -164
  651. data/ext/v8/upstream/v8/src/v8utils.h +0 -317
  652. data/ext/v8/upstream/v8/src/variables.cc +0 -132
  653. data/ext/v8/upstream/v8/src/variables.h +0 -212
  654. data/ext/v8/upstream/v8/src/version.cc +0 -116
  655. data/ext/v8/upstream/v8/src/version.h +0 -68
  656. data/ext/v8/upstream/v8/src/virtual-frame-heavy-inl.h +0 -190
  657. data/ext/v8/upstream/v8/src/virtual-frame-heavy.cc +0 -312
  658. data/ext/v8/upstream/v8/src/virtual-frame-inl.h +0 -39
  659. data/ext/v8/upstream/v8/src/virtual-frame-light-inl.h +0 -171
  660. data/ext/v8/upstream/v8/src/virtual-frame-light.cc +0 -52
  661. data/ext/v8/upstream/v8/src/virtual-frame.cc +0 -49
  662. data/ext/v8/upstream/v8/src/virtual-frame.h +0 -59
  663. data/ext/v8/upstream/v8/src/vm-state-inl.h +0 -138
  664. data/ext/v8/upstream/v8/src/vm-state.h +0 -70
  665. data/ext/v8/upstream/v8/src/win32-headers.h +0 -96
  666. data/ext/v8/upstream/v8/src/x64/assembler-x64-inl.h +0 -456
  667. data/ext/v8/upstream/v8/src/x64/assembler-x64.cc +0 -2954
  668. data/ext/v8/upstream/v8/src/x64/assembler-x64.h +0 -1630
  669. data/ext/v8/upstream/v8/src/x64/builtins-x64.cc +0 -1493
  670. data/ext/v8/upstream/v8/src/x64/code-stubs-x64.cc +0 -5132
  671. data/ext/v8/upstream/v8/src/x64/code-stubs-x64.h +0 -477
  672. data/ext/v8/upstream/v8/src/x64/codegen-x64-inl.h +0 -46
  673. data/ext/v8/upstream/v8/src/x64/codegen-x64.cc +0 -8843
  674. data/ext/v8/upstream/v8/src/x64/codegen-x64.h +0 -753
  675. data/ext/v8/upstream/v8/src/x64/cpu-x64.cc +0 -88
  676. data/ext/v8/upstream/v8/src/x64/debug-x64.cc +0 -318
  677. data/ext/v8/upstream/v8/src/x64/deoptimizer-x64.cc +0 -815
  678. data/ext/v8/upstream/v8/src/x64/disasm-x64.cc +0 -1752
  679. data/ext/v8/upstream/v8/src/x64/frames-x64.cc +0 -45
  680. data/ext/v8/upstream/v8/src/x64/frames-x64.h +0 -130
  681. data/ext/v8/upstream/v8/src/x64/full-codegen-x64.cc +0 -4339
  682. data/ext/v8/upstream/v8/src/x64/ic-x64.cc +0 -1752
  683. data/ext/v8/upstream/v8/src/x64/jump-target-x64.cc +0 -437
  684. data/ext/v8/upstream/v8/src/x64/lithium-codegen-x64.cc +0 -3970
  685. data/ext/v8/upstream/v8/src/x64/lithium-codegen-x64.h +0 -318
  686. data/ext/v8/upstream/v8/src/x64/lithium-gap-resolver-x64.cc +0 -320
  687. data/ext/v8/upstream/v8/src/x64/lithium-gap-resolver-x64.h +0 -74
  688. data/ext/v8/upstream/v8/src/x64/lithium-x64.cc +0 -2115
  689. data/ext/v8/upstream/v8/src/x64/lithium-x64.h +0 -2161
  690. data/ext/v8/upstream/v8/src/x64/macro-assembler-x64.cc +0 -2911
  691. data/ext/v8/upstream/v8/src/x64/macro-assembler-x64.h +0 -1984
  692. data/ext/v8/upstream/v8/src/x64/regexp-macro-assembler-x64.cc +0 -1398
  693. data/ext/v8/upstream/v8/src/x64/regexp-macro-assembler-x64.h +0 -282
  694. data/ext/v8/upstream/v8/src/x64/register-allocator-x64-inl.h +0 -87
  695. data/ext/v8/upstream/v8/src/x64/register-allocator-x64.cc +0 -95
  696. data/ext/v8/upstream/v8/src/x64/register-allocator-x64.h +0 -43
  697. data/ext/v8/upstream/v8/src/x64/simulator-x64.cc +0 -27
  698. data/ext/v8/upstream/v8/src/x64/simulator-x64.h +0 -71
  699. data/ext/v8/upstream/v8/src/x64/stub-cache-x64.cc +0 -3460
  700. data/ext/v8/upstream/v8/src/x64/virtual-frame-x64.cc +0 -1296
  701. data/ext/v8/upstream/v8/src/x64/virtual-frame-x64.h +0 -597
  702. data/ext/v8/upstream/v8/src/zone-inl.h +0 -129
  703. data/ext/v8/upstream/v8/src/zone.cc +0 -196
  704. data/ext/v8/upstream/v8/src/zone.h +0 -236
  705. data/ext/v8/upstream/v8/tools/codemap.js +0 -265
  706. data/ext/v8/upstream/v8/tools/consarray.js +0 -93
  707. data/ext/v8/upstream/v8/tools/csvparser.js +0 -78
  708. data/ext/v8/upstream/v8/tools/disasm.py +0 -92
  709. data/ext/v8/upstream/v8/tools/freebsd-tick-processor +0 -10
  710. data/ext/v8/upstream/v8/tools/gc-nvp-trace-processor.py +0 -328
  711. data/ext/v8/upstream/v8/tools/generate-ten-powers.scm +0 -286
  712. data/ext/v8/upstream/v8/tools/grokdump.py +0 -840
  713. data/ext/v8/upstream/v8/tools/gyp/v8.gyp +0 -844
  714. data/ext/v8/upstream/v8/tools/js2c.py +0 -380
  715. data/ext/v8/upstream/v8/tools/jsmin.py +0 -280
  716. data/ext/v8/upstream/v8/tools/linux-tick-processor +0 -35
  717. data/ext/v8/upstream/v8/tools/ll_prof.py +0 -919
  718. data/ext/v8/upstream/v8/tools/logreader.js +0 -185
  719. data/ext/v8/upstream/v8/tools/mac-nm +0 -18
  720. data/ext/v8/upstream/v8/tools/mac-tick-processor +0 -6
  721. data/ext/v8/upstream/v8/tools/oom_dump/README +0 -31
  722. data/ext/v8/upstream/v8/tools/oom_dump/SConstruct +0 -42
  723. data/ext/v8/upstream/v8/tools/oom_dump/oom_dump.cc +0 -288
  724. data/ext/v8/upstream/v8/tools/presubmit.py +0 -305
  725. data/ext/v8/upstream/v8/tools/process-heap-prof.py +0 -120
  726. data/ext/v8/upstream/v8/tools/profile.js +0 -751
  727. data/ext/v8/upstream/v8/tools/profile_view.js +0 -219
  728. data/ext/v8/upstream/v8/tools/run-valgrind.py +0 -77
  729. data/ext/v8/upstream/v8/tools/splaytree.js +0 -316
  730. data/ext/v8/upstream/v8/tools/stats-viewer.py +0 -468
  731. data/ext/v8/upstream/v8/tools/test.py +0 -1490
  732. data/ext/v8/upstream/v8/tools/tickprocessor-driver.js +0 -59
  733. data/ext/v8/upstream/v8/tools/tickprocessor.js +0 -877
  734. data/ext/v8/upstream/v8/tools/utils.py +0 -96
  735. data/ext/v8/upstream/v8/tools/visual_studio/README.txt +0 -70
  736. data/ext/v8/upstream/v8/tools/visual_studio/arm.vsprops +0 -14
  737. data/ext/v8/upstream/v8/tools/visual_studio/common.vsprops +0 -34
  738. data/ext/v8/upstream/v8/tools/visual_studio/d8.vcproj +0 -193
  739. data/ext/v8/upstream/v8/tools/visual_studio/d8_arm.vcproj +0 -193
  740. data/ext/v8/upstream/v8/tools/visual_studio/d8_x64.vcproj +0 -209
  741. data/ext/v8/upstream/v8/tools/visual_studio/d8js2c.cmd +0 -6
  742. data/ext/v8/upstream/v8/tools/visual_studio/debug.vsprops +0 -17
  743. data/ext/v8/upstream/v8/tools/visual_studio/ia32.vsprops +0 -17
  744. data/ext/v8/upstream/v8/tools/visual_studio/js2c.cmd +0 -6
  745. data/ext/v8/upstream/v8/tools/visual_studio/release.vsprops +0 -24
  746. data/ext/v8/upstream/v8/tools/visual_studio/v8.sln +0 -101
  747. data/ext/v8/upstream/v8/tools/visual_studio/v8.vcproj +0 -227
  748. data/ext/v8/upstream/v8/tools/visual_studio/v8_arm.sln +0 -74
  749. data/ext/v8/upstream/v8/tools/visual_studio/v8_arm.vcproj +0 -227
  750. data/ext/v8/upstream/v8/tools/visual_studio/v8_base.vcproj +0 -1308
  751. data/ext/v8/upstream/v8/tools/visual_studio/v8_base_arm.vcproj +0 -1238
  752. data/ext/v8/upstream/v8/tools/visual_studio/v8_base_x64.vcproj +0 -1300
  753. data/ext/v8/upstream/v8/tools/visual_studio/v8_cctest.vcproj +0 -265
  754. data/ext/v8/upstream/v8/tools/visual_studio/v8_cctest_arm.vcproj +0 -249
  755. data/ext/v8/upstream/v8/tools/visual_studio/v8_cctest_x64.vcproj +0 -257
  756. data/ext/v8/upstream/v8/tools/visual_studio/v8_mksnapshot.vcproj +0 -145
  757. data/ext/v8/upstream/v8/tools/visual_studio/v8_mksnapshot_x64.vcproj +0 -145
  758. data/ext/v8/upstream/v8/tools/visual_studio/v8_process_sample.vcproj +0 -145
  759. data/ext/v8/upstream/v8/tools/visual_studio/v8_process_sample_arm.vcproj +0 -145
  760. data/ext/v8/upstream/v8/tools/visual_studio/v8_process_sample_x64.vcproj +0 -161
  761. data/ext/v8/upstream/v8/tools/visual_studio/v8_shell_sample.vcproj +0 -147
  762. data/ext/v8/upstream/v8/tools/visual_studio/v8_shell_sample_arm.vcproj +0 -147
  763. data/ext/v8/upstream/v8/tools/visual_studio/v8_shell_sample_x64.vcproj +0 -163
  764. data/ext/v8/upstream/v8/tools/visual_studio/v8_snapshot.vcproj +0 -142
  765. data/ext/v8/upstream/v8/tools/visual_studio/v8_snapshot_cc.vcproj +0 -92
  766. data/ext/v8/upstream/v8/tools/visual_studio/v8_snapshot_cc_x64.vcproj +0 -92
  767. data/ext/v8/upstream/v8/tools/visual_studio/v8_snapshot_x64.vcproj +0 -142
  768. data/ext/v8/upstream/v8/tools/visual_studio/v8_x64.sln +0 -101
  769. data/ext/v8/upstream/v8/tools/visual_studio/v8_x64.vcproj +0 -227
  770. data/ext/v8/upstream/v8/tools/visual_studio/x64.vsprops +0 -18
  771. data/ext/v8/upstream/v8/tools/windows-tick-processor.bat +0 -30
data/ext/v8/upstream/v8/src/spaces.cc
@@ -1,3147 +0,0 @@
1
- // Copyright 2006-2010 the V8 project authors. All rights reserved.
2
- // Redistribution and use in source and binary forms, with or without
3
- // modification, are permitted provided that the following conditions are
4
- // met:
5
- //
6
- // * Redistributions of source code must retain the above copyright
7
- // notice, this list of conditions and the following disclaimer.
8
- // * Redistributions in binary form must reproduce the above
9
- // copyright notice, this list of conditions and the following
10
- // disclaimer in the documentation and/or other materials provided
11
- // with the distribution.
12
- // * Neither the name of Google Inc. nor the names of its
13
- // contributors may be used to endorse or promote products derived
14
- // from this software without specific prior written permission.
15
- //
16
- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
- // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
- // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
- // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
- // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
- // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
- // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
- // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
- // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
- // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
-
28
- #include "v8.h"
29
-
30
- #include "liveobjectlist-inl.h"
31
- #include "macro-assembler.h"
32
- #include "mark-compact.h"
33
- #include "platform.h"
34
-
35
- namespace v8 {
36
- namespace internal {
37
-
38
- // For contiguous spaces, top should be in the space (or at the end) and limit
39
- // should be the end of the space.
40
- #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
41
- ASSERT((space).low() <= (info).top \
42
- && (info).top <= (space).high() \
43
- && (info).limit == (space).high())
44
-
45
- // ----------------------------------------------------------------------------
46
- // HeapObjectIterator
47
-
48
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
49
- Initialize(space->bottom(), space->top(), NULL);
50
- }
51
-
52
-
53
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
54
- HeapObjectCallback size_func) {
55
- Initialize(space->bottom(), space->top(), size_func);
56
- }
57
-
58
-
59
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
60
- Initialize(start, space->top(), NULL);
61
- }
62
-
63
-
64
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
65
- HeapObjectCallback size_func) {
66
- Initialize(start, space->top(), size_func);
67
- }
68
-
69
-
70
- HeapObjectIterator::HeapObjectIterator(Page* page,
71
- HeapObjectCallback size_func) {
72
- Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
73
- }
74
-
75
-
76
- void HeapObjectIterator::Initialize(Address cur, Address end,
77
- HeapObjectCallback size_f) {
78
- cur_addr_ = cur;
79
- end_addr_ = end;
80
- end_page_ = Page::FromAllocationTop(end);
81
- size_func_ = size_f;
82
- Page* p = Page::FromAllocationTop(cur_addr_);
83
- cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
84
-
85
- #ifdef DEBUG
86
- Verify();
87
- #endif
88
- }
89
-
90
-
91
- HeapObject* HeapObjectIterator::FromNextPage() {
92
- if (cur_addr_ == end_addr_) return NULL;
93
-
94
- Page* cur_page = Page::FromAllocationTop(cur_addr_);
95
- cur_page = cur_page->next_page();
96
- ASSERT(cur_page->is_valid());
97
-
98
- cur_addr_ = cur_page->ObjectAreaStart();
99
- cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
100
-
101
- if (cur_addr_ == end_addr_) return NULL;
102
- ASSERT(cur_addr_ < cur_limit_);
103
- #ifdef DEBUG
104
- Verify();
105
- #endif
106
- return FromCurrentPage();
107
- }
108
-
109
-
110
- #ifdef DEBUG
111
- void HeapObjectIterator::Verify() {
112
- Page* p = Page::FromAllocationTop(cur_addr_);
113
- ASSERT(p == Page::FromAllocationTop(cur_limit_));
114
- ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
115
- }
116
- #endif
117
-
118
-
119
- // -----------------------------------------------------------------------------
120
- // PageIterator
121
-
122
- PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
123
- prev_page_ = NULL;
124
- switch (mode) {
125
- case PAGES_IN_USE:
126
- stop_page_ = space->AllocationTopPage();
127
- break;
128
- case PAGES_USED_BY_MC:
129
- stop_page_ = space->MCRelocationTopPage();
130
- break;
131
- case ALL_PAGES:
132
- #ifdef DEBUG
133
- // Verify that the cached last page in the space is actually the
134
- // last page.
135
- for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
136
- if (!p->next_page()->is_valid()) {
137
- ASSERT(space->last_page_ == p);
138
- }
139
- }
140
- #endif
141
- stop_page_ = space->last_page_;
142
- break;
143
- }
144
- }
145
-
146
-
147
- // -----------------------------------------------------------------------------
148
- // CodeRange
149
-
150
-
151
- CodeRange::CodeRange()
152
- : code_range_(NULL),
153
- free_list_(0),
154
- allocation_list_(0),
155
- current_allocation_block_index_(0),
156
- isolate_(NULL) {
157
- }
158
-
159
-
160
- bool CodeRange::Setup(const size_t requested) {
161
- ASSERT(code_range_ == NULL);
162
-
163
- code_range_ = new VirtualMemory(requested);
164
- CHECK(code_range_ != NULL);
165
- if (!code_range_->IsReserved()) {
166
- delete code_range_;
167
- code_range_ = NULL;
168
- return false;
169
- }
170
-
171
- // We are sure that we have mapped a block of requested addresses.
172
- ASSERT(code_range_->size() == requested);
173
- LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
174
- allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
175
- current_allocation_block_index_ = 0;
176
- return true;
177
- }
178
-
179
-
180
- int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
181
- const FreeBlock* right) {
182
- // The entire point of CodeRange is that the difference between two
183
- // addresses in the range can be represented as a signed 32-bit int,
184
- // so the cast is semantically correct.
185
- return static_cast<int>(left->start - right->start);
186
- }
187
-
188
-
189
- void CodeRange::GetNextAllocationBlock(size_t requested) {
190
- for (current_allocation_block_index_++;
191
- current_allocation_block_index_ < allocation_list_.length();
192
- current_allocation_block_index_++) {
193
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
194
- return; // Found a large enough allocation block.
195
- }
196
- }
197
-
198
- // Sort and merge the free blocks on the free list and the allocation list.
199
- free_list_.AddAll(allocation_list_);
200
- allocation_list_.Clear();
201
- free_list_.Sort(&CompareFreeBlockAddress);
202
- for (int i = 0; i < free_list_.length();) {
203
- FreeBlock merged = free_list_[i];
204
- i++;
205
- // Add adjacent free blocks to the current merged block.
206
- while (i < free_list_.length() &&
207
- free_list_[i].start == merged.start + merged.size) {
208
- merged.size += free_list_[i].size;
209
- i++;
210
- }
211
- if (merged.size > 0) {
212
- allocation_list_.Add(merged);
213
- }
214
- }
215
- free_list_.Clear();
216
-
217
- for (current_allocation_block_index_ = 0;
218
- current_allocation_block_index_ < allocation_list_.length();
219
- current_allocation_block_index_++) {
220
- if (requested <= allocation_list_[current_allocation_block_index_].size) {
221
- return; // Found a large enough allocation block.
222
- }
223
- }
224
-
225
- // Code range is full or too fragmented.
226
- V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
227
- }
228
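
The sort-and-merge loop above is how CodeRange defragments its bookkeeping: all free blocks are sorted by start address and runs of exactly adjacent blocks are fused before the allocation list is rebuilt. Below is a minimal standalone sketch of that coalescing pass, using std::vector in place of V8's List type; the FreeBlock fields and sample addresses are illustrative only.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct FreeBlock {
  uintptr_t start;
  size_t size;
};

// Sort blocks by start address and merge runs that are exactly adjacent,
// mirroring the merge loop in CodeRange::GetNextAllocationBlock.
static std::vector<FreeBlock> Coalesce(std::vector<FreeBlock> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const FreeBlock& a, const FreeBlock& b) { return a.start < b.start; });
  std::vector<FreeBlock> merged;
  for (size_t i = 0; i < blocks.size();) {
    FreeBlock current = blocks[i++];
    while (i < blocks.size() && blocks[i].start == current.start + current.size) {
      current.size += blocks[i++].size;  // absorb the adjacent block
    }
    if (current.size > 0) merged.push_back(current);
  }
  return merged;
}

int main() {
  // Two of the three blocks are adjacent, so coalescing yields two blocks.
  std::vector<FreeBlock> blocks = {{0x5000, 0x1000}, {0x1000, 0x1000}, {0x2000, 0x1000}};
  for (const FreeBlock& b : Coalesce(blocks)) {
    std::printf("block at %#llx, size %#llx\n",
                static_cast<unsigned long long>(b.start),
                static_cast<unsigned long long>(b.size));
  }
  return 0;
}
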
-
229
-
230
-
231
- void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
232
- ASSERT(current_allocation_block_index_ < allocation_list_.length());
233
- if (requested > allocation_list_[current_allocation_block_index_].size) {
234
- // Find an allocation block large enough. This function call may
235
- // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
236
- GetNextAllocationBlock(requested);
237
- }
238
- // Commit the requested memory at the start of the current allocation block.
239
- *allocated = RoundUp(requested, Page::kPageSize);
240
- FreeBlock current = allocation_list_[current_allocation_block_index_];
241
- if (*allocated >= current.size - Page::kPageSize) {
242
- // Don't leave a small free block, useless for a large object or chunk.
243
- *allocated = current.size;
244
- }
245
- ASSERT(*allocated <= current.size);
246
- if (!code_range_->Commit(current.start, *allocated, true)) {
247
- *allocated = 0;
248
- return NULL;
249
- }
250
- allocation_list_[current_allocation_block_index_].start += *allocated;
251
- allocation_list_[current_allocation_block_index_].size -= *allocated;
252
- if (*allocated == current.size) {
253
- GetNextAllocationBlock(0); // This block is used up, get the next one.
254
- }
255
- return current.start;
256
- }
257
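
AllocateRawMemory above rounds each request up to whole pages and, when the tail that would remain in the current free block is smaller than one page, hands out the entire block rather than leave an unusable sliver. A rough sketch of that rounding policy, assuming a 4 KB page size (an illustrative value, not V8's constant):

#include <cstddef>
#include <cstdio>

const size_t kPageSize = 4096;  // illustrative page size

size_t RoundUpToPage(size_t n) { return (n + kPageSize - 1) & ~(kPageSize - 1); }

// How many bytes an allocation takes out of a free block: round the request
// up to pages, then absorb a remaining tail smaller than one page.
// Assumes the rounded request fits within block_size.
size_t BytesToTake(size_t requested, size_t block_size) {
  size_t allocated = RoundUpToPage(requested);
  if (allocated >= block_size - kPageSize) allocated = block_size;
  return allocated;
}

int main() {
  std::printf("%zu\n", BytesToTake(10000, 64 * kPageSize));               // 3 pages
  std::printf("%zu\n", BytesToTake(62 * kPageSize + 1, 64 * kPageSize));  // whole block
  return 0;
}
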
-
258
-
259
- void CodeRange::FreeRawMemory(void* address, size_t length) {
260
- free_list_.Add(FreeBlock(address, length));
261
- code_range_->Uncommit(address, length);
262
- }
263
-
264
-
265
- void CodeRange::TearDown() {
266
- delete code_range_; // Frees all memory in the virtual memory range.
267
- code_range_ = NULL;
268
- free_list_.Free();
269
- allocation_list_.Free();
270
- }
271
-
272
-
273
- // -----------------------------------------------------------------------------
274
- // MemoryAllocator
275
- //
276
-
277
- // 270 is an estimate based on the static default heap size of a pair of 256K
278
- // semispaces and a 64M old generation.
279
- const int kEstimatedNumberOfChunks = 270;
280
-
281
-
282
- MemoryAllocator::MemoryAllocator()
283
- : capacity_(0),
284
- capacity_executable_(0),
285
- size_(0),
286
- size_executable_(0),
287
- initial_chunk_(NULL),
288
- chunks_(kEstimatedNumberOfChunks),
289
- free_chunk_ids_(kEstimatedNumberOfChunks),
290
- max_nof_chunks_(0),
291
- top_(0),
292
- isolate_(NULL) {
293
- }
294
-
295
-
296
- void MemoryAllocator::Push(int free_chunk_id) {
297
- ASSERT(max_nof_chunks_ > 0);
298
- ASSERT(top_ < max_nof_chunks_);
299
- free_chunk_ids_[top_++] = free_chunk_id;
300
- }
301
-
302
-
303
- int MemoryAllocator::Pop() {
304
- ASSERT(top_ > 0);
305
- return free_chunk_ids_[--top_];
306
- }
307
-
308
-
309
- bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
310
- capacity_ = RoundUp(capacity, Page::kPageSize);
311
- capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
312
- ASSERT_GE(capacity_, capacity_executable_);
313
-
314
- // Over-estimate the size of chunks_ array. It assumes the expansion of old
315
- // space is always in the unit of a chunk (kChunkSize) except the last
316
- // expansion.
317
- //
318
- // Due to alignment, allocated space might be one page less than required
319
- // number (kPagesPerChunk) of pages for old spaces.
320
- //
321
- // Reserve two chunk ids for semispaces, one for map space, one for old
322
- // space, and one for code space.
323
- max_nof_chunks_ =
324
- static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
325
- if (max_nof_chunks_ > kMaxNofChunks) return false;
326
-
327
- size_ = 0;
328
- size_executable_ = 0;
329
- ChunkInfo info; // uninitialized element.
330
- for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
331
- chunks_.Add(info);
332
- free_chunk_ids_.Add(i);
333
- }
334
- top_ = max_nof_chunks_;
335
- return true;
336
- }
337
-
338
-
339
- void MemoryAllocator::TearDown() {
340
- for (int i = 0; i < max_nof_chunks_; i++) {
341
- if (chunks_[i].address() != NULL) DeleteChunk(i);
342
- }
343
- chunks_.Clear();
344
- free_chunk_ids_.Clear();
345
-
346
- if (initial_chunk_ != NULL) {
347
- LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
348
- delete initial_chunk_;
349
- initial_chunk_ = NULL;
350
- }
351
-
352
- ASSERT(top_ == max_nof_chunks_); // all chunks are free
353
- top_ = 0;
354
- capacity_ = 0;
355
- capacity_executable_ = 0;
356
- size_ = 0;
357
- max_nof_chunks_ = 0;
358
- }
359
-
360
-
361
- void* MemoryAllocator::AllocateRawMemory(const size_t requested,
362
- size_t* allocated,
363
- Executability executable) {
364
- if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
365
- return NULL;
366
- }
367
-
368
- void* mem;
369
- if (executable == EXECUTABLE) {
370
- // Check executable memory limit.
371
- if (size_executable_ + requested >
372
- static_cast<size_t>(capacity_executable_)) {
373
- LOG(isolate_,
374
- StringEvent("MemoryAllocator::AllocateRawMemory",
375
- "V8 Executable Allocation capacity exceeded"));
376
- return NULL;
377
- }
378
- // Allocate executable memory either from code range or from the
379
- // OS.
380
- if (isolate_->code_range()->exists()) {
381
- mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
382
- } else {
383
- mem = OS::Allocate(requested, allocated, true);
384
- }
385
- // Update executable memory size.
386
- size_executable_ += static_cast<int>(*allocated);
387
- } else {
388
- mem = OS::Allocate(requested, allocated, false);
389
- }
390
- int alloced = static_cast<int>(*allocated);
391
- size_ += alloced;
392
-
393
- #ifdef DEBUG
394
- ZapBlock(reinterpret_cast<Address>(mem), alloced);
395
- #endif
396
- isolate_->counters()->memory_allocated()->Increment(alloced);
397
- return mem;
398
- }
399
-
400
-
401
- void MemoryAllocator::FreeRawMemory(void* mem,
402
- size_t length,
403
- Executability executable) {
404
- #ifdef DEBUG
405
- ZapBlock(reinterpret_cast<Address>(mem), length);
406
- #endif
407
- if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
408
- isolate_->code_range()->FreeRawMemory(mem, length);
409
- } else {
410
- OS::Free(mem, length);
411
- }
412
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
413
- size_ -= static_cast<int>(length);
414
- if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
415
-
416
- ASSERT(size_ >= 0);
417
- ASSERT(size_executable_ >= 0);
418
- }
419
-
420
-
421
- void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
422
- AllocationAction action,
423
- size_t size) {
424
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
425
- MemoryAllocationCallbackRegistration registration =
426
- memory_allocation_callbacks_[i];
427
- if ((registration.space & space) == space &&
428
- (registration.action & action) == action)
429
- registration.callback(space, action, static_cast<int>(size));
430
- }
431
- }
432
-
433
-
434
- bool MemoryAllocator::MemoryAllocationCallbackRegistered(
435
- MemoryAllocationCallback callback) {
436
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
437
- if (memory_allocation_callbacks_[i].callback == callback) return true;
438
- }
439
- return false;
440
- }
441
-
442
-
443
- void MemoryAllocator::AddMemoryAllocationCallback(
444
- MemoryAllocationCallback callback,
445
- ObjectSpace space,
446
- AllocationAction action) {
447
- ASSERT(callback != NULL);
448
- MemoryAllocationCallbackRegistration registration(callback, space, action);
449
- ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
450
- return memory_allocation_callbacks_.Add(registration);
451
- }
452
-
453
-
454
- void MemoryAllocator::RemoveMemoryAllocationCallback(
455
- MemoryAllocationCallback callback) {
456
- ASSERT(callback != NULL);
457
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
458
- if (memory_allocation_callbacks_[i].callback == callback) {
459
- memory_allocation_callbacks_.Remove(i);
460
- return;
461
- }
462
- }
463
- UNREACHABLE();
464
- }
465
-
466
- void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
467
- ASSERT(initial_chunk_ == NULL);
468
-
469
- initial_chunk_ = new VirtualMemory(requested);
470
- CHECK(initial_chunk_ != NULL);
471
- if (!initial_chunk_->IsReserved()) {
472
- delete initial_chunk_;
473
- initial_chunk_ = NULL;
474
- return NULL;
475
- }
476
-
477
- // We are sure that we have mapped a block of requested addresses.
478
- ASSERT(initial_chunk_->size() == requested);
479
- LOG(isolate_,
480
- NewEvent("InitialChunk", initial_chunk_->address(), requested));
481
- size_ += static_cast<int>(requested);
482
- return initial_chunk_->address();
483
- }
484
-
485
-
486
- static int PagesInChunk(Address start, size_t size) {
487
- // The first page starts on the first page-aligned address from start onward
488
- // and the last page ends on the last page-aligned address before
489
- // start+size. Page::kPageSize is a power of two so we can divide by
490
- // shifting.
491
- return static_cast<int>((RoundDown(start + size, Page::kPageSize)
492
- - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
493
- }
494
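
PagesInChunk works because Page::kPageSize is a power of two: the page count is just the distance between the first page-aligned address at or after start and the last page-aligned address at or before start + size, shifted right by the page-size bits. A self-contained version of the same computation, with a 4 KB page size assumed for the example:

#include <cstdint>
#include <cstdio>

const uintptr_t kPageSize = 1 << 12;  // illustrative 4 KB pages
const int kPageSizeBits = 12;

uintptr_t RoundUp(uintptr_t x)   { return (x + kPageSize - 1) & ~(kPageSize - 1); }
uintptr_t RoundDown(uintptr_t x) { return x & ~(kPageSize - 1); }

// Number of whole pages that fit inside [start, start + size).
int PagesIn(uintptr_t start, uintptr_t size) {
  return static_cast<int>((RoundDown(start + size) - RoundUp(start)) >> kPageSizeBits);
}

int main() {
  // A 40 KB chunk starting 100 bytes past a page boundary holds 9 whole pages.
  std::printf("%d\n", PagesIn(0x10000 + 100, 40 * 1024));
  return 0;
}
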
-
495
-
496
- Page* MemoryAllocator::AllocatePages(int requested_pages,
497
- int* allocated_pages,
498
- PagedSpace* owner) {
499
- if (requested_pages <= 0) return Page::FromAddress(NULL);
500
- size_t chunk_size = requested_pages * Page::kPageSize;
501
-
502
- void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
503
- if (chunk == NULL) return Page::FromAddress(NULL);
504
- LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
505
-
506
- *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
507
- // We may 'lose' a page due to alignment.
508
- ASSERT(*allocated_pages >= kPagesPerChunk - 1);
509
- if (*allocated_pages == 0) {
510
- FreeRawMemory(chunk, chunk_size, owner->executable());
511
- LOG(isolate_, DeleteEvent("PagedChunk", chunk));
512
- return Page::FromAddress(NULL);
513
- }
514
-
515
- int chunk_id = Pop();
516
- chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
517
-
518
- ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
519
- PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
520
- Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
521
-
522
- return new_pages;
523
- }
524
-
525
-
526
- Page* MemoryAllocator::CommitPages(Address start, size_t size,
527
- PagedSpace* owner, int* num_pages) {
528
- ASSERT(start != NULL);
529
- *num_pages = PagesInChunk(start, size);
530
- ASSERT(*num_pages > 0);
531
- ASSERT(initial_chunk_ != NULL);
532
- ASSERT(InInitialChunk(start));
533
- ASSERT(InInitialChunk(start + size - 1));
534
- if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
535
- return Page::FromAddress(NULL);
536
- }
537
- #ifdef DEBUG
538
- ZapBlock(start, size);
539
- #endif
540
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
541
-
542
- // So long as we correctly overestimated the number of chunks, we should not
543
- // run out of chunk ids.
544
- CHECK(!OutOfChunkIds());
545
- int chunk_id = Pop();
546
- chunks_[chunk_id].init(start, size, owner);
547
- return InitializePagesInChunk(chunk_id, *num_pages, owner);
548
- }
549
-
550
-
551
- bool MemoryAllocator::CommitBlock(Address start,
552
- size_t size,
553
- Executability executable) {
554
- ASSERT(start != NULL);
555
- ASSERT(size > 0);
556
- ASSERT(initial_chunk_ != NULL);
557
- ASSERT(InInitialChunk(start));
558
- ASSERT(InInitialChunk(start + size - 1));
559
-
560
- if (!initial_chunk_->Commit(start, size, executable)) return false;
561
- #ifdef DEBUG
562
- ZapBlock(start, size);
563
- #endif
564
- isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
565
- return true;
566
- }
567
-
568
-
569
- bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
570
- ASSERT(start != NULL);
571
- ASSERT(size > 0);
572
- ASSERT(initial_chunk_ != NULL);
573
- ASSERT(InInitialChunk(start));
574
- ASSERT(InInitialChunk(start + size - 1));
575
-
576
- if (!initial_chunk_->Uncommit(start, size)) return false;
577
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
578
- return true;
579
- }
580
-
581
-
582
- void MemoryAllocator::ZapBlock(Address start, size_t size) {
583
- for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
584
- Memory::Address_at(start + s) = kZapValue;
585
- }
586
- }
587
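
ZapBlock is a debug-only helper: it overwrites every pointer-sized slot of a block with a recognizable sentinel so that stale reads of freed or freshly committed memory fail loudly. A tiny standalone version of the same idea (the sentinel value here is made up, not V8's kZapValue):

#include <cstddef>
#include <cstdint>
#include <cstdio>

const uintptr_t kZapValue = 0xdeadbeef;  // illustrative sentinel

// Fill a block with the sentinel, one pointer-sized word at a time.
void ZapBlock(void* start, size_t size) {
  uintptr_t* words = static_cast<uintptr_t*>(start);
  for (size_t s = 0; s + sizeof(uintptr_t) <= size; s += sizeof(uintptr_t)) {
    words[s / sizeof(uintptr_t)] = kZapValue;
  }
}

int main() {
  uintptr_t buffer[4] = {1, 2, 3, 4};
  ZapBlock(buffer, sizeof(buffer));
  std::printf("%#llx\n", static_cast<unsigned long long>(buffer[0]));  // 0xdeadbeef
  return 0;
}
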
-
588
-
589
- Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
590
- PagedSpace* owner) {
591
- ASSERT(IsValidChunk(chunk_id));
592
- ASSERT(pages_in_chunk > 0);
593
-
594
- Address chunk_start = chunks_[chunk_id].address();
595
-
596
- Address low = RoundUp(chunk_start, Page::kPageSize);
597
-
598
- #ifdef DEBUG
599
- size_t chunk_size = chunks_[chunk_id].size();
600
- Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
601
- ASSERT(pages_in_chunk <=
602
- ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
603
- #endif
604
-
605
- Address page_addr = low;
606
- for (int i = 0; i < pages_in_chunk; i++) {
607
- Page* p = Page::FromAddress(page_addr);
608
- p->heap_ = owner->heap();
609
- p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
610
- p->InvalidateWatermark(true);
611
- p->SetIsLargeObjectPage(false);
612
- p->SetAllocationWatermark(p->ObjectAreaStart());
613
- p->SetCachedAllocationWatermark(p->ObjectAreaStart());
614
- page_addr += Page::kPageSize;
615
- }
616
-
617
- // Set the next page of the last page to 0.
618
- Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
619
- last_page->opaque_header = OffsetFrom(0) | chunk_id;
620
-
621
- return Page::FromAddress(low);
622
- }
623
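
The opaque_header assignment above works because page addresses are aligned to Page::kPageSize: the low bits of an aligned address are always zero, so the chunk id can ride in them, and masking recovers either half. A small sketch of that packing scheme, assuming an 8 KB alignment purely for illustration:

#include <cassert>
#include <cstdint>
#include <cstdio>

const uintptr_t kPageAlignment = 1 << 13;  // assumed 8 KB page alignment

// Pack a page-aligned "next page" address and a small chunk id into one word.
uintptr_t PackHeader(uintptr_t next_page, int chunk_id) {
  assert((next_page & (kPageAlignment - 1)) == 0);            // address is aligned
  assert(static_cast<uintptr_t>(chunk_id) < kPageAlignment);  // id fits in the low bits
  return next_page | static_cast<uintptr_t>(chunk_id);
}

uintptr_t NextPage(uintptr_t header) { return header & ~(kPageAlignment - 1); }
int ChunkId(uintptr_t header)        { return static_cast<int>(header & (kPageAlignment - 1)); }

int main() {
  uintptr_t header = PackHeader(0x40000, 42);
  std::printf("next=%#llx id=%d\n",
              static_cast<unsigned long long>(NextPage(header)), ChunkId(header));
  return 0;
}
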
-
624
-
625
- Page* MemoryAllocator::FreePages(Page* p) {
626
- if (!p->is_valid()) return p;
627
-
628
- // Find the first page in the same chunk as 'p'.
629
- Page* first_page = FindFirstPageInSameChunk(p);
630
- Page* page_to_return = Page::FromAddress(NULL);
631
-
632
- if (p != first_page) {
633
- // Find the last page in the same chunk as 'p'.
634
- Page* last_page = FindLastPageInSameChunk(p);
635
- first_page = GetNextPage(last_page); // first page in next chunk
636
-
637
- // Set the next_page of last_page to NULL.
638
- SetNextPage(last_page, Page::FromAddress(NULL));
639
- page_to_return = p; // return 'p' when exiting
640
- }
641
-
642
- while (first_page->is_valid()) {
643
- int chunk_id = GetChunkId(first_page);
644
- ASSERT(IsValidChunk(chunk_id));
645
-
646
- // Find the first page of the next chunk before deleting this chunk.
647
- first_page = GetNextPage(FindLastPageInSameChunk(first_page));
648
-
649
- // Free the current chunk.
650
- DeleteChunk(chunk_id);
651
- }
652
-
653
- return page_to_return;
654
- }
655
-
656
-
657
- void MemoryAllocator::FreeAllPages(PagedSpace* space) {
658
- for (int i = 0, length = chunks_.length(); i < length; i++) {
659
- if (chunks_[i].owner() == space) {
660
- DeleteChunk(i);
661
- }
662
- }
663
- }
664
-
665
-
666
- void MemoryAllocator::DeleteChunk(int chunk_id) {
667
- ASSERT(IsValidChunk(chunk_id));
668
-
669
- ChunkInfo& c = chunks_[chunk_id];
670
-
671
- // We cannot free a chunk contained in the initial chunk because it was not
672
- // allocated with AllocateRawMemory. Instead we uncommit the virtual
673
- // memory.
674
- if (InInitialChunk(c.address())) {
675
- // TODO(1240712): VirtualMemory::Uncommit has a return value which
676
- // is ignored here.
677
- initial_chunk_->Uncommit(c.address(), c.size());
678
- Counters* counters = isolate_->counters();
679
- counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
680
- } else {
681
- LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
682
- ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
683
- size_t size = c.size();
684
- FreeRawMemory(c.address(), size, c.executable());
685
- PerformAllocationCallback(space, kAllocationActionFree, size);
686
- }
687
- c.init(NULL, 0, NULL);
688
- Push(chunk_id);
689
- }
690
-
691
-
692
- Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
693
- int chunk_id = GetChunkId(p);
694
- ASSERT(IsValidChunk(chunk_id));
695
-
696
- Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
697
- return Page::FromAddress(low);
698
- }
699
-
700
-
701
- Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
702
- int chunk_id = GetChunkId(p);
703
- ASSERT(IsValidChunk(chunk_id));
704
-
705
- Address chunk_start = chunks_[chunk_id].address();
706
- size_t chunk_size = chunks_[chunk_id].size();
707
-
708
- Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
709
- ASSERT(chunk_start <= p->address() && p->address() < high);
710
-
711
- return Page::FromAddress(high - Page::kPageSize);
712
- }
713
-
714
-
715
- #ifdef DEBUG
716
- void MemoryAllocator::ReportStatistics() {
717
- float pct = static_cast<float>(capacity_ - size_) / capacity_;
718
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
719
- ", used: %" V8_PTR_PREFIX "d"
720
- ", available: %%%d\n\n",
721
- capacity_, size_, static_cast<int>(pct*100));
722
- }
723
- #endif
724
-
725
-
726
- void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
727
- Page** first_page,
728
- Page** last_page,
729
- Page** last_page_in_use) {
730
- Page* first = NULL;
731
- Page* last = NULL;
732
-
733
- for (int i = 0, length = chunks_.length(); i < length; i++) {
734
- ChunkInfo& chunk = chunks_[i];
735
-
736
- if (chunk.owner() == space) {
737
- if (first == NULL) {
738
- Address low = RoundUp(chunk.address(), Page::kPageSize);
739
- first = Page::FromAddress(low);
740
- }
741
- last = RelinkPagesInChunk(i,
742
- chunk.address(),
743
- chunk.size(),
744
- last,
745
- last_page_in_use);
746
- }
747
- }
748
-
749
- if (first_page != NULL) {
750
- *first_page = first;
751
- }
752
-
753
- if (last_page != NULL) {
754
- *last_page = last;
755
- }
756
- }
757
-
758
-
759
- Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
760
- Address chunk_start,
761
- size_t chunk_size,
762
- Page* prev,
763
- Page** last_page_in_use) {
764
- Address page_addr = RoundUp(chunk_start, Page::kPageSize);
765
- int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
766
-
767
- if (prev->is_valid()) {
768
- SetNextPage(prev, Page::FromAddress(page_addr));
769
- }
770
-
771
- for (int i = 0; i < pages_in_chunk; i++) {
772
- Page* p = Page::FromAddress(page_addr);
773
- p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
774
- page_addr += Page::kPageSize;
775
-
776
- p->InvalidateWatermark(true);
777
- if (p->WasInUseBeforeMC()) {
778
- *last_page_in_use = p;
779
- }
780
- }
781
-
782
- // Set the next page of the last page to 0.
783
- Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
784
- last_page->opaque_header = OffsetFrom(0) | chunk_id;
785
-
786
- if (last_page->WasInUseBeforeMC()) {
787
- *last_page_in_use = last_page;
788
- }
789
-
790
- return last_page;
791
- }
792
-
793
-
794
- // -----------------------------------------------------------------------------
795
- // PagedSpace implementation
796
-
797
- PagedSpace::PagedSpace(Heap* heap,
798
- intptr_t max_capacity,
799
- AllocationSpace id,
800
- Executability executable)
801
- : Space(heap, id, executable) {
802
- max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
803
- * Page::kObjectAreaSize;
804
- accounting_stats_.Clear();
805
-
806
- allocation_info_.top = NULL;
807
- allocation_info_.limit = NULL;
808
-
809
- mc_forwarding_info_.top = NULL;
810
- mc_forwarding_info_.limit = NULL;
811
- }
812
-
813
-
814
- bool PagedSpace::Setup(Address start, size_t size) {
815
- if (HasBeenSetup()) return false;
816
-
817
- int num_pages = 0;
818
- // Try to use the virtual memory range passed to us. If it is too small to
819
- // contain at least one page, ignore it and allocate instead.
820
- int pages_in_chunk = PagesInChunk(start, size);
821
- if (pages_in_chunk > 0) {
822
- first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
823
- RoundUp(start, Page::kPageSize),
824
- Page::kPageSize * pages_in_chunk,
825
- this, &num_pages);
826
- } else {
827
- int requested_pages =
828
- Min(MemoryAllocator::kPagesPerChunk,
829
- static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
830
- first_page_ =
831
- Isolate::Current()->memory_allocator()->AllocatePages(
832
- requested_pages, &num_pages, this);
833
- if (!first_page_->is_valid()) return false;
834
- }
835
-
836
- // We are sure that the first page is valid and that we have at least one
837
- // page.
838
- ASSERT(first_page_->is_valid());
839
- ASSERT(num_pages > 0);
840
- accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
841
- ASSERT(Capacity() <= max_capacity_);
842
-
843
- // Sequentially clear region marks in the newly allocated
844
- // pages and cache the current last page in the space.
845
- for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
846
- p->SetRegionMarks(Page::kAllRegionsCleanMarks);
847
- last_page_ = p;
848
- }
849
-
850
- // Use first_page_ for allocation.
851
- SetAllocationInfo(&allocation_info_, first_page_);
852
-
853
- page_list_is_chunk_ordered_ = true;
854
-
855
- return true;
856
- }
857
-
858
-
859
- bool PagedSpace::HasBeenSetup() {
860
- return (Capacity() > 0);
861
- }
862
-
863
-
864
- void PagedSpace::TearDown() {
865
- Isolate::Current()->memory_allocator()->FreeAllPages(this);
866
- first_page_ = NULL;
867
- accounting_stats_.Clear();
868
- }
869
-
870
-
871
- #ifdef ENABLE_HEAP_PROTECTION
872
-
873
- void PagedSpace::Protect() {
874
- Page* page = first_page_;
875
- while (page->is_valid()) {
876
- Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
877
- page = Isolate::Current()->memory_allocator()->
878
- FindLastPageInSameChunk(page)->next_page();
879
- }
880
- }
881
-
882
-
883
- void PagedSpace::Unprotect() {
884
- Page* page = first_page_;
885
- while (page->is_valid()) {
886
- Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
887
- page = Isolate::Current()->memory_allocator()->
888
- FindLastPageInSameChunk(page)->next_page();
889
- }
890
- }
891
-
892
- #endif
893
-
894
-
895
- void PagedSpace::MarkAllPagesClean() {
896
- PageIterator it(this, PageIterator::ALL_PAGES);
897
- while (it.has_next()) {
898
- it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
899
- }
900
- }
901
-
902
-
903
- MaybeObject* PagedSpace::FindObject(Address addr) {
904
- // Note: this function can only be called before or after mark-compact GC
905
- // because it accesses map pointers.
906
- ASSERT(!heap()->mark_compact_collector()->in_use());
907
-
908
- if (!Contains(addr)) return Failure::Exception();
909
-
910
- Page* p = Page::FromAddress(addr);
911
- ASSERT(IsUsed(p));
912
- Address cur = p->ObjectAreaStart();
913
- Address end = p->AllocationTop();
914
- while (cur < end) {
915
- HeapObject* obj = HeapObject::FromAddress(cur);
916
- Address next = cur + obj->Size();
917
- if ((cur <= addr) && (addr < next)) return obj;
918
- cur = next;
919
- }
920
-
921
- UNREACHABLE();
922
- return Failure::Exception();
923
- }
924
-
925
-
926
- bool PagedSpace::IsUsed(Page* page) {
927
- PageIterator it(this, PageIterator::PAGES_IN_USE);
928
- while (it.has_next()) {
929
- if (page == it.next()) return true;
930
- }
931
- return false;
932
- }
933
-
934
-
935
- void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
936
- alloc_info->top = p->ObjectAreaStart();
937
- alloc_info->limit = p->ObjectAreaEnd();
938
- ASSERT(alloc_info->VerifyPagedAllocation());
939
- }
940
-
941
-
942
- void PagedSpace::MCResetRelocationInfo() {
943
- // Set page indexes.
944
- int i = 0;
945
- PageIterator it(this, PageIterator::ALL_PAGES);
946
- while (it.has_next()) {
947
- Page* p = it.next();
948
- p->mc_page_index = i++;
949
- }
950
-
951
- // Set mc_forwarding_info_ to the first page in the space.
952
- SetAllocationInfo(&mc_forwarding_info_, first_page_);
953
- // All the bytes in the space are 'available'. We will rediscover
954
- // allocated and wasted bytes during GC.
955
- accounting_stats_.Reset();
956
- }
957
-
958
-
959
- int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
960
- #ifdef DEBUG
961
- // The Contains function considers the address at the beginning of a
962
- // page in the page, MCSpaceOffsetForAddress considers it is in the
963
- // previous page.
964
- if (Page::IsAlignedToPageSize(addr)) {
965
- ASSERT(Contains(addr - kPointerSize));
966
- } else {
967
- ASSERT(Contains(addr));
968
- }
969
- #endif
970
-
971
- // If addr is at the end of a page, it belongs to previous page
972
- Page* p = Page::IsAlignedToPageSize(addr)
973
- ? Page::FromAllocationTop(addr)
974
- : Page::FromAddress(addr);
975
- int index = p->mc_page_index;
976
- return (index * Page::kPageSize) + p->Offset(addr);
977
- }
978
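
MCSpaceOffsetForAddress turns an address into a space-relative offset by combining the page's mc_page_index (assigned consecutively by MCResetRelocationInfo further down) with the offset inside the page, treating an address exactly on a page boundary as the end of the previous page. A rough sketch of that encoding; the page size is an assumed value and the page index is passed in explicitly, since pages need not be contiguous in memory:

#include <cstdint>
#include <cstdio>

const uintptr_t kPageSize = 1 << 13;  // illustrative page size

// Space-relative offset of addr, given the index of the page containing it.
uintptr_t SpaceOffset(int page_index, uintptr_t addr) {
  bool on_boundary = (addr & (kPageSize - 1)) == 0;
  int index = on_boundary ? page_index - 1 : page_index;  // boundary -> previous page
  uintptr_t in_page = on_boundary ? kPageSize : (addr & (kPageSize - 1));
  return static_cast<uintptr_t>(index) * kPageSize + in_page;
}

int main() {
  uintptr_t page_start = 0x700000;  // any page-aligned address
  // An address 0x40 bytes into a page whose index is 2 maps to offset 0x4040.
  std::printf("%#llx\n",
              static_cast<unsigned long long>(SpaceOffset(2, page_start + 0x40)));
  return 0;
}
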
-
979
-
980
- // Slow case for reallocating and promoting objects during a compacting
981
- // collection. This function is not space-specific.
982
- HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
983
- Page* current_page = TopPageOf(mc_forwarding_info_);
984
- if (!current_page->next_page()->is_valid()) {
985
- if (!Expand(current_page)) {
986
- return NULL;
987
- }
988
- }
989
-
990
- // There are surely more pages in the space now.
991
- ASSERT(current_page->next_page()->is_valid());
992
- // We do not add the top-of-page block for the current page to the space's
993
- // free list---the block may contain live objects so we cannot write
994
- // bookkeeping information to it. Instead, we will recover top of page
995
- // blocks when we move objects to their new locations.
996
- //
997
- // We do however write the allocation pointer to the page. The encoding
998
- // of forwarding addresses is as an offset in terms of live bytes, so we
999
- // need quick access to the allocation top of each page to decode
1000
- // forwarding addresses.
1001
- current_page->SetAllocationWatermark(mc_forwarding_info_.top);
1002
- current_page->next_page()->InvalidateWatermark(true);
1003
- SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
1004
- return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
1005
- }
1006
-
1007
-
1008
- bool PagedSpace::Expand(Page* last_page) {
1009
- ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
1010
- ASSERT(Capacity() % Page::kObjectAreaSize == 0);
1011
-
1012
- if (Capacity() == max_capacity_) return false;
1013
-
1014
- ASSERT(Capacity() < max_capacity_);
1015
- // Last page must be valid and its next page is invalid.
1016
- ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
1017
-
1018
- int available_pages =
1019
- static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
1020
- // We don't want to have to handle small chunks near the end, so if there are
1021
- // not kPagesPerChunk pages available without exceeding the max capacity, then
1022
- // act as if memory has run out.
1023
- if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
1024
-
1025
- int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
1026
- Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
1027
- desired_pages, &desired_pages, this);
1028
- if (!p->is_valid()) return false;
1029
-
1030
- accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
1031
- ASSERT(Capacity() <= max_capacity_);
1032
-
1033
- heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
1034
-
1035
- // Sequentially clear region marks of new pages and cache the
1036
- // new last page in the space.
1037
- while (p->is_valid()) {
1038
- p->SetRegionMarks(Page::kAllRegionsCleanMarks);
1039
- last_page_ = p;
1040
- p = p->next_page();
1041
- }
1042
-
1043
- return true;
1044
- }
1045
-
1046
-
1047
- #ifdef DEBUG
1048
- int PagedSpace::CountTotalPages() {
1049
- int count = 0;
1050
- for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
1051
- count++;
1052
- }
1053
- return count;
1054
- }
1055
- #endif
1056
-
1057
-
1058
- void PagedSpace::Shrink() {
1059
- if (!page_list_is_chunk_ordered_) {
1060
- // We can't shrink the space if the page list is not chunk-ordered
1061
- // (see comment for class MemoryAllocator for definition).
1062
- return;
1063
- }
1064
-
1065
- // Release half of free pages.
1066
- Page* top_page = AllocationTopPage();
1067
- ASSERT(top_page->is_valid());
1068
-
1069
- // Count the number of pages we would like to free.
1070
- int pages_to_free = 0;
1071
- for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1072
- pages_to_free++;
1073
- }
1074
-
1075
- // Free pages after top_page.
1076
- Page* p = heap()->isolate()->memory_allocator()->
1077
- FreePages(top_page->next_page());
1078
- heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
1079
-
1080
- // Find out how many pages we failed to free and update last_page_.
1081
- // Please note pages can only be freed in whole chunks.
1082
- last_page_ = top_page;
1083
- for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1084
- pages_to_free--;
1085
- last_page_ = p;
1086
- }
1087
-
1088
- accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
1089
- ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
1090
- }
1091
-
1092
-
1093
- bool PagedSpace::EnsureCapacity(int capacity) {
1094
- if (Capacity() >= capacity) return true;
1095
-
1096
- // Start from the allocation top and loop to the last page in the space.
1097
- Page* last_page = AllocationTopPage();
1098
- Page* next_page = last_page->next_page();
1099
- while (next_page->is_valid()) {
1100
- last_page = heap()->isolate()->memory_allocator()->
1101
- FindLastPageInSameChunk(next_page);
1102
- next_page = last_page->next_page();
1103
- }
1104
-
1105
- // Expand the space until it has the required capacity or expansion fails.
1106
- do {
1107
- if (!Expand(last_page)) return false;
1108
- ASSERT(last_page->next_page()->is_valid());
1109
- last_page =
1110
- heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
1111
- last_page->next_page());
1112
- } while (Capacity() < capacity);
1113
-
1114
- return true;
1115
- }
1116
-
1117
-
1118
- #ifdef DEBUG
1119
- void PagedSpace::Print() { }
1120
- #endif
1121
-
1122
-
1123
- #ifdef DEBUG
1124
- // We do not assume that the PageIterator works, because it depends on the
1125
- // invariants we are checking during verification.
1126
- void PagedSpace::Verify(ObjectVisitor* visitor) {
1127
- // The allocation pointer should be valid, and it should be in a page in the
1128
- // space.
1129
- ASSERT(allocation_info_.VerifyPagedAllocation());
1130
- Page* top_page = Page::FromAllocationTop(allocation_info_.top);
1131
- ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
1132
-
1133
- // Loop over all the pages.
1134
- bool above_allocation_top = false;
1135
- Page* current_page = first_page_;
1136
- while (current_page->is_valid()) {
1137
- if (above_allocation_top) {
1138
- // We don't care what's above the allocation top.
1139
- } else {
1140
- Address top = current_page->AllocationTop();
1141
- if (current_page == top_page) {
1142
- ASSERT(top == allocation_info_.top);
1143
- // The next page will be above the allocation top.
1144
- above_allocation_top = true;
1145
- }
1146
-
1147
- // It should be packed with objects from the bottom to the top.
1148
- Address current = current_page->ObjectAreaStart();
1149
- while (current < top) {
1150
- HeapObject* object = HeapObject::FromAddress(current);
1151
-
1152
- // The first word should be a map, and we expect all map pointers to
1153
- // be in map space.
1154
- Map* map = object->map();
1155
- ASSERT(map->IsMap());
1156
- ASSERT(heap()->map_space()->Contains(map));
1157
-
1158
- // Perform space-specific object verification.
1159
- VerifyObject(object);
1160
-
1161
- // The object itself should look OK.
1162
- object->Verify();
1163
-
1164
- // All the interior pointers should be contained in the heap and
1165
- // the page regions covering intergenerational references should be
1166
- // marked dirty.
1167
- int size = object->Size();
1168
- object->IterateBody(map->instance_type(), size, visitor);
1169
-
1170
- current += size;
1171
- }
1172
-
1173
- // The allocation pointer should not be in the middle of an object.
1174
- ASSERT(current == top);
1175
- }
1176
-
1177
- current_page = current_page->next_page();
1178
- }
1179
- }
1180
- #endif
1181
-
1182
-
1183
- // -----------------------------------------------------------------------------
1184
- // NewSpace implementation
1185
-
1186
-
1187
- bool NewSpace::Setup(Address start, int size) {
1188
- // Set up the new space based on the preallocated memory block defined by
1189
- // start and size. The provided space is divided into two semi-spaces.
1190
- // To support fast containment testing in the new space, the size of
1191
- // this chunk must be a power of two and it must be aligned to its size.
1192
- int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1193
- int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
1194
-
1195
- ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1196
- ASSERT(IsPowerOf2(maximum_semispace_capacity));
1197
-
1198
- // Allocate and setup the histogram arrays if necessary.
1199
- #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1200
- allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1201
- promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1202
-
1203
- #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1204
- promoted_histogram_[name].set_name(#name);
1205
- INSTANCE_TYPE_LIST(SET_NAME)
1206
- #undef SET_NAME
1207
- #endif
1208
-
1209
- ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
1210
- ASSERT(IsAddressAligned(start, size, 0));
1211
-
1212
- if (!to_space_.Setup(start,
1213
- initial_semispace_capacity,
1214
- maximum_semispace_capacity)) {
1215
- return false;
1216
- }
1217
- if (!from_space_.Setup(start + maximum_semispace_capacity,
1218
- initial_semispace_capacity,
1219
- maximum_semispace_capacity)) {
1220
- return false;
1221
- }
1222
-
1223
- start_ = start;
1224
- address_mask_ = ~(size - 1);
1225
- object_mask_ = address_mask_ | kHeapObjectTagMask;
1226
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1227
-
1228
- allocation_info_.top = to_space_.low();
1229
- allocation_info_.limit = to_space_.high();
1230
- mc_forwarding_info_.top = NULL;
1231
- mc_forwarding_info_.limit = NULL;
1232
-
1233
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1234
- return true;
1235
- }
1236
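
Because the new-space chunk has a power-of-two size and is aligned to that size, containment can be tested with a single mask-and-compare instead of two range checks; that is the role of address_mask_ set up above (object_mask_ additionally folds in the heap-object tag). A minimal sketch of the test, with a made-up 1 MB chunk for the example:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kSize = 1 << 20;      // illustrative power-of-two chunk size
  const uintptr_t kStart = 16 * kSize;  // chunk base, aligned to its own size
  const uintptr_t kAddressMask = ~(kSize - 1);

  // An address is inside the chunk iff masking off the low bits yields the base.
  auto contains = [&](uintptr_t addr) { return (addr & kAddressMask) == kStart; };

  std::printf("%d %d\n", contains(kStart + 12345) ? 1 : 0,
              contains(kStart + kSize + 1) ? 1 : 0);
  return 0;
}
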
-
1237
-
1238
- void NewSpace::TearDown() {
1239
- #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1240
- if (allocated_histogram_) {
1241
- DeleteArray(allocated_histogram_);
1242
- allocated_histogram_ = NULL;
1243
- }
1244
- if (promoted_histogram_) {
1245
- DeleteArray(promoted_histogram_);
1246
- promoted_histogram_ = NULL;
1247
- }
1248
- #endif
1249
-
1250
- start_ = NULL;
1251
- allocation_info_.top = NULL;
1252
- allocation_info_.limit = NULL;
1253
- mc_forwarding_info_.top = NULL;
1254
- mc_forwarding_info_.limit = NULL;
1255
-
1256
- to_space_.TearDown();
1257
- from_space_.TearDown();
1258
- }
1259
-
1260
-
1261
- #ifdef ENABLE_HEAP_PROTECTION
1262
-
1263
- void NewSpace::Protect() {
1264
- heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
1265
- heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
1266
- }
1267
-
1268
-
1269
- void NewSpace::Unprotect() {
1270
- heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
1271
- to_space_.executable());
1272
- heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
1273
- from_space_.executable());
1274
- }
1275
-
1276
- #endif
1277
-
1278
-
1279
- void NewSpace::Flip() {
1280
- SemiSpace tmp = from_space_;
1281
- from_space_ = to_space_;
1282
- to_space_ = tmp;
1283
- }
1284
-
1285
-
1286
- void NewSpace::Grow() {
1287
- ASSERT(Capacity() < MaximumCapacity());
1288
- if (to_space_.Grow()) {
1289
- // Only grow from space if we managed to grow to space.
1290
- if (!from_space_.Grow()) {
1291
- // If we managed to grow to space but couldn't grow from space,
1292
- // attempt to shrink to space.
1293
- if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1294
- // We are in an inconsistent state because we could not
1295
- // commit/uncommit memory from new space.
1296
- V8::FatalProcessOutOfMemory("Failed to grow new space.");
1297
- }
1298
- }
1299
- }
1300
- allocation_info_.limit = to_space_.high();
1301
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1302
- }
1303
-
1304
-
1305
- void NewSpace::Shrink() {
1306
- int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1307
- int rounded_new_capacity =
1308
- RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
1309
- if (rounded_new_capacity < Capacity() &&
1310
- to_space_.ShrinkTo(rounded_new_capacity)) {
1311
- // Only shrink from space if we managed to shrink to space.
1312
- if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1313
- // If we managed to shrink to space but couldn't shrink from
1314
- // space, attempt to grow to space again.
1315
- if (!to_space_.GrowTo(from_space_.Capacity())) {
1316
- // We are in an inconsistent state because we could not
1317
- // commit/uncommit memory from new space.
1318
- V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1319
- }
1320
- }
1321
- }
1322
- allocation_info_.limit = to_space_.high();
1323
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1324
- }
1325
-
1326
-
1327
- void NewSpace::ResetAllocationInfo() {
1328
- allocation_info_.top = to_space_.low();
1329
- allocation_info_.limit = to_space_.high();
1330
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1331
- }
1332
-
1333
-
1334
- void NewSpace::MCResetRelocationInfo() {
1335
- mc_forwarding_info_.top = from_space_.low();
1336
- mc_forwarding_info_.limit = from_space_.high();
1337
- ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
1338
- }
1339
-
1340
-
1341
- void NewSpace::MCCommitRelocationInfo() {
1342
- // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
1343
- // valid allocation info for the to space.
1344
- allocation_info_.top = mc_forwarding_info_.top;
1345
- allocation_info_.limit = to_space_.high();
1346
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1347
- }
1348
-
1349
-
1350
- #ifdef DEBUG
1351
- // We do not use the SemispaceIterator because verification doesn't assume
1352
- // that it works (it depends on the invariants we are checking).
1353
- void NewSpace::Verify() {
1354
- // The allocation pointer should be in the space or at the very end.
1355
- ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1356
-
1357
- // There should be objects packed in from the low address up to the
1358
- // allocation pointer.
1359
- Address current = to_space_.low();
1360
- while (current < top()) {
1361
- HeapObject* object = HeapObject::FromAddress(current);
1362
-
1363
- // The first word should be a map, and we expect all map pointers to
1364
- // be in map space.
1365
- Map* map = object->map();
1366
- ASSERT(map->IsMap());
1367
- ASSERT(heap()->map_space()->Contains(map));
1368
-
1369
- // The object should not be code or a map.
1370
- ASSERT(!object->IsMap());
1371
- ASSERT(!object->IsCode());
1372
-
1373
- // The object itself should look OK.
1374
- object->Verify();
1375
-
1376
- // All the interior pointers should be contained in the heap.
1377
- VerifyPointersVisitor visitor;
1378
- int size = object->Size();
1379
- object->IterateBody(map->instance_type(), size, &visitor);
1380
-
1381
- current += size;
1382
- }
1383
-
1384
- // The allocation pointer should not be in the middle of an object.
1385
- ASSERT(current == top());
1386
- }
1387
- #endif
1388
-
1389
-
1390
- bool SemiSpace::Commit() {
1391
- ASSERT(!is_committed());
1392
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
1393
- start_, capacity_, executable())) {
1394
- return false;
1395
- }
1396
- committed_ = true;
1397
- return true;
1398
- }
1399
-
1400
-
1401
- bool SemiSpace::Uncommit() {
1402
- ASSERT(is_committed());
1403
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1404
- start_, capacity_)) {
1405
- return false;
1406
- }
1407
- committed_ = false;
1408
- return true;
1409
- }
1410
-
1411
-
1412
- // -----------------------------------------------------------------------------
1413
- // SemiSpace implementation
1414
-
1415
- bool SemiSpace::Setup(Address start,
1416
- int initial_capacity,
1417
- int maximum_capacity) {
1418
- // Creates a space in the young generation. The constructor does not
1419
- // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1420
- // memory of size 'capacity' when set up, and does not grow or shrink
1421
- // otherwise. In the mark-compact collector, the memory region of the from
1422
- // space is used as the marking stack. It requires contiguous memory
1423
- // addresses.
1424
- initial_capacity_ = initial_capacity;
1425
- capacity_ = initial_capacity;
1426
- maximum_capacity_ = maximum_capacity;
1427
- committed_ = false;
1428
-
1429
- start_ = start;
1430
- address_mask_ = ~(maximum_capacity - 1);
1431
- object_mask_ = address_mask_ | kHeapObjectTagMask;
1432
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1433
- age_mark_ = start_;
1434
-
1435
- return Commit();
1436
- }
1437
-
1438
-
1439
- void SemiSpace::TearDown() {
1440
- start_ = NULL;
1441
- capacity_ = 0;
1442
- }
1443
-
1444
-
1445
- bool SemiSpace::Grow() {
1446
- // Double the semispace size but only up to maximum capacity.
1447
- int maximum_extra = maximum_capacity_ - capacity_;
1448
- int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
1449
- maximum_extra);
1450
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
1451
- high(), extra, executable())) {
1452
- return false;
1453
- }
1454
- capacity_ += extra;
1455
- return true;
1456
- }
1457
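
Grow roughly doubles the committed size of a semispace on each call, but never past maximum_capacity_: the extra amount is the current capacity rounded up to the OS allocation alignment, clamped to the remaining headroom. A small sketch of that growth schedule (the 64 KB alignment and the capacities are illustrative values):

#include <algorithm>
#include <cstdio>

const int kAllocateAlignment = 64 * 1024;  // assumed OS commit granularity

int RoundUpToAlignment(int n) {
  return (n + kAllocateAlignment - 1) / kAllocateAlignment * kAllocateAlignment;
}

// Capacity after one Grow() step: double (rounded to the alignment), capped at maximum.
int GrownCapacity(int capacity, int maximum) {
  int extra = std::min(RoundUpToAlignment(capacity), maximum - capacity);
  return capacity + extra;
}

int main() {
  int capacity = 512 * 1024;
  const int maximum = 4 * 1024 * 1024;
  while (capacity < maximum) {
    capacity = GrownCapacity(capacity, maximum);
    std::printf("%d KB\n", capacity / 1024);  // 1024, 2048, 4096
  }
  return 0;
}
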
-
1458
-
1459
- bool SemiSpace::GrowTo(int new_capacity) {
1460
- ASSERT(new_capacity <= maximum_capacity_);
1461
- ASSERT(new_capacity > capacity_);
1462
- size_t delta = new_capacity - capacity_;
1463
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1464
- if (!heap()->isolate()->memory_allocator()->CommitBlock(
1465
- high(), delta, executable())) {
1466
- return false;
1467
- }
1468
- capacity_ = new_capacity;
1469
- return true;
1470
- }
1471
-
1472
-
1473
- bool SemiSpace::ShrinkTo(int new_capacity) {
1474
- ASSERT(new_capacity >= initial_capacity_);
1475
- ASSERT(new_capacity < capacity_);
1476
- size_t delta = capacity_ - new_capacity;
1477
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1478
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1479
- high() - delta, delta)) {
1480
- return false;
1481
- }
1482
- capacity_ = new_capacity;
1483
- return true;
1484
- }
1485
-
1486
-
1487
- #ifdef DEBUG
1488
- void SemiSpace::Print() { }
1489
-
1490
-
1491
- void SemiSpace::Verify() { }
1492
- #endif
1493
-
1494
-
1495
- // -----------------------------------------------------------------------------
1496
- // SemiSpaceIterator implementation.
1497
- SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1498
- Initialize(space, space->bottom(), space->top(), NULL);
1499
- }
1500
-
1501
-
1502
- SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1503
- HeapObjectCallback size_func) {
1504
- Initialize(space, space->bottom(), space->top(), size_func);
1505
- }
1506
-
1507
-
1508
- SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1509
- Initialize(space, start, space->top(), NULL);
1510
- }
1511
-
1512
-
1513
- void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
1514
- Address end,
1515
- HeapObjectCallback size_func) {
1516
- ASSERT(space->ToSpaceContains(start));
1517
- ASSERT(space->ToSpaceLow() <= end
1518
- && end <= space->ToSpaceHigh());
1519
- space_ = &space->to_space_;
1520
- current_ = start;
1521
- limit_ = end;
1522
- size_func_ = size_func;
1523
- }
1524
-
1525
-
1526
- #ifdef DEBUG
1527
- // heap_histograms is shared; always clear it before using it.
1528
- static void ClearHistograms() {
1529
- Isolate* isolate = Isolate::Current();
1530
- // We reset the name each time, though it hasn't changed.
1531
- #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1532
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1533
- #undef DEF_TYPE_NAME
1534
-
1535
- #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1536
- INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1537
- #undef CLEAR_HISTOGRAM
1538
-
1539
- isolate->js_spill_information()->Clear();
1540
- }
1541
-
1542
-
1543
- static void ClearCodeKindStatistics() {
1544
- Isolate* isolate = Isolate::Current();
1545
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1546
- isolate->code_kind_statistics()[i] = 0;
1547
- }
1548
- }
1549
-
1550
-
1551
- static void ReportCodeKindStatistics() {
1552
- Isolate* isolate = Isolate::Current();
1553
- const char* table[Code::NUMBER_OF_KINDS] = { NULL };
1554
-
1555
- #define CASE(name) \
1556
- case Code::name: table[Code::name] = #name; \
1557
- break
1558
-
1559
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1560
- switch (static_cast<Code::Kind>(i)) {
1561
- CASE(FUNCTION);
1562
- CASE(OPTIMIZED_FUNCTION);
1563
- CASE(STUB);
1564
- CASE(BUILTIN);
1565
- CASE(LOAD_IC);
1566
- CASE(KEYED_LOAD_IC);
1567
- CASE(KEYED_EXTERNAL_ARRAY_LOAD_IC);
1568
- CASE(STORE_IC);
1569
- CASE(KEYED_STORE_IC);
1570
- CASE(KEYED_EXTERNAL_ARRAY_STORE_IC);
1571
- CASE(CALL_IC);
1572
- CASE(KEYED_CALL_IC);
1573
- CASE(BINARY_OP_IC);
1574
- CASE(TYPE_RECORDING_BINARY_OP_IC);
1575
- CASE(COMPARE_IC);
1576
- }
1577
- }
1578
-
1579
- #undef CASE
1580
-
1581
- PrintF("\n Code kind histograms: \n");
1582
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1583
- if (isolate->code_kind_statistics()[i] > 0) {
1584
- PrintF(" %-20s: %10d bytes\n", table[i],
1585
- isolate->code_kind_statistics()[i]);
1586
- }
1587
- }
1588
- PrintF("\n");
1589
- }
1590
-
1591
-
1592
- static int CollectHistogramInfo(HeapObject* obj) {
1593
- Isolate* isolate = Isolate::Current();
1594
- InstanceType type = obj->map()->instance_type();
1595
- ASSERT(0 <= type && type <= LAST_TYPE);
1596
- ASSERT(isolate->heap_histograms()[type].name() != NULL);
1597
- isolate->heap_histograms()[type].increment_number(1);
1598
- isolate->heap_histograms()[type].increment_bytes(obj->Size());
1599
-
1600
- if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1601
- JSObject::cast(obj)->IncrementSpillStatistics(
1602
- isolate->js_spill_information());
1603
- }
1604
-
1605
- return obj->Size();
1606
- }
1607
-
1608
-
1609
- static void ReportHistogram(bool print_spill) {
1610
- Isolate* isolate = Isolate::Current();
1611
- PrintF("\n Object Histogram:\n");
1612
- for (int i = 0; i <= LAST_TYPE; i++) {
1613
- if (isolate->heap_histograms()[i].number() > 0) {
1614
- PrintF(" %-34s%10d (%10d bytes)\n",
1615
- isolate->heap_histograms()[i].name(),
1616
- isolate->heap_histograms()[i].number(),
1617
- isolate->heap_histograms()[i].bytes());
1618
- }
1619
- }
1620
- PrintF("\n");
1621
-
1622
- // Summarize string types.
1623
- int string_number = 0;
1624
- int string_bytes = 0;
1625
- #define INCREMENT(type, size, name, camel_name) \
1626
- string_number += isolate->heap_histograms()[type].number(); \
1627
- string_bytes += isolate->heap_histograms()[type].bytes();
1628
- STRING_TYPE_LIST(INCREMENT)
1629
- #undef INCREMENT
1630
- if (string_number > 0) {
1631
- PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1632
- string_bytes);
1633
- }
1634
-
1635
- if (FLAG_collect_heap_spill_statistics && print_spill) {
1636
- isolate->js_spill_information()->Print();
1637
- }
1638
- }
1639
- #endif // DEBUG
1640
-
1641
-
1642
- // Support for statistics gathering for --heap-stats and --log-gc.
1643
- #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1644
- void NewSpace::ClearHistograms() {
1645
- for (int i = 0; i <= LAST_TYPE; i++) {
1646
- allocated_histogram_[i].clear();
1647
- promoted_histogram_[i].clear();
1648
- }
1649
- }
1650
-
1651
- // Because the copying collector does not touch garbage objects, we iterate
1652
- // the new space before a collection to get a histogram of allocated objects.
1653
- // This only happens (1) when compiled with DEBUG and the --heap-stats flag is
1654
- // set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1655
- // flag is set.
1656
- void NewSpace::CollectStatistics() {
1657
- ClearHistograms();
1658
- SemiSpaceIterator it(this);
1659
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1660
- RecordAllocation(obj);
1661
- }
1662
-
1663
-
1664
- #ifdef ENABLE_LOGGING_AND_PROFILING
1665
- static void DoReportStatistics(Isolate* isolate,
1666
- HistogramInfo* info, const char* description) {
1667
- LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1668
- // Lump all the string types together.
1669
- int string_number = 0;
1670
- int string_bytes = 0;
1671
- #define INCREMENT(type, size, name, camel_name) \
1672
- string_number += info[type].number(); \
1673
- string_bytes += info[type].bytes();
1674
- STRING_TYPE_LIST(INCREMENT)
1675
- #undef INCREMENT
1676
- if (string_number > 0) {
1677
- LOG(isolate,
1678
- HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1679
- }
1680
-
1681
- // Then do the other types.
1682
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1683
- if (info[i].number() > 0) {
1684
- LOG(isolate,
1685
- HeapSampleItemEvent(info[i].name(), info[i].number(),
1686
- info[i].bytes()));
1687
- }
1688
- }
1689
- LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1690
- }
1691
- #endif // ENABLE_LOGGING_AND_PROFILING
1692
-
1693
-
1694
- void NewSpace::ReportStatistics() {
1695
- #ifdef DEBUG
1696
- if (FLAG_heap_stats) {
1697
- float pct = static_cast<float>(Available()) / Capacity();
1698
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
1699
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1700
- Capacity(), Available(), static_cast<int>(pct*100));
1701
- PrintF("\n Object Histogram:\n");
1702
- for (int i = 0; i <= LAST_TYPE; i++) {
1703
- if (allocated_histogram_[i].number() > 0) {
1704
- PrintF(" %-34s%10d (%10d bytes)\n",
1705
- allocated_histogram_[i].name(),
1706
- allocated_histogram_[i].number(),
1707
- allocated_histogram_[i].bytes());
1708
- }
1709
- }
1710
- PrintF("\n");
1711
- }
1712
- #endif // DEBUG
1713
-
1714
- #ifdef ENABLE_LOGGING_AND_PROFILING
1715
- if (FLAG_log_gc) {
1716
- Isolate* isolate = ISOLATE;
1717
- DoReportStatistics(isolate, allocated_histogram_, "allocated");
1718
- DoReportStatistics(isolate, promoted_histogram_, "promoted");
1719
- }
1720
- #endif // ENABLE_LOGGING_AND_PROFILING
1721
- }
1722
-
1723
-
1724
- void NewSpace::RecordAllocation(HeapObject* obj) {
1725
- InstanceType type = obj->map()->instance_type();
1726
- ASSERT(0 <= type && type <= LAST_TYPE);
1727
- allocated_histogram_[type].increment_number(1);
1728
- allocated_histogram_[type].increment_bytes(obj->Size());
1729
- }
1730
-
1731
-
1732
- void NewSpace::RecordPromotion(HeapObject* obj) {
1733
- InstanceType type = obj->map()->instance_type();
1734
- ASSERT(0 <= type && type <= LAST_TYPE);
1735
- promoted_histogram_[type].increment_number(1);
1736
- promoted_histogram_[type].increment_bytes(obj->Size());
1737
- }
1738
- #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1739
-
1740
-
1741
- // -----------------------------------------------------------------------------
1742
- // Free lists for old object spaces implementation
1743
-
1744
- void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1745
- ASSERT(size_in_bytes > 0);
1746
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
1747
-
1748
- // We write a map and possibly size information to the block. If the block
1749
- // is big enough to be a ByteArray with at least one extra word (the next
1750
- // pointer), we set its map to be the byte array map and its size to an
1751
- // appropriate array length for the desired size from HeapObject::Size().
1752
- // If the block is too small (e.g., one or two words) to hold both a size
1753
- // field and a next pointer, we give it a filler map that gives it the
1754
- // correct size.
1755
- if (size_in_bytes > ByteArray::kHeaderSize) {
1756
- set_map(heap->raw_unchecked_byte_array_map());
1757
- // Can't use ByteArray::cast because it fails during deserialization.
1758
- ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1759
- this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
1760
- } else if (size_in_bytes == kPointerSize) {
1761
- set_map(heap->raw_unchecked_one_pointer_filler_map());
1762
- } else if (size_in_bytes == 2 * kPointerSize) {
1763
- set_map(heap->raw_unchecked_two_pointer_filler_map());
1764
- } else {
1765
- UNREACHABLE();
1766
- }
1767
- // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1768
- // deserialization because the byte array map is not done yet.
1769
- }
1770
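
The point of set_size is that a free block masquerades as an ordinary heap object whose size can later be recomputed from its map alone: blocks larger than the byte-array header become byte arrays carrying an explicit length, while one- and two-word blocks get dedicated filler maps whose size is implied. A rough standalone sketch of that classification and the round trip back to a byte size; the word size, header size, and kind names are assumptions for the example, not V8's actual layout:

#include <cassert>
#include <cstdio>

const int kPointerSize = 8;                         // assumed word size
const int kByteArrayHeaderSize = 2 * kPointerSize;  // assumed header: map + length

enum FreeBlockKind { ONE_POINTER_FILLER, TWO_POINTER_FILLER, BYTE_ARRAY };

struct FreeBlockHeader {
  FreeBlockKind kind;
  int byte_array_length;  // payload length, only meaningful for BYTE_ARRAY
};

// Classify a free block by size, as FreeListNode::set_size does above.
FreeBlockHeader MakeHeader(int size_in_bytes) {
  if (size_in_bytes > kByteArrayHeaderSize) {
    return {BYTE_ARRAY, size_in_bytes - kByteArrayHeaderSize};
  } else if (size_in_bytes == kPointerSize) {
    return {ONE_POINTER_FILLER, 0};
  } else {
    assert(size_in_bytes == 2 * kPointerSize);
    return {TWO_POINTER_FILLER, 0};
  }
}

// Recover the block size from the header alone, the way HeapObject::Size()
// recovers it from the map and length fields.
int SizeOf(const FreeBlockHeader& h) {
  switch (h.kind) {
    case ONE_POINTER_FILLER: return kPointerSize;
    case TWO_POINTER_FILLER: return 2 * kPointerSize;
    case BYTE_ARRAY:         return kByteArrayHeaderSize + h.byte_array_length;
  }
  return 0;
}

int main() {
  const int sizes[] = {8, 16, 24, 4096};
  for (int size : sizes) {
    std::printf("%d -> %d\n", size, SizeOf(MakeHeader(size)));
  }
  return 0;
}
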
-
1771
-
1772
- Address FreeListNode::next(Heap* heap) {
1773
- ASSERT(IsFreeListNode(this));
1774
- if (map() == heap->raw_unchecked_byte_array_map()) {
1775
- ASSERT(Size() >= kNextOffset + kPointerSize);
1776
- return Memory::Address_at(address() + kNextOffset);
1777
- } else {
1778
- return Memory::Address_at(address() + kPointerSize);
1779
- }
1780
- }
1781
-
1782
-
1783
- void FreeListNode::set_next(Heap* heap, Address next) {
1784
- ASSERT(IsFreeListNode(this));
1785
- if (map() == heap->raw_unchecked_byte_array_map()) {
1786
- ASSERT(Size() >= kNextOffset + kPointerSize);
1787
- Memory::Address_at(address() + kNextOffset) = next;
1788
- } else {
1789
- Memory::Address_at(address() + kPointerSize) = next;
1790
- }
1791
- }
1792
-
1793
-
1794
- OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
1795
- : heap_(heap),
1796
- owner_(owner) {
1797
- Reset();
1798
- }
1799
-
1800
-
1801
- void OldSpaceFreeList::Reset() {
1802
- available_ = 0;
1803
- for (int i = 0; i < kFreeListsLength; i++) {
1804
- free_[i].head_node_ = NULL;
1805
- }
1806
- needs_rebuild_ = false;
1807
- finger_ = kHead;
1808
- free_[kHead].next_size_ = kEnd;
1809
- }
1810
-
1811
-
1812
- void OldSpaceFreeList::RebuildSizeList() {
1813
- ASSERT(needs_rebuild_);
1814
- int cur = kHead;
1815
- for (int i = cur + 1; i < kFreeListsLength; i++) {
1816
- if (free_[i].head_node_ != NULL) {
1817
- free_[cur].next_size_ = i;
1818
- cur = i;
1819
- }
1820
- }
1821
- free_[cur].next_size_ = kEnd;
1822
- needs_rebuild_ = false;
1823
- }
1824
-
1825
-
1826
- int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1827
- #ifdef DEBUG
1828
- Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
1829
- #endif
1830
- FreeListNode* node = FreeListNode::FromAddress(start);
1831
- node->set_size(heap_, size_in_bytes);
1832
-
1833
- // We don't use the freelists in compacting mode. This makes it more like a
1834
- // GC that only has mark-sweep-compact and doesn't have a mark-sweep
1835
- // collector.
1836
- if (FLAG_always_compact) {
1837
- return size_in_bytes;
1838
- }
1839
-
1840
- // Early return to drop too-small blocks on the floor (one or two word
1841
- // blocks cannot hold a map pointer, a size field, and a pointer to the
1842
- // next block in the free list).
1843
- if (size_in_bytes < kMinBlockSize) {
1844
- return size_in_bytes;
1845
- }
1846
-
1847
- // Insert other blocks at the head of an exact free list.
1848
- int index = size_in_bytes >> kPointerSizeLog2;
1849
- node->set_next(heap_, free_[index].head_node_);
1850
- free_[index].head_node_ = node->address();
1851
- available_ += size_in_bytes;
1852
- needs_rebuild_ = true;
1853
- return 0;
1854
- }
1855
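
Free above uses the block size itself as the list index: every block size is a multiple of the pointer size, so size_in_bytes >> kPointerSizeLog2 is a small integer, and each index holds a linked list of blocks of exactly that size. A compact sketch of such an exact-fit segregated free list, with std::forward_list standing in for the intrusive next pointers and integers standing in for addresses:

#include <cstdio>
#include <forward_list>
#include <vector>

const int kPointerSizeLog2 = 3;  // assumed 8-byte words

// One list per exact block size, indexed by the size in words.
class ExactFitFreeList {
 public:
  explicit ExactFitFreeList(int max_words) : lists_(max_words + 1) {}

  void Free(int block_address, int size_in_bytes) {
    lists_[size_in_bytes >> kPointerSizeLog2].push_front(block_address);
  }

  // Returns a block of exactly the requested size, or -1 if none is available.
  int Allocate(int size_in_bytes) {
    std::forward_list<int>& list = lists_[size_in_bytes >> kPointerSizeLog2];
    if (list.empty()) return -1;
    int address = list.front();
    list.pop_front();
    return address;
  }

 private:
  std::vector<std::forward_list<int>> lists_;
};

int main() {
  ExactFitFreeList free_list(64);
  free_list.Free(0x1000, 24);
  free_list.Free(0x2000, 24);
  std::printf("%#x\n", static_cast<unsigned>(free_list.Allocate(24)));  // 0x2000, most recent
  std::printf("%d\n", free_list.Allocate(32));                          // -1, nothing of that size
  return 0;
}

The real list in the diff also threads a sorted size list with a moving finger through the non-empty buckets so that Allocate can fall back to a best-fit search with splitting; that refinement is omitted here.
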
-
1856
-
1857
- MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
1858
- ASSERT(0 < size_in_bytes);
1859
- ASSERT(size_in_bytes <= kMaxBlockSize);
1860
- ASSERT(IsAligned(size_in_bytes, kPointerSize));
1861
-
1862
- if (needs_rebuild_) RebuildSizeList();
1863
- int index = size_in_bytes >> kPointerSizeLog2;
1864
- // Check for a perfect fit.
1865
- if (free_[index].head_node_ != NULL) {
1866
- FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
1867
- // If this was the last block of its size, remove the size.
1868
- if ((free_[index].head_node_ = node->next(heap_)) == NULL)
1869
- RemoveSize(index);
1870
- available_ -= size_in_bytes;
1871
- *wasted_bytes = 0;
1872
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1873
- return node;
1874
- }
1875
- // Search the size list for the best fit.
1876
- int prev = finger_ < index ? finger_ : kHead;
1877
- int cur = FindSize(index, &prev);
1878
- ASSERT(index < cur);
1879
- if (cur == kEnd) {
1880
- // No large enough size in list.
1881
- *wasted_bytes = 0;
1882
- return Failure::RetryAfterGC(owner_);
1883
- }
1884
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1885
- int rem = cur - index;
1886
- int rem_bytes = rem << kPointerSizeLog2;
1887
- FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
1888
- ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
1889
- FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
1890
- size_in_bytes);
1891
- // Distinguish the cases prev < rem < cur and rem <= prev < cur
1892
- // to avoid many redundant tests and calls to Insert/RemoveSize.
1893
- if (prev < rem) {
1894
- // Simple case: insert rem between prev and cur.
1895
- finger_ = prev;
1896
- free_[prev].next_size_ = rem;
1897
- // If this was the last block of size cur, remove the size.
1898
- if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
1899
- free_[rem].next_size_ = free_[cur].next_size_;
1900
- } else {
1901
- free_[rem].next_size_ = cur;
1902
- }
1903
- // Add the remainder block.
1904
- rem_node->set_size(heap_, rem_bytes);
1905
- rem_node->set_next(heap_, free_[rem].head_node_);
1906
- free_[rem].head_node_ = rem_node->address();
1907
- } else {
1908
- // If this was the last block of size cur, remove the size.
1909
- if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
1910
- finger_ = prev;
1911
- free_[prev].next_size_ = free_[cur].next_size_;
1912
- }
1913
- if (rem_bytes < kMinBlockSize) {
1914
- // Too-small remainder is wasted.
1915
- rem_node->set_size(heap_, rem_bytes);
1916
- available_ -= size_in_bytes + rem_bytes;
1917
- *wasted_bytes = rem_bytes;
1918
- return cur_node;
1919
- }
1920
- // Add the remainder block and, if needed, insert its size.
1921
- rem_node->set_size(heap_, rem_bytes);
1922
- rem_node->set_next(heap_, free_[rem].head_node_);
1923
- free_[rem].head_node_ = rem_node->address();
1924
- if (rem_node->next(heap_) == NULL) InsertSize(rem);
1925
- }
1926
- available_ -= size_in_bytes;
1927
- *wasted_bytes = 0;
1928
- return cur_node;
1929
- }
1930
-
1931
-
1932
- void OldSpaceFreeList::MarkNodes() {
1933
- for (int i = 0; i < kFreeListsLength; i++) {
1934
- Address cur_addr = free_[i].head_node_;
1935
- while (cur_addr != NULL) {
1936
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1937
- cur_addr = cur_node->next(heap_);
1938
- cur_node->SetMark();
1939
- }
1940
- }
1941
- }
1942
-
1943
-
1944
- #ifdef DEBUG
1945
- bool OldSpaceFreeList::Contains(FreeListNode* node) {
1946
- for (int i = 0; i < kFreeListsLength; i++) {
1947
- Address cur_addr = free_[i].head_node_;
1948
- while (cur_addr != NULL) {
1949
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1950
- if (cur_node == node) return true;
1951
- cur_addr = cur_node->next(heap_);
1952
- }
1953
- }
1954
- return false;
1955
- }
1956
- #endif
1957
-
1958
-
1959
- FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
1960
- AllocationSpace owner,
1961
- int object_size)
1962
- : heap_(heap), owner_(owner), object_size_(object_size) {
1963
- Reset();
1964
- }
1965
-
1966
-
1967
- void FixedSizeFreeList::Reset() {
1968
- available_ = 0;
1969
- head_ = tail_ = NULL;
1970
- }
1971
-
1972
-
1973
- void FixedSizeFreeList::Free(Address start) {
1974
- #ifdef DEBUG
1975
- Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
1976
- #endif
1977
- // We only use the freelists with mark-sweep.
1978
- ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
1979
- FreeListNode* node = FreeListNode::FromAddress(start);
1980
- node->set_size(heap_, object_size_);
1981
- node->set_next(heap_, NULL);
1982
- if (head_ == NULL) {
1983
- tail_ = head_ = node->address();
1984
- } else {
1985
- FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
1986
- tail_ = node->address();
1987
- }
1988
- available_ += object_size_;
1989
- }
1990
-
1991
-
1992
- MaybeObject* FixedSizeFreeList::Allocate() {
1993
- if (head_ == NULL) {
1994
- return Failure::RetryAfterGC(owner_);
1995
- }
1996
-
1997
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1998
- FreeListNode* node = FreeListNode::FromAddress(head_);
1999
- head_ = node->next(heap_);
2000
- available_ -= object_size_;
2001
- return node;
2002
- }
2003
-
2004
-
2005
- void FixedSizeFreeList::MarkNodes() {
2006
- Address cur_addr = head_;
2007
- while (cur_addr != NULL && cur_addr != tail_) {
2008
- FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
2009
- cur_addr = cur_node->next(heap_);
2010
- cur_node->SetMark();
2011
- }
2012
- }
2013
-
2014
-
2015
- // -----------------------------------------------------------------------------
2016
- // OldSpace implementation
2017
-
2018
- void OldSpace::PrepareForMarkCompact(bool will_compact) {
2019
- // Call prepare of the super class.
2020
- PagedSpace::PrepareForMarkCompact(will_compact);
2021
-
2022
- if (will_compact) {
2023
- // Reset relocation info. During a compacting collection, everything in
2024
- // the space is considered 'available' and we will rediscover live data
2025
- // and waste during the collection.
2026
- MCResetRelocationInfo();
2027
- ASSERT(Available() == Capacity());
2028
- } else {
2029
- // During a non-compacting collection, everything below the linear
2030
- // allocation pointer is considered allocated (everything above is
2031
- // available) and we will rediscover available and wasted bytes during
2032
- // the collection.
2033
- accounting_stats_.AllocateBytes(free_list_.available());
2034
- accounting_stats_.FillWastedBytes(Waste());
2035
- }
2036
-
2037
- // Clear the free list before a full GC---it will be rebuilt afterward.
2038
- free_list_.Reset();
2039
- }
2040
-
2041
-
2042
- void OldSpace::MCCommitRelocationInfo() {
2043
- // Update fast allocation info.
2044
- allocation_info_.top = mc_forwarding_info_.top;
2045
- allocation_info_.limit = mc_forwarding_info_.limit;
2046
- ASSERT(allocation_info_.VerifyPagedAllocation());
2047
-
2048
- // The space is compacted and we haven't yet built free lists or
2049
- // wasted any space.
2050
- ASSERT(Waste() == 0);
2051
- ASSERT(AvailableFree() == 0);
2052
-
2053
- // Build the free list for the space.
2054
- int computed_size = 0;
2055
- PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2056
- while (it.has_next()) {
2057
- Page* p = it.next();
2058
- // Space below the relocation pointer is allocated.
2059
- computed_size +=
2060
- static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
2061
- if (it.has_next()) {
2062
- // Free the space at the top of the page.
2063
- int extra_size =
2064
- static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
2065
- if (extra_size > 0) {
2066
- int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
2067
- extra_size);
2068
- // The bytes we have just "freed" to add to the free list were
2069
- // already accounted as available.
2070
- accounting_stats_.WasteBytes(wasted_bytes);
2071
- }
2072
- }
2073
- }
2074
-
2075
- // Make sure the computed size - based on the used portion of the pages in
2076
- // use - matches the size obtained while computing forwarding addresses.
2077
- ASSERT(computed_size == Size());
2078
- }
2079
-
2080
-
2081
- bool NewSpace::ReserveSpace(int bytes) {
2082
- // We can't reliably unpack a partial snapshot that needs more new space
2083
- // than the minimum NewSpace size.
2084
- ASSERT(bytes <= InitialCapacity());
2085
- Address limit = allocation_info_.limit;
2086
- Address top = allocation_info_.top;
2087
- return limit - top >= bytes;
2088
- }
2089
-
2090
-
2091
- void PagedSpace::FreePages(Page* prev, Page* last) {
2092
- if (last == AllocationTopPage()) {
2093
- // Pages are already at the end of used pages.
2094
- return;
2095
- }
2096
-
2097
- Page* first = NULL;
2098
-
2099
- // Remove pages from the list.
2100
- if (prev == NULL) {
2101
- first = first_page_;
2102
- first_page_ = last->next_page();
2103
- } else {
2104
- first = prev->next_page();
2105
- heap()->isolate()->memory_allocator()->SetNextPage(
2106
- prev, last->next_page());
2107
- }
2108
-
2109
- // Attach it after the last page.
2110
- heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
2111
- last_page_ = last;
2112
- heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
2113
-
2114
- // Clean them up.
2115
- do {
2116
- first->InvalidateWatermark(true);
2117
- first->SetAllocationWatermark(first->ObjectAreaStart());
2118
- first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2119
- first->SetRegionMarks(Page::kAllRegionsCleanMarks);
2120
- first = first->next_page();
2121
- } while (first != NULL);
2122
-
2123
- // Order of pages in this space might no longer be consistent with
2124
- // order of pages in chunks.
2125
- page_list_is_chunk_ordered_ = false;
2126
- }
2127
-
2128
-
2129
- void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
2130
- const bool add_to_freelist = true;
2131
-
2132
- // Mark used and unused pages to properly fill unused pages
2133
- // after reordering.
2134
- PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
2135
- Page* last_in_use = AllocationTopPage();
2136
- bool in_use = true;
2137
-
2138
- while (all_pages_iterator.has_next()) {
2139
- Page* p = all_pages_iterator.next();
2140
- p->SetWasInUseBeforeMC(in_use);
2141
- if (p == last_in_use) {
2142
- // We passed a page containing allocation top. All subsequent
2143
- // pages are not used.
2144
- in_use = false;
2145
- }
2146
- }
2147
-
2148
- if (page_list_is_chunk_ordered_) return;
2149
-
2150
- Page* new_last_in_use = Page::FromAddress(NULL);
2151
- heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
2152
- this, &first_page_, &last_page_, &new_last_in_use);
2153
- ASSERT(new_last_in_use->is_valid());
2154
-
2155
- if (new_last_in_use != last_in_use) {
2156
- // Current allocation top points to a page which is now in the middle
2157
- // of the page list. We should move the allocation top forward to the new last
2158
- // used page so various object iterators will continue to work properly.
2159
- int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2160
- last_in_use->AllocationTop());
2161
-
2162
- last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2163
- if (size_in_bytes > 0) {
2164
- Address start = last_in_use->AllocationTop();
2165
- if (deallocate_blocks) {
2166
- accounting_stats_.AllocateBytes(size_in_bytes);
2167
- DeallocateBlock(start, size_in_bytes, add_to_freelist);
2168
- } else {
2169
- heap()->CreateFillerObjectAt(start, size_in_bytes);
2170
- }
2171
- }
2172
-
2173
- // New last in use page was in the middle of the list before
2174
- // sorting so it is full.
2175
- SetTop(new_last_in_use->AllocationTop());
2176
-
2177
- ASSERT(AllocationTopPage() == new_last_in_use);
2178
- ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2179
- }
2180
-
2181
- PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2182
- while (pages_in_use_iterator.has_next()) {
2183
- Page* p = pages_in_use_iterator.next();
2184
- if (!p->WasInUseBeforeMC()) {
2185
- // Empty page is in the middle of a sequence of used pages.
2186
- // Allocate it as a whole and deallocate immediately.
2187
- int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2188
- p->ObjectAreaStart());
2189
-
2190
- p->SetAllocationWatermark(p->ObjectAreaStart());
2191
- Address start = p->ObjectAreaStart();
2192
- if (deallocate_blocks) {
2193
- accounting_stats_.AllocateBytes(size_in_bytes);
2194
- DeallocateBlock(start, size_in_bytes, add_to_freelist);
2195
- } else {
2196
- heap()->CreateFillerObjectAt(start, size_in_bytes);
2197
- }
2198
- }
2199
- }
2200
-
2201
- page_list_is_chunk_ordered_ = true;
2202
- }
2203
-
2204
-
2205
- void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2206
- if (will_compact) {
2207
- RelinkPageListInChunkOrder(false);
2208
- }
2209
- }
2210
-
2211
-
2212
- bool PagedSpace::ReserveSpace(int bytes) {
2213
- Address limit = allocation_info_.limit;
2214
- Address top = allocation_info_.top;
2215
- if (limit - top >= bytes) return true;
2216
-
2217
- // There wasn't enough space in the current page. Let's put the rest
2218
- // of the page on the free list and start a fresh page.
2219
- PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2220
-
2221
- Page* reserved_page = TopPageOf(allocation_info_);
2222
- int bytes_left_to_reserve = bytes;
2223
- while (bytes_left_to_reserve > 0) {
2224
- if (!reserved_page->next_page()->is_valid()) {
2225
- if (heap()->OldGenerationAllocationLimitReached()) return false;
2226
- Expand(reserved_page);
2227
- }
2228
- bytes_left_to_reserve -= Page::kPageSize;
2229
- reserved_page = reserved_page->next_page();
2230
- if (!reserved_page->is_valid()) return false;
2231
- }
2232
- ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
2233
- TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
2234
- SetAllocationInfo(&allocation_info_,
2235
- TopPageOf(allocation_info_)->next_page());
2236
- return true;
2237
- }
2238
-
2239
-
2240
- // You have to call this last, since the implementation from PagedSpace
2241
- // doesn't know that memory was 'promised' to large object space.
2242
- bool LargeObjectSpace::ReserveSpace(int bytes) {
2243
- return heap()->OldGenerationSpaceAvailable() >= bytes;
2244
- }
2245
-
2246
-
2247
- // Slow case for normal allocation. Try in order: (1) allocate in the next
2248
- // page in the space, (2) allocate off the space's free list, (3) expand the
2249
- // space, (4) fail.
2250
- HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2251
- // Linear allocation in this space has failed. If there is another page
2252
- // in the space, move to that page and allocate there. This allocation
2253
- // should succeed (size_in_bytes should not be greater than a page's
2254
- // object area size).
2255
- Page* current_page = TopPageOf(allocation_info_);
2256
- if (current_page->next_page()->is_valid()) {
2257
- return AllocateInNextPage(current_page, size_in_bytes);
2258
- }
2259
-
2260
- // There is no next page in this space. Try free list allocation unless that
2261
- // is currently forbidden.
2262
- if (!heap()->linear_allocation()) {
2263
- int wasted_bytes;
2264
- Object* result;
2265
- MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
2266
- accounting_stats_.WasteBytes(wasted_bytes);
2267
- if (maybe->ToObject(&result)) {
2268
- accounting_stats_.AllocateBytes(size_in_bytes);
2269
-
2270
- HeapObject* obj = HeapObject::cast(result);
2271
- Page* p = Page::FromAddress(obj->address());
2272
-
2273
- if (obj->address() >= p->AllocationWatermark()) {
2274
- // There should be no hole between the allocation watermark
2275
- // and allocated object address.
2276
- // Memory above the allocation watermark was not swept and
2277
- // might contain garbage pointers to new space.
2278
- ASSERT(obj->address() == p->AllocationWatermark());
2279
- p->SetAllocationWatermark(obj->address() + size_in_bytes);
2280
- }
2281
-
2282
- return obj;
2283
- }
2284
- }
2285
-
2286
- // Free list allocation failed and there is no next page. Fail if we have
2287
- // hit the old generation size limit that should cause a garbage
2288
- // collection.
2289
- if (!heap()->always_allocate() &&
2290
- heap()->OldGenerationAllocationLimitReached()) {
2291
- return NULL;
2292
- }
2293
-
2294
- // Try to expand the space and allocate in the new next page.
2295
- ASSERT(!current_page->next_page()->is_valid());
2296
- if (Expand(current_page)) {
2297
- return AllocateInNextPage(current_page, size_in_bytes);
2298
- }
2299
-
2300
- // Finally, fail.
2301
- return NULL;
2302
- }
2303
-
2304
-
2305
- void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2306
- current_page->SetAllocationWatermark(allocation_info_.top);
2307
- int free_size =
2308
- static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2309
- if (free_size > 0) {
2310
- int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2311
- accounting_stats_.WasteBytes(wasted_bytes);
2312
- }
2313
- }
2314
-
2315
-
2316
- void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2317
- current_page->SetAllocationWatermark(allocation_info_.top);
2318
- int free_size =
2319
- static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2320
- // In the fixed space free list all the free list items have the right size.
2321
- // We use up the rest of the page while preserving this invariant.
2322
- while (free_size >= object_size_in_bytes_) {
2323
- free_list_.Free(allocation_info_.top);
2324
- allocation_info_.top += object_size_in_bytes_;
2325
- free_size -= object_size_in_bytes_;
2326
- accounting_stats_.WasteBytes(object_size_in_bytes_);
2327
- }
2328
- }
2329
-
2330
-
2331
- // Add the block at the top of the page to the space's free list, set the
2332
- // allocation info to the next page (assumed to be one), and allocate
2333
- // linearly there.
2334
- HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2335
- int size_in_bytes) {
2336
- ASSERT(current_page->next_page()->is_valid());
2337
- Page* next_page = current_page->next_page();
2338
- next_page->ClearGCFields();
2339
- PutRestOfCurrentPageOnFreeList(current_page);
2340
- SetAllocationInfo(&allocation_info_, next_page);
2341
- return AllocateLinearly(&allocation_info_, size_in_bytes);
2342
- }
2343
-
2344
-
2345
- void OldSpace::DeallocateBlock(Address start,
2346
- int size_in_bytes,
2347
- bool add_to_freelist) {
2348
- Free(start, size_in_bytes, add_to_freelist);
2349
- }
2350
-
2351
-
2352
- #ifdef DEBUG
2353
- void PagedSpace::ReportCodeStatistics() {
2354
- Isolate* isolate = Isolate::Current();
2355
- CommentStatistic* comments_statistics =
2356
- isolate->paged_space_comments_statistics();
2357
- ReportCodeKindStatistics();
2358
- PrintF("Code comment statistics (\" [ comment-txt : size/ "
2359
- "count (average)\"):\n");
2360
- for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2361
- const CommentStatistic& cs = comments_statistics[i];
2362
- if (cs.size > 0) {
2363
- PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2364
- cs.size/cs.count);
2365
- }
2366
- }
2367
- PrintF("\n");
2368
- }
2369
-
2370
-
2371
- void PagedSpace::ResetCodeStatistics() {
2372
- Isolate* isolate = Isolate::Current();
2373
- CommentStatistic* comments_statistics =
2374
- isolate->paged_space_comments_statistics();
2375
- ClearCodeKindStatistics();
2376
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2377
- comments_statistics[i].Clear();
2378
- }
2379
- comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2380
- comments_statistics[CommentStatistic::kMaxComments].size = 0;
2381
- comments_statistics[CommentStatistic::kMaxComments].count = 0;
2382
- }
2383
-
2384
-
2385
- // Adds a comment to the 'comments_statistics' table. Performance OK as long as
2386
- // 'kMaxComments' is small
2387
- static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2388
- CommentStatistic* comments_statistics =
2389
- isolate->paged_space_comments_statistics();
2390
- // Do not count empty comments
2391
- if (delta <= 0) return;
2392
- CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2393
- // Search for a free or matching entry in 'comments_statistics': 'cs'
2394
- // points to result.
2395
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2396
- if (comments_statistics[i].comment == NULL) {
2397
- cs = &comments_statistics[i];
2398
- cs->comment = comment;
2399
- break;
2400
- } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2401
- cs = &comments_statistics[i];
2402
- break;
2403
- }
2404
- }
2405
- // Update entry for 'comment'
2406
- cs->size += delta;
2407
- cs->count += 1;
2408
- }
2409
-
2410
-
2411
- // Call for each nested comment start (start marked with '[ xxx', end marked
2412
- // with ']'). RelocIterator 'it' must point to a comment reloc info.
2413
- static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2414
- ASSERT(!it->done());
2415
- ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2416
- const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2417
- if (tmp[0] != '[') {
2418
- // Not a nested comment; skip
2419
- return;
2420
- }
2421
-
2422
- // Search for end of nested comment or a new nested comment
2423
- const char* const comment_txt =
2424
- reinterpret_cast<const char*>(it->rinfo()->data());
2425
- const byte* prev_pc = it->rinfo()->pc();
2426
- int flat_delta = 0;
2427
- it->next();
2428
- while (true) {
2429
- // All nested comments must be terminated properly, and therefore exit
2430
- // from the loop.
2431
- ASSERT(!it->done());
2432
- if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2433
- const char* const txt =
2434
- reinterpret_cast<const char*>(it->rinfo()->data());
2435
- flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2436
- if (txt[0] == ']') break; // End of nested comment
2437
- // A new comment
2438
- CollectCommentStatistics(isolate, it);
2439
- // Skip code that was covered with previous comment
2440
- prev_pc = it->rinfo()->pc();
2441
- }
2442
- it->next();
2443
- }
2444
- EnterComment(isolate, comment_txt, flat_delta);
2445
- }
2446
-
2447
-
2448
- // Collects code size statistics:
2449
- // - by code kind
2450
- // - by code comment
2451
- void PagedSpace::CollectCodeStatistics() {
2452
- Isolate* isolate = heap()->isolate();
2453
- HeapObjectIterator obj_it(this);
2454
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
2455
- if (obj->IsCode()) {
2456
- Code* code = Code::cast(obj);
2457
- isolate->code_kind_statistics()[code->kind()] += code->Size();
2458
- RelocIterator it(code);
2459
- int delta = 0;
2460
- const byte* prev_pc = code->instruction_start();
2461
- while (!it.done()) {
2462
- if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2463
- delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2464
- CollectCommentStatistics(isolate, &it);
2465
- prev_pc = it.rinfo()->pc();
2466
- }
2467
- it.next();
2468
- }
2469
-
2470
- ASSERT(code->instruction_start() <= prev_pc &&
2471
- prev_pc <= code->instruction_end());
2472
- delta += static_cast<int>(code->instruction_end() - prev_pc);
2473
- EnterComment(isolate, "NoComment", delta);
2474
- }
2475
- }
2476
- }
2477
-
2478
-
2479
- void OldSpace::ReportStatistics() {
2480
- int pct = static_cast<int>(Available() * 100 / Capacity());
2481
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
2482
- ", waste: %" V8_PTR_PREFIX "d"
2483
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2484
- Capacity(), Waste(), Available(), pct);
2485
-
2486
- ClearHistograms();
2487
- HeapObjectIterator obj_it(this);
2488
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2489
- CollectHistogramInfo(obj);
2490
- ReportHistogram(true);
2491
- }
2492
- #endif
2493
-
2494
- // -----------------------------------------------------------------------------
2495
- // FixedSpace implementation
2496
-
2497
- void FixedSpace::PrepareForMarkCompact(bool will_compact) {
2498
- // Call prepare of the super class.
2499
- PagedSpace::PrepareForMarkCompact(will_compact);
2500
-
2501
- if (will_compact) {
2502
- // Reset relocation info.
2503
- MCResetRelocationInfo();
2504
-
2505
- // During a compacting collection, everything in the space is considered
2506
- // 'available' (set by the call to MCResetRelocationInfo) and we will
2507
- // rediscover live and wasted bytes during the collection.
2508
- ASSERT(Available() == Capacity());
2509
- } else {
2510
- // During a non-compacting collection, everything below the linear
2511
- // allocation pointer except wasted top-of-page blocks is considered
2512
- // allocated and we will rediscover available bytes during the
2513
- // collection.
2514
- accounting_stats_.AllocateBytes(free_list_.available());
2515
- }
2516
-
2517
- // Clear the free list before a full GC---it will be rebuilt afterward.
2518
- free_list_.Reset();
2519
- }
2520
-
2521
-
2522
- void FixedSpace::MCCommitRelocationInfo() {
2523
- // Update fast allocation info.
2524
- allocation_info_.top = mc_forwarding_info_.top;
2525
- allocation_info_.limit = mc_forwarding_info_.limit;
2526
- ASSERT(allocation_info_.VerifyPagedAllocation());
2527
-
2528
- // The space is compacted and we haven't yet wasted any space.
2529
- ASSERT(Waste() == 0);
2530
-
2531
- // Update allocation_top of each page in use and compute waste.
2532
- int computed_size = 0;
2533
- PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2534
- while (it.has_next()) {
2535
- Page* page = it.next();
2536
- Address page_top = page->AllocationTop();
2537
- computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
2538
- if (it.has_next()) {
2539
- accounting_stats_.WasteBytes(
2540
- static_cast<int>(page->ObjectAreaEnd() - page_top));
2541
- page->SetAllocationWatermark(page_top);
2542
- }
2543
- }
2544
-
2545
- // Make sure the computed size - based on the used portion of the
2546
- // pages in use - matches the size we adjust during allocation.
2547
- ASSERT(computed_size == Size());
2548
- }
2549
-
2550
-
2551
- // Slow case for normal allocation. Try in order: (1) allocate in the next
2552
- // page in the space, (2) allocate off the space's free list, (3) expand the
2553
- // space, (4) fail.
2554
- HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2555
- ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2556
- // Linear allocation in this space has failed. If there is another page
2557
- // in the space, move to that page and allocate there. This allocation
2558
- // should succeed.
2559
- Page* current_page = TopPageOf(allocation_info_);
2560
- if (current_page->next_page()->is_valid()) {
2561
- return AllocateInNextPage(current_page, size_in_bytes);
2562
- }
2563
-
2564
- // There is no next page in this space. Try free list allocation unless
2565
- // that is currently forbidden. The fixed space free list implicitly assumes
2566
- // that all free blocks are of the fixed size.
2567
- if (!heap()->linear_allocation()) {
2568
- Object* result;
2569
- MaybeObject* maybe = free_list_.Allocate();
2570
- if (maybe->ToObject(&result)) {
2571
- accounting_stats_.AllocateBytes(size_in_bytes);
2572
- HeapObject* obj = HeapObject::cast(result);
2573
- Page* p = Page::FromAddress(obj->address());
2574
-
2575
- if (obj->address() >= p->AllocationWatermark()) {
2576
- // There should be no hole between the allocation watermark
2577
- // and allocated object address.
2578
- // Memory above the allocation watermark was not swept and
2579
- // might contain garbage pointers to new space.
2580
- ASSERT(obj->address() == p->AllocationWatermark());
2581
- p->SetAllocationWatermark(obj->address() + size_in_bytes);
2582
- }
2583
-
2584
- return obj;
2585
- }
2586
- }
2587
-
2588
- // Free list allocation failed and there is no next page. Fail if we have
2589
- // hit the old generation size limit that should cause a garbage
2590
- // collection.
2591
- if (!heap()->always_allocate() &&
2592
- heap()->OldGenerationAllocationLimitReached()) {
2593
- return NULL;
2594
- }
2595
-
2596
- // Try to expand the space and allocate in the new next page.
2597
- ASSERT(!current_page->next_page()->is_valid());
2598
- if (Expand(current_page)) {
2599
- return AllocateInNextPage(current_page, size_in_bytes);
2600
- }
2601
-
2602
- // Finally, fail.
2603
- return NULL;
2604
- }
2605
-
2606
-
2607
- // Move to the next page (there is assumed to be one) and allocate there.
2608
- // The top of page block is always wasted, because it is too small to hold a
2609
- // map.
2610
- HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2611
- int size_in_bytes) {
2612
- ASSERT(current_page->next_page()->is_valid());
2613
- ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
2614
- ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2615
- Page* next_page = current_page->next_page();
2616
- next_page->ClearGCFields();
2617
- current_page->SetAllocationWatermark(allocation_info_.top);
2618
- accounting_stats_.WasteBytes(page_extra_);
2619
- SetAllocationInfo(&allocation_info_, next_page);
2620
- return AllocateLinearly(&allocation_info_, size_in_bytes);
2621
- }
2622
-
2623
-
2624
- void FixedSpace::DeallocateBlock(Address start,
2625
- int size_in_bytes,
2626
- bool add_to_freelist) {
2627
- // Free-list elements in fixed space are assumed to have a fixed size.
2628
- // We break the free block into chunks and add them to the free list
2629
- // individually.
2630
- int size = object_size_in_bytes();
2631
- ASSERT(size_in_bytes % size == 0);
2632
- Address end = start + size_in_bytes;
2633
- for (Address a = start; a < end; a += size) {
2634
- Free(a, add_to_freelist);
2635
- }
2636
- }
2637
-
2638
-
2639
- #ifdef DEBUG
2640
- void FixedSpace::ReportStatistics() {
2641
- int pct = static_cast<int>(Available() * 100 / Capacity());
2642
- PrintF(" capacity: %" V8_PTR_PREFIX "d"
2643
- ", waste: %" V8_PTR_PREFIX "d"
2644
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2645
- Capacity(), Waste(), Available(), pct);
2646
-
2647
- ClearHistograms();
2648
- HeapObjectIterator obj_it(this);
2649
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2650
- CollectHistogramInfo(obj);
2651
- ReportHistogram(false);
2652
- }
2653
- #endif
2654
-
2655
-
2656
- // -----------------------------------------------------------------------------
2657
- // MapSpace implementation
2658
-
2659
- void MapSpace::PrepareForMarkCompact(bool will_compact) {
2660
- // Call prepare of the super class.
2661
- FixedSpace::PrepareForMarkCompact(will_compact);
2662
-
2663
- if (will_compact) {
2664
- // Initialize map index entry.
2665
- int page_count = 0;
2666
- PageIterator it(this, PageIterator::ALL_PAGES);
2667
- while (it.has_next()) {
2668
- ASSERT_MAP_PAGE_INDEX(page_count);
2669
-
2670
- Page* p = it.next();
2671
- ASSERT(p->mc_page_index == page_count);
2672
-
2673
- page_addresses_[page_count++] = p->address();
2674
- }
2675
- }
2676
- }
2677
-
2678
-
2679
- #ifdef DEBUG
2680
- void MapSpace::VerifyObject(HeapObject* object) {
2681
- // The object should be a map or a free-list node.
2682
- ASSERT(object->IsMap() || object->IsByteArray());
2683
- }
2684
- #endif
2685
-
2686
-
2687
- // -----------------------------------------------------------------------------
2688
- // GlobalPropertyCellSpace implementation
2689
-
2690
- #ifdef DEBUG
2691
- void CellSpace::VerifyObject(HeapObject* object) {
2692
- // The object should be a global object property cell or a free-list node.
2693
- ASSERT(object->IsJSGlobalPropertyCell() ||
2694
- object->map() == heap()->two_pointer_filler_map());
2695
- }
2696
- #endif
2697
-
2698
-
2699
- // -----------------------------------------------------------------------------
2700
- // LargeObjectIterator
2701
-
2702
- LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2703
- current_ = space->first_chunk_;
2704
- size_func_ = NULL;
2705
- }
2706
-
2707
-
2708
- LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2709
- HeapObjectCallback size_func) {
2710
- current_ = space->first_chunk_;
2711
- size_func_ = size_func;
2712
- }
2713
-
2714
-
2715
- HeapObject* LargeObjectIterator::next() {
2716
- if (current_ == NULL) return NULL;
2717
-
2718
- HeapObject* object = current_->GetObject();
2719
- current_ = current_->next();
2720
- return object;
2721
- }
2722
-
2723
-
2724
- // -----------------------------------------------------------------------------
2725
- // LargeObjectChunk
2726
-
2727
- LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2728
- Executability executable) {
2729
- size_t requested = ChunkSizeFor(size_in_bytes);
2730
- size_t size;
2731
- Isolate* isolate = Isolate::Current();
2732
- void* mem = isolate->memory_allocator()->AllocateRawMemory(
2733
- requested, &size, executable);
2734
- if (mem == NULL) return NULL;
2735
-
2736
- // The start of the chunk may be overlaid with a page, so we have to
2737
- // make sure that the page flags fit in the size field.
2738
- ASSERT((size & Page::kPageFlagMask) == 0);
2739
-
2740
- LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
2741
- if (size < requested) {
2742
- isolate->memory_allocator()->FreeRawMemory(
2743
- mem, size, executable);
2744
- LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
2745
- return NULL;
2746
- }
2747
-
2748
- ObjectSpace space = (executable == EXECUTABLE)
2749
- ? kObjectSpaceCodeSpace
2750
- : kObjectSpaceLoSpace;
2751
- isolate->memory_allocator()->PerformAllocationCallback(
2752
- space, kAllocationActionAllocate, size);
2753
-
2754
- LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2755
- chunk->size_ = size;
2756
- Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2757
- page->heap_ = isolate->heap();
2758
- return chunk;
2759
- }
2760
-
2761
-
2762
- int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2763
- int os_alignment = static_cast<int>(OS::AllocateAlignment());
2764
- if (os_alignment < Page::kPageSize) {
2765
- size_in_bytes += (Page::kPageSize - os_alignment);
2766
- }
2767
- return size_in_bytes + Page::kObjectStartOffset;
2768
- }
2769
-
2770
- // -----------------------------------------------------------------------------
2771
- // LargeObjectSpace
2772
-
2773
- LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
2774
- : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2775
- first_chunk_(NULL),
2776
- size_(0),
2777
- page_count_(0),
2778
- objects_size_(0) {}
2779
-
2780
-
2781
- bool LargeObjectSpace::Setup() {
2782
- first_chunk_ = NULL;
2783
- size_ = 0;
2784
- page_count_ = 0;
2785
- objects_size_ = 0;
2786
- return true;
2787
- }
2788
-
2789
-
2790
- void LargeObjectSpace::TearDown() {
2791
- while (first_chunk_ != NULL) {
2792
- LargeObjectChunk* chunk = first_chunk_;
2793
- first_chunk_ = first_chunk_->next();
2794
- LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
2795
- Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2796
- Executability executable =
2797
- page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
2798
- ObjectSpace space = kObjectSpaceLoSpace;
2799
- if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2800
- size_t size = chunk->size();
2801
- heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
2802
- size,
2803
- executable);
2804
- heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2805
- space, kAllocationActionFree, size);
2806
- }
2807
-
2808
- size_ = 0;
2809
- page_count_ = 0;
2810
- objects_size_ = 0;
2811
- }
2812
-
2813
-
2814
- #ifdef ENABLE_HEAP_PROTECTION
2815
-
2816
- void LargeObjectSpace::Protect() {
2817
- LargeObjectChunk* chunk = first_chunk_;
2818
- while (chunk != NULL) {
2819
- heap()->isolate()->memory_allocator()->Protect(chunk->address(),
2820
- chunk->size());
2821
- chunk = chunk->next();
2822
- }
2823
- }
2824
-
2825
-
2826
- void LargeObjectSpace::Unprotect() {
2827
- LargeObjectChunk* chunk = first_chunk_;
2828
- while (chunk != NULL) {
2829
- bool is_code = chunk->GetObject()->IsCode();
2830
- heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
2831
- chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
2832
- chunk = chunk->next();
2833
- }
2834
- }
2835
-
2836
- #endif
2837
-
2838
-
2839
- MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
2840
- int object_size,
2841
- Executability executable) {
2842
- ASSERT(0 < object_size && object_size <= requested_size);
2843
-
2844
- // Check if we want to force a GC before growing the old space further.
2845
- // If so, fail the allocation.
2846
- if (!heap()->always_allocate() &&
2847
- heap()->OldGenerationAllocationLimitReached()) {
2848
- return Failure::RetryAfterGC(identity());
2849
- }
2850
-
2851
- LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
2852
- if (chunk == NULL) {
2853
- return Failure::RetryAfterGC(identity());
2854
- }
2855
-
2856
- size_ += static_cast<int>(chunk->size());
2857
- objects_size_ += requested_size;
2858
- page_count_++;
2859
- chunk->set_next(first_chunk_);
2860
- first_chunk_ = chunk;
2861
-
2862
- // Initialize page header.
2863
- Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2864
- Address object_address = page->ObjectAreaStart();
2865
-
2866
- // Clear the low order bit of the second word in the page to flag it as a
2867
- // large object page. If the chunk_size happened to be written there, its
2868
- // low order bit should already be clear.
2869
- page->SetIsLargeObjectPage(true);
2870
- page->SetIsPageExecutable(executable);
2871
- page->SetRegionMarks(Page::kAllRegionsCleanMarks);
2872
- return HeapObject::FromAddress(object_address);
2873
- }
2874
-
2875
-
2876
- MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
2877
- ASSERT(0 < size_in_bytes);
2878
- return AllocateRawInternal(size_in_bytes,
2879
- size_in_bytes,
2880
- EXECUTABLE);
2881
- }
2882
-
2883
-
2884
- MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2885
- ASSERT(0 < size_in_bytes);
2886
- return AllocateRawInternal(size_in_bytes,
2887
- size_in_bytes,
2888
- NOT_EXECUTABLE);
2889
- }
2890
-
2891
-
2892
- MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2893
- ASSERT(0 < size_in_bytes);
2894
- return AllocateRawInternal(size_in_bytes,
2895
- size_in_bytes,
2896
- NOT_EXECUTABLE);
2897
- }
2898
-
2899
-
2900
- // GC support
2901
- MaybeObject* LargeObjectSpace::FindObject(Address a) {
2902
- for (LargeObjectChunk* chunk = first_chunk_;
2903
- chunk != NULL;
2904
- chunk = chunk->next()) {
2905
- Address chunk_address = chunk->address();
2906
- if (chunk_address <= a && a < chunk_address + chunk->size()) {
2907
- return chunk->GetObject();
2908
- }
2909
- }
2910
- return Failure::Exception();
2911
- }
2912
-
2913
-
2914
- LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
2915
- // TODO(853): Change this implementation to only find executable
2916
- // chunks and use some kind of hash-based approach to speed it up.
2917
- for (LargeObjectChunk* chunk = first_chunk_;
2918
- chunk != NULL;
2919
- chunk = chunk->next()) {
2920
- Address chunk_address = chunk->address();
2921
- if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2922
- return chunk;
2923
- }
2924
- }
2925
- return NULL;
2926
- }
2927
-
2928
-
2929
- void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
2930
- LargeObjectIterator it(this);
2931
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2932
- // We only have code, sequential strings, or fixed arrays in large
2933
- // object space, and only fixed arrays can possibly contain pointers to
2934
- // the young generation.
2935
- if (object->IsFixedArray()) {
2936
- Page* page = Page::FromAddress(object->address());
2937
- uint32_t marks = page->GetRegionMarks();
2938
- uint32_t newmarks = Page::kAllRegionsCleanMarks;
2939
-
2940
- if (marks != Page::kAllRegionsCleanMarks) {
2941
- // For a large page a single dirty mark corresponds to several
2942
- // regions (modulo 32). So we treat a large page as a sequence of
2943
- // normal pages of size Page::kPageSize having the same dirty marks
2944
- // and subsequently iterate dirty regions on each of these pages.
2945
- Address start = object->address();
2946
- Address end = page->ObjectAreaEnd();
2947
- Address object_end = start + object->Size();
2948
-
2949
- // Iterate regions of the first normal page covering object.
2950
- uint32_t first_region_number = page->GetRegionNumberForAddress(start);
2951
- newmarks |=
2952
- heap()->IterateDirtyRegions(marks >> first_region_number,
2953
- start,
2954
- end,
2955
- &Heap::IteratePointersInDirtyRegion,
2956
- copy_object) << first_region_number;
2957
-
2958
- start = end;
2959
- end = start + Page::kPageSize;
2960
- while (end <= object_end) {
2961
- // Iterate next 32 regions.
2962
- newmarks |=
2963
- heap()->IterateDirtyRegions(marks,
2964
- start,
2965
- end,
2966
- &Heap::IteratePointersInDirtyRegion,
2967
- copy_object);
2968
- start = end;
2969
- end = start + Page::kPageSize;
2970
- }
2971
-
2972
- if (start != object_end) {
2973
- // Iterate the last piece of an object which is less than
2974
- // Page::kPageSize.
2975
- newmarks |=
2976
- heap()->IterateDirtyRegions(marks,
2977
- start,
2978
- object_end,
2979
- &Heap::IteratePointersInDirtyRegion,
2980
- copy_object);
2981
- }
2982
-
2983
- page->SetRegionMarks(newmarks);
2984
- }
2985
- }
2986
- }
2987
- }
2988
-
2989
-
2990
- void LargeObjectSpace::FreeUnmarkedObjects() {
2991
- LargeObjectChunk* previous = NULL;
2992
- LargeObjectChunk* current = first_chunk_;
2993
- while (current != NULL) {
2994
- HeapObject* object = current->GetObject();
2995
- if (object->IsMarked()) {
2996
- object->ClearMark();
2997
- heap()->mark_compact_collector()->tracer()->decrement_marked_count();
2998
- previous = current;
2999
- current = current->next();
3000
- } else {
3001
- Page* page = Page::FromAddress(RoundUp(current->address(),
3002
- Page::kPageSize));
3003
- Executability executable =
3004
- page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
3005
- Address chunk_address = current->address();
3006
- size_t chunk_size = current->size();
3007
-
3008
- // Cut the chunk out from the chunk list.
3009
- current = current->next();
3010
- if (previous == NULL) {
3011
- first_chunk_ = current;
3012
- } else {
3013
- previous->set_next(current);
3014
- }
3015
-
3016
- // Free the chunk.
3017
- heap()->mark_compact_collector()->ReportDeleteIfNeeded(
3018
- object, heap()->isolate());
3019
- LiveObjectList::ProcessNonLive(object);
3020
-
3021
- size_ -= static_cast<int>(chunk_size);
3022
- objects_size_ -= object->Size();
3023
- page_count_--;
3024
- ObjectSpace space = kObjectSpaceLoSpace;
3025
- if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
3026
- heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
3027
- chunk_size,
3028
- executable);
3029
- heap()->isolate()->memory_allocator()->PerformAllocationCallback(
3030
- space, kAllocationActionFree, size_);
3031
- LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
3032
- }
3033
- }
3034
- }
3035
-
3036
-
3037
- bool LargeObjectSpace::Contains(HeapObject* object) {
3038
- Address address = object->address();
3039
- if (heap()->new_space()->Contains(address)) {
3040
- return false;
3041
- }
3042
- Page* page = Page::FromAddress(address);
3043
-
3044
- SLOW_ASSERT(!page->IsLargeObjectPage()
3045
- || !FindObject(address)->IsFailure());
3046
-
3047
- return page->IsLargeObjectPage();
3048
- }
3049
-
3050
-
3051
- #ifdef DEBUG
3052
- // We do not assume that the large object iterator works, because it depends
3053
- // on the invariants we are checking during verification.
3054
- void LargeObjectSpace::Verify() {
3055
- for (LargeObjectChunk* chunk = first_chunk_;
3056
- chunk != NULL;
3057
- chunk = chunk->next()) {
3058
- // Each chunk contains an object that starts at the large object page's
3059
- // object area start.
3060
- HeapObject* object = chunk->GetObject();
3061
- Page* page = Page::FromAddress(object->address());
3062
- ASSERT(object->address() == page->ObjectAreaStart());
3063
-
3064
- // The first word should be a map, and we expect all map pointers to be
3065
- // in map space.
3066
- Map* map = object->map();
3067
- ASSERT(map->IsMap());
3068
- ASSERT(heap()->map_space()->Contains(map));
3069
-
3070
- // We have only code, sequential strings, external strings
3071
- // (sequential strings that have been morphed into external
3072
- // strings), fixed arrays, and byte arrays in large object space.
3073
- ASSERT(object->IsCode() || object->IsSeqString() ||
3074
- object->IsExternalString() || object->IsFixedArray() ||
3075
- object->IsByteArray());
3076
-
3077
- // The object itself should look OK.
3078
- object->Verify();
3079
-
3080
- // Byte arrays and strings don't have interior pointers.
3081
- if (object->IsCode()) {
3082
- VerifyPointersVisitor code_visitor;
3083
- object->IterateBody(map->instance_type(),
3084
- object->Size(),
3085
- &code_visitor);
3086
- } else if (object->IsFixedArray()) {
3087
- // We loop over fixed arrays ourselves, rather than using the visitor,
3088
- // because the visitor doesn't support the start/offset iteration
3089
- // needed for IsRegionDirty.
3090
- FixedArray* array = FixedArray::cast(object);
3091
- for (int j = 0; j < array->length(); j++) {
3092
- Object* element = array->get(j);
3093
- if (element->IsHeapObject()) {
3094
- HeapObject* element_object = HeapObject::cast(element);
3095
- ASSERT(heap()->Contains(element_object));
3096
- ASSERT(element_object->map()->IsMap());
3097
- if (heap()->InNewSpace(element_object)) {
3098
- Address array_addr = object->address();
3099
- Address element_addr = array_addr + FixedArray::kHeaderSize +
3100
- j * kPointerSize;
3101
-
3102
- ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
3103
- }
3104
- }
3105
- }
3106
- }
3107
- }
3108
- }
3109
-
3110
-
3111
- void LargeObjectSpace::Print() {
3112
- LargeObjectIterator it(this);
3113
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
3114
- obj->Print();
3115
- }
3116
- }
3117
-
3118
-
3119
- void LargeObjectSpace::ReportStatistics() {
3120
- PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
3121
- int num_objects = 0;
3122
- ClearHistograms();
3123
- LargeObjectIterator it(this);
3124
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
3125
- num_objects++;
3126
- CollectHistogramInfo(obj);
3127
- }
3128
-
3129
- PrintF(" number of objects %d, "
3130
- "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
3131
- if (num_objects > 0) ReportHistogram(false);
3132
- }
3133
-
3134
-
3135
- void LargeObjectSpace::CollectCodeStatistics() {
3136
- Isolate* isolate = heap()->isolate();
3137
- LargeObjectIterator obj_it(this);
3138
- for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
3139
- if (obj->IsCode()) {
3140
- Code* code = Code::cast(obj);
3141
- isolate->code_kind_statistics()[code->kind()] += code->Size();
3142
- }
3143
- }
3144
- }
3145
- #endif // DEBUG
3146
-
3147
- } } // namespace v8::internal