therubyracer 0.9.0beta2 → 0.9.0beta3

Potentially problematic release.

This version of therubyracer might be problematic.

Files changed (920)
  1. data/.gitmodules +3 -0
  2. data/ext/v8/upstream/Makefile +1 -2
  3. data/ext/v8/upstream/v8/.gitignore +33 -0
  4. data/ext/v8/upstream/v8/AUTHORS +42 -0
  5. data/ext/v8/upstream/v8/ChangeLog +2663 -0
  6. data/ext/v8/upstream/{3.1.8 → v8}/LICENSE +0 -0
  7. data/ext/v8/upstream/{3.1.8 → v8}/LICENSE.strongtalk +0 -0
  8. data/ext/v8/upstream/{3.1.8 → v8}/LICENSE.v8 +0 -0
  9. data/ext/v8/upstream/{3.1.8 → v8}/LICENSE.valgrind +0 -0
  10. data/ext/v8/upstream/v8/SConstruct +1473 -0
  11. data/ext/v8/upstream/{3.1.8 → v8}/build/README.txt +0 -0
  12. data/ext/v8/upstream/{3.1.8 → v8}/build/all.gyp +0 -0
  13. data/ext/v8/upstream/{3.1.8 → v8}/build/armu.gypi +0 -0
  14. data/ext/v8/upstream/{3.1.8 → v8}/build/common.gypi +0 -0
  15. data/ext/v8/upstream/{3.1.8 → v8}/build/gyp_v8 +0 -0
  16. data/ext/v8/upstream/v8/include/v8-debug.h +394 -0
  17. data/ext/v8/upstream/v8/include/v8-preparser.h +116 -0
  18. data/ext/v8/upstream/v8/include/v8-profiler.h +505 -0
  19. data/ext/v8/upstream/v8/include/v8-testing.h +104 -0
  20. data/ext/v8/upstream/v8/include/v8.h +4000 -0
  21. data/ext/v8/upstream/{3.1.8 → v8}/include/v8stdint.h +0 -0
  22. data/ext/v8/upstream/v8/preparser/SConscript +38 -0
  23. data/ext/v8/upstream/v8/preparser/preparser-process.cc +169 -0
  24. data/ext/v8/upstream/v8/src/SConscript +380 -0
  25. data/ext/v8/upstream/v8/src/accessors.cc +766 -0
  26. data/ext/v8/upstream/{3.1.8 → v8}/src/accessors.h +0 -0
  27. data/ext/v8/upstream/v8/src/allocation-inl.h +49 -0
  28. data/ext/v8/upstream/v8/src/allocation.cc +122 -0
  29. data/ext/v8/upstream/v8/src/allocation.h +143 -0
  30. data/ext/v8/upstream/v8/src/api.cc +5678 -0
  31. data/ext/v8/upstream/v8/src/api.h +572 -0
  32. data/ext/v8/upstream/{3.1.8 → v8}/src/apinatives.js +0 -0
  33. data/ext/v8/upstream/v8/src/apiutils.h +73 -0
  34. data/ext/v8/upstream/v8/src/arguments.h +116 -0
  35. data/ext/v8/upstream/v8/src/arm/assembler-arm-inl.h +353 -0
  36. data/ext/v8/upstream/v8/src/arm/assembler-arm.cc +2877 -0
  37. data/ext/v8/upstream/v8/src/arm/assembler-arm.h +1382 -0
  38. data/ext/v8/upstream/v8/src/arm/builtins-arm.cc +1634 -0
  39. data/ext/v8/upstream/v8/src/arm/code-stubs-arm.cc +6917 -0
  40. data/ext/v8/upstream/v8/src/arm/code-stubs-arm.h +623 -0
  41. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/codegen-arm-inl.h +0 -0
  42. data/ext/v8/upstream/v8/src/arm/codegen-arm.cc +7437 -0
  43. data/ext/v8/upstream/v8/src/arm/codegen-arm.h +595 -0
  44. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/constants-arm.cc +0 -0
  45. data/ext/v8/upstream/v8/src/arm/constants-arm.h +778 -0
  46. data/ext/v8/upstream/v8/src/arm/cpu-arm.cc +149 -0
  47. data/ext/v8/upstream/v8/src/arm/debug-arm.cc +317 -0
  48. data/ext/v8/upstream/v8/src/arm/deoptimizer-arm.cc +737 -0
  49. data/ext/v8/upstream/v8/src/arm/disasm-arm.cc +1503 -0
  50. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/frames-arm.cc +0 -0
  51. data/ext/v8/upstream/v8/src/arm/frames-arm.h +168 -0
  52. data/ext/v8/upstream/v8/src/arm/full-codegen-arm.cc +4374 -0
  53. data/ext/v8/upstream/v8/src/arm/ic-arm.cc +1793 -0
  54. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/jump-target-arm.cc +0 -0
  55. data/ext/v8/upstream/v8/src/arm/lithium-arm.cc +2120 -0
  56. data/ext/v8/upstream/v8/src/arm/lithium-arm.h +2179 -0
  57. data/ext/v8/upstream/v8/src/arm/lithium-codegen-arm.cc +4132 -0
  58. data/ext/v8/upstream/v8/src/arm/lithium-codegen-arm.h +329 -0
  59. data/ext/v8/upstream/v8/src/arm/lithium-gap-resolver-arm.cc +305 -0
  60. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/lithium-gap-resolver-arm.h +0 -0
  61. data/ext/v8/upstream/v8/src/arm/macro-assembler-arm.cc +2939 -0
  62. data/ext/v8/upstream/v8/src/arm/macro-assembler-arm.h +1071 -0
  63. data/ext/v8/upstream/v8/src/arm/regexp-macro-assembler-arm.cc +1287 -0
  64. data/ext/v8/upstream/v8/src/arm/regexp-macro-assembler-arm.h +253 -0
  65. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/register-allocator-arm-inl.h +0 -0
  66. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/register-allocator-arm.cc +0 -0
  67. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/register-allocator-arm.h +0 -0
  68. data/ext/v8/upstream/v8/src/arm/simulator-arm.cc +3288 -0
  69. data/ext/v8/upstream/v8/src/arm/simulator-arm.h +413 -0
  70. data/ext/v8/upstream/v8/src/arm/stub-cache-arm.cc +4034 -0
  71. data/ext/v8/upstream/{3.1.8 → v8}/src/arm/virtual-frame-arm-inl.h +0 -0
  72. data/ext/v8/upstream/v8/src/arm/virtual-frame-arm.cc +843 -0
  73. data/ext/v8/upstream/v8/src/arm/virtual-frame-arm.h +523 -0
  74. data/ext/v8/upstream/v8/src/array.js +1249 -0
  75. data/ext/v8/upstream/v8/src/assembler.cc +1067 -0
  76. data/ext/v8/upstream/v8/src/assembler.h +823 -0
  77. data/ext/v8/upstream/v8/src/ast-inl.h +112 -0
  78. data/ext/v8/upstream/v8/src/ast.cc +1078 -0
  79. data/ext/v8/upstream/v8/src/ast.h +2234 -0
  80. data/ext/v8/upstream/v8/src/atomicops.h +167 -0
  81. data/ext/v8/upstream/{3.1.8 → v8}/src/atomicops_internals_arm_gcc.h +0 -0
  82. data/ext/v8/upstream/v8/src/atomicops_internals_mips_gcc.h +169 -0
  83. data/ext/v8/upstream/{3.1.8 → v8}/src/atomicops_internals_x86_gcc.cc +0 -0
  84. data/ext/v8/upstream/{3.1.8 → v8}/src/atomicops_internals_x86_gcc.h +0 -0
  85. data/ext/v8/upstream/{3.1.8 → v8}/src/atomicops_internals_x86_macosx.h +0 -0
  86. data/ext/v8/upstream/{3.1.8 → v8}/src/atomicops_internals_x86_msvc.h +0 -0
  87. data/ext/v8/upstream/{3.1.8 → v8}/src/bignum-dtoa.cc +0 -0
  88. data/ext/v8/upstream/{3.1.8 → v8}/src/bignum-dtoa.h +0 -0
  89. data/ext/v8/upstream/{3.1.8 → v8}/src/bignum.cc +0 -0
  90. data/ext/v8/upstream/{3.1.8 → v8}/src/bignum.h +0 -0
  91. data/ext/v8/upstream/v8/src/bootstrapper.cc +2138 -0
  92. data/ext/v8/upstream/v8/src/bootstrapper.h +185 -0
  93. data/ext/v8/upstream/v8/src/builtins.cc +1708 -0
  94. data/ext/v8/upstream/v8/src/builtins.h +368 -0
  95. data/ext/v8/upstream/{3.1.8 → v8}/src/bytecodes-irregexp.h +0 -0
  96. data/ext/v8/upstream/{3.1.8 → v8}/src/cached-powers.cc +0 -0
  97. data/ext/v8/upstream/{3.1.8 → v8}/src/cached-powers.h +0 -0
  98. data/ext/v8/upstream/{3.1.8 → v8}/src/char-predicates-inl.h +0 -0
  99. data/ext/v8/upstream/{3.1.8 → v8}/src/char-predicates.h +0 -0
  100. data/ext/v8/upstream/v8/src/checks.cc +110 -0
  101. data/ext/v8/upstream/v8/src/checks.h +296 -0
  102. data/ext/v8/upstream/{3.1.8 → v8}/src/circular-queue-inl.h +0 -0
  103. data/ext/v8/upstream/{3.1.8 → v8}/src/circular-queue.cc +0 -0
  104. data/ext/v8/upstream/{3.1.8 → v8}/src/circular-queue.h +0 -0
  105. data/ext/v8/upstream/v8/src/code-stubs.cc +240 -0
  106. data/ext/v8/upstream/v8/src/code-stubs.h +971 -0
  107. data/ext/v8/upstream/{3.1.8 → v8}/src/code.h +0 -0
  108. data/ext/v8/upstream/v8/src/codegen-inl.h +68 -0
  109. data/ext/v8/upstream/v8/src/codegen.cc +505 -0
  110. data/ext/v8/upstream/v8/src/codegen.h +245 -0
  111. data/ext/v8/upstream/v8/src/compilation-cache.cc +540 -0
  112. data/ext/v8/upstream/v8/src/compilation-cache.h +287 -0
  113. data/ext/v8/upstream/v8/src/compiler.cc +792 -0
  114. data/ext/v8/upstream/v8/src/compiler.h +307 -0
  115. data/ext/v8/upstream/v8/src/contexts.cc +327 -0
  116. data/ext/v8/upstream/v8/src/contexts.h +382 -0
  117. data/ext/v8/upstream/{3.1.8 → v8}/src/conversions-inl.h +0 -0
  118. data/ext/v8/upstream/v8/src/conversions.cc +1125 -0
  119. data/ext/v8/upstream/{3.1.8 → v8}/src/conversions.h +0 -0
  120. data/ext/v8/upstream/v8/src/counters.cc +93 -0
  121. data/ext/v8/upstream/v8/src/counters.h +254 -0
  122. data/ext/v8/upstream/v8/src/cpu-profiler-inl.h +101 -0
  123. data/ext/v8/upstream/v8/src/cpu-profiler.cc +606 -0
  124. data/ext/v8/upstream/v8/src/cpu-profiler.h +305 -0
  125. data/ext/v8/upstream/v8/src/cpu.h +67 -0
  126. data/ext/v8/upstream/v8/src/d8-debug.cc +367 -0
  127. data/ext/v8/upstream/v8/src/d8-debug.h +158 -0
  128. data/ext/v8/upstream/v8/src/d8-posix.cc +695 -0
  129. data/ext/v8/upstream/{3.1.8 → v8}/src/d8-readline.cc +0 -0
  130. data/ext/v8/upstream/{3.1.8 → v8}/src/d8-windows.cc +0 -0
  131. data/ext/v8/upstream/v8/src/d8.cc +796 -0
  132. data/ext/v8/upstream/v8/src/d8.gyp +88 -0
  133. data/ext/v8/upstream/{3.1.8 → v8}/src/d8.h +0 -0
  134. data/ext/v8/upstream/{3.1.8 → v8}/src/d8.js +0 -0
  135. data/ext/v8/upstream/{3.1.8 → v8}/src/data-flow.cc +0 -0
  136. data/ext/v8/upstream/v8/src/data-flow.h +379 -0
  137. data/ext/v8/upstream/{3.1.8 → v8}/src/date.js +0 -0
  138. data/ext/v8/upstream/{3.1.8 → v8}/src/dateparser-inl.h +0 -0
  139. data/ext/v8/upstream/{3.1.8 → v8}/src/dateparser.cc +0 -0
  140. data/ext/v8/upstream/v8/src/dateparser.h +265 -0
  141. data/ext/v8/upstream/v8/src/debug-agent.cc +447 -0
  142. data/ext/v8/upstream/v8/src/debug-agent.h +129 -0
  143. data/ext/v8/upstream/{3.1.8 → v8}/src/debug-debugger.js +0 -0
  144. data/ext/v8/upstream/v8/src/debug.cc +3188 -0
  145. data/ext/v8/upstream/v8/src/debug.h +1055 -0
  146. data/ext/v8/upstream/v8/src/deoptimizer.cc +1296 -0
  147. data/ext/v8/upstream/v8/src/deoptimizer.h +629 -0
  148. data/ext/v8/upstream/v8/src/disasm.h +80 -0
  149. data/ext/v8/upstream/v8/src/disassembler.cc +339 -0
  150. data/ext/v8/upstream/{3.1.8 → v8}/src/disassembler.h +0 -0
  151. data/ext/v8/upstream/{3.1.8 → v8}/src/diy-fp.cc +0 -0
  152. data/ext/v8/upstream/{3.1.8 → v8}/src/diy-fp.h +0 -0
  153. data/ext/v8/upstream/{3.1.8 → v8}/src/double.h +0 -0
  154. data/ext/v8/upstream/{3.1.8 → v8}/src/dtoa.cc +0 -0
  155. data/ext/v8/upstream/{3.1.8 → v8}/src/dtoa.h +0 -0
  156. data/ext/v8/upstream/v8/src/execution.cc +791 -0
  157. data/ext/v8/upstream/v8/src/execution.h +291 -0
  158. data/ext/v8/upstream/v8/src/extensions/experimental/break-iterator.cc +250 -0
  159. data/ext/v8/upstream/v8/src/extensions/experimental/break-iterator.h +89 -0
  160. data/ext/v8/upstream/v8/src/extensions/experimental/experimental.gyp +55 -0
  161. data/ext/v8/upstream/v8/src/extensions/experimental/i18n-extension.cc +284 -0
  162. data/ext/v8/upstream/{3.1.8 → v8}/src/extensions/experimental/i18n-extension.h +0 -0
  163. data/ext/v8/upstream/v8/src/extensions/externalize-string-extension.cc +141 -0
  164. data/ext/v8/upstream/{3.1.8 → v8}/src/extensions/externalize-string-extension.h +0 -0
  165. data/ext/v8/upstream/v8/src/extensions/gc-extension.cc +58 -0
  166. data/ext/v8/upstream/{3.1.8 → v8}/src/extensions/gc-extension.h +0 -0
  167. data/ext/v8/upstream/v8/src/factory.cc +1194 -0
  168. data/ext/v8/upstream/v8/src/factory.h +436 -0
  169. data/ext/v8/upstream/{3.1.8 → v8}/src/fast-dtoa.cc +0 -0
  170. data/ext/v8/upstream/{3.1.8 → v8}/src/fast-dtoa.h +0 -0
  171. data/ext/v8/upstream/{3.1.8 → v8}/src/fixed-dtoa.cc +0 -0
  172. data/ext/v8/upstream/{3.1.8 → v8}/src/fixed-dtoa.h +0 -0
  173. data/ext/v8/upstream/v8/src/flag-definitions.h +556 -0
  174. data/ext/v8/upstream/{3.1.8 → v8}/src/flags.cc +0 -0
  175. data/ext/v8/upstream/{3.1.8 → v8}/src/flags.h +0 -0
  176. data/ext/v8/upstream/v8/src/frame-element.cc +37 -0
  177. data/ext/v8/upstream/v8/src/frame-element.h +269 -0
  178. data/ext/v8/upstream/v8/src/frames-inl.h +236 -0
  179. data/ext/v8/upstream/v8/src/frames.cc +1273 -0
  180. data/ext/v8/upstream/v8/src/frames.h +854 -0
  181. data/ext/v8/upstream/v8/src/full-codegen.cc +1385 -0
  182. data/ext/v8/upstream/v8/src/full-codegen.h +753 -0
  183. data/ext/v8/upstream/v8/src/func-name-inferrer.cc +91 -0
  184. data/ext/v8/upstream/v8/src/func-name-inferrer.h +111 -0
  185. data/ext/v8/upstream/v8/src/gdb-jit.cc +1548 -0
  186. data/ext/v8/upstream/{3.1.8 → v8}/src/gdb-jit.h +0 -0
  187. data/ext/v8/upstream/v8/src/global-handles.cc +596 -0
  188. data/ext/v8/upstream/v8/src/global-handles.h +239 -0
  189. data/ext/v8/upstream/v8/src/globals.h +325 -0
  190. data/ext/v8/upstream/v8/src/handles-inl.h +177 -0
  191. data/ext/v8/upstream/v8/src/handles.cc +965 -0
  192. data/ext/v8/upstream/v8/src/handles.h +372 -0
  193. data/ext/v8/upstream/{3.1.8 → v8}/src/hashmap.cc +0 -0
  194. data/ext/v8/upstream/v8/src/hashmap.h +121 -0
  195. data/ext/v8/upstream/v8/src/heap-inl.h +703 -0
  196. data/ext/v8/upstream/v8/src/heap-profiler.cc +1173 -0
  197. data/ext/v8/upstream/v8/src/heap-profiler.h +396 -0
  198. data/ext/v8/upstream/v8/src/heap.cc +5856 -0
  199. data/ext/v8/upstream/v8/src/heap.h +2264 -0
  200. data/ext/v8/upstream/v8/src/hydrogen-instructions.cc +1639 -0
  201. data/ext/v8/upstream/v8/src/hydrogen-instructions.h +3657 -0
  202. data/ext/v8/upstream/v8/src/hydrogen.cc +6011 -0
  203. data/ext/v8/upstream/v8/src/hydrogen.h +1137 -0
  204. data/ext/v8/upstream/v8/src/ia32/assembler-ia32-inl.h +430 -0
  205. data/ext/v8/upstream/v8/src/ia32/assembler-ia32.cc +2846 -0
  206. data/ext/v8/upstream/v8/src/ia32/assembler-ia32.h +1159 -0
  207. data/ext/v8/upstream/v8/src/ia32/builtins-ia32.cc +1596 -0
  208. data/ext/v8/upstream/v8/src/ia32/code-stubs-ia32.cc +6549 -0
  209. data/ext/v8/upstream/v8/src/ia32/code-stubs-ia32.h +495 -0
  210. data/ext/v8/upstream/{3.1.8 → v8}/src/ia32/codegen-ia32-inl.h +0 -0
  211. data/ext/v8/upstream/v8/src/ia32/codegen-ia32.cc +10385 -0
  212. data/ext/v8/upstream/v8/src/ia32/codegen-ia32.h +801 -0
  213. data/ext/v8/upstream/v8/src/ia32/cpu-ia32.cc +88 -0
  214. data/ext/v8/upstream/v8/src/ia32/debug-ia32.cc +312 -0
  215. data/ext/v8/upstream/v8/src/ia32/deoptimizer-ia32.cc +774 -0
  216. data/ext/v8/upstream/v8/src/ia32/disasm-ia32.cc +1620 -0
  217. data/ext/v8/upstream/{3.1.8 → v8}/src/ia32/frames-ia32.cc +0 -0
  218. data/ext/v8/upstream/v8/src/ia32/frames-ia32.h +140 -0
  219. data/ext/v8/upstream/v8/src/ia32/full-codegen-ia32.cc +4357 -0
  220. data/ext/v8/upstream/v8/src/ia32/ic-ia32.cc +1779 -0
  221. data/ext/v8/upstream/{3.1.8 → v8}/src/ia32/jump-target-ia32.cc +0 -0
  222. data/ext/v8/upstream/v8/src/ia32/lithium-codegen-ia32.cc +4158 -0
  223. data/ext/v8/upstream/v8/src/ia32/lithium-codegen-ia32.h +318 -0
  224. data/ext/v8/upstream/v8/src/ia32/lithium-gap-resolver-ia32.cc +466 -0
  225. data/ext/v8/upstream/{3.1.8 → v8}/src/ia32/lithium-gap-resolver-ia32.h +0 -0
  226. data/ext/v8/upstream/v8/src/ia32/lithium-ia32.cc +2181 -0
  227. data/ext/v8/upstream/v8/src/ia32/lithium-ia32.h +2235 -0
  228. data/ext/v8/upstream/v8/src/ia32/macro-assembler-ia32.cc +2056 -0
  229. data/ext/v8/upstream/v8/src/ia32/macro-assembler-ia32.h +807 -0
  230. data/ext/v8/upstream/v8/src/ia32/regexp-macro-assembler-ia32.cc +1264 -0
  231. data/ext/v8/upstream/v8/src/ia32/regexp-macro-assembler-ia32.h +216 -0
  232. data/ext/v8/upstream/{3.1.8 → v8}/src/ia32/register-allocator-ia32-inl.h +0 -0
  233. data/ext/v8/upstream/v8/src/ia32/register-allocator-ia32.cc +157 -0
  234. data/ext/v8/upstream/{3.1.8 → v8}/src/ia32/register-allocator-ia32.h +0 -0
  235. data/ext/v8/upstream/{3.1.8 → v8}/src/ia32/simulator-ia32.cc +0 -0
  236. data/ext/v8/upstream/v8/src/ia32/simulator-ia32.h +72 -0
  237. data/ext/v8/upstream/v8/src/ia32/stub-cache-ia32.cc +3711 -0
  238. data/ext/v8/upstream/v8/src/ia32/virtual-frame-ia32.cc +1366 -0
  239. data/ext/v8/upstream/v8/src/ia32/virtual-frame-ia32.h +650 -0
  240. data/ext/v8/upstream/v8/src/ic-inl.h +130 -0
  241. data/ext/v8/upstream/v8/src/ic.cc +2389 -0
  242. data/ext/v8/upstream/v8/src/ic.h +675 -0
  243. data/ext/v8/upstream/{3.1.8 → v8}/src/inspector.cc +0 -0
  244. data/ext/v8/upstream/{3.1.8 → v8}/src/inspector.h +0 -0
  245. data/ext/v8/upstream/v8/src/interpreter-irregexp.cc +659 -0
  246. data/ext/v8/upstream/v8/src/interpreter-irregexp.h +49 -0
  247. data/ext/v8/upstream/v8/src/isolate.cc +883 -0
  248. data/ext/v8/upstream/v8/src/isolate.h +1306 -0
  249. data/ext/v8/upstream/v8/src/json.js +342 -0
  250. data/ext/v8/upstream/v8/src/jsregexp.cc +5371 -0
  251. data/ext/v8/upstream/v8/src/jsregexp.h +1483 -0
  252. data/ext/v8/upstream/{3.1.8 → v8}/src/jump-target-heavy-inl.h +0 -0
  253. data/ext/v8/upstream/v8/src/jump-target-heavy.cc +427 -0
  254. data/ext/v8/upstream/v8/src/jump-target-heavy.h +238 -0
  255. data/ext/v8/upstream/v8/src/jump-target-inl.h +48 -0
  256. data/ext/v8/upstream/{3.1.8 → v8}/src/jump-target-light-inl.h +0 -0
  257. data/ext/v8/upstream/v8/src/jump-target-light.cc +111 -0
  258. data/ext/v8/upstream/{3.1.8 → v8}/src/jump-target-light.h +0 -0
  259. data/ext/v8/upstream/{3.1.8 → v8}/src/jump-target.cc +0 -0
  260. data/ext/v8/upstream/{3.1.8 → v8}/src/jump-target.h +0 -0
  261. data/ext/v8/upstream/{3.1.8 → v8}/src/list-inl.h +0 -0
  262. data/ext/v8/upstream/{3.1.8 → v8}/src/list.h +0 -0
  263. data/ext/v8/upstream/v8/src/lithium-allocator-inl.h +142 -0
  264. data/ext/v8/upstream/v8/src/lithium-allocator.cc +2105 -0
  265. data/ext/v8/upstream/v8/src/lithium-allocator.h +630 -0
  266. data/ext/v8/upstream/v8/src/lithium.cc +169 -0
  267. data/ext/v8/upstream/{3.1.8 → v8}/src/lithium.h +0 -0
  268. data/ext/v8/upstream/{3.1.8 → v8}/src/liveedit-debugger.js +0 -0
  269. data/ext/v8/upstream/v8/src/liveedit.cc +1693 -0
  270. data/ext/v8/upstream/v8/src/liveedit.h +179 -0
  271. data/ext/v8/upstream/{3.1.8 → v8}/src/liveobjectlist-inl.h +0 -0
  272. data/ext/v8/upstream/v8/src/liveobjectlist.cc +2589 -0
  273. data/ext/v8/upstream/v8/src/liveobjectlist.h +322 -0
  274. data/ext/v8/upstream/{3.1.8 → v8}/src/log-inl.h +0 -0
  275. data/ext/v8/upstream/v8/src/log-utils.cc +423 -0
  276. data/ext/v8/upstream/v8/src/log-utils.h +229 -0
  277. data/ext/v8/upstream/v8/src/log.cc +1666 -0
  278. data/ext/v8/upstream/v8/src/log.h +446 -0
  279. data/ext/v8/upstream/{3.1.8 → v8}/src/macro-assembler.h +0 -0
  280. data/ext/v8/upstream/{3.1.8 → v8}/src/macros.py +0 -0
  281. data/ext/v8/upstream/v8/src/mark-compact.cc +3092 -0
  282. data/ext/v8/upstream/v8/src/mark-compact.h +506 -0
  283. data/ext/v8/upstream/{3.1.8 → v8}/src/math.js +0 -0
  284. data/ext/v8/upstream/v8/src/messages.cc +166 -0
  285. data/ext/v8/upstream/{3.1.8 → v8}/src/messages.h +0 -0
  286. data/ext/v8/upstream/v8/src/messages.js +1090 -0
  287. data/ext/v8/upstream/v8/src/mips/assembler-mips-inl.h +335 -0
  288. data/ext/v8/upstream/v8/src/mips/assembler-mips.cc +2093 -0
  289. data/ext/v8/upstream/v8/src/mips/assembler-mips.h +1066 -0
  290. data/ext/v8/upstream/v8/src/mips/builtins-mips.cc +148 -0
  291. data/ext/v8/upstream/v8/src/mips/code-stubs-mips.cc +752 -0
  292. data/ext/v8/upstream/v8/src/mips/code-stubs-mips.h +511 -0
  293. data/ext/v8/upstream/v8/src/mips/codegen-mips-inl.h +64 -0
  294. data/ext/v8/upstream/v8/src/mips/codegen-mips.cc +1213 -0
  295. data/ext/v8/upstream/v8/src/mips/codegen-mips.h +633 -0
  296. data/ext/v8/upstream/v8/src/mips/constants-mips.cc +352 -0
  297. data/ext/v8/upstream/v8/src/mips/constants-mips.h +723 -0
  298. data/ext/v8/upstream/v8/src/mips/cpu-mips.cc +90 -0
  299. data/ext/v8/upstream/v8/src/mips/debug-mips.cc +155 -0
  300. data/ext/v8/upstream/v8/src/mips/deoptimizer-mips.cc +91 -0
  301. data/ext/v8/upstream/v8/src/mips/disasm-mips.cc +1023 -0
  302. data/ext/v8/upstream/v8/src/mips/frames-mips.cc +48 -0
  303. data/ext/v8/upstream/v8/src/mips/frames-mips.h +179 -0
  304. data/ext/v8/upstream/v8/src/mips/full-codegen-mips.cc +727 -0
  305. data/ext/v8/upstream/v8/src/mips/ic-mips.cc +244 -0
  306. data/ext/v8/upstream/v8/src/mips/jump-target-mips.cc +80 -0
  307. data/ext/v8/upstream/v8/src/mips/lithium-codegen-mips.h +65 -0
  308. data/ext/v8/upstream/v8/src/mips/lithium-mips.h +304 -0
  309. data/ext/v8/upstream/v8/src/mips/macro-assembler-mips.cc +3327 -0
  310. data/ext/v8/upstream/v8/src/mips/macro-assembler-mips.h +1058 -0
  311. data/ext/v8/upstream/v8/src/mips/regexp-macro-assembler-mips.cc +478 -0
  312. data/ext/v8/upstream/v8/src/mips/regexp-macro-assembler-mips.h +250 -0
  313. data/ext/v8/upstream/v8/src/mips/register-allocator-mips-inl.h +134 -0
  314. data/ext/v8/upstream/{3.1.8 → v8}/src/mips/register-allocator-mips.cc +0 -0
  315. data/ext/v8/upstream/v8/src/mips/register-allocator-mips.h +47 -0
  316. data/ext/v8/upstream/v8/src/mips/simulator-mips.cc +2438 -0
  317. data/ext/v8/upstream/v8/src/mips/simulator-mips.h +394 -0
  318. data/ext/v8/upstream/v8/src/mips/stub-cache-mips.cc +601 -0
  319. data/ext/v8/upstream/v8/src/mips/virtual-frame-mips-inl.h +58 -0
  320. data/ext/v8/upstream/v8/src/mips/virtual-frame-mips.cc +307 -0
  321. data/ext/v8/upstream/v8/src/mips/virtual-frame-mips.h +530 -0
  322. data/ext/v8/upstream/v8/src/mirror-debugger.js +2381 -0
  323. data/ext/v8/upstream/v8/src/mksnapshot.cc +256 -0
  324. data/ext/v8/upstream/{3.1.8 → v8}/src/natives.h +0 -0
  325. data/ext/v8/upstream/v8/src/objects-debug.cc +722 -0
  326. data/ext/v8/upstream/v8/src/objects-inl.h +4166 -0
  327. data/ext/v8/upstream/v8/src/objects-printer.cc +801 -0
  328. data/ext/v8/upstream/v8/src/objects-visiting.cc +142 -0
  329. data/ext/v8/upstream/v8/src/objects-visiting.h +422 -0
  330. data/ext/v8/upstream/v8/src/objects.cc +10296 -0
  331. data/ext/v8/upstream/v8/src/objects.h +6662 -0
  332. data/ext/v8/upstream/v8/src/parser.cc +5168 -0
  333. data/ext/v8/upstream/v8/src/parser.h +823 -0
  334. data/ext/v8/upstream/v8/src/platform-cygwin.cc +811 -0
  335. data/ext/v8/upstream/v8/src/platform-freebsd.cc +854 -0
  336. data/ext/v8/upstream/v8/src/platform-linux.cc +1120 -0
  337. data/ext/v8/upstream/v8/src/platform-macos.cc +865 -0
  338. data/ext/v8/upstream/v8/src/platform-nullos.cc +504 -0
  339. data/ext/v8/upstream/v8/src/platform-openbsd.cc +672 -0
  340. data/ext/v8/upstream/v8/src/platform-posix.cc +424 -0
  341. data/ext/v8/upstream/v8/src/platform-solaris.cc +796 -0
  342. data/ext/v8/upstream/v8/src/platform-tls-mac.h +62 -0
  343. data/ext/v8/upstream/v8/src/platform-tls-win32.h +62 -0
  344. data/ext/v8/upstream/v8/src/platform-tls.h +50 -0
  345. data/ext/v8/upstream/v8/src/platform-win32.cc +2072 -0
  346. data/ext/v8/upstream/v8/src/platform.h +693 -0
  347. data/ext/v8/upstream/v8/src/preparse-data.cc +185 -0
  348. data/ext/v8/upstream/{3.1.8 → v8}/src/preparse-data.h +0 -0
  349. data/ext/v8/upstream/v8/src/preparser-api.cc +219 -0
  350. data/ext/v8/upstream/v8/src/preparser.cc +1205 -0
  351. data/ext/v8/upstream/{3.1.8 → v8}/src/preparser.h +0 -0
  352. data/ext/v8/upstream/v8/src/prettyprinter.cc +1530 -0
  353. data/ext/v8/upstream/v8/src/prettyprinter.h +223 -0
  354. data/ext/v8/upstream/{3.1.8 → v8}/src/profile-generator-inl.h +0 -0
  355. data/ext/v8/upstream/v8/src/profile-generator.cc +3095 -0
  356. data/ext/v8/upstream/v8/src/profile-generator.h +1125 -0
  357. data/ext/v8/upstream/v8/src/property.cc +102 -0
  358. data/ext/v8/upstream/v8/src/property.h +348 -0
  359. data/ext/v8/upstream/{3.1.8 → v8}/src/regexp-macro-assembler-irregexp-inl.h +0 -0
  360. data/ext/v8/upstream/v8/src/regexp-macro-assembler-irregexp.cc +470 -0
  361. data/ext/v8/upstream/{3.1.8 → v8}/src/regexp-macro-assembler-irregexp.h +0 -0
  362. data/ext/v8/upstream/{3.1.8 → v8}/src/regexp-macro-assembler-tracer.cc +0 -0
  363. data/ext/v8/upstream/{3.1.8 → v8}/src/regexp-macro-assembler-tracer.h +0 -0
  364. data/ext/v8/upstream/v8/src/regexp-macro-assembler.cc +266 -0
  365. data/ext/v8/upstream/v8/src/regexp-macro-assembler.h +236 -0
  366. data/ext/v8/upstream/v8/src/regexp-stack.cc +111 -0
  367. data/ext/v8/upstream/v8/src/regexp-stack.h +147 -0
  368. data/ext/v8/upstream/v8/src/regexp.js +483 -0
  369. data/ext/v8/upstream/v8/src/register-allocator-inl.h +141 -0
  370. data/ext/v8/upstream/v8/src/register-allocator.cc +98 -0
  371. data/ext/v8/upstream/v8/src/register-allocator.h +310 -0
  372. data/ext/v8/upstream/v8/src/rewriter.cc +1024 -0
  373. data/ext/v8/upstream/{3.1.8 → v8}/src/rewriter.h +0 -0
  374. data/ext/v8/upstream/v8/src/runtime-profiler.cc +478 -0
  375. data/ext/v8/upstream/v8/src/runtime-profiler.h +192 -0
  376. data/ext/v8/upstream/v8/src/runtime.cc +11949 -0
  377. data/ext/v8/upstream/v8/src/runtime.h +643 -0
  378. data/ext/v8/upstream/{3.1.8 → v8}/src/runtime.js +0 -0
  379. data/ext/v8/upstream/v8/src/safepoint-table.cc +256 -0
  380. data/ext/v8/upstream/v8/src/safepoint-table.h +269 -0
  381. data/ext/v8/upstream/v8/src/scanner-base.cc +964 -0
  382. data/ext/v8/upstream/v8/src/scanner-base.h +664 -0
  383. data/ext/v8/upstream/v8/src/scanner.cc +584 -0
  384. data/ext/v8/upstream/v8/src/scanner.h +196 -0
  385. data/ext/v8/upstream/v8/src/scopeinfo.cc +631 -0
  386. data/ext/v8/upstream/v8/src/scopeinfo.h +249 -0
  387. data/ext/v8/upstream/v8/src/scopes.cc +1093 -0
  388. data/ext/v8/upstream/v8/src/scopes.h +508 -0
  389. data/ext/v8/upstream/v8/src/serialize.cc +1574 -0
  390. data/ext/v8/upstream/v8/src/serialize.h +589 -0
  391. data/ext/v8/upstream/{3.1.8 → v8}/src/shell.h +0 -0
  392. data/ext/v8/upstream/{3.1.8 → v8}/src/simulator.h +0 -0
  393. data/ext/v8/upstream/v8/src/small-pointer-list.h +163 -0
  394. data/ext/v8/upstream/{3.1.8 → v8}/src/smart-pointer.h +0 -0
  395. data/ext/v8/upstream/v8/src/snapshot-common.cc +82 -0
  396. data/ext/v8/upstream/{3.1.8 → v8}/src/snapshot-empty.cc +0 -0
  397. data/ext/v8/upstream/v8/src/snapshot.h +73 -0
  398. data/ext/v8/upstream/v8/src/spaces-inl.h +529 -0
  399. data/ext/v8/upstream/v8/src/spaces.cc +3147 -0
  400. data/ext/v8/upstream/v8/src/spaces.h +2368 -0
  401. data/ext/v8/upstream/{3.1.8 → v8}/src/splay-tree-inl.h +0 -0
  402. data/ext/v8/upstream/{3.1.8 → v8}/src/splay-tree.h +0 -0
  403. data/ext/v8/upstream/v8/src/string-search.cc +41 -0
  404. data/ext/v8/upstream/v8/src/string-search.h +568 -0
  405. data/ext/v8/upstream/v8/src/string-stream.cc +592 -0
  406. data/ext/v8/upstream/{3.1.8 → v8}/src/string-stream.h +0 -0
  407. data/ext/v8/upstream/v8/src/string.js +915 -0
  408. data/ext/v8/upstream/{3.1.8 → v8}/src/strtod.cc +0 -0
  409. data/ext/v8/upstream/{3.1.8 → v8}/src/strtod.h +0 -0
  410. data/ext/v8/upstream/v8/src/stub-cache.cc +1940 -0
  411. data/ext/v8/upstream/v8/src/stub-cache.h +866 -0
  412. data/ext/v8/upstream/{3.1.8 → v8}/src/third_party/valgrind/valgrind.h +0 -0
  413. data/ext/v8/upstream/v8/src/token.cc +63 -0
  414. data/ext/v8/upstream/v8/src/token.h +288 -0
  415. data/ext/v8/upstream/v8/src/top.cc +983 -0
  416. data/ext/v8/upstream/v8/src/type-info.cc +472 -0
  417. data/ext/v8/upstream/v8/src/type-info.h +290 -0
  418. data/ext/v8/upstream/{3.1.8 → v8}/src/unbound-queue-inl.h +0 -0
  419. data/ext/v8/upstream/{3.1.8 → v8}/src/unbound-queue.h +0 -0
  420. data/ext/v8/upstream/{3.1.8 → v8}/src/unicode-inl.h +0 -0
  421. data/ext/v8/upstream/v8/src/unicode.cc +1624 -0
  422. data/ext/v8/upstream/v8/src/unicode.h +280 -0
  423. data/ext/v8/upstream/{3.1.8 → v8}/src/uri.js +0 -0
  424. data/ext/v8/upstream/{3.1.8 → v8}/src/utils.cc +0 -0
  425. data/ext/v8/upstream/v8/src/utils.h +796 -0
  426. data/ext/v8/upstream/v8/src/v8-counters.cc +62 -0
  427. data/ext/v8/upstream/v8/src/v8-counters.h +311 -0
  428. data/ext/v8/upstream/v8/src/v8.cc +215 -0
  429. data/ext/v8/upstream/v8/src/v8.h +130 -0
  430. data/ext/v8/upstream/{3.1.8 → v8}/src/v8checks.h +0 -0
  431. data/ext/v8/upstream/{3.1.8 → v8}/src/v8dll-main.cc +0 -0
  432. data/ext/v8/upstream/v8/src/v8globals.h +486 -0
  433. data/ext/v8/upstream/{3.1.8/src/memory.h → v8/src/v8memory.h} +0 -0
  434. data/ext/v8/upstream/v8/src/v8natives.js +1293 -0
  435. data/ext/v8/upstream/{3.1.8 → v8}/src/v8preparserdll-main.cc +0 -0
  436. data/ext/v8/upstream/v8/src/v8threads.cc +453 -0
  437. data/ext/v8/upstream/v8/src/v8threads.h +164 -0
  438. data/ext/v8/upstream/v8/src/v8utils.h +317 -0
  439. data/ext/v8/upstream/{3.1.8 → v8}/src/variables.cc +0 -0
  440. data/ext/v8/upstream/v8/src/variables.h +212 -0
  441. data/ext/v8/upstream/v8/src/version.cc +116 -0
  442. data/ext/v8/upstream/v8/src/version.h +68 -0
  443. data/ext/v8/upstream/{3.1.8 → v8}/src/virtual-frame-heavy-inl.h +0 -0
  444. data/ext/v8/upstream/{3.1.8 → v8}/src/virtual-frame-heavy.cc +0 -0
  445. data/ext/v8/upstream/{3.1.8 → v8}/src/virtual-frame-inl.h +0 -0
  446. data/ext/v8/upstream/v8/src/virtual-frame-light-inl.h +171 -0
  447. data/ext/v8/upstream/{3.1.8 → v8}/src/virtual-frame-light.cc +0 -0
  448. data/ext/v8/upstream/{3.1.8 → v8}/src/virtual-frame.cc +0 -0
  449. data/ext/v8/upstream/{3.1.8 → v8}/src/virtual-frame.h +0 -0
  450. data/ext/v8/upstream/v8/src/vm-state-inl.h +138 -0
  451. data/ext/v8/upstream/v8/src/vm-state.h +70 -0
  452. data/ext/v8/upstream/v8/src/win32-headers.h +96 -0
  453. data/ext/v8/upstream/v8/src/x64/assembler-x64-inl.h +456 -0
  454. data/ext/v8/upstream/v8/src/x64/assembler-x64.cc +2954 -0
  455. data/ext/v8/upstream/v8/src/x64/assembler-x64.h +1630 -0
  456. data/ext/v8/upstream/v8/src/x64/builtins-x64.cc +1493 -0
  457. data/ext/v8/upstream/v8/src/x64/code-stubs-x64.cc +5132 -0
  458. data/ext/v8/upstream/v8/src/x64/code-stubs-x64.h +477 -0
  459. data/ext/v8/upstream/{3.1.8 → v8}/src/x64/codegen-x64-inl.h +0 -0
  460. data/ext/v8/upstream/v8/src/x64/codegen-x64.cc +8843 -0
  461. data/ext/v8/upstream/v8/src/x64/codegen-x64.h +753 -0
  462. data/ext/v8/upstream/v8/src/x64/cpu-x64.cc +88 -0
  463. data/ext/v8/upstream/v8/src/x64/debug-x64.cc +318 -0
  464. data/ext/v8/upstream/v8/src/x64/deoptimizer-x64.cc +815 -0
  465. data/ext/v8/upstream/v8/src/x64/disasm-x64.cc +1752 -0
  466. data/ext/v8/upstream/{3.1.8 → v8}/src/x64/frames-x64.cc +0 -0
  467. data/ext/v8/upstream/v8/src/x64/frames-x64.h +130 -0
  468. data/ext/v8/upstream/v8/src/x64/full-codegen-x64.cc +4339 -0
  469. data/ext/v8/upstream/v8/src/x64/ic-x64.cc +1752 -0
  470. data/ext/v8/upstream/{3.1.8 → v8}/src/x64/jump-target-x64.cc +0 -0
  471. data/ext/v8/upstream/v8/src/x64/lithium-codegen-x64.cc +3970 -0
  472. data/ext/v8/upstream/v8/src/x64/lithium-codegen-x64.h +318 -0
  473. data/ext/v8/upstream/{3.1.8 → v8}/src/x64/lithium-gap-resolver-x64.cc +0 -0
  474. data/ext/v8/upstream/{3.1.8 → v8}/src/x64/lithium-gap-resolver-x64.h +0 -0
  475. data/ext/v8/upstream/v8/src/x64/lithium-x64.cc +2115 -0
  476. data/ext/v8/upstream/v8/src/x64/lithium-x64.h +2161 -0
  477. data/ext/v8/upstream/v8/src/x64/macro-assembler-x64.cc +2911 -0
  478. data/ext/v8/upstream/v8/src/x64/macro-assembler-x64.h +1984 -0
  479. data/ext/v8/upstream/v8/src/x64/regexp-macro-assembler-x64.cc +1398 -0
  480. data/ext/v8/upstream/v8/src/x64/regexp-macro-assembler-x64.h +282 -0
  481. data/ext/v8/upstream/v8/src/x64/register-allocator-x64-inl.h +87 -0
  482. data/ext/v8/upstream/v8/src/x64/register-allocator-x64.cc +95 -0
  483. data/ext/v8/upstream/{3.1.8 → v8}/src/x64/register-allocator-x64.h +0 -0
  484. data/ext/v8/upstream/{3.1.8 → v8}/src/x64/simulator-x64.cc +0 -0
  485. data/ext/v8/upstream/v8/src/x64/simulator-x64.h +71 -0
  486. data/ext/v8/upstream/v8/src/x64/stub-cache-x64.cc +3460 -0
  487. data/ext/v8/upstream/v8/src/x64/virtual-frame-x64.cc +1296 -0
  488. data/ext/v8/upstream/v8/src/x64/virtual-frame-x64.h +597 -0
  489. data/ext/v8/upstream/v8/src/zone-inl.h +129 -0
  490. data/ext/v8/upstream/v8/src/zone.cc +196 -0
  491. data/ext/v8/upstream/v8/src/zone.h +236 -0
  492. data/ext/v8/upstream/{3.1.8 → v8}/tools/codemap.js +0 -0
  493. data/ext/v8/upstream/{3.1.8 → v8}/tools/consarray.js +0 -0
  494. data/ext/v8/upstream/{3.1.8 → v8}/tools/csvparser.js +0 -0
  495. data/ext/v8/upstream/{3.1.8 → v8}/tools/disasm.py +0 -0
  496. data/ext/v8/upstream/v8/tools/freebsd-tick-processor +10 -0
  497. data/ext/v8/upstream/{3.1.8 → v8}/tools/gc-nvp-trace-processor.py +0 -0
  498. data/ext/v8/upstream/{3.1.8 → v8}/tools/generate-ten-powers.scm +0 -0
  499. data/ext/v8/upstream/{3.1.8 → v8}/tools/grokdump.py +0 -0
  500. data/ext/v8/upstream/v8/tools/gyp/v8.gyp +844 -0
  501. data/ext/v8/upstream/{3.1.8 → v8}/tools/js2c.py +0 -0
  502. data/ext/v8/upstream/{3.1.8 → v8}/tools/jsmin.py +0 -0
  503. data/ext/v8/upstream/v8/tools/linux-tick-processor +35 -0
  504. data/ext/v8/upstream/{3.1.8 → v8}/tools/ll_prof.py +0 -0
  505. data/ext/v8/upstream/{3.1.8 → v8}/tools/logreader.js +0 -0
  506. data/ext/v8/upstream/{3.1.8 → v8}/tools/mac-nm +0 -0
  507. data/ext/v8/upstream/{3.1.8 → v8}/tools/mac-tick-processor +0 -0
  508. data/ext/v8/upstream/{3.1.8 → v8}/tools/oom_dump/README +0 -0
  509. data/ext/v8/upstream/{3.1.8 → v8}/tools/oom_dump/SConstruct +0 -0
  510. data/ext/v8/upstream/{3.1.8 → v8}/tools/oom_dump/oom_dump.cc +0 -0
  511. data/ext/v8/upstream/{3.1.8 → v8}/tools/presubmit.py +0 -0
  512. data/ext/v8/upstream/{3.1.8 → v8}/tools/process-heap-prof.py +0 -0
  513. data/ext/v8/upstream/{3.1.8 → v8}/tools/profile.js +0 -0
  514. data/ext/v8/upstream/{3.1.8 → v8}/tools/profile_view.js +0 -0
  515. data/ext/v8/upstream/{3.1.8 → v8}/tools/run-valgrind.py +0 -0
  516. data/ext/v8/upstream/{3.1.8 → v8}/tools/splaytree.js +0 -0
  517. data/ext/v8/upstream/{3.1.8 → v8}/tools/stats-viewer.py +0 -0
  518. data/ext/v8/upstream/v8/tools/test.py +1490 -0
  519. data/ext/v8/upstream/{3.1.8 → v8}/tools/tickprocessor-driver.js +0 -0
  520. data/ext/v8/upstream/v8/tools/tickprocessor.js +877 -0
  521. data/ext/v8/upstream/{3.1.8 → v8}/tools/utils.py +0 -0
  522. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/README.txt +0 -0
  523. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/arm.vsprops +0 -0
  524. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/common.vsprops +0 -0
  525. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/d8.vcproj +0 -0
  526. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/d8_arm.vcproj +0 -0
  527. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/d8_x64.vcproj +0 -0
  528. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/d8js2c.cmd +0 -0
  529. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/debug.vsprops +0 -0
  530. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/ia32.vsprops +0 -0
  531. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/js2c.cmd +0 -0
  532. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/release.vsprops +0 -0
  533. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8.sln +0 -0
  534. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8.vcproj +0 -0
  535. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_arm.sln +0 -0
  536. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_arm.vcproj +0 -0
  537. data/ext/v8/upstream/v8/tools/visual_studio/v8_base.vcproj +1308 -0
  538. data/ext/v8/upstream/v8/tools/visual_studio/v8_base_arm.vcproj +1238 -0
  539. data/ext/v8/upstream/v8/tools/visual_studio/v8_base_x64.vcproj +1300 -0
  540. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_cctest.vcproj +0 -0
  541. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_cctest_arm.vcproj +0 -0
  542. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_cctest_x64.vcproj +0 -0
  543. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_mksnapshot.vcproj +0 -0
  544. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_mksnapshot_x64.vcproj +0 -0
  545. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_process_sample.vcproj +0 -0
  546. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_process_sample_arm.vcproj +0 -0
  547. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_process_sample_x64.vcproj +0 -0
  548. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_shell_sample.vcproj +0 -0
  549. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_shell_sample_arm.vcproj +0 -0
  550. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_shell_sample_x64.vcproj +0 -0
  551. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_snapshot.vcproj +0 -0
  552. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_snapshot_cc.vcproj +0 -0
  553. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_snapshot_cc_x64.vcproj +0 -0
  554. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_snapshot_x64.vcproj +0 -0
  555. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_x64.sln +0 -0
  556. data/ext/v8/upstream/{3.1.8 → v8}/tools/visual_studio/v8_x64.vcproj +0 -0
  557. data/ext/v8/upstream/v8/tools/visual_studio/x64.vsprops +18 -0
  558. data/ext/v8/upstream/{3.1.8 → v8}/tools/windows-tick-processor.bat +0 -0
  559. data/ext/v8/v8_callbacks.cpp +52 -92
  560. data/ext/v8/v8_date.cpp +2 -3
  561. data/ext/v8/v8_object.cpp +4 -0
  562. data/ext/v8/v8_template.cpp +2 -2
  563. data/ext/v8/v8_try_catch.cpp +8 -38
  564. data/lib/v8/version.rb +1 -1
  565. data/spec/ext/ext_spec_helper.rb +2 -20
  566. data/spec/ext/object_spec.rb +0 -12
  567. data/spec/ext/try_catch_spec.rb +29 -1
  568. data/spec/spec_helper.rb +1 -0
  569. data/spec/v8/portal/proxies_spec.rb +1 -84
  570. data/specmem/handle_memspec.rb +41 -0
  571. data/specmem/object_memspec.rb +16 -0
  572. data/specmem/proxies_memspec.rb +86 -0
  573. data/specmem/spec_helper.rb +24 -0
  574. data/therubyracer.gemspec +7 -2
  575. metadata +564 -541
  576. data/ext/v8/upstream/3.1.8/.gitignore +0 -31
  577. data/ext/v8/upstream/3.1.8/AUTHORS +0 -40
  578. data/ext/v8/upstream/3.1.8/ChangeLog +0 -2566
  579. data/ext/v8/upstream/3.1.8/SConstruct +0 -1192
  580. data/ext/v8/upstream/3.1.8/include/v8-debug.h +0 -384
  581. data/ext/v8/upstream/3.1.8/include/v8-preparser.h +0 -116
  582. data/ext/v8/upstream/3.1.8/include/v8-profiler.h +0 -426
  583. data/ext/v8/upstream/3.1.8/include/v8-testing.h +0 -99
  584. data/ext/v8/upstream/3.1.8/include/v8.h +0 -3846
  585. data/ext/v8/upstream/3.1.8/preparser/preparser-process.cc +0 -206
  586. data/ext/v8/upstream/3.1.8/src/SConscript +0 -356
  587. data/ext/v8/upstream/3.1.8/src/accessors.cc +0 -907
  588. data/ext/v8/upstream/3.1.8/src/allocation.cc +0 -204
  589. data/ext/v8/upstream/3.1.8/src/allocation.h +0 -176
  590. data/ext/v8/upstream/3.1.8/src/api.cc +0 -5191
  591. data/ext/v8/upstream/3.1.8/src/api.h +0 -508
  592. data/ext/v8/upstream/3.1.8/src/apiutils.h +0 -80
  593. data/ext/v8/upstream/3.1.8/src/arguments.h +0 -105
  594. data/ext/v8/upstream/3.1.8/src/arm/assembler-arm-inl.h +0 -352
  595. data/ext/v8/upstream/3.1.8/src/arm/assembler-arm.cc +0 -2756
  596. data/ext/v8/upstream/3.1.8/src/arm/assembler-arm.h +0 -1294
  597. data/ext/v8/upstream/3.1.8/src/arm/builtins-arm.cc +0 -1628
  598. data/ext/v8/upstream/3.1.8/src/arm/code-stubs-arm.cc +0 -6783
  599. data/ext/v8/upstream/3.1.8/src/arm/code-stubs-arm.h +0 -657
  600. data/ext/v8/upstream/3.1.8/src/arm/codegen-arm.cc +0 -7403
  601. data/ext/v8/upstream/3.1.8/src/arm/codegen-arm.h +0 -595
  602. data/ext/v8/upstream/3.1.8/src/arm/constants-arm.h +0 -769
  603. data/ext/v8/upstream/3.1.8/src/arm/cpu-arm.cc +0 -147
  604. data/ext/v8/upstream/3.1.8/src/arm/debug-arm.cc +0 -315
  605. data/ext/v8/upstream/3.1.8/src/arm/deoptimizer-arm.cc +0 -700
  606. data/ext/v8/upstream/3.1.8/src/arm/disasm-arm.cc +0 -1439
  607. data/ext/v8/upstream/3.1.8/src/arm/frames-arm.h +0 -168
  608. data/ext/v8/upstream/3.1.8/src/arm/full-codegen-arm.cc +0 -4230
  609. data/ext/v8/upstream/3.1.8/src/arm/ic-arm.cc +0 -1799
  610. data/ext/v8/upstream/3.1.8/src/arm/lithium-arm.cc +0 -2041
  611. data/ext/v8/upstream/3.1.8/src/arm/lithium-arm.h +0 -2046
  612. data/ext/v8/upstream/3.1.8/src/arm/lithium-codegen-arm.cc +0 -3822
  613. data/ext/v8/upstream/3.1.8/src/arm/lithium-codegen-arm.h +0 -312
  614. data/ext/v8/upstream/3.1.8/src/arm/lithium-gap-resolver-arm.cc +0 -303
  615. data/ext/v8/upstream/3.1.8/src/arm/macro-assembler-arm.cc +0 -2701
  616. data/ext/v8/upstream/3.1.8/src/arm/macro-assembler-arm.h +0 -1015
  617. data/ext/v8/upstream/3.1.8/src/arm/regexp-macro-assembler-arm.cc +0 -1280
  618. data/ext/v8/upstream/3.1.8/src/arm/regexp-macro-assembler-arm.h +0 -252
  619. data/ext/v8/upstream/3.1.8/src/arm/simulator-arm.cc +0 -3165
  620. data/ext/v8/upstream/3.1.8/src/arm/simulator-arm.h +0 -402
  621. data/ext/v8/upstream/3.1.8/src/arm/stub-cache-arm.cc +0 -4077
  622. data/ext/v8/upstream/3.1.8/src/arm/virtual-frame-arm.cc +0 -843
  623. data/ext/v8/upstream/3.1.8/src/arm/virtual-frame-arm.h +0 -520
  624. data/ext/v8/upstream/3.1.8/src/array.js +0 -1231
  625. data/ext/v8/upstream/3.1.8/src/assembler.cc +0 -973
  626. data/ext/v8/upstream/3.1.8/src/assembler.h +0 -787
  627. data/ext/v8/upstream/3.1.8/src/ast-inl.h +0 -107
  628. data/ext/v8/upstream/3.1.8/src/ast.cc +0 -1067
  629. data/ext/v8/upstream/3.1.8/src/ast.h +0 -2177
  630. data/ext/v8/upstream/3.1.8/src/atomicops.h +0 -165
  631. data/ext/v8/upstream/3.1.8/src/bootstrapper.cc +0 -1888
  632. data/ext/v8/upstream/3.1.8/src/bootstrapper.h +0 -118
  633. data/ext/v8/upstream/3.1.8/src/builtins.cc +0 -1586
  634. data/ext/v8/upstream/3.1.8/src/builtins.h +0 -339
  635. data/ext/v8/upstream/3.1.8/src/checks.cc +0 -110
  636. data/ext/v8/upstream/3.1.8/src/checks.h +0 -292
  637. data/ext/v8/upstream/3.1.8/src/code-stubs.cc +0 -230
  638. data/ext/v8/upstream/3.1.8/src/code-stubs.h +0 -950
  639. data/ext/v8/upstream/3.1.8/src/codegen-inl.h +0 -64
  640. data/ext/v8/upstream/3.1.8/src/codegen.cc +0 -495
  641. data/ext/v8/upstream/3.1.8/src/codegen.h +0 -245
  642. data/ext/v8/upstream/3.1.8/src/compilation-cache.cc +0 -654
  643. data/ext/v8/upstream/3.1.8/src/compilation-cache.h +0 -112
  644. data/ext/v8/upstream/3.1.8/src/compiler.cc +0 -806
  645. data/ext/v8/upstream/3.1.8/src/compiler.h +0 -290
  646. data/ext/v8/upstream/3.1.8/src/contexts.cc +0 -320
  647. data/ext/v8/upstream/3.1.8/src/contexts.h +0 -376
  648. data/ext/v8/upstream/3.1.8/src/conversions.cc +0 -1069
  649. data/ext/v8/upstream/3.1.8/src/counters.cc +0 -78
  650. data/ext/v8/upstream/3.1.8/src/counters.h +0 -242
  651. data/ext/v8/upstream/3.1.8/src/cpu-profiler-inl.h +0 -100
  652. data/ext/v8/upstream/3.1.8/src/cpu-profiler.cc +0 -554
  653. data/ext/v8/upstream/3.1.8/src/cpu-profiler.h +0 -291
  654. data/ext/v8/upstream/3.1.8/src/cpu.h +0 -65
  655. data/ext/v8/upstream/3.1.8/src/d8-debug.cc +0 -367
  656. data/ext/v8/upstream/3.1.8/src/d8-debug.h +0 -157
  657. data/ext/v8/upstream/3.1.8/src/d8-posix.cc +0 -693
  658. data/ext/v8/upstream/3.1.8/src/d8.cc +0 -792
  659. data/ext/v8/upstream/3.1.8/src/d8.gyp +0 -85
  660. data/ext/v8/upstream/3.1.8/src/data-flow.h +0 -379
  661. data/ext/v8/upstream/3.1.8/src/dateparser.h +0 -263
  662. data/ext/v8/upstream/3.1.8/src/debug-agent.cc +0 -446
  663. data/ext/v8/upstream/3.1.8/src/debug-agent.h +0 -131
  664. data/ext/v8/upstream/3.1.8/src/debug.cc +0 -3085
  665. data/ext/v8/upstream/3.1.8/src/debug.h +0 -1025
  666. data/ext/v8/upstream/3.1.8/src/deoptimizer.cc +0 -1185
  667. data/ext/v8/upstream/3.1.8/src/deoptimizer.h +0 -529
  668. data/ext/v8/upstream/3.1.8/src/disasm.h +0 -77
  669. data/ext/v8/upstream/3.1.8/src/disassembler.cc +0 -338
  670. data/ext/v8/upstream/3.1.8/src/execution.cc +0 -735
  671. data/ext/v8/upstream/3.1.8/src/execution.h +0 -322
  672. data/ext/v8/upstream/3.1.8/src/extensions/experimental/experimental.gyp +0 -53
  673. data/ext/v8/upstream/3.1.8/src/extensions/experimental/i18n-extension.cc +0 -264
  674. data/ext/v8/upstream/3.1.8/src/extensions/externalize-string-extension.cc +0 -141
  675. data/ext/v8/upstream/3.1.8/src/extensions/gc-extension.cc +0 -58
  676. data/ext/v8/upstream/3.1.8/src/factory.cc +0 -1087
  677. data/ext/v8/upstream/3.1.8/src/factory.h +0 -432
  678. data/ext/v8/upstream/3.1.8/src/flag-definitions.h +0 -552
  679. data/ext/v8/upstream/3.1.8/src/frame-element.cc +0 -42
  680. data/ext/v8/upstream/3.1.8/src/frame-element.h +0 -277
  681. data/ext/v8/upstream/3.1.8/src/frames-inl.h +0 -210
  682. data/ext/v8/upstream/3.1.8/src/frames.cc +0 -1232
  683. data/ext/v8/upstream/3.1.8/src/frames.h +0 -826
  684. data/ext/v8/upstream/3.1.8/src/full-codegen.cc +0 -1382
  685. data/ext/v8/upstream/3.1.8/src/full-codegen.h +0 -751
  686. data/ext/v8/upstream/3.1.8/src/func-name-inferrer.cc +0 -90
  687. data/ext/v8/upstream/3.1.8/src/func-name-inferrer.h +0 -111
  688. data/ext/v8/upstream/3.1.8/src/gdb-jit.cc +0 -1547
  689. data/ext/v8/upstream/3.1.8/src/global-handles.cc +0 -534
  690. data/ext/v8/upstream/3.1.8/src/global-handles.h +0 -181
  691. data/ext/v8/upstream/3.1.8/src/globals.h +0 -325
  692. data/ext/v8/upstream/3.1.8/src/handles-inl.h +0 -80
  693. data/ext/v8/upstream/3.1.8/src/handles.cc +0 -910
  694. data/ext/v8/upstream/3.1.8/src/handles.h +0 -424
  695. data/ext/v8/upstream/3.1.8/src/hashmap.h +0 -121
  696. data/ext/v8/upstream/3.1.8/src/heap-inl.h +0 -587
  697. data/ext/v8/upstream/3.1.8/src/heap-profiler.cc +0 -1128
  698. data/ext/v8/upstream/3.1.8/src/heap-profiler.h +0 -381
  699. data/ext/v8/upstream/3.1.8/src/heap.cc +0 -5610
  700. data/ext/v8/upstream/3.1.8/src/heap.h +0 -2218
  701. data/ext/v8/upstream/3.1.8/src/hydrogen-instructions.cc +0 -1490
  702. data/ext/v8/upstream/3.1.8/src/hydrogen-instructions.h +0 -3493
  703. data/ext/v8/upstream/3.1.8/src/hydrogen.cc +0 -6056
  704. data/ext/v8/upstream/3.1.8/src/hydrogen.h +0 -1091
  705. data/ext/v8/upstream/3.1.8/src/ia32/assembler-ia32-inl.h +0 -429
  706. data/ext/v8/upstream/3.1.8/src/ia32/assembler-ia32.cc +0 -2800
  707. data/ext/v8/upstream/3.1.8/src/ia32/assembler-ia32.h +0 -1093
  708. data/ext/v8/upstream/3.1.8/src/ia32/builtins-ia32.cc +0 -1590
  709. data/ext/v8/upstream/3.1.8/src/ia32/code-stubs-ia32.cc +0 -6624
  710. data/ext/v8/upstream/3.1.8/src/ia32/code-stubs-ia32.h +0 -536
  711. data/ext/v8/upstream/3.1.8/src/ia32/codegen-ia32.cc +0 -10354
  712. data/ext/v8/upstream/3.1.8/src/ia32/codegen-ia32.h +0 -798
  713. data/ext/v8/upstream/3.1.8/src/ia32/cpu-ia32.cc +0 -87
  714. data/ext/v8/upstream/3.1.8/src/ia32/debug-ia32.cc +0 -309
  715. data/ext/v8/upstream/3.1.8/src/ia32/deoptimizer-ia32.cc +0 -664
  716. data/ext/v8/upstream/3.1.8/src/ia32/disasm-ia32.cc +0 -1597
  717. data/ext/v8/upstream/3.1.8/src/ia32/frames-ia32.h +0 -140
  718. data/ext/v8/upstream/3.1.8/src/ia32/full-codegen-ia32.cc +0 -4278
  719. data/ext/v8/upstream/3.1.8/src/ia32/ic-ia32.cc +0 -1786
  720. data/ext/v8/upstream/3.1.8/src/ia32/lithium-codegen-ia32.cc +0 -3880
  721. data/ext/v8/upstream/3.1.8/src/ia32/lithium-codegen-ia32.h +0 -309
  722. data/ext/v8/upstream/3.1.8/src/ia32/lithium-gap-resolver-ia32.cc +0 -460
  723. data/ext/v8/upstream/3.1.8/src/ia32/lithium-ia32.cc +0 -2095
  724. data/ext/v8/upstream/3.1.8/src/ia32/lithium-ia32.h +0 -2127
  725. data/ext/v8/upstream/3.1.8/src/ia32/macro-assembler-ia32.cc +0 -2031
  726. data/ext/v8/upstream/3.1.8/src/ia32/macro-assembler-ia32.h +0 -798
  727. data/ext/v8/upstream/3.1.8/src/ia32/regexp-macro-assembler-ia32.cc +0 -1253
  728. data/ext/v8/upstream/3.1.8/src/ia32/regexp-macro-assembler-ia32.h +0 -215
  729. data/ext/v8/upstream/3.1.8/src/ia32/register-allocator-ia32.cc +0 -157
  730. data/ext/v8/upstream/3.1.8/src/ia32/simulator-ia32.h +0 -72
  731. data/ext/v8/upstream/3.1.8/src/ia32/stub-cache-ia32.cc +0 -3732
  732. data/ext/v8/upstream/3.1.8/src/ia32/virtual-frame-ia32.cc +0 -1360
  733. data/ext/v8/upstream/3.1.8/src/ia32/virtual-frame-ia32.h +0 -646
  734. data/ext/v8/upstream/3.1.8/src/ic-inl.h +0 -129
  735. data/ext/v8/upstream/3.1.8/src/ic.cc +0 -2333
  736. data/ext/v8/upstream/3.1.8/src/ic.h +0 -639
  737. data/ext/v8/upstream/3.1.8/src/interpreter-irregexp.cc +0 -655
  738. data/ext/v8/upstream/3.1.8/src/interpreter-irregexp.h +0 -48
  739. data/ext/v8/upstream/3.1.8/src/json.js +0 -342
  740. data/ext/v8/upstream/3.1.8/src/jsregexp.cc +0 -5340
  741. data/ext/v8/upstream/3.1.8/src/jsregexp.h +0 -1484
  742. data/ext/v8/upstream/3.1.8/src/jump-target-heavy.cc +0 -430
  743. data/ext/v8/upstream/3.1.8/src/jump-target-heavy.h +0 -244
  744. data/ext/v8/upstream/3.1.8/src/jump-target-inl.h +0 -48
  745. data/ext/v8/upstream/3.1.8/src/jump-target-light.cc +0 -111
  746. data/ext/v8/upstream/3.1.8/src/lithium-allocator-inl.h +0 -140
  747. data/ext/v8/upstream/3.1.8/src/lithium-allocator.cc +0 -2093
  748. data/ext/v8/upstream/3.1.8/src/lithium-allocator.h +0 -644
  749. data/ext/v8/upstream/3.1.8/src/lithium.cc +0 -168
  750. data/ext/v8/upstream/3.1.8/src/liveedit.cc +0 -1650
  751. data/ext/v8/upstream/3.1.8/src/liveedit.h +0 -174
  752. data/ext/v8/upstream/3.1.8/src/liveobjectlist.cc +0 -2527
  753. data/ext/v8/upstream/3.1.8/src/liveobjectlist.h +0 -322
  754. data/ext/v8/upstream/3.1.8/src/log-utils.cc +0 -336
  755. data/ext/v8/upstream/3.1.8/src/log-utils.h +0 -232
  756. data/ext/v8/upstream/3.1.8/src/log.cc +0 -1608
  757. data/ext/v8/upstream/3.1.8/src/log.h +0 -379
  758. data/ext/v8/upstream/3.1.8/src/mark-compact.cc +0 -2957
  759. data/ext/v8/upstream/3.1.8/src/mark-compact.h +0 -433
  760. data/ext/v8/upstream/3.1.8/src/messages.cc +0 -164
  761. data/ext/v8/upstream/3.1.8/src/messages.js +0 -1071
  762. data/ext/v8/upstream/3.1.8/src/mips/assembler-mips-inl.h +0 -215
  763. data/ext/v8/upstream/3.1.8/src/mips/assembler-mips.cc +0 -1219
  764. data/ext/v8/upstream/3.1.8/src/mips/assembler-mips.h +0 -667
  765. data/ext/v8/upstream/3.1.8/src/mips/builtins-mips.cc +0 -205
  766. data/ext/v8/upstream/3.1.8/src/mips/codegen-mips-inl.h +0 -70
  767. data/ext/v8/upstream/3.1.8/src/mips/codegen-mips.cc +0 -1437
  768. data/ext/v8/upstream/3.1.8/src/mips/codegen-mips.h +0 -431
  769. data/ext/v8/upstream/3.1.8/src/mips/constants-mips.cc +0 -328
  770. data/ext/v8/upstream/3.1.8/src/mips/constants-mips.h +0 -525
  771. data/ext/v8/upstream/3.1.8/src/mips/cpu-mips.cc +0 -73
  772. data/ext/v8/upstream/3.1.8/src/mips/debug-mips.cc +0 -127
  773. data/ext/v8/upstream/3.1.8/src/mips/disasm-mips.cc +0 -787
  774. data/ext/v8/upstream/3.1.8/src/mips/fast-codegen-mips.cc +0 -77
  775. data/ext/v8/upstream/3.1.8/src/mips/frames-mips.cc +0 -96
  776. data/ext/v8/upstream/3.1.8/src/mips/frames-mips.h +0 -164
  777. data/ext/v8/upstream/3.1.8/src/mips/full-codegen-mips.cc +0 -277
  778. data/ext/v8/upstream/3.1.8/src/mips/ic-mips.cc +0 -208
  779. data/ext/v8/upstream/3.1.8/src/mips/jump-target-mips.cc +0 -175
  780. data/ext/v8/upstream/3.1.8/src/mips/macro-assembler-mips.cc +0 -1326
  781. data/ext/v8/upstream/3.1.8/src/mips/macro-assembler-mips.h +0 -461
  782. data/ext/v8/upstream/3.1.8/src/mips/register-allocator-mips-inl.h +0 -137
  783. data/ext/v8/upstream/3.1.8/src/mips/register-allocator-mips.h +0 -46
  784. data/ext/v8/upstream/3.1.8/src/mips/simulator-mips.cc +0 -1650
  785. data/ext/v8/upstream/3.1.8/src/mips/simulator-mips.h +0 -311
  786. data/ext/v8/upstream/3.1.8/src/mips/stub-cache-mips.cc +0 -418
  787. data/ext/v8/upstream/3.1.8/src/mips/virtual-frame-mips.cc +0 -319
  788. data/ext/v8/upstream/3.1.8/src/mips/virtual-frame-mips.h +0 -548
  789. data/ext/v8/upstream/3.1.8/src/mirror-debugger.js +0 -2380
  790. data/ext/v8/upstream/3.1.8/src/mksnapshot.cc +0 -256
  791. data/ext/v8/upstream/3.1.8/src/objects-debug.cc +0 -722
  792. data/ext/v8/upstream/3.1.8/src/objects-inl.h +0 -3946
  793. data/ext/v8/upstream/3.1.8/src/objects-printer.cc +0 -801
  794. data/ext/v8/upstream/3.1.8/src/objects-visiting.cc +0 -142
  795. data/ext/v8/upstream/3.1.8/src/objects-visiting.h +0 -401
  796. data/ext/v8/upstream/3.1.8/src/objects.cc +0 -10044
  797. data/ext/v8/upstream/3.1.8/src/objects.h +0 -6571
  798. data/ext/v8/upstream/3.1.8/src/parser.cc +0 -5165
  799. data/ext/v8/upstream/3.1.8/src/parser.h +0 -802
  800. data/ext/v8/upstream/3.1.8/src/platform-cygwin.cc +0 -745
  801. data/ext/v8/upstream/3.1.8/src/platform-freebsd.cc +0 -702
  802. data/ext/v8/upstream/3.1.8/src/platform-linux.cc +0 -981
  803. data/ext/v8/upstream/3.1.8/src/platform-macos.cc +0 -732
  804. data/ext/v8/upstream/3.1.8/src/platform-nullos.cc +0 -498
  805. data/ext/v8/upstream/3.1.8/src/platform-openbsd.cc +0 -657
  806. data/ext/v8/upstream/3.1.8/src/platform-posix.cc +0 -399
  807. data/ext/v8/upstream/3.1.8/src/platform-solaris.cc +0 -714
  808. data/ext/v8/upstream/3.1.8/src/platform-win32.cc +0 -1974
  809. data/ext/v8/upstream/3.1.8/src/platform.h +0 -636
  810. data/ext/v8/upstream/3.1.8/src/preparse-data.cc +0 -183
  811. data/ext/v8/upstream/3.1.8/src/preparser-api.cc +0 -213
  812. data/ext/v8/upstream/3.1.8/src/preparser.cc +0 -1205
  813. data/ext/v8/upstream/3.1.8/src/prettyprinter.cc +0 -1539
  814. data/ext/v8/upstream/3.1.8/src/prettyprinter.h +0 -223
  815. data/ext/v8/upstream/3.1.8/src/profile-generator.cc +0 -2899
  816. data/ext/v8/upstream/3.1.8/src/profile-generator.h +0 -1151
  817. data/ext/v8/upstream/3.1.8/src/property.cc +0 -96
  818. data/ext/v8/upstream/3.1.8/src/property.h +0 -337
  819. data/ext/v8/upstream/3.1.8/src/regexp-macro-assembler-irregexp.cc +0 -470
  820. data/ext/v8/upstream/3.1.8/src/regexp-macro-assembler.cc +0 -257
  821. data/ext/v8/upstream/3.1.8/src/regexp-macro-assembler.h +0 -231
  822. data/ext/v8/upstream/3.1.8/src/regexp-stack.cc +0 -103
  823. data/ext/v8/upstream/3.1.8/src/regexp-stack.h +0 -123
  824. data/ext/v8/upstream/3.1.8/src/regexp.js +0 -483
  825. data/ext/v8/upstream/3.1.8/src/register-allocator-inl.h +0 -141
  826. data/ext/v8/upstream/3.1.8/src/register-allocator.cc +0 -104
  827. data/ext/v8/upstream/3.1.8/src/register-allocator.h +0 -320
  828. data/ext/v8/upstream/3.1.8/src/rewriter.cc +0 -1023
  829. data/ext/v8/upstream/3.1.8/src/runtime-profiler.cc +0 -443
  830. data/ext/v8/upstream/3.1.8/src/runtime-profiler.h +0 -77
  831. data/ext/v8/upstream/3.1.8/src/runtime.cc +0 -11592
  832. data/ext/v8/upstream/3.1.8/src/runtime.h +0 -582
  833. data/ext/v8/upstream/3.1.8/src/safepoint-table.cc +0 -253
  834. data/ext/v8/upstream/3.1.8/src/safepoint-table.h +0 -263
  835. data/ext/v8/upstream/3.1.8/src/scanner-base.cc +0 -971
  836. data/ext/v8/upstream/3.1.8/src/scanner-base.h +0 -653
  837. data/ext/v8/upstream/3.1.8/src/scanner.cc +0 -586
  838. data/ext/v8/upstream/3.1.8/src/scanner.h +0 -194
  839. data/ext/v8/upstream/3.1.8/src/scopeinfo.cc +0 -636
  840. data/ext/v8/upstream/3.1.8/src/scopeinfo.h +0 -238
  841. data/ext/v8/upstream/3.1.8/src/scopes.cc +0 -1063
  842. data/ext/v8/upstream/3.1.8/src/scopes.h +0 -494
  843. data/ext/v8/upstream/3.1.8/src/serialize.cc +0 -1535
  844. data/ext/v8/upstream/3.1.8/src/serialize.h +0 -584
  845. data/ext/v8/upstream/3.1.8/src/snapshot-common.cc +0 -82
  846. data/ext/v8/upstream/3.1.8/src/snapshot.h +0 -71
  847. data/ext/v8/upstream/3.1.8/src/spaces-inl.h +0 -524
  848. data/ext/v8/upstream/3.1.8/src/spaces.cc +0 -3254
  849. data/ext/v8/upstream/3.1.8/src/spaces.h +0 -2362
  850. data/ext/v8/upstream/3.1.8/src/string-search.cc +0 -40
  851. data/ext/v8/upstream/3.1.8/src/string-search.h +0 -567
  852. data/ext/v8/upstream/3.1.8/src/string-stream.cc +0 -584
  853. data/ext/v8/upstream/3.1.8/src/string.js +0 -915
  854. data/ext/v8/upstream/3.1.8/src/stub-cache.cc +0 -1878
  855. data/ext/v8/upstream/3.1.8/src/stub-cache.h +0 -849
  856. data/ext/v8/upstream/3.1.8/src/token.cc +0 -63
  857. data/ext/v8/upstream/3.1.8/src/token.h +0 -288
  858. data/ext/v8/upstream/3.1.8/src/top.cc +0 -1152
  859. data/ext/v8/upstream/3.1.8/src/top.h +0 -608
  860. data/ext/v8/upstream/3.1.8/src/type-info.cc +0 -406
  861. data/ext/v8/upstream/3.1.8/src/type-info.h +0 -283
  862. data/ext/v8/upstream/3.1.8/src/unicode.cc +0 -1624
  863. data/ext/v8/upstream/3.1.8/src/unicode.h +0 -280
  864. data/ext/v8/upstream/3.1.8/src/utils.h +0 -793
  865. data/ext/v8/upstream/3.1.8/src/v8-counters.cc +0 -55
  866. data/ext/v8/upstream/3.1.8/src/v8-counters.h +0 -290
  867. data/ext/v8/upstream/3.1.8/src/v8.cc +0 -270
  868. data/ext/v8/upstream/3.1.8/src/v8.h +0 -127
  869. data/ext/v8/upstream/3.1.8/src/v8globals.h +0 -480
  870. data/ext/v8/upstream/3.1.8/src/v8natives.js +0 -1252
  871. data/ext/v8/upstream/3.1.8/src/v8threads.cc +0 -440
  872. data/ext/v8/upstream/3.1.8/src/v8threads.h +0 -157
  873. data/ext/v8/upstream/3.1.8/src/v8utils.h +0 -354
  874. data/ext/v8/upstream/3.1.8/src/variables.h +0 -212
  875. data/ext/v8/upstream/3.1.8/src/version.cc +0 -95
  876. data/ext/v8/upstream/3.1.8/src/version.h +0 -64
  877. data/ext/v8/upstream/3.1.8/src/virtual-frame-light-inl.h +0 -170
  878. data/ext/v8/upstream/3.1.8/src/vm-state-inl.h +0 -134
  879. data/ext/v8/upstream/3.1.8/src/vm-state.h +0 -68
  880. data/ext/v8/upstream/3.1.8/src/win32-headers.h +0 -95
  881. data/ext/v8/upstream/3.1.8/src/x64/assembler-x64-inl.h +0 -455
  882. data/ext/v8/upstream/3.1.8/src/x64/assembler-x64.cc +0 -3162
  883. data/ext/v8/upstream/3.1.8/src/x64/assembler-x64.h +0 -1584
  884. data/ext/v8/upstream/3.1.8/src/x64/builtins-x64.cc +0 -1492
  885. data/ext/v8/upstream/3.1.8/src/x64/code-stubs-x64.cc +0 -5150
  886. data/ext/v8/upstream/3.1.8/src/x64/code-stubs-x64.h +0 -519
  887. data/ext/v8/upstream/3.1.8/src/x64/codegen-x64.cc +0 -8835
  888. data/ext/v8/upstream/3.1.8/src/x64/codegen-x64.h +0 -750
  889. data/ext/v8/upstream/3.1.8/src/x64/cpu-x64.cc +0 -86
  890. data/ext/v8/upstream/3.1.8/src/x64/debug-x64.cc +0 -316
  891. data/ext/v8/upstream/3.1.8/src/x64/deoptimizer-x64.cc +0 -781
  892. data/ext/v8/upstream/3.1.8/src/x64/disasm-x64.cc +0 -1737
  893. data/ext/v8/upstream/3.1.8/src/x64/frames-x64.h +0 -130
  894. data/ext/v8/upstream/3.1.8/src/x64/full-codegen-x64.cc +0 -3984
  895. data/ext/v8/upstream/3.1.8/src/x64/ic-x64.cc +0 -1761
  896. data/ext/v8/upstream/3.1.8/src/x64/lithium-codegen-x64.cc +0 -3639
  897. data/ext/v8/upstream/3.1.8/src/x64/lithium-codegen-x64.h +0 -305
  898. data/ext/v8/upstream/3.1.8/src/x64/lithium-x64.cc +0 -2044
  899. data/ext/v8/upstream/3.1.8/src/x64/lithium-x64.h +0 -2052
  900. data/ext/v8/upstream/3.1.8/src/x64/macro-assembler-x64.cc +0 -2660
  901. data/ext/v8/upstream/3.1.8/src/x64/macro-assembler-x64.h +0 -1852
  902. data/ext/v8/upstream/3.1.8/src/x64/regexp-macro-assembler-x64.cc +0 -1382
  903. data/ext/v8/upstream/3.1.8/src/x64/regexp-macro-assembler-x64.h +0 -278
  904. data/ext/v8/upstream/3.1.8/src/x64/register-allocator-x64-inl.h +0 -87
  905. data/ext/v8/upstream/3.1.8/src/x64/register-allocator-x64.cc +0 -91
  906. data/ext/v8/upstream/3.1.8/src/x64/simulator-x64.h +0 -71
  907. data/ext/v8/upstream/3.1.8/src/x64/stub-cache-x64.cc +0 -3509
  908. data/ext/v8/upstream/3.1.8/src/x64/virtual-frame-x64.cc +0 -1292
  909. data/ext/v8/upstream/3.1.8/src/x64/virtual-frame-x64.h +0 -593
  910. data/ext/v8/upstream/3.1.8/src/zone-inl.h +0 -83
  911. data/ext/v8/upstream/3.1.8/src/zone.cc +0 -195
  912. data/ext/v8/upstream/3.1.8/src/zone.h +0 -233
  913. data/ext/v8/upstream/3.1.8/tools/gyp/v8.gyp +0 -869
  914. data/ext/v8/upstream/3.1.8/tools/linux-tick-processor +0 -33
  915. data/ext/v8/upstream/3.1.8/tools/tickprocessor.js +0 -863
  916. data/ext/v8/upstream/3.1.8/tools/visual_studio/v8_base.vcproj +0 -1296
  917. data/ext/v8/upstream/3.1.8/tools/visual_studio/v8_base_arm.vcproj +0 -1234
  918. data/ext/v8/upstream/3.1.8/tools/visual_studio/v8_base_x64.vcproj +0 -1296
  919. data/ext/v8/upstream/3.1.8/tools/visual_studio/x64.vsprops +0 -17
  920. data/spec/ext/mem_spec.rb +0 -42
--- data/ext/v8/upstream/3.1.8/src/spaces.cc
+++ /dev/null
@@ -1,3254 +0,0 @@
- // Copyright 2006-2010 the V8 project authors. All rights reserved.
- // Redistribution and use in source and binary forms, with or without
- // modification, are permitted provided that the following conditions are
- // met:
- //
- //     * Redistributions of source code must retain the above copyright
- //       notice, this list of conditions and the following disclaimer.
- //     * Redistributions in binary form must reproduce the above
- //       copyright notice, this list of conditions and the following
- //       disclaimer in the documentation and/or other materials provided
- //       with the distribution.
- //     * Neither the name of Google Inc. nor the names of its
- //       contributors may be used to endorse or promote products derived
- //       from this software without specific prior written permission.
- //
- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- #include "v8.h"
-
- #include "liveobjectlist-inl.h"
- #include "macro-assembler.h"
- #include "mark-compact.h"
- #include "platform.h"
-
- namespace v8 {
- namespace internal {
-
- // For contiguous spaces, top should be in the space (or at the end) and limit
- // should be the end of the space.
- #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-   ASSERT((space).low() <= (info).top                  \
-          && (info).top <= (space).high()              \
-          && (info).limit == (space).high())
-
- intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;
-
- // ----------------------------------------------------------------------------
- // HeapObjectIterator
-
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-   Initialize(space->bottom(), space->top(), NULL);
- }
-
-
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
-                                        HeapObjectCallback size_func) {
-   Initialize(space->bottom(), space->top(), size_func);
- }
-
-
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
-   Initialize(start, space->top(), NULL);
- }
-
-
- HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
-                                        HeapObjectCallback size_func) {
-   Initialize(start, space->top(), size_func);
- }
-
-
- HeapObjectIterator::HeapObjectIterator(Page* page,
-                                        HeapObjectCallback size_func) {
-   Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
- }
-
-
- void HeapObjectIterator::Initialize(Address cur, Address end,
-                                     HeapObjectCallback size_f) {
-   cur_addr_ = cur;
-   end_addr_ = end;
-   end_page_ = Page::FromAllocationTop(end);
-   size_func_ = size_f;
-   Page* p = Page::FromAllocationTop(cur_addr_);
-   cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
-
- #ifdef DEBUG
-   Verify();
- #endif
- }
-
-
- HeapObject* HeapObjectIterator::FromNextPage() {
-   if (cur_addr_ == end_addr_) return NULL;
-
-   Page* cur_page = Page::FromAllocationTop(cur_addr_);
-   cur_page = cur_page->next_page();
-   ASSERT(cur_page->is_valid());
-
-   cur_addr_ = cur_page->ObjectAreaStart();
-   cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
-
-   if (cur_addr_ == end_addr_) return NULL;
-   ASSERT(cur_addr_ < cur_limit_);
- #ifdef DEBUG
-   Verify();
- #endif
-   return FromCurrentPage();
- }
-
-
- #ifdef DEBUG
- void HeapObjectIterator::Verify() {
-   Page* p = Page::FromAllocationTop(cur_addr_);
-   ASSERT(p == Page::FromAllocationTop(cur_limit_));
-   ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
- }
- #endif
-
-
- // -----------------------------------------------------------------------------
- // PageIterator
-
- PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
-   prev_page_ = NULL;
-   switch (mode) {
-     case PAGES_IN_USE:
-       stop_page_ = space->AllocationTopPage();
-       break;
-     case PAGES_USED_BY_MC:
-       stop_page_ = space->MCRelocationTopPage();
-       break;
-     case ALL_PAGES:
- #ifdef DEBUG
-       // Verify that the cached last page in the space is actually the
-       // last page.
-       for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
-         if (!p->next_page()->is_valid()) {
-           ASSERT(space->last_page_ == p);
-         }
-       }
- #endif
-       stop_page_ = space->last_page_;
-       break;
-   }
- }
-
-
- // -----------------------------------------------------------------------------
- // CodeRange
-
- List<CodeRange::FreeBlock> CodeRange::free_list_(0);
- List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
- int CodeRange::current_allocation_block_index_ = 0;
- VirtualMemory* CodeRange::code_range_ = NULL;
-
-
- bool CodeRange::Setup(const size_t requested) {
-   ASSERT(code_range_ == NULL);
-
-   code_range_ = new VirtualMemory(requested);
-   CHECK(code_range_ != NULL);
-   if (!code_range_->IsReserved()) {
-     delete code_range_;
-     code_range_ = NULL;
-     return false;
-   }
-
-   // We are sure that we have mapped a block of requested addresses.
-   ASSERT(code_range_->size() == requested);
-   LOG(NewEvent("CodeRange", code_range_->address(), requested));
-   allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
-   current_allocation_block_index_ = 0;
-   return true;
- }
-
-
- int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
-                                        const FreeBlock* right) {
-   // The entire point of CodeRange is that the difference between two
-   // addresses in the range can be represented as a signed 32-bit int,
-   // so the cast is semantically correct.
-   return static_cast<int>(left->start - right->start);
- }
-
-
- void CodeRange::GetNextAllocationBlock(size_t requested) {
-   for (current_allocation_block_index_++;
-        current_allocation_block_index_ < allocation_list_.length();
-        current_allocation_block_index_++) {
-     if (requested <= allocation_list_[current_allocation_block_index_].size) {
-       return;  // Found a large enough allocation block.
-     }
-   }
-
-   // Sort and merge the free blocks on the free list and the allocation list.
-   free_list_.AddAll(allocation_list_);
-   allocation_list_.Clear();
-   free_list_.Sort(&CompareFreeBlockAddress);
-   for (int i = 0; i < free_list_.length();) {
-     FreeBlock merged = free_list_[i];
-     i++;
-     // Add adjacent free blocks to the current merged block.
-     while (i < free_list_.length() &&
-            free_list_[i].start == merged.start + merged.size) {
-       merged.size += free_list_[i].size;
-       i++;
-     }
-     if (merged.size > 0) {
-       allocation_list_.Add(merged);
-     }
-   }
-   free_list_.Clear();
-
-   for (current_allocation_block_index_ = 0;
-        current_allocation_block_index_ < allocation_list_.length();
-        current_allocation_block_index_++) {
-     if (requested <= allocation_list_[current_allocation_block_index_].size) {
-       return;  // Found a large enough allocation block.
-     }
-   }
-
-   // Code range is full or too fragmented.
-   V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
- }
-
-
-
- void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
-   ASSERT(current_allocation_block_index_ < allocation_list_.length());
-   if (requested > allocation_list_[current_allocation_block_index_].size) {
-     // Find an allocation block large enough.  This function call may
-     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-     GetNextAllocationBlock(requested);
-   }
-   // Commit the requested memory at the start of the current allocation block.
-   *allocated = RoundUp(requested, Page::kPageSize);
-   FreeBlock current = allocation_list_[current_allocation_block_index_];
-   if (*allocated >= current.size - Page::kPageSize) {
-     // Don't leave a small free block, useless for a large object or chunk.
-     *allocated = current.size;
-   }
-   ASSERT(*allocated <= current.size);
-   if (!code_range_->Commit(current.start, *allocated, true)) {
-     *allocated = 0;
-     return NULL;
-   }
-   allocation_list_[current_allocation_block_index_].start += *allocated;
-   allocation_list_[current_allocation_block_index_].size -= *allocated;
-   if (*allocated == current.size) {
-     GetNextAllocationBlock(0);  // This block is used up, get the next one.
-   }
-   return current.start;
- }
-
-
- void CodeRange::FreeRawMemory(void* address, size_t length) {
-   free_list_.Add(FreeBlock(address, length));
-   code_range_->Uncommit(address, length);
- }
-
-
- void CodeRange::TearDown() {
-   delete code_range_;  // Frees all memory in the virtual memory range.
-   code_range_ = NULL;
-   free_list_.Free();
-   allocation_list_.Free();
- }
-
-
- // -----------------------------------------------------------------------------
- // MemoryAllocator
- //
- intptr_t MemoryAllocator::capacity_ = 0;
- intptr_t MemoryAllocator::capacity_executable_ = 0;
- intptr_t MemoryAllocator::size_ = 0;
- intptr_t MemoryAllocator::size_executable_ = 0;
-
- List<MemoryAllocator::MemoryAllocationCallbackRegistration>
-   MemoryAllocator::memory_allocation_callbacks_;
-
- VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
-
- // 270 is an estimate based on the static default heap size of a pair of 256K
- // semispaces and a 64M old generation.
- const int kEstimatedNumberOfChunks = 270;
- List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
-     kEstimatedNumberOfChunks);
- List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
- int MemoryAllocator::max_nof_chunks_ = 0;
- int MemoryAllocator::top_ = 0;
-
-
- void MemoryAllocator::Push(int free_chunk_id) {
-   ASSERT(max_nof_chunks_ > 0);
-   ASSERT(top_ < max_nof_chunks_);
-   free_chunk_ids_[top_++] = free_chunk_id;
- }
-
-
- int MemoryAllocator::Pop() {
-   ASSERT(top_ > 0);
-   return free_chunk_ids_[--top_];
- }
-
-
- bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
-   capacity_ = RoundUp(capacity, Page::kPageSize);
-   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
-   ASSERT_GE(capacity_, capacity_executable_);
-
-   // Over-estimate the size of chunks_ array.  It assumes the expansion of old
-   // space is always in the unit of a chunk (kChunkSize) except the last
-   // expansion.
-   //
-   // Due to alignment, allocated space might be one page less than required
-   // number (kPagesPerChunk) of pages for old spaces.
-   //
-   // Reserve two chunk ids for semispaces, one for map space, one for old
-   // space, and one for code space.
-   max_nof_chunks_ =
-       static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
-   if (max_nof_chunks_ > kMaxNofChunks) return false;
-
-   size_ = 0;
-   size_executable_ = 0;
-   ChunkInfo info;  // uninitialized element.
-   for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
-     chunks_.Add(info);
-     free_chunk_ids_.Add(i);
-   }
-   top_ = max_nof_chunks_;
-   return true;
- }
-
-
- bool MemoryAllocator::SafeIsInAPageChunk(Address addr) {
-   return InInitialChunk(addr) || InAllocatedChunks(addr);
- }
-
-
- void MemoryAllocator::TearDown() {
-   for (int i = 0; i < max_nof_chunks_; i++) {
-     if (chunks_[i].address() != NULL) DeleteChunk(i);
-   }
-   chunks_.Clear();
-   free_chunk_ids_.Clear();
-
-   if (initial_chunk_ != NULL) {
-     LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
-     delete initial_chunk_;
-     initial_chunk_ = NULL;
-   }
-
-   FreeChunkTables(&chunk_table_[0],
-                   kChunkTableTopLevelEntries,
-                   kChunkTableLevels);
-
-   ASSERT(top_ == max_nof_chunks_);  // all chunks are free
-   top_ = 0;
-   capacity_ = 0;
-   capacity_executable_ = 0;
-   size_ = 0;
-   max_nof_chunks_ = 0;
- }
-
-
- void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) {
-   for (int i = 0; i < len; i++) {
-     if (array[i] != kUnusedChunkTableEntry) {
-       uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]);
-       if (level > 1) {
-         array[i] = kUnusedChunkTableEntry;
-         FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1);
-       } else {
-         array[i] = kUnusedChunkTableEntry;
-       }
-       delete[] subarray;
-     }
-   }
- }
-
-
- void* MemoryAllocator::AllocateRawMemory(const size_t requested,
-                                          size_t* allocated,
-                                          Executability executable) {
-   if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
-     return NULL;
-   }
-
-   void* mem;
-   if (executable == EXECUTABLE) {
-     // Check executable memory limit.
-     if (size_executable_ + requested >
-         static_cast<size_t>(capacity_executable_)) {
-       LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
-                       "V8 Executable Allocation capacity exceeded"));
-       return NULL;
-     }
-     // Allocate executable memory either from code range or from the
-     // OS.
-     if (CodeRange::exists()) {
-       mem = CodeRange::AllocateRawMemory(requested, allocated);
-     } else {
-       mem = OS::Allocate(requested, allocated, true);
-     }
-     // Update executable memory size.
-     size_executable_ += static_cast<int>(*allocated);
-   } else {
-     mem = OS::Allocate(requested, allocated, false);
-   }
-   int alloced = static_cast<int>(*allocated);
-   size_ += alloced;
-
- #ifdef DEBUG
-   ZapBlock(reinterpret_cast<Address>(mem), alloced);
- #endif
-   Counters::memory_allocated.Increment(alloced);
-   return mem;
- }
-
-
- void MemoryAllocator::FreeRawMemory(void* mem,
-                                     size_t length,
-                                     Executability executable) {
- #ifdef DEBUG
-   ZapBlock(reinterpret_cast<Address>(mem), length);
- #endif
-   if (CodeRange::contains(static_cast<Address>(mem))) {
-     CodeRange::FreeRawMemory(mem, length);
-   } else {
-     OS::Free(mem, length);
-   }
-   Counters::memory_allocated.Decrement(static_cast<int>(length));
-   size_ -= static_cast<int>(length);
-   if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
-
-   ASSERT(size_ >= 0);
-   ASSERT(size_executable_ >= 0);
- }
-
-
- void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
-                                                 AllocationAction action,
-                                                 size_t size) {
-   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-     MemoryAllocationCallbackRegistration registration =
-         memory_allocation_callbacks_[i];
-     if ((registration.space & space) == space &&
-         (registration.action & action) == action)
-       registration.callback(space, action, static_cast<int>(size));
-   }
- }
-
-
- bool MemoryAllocator::MemoryAllocationCallbackRegistered(
-     MemoryAllocationCallback callback) {
-   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-     if (memory_allocation_callbacks_[i].callback == callback) return true;
-   }
-   return false;
- }
-
-
- void MemoryAllocator::AddMemoryAllocationCallback(
-     MemoryAllocationCallback callback,
-     ObjectSpace space,
-     AllocationAction action) {
-   ASSERT(callback != NULL);
-   MemoryAllocationCallbackRegistration registration(callback, space, action);
-   ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
-   return memory_allocation_callbacks_.Add(registration);
- }
-
-
- void MemoryAllocator::RemoveMemoryAllocationCallback(
-     MemoryAllocationCallback callback) {
-   ASSERT(callback != NULL);
-   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-     if (memory_allocation_callbacks_[i].callback == callback) {
-       memory_allocation_callbacks_.Remove(i);
-       return;
-     }
-   }
-   UNREACHABLE();
- }
-
- void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
-   ASSERT(initial_chunk_ == NULL);
-
-   initial_chunk_ = new VirtualMemory(requested);
-   CHECK(initial_chunk_ != NULL);
-   if (!initial_chunk_->IsReserved()) {
-     delete initial_chunk_;
-     initial_chunk_ = NULL;
-     return NULL;
-   }
-
-   // We are sure that we have mapped a block of requested addresses.
-   ASSERT(initial_chunk_->size() == requested);
-   LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
-   size_ += static_cast<int>(requested);
-   return initial_chunk_->address();
- }
-
-
- static int PagesInChunk(Address start, size_t size) {
-   // The first page starts on the first page-aligned address from start onward
-   // and the last page ends on the last page-aligned address before
-   // start+size.  Page::kPageSize is a power of two so we can divide by
-   // shifting.
-   return static_cast<int>((RoundDown(start + size, Page::kPageSize)
-       - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
- }
-
-
- Page* MemoryAllocator::AllocatePages(int requested_pages,
-                                      int* allocated_pages,
-                                      PagedSpace* owner) {
-   if (requested_pages <= 0) return Page::FromAddress(NULL);
-   size_t chunk_size = requested_pages * Page::kPageSize;
-
-   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
-   if (chunk == NULL) return Page::FromAddress(NULL);
-   LOG(NewEvent("PagedChunk", chunk, chunk_size));
-
-   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
-   // We may 'lose' a page due to alignment.
-   ASSERT(*allocated_pages >= kPagesPerChunk - 1);
-   if (*allocated_pages == 0) {
-     FreeRawMemory(chunk, chunk_size, owner->executable());
-     LOG(DeleteEvent("PagedChunk", chunk));
-     return Page::FromAddress(NULL);
-   }
-
-   int chunk_id = Pop();
-   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
-
-   ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-   PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-   Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
-
-   AddToAllocatedChunks(static_cast<Address>(chunk), chunk_size);
-
-   return new_pages;
- }
-
-
- Page* MemoryAllocator::CommitPages(Address start, size_t size,
-                                    PagedSpace* owner, int* num_pages) {
-   ASSERT(start != NULL);
-   *num_pages = PagesInChunk(start, size);
-   ASSERT(*num_pages > 0);
-   ASSERT(initial_chunk_ != NULL);
-   ASSERT(InInitialChunk(start));
-   ASSERT(InInitialChunk(start + size - 1));
-   if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
-     return Page::FromAddress(NULL);
-   }
- #ifdef DEBUG
-   ZapBlock(start, size);
- #endif
-   Counters::memory_allocated.Increment(static_cast<int>(size));
-
-   // So long as we correctly overestimated the number of chunks we should not
-   // run out of chunk ids.
-   CHECK(!OutOfChunkIds());
-   int chunk_id = Pop();
-   chunks_[chunk_id].init(start, size, owner);
-   return InitializePagesInChunk(chunk_id, *num_pages, owner);
- }
-
-
- bool MemoryAllocator::CommitBlock(Address start,
-                                   size_t size,
-                                   Executability executable) {
-   ASSERT(start != NULL);
-   ASSERT(size > 0);
-   ASSERT(initial_chunk_ != NULL);
-   ASSERT(InInitialChunk(start));
-   ASSERT(InInitialChunk(start + size - 1));
-
-   if (!initial_chunk_->Commit(start, size, executable)) return false;
- #ifdef DEBUG
-   ZapBlock(start, size);
- #endif
-   Counters::memory_allocated.Increment(static_cast<int>(size));
-   return true;
- }
-
-
- bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-   ASSERT(start != NULL);
-   ASSERT(size > 0);
-   ASSERT(initial_chunk_ != NULL);
-   ASSERT(InInitialChunk(start));
-   ASSERT(InInitialChunk(start + size - 1));
-
-   if (!initial_chunk_->Uncommit(start, size)) return false;
-   Counters::memory_allocated.Decrement(static_cast<int>(size));
-   return true;
- }
-
-
- void MemoryAllocator::ZapBlock(Address start, size_t size) {
-   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
-     Memory::Address_at(start + s) = kZapValue;
-   }
- }
-
-
- Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
-                                               PagedSpace* owner) {
-   ASSERT(IsValidChunk(chunk_id));
-   ASSERT(pages_in_chunk > 0);
-
-   Address chunk_start = chunks_[chunk_id].address();
-
-   Address low = RoundUp(chunk_start, Page::kPageSize);
-
- #ifdef DEBUG
-   size_t chunk_size = chunks_[chunk_id].size();
-   Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
-   ASSERT(pages_in_chunk <=
-         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
- #endif
-
-   Address page_addr = low;
-   for (int i = 0; i < pages_in_chunk; i++) {
-     Page* p = Page::FromAddress(page_addr);
-     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-     p->InvalidateWatermark(true);
-     p->SetIsLargeObjectPage(false);
-     p->SetAllocationWatermark(p->ObjectAreaStart());
-     p->SetCachedAllocationWatermark(p->ObjectAreaStart());
-     page_addr += Page::kPageSize;
-   }
-
-   // Set the next page of the last page to 0.
-   Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
-   last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
-   return Page::FromAddress(low);
- }
-
-
- Page* MemoryAllocator::FreePages(Page* p) {
-   if (!p->is_valid()) return p;
-
-   // Find the first page in the same chunk as 'p'
-   Page* first_page = FindFirstPageInSameChunk(p);
-   Page* page_to_return = Page::FromAddress(NULL);
-
-   if (p != first_page) {
-     // Find the last page in the same chunk as 'prev'.
-     Page* last_page = FindLastPageInSameChunk(p);
-     first_page = GetNextPage(last_page);  // first page in next chunk
-
-     // set the next_page of last_page to NULL
-     SetNextPage(last_page, Page::FromAddress(NULL));
-     page_to_return = p;  // return 'p' when exiting
-   }
-
-   while (first_page->is_valid()) {
-     int chunk_id = GetChunkId(first_page);
-     ASSERT(IsValidChunk(chunk_id));
-
-     // Find the first page of the next chunk before deleting this chunk.
-     first_page = GetNextPage(FindLastPageInSameChunk(first_page));
-
-     // Free the current chunk.
-     DeleteChunk(chunk_id);
-   }
-
-   return page_to_return;
- }
-
-
- void MemoryAllocator::FreeAllPages(PagedSpace* space) {
-   for (int i = 0, length = chunks_.length(); i < length; i++) {
-     if (chunks_[i].owner() == space) {
-       DeleteChunk(i);
-     }
-   }
- }
-
-
- void MemoryAllocator::DeleteChunk(int chunk_id) {
-   ASSERT(IsValidChunk(chunk_id));
-
-   ChunkInfo& c = chunks_[chunk_id];
-
-   // We cannot free a chunk contained in the initial chunk because it was not
-   // allocated with AllocateRawMemory.  Instead we uncommit the virtual
-   // memory.
-   if (InInitialChunk(c.address())) {
-     // TODO(1240712): VirtualMemory::Uncommit has a return value which
-     // is ignored here.
-     initial_chunk_->Uncommit(c.address(), c.size());
-     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
-   } else {
-     RemoveFromAllocatedChunks(c.address(), c.size());
-     LOG(DeleteEvent("PagedChunk", c.address()));
-     ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
-     size_t size = c.size();
-     FreeRawMemory(c.address(), size, c.executable());
-     PerformAllocationCallback(space, kAllocationActionFree, size);
-   }
-   c.init(NULL, 0, NULL);
-   Push(chunk_id);
- }
-
-
- Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
-   int chunk_id = GetChunkId(p);
-   ASSERT(IsValidChunk(chunk_id));
-
-   Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
-   return Page::FromAddress(low);
- }
-
-
- Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
-   int chunk_id = GetChunkId(p);
-   ASSERT(IsValidChunk(chunk_id));
-
-   Address chunk_start = chunks_[chunk_id].address();
-   size_t chunk_size = chunks_[chunk_id].size();
-
-   Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
-   ASSERT(chunk_start <= p->address() && p->address() < high);
-
-   return Page::FromAddress(high - Page::kPageSize);
- }
-
-
- #ifdef DEBUG
- void MemoryAllocator::ReportStatistics() {
-   float pct = static_cast<float>(capacity_ - size_) / capacity_;
-   PrintF("  capacity: %" V8_PTR_PREFIX "d"
-              ", used: %" V8_PTR_PREFIX "d"
-              ", available: %%%d\n\n",
-          capacity_, size_, static_cast<int>(pct*100));
- }
- #endif
-
-
- void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
-                                                  Page** first_page,
-                                                  Page** last_page,
-                                                  Page** last_page_in_use) {
-   Page* first = NULL;
-   Page* last = NULL;
-
-   for (int i = 0, length = chunks_.length(); i < length; i++) {
-     ChunkInfo& chunk = chunks_[i];
-
-     if (chunk.owner() == space) {
-       if (first == NULL) {
-         Address low = RoundUp(chunk.address(), Page::kPageSize);
-         first = Page::FromAddress(low);
-       }
-       last = RelinkPagesInChunk(i,
-                                 chunk.address(),
-                                 chunk.size(),
-                                 last,
-                                 last_page_in_use);
-     }
-   }
-
-   if (first_page != NULL) {
-     *first_page = first;
-   }
-
-   if (last_page != NULL) {
-     *last_page = last;
-   }
- }
-
-
- Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
-                                           Address chunk_start,
-                                           size_t chunk_size,
-                                           Page* prev,
-                                           Page** last_page_in_use) {
-   Address page_addr = RoundUp(chunk_start, Page::kPageSize);
-   int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
-
-   if (prev->is_valid()) {
-     SetNextPage(prev, Page::FromAddress(page_addr));
-   }
-
-   for (int i = 0; i < pages_in_chunk; i++) {
-     Page* p = Page::FromAddress(page_addr);
-     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-     page_addr += Page::kPageSize;
-
-     p->InvalidateWatermark(true);
-     if (p->WasInUseBeforeMC()) {
-       *last_page_in_use = p;
-     }
-   }
-
-   // Set the next page of the last page to 0.
-   Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
-   last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
-   if (last_page->WasInUseBeforeMC()) {
-     *last_page_in_use = last_page;
-   }
-
-   return last_page;
- }
-
-
- void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) {
-   ASSERT(size == kChunkSize);
-   uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
-   AddChunkUsingAddress(int_address, int_address);
-   AddChunkUsingAddress(int_address, int_address + size - 1);
- }
-
-
- void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
-                                            uintptr_t chunk_index_base) {
-   uintptr_t* fine_grained = AllocatedChunksFinder(
-       chunk_table_,
-       chunk_index_base,
-       kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
-       kCreateTablesAsNeeded);
-   int index = FineGrainedIndexForAddress(chunk_index_base);
-   if (fine_grained[index] != kUnusedChunkTableEntry) index++;
-   ASSERT(fine_grained[index] == kUnusedChunkTableEntry);
-   fine_grained[index] = chunk_start;
- }
-
-
- void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) {
-   ASSERT(size == kChunkSize);
-   uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
-   RemoveChunkFoundUsingAddress(int_address, int_address);
-   RemoveChunkFoundUsingAddress(int_address, int_address + size - 1);
- }
-
-
- void MemoryAllocator::RemoveChunkFoundUsingAddress(
-     uintptr_t chunk_start,
-     uintptr_t chunk_index_base) {
-   uintptr_t* fine_grained = AllocatedChunksFinder(
-       chunk_table_,
-       chunk_index_base,
-       kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
-       kDontCreateTables);
-   // Can't remove an entry that's not there.
-   ASSERT(fine_grained != kUnusedChunkTableEntry);
-   int index = FineGrainedIndexForAddress(chunk_index_base);
-   ASSERT(fine_grained[index] != kUnusedChunkTableEntry);
-   if (fine_grained[index] != chunk_start) {
-     index++;
-     ASSERT(fine_grained[index] == chunk_start);
-     fine_grained[index] = kUnusedChunkTableEntry;
-   } else {
-     // If only one of the entries is used it must be the first, since
-     // InAllocatedChunks relies on that.  Move things around so that this is
-     // the case.
-     fine_grained[index] = fine_grained[index + 1];
-     fine_grained[index + 1] = kUnusedChunkTableEntry;
-   }
- }
-
-
- bool MemoryAllocator::InAllocatedChunks(Address addr) {
-   uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
-   uintptr_t* fine_grained = AllocatedChunksFinder(
-       chunk_table_,
-       int_address,
-       kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
-       kDontCreateTables);
-   if (fine_grained == NULL) return false;
-   int index = FineGrainedIndexForAddress(int_address);
-   if (fine_grained[index] == kUnusedChunkTableEntry) return false;
-   uintptr_t entry = fine_grained[index];
-   if (entry <= int_address && entry + kChunkSize > int_address) return true;
-   index++;
-   if (fine_grained[index] == kUnusedChunkTableEntry) return false;
-   entry = fine_grained[index];
-   if (entry <= int_address && entry + kChunkSize > int_address) return true;
-   return false;
- }
-
-
- uintptr_t* MemoryAllocator::AllocatedChunksFinder(
-     uintptr_t* table,
-     uintptr_t address,
-     int bit_position,
-     CreateTables create_as_needed) {
-   if (bit_position == kChunkSizeLog2) {
-     return table;
-   }
-   ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel);
-   int index =
-       ((address >> bit_position) &
-        ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1));
-   uintptr_t more_fine_grained_address =
-       address & ((V8_INTPTR_C(1) << bit_position) - 1);
-   ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) ||
-          (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel));
-   uintptr_t* more_fine_grained_table =
-       reinterpret_cast<uintptr_t*>(table[index]);
-   if (more_fine_grained_table == kUnusedChunkTableEntry) {
-     if (create_as_needed == kDontCreateTables) return NULL;
-     int words_needed = 1 << kChunkTableBitsPerLevel;
-     if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) {
-       words_needed =
-           (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry;
-     }
-     more_fine_grained_table = new uintptr_t[words_needed];
-     for (int i = 0; i < words_needed; i++) {
-       more_fine_grained_table[i] = kUnusedChunkTableEntry;
-     }
-     table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table);
-   }
-   return AllocatedChunksFinder(
-       more_fine_grained_table,
-       more_fine_grained_address,
-       bit_position - kChunkTableBitsPerLevel,
-       create_as_needed);
- }
-
-
- uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];
-
-
- // -----------------------------------------------------------------------------
- // PagedSpace implementation
-
- PagedSpace::PagedSpace(intptr_t max_capacity,
-                        AllocationSpace id,
-                        Executability executable)
-     : Space(id, executable) {
-   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-                   * Page::kObjectAreaSize;
-   accounting_stats_.Clear();
-
-   allocation_info_.top = NULL;
-   allocation_info_.limit = NULL;
-
-   mc_forwarding_info_.top = NULL;
-   mc_forwarding_info_.limit = NULL;
- }
-
-
- bool PagedSpace::Setup(Address start, size_t size) {
-   if (HasBeenSetup()) return false;
-
-   int num_pages = 0;
-   // Try to use the virtual memory range passed to us.  If it is too small to
-   // contain at least one page, ignore it and allocate instead.
-   int pages_in_chunk = PagesInChunk(start, size);
-   if (pages_in_chunk > 0) {
-     first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
-                                                Page::kPageSize * pages_in_chunk,
-                                                this, &num_pages);
-   } else {
-     int requested_pages =
-         Min(MemoryAllocator::kPagesPerChunk,
-             static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
-     first_page_ =
-         MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
-     if (!first_page_->is_valid()) return false;
-   }
-
-   // We are sure that the first page is valid and that we have at least one
-   // page.
-   ASSERT(first_page_->is_valid());
-   ASSERT(num_pages > 0);
-   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
-   ASSERT(Capacity() <= max_capacity_);
-
-   // Sequentially clear region marks in the newly allocated
-   // pages and cache the current last page in the space.
-   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
-     p->SetRegionMarks(Page::kAllRegionsCleanMarks);
-     last_page_ = p;
-   }
-
-   // Use first_page_ for allocation.
-   SetAllocationInfo(&allocation_info_, first_page_);
-
-   page_list_is_chunk_ordered_ = true;
-
-   return true;
- }
-
-
- bool PagedSpace::HasBeenSetup() {
-   return (Capacity() > 0);
- }
-
-
- void PagedSpace::TearDown() {
-   MemoryAllocator::FreeAllPages(this);
-   first_page_ = NULL;
-   accounting_stats_.Clear();
- }
-
-
- #ifdef ENABLE_HEAP_PROTECTION
-
- void PagedSpace::Protect() {
-   Page* page = first_page_;
-   while (page->is_valid()) {
-     MemoryAllocator::ProtectChunkFromPage(page);
-     page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
-   }
- }
-
-
- void PagedSpace::Unprotect() {
-   Page* page = first_page_;
-   while (page->is_valid()) {
-     MemoryAllocator::UnprotectChunkFromPage(page);
-     page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page();
-   }
- }
-
- #endif
-
-
- void PagedSpace::MarkAllPagesClean() {
-   PageIterator it(this, PageIterator::ALL_PAGES);
-   while (it.has_next()) {
-     it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
-   }
- }
-
-
- MaybeObject* PagedSpace::FindObject(Address addr) {
-   // Note: this function can only be called before or after mark-compact GC
-   // because it accesses map pointers.
-   ASSERT(!MarkCompactCollector::in_use());
-
-   if (!Contains(addr)) return Failure::Exception();
-
-   Page* p = Page::FromAddress(addr);
-   ASSERT(IsUsed(p));
-   Address cur = p->ObjectAreaStart();
-   Address end = p->AllocationTop();
-   while (cur < end) {
-     HeapObject* obj = HeapObject::FromAddress(cur);
-     Address next = cur + obj->Size();
-     if ((cur <= addr) && (addr < next)) return obj;
-     cur = next;
-   }
-
-   UNREACHABLE();
-   return Failure::Exception();
- }
-
-
- bool PagedSpace::IsUsed(Page* page) {
-   PageIterator it(this, PageIterator::PAGES_IN_USE);
-   while (it.has_next()) {
-     if (page == it.next()) return true;
-   }
-   return false;
- }
-
-
- void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
-   alloc_info->top = p->ObjectAreaStart();
-   alloc_info->limit = p->ObjectAreaEnd();
-   ASSERT(alloc_info->VerifyPagedAllocation());
- }
-
-
- void PagedSpace::MCResetRelocationInfo() {
-   // Set page indexes.
-   int i = 0;
-   PageIterator it(this, PageIterator::ALL_PAGES);
-   while (it.has_next()) {
-     Page* p = it.next();
-     p->mc_page_index = i++;
-   }
-
-   // Set mc_forwarding_info_ to the first page in the space.
-   SetAllocationInfo(&mc_forwarding_info_, first_page_);
-   // All the bytes in the space are 'available'.  We will rediscover
-   // allocated and wasted bytes during GC.
-   accounting_stats_.Reset();
- }
-
-
- int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
- #ifdef DEBUG
-   // The Contains function considers the address at the beginning of a
-   // page in the page, MCSpaceOffsetForAddress considers it is in the
-   // previous page.
-   if (Page::IsAlignedToPageSize(addr)) {
-     ASSERT(Contains(addr - kPointerSize));
-   } else {
-     ASSERT(Contains(addr));
-   }
- #endif
-
-   // If addr is at the end of a page, it belongs to previous page
-   Page* p = Page::IsAlignedToPageSize(addr)
-             ? Page::FromAllocationTop(addr)
-             : Page::FromAddress(addr);
-   int index = p->mc_page_index;
-   return (index * Page::kPageSize) + p->Offset(addr);
- }
-
-
- // Slow case for reallocating and promoting objects during a compacting
- // collection.  This function is not space-specific.
- HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
-   Page* current_page = TopPageOf(mc_forwarding_info_);
-   if (!current_page->next_page()->is_valid()) {
-     if (!Expand(current_page)) {
-       return NULL;
-     }
-   }
-
-   // There are surely more pages in the space now.
-   ASSERT(current_page->next_page()->is_valid());
-   // We do not add the top of page block for current page to the space's
-   // free list---the block may contain live objects so we cannot write
-   // bookkeeping information to it.  Instead, we will recover top of page
-   // blocks when we move objects to their new locations.
-   //
-   // We do however write the allocation pointer to the page.  The encoding
-   // of forwarding addresses is as an offset in terms of live bytes, so we
-   // need quick access to the allocation top of each page to decode
-   // forwarding addresses.
-   current_page->SetAllocationWatermark(mc_forwarding_info_.top);
-   current_page->next_page()->InvalidateWatermark(true);
-   SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
-   return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
- }
-
-
- bool PagedSpace::Expand(Page* last_page) {
-   ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-   ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-
-   if (Capacity() == max_capacity_) return false;
-
-   ASSERT(Capacity() < max_capacity_);
-   // Last page must be valid and its next page is invalid.
-   ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
-
-   int available_pages =
-       static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
-   // We don't want to have to handle small chunks near the end so if there are
-   // not kPagesPerChunk pages available without exceeding the max capacity then
-   // act as if memory has run out.
-   if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
-
-   int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
-   Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
-   if (!p->is_valid()) return false;
-
-   accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
-   ASSERT(Capacity() <= max_capacity_);
-
-   MemoryAllocator::SetNextPage(last_page, p);
-
-   // Sequentially clear region marks of new pages and and cache the
-   // new last page in the space.
-   while (p->is_valid()) {
-     p->SetRegionMarks(Page::kAllRegionsCleanMarks);
-     last_page_ = p;
-     p = p->next_page();
-   }
-
-   return true;
- }
-
-
- #ifdef DEBUG
- int PagedSpace::CountTotalPages() {
-   int count = 0;
-   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
-     count++;
-   }
-   return count;
- }
- #endif
-
-
- void PagedSpace::Shrink() {
-   if (!page_list_is_chunk_ordered_) {
-     // We can't shrink space if pages is not chunk-ordered
-     // (see comment for class MemoryAllocator for definition).
-     return;
-   }
-
-   // Release half of free pages.
-   Page* top_page = AllocationTopPage();
-   ASSERT(top_page->is_valid());
-
-   // Count the number of pages we would like to free.
-   int pages_to_free = 0;
-   for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
-     pages_to_free++;
-   }
-
-   // Free pages after top_page.
-   Page* p = MemoryAllocator::FreePages(top_page->next_page());
-   MemoryAllocator::SetNextPage(top_page, p);
-
-   // Find out how many pages we failed to free and update last_page_.
-   // Please note pages can only be freed in whole chunks.
-   last_page_ = top_page;
-   for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
-     pages_to_free--;
-     last_page_ = p;
-   }
-
-   accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
-   ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
- }
-
-
- bool PagedSpace::EnsureCapacity(int capacity) {
-   if (Capacity() >= capacity) return true;
-
-   // Start from the allocation top and loop to the last page in the space.
-   Page* last_page = AllocationTopPage();
-   Page* next_page = last_page->next_page();
-   while (next_page->is_valid()) {
-     last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
-     next_page = last_page->next_page();
-   }
-
-   // Expand the space until it has the required capacity or expansion fails.
-   do {
-     if (!Expand(last_page)) return false;
-     ASSERT(last_page->next_page()->is_valid());
-     last_page =
-         MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
-   } while (Capacity() < capacity);
-
-   return true;
- }
-
-
- #ifdef DEBUG
- void PagedSpace::Print() { }
- #endif
-
-
- #ifdef DEBUG
- // We do not assume that the PageIterator works, because it depends on the
- // invariants we are checking during verification.
- void PagedSpace::Verify(ObjectVisitor* visitor) {
-   // The allocation pointer should be valid, and it should be in a page in the
-   // space.
-   ASSERT(allocation_info_.VerifyPagedAllocation());
-   Page* top_page = Page::FromAllocationTop(allocation_info_.top);
-   ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
-
-   // Loop over all the pages.
-   bool above_allocation_top = false;
-   Page* current_page = first_page_;
-   while (current_page->is_valid()) {
-     if (above_allocation_top) {
-       // We don't care what's above the allocation top.
-     } else {
-       Address top = current_page->AllocationTop();
-       if (current_page == top_page) {
-         ASSERT(top == allocation_info_.top);
-         // The next page will be above the allocation top.
-         above_allocation_top = true;
-       }
-
-       // It should be packed with objects from the bottom to the top.
-       Address current = current_page->ObjectAreaStart();
-       while (current < top) {
-         HeapObject* object = HeapObject::FromAddress(current);
-
-         // The first word should be a map, and we expect all map pointers to
-         // be in map space.
-         Map* map = object->map();
-         ASSERT(map->IsMap());
-         ASSERT(Heap::map_space()->Contains(map));
-
-         // Perform space-specific object verification.
-         VerifyObject(object);
-
-         // The object itself should look OK.
-         object->Verify();
-
-         // All the interior pointers should be contained in the heap and
-         // have page regions covering intergenerational references should be
-         // marked dirty.
-         int size = object->Size();
-         object->IterateBody(map->instance_type(), size, visitor);
-
-         current += size;
-       }
-
-       // The allocation pointer should not be in the middle of an object.
-       ASSERT(current == top);
-     }
-
-     current_page = current_page->next_page();
-   }
- }
- #endif
-
-
- // -----------------------------------------------------------------------------
- // NewSpace implementation
-
-
- bool NewSpace::Setup(Address start, int size) {
-   // Setup new space based on the preallocated memory block defined by
-   // start and size. The provided space is divided into two semi-spaces.
-   // To support fast containment testing in the new space, the size of
-   // this chunk must be a power of two and it must be aligned to its size.
-   int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
-   int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();
-
-   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
-   ASSERT(IsPowerOf2(maximum_semispace_capacity));
-
-   // Allocate and setup the histogram arrays if necessary.
- #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-
- #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
-                        promoted_histogram_[name].set_name(#name);
-   INSTANCE_TYPE_LIST(SET_NAME)
- #undef SET_NAME
- #endif
-
-   ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
-   ASSERT(IsAddressAligned(start, size, 0));
-
-   if (!to_space_.Setup(start,
-                        initial_semispace_capacity,
-                        maximum_semispace_capacity)) {
-     return false;
-   }
-   if (!from_space_.Setup(start + maximum_semispace_capacity,
-                          initial_semispace_capacity,
-                          maximum_semispace_capacity)) {
-     return false;
-   }
-
-   start_ = start;
-   address_mask_ = ~(size - 1);
-   object_mask_ = address_mask_ | kHeapObjectTagMask;
-   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
-
-   allocation_info_.top = to_space_.low();
-   allocation_info_.limit = to_space_.high();
-   mc_forwarding_info_.top = NULL;
-   mc_forwarding_info_.limit = NULL;
-
-   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-   return true;
- }
-
-
- void NewSpace::TearDown() {
- #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-   if (allocated_histogram_) {
-     DeleteArray(allocated_histogram_);
-     allocated_histogram_ = NULL;
-   }
-   if (promoted_histogram_) {
-     DeleteArray(promoted_histogram_);
-     promoted_histogram_ = NULL;
-   }
- #endif
-
-   start_ = NULL;
-   allocation_info_.top = NULL;
-   allocation_info_.limit = NULL;
-   mc_forwarding_info_.top = NULL;
-   mc_forwarding_info_.limit = NULL;
-
-   to_space_.TearDown();
-   from_space_.TearDown();
- }
-
-
- #ifdef ENABLE_HEAP_PROTECTION
-
- void NewSpace::Protect() {
-   MemoryAllocator::Protect(ToSpaceLow(), Capacity());
-   MemoryAllocator::Protect(FromSpaceLow(), Capacity());
- }
-
-
- void NewSpace::Unprotect() {
-   MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(),
-                              to_space_.executable());
-   MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(),
-                              from_space_.executable());
- }
-
- #endif
-
-
- void NewSpace::Flip() {
-   SemiSpace tmp = from_space_;
-   from_space_ = to_space_;
-   to_space_ = tmp;
- }
-
-
- void NewSpace::Grow() {
-   ASSERT(Capacity() < MaximumCapacity());
-   if (to_space_.Grow()) {
-     // Only grow from space if we managed to grow to space.
-     if (!from_space_.Grow()) {
-       // If we managed to grow to space but couldn't grow from space,
-       // attempt to shrink to space.
-       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
-         // We are in an inconsistent state because we could not
-         // commit/uncommit memory from new space.
-         V8::FatalProcessOutOfMemory("Failed to grow new space.");
-       }
-     }
-   }
-   allocation_info_.limit = to_space_.high();
-   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- }
-
-
- void NewSpace::Shrink() {
-   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
-   int rounded_new_capacity =
-       RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
-   if (rounded_new_capacity < Capacity() &&
-       to_space_.ShrinkTo(rounded_new_capacity))  {
-     // Only shrink from space if we managed to shrink to space.
-     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
-       // If we managed to shrink to space but couldn't shrink from
-       // space, attempt to grow to space again.
-       if (!to_space_.GrowTo(from_space_.Capacity())) {
-         // We are in an inconsistent state because we could not
-         // commit/uncommit memory from new space.
-         V8::FatalProcessOutOfMemory("Failed to shrink new space.");
-       }
-     }
-   }
-   allocation_info_.limit = to_space_.high();
-   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- }
-
-
- void NewSpace::ResetAllocationInfo() {
-   allocation_info_.top = to_space_.low();
-   allocation_info_.limit = to_space_.high();
-   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- }
-
-
- void NewSpace::MCResetRelocationInfo() {
-   mc_forwarding_info_.top = from_space_.low();
-   mc_forwarding_info_.limit = from_space_.high();
-   ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
- }
-
-
- void NewSpace::MCCommitRelocationInfo() {
-   // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
-   // valid allocation info for the to space.
-   allocation_info_.top = mc_forwarding_info_.top;
-   allocation_info_.limit = to_space_.high();
-   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- }
-
-
- #ifdef DEBUG
- // We do not use the SemispaceIterator because verification doesn't assume
- // that it works (it depends on the invariants we are checking).
- void NewSpace::Verify() {
-   // The allocation pointer should be in the space or at the very end.
-   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
-   // There should be objects packed in from the low address up to the
-   // allocation pointer.
-   Address current = to_space_.low();
-   while (current < top()) {
-     HeapObject* object = HeapObject::FromAddress(current);
-
-     // The first word should be a map, and we expect all map pointers to
-     // be in map space.
-     Map* map = object->map();
-     ASSERT(map->IsMap());
-     ASSERT(Heap::map_space()->Contains(map));
-
-     // The object should not be code or a map.
1501
- ASSERT(!object->IsMap());
1502
- ASSERT(!object->IsCode());
1503
-
1504
- // The object itself should look OK.
1505
- object->Verify();
1506
-
1507
- // All the interior pointers should be contained in the heap.
1508
- VerifyPointersVisitor visitor;
1509
- int size = object->Size();
1510
- object->IterateBody(map->instance_type(), size, &visitor);
1511
-
1512
- current += size;
1513
- }
1514
-
1515
- // The allocation pointer should not be in the middle of an object.
1516
- ASSERT(current == top());
1517
- }
1518
- #endif
1519
-
1520
-
1521
- bool SemiSpace::Commit() {
1522
- ASSERT(!is_committed());
1523
- if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
1524
- return false;
1525
- }
1526
- committed_ = true;
1527
- return true;
1528
- }
1529
-
1530
-
1531
- bool SemiSpace::Uncommit() {
1532
- ASSERT(is_committed());
1533
- if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
1534
- return false;
1535
- }
1536
- committed_ = false;
1537
- return true;
1538
- }
1539
-
1540
-
1541
- // -----------------------------------------------------------------------------
1542
- // SemiSpace implementation
1543
-
1544
- bool SemiSpace::Setup(Address start,
1545
- int initial_capacity,
1546
- int maximum_capacity) {
1547
- // Creates a space in the young generation. The constructor does not
1548
- // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1549
- // memory of size 'capacity' when set up, and does not grow or shrink
1550
- // otherwise. In the mark-compact collector, the memory region of the from
1551
- // space is used as the marking stack. It requires contiguous memory
1552
- // addresses.
1553
- initial_capacity_ = initial_capacity;
1554
- capacity_ = initial_capacity;
1555
- maximum_capacity_ = maximum_capacity;
1556
- committed_ = false;
1557
-
1558
- start_ = start;
1559
- address_mask_ = ~(maximum_capacity - 1);
1560
- object_mask_ = address_mask_ | kHeapObjectTagMask;
1561
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1562
- age_mark_ = start_;
1563
-
1564
- return Commit();
1565
- }
1566
-
1567
-
1568
- void SemiSpace::TearDown() {
1569
- start_ = NULL;
1570
- capacity_ = 0;
1571
- }
1572
-
1573
-
1574
- bool SemiSpace::Grow() {
1575
- // Double the semispace size but only up to maximum capacity.
1576
- int maximum_extra = maximum_capacity_ - capacity_;
1577
- int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
1578
- maximum_extra);
1579
- if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
1580
- return false;
1581
- }
1582
- capacity_ += extra;
1583
- return true;
1584
- }
1585
-
1586
-
1587
- bool SemiSpace::GrowTo(int new_capacity) {
1588
- ASSERT(new_capacity <= maximum_capacity_);
1589
- ASSERT(new_capacity > capacity_);
1590
- size_t delta = new_capacity - capacity_;
1591
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1592
- if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
1593
- return false;
1594
- }
1595
- capacity_ = new_capacity;
1596
- return true;
1597
- }
1598
-
1599
-
1600
- bool SemiSpace::ShrinkTo(int new_capacity) {
1601
- ASSERT(new_capacity >= initial_capacity_);
1602
- ASSERT(new_capacity < capacity_);
1603
- size_t delta = capacity_ - new_capacity;
1604
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1605
- if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
1606
- return false;
1607
- }
1608
- capacity_ = new_capacity;
1609
- return true;
1610
- }
1611
-
1612
-
1613
- #ifdef DEBUG
1614
- void SemiSpace::Print() { }
1615
-
1616
-
1617
- void SemiSpace::Verify() { }
1618
- #endif
-
-
- // -----------------------------------------------------------------------------
- // SemiSpaceIterator implementation.
- SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
-   Initialize(space, space->bottom(), space->top(), NULL);
- }
-
-
- SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
-                                      HeapObjectCallback size_func) {
-   Initialize(space, space->bottom(), space->top(), size_func);
- }
-
-
- SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
-   Initialize(space, start, space->top(), NULL);
- }
-
-
- void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
-                                    Address end,
-                                    HeapObjectCallback size_func) {
-   ASSERT(space->ToSpaceContains(start));
-   ASSERT(space->ToSpaceLow() <= end
-          && end <= space->ToSpaceHigh());
-   space_ = &space->to_space_;
-   current_ = start;
-   limit_ = end;
-   size_func_ = size_func;
- }
-
-
- #ifdef DEBUG
- // A static array of histogram info for each type.
- static HistogramInfo heap_histograms[LAST_TYPE+1];
- static JSObject::SpillInformation js_spill_information;
-
- // heap_histograms is shared, always clear it before using it.
- static void ClearHistograms() {
-   // We reset the name each time, though it hasn't changed.
- #define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
-   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
- #undef DEF_TYPE_NAME
-
- #define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
-   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
- #undef CLEAR_HISTOGRAM
-
-   js_spill_information.Clear();
- }
-
-
- static int code_kind_statistics[Code::NUMBER_OF_KINDS];
-
-
- static void ClearCodeKindStatistics() {
-   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
-     code_kind_statistics[i] = 0;
-   }
- }
-
-
- static void ReportCodeKindStatistics() {
-   const char* table[Code::NUMBER_OF_KINDS] = { NULL };
-
- #define CASE(name) \
-   case Code::name: table[Code::name] = #name; \
-   break
-
-   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
-     switch (static_cast<Code::Kind>(i)) {
-       CASE(FUNCTION);
-       CASE(OPTIMIZED_FUNCTION);
-       CASE(STUB);
-       CASE(BUILTIN);
-       CASE(LOAD_IC);
-       CASE(KEYED_LOAD_IC);
-       CASE(STORE_IC);
-       CASE(KEYED_STORE_IC);
-       CASE(CALL_IC);
-       CASE(KEYED_CALL_IC);
-       CASE(BINARY_OP_IC);
-       CASE(TYPE_RECORDING_BINARY_OP_IC);
-       CASE(COMPARE_IC);
-     }
-   }
-
- #undef CASE
-
-   PrintF("\n Code kind histograms: \n");
-   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
-     if (code_kind_statistics[i] > 0) {
-       PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
-     }
-   }
-   PrintF("\n");
- }
-
-
- static int CollectHistogramInfo(HeapObject* obj) {
-   InstanceType type = obj->map()->instance_type();
-   ASSERT(0 <= type && type <= LAST_TYPE);
-   ASSERT(heap_histograms[type].name() != NULL);
-   heap_histograms[type].increment_number(1);
-   heap_histograms[type].increment_bytes(obj->Size());
-
-   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
-     JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
-   }
-
-   return obj->Size();
- }
-
-
- static void ReportHistogram(bool print_spill) {
-   PrintF("\n Object Histogram:\n");
-   for (int i = 0; i <= LAST_TYPE; i++) {
-     if (heap_histograms[i].number() > 0) {
-       PrintF(" %-34s%10d (%10d bytes)\n",
-              heap_histograms[i].name(),
-              heap_histograms[i].number(),
-              heap_histograms[i].bytes());
-     }
-   }
-   PrintF("\n");
-
-   // Summarize string types.
-   int string_number = 0;
-   int string_bytes = 0;
- #define INCREMENT(type, size, name, camel_name) \
-   string_number += heap_histograms[type].number(); \
-   string_bytes += heap_histograms[type].bytes();
-   STRING_TYPE_LIST(INCREMENT)
- #undef INCREMENT
-   if (string_number > 0) {
-     PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
-            string_bytes);
-   }
-
-   if (FLAG_collect_heap_spill_statistics && print_spill) {
-     js_spill_information.Print();
-   }
- }
- #endif // DEBUG
-
-
- // Support for statistics gathering for --heap-stats and --log-gc.
- #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- void NewSpace::ClearHistograms() {
-   for (int i = 0; i <= LAST_TYPE; i++) {
-     allocated_histogram_[i].clear();
-     promoted_histogram_[i].clear();
-   }
- }
-
- // Because the copying collector does not touch garbage objects, we iterate
- // the new space before a collection to get a histogram of allocated objects.
- // This only happens (1) when compiled with DEBUG and the --heap-stats flag is
- // set, or (2) when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
- // flag is set.
- void NewSpace::CollectStatistics() {
-   ClearHistograms();
-   SemiSpaceIterator it(this);
-   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
-     RecordAllocation(obj);
- }
-
-
- #ifdef ENABLE_LOGGING_AND_PROFILING
- static void DoReportStatistics(HistogramInfo* info, const char* description) {
-   LOG(HeapSampleBeginEvent("NewSpace", description));
-   // Lump all the string types together.
-   int string_number = 0;
-   int string_bytes = 0;
- #define INCREMENT(type, size, name, camel_name) \
-   string_number += info[type].number(); \
-   string_bytes += info[type].bytes();
-   STRING_TYPE_LIST(INCREMENT)
- #undef INCREMENT
-   if (string_number > 0) {
-     LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
-   }
-
-   // Then do the other types.
-   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
-     if (info[i].number() > 0) {
-       LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
-                               info[i].bytes()));
-     }
-   }
-   LOG(HeapSampleEndEvent("NewSpace", description));
- }
- #endif // ENABLE_LOGGING_AND_PROFILING
-
-
- void NewSpace::ReportStatistics() {
- #ifdef DEBUG
-   if (FLAG_heap_stats) {
-     float pct = static_cast<float>(Available()) / Capacity();
-     PrintF(" capacity: %" V8_PTR_PREFIX "d"
-            ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-            Capacity(), Available(), static_cast<int>(pct*100));
-     PrintF("\n Object Histogram:\n");
-     for (int i = 0; i <= LAST_TYPE; i++) {
-       if (allocated_histogram_[i].number() > 0) {
-         PrintF(" %-34s%10d (%10d bytes)\n",
-                allocated_histogram_[i].name(),
-                allocated_histogram_[i].number(),
-                allocated_histogram_[i].bytes());
-       }
-     }
-     PrintF("\n");
-   }
- #endif // DEBUG
-
- #ifdef ENABLE_LOGGING_AND_PROFILING
-   if (FLAG_log_gc) {
-     DoReportStatistics(allocated_histogram_, "allocated");
-     DoReportStatistics(promoted_histogram_, "promoted");
-   }
- #endif // ENABLE_LOGGING_AND_PROFILING
- }
-
-
- void NewSpace::RecordAllocation(HeapObject* obj) {
-   InstanceType type = obj->map()->instance_type();
-   ASSERT(0 <= type && type <= LAST_TYPE);
-   allocated_histogram_[type].increment_number(1);
-   allocated_histogram_[type].increment_bytes(obj->Size());
- }
-
-
- void NewSpace::RecordPromotion(HeapObject* obj) {
-   InstanceType type = obj->map()->instance_type();
-   ASSERT(0 <= type && type <= LAST_TYPE);
-   promoted_histogram_[type].increment_number(1);
-   promoted_histogram_[type].increment_bytes(obj->Size());
- }
- #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-
-
- // -----------------------------------------------------------------------------
- // Free lists for old object spaces implementation
-
- void FreeListNode::set_size(int size_in_bytes) {
-   ASSERT(size_in_bytes > 0);
-   ASSERT(IsAligned(size_in_bytes, kPointerSize));
-
-   // We write a map and possibly size information to the block. If the block
-   // is big enough to be a ByteArray with at least one extra word (the next
-   // pointer), we set its map to be the byte array map and its size to an
-   // appropriate array length for the desired size from HeapObject::Size().
-   // If the block is too small (e.g., one or two words) to hold both a size
-   // field and a next pointer, we give it a filler map that gives it the
-   // correct size.
-   if (size_in_bytes > ByteArray::kHeaderSize) {
-     set_map(Heap::raw_unchecked_byte_array_map());
-     // Can't use ByteArray::cast because it fails during deserialization.
-     ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
-     this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
-   } else if (size_in_bytes == kPointerSize) {
-     set_map(Heap::raw_unchecked_one_pointer_filler_map());
-   } else if (size_in_bytes == 2 * kPointerSize) {
-     set_map(Heap::raw_unchecked_two_pointer_filler_map());
-   } else {
-     UNREACHABLE();
-   }
-   // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
-   // deserialization because the byte array map is not done yet.
- }
-
-
- Address FreeListNode::next() {
-   ASSERT(IsFreeListNode(this));
-   if (map() == Heap::raw_unchecked_byte_array_map()) {
-     ASSERT(Size() >= kNextOffset + kPointerSize);
-     return Memory::Address_at(address() + kNextOffset);
-   } else {
-     return Memory::Address_at(address() + kPointerSize);
-   }
- }
-
-
- void FreeListNode::set_next(Address next) {
-   ASSERT(IsFreeListNode(this));
-   if (map() == Heap::raw_unchecked_byte_array_map()) {
-     ASSERT(Size() >= kNextOffset + kPointerSize);
-     Memory::Address_at(address() + kNextOffset) = next;
-   } else {
-     Memory::Address_at(address() + kPointerSize) = next;
-   }
- }
-
-
- OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
-   Reset();
- }
-
-
- void OldSpaceFreeList::Reset() {
-   available_ = 0;
-   for (int i = 0; i < kFreeListsLength; i++) {
-     free_[i].head_node_ = NULL;
-   }
-   needs_rebuild_ = false;
-   finger_ = kHead;
-   free_[kHead].next_size_ = kEnd;
- }
-
-
- void OldSpaceFreeList::RebuildSizeList() {
-   ASSERT(needs_rebuild_);
-   int cur = kHead;
-   for (int i = cur + 1; i < kFreeListsLength; i++) {
-     if (free_[i].head_node_ != NULL) {
-       free_[cur].next_size_ = i;
-       cur = i;
-     }
-   }
-   free_[cur].next_size_ = kEnd;
-   needs_rebuild_ = false;
- }
-
-
- int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
- #ifdef DEBUG
-   MemoryAllocator::ZapBlock(start, size_in_bytes);
- #endif
-   FreeListNode* node = FreeListNode::FromAddress(start);
-   node->set_size(size_in_bytes);
-
-   // We don't use the freelists in compacting mode. This makes it more like a
-   // GC that only has mark-sweep-compact and doesn't have a mark-sweep
-   // collector.
-   if (FLAG_always_compact) {
-     return size_in_bytes;
-   }
-
-   // Early return to drop too-small blocks on the floor (one or two word
-   // blocks cannot hold a map pointer, a size field, and a pointer to the
-   // next block in the free list).
-   if (size_in_bytes < kMinBlockSize) {
-     return size_in_bytes;
-   }
-
-   // Insert other blocks at the head of an exact free list.
-   int index = size_in_bytes >> kPointerSizeLog2;
-   node->set_next(free_[index].head_node_);
-   free_[index].head_node_ = node->address();
-   available_ += size_in_bytes;
-   needs_rebuild_ = true;
-   return 0;
- }
-
-
- MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
-   ASSERT(0 < size_in_bytes);
-   ASSERT(size_in_bytes <= kMaxBlockSize);
-   ASSERT(IsAligned(size_in_bytes, kPointerSize));
-
-   if (needs_rebuild_) RebuildSizeList();
-   int index = size_in_bytes >> kPointerSizeLog2;
-   // Check for a perfect fit.
-   if (free_[index].head_node_ != NULL) {
-     FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
-     // If this was the last block of its size, remove the size.
-     if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
-     available_ -= size_in_bytes;
-     *wasted_bytes = 0;
-     ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-     return node;
-   }
-   // Search the size list for the best fit.
-   int prev = finger_ < index ? finger_ : kHead;
-   int cur = FindSize(index, &prev);
-   ASSERT(index < cur);
-   if (cur == kEnd) {
-     // No large enough size in list.
-     *wasted_bytes = 0;
-     return Failure::RetryAfterGC(owner_);
-   }
-   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-   int rem = cur - index;
-   int rem_bytes = rem << kPointerSizeLog2;
-   FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
-   ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
-   FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
-                                                      size_in_bytes);
-   // Distinguish the cases prev < rem < cur and rem <= prev < cur
-   // to avoid many redundant tests and calls to Insert/RemoveSize.
-   if (prev < rem) {
-     // Simple case: insert rem between prev and cur.
-     finger_ = prev;
-     free_[prev].next_size_ = rem;
-     // If this was the last block of size cur, remove the size.
-     if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
-       free_[rem].next_size_ = free_[cur].next_size_;
-     } else {
-       free_[rem].next_size_ = cur;
-     }
-     // Add the remainder block.
-     rem_node->set_size(rem_bytes);
-     rem_node->set_next(free_[rem].head_node_);
-     free_[rem].head_node_ = rem_node->address();
-   } else {
-     // If this was the last block of size cur, remove the size.
-     if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
-       finger_ = prev;
-       free_[prev].next_size_ = free_[cur].next_size_;
-     }
-     if (rem_bytes < kMinBlockSize) {
-       // Too-small remainder is wasted.
-       rem_node->set_size(rem_bytes);
-       available_ -= size_in_bytes + rem_bytes;
-       *wasted_bytes = rem_bytes;
-       return cur_node;
-     }
-     // Add the remainder block and, if needed, insert its size.
-     rem_node->set_size(rem_bytes);
-     rem_node->set_next(free_[rem].head_node_);
-     free_[rem].head_node_ = rem_node->address();
-     if (rem_node->next() == NULL) InsertSize(rem);
-   }
-   available_ -= size_in_bytes;
-   *wasted_bytes = 0;
-   return cur_node;
- }
-
-
- void OldSpaceFreeList::MarkNodes() {
-   for (int i = 0; i < kFreeListsLength; i++) {
-     Address cur_addr = free_[i].head_node_;
-     while (cur_addr != NULL) {
-       FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-       cur_addr = cur_node->next();
-       cur_node->SetMark();
-     }
-   }
- }
-
-
- #ifdef DEBUG
- bool OldSpaceFreeList::Contains(FreeListNode* node) {
-   for (int i = 0; i < kFreeListsLength; i++) {
-     Address cur_addr = free_[i].head_node_;
-     while (cur_addr != NULL) {
-       FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-       if (cur_node == node) return true;
-       cur_addr = cur_node->next();
-     }
-   }
-   return false;
- }
- #endif
-
-
- FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
-     : owner_(owner), object_size_(object_size) {
-   Reset();
- }
-
-
- void FixedSizeFreeList::Reset() {
-   available_ = 0;
-   head_ = tail_ = NULL;
- }
-
-
- void FixedSizeFreeList::Free(Address start) {
- #ifdef DEBUG
-   MemoryAllocator::ZapBlock(start, object_size_);
- #endif
-   // We only use the freelists with mark-sweep.
-   ASSERT(!MarkCompactCollector::IsCompacting());
-   FreeListNode* node = FreeListNode::FromAddress(start);
-   node->set_size(object_size_);
-   node->set_next(NULL);
-   if (head_ == NULL) {
-     tail_ = head_ = node->address();
-   } else {
-     FreeListNode::FromAddress(tail_)->set_next(node->address());
-     tail_ = node->address();
-   }
-   available_ += object_size_;
- }
-
-
- MaybeObject* FixedSizeFreeList::Allocate() {
-   if (head_ == NULL) {
-     return Failure::RetryAfterGC(owner_);
-   }
-
-   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-   FreeListNode* node = FreeListNode::FromAddress(head_);
-   head_ = node->next();
-   available_ -= object_size_;
-   return node;
- }
-
-
- void FixedSizeFreeList::MarkNodes() {
-   Address cur_addr = head_;
-   while (cur_addr != NULL && cur_addr != tail_) {
-     FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-     cur_addr = cur_node->next();
-     cur_node->SetMark();
-   }
- }
-
-
- // -----------------------------------------------------------------------------
- // OldSpace implementation
-
- void OldSpace::PrepareForMarkCompact(bool will_compact) {
-   // Call prepare of the super class.
-   PagedSpace::PrepareForMarkCompact(will_compact);
-
-   if (will_compact) {
-     // Reset relocation info. During a compacting collection, everything in
-     // the space is considered 'available' and we will rediscover live data
-     // and waste during the collection.
-     MCResetRelocationInfo();
-     ASSERT(Available() == Capacity());
-   } else {
-     // During a non-compacting collection, everything below the linear
-     // allocation pointer is considered allocated (everything above is
-     // available) and we will rediscover available and wasted bytes during
-     // the collection.
-     accounting_stats_.AllocateBytes(free_list_.available());
-     accounting_stats_.FillWastedBytes(Waste());
-   }
-
-   // Clear the free list before a full GC---it will be rebuilt afterward.
-   free_list_.Reset();
- }
-
-
- void OldSpace::MCCommitRelocationInfo() {
-   // Update fast allocation info.
-   allocation_info_.top = mc_forwarding_info_.top;
-   allocation_info_.limit = mc_forwarding_info_.limit;
-   ASSERT(allocation_info_.VerifyPagedAllocation());
-
-   // The space is compacted and we haven't yet built free lists or
-   // wasted any space.
-   ASSERT(Waste() == 0);
-   ASSERT(AvailableFree() == 0);
-
-   // Build the free list for the space.
-   int computed_size = 0;
-   PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
-   while (it.has_next()) {
-     Page* p = it.next();
-     // Space below the relocation pointer is allocated.
-     computed_size +=
-         static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
-     if (it.has_next()) {
-       // Free the space at the top of the page.
-       int extra_size =
-           static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
-       if (extra_size > 0) {
-         int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
-                                            extra_size);
-         // The bytes we have just "freed" to add to the free list were
-         // already accounted as available.
-         accounting_stats_.WasteBytes(wasted_bytes);
-       }
-     }
-   }
-
-   // Make sure the computed size - based on the used portion of the pages in
-   // use - matches the size obtained while computing forwarding addresses.
-   ASSERT(computed_size == Size());
- }
-
-
- bool NewSpace::ReserveSpace(int bytes) {
-   // We can't reliably unpack a partial snapshot that needs more new space
-   // than the minimum NewSpace size.
-   ASSERT(bytes <= InitialCapacity());
-   Address limit = allocation_info_.limit;
-   Address top = allocation_info_.top;
-   return limit - top >= bytes;
- }
-
-
- void PagedSpace::FreePages(Page* prev, Page* last) {
-   if (last == AllocationTopPage()) {
-     // Pages are already at the end of used pages.
-     return;
-   }
-
-   Page* first = NULL;
-
-   // Remove pages from the list.
-   if (prev == NULL) {
-     first = first_page_;
-     first_page_ = last->next_page();
-   } else {
-     first = prev->next_page();
-     MemoryAllocator::SetNextPage(prev, last->next_page());
-   }
-
-   // Attach it after the last page.
-   MemoryAllocator::SetNextPage(last_page_, first);
-   last_page_ = last;
-   MemoryAllocator::SetNextPage(last, NULL);
-
-   // Clean them up.
-   do {
-     first->InvalidateWatermark(true);
-     first->SetAllocationWatermark(first->ObjectAreaStart());
-     first->SetCachedAllocationWatermark(first->ObjectAreaStart());
-     first->SetRegionMarks(Page::kAllRegionsCleanMarks);
-     first = first->next_page();
-   } while (first != NULL);
-
-   // Order of pages in this space might no longer be consistent with
-   // order of pages in chunks.
-   page_list_is_chunk_ordered_ = false;
- }
-
-
- void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
-   const bool add_to_freelist = true;
-
-   // Mark used and unused pages to properly fill unused pages
-   // after reordering.
-   PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
-   Page* last_in_use = AllocationTopPage();
-   bool in_use = true;
-
-   while (all_pages_iterator.has_next()) {
-     Page* p = all_pages_iterator.next();
-     p->SetWasInUseBeforeMC(in_use);
-     if (p == last_in_use) {
-       // We passed the page containing the allocation top. All subsequent
-       // pages are not used.
-       in_use = false;
-     }
-   }
-
-   if (page_list_is_chunk_ordered_) return;
-
-   Page* new_last_in_use = Page::FromAddress(NULL);
-   MemoryAllocator::RelinkPageListInChunkOrder(this,
-                                               &first_page_,
-                                               &last_page_,
-                                               &new_last_in_use);
-   ASSERT(new_last_in_use->is_valid());
-
-   if (new_last_in_use != last_in_use) {
-     // The current allocation top points to a page which is now in the middle
-     // of the page list. We should move the allocation top forward to the new
-     // last used page so various object iterators will continue to work
-     // properly.
-     int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
-                                          last_in_use->AllocationTop());
-
-     last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-     if (size_in_bytes > 0) {
-       Address start = last_in_use->AllocationTop();
-       if (deallocate_blocks) {
-         accounting_stats_.AllocateBytes(size_in_bytes);
-         DeallocateBlock(start, size_in_bytes, add_to_freelist);
-       } else {
-         Heap::CreateFillerObjectAt(start, size_in_bytes);
-       }
-     }
-
-     // The new last-in-use page was in the middle of the list before
-     // sorting, so it is full.
-     SetTop(new_last_in_use->AllocationTop());
-
-     ASSERT(AllocationTopPage() == new_last_in_use);
-     ASSERT(AllocationTopPage()->WasInUseBeforeMC());
-   }
-
-   PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
-   while (pages_in_use_iterator.has_next()) {
-     Page* p = pages_in_use_iterator.next();
-     if (!p->WasInUseBeforeMC()) {
-       // An empty page is in the middle of a sequence of used pages.
-       // Allocate it as a whole and deallocate it immediately.
-       int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
-                                            p->ObjectAreaStart());
-
-       p->SetAllocationWatermark(p->ObjectAreaStart());
-       Address start = p->ObjectAreaStart();
-       if (deallocate_blocks) {
-         accounting_stats_.AllocateBytes(size_in_bytes);
-         DeallocateBlock(start, size_in_bytes, add_to_freelist);
-       } else {
-         Heap::CreateFillerObjectAt(start, size_in_bytes);
-       }
-     }
-   }
-
-   page_list_is_chunk_ordered_ = true;
- }
-
-
- void PagedSpace::PrepareForMarkCompact(bool will_compact) {
-   if (will_compact) {
-     RelinkPageListInChunkOrder(false);
-   }
- }
-
-
- bool PagedSpace::ReserveSpace(int bytes) {
-   Address limit = allocation_info_.limit;
-   Address top = allocation_info_.top;
-   if (limit - top >= bytes) return true;
-
-   // There wasn't enough space in the current page. Let's put the rest
-   // of the page on the free list and start a fresh page.
-   PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
-
-   Page* reserved_page = TopPageOf(allocation_info_);
-   int bytes_left_to_reserve = bytes;
-   while (bytes_left_to_reserve > 0) {
-     if (!reserved_page->next_page()->is_valid()) {
-       if (Heap::OldGenerationAllocationLimitReached()) return false;
-       Expand(reserved_page);
-     }
-     bytes_left_to_reserve -= Page::kPageSize;
-     reserved_page = reserved_page->next_page();
-     if (!reserved_page->is_valid()) return false;
-   }
-   ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
-   TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
-   SetAllocationInfo(&allocation_info_,
-                     TopPageOf(allocation_info_)->next_page());
-   return true;
- }
-
-
- // You have to call this last, since the implementation from PagedSpace
- // doesn't know that memory was 'promised' to large object space.
- bool LargeObjectSpace::ReserveSpace(int bytes) {
-   return Heap::OldGenerationSpaceAvailable() >= bytes;
- }
-
-
- // Slow case for normal allocation. Try in order: (1) allocate in the next
- // page in the space, (2) allocate off the space's free list, (3) expand the
- // space, (4) fail.
- HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
-   // Linear allocation in this space has failed. If there is another page
-   // in the space, move to that page and allocate there. This allocation
-   // should succeed (size_in_bytes should not be greater than a page's
-   // object area size).
-   Page* current_page = TopPageOf(allocation_info_);
-   if (current_page->next_page()->is_valid()) {
-     return AllocateInNextPage(current_page, size_in_bytes);
-   }
-
-   // There is no next page in this space. Try free list allocation unless that
-   // is currently forbidden.
-   if (!Heap::linear_allocation()) {
-     int wasted_bytes;
-     Object* result;
-     MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
-     accounting_stats_.WasteBytes(wasted_bytes);
-     if (maybe->ToObject(&result)) {
-       accounting_stats_.AllocateBytes(size_in_bytes);
-
-       HeapObject* obj = HeapObject::cast(result);
-       Page* p = Page::FromAddress(obj->address());
-
-       if (obj->address() >= p->AllocationWatermark()) {
-         // There should be no hole between the allocation watermark
-         // and the allocated object address.
-         // Memory above the allocation watermark was not swept and
-         // might contain garbage pointers to new space.
-         ASSERT(obj->address() == p->AllocationWatermark());
-         p->SetAllocationWatermark(obj->address() + size_in_bytes);
-       }
-
-       return obj;
-     }
-   }
-
-   // Free list allocation failed and there is no next page. Fail if we have
-   // hit the old generation size limit that should cause a garbage
-   // collection.
-   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
-     return NULL;
-   }
-
-   // Try to expand the space and allocate in the new next page.
-   ASSERT(!current_page->next_page()->is_valid());
-   if (Expand(current_page)) {
-     return AllocateInNextPage(current_page, size_in_bytes);
-   }
-
-   // Finally, fail.
-   return NULL;
- }
-
-
- void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-   current_page->SetAllocationWatermark(allocation_info_.top);
-   int free_size =
-       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
-   if (free_size > 0) {
-     int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
-     accounting_stats_.WasteBytes(wasted_bytes);
-   }
- }
-
-
- void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-   current_page->SetAllocationWatermark(allocation_info_.top);
-   int free_size =
-       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
-   // In the fixed space free list all the free list items have the right size.
-   // We use up the rest of the page while preserving this invariant.
-   while (free_size >= object_size_in_bytes_) {
-     free_list_.Free(allocation_info_.top);
-     allocation_info_.top += object_size_in_bytes_;
-     free_size -= object_size_in_bytes_;
-     accounting_stats_.WasteBytes(object_size_in_bytes_);
-   }
- }
-
-
- // Add the block at the top of the page to the space's free list, set the
- // allocation info to the next page (there is assumed to be one), and
- // allocate linearly there.
- HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
-                                          int size_in_bytes) {
-   ASSERT(current_page->next_page()->is_valid());
-   Page* next_page = current_page->next_page();
-   next_page->ClearGCFields();
-   PutRestOfCurrentPageOnFreeList(current_page);
-   SetAllocationInfo(&allocation_info_, next_page);
-   return AllocateLinearly(&allocation_info_, size_in_bytes);
- }
-
-
- void OldSpace::DeallocateBlock(Address start,
-                                int size_in_bytes,
-                                bool add_to_freelist) {
-   Free(start, size_in_bytes, add_to_freelist);
- }
-
-
- #ifdef DEBUG
- struct CommentStatistic {
-   const char* comment;
-   int size;
-   int count;
-   void Clear() {
-     comment = NULL;
-     size = 0;
-     count = 0;
-   }
- };
-
-
- // Must be small, since an iteration is used for lookup.
- const int kMaxComments = 64;
- static CommentStatistic comments_statistics[kMaxComments+1];
-
-
- void PagedSpace::ReportCodeStatistics() {
-   ReportCodeKindStatistics();
-   PrintF("Code comment statistics (\" [ comment-txt : size/ "
-          "count (average)\"):\n");
-   for (int i = 0; i <= kMaxComments; i++) {
-     const CommentStatistic& cs = comments_statistics[i];
-     if (cs.size > 0) {
-       PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
-              cs.size/cs.count);
-     }
-   }
-   PrintF("\n");
- }
-
-
- void PagedSpace::ResetCodeStatistics() {
-   ClearCodeKindStatistics();
-   for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
-   comments_statistics[kMaxComments].comment = "Unknown";
-   comments_statistics[kMaxComments].size = 0;
-   comments_statistics[kMaxComments].count = 0;
- }
-
-
- // Adds a comment to the 'comments_statistics' table. Performance OK as long
- // as 'kMaxComments' is small.
- static void EnterComment(const char* comment, int delta) {
-   // Do not count empty comments.
-   if (delta <= 0) return;
-   CommentStatistic* cs = &comments_statistics[kMaxComments];
-   // Search for a free or matching entry in 'comments_statistics': 'cs'
-   // points to the result.
-   for (int i = 0; i < kMaxComments; i++) {
-     if (comments_statistics[i].comment == NULL) {
-       cs = &comments_statistics[i];
-       cs->comment = comment;
-       break;
-     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
-       cs = &comments_statistics[i];
-       break;
-     }
-   }
-   // Update the entry for 'comment'.
-   cs->size += delta;
-   cs->count += 1;
- }
-
-
- // Call for each nested comment start (start marked with '[ xxx', end marked
- // with ']'). RelocIterator 'it' must point to a comment reloc info.
- static void CollectCommentStatistics(RelocIterator* it) {
-   ASSERT(!it->done());
-   ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
-   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
-   if (tmp[0] != '[') {
-     // Not a nested comment; skip.
-     return;
-   }
-
-   // Search for the end of the nested comment or a new nested comment.
-   const char* const comment_txt =
-       reinterpret_cast<const char*>(it->rinfo()->data());
-   const byte* prev_pc = it->rinfo()->pc();
-   int flat_delta = 0;
-   it->next();
-   while (true) {
-     // All nested comments must be terminated properly, and therefore exit
-     // from the loop.
-     ASSERT(!it->done());
-     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
-       const char* const txt =
-           reinterpret_cast<const char*>(it->rinfo()->data());
-       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
-       if (txt[0] == ']') break;  // End of nested comment.
-       // A new comment.
-       CollectCommentStatistics(it);
-       // Skip code that was covered by the previous comment.
-       prev_pc = it->rinfo()->pc();
-     }
-     it->next();
-   }
-   EnterComment(comment_txt, flat_delta);
- }
-
-
- // Collects code size statistics:
- // - by code kind
- // - by code comment
- void PagedSpace::CollectCodeStatistics() {
-   HeapObjectIterator obj_it(this);
-   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
-     if (obj->IsCode()) {
-       Code* code = Code::cast(obj);
-       code_kind_statistics[code->kind()] += code->Size();
-       RelocIterator it(code);
-       int delta = 0;
-       const byte* prev_pc = code->instruction_start();
-       while (!it.done()) {
-         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
-           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
-           CollectCommentStatistics(&it);
-           prev_pc = it.rinfo()->pc();
-         }
-         it.next();
-       }
-
-       ASSERT(code->instruction_start() <= prev_pc &&
-              prev_pc <= code->instruction_end());
-       delta += static_cast<int>(code->instruction_end() - prev_pc);
-       EnterComment("NoComment", delta);
-     }
-   }
- }
-
-
- void OldSpace::ReportStatistics() {
-   int pct = static_cast<int>(Available() * 100 / Capacity());
-   PrintF(" capacity: %" V8_PTR_PREFIX "d"
-          ", waste: %" V8_PTR_PREFIX "d"
-          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-          Capacity(), Waste(), Available(), pct);
-
-   ClearHistograms();
-   HeapObjectIterator obj_it(this);
-   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
-     CollectHistogramInfo(obj);
-   ReportHistogram(true);
- }
- #endif
-
- // -----------------------------------------------------------------------------
- // FixedSpace implementation
-
- void FixedSpace::PrepareForMarkCompact(bool will_compact) {
-   // Call prepare of the super class.
-   PagedSpace::PrepareForMarkCompact(will_compact);
-
-   if (will_compact) {
-     // Reset relocation info.
-     MCResetRelocationInfo();
-
-     // During a compacting collection, everything in the space is considered
-     // 'available' (set by the call to MCResetRelocationInfo) and we will
-     // rediscover live and wasted bytes during the collection.
-     ASSERT(Available() == Capacity());
-   } else {
-     // During a non-compacting collection, everything below the linear
-     // allocation pointer except wasted top-of-page blocks is considered
-     // allocated and we will rediscover available bytes during the
-     // collection.
-     accounting_stats_.AllocateBytes(free_list_.available());
-   }
-
-   // Clear the free list before a full GC---it will be rebuilt afterward.
-   free_list_.Reset();
- }
-
-
- void FixedSpace::MCCommitRelocationInfo() {
-   // Update fast allocation info.
-   allocation_info_.top = mc_forwarding_info_.top;
-   allocation_info_.limit = mc_forwarding_info_.limit;
-   ASSERT(allocation_info_.VerifyPagedAllocation());
-
-   // The space is compacted and we haven't yet wasted any space.
-   ASSERT(Waste() == 0);
-
-   // Update allocation_top of each page in use and compute waste.
-   int computed_size = 0;
-   PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
-   while (it.has_next()) {
-     Page* page = it.next();
-     Address page_top = page->AllocationTop();
-     computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
-     if (it.has_next()) {
-       accounting_stats_.WasteBytes(
-           static_cast<int>(page->ObjectAreaEnd() - page_top));
-       page->SetAllocationWatermark(page_top);
-     }
-   }
-
-   // Make sure the computed size - based on the used portion of the
-   // pages in use - matches the size we adjust during allocation.
-   ASSERT(computed_size == Size());
- }
-
-
- // Slow case for normal allocation. Try in order: (1) allocate in the next
- // page in the space, (2) allocate off the space's free list, (3) expand the
- // space, (4) fail.
- HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
-   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-   // Linear allocation in this space has failed. If there is another page
-   // in the space, move to that page and allocate there. This allocation
-   // should succeed.
-   Page* current_page = TopPageOf(allocation_info_);
-   if (current_page->next_page()->is_valid()) {
-     return AllocateInNextPage(current_page, size_in_bytes);
-   }
-
-   // There is no next page in this space. Try free list allocation unless
-   // that is currently forbidden. The fixed space free list implicitly assumes
-   // that all free blocks are of the fixed size.
-   if (!Heap::linear_allocation()) {
-     Object* result;
-     MaybeObject* maybe = free_list_.Allocate();
-     if (maybe->ToObject(&result)) {
-       accounting_stats_.AllocateBytes(size_in_bytes);
-       HeapObject* obj = HeapObject::cast(result);
-       Page* p = Page::FromAddress(obj->address());
-
-       if (obj->address() >= p->AllocationWatermark()) {
-         // There should be no hole between the allocation watermark
-         // and the allocated object address.
-         // Memory above the allocation watermark was not swept and
-         // might contain garbage pointers to new space.
-         ASSERT(obj->address() == p->AllocationWatermark());
-         p->SetAllocationWatermark(obj->address() + size_in_bytes);
-       }
-
-       return obj;
-     }
-   }
-
-   // Free list allocation failed and there is no next page. Fail if we have
-   // hit the old generation size limit that should cause a garbage
-   // collection.
-   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
-     return NULL;
-   }
-
-   // Try to expand the space and allocate in the new next page.
-   ASSERT(!current_page->next_page()->is_valid());
-   if (Expand(current_page)) {
-     return AllocateInNextPage(current_page, size_in_bytes);
-   }
-
-   // Finally, fail.
-   return NULL;
- }
-
-
- // Move to the next page (there is assumed to be one) and allocate there.
- // The top of page block is always wasted, because it is too small to hold a
- // map.
- HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
-                                            int size_in_bytes) {
-   ASSERT(current_page->next_page()->is_valid());
-   ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
-   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-   Page* next_page = current_page->next_page();
-   next_page->ClearGCFields();
-   current_page->SetAllocationWatermark(allocation_info_.top);
-   accounting_stats_.WasteBytes(page_extra_);
-   SetAllocationInfo(&allocation_info_, next_page);
-   return AllocateLinearly(&allocation_info_, size_in_bytes);
- }
-
-
- void FixedSpace::DeallocateBlock(Address start,
-                                  int size_in_bytes,
-                                  bool add_to_freelist) {
-   // Free-list elements in fixed space are assumed to have a fixed size.
-   // We break the free block into chunks and add them to the free list
-   // individually.
-   int size = object_size_in_bytes();
-   ASSERT(size_in_bytes % size == 0);
-   Address end = start + size_in_bytes;
-   for (Address a = start; a < end; a += size) {
-     Free(a, add_to_freelist);
-   }
- }
-
-
- #ifdef DEBUG
- void FixedSpace::ReportStatistics() {
-   int pct = static_cast<int>(Available() * 100 / Capacity());
-   PrintF(" capacity: %" V8_PTR_PREFIX "d"
-          ", waste: %" V8_PTR_PREFIX "d"
-          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-          Capacity(), Waste(), Available(), pct);
-
-   ClearHistograms();
-   HeapObjectIterator obj_it(this);
-   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
-     CollectHistogramInfo(obj);
-   ReportHistogram(false);
- }
- #endif
-
-
- // -----------------------------------------------------------------------------
- // MapSpace implementation
-
- void MapSpace::PrepareForMarkCompact(bool will_compact) {
-   // Call prepare of the super class.
-   FixedSpace::PrepareForMarkCompact(will_compact);
-
-   if (will_compact) {
-     // Initialize map index entry.
-     int page_count = 0;
-     PageIterator it(this, PageIterator::ALL_PAGES);
-     while (it.has_next()) {
-       ASSERT_MAP_PAGE_INDEX(page_count);
-
-       Page* p = it.next();
-       ASSERT(p->mc_page_index == page_count);
-
-       page_addresses_[page_count++] = p->address();
-     }
-   }
- }
-
-
- #ifdef DEBUG
- void MapSpace::VerifyObject(HeapObject* object) {
-   // The object should be a map or a free-list node.
-   ASSERT(object->IsMap() || object->IsByteArray());
- }
- #endif
-
-
- // -----------------------------------------------------------------------------
- // GlobalPropertyCellSpace implementation
-
- #ifdef DEBUG
- void CellSpace::VerifyObject(HeapObject* object) {
-   // The object should be a global object property cell or a free-list node.
-   ASSERT(object->IsJSGlobalPropertyCell() ||
-          object->map() == Heap::two_pointer_filler_map());
- }
- #endif
-
-
- // -----------------------------------------------------------------------------
- // LargeObjectIterator
-
- LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
-   current_ = space->first_chunk_;
-   size_func_ = NULL;
- }
-
-
- LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
-                                          HeapObjectCallback size_func) {
-   current_ = space->first_chunk_;
-   size_func_ = size_func;
- }
-
-
- HeapObject* LargeObjectIterator::next() {
-   if (current_ == NULL) return NULL;
-
-   HeapObject* object = current_->GetObject();
-   current_ = current_->next();
-   return object;
- }
-
-
- // -----------------------------------------------------------------------------
- // LargeObjectChunk
-
- LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
-                                         Executability executable) {
-   size_t requested = ChunkSizeFor(size_in_bytes);
-   size_t size;
-   void* mem = MemoryAllocator::AllocateRawMemory(requested, &size, executable);
-   if (mem == NULL) return NULL;
-
-   // The start of the chunk may be overlaid with a page, so we have to
-   // make sure that the page flags fit in the size field.
-   ASSERT((size & Page::kPageFlagMask) == 0);
-
-   LOG(NewEvent("LargeObjectChunk", mem, size));
-   if (size < requested) {
-     MemoryAllocator::FreeRawMemory(mem, size, executable);
-     LOG(DeleteEvent("LargeObjectChunk", mem));
-     return NULL;
-   }
-
-   ObjectSpace space = (executable == EXECUTABLE)
-       ? kObjectSpaceCodeSpace
-       : kObjectSpaceLoSpace;
-   MemoryAllocator::PerformAllocationCallback(
-       space, kAllocationActionAllocate, size);
-
-   LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
-   chunk->size_ = size;
-   return chunk;
- }
-
-
- int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
-   int os_alignment = static_cast<int>(OS::AllocateAlignment());
-   if (os_alignment < Page::kPageSize) {
-     size_in_bytes += (Page::kPageSize - os_alignment);
-   }
-   return size_in_bytes + Page::kObjectStartOffset;
- }
-
- // -----------------------------------------------------------------------------
- // LargeObjectSpace
-
- LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
-     : Space(id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
-       first_chunk_(NULL),
-       size_(0),
-       page_count_(0),
-       objects_size_(0) {}
-
-
- bool LargeObjectSpace::Setup() {
-   first_chunk_ = NULL;
-   size_ = 0;
-   page_count_ = 0;
-   objects_size_ = 0;
-   return true;
- }
-
-
- void LargeObjectSpace::TearDown() {
-   while (first_chunk_ != NULL) {
-     LargeObjectChunk* chunk = first_chunk_;
-     first_chunk_ = first_chunk_->next();
-     LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
-     Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
-     Executability executable =
-         page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
-     ObjectSpace space = kObjectSpaceLoSpace;
-     if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
-     size_t size = chunk->size();
-     MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
-     MemoryAllocator::PerformAllocationCallback(
-         space, kAllocationActionFree, size);
-   }
-
-   size_ = 0;
-   page_count_ = 0;
-   objects_size_ = 0;
- }
-
-
- #ifdef ENABLE_HEAP_PROTECTION
-
- void LargeObjectSpace::Protect() {
-   LargeObjectChunk* chunk = first_chunk_;
-   while (chunk != NULL) {
-     MemoryAllocator::Protect(chunk->address(), chunk->size());
-     chunk = chunk->next();
-   }
- }
-
-
- void LargeObjectSpace::Unprotect() {
-   LargeObjectChunk* chunk = first_chunk_;
-   while (chunk != NULL) {
-     bool is_code = chunk->GetObject()->IsCode();
-     MemoryAllocator::Unprotect(chunk->address(), chunk->size(),
-                                is_code ? EXECUTABLE : NOT_EXECUTABLE);
-     chunk = chunk->next();
-   }
- }
-
- #endif
-
-
- MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
-                                                    int object_size,
-                                                    Executability executable) {
-   ASSERT(0 < object_size && object_size <= requested_size);
-
-   // Check if we want to force a GC before growing the old space further.
-   // If so, fail the allocation.
-   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
-     return Failure::RetryAfterGC(identity());
-   }
-
-   LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
-   if (chunk == NULL) {
-     return Failure::RetryAfterGC(identity());
-   }
-
-   size_ += static_cast<int>(chunk->size());
-   objects_size_ += requested_size;
-   page_count_++;
-   chunk->set_next(first_chunk_);
-   first_chunk_ = chunk;
-
-   // Initialize page header.
-   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
-   Address object_address = page->ObjectAreaStart();
-
-   // Clear the low order bit of the second word in the page to flag it as a
-   // large object page. If the chunk_size happened to be written there, its
-   // low order bit should already be clear.
-   page->SetIsLargeObjectPage(true);
-   page->SetIsPageExecutable(executable);
-   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
-   return HeapObject::FromAddress(object_address);
- }
-
-
- MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
-   ASSERT(0 < size_in_bytes);
-   return AllocateRawInternal(size_in_bytes,
-                              size_in_bytes,
-                              EXECUTABLE);
- }
-
-
- MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
-   ASSERT(0 < size_in_bytes);
-   return AllocateRawInternal(size_in_bytes,
-                              size_in_bytes,
-                              NOT_EXECUTABLE);
- }
-
-
- MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
-   ASSERT(0 < size_in_bytes);
-   return AllocateRawInternal(size_in_bytes,
-                              size_in_bytes,
-                              NOT_EXECUTABLE);
- }
-
-
- // GC support
- MaybeObject* LargeObjectSpace::FindObject(Address a) {
-   for (LargeObjectChunk* chunk = first_chunk_;
-        chunk != NULL;
-        chunk = chunk->next()) {
-     Address chunk_address = chunk->address();
-     if (chunk_address <= a && a < chunk_address + chunk->size()) {
-       return chunk->GetObject();
-     }
-   }
-   return Failure::Exception();
- }
-
-
- LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
-   // TODO(853): Change this implementation to only find executable
-   // chunks and use some kind of hash-based approach to speed it up.
-   for (LargeObjectChunk* chunk = first_chunk_;
-        chunk != NULL;
-        chunk = chunk->next()) {
-     Address chunk_address = chunk->address();
-     if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
-       return chunk;
-     }
-   }
-   return NULL;
- }
-
-
- void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
-   LargeObjectIterator it(this);
-   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-     // We only have code, sequential strings, or fixed arrays in large
-     // object space, and only fixed arrays can possibly contain pointers to
-     // the young generation.
-     if (object->IsFixedArray()) {
-       Page* page = Page::FromAddress(object->address());
-       uint32_t marks = page->GetRegionMarks();
-       uint32_t newmarks = Page::kAllRegionsCleanMarks;
-
-       if (marks != Page::kAllRegionsCleanMarks) {
-         // For a large page a single dirty mark corresponds to several
-         // regions (modulo 32). So we treat a large page as a sequence of
-         // normal pages of size Page::kPageSize having the same dirty marks
-         // and subsequently iterate dirty regions on each of these pages.
-         Address start = object->address();
-         Address end = page->ObjectAreaEnd();
-         Address object_end = start + object->Size();
-
-         // Iterate regions of the first normal page covering the object.
-         uint32_t first_region_number = page->GetRegionNumberForAddress(start);
-         newmarks |=
-             Heap::IterateDirtyRegions(marks >> first_region_number,
-                                       start,
-                                       end,
-                                       &Heap::IteratePointersInDirtyRegion,
-                                       copy_object) << first_region_number;
-
-         start = end;
-         end = start + Page::kPageSize;
-         while (end <= object_end) {
-           // Iterate the next 32 regions.
-           newmarks |=
-               Heap::IterateDirtyRegions(marks,
-                                         start,
-                                         end,
-                                         &Heap::IteratePointersInDirtyRegion,
-                                         copy_object);
-           start = end;
-           end = start + Page::kPageSize;
-         }
-
-         if (start != object_end) {
-           // Iterate the last piece of the object, which is less than
-           // Page::kPageSize.
-           newmarks |=
-               Heap::IterateDirtyRegions(marks,
-                                         start,
-                                         object_end,
-                                         &Heap::IteratePointersInDirtyRegion,
-                                         copy_object);
-         }
-
-         page->SetRegionMarks(newmarks);
-       }
-     }
-   }
- }
3099
-
3100
-
3101
- void LargeObjectSpace::FreeUnmarkedObjects() {
3102
- LargeObjectChunk* previous = NULL;
3103
- LargeObjectChunk* current = first_chunk_;
3104
- while (current != NULL) {
3105
- HeapObject* object = current->GetObject();
3106
- if (object->IsMarked()) {
3107
- object->ClearMark();
3108
- MarkCompactCollector::tracer()->decrement_marked_count();
3109
- previous = current;
3110
- current = current->next();
3111
- } else {
3112
- Page* page = Page::FromAddress(RoundUp(current->address(),
3113
- Page::kPageSize));
3114
- Executability executable =
3115
- page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
3116
- Address chunk_address = current->address();
3117
- size_t chunk_size = current->size();
3118
-
3119
- // Cut the chunk out from the chunk list.
3120
- current = current->next();
3121
- if (previous == NULL) {
3122
- first_chunk_ = current;
3123
- } else {
3124
- previous->set_next(current);
3125
- }
3126
-
3127
- // Free the chunk.
3128
- MarkCompactCollector::ReportDeleteIfNeeded(object);
3129
- LiveObjectList::ProcessNonLive(object);
3130
-
3131
- size_ -= static_cast<int>(chunk_size);
3132
- objects_size_ -= object->Size();
3133
- page_count_--;
3134
- ObjectSpace space = kObjectSpaceLoSpace;
3135
- if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
3136
- MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
3137
- MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
3138
- size_);
3139
- LOG(DeleteEvent("LargeObjectChunk", chunk_address));
3140
- }
3141
- }
3142
- }
3143
-
-
-bool LargeObjectSpace::Contains(HeapObject* object) {
-  Address address = object->address();
-  if (Heap::new_space()->Contains(address)) {
-    return false;
-  }
-  Page* page = Page::FromAddress(address);
-
-  SLOW_ASSERT(!page->IsLargeObjectPage()
-              || !FindObject(address)->IsFailure());
-
-  return page->IsLargeObjectPage();
-}
3157
-
-
-#ifdef DEBUG
-// We do not assume that the large object iterator works, because it depends
-// on the invariants we are checking during verification.
-void LargeObjectSpace::Verify() {
-  for (LargeObjectChunk* chunk = first_chunk_;
-       chunk != NULL;
-       chunk = chunk->next()) {
-    // Each chunk contains an object that starts at the large object page's
-    // object area start.
-    HeapObject* object = chunk->GetObject();
-    Page* page = Page::FromAddress(object->address());
-    ASSERT(object->address() == page->ObjectAreaStart());
-
-    // The first word should be a map, and we expect all map pointers to be
-    // in map space.
-    Map* map = object->map();
-    ASSERT(map->IsMap());
-    ASSERT(Heap::map_space()->Contains(map));
-
-    // We have only code, sequential strings, external strings
-    // (sequential strings that have been morphed into external
-    // strings), fixed arrays, and byte arrays in large object space.
-    ASSERT(object->IsCode() || object->IsSeqString() ||
-           object->IsExternalString() || object->IsFixedArray() ||
-           object->IsByteArray());
-
-    // The object itself should look OK.
-    object->Verify();
-
-    // Byte arrays and strings don't have interior pointers.
-    if (object->IsCode()) {
-      VerifyPointersVisitor code_visitor;
-      object->IterateBody(map->instance_type(),
-                          object->Size(),
-                          &code_visitor);
-    } else if (object->IsFixedArray()) {
-      // We loop over fixed arrays ourselves, rather than using the visitor,
-      // because the visitor doesn't support the start/offset iteration
-      // needed for IsRegionDirty.
-      FixedArray* array = FixedArray::cast(object);
-      for (int j = 0; j < array->length(); j++) {
-        Object* element = array->get(j);
-        if (element->IsHeapObject()) {
-          HeapObject* element_object = HeapObject::cast(element);
-          ASSERT(Heap::Contains(element_object));
-          ASSERT(element_object->map()->IsMap());
-          if (Heap::InNewSpace(element_object)) {
-            Address array_addr = object->address();
-            Address element_addr = array_addr + FixedArray::kHeaderSize +
-                j * kPointerSize;
-
-            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
-          }
-        }
-      }
-    }
-  }
-}
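In the fixed-array branch above, Verify recomputes each element slot's address by hand (array start + header + index * pointer width) so it can assert that the slot's region is marked dirty whenever the element points into new space. A sketch of that slot arithmetic under assumed layout constants (the real ones are FixedArray::kHeaderSize and kPointerSize):

    #include <stdint.h>

    // Assumed layout: a map word and a length word precede the elements.
    static const uintptr_t kAssumedHeaderSize  = 2 * sizeof(void*);
    static const uintptr_t kAssumedPointerSize = sizeof(void*);

    // Address of element slot 'index', mirroring the arithmetic in Verify.
    uintptr_t ElementSlotAddress(uintptr_t array_addr, int index) {
      return array_addr + kAssumedHeaderSize +
             static_cast<uintptr_t>(index) * kAssumedPointerSize;
    }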
3217
-
-
-void LargeObjectSpace::Print() {
-  LargeObjectIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
-    obj->Print();
-  }
-}
-
-
-void LargeObjectSpace::ReportStatistics() {
-  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
-  int num_objects = 0;
-  ClearHistograms();
-  LargeObjectIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
-    num_objects++;
-    CollectHistogramInfo(obj);
-  }
-
-  PrintF(" number of objects %d, "
-         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
-  if (num_objects > 0) ReportHistogram(false);
-}
-
-
-void LargeObjectSpace::CollectCodeStatistics() {
-  LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
-    if (obj->IsCode()) {
-      Code* code = Code::cast(obj);
-      code_kind_statistics[code->kind()] += code->Size();
-    }
-  }
-}
-#endif  // DEBUG
-
-} }  // namespace v8::internal