@shd101wyy/yo 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (339) hide show
  1. package/LICENSE.md +17 -0
  2. package/README.md +80 -0
  3. package/out/cjs/index.cjs +51 -0
  4. package/out/cjs/yo-cli.cjs +2158 -0
  5. package/out/esm/index.mjs +51 -0
  6. package/out/types/src/codegen/async/runtime.d.ts +2 -0
  7. package/out/types/src/codegen/async/state-code-gen.d.ts +10 -0
  8. package/out/types/src/codegen/async/state-machine.d.ts +13 -0
  9. package/out/types/src/codegen/c/collection.d.ts +3 -0
  10. package/out/types/src/codegen/codegen-c.d.ts +12 -0
  11. package/out/types/src/codegen/constants.d.ts +3 -0
  12. package/out/types/src/codegen/expressions/array.d.ts +4 -0
  13. package/out/types/src/codegen/expressions/generation.d.ts +11 -0
  14. package/out/types/src/codegen/expressions/index.d.ts +2 -0
  15. package/out/types/src/codegen/functions/collection.d.ts +5 -0
  16. package/out/types/src/codegen/functions/context.d.ts +57 -0
  17. package/out/types/src/codegen/functions/generation.d.ts +25 -0
  18. package/out/types/src/codegen/functions/index.d.ts +2 -0
  19. package/out/types/src/codegen/index.d.ts +20 -0
  20. package/out/types/src/codegen/parallelism/runtime.d.ts +2 -0
  21. package/out/types/src/codegen/types/collection.d.ts +8 -0
  22. package/out/types/src/codegen/types/generation.d.ts +13 -0
  23. package/out/types/src/codegen/types/index.d.ts +2 -0
  24. package/out/types/src/codegen/utils/fixup.d.ts +2 -0
  25. package/out/types/src/codegen/utils/index.d.ts +77 -0
  26. package/out/types/src/codegen/values/index.d.ts +1 -0
  27. package/out/types/src/emitter.d.ts +11 -0
  28. package/out/types/src/env.d.ts +85 -0
  29. package/out/types/src/error.d.ts +45 -0
  30. package/out/types/src/evaluator/async/await-analysis-types.d.ts +23 -0
  31. package/out/types/src/evaluator/async/await-analysis.d.ts +5 -0
  32. package/out/types/src/evaluator/builtins/alignof.d.ts +8 -0
  33. package/out/types/src/evaluator/builtins/and_or.d.ts +8 -0
  34. package/out/types/src/evaluator/builtins/arc_fns.d.ts +58 -0
  35. package/out/types/src/evaluator/builtins/array_fns.d.ts +0 -0
  36. package/out/types/src/evaluator/builtins/as.d.ts +8 -0
  37. package/out/types/src/evaluator/builtins/async_fns.d.ts +8 -0
  38. package/out/types/src/evaluator/builtins/compt_assert.d.ts +8 -0
  39. package/out/types/src/evaluator/builtins/compt_boolean_fns.d.ts +8 -0
  40. package/out/types/src/evaluator/builtins/compt_expect_error.d.ts +8 -0
  41. package/out/types/src/evaluator/builtins/compt_list_fns.d.ts +33 -0
  42. package/out/types/src/evaluator/builtins/compt_print.d.ts +8 -0
  43. package/out/types/src/evaluator/builtins/compt_string_fns.d.ts +8 -0
  44. package/out/types/src/evaluator/builtins/consume.d.ts +8 -0
  45. package/out/types/src/evaluator/builtins/drop.d.ts +8 -0
  46. package/out/types/src/evaluator/builtins/dup.d.ts +8 -0
  47. package/out/types/src/evaluator/builtins/expr_fns.d.ts +33 -0
  48. package/out/types/src/evaluator/builtins/future_fns.d.ts +8 -0
  49. package/out/types/src/evaluator/builtins/gc.d.ts +8 -0
  50. package/out/types/src/evaluator/builtins/gensym.d.ts +8 -0
  51. package/out/types/src/evaluator/builtins/impl_constraint.d.ts +8 -0
  52. package/out/types/src/evaluator/builtins/macro_expand.d.ts +8 -0
  53. package/out/types/src/evaluator/builtins/numeric_fns.d.ts +8 -0
  54. package/out/types/src/evaluator/builtins/panic.d.ts +8 -0
  55. package/out/types/src/evaluator/builtins/ptr_fns.d.ts +8 -0
  56. package/out/types/src/evaluator/builtins/quote.d.ts +13 -0
  57. package/out/types/src/evaluator/builtins/rc.d.ts +8 -0
  58. package/out/types/src/evaluator/builtins/sizeof.d.ts +8 -0
  59. package/out/types/src/evaluator/builtins/the.d.ts +8 -0
  60. package/out/types/src/evaluator/builtins/type_fns.d.ts +28 -0
  61. package/out/types/src/evaluator/builtins/va_start.d.ts +8 -0
  62. package/out/types/src/evaluator/builtins/var_fns.d.ts +18 -0
  63. package/out/types/src/evaluator/calls/array.d.ts +13 -0
  64. package/out/types/src/evaluator/calls/array_type.d.ts +11 -0
  65. package/out/types/src/evaluator/calls/closure_type.d.ts +11 -0
  66. package/out/types/src/evaluator/calls/compt_function.d.ts +19 -0
  67. package/out/types/src/evaluator/calls/compt_list_type.d.ts +11 -0
  68. package/out/types/src/evaluator/calls/function.d.ts +16 -0
  69. package/out/types/src/evaluator/calls/function_type.d.ts +15 -0
  70. package/out/types/src/evaluator/calls/helper.d.ts +42 -0
  71. package/out/types/src/evaluator/calls/iso.d.ts +15 -0
  72. package/out/types/src/evaluator/calls/module_type.d.ts +11 -0
  73. package/out/types/src/evaluator/calls/numeric_type.d.ts +15 -0
  74. package/out/types/src/evaluator/calls/pointer.d.ts +8 -0
  75. package/out/types/src/evaluator/calls/pointer_type.d.ts +14 -0
  76. package/out/types/src/evaluator/calls/type.d.ts +12 -0
  77. package/out/types/src/evaluator/context.d.ts +169 -0
  78. package/out/types/src/evaluator/exprs/_expr.d.ts +8 -0
  79. package/out/types/src/evaluator/exprs/assignment.d.ts +9 -0
  80. package/out/types/src/evaluator/exprs/begin.d.ts +10 -0
  81. package/out/types/src/evaluator/exprs/binding.d.ts +12 -0
  82. package/out/types/src/evaluator/exprs/c_include.d.ts +8 -0
  83. package/out/types/src/evaluator/exprs/cond.d.ts +8 -0
  84. package/out/types/src/evaluator/exprs/destructuring_assignment.d.ts +33 -0
  85. package/out/types/src/evaluator/exprs/exists.d.ts +0 -0
  86. package/out/types/src/evaluator/exprs/expr.d.ts +9 -0
  87. package/out/types/src/evaluator/exprs/extern.d.ts +8 -0
  88. package/out/types/src/evaluator/exprs/identifer_and_operator.d.ts +9 -0
  89. package/out/types/src/evaluator/exprs/import.d.ts +9 -0
  90. package/out/types/src/evaluator/exprs/initialization_assignment.d.ts +8 -0
  91. package/out/types/src/evaluator/exprs/match.d.ts +8 -0
  92. package/out/types/src/evaluator/exprs/open.d.ts +8 -0
  93. package/out/types/src/evaluator/exprs/property_access.d.ts +8 -0
  94. package/out/types/src/evaluator/exprs/recur.d.ts +8 -0
  95. package/out/types/src/evaluator/exprs/subtype_of.d.ts +21 -0
  96. package/out/types/src/evaluator/exprs/test.d.ts +8 -0
  97. package/out/types/src/evaluator/exprs/typeof.d.ts +8 -0
  98. package/out/types/src/evaluator/exprs/while.d.ts +8 -0
  99. package/out/types/src/evaluator/index.d.ts +26 -0
  100. package/out/types/src/evaluator/types/array.d.ts +8 -0
  101. package/out/types/src/evaluator/types/closure.d.ts +8 -0
  102. package/out/types/src/evaluator/types/compt_list.d.ts +8 -0
  103. package/out/types/src/evaluator/types/concrete_module.d.ts +8 -0
  104. package/out/types/src/evaluator/types/dyn.d.ts +8 -0
  105. package/out/types/src/evaluator/types/enum.d.ts +8 -0
  106. package/out/types/src/evaluator/types/expr_synthesizer.d.ts +14 -0
  107. package/out/types/src/evaluator/types/field.d.ts +14 -0
  108. package/out/types/src/evaluator/types/fn_module.d.ts +8 -0
  109. package/out/types/src/evaluator/types/function.d.ts +58 -0
  110. package/out/types/src/evaluator/types/future_module.d.ts +8 -0
  111. package/out/types/src/evaluator/types/module.d.ts +19 -0
  112. package/out/types/src/evaluator/types/newtype.d.ts +8 -0
  113. package/out/types/src/evaluator/types/object.d.ts +8 -0
  114. package/out/types/src/evaluator/types/proofs.d.ts +0 -0
  115. package/out/types/src/evaluator/types/slice.d.ts +8 -0
  116. package/out/types/src/evaluator/types/struct.d.ts +8 -0
  117. package/out/types/src/evaluator/types/synthesizer.d.ts +16 -0
  118. package/out/types/src/evaluator/types/tuple.d.ts +18 -0
  119. package/out/types/src/evaluator/types/union.d.ts +8 -0
  120. package/out/types/src/evaluator/types/utils.d.ts +71 -0
  121. package/out/types/src/evaluator/types/validation.d.ts +3 -0
  122. package/out/types/src/evaluator/utils/array-utils.d.ts +15 -0
  123. package/out/types/src/evaluator/utils/closure.d.ts +35 -0
  124. package/out/types/src/evaluator/utils.d.ts +4 -0
  125. package/out/types/src/evaluator/values/anonymous_function.d.ts +8 -0
  126. package/out/types/src/evaluator/values/anonymous_module.d.ts +17 -0
  127. package/out/types/src/evaluator/values/anonymous_struct.d.ts +8 -0
  128. package/out/types/src/evaluator/values/array.d.ts +8 -0
  129. package/out/types/src/evaluator/values/boolean.d.ts +3 -0
  130. package/out/types/src/evaluator/values/char.d.ts +3 -0
  131. package/out/types/src/evaluator/values/compt_list.d.ts +8 -0
  132. package/out/types/src/evaluator/values/dyn.d.ts +8 -0
  133. package/out/types/src/evaluator/values/float.d.ts +4 -0
  134. package/out/types/src/evaluator/values/integer.d.ts +4 -0
  135. package/out/types/src/evaluator/values/module.d.ts +58 -0
  136. package/out/types/src/evaluator/values/string.d.ts +3 -0
  137. package/out/types/src/evaluator/values/tuple.d.ts +32 -0
  138. package/out/types/src/expr.d.ts +456 -0
  139. package/out/types/src/function-value.d.ts +42 -0
  140. package/out/types/src/index.d.ts +4 -0
  141. package/out/types/src/lexer.d.ts +2 -0
  142. package/out/types/src/logger.d.ts +1 -0
  143. package/out/types/src/module-manager.d.ts +30 -0
  144. package/out/types/src/naming-checker.d.ts +4 -0
  145. package/out/types/src/parser.d.ts +33 -0
  146. package/out/types/src/test-runner.d.ts +30 -0
  147. package/out/types/src/tests/codegen.test.d.ts +1 -0
  148. package/out/types/src/tests/fixme.test.d.ts +1 -0
  149. package/out/types/src/tests/module-manager.test.d.ts +1 -0
  150. package/out/types/src/tests/parser.test.d.ts +1 -0
  151. package/out/types/src/tests/sample.test.d.ts +0 -0
  152. package/out/types/src/tests/std.test.d.ts +1 -0
  153. package/out/types/src/token.d.ts +40 -0
  154. package/out/types/src/type-value.d.ts +7 -0
  155. package/out/types/src/types/compatibility.d.ts +16 -0
  156. package/out/types/src/types/creators.d.ts +73 -0
  157. package/out/types/src/types/definitions.d.ts +218 -0
  158. package/out/types/src/types/guards.d.ts +70 -0
  159. package/out/types/src/types/hierarchy.d.ts +4 -0
  160. package/out/types/src/types/index.d.ts +7 -0
  161. package/out/types/src/types/module_field.d.ts +2 -0
  162. package/out/types/src/types/tags.d.ts +45 -0
  163. package/out/types/src/types/utils.d.ts +50 -0
  164. package/out/types/src/unit-value.d.ts +7 -0
  165. package/out/types/src/utils.d.ts +6 -0
  166. package/out/types/src/value-tag.d.ts +29 -0
  167. package/out/types/src/value.d.ts +110 -0
  168. package/out/types/src/yo-cli.d.ts +1 -0
  169. package/out/types/tsconfig.tsbuildinfo +1 -0
  170. package/package.json +57 -0
  171. package/scripts/check-liburing.js +76 -0
  172. package/std/alg/hash.yo +50 -0
  173. package/std/allocator.yo +113 -0
  174. package/std/allocators/c_allocator.yo +118 -0
  175. package/std/async.yo +13 -0
  176. package/std/collections/array_list.yo +415 -0
  177. package/std/collections/hash_map.yo +482 -0
  178. package/std/collections/hash_set.yo +706 -0
  179. package/std/collections/index.yo +11 -0
  180. package/std/collections/linked_list.yo +439 -0
  181. package/std/error.yo +0 -0
  182. package/std/gc.yo +10 -0
  183. package/std/index.yo +12 -0
  184. package/std/io/file.yo +191 -0
  185. package/std/io/index.yo +5 -0
  186. package/std/libc/assert.yo +39 -0
  187. package/std/libc/ctype.yo +57 -0
  188. package/std/libc/errno.yo +182 -0
  189. package/std/libc/float.yo +87 -0
  190. package/std/libc/index.yo +29 -0
  191. package/std/libc/limits.yo +65 -0
  192. package/std/libc/math.yo +679 -0
  193. package/std/libc/signal.yo +101 -0
  194. package/std/libc/stdatomic.yo +213 -0
  195. package/std/libc/stdint.yo +214 -0
  196. package/std/libc/stdio.yo +225 -0
  197. package/std/libc/stdlib.yo +204 -0
  198. package/std/libc/string.yo +151 -0
  199. package/std/libc/time.yo +92 -0
  200. package/std/libc/unistd.yo +130 -0
  201. package/std/monad.yo +152 -0
  202. package/std/prelude.yo +3094 -0
  203. package/std/string/index.yo +8 -0
  204. package/std/string/rune.yo +82 -0
  205. package/std/string/string.yo +288 -0
  206. package/std/sync.yo +95 -0
  207. package/std/thread.yo +36 -0
  208. package/std/time.yo +13 -0
  209. package/std/worker.yo +36 -0
  210. package/vendor/mimalloc/.gitattributes +12 -0
  211. package/vendor/mimalloc/CMakeLists.txt +763 -0
  212. package/vendor/mimalloc/LICENSE +21 -0
  213. package/vendor/mimalloc/SECURITY.md +41 -0
  214. package/vendor/mimalloc/azure-pipelines.yml +251 -0
  215. package/vendor/mimalloc/bin/mimalloc-redirect-arm64.dll +0 -0
  216. package/vendor/mimalloc/bin/mimalloc-redirect-arm64.lib +0 -0
  217. package/vendor/mimalloc/bin/mimalloc-redirect-arm64ec.dll +0 -0
  218. package/vendor/mimalloc/bin/mimalloc-redirect-arm64ec.lib +0 -0
  219. package/vendor/mimalloc/bin/mimalloc-redirect.dll +0 -0
  220. package/vendor/mimalloc/bin/mimalloc-redirect.lib +0 -0
  221. package/vendor/mimalloc/bin/mimalloc-redirect32.dll +0 -0
  222. package/vendor/mimalloc/bin/mimalloc-redirect32.lib +0 -0
  223. package/vendor/mimalloc/bin/minject-arm64.exe +0 -0
  224. package/vendor/mimalloc/bin/minject.exe +0 -0
  225. package/vendor/mimalloc/bin/minject32.exe +0 -0
  226. package/vendor/mimalloc/bin/readme.md +118 -0
  227. package/vendor/mimalloc/cmake/JoinPaths.cmake +23 -0
  228. package/vendor/mimalloc/cmake/mimalloc-config-version.cmake +19 -0
  229. package/vendor/mimalloc/cmake/mimalloc-config.cmake +14 -0
  230. package/vendor/mimalloc/contrib/docker/alpine/Dockerfile +23 -0
  231. package/vendor/mimalloc/contrib/docker/alpine-arm32v7/Dockerfile +28 -0
  232. package/vendor/mimalloc/contrib/docker/alpine-x86/Dockerfile +28 -0
  233. package/vendor/mimalloc/contrib/docker/manylinux-x64/Dockerfile +23 -0
  234. package/vendor/mimalloc/contrib/docker/readme.md +10 -0
  235. package/vendor/mimalloc/contrib/vcpkg/portfile.cmake +64 -0
  236. package/vendor/mimalloc/contrib/vcpkg/readme.md +40 -0
  237. package/vendor/mimalloc/contrib/vcpkg/usage +20 -0
  238. package/vendor/mimalloc/contrib/vcpkg/vcpkg-cmake-wrapper.cmake +20 -0
  239. package/vendor/mimalloc/contrib/vcpkg/vcpkg.json +48 -0
  240. package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg +887 -0
  241. package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg +1185 -0
  242. package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg +757 -0
  243. package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg +1028 -0
  244. package/vendor/mimalloc/doc/bench-2020/bench-r5a-1.svg +769 -0
  245. package/vendor/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg +868 -0
  246. package/vendor/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg +1157 -0
  247. package/vendor/mimalloc/doc/bench-2020/bench-r5a-2.svg +983 -0
  248. package/vendor/mimalloc/doc/bench-2020/bench-r5a-rss-1.svg +683 -0
  249. package/vendor/mimalloc/doc/bench-2020/bench-r5a-rss-2.svg +854 -0
  250. package/vendor/mimalloc/doc/bench-2020/bench-spec-rss.svg +713 -0
  251. package/vendor/mimalloc/doc/bench-2020/bench-spec.svg +713 -0
  252. package/vendor/mimalloc/doc/bench-2020/bench-z4-1.svg +890 -0
  253. package/vendor/mimalloc/doc/bench-2020/bench-z4-2.svg +1146 -0
  254. package/vendor/mimalloc/doc/bench-2020/bench-z4-rss-1.svg +796 -0
  255. package/vendor/mimalloc/doc/bench-2020/bench-z4-rss-2.svg +974 -0
  256. package/vendor/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg +952 -0
  257. package/vendor/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg +1255 -0
  258. package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg +955 -0
  259. package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg +1269 -0
  260. package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg +836 -0
  261. package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg +1131 -0
  262. package/vendor/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg +766 -0
  263. package/vendor/mimalloc/doc/doxyfile +2895 -0
  264. package/vendor/mimalloc/doc/ds-logo.jpg +0 -0
  265. package/vendor/mimalloc/doc/ds-logo.png +0 -0
  266. package/vendor/mimalloc/doc/mimalloc-doc.h +1452 -0
  267. package/vendor/mimalloc/doc/mimalloc-doxygen.css +60 -0
  268. package/vendor/mimalloc/doc/mimalloc-logo-100.png +0 -0
  269. package/vendor/mimalloc/doc/mimalloc-logo.png +0 -0
  270. package/vendor/mimalloc/doc/mimalloc-logo.svg +161 -0
  271. package/vendor/mimalloc/doc/spades-logo.png +0 -0
  272. package/vendor/mimalloc/doc/unreal-logo.svg +43 -0
  273. package/vendor/mimalloc/ide/vs2022/mimalloc-lib.vcxproj +500 -0
  274. package/vendor/mimalloc/ide/vs2022/mimalloc-lib.vcxproj.filters +108 -0
  275. package/vendor/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj +508 -0
  276. package/vendor/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj.filters +111 -0
  277. package/vendor/mimalloc/ide/vs2022/mimalloc-override-test-dep.vcxproj +355 -0
  278. package/vendor/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj +360 -0
  279. package/vendor/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj +295 -0
  280. package/vendor/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj +292 -0
  281. package/vendor/mimalloc/ide/vs2022/mimalloc-test.vcxproj +289 -0
  282. package/vendor/mimalloc/ide/vs2022/mimalloc.sln +151 -0
  283. package/vendor/mimalloc/include/mimalloc/atomic.h +557 -0
  284. package/vendor/mimalloc/include/mimalloc/internal.h +1153 -0
  285. package/vendor/mimalloc/include/mimalloc/prim.h +421 -0
  286. package/vendor/mimalloc/include/mimalloc/track.h +145 -0
  287. package/vendor/mimalloc/include/mimalloc/types.h +685 -0
  288. package/vendor/mimalloc/include/mimalloc-new-delete.h +66 -0
  289. package/vendor/mimalloc/include/mimalloc-override.h +68 -0
  290. package/vendor/mimalloc/include/mimalloc-stats.h +103 -0
  291. package/vendor/mimalloc/include/mimalloc.h +612 -0
  292. package/vendor/mimalloc/mimalloc.pc.in +11 -0
  293. package/vendor/mimalloc/readme.md +946 -0
  294. package/vendor/mimalloc/src/alloc-aligned.c +360 -0
  295. package/vendor/mimalloc/src/alloc-override.c +316 -0
  296. package/vendor/mimalloc/src/alloc-posix.c +185 -0
  297. package/vendor/mimalloc/src/alloc.c +692 -0
  298. package/vendor/mimalloc/src/arena-abandon.c +346 -0
  299. package/vendor/mimalloc/src/arena.c +1043 -0
  300. package/vendor/mimalloc/src/bitmap.c +441 -0
  301. package/vendor/mimalloc/src/bitmap.h +119 -0
  302. package/vendor/mimalloc/src/free.c +572 -0
  303. package/vendor/mimalloc/src/heap.c +733 -0
  304. package/vendor/mimalloc/src/init.c +714 -0
  305. package/vendor/mimalloc/src/libc.c +334 -0
  306. package/vendor/mimalloc/src/options.c +663 -0
  307. package/vendor/mimalloc/src/os.c +770 -0
  308. package/vendor/mimalloc/src/page-queue.c +390 -0
  309. package/vendor/mimalloc/src/page.c +1049 -0
  310. package/vendor/mimalloc/src/prim/emscripten/prim.c +249 -0
  311. package/vendor/mimalloc/src/prim/osx/alloc-override-zone.c +461 -0
  312. package/vendor/mimalloc/src/prim/osx/prim.c +9 -0
  313. package/vendor/mimalloc/src/prim/prim.c +76 -0
  314. package/vendor/mimalloc/src/prim/readme.md +9 -0
  315. package/vendor/mimalloc/src/prim/unix/prim.c +934 -0
  316. package/vendor/mimalloc/src/prim/wasi/prim.c +284 -0
  317. package/vendor/mimalloc/src/prim/windows/etw-mimalloc.wprp +61 -0
  318. package/vendor/mimalloc/src/prim/windows/etw.h +905 -0
  319. package/vendor/mimalloc/src/prim/windows/etw.man +0 -0
  320. package/vendor/mimalloc/src/prim/windows/prim.c +878 -0
  321. package/vendor/mimalloc/src/prim/windows/readme.md +17 -0
  322. package/vendor/mimalloc/src/random.c +258 -0
  323. package/vendor/mimalloc/src/segment-map.c +142 -0
  324. package/vendor/mimalloc/src/segment.c +1702 -0
  325. package/vendor/mimalloc/src/static.c +41 -0
  326. package/vendor/mimalloc/src/stats.c +635 -0
  327. package/vendor/mimalloc/test/CMakeLists.txt +56 -0
  328. package/vendor/mimalloc/test/main-override-dep.cpp +51 -0
  329. package/vendor/mimalloc/test/main-override-dep.h +11 -0
  330. package/vendor/mimalloc/test/main-override-static.c +539 -0
  331. package/vendor/mimalloc/test/main-override.c +36 -0
  332. package/vendor/mimalloc/test/main-override.cpp +497 -0
  333. package/vendor/mimalloc/test/main.c +46 -0
  334. package/vendor/mimalloc/test/readme.md +16 -0
  335. package/vendor/mimalloc/test/test-api-fill.c +343 -0
  336. package/vendor/mimalloc/test/test-api.c +466 -0
  337. package/vendor/mimalloc/test/test-stress.c +428 -0
  338. package/vendor/mimalloc/test/test-wrong.c +92 -0
  339. package/vendor/mimalloc/test/testhelper.h +49 -0
@@ -0,0 +1,1702 @@
1
+ /* ----------------------------------------------------------------------------
2
+ Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
3
+ This is free software; you can redistribute it and/or modify it under the
4
+ terms of the MIT license. A copy of the license can be found in the file
5
+ "LICENSE" at the root of this distribution.
6
+ -----------------------------------------------------------------------------*/
7
+ #include "mimalloc.h"
8
+ #include "mimalloc/internal.h"
9
+ #include "mimalloc/atomic.h"
10
+
11
+ #include <string.h> // memset
12
+ #include <stdio.h>
13
+
14
+ // -------------------------------------------------------------------
15
+ // Segments
16
+ // mimalloc pages reside in segments. See `mi_segment_valid` for invariants.
17
+ // -------------------------------------------------------------------
18
+
19
+
20
+ static void mi_segment_try_purge(mi_segment_t* segment, bool force);
21
+
22
+
23
+ // -------------------------------------------------------------------
24
+ // commit mask
25
+ // -------------------------------------------------------------------
26
+
27
+ static bool mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) {
28
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
29
+ if ((commit->mask[i] & cm->mask[i]) != cm->mask[i]) return false;
30
+ }
31
+ return true;
32
+ }
33
+
34
+ static bool mi_commit_mask_any_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) {
35
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
36
+ if ((commit->mask[i] & cm->mask[i]) != 0) return true;
37
+ }
38
+ return false;
39
+ }
40
+
41
+ static void mi_commit_mask_create_intersect(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm, mi_commit_mask_t* res) {
42
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
43
+ res->mask[i] = (commit->mask[i] & cm->mask[i]);
44
+ }
45
+ }
46
+
47
+ static void mi_commit_mask_clear(mi_commit_mask_t* res, const mi_commit_mask_t* cm) {
48
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
49
+ res->mask[i] &= ~(cm->mask[i]);
50
+ }
51
+ }
52
+
53
+ static void mi_commit_mask_set(mi_commit_mask_t* res, const mi_commit_mask_t* cm) {
54
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
55
+ res->mask[i] |= cm->mask[i];
56
+ }
57
+ }
58
+
59
+ static void mi_commit_mask_create(size_t bitidx, size_t bitcount, mi_commit_mask_t* cm) {
60
+ mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
61
+ mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
62
+ if (bitcount == MI_COMMIT_MASK_BITS) {
63
+ mi_assert_internal(bitidx==0);
64
+ mi_commit_mask_create_full(cm);
65
+ }
66
+ else if (bitcount == 0) {
67
+ mi_commit_mask_create_empty(cm);
68
+ }
69
+ else {
70
+ mi_commit_mask_create_empty(cm);
71
+ size_t i = bitidx / MI_COMMIT_MASK_FIELD_BITS;
72
+ size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS;
73
+ while (bitcount > 0) {
74
+ mi_assert_internal(i < MI_COMMIT_MASK_FIELD_COUNT);
75
+ size_t avail = MI_COMMIT_MASK_FIELD_BITS - ofs;
76
+ size_t count = (bitcount > avail ? avail : bitcount);
77
+ size_t mask = (count >= MI_COMMIT_MASK_FIELD_BITS ? ~((size_t)0) : (((size_t)1 << count) - 1) << ofs);
78
+ cm->mask[i] = mask;
79
+ bitcount -= count;
80
+ ofs = 0;
81
+ i++;
82
+ }
83
+ }
84
+ }
85
+
86
+ size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) {
87
+ mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0);
88
+ size_t count = 0;
89
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
90
+ size_t mask = cm->mask[i];
91
+ if (~mask == 0) {
92
+ count += MI_COMMIT_MASK_FIELD_BITS;
93
+ }
94
+ else {
95
+ for (; mask != 0; mask >>= 1) { // todo: use popcount
96
+ if ((mask&1)!=0) count++;
97
+ }
98
+ }
99
+ }
100
+ // we use total since for huge segments each commit bit may represent a larger size
101
+ return ((total / MI_COMMIT_MASK_BITS) * count);
102
+ }
103
+
104
+
105
+ size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) {
106
+ size_t i = (*idx) / MI_COMMIT_MASK_FIELD_BITS;
107
+ size_t ofs = (*idx) % MI_COMMIT_MASK_FIELD_BITS;
108
+ size_t mask = 0;
109
+ // find first ones
110
+ while (i < MI_COMMIT_MASK_FIELD_COUNT) {
111
+ mask = cm->mask[i];
112
+ mask >>= ofs;
113
+ if (mask != 0) {
114
+ while ((mask&1) == 0) {
115
+ mask >>= 1;
116
+ ofs++;
117
+ }
118
+ break;
119
+ }
120
+ i++;
121
+ ofs = 0;
122
+ }
123
+ if (i >= MI_COMMIT_MASK_FIELD_COUNT) {
124
+ // not found
125
+ *idx = MI_COMMIT_MASK_BITS;
126
+ return 0;
127
+ }
128
+ else {
129
+ // found, count ones
130
+ size_t count = 0;
131
+ *idx = (i*MI_COMMIT_MASK_FIELD_BITS) + ofs;
132
+ do {
133
+ mi_assert_internal(ofs < MI_COMMIT_MASK_FIELD_BITS && (mask&1) == 1);
134
+ do {
135
+ count++;
136
+ mask >>= 1;
137
+ } while ((mask&1) == 1);
138
+ if ((((*idx + count) % MI_COMMIT_MASK_FIELD_BITS) == 0)) {
139
+ i++;
140
+ if (i >= MI_COMMIT_MASK_FIELD_COUNT) break;
141
+ mask = cm->mask[i];
142
+ ofs = 0;
143
+ }
144
+ } while ((mask&1) == 1);
145
+ mi_assert_internal(count > 0);
146
+ return count;
147
+ }
148
+ }
149
+
150
+
151
+ /* --------------------------------------------------------------------------------
152
+ Segment allocation
153
+ We allocate pages inside bigger "segments" (32 MiB on 64-bit). This is to avoid
154
+ splitting VMA's on Linux and reduce fragmentation on other OS's.
155
+ Each thread owns its own segments.
156
+
157
+ Currently we have:
158
+ - small pages (64KiB)
159
+ - medium pages (512KiB)
160
+ - large pages (4MiB),
161
+ - huge segments have 1 page in one segment that can be larger than `MI_SEGMENT_SIZE`.
162
+ it is used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or with alignment `> MI_BLOCK_ALIGNMENT_MAX`.
163
+
164
+ The memory for a segment is usually committed on demand.
165
+ (i.e. we are careful to not touch the memory until we actually allocate a block there)
166
+
167
+ If a thread ends, it "abandons" pages that still contain live blocks.
168
+ Such segments are abandoned and these can be reclaimed by still running threads,
169
+ (much like work-stealing).
170
+ -------------------------------------------------------------------------------- */
171
+
172
+
173
+ /* -----------------------------------------------------------
174
+ Slices
175
+ ----------------------------------------------------------- */
176
+
177
+
178
+ static const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment) {
179
+ return &segment->slices[segment->slice_entries];
180
+ }
181
+
182
+ static uint8_t* mi_slice_start(const mi_slice_t* slice) {
183
+ mi_segment_t* segment = _mi_ptr_segment(slice);
184
+ mi_assert_internal(slice >= segment->slices && slice < mi_segment_slices_end(segment));
185
+ return ((uint8_t*)segment + ((slice - segment->slices)*MI_SEGMENT_SLICE_SIZE));
186
+ }
187
+
188
+
189
+ /* -----------------------------------------------------------
190
+ Bins
191
+ ----------------------------------------------------------- */
192
+ // Use bit scan forward to quickly find the first zero bit if it is available
193
+
194
+ static inline size_t mi_slice_bin8(size_t slice_count) {
195
+ if (slice_count<=1) return slice_count;
196
+ mi_assert_internal(slice_count <= MI_SLICES_PER_SEGMENT);
197
+ slice_count--;
198
+ size_t s = mi_bsr(slice_count); // slice_count > 1
199
+ if (s <= 2) return slice_count + 1;
200
+ size_t bin = ((s << 2) | ((slice_count >> (s - 2))&0x03)) - 4;
201
+ return bin;
202
+ }
203
+
204
+ static inline size_t mi_slice_bin(size_t slice_count) {
205
+ mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_SEGMENT_SIZE);
206
+ mi_assert_internal(mi_slice_bin8(MI_SLICES_PER_SEGMENT) <= MI_SEGMENT_BIN_MAX);
207
+ size_t bin = mi_slice_bin8(slice_count);
208
+ mi_assert_internal(bin <= MI_SEGMENT_BIN_MAX);
209
+ return bin;
210
+ }
211
+
212
+ static inline size_t mi_slice_index(const mi_slice_t* slice) {
213
+ mi_segment_t* segment = _mi_ptr_segment(slice);
214
+ ptrdiff_t index = slice - segment->slices;
215
+ mi_assert_internal(index >= 0 && index < (ptrdiff_t)segment->slice_entries);
216
+ return index;
217
+ }
218
+
219
+
220
+ /* -----------------------------------------------------------
221
+ Slice span queues
222
+ ----------------------------------------------------------- */
223
+
224
+ static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) {
225
+ // todo: or push to the end?
226
+ mi_assert_internal(slice->prev == NULL && slice->next==NULL);
227
+ slice->prev = NULL; // paranoia
228
+ slice->next = sq->first;
229
+ sq->first = slice;
230
+ if (slice->next != NULL) slice->next->prev = slice;
231
+ else sq->last = slice;
232
+ slice->block_size = 0; // free
233
+ }
234
+
235
+ static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) {
236
+ size_t bin = mi_slice_bin(slice_count);
237
+ mi_span_queue_t* sq = &tld->spans[bin];
238
+ mi_assert_internal(sq->slice_count >= slice_count);
239
+ return sq;
240
+ }
241
+
242
+ static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
243
+ mi_assert_internal(slice->block_size==0 && slice->slice_count>0 && slice->slice_offset==0);
244
+ // should work too if the queue does not contain slice (which can happen during reclaim)
245
+ if (slice->prev != NULL) slice->prev->next = slice->next;
246
+ if (slice == sq->first) sq->first = slice->next;
247
+ if (slice->next != NULL) slice->next->prev = slice->prev;
248
+ if (slice == sq->last) sq->last = slice->prev;
249
+ slice->prev = NULL;
250
+ slice->next = NULL;
251
+ slice->block_size = 1; // no more free
252
+ }
253
+
254
+
255
+ /* -----------------------------------------------------------
256
+ Invariant checking
257
+ ----------------------------------------------------------- */
258
+
259
+ static bool mi_slice_is_used(const mi_slice_t* slice) {
260
+ return (slice->block_size > 0);
261
+ }
262
+
263
+
264
+ #if (MI_DEBUG>=3)
265
+ static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) {
266
+ for (mi_slice_t* s = sq->first; s != NULL; s = s->next) {
267
+ if (s==slice) return true;
268
+ }
269
+ return false;
270
+ }
271
+
272
+ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
273
+ mi_assert_internal(segment != NULL);
274
+ mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
275
+ mi_assert_internal(segment->abandoned <= segment->used);
276
+ mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id());
277
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); // can only decommit committed blocks
278
+ //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0);
279
+ mi_slice_t* slice = &segment->slices[0];
280
+ const mi_slice_t* end = mi_segment_slices_end(segment);
281
+ size_t used_count = 0;
282
+ mi_span_queue_t* sq;
283
+ while(slice < end) {
284
+ mi_assert_internal(slice->slice_count > 0);
285
+ mi_assert_internal(slice->slice_offset == 0);
286
+ size_t index = mi_slice_index(slice);
287
+ size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1;
288
+ if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET_COUNT valid back offsets
289
+ used_count++;
290
+ mi_assert_internal(slice->is_huge == (segment->kind == MI_SEGMENT_HUGE));
291
+ for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET_COUNT && index + i <= maxindex; i++) {
292
+ mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t));
293
+ mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0);
294
+ mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1);
295
+ }
296
+ // and the last entry as well (for coalescing)
297
+ const mi_slice_t* last = slice + slice->slice_count - 1;
298
+ if (last > slice && last < mi_segment_slices_end(segment)) {
299
+ mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t));
300
+ mi_assert_internal(last->slice_count == 0);
301
+ mi_assert_internal(last->block_size == 1);
302
+ }
303
+ }
304
+ else { // free range of slices; only last slice needs a valid back offset
305
+ mi_slice_t* last = &segment->slices[maxindex];
306
+ if (segment->kind != MI_SEGMENT_HUGE || slice->slice_count <= (segment->slice_entries - segment->segment_info_slices)) {
307
+ mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset);
308
+ }
309
+ mi_assert_internal(slice == last || last->slice_count == 0 );
310
+ mi_assert_internal(last->block_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->block_size==1));
311
+ if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned
312
+ sq = mi_span_queue_for(slice->slice_count,tld);
313
+ mi_assert_internal(mi_span_queue_contains(sq,slice));
314
+ }
315
+ }
316
+ slice = &segment->slices[maxindex+1];
317
+ }
318
+ mi_assert_internal(slice == end);
319
+ mi_assert_internal(used_count == segment->used + 1);
320
+ return true;
321
+ }
322
+ #endif
323
+
324
+ /* -----------------------------------------------------------
325
+ Segment size calculations
326
+ ----------------------------------------------------------- */
327
+
328
+ static size_t mi_segment_info_size(mi_segment_t* segment) {
329
+ return segment->segment_info_slices * MI_SEGMENT_SLICE_SIZE;
330
+ }
331
+
332
+ static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, const mi_slice_t* slice, size_t block_size, size_t* page_size)
333
+ {
334
+ const ptrdiff_t idx = slice - segment->slices;
335
+ const size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE;
336
+ uint8_t* const pstart = (uint8_t*)segment + (idx*MI_SEGMENT_SLICE_SIZE);
337
+ // make the start not OS page aligned for smaller blocks to avoid page/cache effects
338
+ // note: the offset must always be a block_size multiple since we assume small allocations
339
+ // are aligned (see `mi_heap_malloc_aligned`).
340
+ size_t start_offset = 0;
341
+ if (block_size > 0 && block_size <= MI_MAX_ALIGN_GUARANTEE) {
342
+ // for small objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
343
+ const size_t adjust = block_size - ((uintptr_t)pstart % block_size);
344
+ if (adjust < block_size && psize >= block_size + adjust) {
345
+ start_offset += adjust;
346
+ }
347
+ }
348
+ if (block_size >= MI_INTPTR_SIZE) {
349
+ if (block_size <= 64) { start_offset += 3*block_size; }
350
+ else if (block_size <= 512) { start_offset += block_size; }
351
+ }
352
+ start_offset = _mi_align_up(start_offset, MI_MAX_ALIGN_SIZE);
353
+ mi_assert_internal(_mi_is_aligned(pstart + start_offset, MI_MAX_ALIGN_SIZE));
354
+ mi_assert_internal(block_size == 0 || block_size > MI_MAX_ALIGN_GUARANTEE || _mi_is_aligned(pstart + start_offset,block_size));
355
+ if (page_size != NULL) { *page_size = psize - start_offset; }
356
+ return (pstart + start_offset);
357
+ }
358
+
359
+ // Start of the page available memory; can be used on uninitialized pages
360
+ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
361
+ {
362
+ const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page);
363
+ uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, mi_page_block_size(page), page_size);
364
+ mi_assert_internal(mi_page_block_size(page) > 0 || _mi_ptr_page(p) == page);
365
+ mi_assert_internal(_mi_ptr_segment(p) == segment);
366
+ return p;
367
+ }
368
+
369
+
370
+ static size_t mi_segment_calculate_slices(size_t required, size_t* info_slices) {
371
+ size_t page_size = _mi_os_page_size();
372
+ size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size);
373
+ size_t guardsize = 0;
374
+
375
+ if (MI_SECURE>0) {
376
+ // in secure mode, we set up a protected page in between the segment info
377
+ // and the page data (and one at the end of the segment)
378
+ guardsize = page_size;
379
+ if (required > 0) {
380
+ required = _mi_align_up(required, MI_SEGMENT_SLICE_SIZE) + page_size;
381
+ }
382
+ }
383
+
384
+ isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE);
385
+ if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE;
386
+ size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) );
387
+ mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0);
388
+ return (segment_size / MI_SEGMENT_SLICE_SIZE);
389
+ }
390
+
391
+
392
+ /* ----------------------------------------------------------------------------
393
+ Segment caches
394
+ We keep a small segment cache per thread to increase local
395
+ reuse and avoid setting/clearing guard pages in secure mode.
396
+ ------------------------------------------------------------------------------- */
397
+
398
+ static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
399
+ if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1);
400
+ else _mi_stat_decrease(&tld->stats->segments,1);
401
+ tld->count += (segment_size >= 0 ? 1 : -1);
402
+ if (tld->count > tld->peak_count) tld->peak_count = tld->count;
403
+ tld->current_size += segment_size;
404
+ if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
405
+ }
406
+
407
+ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
408
+ segment->thread_id = 0;
409
+ _mi_segment_map_freed_at(segment);
410
+ mi_segments_track_size(-((long)mi_segment_size(segment)),tld);
411
+ if (segment->was_reclaimed) {
412
+ tld->reclaim_count--;
413
+ segment->was_reclaimed = false;
414
+ }
415
+ if (MI_SECURE>0) {
416
+ // _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set
417
+ // unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted
418
+ size_t os_pagesize = _mi_os_page_size();
419
+ _mi_os_unprotect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
420
+ uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
421
+ _mi_os_unprotect(end, os_pagesize);
422
+ }
423
+
424
+ // purge delayed decommits now? (no, leave it to the arena)
425
+ // mi_segment_try_purge(segment,true,tld->stats);
426
+
427
+ const size_t size = mi_segment_size(segment);
428
+ const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
429
+
430
+ _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid);
431
+ }
432
+
433
+ /* -----------------------------------------------------------
434
+ Commit/Decommit ranges
435
+ ----------------------------------------------------------- */
436
+
437
+ static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) {
438
+ mi_assert_internal(_mi_ptr_segment(p + 1) == segment);
439
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
440
+ mi_commit_mask_create_empty(cm);
441
+ if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return;
442
+ const size_t segstart = mi_segment_info_size(segment);
443
+ const size_t segsize = mi_segment_size(segment);
444
+ if (p >= (uint8_t*)segment + segsize) return;
445
+
446
+ size_t pstart = (p - (uint8_t*)segment);
447
+ mi_assert_internal(pstart + size <= segsize);
448
+
449
+ size_t start;
450
+ size_t end;
451
+ if (conservative) {
452
+ // decommit conservative
453
+ start = _mi_align_up(pstart, MI_COMMIT_SIZE);
454
+ end = _mi_align_down(pstart + size, MI_COMMIT_SIZE);
455
+ mi_assert_internal(start >= segstart);
456
+ mi_assert_internal(end <= segsize);
457
+ }
458
+ else {
459
+ // commit liberal
460
+ start = _mi_align_down(pstart, MI_MINIMAL_COMMIT_SIZE);
461
+ end = _mi_align_up(pstart + size, MI_MINIMAL_COMMIT_SIZE);
462
+ }
463
+ if (pstart >= segstart && start < segstart) { // note: the mask is also calculated for an initial commit of the info area
464
+ start = segstart;
465
+ }
466
+ if (end > segsize) {
467
+ end = segsize;
468
+ }
469
+
470
+ mi_assert_internal(start <= pstart && (pstart + size) <= end);
471
+ mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0);
472
+ *start_p = (uint8_t*)segment + start;
473
+ *full_size = (end > start ? end - start : 0);
474
+ if (*full_size == 0) return;
475
+
476
+ size_t bitidx = start / MI_COMMIT_SIZE;
477
+ mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
478
+
479
+ size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0
480
+ if (bitidx + bitcount > MI_COMMIT_MASK_BITS) {
481
+ _mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size);
482
+ }
483
+ mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
484
+ mi_commit_mask_create(bitidx, bitcount, cm);
485
+ }
486
+
487
+ static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size) {
488
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
489
+
490
+ // commit liberal
491
+ uint8_t* start = NULL;
492
+ size_t full_size = 0;
493
+ mi_commit_mask_t mask;
494
+ mi_segment_commit_mask(segment, false /* conservative? */, p, size, &start, &full_size, &mask);
495
+ if (mi_commit_mask_is_empty(&mask) || full_size == 0) return true;
496
+
497
+ if (!mi_commit_mask_all_set(&segment->commit_mask, &mask)) {
498
+ // committing
499
+ bool is_zero = false;
500
+ mi_commit_mask_t cmask;
501
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
502
+ _mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
503
+ if (!_mi_os_commit(start, full_size, &is_zero)) return false;
504
+ mi_commit_mask_set(&segment->commit_mask, &mask);
505
+ }
506
+
507
+ // increase purge expiration when using part of delayed purges -- we assume more allocations are coming soon.
508
+ if (mi_commit_mask_any_set(&segment->purge_mask, &mask)) {
509
+ segment->purge_expire = _mi_clock_now() + mi_option_get(mi_option_purge_delay);
510
+ }
511
+
512
+ // always clear any delayed purges in our range (as they are either committed now)
513
+ mi_commit_mask_clear(&segment->purge_mask, &mask);
514
+ return true;
515
+ }
516
+
517
+ static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size) {
518
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
519
+ // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow
520
+ if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->purge_mask)) return true; // fully committed
521
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
522
+ return mi_segment_commit(segment, p, size);
523
+ }
524
+
525
+ static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size) {
526
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
527
+ if (!segment->allow_purge) return true;
528
+
529
+ // purge conservative
530
+ uint8_t* start = NULL;
531
+ size_t full_size = 0;
532
+ mi_commit_mask_t mask;
533
+ mi_segment_commit_mask(segment, true /* conservative? */, p, size, &start, &full_size, &mask);
534
+ if (mi_commit_mask_is_empty(&mask) || full_size==0) return true;
535
+
536
+ if (mi_commit_mask_any_set(&segment->commit_mask, &mask)) {
537
+ // purging
538
+ mi_assert_internal((void*)start != (void*)segment);
539
+ mi_assert_internal(segment->allow_decommit);
540
+ const bool decommitted = _mi_os_purge(start, full_size); // reset or decommit
541
+ if (decommitted) {
542
+ mi_commit_mask_t cmask;
543
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
544
+ _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for double counting
545
+ mi_commit_mask_clear(&segment->commit_mask, &mask);
546
+ }
547
+ }
548
+
549
+ // always clear any scheduled purges in our range
550
+ mi_commit_mask_clear(&segment->purge_mask, &mask);
551
+ return true;
552
+ }
553
+
554
+ static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size) {
555
+ if (!segment->allow_purge) return;
556
+
557
+ if (mi_option_get(mi_option_purge_delay) == 0) {
558
+ mi_segment_purge(segment, p, size);
559
+ }
560
+ else {
561
+ // register for future purge in the purge mask
562
+ uint8_t* start = NULL;
563
+ size_t full_size = 0;
564
+ mi_commit_mask_t mask;
565
+ mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask);
566
+ if (mi_commit_mask_is_empty(&mask) || full_size==0) return;
567
+
568
+ // update delayed commit
569
+ mi_assert_internal(segment->purge_expire > 0 || mi_commit_mask_is_empty(&segment->purge_mask));
570
+ mi_commit_mask_t cmask;
571
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only purge what is committed; span_free may try to decommit more
572
+ mi_commit_mask_set(&segment->purge_mask, &cmask);
573
+ mi_msecs_t now = _mi_clock_now();
574
+ if (segment->purge_expire == 0) {
575
+ // no previous purgess, initialize now
576
+ segment->purge_expire = now + mi_option_get(mi_option_purge_delay);
577
+ }
578
+ else if (segment->purge_expire <= now) {
579
+ // previous purge mask already expired
580
+ if (segment->purge_expire + mi_option_get(mi_option_purge_extend_delay) <= now) {
581
+ mi_segment_try_purge(segment, true);
582
+ }
583
+ else {
584
+ segment->purge_expire = now + mi_option_get(mi_option_purge_extend_delay); // (mi_option_get(mi_option_purge_delay) / 8); // wait a tiny bit longer in case there is a series of free's
585
+ }
586
+ }
587
+ else {
588
+ // previous purge mask is not yet expired, increase the expiration by a bit.
589
+ segment->purge_expire += mi_option_get(mi_option_purge_extend_delay);
590
+ }
591
+ }
592
+ }
593
+
594
+ static void mi_segment_try_purge(mi_segment_t* segment, bool force) {
595
+ if (!segment->allow_purge || segment->purge_expire == 0 || mi_commit_mask_is_empty(&segment->purge_mask)) return;
596
+ mi_msecs_t now = _mi_clock_now();
597
+ if (!force && now < segment->purge_expire) return;
598
+
599
+ mi_commit_mask_t mask = segment->purge_mask;
600
+ segment->purge_expire = 0;
601
+ mi_commit_mask_create_empty(&segment->purge_mask);
602
+
603
+ size_t idx;
604
+ size_t count;
605
+ mi_commit_mask_foreach(&mask, idx, count) {
606
+ // if found, decommit that sequence
607
+ if (count > 0) {
608
+ uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE);
609
+ size_t size = count * MI_COMMIT_SIZE;
610
+ mi_segment_purge(segment, p, size);
611
+ }
612
+ }
613
+ mi_commit_mask_foreach_end()
614
+ mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
615
+ }
616
+
617
+ // called from `mi_heap_collect_ex`
618
+ // this can be called per-page so it is important that try_purge has fast exit path
619
+ void _mi_segment_collect(mi_segment_t* segment, bool force) {
620
+ mi_segment_try_purge(segment, force);
621
+ }
622
+
623
+ /* -----------------------------------------------------------
624
+ Span free
625
+ ----------------------------------------------------------- */
626
+
627
+ static bool mi_segment_is_abandoned(mi_segment_t* segment) {
628
+ return (mi_atomic_load_relaxed(&segment->thread_id) == 0);
629
+ }
630
+
631
+ // note: can be called on abandoned segments
632
+ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, bool allow_purge, mi_segments_tld_t* tld) {
633
+ mi_assert_internal(slice_index < segment->slice_entries);
634
+ mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment)
635
+ ? NULL : mi_span_queue_for(slice_count,tld));
636
+ if (slice_count==0) slice_count = 1;
637
+ mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries);
638
+
639
+ // set first and last slice (the intermediates can be undetermined)
640
+ mi_slice_t* slice = &segment->slices[slice_index];
641
+ slice->slice_count = (uint32_t)slice_count;
642
+ mi_assert_internal(slice->slice_count == slice_count); // no overflow?
643
+ slice->slice_offset = 0;
644
+ if (slice_count > 1) {
645
+ mi_slice_t* last = slice + slice_count - 1;
646
+ mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment);
647
+ if (last > end) { last = end; }
648
+ last->slice_count = 0;
649
+ last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1));
650
+ last->block_size = 0;
651
+ }
652
+
653
+ // perhaps decommit
654
+ if (allow_purge) {
655
+ mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE);
656
+ }
657
+
658
+ // and push it on the free page queue (if it was not a huge page)
659
+ if (sq != NULL) mi_span_queue_push( sq, slice );
660
+ else slice->block_size = 0; // mark huge page as free anyways
661
+ }
662
+
663
+ /*
664
+ // called from reclaim to add existing free spans
665
+ static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld) {
666
+ mi_segment_t* segment = _mi_ptr_segment(slice);
667
+ mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0);
668
+ size_t slice_index = mi_slice_index(slice);
669
+ mi_segment_span_free(segment,slice_index,slice->slice_count,tld);
670
+ }
671
+ */
672
+
673
+ static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) {
674
+ mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->block_size==0);
675
+ mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE);
676
+ mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld);
677
+ mi_span_queue_delete(sq, slice);
678
+ }
679
+
680
+ // note: can be called on abandoned segments
681
+ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) {
682
+ mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0);
683
+ mi_segment_t* const segment = _mi_ptr_segment(slice);
684
+
685
+ // for huge pages, just mark as free but don't add to the queues
686
+ if (segment->kind == MI_SEGMENT_HUGE) {
687
+ // issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case)
688
+ mi_assert_internal((segment->used==0 && slice->block_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
689
+ slice->block_size = 0; // mark as free anyways
690
+ // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to
691
+ // avoid a possible cache miss (and the segment is about to be freed)
692
+ return slice;
693
+ }
694
+
695
+ // otherwise coalesce the span and add to the free span queues
696
+ const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment);
697
+ size_t slice_count = slice->slice_count;
698
+ mi_slice_t* next = slice + slice->slice_count;
699
+ mi_assert_internal(next <= mi_segment_slices_end(segment));
700
+ if (next < mi_segment_slices_end(segment) && next->block_size==0) {
701
+ // free next block -- remove it from free and merge
702
+ mi_assert_internal(next->slice_count > 0 && next->slice_offset==0);
703
+ slice_count += next->slice_count; // extend
704
+ if (!is_abandoned) { mi_segment_span_remove_from_queue(next, tld); }
705
+ }
706
+ if (slice > segment->slices) {
707
+ mi_slice_t* prev = mi_slice_first(slice - 1);
708
+ mi_assert_internal(prev >= segment->slices);
709
+ if (prev->block_size==0) {
710
+ // free previous slice -- remove it from free and merge
711
+ mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0);
712
+ slice_count += prev->slice_count;
713
+ slice->slice_count = 0;
714
+ slice->slice_offset = (uint32_t)((uint8_t*)slice - (uint8_t*)prev); // set the slice offset for `segment_force_abandon` (in case the previous free block is very large).
715
+ if (!is_abandoned) { mi_segment_span_remove_from_queue(prev, tld); }
716
+ slice = prev;
717
+ }
718
+ }
719
+
720
+ // and add the new free page
721
+ mi_segment_span_free(segment, mi_slice_index(slice), slice_count, true, tld);
722
+ return slice;
723
+ }
724
+
725
+
726
+
727
+ /* -----------------------------------------------------------
728
+ Page allocation
729
+ ----------------------------------------------------------- */
730
+
731
+ // Note: may still return NULL if committing the memory failed
732
+ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count) {
733
+ mi_assert_internal(slice_index < segment->slice_entries);
734
+ mi_slice_t* const slice = &segment->slices[slice_index];
735
+ mi_assert_internal(slice->block_size==0 || slice->block_size==1);
736
+
737
+ // commit before changing the slice data
738
+ if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE)) {
739
+ return NULL; // commit failed!
740
+ }
741
+
742
+ // convert the slices to a page
743
+ slice->slice_offset = 0;
744
+ slice->slice_count = (uint32_t)slice_count;
745
+ mi_assert_internal(slice->slice_count == slice_count);
746
+ const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE;
747
+ slice->block_size = bsize;
748
+ mi_page_t* page = mi_slice_to_page(slice);
749
+ mi_assert_internal(mi_page_block_size(page) == bsize);
750
+
751
+ // set slice back pointers for the first MI_MAX_SLICE_OFFSET_COUNT entries
752
+ size_t extra = slice_count-1;
753
+ if (extra > MI_MAX_SLICE_OFFSET_COUNT) extra = MI_MAX_SLICE_OFFSET_COUNT;
754
+ if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than avaiable entries in the segment->slices
755
+
756
+ mi_slice_t* slice_next = slice + 1;
757
+ for (size_t i = 1; i <= extra; i++, slice_next++) {
758
+ slice_next->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i);
759
+ slice_next->slice_count = 0;
760
+ slice_next->block_size = 1;
761
+ }
762
+
763
+ // and also for the last one (if not set already) (the last one is needed for coalescing and for large alignments)
764
+ // note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543)
765
+ mi_slice_t* last = slice + slice_count - 1;
766
+ mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment);
767
+ if (last > end) last = end;
768
+ if (last > slice) {
769
+ last->slice_offset = (uint32_t)(sizeof(mi_slice_t) * (last - slice));
770
+ last->slice_count = 0;
771
+ last->block_size = 1;
772
+ }
773
+
774
+ // and initialize the page
775
+ page->is_committed = true;
776
+ page->is_huge = (segment->kind == MI_SEGMENT_HUGE);
777
+ segment->used++;
778
+ return page;
779
+ }
780
+
781
+ static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) {
782
+ mi_assert_internal(_mi_ptr_segment(slice) == segment);
783
+ mi_assert_internal(slice->slice_count >= slice_count);
784
+ mi_assert_internal(slice->block_size > 0); // no more in free queue
785
+ if (slice->slice_count <= slice_count) return;
786
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
787
+ size_t next_index = mi_slice_index(slice) + slice_count;
788
+ size_t next_count = slice->slice_count - slice_count;
789
+ mi_segment_span_free(segment, next_index, next_count, false /* don't purge left-over part */, tld);
790
+ slice->slice_count = (uint32_t)slice_count;
791
+ }
792
+
793
+ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) {
794
+ mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX);
795
+ // search from best fit up
796
+ mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld);
797
+ if (slice_count == 0) slice_count = 1;
798
+ while (sq <= &tld->spans[MI_SEGMENT_BIN_MAX]) {
799
+ for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) {
800
+ if (slice->slice_count >= slice_count) {
801
+ // found one
802
+ mi_segment_t* segment = _mi_ptr_segment(slice);
803
+ if (_mi_arena_memid_is_suitable(segment->memid, req_arena_id)) {
804
+ // found a suitable page span
805
+ mi_span_queue_delete(sq, slice);
806
+
807
+ if (slice->slice_count > slice_count) {
808
+ mi_segment_slice_split(segment, slice, slice_count, tld);
809
+ }
810
+ mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0);
811
+ mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count);
812
+ if (page == NULL) {
813
+ // commit failed; return NULL but first restore the slice
814
+ mi_segment_span_free_coalesce(slice, tld);
815
+ return NULL;
816
+ }
817
+ return page;
818
+ }
819
+ }
820
+ }
821
+ sq++;
822
+ }
823
+ // could not find a page..
824
+ return NULL;
825
+ }
826
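The function above searches the per-size span queues starting at the best-fitting bin and moving to larger bins, splitting an oversized span before turning it into a page. Below is a minimal standalone sketch of that search order, assuming a simplified power-of-two binning; the `toy_*` names are hypothetical and this is not mimalloc code.

#include <stddef.h>
#include <stdio.h>

#define TOY_BIN_MAX 4

typedef struct toy_span_s {
  size_t slice_count;
  struct toy_span_s* next;
} toy_span_t;

/* queues[b] holds free spans of at least 2^b slices (a simplified binning) */
static toy_span_t* toy_find_span(toy_span_t* queues[], size_t needed) {
  size_t bin = 0;
  while (((size_t)1 << bin) < needed && bin < TOY_BIN_MAX) bin++;
  for (; bin <= TOY_BIN_MAX; bin++) {            /* best-fit bin first, then larger bins */
    for (toy_span_t* s = queues[bin]; s != NULL; s = s->next) {
      if (s->slice_count >= needed) return s;    /* caller would split off the excess */
    }
  }
  return NULL;                                   /* no suitable span: allocate a fresh segment */
}

int main(void) {
  toy_span_t small = { 2, NULL };
  toy_span_t big   = { 9, NULL };
  toy_span_t* queues[TOY_BIN_MAX + 1] = { NULL, &small, NULL, &big, NULL };
  toy_span_t* found = toy_find_span(queues, 3);  /* skips the 2-slice span, finds the 9-slice one */
  printf("found span of %zu slices\n", found ? found->slice_count : (size_t)0);
  return 0;
}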
+
827
+
828
+ /* -----------------------------------------------------------
829
+ Segment allocation
830
+ ----------------------------------------------------------- */
831
+
832
+ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id,
833
+ size_t* psegment_slices, size_t* pinfo_slices,
834
+ bool commit, mi_segments_tld_t* tld)
835
+
836
+ {
837
+ mi_memid_t memid;
838
+ bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
839
+ size_t align_offset = 0;
840
+ size_t alignment = MI_SEGMENT_ALIGN;
841
+
842
+ if (page_alignment > 0) {
843
+ // mi_assert_internal(huge_page != NULL);
844
+ mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN);
845
+ alignment = page_alignment;
846
+ const size_t info_size = (*pinfo_slices) * MI_SEGMENT_SLICE_SIZE;
847
+ align_offset = _mi_align_up( info_size, MI_SEGMENT_ALIGN );
848
+ const size_t extra = align_offset - info_size;
849
+ // recalculate due to potential guard pages
850
+ *psegment_slices = mi_segment_calculate_slices(required + extra, pinfo_slices);
851
+ mi_assert_internal(*psegment_slices > 0 && *psegment_slices <= UINT32_MAX);
852
+ }
853
+
854
+ const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
855
+ mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid);
856
+ if (segment == NULL) {
857
+ return NULL; // failed to allocate
858
+ }
859
+
860
+ // ensure metadata part of the segment is committed
861
+ mi_commit_mask_t commit_mask;
862
+ if (memid.initially_committed) {
863
+ mi_commit_mask_create_full(&commit_mask);
864
+ }
865
+ else {
866
+ // at least commit the info slices
867
+ const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
868
+ mi_assert_internal(commit_needed>0);
869
+ mi_commit_mask_create(0, commit_needed, &commit_mask);
870
+ mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE);
871
+ if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL)) {
872
+ _mi_arena_free(segment,segment_size,0,memid);
873
+ return NULL;
874
+ }
875
+ }
876
+ mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
877
+
878
+ segment->memid = memid;
879
+ segment->allow_decommit = !memid.is_pinned;
880
+ segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0);
881
+ segment->segment_size = segment_size;
882
+ segment->subproc = tld->subproc;
883
+ segment->commit_mask = commit_mask;
884
+ segment->purge_expire = 0;
885
+ mi_commit_mask_create_empty(&segment->purge_mask);
886
+
887
+ mi_segments_track_size((long)(segment_size), tld);
888
+ _mi_segment_map_allocated_at(segment);
889
+ return segment;
890
+ }
891
+
892
+
893
+ // Allocate a segment from the OS, aligned to `MI_SEGMENT_SIZE`.
894
+ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_page_t** huge_page)
895
+ {
896
+ mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
897
+
898
+ // calculate needed sizes first
899
+ size_t info_slices;
900
+ size_t segment_slices = mi_segment_calculate_slices(required, &info_slices);
901
+ mi_assert_internal(segment_slices > 0 && segment_slices <= UINT32_MAX);
902
+
903
+ // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little)
904
+ const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems
905
+ _mi_current_thread_count() > 1 && // do not delay for the first N threads
906
+ tld->peak_count < (size_t)mi_option_get(mi_option_eager_commit_delay));
907
+ const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
908
+ bool commit = eager || (required > 0);
909
+
910
+ // Allocate the segment from the OS
911
+ mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id,
912
+ &segment_slices, &info_slices, commit, tld);
913
+ if (segment == NULL) return NULL;
914
+
915
+ // zero the segment info? -- not always needed as it may be zero initialized from the OS
916
+ if (!segment->memid.initially_zero) {
917
+ ptrdiff_t ofs = offsetof(mi_segment_t, next);
918
+ size_t prefix = offsetof(mi_segment_t, slices) - ofs;
919
+ size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more
920
+ _mi_memzero((uint8_t*)segment + ofs, zsize);
921
+ }
922
+
923
+ // initialize the rest of the segment info
924
+ const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
925
+ segment->segment_slices = segment_slices;
926
+ segment->segment_info_slices = info_slices;
927
+ segment->thread_id = _mi_thread_id();
928
+ segment->cookie = _mi_ptr_cookie(segment);
929
+ segment->slice_entries = slice_entries;
930
+ segment->kind = (required == 0 ? MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE);
931
+
932
+ // _mi_memzero(segment->slices, sizeof(mi_slice_t)*(info_slices+1));
933
+ _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment));
934
+
935
+ // set up guard pages
936
+ size_t guard_slices = 0;
937
+ if (MI_SECURE>0) {
938
+ // in secure mode, we set up a protected page in between the segment info
939
+ // and the page data, and at the end of the segment.
940
+ size_t os_pagesize = _mi_os_page_size();
941
+ _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
942
+ uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
943
+ mi_segment_ensure_committed(segment, end, os_pagesize);
944
+ _mi_os_protect(end, os_pagesize);
945
+ if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-(
946
+ guard_slices = 1;
947
+ }
948
+
949
+ // reserve first slices for segment info
950
+ mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices);
951
+ mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance
952
+ mi_assert_internal(segment->used == 1);
953
+ segment->used = 0; // don't count our internal slices towards usage
954
+
955
+ // initialize initial free pages
956
+ if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page
957
+ mi_assert_internal(huge_page==NULL);
958
+ mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, false /* don't purge */, tld);
959
+ }
960
+ else {
961
+ mi_assert_internal(huge_page!=NULL);
962
+ mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
963
+ mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask));
964
+ *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices);
965
+ mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance
966
+ }
967
+
968
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
969
+ return segment;
970
+ }
971
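Putting the pieces of `mi_segment_alloc` together, a fresh segment consists of: the first `info_slices` slices holding the segment metadata (allocated through `mi_segment_span_allocate` and then excluded from the `used` count); when MI_SECURE > 0, a protected OS page at the end of that info area and another at the very end of the segment (which costs the last slice); and the remaining slices, which become either one free span (normal segments) or the single huge page (huge segments).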
+
972
+
973
+ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
974
+ MI_UNUSED(force);
975
+ mi_assert_internal(segment != NULL);
976
+ mi_assert_internal(segment->next == NULL);
977
+ mi_assert_internal(segment->used == 0);
978
+
979
+ // in `mi_segment_force_abandon` we set this to true to ensure the segment's memory stays valid
980
+ if (segment->dont_free) return;
981
+
982
+ // Remove the free pages
983
+ mi_slice_t* slice = &segment->slices[0];
984
+ const mi_slice_t* end = mi_segment_slices_end(segment);
985
+ #if MI_DEBUG>1
986
+ size_t page_count = 0;
987
+ #endif
988
+ while (slice < end) {
989
+ mi_assert_internal(slice->slice_count > 0);
990
+ mi_assert_internal(slice->slice_offset == 0);
991
+ mi_assert_internal(mi_slice_index(slice)==0 || slice->block_size == 0); // no more used pages ..
992
+ if (slice->block_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
993
+ mi_segment_span_remove_from_queue(slice, tld);
994
+ }
995
+ #if MI_DEBUG>1
996
+ page_count++;
997
+ #endif
998
+ slice = slice + slice->slice_count;
999
+ }
1000
+ mi_assert_internal(page_count == 2); // first page is allocated by the segment itself
1001
+
1002
+ // stats
1003
+ // _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment));
1004
+
1005
+ // return it to the OS
1006
+ mi_segment_os_free(segment, tld);
1007
+ }
1008
+
1009
+
1010
+ /* -----------------------------------------------------------
1011
+ Page Free
1012
+ ----------------------------------------------------------- */
1013
+
1014
+ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);
1015
+
1016
+ // note: can be called on abandoned pages
1017
+ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) {
1018
+ mi_assert_internal(page->block_size > 0);
1019
+ mi_assert_internal(mi_page_all_free(page));
1020
+ mi_segment_t* segment = _mi_ptr_segment(page);
1021
+ mi_assert_internal(segment->used > 0);
1022
+
1023
+ size_t inuse = page->capacity * mi_page_block_size(page);
1024
+ _mi_stat_decrease(&tld->stats->page_committed, inuse);
1025
+ _mi_stat_decrease(&tld->stats->pages, 1);
1026
+ _mi_stat_decrease(&tld->stats->page_bins[_mi_page_bin(page)], 1);
1027
+
1028
+ // reset the page memory to reduce memory pressure?
1029
+ if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) {
1030
+ size_t psize;
1031
+ uint8_t* start = _mi_segment_page_start(segment, page, &psize);
1032
+ _mi_os_reset(start, psize);
1033
+ }
1034
+
1035
+ // zero the page data, but not the segment fields and heap tag
1036
+ page->is_zero_init = false;
1037
+ uint8_t heap_tag = page->heap_tag;
1038
+ ptrdiff_t ofs = offsetof(mi_page_t, capacity);
1039
+ _mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs);
1040
+ page->block_size = 1;
1041
+ page->heap_tag = heap_tag;
1042
+
1043
+ // and free it
1044
+ mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld);
1045
+ segment->used--;
1046
+ // cannot assert segment valid as it is called during reclaim
1047
+ // mi_assert_expensive(mi_segment_is_valid(segment, tld));
1048
+ return slice;
1049
+ }
1050
+
1051
+ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
1052
+ {
1053
+ mi_assert(page != NULL);
1054
+ mi_segment_t* segment = _mi_page_segment(page);
1055
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
1056
+
1057
+ // mark it as free now
1058
+ mi_segment_page_clear(page, tld);
1059
+ mi_assert_expensive(mi_segment_is_valid(segment, tld));
1060
+
1061
+ if (segment->used == 0) {
1062
+ // no more used pages; remove from the free list and free the segment
1063
+ mi_segment_free(segment, force, tld);
1064
+ }
1065
+ else if (segment->used == segment->abandoned) {
1066
+ // only abandoned pages; remove from free list and abandon
1067
+ mi_segment_abandon(segment,tld);
1068
+ }
1069
+ else {
1070
+ // perform delayed purges
1071
+ mi_segment_try_purge(segment, false /* force? */);
1072
+ }
1073
+ }
1074
+
1075
+
1076
+ /* -----------------------------------------------------------
1077
+ Abandonment
1078
+
1079
+ When threads terminate, they can leave segments with
1080
+ live blocks (reachable through other threads). Such segments
1081
+ are "abandoned" and will be reclaimed by other threads to
1082
+ reuse their pages and/or free them eventually. The
1083
+ `thread_id` of such segments is 0.
1084
+
1085
+ When a block is freed in an abandoned segment, the segment
1086
+ is reclaimed into that thread.
1087
+
1088
+ Moreover, if threads are looking for a fresh segment, they
1089
+ will first consider abandoned segments -- these can be found
1090
+ by scanning the arena memory
1091
+ (segments outside arena memory are only reclaimed by a free).
1092
+ ----------------------------------------------------------- */
1093
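The comment above describes abandonment in terms of the segment's `thread_id`: storing 0 marks the segment as abandoned, and a reclaiming thread installs its own id. The sketch below models only that ownership handshake with C11 atomics, using hypothetical `toy_*` names; in the code that follows, the authoritative claim additionally goes through `_mi_arena_segment_mark_abandoned`/`_mi_arena_segment_clear_abandoned`.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct toy_segment_s {
  _Atomic(uintptr_t) thread_id;   /* 0 means "abandoned" (no owning thread) */
} toy_segment_t;

/* the owning thread terminates (or abandons the segment): drop ownership */
static void toy_abandon(toy_segment_t* s) {
  atomic_store_explicit(&s->thread_id, 0, memory_order_release);
}

/* another thread tries to reclaim: only one CAS can win ownership */
static bool toy_try_reclaim(toy_segment_t* s, uintptr_t my_tid) {
  uintptr_t expected = 0;
  return atomic_compare_exchange_strong_explicit(
      &s->thread_id, &expected, my_tid,
      memory_order_acq_rel, memory_order_acquire);
}

int main(void) {
  toy_segment_t s = { 42 };                                /* owned by (hypothetical) thread 42 */
  toy_abandon(&s);                                         /* owner gives it up */
  printf("first reclaim: %d\n", toy_try_reclaim(&s, 7));   /* prints 1: thread 7 now owns it */
  printf("second reclaim: %d\n", toy_try_reclaim(&s, 9));  /* prints 0: already owned */
  return 0;
}

Because at most one compare-and-swap can succeed, two threads racing to reclaim the same abandoned segment cannot both become its owner.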
+
1094
+ /* -----------------------------------------------------------
1095
+ Abandon segment/page
1096
+ ----------------------------------------------------------- */
1097
+
1098
+ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
1099
+ mi_assert_internal(segment->used == segment->abandoned);
1100
+ mi_assert_internal(segment->used > 0);
1101
+ mi_assert_internal(segment->abandoned_visits == 0);
1102
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
1103
+
1104
+ // remove the free pages from the free page queues
1105
+ mi_slice_t* slice = &segment->slices[0];
1106
+ const mi_slice_t* end = mi_segment_slices_end(segment);
1107
+ while (slice < end) {
1108
+ mi_assert_internal(slice->slice_count > 0);
1109
+ mi_assert_internal(slice->slice_offset == 0);
1110
+ if (slice->block_size == 0) { // a free page
1111
+ mi_segment_span_remove_from_queue(slice,tld);
1112
+ slice->block_size = 0; // but keep it free
1113
+ }
1114
+ slice = slice + slice->slice_count;
1115
+ }
1116
+
1117
+ // perform delayed decommits (forcing is much slower on mstress)
1118
+ // Only abandoned segments in arena memory can be reclaimed without a free
1119
+ // so if a segment is not from an arena we force purge here to be conservative.
1120
+ const bool force_purge = (segment->memid.memkind != MI_MEM_ARENA) || mi_option_is_enabled(mi_option_abandoned_page_purge);
1121
+ mi_segment_try_purge(segment, force_purge);
1122
+
1123
+ // all pages in the segment are abandoned; add it to the abandoned list
1124
+ _mi_stat_increase(&tld->stats->segments_abandoned, 1);
1125
+ mi_segments_track_size(-((long)mi_segment_size(segment)), tld);
1126
+ segment->thread_id = 0;
1127
+ segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned
1128
+ if (segment->was_reclaimed) {
1129
+ tld->reclaim_count--;
1130
+ segment->was_reclaimed = false;
1131
+ }
1132
+ _mi_arena_segment_mark_abandoned(segment);
1133
+ }
1134
+
1135
+ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
1136
+ mi_assert(page != NULL);
1137
+ mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
1138
+ mi_assert_internal(mi_page_heap(page) == NULL);
1139
+ mi_segment_t* segment = _mi_page_segment(page);
1140
+
1141
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
1142
+ segment->abandoned++;
1143
+
1144
+ _mi_stat_increase(&tld->stats->pages_abandoned, 1);
1145
+ mi_assert_internal(segment->abandoned <= segment->used);
1146
+ if (segment->used == segment->abandoned) {
1147
+ // all pages are abandoned, abandon the entire segment
1148
+ mi_segment_abandon(segment, tld);
1149
+ }
1150
+ }
1151
+
1152
+ /* -----------------------------------------------------------
1153
+ Reclaim abandoned pages
1154
+ ----------------------------------------------------------- */
1155
+
1156
+ static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) {
1157
+ mi_slice_t* slice = &segment->slices[0];
1158
+ *end = mi_segment_slices_end(segment);
1159
+ mi_assert_internal(slice->slice_count>0 && slice->block_size>0); // segment allocated page
1160
+ slice = slice + slice->slice_count; // skip the first segment allocated page
1161
+ return slice;
1162
+ }
1163
+
1164
+ // Possibly free pages and check if free space is available
1165
+ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld)
1166
+ {
1167
+ mi_assert_internal(mi_segment_is_abandoned(segment));
1168
+ bool has_page = false;
1169
+
1170
+ // for all slices
1171
+ const mi_slice_t* end;
1172
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
1173
+ while (slice < end) {
1174
+ mi_assert_internal(slice->slice_count > 0);
1175
+ mi_assert_internal(slice->slice_offset == 0);
1176
+ if (mi_slice_is_used(slice)) { // used page
1177
+ // ensure used count is up to date and collect potential concurrent frees
1178
+ mi_page_t* const page = mi_slice_to_page(slice);
1179
+ _mi_page_free_collect(page, false);
1180
+ if (mi_page_all_free(page)) {
1181
+ // if this page is all free now, free it without adding to any queues (yet)
1182
+ mi_assert_internal(page->next == NULL && page->prev==NULL);
1183
+ _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
1184
+ segment->abandoned--;
1185
+ slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalescing!
1186
+ mi_assert_internal(!mi_slice_is_used(slice));
1187
+ if (slice->slice_count >= slices_needed) {
1188
+ has_page = true;
1189
+ }
1190
+ }
1191
+ else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) {
1192
+ // a page has available free blocks of the right size
1193
+ has_page = true;
1194
+ }
1195
+ }
1196
+ else {
1197
+ // empty span
1198
+ if (slice->slice_count >= slices_needed) {
1199
+ has_page = true;
1200
+ }
1201
+ }
1202
+ slice = slice + slice->slice_count;
1203
+ }
1204
+ return has_page;
1205
+ }
1206
+
1207
+ // Reclaim an abandoned segment; returns NULL if the segment was freed
1208
+ // sets `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
1209
+ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
1210
+ if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; }
1211
+ // can be 0 still with abandoned_next, or already a thread id for segments outside an arena that are reclaimed on a free.
1212
+ mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0 || mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id());
1213
+ mi_assert_internal(segment->subproc == heap->tld->segments.subproc); // only reclaim within the same subprocess
1214
+ mi_atomic_store_release(&segment->thread_id, _mi_thread_id());
1215
+ segment->abandoned_visits = 0;
1216
+ segment->was_reclaimed = true;
1217
+ tld->reclaim_count++;
1218
+ mi_segments_track_size((long)mi_segment_size(segment), tld);
1219
+ mi_assert_internal(segment->next == NULL);
1220
+ _mi_stat_decrease(&tld->stats->segments_abandoned, 1);
1221
+
1222
+ // for all slices
1223
+ const mi_slice_t* end;
1224
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
1225
+ while (slice < end) {
1226
+ mi_assert_internal(slice->slice_count > 0);
1227
+ mi_assert_internal(slice->slice_offset == 0);
1228
+ if (mi_slice_is_used(slice)) {
1229
+ // in use: reclaim the page in our heap
1230
+ mi_page_t* page = mi_slice_to_page(slice);
1231
+ mi_assert_internal(page->is_committed);
1232
+ mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
1233
+ mi_assert_internal(mi_page_heap(page) == NULL);
1234
+ mi_assert_internal(page->next == NULL && page->prev==NULL);
1235
+ _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
1236
+ segment->abandoned--;
1237
+ // get the target heap for this thread which has a matching heap tag (so we reclaim into a matching heap)
1238
+ mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag); // allow custom heaps to separate objects
1239
+ if (target_heap == NULL) {
1240
+ target_heap = heap;
1241
+ _mi_error_message(EFAULT, "page with tag %u cannot be reclaimed by a heap with the same tag (using heap tag %u instead)\n", page->heap_tag, heap->tag );
1242
+ }
1243
+ // associate the heap with this page, and allow heap thread delayed free again.
1244
+ mi_page_set_heap(page, target_heap);
1245
+ _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
1246
+ _mi_page_free_collect(page, false); // ensure used count is up to date
1247
+ if (mi_page_all_free(page)) {
1248
+ // if everything free by now, free the page
1249
+ slice = mi_segment_page_clear(page, tld); // set slice again due to coalescing
1250
+ }
1251
+ else {
1252
+ // otherwise reclaim it into the heap
1253
+ _mi_page_reclaim(target_heap, page);
1254
+ if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page) && heap == target_heap) {
1255
+ if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
1256
+ }
1257
+ }
1258
+ }
1259
+ else {
1260
+ // the span is free, add it to our page queues
1261
+ slice = mi_segment_span_free_coalesce(slice, tld); // set slice again due to coalescing
1262
+ }
1263
+ mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0);
1264
+ slice = slice + slice->slice_count;
1265
+ }
1266
+
1267
+ mi_assert(segment->abandoned == 0);
1268
+ mi_assert_expensive(mi_segment_is_valid(segment, tld));
1269
+ if (segment->used == 0) { // due to page_clear
1270
+ mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed));
1271
+ mi_segment_free(segment, false, tld);
1272
+ return NULL;
1273
+ }
1274
+ else {
1275
+ return segment;
1276
+ }
1277
+ }
1278
+
1279
+
1280
+ // attempt to reclaim a particular segment (called from the multi-threaded free path `alloc.c:mi_free_block_mt`)
1281
+ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
1282
+ if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned
1283
+ if (segment->subproc != heap->tld->segments.subproc) return false; // only reclaim within the same subprocess
1284
+ if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false; // don't reclaim between exclusive and non-exclusive arenas
1285
+ const long target = _mi_option_get_fast(mi_option_target_segments_per_thread);
1286
+ if (target > 0 && (size_t)target <= heap->tld->segments.count) return false; // don't reclaim if going above the target count
1287
+
1288
+ // don't reclaim more from a `free` call than half the current segments
1289
+ // this is to prevent a purely freeing thread from starting to own too many segments
1290
+ // (but not for out-of-arena segments as that is the main way to be reclaimed for those)
1291
+ if (segment->memid.memkind == MI_MEM_ARENA && heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) {
1292
+ return false;
1293
+ }
1294
+ if (_mi_arena_segment_clear_abandoned(segment)) { // atomically unabandon
1295
+ mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments);
1296
+ mi_assert_internal(res == segment);
1297
+ return (res != NULL);
1298
+ }
1299
+ return false;
1300
+ }
1301
+
1302
+ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
1303
+ mi_segment_t* segment;
1304
+ mi_arena_field_cursor_t current;
1305
+ _mi_arena_field_cursor_init(heap, tld->subproc, true /* visit all, blocking */, &current);
1306
+ while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
1307
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
1308
+ }
1309
+ _mi_arena_field_cursor_done(&current);
1310
+ }
1311
+
1312
+
1313
+ static bool segment_count_is_within_target(mi_segments_tld_t* tld, size_t* ptarget) {
1314
+ const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 0, 1024);
1315
+ if (ptarget != NULL) { *ptarget = target; }
1316
+ return (target == 0 || tld->count < target);
1317
+ }
1318
+
1319
+ static long mi_segment_get_reclaim_tries(mi_segments_tld_t* tld) {
1320
+ // limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries.
1321
+ const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
1322
+ if (perc <= 0) return 0;
1323
+ const size_t total_count = mi_atomic_load_relaxed(&tld->subproc->abandoned_count);
1324
+ if (total_count == 0) return 0;
1325
+ const size_t relative_count = (total_count > 10000 ? (total_count / 100) * perc : (total_count * perc) / 100); // avoid overflow
1326
+ long max_tries = (long)(relative_count <= 1 ? 1 : (relative_count > 1024 ? 1024 : relative_count));
1327
+ if (max_tries < 8 && total_count > 8) { max_tries = 8; }
1328
+ return max_tries;
1329
+ }
1330
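As a worked example of the clamping above, with the default 10% for `mi_option_max_segment_reclaim`: 500 abandoned segments give a relative count of 50, so up to 50 tries; 30 abandoned segments give 3, which is raised to the minimum of 8 (since more than 8 are abandoned); and 200000 abandoned segments would give 20000, which is clamped to the 1024 maximum.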
+
1331
+ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld)
1332
+ {
1333
+ *reclaimed = false;
1334
+ long max_tries = mi_segment_get_reclaim_tries(tld);
1335
+ if (max_tries <= 0) return NULL;
1336
+
1337
+ mi_segment_t* result = NULL;
1338
+ mi_segment_t* segment = NULL;
1339
+ mi_arena_field_cursor_t current;
1340
+ _mi_arena_field_cursor_init(heap, tld->subproc, false /* non-blocking */, &current);
1341
+ while (segment_count_is_within_target(tld,NULL) && (max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
1342
+ {
1343
+ mi_assert(segment->subproc == heap->tld->segments.subproc); // cursor only visits segments in our sub-process
1344
+ segment->abandoned_visits++;
1345
+ // todo: should we respect numa affinity for abandoned reclaim? perhaps only for the first visit?
1346
+ // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries
1347
+ // Perhaps we can skip non-suitable ones in a better way?
1348
+ bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
1349
+ bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees)
1350
+ if (segment->used == 0) {
1351
+ // free the segment (by forced reclaim) to make it available to other threads.
1352
+ // note1: we prefer to free a segment as that might lead to reclaiming another
1353
+ // segment that is still partially used.
1354
+ // note2: we could in principle optimize this by skipping reclaim and directly
1355
+ // freeing, but that would temporarily violate some invariants
1356
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
1357
+ }
1358
+ else if (has_page && is_suitable) {
1359
+ // found a large enough free span, or a page of the right block_size with free space
1360
+ // we return the result of reclaim (which is usually `segment`) as it might free
1361
+ // the segment due to concurrent frees (in which case `NULL` is returned).
1362
+ result = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
1363
+ break;
1364
+ }
1365
+ else if (segment->abandoned_visits > 3 && is_suitable) {
1366
+ // always reclaim after more than 3 visits to limit the abandoned segment count.
1367
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
1368
+ }
1369
+ else {
1370
+ // otherwise, push it back on the visited list so it does not get looked at again too quickly
1371
+ max_tries++; // don't count this as a try since it was not suitable
1372
+ mi_segment_try_purge(segment, false /* true force? */); // force purge if needed as we may not visit soon again
1373
+ _mi_arena_segment_mark_abandoned(segment);
1374
+ }
1375
+ }
1376
+ _mi_arena_field_cursor_done(&current);
1377
+ return result;
1378
+ }
1379
+
1380
+ // collect abandoned segments
1381
+ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
1382
+ {
1383
+ mi_segment_t* segment;
1384
+ mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, tld->subproc, force /* blocking? */, &current);
1385
+ long max_tries = (force ? (long)mi_atomic_load_relaxed(&tld->subproc->abandoned_count) : 1024); // limit latency
1386
+ while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL)) {
1387
+ mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees)
1388
+ if (segment->used == 0) {
1389
+ // free the segment (by forced reclaim) to make it available to other threads.
1390
+ // note: we could in principle optimize this by skipping reclaim and directly
1391
+ // freeing, but that would temporarily violate some invariants
1392
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
1393
+ }
1394
+ else {
1395
+ // otherwise, purge if needed and push on the visited list
1396
+ // note: forced purge can be expensive if many threads are destroyed/created as in mstress.
1397
+ mi_segment_try_purge(segment, force);
1398
+ _mi_arena_segment_mark_abandoned(segment);
1399
+ }
1400
+ }
1401
+ _mi_arena_field_cursor_done(&current);
1402
+ }
1403
+
1404
+ /* -----------------------------------------------------------
1405
+ Force abandon a segment that is in use by our thread
1406
+ ----------------------------------------------------------- */
1407
+
1408
+ // force abandon a segment
1409
+ static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* tld)
1410
+ {
1411
+ mi_assert_internal(!mi_segment_is_abandoned(segment));
1412
+ mi_assert_internal(!segment->dont_free);
1413
+
1414
+ // ensure the segment does not get freed underneath us (so we can check if a page has been freed in `mi_page_force_abandon`)
1415
+ segment->dont_free = true;
1416
+
1417
+ // for all slices
1418
+ const mi_slice_t* end;
1419
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
1420
+ while (slice < end) {
1421
+ mi_assert_internal(slice->slice_count > 0);
1422
+ mi_assert_internal(slice->slice_offset == 0);
1423
+ if (mi_slice_is_used(slice)) {
1424
+ // ensure used count is up to date and collect potential concurrent frees
1425
+ mi_page_t* const page = mi_slice_to_page(slice);
1426
+ _mi_page_free_collect(page, false);
1427
+ {
1428
+ // abandon the page if it is still in-use (this will free it if possible as well)
1429
+ mi_assert_internal(segment->used > 0);
1430
+ if (segment->used == segment->abandoned+1) {
1431
+ // the last page.. abandon and return as the segment will be abandoned after this
1432
+ // and we should no longer access it.
1433
+ segment->dont_free = false;
1434
+ _mi_page_force_abandon(page);
1435
+ return;
1436
+ }
1437
+ else {
1438
+ // abandon and continue
1439
+ _mi_page_force_abandon(page);
1440
+ // it might be freed, reset the slice (note: relies on coalesce setting the slice_offset)
1441
+ slice = mi_slice_first(slice);
1442
+ }
1443
+ }
1444
+ }
1445
+ slice = slice + slice->slice_count;
1446
+ }
1447
+ segment->dont_free = false;
1448
+ mi_assert(segment->used == segment->abandoned);
1449
+ mi_assert(segment->used == 0);
1450
+ if (segment->used == 0) { // paranoia
1451
+ // all free now
1452
+ mi_segment_free(segment, false, tld);
1453
+ }
1454
+ else {
1455
+ // perform delayed purges
1456
+ mi_segment_try_purge(segment, false /* force? */);
1457
+ }
1458
+ }
1459
+
1460
+
1461
+ // try to abandon segments.
1462
+ // this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use.
1463
+ static void mi_segments_try_abandon_to_target(mi_heap_t* heap, size_t target, mi_segments_tld_t* tld) {
1464
+ if (target <= 1) return;
1465
+ const size_t min_target = (target > 4 ? (target*3)/4 : target); // 75%
1466
+ // todo: we should maintain a list of segments per thread; for now, only consider segments from the heap full pages
1467
+ for (int i = 0; i < 64 && tld->count >= min_target; i++) {
1468
+ mi_page_t* page = heap->pages[MI_BIN_FULL].first;
1469
+ while (page != NULL && mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX) {
1470
+ page = page->next;
1471
+ }
1472
+ if (page==NULL) {
1473
+ break;
1474
+ }
1475
+ mi_segment_t* segment = _mi_page_segment(page);
1476
+ mi_segment_force_abandon(segment, tld);
1477
+ mi_assert_internal(page != heap->pages[MI_BIN_FULL].first); // as it is just abandoned
1478
+ }
1479
+ }
1480
+
1481
+ // try to abandon segments.
1482
+ // this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use.
1483
+ static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
1484
+ // we call this when we are about to add a fresh segment so we should be under our target segment count.
1485
+ size_t target = 0;
1486
+ if (segment_count_is_within_target(tld, &target)) return;
1487
+ mi_segments_try_abandon_to_target(heap, target, tld);
1488
+ }
1489
+
1490
+ void mi_collect_reduce(size_t target_size) mi_attr_noexcept {
1491
+ mi_collect(true);
1492
+ mi_heap_t* heap = mi_heap_get_default();
1493
+ mi_segments_tld_t* tld = &heap->tld->segments;
1494
+ size_t target = target_size / MI_SEGMENT_SIZE;
1495
+ if (target == 0) {
1496
+ target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 1, 1024);
1497
+ }
1498
+ mi_segments_try_abandon_to_target(heap, target, tld);
1499
+ }
1500
+
1501
+ /* -----------------------------------------------------------
1502
+ Reclaim or allocate
1503
+ ----------------------------------------------------------- */
1504
+
1505
+ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld)
1506
+ {
1507
+ mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
1508
+
1509
+ // try to abandon some segments to increase reuse between threads
1510
+ mi_segments_try_abandon(heap,tld);
1511
+
1512
+ // 1. try to reclaim an abandoned segment
1513
+ bool reclaimed;
1514
+ mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
1515
+ if (reclaimed) {
1516
+ // reclaimed the right page right into the heap
1517
+ mi_assert_internal(segment != NULL);
1518
+ return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks
1519
+ }
1520
+ else if (segment != NULL) {
1521
+ // reclaimed a segment with a large enough empty span in it
1522
+ return segment;
1523
+ }
1524
+ // 2. otherwise allocate a fresh segment
1525
+ return mi_segment_alloc(0, 0, heap->arena_id, tld, NULL);
1526
+ }
1527
+
1528
+
1529
+ /* -----------------------------------------------------------
1530
+ Page allocation
1531
+ ----------------------------------------------------------- */
1532
+
1533
+ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld)
1534
+ {
1535
+ mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE);
1536
+
1537
+ // find a free page
1538
+ size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE));
1539
+ size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE;
1540
+ mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size);
1541
+ mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
1542
+ if (page==NULL) {
1543
+ // no free page, allocate a new segment and try again
1544
+ if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld) == NULL) {
1545
+ // OOM or reclaimed a good page in the heap
1546
+ return NULL;
1547
+ }
1548
+ else {
1549
+ // otherwise try again
1550
+ return mi_segments_page_alloc(heap, page_kind, required, block_size, tld);
1551
+ }
1552
+ }
1553
+ mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size);
1554
+ mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id());
1555
+ mi_segment_try_purge(_mi_ptr_segment(page), false);
1556
+ return page;
1557
+ }
1558
+
1559
+
1560
+
1561
+ /* -----------------------------------------------------------
1562
+ Huge page allocation
1563
+ ----------------------------------------------------------- */
1564
+
1565
+ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld)
1566
+ {
1567
+ mi_page_t* page = NULL;
1568
+ mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,&page);
1569
+ if (segment == NULL || page==NULL) return NULL;
1570
+ mi_assert_internal(segment->used==1);
1571
+ mi_assert_internal(mi_page_block_size(page) >= size);
1572
+ #if MI_HUGE_PAGE_ABANDON
1573
+ segment->thread_id = 0; // huge segments are immediately abandoned
1574
+ #endif
1575
+
1576
+ // for huge pages we initialize the block_size as we may
1577
+ // overallocate to accommodate large alignments.
1578
+ size_t psize;
1579
+ uint8_t* start = _mi_segment_page_start(segment, page, &psize);
1580
+ page->block_size = psize;
1581
+ mi_assert_internal(page->is_huge);
1582
+
1583
+ // decommit the unused prefix of the page; this can be quite large (close to MI_SEGMENT_SIZE)
1584
+ if (page_alignment > 0 && segment->allow_decommit) {
1585
+ uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment);
1586
+ mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
1587
+ mi_assert_internal(psize - (aligned_p - start) >= size);
1588
+ uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list
1589
+ ptrdiff_t decommit_size = aligned_p - decommit_start;
1590
+ _mi_os_reset(decommit_start, decommit_size); // note: cannot use segment_decommit on huge segments
1591
+ }
1592
+
1593
+ return page;
1594
+ }
1595
+
1596
+ #if MI_HUGE_PAGE_ABANDON
1597
+ // free huge block from another thread
1598
+ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
1599
+ // huge page segments are always abandoned and can be freed immediately by any thread
1600
+ mi_assert_internal(segment->kind==MI_SEGMENT_HUGE);
1601
+ mi_assert_internal(segment == _mi_page_segment(page));
1602
+ mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0);
1603
+
1604
+ // claim it and free
1605
+ mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized.
1606
+ // paranoia: if this is the last reference, the CAS should always succeed
1607
+ size_t expected_tid = 0;
1608
+ if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) {
1609
+ mi_block_set_next(page, block, page->free);
1610
+ page->free = block;
1611
+ page->used--;
1612
+ page->is_zero_init = false;
1613
+ mi_assert(page->used == 0);
1614
+ mi_tld_t* tld = heap->tld;
1615
+ _mi_segment_page_free(page, true, &tld->segments);
1616
+ }
1617
+ #if (MI_DEBUG!=0)
1618
+ else {
1619
+ mi_assert_internal(false);
1620
+ }
1621
+ #endif
1622
+ }
1623
+
1624
+ #else
1625
+ // reset memory of a huge block from another thread
1626
+ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
1627
+ MI_UNUSED(page);
1628
+ mi_assert_internal(segment->kind == MI_SEGMENT_HUGE);
1629
+ mi_assert_internal(segment == _mi_page_segment(page));
1630
+ mi_assert_internal(page->used == 1); // this is called just before the free
1631
+ mi_assert_internal(page->free == NULL);
1632
+ if (segment->allow_decommit) {
1633
+ size_t csize = mi_usable_size(block);
1634
+ if (csize > sizeof(mi_block_t)) {
1635
+ csize = csize - sizeof(mi_block_t);
1636
+ uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
1637
+ _mi_os_reset(p, csize); // note: cannot use segment_decommit on huge segments
1638
+ }
1639
+ }
1640
+ }
1641
+ #endif
1642
+
1643
+ /* -----------------------------------------------------------
1644
+ Page allocation and free
1645
+ ----------------------------------------------------------- */
1646
+ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld) {
1647
+ mi_page_t* page;
1648
+ if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) {
1649
+ mi_assert_internal(_mi_is_power_of_two(page_alignment));
1650
+ mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
1651
+ if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
1652
+ page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld);
1653
+ }
1654
+ else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
1655
+ page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld);
1656
+ }
1657
+ else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
1658
+ page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld);
1659
+ }
1660
+ else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
1661
+ page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld);
1662
+ }
1663
+ else {
1664
+ page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld);
1665
+ }
1666
+ mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid));
1667
+ mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
1668
+ mi_assert_internal(page == NULL || _mi_page_segment(page)->subproc == tld->subproc);
1669
+ return page;
1670
+ }
1671
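The dispatch in `_mi_segment_page_alloc` is purely by size and alignment: an alignment above MI_BLOCK_ALIGNMENT_MAX, or a block larger than MI_LARGE_OBJ_SIZE_MAX, gets its own huge-page segment, while small (at most MI_SMALL_OBJ_SIZE_MAX), medium (at most MI_MEDIUM_OBJ_SIZE_MAX), and large (at most MI_LARGE_OBJ_SIZE_MAX) blocks are served by pages carved out of shared segments through `mi_segments_page_alloc`.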
+
1672
+
1673
+ /* -----------------------------------------------------------
1674
+ Visit blocks in a segment (only used for abandoned segments)
1675
+ ----------------------------------------------------------- */
1676
+
1677
+ static bool mi_segment_visit_page(mi_page_t* page, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
1678
+ mi_heap_area_t area;
1679
+ _mi_heap_area_init(&area, page);
1680
+ if (!visitor(NULL, &area, NULL, area.block_size, arg)) return false;
1681
+ if (visit_blocks) {
1682
+ return _mi_heap_area_visit_blocks(&area, page, visitor, arg);
1683
+ }
1684
+ else {
1685
+ return true;
1686
+ }
1687
+ }
1688
+
1689
+ bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
1690
+ const mi_slice_t* end;
1691
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
1692
+ while (slice < end) {
1693
+ if (mi_slice_is_used(slice)) {
1694
+ mi_page_t* const page = mi_slice_to_page(slice);
1695
+ if (heap_tag < 0 || (int)page->heap_tag == heap_tag) {
1696
+ if (!mi_segment_visit_page(page, visit_blocks, visitor, arg)) return false;
1697
+ }
1698
+ }
1699
+ slice = slice + slice->slice_count;
1700
+ }
1701
+ return true;
1702
+ }