@shd101wyy/yo 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (339)
  1. package/LICENSE.md +17 -0
  2. package/README.md +80 -0
  3. package/out/cjs/index.cjs +51 -0
  4. package/out/cjs/yo-cli.cjs +2158 -0
  5. package/out/esm/index.mjs +51 -0
  6. package/out/types/src/codegen/async/runtime.d.ts +2 -0
  7. package/out/types/src/codegen/async/state-code-gen.d.ts +10 -0
  8. package/out/types/src/codegen/async/state-machine.d.ts +13 -0
  9. package/out/types/src/codegen/c/collection.d.ts +3 -0
  10. package/out/types/src/codegen/codegen-c.d.ts +12 -0
  11. package/out/types/src/codegen/constants.d.ts +3 -0
  12. package/out/types/src/codegen/expressions/array.d.ts +4 -0
  13. package/out/types/src/codegen/expressions/generation.d.ts +11 -0
  14. package/out/types/src/codegen/expressions/index.d.ts +2 -0
  15. package/out/types/src/codegen/functions/collection.d.ts +5 -0
  16. package/out/types/src/codegen/functions/context.d.ts +57 -0
  17. package/out/types/src/codegen/functions/generation.d.ts +25 -0
  18. package/out/types/src/codegen/functions/index.d.ts +2 -0
  19. package/out/types/src/codegen/index.d.ts +20 -0
  20. package/out/types/src/codegen/parallelism/runtime.d.ts +2 -0
  21. package/out/types/src/codegen/types/collection.d.ts +8 -0
  22. package/out/types/src/codegen/types/generation.d.ts +13 -0
  23. package/out/types/src/codegen/types/index.d.ts +2 -0
  24. package/out/types/src/codegen/utils/fixup.d.ts +2 -0
  25. package/out/types/src/codegen/utils/index.d.ts +77 -0
  26. package/out/types/src/codegen/values/index.d.ts +1 -0
  27. package/out/types/src/emitter.d.ts +11 -0
  28. package/out/types/src/env.d.ts +85 -0
  29. package/out/types/src/error.d.ts +45 -0
  30. package/out/types/src/evaluator/async/await-analysis-types.d.ts +23 -0
  31. package/out/types/src/evaluator/async/await-analysis.d.ts +5 -0
  32. package/out/types/src/evaluator/builtins/alignof.d.ts +8 -0
  33. package/out/types/src/evaluator/builtins/and_or.d.ts +8 -0
  34. package/out/types/src/evaluator/builtins/arc_fns.d.ts +58 -0
  35. package/out/types/src/evaluator/builtins/array_fns.d.ts +0 -0
  36. package/out/types/src/evaluator/builtins/as.d.ts +8 -0
  37. package/out/types/src/evaluator/builtins/async_fns.d.ts +8 -0
  38. package/out/types/src/evaluator/builtins/compt_assert.d.ts +8 -0
  39. package/out/types/src/evaluator/builtins/compt_boolean_fns.d.ts +8 -0
  40. package/out/types/src/evaluator/builtins/compt_expect_error.d.ts +8 -0
  41. package/out/types/src/evaluator/builtins/compt_list_fns.d.ts +33 -0
  42. package/out/types/src/evaluator/builtins/compt_print.d.ts +8 -0
  43. package/out/types/src/evaluator/builtins/compt_string_fns.d.ts +8 -0
  44. package/out/types/src/evaluator/builtins/consume.d.ts +8 -0
  45. package/out/types/src/evaluator/builtins/drop.d.ts +8 -0
  46. package/out/types/src/evaluator/builtins/dup.d.ts +8 -0
  47. package/out/types/src/evaluator/builtins/expr_fns.d.ts +33 -0
  48. package/out/types/src/evaluator/builtins/future_fns.d.ts +8 -0
  49. package/out/types/src/evaluator/builtins/gc.d.ts +8 -0
  50. package/out/types/src/evaluator/builtins/gensym.d.ts +8 -0
  51. package/out/types/src/evaluator/builtins/impl_constraint.d.ts +8 -0
  52. package/out/types/src/evaluator/builtins/macro_expand.d.ts +8 -0
  53. package/out/types/src/evaluator/builtins/numeric_fns.d.ts +8 -0
  54. package/out/types/src/evaluator/builtins/panic.d.ts +8 -0
  55. package/out/types/src/evaluator/builtins/ptr_fns.d.ts +8 -0
  56. package/out/types/src/evaluator/builtins/quote.d.ts +13 -0
  57. package/out/types/src/evaluator/builtins/rc.d.ts +8 -0
  58. package/out/types/src/evaluator/builtins/sizeof.d.ts +8 -0
  59. package/out/types/src/evaluator/builtins/the.d.ts +8 -0
  60. package/out/types/src/evaluator/builtins/type_fns.d.ts +28 -0
  61. package/out/types/src/evaluator/builtins/va_start.d.ts +8 -0
  62. package/out/types/src/evaluator/builtins/var_fns.d.ts +18 -0
  63. package/out/types/src/evaluator/calls/array.d.ts +13 -0
  64. package/out/types/src/evaluator/calls/array_type.d.ts +11 -0
  65. package/out/types/src/evaluator/calls/closure_type.d.ts +11 -0
  66. package/out/types/src/evaluator/calls/compt_function.d.ts +19 -0
  67. package/out/types/src/evaluator/calls/compt_list_type.d.ts +11 -0
  68. package/out/types/src/evaluator/calls/function.d.ts +16 -0
  69. package/out/types/src/evaluator/calls/function_type.d.ts +15 -0
  70. package/out/types/src/evaluator/calls/helper.d.ts +42 -0
  71. package/out/types/src/evaluator/calls/iso.d.ts +15 -0
  72. package/out/types/src/evaluator/calls/module_type.d.ts +11 -0
  73. package/out/types/src/evaluator/calls/numeric_type.d.ts +15 -0
  74. package/out/types/src/evaluator/calls/pointer.d.ts +8 -0
  75. package/out/types/src/evaluator/calls/pointer_type.d.ts +14 -0
  76. package/out/types/src/evaluator/calls/type.d.ts +12 -0
  77. package/out/types/src/evaluator/context.d.ts +169 -0
  78. package/out/types/src/evaluator/exprs/_expr.d.ts +8 -0
  79. package/out/types/src/evaluator/exprs/assignment.d.ts +9 -0
  80. package/out/types/src/evaluator/exprs/begin.d.ts +10 -0
  81. package/out/types/src/evaluator/exprs/binding.d.ts +12 -0
  82. package/out/types/src/evaluator/exprs/c_include.d.ts +8 -0
  83. package/out/types/src/evaluator/exprs/cond.d.ts +8 -0
  84. package/out/types/src/evaluator/exprs/destructuring_assignment.d.ts +33 -0
  85. package/out/types/src/evaluator/exprs/exists.d.ts +0 -0
  86. package/out/types/src/evaluator/exprs/expr.d.ts +9 -0
  87. package/out/types/src/evaluator/exprs/extern.d.ts +8 -0
  88. package/out/types/src/evaluator/exprs/identifer_and_operator.d.ts +9 -0
  89. package/out/types/src/evaluator/exprs/import.d.ts +9 -0
  90. package/out/types/src/evaluator/exprs/initialization_assignment.d.ts +8 -0
  91. package/out/types/src/evaluator/exprs/match.d.ts +8 -0
  92. package/out/types/src/evaluator/exprs/open.d.ts +8 -0
  93. package/out/types/src/evaluator/exprs/property_access.d.ts +8 -0
  94. package/out/types/src/evaluator/exprs/recur.d.ts +8 -0
  95. package/out/types/src/evaluator/exprs/subtype_of.d.ts +21 -0
  96. package/out/types/src/evaluator/exprs/test.d.ts +8 -0
  97. package/out/types/src/evaluator/exprs/typeof.d.ts +8 -0
  98. package/out/types/src/evaluator/exprs/while.d.ts +8 -0
  99. package/out/types/src/evaluator/index.d.ts +26 -0
  100. package/out/types/src/evaluator/types/array.d.ts +8 -0
  101. package/out/types/src/evaluator/types/closure.d.ts +8 -0
  102. package/out/types/src/evaluator/types/compt_list.d.ts +8 -0
  103. package/out/types/src/evaluator/types/concrete_module.d.ts +8 -0
  104. package/out/types/src/evaluator/types/dyn.d.ts +8 -0
  105. package/out/types/src/evaluator/types/enum.d.ts +8 -0
  106. package/out/types/src/evaluator/types/expr_synthesizer.d.ts +14 -0
  107. package/out/types/src/evaluator/types/field.d.ts +14 -0
  108. package/out/types/src/evaluator/types/fn_module.d.ts +8 -0
  109. package/out/types/src/evaluator/types/function.d.ts +58 -0
  110. package/out/types/src/evaluator/types/future_module.d.ts +8 -0
  111. package/out/types/src/evaluator/types/module.d.ts +19 -0
  112. package/out/types/src/evaluator/types/newtype.d.ts +8 -0
  113. package/out/types/src/evaluator/types/object.d.ts +8 -0
  114. package/out/types/src/evaluator/types/proofs.d.ts +0 -0
  115. package/out/types/src/evaluator/types/slice.d.ts +8 -0
  116. package/out/types/src/evaluator/types/struct.d.ts +8 -0
  117. package/out/types/src/evaluator/types/synthesizer.d.ts +16 -0
  118. package/out/types/src/evaluator/types/tuple.d.ts +18 -0
  119. package/out/types/src/evaluator/types/union.d.ts +8 -0
  120. package/out/types/src/evaluator/types/utils.d.ts +71 -0
  121. package/out/types/src/evaluator/types/validation.d.ts +3 -0
  122. package/out/types/src/evaluator/utils/array-utils.d.ts +15 -0
  123. package/out/types/src/evaluator/utils/closure.d.ts +35 -0
  124. package/out/types/src/evaluator/utils.d.ts +4 -0
  125. package/out/types/src/evaluator/values/anonymous_function.d.ts +8 -0
  126. package/out/types/src/evaluator/values/anonymous_module.d.ts +17 -0
  127. package/out/types/src/evaluator/values/anonymous_struct.d.ts +8 -0
  128. package/out/types/src/evaluator/values/array.d.ts +8 -0
  129. package/out/types/src/evaluator/values/boolean.d.ts +3 -0
  130. package/out/types/src/evaluator/values/char.d.ts +3 -0
  131. package/out/types/src/evaluator/values/compt_list.d.ts +8 -0
  132. package/out/types/src/evaluator/values/dyn.d.ts +8 -0
  133. package/out/types/src/evaluator/values/float.d.ts +4 -0
  134. package/out/types/src/evaluator/values/integer.d.ts +4 -0
  135. package/out/types/src/evaluator/values/module.d.ts +58 -0
  136. package/out/types/src/evaluator/values/string.d.ts +3 -0
  137. package/out/types/src/evaluator/values/tuple.d.ts +32 -0
  138. package/out/types/src/expr.d.ts +456 -0
  139. package/out/types/src/function-value.d.ts +42 -0
  140. package/out/types/src/index.d.ts +4 -0
  141. package/out/types/src/lexer.d.ts +2 -0
  142. package/out/types/src/logger.d.ts +1 -0
  143. package/out/types/src/module-manager.d.ts +30 -0
  144. package/out/types/src/naming-checker.d.ts +4 -0
  145. package/out/types/src/parser.d.ts +33 -0
  146. package/out/types/src/test-runner.d.ts +30 -0
  147. package/out/types/src/tests/codegen.test.d.ts +1 -0
  148. package/out/types/src/tests/fixme.test.d.ts +1 -0
  149. package/out/types/src/tests/module-manager.test.d.ts +1 -0
  150. package/out/types/src/tests/parser.test.d.ts +1 -0
  151. package/out/types/src/tests/sample.test.d.ts +0 -0
  152. package/out/types/src/tests/std.test.d.ts +1 -0
  153. package/out/types/src/token.d.ts +40 -0
  154. package/out/types/src/type-value.d.ts +7 -0
  155. package/out/types/src/types/compatibility.d.ts +16 -0
  156. package/out/types/src/types/creators.d.ts +73 -0
  157. package/out/types/src/types/definitions.d.ts +218 -0
  158. package/out/types/src/types/guards.d.ts +70 -0
  159. package/out/types/src/types/hierarchy.d.ts +4 -0
  160. package/out/types/src/types/index.d.ts +7 -0
  161. package/out/types/src/types/module_field.d.ts +2 -0
  162. package/out/types/src/types/tags.d.ts +45 -0
  163. package/out/types/src/types/utils.d.ts +50 -0
  164. package/out/types/src/unit-value.d.ts +7 -0
  165. package/out/types/src/utils.d.ts +6 -0
  166. package/out/types/src/value-tag.d.ts +29 -0
  167. package/out/types/src/value.d.ts +110 -0
  168. package/out/types/src/yo-cli.d.ts +1 -0
  169. package/out/types/tsconfig.tsbuildinfo +1 -0
  170. package/package.json +57 -0
  171. package/scripts/check-liburing.js +76 -0
  172. package/std/alg/hash.yo +50 -0
  173. package/std/allocator.yo +113 -0
  174. package/std/allocators/c_allocator.yo +118 -0
  175. package/std/async.yo +13 -0
  176. package/std/collections/array_list.yo +415 -0
  177. package/std/collections/hash_map.yo +482 -0
  178. package/std/collections/hash_set.yo +706 -0
  179. package/std/collections/index.yo +11 -0
  180. package/std/collections/linked_list.yo +439 -0
  181. package/std/error.yo +0 -0
  182. package/std/gc.yo +10 -0
  183. package/std/index.yo +12 -0
  184. package/std/io/file.yo +191 -0
  185. package/std/io/index.yo +5 -0
  186. package/std/libc/assert.yo +39 -0
  187. package/std/libc/ctype.yo +57 -0
  188. package/std/libc/errno.yo +182 -0
  189. package/std/libc/float.yo +87 -0
  190. package/std/libc/index.yo +29 -0
  191. package/std/libc/limits.yo +65 -0
  192. package/std/libc/math.yo +679 -0
  193. package/std/libc/signal.yo +101 -0
  194. package/std/libc/stdatomic.yo +213 -0
  195. package/std/libc/stdint.yo +214 -0
  196. package/std/libc/stdio.yo +225 -0
  197. package/std/libc/stdlib.yo +204 -0
  198. package/std/libc/string.yo +151 -0
  199. package/std/libc/time.yo +92 -0
  200. package/std/libc/unistd.yo +130 -0
  201. package/std/monad.yo +152 -0
  202. package/std/prelude.yo +3094 -0
  203. package/std/string/index.yo +8 -0
  204. package/std/string/rune.yo +82 -0
  205. package/std/string/string.yo +288 -0
  206. package/std/sync.yo +95 -0
  207. package/std/thread.yo +36 -0
  208. package/std/time.yo +13 -0
  209. package/std/worker.yo +36 -0
  210. package/vendor/mimalloc/.gitattributes +12 -0
  211. package/vendor/mimalloc/CMakeLists.txt +763 -0
  212. package/vendor/mimalloc/LICENSE +21 -0
  213. package/vendor/mimalloc/SECURITY.md +41 -0
  214. package/vendor/mimalloc/azure-pipelines.yml +251 -0
  215. package/vendor/mimalloc/bin/mimalloc-redirect-arm64.dll +0 -0
  216. package/vendor/mimalloc/bin/mimalloc-redirect-arm64.lib +0 -0
  217. package/vendor/mimalloc/bin/mimalloc-redirect-arm64ec.dll +0 -0
  218. package/vendor/mimalloc/bin/mimalloc-redirect-arm64ec.lib +0 -0
  219. package/vendor/mimalloc/bin/mimalloc-redirect.dll +0 -0
  220. package/vendor/mimalloc/bin/mimalloc-redirect.lib +0 -0
  221. package/vendor/mimalloc/bin/mimalloc-redirect32.dll +0 -0
  222. package/vendor/mimalloc/bin/mimalloc-redirect32.lib +0 -0
  223. package/vendor/mimalloc/bin/minject-arm64.exe +0 -0
  224. package/vendor/mimalloc/bin/minject.exe +0 -0
  225. package/vendor/mimalloc/bin/minject32.exe +0 -0
  226. package/vendor/mimalloc/bin/readme.md +118 -0
  227. package/vendor/mimalloc/cmake/JoinPaths.cmake +23 -0
  228. package/vendor/mimalloc/cmake/mimalloc-config-version.cmake +19 -0
  229. package/vendor/mimalloc/cmake/mimalloc-config.cmake +14 -0
  230. package/vendor/mimalloc/contrib/docker/alpine/Dockerfile +23 -0
  231. package/vendor/mimalloc/contrib/docker/alpine-arm32v7/Dockerfile +28 -0
  232. package/vendor/mimalloc/contrib/docker/alpine-x86/Dockerfile +28 -0
  233. package/vendor/mimalloc/contrib/docker/manylinux-x64/Dockerfile +23 -0
  234. package/vendor/mimalloc/contrib/docker/readme.md +10 -0
  235. package/vendor/mimalloc/contrib/vcpkg/portfile.cmake +64 -0
  236. package/vendor/mimalloc/contrib/vcpkg/readme.md +40 -0
  237. package/vendor/mimalloc/contrib/vcpkg/usage +20 -0
  238. package/vendor/mimalloc/contrib/vcpkg/vcpkg-cmake-wrapper.cmake +20 -0
  239. package/vendor/mimalloc/contrib/vcpkg/vcpkg.json +48 -0
  240. package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg +887 -0
  241. package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg +1185 -0
  242. package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg +757 -0
  243. package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg +1028 -0
  244. package/vendor/mimalloc/doc/bench-2020/bench-r5a-1.svg +769 -0
  245. package/vendor/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg +868 -0
  246. package/vendor/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg +1157 -0
  247. package/vendor/mimalloc/doc/bench-2020/bench-r5a-2.svg +983 -0
  248. package/vendor/mimalloc/doc/bench-2020/bench-r5a-rss-1.svg +683 -0
  249. package/vendor/mimalloc/doc/bench-2020/bench-r5a-rss-2.svg +854 -0
  250. package/vendor/mimalloc/doc/bench-2020/bench-spec-rss.svg +713 -0
  251. package/vendor/mimalloc/doc/bench-2020/bench-spec.svg +713 -0
  252. package/vendor/mimalloc/doc/bench-2020/bench-z4-1.svg +890 -0
  253. package/vendor/mimalloc/doc/bench-2020/bench-z4-2.svg +1146 -0
  254. package/vendor/mimalloc/doc/bench-2020/bench-z4-rss-1.svg +796 -0
  255. package/vendor/mimalloc/doc/bench-2020/bench-z4-rss-2.svg +974 -0
  256. package/vendor/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg +952 -0
  257. package/vendor/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg +1255 -0
  258. package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg +955 -0
  259. package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg +1269 -0
  260. package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg +836 -0
  261. package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg +1131 -0
  262. package/vendor/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg +766 -0
  263. package/vendor/mimalloc/doc/doxyfile +2895 -0
  264. package/vendor/mimalloc/doc/ds-logo.jpg +0 -0
  265. package/vendor/mimalloc/doc/ds-logo.png +0 -0
  266. package/vendor/mimalloc/doc/mimalloc-doc.h +1452 -0
  267. package/vendor/mimalloc/doc/mimalloc-doxygen.css +60 -0
  268. package/vendor/mimalloc/doc/mimalloc-logo-100.png +0 -0
  269. package/vendor/mimalloc/doc/mimalloc-logo.png +0 -0
  270. package/vendor/mimalloc/doc/mimalloc-logo.svg +161 -0
  271. package/vendor/mimalloc/doc/spades-logo.png +0 -0
  272. package/vendor/mimalloc/doc/unreal-logo.svg +43 -0
  273. package/vendor/mimalloc/ide/vs2022/mimalloc-lib.vcxproj +500 -0
  274. package/vendor/mimalloc/ide/vs2022/mimalloc-lib.vcxproj.filters +108 -0
  275. package/vendor/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj +508 -0
  276. package/vendor/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj.filters +111 -0
  277. package/vendor/mimalloc/ide/vs2022/mimalloc-override-test-dep.vcxproj +355 -0
  278. package/vendor/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj +360 -0
  279. package/vendor/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj +295 -0
  280. package/vendor/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj +292 -0
  281. package/vendor/mimalloc/ide/vs2022/mimalloc-test.vcxproj +289 -0
  282. package/vendor/mimalloc/ide/vs2022/mimalloc.sln +151 -0
  283. package/vendor/mimalloc/include/mimalloc/atomic.h +557 -0
  284. package/vendor/mimalloc/include/mimalloc/internal.h +1153 -0
  285. package/vendor/mimalloc/include/mimalloc/prim.h +421 -0
  286. package/vendor/mimalloc/include/mimalloc/track.h +145 -0
  287. package/vendor/mimalloc/include/mimalloc/types.h +685 -0
  288. package/vendor/mimalloc/include/mimalloc-new-delete.h +66 -0
  289. package/vendor/mimalloc/include/mimalloc-override.h +68 -0
  290. package/vendor/mimalloc/include/mimalloc-stats.h +103 -0
  291. package/vendor/mimalloc/include/mimalloc.h +612 -0
  292. package/vendor/mimalloc/mimalloc.pc.in +11 -0
  293. package/vendor/mimalloc/readme.md +946 -0
  294. package/vendor/mimalloc/src/alloc-aligned.c +360 -0
  295. package/vendor/mimalloc/src/alloc-override.c +316 -0
  296. package/vendor/mimalloc/src/alloc-posix.c +185 -0
  297. package/vendor/mimalloc/src/alloc.c +692 -0
  298. package/vendor/mimalloc/src/arena-abandon.c +346 -0
  299. package/vendor/mimalloc/src/arena.c +1043 -0
  300. package/vendor/mimalloc/src/bitmap.c +441 -0
  301. package/vendor/mimalloc/src/bitmap.h +119 -0
  302. package/vendor/mimalloc/src/free.c +572 -0
  303. package/vendor/mimalloc/src/heap.c +733 -0
  304. package/vendor/mimalloc/src/init.c +714 -0
  305. package/vendor/mimalloc/src/libc.c +334 -0
  306. package/vendor/mimalloc/src/options.c +663 -0
  307. package/vendor/mimalloc/src/os.c +770 -0
  308. package/vendor/mimalloc/src/page-queue.c +390 -0
  309. package/vendor/mimalloc/src/page.c +1049 -0
  310. package/vendor/mimalloc/src/prim/emscripten/prim.c +249 -0
  311. package/vendor/mimalloc/src/prim/osx/alloc-override-zone.c +461 -0
  312. package/vendor/mimalloc/src/prim/osx/prim.c +9 -0
  313. package/vendor/mimalloc/src/prim/prim.c +76 -0
  314. package/vendor/mimalloc/src/prim/readme.md +9 -0
  315. package/vendor/mimalloc/src/prim/unix/prim.c +934 -0
  316. package/vendor/mimalloc/src/prim/wasi/prim.c +284 -0
  317. package/vendor/mimalloc/src/prim/windows/etw-mimalloc.wprp +61 -0
  318. package/vendor/mimalloc/src/prim/windows/etw.h +905 -0
  319. package/vendor/mimalloc/src/prim/windows/etw.man +0 -0
  320. package/vendor/mimalloc/src/prim/windows/prim.c +878 -0
  321. package/vendor/mimalloc/src/prim/windows/readme.md +17 -0
  322. package/vendor/mimalloc/src/random.c +258 -0
  323. package/vendor/mimalloc/src/segment-map.c +142 -0
  324. package/vendor/mimalloc/src/segment.c +1702 -0
  325. package/vendor/mimalloc/src/static.c +41 -0
  326. package/vendor/mimalloc/src/stats.c +635 -0
  327. package/vendor/mimalloc/test/CMakeLists.txt +56 -0
  328. package/vendor/mimalloc/test/main-override-dep.cpp +51 -0
  329. package/vendor/mimalloc/test/main-override-dep.h +11 -0
  330. package/vendor/mimalloc/test/main-override-static.c +539 -0
  331. package/vendor/mimalloc/test/main-override.c +36 -0
  332. package/vendor/mimalloc/test/main-override.cpp +497 -0
  333. package/vendor/mimalloc/test/main.c +46 -0
  334. package/vendor/mimalloc/test/readme.md +16 -0
  335. package/vendor/mimalloc/test/test-api-fill.c +343 -0
  336. package/vendor/mimalloc/test/test-api.c +466 -0
  337. package/vendor/mimalloc/test/test-stress.c +428 -0
  338. package/vendor/mimalloc/test/test-wrong.c +92 -0
  339. package/vendor/mimalloc/test/testhelper.h +49 -0
package/vendor/mimalloc/src/arena.c
@@ -0,0 +1,1043 @@
+ /* ----------------------------------------------------------------------------
+ Copyright (c) 2019-2024, Microsoft Research, Daan Leijen
+ This is free software; you can redistribute it and/or modify it under the
+ terms of the MIT license. A copy of the license can be found in the file
+ "LICENSE" at the root of this distribution.
+ -----------------------------------------------------------------------------*/
+
+ /* ----------------------------------------------------------------------------
+ "Arenas" are fixed area's of OS memory from which we can allocate
+ large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB).
+ In contrast to the rest of mimalloc, the arenas are shared between
+ threads and need to be accessed using atomic operations.
+
+ Arenas are also used to for huge OS page (1GiB) reservations or for reserving
+ OS memory upfront which can be improve performance or is sometimes needed
+ on embedded devices. We can also employ this with WASI or `sbrk` systems
+ to reserve large arenas upfront and be able to reuse the memory more effectively.
+
+ The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
+ -----------------------------------------------------------------------------*/
+
+ #include "mimalloc.h"
+ #include "mimalloc/internal.h"
+ #include "mimalloc/atomic.h"
+ #include "bitmap.h"
+
+
+ /* -----------------------------------------------------------
+ Arena allocation
+ ----------------------------------------------------------- */
+
+ // A memory arena descriptor
+ typedef struct mi_arena_s {
+ mi_arena_id_t id; // arena id; 0 for non-specific
+ mi_memid_t memid; // memid of the memory area
+ _Atomic(uint8_t*) start; // the start of the memory area
+ size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
+ size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
+ size_t meta_size; // size of the arena structure itself (including its bitmaps)
+ mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation)
+ int numa_node; // associated NUMA node
+ bool exclusive; // only allow allocations if specifically for this arena
+ bool is_large; // memory area consists of large- or huge OS pages (always committed)
+ mi_lock_t abandoned_visit_lock; // lock is only used when abandoned segments are being visited
+ _Atomic(size_t) search_idx; // optimization to start the search for free blocks
+ _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be purged from `blocks_purge`.
+
+ mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
+ mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
+ mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
+ mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. (This crosses API's but it is convenient to have here)
+ mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
+ // do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields.
+ } mi_arena_t;
+
+
+ #define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN)
+ #define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB
+ #define MI_MAX_ARENAS (132) // Limited as the reservation exponentially increases (and takes up .bss)
+
+ // The available arenas
+ static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
+ static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0
+ static mi_decl_cache_align _Atomic(int64_t) mi_arenas_purge_expire; // set if there exist purgeable arenas
+
+ #define MI_IN_ARENA_C
+ #include "arena-abandon.c"
+ #undef MI_IN_ARENA_C
+
+ /* -----------------------------------------------------------
+ Arena id's
+ id = arena_index + 1
+ ----------------------------------------------------------- */
+
+ size_t mi_arena_id_index(mi_arena_id_t id) {
+ return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
+ }
+
+ static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
+ mi_assert_internal(arena_index < MI_MAX_ARENAS);
+ return (int)arena_index + 1;
+ }
+
+ mi_arena_id_t _mi_arena_id_none(void) {
+ return 0;
+ }
+
+ static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
+ return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
+ (arena_id == req_arena_id));
+ }
+
+ bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
+ if (memid.memkind == MI_MEM_ARENA) {
+ return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
+ }
+ else {
+ return mi_arena_id_is_suitable(_mi_arena_id_none(), false, request_arena_id);
+ }
+ }
+
+ bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
+ return (memid.memkind == MI_MEM_OS);
+ }
+
+ size_t mi_arena_get_count(void) {
+ return mi_atomic_load_relaxed(&mi_arena_count);
+ }
+
+ mi_arena_t* mi_arena_from_index(size_t idx) {
+ mi_assert_internal(idx < mi_arena_get_count());
+ return mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[idx]);
+ }
+
+
+ /* -----------------------------------------------------------
+ Arena allocations get a (currently) 16-bit memory id where the
+ lower 8 bits are the arena id, and the upper bits the block index.
+ ----------------------------------------------------------- */
+
+ static size_t mi_block_count_of_size(size_t size) {
+ return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
+ }
+
+ static size_t mi_arena_block_size(size_t bcount) {
+ return (bcount * MI_ARENA_BLOCK_SIZE);
+ }
+
+ static size_t mi_arena_size(mi_arena_t* arena) {
+ return mi_arena_block_size(arena->block_count);
+ }
+
+ static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
+ mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA);
+ memid.mem.arena.id = id;
+ memid.mem.arena.block_index = bitmap_index;
+ memid.mem.arena.is_exclusive = is_exclusive;
+ return memid;
+ }
+
+ bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
+ mi_assert_internal(memid.memkind == MI_MEM_ARENA);
+ *arena_index = mi_arena_id_index(memid.mem.arena.id);
+ *bitmap_index = memid.mem.arena.block_index;
+ return memid.mem.arena.is_exclusive;
+ }
+
+
+
+ /* -----------------------------------------------------------
+ Special static area for mimalloc internal structures
+ to avoid OS calls (for example, for the arena metadata (~= 256b))
+ ----------------------------------------------------------- */
+
+ #define MI_ARENA_STATIC_MAX ((MI_INTPTR_SIZE/2)*MI_KiB) // 4 KiB on 64-bit
+
+ static mi_decl_cache_align uint8_t mi_arena_static[MI_ARENA_STATIC_MAX]; // must be cache aligned, see issue #895
+ static mi_decl_cache_align _Atomic(size_t) mi_arena_static_top;
+
+ static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
+ *memid = _mi_memid_none();
+ if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
+ const size_t toplow = mi_atomic_load_relaxed(&mi_arena_static_top);
+ if ((toplow + size) > MI_ARENA_STATIC_MAX) return NULL;
+
+ // try to claim space
+ if (alignment < MI_MAX_ALIGN_SIZE) { alignment = MI_MAX_ALIGN_SIZE; }
+ const size_t oversize = size + alignment - 1;
+ if (toplow + oversize > MI_ARENA_STATIC_MAX) return NULL;
+ const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
+ size_t top = oldtop + oversize;
+ if (top > MI_ARENA_STATIC_MAX) {
+ // try to roll back, ok if this fails
+ mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
+ return NULL;
+ }
+
+ // success
+ *memid = _mi_memid_create(MI_MEM_STATIC);
+ memid->initially_zero = true;
+ const size_t start = _mi_align_up(oldtop, alignment);
+ uint8_t* const p = &mi_arena_static[start];
+ _mi_memzero_aligned(p, size);
+ return p;
+ }
+
+ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
+ *memid = _mi_memid_none();
+
+ // try static
+ void* p = mi_arena_static_zalloc(size, MI_MAX_ALIGN_SIZE, memid);
+ if (p != NULL) return p;
+
+ // or fall back to the OS
+ p = _mi_os_zalloc(size, memid);
+ if (p == NULL) return NULL;
+
+ return p;
+ }
+
+ void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) {
+ if (mi_memkind_is_os(memid.memkind)) {
+ _mi_os_free(p, size, memid);
+ }
+ else {
+ mi_assert(memid.memkind == MI_MEM_STATIC);
+ }
+ }
+
+ void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
+ return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex)));
+ }
+
+
+ /* -----------------------------------------------------------
+ Thread safe allocation in an arena
+ ----------------------------------------------------------- */
+
+ // claim the `blocks_inuse` bits
+ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
+ {
+ size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
+ if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
+ mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
+ return true;
+ };
+ return false;
+ }
+
+
+ /* -----------------------------------------------------------
+ Arena Allocation
+ ----------------------------------------------------------- */
+
+ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
+ bool commit, mi_memid_t* memid)
+ {
+ MI_UNUSED(arena_index);
+ mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
+
+ mi_bitmap_index_t bitmap_index;
+ if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
+
+ // claimed it!
+ void* p = mi_arena_block_start(arena, bitmap_index);
+ *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
+ memid->is_pinned = arena->memid.is_pinned;
+
+ // none of the claimed blocks should be scheduled for a decommit
+ if (arena->blocks_purge != NULL) {
+ // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`).
+ _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
+ }
+
+ // set the dirty bits (todo: no need for an atomic op here?)
+ if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
+ memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL, NULL);
+ }
+
+ // set commit state
+ if (arena->blocks_committed == NULL) {
+ // always committed
+ memid->initially_committed = true;
+ }
+ else if (commit) {
+ // commit requested, but the range may not be committed as a whole: ensure it is committed now
+ memid->initially_committed = true;
+ const size_t commit_size = mi_arena_block_size(needed_bcount);
+ bool any_uncommitted;
+ size_t already_committed = 0;
+ _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted, &already_committed);
+ if (any_uncommitted) {
+ mi_assert_internal(already_committed < needed_bcount);
+ const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed);
+ bool commit_zero = false;
+ if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) {
+ memid->initially_committed = false;
+ }
+ else {
+ if (commit_zero) { memid->initially_zero = true; }
+ }
+ }
+ else {
+ // all are already committed: signal that we are reusing memory in case it was purged before
+ _mi_os_reuse( p, commit_size );
+ }
+ }
+ else {
+ // no need to commit, but check if already fully committed
+ size_t already_committed = 0;
+ memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &already_committed);
+ if (!memid->initially_committed && already_committed > 0) {
+ // partially committed: as it will be committed at some time, adjust the stats and pretend the range is fully uncommitted.
+ mi_assert_internal(already_committed < needed_bcount);
+ _mi_stat_decrease(&_mi_stats_main.committed, mi_arena_block_size(already_committed));
+ _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
+ }
+ }
+
+ return p;
+ }
+
+ // allocate in a specific arena
+ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
+ bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid )
+ {
+ MI_UNUSED_RELEASE(alignment);
+ mi_assert(alignment <= MI_SEGMENT_ALIGN);
+ const size_t bcount = mi_block_count_of_size(size);
+ const size_t arena_index = mi_arena_id_index(arena_id);
+ mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
+ mi_assert_internal(size <= mi_arena_block_size(bcount));
+
+ // Check arena suitability
+ mi_arena_t* arena = mi_arena_from_index(arena_index);
+ if (arena == NULL) return NULL;
+ if (!allow_large && arena->is_large) return NULL;
+ if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
+ if (req_arena_id == _mi_arena_id_none()) { // in not specific, check numa affinity
+ const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
+ if (match_numa_node) { if (!numa_suitable) return NULL; }
+ else { if (numa_suitable) return NULL; }
+ }
+
+ // try to allocate
+ void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid);
+ mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
+ return p;
+ }
+
+
+ // allocate from an arena with fallback to the OS
+ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
+ bool commit, bool allow_large,
+ mi_arena_id_t req_arena_id, mi_memid_t* memid )
+ {
+ MI_UNUSED(alignment);
+ mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+ if mi_likely(max_arena == 0) return NULL;
+
+ if (req_arena_id != _mi_arena_id_none()) {
+ // try a specific arena if requested
+ if (mi_arena_id_index(req_arena_id) < max_arena) {
+ void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+ }
+ }
+ else {
+ // try numa affine allocation
+ for (size_t i = 0; i < max_arena; i++) {
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+ }
+
+ // try from another numa node instead..
+ if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arena's have been tried already
+ for (size_t i = 0; i < max_arena; i++) {
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+ }
+ }
+ }
+ return NULL;
+ }
+
+ // try to reserve a fresh arena space
+ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t *arena_id)
+ {
+ if (_mi_preloading()) return false; // use OS only while pre loading
+
+ const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
+ if (arena_count > (MI_MAX_ARENAS - 4)) return false;
+
+ size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
+ if (arena_reserve == 0) return false;
+
+ if (!_mi_os_has_virtual_reserve()) {
+ arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for WASM for example)
+ }
+ arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
+ arena_reserve = _mi_align_up(arena_reserve, MI_SEGMENT_SIZE);
+ if (arena_count >= 8 && arena_count <= 128) {
+ // scale up the arena sizes exponentially every 8 entries (128 entries get to 589TiB)
+ const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/8, 0, 16 );
+ size_t reserve = 0;
+ if (!mi_mul_overflow(multiplier, arena_reserve, &reserve)) {
+ arena_reserve = reserve;
+ }
+ }
+ if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size
+
+ // commit eagerly?
+ bool arena_commit = false;
+ if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); }
+ else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
+
+ return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id) == 0);
+ }
+
+
+ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
+ mi_arena_id_t req_arena_id, mi_memid_t* memid)
+ {
+ mi_assert_internal(memid != NULL);
+ mi_assert_internal(size > 0);
+ *memid = _mi_memid_none();
+
+ const int numa_node = _mi_os_numa_node(); // current numa node
+
+ // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
+ if (!mi_option_is_enabled(mi_option_disallow_arena_alloc)) { // is arena allocation allowed?
+ if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
+ {
+ void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+
+ // otherwise, try to first eagerly reserve a new arena
+ if (req_arena_id == _mi_arena_id_none()) {
+ mi_arena_id_t arena_id = 0;
+ if (mi_arena_reserve(size, allow_large, &arena_id)) {
+ // and try allocate in there
+ mi_assert_internal(req_arena_id == _mi_arena_id_none());
+ p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
+ if (p != NULL) return p;
+ }
+ }
+ }
+ }
+
+ // if we cannot use OS allocation, return NULL
+ if (mi_option_is_enabled(mi_option_disallow_os_alloc) || req_arena_id != _mi_arena_id_none()) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ // finally, fall back to the OS
+ if (align_offset > 0) {
+ return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid);
+ }
+ else {
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
+ }
+ }
+
+ void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid)
+ {
+ return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid);
+ }
+
+
+ void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
+ if (size != NULL) *size = 0;
+ size_t arena_index = mi_arena_id_index(arena_id);
+ if (arena_index >= MI_MAX_ARENAS) return NULL;
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
+ if (arena == NULL) return NULL;
+ if (size != NULL) { *size = mi_arena_block_size(arena->block_count); }
+ return arena->start;
+ }
+
+
+ /* -----------------------------------------------------------
+ Arena purge
+ ----------------------------------------------------------- */
+
+ static long mi_arena_purge_delay(void) {
+ // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay
+ return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
+ }
+
+ // reset or decommit in an arena and update the committed/decommit bitmaps
+ // assumes we own the area (i.e. blocks_in_use is claimed by us)
+ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
+ mi_assert_internal(arena->blocks_committed != NULL);
+ mi_assert_internal(arena->blocks_purge != NULL);
+ mi_assert_internal(!arena->memid.is_pinned);
+ const size_t size = mi_arena_block_size(blocks);
+ void* const p = mi_arena_block_start(arena, bitmap_idx);
+ bool needs_recommit;
+ size_t already_committed = 0;
+ if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx, &already_committed)) {
+ // all blocks are committed, we can purge freely
+ mi_assert_internal(already_committed == blocks);
+ needs_recommit = _mi_os_purge(p, size);
+ }
+ else {
+ // some blocks are not committed -- this can happen when a partially committed block is freed
+ // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
+ // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
+ mi_assert_internal(already_committed < blocks);
+ mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
+ needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
+ }
+
+ // clear the purged blocks
+ _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
+ // update committed bitmap
+ if (needs_recommit) {
+ _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+ }
+ }
+
+ // Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
+ // Note: assumes we (still) own the area as we may purge immediately
+ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
+ mi_assert_internal(arena->blocks_purge != NULL);
+ const long delay = mi_arena_purge_delay();
+ if (delay < 0) return; // is purging allowed at all?
+
+ if (_mi_preloading() || delay == 0) {
+ // decommit directly
+ mi_arena_purge(arena, bitmap_idx, blocks);
+ }
+ else {
+ // schedule purge
+ const mi_msecs_t expire = _mi_clock_now() + delay;
+ mi_msecs_t expire0 = 0;
+ if (mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire0, expire)) {
+ // expiration was not yet set
+ // maybe set the global arenas expire as well (if it wasn't set already)
+ mi_atomic_casi64_strong_acq_rel(&mi_arenas_purge_expire, &expire0, expire);
+ }
+ else {
+ // already an expiration was set
+ }
+ _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL, NULL);
+ }
+ }
+
+ // purge a range of blocks
+ // return true if the full range was purged.
+ // assumes we own the area (i.e. blocks_in_use is claimed by us)
+ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge) {
+ const size_t endidx = startidx + bitlen;
+ size_t bitidx = startidx;
+ bool all_purged = false;
+ while (bitidx < endidx) {
+ // count consecutive ones in the purge mask
+ size_t count = 0;
+ while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
+ count++;
+ }
+ if (count > 0) {
+ // found range to be purged
+ const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
+ mi_arena_purge(arena, range_idx, count);
+ if (count == bitlen) {
+ all_purged = true;
+ }
+ }
+ bitidx += (count+1); // +1 to skip the zero bit (or end)
+ }
+ return all_purged;
+ }
+
+ // returns true if anything was purged
+ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
+ {
+ // check pre-conditions
+ if (arena->memid.is_pinned) return false;
+
+ // expired yet?
+ mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
+ if (!force && (expire == 0 || expire > now)) return false;
+
+ // reset expire (if not already set concurrently)
+ mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);
+ _mi_stat_counter_increase(&_mi_stats_main.arena_purges, 1);
+
+ // potential purges scheduled, walk through the bitmap
+ bool any_purged = false;
+ bool full_purge = true;
+ for (size_t i = 0; i < arena->field_count; i++) {
+ size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
+ if (purge != 0) {
+ size_t bitidx = 0;
+ while (bitidx < MI_BITMAP_FIELD_BITS) {
+ // find consecutive range of ones in the purge mask
+ size_t bitlen = 0;
+ while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
+ bitlen++;
+ }
+ // temporarily claim the purge range as "in-use" to be thread-safe with allocation
+ // try to claim the longest range of corresponding in_use bits
+ const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
+ while( bitlen > 0 ) {
+ if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
+ break;
+ }
+ bitlen--;
+ }
+ // actual claimed bits at `in_use`
+ if (bitlen > 0) {
+ // read purge again now that we have the in_use bits
+ purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
+ if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge)) {
+ full_purge = false;
+ }
+ any_purged = true;
+ // release the claimed `in_use` bits again
+ _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
+ }
+ bitidx += (bitlen+1); // +1 to skip the zero (or end)
+ } // while bitidx
+ } // purge != 0
+ }
+ // if not fully purged, make sure to purge again in the future
+ if (!full_purge) {
+ const long delay = mi_arena_purge_delay();
+ mi_msecs_t expected = 0;
+ mi_atomic_casi64_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay);
+ }
+ return any_purged;
+ }
+
+ static void mi_arenas_try_purge( bool force, bool visit_all )
+ {
+ if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled
+
+ // check if any arena needs purging?
+ const mi_msecs_t now = _mi_clock_now();
+ mi_msecs_t arenas_expire = mi_atomic_loadi64_acquire(&mi_arenas_purge_expire);
+ if (!force && (arenas_expire == 0 || arenas_expire < now)) return;
+
+ const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
+ if (max_arena == 0) return;
+
+ // allow only one thread to purge at a time
+ static mi_atomic_guard_t purge_guard;
+ mi_atomic_guard(&purge_guard)
+ {
+ // increase global expire: at most one purge per delay cycle
+ mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
+ size_t max_purge_count = (visit_all ? max_arena : 2);
+ bool all_visited = true;
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
+ if (arena != NULL) {
+ if (mi_arena_try_purge(arena, now, force)) {
+ if (max_purge_count <= 1) {
+ all_visited = false;
+ break;
+ }
+ max_purge_count--;
+ }
+ }
+ }
+ if (all_visited) {
+ // all arena's were visited and purged: reset global expire
+ mi_atomic_storei64_release(&mi_arenas_purge_expire, 0);
+ }
+ }
+ }
+
+
+ /* -----------------------------------------------------------
+ Arena free
+ ----------------------------------------------------------- */
+
+ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid) {
+ mi_assert_internal(size > 0);
+ mi_assert_internal(committed_size <= size);
+ if (p==NULL) return;
+ if (size==0) return;
+ const bool all_committed = (committed_size == size);
+ const size_t decommitted_size = (committed_size <= size ? size - committed_size : 0);
+
+ // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
+ mi_track_mem_undefined(p,size);
+
+ if (mi_memkind_is_os(memid.memkind)) {
+ // was a direct OS allocation, pass through
+ if (!all_committed && decommitted_size > 0) {
+ // if partially committed, adjust the committed stats (as `_mi_os_free` will decrease commit by the full size)
+ _mi_stat_increase(&_mi_stats_main.committed, decommitted_size);
+ }
+ _mi_os_free(p, size, memid);
+ }
+ else if (memid.memkind == MI_MEM_ARENA) {
+ // allocated in an arena
+ size_t arena_idx;
+ size_t bitmap_idx;
+ mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
+ mi_assert_internal(arena_idx < MI_MAX_ARENAS);
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
+ mi_assert_internal(arena != NULL);
+ const size_t blocks = mi_block_count_of_size(size);
+
+ // checks
+ if (arena == NULL) {
+ _mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
+ return;
+ }
+ mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
+ if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
+ _mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
+ return;
+ }
+
+ // potentially decommit
+ if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
+ mi_assert_internal(all_committed);
+ }
+ else {
+ mi_assert_internal(arena->blocks_committed != NULL);
+ mi_assert_internal(arena->blocks_purge != NULL);
+
+ if (!all_committed) {
+ // mark the entire range as no longer committed (so we will recommit the full range when re-using)
+ _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+ mi_track_mem_noaccess(p,size);
+ //if (committed_size > 0) {
+ // if partially committed, adjust the committed stats (is it will be recommitted when re-using)
+ // in the delayed purge, we do no longer decrease the commit if the range is not marked entirely as committed.
+ _mi_stat_decrease(&_mi_stats_main.committed, committed_size);
+ //}
+ // note: if not all committed, it may be that the purge will reset/decommit the entire range
+ // that contains already decommitted parts. Since purge consistently uses reset or decommit that
+ // works (as we should never reset decommitted parts).
+ }
+ // (delay) purge the entire range
+ mi_arena_schedule_purge(arena, bitmap_idx, blocks);
+ }
+
+ // and make it available to others again
+ bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
+ if (!all_inuse) {
729
+ _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size);
730
+ return;
731
+ };
732
+ }
733
+ else {
734
+ // arena was none, external, or static; nothing to do
735
+ mi_assert_internal(memid.memkind < MI_MEM_OS);
736
+ }
737
+
738
+ // purge expired decommits
739
+ mi_arenas_try_purge(false, false);
740
+ }
741
+
742
+ // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
743
+ // for dynamic libraries that are unloaded and need to release all their allocated memory.
744
+ static void mi_arenas_unsafe_destroy(void) {
745
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
746
+ size_t new_max_arena = 0;
747
+ for (size_t i = 0; i < max_arena; i++) {
748
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
749
+ if (arena != NULL) {
750
+ mi_lock_done(&arena->abandoned_visit_lock);
751
+ if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
752
+ mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
753
+ _mi_os_free(arena->start, mi_arena_size(arena), arena->memid);
754
+ }
755
+ else {
756
+ new_max_arena = i;
757
+ }
758
+ _mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size);
759
+ }
760
+ }
761
+
762
+ // try to lower the max arena.
763
+ size_t expected = max_arena;
764
+ mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
765
+ }
766
+
767
+ // Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
768
+ void _mi_arenas_collect(bool force_purge) {
769
+ mi_arenas_try_purge(force_purge, force_purge /* visit all? */);
770
+ }
771
+
772
+ // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
773
+ // for dynamic libraries that are unloaded and need to release all their allocated memory.
774
+ void _mi_arena_unsafe_destroy_all(void) {
775
+ mi_arenas_unsafe_destroy();
776
+ _mi_arenas_collect(true /* force purge */); // purge non-owned arenas
777
+ }
778
+
779
+ // Is a pointer inside any of our arenas?
780
+ bool _mi_arena_contains(const void* p) {
781
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
782
+ for (size_t i = 0; i < max_arena; i++) {
783
+ mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
784
+ if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
785
+ return true;
786
+ }
787
+ }
788
+ return false;
789
+ }
790
+
791
+ /* -----------------------------------------------------------
792
+ Add an arena.
793
+ ----------------------------------------------------------- */
794
+
795
+ static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id, mi_stats_t* stats) {
+   mi_assert_internal(arena != NULL);
+   mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t, &arena->start) % MI_SEGMENT_ALIGN == 0);
+   mi_assert_internal(arena->block_count > 0);
+   if (arena_id != NULL) { *arena_id = -1; }
+
+   size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
+   if (i >= MI_MAX_ARENAS) {
+     mi_atomic_decrement_acq_rel(&mi_arena_count);
+     return false;
+   }
+   _mi_stat_counter_increase(&stats->arena_count, 1);
+   arena->id = mi_arena_id_create(i);
+   mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], arena);
+   if (arena_id != NULL) { *arena_id = arena->id; }
+   return true;
+ }
+
+ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
+ {
+   if (arena_id != NULL) *arena_id = _mi_arena_id_none();
+   if (size < MI_ARENA_BLOCK_SIZE) {
+     _mi_warning_message("the arena size is too small (memory at %p with size %zu)\n", start, size);
+     return false;
+   }
+   if (is_large) {
+     mi_assert_internal(memid.initially_committed && memid.is_pinned);
+   }
+   if (!_mi_is_aligned(start, MI_SEGMENT_ALIGN)) {
+     void* const aligned_start = mi_align_up_ptr(start, MI_SEGMENT_ALIGN);
+     const size_t diff = (uint8_t*)aligned_start - (uint8_t*)start;
+     if (diff >= size || (size - diff) < MI_ARENA_BLOCK_SIZE) {
+       _mi_warning_message("after alignment, the size of the arena becomes too small (memory at %p with size %zu)\n", start, size);
+       return false;
+     }
+     start = aligned_start;
+     size = size - diff;
+   }
+
+   const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
+   const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
+   const size_t bitmaps = (memid.is_pinned ? 3 : 5);
+   const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
+   mi_memid_t meta_memid;
+   mi_arena_t* arena = (mi_arena_t*)_mi_arena_meta_zalloc(asize, &meta_memid);
+   if (arena == NULL) return false;
+
+   // already zero'd due to zalloc
+   // _mi_memzero(arena, asize);
+   arena->id = _mi_arena_id_none();
+   arena->memid = memid;
+   arena->exclusive = exclusive;
+   arena->meta_size = asize;
+   arena->meta_memid = meta_memid;
+   arena->block_count = bcount;
+   arena->field_count = fields;
+   arena->start = (uint8_t*)start;
+   arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
+   arena->is_large = is_large;
+   arena->purge_expire = 0;
+   arena->search_idx = 0;
+   mi_lock_init(&arena->abandoned_visit_lock);
+   // consecutive bitmaps
+   arena->blocks_dirty = &arena->blocks_inuse[fields];          // just after inuse bitmap
+   arena->blocks_abandoned = &arena->blocks_inuse[2 * fields];  // just after dirty bitmap
+   arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap
+   arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[4*fields]);     // just after committed bitmap
+   // initialize committed bitmap?
+   if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
+     memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
+   }
+
+   // and claim leftover blocks if needed (so we never allocate there)
+   ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
+   mi_assert_internal(post >= 0);
+   if (post > 0) {
+     // don't use leftover bits at the end
+     mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
+     _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
+   }
+   return mi_arena_add(arena, arena_id, &_mi_stats_main);
+ }
+
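To make the metadata sizing above concrete, here is a small worked sketch. It assumes a 64-bit target (so MI_BITMAP_FIELD_BITS is 64 and a bitmap field is 8 bytes) and, purely for illustration, a 32 MiB arena block size; the real constants come from mimalloc's headers:

#include <stdio.h>
#include <stddef.h>

int main(void) {
  const size_t block_size = 32u * 1024 * 1024;                    // hypothetical MI_ARENA_BLOCK_SIZE
  const size_t field_bits = 64;                                   // MI_BITMAP_FIELD_BITS on a 64-bit target
  const size_t size       = 4ull * 1024 * 1024 * 1024;            // a 4 GiB pinned arena
  const size_t bcount  = size / block_size;                       // 128 blocks
  const size_t fields  = (bcount + field_bits - 1) / field_bits;  // _mi_divide_up(128, 64) == 2
  const size_t bitmaps = 3;                                       // pinned: inuse, dirty, abandoned only
  // asize = sizeof(mi_arena_t) + bitmaps*fields*sizeof(mi_bitmap_field_t) -> sizeof(mi_arena_t) + 48 bytes here
  printf("bcount=%zu fields=%zu bitmap bytes=%zu\n", bcount, fields, bitmaps * fields * sizeof(size_t));
  return 0;
}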
+ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+   mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
+   memid.initially_committed = is_committed;
+   memid.initially_zero = is_zero;
+   memid.is_pinned = is_large;
+   return mi_manage_os_memory_ex2(start, size, is_large, numa_node, exclusive, memid, arena_id);
+ }
+
+ // Reserve a range of regular OS memory
+ int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+   if (arena_id != NULL) *arena_id = _mi_arena_id_none();
+   size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
+   mi_memid_t memid;
+   void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid);
+   if (start == NULL) return ENOMEM;
+   const bool is_large = memid.is_pinned; // todo: use separate is_large field?
+   if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
+     _mi_os_free_ex(start, size, commit, memid);
+     _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
+     return ENOMEM;
+   }
+   _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : "");
+   return 0;
+ }
+
+
+ // Manage a range of regular OS memory
+ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
+   return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
+ }
+
+ // Reserve a range of regular OS memory
+ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
+   return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
+ }
+
+
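A hedged usage sketch of the public entry points defined above, using only `mi_reserve_os_memory`, `mi_malloc`, and `mi_free` from `mimalloc.h`; the 256 MiB size is illustrative:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Reserve 256 MiB of OS memory up front as a mimalloc arena
  // (committed, large OS pages allowed); returns 0 on success.
  int err = mi_reserve_os_memory(256 * 1024 * 1024, true /* commit */, true /* allow_large */);
  if (err != 0) {
    fprintf(stderr, "mi_reserve_os_memory failed: %d\n", err);
    return 1;
  }
  // Subsequent allocations can now be served from the reserved arena.
  void* p = mi_malloc(4096);
  mi_free(p);
  return 0;
}

`mi_manage_os_memory` takes the same route for memory the embedder has already obtained; per the checks above, the region should be at least MI_ARENA_BLOCK_SIZE large and is aligned up to MI_SEGMENT_ALIGN internally.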
+ /* -----------------------------------------------------------
+   Debugging
+ ----------------------------------------------------------- */
+
+ static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_t block_count, mi_bitmap_field_t* fields, size_t field_count) {
+   _mi_message("%s%s:\n", prefix, header);
+   size_t bcount = 0;
+   size_t inuse_count = 0;
+   for (size_t i = 0; i < field_count; i++) {
+     char buf[MI_BITMAP_FIELD_BITS + 1];
+     uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
+     for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++, bcount++) {
+       if (bcount < block_count) {
+         bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
+         if (inuse) inuse_count++;
+         buf[bit] = (inuse ? 'x' : '.');
+       }
+       else {
+         buf[bit] = ' ';
+       }
+     }
+     buf[MI_BITMAP_FIELD_BITS] = 0;
+     _mi_message("%s %s\n", prefix, buf);
+   }
+   _mi_message("%s total ('x'): %zu\n", prefix, inuse_count);
+   return inuse_count;
+ }
+
+ void mi_debug_show_arenas(void) mi_attr_noexcept {
+   const bool show_inuse = true;
+   size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
+   size_t inuse_total = 0;
+   //size_t abandoned_total = 0;
+   //size_t purge_total = 0;
+   for (size_t i = 0; i < max_arenas; i++) {
+     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+     if (arena == NULL) break;
+     _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, (size_t)(MI_ARENA_BLOCK_SIZE / MI_MiB), arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
+     if (show_inuse) {
+       inuse_total += mi_debug_show_bitmap(" ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count);
+     }
+     if (arena->blocks_committed != NULL) {
+       mi_debug_show_bitmap(" ", "committed blocks", arena->block_count, arena->blocks_committed, arena->field_count);
+     }
+     //if (show_abandoned) {
+     //  abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count);
+     //}
+     //if (show_purge && arena->blocks_purge != NULL) {
+     //  purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count);
+     //}
+   }
+   if (show_inuse) _mi_message("total inuse blocks : %zu\n", inuse_total);
+   //if (show_abandoned) _mi_message("total abandoned blocks: %zu\n", abandoned_total);
+   //if (show_purge) _mi_message("total purgeable blocks: %zu\n", purge_total);
+ }
+
+
+ void mi_arenas_print(void) mi_attr_noexcept {
+   mi_debug_show_arenas();
+ }
+
+
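A short hedged sketch of using the debug helper defined above to inspect arena occupancy; the preceding reservation and allocation are only there to give it something to print:

#include <mimalloc.h>

int main(void) {
  mi_reserve_os_memory(128 * 1024 * 1024, true /* commit */, false /* allow_large */);
  void* p = mi_malloc(1024 * 1024);
  // Prints, per arena, the block count and an 'x'/'.' map of the in-use
  // (and, when present, committed) block bitmaps, as implemented above.
  mi_debug_show_arenas();
  mi_free(p);
  return 0;
}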
+ /* -----------------------------------------------------------
+   Reserve a huge page arena.
+ ----------------------------------------------------------- */
+ // reserve at a specific numa node
+ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+   if (arena_id != NULL) *arena_id = -1;
+   if (pages == 0) return 0;
+   if (numa_node < -1) numa_node = -1;
+   if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
+   size_t hsize = 0;
+   size_t pages_reserved = 0;
+   mi_memid_t memid;
+   void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
+   if (p == NULL || pages_reserved == 0) {
+     _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
+     return ENOMEM;
+   }
+   _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
+
+   if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
+     _mi_os_free(p, hsize, memid);
+     return ENOMEM;
+   }
+   return 0;
+ }
+
+ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
+   return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
+ }
+
+ // reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
+ int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
+   if (pages == 0) return 0;
+
+   // pages per numa node
+   int numa_count = (numa_nodes > 0 && numa_nodes <= INT_MAX ? (int)numa_nodes : _mi_os_numa_node_count());
+   if (numa_count == 0) numa_count = 1;
+   const size_t pages_per = pages / numa_count;
+   const size_t pages_mod = pages % numa_count;
+   const size_t timeout_per = (timeout_msecs == 0 ? 0 : (timeout_msecs / numa_count) + 50);
+
+   // reserve evenly among numa nodes
+   for (int numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
+     size_t node_pages = pages_per; // can be 0
+     if ((size_t)numa_node < pages_mod) node_pages++;
+     int err = mi_reserve_huge_os_pages_at(node_pages, numa_node, timeout_per);
+     if (err) return err;
+     if (pages < node_pages) {
+       pages = 0;
+     }
+     else {
+       pages -= node_pages;
+     }
+   }
+
+   return 0;
+ }
+
+ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
+   MI_UNUSED(max_secs);
+   _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
+   if (pages_reserved != NULL) *pages_reserved = 0;
+   int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
+   if (err == 0 && pages_reserved != NULL) *pages_reserved = pages;
+   return err;
+ }
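Finally, a hedged sketch of reserving huge OS pages through the interleaved API defined above; the page count, node choice, and timeout are illustrative, and on most systems this requires huge/large page support to be configured beforehand:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Try to reserve 8 x 1 GiB huge pages, spread over the detected NUMA nodes
  // (numa_nodes == 0 means "use the available ones"), waiting at most 2 seconds in total.
  int err = mi_reserve_huge_os_pages_interleave(8, 0, 2000 /* timeout in msecs */);
  if (err != 0) {
    fprintf(stderr, "huge page reservation failed: %d\n", err);
  }
  // Or pin the reservation to a single NUMA node instead:
  // mi_reserve_huge_os_pages_at(4, 1 /* numa node */, 1000);
  return 0;
}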