@shd101wyy/yo 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +17 -0
- package/README.md +80 -0
- package/out/cjs/index.cjs +51 -0
- package/out/cjs/yo-cli.cjs +2158 -0
- package/out/esm/index.mjs +51 -0
- package/out/types/src/codegen/async/runtime.d.ts +2 -0
- package/out/types/src/codegen/async/state-code-gen.d.ts +10 -0
- package/out/types/src/codegen/async/state-machine.d.ts +13 -0
- package/out/types/src/codegen/c/collection.d.ts +3 -0
- package/out/types/src/codegen/codegen-c.d.ts +12 -0
- package/out/types/src/codegen/constants.d.ts +3 -0
- package/out/types/src/codegen/expressions/array.d.ts +4 -0
- package/out/types/src/codegen/expressions/generation.d.ts +11 -0
- package/out/types/src/codegen/expressions/index.d.ts +2 -0
- package/out/types/src/codegen/functions/collection.d.ts +5 -0
- package/out/types/src/codegen/functions/context.d.ts +57 -0
- package/out/types/src/codegen/functions/generation.d.ts +25 -0
- package/out/types/src/codegen/functions/index.d.ts +2 -0
- package/out/types/src/codegen/index.d.ts +20 -0
- package/out/types/src/codegen/parallelism/runtime.d.ts +2 -0
- package/out/types/src/codegen/types/collection.d.ts +8 -0
- package/out/types/src/codegen/types/generation.d.ts +13 -0
- package/out/types/src/codegen/types/index.d.ts +2 -0
- package/out/types/src/codegen/utils/fixup.d.ts +2 -0
- package/out/types/src/codegen/utils/index.d.ts +77 -0
- package/out/types/src/codegen/values/index.d.ts +1 -0
- package/out/types/src/emitter.d.ts +11 -0
- package/out/types/src/env.d.ts +85 -0
- package/out/types/src/error.d.ts +45 -0
- package/out/types/src/evaluator/async/await-analysis-types.d.ts +23 -0
- package/out/types/src/evaluator/async/await-analysis.d.ts +5 -0
- package/out/types/src/evaluator/builtins/alignof.d.ts +8 -0
- package/out/types/src/evaluator/builtins/and_or.d.ts +8 -0
- package/out/types/src/evaluator/builtins/arc_fns.d.ts +58 -0
- package/out/types/src/evaluator/builtins/array_fns.d.ts +0 -0
- package/out/types/src/evaluator/builtins/as.d.ts +8 -0
- package/out/types/src/evaluator/builtins/async_fns.d.ts +8 -0
- package/out/types/src/evaluator/builtins/compt_assert.d.ts +8 -0
- package/out/types/src/evaluator/builtins/compt_boolean_fns.d.ts +8 -0
- package/out/types/src/evaluator/builtins/compt_expect_error.d.ts +8 -0
- package/out/types/src/evaluator/builtins/compt_list_fns.d.ts +33 -0
- package/out/types/src/evaluator/builtins/compt_print.d.ts +8 -0
- package/out/types/src/evaluator/builtins/compt_string_fns.d.ts +8 -0
- package/out/types/src/evaluator/builtins/consume.d.ts +8 -0
- package/out/types/src/evaluator/builtins/drop.d.ts +8 -0
- package/out/types/src/evaluator/builtins/dup.d.ts +8 -0
- package/out/types/src/evaluator/builtins/expr_fns.d.ts +33 -0
- package/out/types/src/evaluator/builtins/future_fns.d.ts +8 -0
- package/out/types/src/evaluator/builtins/gc.d.ts +8 -0
- package/out/types/src/evaluator/builtins/gensym.d.ts +8 -0
- package/out/types/src/evaluator/builtins/impl_constraint.d.ts +8 -0
- package/out/types/src/evaluator/builtins/macro_expand.d.ts +8 -0
- package/out/types/src/evaluator/builtins/numeric_fns.d.ts +8 -0
- package/out/types/src/evaluator/builtins/panic.d.ts +8 -0
- package/out/types/src/evaluator/builtins/ptr_fns.d.ts +8 -0
- package/out/types/src/evaluator/builtins/quote.d.ts +13 -0
- package/out/types/src/evaluator/builtins/rc.d.ts +8 -0
- package/out/types/src/evaluator/builtins/sizeof.d.ts +8 -0
- package/out/types/src/evaluator/builtins/the.d.ts +8 -0
- package/out/types/src/evaluator/builtins/type_fns.d.ts +28 -0
- package/out/types/src/evaluator/builtins/va_start.d.ts +8 -0
- package/out/types/src/evaluator/builtins/var_fns.d.ts +18 -0
- package/out/types/src/evaluator/calls/array.d.ts +13 -0
- package/out/types/src/evaluator/calls/array_type.d.ts +11 -0
- package/out/types/src/evaluator/calls/closure_type.d.ts +11 -0
- package/out/types/src/evaluator/calls/compt_function.d.ts +19 -0
- package/out/types/src/evaluator/calls/compt_list_type.d.ts +11 -0
- package/out/types/src/evaluator/calls/function.d.ts +16 -0
- package/out/types/src/evaluator/calls/function_type.d.ts +15 -0
- package/out/types/src/evaluator/calls/helper.d.ts +42 -0
- package/out/types/src/evaluator/calls/iso.d.ts +15 -0
- package/out/types/src/evaluator/calls/module_type.d.ts +11 -0
- package/out/types/src/evaluator/calls/numeric_type.d.ts +15 -0
- package/out/types/src/evaluator/calls/pointer.d.ts +8 -0
- package/out/types/src/evaluator/calls/pointer_type.d.ts +14 -0
- package/out/types/src/evaluator/calls/type.d.ts +12 -0
- package/out/types/src/evaluator/context.d.ts +169 -0
- package/out/types/src/evaluator/exprs/_expr.d.ts +8 -0
- package/out/types/src/evaluator/exprs/assignment.d.ts +9 -0
- package/out/types/src/evaluator/exprs/begin.d.ts +10 -0
- package/out/types/src/evaluator/exprs/binding.d.ts +12 -0
- package/out/types/src/evaluator/exprs/c_include.d.ts +8 -0
- package/out/types/src/evaluator/exprs/cond.d.ts +8 -0
- package/out/types/src/evaluator/exprs/destructuring_assignment.d.ts +33 -0
- package/out/types/src/evaluator/exprs/exists.d.ts +0 -0
- package/out/types/src/evaluator/exprs/expr.d.ts +9 -0
- package/out/types/src/evaluator/exprs/extern.d.ts +8 -0
- package/out/types/src/evaluator/exprs/identifer_and_operator.d.ts +9 -0
- package/out/types/src/evaluator/exprs/import.d.ts +9 -0
- package/out/types/src/evaluator/exprs/initialization_assignment.d.ts +8 -0
- package/out/types/src/evaluator/exprs/match.d.ts +8 -0
- package/out/types/src/evaluator/exprs/open.d.ts +8 -0
- package/out/types/src/evaluator/exprs/property_access.d.ts +8 -0
- package/out/types/src/evaluator/exprs/recur.d.ts +8 -0
- package/out/types/src/evaluator/exprs/subtype_of.d.ts +21 -0
- package/out/types/src/evaluator/exprs/test.d.ts +8 -0
- package/out/types/src/evaluator/exprs/typeof.d.ts +8 -0
- package/out/types/src/evaluator/exprs/while.d.ts +8 -0
- package/out/types/src/evaluator/index.d.ts +26 -0
- package/out/types/src/evaluator/types/array.d.ts +8 -0
- package/out/types/src/evaluator/types/closure.d.ts +8 -0
- package/out/types/src/evaluator/types/compt_list.d.ts +8 -0
- package/out/types/src/evaluator/types/concrete_module.d.ts +8 -0
- package/out/types/src/evaluator/types/dyn.d.ts +8 -0
- package/out/types/src/evaluator/types/enum.d.ts +8 -0
- package/out/types/src/evaluator/types/expr_synthesizer.d.ts +14 -0
- package/out/types/src/evaluator/types/field.d.ts +14 -0
- package/out/types/src/evaluator/types/fn_module.d.ts +8 -0
- package/out/types/src/evaluator/types/function.d.ts +58 -0
- package/out/types/src/evaluator/types/future_module.d.ts +8 -0
- package/out/types/src/evaluator/types/module.d.ts +19 -0
- package/out/types/src/evaluator/types/newtype.d.ts +8 -0
- package/out/types/src/evaluator/types/object.d.ts +8 -0
- package/out/types/src/evaluator/types/proofs.d.ts +0 -0
- package/out/types/src/evaluator/types/slice.d.ts +8 -0
- package/out/types/src/evaluator/types/struct.d.ts +8 -0
- package/out/types/src/evaluator/types/synthesizer.d.ts +16 -0
- package/out/types/src/evaluator/types/tuple.d.ts +18 -0
- package/out/types/src/evaluator/types/union.d.ts +8 -0
- package/out/types/src/evaluator/types/utils.d.ts +71 -0
- package/out/types/src/evaluator/types/validation.d.ts +3 -0
- package/out/types/src/evaluator/utils/array-utils.d.ts +15 -0
- package/out/types/src/evaluator/utils/closure.d.ts +35 -0
- package/out/types/src/evaluator/utils.d.ts +4 -0
- package/out/types/src/evaluator/values/anonymous_function.d.ts +8 -0
- package/out/types/src/evaluator/values/anonymous_module.d.ts +17 -0
- package/out/types/src/evaluator/values/anonymous_struct.d.ts +8 -0
- package/out/types/src/evaluator/values/array.d.ts +8 -0
- package/out/types/src/evaluator/values/boolean.d.ts +3 -0
- package/out/types/src/evaluator/values/char.d.ts +3 -0
- package/out/types/src/evaluator/values/compt_list.d.ts +8 -0
- package/out/types/src/evaluator/values/dyn.d.ts +8 -0
- package/out/types/src/evaluator/values/float.d.ts +4 -0
- package/out/types/src/evaluator/values/integer.d.ts +4 -0
- package/out/types/src/evaluator/values/module.d.ts +58 -0
- package/out/types/src/evaluator/values/string.d.ts +3 -0
- package/out/types/src/evaluator/values/tuple.d.ts +32 -0
- package/out/types/src/expr.d.ts +456 -0
- package/out/types/src/function-value.d.ts +42 -0
- package/out/types/src/index.d.ts +4 -0
- package/out/types/src/lexer.d.ts +2 -0
- package/out/types/src/logger.d.ts +1 -0
- package/out/types/src/module-manager.d.ts +30 -0
- package/out/types/src/naming-checker.d.ts +4 -0
- package/out/types/src/parser.d.ts +33 -0
- package/out/types/src/test-runner.d.ts +30 -0
- package/out/types/src/tests/codegen.test.d.ts +1 -0
- package/out/types/src/tests/fixme.test.d.ts +1 -0
- package/out/types/src/tests/module-manager.test.d.ts +1 -0
- package/out/types/src/tests/parser.test.d.ts +1 -0
- package/out/types/src/tests/sample.test.d.ts +0 -0
- package/out/types/src/tests/std.test.d.ts +1 -0
- package/out/types/src/token.d.ts +40 -0
- package/out/types/src/type-value.d.ts +7 -0
- package/out/types/src/types/compatibility.d.ts +16 -0
- package/out/types/src/types/creators.d.ts +73 -0
- package/out/types/src/types/definitions.d.ts +218 -0
- package/out/types/src/types/guards.d.ts +70 -0
- package/out/types/src/types/hierarchy.d.ts +4 -0
- package/out/types/src/types/index.d.ts +7 -0
- package/out/types/src/types/module_field.d.ts +2 -0
- package/out/types/src/types/tags.d.ts +45 -0
- package/out/types/src/types/utils.d.ts +50 -0
- package/out/types/src/unit-value.d.ts +7 -0
- package/out/types/src/utils.d.ts +6 -0
- package/out/types/src/value-tag.d.ts +29 -0
- package/out/types/src/value.d.ts +110 -0
- package/out/types/src/yo-cli.d.ts +1 -0
- package/out/types/tsconfig.tsbuildinfo +1 -0
- package/package.json +57 -0
- package/scripts/check-liburing.js +76 -0
- package/std/alg/hash.yo +50 -0
- package/std/allocator.yo +113 -0
- package/std/allocators/c_allocator.yo +118 -0
- package/std/async.yo +13 -0
- package/std/collections/array_list.yo +415 -0
- package/std/collections/hash_map.yo +482 -0
- package/std/collections/hash_set.yo +706 -0
- package/std/collections/index.yo +11 -0
- package/std/collections/linked_list.yo +439 -0
- package/std/error.yo +0 -0
- package/std/gc.yo +10 -0
- package/std/index.yo +12 -0
- package/std/io/file.yo +191 -0
- package/std/io/index.yo +5 -0
- package/std/libc/assert.yo +39 -0
- package/std/libc/ctype.yo +57 -0
- package/std/libc/errno.yo +182 -0
- package/std/libc/float.yo +87 -0
- package/std/libc/index.yo +29 -0
- package/std/libc/limits.yo +65 -0
- package/std/libc/math.yo +679 -0
- package/std/libc/signal.yo +101 -0
- package/std/libc/stdatomic.yo +213 -0
- package/std/libc/stdint.yo +214 -0
- package/std/libc/stdio.yo +225 -0
- package/std/libc/stdlib.yo +204 -0
- package/std/libc/string.yo +151 -0
- package/std/libc/time.yo +92 -0
- package/std/libc/unistd.yo +130 -0
- package/std/monad.yo +152 -0
- package/std/prelude.yo +3094 -0
- package/std/string/index.yo +8 -0
- package/std/string/rune.yo +82 -0
- package/std/string/string.yo +288 -0
- package/std/sync.yo +95 -0
- package/std/thread.yo +36 -0
- package/std/time.yo +13 -0
- package/std/worker.yo +36 -0
- package/vendor/mimalloc/.gitattributes +12 -0
- package/vendor/mimalloc/CMakeLists.txt +763 -0
- package/vendor/mimalloc/LICENSE +21 -0
- package/vendor/mimalloc/SECURITY.md +41 -0
- package/vendor/mimalloc/azure-pipelines.yml +251 -0
- package/vendor/mimalloc/bin/mimalloc-redirect-arm64.dll +0 -0
- package/vendor/mimalloc/bin/mimalloc-redirect-arm64.lib +0 -0
- package/vendor/mimalloc/bin/mimalloc-redirect-arm64ec.dll +0 -0
- package/vendor/mimalloc/bin/mimalloc-redirect-arm64ec.lib +0 -0
- package/vendor/mimalloc/bin/mimalloc-redirect.dll +0 -0
- package/vendor/mimalloc/bin/mimalloc-redirect.lib +0 -0
- package/vendor/mimalloc/bin/mimalloc-redirect32.dll +0 -0
- package/vendor/mimalloc/bin/mimalloc-redirect32.lib +0 -0
- package/vendor/mimalloc/bin/minject-arm64.exe +0 -0
- package/vendor/mimalloc/bin/minject.exe +0 -0
- package/vendor/mimalloc/bin/minject32.exe +0 -0
- package/vendor/mimalloc/bin/readme.md +118 -0
- package/vendor/mimalloc/cmake/JoinPaths.cmake +23 -0
- package/vendor/mimalloc/cmake/mimalloc-config-version.cmake +19 -0
- package/vendor/mimalloc/cmake/mimalloc-config.cmake +14 -0
- package/vendor/mimalloc/contrib/docker/alpine/Dockerfile +23 -0
- package/vendor/mimalloc/contrib/docker/alpine-arm32v7/Dockerfile +28 -0
- package/vendor/mimalloc/contrib/docker/alpine-x86/Dockerfile +28 -0
- package/vendor/mimalloc/contrib/docker/manylinux-x64/Dockerfile +23 -0
- package/vendor/mimalloc/contrib/docker/readme.md +10 -0
- package/vendor/mimalloc/contrib/vcpkg/portfile.cmake +64 -0
- package/vendor/mimalloc/contrib/vcpkg/readme.md +40 -0
- package/vendor/mimalloc/contrib/vcpkg/usage +20 -0
- package/vendor/mimalloc/contrib/vcpkg/vcpkg-cmake-wrapper.cmake +20 -0
- package/vendor/mimalloc/contrib/vcpkg/vcpkg.json +48 -0
- package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg +887 -0
- package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg +1185 -0
- package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg +757 -0
- package/vendor/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg +1028 -0
- package/vendor/mimalloc/doc/bench-2020/bench-r5a-1.svg +769 -0
- package/vendor/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg +868 -0
- package/vendor/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg +1157 -0
- package/vendor/mimalloc/doc/bench-2020/bench-r5a-2.svg +983 -0
- package/vendor/mimalloc/doc/bench-2020/bench-r5a-rss-1.svg +683 -0
- package/vendor/mimalloc/doc/bench-2020/bench-r5a-rss-2.svg +854 -0
- package/vendor/mimalloc/doc/bench-2020/bench-spec-rss.svg +713 -0
- package/vendor/mimalloc/doc/bench-2020/bench-spec.svg +713 -0
- package/vendor/mimalloc/doc/bench-2020/bench-z4-1.svg +890 -0
- package/vendor/mimalloc/doc/bench-2020/bench-z4-2.svg +1146 -0
- package/vendor/mimalloc/doc/bench-2020/bench-z4-rss-1.svg +796 -0
- package/vendor/mimalloc/doc/bench-2020/bench-z4-rss-2.svg +974 -0
- package/vendor/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg +952 -0
- package/vendor/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg +1255 -0
- package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg +955 -0
- package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg +1269 -0
- package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg +836 -0
- package/vendor/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg +1131 -0
- package/vendor/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg +766 -0
- package/vendor/mimalloc/doc/doxyfile +2895 -0
- package/vendor/mimalloc/doc/ds-logo.jpg +0 -0
- package/vendor/mimalloc/doc/ds-logo.png +0 -0
- package/vendor/mimalloc/doc/mimalloc-doc.h +1452 -0
- package/vendor/mimalloc/doc/mimalloc-doxygen.css +60 -0
- package/vendor/mimalloc/doc/mimalloc-logo-100.png +0 -0
- package/vendor/mimalloc/doc/mimalloc-logo.png +0 -0
- package/vendor/mimalloc/doc/mimalloc-logo.svg +161 -0
- package/vendor/mimalloc/doc/spades-logo.png +0 -0
- package/vendor/mimalloc/doc/unreal-logo.svg +43 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-lib.vcxproj +500 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-lib.vcxproj.filters +108 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj +508 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj.filters +111 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-override-test-dep.vcxproj +355 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj +360 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj +295 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj +292 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc-test.vcxproj +289 -0
- package/vendor/mimalloc/ide/vs2022/mimalloc.sln +151 -0
- package/vendor/mimalloc/include/mimalloc/atomic.h +557 -0
- package/vendor/mimalloc/include/mimalloc/internal.h +1153 -0
- package/vendor/mimalloc/include/mimalloc/prim.h +421 -0
- package/vendor/mimalloc/include/mimalloc/track.h +145 -0
- package/vendor/mimalloc/include/mimalloc/types.h +685 -0
- package/vendor/mimalloc/include/mimalloc-new-delete.h +66 -0
- package/vendor/mimalloc/include/mimalloc-override.h +68 -0
- package/vendor/mimalloc/include/mimalloc-stats.h +103 -0
- package/vendor/mimalloc/include/mimalloc.h +612 -0
- package/vendor/mimalloc/mimalloc.pc.in +11 -0
- package/vendor/mimalloc/readme.md +946 -0
- package/vendor/mimalloc/src/alloc-aligned.c +360 -0
- package/vendor/mimalloc/src/alloc-override.c +316 -0
- package/vendor/mimalloc/src/alloc-posix.c +185 -0
- package/vendor/mimalloc/src/alloc.c +692 -0
- package/vendor/mimalloc/src/arena-abandon.c +346 -0
- package/vendor/mimalloc/src/arena.c +1043 -0
- package/vendor/mimalloc/src/bitmap.c +441 -0
- package/vendor/mimalloc/src/bitmap.h +119 -0
- package/vendor/mimalloc/src/free.c +572 -0
- package/vendor/mimalloc/src/heap.c +733 -0
- package/vendor/mimalloc/src/init.c +714 -0
- package/vendor/mimalloc/src/libc.c +334 -0
- package/vendor/mimalloc/src/options.c +663 -0
- package/vendor/mimalloc/src/os.c +770 -0
- package/vendor/mimalloc/src/page-queue.c +390 -0
- package/vendor/mimalloc/src/page.c +1049 -0
- package/vendor/mimalloc/src/prim/emscripten/prim.c +249 -0
- package/vendor/mimalloc/src/prim/osx/alloc-override-zone.c +461 -0
- package/vendor/mimalloc/src/prim/osx/prim.c +9 -0
- package/vendor/mimalloc/src/prim/prim.c +76 -0
- package/vendor/mimalloc/src/prim/readme.md +9 -0
- package/vendor/mimalloc/src/prim/unix/prim.c +934 -0
- package/vendor/mimalloc/src/prim/wasi/prim.c +284 -0
- package/vendor/mimalloc/src/prim/windows/etw-mimalloc.wprp +61 -0
- package/vendor/mimalloc/src/prim/windows/etw.h +905 -0
- package/vendor/mimalloc/src/prim/windows/etw.man +0 -0
- package/vendor/mimalloc/src/prim/windows/prim.c +878 -0
- package/vendor/mimalloc/src/prim/windows/readme.md +17 -0
- package/vendor/mimalloc/src/random.c +258 -0
- package/vendor/mimalloc/src/segment-map.c +142 -0
- package/vendor/mimalloc/src/segment.c +1702 -0
- package/vendor/mimalloc/src/static.c +41 -0
- package/vendor/mimalloc/src/stats.c +635 -0
- package/vendor/mimalloc/test/CMakeLists.txt +56 -0
- package/vendor/mimalloc/test/main-override-dep.cpp +51 -0
- package/vendor/mimalloc/test/main-override-dep.h +11 -0
- package/vendor/mimalloc/test/main-override-static.c +539 -0
- package/vendor/mimalloc/test/main-override.c +36 -0
- package/vendor/mimalloc/test/main-override.cpp +497 -0
- package/vendor/mimalloc/test/main.c +46 -0
- package/vendor/mimalloc/test/readme.md +16 -0
- package/vendor/mimalloc/test/test-api-fill.c +343 -0
- package/vendor/mimalloc/test/test-api.c +466 -0
- package/vendor/mimalloc/test/test-stress.c +428 -0
- package/vendor/mimalloc/test/test-wrong.c +92 -0
- package/vendor/mimalloc/test/testhelper.h +49 -0
package/vendor/mimalloc/src/page.c
@@ -0,0 +1,1049 @@
|
|
|
1
|
+
/*----------------------------------------------------------------------------
|
|
2
|
+
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
|
|
3
|
+
This is free software; you can redistribute it and/or modify it under the
|
|
4
|
+
terms of the MIT license. A copy of the license can be found in the file
|
|
5
|
+
"LICENSE" at the root of this distribution.
|
|
6
|
+
-----------------------------------------------------------------------------*/
|
|
7
|
+
|
|
8
|
+
/* -----------------------------------------------------------
|
|
9
|
+
The core of the allocator. Every segment contains
|
|
10
|
+
pages of a certain block size. The main function
|
|
11
|
+
exported is `mi_malloc_generic`.
|
|
12
|
+
----------------------------------------------------------- */
|
|
13
|
+
|
|
14
|
+
#include "mimalloc.h"
|
|
15
|
+
#include "mimalloc/internal.h"
|
|
16
|
+
#include "mimalloc/atomic.h"
|
|
17
|
+
|
|
18
|
+
/* -----------------------------------------------------------
|
|
19
|
+
Definition of page queues for each block size
|
|
20
|
+
----------------------------------------------------------- */
|
|
21
|
+
|
|
22
|
+
#define MI_IN_PAGE_C
|
|
23
|
+
#include "page-queue.c"
|
|
24
|
+
#undef MI_IN_PAGE_C
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
/* -----------------------------------------------------------
|
|
28
|
+
Page helpers
|
|
29
|
+
----------------------------------------------------------- */
|
|
30
|
+
|
|
31
|
+
// Index a block in a page
|
|
32
|
+
static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) {
|
|
33
|
+
MI_UNUSED(page);
|
|
34
|
+
mi_assert_internal(page != NULL);
|
|
35
|
+
mi_assert_internal(i <= page->reserved);
|
|
36
|
+
return (mi_block_t*)((uint8_t*)page_start + (i * block_size));
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
|
|
40
|
+
static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld);
|
|
41
|
+
|
|
42
|
+
#if (MI_DEBUG>=3)
|
|
43
|
+
static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
|
|
44
|
+
size_t count = 0;
|
|
45
|
+
while (head != NULL) {
|
|
46
|
+
mi_assert_internal(page == _mi_ptr_page(head));
|
|
47
|
+
count++;
|
|
48
|
+
head = mi_block_next(page, head);
|
|
49
|
+
}
|
|
50
|
+
return count;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
/*
|
|
54
|
+
// Start of the page available memory
|
|
55
|
+
static inline uint8_t* mi_page_area(const mi_page_t* page) {
|
|
56
|
+
return _mi_page_start(_mi_page_segment(page), page, NULL);
|
|
57
|
+
}
|
|
58
|
+
*/
|
|
59
|
+
|
|
60
|
+
static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
|
|
61
|
+
size_t psize;
|
|
62
|
+
uint8_t* page_area = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
|
|
63
|
+
mi_block_t* start = (mi_block_t*)page_area;
|
|
64
|
+
mi_block_t* end = (mi_block_t*)(page_area + psize);
|
|
65
|
+
while(p != NULL) {
|
|
66
|
+
if (p < start || p >= end) return false;
|
|
67
|
+
p = mi_block_next(page, p);
|
|
68
|
+
}
|
|
69
|
+
#if MI_DEBUG>3 // generally too expensive to check this
|
|
70
|
+
if (page->free_is_zero) {
|
|
71
|
+
const size_t ubsize = mi_page_usable_block_size(page);
|
|
72
|
+
for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
|
|
73
|
+
mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
#endif
|
|
77
|
+
return true;
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
static bool mi_page_is_valid_init(mi_page_t* page) {
|
|
81
|
+
mi_assert_internal(mi_page_block_size(page) > 0);
|
|
82
|
+
mi_assert_internal(page->used <= page->capacity);
|
|
83
|
+
mi_assert_internal(page->capacity <= page->reserved);
|
|
84
|
+
|
|
85
|
+
uint8_t* start = mi_page_start(page);
|
|
86
|
+
mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL));
|
|
87
|
+
mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE));
|
|
88
|
+
//mi_assert_internal(start + page->capacity*page->block_size == page->top);
|
|
89
|
+
|
|
90
|
+
mi_assert_internal(mi_page_list_is_valid(page,page->free));
|
|
91
|
+
mi_assert_internal(mi_page_list_is_valid(page,page->local_free));
|
|
92
|
+
|
|
93
|
+
#if MI_DEBUG>3 // generally too expensive to check this
|
|
94
|
+
if (page->free_is_zero) {
|
|
95
|
+
const size_t ubsize = mi_page_usable_block_size(page);
|
|
96
|
+
for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
|
|
97
|
+
mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
#endif
|
|
101
|
+
|
|
102
|
+
#if !MI_TRACK_ENABLED && !MI_TSAN
|
|
103
|
+
mi_block_t* tfree = mi_page_thread_free(page);
|
|
104
|
+
mi_assert_internal(mi_page_list_is_valid(page, tfree));
|
|
105
|
+
//size_t tfree_count = mi_page_list_count(page, tfree);
|
|
106
|
+
//mi_assert_internal(tfree_count <= page->thread_freed + 1);
|
|
107
|
+
#endif
|
|
108
|
+
|
|
109
|
+
size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
|
|
110
|
+
mi_assert_internal(page->used + free_count == page->capacity);
|
|
111
|
+
|
|
112
|
+
return true;
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
extern mi_decl_hidden bool _mi_process_is_initialized; // has mi_process_init been called?
|
|
116
|
+
|
|
117
|
+
bool _mi_page_is_valid(mi_page_t* page) {
|
|
118
|
+
mi_assert_internal(mi_page_is_valid_init(page));
|
|
119
|
+
#if MI_SECURE
|
|
120
|
+
mi_assert_internal(page->keys[0] != 0);
|
|
121
|
+
#endif
|
|
122
|
+
if (mi_page_heap(page)!=NULL) {
|
|
123
|
+
mi_segment_t* segment = _mi_page_segment(page);
|
|
124
|
+
|
|
125
|
+
mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
|
|
126
|
+
#if MI_HUGE_PAGE_ABANDON
|
|
127
|
+
if (segment->kind != MI_SEGMENT_HUGE)
|
|
128
|
+
#endif
|
|
129
|
+
{
|
|
130
|
+
mi_page_queue_t* pq = mi_page_queue_of(page);
|
|
131
|
+
mi_assert_internal(mi_page_queue_contains(pq, page));
|
|
132
|
+
mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
|
|
133
|
+
mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
return true;
|
|
137
|
+
}
|
|
138
|
+
#endif
|
|
139
|
+
|
|
140
|
+
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
|
|
141
|
+
while (!_mi_page_try_use_delayed_free(page, delay, override_never)) {
|
|
142
|
+
mi_atomic_yield();
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
|
|
147
|
+
mi_thread_free_t tfreex;
|
|
148
|
+
mi_delayed_t old_delay;
|
|
149
|
+
mi_thread_free_t tfree;
|
|
150
|
+
size_t yield_count = 0;
|
|
151
|
+
do {
|
|
152
|
+
tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
|
|
153
|
+
tfreex = mi_tf_set_delayed(tfree, delay);
|
|
154
|
+
old_delay = mi_tf_delayed(tfree);
|
|
155
|
+
if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
|
|
156
|
+
if (yield_count >= 4) return false; // give up after 4 tries
|
|
157
|
+
yield_count++;
|
|
158
|
+
mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
|
|
159
|
+
// tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
|
|
160
|
+
}
|
|
161
|
+
else if (delay == old_delay) {
|
|
162
|
+
break; // avoid atomic operation if already equal
|
|
163
|
+
}
|
|
164
|
+
else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) {
|
|
165
|
+
break; // leave never-delayed flag set
|
|
166
|
+
}
|
|
167
|
+
} while ((old_delay == MI_DELAYED_FREEING) ||
|
|
168
|
+
!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
|
|
169
|
+
|
|
170
|
+
return true; // success
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
/* -----------------------------------------------------------
|
|
174
|
+
Page collect the `local_free` and `thread_free` lists
|
|
175
|
+
----------------------------------------------------------- */
|
|
176
|
+
|
|
177
|
+
// Collect the local `thread_free` list using an atomic exchange.
|
|
178
|
+
// Note: The exchange must be done atomically as this is used right after
|
|
179
|
+
// moving to the full list in `mi_page_collect_ex` and we need to
|
|
180
|
+
// ensure that there was no race where the page became unfull just before the move.
|
|
181
|
+
static void _mi_page_thread_free_collect(mi_page_t* page)
|
|
182
|
+
{
|
|
183
|
+
mi_block_t* head;
|
|
184
|
+
mi_thread_free_t tfreex;
|
|
185
|
+
mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
|
|
186
|
+
do {
|
|
187
|
+
head = mi_tf_block(tfree);
|
|
188
|
+
tfreex = mi_tf_set_block(tfree,NULL);
|
|
189
|
+
} while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex));
|
|
190
|
+
|
|
191
|
+
// return if the list is empty
|
|
192
|
+
if (head == NULL) return;
|
|
193
|
+
|
|
194
|
+
// find the tail -- also to get a proper count (without data races)
|
|
195
|
+
size_t max_count = page->capacity; // cannot collect more than capacity
|
|
196
|
+
size_t count = 1;
|
|
197
|
+
mi_block_t* tail = head;
|
|
198
|
+
mi_block_t* next;
|
|
199
|
+
while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
|
|
200
|
+
count++;
|
|
201
|
+
tail = next;
|
|
202
|
+
}
|
|
203
|
+
// if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
|
|
204
|
+
if (count > max_count) {
|
|
205
|
+
_mi_error_message(EFAULT, "corrupted thread-free list\n");
|
|
206
|
+
return; // the thread-free items cannot be freed
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
// and append the current local free list
|
|
210
|
+
mi_block_set_next(page,tail, page->local_free);
|
|
211
|
+
page->local_free = head;
|
|
212
|
+
|
|
213
|
+
// update counts now
|
|
214
|
+
page->used -= (uint16_t)count;
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
void _mi_page_free_collect(mi_page_t* page, bool force) {
|
|
218
|
+
mi_assert_internal(page!=NULL);
|
|
219
|
+
|
|
220
|
+
// collect the thread free list
|
|
221
|
+
if (force || mi_page_thread_free(page) != NULL) { // quick test to avoid an atomic operation
|
|
222
|
+
_mi_page_thread_free_collect(page);
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
// and the local free list
|
|
226
|
+
if (page->local_free != NULL) {
|
|
227
|
+
if mi_likely(page->free == NULL) {
|
|
228
|
+
// usual case
|
|
229
|
+
page->free = page->local_free;
|
|
230
|
+
page->local_free = NULL;
|
|
231
|
+
page->free_is_zero = false;
|
|
232
|
+
}
|
|
233
|
+
else if (force) {
|
|
234
|
+
// append -- only on shutdown (force) as this is a linear operation
|
|
235
|
+
mi_block_t* tail = page->local_free;
|
|
236
|
+
mi_block_t* next;
|
|
237
|
+
while ((next = mi_block_next(page, tail)) != NULL) {
|
|
238
|
+
tail = next;
|
|
239
|
+
}
|
|
240
|
+
mi_block_set_next(page, tail, page->free);
|
|
241
|
+
page->free = page->local_free;
|
|
242
|
+
page->local_free = NULL;
|
|
243
|
+
page->free_is_zero = false;
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
mi_assert_internal(!force || page->local_free == NULL);
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
/* -----------------------------------------------------------
|
|
253
|
+
Page fresh and retire
|
|
254
|
+
----------------------------------------------------------- */
|
|
255
|
+
|
|
256
|
+
// called from segments when reclaiming abandoned pages
|
|
257
|
+
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
|
|
258
|
+
mi_assert_expensive(mi_page_is_valid_init(page));
|
|
259
|
+
|
|
260
|
+
mi_assert_internal(mi_page_heap(page) == heap);
|
|
261
|
+
mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
|
|
262
|
+
#if MI_HUGE_PAGE_ABANDON
|
|
263
|
+
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
|
|
264
|
+
#endif
|
|
265
|
+
|
|
266
|
+
// TODO: push on full queue immediately if it is full?
|
|
267
|
+
mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
|
|
268
|
+
mi_page_queue_push(heap, pq, page);
|
|
269
|
+
mi_assert_expensive(_mi_page_is_valid(page));
|
|
270
|
+
}
|
|
271
|
+
|
|
272
|
+
// allocate a fresh page from a segment
|
|
273
|
+
static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
|
|
274
|
+
#if !MI_HUGE_PAGE_ABANDON
|
|
275
|
+
mi_assert_internal(pq != NULL);
|
|
276
|
+
mi_assert_internal(mi_heap_contains_queue(heap, pq));
|
|
277
|
+
mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
|
|
278
|
+
#endif
|
|
279
|
+
mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments);
|
|
280
|
+
if (page == NULL) {
|
|
281
|
+
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
|
|
282
|
+
return NULL;
|
|
283
|
+
}
|
|
284
|
+
#if MI_HUGE_PAGE_ABANDON
|
|
285
|
+
mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
|
|
286
|
+
#endif
|
|
287
|
+
mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
|
|
288
|
+
mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
|
|
289
|
+
// a fresh page was found, initialize it
|
|
290
|
+
const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
|
|
291
|
+
mi_assert_internal(full_block_size >= block_size);
|
|
292
|
+
mi_page_init(heap, page, full_block_size, heap->tld);
|
|
293
|
+
mi_heap_stat_increase(heap, pages, 1);
|
|
294
|
+
mi_heap_stat_increase(heap, page_bins[_mi_page_bin(page)], 1);
|
|
295
|
+
if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
|
|
296
|
+
mi_assert_expensive(_mi_page_is_valid(page));
|
|
297
|
+
return page;
|
|
298
|
+
}
|
|
299
|
+
|
|
300
|
+
// Get a fresh page to use
|
|
301
|
+
static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
|
|
302
|
+
mi_assert_internal(mi_heap_contains_queue(heap, pq));
|
|
303
|
+
mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0);
|
|
304
|
+
if (page==NULL) return NULL;
|
|
305
|
+
mi_assert_internal(pq->block_size==mi_page_block_size(page));
|
|
306
|
+
mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page)));
|
|
307
|
+
return page;
|
|
308
|
+
}
|
|
309
|
+
|
|
310
|
+
/* -----------------------------------------------------------
|
|
311
|
+
Do any delayed frees
|
|
312
|
+
(put there by other threads if they deallocated in a full page)
|
|
313
|
+
----------------------------------------------------------- */
|
|
314
|
+
void _mi_heap_delayed_free_all(mi_heap_t* heap) {
|
|
315
|
+
while (!_mi_heap_delayed_free_partial(heap)) {
|
|
316
|
+
mi_atomic_yield();
|
|
317
|
+
}
|
|
318
|
+
}
|
|
319
|
+
|
|
320
|
+
// returns true if all delayed frees were processed
|
|
321
|
+
bool _mi_heap_delayed_free_partial(mi_heap_t* heap) {
|
|
322
|
+
// take over the list (note: no atomic exchange since it is often NULL)
|
|
323
|
+
mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
|
|
324
|
+
while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ };
|
|
325
|
+
bool all_freed = true;
|
|
326
|
+
|
|
327
|
+
// and free them all
|
|
328
|
+
while(block != NULL) {
|
|
329
|
+
mi_block_t* next = mi_block_nextx(heap,block, heap->keys);
|
|
330
|
+
// use internal free instead of regular one to keep stats etc correct
|
|
331
|
+
if (!_mi_free_delayed_block(block)) {
|
|
332
|
+
// we might already start delayed freeing while another thread has not yet
|
|
333
|
+
// reset the delayed_freeing flag; in that case delay it further by reinserting the current block
|
|
334
|
+
// into the delayed free list
|
|
335
|
+
all_freed = false;
|
|
336
|
+
mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
|
|
337
|
+
do {
|
|
338
|
+
mi_block_set_nextx(heap, block, dfree, heap->keys);
|
|
339
|
+
} while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
|
|
340
|
+
}
|
|
341
|
+
block = next;
|
|
342
|
+
}
|
|
343
|
+
return all_freed;
|
|
344
|
+
}
|
|
345
|
+
|
|
346
|
+
/* -----------------------------------------------------------
|
|
347
|
+
Unfull, abandon, free and retire
|
|
348
|
+
----------------------------------------------------------- */
|
|
349
|
+
|
|
350
|
+
// Move a page from the full list back to a regular list
|
|
351
|
+
void _mi_page_unfull(mi_page_t* page) {
|
|
352
|
+
mi_assert_internal(page != NULL);
|
|
353
|
+
mi_assert_expensive(_mi_page_is_valid(page));
|
|
354
|
+
mi_assert_internal(mi_page_is_in_full(page));
|
|
355
|
+
if (!mi_page_is_in_full(page)) return;
|
|
356
|
+
|
|
357
|
+
mi_heap_t* heap = mi_page_heap(page);
|
|
358
|
+
mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL];
|
|
359
|
+
mi_page_set_in_full(page, false); // to get the right queue
|
|
360
|
+
mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
|
|
361
|
+
mi_page_set_in_full(page, true);
|
|
362
|
+
mi_page_queue_enqueue_from_full(pq, pqfull, page);
|
|
363
|
+
}
|
|
364
|
+
|
|
365
|
+
static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
|
|
366
|
+
mi_assert_internal(pq == mi_page_queue_of(page));
|
|
367
|
+
mi_assert_internal(!mi_page_immediate_available(page));
|
|
368
|
+
mi_assert_internal(!mi_page_is_in_full(page));
|
|
369
|
+
|
|
370
|
+
if (mi_page_is_in_full(page)) return;
|
|
371
|
+
mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page);
|
|
372
|
+
_mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
|
|
373
|
+
}
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
// Abandon a page with used blocks at the end of a thread.
|
|
377
|
+
// Note: only call if it is ensured that no references exist from
|
|
378
|
+
// the `page->heap->thread_delayed_free` into this page.
|
|
379
|
+
// Currently only called through `mi_heap_collect_ex` which ensures this.
|
|
380
|
+
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
|
|
381
|
+
mi_assert_internal(page != NULL);
|
|
382
|
+
mi_assert_expensive(_mi_page_is_valid(page));
|
|
383
|
+
mi_assert_internal(pq == mi_page_queue_of(page));
|
|
384
|
+
mi_assert_internal(mi_page_heap(page) != NULL);
|
|
385
|
+
|
|
386
|
+
mi_heap_t* pheap = mi_page_heap(page);
|
|
387
|
+
|
|
388
|
+
// remove from our page list
|
|
389
|
+
mi_segments_tld_t* segments_tld = &pheap->tld->segments;
|
|
390
|
+
mi_page_queue_remove(pq, page);
|
|
391
|
+
|
|
392
|
+
// page is no longer associated with our heap
|
|
393
|
+
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
|
|
394
|
+
mi_page_set_heap(page, NULL);
|
|
395
|
+
|
|
396
|
+
#if (MI_DEBUG>1) && !MI_TRACK_ENABLED
|
|
397
|
+
// check there are no references left..
|
|
398
|
+
for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) {
|
|
399
|
+
mi_assert_internal(_mi_ptr_page(block) != page);
|
|
400
|
+
}
|
|
401
|
+
#endif
|
|
402
|
+
|
|
403
|
+
// and abandon it
|
|
404
|
+
mi_assert_internal(mi_page_heap(page) == NULL);
|
|
405
|
+
_mi_segment_page_abandon(page,segments_tld);
|
|
406
|
+
}
|
|
407
|
+
|
|
408
|
+
// force abandon a page
|
|
409
|
+
void _mi_page_force_abandon(mi_page_t* page) {
|
|
410
|
+
mi_heap_t* heap = mi_page_heap(page);
|
|
411
|
+
// mark page as not using delayed free
|
|
412
|
+
_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
|
|
413
|
+
|
|
414
|
+
// ensure this page is no longer in the heap delayed free list
|
|
415
|
+
_mi_heap_delayed_free_all(heap);
|
|
416
|
+
// We can still access the page meta-info even if it is freed as we ensure
|
|
417
|
+
// in `mi_segment_force_abandon` that the segment is not freed (yet)
|
|
418
|
+
if (page->capacity == 0) return; // it may have been freed now
|
|
419
|
+
|
|
420
|
+
// and now unlink it from the page queue and abandon (or free)
|
|
421
|
+
mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
|
|
422
|
+
if (mi_page_all_free(page)) {
|
|
423
|
+
_mi_page_free(page, pq, false);
|
|
424
|
+
}
|
|
425
|
+
else {
|
|
426
|
+
_mi_page_abandon(page, pq);
|
|
427
|
+
}
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
// Free a page with no more free blocks
|
|
432
|
+
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
|
|
433
|
+
mi_assert_internal(page != NULL);
|
|
434
|
+
mi_assert_expensive(_mi_page_is_valid(page));
|
|
435
|
+
mi_assert_internal(pq == mi_page_queue_of(page));
|
|
436
|
+
mi_assert_internal(mi_page_all_free(page));
|
|
437
|
+
mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING);
|
|
438
|
+
|
|
439
|
+
// no more aligned blocks in here
|
|
440
|
+
mi_page_set_has_aligned(page, false);
|
|
441
|
+
|
|
442
|
+
// remove from the page list
|
|
443
|
+
// (no need to do _mi_heap_delayed_free first as all blocks are already free)
|
|
444
|
+
mi_heap_t* heap = mi_page_heap(page);
|
|
445
|
+
mi_segments_tld_t* segments_tld = &heap->tld->segments;
|
|
446
|
+
mi_page_queue_remove(pq, page);
|
|
447
|
+
|
|
448
|
+
// and free it
|
|
449
|
+
mi_page_set_heap(page,NULL);
|
|
450
|
+
_mi_segment_page_free(page, force, segments_tld);
|
|
451
|
+
}
|
|
452
|
+
|
|
453
|
+
#define MI_MAX_RETIRE_SIZE MI_MEDIUM_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE
|
|
454
|
+
#define MI_RETIRE_CYCLES (16)
|
|
455
|
+
|
|
456
|
+
// Retire a page with no more used blocks
|
|
457
|
+
// Important to not retire too quickly though as new
|
|
458
|
+
// allocations might coming.
|
|
459
|
+
// Note: called from `mi_free` and benchmarks often
|
|
460
|
+
// trigger this due to freeing everything and then
|
|
461
|
+
// allocating again so careful when changing this.
|
|
462
|
+
void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
|
|
463
|
+
mi_assert_internal(page != NULL);
|
|
464
|
+
mi_assert_expensive(_mi_page_is_valid(page));
|
|
465
|
+
mi_assert_internal(mi_page_all_free(page));
|
|
466
|
+
|
|
467
|
+
mi_page_set_has_aligned(page, false);
|
|
468
|
+
|
|
469
|
+
// don't retire too often..
|
|
470
|
+
// (or we end up retiring and re-allocating most of the time)
|
|
471
|
+
// NOTE: refine this more: we should not retire if this
|
|
472
|
+
// is the only page left with free blocks. It is not clear
|
|
473
|
+
// how to check this efficiently though...
|
|
474
|
+
// for now, we don't retire if it is the only page left of this size class.
|
|
475
|
+
mi_page_queue_t* pq = mi_page_queue_of(page);
|
|
476
|
+
#if MI_RETIRE_CYCLES > 0
|
|
477
|
+
const size_t bsize = mi_page_block_size(page);
|
|
478
|
+
if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
|
|
479
|
+
if (pq->last==page && pq->first==page) { // the only page in the queue?
|
|
480
|
+
mi_stat_counter_increase(_mi_stats_main.pages_retire,1);
|
|
481
|
+
page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
|
|
482
|
+
mi_heap_t* heap = mi_page_heap(page);
|
|
483
|
+
mi_assert_internal(pq >= heap->pages);
|
|
484
|
+
const size_t index = pq - heap->pages;
|
|
485
|
+
mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE);
|
|
486
|
+
if (index < heap->page_retired_min) heap->page_retired_min = index;
|
|
487
|
+
if (index > heap->page_retired_max) heap->page_retired_max = index;
|
|
488
|
+
mi_assert_internal(mi_page_all_free(page));
|
|
489
|
+
return; // don't free after all
|
|
490
|
+
}
|
|
491
|
+
}
|
|
492
|
+
#endif
|
|
493
|
+
_mi_page_free(page, pq, false);
|
|
494
|
+
}
|
|
495
|
+
|
|
496
|
+
// free retired pages: we don't need to look at the entire queues
|
|
497
|
+
// since we only retire pages that are at the head position in a queue.
|
|
498
|
+
void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
|
|
499
|
+
size_t min = MI_BIN_FULL;
|
|
500
|
+
size_t max = 0;
|
|
501
|
+
for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) {
|
|
502
|
+
mi_page_queue_t* pq = &heap->pages[bin];
|
|
503
|
+
mi_page_t* page = pq->first;
|
|
504
|
+
if (page != NULL && page->retire_expire != 0) {
|
|
505
|
+
if (mi_page_all_free(page)) {
|
|
506
|
+
page->retire_expire--;
|
|
507
|
+
if (force || page->retire_expire == 0) {
|
|
508
|
+
_mi_page_free(pq->first, pq, force);
|
|
509
|
+
}
|
|
510
|
+
else {
|
|
511
|
+
// keep retired, update min/max
|
|
512
|
+
if (bin < min) min = bin;
|
|
513
|
+
if (bin > max) max = bin;
|
|
514
|
+
}
|
|
515
|
+
}
|
|
516
|
+
else {
|
|
517
|
+
page->retire_expire = 0;
|
|
518
|
+
}
|
|
519
|
+
}
|
|
520
|
+
}
|
|
521
|
+
heap->page_retired_min = min;
|
|
522
|
+
heap->page_retired_max = max;
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
|
|
526
|
+
/* -----------------------------------------------------------
|
|
527
|
+
Initialize the initial free list in a page.
|
|
528
|
+
In secure mode we initialize a randomized list by
|
|
529
|
+
alternating between slices.
|
|
530
|
+
----------------------------------------------------------- */
|
|
531
|
+
|
|
532
|
+
#define MI_MAX_SLICE_SHIFT (6) // at most 64 slices
|
|
533
|
+
#define MI_MAX_SLICES (1UL << MI_MAX_SLICE_SHIFT)
|
|
534
|
+
#define MI_MIN_SLICES (2)
|
|
535
|
+
|
|
536
|
+
static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) {
|
|
537
|
+
MI_UNUSED(stats);
|
|
538
|
+
#if (MI_SECURE<=2)
|
|
539
|
+
mi_assert_internal(page->free == NULL);
|
|
540
|
+
mi_assert_internal(page->local_free == NULL);
|
|
541
|
+
#endif
|
|
542
|
+
mi_assert_internal(page->capacity + extend <= page->reserved);
|
|
543
|
+
mi_assert_internal(bsize == mi_page_block_size(page));
|
|
544
|
+
void* const page_area = mi_page_start(page);
|
|
545
|
+
|
|
546
|
+
// initialize a randomized free list
|
|
547
|
+
// set up `slice_count` slices to alternate between
|
|
548
|
+
size_t shift = MI_MAX_SLICE_SHIFT;
|
|
549
|
+
while ((extend >> shift) == 0) {
|
|
550
|
+
shift--;
|
|
551
|
+
}
|
|
552
|
+
const size_t slice_count = (size_t)1U << shift;
|
|
553
|
+
const size_t slice_extend = extend / slice_count;
|
|
554
|
+
mi_assert_internal(slice_extend >= 1);
|
|
555
|
+
mi_block_t* blocks[MI_MAX_SLICES]; // current start of the slice
|
|
556
|
+
size_t counts[MI_MAX_SLICES]; // available objects in the slice
|
|
557
|
+
for (size_t i = 0; i < slice_count; i++) {
|
|
558
|
+
blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend);
|
|
559
|
+
counts[i] = slice_extend;
|
|
560
|
+
}
|
|
561
|
+
counts[slice_count-1] += (extend % slice_count); // final slice holds the modulus too (todo: distribute evenly?)
|
|
562
|
+
|
|
563
|
+
// and initialize the free list by randomly threading through them
|
|
564
|
+
// set up first element
|
|
565
|
+
const uintptr_t r = _mi_heap_random_next(heap);
|
|
566
|
+
size_t current = r % slice_count;
|
|
567
|
+
counts[current]--;
|
|
568
|
+
mi_block_t* const free_start = blocks[current];
|
|
569
|
+
// and iterate through the rest; use `random_shuffle` for performance
|
|
570
|
+
uintptr_t rnd = _mi_random_shuffle(r|1); // ensure not 0
|
|
571
|
+
for (size_t i = 1; i < extend; i++) {
|
|
572
|
+
// call random_shuffle only every INTPTR_SIZE rounds
|
|
573
|
+
const size_t round = i%MI_INTPTR_SIZE;
|
|
574
|
+
if (round == 0) rnd = _mi_random_shuffle(rnd);
|
|
575
|
+
// select a random next slice index
|
|
576
|
+
size_t next = ((rnd >> 8*round) & (slice_count-1));
|
|
577
|
+
while (counts[next]==0) { // ensure it still has space
|
|
578
|
+
next++;
|
|
579
|
+
if (next==slice_count) next = 0;
|
|
580
|
+
}
|
|
581
|
+
// and link the current block to it
|
|
582
|
+
counts[next]--;
|
|
583
|
+
mi_block_t* const block = blocks[current];
|
|
584
|
+
blocks[current] = (mi_block_t*)((uint8_t*)block + bsize); // bump to the following block
|
|
585
|
+
mi_block_set_next(page, block, blocks[next]); // and set next; note: we may have `current == next`
|
|
586
|
+
current = next;
|
|
587
|
+
}
|
|
588
|
+
// prepend to the free list (usually NULL)
|
|
589
|
+
mi_block_set_next(page, blocks[current], page->free); // end of the list
|
|
590
|
+
page->free = free_start;
|
|
591
|
+
}
|
|
592
|
+
|
|
593
|
+
static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats)
|
|
594
|
+
{
|
|
595
|
+
MI_UNUSED(stats);
|
|
596
|
+
#if (MI_SECURE <= 2)
|
|
597
|
+
mi_assert_internal(page->free == NULL);
|
|
598
|
+
mi_assert_internal(page->local_free == NULL);
|
|
599
|
+
#endif
|
|
600
|
+
mi_assert_internal(page->capacity + extend <= page->reserved);
|
|
601
|
+
mi_assert_internal(bsize == mi_page_block_size(page));
|
|
602
|
+
void* const page_area = mi_page_start(page);
|
|
603
|
+
|
|
604
|
+
mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity);
|
|
605
|
+
|
|
606
|
+
// initialize a sequential free list
|
|
607
|
+
mi_block_t* const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1);
|
|
608
|
+
mi_block_t* block = start;
|
|
609
|
+
while(block <= last) {
|
|
610
|
+
mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize);
|
|
611
|
+
mi_block_set_next(page,block,next);
|
|
612
|
+
block = next;
|
|
613
|
+
}
|
|
614
|
+
// prepend to free list (usually `NULL`)
|
|
615
|
+
mi_block_set_next(page, last, page->free);
|
|
616
|
+
page->free = start;
|
|
617
|
+
}
|
|
618
|
+
|
|
619
|
+
/* -----------------------------------------------------------
|
|
620
|
+
Page initialize and extend the capacity
|
|
621
|
+
----------------------------------------------------------- */
|
|
622
|
+
|
|
623
|
+
#define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well.
|
|
624
|
+
#if (MI_SECURE>0)
|
|
625
|
+
#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many
|
|
626
|
+
#else
|
|
627
|
+
#define MI_MIN_EXTEND (4)
|
|
628
|
+
#endif
|
|
629
|
+
|
|
630
|
+
// Extend the capacity (up to reserved) by initializing a free list
|
|
631
|
+
// We do at most `MI_MAX_EXTEND` to avoid touching too much memory
|
|
632
|
+
// Note: we also experimented with "bump" allocation on the first
|
|
633
|
+
// allocations but this did not speed up any benchmark (due to an
|
|
634
|
+
// extra test in malloc? or cache effects?)
|
|
635
|
+
static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
|
|
636
|
+
mi_assert_expensive(mi_page_is_valid_init(page));
|
|
637
|
+
#if (MI_SECURE<=2)
|
|
638
|
+
mi_assert(page->free == NULL);
|
|
639
|
+
mi_assert(page->local_free == NULL);
|
|
640
|
+
if (page->free != NULL) return true;
|
|
641
|
+
#endif
|
|
642
|
+
if (page->capacity >= page->reserved) return true;
|
|
643
|
+
|
|
644
|
+
mi_stat_counter_increase(tld->stats.pages_extended, 1);
|
|
645
|
+
|
|
646
|
+
// calculate the extend count
|
|
647
|
+
const size_t bsize = mi_page_block_size(page);
|
|
648
|
+
size_t extend = page->reserved - page->capacity;
|
|
649
|
+
mi_assert_internal(extend > 0);
|
|
650
|
+
|
|
651
|
+
size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize);
|
|
652
|
+
if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
|
|
653
|
+
mi_assert_internal(max_extend > 0);
|
|
654
|
+
|
|
655
|
+
if (extend > max_extend) {
|
|
656
|
+
// ensure we don't touch memory beyond the page to reduce page commit.
|
|
657
|
+
// the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
|
|
658
|
+
extend = max_extend;
|
|
659
|
+
}
|
|
660
|
+
|
|
661
|
+
mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
|
|
662
|
+
mi_assert_internal(extend < (1UL<<16));
|
|
663
|
+
|
|
664
|
+
// and append the extend the free list
|
|
665
|
+
if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) {
|
|
666
|
+
mi_page_free_list_extend(page, bsize, extend, &tld->stats );
|
|
667
|
+
}
|
|
668
|
+
else {
|
|
669
|
+
mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats);
|
|
670
|
+
}
|
|
671
|
+
// enable the new free list
|
|
672
|
+
page->capacity += (uint16_t)extend;
|
|
673
|
+
mi_stat_increase(tld->stats.page_committed, extend * bsize);
|
|
674
|
+
mi_assert_expensive(mi_page_is_valid_init(page));
|
|
675
|
+
return true;
|
|
676
|
+
}
|
|
677
|
+
|
|
678
|
+
// Initialize a fresh page
|
|
679
|
+
static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) {
|
|
680
|
+
mi_assert(page != NULL);
|
|
681
|
+
mi_segment_t* segment = _mi_page_segment(page);
|
|
682
|
+
mi_assert(segment != NULL);
|
|
683
|
+
mi_assert_internal(block_size > 0);
|
|
684
|
+
// set fields
|
|
685
|
+
mi_page_set_heap(page, heap);
|
|
686
|
+
page->block_size = block_size;
|
|
687
|
+
size_t page_size;
|
|
688
|
+
page->page_start = _mi_segment_page_start(segment, page, &page_size);
|
|
689
|
+
mi_track_mem_noaccess(page->page_start,page_size);
|
|
690
|
+
mi_assert_internal(mi_page_block_size(page) <= page_size);
|
|
691
|
+
mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
|
|
692
|
+
mi_assert_internal(page_size / block_size < (1L<<16));
|
|
693
|
+
page->reserved = (uint16_t)(page_size / block_size);
|
|
694
|
+
mi_assert_internal(page->reserved > 0);
|
|
695
|
+
#if (MI_PADDING || MI_ENCODE_FREELIST)
|
|
696
|
+
page->keys[0] = _mi_heap_random_next(heap);
|
|
697
|
+
page->keys[1] = _mi_heap_random_next(heap);
|
|
698
|
+
#endif
|
|
699
|
+
page->free_is_zero = page->is_zero_init;
|
|
700
|
+
#if MI_DEBUG>2
|
|
701
|
+
if (page->is_zero_init) {
|
|
702
|
+
mi_track_mem_defined(page->page_start, page_size);
|
|
703
|
+
mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size));
|
|
704
|
+
}
|
|
705
|
+
#endif
|
|
706
|
+
mi_assert_internal(page->is_committed);
|
|
707
|
+
if (block_size > 0 && _mi_is_power_of_two(block_size)) {
|
|
708
|
+
page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
|
|
709
|
+
}
|
|
710
|
+
else {
|
|
711
|
+
page->block_size_shift = 0;
|
|
712
|
+
}
|
|
713
|
+
|
|
714
|
+
mi_assert_internal(page->capacity == 0);
|
|
715
|
+
mi_assert_internal(page->free == NULL);
|
|
716
|
+
mi_assert_internal(page->used == 0);
|
|
717
|
+
mi_assert_internal(page->xthread_free == 0);
|
|
718
|
+
mi_assert_internal(page->next == NULL);
|
|
719
|
+
mi_assert_internal(page->prev == NULL);
|
|
720
|
+
mi_assert_internal(page->retire_expire == 0);
|
|
721
|
+
mi_assert_internal(!mi_page_has_aligned(page));
|
|
722
|
+
#if (MI_PADDING || MI_ENCODE_FREELIST)
|
|
723
|
+
mi_assert_internal(page->keys[0] != 0);
|
|
724
|
+
mi_assert_internal(page->keys[1] != 0);
|
|
725
|
+
#endif
|
|
726
|
+
mi_assert_internal(page->block_size_shift == 0 || (block_size == ((size_t)1 << page->block_size_shift)));
|
|
727
|
+
mi_assert_expensive(mi_page_is_valid_init(page));
|
|
728
|
+
|
|
729
|
+
// initialize an initial free list
|
|
730
|
+
if (mi_page_extend_free(heap,page,tld)) {
|
|
731
|
+
mi_assert(mi_page_immediate_available(page));
|
|
732
|
+
}
|
|
733
|
+
return;
|
|
734
|
+
}
|
|
735
|
+
|
|
736
|
+
|
|
737
|
+
/* -----------------------------------------------------------
|
|
738
|
+
Find pages with free blocks
|
|
739
|
+
-------------------------------------------------------------*/
|
|
740
|
+
|
|
741
|
+
// search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
|
|
742
|
+
#define MI_MAX_CANDIDATE_SEARCH (4)
|
|
743
|
+
|
|
744
|
+
// is the page not yet used up to its reserved space?
|
|
745
|
+
static bool mi_page_is_expandable(const mi_page_t* page) {
|
|
746
|
+
mi_assert_internal(page != NULL);
|
|
747
|
+
mi_assert_internal(page->capacity <= page->reserved);
|
|
748
|
+
return (page->capacity < page->reserved);
|
|
749
|
+
}
|
|
750
|
+
|
|
751
|
+
|
|
752 + // Find a page with free blocks of `page->block_size`.
753 + static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
754 + {
755 +   // search through the pages in "next fit" order
756 +   #if MI_STAT
757 +   size_t count = 0;
758 +   #endif
759 +   size_t candidate_count = 0; // we reset this on the first candidate to limit the search
760 +   mi_page_t* page_candidate = NULL; // a page with free space
761 +   mi_page_t* page = pq->first;
762 +
763 +   while (page != NULL)
764 +   {
765 +     mi_page_t* next = page->next; // remember next
766 +     #if MI_STAT
767 +     count++;
768 +     #endif
769 +     candidate_count++;
770 +
771 +     // collect freed blocks by us and other threads
772 +     _mi_page_free_collect(page, false);
773 +
774 +     #if MI_MAX_CANDIDATE_SEARCH > 1
775 +     // search up to N pages for a best candidate
776 +
777 +     // is the local free list non-empty?
778 +     const bool immediate_available = mi_page_immediate_available(page);
779 +
780 +     // if the page is completely full, move it to the `mi_pages_full`
781 +     // queue so we don't visit long-lived pages too often.
782 +     if (!immediate_available && !mi_page_is_expandable(page)) {
783 +       mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
784 +       mi_page_to_full(page, pq);
785 +     }
786 +     else {
787 +       // the page has free space, make it a candidate
788 +       // we prefer non-expandable pages with high usage as candidates (to reduce commit, and increase chances of free-ing up pages)
789 +       if (page_candidate == NULL) {
790 +         page_candidate = page;
791 +         candidate_count = 0;
792 +       }
793 +       // prefer to reuse fuller pages (in the hope the less used page gets freed)
794 +       else if (page->used >= page_candidate->used && !mi_page_is_mostly_used(page) && !mi_page_is_expandable(page)) {
795 +         page_candidate = page;
796 +       }
797 +       // if we find a non-expandable candidate, or searched for N pages, return with the best candidate
798 +       if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) {
799 +         mi_assert_internal(page_candidate!=NULL);
800 +         break;
801 +       }
802 +     }
803 +     #else
804 +     // first-fit algorithm
805 +     // If the page contains free blocks, we are done
806 +     if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) {
807 +       break; // pick this one
808 +     }
809 +
810 +     // If the page is completely full, move it to the `mi_pages_full`
811 +     // queue so we don't visit long-lived pages too often.
812 +     mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
813 +     mi_page_to_full(page, pq);
814 +     #endif
815 +
816 +     page = next;
817 +   } // for each page
818 +
819 +   mi_heap_stat_counter_increase(heap, page_searches, count);
820 +
821 +   // set the page to the best candidate
822 +   if (page_candidate != NULL) {
823 +     page = page_candidate;
824 +   }
825 +   if (page != NULL) {
826 +     if (!mi_page_immediate_available(page)) {
827 +       mi_assert_internal(mi_page_is_expandable(page));
828 +       if (!mi_page_extend_free(heap, page, heap->tld)) {
829 +         page = NULL; // failed to extend
830 +       }
831 +     }
832 +     mi_assert_internal(page == NULL || mi_page_immediate_available(page));
833 +   }
834 +
835 +   if (page == NULL) {
836 +     _mi_heap_collect_retired(heap, false); // perhaps make a page available?
837 +     page = mi_page_fresh(heap, pq);
838 +     if (page == NULL && first_try) {
839 +       // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
840 +       page = mi_page_queue_find_free_ex(heap, pq, false);
841 +     }
842 +   }
843 +   else {
844 +     // move the page to the front of the queue
845 +     mi_page_queue_move_to_front(heap, pq, page);
846 +     page->retire_expire = 0;
847 +     // _mi_heap_collect_retired(heap, false); // update retire counts; note: increases rss on MemoryLoad bench so don't do this
848 +   }
849 +   mi_assert_internal(page == NULL || mi_page_immediate_available(page));
850 +
851 +
852 +   return page;
853 + }
854 +
855 +
856 +
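Editor's note: the candidate search above can be hard to follow interleaved with the statistics counters and preprocessor branches. The sketch below restates the selection policy with `MI_MAX_CANDIDATE_SEARCH` = 4 using hypothetical toy types and helpers (`page_t`, `has_free`, `is_expandable`, `is_mostly_used` are all stand-ins, and `is_mostly_used` is only an approximation); it illustrates the policy, not the actual implementation, which also moves full pages to the full queue as it scans:

#include <stdbool.h>
#include <stddef.h>

// Hypothetical, simplified model of the candidate search (illustration only).
typedef struct page_s {
  struct page_s* next;
  size_t used, capacity, reserved, free_count;
} page_t;

static bool has_free(const page_t* p)       { return p->free_count > 0; }
static bool is_expandable(const page_t* p)  { return p->capacity < p->reserved; }
static bool is_mostly_used(const page_t* p) { return p->used + p->reserved/8 >= p->reserved; }

#define MAX_CANDIDATE_SEARCH 4

static page_t* find_candidate(page_t* first) {
  page_t* best = NULL;
  size_t searched = 0;
  for (page_t* p = first; p != NULL; p = p->next) {
    searched++;
    if (!has_free(p) && !is_expandable(p)) continue;  // full page: the real loop moves it to `mi_pages_full`
    if (best == NULL) { best = p; searched = 0; }     // first candidate resets the search budget
    else if (p->used >= best->used && !is_mostly_used(p) && !is_expandable(p)) {
      best = p;                                       // prefer fuller, already-committed pages
    }
    if (has_free(p) || searched > MAX_CANDIDATE_SEARCH) break;  // usable page found, or budget exhausted
  }
  return best;
}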
857 + // Find a page with free blocks of `size`.
858 + static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
859 +   mi_page_queue_t* pq = mi_page_queue(heap, size);
860 +
861 +   // check the first page: we even do this with candidate search or otherwise we re-search every time
862 +   mi_page_t* page = pq->first;
863 +   if (page != NULL) {
864 +     #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
865 +     if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
866 +       mi_page_extend_free(heap, page, heap->tld);
867 +       mi_assert_internal(mi_page_immediate_available(page));
868 +     }
869 +     else
870 +     #endif
871 +     {
872 +       _mi_page_free_collect(page,false);
873 +     }
874 +
875 +     if (mi_page_immediate_available(page)) {
876 +       page->retire_expire = 0;
877 +       return page; // fast path
878 +     }
879 +   }
880 +
881 +   return mi_page_queue_find_free_ex(heap, pq, true);
882 + }
883 +
884 +
885 + /* -----------------------------------------------------------
886 +    Users can register a deferred free function called
887 +    when the `free` list is empty. Since the `local_free`
888 +    is separate this is deterministically called after
889 +    a certain number of allocations.
890 +    ----------------------------------------------------------- */
891 +
892 + static mi_deferred_free_fun* volatile deferred_free = NULL;
893 + static _Atomic(void*) deferred_arg; // = NULL
894 +
895 + void _mi_deferred_free(mi_heap_t* heap, bool force) {
896 +   heap->tld->heartbeat++;
897 +   if (deferred_free != NULL && !heap->tld->recurse) {
898 +     heap->tld->recurse = true;
899 +     deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg));
900 +     heap->tld->recurse = false;
901 +   }
902 + }
903 +
904 + void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept {
905 +   deferred_free = fn;
906 +   mi_atomic_store_ptr_release(void,&deferred_arg, arg);
907 + }
908 +
909 +
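Editor's note: `mi_register_deferred_free` is part of mimalloc's public API, and the callback signature matches the call in `_mi_deferred_free` above: it receives the `force` flag, the heap's heartbeat count, and the registered argument. A minimal usage sketch (the callback body and its heartbeat policy are hypothetical):

#include <mimalloc.h>
#include <stdbool.h>
#include <stdio.h>

// Hypothetical callback: invoked from the allocation slow path when free lists run dry.
static void my_deferred_free(bool force, unsigned long long heartbeat, void* arg) {
  (void)arg;
  // e.g. release an application-level cache back to the allocator here
  if (force || (heartbeat % 1000) == 0) {
    fprintf(stderr, "deferred free: force=%d heartbeat=%llu\n", (int)force, heartbeat);
  }
}

int main(void) {
  mi_register_deferred_free(&my_deferred_free, /*arg*/ NULL);
  void* p = mi_malloc(32);   // allocations may eventually trigger the callback
  mi_free(p);
  return 0;
}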
910 + /* -----------------------------------------------------------
911 +    General allocation
912 +    ----------------------------------------------------------- */
913 +
914 + // Large and huge page allocation.
915 + // Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`).
916 + // Huge pages are also used if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
917 + // so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
918 + static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
919 +   size_t block_size = _mi_os_good_alloc_size(size);
920 +   mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
921 +   bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
922 +   #if MI_HUGE_PAGE_ABANDON
923 +   mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
924 +   #else
925 +   mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_LARGE_OBJ_SIZE_MAX+1 : block_size);
926 +   mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
927 +   #endif
928 +   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
929 +   if (page != NULL) {
930 +     mi_assert_internal(mi_page_immediate_available(page));
931 +
932 +     if (is_huge) {
933 +       mi_assert_internal(mi_page_is_huge(page));
934 +       mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
935 +       mi_assert_internal(_mi_page_segment(page)->used==1);
936 +       #if MI_HUGE_PAGE_ABANDON
937 +       mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
938 +       mi_page_set_heap(page, NULL);
939 +       #endif
940 +     }
941 +     else {
942 +       mi_assert_internal(!mi_page_is_huge(page));
943 +     }
944 +
945 +     const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding
946 +     /*if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
947 +       mi_heap_stat_increase(heap, malloc_large, bsize);
948 +       mi_heap_stat_counter_increase(heap, malloc_large_count, 1);
949 +     }
950 +     else */
951 +     {
952 +       _mi_stat_increase(&heap->tld->stats.malloc_huge, bsize);
953 +       _mi_stat_counter_increase(&heap->tld->stats.malloc_huge_count, 1);
954 +     }
955 +   }
956 +   return page;
957 + }
958 +
959 +
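Editor's note: as the comments above note, an over-sized alignment request routes through this huge-page path even when the size itself is modest. A minimal sketch using mimalloc's public `mi_malloc_aligned`, assuming a 4 MiB alignment exceeds `MI_BLOCK_ALIGNMENT_MAX` on the build in question (the concrete sizes are arbitrary):

#include <mimalloc.h>
#include <assert.h>
#include <stdint.h>

int main(void) {
  // 64 KiB of data aligned to 4 MiB: the oversized alignment (not the size) selects the huge path
  const size_t align = 4 * 1024 * 1024;
  void* p = mi_malloc_aligned(64 * 1024, align);
  assert(p != NULL && ((uintptr_t)p % align) == 0);
  mi_free(p);
  return 0;
}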
960 + // Allocate a page
961 + // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
962 + static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
963 +   // huge allocation?
964 +   const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
965 +   if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
966 +     if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) {
967 +       _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
968 +       return NULL;
969 +     }
970 +     else {
971 +       return mi_large_huge_page_alloc(heap,size,huge_alignment);
972 +     }
973 +   }
974 +   else {
975 +     // otherwise find a page with free blocks in our size segregated queues
976 +     #if MI_PADDING
977 +     mi_assert_internal(size >= MI_PADDING_SIZE);
978 +     #endif
979 +     return mi_find_free_page(heap, size);
980 +   }
981 + }
982 +
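Editor's note: the `size - MI_PADDING_SIZE` correction above leans on modular unsigned arithmetic: if the debug-mode addition of `MI_PADDING_SIZE` wrapped around, subtracting it wraps back, so the recovered `req_size` still trips the `MI_MAX_ALLOC_SIZE` check. A worked illustration with a hypothetical padding of 16 bytes:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void) {
  const size_t PAD = 16;                 // hypothetical stand-in for MI_PADDING_SIZE
  const size_t requested = SIZE_MAX - 4; // an absurd request from the caller
  size_t size = requested + PAD;         // debug-mode padding added: wraps around to 11
  size_t req_size = size - PAD;          // subtracting wraps back to SIZE_MAX - 4
  assert(req_size == requested);         // the huge request is recovered and can be rejected
  return 0;
}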
983 + // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
984 + // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
985 + // The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
986 + // very large requested alignments in which case we use a huge segment.
987 + void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
988 + {
989 +   mi_assert_internal(heap != NULL);
990 +
991 +   // initialize if necessary
992 +   if mi_unlikely(!mi_heap_is_initialized(heap)) {
993 +     heap = mi_heap_get_default(); // calls mi_thread_init
994 +     if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
995 +   }
996 +   mi_assert_internal(mi_heap_is_initialized(heap));
997 +
998 +   // do administrative tasks every N generic mallocs
999 +   if mi_unlikely(++heap->generic_count >= 100) {
1000 +     heap->generic_collect_count += heap->generic_count;
1001 +     heap->generic_count = 0;
1002 +     // call potential deferred free routines
1003 +     _mi_deferred_free(heap, false);
1004 +
1005 +     // free delayed frees from other threads (but skip contended ones)
1006 +     _mi_heap_delayed_free_partial(heap);
1007 +
1008 +     // collect every once in a while (10000 by default)
1009 +     const long generic_collect = mi_option_get_clamp(mi_option_generic_collect, 1, 1000000L);
1010 +     if (heap->generic_collect_count >= generic_collect) {
1011 +       heap->generic_collect_count = 0;
1012 +       mi_heap_collect(heap, false /* force? */);
1013 +     }
1014 +   }
1015 +
1016 +   // find (or allocate) a page of the right size
1017 +   mi_page_t* page = mi_find_page(heap, size, huge_alignment);
1018 +   if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
1019 +     mi_heap_collect(heap, true /* force */);
1020 +     page = mi_find_page(heap, size, huge_alignment);
1021 +   }
1022 +
1023 +   if mi_unlikely(page == NULL) { // out of memory
1024 +     const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
1025 +     _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
1026 +     return NULL;
1027 +   }
1028 +
1029 +   mi_assert_internal(mi_page_immediate_available(page));
1030 +   mi_assert_internal(mi_page_block_size(page) >= size);
1031 +
1032 +   // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
1033 +   void* p;
1034 +   if mi_unlikely(zero && mi_page_is_huge(page)) {
1035 +     // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
1036 +     p = _mi_page_malloc(heap, page, size);
1037 +     mi_assert_internal(p != NULL);
1038 +     _mi_memzero_aligned(p, mi_page_usable_block_size(page));
1039 +   }
1040 +   else {
1041 +     p = _mi_page_malloc_zero(heap, page, size, zero);
1042 +     mi_assert_internal(p != NULL);
1043 +   }
1044 +   // move singleton pages to the full queue
1045 +   if (page->reserved == page->used) {
1046 +     mi_page_to_full(page, mi_page_queue_of(page));
1047 +   }
1048 +   return p;
1049 + }
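Editor's note: as the comment before `_mi_malloc_generic` says, this function is the slow path behind the inlined fast path in `alloc.c:mi_page_malloc`. The toy model below illustrates that split: pop a block from the page-local free list when possible, otherwise fall back to the generic routine. All types and helpers here are hypothetical stand-ins, not mimalloc code:

#include <stddef.h>

// Hypothetical toy model of the fast/slow allocation split (illustration only).
typedef struct block_s { struct block_s* next; } block_t;
typedef struct page_s  { block_t* free; size_t used; } page_t;

// stands in for _mi_malloc_generic above: refill, find another page, or report OOM
static void* slow_malloc_generic(page_t* page, size_t size) {
  (void)page; (void)size;
  return NULL;
}

static void* fast_malloc(page_t* page, size_t size) {
  block_t* b = page->free;
  if (b != NULL) {                 // fast path: pop the page-local free list, no search
    page->free = b->next;
    page->used++;
    return b;
  }
  return slow_malloc_generic(page, size);  // slow path handles refill, collection, OOM
}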