asmjit 0.2.0 → 0.2.1
- checksums.yaml +4 -4
- data/Gemfile.lock +1 -1
- data/asmjit.gemspec +1 -1
- data/ext/asmjit/asmjit/.editorconfig +10 -0
- data/ext/asmjit/asmjit/.github/FUNDING.yml +1 -0
- data/ext/asmjit/asmjit/.github/workflows/build-config.json +47 -0
- data/ext/asmjit/asmjit/.github/workflows/build.yml +156 -0
- data/ext/asmjit/asmjit/.gitignore +6 -0
- data/ext/asmjit/asmjit/CMakeLists.txt +611 -0
- data/ext/asmjit/asmjit/LICENSE.md +17 -0
- data/ext/asmjit/asmjit/README.md +69 -0
- data/ext/asmjit/asmjit/src/asmjit/a64.h +62 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64archtraits_p.h +81 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64assembler.cpp +5115 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64assembler.h +72 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64builder.cpp +51 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64builder.h +57 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64compiler.cpp +60 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64compiler.h +247 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64emithelper.cpp +464 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64emithelper_p.h +50 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64emitter.h +1228 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64formatter.cpp +298 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64formatter_p.h +59 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64func.cpp +189 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64func_p.h +33 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64globals.h +1894 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64instapi.cpp +278 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64instapi_p.h +41 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64instdb.cpp +1957 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64instdb.h +74 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64instdb_p.h +876 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64operand.cpp +85 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64operand.h +312 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64rapass.cpp +852 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64rapass_p.h +105 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/a64utils.h +179 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/armformatter.cpp +143 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/armformatter_p.h +44 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/armglobals.h +21 -0
- data/ext/asmjit/asmjit/src/asmjit/arm/armoperand.h +621 -0
- data/ext/asmjit/asmjit/src/asmjit/arm.h +62 -0
- data/ext/asmjit/asmjit/src/asmjit/asmjit-scope-begin.h +17 -0
- data/ext/asmjit/asmjit/src/asmjit/asmjit-scope-end.h +9 -0
- data/ext/asmjit/asmjit/src/asmjit/asmjit.h +33 -0
- data/ext/asmjit/asmjit/src/asmjit/core/api-build_p.h +55 -0
- data/ext/asmjit/asmjit/src/asmjit/core/api-config.h +613 -0
- data/ext/asmjit/asmjit/src/asmjit/core/archcommons.h +229 -0
- data/ext/asmjit/asmjit/src/asmjit/core/archtraits.cpp +160 -0
- data/ext/asmjit/asmjit/src/asmjit/core/archtraits.h +290 -0
- data/ext/asmjit/asmjit/src/asmjit/core/assembler.cpp +406 -0
- data/ext/asmjit/asmjit/src/asmjit/core/assembler.h +129 -0
- data/ext/asmjit/asmjit/src/asmjit/core/builder.cpp +889 -0
- data/ext/asmjit/asmjit/src/asmjit/core/builder.h +1391 -0
- data/ext/asmjit/asmjit/src/asmjit/core/codebuffer.h +113 -0
- data/ext/asmjit/asmjit/src/asmjit/core/codeholder.cpp +1149 -0
- data/ext/asmjit/asmjit/src/asmjit/core/codeholder.h +1035 -0
- data/ext/asmjit/asmjit/src/asmjit/core/codewriter.cpp +175 -0
- data/ext/asmjit/asmjit/src/asmjit/core/codewriter_p.h +179 -0
- data/ext/asmjit/asmjit/src/asmjit/core/compiler.cpp +582 -0
- data/ext/asmjit/asmjit/src/asmjit/core/compiler.h +737 -0
- data/ext/asmjit/asmjit/src/asmjit/core/compilerdefs.h +173 -0
- data/ext/asmjit/asmjit/src/asmjit/core/constpool.cpp +363 -0
- data/ext/asmjit/asmjit/src/asmjit/core/constpool.h +250 -0
- data/ext/asmjit/asmjit/src/asmjit/core/cpuinfo.cpp +1162 -0
- data/ext/asmjit/asmjit/src/asmjit/core/cpuinfo.h +813 -0
- data/ext/asmjit/asmjit/src/asmjit/core/emithelper.cpp +323 -0
- data/ext/asmjit/asmjit/src/asmjit/core/emithelper_p.h +58 -0
- data/ext/asmjit/asmjit/src/asmjit/core/emitter.cpp +333 -0
- data/ext/asmjit/asmjit/src/asmjit/core/emitter.h +741 -0
- data/ext/asmjit/asmjit/src/asmjit/core/emitterutils.cpp +129 -0
- data/ext/asmjit/asmjit/src/asmjit/core/emitterutils_p.h +89 -0
- data/ext/asmjit/asmjit/src/asmjit/core/environment.cpp +46 -0
- data/ext/asmjit/asmjit/src/asmjit/core/environment.h +508 -0
- data/ext/asmjit/asmjit/src/asmjit/core/errorhandler.cpp +14 -0
- data/ext/asmjit/asmjit/src/asmjit/core/errorhandler.h +228 -0
- data/ext/asmjit/asmjit/src/asmjit/core/formatter.cpp +584 -0
- data/ext/asmjit/asmjit/src/asmjit/core/formatter.h +247 -0
- data/ext/asmjit/asmjit/src/asmjit/core/formatter_p.h +34 -0
- data/ext/asmjit/asmjit/src/asmjit/core/func.cpp +286 -0
- data/ext/asmjit/asmjit/src/asmjit/core/func.h +1445 -0
- data/ext/asmjit/asmjit/src/asmjit/core/funcargscontext.cpp +293 -0
- data/ext/asmjit/asmjit/src/asmjit/core/funcargscontext_p.h +199 -0
- data/ext/asmjit/asmjit/src/asmjit/core/globals.cpp +133 -0
- data/ext/asmjit/asmjit/src/asmjit/core/globals.h +393 -0
- data/ext/asmjit/asmjit/src/asmjit/core/inst.cpp +113 -0
- data/ext/asmjit/asmjit/src/asmjit/core/inst.h +772 -0
- data/ext/asmjit/asmjit/src/asmjit/core/jitallocator.cpp +1242 -0
- data/ext/asmjit/asmjit/src/asmjit/core/jitallocator.h +261 -0
- data/ext/asmjit/asmjit/src/asmjit/core/jitruntime.cpp +80 -0
- data/ext/asmjit/asmjit/src/asmjit/core/jitruntime.h +89 -0
- data/ext/asmjit/asmjit/src/asmjit/core/logger.cpp +69 -0
- data/ext/asmjit/asmjit/src/asmjit/core/logger.h +198 -0
- data/ext/asmjit/asmjit/src/asmjit/core/misc_p.h +33 -0
- data/ext/asmjit/asmjit/src/asmjit/core/operand.cpp +132 -0
- data/ext/asmjit/asmjit/src/asmjit/core/operand.h +1611 -0
- data/ext/asmjit/asmjit/src/asmjit/core/osutils.cpp +84 -0
- data/ext/asmjit/asmjit/src/asmjit/core/osutils.h +61 -0
- data/ext/asmjit/asmjit/src/asmjit/core/osutils_p.h +68 -0
- data/ext/asmjit/asmjit/src/asmjit/core/raassignment_p.h +418 -0
- data/ext/asmjit/asmjit/src/asmjit/core/rabuilders_p.h +612 -0
- data/ext/asmjit/asmjit/src/asmjit/core/radefs_p.h +1204 -0
- data/ext/asmjit/asmjit/src/asmjit/core/ralocal.cpp +1166 -0
- data/ext/asmjit/asmjit/src/asmjit/core/ralocal_p.h +254 -0
- data/ext/asmjit/asmjit/src/asmjit/core/rapass.cpp +1969 -0
- data/ext/asmjit/asmjit/src/asmjit/core/rapass_p.h +1183 -0
- data/ext/asmjit/asmjit/src/asmjit/core/rastack.cpp +184 -0
- data/ext/asmjit/asmjit/src/asmjit/core/rastack_p.h +171 -0
- data/ext/asmjit/asmjit/src/asmjit/core/string.cpp +559 -0
- data/ext/asmjit/asmjit/src/asmjit/core/string.h +372 -0
- data/ext/asmjit/asmjit/src/asmjit/core/support.cpp +494 -0
- data/ext/asmjit/asmjit/src/asmjit/core/support.h +1773 -0
- data/ext/asmjit/asmjit/src/asmjit/core/target.cpp +14 -0
- data/ext/asmjit/asmjit/src/asmjit/core/target.h +53 -0
- data/ext/asmjit/asmjit/src/asmjit/core/type.cpp +74 -0
- data/ext/asmjit/asmjit/src/asmjit/core/type.h +419 -0
- data/ext/asmjit/asmjit/src/asmjit/core/virtmem.cpp +722 -0
- data/ext/asmjit/asmjit/src/asmjit/core/virtmem.h +242 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zone.cpp +353 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zone.h +615 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonehash.cpp +309 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonehash.h +186 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonelist.cpp +163 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonelist.h +209 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonestack.cpp +176 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonestack.h +239 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonestring.h +120 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonetree.cpp +99 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonetree.h +380 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonevector.cpp +356 -0
- data/ext/asmjit/asmjit/src/asmjit/core/zonevector.h +690 -0
- data/ext/asmjit/asmjit/src/asmjit/core.h +1861 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86archtraits_p.h +148 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86assembler.cpp +5110 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86assembler.h +685 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86builder.cpp +52 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86builder.h +351 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86compiler.cpp +61 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86compiler.h +721 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86emithelper.cpp +619 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86emithelper_p.h +60 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86emitter.h +4315 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86formatter.cpp +944 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86formatter_p.h +58 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86func.cpp +503 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86func_p.h +33 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86globals.h +2169 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86instapi.cpp +1732 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86instapi_p.h +41 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86instdb.cpp +4427 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86instdb.h +563 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86instdb_p.h +311 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86opcode_p.h +436 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86operand.cpp +231 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86operand.h +1085 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86rapass.cpp +1509 -0
- data/ext/asmjit/asmjit/src/asmjit/x86/x86rapass_p.h +94 -0
- data/ext/asmjit/asmjit/src/asmjit/x86.h +93 -0
- data/ext/asmjit/asmjit/src/asmjit.natvis +245 -0
- data/ext/asmjit/asmjit/test/asmjit_test_assembler.cpp +84 -0
- data/ext/asmjit/asmjit/test/asmjit_test_assembler.h +85 -0
- data/ext/asmjit/asmjit/test/asmjit_test_assembler_a64.cpp +4006 -0
- data/ext/asmjit/asmjit/test/asmjit_test_assembler_x64.cpp +17833 -0
- data/ext/asmjit/asmjit/test/asmjit_test_assembler_x86.cpp +8300 -0
- data/ext/asmjit/asmjit/test/asmjit_test_compiler.cpp +253 -0
- data/ext/asmjit/asmjit/test/asmjit_test_compiler.h +73 -0
- data/ext/asmjit/asmjit/test/asmjit_test_compiler_a64.cpp +690 -0
- data/ext/asmjit/asmjit/test/asmjit_test_compiler_x86.cpp +4317 -0
- data/ext/asmjit/asmjit/test/asmjit_test_emitters.cpp +197 -0
- data/ext/asmjit/asmjit/test/asmjit_test_instinfo.cpp +181 -0
- data/ext/asmjit/asmjit/test/asmjit_test_misc.h +257 -0
- data/ext/asmjit/asmjit/test/asmjit_test_perf.cpp +62 -0
- data/ext/asmjit/asmjit/test/asmjit_test_perf.h +61 -0
- data/ext/asmjit/asmjit/test/asmjit_test_perf_a64.cpp +699 -0
- data/ext/asmjit/asmjit/test/asmjit_test_perf_x86.cpp +5032 -0
- data/ext/asmjit/asmjit/test/asmjit_test_unit.cpp +172 -0
- data/ext/asmjit/asmjit/test/asmjit_test_x86_sections.cpp +172 -0
- data/ext/asmjit/asmjit/test/asmjitutils.h +38 -0
- data/ext/asmjit/asmjit/test/broken.cpp +312 -0
- data/ext/asmjit/asmjit/test/broken.h +148 -0
- data/ext/asmjit/asmjit/test/cmdline.h +61 -0
- data/ext/asmjit/asmjit/test/performancetimer.h +41 -0
- data/ext/asmjit/asmjit/tools/configure-makefiles.sh +13 -0
- data/ext/asmjit/asmjit/tools/configure-ninja.sh +13 -0
- data/ext/asmjit/asmjit/tools/configure-sanitizers.sh +13 -0
- data/ext/asmjit/asmjit/tools/configure-vs2019-x64.bat +2 -0
- data/ext/asmjit/asmjit/tools/configure-vs2019-x86.bat +2 -0
- data/ext/asmjit/asmjit/tools/configure-vs2022-x64.bat +2 -0
- data/ext/asmjit/asmjit/tools/configure-vs2022-x86.bat +2 -0
- data/ext/asmjit/asmjit/tools/configure-xcode.sh +8 -0
- data/ext/asmjit/asmjit/tools/enumgen.js +417 -0
- data/ext/asmjit/asmjit/tools/enumgen.sh +3 -0
- data/ext/asmjit/asmjit/tools/tablegen-arm.js +365 -0
- data/ext/asmjit/asmjit/tools/tablegen-arm.sh +3 -0
- data/ext/asmjit/asmjit/tools/tablegen-x86.js +2638 -0
- data/ext/asmjit/asmjit/tools/tablegen-x86.sh +3 -0
- data/ext/asmjit/asmjit/tools/tablegen.js +947 -0
- data/ext/asmjit/asmjit/tools/tablegen.sh +4 -0
- data/ext/asmjit/asmjit.cc +18 -0
- data/lib/asmjit/version.rb +1 -1
- metadata +197 -2
data/ext/asmjit/asmjit/src/asmjit/core/virtmem.h
@@ -0,0 +1,242 @@
+// This file is part of AsmJit project <https://asmjit.com>
+//
+// See asmjit.h or LICENSE.md for license and copyright information
+// SPDX-License-Identifier: Zlib
+
+#ifndef ASMJIT_CORE_VIRTMEM_H_INCLUDED
+#define ASMJIT_CORE_VIRTMEM_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_virtual_memory
+//! \{
+
+//! Virtual memory management.
+namespace VirtMem {
+
+//! Flushes instruction cache in the given region.
+//!
+//! Only useful on non-x86 architectures, however, it's a good practice to call it on any platform to make your
+//! code more portable.
+ASMJIT_API void flushInstructionCache(void* p, size_t size) noexcept;
+
+//! Virtual memory information.
+struct Info {
+  //! Virtual memory page size.
+  uint32_t pageSize;
+  //! Virtual memory page granularity.
+  uint32_t pageGranularity;
+};
+
+//! Returns virtual memory information, see `VirtMem::Info` for more details.
+ASMJIT_API Info info() noexcept;
+
+//! Virtual memory access and mmap-specific flags.
+enum class MemoryFlags : uint32_t {
+  //! No flags.
+  kNone = 0,
+
+  //! Memory is readable.
+  kAccessRead = 0x00000001u,
+
+  //! Memory is writable.
+  kAccessWrite = 0x00000002u,
+
+  //! Memory is executable.
+  kAccessExecute = 0x00000004u,
+
+  //! A combination of \ref MemoryFlags::kAccessRead and \ref MemoryFlags::kAccessWrite.
+  kAccessReadWrite = kAccessRead | kAccessWrite,
+
+  //! A combination of \ref MemoryFlags::kAccessRead, \ref MemoryFlags::kAccessWrite.
+  kAccessRW = kAccessRead | kAccessWrite,
+
+  //! A combination of \ref MemoryFlags::kAccessRead and \ref MemoryFlags::kAccessExecute.
+  kAccessRX = kAccessRead | kAccessExecute,
+
+  //! A combination of \ref MemoryFlags::kAccessRead, \ref MemoryFlags::kAccessWrite, and
+  //! \ref MemoryFlags::kAccessExecute.
+  kAccessRWX = kAccessRead | kAccessWrite | kAccessExecute,
+
+  //! Use a `MAP_JIT` flag available on Apple platforms (introduced by Mojave), which allows JIT code to be executed
+  //! in MAC bundles. This flag is not turned on by default, because when a process uses `fork()` the child process
+  //! has no access to the pages mapped with `MAP_JIT`, which could break code that doesn't expect this behavior.
+  //!
+  //! \note This flag can only be used with \ref VirtMem::alloc().
+  kMMapEnableMapJit = 0x00000010u,
+
+  //! Pass `PROT_MAX(PROT_READ)` to mmap() on platforms that support `PROT_MAX`.
+  //!
+  //! \note This flag can only be used with \ref VirtMem::alloc().
+  kMMapMaxAccessRead = 0x00000020u,
+  //! Pass `PROT_MAX(PROT_WRITE)` to mmap() on platforms that support `PROT_MAX`.
+  //!
+  //! \note This flag can only be used with \ref VirtMem::alloc().
+  kMMapMaxAccessWrite = 0x00000040u,
+  //! Pass `PROT_MAX(PROT_EXEC)` to mmap() on platforms that support `PROT_MAX`.
+  //!
+  //! \note This flag can only be used with \ref VirtMem::alloc().
+  kMMapMaxAccessExecute = 0x00000080u,
+
+  //! A combination of \ref MemoryFlags::kMMapMaxAccessRead and \ref MemoryFlags::kMMapMaxAccessWrite.
+  kMMapMaxAccessReadWrite = kMMapMaxAccessRead | kMMapMaxAccessWrite,
+
+  //! A combination of \ref MemoryFlags::kMMapMaxAccessRead and \ref MemoryFlags::kMMapMaxAccessWrite.
+  kMMapMaxAccessRW = kMMapMaxAccessRead | kMMapMaxAccessWrite,
+
+  //! A combination of \ref MemoryFlags::kMMapMaxAccessRead and \ref MemoryFlags::kMMapMaxAccessExecute.
+  kMMapMaxAccessRX = kMMapMaxAccessRead | kMMapMaxAccessExecute,
+
+  //! A combination of \ref MemoryFlags::kMMapMaxAccessRead, \ref MemoryFlags::kMMapMaxAccessWrite, \ref
+  //! MemoryFlags::kMMapMaxAccessExecute.
+  kMMapMaxAccessRWX = kMMapMaxAccessRead | kMMapMaxAccessWrite | kMMapMaxAccessExecute,
+
+  //! Not an access flag, only used by `allocDualMapping()` to override the default allocation strategy to always use
+  //! a 'tmp' directory instead of "/dev/shm" (on POSIX platforms). Please note that this flag will be ignored if the
+  //! operating system allows to allocate an executable memory by a different API than `open()` or `shm_open()`. For
+  //! example on Linux `memfd_create()` is preferred and on BSDs `shm_open(SHM_ANON, ...)` is used if SHM_ANON is
+  //! defined.
+  //!
+  //! \note This flag can only be used with \ref VirtMem::alloc().
+  kMappingPreferTmp = 0x80000000u
+};
+ASMJIT_DEFINE_ENUM_FLAGS(MemoryFlags)
+
+//! Allocates virtual memory by either using `mmap()` (POSIX) or `VirtualAlloc()` (Windows).
+//!
+//! \note `size` should be aligned to page size, use \ref VirtMem::info() to obtain it. Invalid size will not be
+//! corrected by the implementation and the allocation would not succeed in such case.
+ASMJIT_API Error alloc(void** p, size_t size, MemoryFlags flags) noexcept;
+
+//! Releases virtual memory previously allocated by \ref VirtMem::alloc().
+//!
+//! \note The size must be the same as used by \ref VirtMem::alloc(). If the size is not the same value the call
+//! will fail on any POSIX system, but pass on Windows, because it's implemented differently.
+ASMJIT_API Error release(void* p, size_t size) noexcept;
+
+//! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect()` (Windows).
+ASMJIT_API Error protect(void* p, size_t size, MemoryFlags flags) noexcept;
+
+//! Dual memory mapping used to map an anonymous memory into two memory regions where one region is read-only, but
+//! executable, and the second region is read+write, but not executable. See \ref VirtMem::allocDualMapping() for
+//! more details.
+struct DualMapping {
+  //! Pointer to data with 'Read+Execute' access (this memory is not writable).
+  void* rx;
+  //! Pointer to data with 'Read+Write' access (this memory is not executable).
+  void* rw;
+};
+
+//! Allocates virtual memory and creates two views of it where the first view has no write access. This is an addition
+//! to the API that should be used in cases in which the operating system either enforces W^X security policy or the
+//! application wants to use this policy by default to improve security and prevent an accidental (or purposed)
+//! self-modifying code.
+//!
+//! The memory returned in the `dm` are two independent mappings of the same shared memory region. You must use
+//! \ref VirtMem::releaseDualMapping() to release it when it's no longer needed. Never use `VirtMem::release()` to
+//! release the memory returned by `allocDualMapping()` as that would fail on Windows.
+//!
+//! \remarks Both pointers in `dm` would be set to `nullptr` if the function fails.
+ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags flags) noexcept;
+
+//! Releases virtual memory mapping previously allocated by \ref VirtMem::allocDualMapping().
+//!
+//! \remarks Both pointers in `dm` would be set to `nullptr` if the function succeeds.
+ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept;
+
+//! Hardened runtime flags.
+enum class HardenedRuntimeFlags : uint32_t {
+  //! No flags.
+  kNone = 0,
+
+  //! Hardened runtime is enabled - it's not possible to have "Write & Execute" memory protection. The runtime
+  //! enforces W^X (either write or execute).
+  //!
+  //! \note If the runtime is hardened it means that an operating system specific protection is used. For example on
+  //! MacOS platform it's possible to allocate memory with MAP_JIT flag and then use `pthread_jit_write_protect_np()`
+  //! to temporarily swap access permissions for the current thread. Dual mapping is also a possibility on X86/X64
+  //! architecture.
+  kEnabled = 0x00000001u,
+
+  //! Read+Write+Execute can only be allocated with MAP_JIT flag (Apple specific).
+  kMapJit = 0x00000002u
+};
+ASMJIT_DEFINE_ENUM_FLAGS(HardenedRuntimeFlags)
+
+//! Hardened runtime information.
+struct HardenedRuntimeInfo {
+  //! Hardened runtime flags.
+  HardenedRuntimeFlags flags;
+};
+
+//! Returns runtime features provided by the OS.
+ASMJIT_API HardenedRuntimeInfo hardenedRuntimeInfo() noexcept;
+
+//! Values that can be used with `protectJitMemory()` function.
+enum class ProtectJitAccess : uint32_t {
+  //! Protect JIT memory with Read+Write permissions.
+  kReadWrite = 0,
+  //! Protect JIT memory with Read+Execute permissions.
+  kReadExecute = 1
+};
+
+//! Protects access of memory mapped with MAP_JIT flag for the current thread.
+//!
+//! \note This feature is only available on Apple hardware (AArch64) at the moment and and uses a non-portable
+//! `pthread_jit_write_protect_np()` call when available.
+//!
+//! This function must be called before and after a memory mapped with MAP_JIT flag is modified. Example:
+//!
+//! ```
+//! void* codePtr = ...;
+//! size_t codeSize = ...;
+//!
+//! VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadWrite);
+//! memcpy(codePtr, source, codeSize);
+//! VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadExecute);
+//! VirtMem::flushInstructionCache(codePtr, codeSize);
+//! ```
+//!
+//! See \ref ProtectJitReadWriteScope, which makes it simpler than the code above.
+ASMJIT_API void protectJitMemory(ProtectJitAccess access) noexcept;
+
+//! JIT protection scope that prepares the given memory block to be written to in the current thread.
+//!
+//! It calls `VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadWrite)` at construction time and
+//! `VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadExecute)` combined with `flushInstructionCache()`
+//! in destructor. The purpose of this class is to make writing to JIT memory easier.
+class ProtectJitReadWriteScope {
+public:
+  void* _rxPtr;
+  size_t _size;
+
+  //! Makes the given memory block RW protected.
+  ASMJIT_FORCE_INLINE ProtectJitReadWriteScope(void* rxPtr, size_t size) noexcept
+    : _rxPtr(rxPtr),
+      _size(size) {
+    protectJitMemory(ProtectJitAccess::kReadWrite);
+  }
+
+  // Not copyable.
+  ProtectJitReadWriteScope(const ProtectJitReadWriteScope& other) = delete;
+
+  //! Makes the memory block RX protected again and flushes instruction cache.
+  ASMJIT_FORCE_INLINE ~ProtectJitReadWriteScope() noexcept {
+    protectJitMemory(ProtectJitAccess::kReadExecute);
+    flushInstructionCache(_rxPtr, _size);
+  }
+};
+
+} // VirtMem
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif
+#endif // ASMJIT_CORE_VIRTMEM_H_INCLUDED
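The hunk above is the public `VirtMem` API bundled with this gem. As a quick orientation, here is a minimal usage sketch (not part of the gem diff): it allocates one read+write page, assumes generated machine code is written into it, then flips the page to read+execute. `ASMJIT_PROPAGATE` is asmjit's usual error-propagation macro from `core/globals.h`; the code-writing step is elided.

```
#include <asmjit/core.h>

using namespace asmjit;

// Sketch: page-granular, W^X friendly workflow on top of the VirtMem API above.
static Error allocWriteProtectSketch() {
  VirtMem::Info vmInfo = VirtMem::info();
  size_t size = vmInfo.pageSize;              // alloc() expects a page-aligned size.

  void* p = nullptr;
  ASMJIT_PROPAGATE(VirtMem::alloc(&p, size, VirtMem::MemoryFlags::kAccessReadWrite));

  // ... copy generated machine code into `p` here (elided) ...

  // Flip the page to read+execute and make the new code visible to the CPU.
  ASMJIT_PROPAGATE(VirtMem::protect(p, size, VirtMem::MemoryFlags::kAccessRX));
  VirtMem::flushInstructionCache(p, size);

  // release() must receive the same size that was passed to alloc().
  return VirtMem::release(p, size);
}
```

On a hardened runtime (see `hardenedRuntimeInfo()`), the same write-then-execute flow would instead go through `allocDualMapping()`, or `kMMapEnableMapJit` together with `ProtectJitReadWriteScope`.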
data/ext/asmjit/asmjit/src/asmjit/core/zone.cpp
@@ -0,0 +1,353 @@
+// This file is part of AsmJit project <https://asmjit.com>
+//
+// See asmjit.h or LICENSE.md for license and copyright information
+// SPDX-License-Identifier: Zlib
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// Zone - Globals
+// ==============
+
+// Zero size block used by `Zone` that doesn't have any memory allocated. Should be allocated in read-only memory
+// and should never be modified.
+const Zone::Block Zone::_zeroBlock = { nullptr, nullptr, 0 };
+
+// Zone - Init & Reset
+// ===================
+
+void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
+  ASMJIT_ASSERT(blockSize >= kMinBlockSize);
+  ASMJIT_ASSERT(blockSize <= kMaxBlockSize);
+  ASMJIT_ASSERT(blockAlignment <= 64);
+
+  // Just to make the compiler happy...
+  constexpr size_t kBlockSizeMask = (Support::allOnes<size_t>() >> 4);
+  constexpr size_t kBlockAlignmentShiftMask = 0x7u;
+
+  _assignZeroBlock();
+  _blockSize = blockSize & kBlockSizeMask;
+  _isTemporary = temporary != nullptr;
+  _blockAlignmentShift = Support::ctz(blockAlignment) & kBlockAlignmentShiftMask;
+
+  // Setup the first [temporary] block, if necessary.
+  if (temporary) {
+    Block* block = temporary->data<Block>();
+    block->prev = nullptr;
+    block->next = nullptr;
+
+    ASMJIT_ASSERT(temporary->size() >= kBlockSize);
+    block->size = temporary->size() - kBlockSize;
+
+    _assignBlock(block);
+  }
+}
+
+void Zone::reset(ResetPolicy resetPolicy) noexcept {
+  Block* cur = _block;
+
+  // Can't be altered.
+  if (cur == &_zeroBlock)
+    return;
+
+  if (resetPolicy == ResetPolicy::kHard) {
+    Block* initial = const_cast<Zone::Block*>(&_zeroBlock);
+    _ptr = initial->data();
+    _end = initial->data();
+    _block = initial;
+
+    // Since cur can be in the middle of the double-linked list, we have to traverse both directions (`prev` and
+    // `next`) separately to visit all.
+    Block* next = cur->next;
+    do {
+      Block* prev = cur->prev;
+
+      // If this is the first block and this ZoneTmp is temporary then the first block is statically allocated.
+      // We cannot free it and it makes sense to keep it even when this is hard reset.
+      if (prev == nullptr && _isTemporary) {
+        cur->prev = nullptr;
+        cur->next = nullptr;
+        _assignBlock(cur);
+        break;
+      }
+
+      ::free(cur);
+      cur = prev;
+    } while (cur);
+
+    cur = next;
+    while (cur) {
+      next = cur->next;
+      ::free(cur);
+      cur = next;
+    }
+  }
+  else {
+    while (cur->prev)
+      cur = cur->prev;
+    _assignBlock(cur);
+  }
+}
+
+// Zone - Alloc
+// ============
+
+void* Zone::_alloc(size_t size, size_t alignment) noexcept {
+  Block* curBlock = _block;
+  Block* next = curBlock->next;
+
+  size_t rawBlockAlignment = blockAlignment();
+  size_t minimumAlignment = Support::max<size_t>(alignment, rawBlockAlignment);
+
+  // If the `Zone` has been cleared the current block doesn't have to be the last one. Check if there is a block
+  // that can be used instead of allocating a new one. If there is a `next` block it's completely unused, we don't
+  // have to check for remaining bytes in that case.
+  if (next) {
+    uint8_t* ptr = Support::alignUp(next->data(), minimumAlignment);
+    uint8_t* end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
+
+    if (size <= (size_t)(end - ptr)) {
+      _block = next;
+      _ptr = ptr + size;
+      _end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
+      return static_cast<void*>(ptr);
+    }
+  }
+
+  size_t blockAlignmentOverhead = alignment - Support::min<size_t>(alignment, Globals::kAllocAlignment);
+  size_t newSize = Support::max(blockSize(), size);
+
+  // Prevent arithmetic overflow.
+  if (ASMJIT_UNLIKELY(newSize > SIZE_MAX - kBlockSize - blockAlignmentOverhead))
+    return nullptr;
+
+  // Allocate new block - we add alignment overhead to `newSize`, which becomes the new block size, and we also add
+  // `kBlockOverhead` to the allocator as it includes members of `Zone::Block` structure.
+  newSize += blockAlignmentOverhead;
+  Block* newBlock = static_cast<Block*>(::malloc(newSize + kBlockSize));
+
+  if (ASMJIT_UNLIKELY(!newBlock))
+    return nullptr;
+
+  // Align the pointer to `minimumAlignment` and adjust the size of this block accordingly. It's the same as using
+  // `minimumAlignment - Support::alignUpDiff()`, just written differently.
+  {
+    newBlock->prev = nullptr;
+    newBlock->next = nullptr;
+    newBlock->size = newSize;
+
+    if (curBlock != &_zeroBlock) {
+      newBlock->prev = curBlock;
+      curBlock->next = newBlock;
+
+      // Does only happen if there is a next block, but the requested memory can't fit into it. In this case a new
+      // buffer is allocated and inserted between the current block and the next one.
+      if (next) {
+        newBlock->next = next;
+        next->prev = newBlock;
+      }
+    }
+
+    uint8_t* ptr = Support::alignUp(newBlock->data(), minimumAlignment);
+    uint8_t* end = Support::alignDown(newBlock->data() + newSize, rawBlockAlignment);
+
+    _ptr = ptr + size;
+    _end = end;
+    _block = newBlock;
+
+    ASMJIT_ASSERT(_ptr <= _end);
+    return static_cast<void*>(ptr);
+  }
+}
+
+void* Zone::allocZeroed(size_t size, size_t alignment) noexcept {
+  void* p = alloc(size, alignment);
+  if (ASMJIT_UNLIKELY(!p))
+    return p;
+  return memset(p, 0, size);
+}
+
+void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
+  if (ASMJIT_UNLIKELY(!data || !size))
+    return nullptr;
+
+  ASMJIT_ASSERT(size != SIZE_MAX);
+  uint8_t* m = allocT<uint8_t>(size + nullTerminate);
+  if (ASMJIT_UNLIKELY(!m)) return nullptr;
+
+  memcpy(m, data, size);
+  if (nullTerminate) m[size] = '\0';
+
+  return static_cast<void*>(m);
+}
+
+char* Zone::sformat(const char* fmt, ...) noexcept {
+  if (ASMJIT_UNLIKELY(!fmt))
+    return nullptr;
+
+  char buf[512];
+  size_t size;
+  va_list ap;
+
+  va_start(ap, fmt);
+  size = unsigned(vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap));
+  va_end(ap);
+
+  buf[size++] = 0;
+  return static_cast<char*>(dup(buf, size));
+}
+
+// ZoneAllocator - Utilities
+// =========================
+
+#if defined(ASMJIT_BUILD_DEBUG)
+static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept {
+  ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks;
+  while (cur) {
+    if (cur == block)
+      return true;
+    cur = cur->next;
+  }
+  return false;
+}
+#endif
+
+// ZoneAllocator - Init & Reset
+// ============================
+
+void ZoneAllocator::reset(Zone* zone) noexcept {
+  // Free dynamic blocks.
+  DynamicBlock* block = _dynamicBlocks;
+  while (block) {
+    DynamicBlock* next = block->next;
+    ::free(block);
+    block = next;
+  }
+
+  // Zero the entire class and initialize to the given `zone`.
+  memset(this, 0, sizeof(*this));
+  _zone = zone;
+}
+
+// asmjit::ZoneAllocator - Alloc & Release
+// =======================================
+
+void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
+  ASMJIT_ASSERT(isInitialized());
+
+  // Use the memory pool only if the requested block has a reasonable size.
+  uint32_t slot;
+  if (_getSlotIndex(size, slot, allocatedSize)) {
+    // Slot reuse.
+    uint8_t* p = reinterpret_cast<uint8_t*>(_slots[slot]);
+    size = allocatedSize;
+
+    if (p) {
+      _slots[slot] = reinterpret_cast<Slot*>(p)->next;
+      return p;
+    }
+
+    _zone->align(kBlockAlignment);
+    p = _zone->ptr();
+    size_t remain = (size_t)(_zone->end() - p);
+
+    if (ASMJIT_LIKELY(remain >= size)) {
+      _zone->setPtr(p + size);
+      return p;
+    }
+    else {
+      // Distribute the remaining memory to suitable slots, if possible.
+      if (remain >= kLoGranularity) {
+        do {
+          size_t distSize = Support::min<size_t>(remain, kLoMaxSize);
+          uint32_t distSlot = uint32_t((distSize - kLoGranularity) / kLoGranularity);
+          ASMJIT_ASSERT(distSlot < kLoCount);
+
+          reinterpret_cast<Slot*>(p)->next = _slots[distSlot];
+          _slots[distSlot] = reinterpret_cast<Slot*>(p);
+
+          p += distSize;
+          remain -= distSize;
+        } while (remain >= kLoGranularity);
+        _zone->setPtr(p);
+      }
+
+      p = static_cast<uint8_t*>(_zone->_alloc(size, kBlockAlignment));
+      if (ASMJIT_UNLIKELY(!p)) {
+        allocatedSize = 0;
+        return nullptr;
+      }
+
+      return p;
+    }
+  }
+  else {
+    // Allocate a dynamic block.
+    size_t kBlockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
+
+    // Handle a possible overflow.
+    if (ASMJIT_UNLIKELY(kBlockOverhead >= SIZE_MAX - size))
+      return nullptr;
+
+    void* p = ::malloc(size + kBlockOverhead);
+    if (ASMJIT_UNLIKELY(!p)) {
+      allocatedSize = 0;
+      return nullptr;
+    }
+
+    // Link as first in `_dynamicBlocks` double-linked list.
+    DynamicBlock* block = static_cast<DynamicBlock*>(p);
+    DynamicBlock* next = _dynamicBlocks;
+
+    if (next)
+      next->prev = block;
+
+    block->prev = nullptr;
+    block->next = next;
+    _dynamicBlocks = block;
+
+    // Align the pointer to the guaranteed alignment and store `DynamicBlock`
+    // at the beginning of the memory block, so `_releaseDynamic()` can find it.
+    p = Support::alignUp(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment);
+    reinterpret_cast<DynamicBlock**>(p)[-1] = block;
+
+    allocatedSize = size;
+    return p;
+  }
+}
+
+void* ZoneAllocator::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
+  ASMJIT_ASSERT(isInitialized());
+
+  void* p = _alloc(size, allocatedSize);
+  if (ASMJIT_UNLIKELY(!p)) return p;
+  return memset(p, 0, allocatedSize);
+}
+
+void ZoneAllocator::_releaseDynamic(void* p, size_t size) noexcept {
+  DebugUtils::unused(size);
+  ASMJIT_ASSERT(isInitialized());
+
+  // Pointer to `DynamicBlock` is stored at [-1].
+  DynamicBlock* block = reinterpret_cast<DynamicBlock**>(p)[-1];
+  ASMJIT_ASSERT(ZoneAllocator_hasDynamicBlock(this, block));
+
+  // Unlink and free.
+  DynamicBlock* prev = block->prev;
+  DynamicBlock* next = block->next;
+
+  if (prev)
+    prev->next = next;
+  else
+    _dynamicBlocks = next;
+
+  if (next)
+    next->prev = prev;
+
+  ::free(block);
+}
+
+ASMJIT_END_NAMESPACE
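For orientation, a minimal sketch of how the `Zone` arena implemented above is typically driven (not part of the gem diff; `newT<T>()` comes from `core/zone.h`, which is also bundled in this release, and the block size, `Node` type, and format string are arbitrary example choices):

```
#include <asmjit/core.h>

using namespace asmjit;

struct Node {
  Node* next;
  int value;
};

// Sketch: bump-allocate from a Zone; nothing is freed individually.
static void zoneSketch() {
  Zone zone(4096);                            // First block is ~4kB, malloc'ed lazily.

  Node* head = nullptr;
  for (int i = 0; i < 16; i++) {
    Node* n = zone.newT<Node>();              // Placement-new inside the current block.
    n->next = head;
    n->value = i;
    head = n;
  }

  char* s = zone.sformat("nodes=%d", 16);     // Formatted copy owned by the zone.
  DebugUtils::unused(head, s);

  zone.reset();                               // Soft reset: blocks are kept for reuse.
}
```

A soft reset rewinds to the first block and keeps already-allocated blocks for reuse, while `ResetPolicy::kHard` walks the block list and frees everything except a statically provided temporary block, exactly as `Zone::reset()` above shows.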