asmjit 0.2.0 → 0.2.1

Files changed (201)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +1 -1
  3. data/asmjit.gemspec +1 -1
  4. data/ext/asmjit/asmjit/.editorconfig +10 -0
  5. data/ext/asmjit/asmjit/.github/FUNDING.yml +1 -0
  6. data/ext/asmjit/asmjit/.github/workflows/build-config.json +47 -0
  7. data/ext/asmjit/asmjit/.github/workflows/build.yml +156 -0
  8. data/ext/asmjit/asmjit/.gitignore +6 -0
  9. data/ext/asmjit/asmjit/CMakeLists.txt +611 -0
  10. data/ext/asmjit/asmjit/LICENSE.md +17 -0
  11. data/ext/asmjit/asmjit/README.md +69 -0
  12. data/ext/asmjit/asmjit/src/asmjit/a64.h +62 -0
  13. data/ext/asmjit/asmjit/src/asmjit/arm/a64archtraits_p.h +81 -0
  14. data/ext/asmjit/asmjit/src/asmjit/arm/a64assembler.cpp +5115 -0
  15. data/ext/asmjit/asmjit/src/asmjit/arm/a64assembler.h +72 -0
  16. data/ext/asmjit/asmjit/src/asmjit/arm/a64builder.cpp +51 -0
  17. data/ext/asmjit/asmjit/src/asmjit/arm/a64builder.h +57 -0
  18. data/ext/asmjit/asmjit/src/asmjit/arm/a64compiler.cpp +60 -0
  19. data/ext/asmjit/asmjit/src/asmjit/arm/a64compiler.h +247 -0
  20. data/ext/asmjit/asmjit/src/asmjit/arm/a64emithelper.cpp +464 -0
  21. data/ext/asmjit/asmjit/src/asmjit/arm/a64emithelper_p.h +50 -0
  22. data/ext/asmjit/asmjit/src/asmjit/arm/a64emitter.h +1228 -0
  23. data/ext/asmjit/asmjit/src/asmjit/arm/a64formatter.cpp +298 -0
  24. data/ext/asmjit/asmjit/src/asmjit/arm/a64formatter_p.h +59 -0
  25. data/ext/asmjit/asmjit/src/asmjit/arm/a64func.cpp +189 -0
  26. data/ext/asmjit/asmjit/src/asmjit/arm/a64func_p.h +33 -0
  27. data/ext/asmjit/asmjit/src/asmjit/arm/a64globals.h +1894 -0
  28. data/ext/asmjit/asmjit/src/asmjit/arm/a64instapi.cpp +278 -0
  29. data/ext/asmjit/asmjit/src/asmjit/arm/a64instapi_p.h +41 -0
  30. data/ext/asmjit/asmjit/src/asmjit/arm/a64instdb.cpp +1957 -0
  31. data/ext/asmjit/asmjit/src/asmjit/arm/a64instdb.h +74 -0
  32. data/ext/asmjit/asmjit/src/asmjit/arm/a64instdb_p.h +876 -0
  33. data/ext/asmjit/asmjit/src/asmjit/arm/a64operand.cpp +85 -0
  34. data/ext/asmjit/asmjit/src/asmjit/arm/a64operand.h +312 -0
  35. data/ext/asmjit/asmjit/src/asmjit/arm/a64rapass.cpp +852 -0
  36. data/ext/asmjit/asmjit/src/asmjit/arm/a64rapass_p.h +105 -0
  37. data/ext/asmjit/asmjit/src/asmjit/arm/a64utils.h +179 -0
  38. data/ext/asmjit/asmjit/src/asmjit/arm/armformatter.cpp +143 -0
  39. data/ext/asmjit/asmjit/src/asmjit/arm/armformatter_p.h +44 -0
  40. data/ext/asmjit/asmjit/src/asmjit/arm/armglobals.h +21 -0
  41. data/ext/asmjit/asmjit/src/asmjit/arm/armoperand.h +621 -0
  42. data/ext/asmjit/asmjit/src/asmjit/arm.h +62 -0
  43. data/ext/asmjit/asmjit/src/asmjit/asmjit-scope-begin.h +17 -0
  44. data/ext/asmjit/asmjit/src/asmjit/asmjit-scope-end.h +9 -0
  45. data/ext/asmjit/asmjit/src/asmjit/asmjit.h +33 -0
  46. data/ext/asmjit/asmjit/src/asmjit/core/api-build_p.h +55 -0
  47. data/ext/asmjit/asmjit/src/asmjit/core/api-config.h +613 -0
  48. data/ext/asmjit/asmjit/src/asmjit/core/archcommons.h +229 -0
  49. data/ext/asmjit/asmjit/src/asmjit/core/archtraits.cpp +160 -0
  50. data/ext/asmjit/asmjit/src/asmjit/core/archtraits.h +290 -0
  51. data/ext/asmjit/asmjit/src/asmjit/core/assembler.cpp +406 -0
  52. data/ext/asmjit/asmjit/src/asmjit/core/assembler.h +129 -0
  53. data/ext/asmjit/asmjit/src/asmjit/core/builder.cpp +889 -0
  54. data/ext/asmjit/asmjit/src/asmjit/core/builder.h +1391 -0
  55. data/ext/asmjit/asmjit/src/asmjit/core/codebuffer.h +113 -0
  56. data/ext/asmjit/asmjit/src/asmjit/core/codeholder.cpp +1149 -0
  57. data/ext/asmjit/asmjit/src/asmjit/core/codeholder.h +1035 -0
  58. data/ext/asmjit/asmjit/src/asmjit/core/codewriter.cpp +175 -0
  59. data/ext/asmjit/asmjit/src/asmjit/core/codewriter_p.h +179 -0
  60. data/ext/asmjit/asmjit/src/asmjit/core/compiler.cpp +582 -0
  61. data/ext/asmjit/asmjit/src/asmjit/core/compiler.h +737 -0
  62. data/ext/asmjit/asmjit/src/asmjit/core/compilerdefs.h +173 -0
  63. data/ext/asmjit/asmjit/src/asmjit/core/constpool.cpp +363 -0
  64. data/ext/asmjit/asmjit/src/asmjit/core/constpool.h +250 -0
  65. data/ext/asmjit/asmjit/src/asmjit/core/cpuinfo.cpp +1162 -0
  66. data/ext/asmjit/asmjit/src/asmjit/core/cpuinfo.h +813 -0
  67. data/ext/asmjit/asmjit/src/asmjit/core/emithelper.cpp +323 -0
  68. data/ext/asmjit/asmjit/src/asmjit/core/emithelper_p.h +58 -0
  69. data/ext/asmjit/asmjit/src/asmjit/core/emitter.cpp +333 -0
  70. data/ext/asmjit/asmjit/src/asmjit/core/emitter.h +741 -0
  71. data/ext/asmjit/asmjit/src/asmjit/core/emitterutils.cpp +129 -0
  72. data/ext/asmjit/asmjit/src/asmjit/core/emitterutils_p.h +89 -0
  73. data/ext/asmjit/asmjit/src/asmjit/core/environment.cpp +46 -0
  74. data/ext/asmjit/asmjit/src/asmjit/core/environment.h +508 -0
  75. data/ext/asmjit/asmjit/src/asmjit/core/errorhandler.cpp +14 -0
  76. data/ext/asmjit/asmjit/src/asmjit/core/errorhandler.h +228 -0
  77. data/ext/asmjit/asmjit/src/asmjit/core/formatter.cpp +584 -0
  78. data/ext/asmjit/asmjit/src/asmjit/core/formatter.h +247 -0
  79. data/ext/asmjit/asmjit/src/asmjit/core/formatter_p.h +34 -0
  80. data/ext/asmjit/asmjit/src/asmjit/core/func.cpp +286 -0
  81. data/ext/asmjit/asmjit/src/asmjit/core/func.h +1445 -0
  82. data/ext/asmjit/asmjit/src/asmjit/core/funcargscontext.cpp +293 -0
  83. data/ext/asmjit/asmjit/src/asmjit/core/funcargscontext_p.h +199 -0
  84. data/ext/asmjit/asmjit/src/asmjit/core/globals.cpp +133 -0
  85. data/ext/asmjit/asmjit/src/asmjit/core/globals.h +393 -0
  86. data/ext/asmjit/asmjit/src/asmjit/core/inst.cpp +113 -0
  87. data/ext/asmjit/asmjit/src/asmjit/core/inst.h +772 -0
  88. data/ext/asmjit/asmjit/src/asmjit/core/jitallocator.cpp +1242 -0
  89. data/ext/asmjit/asmjit/src/asmjit/core/jitallocator.h +261 -0
  90. data/ext/asmjit/asmjit/src/asmjit/core/jitruntime.cpp +80 -0
  91. data/ext/asmjit/asmjit/src/asmjit/core/jitruntime.h +89 -0
  92. data/ext/asmjit/asmjit/src/asmjit/core/logger.cpp +69 -0
  93. data/ext/asmjit/asmjit/src/asmjit/core/logger.h +198 -0
  94. data/ext/asmjit/asmjit/src/asmjit/core/misc_p.h +33 -0
  95. data/ext/asmjit/asmjit/src/asmjit/core/operand.cpp +132 -0
  96. data/ext/asmjit/asmjit/src/asmjit/core/operand.h +1611 -0
  97. data/ext/asmjit/asmjit/src/asmjit/core/osutils.cpp +84 -0
  98. data/ext/asmjit/asmjit/src/asmjit/core/osutils.h +61 -0
  99. data/ext/asmjit/asmjit/src/asmjit/core/osutils_p.h +68 -0
  100. data/ext/asmjit/asmjit/src/asmjit/core/raassignment_p.h +418 -0
  101. data/ext/asmjit/asmjit/src/asmjit/core/rabuilders_p.h +612 -0
  102. data/ext/asmjit/asmjit/src/asmjit/core/radefs_p.h +1204 -0
  103. data/ext/asmjit/asmjit/src/asmjit/core/ralocal.cpp +1166 -0
  104. data/ext/asmjit/asmjit/src/asmjit/core/ralocal_p.h +254 -0
  105. data/ext/asmjit/asmjit/src/asmjit/core/rapass.cpp +1969 -0
  106. data/ext/asmjit/asmjit/src/asmjit/core/rapass_p.h +1183 -0
  107. data/ext/asmjit/asmjit/src/asmjit/core/rastack.cpp +184 -0
  108. data/ext/asmjit/asmjit/src/asmjit/core/rastack_p.h +171 -0
  109. data/ext/asmjit/asmjit/src/asmjit/core/string.cpp +559 -0
  110. data/ext/asmjit/asmjit/src/asmjit/core/string.h +372 -0
  111. data/ext/asmjit/asmjit/src/asmjit/core/support.cpp +494 -0
  112. data/ext/asmjit/asmjit/src/asmjit/core/support.h +1773 -0
  113. data/ext/asmjit/asmjit/src/asmjit/core/target.cpp +14 -0
  114. data/ext/asmjit/asmjit/src/asmjit/core/target.h +53 -0
  115. data/ext/asmjit/asmjit/src/asmjit/core/type.cpp +74 -0
  116. data/ext/asmjit/asmjit/src/asmjit/core/type.h +419 -0
  117. data/ext/asmjit/asmjit/src/asmjit/core/virtmem.cpp +722 -0
  118. data/ext/asmjit/asmjit/src/asmjit/core/virtmem.h +242 -0
  119. data/ext/asmjit/asmjit/src/asmjit/core/zone.cpp +353 -0
  120. data/ext/asmjit/asmjit/src/asmjit/core/zone.h +615 -0
  121. data/ext/asmjit/asmjit/src/asmjit/core/zonehash.cpp +309 -0
  122. data/ext/asmjit/asmjit/src/asmjit/core/zonehash.h +186 -0
  123. data/ext/asmjit/asmjit/src/asmjit/core/zonelist.cpp +163 -0
  124. data/ext/asmjit/asmjit/src/asmjit/core/zonelist.h +209 -0
  125. data/ext/asmjit/asmjit/src/asmjit/core/zonestack.cpp +176 -0
  126. data/ext/asmjit/asmjit/src/asmjit/core/zonestack.h +239 -0
  127. data/ext/asmjit/asmjit/src/asmjit/core/zonestring.h +120 -0
  128. data/ext/asmjit/asmjit/src/asmjit/core/zonetree.cpp +99 -0
  129. data/ext/asmjit/asmjit/src/asmjit/core/zonetree.h +380 -0
  130. data/ext/asmjit/asmjit/src/asmjit/core/zonevector.cpp +356 -0
  131. data/ext/asmjit/asmjit/src/asmjit/core/zonevector.h +690 -0
  132. data/ext/asmjit/asmjit/src/asmjit/core.h +1861 -0
  133. data/ext/asmjit/asmjit/src/asmjit/x86/x86archtraits_p.h +148 -0
  134. data/ext/asmjit/asmjit/src/asmjit/x86/x86assembler.cpp +5110 -0
  135. data/ext/asmjit/asmjit/src/asmjit/x86/x86assembler.h +685 -0
  136. data/ext/asmjit/asmjit/src/asmjit/x86/x86builder.cpp +52 -0
  137. data/ext/asmjit/asmjit/src/asmjit/x86/x86builder.h +351 -0
  138. data/ext/asmjit/asmjit/src/asmjit/x86/x86compiler.cpp +61 -0
  139. data/ext/asmjit/asmjit/src/asmjit/x86/x86compiler.h +721 -0
  140. data/ext/asmjit/asmjit/src/asmjit/x86/x86emithelper.cpp +619 -0
  141. data/ext/asmjit/asmjit/src/asmjit/x86/x86emithelper_p.h +60 -0
  142. data/ext/asmjit/asmjit/src/asmjit/x86/x86emitter.h +4315 -0
  143. data/ext/asmjit/asmjit/src/asmjit/x86/x86formatter.cpp +944 -0
  144. data/ext/asmjit/asmjit/src/asmjit/x86/x86formatter_p.h +58 -0
  145. data/ext/asmjit/asmjit/src/asmjit/x86/x86func.cpp +503 -0
  146. data/ext/asmjit/asmjit/src/asmjit/x86/x86func_p.h +33 -0
  147. data/ext/asmjit/asmjit/src/asmjit/x86/x86globals.h +2169 -0
  148. data/ext/asmjit/asmjit/src/asmjit/x86/x86instapi.cpp +1732 -0
  149. data/ext/asmjit/asmjit/src/asmjit/x86/x86instapi_p.h +41 -0
  150. data/ext/asmjit/asmjit/src/asmjit/x86/x86instdb.cpp +4427 -0
  151. data/ext/asmjit/asmjit/src/asmjit/x86/x86instdb.h +563 -0
  152. data/ext/asmjit/asmjit/src/asmjit/x86/x86instdb_p.h +311 -0
  153. data/ext/asmjit/asmjit/src/asmjit/x86/x86opcode_p.h +436 -0
  154. data/ext/asmjit/asmjit/src/asmjit/x86/x86operand.cpp +231 -0
  155. data/ext/asmjit/asmjit/src/asmjit/x86/x86operand.h +1085 -0
  156. data/ext/asmjit/asmjit/src/asmjit/x86/x86rapass.cpp +1509 -0
  157. data/ext/asmjit/asmjit/src/asmjit/x86/x86rapass_p.h +94 -0
  158. data/ext/asmjit/asmjit/src/asmjit/x86.h +93 -0
  159. data/ext/asmjit/asmjit/src/asmjit.natvis +245 -0
  160. data/ext/asmjit/asmjit/test/asmjit_test_assembler.cpp +84 -0
  161. data/ext/asmjit/asmjit/test/asmjit_test_assembler.h +85 -0
  162. data/ext/asmjit/asmjit/test/asmjit_test_assembler_a64.cpp +4006 -0
  163. data/ext/asmjit/asmjit/test/asmjit_test_assembler_x64.cpp +17833 -0
  164. data/ext/asmjit/asmjit/test/asmjit_test_assembler_x86.cpp +8300 -0
  165. data/ext/asmjit/asmjit/test/asmjit_test_compiler.cpp +253 -0
  166. data/ext/asmjit/asmjit/test/asmjit_test_compiler.h +73 -0
  167. data/ext/asmjit/asmjit/test/asmjit_test_compiler_a64.cpp +690 -0
  168. data/ext/asmjit/asmjit/test/asmjit_test_compiler_x86.cpp +4317 -0
  169. data/ext/asmjit/asmjit/test/asmjit_test_emitters.cpp +197 -0
  170. data/ext/asmjit/asmjit/test/asmjit_test_instinfo.cpp +181 -0
  171. data/ext/asmjit/asmjit/test/asmjit_test_misc.h +257 -0
  172. data/ext/asmjit/asmjit/test/asmjit_test_perf.cpp +62 -0
  173. data/ext/asmjit/asmjit/test/asmjit_test_perf.h +61 -0
  174. data/ext/asmjit/asmjit/test/asmjit_test_perf_a64.cpp +699 -0
  175. data/ext/asmjit/asmjit/test/asmjit_test_perf_x86.cpp +5032 -0
  176. data/ext/asmjit/asmjit/test/asmjit_test_unit.cpp +172 -0
  177. data/ext/asmjit/asmjit/test/asmjit_test_x86_sections.cpp +172 -0
  178. data/ext/asmjit/asmjit/test/asmjitutils.h +38 -0
  179. data/ext/asmjit/asmjit/test/broken.cpp +312 -0
  180. data/ext/asmjit/asmjit/test/broken.h +148 -0
  181. data/ext/asmjit/asmjit/test/cmdline.h +61 -0
  182. data/ext/asmjit/asmjit/test/performancetimer.h +41 -0
  183. data/ext/asmjit/asmjit/tools/configure-makefiles.sh +13 -0
  184. data/ext/asmjit/asmjit/tools/configure-ninja.sh +13 -0
  185. data/ext/asmjit/asmjit/tools/configure-sanitizers.sh +13 -0
  186. data/ext/asmjit/asmjit/tools/configure-vs2019-x64.bat +2 -0
  187. data/ext/asmjit/asmjit/tools/configure-vs2019-x86.bat +2 -0
  188. data/ext/asmjit/asmjit/tools/configure-vs2022-x64.bat +2 -0
  189. data/ext/asmjit/asmjit/tools/configure-vs2022-x86.bat +2 -0
  190. data/ext/asmjit/asmjit/tools/configure-xcode.sh +8 -0
  191. data/ext/asmjit/asmjit/tools/enumgen.js +417 -0
  192. data/ext/asmjit/asmjit/tools/enumgen.sh +3 -0
  193. data/ext/asmjit/asmjit/tools/tablegen-arm.js +365 -0
  194. data/ext/asmjit/asmjit/tools/tablegen-arm.sh +3 -0
  195. data/ext/asmjit/asmjit/tools/tablegen-x86.js +2638 -0
  196. data/ext/asmjit/asmjit/tools/tablegen-x86.sh +3 -0
  197. data/ext/asmjit/asmjit/tools/tablegen.js +947 -0
  198. data/ext/asmjit/asmjit/tools/tablegen.sh +4 -0
  199. data/ext/asmjit/asmjit.cc +18 -0
  200. data/lib/asmjit/version.rb +1 -1
  201. metadata +197 -2
data/ext/asmjit/asmjit/src/asmjit/core/jitallocator.cpp
@@ -0,0 +1,1242 @@
+ // This file is part of AsmJit project <https://asmjit.com>
+ //
+ // See asmjit.h or LICENSE.md for license and copyright information
+ // SPDX-License-Identifier: Zlib
+
+ #include "../core/api-build_p.h"
+ #ifndef ASMJIT_NO_JIT
+
+ #include "../core/archtraits.h"
+ #include "../core/jitallocator.h"
+ #include "../core/osutils_p.h"
+ #include "../core/support.h"
+ #include "../core/virtmem.h"
+ #include "../core/zone.h"
+ #include "../core/zonelist.h"
+ #include "../core/zonetree.h"
+
+ ASMJIT_BEGIN_NAMESPACE
+
+ // JitAllocator - Constants
+ // ========================
+
+ //! Number of pools to use when `JitAllocatorOptions::kUseMultiplePools` is set.
+ //!
+ //! Each pool doubles the granularity of the previous one to make memory
+ //! management more efficient. The ideal number of pools appears to be 3 to 4,
+ //! as it distributes small and large functions properly.
+ static constexpr uint32_t kJitAllocatorMultiPoolCount = 3;
+
+ //! Minimum granularity (and the default granularity for pool #0).
+ static constexpr uint32_t kJitAllocatorBaseGranularity = 64;
+
+ //! Maximum block size (32MB).
+ static constexpr uint32_t kJitAllocatorMaxBlockSize = 1024 * 1024 * 32;
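With these defaults, each pool simply doubles the granularity of its predecessor. A standalone sketch of the per-pool granularities (illustrative only; it mirrors the `granularity << poolId` loop in JitAllocatorImpl_new() further down):

#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint32_t kBaseGranularity = 64; // kJitAllocatorBaseGranularity
  constexpr uint32_t kPoolCount = 3;        // kJitAllocatorMultiPoolCount

  // Pool #i serves requests at granularity (base << i): 64, 128, 256 bytes.
  for (uint32_t poolId = 0; poolId < kPoolCount; poolId++)
    std::printf("pool #%u granularity = %u bytes\n", poolId, kBaseGranularity << poolId);
}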
+
+ // JitAllocator - Fill Pattern
+ // ===========================
+
+ static inline uint32_t JitAllocator_defaultFillPattern() noexcept {
+   // X86 and X86_64 - 4x 'int3' instruction.
+   if (ASMJIT_ARCH_X86)
+     return 0xCCCCCCCCu;
+
+   // Unknown...
+   return 0u;
+ }
+
+ // JitAllocator - BitVectorRangeIterator
+ // =====================================
+
+ template<typename T, uint32_t B>
+ class BitVectorRangeIterator {
+ public:
+   const T* _ptr;
+   size_t _idx;
+   size_t _end;
+   T _bitWord;
+
+   enum : uint32_t { kBitWordSize = Support::bitSizeOf<T>() };
+   enum : T { kXorMask = B == 0 ? Support::allOnes<T>() : T(0) };
+
+   ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept {
+     init(data, numBitWords);
+   }
+
+   ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
+     init(data, numBitWords, start, end);
+   }
+
+   ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords) noexcept {
+     init(data, numBitWords, 0, numBitWords * kBitWordSize);
+   }
+
+   ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
+     ASMJIT_ASSERT(numBitWords >= (end + kBitWordSize - 1) / kBitWordSize);
+     DebugUtils::unused(numBitWords);
+
+     size_t idx = Support::alignDown(start, kBitWordSize);
+     const T* ptr = data + (idx / kBitWordSize);
+
+     T bitWord = 0;
+     if (idx < end)
+       bitWord = (*ptr ^ kXorMask) & (Support::allOnes<T>() << (start % kBitWordSize));
+
+     _ptr = ptr;
+     _idx = idx;
+     _end = end;
+     _bitWord = bitWord;
+   }
+
+   ASMJIT_FORCE_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits<size_t>::max()) noexcept {
+     // Skip all empty BitWords.
+     while (_bitWord == 0) {
+       _idx += kBitWordSize;
+       if (_idx >= _end)
+         return false;
+       _bitWord = (*++_ptr) ^ kXorMask;
+     }
+
+     size_t i = Support::ctz(_bitWord);
+
+     *rangeStart = _idx + i;
+     _bitWord = ~(_bitWord ^ ~(Support::allOnes<T>() << i));
+
+     if (_bitWord == 0) {
+       *rangeEnd = Support::min(_idx + kBitWordSize, _end);
+       while (*rangeEnd - *rangeStart < rangeHint) {
+         _idx += kBitWordSize;
+         if (_idx >= _end)
+           break;
+
+         _bitWord = (*++_ptr) ^ kXorMask;
+         if (_bitWord != Support::allOnes<T>()) {
+           size_t j = Support::ctz(~_bitWord);
+           *rangeEnd = Support::min(_idx + j, _end);
+           _bitWord = _bitWord ^ ~(Support::allOnes<T>() << j);
+           break;
+         }
+
+         *rangeEnd = Support::min(_idx + kBitWordSize, _end);
+         _bitWord = 0;
+         continue;
+       }
+
+       return true;
+     }
+     else {
+       size_t j = Support::ctz(_bitWord);
+       *rangeEnd = Support::min(_idx + j, _end);
+
+       _bitWord = ~(_bitWord ^ ~(Support::allOnes<T>() << j));
+       return true;
+     }
+   }
+ };
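nextRange() yields maximal runs of bits equal to the template parameter B (the allocator instantiates it with B == 0 to walk free areas of the used bit-vector), and rangeHint lets a caller stop extending a run early. Since the class leans on asmjit's internal Support helpers, here is a deliberately simplified standalone re-implementation of the same idea for B == 1, for illustration only:

#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified model: scan bit by bit instead of word by word. The real
// iterator above works a word at a time and is therefore much faster.
static bool nextSetRange(const std::vector<uint64_t>& words, size_t& pos,
                         size_t* start, size_t* end) {
  size_t totalBits = words.size() * 64;
  while (pos < totalBits && !((words[pos / 64] >> (pos % 64)) & 1u))
    pos++;
  if (pos >= totalBits)
    return false;
  *start = pos;
  while (pos < totalBits && ((words[pos / 64] >> (pos % 64)) & 1u))
    pos++;
  *end = pos;
  return true;
}

int main() {
  std::vector<uint64_t> bits = { 0x00000000000000F0u }; // bits 4..7 set
  size_t pos = 0, start, end;
  while (nextSetRange(bits, pos, &start, &end))
    std::printf("range [%zu, %zu)\n", start, end);      // prints "range [4, 8)"
}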
+
+ // JitAllocator - Pool
+ // ===================
+
+ class JitAllocatorBlock;
+
+ class JitAllocatorPool {
+ public:
+   ASMJIT_NONCOPYABLE(JitAllocatorPool)
+
+   //! Doubly-linked list of blocks.
+   ZoneList<JitAllocatorBlock> blocks;
+   //! Where to start looking first.
+   JitAllocatorBlock* cursor;
+
+   //! Count of blocks.
+   uint32_t blockCount;
+   //! Allocation granularity.
+   uint16_t granularity;
+   //! Log2(granularity).
+   uint8_t granularityLog2;
+   //! Count of empty blocks (either 0 or 1 as we won't keep more blocks empty).
+   uint8_t emptyBlockCount;
+
+   //! Number of bits reserved across all blocks.
+   size_t totalAreaSize;
+   //! Number of bits used across all blocks.
+   size_t totalAreaUsed;
+   //! Overhead of all blocks (in bytes).
+   size_t totalOverheadBytes;
+
+   inline JitAllocatorPool(uint32_t granularity) noexcept
+     : blocks(),
+       cursor(nullptr),
+       blockCount(0),
+       granularity(uint16_t(granularity)),
+       granularityLog2(uint8_t(Support::ctz(granularity))),
+       emptyBlockCount(0),
+       totalAreaSize(0),
+       totalAreaUsed(0),
+       totalOverheadBytes(0) {}
+
+   inline void reset() noexcept {
+     blocks.reset();
+     cursor = nullptr;
+     blockCount = 0;
+     totalAreaSize = 0;
+     totalAreaUsed = 0;
+     totalOverheadBytes = 0;
+   }
+
+   inline size_t byteSizeFromAreaSize(uint32_t areaSize) const noexcept { return size_t(areaSize) * granularity; }
+   inline uint32_t areaSizeFromByteSize(size_t size) const noexcept { return uint32_t((size + granularity - 1) >> granularityLog2); }
+
+   inline size_t bitWordCountFromAreaSize(uint32_t areaSize) const noexcept {
+     using namespace Support;
+     return alignUp<size_t>(areaSize, kBitWordSizeInBits) / kBitWordSizeInBits;
+   }
+ };
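An "area" is counted in granularity-sized units, so the two conversions above are a multiply and a round-up shift. A standalone check of the arithmetic, assuming pool #0's default granularity of 64 bytes:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  // Mirrors byteSizeFromAreaSize() / areaSizeFromByteSize() for granularity 64.
  const uint32_t granularity = 64;
  const uint32_t granularityLog2 = 6;

  auto byteSizeFromAreaSize = [&](uint32_t areaSize) -> size_t {
    return size_t(areaSize) * granularity;
  };
  auto areaSizeFromByteSize = [&](size_t size) -> uint32_t {
    return uint32_t((size + granularity - 1) >> granularityLog2);
  };

  assert(byteSizeFromAreaSize(3) == 192);  // 3 units -> 192 bytes
  assert(areaSizeFromByteSize(1) == 1);    // 1 byte rounds up to one 64-byte unit
  assert(areaSizeFromByteSize(100) == 2);  // 100 bytes -> 2 units (128 bytes)
  return 0;
}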
+
+ // JitAllocator - Block
+ // ====================
+
+ class JitAllocatorBlock : public ZoneTreeNodeT<JitAllocatorBlock>,
+                           public ZoneListNode<JitAllocatorBlock> {
+ public:
+   ASMJIT_NONCOPYABLE(JitAllocatorBlock)
+
+   enum Flags : uint32_t {
+     //! Block is empty.
+     kFlagEmpty = 0x00000001u,
+     //! Block is dirty (largestUnusedArea, searchStart, searchEnd).
+     kFlagDirty = 0x00000002u,
+     //! Block is dual-mapped.
+     kFlagDualMapped = 0x00000004u
+   };
+
+   //! Link to the pool that owns this block.
+   JitAllocatorPool* _pool;
+   //! Virtual memory mapping - either single mapping (both pointers equal) or
+   //! dual mapping, where one pointer is Read+Execute and the second Read+Write.
+   VirtMem::DualMapping _mapping;
+   //! Virtual memory size (block size) [bytes].
+   size_t _blockSize;
+
+   //! Block flags.
+   uint32_t _flags;
+   //! Size of the whole block area (bit-vector size).
+   uint32_t _areaSize;
+   //! Used area (number of bits in bit-vector used).
+   uint32_t _areaUsed;
+   //! The largest unused contiguous area in the bit-vector (or `areaSize` to initiate rescan).
+   uint32_t _largestUnusedArea;
+   //! Start of a search range (for unused bits).
+   uint32_t _searchStart;
+   //! End of a search range (for unused bits).
+   uint32_t _searchEnd;
+
+   //! Used bit-vector (0 = unused, 1 = used).
+   Support::BitWord* _usedBitVector;
+   //! Stop bit-vector (0 = don't care, 1 = stop).
+   Support::BitWord* _stopBitVector;
+
+   inline JitAllocatorBlock(
+     JitAllocatorPool* pool,
+     VirtMem::DualMapping mapping,
+     size_t blockSize,
+     uint32_t blockFlags,
+     Support::BitWord* usedBitVector,
+     Support::BitWord* stopBitVector,
+     uint32_t areaSize) noexcept
+     : ZoneTreeNodeT(),
+       _pool(pool),
+       _mapping(mapping),
+       _blockSize(blockSize),
+       _flags(blockFlags),
+       _areaSize(areaSize),
+       _areaUsed(0),
+       _largestUnusedArea(areaSize),
+       _searchStart(0),
+       _searchEnd(areaSize),
+       _usedBitVector(usedBitVector),
+       _stopBitVector(stopBitVector) {}
+
+   inline JitAllocatorPool* pool() const noexcept { return _pool; }
+
+   inline uint8_t* rxPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rx); }
+   inline uint8_t* rwPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rw); }
+
+   inline bool hasFlag(uint32_t f) const noexcept { return (_flags & f) != 0; }
+   inline void addFlags(uint32_t f) noexcept { _flags |= f; }
+   inline void clearFlags(uint32_t f) noexcept { _flags &= ~f; }
+
+   inline bool isDirty() const noexcept { return hasFlag(kFlagDirty); }
+   inline void makeDirty() noexcept { addFlags(kFlagDirty); }
+
+   inline size_t blockSize() const noexcept { return _blockSize; }
+
+   inline uint32_t areaSize() const noexcept { return _areaSize; }
+   inline uint32_t areaUsed() const noexcept { return _areaUsed; }
+   inline uint32_t areaAvailable() const noexcept { return _areaSize - _areaUsed; }
+   inline uint32_t largestUnusedArea() const noexcept { return _largestUnusedArea; }
+
+   inline void decreaseUsedArea(uint32_t value) noexcept {
+     _areaUsed -= value;
+     _pool->totalAreaUsed -= value;
+   }
+
+   inline void markAllocatedArea(uint32_t allocatedAreaStart, uint32_t allocatedAreaEnd) noexcept {
+     uint32_t allocatedAreaSize = allocatedAreaEnd - allocatedAreaStart;
+
+     // Mark the newly allocated space as occupied and also the sentinel.
+     Support::bitVectorFill(_usedBitVector, allocatedAreaStart, allocatedAreaSize);
+     Support::bitVectorSetBit(_stopBitVector, allocatedAreaEnd - 1, true);
+
+     // Update search region and statistics.
+     _pool->totalAreaUsed += allocatedAreaSize;
+     _areaUsed += allocatedAreaSize;
+
+     if (areaAvailable() == 0) {
+       _searchStart = _areaSize;
+       _searchEnd = 0;
+       _largestUnusedArea = 0;
+       clearFlags(kFlagDirty);
+     }
+     else {
+       if (_searchStart == allocatedAreaStart)
+         _searchStart = allocatedAreaEnd;
+       if (_searchEnd == allocatedAreaEnd)
+         _searchEnd = allocatedAreaStart;
+       addFlags(kFlagDirty);
+     }
+   }
+
+   inline void markReleasedArea(uint32_t releasedAreaStart, uint32_t releasedAreaEnd) noexcept {
+     uint32_t releasedAreaSize = releasedAreaEnd - releasedAreaStart;
+
+     // Update the search region and statistics.
+     _pool->totalAreaUsed -= releasedAreaSize;
+     _areaUsed -= releasedAreaSize;
+     _searchStart = Support::min(_searchStart, releasedAreaStart);
+     _searchEnd = Support::max(_searchEnd, releasedAreaEnd);
+
+     // Unmark occupied bits and also the sentinel.
+     Support::bitVectorClear(_usedBitVector, releasedAreaStart, releasedAreaSize);
+     Support::bitVectorSetBit(_stopBitVector, releasedAreaEnd - 1, false);
+
+     if (areaUsed() == 0) {
+       _searchStart = 0;
+       _searchEnd = _areaSize;
+       _largestUnusedArea = _areaSize;
+       addFlags(kFlagEmpty);
+       clearFlags(kFlagDirty);
+     }
+     else {
+       addFlags(kFlagDirty);
+     }
+   }
+
+   inline void markShrunkArea(uint32_t shrunkAreaStart, uint32_t shrunkAreaEnd) noexcept {
+     uint32_t shrunkAreaSize = shrunkAreaEnd - shrunkAreaStart;
+
+     // Shrunk area cannot start at zero as it would mean that we have shrunk the first
+     // block to zero bytes, which is not allowed as such block must be released instead.
+     ASMJIT_ASSERT(shrunkAreaStart != 0);
+     ASMJIT_ASSERT(shrunkAreaSize != 0);
+
+     // Update the search region and statistics.
+     _pool->totalAreaUsed -= shrunkAreaSize;
+     _areaUsed -= shrunkAreaSize;
+     _searchStart = Support::min(_searchStart, shrunkAreaStart);
+     _searchEnd = Support::max(_searchEnd, shrunkAreaEnd);
+
+     // Unmark the released space and move the sentinel.
+     Support::bitVectorClear(_usedBitVector, shrunkAreaStart, shrunkAreaSize);
+     Support::bitVectorSetBit(_stopBitVector, shrunkAreaEnd - 1, false);
+     Support::bitVectorSetBit(_stopBitVector, shrunkAreaStart - 1, true);
+
+     addFlags(kFlagDirty);
+   }
+
+   // RBTree default CMP uses '<' and '>' operators.
+   inline bool operator<(const JitAllocatorBlock& other) const noexcept { return rxPtr() < other.rxPtr(); }
+   inline bool operator>(const JitAllocatorBlock& other) const noexcept { return rxPtr() > other.rxPtr(); }
+
+   // Special implementation for querying blocks by `key`, which must be in `[BlockPtr, BlockPtr + BlockSize)` range.
+   inline bool operator<(const uint8_t* key) const noexcept { return rxPtr() + _blockSize <= key; }
+   inline bool operator>(const uint8_t* key) const noexcept { return rxPtr() > key; }
+ };
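The asymmetric key operators are what let release() recover a block from a bare pointer: a block compares "less than" a key only when the key lies wholly past the block's end, so a tree lookup with any address inside [rx, rx + blockSize) converges on the owning block. A standalone model of that comparator using std::map with heterogeneous lookup (hypothetical addresses; the real code uses asmjit's ZoneTree):

#include <cstdint>
#include <cstdio>
#include <map>

struct BlockRange { uintptr_t base; std::size_t size; };

// "Less" mirrors JitAllocatorBlock::operator<(const uint8_t*): a range is
// below a key only if the entire range precedes it, so "neither less nor
// greater" means "key is inside the range".
struct RangeLess {
  using is_transparent = void;
  bool operator()(const BlockRange& b, uintptr_t key) const { return b.base + b.size <= key; }
  bool operator()(uintptr_t key, const BlockRange& b) const { return key < b.base; }
  bool operator()(const BlockRange& a, const BlockRange& b) const { return a.base < b.base; }
};

int main() {
  std::map<BlockRange, const char*, RangeLess> blocks;
  blocks.emplace(BlockRange{0x1000, 0x1000}, "block A"); // [0x1000, 0x2000)
  blocks.emplace(BlockRange{0x4000, 0x2000}, "block B"); // [0x4000, 0x6000)

  auto it = blocks.find(uintptr_t(0x4A00)); // interior pointer into block B
  std::printf("%s\n", it != blocks.end() ? it->second : "not found"); // "block B"
}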
+
+ // JitAllocator - PrivateImpl
+ // ==========================
+
+ class JitAllocatorPrivateImpl : public JitAllocator::Impl {
+ public:
+   //! Lock for thread safety.
+   mutable Lock lock;
+   //! System page size (also a minimum block size).
+   uint32_t pageSize;
+   //! Number of active allocations.
+   size_t allocationCount;
+
+   //! Blocks from all pools in RBTree.
+   ZoneTree<JitAllocatorBlock> tree;
+   //! Allocator pools.
+   JitAllocatorPool* pools;
+   //! Number of allocator pools.
+   size_t poolCount;
+
+   inline JitAllocatorPrivateImpl(JitAllocatorPool* pools, size_t poolCount) noexcept
+     : JitAllocator::Impl {},
+       pageSize(0),
+       allocationCount(0),
+       pools(pools),
+       poolCount(poolCount) {}
+   inline ~JitAllocatorPrivateImpl() noexcept {}
+ };
+
+ static const JitAllocator::Impl JitAllocatorImpl_none {};
+ static const JitAllocator::CreateParams JitAllocatorParams_none {};
+
+ // JitAllocator - Utilities
+ // ========================
+
+ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::CreateParams* params) noexcept {
+   VirtMem::Info vmInfo = VirtMem::info();
+
+   if (!params)
+     params = &JitAllocatorParams_none;
+
+   JitAllocatorOptions options = params->options;
+   uint32_t blockSize = params->blockSize;
+   uint32_t granularity = params->granularity;
+   uint32_t fillPattern = params->fillPattern;
+
+   // Setup pool count to [1..3].
+   size_t poolCount = 1;
+   if (Support::test(options, JitAllocatorOptions::kUseMultiplePools))
+     poolCount = kJitAllocatorMultiPoolCount;
+
+   // Setup block size [64kB..256MB].
+   if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize))
+     blockSize = vmInfo.pageGranularity;
+
+   // Setup granularity [64..256].
+   if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity))
+     granularity = kJitAllocatorBaseGranularity;
+
+   // Setup fill-pattern.
+   if (uint32_t(options & JitAllocatorOptions::kCustomFillPattern) == 0)
+     fillPattern = JitAllocator_defaultFillPattern();
+
+   size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount;
+   void* p = ::malloc(size);
+   if (ASMJIT_UNLIKELY(!p))
+     return nullptr;
+
+   JitAllocatorPool* pools = reinterpret_cast<JitAllocatorPool*>((uint8_t*)p + sizeof(JitAllocatorPrivateImpl));
+   JitAllocatorPrivateImpl* impl = new(p) JitAllocatorPrivateImpl(pools, poolCount);
+
+   impl->options = options;
+   impl->blockSize = blockSize;
+   impl->granularity = granularity;
+   impl->fillPattern = fillPattern;
+   impl->pageSize = vmInfo.pageSize;
+
+   for (size_t poolId = 0; poolId < poolCount; poolId++)
+     new(&pools[poolId]) JitAllocatorPool(granularity << poolId);
+
+   return impl;
+ }
+
+ static inline void JitAllocatorImpl_destroy(JitAllocatorPrivateImpl* impl) noexcept {
+   impl->~JitAllocatorPrivateImpl();
+   ::free(impl);
+ }
+
+ static inline size_t JitAllocatorImpl_sizeToPoolId(const JitAllocatorPrivateImpl* impl, size_t size) noexcept {
+   size_t poolId = impl->poolCount - 1;
+   size_t granularity = size_t(impl->granularity) << poolId;
+
+   while (poolId) {
+     if (Support::alignUp(size, granularity) == size)
+       break;
+     poolId--;
+     granularity >>= 1;
+   }
+
+   return poolId;
+ }
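A request is thus routed to the pool with the coarsest granularity that still divides the (already granularity-aligned) size evenly, falling back to pool #0 otherwise. A standalone sketch of the same walk, assuming the default base granularity of 64 and three pools:

#include <cstddef>
#include <cstdio>

// Mirrors JitAllocatorImpl_sizeToPoolId() for granularity 64 and 3 pools.
// For power-of-two granularities, alignUp(size, g) == size is the same
// test as size % g == 0, which is what we use here.
static size_t sizeToPoolId(size_t size) {
  size_t poolId = 2;                  // poolCount - 1
  size_t granularity = 64u << poolId; // 256
  while (poolId) {
    if (size % granularity == 0)
      break;
    poolId--;
    granularity >>= 1;
  }
  return poolId;
}

int main() {
  std::printf("%zu\n", sizeToPoolId(512)); // 2: multiple of 256
  std::printf("%zu\n", sizeToPoolId(128)); // 1: multiple of 128 but not 256
  std::printf("%zu\n", sizeToPoolId(192)); // 0: only a multiple of 64
}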
+
+ static inline size_t JitAllocatorImpl_bitVectorSizeToByteSize(uint32_t areaSize) noexcept {
+   using Support::kBitWordSizeInBits;
+   return ((areaSize + kBitWordSizeInBits - 1u) / kBitWordSizeInBits) * sizeof(Support::BitWord);
+ }
+
+ static inline size_t JitAllocatorImpl_calculateIdealBlockSize(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t allocationSize) noexcept {
+   JitAllocatorBlock* last = pool->blocks.last();
+   size_t blockSize = last ? last->blockSize() : size_t(impl->blockSize);
+
+   if (blockSize < kJitAllocatorMaxBlockSize)
+     blockSize *= 2u;
+
+   if (allocationSize > blockSize) {
+     blockSize = Support::alignUp(allocationSize, impl->blockSize);
+     if (ASMJIT_UNLIKELY(blockSize < allocationSize))
+       return 0; // Overflowed.
+   }
+
+   return blockSize;
+ }
+
+ ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t pattern, size_t sizeInBytes) noexcept {
+   size_t n = sizeInBytes / 4u;
+   uint32_t* p = static_cast<uint32_t*>(mem);
+
+   for (size_t i = 0; i < n; i++)
+     p[i] = pattern;
+ }
+
+ // Allocate a new `JitAllocatorBlock` for the given `blockSize`.
+ //
+ // NOTE: The block doesn't have `kFlagEmpty` flag set, because the new block
+ // is only allocated when it's actually needed, so it would be cleared anyway.
+ static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t blockSize) noexcept {
+   using Support::BitWord;
+   using Support::kBitWordSizeInBits;
+
+   uint32_t areaSize = uint32_t((blockSize + pool->granularity - 1) >> pool->granularityLog2);
+   uint32_t numBitWords = (areaSize + kBitWordSizeInBits - 1u) / kBitWordSizeInBits;
+
+   JitAllocatorBlock* block = static_cast<JitAllocatorBlock*>(::malloc(sizeof(JitAllocatorBlock)));
+   BitWord* bitWords = nullptr;
+   VirtMem::DualMapping virtMem {};
+   Error err = kErrorOutOfMemory;
+
+   if (block != nullptr)
+     bitWords = static_cast<BitWord*>(::malloc(size_t(numBitWords) * 2 * sizeof(BitWord)));
+
+   uint32_t blockFlags = 0;
+   if (bitWords != nullptr) {
+     if (Support::test(impl->options, JitAllocatorOptions::kUseDualMapping)) {
+       err = VirtMem::allocDualMapping(&virtMem, blockSize, VirtMem::MemoryFlags::kAccessRWX);
+       blockFlags |= JitAllocatorBlock::kFlagDualMapped;
+     }
+     else {
+       err = VirtMem::alloc(&virtMem.rx, blockSize, VirtMem::MemoryFlags::kAccessRWX);
+       virtMem.rw = virtMem.rx;
+     }
+   }
+
+   // Out of memory.
+   if (ASMJIT_UNLIKELY(!block || !bitWords || err != kErrorOk)) {
+     if (bitWords)
+       ::free(bitWords);
+
+     if (block)
+       ::free(block);
+
+     return nullptr;
+   }
+
+   // Fill the memory if the secure mode is enabled.
+   if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
+     VirtMem::ProtectJitReadWriteScope scope(virtMem.rw, blockSize);
+     JitAllocatorImpl_fillPattern(virtMem.rw, impl->fillPattern, blockSize);
+   }
+
+   memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord));
+   return new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
+ }
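Dual mapping gives two virtual views of one physical region: code is written through the RW pointer and executed through the RX pointer, so no address ever needs to be writable and executable at once. A minimal sketch built on the VirtMem calls used above (error handling trimmed; the 4096-byte size is an arbitrary choice for the example, and dual mapping may simply fail on platforms that don't support it):

#include <asmjit/core.h>
#include <cstdio>
using namespace asmjit;

int main() {
  VirtMem::DualMapping dm {};
  if (VirtMem::allocDualMapping(&dm, 4096, VirtMem::MemoryFlags::kAccessRWX) != kErrorOk)
    return 1;

  static_cast<uint8_t*>(dm.rw)[0] = 0xC3;                   // write through the RW view...
  std::printf("0x%02X\n", static_cast<uint8_t*>(dm.rx)[0]); // ...observe it through the RX view

  VirtMem::releaseDualMapping(&dm, 4096);
  return 0;
}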
+
+ static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+   DebugUtils::unused(impl);
+
+   if (block->hasFlag(JitAllocatorBlock::kFlagDualMapped))
+     VirtMem::releaseDualMapping(&block->_mapping, block->blockSize());
+   else
+     VirtMem::release(block->rxPtr(), block->blockSize());
+
+   ::free(block->_usedBitVector);
+   ::free(block);
+ }
+
+ static void JitAllocatorImpl_insertBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+   JitAllocatorPool* pool = block->pool();
+
+   if (!pool->cursor)
+     pool->cursor = block;
+
+   // Add to RBTree and List.
+   impl->tree.insert(block);
+   pool->blocks.append(block);
+
+   // Update statistics.
+   pool->blockCount++;
+   pool->totalAreaSize += block->areaSize();
+   pool->totalOverheadBytes += sizeof(JitAllocatorBlock) + JitAllocatorImpl_bitVectorSizeToByteSize(block->areaSize()) * 2u;
+ }
+
+ static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+   JitAllocatorPool* pool = block->pool();
+
+   // Remove from RBTree and List.
+   if (pool->cursor == block)
+     pool->cursor = block->hasPrev() ? block->prev() : block->next();
+
+   impl->tree.remove(block);
+   pool->blocks.unlink(block);
+
+   // Update statistics.
+   pool->blockCount--;
+   pool->totalAreaSize -= block->areaSize();
+   pool->totalOverheadBytes -= sizeof(JitAllocatorBlock) + JitAllocatorImpl_bitVectorSizeToByteSize(block->areaSize()) * 2u;
+ }
+
+ static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+   if (block->hasFlag(JitAllocatorBlock::kFlagEmpty))
+     return;
+
+   JitAllocatorPool* pool = block->pool();
+   uint32_t areaSize = block->areaSize();
+   uint32_t granularity = pool->granularity;
+   size_t numBitWords = pool->bitWordCountFromAreaSize(areaSize);
+
+   VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadWrite);
+   if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
+     uint8_t* rwPtr = block->rwPtr();
+     BitVectorRangeIterator<Support::BitWord, 0> it(block->_usedBitVector, pool->bitWordCountFromAreaSize(block->areaSize()));
+
+     size_t rangeStart;
+     size_t rangeEnd;
+
+     while (it.nextRange(&rangeStart, &rangeEnd)) {
+       uint8_t* spanPtr = rwPtr + rangeStart * granularity;
+       size_t spanSize = (rangeEnd - rangeStart) * granularity;
+
+       JitAllocatorImpl_fillPattern(spanPtr, impl->fillPattern, spanSize);
+       VirtMem::flushInstructionCache(spanPtr, spanSize);
+     }
+   }
+   VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadExecute);
+
+   memset(block->_usedBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord));
+   memset(block->_stopBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord));
+
+   block->_areaUsed = 0;
+   block->_largestUnusedArea = areaSize;
+   block->_searchStart = 0;
+   block->_searchEnd = areaSize;
+   block->addFlags(JitAllocatorBlock::kFlagEmpty);
+   block->clearFlags(JitAllocatorBlock::kFlagDirty);
+ }
+
+ // JitAllocator - Construction & Destruction
+ // =========================================
+
+ JitAllocator::JitAllocator(const CreateParams* params) noexcept {
+   _impl = JitAllocatorImpl_new(params);
+   if (ASMJIT_UNLIKELY(!_impl))
+     _impl = const_cast<JitAllocator::Impl*>(&JitAllocatorImpl_none);
+ }
+
+ JitAllocator::~JitAllocator() noexcept {
+   if (_impl == &JitAllocatorImpl_none)
+     return;
+
+   reset(ResetPolicy::kHard);
+   JitAllocatorImpl_destroy(static_cast<JitAllocatorPrivateImpl*>(_impl));
+ }
+
+ // JitAllocator - Reset
+ // ====================
+
+ void JitAllocator::reset(ResetPolicy resetPolicy) noexcept {
+   if (_impl == &JitAllocatorImpl_none)
+     return;
+
+   JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+   impl->tree.reset();
+   size_t poolCount = impl->poolCount;
+
+   for (size_t poolId = 0; poolId < poolCount; poolId++) {
+     JitAllocatorPool& pool = impl->pools[poolId];
+     JitAllocatorBlock* block = pool.blocks.first();
+
+     JitAllocatorBlock* blockToKeep = nullptr;
+     if (resetPolicy != ResetPolicy::kHard && uint32_t(impl->options & JitAllocatorOptions::kImmediateRelease) == 0) {
+       blockToKeep = block;
+       block = block->next();
+     }
+
+     while (block) {
+       JitAllocatorBlock* next = block->next();
+       JitAllocatorImpl_deleteBlock(impl, block);
+       block = next;
+     }
+
+     pool.reset();
+
+     if (blockToKeep) {
+       blockToKeep->_listNodes[0] = nullptr;
+       blockToKeep->_listNodes[1] = nullptr;
+       JitAllocatorImpl_wipeOutBlock(impl, blockToKeep);
+       JitAllocatorImpl_insertBlock(impl, blockToKeep);
+       pool.emptyBlockCount = 1;
+     }
+   }
+ }
+
+ // JitAllocator - Statistics
+ // =========================
+
+ JitAllocator::Statistics JitAllocator::statistics() const noexcept {
+   Statistics statistics;
+   statistics.reset();
+
+   if (ASMJIT_LIKELY(_impl != &JitAllocatorImpl_none)) {
+     JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+     LockGuard guard(impl->lock);
+
+     size_t poolCount = impl->poolCount;
+     for (size_t poolId = 0; poolId < poolCount; poolId++) {
+       const JitAllocatorPool& pool = impl->pools[poolId];
+       statistics._blockCount += size_t(pool.blockCount);
+       statistics._reservedSize += size_t(pool.totalAreaSize) * pool.granularity;
+       statistics._usedSize += size_t(pool.totalAreaUsed) * pool.granularity;
+       statistics._overheadSize += size_t(pool.totalOverheadBytes);
+     }
+
+     statistics._allocationCount = impl->allocationCount;
+   }
+
+   return statistics;
+ }
+
+ // JitAllocator - Alloc & Release
+ // ==============================
+
+ Error JitAllocator::alloc(void** rxPtrOut, void** rwPtrOut, size_t size) noexcept {
+   if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
+     return DebugUtils::errored(kErrorNotInitialized);
+
+   JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+   constexpr uint32_t kNoIndex = std::numeric_limits<uint32_t>::max();
+
+   *rxPtrOut = nullptr;
+   *rwPtrOut = nullptr;
+
+   // Align to the minimum granularity by default.
+   size = Support::alignUp<size_t>(size, impl->granularity);
+   if (ASMJIT_UNLIKELY(size == 0))
+     return DebugUtils::errored(kErrorInvalidArgument);
+
+   if (ASMJIT_UNLIKELY(size > std::numeric_limits<uint32_t>::max() / 2))
+     return DebugUtils::errored(kErrorTooLarge);
+
+   LockGuard guard(impl->lock);
+   JitAllocatorPool* pool = &impl->pools[JitAllocatorImpl_sizeToPoolId(impl, size)];
+
+   uint32_t areaIndex = kNoIndex;
+   uint32_t areaSize = uint32_t(pool->areaSizeFromByteSize(size));
+
+   // Try to find the requested memory area in existing blocks.
+   JitAllocatorBlock* block = pool->blocks.first();
+   if (block) {
+     JitAllocatorBlock* initial = block;
+     do {
+       JitAllocatorBlock* next = block->hasNext() ? block->next() : pool->blocks.first();
+       if (block->areaAvailable() >= areaSize) {
+         if (block->isDirty() || block->largestUnusedArea() >= areaSize) {
+           BitVectorRangeIterator<Support::BitWord, 0> it(block->_usedBitVector, pool->bitWordCountFromAreaSize(block->areaSize()), block->_searchStart, block->_searchEnd);
+
+           size_t rangeStart = 0;
+           size_t rangeEnd = block->areaSize();
+
+           size_t searchStart = SIZE_MAX;
+           size_t largestArea = 0;
+
+           while (it.nextRange(&rangeStart, &rangeEnd, areaSize)) {
+             size_t rangeSize = rangeEnd - rangeStart;
+             if (rangeSize >= areaSize) {
+               areaIndex = uint32_t(rangeStart);
+               break;
+             }
+
+             searchStart = Support::min(searchStart, rangeStart);
+             largestArea = Support::max(largestArea, rangeSize);
+           }
+
+           if (areaIndex != kNoIndex)
+             break;
+
+           if (searchStart != SIZE_MAX) {
+             // Because we have iterated over the entire block, we can now mark the
+             // largest unused area that can be used to cache the next traversal.
+             size_t searchEnd = rangeEnd;
+
+             block->_searchStart = uint32_t(searchStart);
+             block->_searchEnd = uint32_t(searchEnd);
+             block->_largestUnusedArea = uint32_t(largestArea);
+             block->clearFlags(JitAllocatorBlock::kFlagDirty);
+           }
+         }
+       }
+
+       block = next;
+     } while (block != initial);
+   }
+
+   // Allocate a new block if there is no region of the required size.
+   if (areaIndex == kNoIndex) {
+     size_t blockSize = JitAllocatorImpl_calculateIdealBlockSize(impl, pool, size);
+     if (ASMJIT_UNLIKELY(!blockSize))
+       return DebugUtils::errored(kErrorOutOfMemory);
+
+     block = JitAllocatorImpl_newBlock(impl, pool, blockSize);
+     areaIndex = 0;
+
+     if (ASMJIT_UNLIKELY(!block))
+       return DebugUtils::errored(kErrorOutOfMemory);
+
+     JitAllocatorImpl_insertBlock(impl, block);
+     block->_searchStart = areaSize;
+     block->_largestUnusedArea = block->areaSize() - areaSize;
+   }
+   else if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) {
+     pool->emptyBlockCount--;
+     block->clearFlags(JitAllocatorBlock::kFlagEmpty);
+   }
+
+   // Update statistics.
+   impl->allocationCount++;
+   block->markAllocatedArea(areaIndex, areaIndex + areaSize);
+
+   // Return a pointer to the allocated memory.
+   size_t offset = pool->byteSizeFromAreaSize(areaIndex);
+   ASMJIT_ASSERT(offset <= block->blockSize() - size);
+
+   *rxPtrOut = block->rxPtr() + offset;
+   *rwPtrOut = block->rwPtr() + offset;
+   return kErrorOk;
+ }
+
+ Error JitAllocator::release(void* rxPtr) noexcept {
+   if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
+     return DebugUtils::errored(kErrorNotInitialized);
+
+   if (ASMJIT_UNLIKELY(!rxPtr))
+     return DebugUtils::errored(kErrorInvalidArgument);
+
+   JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+   LockGuard guard(impl->lock);
+
+   JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rxPtr));
+   if (ASMJIT_UNLIKELY(!block))
+     return DebugUtils::errored(kErrorInvalidState);
+
+   // Offset relative to the start of the block.
+   JitAllocatorPool* pool = block->pool();
+   size_t offset = (size_t)((uint8_t*)rxPtr - block->rxPtr());
+
+   // The first bit of the allocated area; its end (and thus its size) is found via the stop bit-vector.
+   uint32_t areaIndex = uint32_t(offset >> pool->granularityLog2);
+   uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaIndex, true)) + 1;
+   uint32_t areaSize = areaEnd - areaIndex;
+
+   impl->allocationCount--;
+   block->markReleasedArea(areaIndex, areaEnd);
+
+   // Fill the released memory if the secure mode is enabled.
+   if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
+     uint8_t* spanPtr = block->rwPtr() + areaIndex * pool->granularity;
+     size_t spanSize = areaSize * pool->granularity;
+
+     VirtMem::ProtectJitReadWriteScope scope(spanPtr, spanSize);
+     JitAllocatorImpl_fillPattern(spanPtr, impl->fillPattern, spanSize);
+   }
+
+   // Release the whole block if it became empty.
+   if (block->areaUsed() == 0) {
+     if (pool->emptyBlockCount || Support::test(impl->options, JitAllocatorOptions::kImmediateRelease)) {
+       JitAllocatorImpl_removeBlock(impl, block);
+       JitAllocatorImpl_deleteBlock(impl, block);
+     }
+     else {
+       pool->emptyBlockCount++;
+     }
+   }
+
+   return kErrorOk;
+ }
+
+ Error JitAllocator::shrink(void* rxPtr, size_t newSize) noexcept {
+   if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
+     return DebugUtils::errored(kErrorNotInitialized);
+
+   if (ASMJIT_UNLIKELY(!rxPtr))
+     return DebugUtils::errored(kErrorInvalidArgument);
+
+   if (ASMJIT_UNLIKELY(newSize == 0))
+     return release(rxPtr);
+
+   JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+   LockGuard guard(impl->lock);
+   JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rxPtr));
+
+   if (ASMJIT_UNLIKELY(!block))
+     return DebugUtils::errored(kErrorInvalidArgument);
+
+   // Offset relative to the start of the block.
+   JitAllocatorPool* pool = block->pool();
+   size_t offset = (size_t)((uint8_t*)rxPtr - block->rxPtr());
+
+   // The first bit of the allocated area; its end is found via the stop bit-vector.
+   uint32_t areaStart = uint32_t(offset >> pool->granularityLog2);
+   uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1;
+
+   uint32_t areaPrevSize = areaEnd - areaStart;
+   uint32_t areaShrunkSize = pool->areaSizeFromByteSize(newSize);
+
+   if (ASMJIT_UNLIKELY(areaShrunkSize > areaPrevSize))
+     return DebugUtils::errored(kErrorInvalidState);
+
+   uint32_t areaDiff = areaPrevSize - areaShrunkSize;
+   if (areaDiff) {
+     block->markShrunkArea(areaStart + areaShrunkSize, areaEnd);
+
+     // Fill released memory if the secure mode is enabled.
+     if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory))
+       JitAllocatorImpl_fillPattern(block->rwPtr() + (areaStart + areaShrunkSize) * pool->granularity, fillPattern(), areaDiff * pool->granularity);
+   }
+
+   return kErrorOk;
+ }
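Together, alloc(), release(), and shrink() are the allocator's whole public surface. A minimal usage sketch against this API (the single 'ret' byte is a hypothetical stand-in for real generated code; ProtectJitReadWriteScope, used the same way above, makes the RW view writable on hardened targets):

#include <asmjit/core.h>
#include <cstring>
using namespace asmjit;

int main() {
  JitAllocator allocator(nullptr); // nullptr -> defaults, as handled in JitAllocatorImpl_new()
  void* rx = nullptr;              // read+execute view, handed out to callers
  void* rw = nullptr;              // read+write view, used to copy code in

  if (allocator.alloc(&rx, &rw, 64) != kErrorOk)
    return 1;

  const uint8_t code[] = { 0xC3 }; // hypothetical payload: x86 'ret'
  {
    VirtMem::ProtectJitReadWriteScope scope(rw, sizeof(code));
    memcpy(rw, code, sizeof(code));
  }
  VirtMem::flushInstructionCache(rx, sizeof(code));

  // ... the code at `rx` is now executable ...

  return allocator.release(rx) == kErrorOk ? 0 : 1; // release by the RX pointer
}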
+
+ // JitAllocator - Tests
+ // ====================
+
+ #if defined(ASMJIT_TEST)
+ // A pseudo random number generator based on a paper by Sebastiano Vigna:
+ // http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf
+ class Random {
+ public:
+   // Constants suggested as `23/18/5`.
+   enum Steps : uint32_t {
+     kStep1_SHL = 23,
+     kStep2_SHR = 18,
+     kStep3_SHR = 5
+   };
+
+   inline explicit Random(uint64_t seed = 0) noexcept { reset(seed); }
+   inline Random(const Random& other) noexcept = default;
+
+   inline void reset(uint64_t seed = 0) noexcept {
+     // The number is arbitrary, it means nothing.
+     constexpr uint64_t kZeroSeed = 0x1F0A2BE71D163FA0u;
+
+     // Generate the state data by using splitmix64.
+     for (uint32_t i = 0; i < 2; i++) {
+       seed += 0x9E3779B97F4A7C15u;
+       uint64_t x = seed;
+       x = (x ^ (x >> 30)) * 0xBF58476D1CE4E5B9u;
+       x = (x ^ (x >> 27)) * 0x94D049BB133111EBu;
+       x = (x ^ (x >> 31));
+       _state[i] = x != 0 ? x : kZeroSeed;
+     }
+   }
+
+   inline uint32_t nextUInt32() noexcept {
+     return uint32_t(nextUInt64() >> 32);
+   }
+
+   inline uint64_t nextUInt64() noexcept {
+     uint64_t x = _state[0];
+     uint64_t y = _state[1];
+
+     x ^= x << kStep1_SHL;
+     y ^= y >> kStep3_SHR;
+     x ^= x >> kStep2_SHR;
+     x ^= y;
+
+     _state[0] = y;
+     _state[1] = x;
+     return x + y;
+   }
+
+   uint64_t _state[2];
+ };
+
+ // Helper class to verify that JitAllocator doesn't return addresses that overlap.
+ class JitAllocatorWrapper {
+ public:
+   // Address to a memory region of a given size.
+   class Range {
+   public:
+     inline Range(uint8_t* addr, size_t size) noexcept
+       : addr(addr),
+         size(size) {}
+     uint8_t* addr;
+     size_t size;
+   };
+
+   // Based on JitAllocator::Block, serves our purpose well...
+   class Record : public ZoneTreeNodeT<Record>,
+                  public Range {
+   public:
+     inline Record(uint8_t* addr, size_t size)
+       : ZoneTreeNodeT<Record>(),
+         Range(addr, size) {}
+
+     inline bool operator<(const Record& other) const noexcept { return addr < other.addr; }
+     inline bool operator>(const Record& other) const noexcept { return addr > other.addr; }
+
+     inline bool operator<(const uint8_t* key) const noexcept { return addr + size <= key; }
+     inline bool operator>(const uint8_t* key) const noexcept { return addr > key; }
+   };
+
+   Zone _zone;
+   ZoneAllocator _heap;
+   ZoneTree<Record> _records;
+   JitAllocator _allocator;
+
+   explicit JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept
+     : _zone(1024 * 1024),
+       _heap(&_zone),
+       _allocator(params) {}
+
+   void _insert(void* p_, size_t size) noexcept {
+     uint8_t* p = static_cast<uint8_t*>(p_);
+     uint8_t* pEnd = p + size - 1;
+
+     Record* record;
+
+     record = _records.get(p);
+     if (record)
+       EXPECT(record == nullptr, "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);
+
+     record = _records.get(pEnd);
+     if (record)
+       EXPECT(record == nullptr, "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);
+
+     record = _heap.newT<Record>(p, size);
+     EXPECT(record != nullptr, "Out of memory, cannot allocate 'Record'");
+
+     _records.insert(record);
+   }
+
+   void _remove(void* p) noexcept {
+     Record* record = _records.get(static_cast<uint8_t*>(p));
+     EXPECT(record != nullptr, "Address [%p] doesn't exist\n", p);
+
+     _records.remove(record);
+     _heap.release(record, sizeof(Record));
+   }
+
+   void* alloc(size_t size) noexcept {
+     void* rxPtr;
+     void* rwPtr;
+
+     Error err = _allocator.alloc(&rxPtr, &rwPtr, size);
+     EXPECT(err == kErrorOk, "JitAllocator failed to allocate %zu bytes\n", size);
+
+     _insert(rxPtr, size);
+     return rxPtr;
+   }
+
+   void release(void* p) noexcept {
+     _remove(p);
+     EXPECT(_allocator.release(p) == kErrorOk, "JitAllocator failed to release '%p'\n", p);
+   }
+
+   void shrink(void* p, size_t newSize) noexcept {
+     Record* record = _records.get(static_cast<uint8_t*>(p));
+     EXPECT(record != nullptr, "Address [%p] doesn't exist\n", p);
+
+     if (!newSize)
+       return release(p);
+
+     Error err = _allocator.shrink(p, newSize);
+     EXPECT(err == kErrorOk, "JitAllocator failed to shrink %p to %zu bytes\n", p, newSize);
+
+     record->size = newSize;
+   }
+ };
+
+ static void JitAllocatorTest_shuffle(void** ptrArray, size_t count, Random& prng) noexcept {
+   for (size_t i = 0; i < count; ++i)
+     std::swap(ptrArray[i], ptrArray[size_t(prng.nextUInt32() % count)]);
+ }
+
+ static void JitAllocatorTest_usage(JitAllocator& allocator) noexcept {
+   JitAllocator::Statistics stats = allocator.statistics();
+   INFO("    Block Count       : %9llu [Blocks]"        , (unsigned long long)(stats.blockCount()));
+   INFO("    Reserved (VirtMem): %9llu [Bytes]"         , (unsigned long long)(stats.reservedSize()));
+   INFO("    Used     (VirtMem): %9llu [Bytes] (%.1f%%)", (unsigned long long)(stats.usedSize()), stats.usedSizeAsPercent());
+   INFO("    Overhead (HeapMem): %9llu [Bytes] (%.1f%%)", (unsigned long long)(stats.overheadSize()), stats.overheadSizeAsPercent());
+ }
+
+ template<typename T, size_t kPatternSize, bool Bit>
+ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcept {
+   for (size_t i = 0; i < count; i++) {
+     T in[kPatternSize];
+     T out[kPatternSize];
+
+     for (size_t j = 0; j < kPatternSize; j++) {
+       in[j] = T(uint64_t(rnd.nextUInt32() & 0xFFu) * 0x0101010101010101);
+       out[j] = Bit == 0 ? Support::allOnes<T>() : T(0);
+     }
+
+     {
+       BitVectorRangeIterator<T, Bit> it(in, kPatternSize);
+       size_t rangeStart, rangeEnd;
+       while (it.nextRange(&rangeStart, &rangeEnd)) {
+         if (Bit)
+           Support::bitVectorFill(out, rangeStart, rangeEnd - rangeStart);
+         else
+           Support::bitVectorClear(out, rangeStart, rangeEnd - rangeStart);
+       }
+     }
+
+     for (size_t j = 0; j < kPatternSize; j++) {
+       EXPECT(in[j] == out[j], "Invalid pattern detected at [%zu] (%llX != %llX)", j, (unsigned long long)in[j], (unsigned long long)out[j]);
+     }
+   }
+ }
+
+ UNIT(jit_allocator) {
+   size_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 100000;
+
+   struct TestParams {
+     const char* name;
+     JitAllocatorOptions options;
+     uint32_t blockSize;
+     uint32_t granularity;
+   };
+
+   static TestParams testParams[] = {
+     { "Default", JitAllocatorOptions::kNone, 0, 0 },
+     { "16MB blocks", JitAllocatorOptions::kNone, 16 * 1024 * 1024, 0 },
+     { "256B granularity", JitAllocatorOptions::kNone, 0, 256 },
+     { "kUseDualMapping", JitAllocatorOptions::kUseDualMapping, 0, 0 },
+     { "kUseMultiplePools", JitAllocatorOptions::kUseMultiplePools, 0, 0 },
+     { "kFillUnusedMemory", JitAllocatorOptions::kFillUnusedMemory, 0, 0 },
+     { "kImmediateRelease", JitAllocatorOptions::kImmediateRelease, 0, 0 },
+     { "kUseDualMapping | kFillUnusedMemory", JitAllocatorOptions::kUseDualMapping | JitAllocatorOptions::kFillUnusedMemory, 0, 0 }
+   };
+
+   INFO("BitVectorRangeIterator<uint32_t>");
+   {
+     Random rnd;
+     BitVectorRangeIterator_testRandom<uint32_t, 64, 0>(rnd, kCount);
+   }
+
+   INFO("BitVectorRangeIterator<uint64_t>");
+   {
+     Random rnd;
+     BitVectorRangeIterator_testRandom<uint64_t, 64, 0>(rnd, kCount);
+   }
+
+   for (uint32_t testId = 0; testId < ASMJIT_ARRAY_SIZE(testParams); testId++) {
+     INFO("JitAllocator(%s)", testParams[testId].name);
+
+     JitAllocator::CreateParams params {};
+     params.options = testParams[testId].options;
+     params.blockSize = testParams[testId].blockSize;
+     params.granularity = testParams[testId].granularity;
+
+     size_t fixedBlockSize = 256;
+
+     JitAllocatorWrapper wrapper(&params);
+     Random prng(100);
+
+     size_t i;
+
+     INFO("  Memory alloc/release test - %zu allocations", kCount);
+
+     void** ptrArray = (void**)::malloc(sizeof(void*) * size_t(kCount));
+     EXPECT(ptrArray != nullptr,
+            "Couldn't allocate '%u' bytes for pointer-array", unsigned(sizeof(void*) * size_t(kCount)));
+
+     // Random blocks tests...
+     INFO("  Allocating random blocks...");
+     for (i = 0; i < kCount; i++)
+       ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Releasing all allocated blocks from the beginning...");
+     for (i = 0; i < kCount; i++)
+       wrapper.release(ptrArray[i]);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Allocating random blocks again...");
+     for (i = 0; i < kCount; i++)
+       ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Shuffling allocated blocks...");
+     JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng);
+
+     INFO("  Releasing 50%% of allocated blocks...");
+     for (i = 0; i < kCount / 2; i++)
+       wrapper.release(ptrArray[i]);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Allocating 50%% more blocks again...");
+     for (i = 0; i < kCount / 2; i++)
+       ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Releasing all allocated blocks from the end...");
+     for (i = 0; i < kCount; i++)
+       wrapper.release(ptrArray[kCount - i - 1]);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     // Fixed blocks tests...
+     INFO("  Allocating %zuB blocks...", fixedBlockSize);
+     for (i = 0; i < kCount / 2; i++)
+       ptrArray[i] = wrapper.alloc(fixedBlockSize);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Shrinking each %zuB block to 1 byte", fixedBlockSize);
+     for (i = 0; i < kCount / 2; i++)
+       wrapper.shrink(ptrArray[i], 1);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Allocating more 64B blocks...");
+     for (i = kCount / 2; i < kCount; i++)
+       ptrArray[i] = wrapper.alloc(64);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Releasing all blocks from the beginning...");
+     for (i = 0; i < kCount; i++)
+       wrapper.release(ptrArray[i]);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Allocating %zuB blocks...", fixedBlockSize);
+     for (i = 0; i < kCount; i++)
+       ptrArray[i] = wrapper.alloc(fixedBlockSize);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Shuffling allocated blocks...");
+     JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng);
+
+     INFO("  Releasing 50%% of allocated blocks...");
+     for (i = 0; i < kCount / 2; i++)
+       wrapper.release(ptrArray[i]);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Allocating 50%% more %zuB blocks again...", fixedBlockSize);
+     for (i = 0; i < kCount / 2; i++)
+       ptrArray[i] = wrapper.alloc(fixedBlockSize);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     INFO("  Releasing all allocated blocks from the end...");
+     for (i = 0; i < kCount; i++)
+       wrapper.release(ptrArray[kCount - i - 1]);
+     JitAllocatorTest_usage(wrapper._allocator);
+
+     ::free(ptrArray);
+   }
+ }
+ #endif
+
+ ASMJIT_END_NAMESPACE
+
+ #endif