koffi 2.1.0 → 2.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/ChangeLog.md +7 -1
  2. package/build/qemu/2.1.1/koffi_darwin_arm64.tar.gz +0 -0
  3. package/build/qemu/2.1.1/koffi_darwin_x64.tar.gz +0 -0
  4. package/build/qemu/2.1.1/koffi_freebsd_arm64.tar.gz +0 -0
  5. package/build/qemu/2.1.1/koffi_freebsd_ia32.tar.gz +0 -0
  6. package/build/qemu/2.1.1/koffi_freebsd_x64.tar.gz +0 -0
  7. package/build/qemu/2.1.1/koffi_linux_arm32hf.tar.gz +0 -0
  8. package/build/qemu/2.1.1/koffi_linux_arm64.tar.gz +0 -0
  9. package/build/qemu/2.1.1/koffi_linux_ia32.tar.gz +0 -0
  10. package/build/qemu/2.1.1/koffi_linux_riscv64hf64.tar.gz +0 -0
  11. package/build/qemu/2.1.1/koffi_linux_x64.tar.gz +0 -0
  12. package/build/qemu/2.1.1/koffi_openbsd_ia32.tar.gz +0 -0
  13. package/build/qemu/2.1.1/koffi_openbsd_x64.tar.gz +0 -0
  14. package/build/qemu/2.1.1/koffi_win32_arm64.tar.gz +0 -0
  15. package/build/qemu/2.1.1/koffi_win32_ia32.tar.gz +0 -0
  16. package/build/qemu/2.1.1/koffi_win32_x64.tar.gz +0 -0
  17. package/package.json +2 -2
  18. package/src/call.cc +2 -2
  19. package/src/call.hh +1 -1
  20. package/vendor/libcc/libcc.cc +62 -62
  21. package/vendor/libcc/libcc.hh +81 -29
  22. package/build/qemu/2.1.0/koffi_darwin_arm64.tar.gz +0 -0
  23. package/build/qemu/2.1.0/koffi_darwin_x64.tar.gz +0 -0
  24. package/build/qemu/2.1.0/koffi_freebsd_arm64.tar.gz +0 -0
  25. package/build/qemu/2.1.0/koffi_freebsd_ia32.tar.gz +0 -0
  26. package/build/qemu/2.1.0/koffi_freebsd_x64.tar.gz +0 -0
  27. package/build/qemu/2.1.0/koffi_linux_arm32hf.tar.gz +0 -0
  28. package/build/qemu/2.1.0/koffi_linux_arm64.tar.gz +0 -0
  29. package/build/qemu/2.1.0/koffi_linux_ia32.tar.gz +0 -0
  30. package/build/qemu/2.1.0/koffi_linux_riscv64hf64.tar.gz +0 -0
  31. package/build/qemu/2.1.0/koffi_linux_x64.tar.gz +0 -0
  32. package/build/qemu/2.1.0/koffi_openbsd_ia32.tar.gz +0 -0
  33. package/build/qemu/2.1.0/koffi_openbsd_x64.tar.gz +0 -0
  34. package/build/qemu/2.1.0/koffi_win32_arm64.tar.gz +0 -0
  35. package/build/qemu/2.1.0/koffi_win32_ia32.tar.gz +0 -0
  36. package/build/qemu/2.1.0/koffi_win32_x64.tar.gz +0 -0
package/ChangeLog.md CHANGED
@@ -2,12 +2,18 @@
2
2
 
3
3
  ## History
4
4
 
5
+ ### Koffi 2.1.1
6
+
7
+ **Main fixes:**
8
+
9
+ - Fix potential memory allocation bugs
10
+
5
11
  ### Koffi 2.1.0
6
12
 
7
13
  **Main changes:**
8
14
 
9
15
  - Add [koffi.as()](functions.md#polymorphic-parameters) to support polymorphic APIs based on `void *` parameters
10
- - Add [endian-sensitive integer types](types.md#endian-sensitive-types): `intX_le_t`, `intX_be_t`
16
+ - Add [endian-sensitive integer types](types.md#endian-sensitive-types): `intX_le_t`, `intX_be_t`, `uintX_le_t`, `uintX_be_t`
11
17
  - Accept typed arrays for `void *` parameters
12
18
  - Introduce `koffi.opaque()` to replace `koffi.handle()` (which remains supported until Koffi 3.0)
13
19
  - Support JS Array and TypedArray to fill struct and array pointer members
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "koffi",
3
- "version": "2.1.0",
4
- "stable": "2.1.0",
3
+ "version": "2.1.1",
4
+ "stable": "2.1.1",
5
5
  "description": "Fast and simple C FFI (foreign function interface) for Node.js",
6
6
  "keywords": [
7
7
  "foreign",
package/src/call.cc CHANGED
@@ -70,7 +70,7 @@ bool CallData::PushString(Napi::Value value, const char **out_str)
70
70
  status = napi_get_value_string_utf8(env, value, nullptr, 0, &len);
71
71
  RG_ASSERT(status == napi_ok);
72
72
 
73
- buf = AllocateMemory<char>(&call_alloc, (Size)len + 1);
73
+ buf = AllocateSpan<char>(&call_alloc, (Size)len + 1);
74
74
 
75
75
  status = napi_get_value_string_utf8(env, value, buf.ptr, (size_t)buf.len, &len);
76
76
  RG_ASSERT(status == napi_ok);
@@ -109,7 +109,7 @@ bool CallData::PushString16(Napi::Value value, const char16_t **out_str16)
109
109
  status = napi_get_value_string_utf16(env, value, nullptr, 0, &len);
110
110
  RG_ASSERT(status == napi_ok);
111
111
 
112
- buf = AllocateMemory<char16_t>(&call_alloc, ((Size)len + 1) * 2);
112
+ buf = AllocateSpan<char16_t>(&call_alloc, ((Size)len + 1) * 2);
113
113
 
114
114
  status = napi_get_value_string_utf16(env, value, buf.ptr, (size_t)buf.len, &len);
115
115
  RG_ASSERT(status == napi_ok);
package/src/call.hh CHANGED
@@ -161,7 +161,7 @@ inline T *CallData::AllocHeap(Size size, Size align)
161
161
  int flags = 0;
162
162
  #endif
163
163
 
164
- ptr = AllocateMemory<uint8_t>(&call_alloc, size + align, flags).ptr;
164
+ ptr = (uint8_t *)AllocateRaw(&call_alloc, size + align, flags);
165
165
  ptr = AlignUp(ptr, align);
166
166
 
167
167
  return ptr;
@@ -148,13 +148,13 @@ protected:
148
148
  return ptr;
149
149
  }
150
150
 
151
- void Resize(void **ptr, Size old_size, Size new_size, unsigned int flags) override
151
+ void *Resize(void *ptr, Size old_size, Size new_size, unsigned int flags) override
152
152
  {
153
153
  if (!new_size) {
154
- Release(*ptr, old_size);
155
- *ptr = nullptr;
154
+ Release(ptr, old_size);
155
+ ptr = nullptr;
156
156
  } else {
157
- void *new_ptr = realloc(*ptr, (size_t)new_size);
157
+ void *new_ptr = realloc(ptr, (size_t)new_size);
158
158
  RG_CRITICAL(new_ptr || !new_size, "Failed to resize %1 memory block to %2",
159
159
  FmtMemSize(old_size), FmtMemSize(new_size));
160
160
 
@@ -162,8 +162,10 @@ protected:
162
162
  memset_safe((uint8_t *)new_ptr + old_size, 0, (size_t)(new_size - old_size));
163
163
  }
164
164
 
165
- *ptr = new_ptr;
165
+ ptr = new_ptr;
166
166
  }
167
+
168
+ return ptr;
167
169
  }
168
170
 
169
171
  void Release(void *ptr, Size) override
@@ -175,7 +177,7 @@ protected:
175
177
  class NullAllocator: public Allocator {
176
178
  protected:
177
179
  void *Allocate(Size, unsigned int) override { RG_UNREACHABLE(); }
178
- void Resize(void **, Size, Size, unsigned int) override { RG_UNREACHABLE(); }
180
+ void *Resize(void *, Size, Size, unsigned int) override { RG_UNREACHABLE(); }
179
181
  void Release(void *, Size) override {}
180
182
  };
181
183
 
@@ -205,7 +207,7 @@ void LinkedAllocator::ReleaseAll()
205
207
  Node *head = list.next;
206
208
  while (head) {
207
209
  Node *next = head->next;
208
- ReleaseMemory(allocator, head, -1);
210
+ ReleaseRaw(allocator, head, -1);
209
211
  head = next;
210
212
  }
211
213
  list = {};
@@ -213,7 +215,7 @@ void LinkedAllocator::ReleaseAll()
213
215
 
214
216
  void *LinkedAllocator::Allocate(Size size, unsigned int flags)
215
217
  {
216
- Bucket *bucket = AllocateMemory<Bucket>(allocator, RG_SIZE(*bucket) + size, flags).ptr;
218
+ Bucket *bucket = (Bucket *)AllocateRaw(allocator, RG_SIZE(Node) + size, flags);
217
219
 
218
220
  if (list.prev) {
219
221
  list.prev->next = &bucket->head;
@@ -227,26 +229,21 @@ void *LinkedAllocator::Allocate(Size size, unsigned int flags)
227
229
  bucket->head.next = nullptr;
228
230
  }
229
231
 
230
- uint8_t *data = bucket->data;
231
- uint8_t *aligned = AlignUp(data, 16);
232
-
233
- RG_ASSERT(aligned - data <= 8);
234
-
235
- return (void *)aligned;
232
+ return (void *)bucket->data;
236
233
  }
237
234
 
238
- void LinkedAllocator::Resize(void **ptr, Size old_size, Size new_size, unsigned int flags)
235
+ void *LinkedAllocator::Resize(void *ptr, Size old_size, Size new_size, unsigned int flags)
239
236
  {
240
- if (!*ptr) {
241
- *ptr = Allocate(new_size, flags);
237
+ if (!ptr) {
238
+ ptr = Allocate(new_size, flags);
242
239
  } else if (!new_size) {
243
- Release(*ptr, old_size);
244
- *ptr = nullptr;
240
+ Release(ptr, old_size);
241
+ ptr = nullptr;
245
242
  } else {
246
- Bucket *bucket = PointerToBucket(*ptr);
243
+ Bucket *bucket = PointerToBucket(ptr);
247
244
 
248
- bucket = ResizeMemory(allocator, bucket, RG_SIZE(*bucket) + old_size,
249
- RG_SIZE(*bucket) + new_size, flags).ptr;
245
+ bucket = (Bucket *)ResizeRaw(allocator, bucket, RG_SIZE(Node) + old_size,
246
+ RG_SIZE(Node) + new_size, flags);
250
247
 
251
248
  if (bucket->head.next) {
252
249
  bucket->head.next->prev = &bucket->head;
@@ -259,13 +256,10 @@ void LinkedAllocator::Resize(void **ptr, Size old_size, Size new_size, unsigned
259
256
  list.next = &bucket->head;
260
257
  }
261
258
 
262
- uint8_t *data = bucket->data;
263
- uint8_t *aligned = AlignUp(data, 16);
264
-
265
- RG_ASSERT(aligned - data <= 8);
266
-
267
- *ptr = (void *)aligned;
259
+ ptr = (void *)bucket->data;
268
260
  }
261
+
262
+ return ptr;
269
263
  }
270
264
 
271
265
  void LinkedAllocator::Release(void *ptr, Size size)
@@ -284,10 +278,16 @@ void LinkedAllocator::Release(void *ptr, Size size)
284
278
  list.next = bucket->head.next;
285
279
  }
286
280
 
287
- ReleaseMemory(allocator, bucket, size);
281
+ ReleaseRaw(allocator, bucket, RG_SIZE(Bucket) + size);
288
282
  }
289
283
  }
290
284
 
285
+ LinkedAllocator::Bucket *LinkedAllocator::PointerToBucket(void *ptr)
286
+ {
287
+ uint8_t *data = (uint8_t *)ptr;
288
+ return (Bucket *)(data - RG_OFFSET_OF(Bucket, data));
289
+ }
290
+
291
291
  void *BlockAllocatorBase::Allocate(Size size, unsigned int flags)
292
292
  {
293
293
  RG_ASSERT(size >= 0);
@@ -298,15 +298,13 @@ void *BlockAllocatorBase::Allocate(Size size, unsigned int flags)
298
298
  Size aligned_size = AlignLen(size, 8);
299
299
 
300
300
  if (AllocateSeparately(aligned_size)) {
301
- uint8_t *ptr = AllocateMemory<uint8_t>(alloc, size, flags).ptr;
301
+ uint8_t *ptr = (uint8_t *)AllocateRaw(alloc, size, flags);
302
302
  return ptr;
303
303
  } else {
304
304
  if (!current_bucket || (current_bucket->used + aligned_size) > block_size) {
305
- current_bucket = AllocateMemory<Bucket>(alloc, RG_SIZE(Bucket) + block_size,
306
- flags & ~(int)Allocator::Flag::Zero).ptr;
307
- current_bucket->used = AlignUp(current_bucket->data, 16) - current_bucket->data;
308
-
309
- RG_ASSERT(current_bucket->used <= 8);
305
+ current_bucket = (Bucket *)AllocateRaw(alloc, RG_SIZE(Bucket) + block_size,
306
+ flags & ~(int)Allocator::Flag::Zero);
307
+ current_bucket->used = 0;
310
308
  }
311
309
 
312
310
  uint8_t *ptr = current_bucket->data + current_bucket->used;
@@ -321,15 +319,16 @@ void *BlockAllocatorBase::Allocate(Size size, unsigned int flags)
321
319
  }
322
320
  }
323
321
 
324
- void BlockAllocatorBase::Resize(void **ptr, Size old_size, Size new_size, unsigned int flags)
322
+ void *BlockAllocatorBase::Resize(void *ptr, Size old_size, Size new_size, unsigned int flags)
325
323
  {
326
324
  RG_ASSERT(old_size >= 0);
327
325
  RG_ASSERT(new_size >= 0);
328
326
 
329
327
  if (!new_size) {
330
- Release(*ptr, old_size);
328
+ Release(ptr, old_size);
329
+ ptr = nullptr;
331
330
  } else {
332
- if (!*ptr) {
331
+ if (!ptr) {
333
332
  old_size = 0;
334
333
  }
335
334
 
@@ -338,32 +337,34 @@ void BlockAllocatorBase::Resize(void **ptr, Size old_size, Size new_size, unsign
338
337
  Size aligned_delta = aligned_new_size - aligned_old_size;
339
338
 
340
339
  // Try fast path
341
- if (*ptr && *ptr == last_alloc &&
340
+ if (ptr && ptr == last_alloc &&
342
341
  (current_bucket->used + aligned_delta) <= block_size &&
343
342
  !AllocateSeparately(aligned_new_size)) {
344
343
  current_bucket->used += aligned_delta;
345
344
 
346
345
  if ((flags & (int)Allocator::Flag::Zero) && new_size > old_size) {
347
- memset_safe(ptr + old_size, 0, new_size - old_size);
346
+ memset_safe((uint8_t *)ptr + old_size, 0, new_size - old_size);
348
347
  }
349
348
  } else if (AllocateSeparately(aligned_old_size)) {
350
349
  LinkedAllocator *alloc = GetAllocator();
351
- ptr = ResizeMemory(alloc, ptr, old_size, new_size, flags).ptr;
350
+ ptr = ResizeRaw(alloc, ptr, old_size, new_size, flags);
352
351
  } else {
353
352
  void *new_ptr = Allocate(new_size, flags & ~(int)Allocator::Flag::Zero);
354
353
  if (new_size > old_size) {
355
- memcpy_safe(new_ptr, *ptr, old_size);
354
+ memcpy_safe(new_ptr, ptr, old_size);
356
355
 
357
356
  if (flags & (int)Allocator::Flag::Zero) {
358
- memset_safe(ptr + old_size, 0, new_size - old_size);
357
+ memset_safe((uint8_t *)ptr + old_size, 0, new_size - old_size);
359
358
  }
360
359
  } else {
361
- memcpy_safe(new_ptr, *ptr, new_size);
360
+ memcpy_safe(new_ptr, ptr, new_size);
362
361
  }
363
362
 
364
- *ptr = new_ptr;
363
+ ptr = new_ptr;
365
364
  }
366
365
  }
366
+
367
+ return ptr;
367
368
  }
368
369
 
369
370
  void BlockAllocatorBase::Release(void *ptr, Size size)
@@ -377,13 +378,15 @@ void BlockAllocatorBase::Release(void *ptr, Size size)
377
378
 
378
379
  if (ptr == last_alloc) {
379
380
  current_bucket->used -= aligned_size;
381
+
380
382
  if (!current_bucket->used) {
381
- ReleaseMemory(alloc, current_bucket, RG_SIZE(Bucket) + block_size);
383
+ ReleaseRaw(alloc, current_bucket, RG_SIZE(Bucket) + block_size);
382
384
  current_bucket = nullptr;
383
385
  }
386
+
384
387
  last_alloc = nullptr;
385
388
  } else if (AllocateSeparately(aligned_size)) {
386
- ReleaseMemory(alloc, ptr, size);
389
+ ReleaseRaw(alloc, ptr, size);
387
390
  }
388
391
  }
389
392
  }
@@ -762,7 +765,7 @@ bool CopyString(Span<const char> str, Span<char> buf)
762
765
 
763
766
  Span<char> DuplicateString(Span<const char> str, Allocator *alloc)
764
767
  {
765
- char *new_str = AllocateMemory<char>(alloc, str.len + 1).ptr;
768
+ char *new_str = (char *)AllocateRaw(alloc, str.len + 1);
766
769
  memcpy_safe(new_str, str.ptr, (size_t)str.len);
767
770
  new_str[str.len] = 0;
768
771
  return MakeSpan(new_str, str.len);
@@ -2771,7 +2774,7 @@ const char *GetApplicationExecutable()
2771
2774
  RG_ASSERT(strlen(path) < RG_SIZE(executable_path));
2772
2775
 
2773
2776
  CopyString(path, executable_path);
2774
- ReleaseMemory(nullptr, (void *)path, -1);
2777
+ ReleaseRaw(nullptr, (void *)path, -1);
2775
2778
  }
2776
2779
  }
2777
2780
 
@@ -3560,8 +3563,8 @@ bool ExecuteCommandLine(const char *cmd_line, FunctionRef<Span<const uint8_t>()>
3560
3563
  STARTUPINFOW si = {};
3561
3564
 
3562
3565
  // Convert command line
3563
- Span<wchar_t> cmd_line_w = AllocateMemory<wchar_t>(nullptr, 4 * strlen(cmd_line) + 2);
3564
- RG_DEFER { ReleaseMemory(nullptr, cmd_line_w.ptr, cmd_line_w.len); };
3566
+ Span<wchar_t> cmd_line_w = AllocateSpan<wchar_t>(nullptr, 2 * strlen(cmd_line) + 1);
3567
+ RG_DEFER { ReleaseSpan(nullptr, cmd_line_w); };
3565
3568
  if (ConvertUtf8ToWin32Wide(cmd_line, cmd_line_w) < 0)
3566
3569
  return false;
3567
3570
 
@@ -5444,7 +5447,7 @@ Fiber::~Fiber()
5444
5447
  fib_run = nullptr;
5445
5448
  }
5446
5449
 
5447
- ReleaseMemory(nullptr, ucp.uc_stack.ss_sp, (Size)ucp.uc_stack.ss_size);
5450
+ ReleaseRaw(nullptr, ucp.uc_stack.ss_sp, (Size)ucp.uc_stack.ss_size);
5448
5451
  }
5449
5452
 
5450
5453
  void Fiber::SwitchTo()
@@ -5726,7 +5729,7 @@ bool StreamReader::Close(bool implicit)
5726
5729
  case CompressionType::Gzip:
5727
5730
  case CompressionType::Zlib: {
5728
5731
  #ifdef MZ_VERSION
5729
- ReleaseMemory(nullptr, compression.u.miniz, RG_SIZE(*compression.u.miniz));
5732
+ ReleaseOne(nullptr, compression.u.miniz);
5730
5733
  compression.u.miniz = nullptr;
5731
5734
  #else
5732
5735
  RG_UNREACHABLE();
@@ -5742,7 +5745,7 @@ bool StreamReader::Close(bool implicit)
5742
5745
  BrotliDecoderDestroyInstance(ctx->state);
5743
5746
  }
5744
5747
 
5745
- ReleaseMemory(nullptr, ctx, RG_SIZE(*ctx));
5748
+ ReleaseOne(nullptr, ctx);
5746
5749
  compression.u.brotli = nullptr;
5747
5750
  }
5748
5751
  #else
@@ -5975,8 +5978,7 @@ bool StreamReader::InitDecompressor(CompressionType type)
5975
5978
  case CompressionType::Gzip:
5976
5979
  case CompressionType::Zlib: {
5977
5980
  #ifdef MZ_VERSION
5978
- compression.u.miniz =
5979
- AllocateMemory<MinizInflateContext>(nullptr, RG_SIZE(MinizInflateContext), (int)Allocator::Flag::Zero).ptr;
5981
+ compression.u.miniz = AllocateOne<MinizInflateContext>(nullptr, (int)Allocator::Flag::Zero);
5980
5982
  tinfl_init(&compression.u.miniz->inflator);
5981
5983
  compression.u.miniz->crc32 = MZ_CRC32_INIT;
5982
5984
  #else
@@ -5988,8 +5990,7 @@ bool StreamReader::InitDecompressor(CompressionType type)
5988
5990
 
5989
5991
  case CompressionType::Brotli: {
5990
5992
  #ifdef BROTLI_DEFAULT_MODE
5991
- compression.u.brotli =
5992
- AllocateMemory<BrotliDecompressContext>(nullptr, RG_SIZE(BrotliDecompressContext), (int)Allocator::Flag::Zero).ptr;
5993
+ compression.u.brotli = AllocateOne<BrotliDecompressContext>(nullptr, (int)Allocator::Flag::Zero);
5993
5994
  compression.u.brotli->state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr);
5994
5995
  #else
5995
5996
  LogError("Brotli decompression not available for '%1'", filename);
@@ -6527,7 +6528,7 @@ bool StreamWriter::Close(bool implicit)
6527
6528
  MinizDeflateContext *ctx = compression.u.miniz;
6528
6529
 
6529
6530
  RG_DEFER {
6530
- ReleaseMemory(nullptr, ctx, RG_SIZE(*ctx));
6531
+ ReleaseOne(nullptr, ctx);
6531
6532
  compression.u.miniz = nullptr;
6532
6533
  };
6533
6534
 
@@ -6665,8 +6666,7 @@ bool StreamWriter::InitCompressor(CompressionType type, CompressionSpeed speed)
6665
6666
  case CompressionType::Gzip:
6666
6667
  case CompressionType::Zlib: {
6667
6668
  #ifdef MZ_VERSION
6668
- compression.u.miniz =
6669
- AllocateMemory<MinizDeflateContext>(nullptr, RG_SIZE(MinizDeflateContext), (int)Allocator::Flag::Zero).ptr;
6669
+ compression.u.miniz = AllocateOne<MinizDeflateContext>(nullptr, (int)Allocator::Flag::Zero);
6670
6670
  compression.u.miniz->crc32 = MZ_CRC32_INIT;
6671
6671
 
6672
6672
  int flags = 0;
@@ -7033,7 +7033,7 @@ bool ReloadAssets()
7033
7033
  AssetInfo asset_copy;
7034
7034
 
7035
7035
  asset_copy.name = DuplicateString(asset.name, &assets_alloc).ptr;
7036
- asset_copy.data = AllocateMemory<uint8_t>(&assets_alloc, asset.data.len);
7036
+ asset_copy.data = AllocateSpan<uint8_t>(&assets_alloc, asset.data.len);
7037
7037
  memcpy_safe((void *)asset_copy.data.ptr, asset.data.ptr, (size_t)asset.data.len);
7038
7038
  asset_copy.compression_type = asset.compression_type;
7039
7039
  asset_copy.source_map = DuplicateString(asset.source_map, &assets_alloc).ptr;
@@ -837,15 +837,14 @@ public:
837
837
  virtual ~Allocator() = default;
838
838
 
839
839
  virtual void *Allocate(Size size, unsigned int flags = 0) = 0;
840
- virtual void Resize(void **ptr, Size old_size, Size new_size, unsigned int flags = 0) = 0;
840
+ virtual void *Resize(void *ptr, Size old_size, Size new_size, unsigned int flags = 0) = 0;
841
841
  virtual void Release(void *ptr, Size size) = 0;
842
842
  };
843
843
 
844
844
  Allocator *GetDefaultAllocator();
845
845
  Allocator *GetNullAllocator();
846
846
 
847
- template <typename T>
848
- Span<T> AllocateMemory(Allocator *alloc, Size size, unsigned int flags = 0)
847
+ static inline void *AllocateRaw(Allocator *alloc, Size size, unsigned int flags = 0)
849
848
  {
850
849
  RG_ASSERT(size >= 0);
851
850
 
@@ -853,13 +852,40 @@ Span<T> AllocateMemory(Allocator *alloc, Size size, unsigned int flags = 0)
853
852
  alloc = GetDefaultAllocator();
854
853
  }
855
854
 
855
+ void *ptr = alloc->Allocate(size, flags);
856
+ return ptr;
857
+ }
858
+
859
+ template <typename T>
860
+ T *AllocateOne(Allocator *alloc, unsigned int flags = 0)
861
+ {
862
+ if (!alloc) {
863
+ alloc = GetDefaultAllocator();
864
+ }
865
+
866
+ Size size = RG_SIZE(T);
867
+
856
868
  T *ptr = (T *)alloc->Allocate(size, flags);
857
- return MakeSpan(ptr, size);
869
+ return ptr;
858
870
  }
859
871
 
860
872
  template <typename T>
861
- Span<T> ResizeMemory(Allocator *alloc, T *ptr, Size old_size, Size new_size,
862
- unsigned int flags = 0)
873
+ Span<T> AllocateSpan(Allocator *alloc, Size len, unsigned int flags = 0)
874
+ {
875
+ RG_ASSERT(len >= 0);
876
+
877
+ if (!alloc) {
878
+ alloc = GetDefaultAllocator();
879
+ }
880
+
881
+ Size size = len * RG_SIZE(T);
882
+
883
+ T *ptr = (T *)alloc->Allocate(size, flags);
884
+ return MakeSpan(ptr, len);
885
+ }
886
+
887
+ static inline void *ResizeRaw(Allocator *alloc, void *ptr, Size old_size, Size new_size,
888
+ unsigned int flags = 0)
863
889
  {
864
890
  RG_ASSERT(new_size >= 0);
865
891
 
@@ -867,28 +893,56 @@ Span<T> ResizeMemory(Allocator *alloc, T *ptr, Size old_size, Size new_size,
867
893
  alloc = GetDefaultAllocator();
868
894
  }
869
895
 
870
- alloc->Resize((void **)&ptr, old_size, new_size, flags);
871
- return MakeSpan(ptr, new_size);
896
+ ptr = alloc->Resize(ptr, old_size, new_size, flags);
897
+ return ptr;
898
+ }
899
+
900
+ template <typename T>
901
+ Span<T> ResizeSpan(Allocator *alloc, Span<T> mem, Size new_len,
902
+ unsigned int flags = 0)
903
+ {
904
+ RG_ASSERT(new_len >= 0);
905
+
906
+ if (!alloc) {
907
+ alloc = GetDefaultAllocator();
908
+ }
909
+
910
+ Size old_size = mem.len * RG_SIZE(T);
911
+ Size new_size = new_len * RG_SIZE(T);
912
+
913
+ mem.ptr = (T *)alloc->Resize(mem.ptr, old_size, new_size, flags);
914
+ return MakeSpan(mem.ptr, new_len);
915
+ }
916
+
917
+ static inline void ReleaseRaw(Allocator *alloc, void *ptr, Size size)
918
+ {
919
+ if (!alloc) {
920
+ alloc = GetDefaultAllocator();
921
+ }
922
+
923
+ alloc->Release(ptr, size);
872
924
  }
873
925
 
874
926
  template<typename T>
875
- void ReleaseMemory(Allocator *alloc, Span<T> mem)
927
+ void ReleaseOne(Allocator *alloc, T *ptr)
876
928
  {
877
929
  if (!alloc) {
878
930
  alloc = GetDefaultAllocator();
879
931
  }
880
932
 
881
- alloc->Release((void *)mem.ptr, mem.len);
933
+ alloc->Release((void *)ptr, RG_SIZE(T));
882
934
  }
883
935
 
884
936
  template<typename T>
885
- void ReleaseMemory(Allocator *alloc, T *ptr, Size size)
937
+ void ReleaseSpan(Allocator *alloc, Span<T> mem)
886
938
  {
887
939
  if (!alloc) {
888
940
  alloc = GetDefaultAllocator();
889
941
  }
890
942
 
891
- alloc->Release((void *)ptr, size);
943
+ Size size = mem.len * RG_SIZE(T);
944
+
945
+ alloc->Release((void *)mem.ptr, size);
892
946
  }
893
947
 
894
948
  class LinkedAllocator final: public Allocator {
@@ -899,7 +953,7 @@ class LinkedAllocator final: public Allocator {
899
953
  struct Bucket {
900
954
  // Keep head first or stuff will break
901
955
  Node head;
902
- uint8_t data[8]; // Extra size is used to align pointer
956
+ uint8_t data[];
903
957
  };
904
958
 
905
959
  Allocator *allocator;
@@ -917,18 +971,17 @@ public:
917
971
  void ReleaseAll();
918
972
 
919
973
  void *Allocate(Size size, unsigned int flags = 0) override;
920
- void Resize(void **ptr, Size old_size, Size new_size, unsigned int flags = 0) override;
974
+ void *Resize(void *ptr, Size old_size, Size new_size, unsigned int flags = 0) override;
921
975
  void Release(void *ptr, Size size) override;
922
976
 
923
977
  private:
924
- static Bucket *PointerToBucket(void *ptr)
925
- { return (Bucket *)((uint8_t *)ptr - RG_OFFSET_OF(Bucket, data)); }
978
+ static Bucket *PointerToBucket(void *ptr);
926
979
  };
927
980
 
928
981
  class BlockAllocatorBase: public Allocator {
929
982
  struct Bucket {
930
983
  Size used;
931
- uint8_t data[8]; // Extra size is used to align pointer
984
+ uint8_t data[];
932
985
  };
933
986
 
934
987
  Size block_size;
@@ -944,7 +997,7 @@ public:
944
997
  }
945
998
 
946
999
  void *Allocate(Size size, unsigned int flags = 0) override;
947
- void Resize(void **ptr, Size old_size, Size new_size, unsigned int flags = 0) override;
1000
+ void *Resize(void *ptr, Size old_size, Size new_size, unsigned int flags = 0) override;
948
1001
  void Release(void *ptr, Size size) override;
949
1002
 
950
1003
  protected:
@@ -1315,7 +1368,7 @@ public:
1315
1368
  len = new_capacity;
1316
1369
  }
1317
1370
 
1318
- ptr = ResizeMemory(allocator, ptr, capacity * RG_SIZE(T), new_capacity * RG_SIZE(T)).ptr;
1371
+ ptr = (T *)ResizeRaw(allocator, ptr, capacity * RG_SIZE(T), new_capacity * RG_SIZE(T));
1319
1372
  capacity = new_capacity;
1320
1373
  }
1321
1374
  }
@@ -1612,9 +1665,9 @@ public:
1612
1665
  Size bucket_offset = (offset + len) % BucketSize;
1613
1666
 
1614
1667
  if (bucket_idx >= buckets.len) {
1615
- Bucket *new_bucket = AllocateMemory<Bucket>(buckets.allocator, RG_SIZE(Bucket)).ptr;
1668
+ Bucket *new_bucket = AllocateOne<Bucket>(buckets.allocator);
1616
1669
  new (&new_bucket->allocator) AllocatorType();
1617
- new_bucket->values = AllocateMemory<T>(&new_bucket->allocator, BucketSize * RG_SIZE(T)).ptr;
1670
+ new_bucket->values = (T *)AllocateRaw(&new_bucket->allocator, BucketSize * RG_SIZE(T));
1618
1671
 
1619
1672
  buckets.Append(new_bucket);
1620
1673
  }
@@ -1731,7 +1784,7 @@ private:
1731
1784
  void DeleteBucket(Bucket *bucket)
1732
1785
  {
1733
1786
  bucket->allocator.~AllocatorType();
1734
- ReleaseMemory(buckets.allocator, bucket, RG_SIZE(Bucket));
1787
+ ReleaseOne(buckets.allocator, bucket);
1735
1788
  }
1736
1789
  };
1737
1790
 
@@ -2244,10 +2297,10 @@ private:
2244
2297
  Size old_capacity = capacity;
2245
2298
 
2246
2299
  if (new_capacity) {
2247
- used = AllocateMemory<size_t>(allocator,
2248
- (new_capacity + (RG_SIZE(size_t) * 8) - 1) / RG_SIZE(size_t),
2249
- (int)Allocator::Flag::Zero).ptr;
2250
- data = AllocateMemory<ValueType>(allocator, new_capacity * RG_SIZE(ValueType)).ptr;
2300
+ used = (size_t *)AllocateRaw(allocator,
2301
+ (new_capacity + (RG_SIZE(size_t) * 8) - 1) / RG_SIZE(size_t),
2302
+ (int)Allocator::Flag::Zero);
2303
+ data = (ValueType *)AllocateRaw(allocator, new_capacity * RG_SIZE(ValueType));
2251
2304
  for (Size i = 0; i < new_capacity; i++) {
2252
2305
  new (&data[i]) ValueType();
2253
2306
  }
@@ -2269,9 +2322,8 @@ private:
2269
2322
  capacity = 0;
2270
2323
  }
2271
2324
 
2272
- ReleaseMemory(allocator, old_used,
2273
- (old_capacity + (RG_SIZE(size_t) * 8) - 1) / RG_SIZE(size_t));
2274
- ReleaseMemory(allocator, old_data, old_capacity * RG_SIZE(ValueType));
2325
+ ReleaseRaw(allocator, old_used, (old_capacity + (RG_SIZE(size_t) * 8) - 1) / RG_SIZE(size_t));
2326
+ ReleaseRaw(allocator, old_data, old_capacity * RG_SIZE(ValueType));
2275
2327
  }
2276
2328
 
2277
2329
  void MarkUsed(Size idx)