harfbuzzjs 0.2.1 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/build.yml +45 -0
- package/.gitmodules +3 -0
- package/LICENSE +0 -1
- package/README.md +2 -8
- package/build-subset.sh +22 -0
- package/build.sh +17 -57
- package/{subset/config-override.h → config-override-subset.h} +0 -0
- package/config-override.h +3 -3
- package/examples/Mada.abjad.otf +0 -0
- package/{subset/roboto-black.ttf → examples/Roboto-Black.ttf} +0 -0
- package/{subset/test.js → examples/hb-subset.example.node.js} +5 -5
- package/examples/hbjs.example.html +0 -1
- package/examples/hbjs.example.js +3 -2
- package/examples/hbjs.example.node.js +2 -0
- package/examples/nohbjs.html +0 -1
- package/harfbuzz.ts +0 -1
- package/hb-subset.symbols +28 -0
- package/hb-subset.wasm +0 -0
- package/hb.wasm +0 -0
- package/hbjs.cc +256 -0
- package/hbjs.js +26 -4
- package/hbjs.symbols +36 -0
- package/index.js +0 -1
- package/package.json +1 -1
- package/hbjs.c +0 -284
- package/libc/ctype.h +0 -73
- package/libc/emmalloc.cpp +0 -1202
- package/libc/fprintf.c +0 -65
- package/libc/include/assert.h +0 -14
- package/libc/include/cassert +0 -1
- package/libc/include/cfloat +0 -1
- package/libc/include/climits +0 -1
- package/libc/include/cmath +0 -1
- package/libc/include/cstdarg +0 -1
- package/libc/include/cstddef +0 -1
- package/libc/include/cstdio +0 -1
- package/libc/include/cstdlib +0 -1
- package/libc/include/cstring +0 -1
- package/libc/include/emscripten.h +0 -0
- package/libc/include/float.h +0 -6
- package/libc/include/limits.h +0 -2
- package/libc/include/locale.h +0 -0
- package/libc/include/malloc.h +0 -13
- package/libc/include/math.h +0 -0
- package/libc/include/raqm-version.h +0 -5
- package/libc/include/stdarg.h +0 -0
- package/libc/include/stdbool.h +0 -12
- package/libc/include/stddef.h +0 -0
- package/libc/include/stdint.h +0 -1
- package/libc/include/stdio.h +0 -0
- package/libc/include/stdlib.h +0 -110
- package/libc/include/string.h +0 -26
- package/libc/include/sys/types.h +0 -1
- package/libc/include/unistd.h +0 -14
- package/libc/main.c +0 -7
- package/libc/malloc.cc +0 -29
- package/libc/prf.c +0 -667
- package/libc/sprintf.c +0 -112
- package/libc/strtol.c +0 -79
- package/libc/zephyr-string.c +0 -381
- package/subset/build.sh +0 -48
- package/subset/hb-subset.wasm +0 -0
- package/subset/test.cc +0 -50
package/libc/emmalloc.cpp
DELETED
|
@@ -1,1202 +0,0 @@
|
|
|
1
|
-
/*
|
|
2
|
-
* Copyright 2018 The Emscripten Authors. All rights reserved.
|
|
3
|
-
* Emscripten is available under two separate licenses, the MIT license and the
|
|
4
|
-
* University of Illinois/NCSA Open Source License. Both these licenses can be
|
|
5
|
-
* found in the LICENSE file.
|
|
6
|
-
*
|
|
7
|
-
* Simple minimalistic but efficient malloc/free.
|
|
8
|
-
*
|
|
9
|
-
* Assumptions:
|
|
10
|
-
*
|
|
11
|
-
* - Pointers are 32-bit.
|
|
12
|
-
* - Single-threaded.
|
|
13
|
-
* - sbrk() is used, and nothing else.
|
|
14
|
-
* - sbrk() will not be accessed by anyone else.
|
|
15
|
-
* - sbrk() is very fast in most cases (internal wasm call).
|
|
16
|
-
*
|
|
17
|
-
* Invariants:
|
|
18
|
-
*
|
|
19
|
-
* - Metadata is 8 bytes, allocation payload is a
|
|
20
|
-
* multiple of 8 bytes.
|
|
21
|
-
* - All regions of memory are adjacent.
|
|
22
|
-
* - Due to the above, after initial alignment fixing, all
|
|
23
|
-
* regions are aligned.
|
|
24
|
-
* - A region is either in use (used payload > 0) or not.
|
|
25
|
-
* Used regions may be adjacent, and a used and unused region
|
|
26
|
-
* may be adjacent, but not two unused ones - they would be
|
|
27
|
-
* merged.
|
|
28
|
-
* - A used region always has minimal space at the end - we
|
|
29
|
-
* split off extra space when possible immediately.
|
|
30
|
-
*
|
|
31
|
-
* Debugging:
|
|
32
|
-
*
|
|
33
|
-
* - If not NDEBUG, runtime assert()s are in use.
|
|
34
|
-
* - If EMMALLOC_DEBUG is defined, a large amount of extra checks are done.
|
|
35
|
-
* - If EMMALLOC_DEBUG_LOG is defined, a lot of operations are logged
|
|
36
|
-
* out, in addition to EMMALLOC_DEBUG.
|
|
37
|
-
* - Debugging and logging uses EM_ASM, not printf etc., to minimize any
|
|
38
|
-
* risk of debugging or logging depending on malloc.
|
|
39
|
-
*
|
|
40
|
-
* TODO
|
|
41
|
-
*
|
|
42
|
-
* - Optimizations for small allocations that are not multiples of 8, like
|
|
43
|
-
* 12 and 20 (which take 24 and 32 bytes respectively)
|
|
44
|
-
*
|
|
45
|
-
*/
|
|
46
|
-
|
|
47
|
-
#include <assert.h>
|
|
48
|
-
#include <emscripten.h>
|
|
49
|
-
#include <limits.h> // CHAR_BIT
|
|
50
|
-
#include <malloc.h> // mallinfo
|
|
51
|
-
#include <string.h> // for memcpy, memset
|
|
52
|
-
#include <unistd.h> // for sbrk()
|
|
53
|
-
|
|
54
|
-
#define EMMALLOC_EXPORT __attribute__((__weak__, __visibility__("default")))
|
|
55
|
-
|
|
56
|
-
// Assumptions
|
|
57
|
-
|
|
58
|
-
static_assert(sizeof(void*) == 4, "32-bit system");
|
|
59
|
-
static_assert(sizeof(size_t) == 4, "32-bit system");
|
|
60
|
-
static_assert(sizeof(int) == 4, "32-bit system");
|
|
61
|
-
|
|
62
|
-
#define SIZE_T_BIT (sizeof(size_t) * CHAR_BIT)
|
|
63
|
-
|
|
64
|
-
static_assert(CHAR_BIT == 8, "standard char bit size");
|
|
65
|
-
static_assert(SIZE_T_BIT == 32, "standard size_t bit size");
|
|
66
|
-
|
|
67
|
-
// Debugging
|
|
68
|
-
|
|
69
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
70
|
-
#ifndef EMMALLOC_DEBUG
|
|
71
|
-
#define EMMALLOC_DEBUG
|
|
72
|
-
#endif
|
|
73
|
-
#endif
|
|
74
|
-
|
|
75
|
-
#ifdef EMMALLOC_DEBUG
|
|
76
|
-
// Forward declaration for convenience.
|
|
77
|
-
static void emmalloc_validate_all();
|
|
78
|
-
#endif
|
|
79
|
-
#ifdef EMMALLOC_DEBUG
|
|
80
|
-
// Forward declaration for convenience.
|
|
81
|
-
static void emmalloc_dump_all();
|
|
82
|
-
#endif
|
|
83
|
-
|
|
84
|
-
// Math utilities
|
|
85
|
-
|
|
86
|
-
static bool isPowerOf2(size_t x) { return __builtin_popcount(x) == 1; }
|
|
87
|
-
|
|
88
|
-
static size_t lowerBoundPowerOf2(size_t x) {
|
|
89
|
-
if (x == 0)
|
|
90
|
-
return 1;
|
|
91
|
-
// e.g. 5 is 0..0101, so clz is 29, and we want
|
|
92
|
-
// 4 which is 1 << 2, so the result should be 2
|
|
93
|
-
return SIZE_T_BIT - 1 - __builtin_clz(x);
|
|
94
|
-
}
|
|
95
|
-
|
|
96
|
-
// Constants
|
|
97
|
-
|
|
98
|
-
// All allocations are aligned to this value.
|
|
99
|
-
static const size_t ALIGNMENT = 8;
|
|
100
|
-
|
|
101
|
-
// Even allocating 1 byte incurs this much actual payload
|
|
102
|
-
// allocation. This is our minimum bin size.
|
|
103
|
-
static const size_t ALLOC_UNIT = ALIGNMENT;
|
|
104
|
-
|
|
105
|
-
// How big the metadata is in each region. It is convenient
|
|
106
|
-
// that this is identical to the above values.
|
|
107
|
-
static const size_t METADATA_SIZE = ALLOC_UNIT;
|
|
108
|
-
|
|
109
|
-
// How big a minimal region is.
|
|
110
|
-
static const size_t MIN_REGION_SIZE = METADATA_SIZE + ALLOC_UNIT;
|
|
111
|
-
|
|
112
|
-
static_assert(ALLOC_UNIT == ALIGNMENT, "expected size of allocation unit");
|
|
113
|
-
static_assert(METADATA_SIZE == ALIGNMENT, "expected size of metadata");
|
|
114
|
-
|
|
115
|
-
// Constant utilities
|
|
116
|
-
|
|
117
|
-
// Align a pointer, increasing it upwards as necessary
|
|
118
|
-
static size_t alignUp(size_t ptr) { return (ptr + ALIGNMENT - 1) & -ALIGNMENT; }
|
|
119
|
-
|
|
120
|
-
static void* alignUpPointer(void* ptr) { return (void*)alignUp(size_t(ptr)); }
|
|
121
|
-
|
|
122
|
-
//
|
|
123
|
-
// Data structures
|
|
124
|
-
//
|
|
125
|
-
|
|
126
|
-
struct Region;
|
|
127
|
-
|
|
128
|
-
// Information memory that is a free list, i.e., may
|
|
129
|
-
// be reused.
|
|
130
|
-
// Note how this can fit instead of the payload (as
|
|
131
|
-
// the payload is a multiple of MIN_ALLOC).
|
|
132
|
-
struct FreeInfo {
|
|
133
|
-
// free lists are doubly-linked lists
|
|
134
|
-
FreeInfo* _prev;
|
|
135
|
-
FreeInfo* _next;
|
|
136
|
-
|
|
137
|
-
FreeInfo*& prev() { return _prev; }
|
|
138
|
-
FreeInfo*& next() { return _next; }
|
|
139
|
-
};
|
|
140
|
-
|
|
141
|
-
static_assert(sizeof(FreeInfo) == ALLOC_UNIT, "expected size of free info");
|
|
142
|
-
|
|
143
|
-
// The first region of memory.
|
|
144
|
-
static Region* firstRegion = nullptr;
|
|
145
|
-
|
|
146
|
-
// The last region of memory. It's important to know the end
|
|
147
|
-
// since we may append to it.
|
|
148
|
-
static Region* lastRegion = nullptr;
|
|
149
|
-
|
|
150
|
-
// A contiguous region of memory. Metadata at the beginning describes it,
|
|
151
|
-
// after which is the "payload", the sections that user code calling
|
|
152
|
-
// malloc can use.
|
|
153
|
-
struct Region {
|
|
154
|
-
// Whether this region is in use or not.
|
|
155
|
-
size_t _used : 1;
|
|
156
|
-
|
|
157
|
-
// The total size of the section of memory this is associated
|
|
158
|
-
// with and contained in.
|
|
159
|
-
// That includes the metadata itself and the payload memory after,
|
|
160
|
-
// which includes the used and unused portions of it.
|
|
161
|
-
// FIXME: Shift by 1, as our size is even anyhow?
|
|
162
|
-
// Or, disallow allocation of half the total space or above.
|
|
163
|
-
// Browsers barely allow allocating 2^31 anyhow, so inside that
|
|
164
|
-
// space we can just allocate something smaller than it.
|
|
165
|
-
size_t _totalSize : 31;
|
|
166
|
-
|
|
167
|
-
// Each memory area knows its previous neighbor, as we hope to merge them.
|
|
168
|
-
// To compute the next neighbor we can use the total size, and to know
|
|
169
|
-
// if a neighbor exists we can compare the region to lastRegion
|
|
170
|
-
Region* _prev;
|
|
171
|
-
|
|
172
|
-
// Up to here was the fixed metadata, of size 16. The rest is either
|
|
173
|
-
// the payload, or freelist info.
|
|
174
|
-
union {
|
|
175
|
-
FreeInfo _freeInfo;
|
|
176
|
-
char _payload[];
|
|
177
|
-
};
|
|
178
|
-
|
|
179
|
-
size_t getTotalSize() { return _totalSize; }
|
|
180
|
-
void setTotalSize(size_t x) { _totalSize = x; }
|
|
181
|
-
void incTotalSize(size_t x) { _totalSize += x; }
|
|
182
|
-
void decTotalSize(size_t x) { _totalSize -= x; }
|
|
183
|
-
|
|
184
|
-
size_t getUsed() { return _used; }
|
|
185
|
-
void setUsed(size_t x) { _used = x; }
|
|
186
|
-
|
|
187
|
-
Region*& prev() { return _prev; }
|
|
188
|
-
// The next region is not, as we compute it on the fly
|
|
189
|
-
Region* next() {
|
|
190
|
-
if (this != lastRegion) {
|
|
191
|
-
return (Region*)((char*)this + getTotalSize());
|
|
192
|
-
} else {
|
|
193
|
-
return nullptr;
|
|
194
|
-
}
|
|
195
|
-
}
|
|
196
|
-
FreeInfo& freeInfo() { return _freeInfo; }
|
|
197
|
-
// The payload is special, we just return its address, as we
|
|
198
|
-
// never want to modify it ourselves.
|
|
199
|
-
char* payload() { return &_payload[0]; }
|
|
200
|
-
};
|
|
201
|
-
|
|
202
|
-
// Region utilities
|
|
203
|
-
|
|
204
|
-
static void* getPayload(Region* region) {
|
|
205
|
-
assert(((char*)®ion->freeInfo()) - ((char*)region) == METADATA_SIZE);
|
|
206
|
-
assert(region->getUsed());
|
|
207
|
-
return region->payload();
|
|
208
|
-
}
|
|
209
|
-
|
|
210
|
-
static Region* fromPayload(void* payload) { return (Region*)((char*)payload - METADATA_SIZE); }
|
|
211
|
-
|
|
212
|
-
static Region* fromFreeInfo(FreeInfo* freeInfo) {
|
|
213
|
-
return (Region*)((char*)freeInfo - METADATA_SIZE);
|
|
214
|
-
}
|
|
215
|
-
|
|
216
|
-
static size_t getMaxPayload(Region* region) { return region->getTotalSize() - METADATA_SIZE; }
|
|
217
|
-
|
|
218
|
-
// TODO: move into class, make more similar to next()
|
|
219
|
-
static void* getAfter(Region* region) { return ((char*)region) + region->getTotalSize(); }
|
|
220
|
-
|
|
221
|
-
// Globals
|
|
222
|
-
|
|
223
|
-
// TODO: For now we have a single global space for all allocations,
|
|
224
|
-
// but for multithreading etc. we may want to generalize that.
|
|
225
|
-
|
|
226
|
-
// A freelist (a list of Regions ready for re-use) for all
|
|
227
|
-
// power of 2 payload sizes (only the ones from ALIGNMENT
|
|
228
|
-
// size and above are relevant, though). The freelist at index
|
|
229
|
-
// K contains regions of memory big enough to contain at least
|
|
230
|
-
// 2^K bytes.
|
|
231
|
-
//
|
|
232
|
-
// Note that there is no freelist for 2^32, as that amount can
|
|
233
|
-
// never be allocated.
|
|
234
|
-
|
|
235
|
-
static const size_t MIN_FREELIST_INDEX = 3; // 8 == ALLOC_UNIT
|
|
236
|
-
static const size_t MAX_FREELIST_INDEX = 32; // uint32_t
|
|
237
|
-
|
|
238
|
-
static FreeInfo* freeLists[MAX_FREELIST_INDEX] = {nullptr, nullptr, nullptr, nullptr, nullptr,
|
|
239
|
-
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
|
|
240
|
-
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
|
|
241
|
-
nullptr, nullptr, nullptr, nullptr, nullptr};
|
|
242
|
-
|
|
243
|
-
// Global utilities
|
|
244
|
-
|
|
245
|
-
// The freelist index is where we would appear in a freelist if
|
|
246
|
-
// we were one. It is a list of items of size at least the power
|
|
247
|
-
// of 2 that lower bounds us.
|
|
248
|
-
static size_t getFreeListIndex(size_t size) {
|
|
249
|
-
assert(1 << MIN_FREELIST_INDEX == ALLOC_UNIT);
|
|
250
|
-
assert(size > 0);
|
|
251
|
-
if (size < ALLOC_UNIT)
|
|
252
|
-
size = ALLOC_UNIT;
|
|
253
|
-
// We need a lower bound here, as the list contains things
|
|
254
|
-
// that can contain at least a power of 2.
|
|
255
|
-
size_t index = lowerBoundPowerOf2(size);
|
|
256
|
-
assert(MIN_FREELIST_INDEX <= index && index < MAX_FREELIST_INDEX);
|
|
257
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
258
|
-
EM_ASM({out(" emmalloc.getFreeListIndex " + [ $0, $1 ])}, size, index);
|
|
259
|
-
#endif
|
|
260
|
-
return index;
|
|
261
|
-
}
|
|
262
|
-
|
|
263
|
-
// The big-enough freelist index is the index of the freelist of
|
|
264
|
-
// items that are all big enough for us. This is computed using
|
|
265
|
-
// an upper bound power of 2.
|
|
266
|
-
static size_t getBigEnoughFreeListIndex(size_t size) {
|
|
267
|
-
assert(size > 0);
|
|
268
|
-
size_t index = getFreeListIndex(size);
|
|
269
|
-
// If we're a power of 2, the lower and upper bounds are the
|
|
270
|
-
// same. Otherwise, add one.
|
|
271
|
-
if (!isPowerOf2(size))
|
|
272
|
-
index++;
|
|
273
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
274
|
-
EM_ASM({out(" emmalloc.getBigEnoughFreeListIndex " + [ $0, $1 ])}, size, index);
|
|
275
|
-
#endif
|
|
276
|
-
return index;
|
|
277
|
-
}
|
|
278
|
-
|
|
279
|
-
// Items in the freelist at this index must be at least this large.
|
|
280
|
-
static size_t getMinSizeForFreeListIndex(size_t index) { return 1 << index; }
|
|
281
|
-
|
|
282
|
-
// Items in the freelist at this index must be smaller than this.
|
|
283
|
-
static size_t getMaxSizeForFreeListIndex(size_t index) { return 1 << (index + 1); }
|
|
284
|
-
|
|
285
|
-
static void removeFromFreeList(Region* region) {
|
|
286
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
287
|
-
EM_ASM({out(" emmalloc.removeFromFreeList " + $0)}, region);
|
|
288
|
-
#endif
|
|
289
|
-
size_t index = getFreeListIndex(getMaxPayload(region));
|
|
290
|
-
FreeInfo* freeInfo = ®ion->freeInfo();
|
|
291
|
-
if (freeLists[index] == freeInfo) {
|
|
292
|
-
freeLists[index] = freeInfo->next();
|
|
293
|
-
}
|
|
294
|
-
if (freeInfo->prev()) {
|
|
295
|
-
freeInfo->prev()->next() = freeInfo->next();
|
|
296
|
-
}
|
|
297
|
-
if (freeInfo->next()) {
|
|
298
|
-
freeInfo->next()->prev() = freeInfo->prev();
|
|
299
|
-
}
|
|
300
|
-
}
|
|
301
|
-
|
|
302
|
-
static void addToFreeList(Region* region) {
|
|
303
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
304
|
-
EM_ASM({out(" emmalloc.addToFreeList " + $0)}, region);
|
|
305
|
-
#endif
|
|
306
|
-
assert(getAfter(region) <= sbrk(0));
|
|
307
|
-
size_t index = getFreeListIndex(getMaxPayload(region));
|
|
308
|
-
FreeInfo* freeInfo = ®ion->freeInfo();
|
|
309
|
-
FreeInfo* last = freeLists[index];
|
|
310
|
-
freeLists[index] = freeInfo;
|
|
311
|
-
freeInfo->prev() = nullptr;
|
|
312
|
-
freeInfo->next() = last;
|
|
313
|
-
if (last) {
|
|
314
|
-
last->prev() = freeInfo;
|
|
315
|
-
}
|
|
316
|
-
}
|
|
317
|
-
|
|
318
|
-
// Receives a region that has just become free (and is not yet in a freelist).
|
|
319
|
-
// Tries to merge it into a region before or after it to which it is adjacent.
|
|
320
|
-
static int mergeIntoExistingFreeRegion(Region* region) {
|
|
321
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
322
|
-
EM_ASM({out(" emmalloc.mergeIntoExistingFreeRegion " + $0)}, region);
|
|
323
|
-
#endif
|
|
324
|
-
assert(getAfter(region) <= sbrk(0));
|
|
325
|
-
int merged = 0;
|
|
326
|
-
Region* prev = region->prev();
|
|
327
|
-
Region* next = region->next();
|
|
328
|
-
if (prev && !prev->getUsed()) {
|
|
329
|
-
// Merge them.
|
|
330
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
331
|
-
EM_ASM({out(" emmalloc.mergeIntoExistingFreeRegion merge into prev " + $0)}, prev);
|
|
332
|
-
#endif
|
|
333
|
-
removeFromFreeList(prev);
|
|
334
|
-
prev->incTotalSize(region->getTotalSize());
|
|
335
|
-
if (next) {
|
|
336
|
-
next->prev() = prev; // was: region
|
|
337
|
-
} else {
|
|
338
|
-
assert(region == lastRegion);
|
|
339
|
-
lastRegion = prev;
|
|
340
|
-
}
|
|
341
|
-
if (next) {
|
|
342
|
-
// We may also be able to merge with the next, keep trying.
|
|
343
|
-
if (!next->getUsed()) {
|
|
344
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
345
|
-
EM_ASM({out(" emmalloc.mergeIntoExistingFreeRegion also merge into next " + $0)}, next);
|
|
346
|
-
#endif
|
|
347
|
-
removeFromFreeList(next);
|
|
348
|
-
prev->incTotalSize(next->getTotalSize());
|
|
349
|
-
if (next != lastRegion) {
|
|
350
|
-
next->next()->prev() = prev;
|
|
351
|
-
} else {
|
|
352
|
-
lastRegion = prev;
|
|
353
|
-
}
|
|
354
|
-
}
|
|
355
|
-
}
|
|
356
|
-
addToFreeList(prev);
|
|
357
|
-
return 1;
|
|
358
|
-
}
|
|
359
|
-
if (next && !next->getUsed()) {
|
|
360
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
361
|
-
EM_ASM({out(" emmalloc.mergeIntoExistingFreeRegion merge into next " + $0)}, next);
|
|
362
|
-
#endif
|
|
363
|
-
// Merge them.
|
|
364
|
-
removeFromFreeList(next);
|
|
365
|
-
region->incTotalSize(next->getTotalSize());
|
|
366
|
-
if (next != lastRegion) {
|
|
367
|
-
next->next()->prev() = region;
|
|
368
|
-
} else {
|
|
369
|
-
lastRegion = region;
|
|
370
|
-
}
|
|
371
|
-
addToFreeList(region);
|
|
372
|
-
return 1;
|
|
373
|
-
}
|
|
374
|
-
return 0;
|
|
375
|
-
}
|
|
376
|
-
|
|
377
|
-
static void stopUsing(Region* region) {
|
|
378
|
-
region->setUsed(0);
|
|
379
|
-
if (!mergeIntoExistingFreeRegion(region)) {
|
|
380
|
-
addToFreeList(region);
|
|
381
|
-
}
|
|
382
|
-
}
|
|
383
|
-
|
|
384
|
-
// Grow a region. If not in use, we may need to be in another
|
|
385
|
-
// freelist.
|
|
386
|
-
// TODO: We can calculate that, to save some work.
|
|
387
|
-
static void growRegion(Region* region, size_t sizeDelta) {
|
|
388
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
389
|
-
EM_ASM({out(" emmalloc.growRegion " + [ $0, $1 ])}, region, sizeDelta);
|
|
390
|
-
#endif
|
|
391
|
-
if (!region->getUsed()) {
|
|
392
|
-
removeFromFreeList(region);
|
|
393
|
-
}
|
|
394
|
-
region->incTotalSize(sizeDelta);
|
|
395
|
-
if (!region->getUsed()) {
|
|
396
|
-
addToFreeList(region);
|
|
397
|
-
}
|
|
398
|
-
}
|
|
399
|
-
|
|
400
|
-
// Extends the last region to a certain payload size. Returns 1 if successful,
|
|
401
|
-
// 0 if an error occurred in sbrk().
|
|
402
|
-
static int extendLastRegion(size_t size) {
|
|
403
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
404
|
-
EM_ASM({out(" emmalloc.extendLastRegionToSize " + $0)}, size);
|
|
405
|
-
#endif
|
|
406
|
-
size_t reusable = getMaxPayload(lastRegion);
|
|
407
|
-
size_t sbrkSize = alignUp(size) - reusable;
|
|
408
|
-
void* ptr = sbrk(sbrkSize);
|
|
409
|
-
if (ptr == (void*)-1) {
|
|
410
|
-
// sbrk() failed, we failed.
|
|
411
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
412
|
-
EM_ASM({out(" emmalloc.extendLastRegion sbrk failure")});
|
|
413
|
-
#endif
|
|
414
|
-
return 0;
|
|
415
|
-
}
|
|
416
|
-
// sbrk() should give us new space right after the last region.
|
|
417
|
-
assert(ptr == getAfter(lastRegion));
|
|
418
|
-
// Increment the region's size.
|
|
419
|
-
growRegion(lastRegion, sbrkSize);
|
|
420
|
-
return 1;
|
|
421
|
-
}
|
|
422
|
-
|
|
423
|
-
static void possiblySplitRemainder(Region* region, size_t size) {
|
|
424
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
425
|
-
EM_ASM({out(" emmalloc.possiblySplitRemainder " + [ $0, $1 ])}, region, size);
|
|
426
|
-
#endif
|
|
427
|
-
size_t payloadSize = getMaxPayload(region);
|
|
428
|
-
assert(payloadSize >= size);
|
|
429
|
-
size_t extra = payloadSize - size;
|
|
430
|
-
// Room for a minimal region is definitely worth splitting. Otherwise,
|
|
431
|
-
// if we don't have room for a full region, but we do have an allocation
|
|
432
|
-
// unit's worth, and we are the last region, it's worth allocating some
|
|
433
|
-
// more memory to create a region here. The next allocation can reuse it,
|
|
434
|
-
// which is better than leaving it as unused and unreusable space at the
|
|
435
|
-
// end of this region.
|
|
436
|
-
if (region == lastRegion && extra >= ALLOC_UNIT && extra < MIN_REGION_SIZE) {
|
|
437
|
-
// Yes, this is a small-but-useful amount of memory in the final region,
|
|
438
|
-
// extend it.
|
|
439
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
440
|
-
EM_ASM({out(" emmalloc.possiblySplitRemainder pre-extending")});
|
|
441
|
-
#endif
|
|
442
|
-
if (extendLastRegion(payloadSize + ALLOC_UNIT)) {
|
|
443
|
-
// Success.
|
|
444
|
-
extra += ALLOC_UNIT;
|
|
445
|
-
assert(extra >= MIN_REGION_SIZE);
|
|
446
|
-
} else {
|
|
447
|
-
return;
|
|
448
|
-
}
|
|
449
|
-
}
|
|
450
|
-
if (extra >= MIN_REGION_SIZE) {
|
|
451
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
452
|
-
EM_ASM({out(" emmalloc.possiblySplitRemainder is splitting")});
|
|
453
|
-
#endif
|
|
454
|
-
// Worth it, split the region
|
|
455
|
-
// TODO: Consider not doing it, may affect long-term fragmentation.
|
|
456
|
-
void* after = getAfter(region);
|
|
457
|
-
Region* split = (Region*)alignUpPointer((char*)getPayload(region) + size);
|
|
458
|
-
region->setTotalSize((char*)split - (char*)region);
|
|
459
|
-
size_t totalSplitSize = (char*)after - (char*)split;
|
|
460
|
-
assert(totalSplitSize >= MIN_REGION_SIZE);
|
|
461
|
-
split->setTotalSize(totalSplitSize);
|
|
462
|
-
split->prev() = region;
|
|
463
|
-
if (region != lastRegion) {
|
|
464
|
-
split->next()->prev() = split;
|
|
465
|
-
} else {
|
|
466
|
-
lastRegion = split;
|
|
467
|
-
}
|
|
468
|
-
stopUsing(split);
|
|
469
|
-
}
|
|
470
|
-
}
|
|
471
|
-
|
|
472
|
-
// Sets the used payload of a region, and does other necessary work when
|
|
473
|
-
// starting to use a region, such as splitting off a remainder if there is
|
|
474
|
-
// any.
|
|
475
|
-
static void useRegion(Region* region, size_t size) {
|
|
476
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
477
|
-
EM_ASM({out(" emmalloc.useRegion " + [ $0, $1 ])}, region, size);
|
|
478
|
-
#endif
|
|
479
|
-
assert(size > 0);
|
|
480
|
-
region->setUsed(1);
|
|
481
|
-
// We may not be using all of it, split out a smaller
|
|
482
|
-
// region into a free list if it's large enough.
|
|
483
|
-
possiblySplitRemainder(region, size);
|
|
484
|
-
}
|
|
485
|
-
|
|
486
|
-
static Region* useFreeInfo(FreeInfo* freeInfo, size_t size) {
|
|
487
|
-
Region* region = fromFreeInfo(freeInfo);
|
|
488
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
489
|
-
EM_ASM({out(" emmalloc.useFreeInfo " + [ $0, $1 ])}, region, size);
|
|
490
|
-
#endif
|
|
491
|
-
// This region is no longer free
|
|
492
|
-
removeFromFreeList(region);
|
|
493
|
-
// This region is now in use
|
|
494
|
-
useRegion(region, size);
|
|
495
|
-
return region;
|
|
496
|
-
}
|
|
497
|
-
|
|
498
|
-
// Debugging
|
|
499
|
-
|
|
500
|
-
// Mostly for testing purposes, wipes everything.
|
|
501
|
-
EMMALLOC_EXPORT
|
|
502
|
-
void emmalloc_blank_slate_from_orbit() {
|
|
503
|
-
for (int i = 0; i < MAX_FREELIST_INDEX; i++) {
|
|
504
|
-
freeLists[i] = nullptr;
|
|
505
|
-
}
|
|
506
|
-
firstRegion = nullptr;
|
|
507
|
-
lastRegion = nullptr;
|
|
508
|
-
}
|
|
509
|
-
|
|
510
|
-
#ifdef EMMALLOC_DEBUG
|
|
511
|
-
// For testing purposes, validate a region.
|
|
512
|
-
static void emmalloc_validate_region(Region* region) {
|
|
513
|
-
assert(getAfter(region) <= sbrk(0));
|
|
514
|
-
assert(getMaxPayload(region) < region->getTotalSize());
|
|
515
|
-
if (region->prev()) {
|
|
516
|
-
assert(getAfter(region->prev()) == region);
|
|
517
|
-
assert(region->prev()->next() == region);
|
|
518
|
-
}
|
|
519
|
-
if (region->next()) {
|
|
520
|
-
assert(getAfter(region) == region->next());
|
|
521
|
-
assert(region->next()->prev() == region);
|
|
522
|
-
}
|
|
523
|
-
}
|
|
524
|
-
|
|
525
|
-
// For testing purposes, check that everything is valid.
|
|
526
|
-
static void emmalloc_validate_all() {
|
|
527
|
-
void* end = sbrk(0);
|
|
528
|
-
// Validate regions.
|
|
529
|
-
Region* curr = firstRegion;
|
|
530
|
-
Region* prev = nullptr;
|
|
531
|
-
EM_ASM({ Module.emmallocDebug = {regions : {}}; });
|
|
532
|
-
while (curr) {
|
|
533
|
-
// Note all region, so we can see freelist items are in the main list.
|
|
534
|
-
EM_ASM(
|
|
535
|
-
{
|
|
536
|
-
var region = $0;
|
|
537
|
-
assert(!Module.emmallocDebug.regions[region], "dupe region");
|
|
538
|
-
Module.emmallocDebug.regions[region] = 1;
|
|
539
|
-
},
|
|
540
|
-
curr);
|
|
541
|
-
assert(curr->prev() == prev);
|
|
542
|
-
if (prev) {
|
|
543
|
-
assert(getAfter(prev) == curr);
|
|
544
|
-
// Adjacent free regions must be merged.
|
|
545
|
-
assert(!(!prev->getUsed() && !curr->getUsed()));
|
|
546
|
-
}
|
|
547
|
-
assert(getAfter(curr) <= end);
|
|
548
|
-
prev = curr;
|
|
549
|
-
curr = curr->next();
|
|
550
|
-
}
|
|
551
|
-
if (prev) {
|
|
552
|
-
assert(prev == lastRegion);
|
|
553
|
-
} else {
|
|
554
|
-
assert(!lastRegion);
|
|
555
|
-
}
|
|
556
|
-
if (lastRegion) {
|
|
557
|
-
assert(getAfter(lastRegion) == end);
|
|
558
|
-
}
|
|
559
|
-
// Validate freelists.
|
|
560
|
-
for (int i = 0; i < MAX_FREELIST_INDEX; i++) {
|
|
561
|
-
FreeInfo* curr = freeLists[i];
|
|
562
|
-
if (!curr)
|
|
563
|
-
continue;
|
|
564
|
-
FreeInfo* prev = nullptr;
|
|
565
|
-
while (curr) {
|
|
566
|
-
assert(curr->prev() == prev);
|
|
567
|
-
Region* region = fromFreeInfo(curr);
|
|
568
|
-
// Regions must be in the main list.
|
|
569
|
-
EM_ASM(
|
|
570
|
-
{
|
|
571
|
-
var region = $0;
|
|
572
|
-
assert(Module.emmallocDebug.regions[region], "free region not in list");
|
|
573
|
-
},
|
|
574
|
-
region);
|
|
575
|
-
assert(getAfter(region) <= end);
|
|
576
|
-
assert(!region->getUsed());
|
|
577
|
-
assert(getMaxPayload(region) >= getMinSizeForFreeListIndex(i));
|
|
578
|
-
assert(getMaxPayload(region) < getMaxSizeForFreeListIndex(i));
|
|
579
|
-
prev = curr;
|
|
580
|
-
curr = curr->next();
|
|
581
|
-
}
|
|
582
|
-
}
|
|
583
|
-
// Validate lastRegion.
|
|
584
|
-
if (lastRegion) {
|
|
585
|
-
assert(lastRegion->next() == nullptr);
|
|
586
|
-
assert(getAfter(lastRegion) <= end);
|
|
587
|
-
assert(firstRegion);
|
|
588
|
-
} else {
|
|
589
|
-
assert(!firstRegion);
|
|
590
|
-
}
|
|
591
|
-
}
|
|
592
|
-
|
|
593
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
594
|
-
// For testing purposes, dump out a region.
|
|
595
|
-
static void emmalloc_dump_region(Region* region) {
|
|
596
|
-
EM_ASM({out(" [" + $0 + " - " + $1 + " (" + $2 + " bytes" + ($3 ? ", used" : "") + ")]")},
|
|
597
|
-
region, getAfter(region), getMaxPayload(region), region->getUsed());
|
|
598
|
-
}
|
|
599
|
-
|
|
600
|
-
// For testing purposes, dumps out the entire global state.
|
|
601
|
-
static void emmalloc_dump_all() {
|
|
602
|
-
EM_ASM({out(" emmalloc_dump_all:\n sbrk(0) = " + $0)}, sbrk(0));
|
|
603
|
-
Region* curr = firstRegion;
|
|
604
|
-
EM_ASM({out(" all regions:")});
|
|
605
|
-
while (curr) {
|
|
606
|
-
emmalloc_dump_region(curr);
|
|
607
|
-
curr = curr->next();
|
|
608
|
-
}
|
|
609
|
-
for (int i = 0; i < MAX_FREELIST_INDEX; i++) {
|
|
610
|
-
FreeInfo* curr = freeLists[i];
|
|
611
|
-
if (!curr)
|
|
612
|
-
continue;
|
|
613
|
-
EM_ASM({out(" freeList[" + $0 + "] sizes: [" + $1 + ", " + $2 + ")")}, i,
|
|
614
|
-
getMinSizeForFreeListIndex(i), getMaxSizeForFreeListIndex(i));
|
|
615
|
-
FreeInfo* prev = nullptr;
|
|
616
|
-
while (curr) {
|
|
617
|
-
Region* region = fromFreeInfo(curr);
|
|
618
|
-
emmalloc_dump_region(region);
|
|
619
|
-
prev = curr;
|
|
620
|
-
curr = curr->next();
|
|
621
|
-
}
|
|
622
|
-
}
|
|
623
|
-
}
|
|
624
|
-
#endif // EMMALLOC_DEBUG_LOG
|
|
625
|
-
#endif // EMMALLOC_DEBUG
|
|
626
|
-
|
|
627
|
-
// When we free something of size 100, we put it in the
|
|
628
|
-
// freelist for items of size 64 and above. Then when something
|
|
629
|
-
// needs 64 bytes, we know the things in that list are all
|
|
630
|
-
// suitable. However, note that this means that if we then
|
|
631
|
-
// try to allocate something of size 100 once more, we will
|
|
632
|
-
// look in the freelist for items of size 128 or more (again,
|
|
633
|
-
// so we know all items in the list are big enough), which means
|
|
634
|
-
// we may not reuse the perfect region we just freed. It's hard
|
|
635
|
-
// to do a perfect job on that without a lot more work (memory
|
|
636
|
-
// and/or time), so instead, we use a simple heuristic to look
|
|
637
|
-
// at the one-lower freelist, which *may* contain something
|
|
638
|
-
// big enough for us. We look at just a few elements, but that is
|
|
639
|
-
// enough if we are alloating/freeing a lot of such elements
|
|
640
|
-
// (since the recent items are there).
|
|
641
|
-
// TODO: Consider more optimizations, e.g. slow bubbling of larger
|
|
642
|
-
// items in each freelist towards the root, or even actually
|
|
643
|
-
// keep it sorted by size.
|
|
644
|
-
// Consider also what happens to the very largest allocations,
|
|
645
|
-
// 2^32 - a little. That goes in the freelist of items of size
|
|
646
|
-
// 2^31 or less. >2 tries is enough to go through that entire
|
|
647
|
-
// freelist because even 2 can't exist, they'd exhaust memory
|
|
648
|
-
// (together with metadata overhead). So we should be able to
|
|
649
|
-
// free and allocate such largest allocations (barring fragmentation
|
|
650
|
-
// happening in between).
|
|
651
|
-
static const size_t SPECULATIVE_FREELIST_TRIES = 32;
|
|
652
|
-
|
|
653
|
-
static Region* tryFromFreeList(size_t size) {
|
|
654
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
655
|
-
EM_ASM({out(" emmalloc.tryFromFreeList " + $0)}, size);
|
|
656
|
-
#endif
|
|
657
|
-
// Look in the freelist of items big enough for us.
|
|
658
|
-
size_t index = getBigEnoughFreeListIndex(size);
|
|
659
|
-
// If we *may* find an item in the index one
|
|
660
|
-
// below us, try that briefly in constant time;
|
|
661
|
-
// see comment on algorithm on the declaration of
|
|
662
|
-
// SPECULATIVE_FREELIST_TRIES.
|
|
663
|
-
if (index > MIN_FREELIST_INDEX && size < getMinSizeForFreeListIndex(index)) {
|
|
664
|
-
FreeInfo* freeInfo = freeLists[index - 1];
|
|
665
|
-
size_t tries = 0;
|
|
666
|
-
while (freeInfo && tries < SPECULATIVE_FREELIST_TRIES) {
|
|
667
|
-
Region* region = fromFreeInfo(freeInfo);
|
|
668
|
-
if (getMaxPayload(region) >= size) {
|
|
669
|
-
// Success, use it
|
|
670
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
671
|
-
EM_ASM({out(" emmalloc.tryFromFreeList try succeeded")});
|
|
672
|
-
#endif
|
|
673
|
-
return useFreeInfo(freeInfo, size);
|
|
674
|
-
}
|
|
675
|
-
freeInfo = freeInfo->next();
|
|
676
|
-
tries++;
|
|
677
|
-
}
|
|
678
|
-
}
|
|
679
|
-
// Note that index may start out at MAX_FREELIST_INDEX,
|
|
680
|
-
// if it is almost the largest allocation possible,
|
|
681
|
-
// 2^32 minus a little. In that case, looking in the lower
|
|
682
|
-
// freelist is our only hope, and it can contain at most 1
|
|
683
|
-
// element (see discussion above), so we will find it if
|
|
684
|
-
// it's there). If not, and we got here, we'll never enter
|
|
685
|
-
// the loop at all.
|
|
686
|
-
while (index < MAX_FREELIST_INDEX) {
|
|
687
|
-
FreeInfo* freeInfo = freeLists[index];
|
|
688
|
-
if (freeInfo) {
|
|
689
|
-
// We found one, use it.
|
|
690
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
691
|
-
EM_ASM({out(" emmalloc.tryFromFreeList had item to use")});
|
|
692
|
-
#endif
|
|
693
|
-
return useFreeInfo(freeInfo, size);
|
|
694
|
-
}
|
|
695
|
-
// Look in a freelist of larger elements.
|
|
696
|
-
// TODO This does increase the risk of fragmentation, though,
|
|
697
|
-
// and maybe the iteration adds runtime overhead.
|
|
698
|
-
index++;
|
|
699
|
-
}
|
|
700
|
-
// No luck, no free list.
|
|
701
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
702
|
-
EM_ASM({out(" emmalloc.tryFromFreeList no luck")});
|
|
703
|
-
#endif
|
|
704
|
-
return nullptr;
|
|
705
|
-
}
|
|
706
|
-
|
|
707
|
-
// Allocate a completely new region.
|
|
708
|
-
static Region* allocateRegion(size_t size) {
|
|
709
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
710
|
-
EM_ASM({out(" emmalloc.allocateRegion")});
|
|
711
|
-
#endif
|
|
712
|
-
size_t sbrkSize = METADATA_SIZE + alignUp(size);
|
|
713
|
-
void* ptr = sbrk(sbrkSize);
|
|
714
|
-
if (ptr == (void*)-1) {
|
|
715
|
-
// sbrk() failed, we failed.
|
|
716
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
717
|
-
EM_ASM({out(" emmalloc.allocateRegion sbrk failure")});
|
|
718
|
-
#endif
|
|
719
|
-
return nullptr;
|
|
720
|
-
}
|
|
721
|
-
// sbrk() results might not be aligned. We assume single-threaded sbrk()
|
|
722
|
-
// access here in order to fix that up
|
|
723
|
-
void* fixedPtr = alignUpPointer(ptr);
|
|
724
|
-
if (ptr != fixedPtr) {
|
|
725
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
726
|
-
EM_ASM({out(" emmalloc.allocateRegion fixing alignment")});
|
|
727
|
-
#endif
|
|
728
|
-
size_t extra = (char*)fixedPtr - (char*)ptr;
|
|
729
|
-
void* extraPtr = sbrk(extra);
|
|
730
|
-
if (extraPtr == (void*)-1) {
|
|
731
|
-
// sbrk() failed, we failed.
|
|
732
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
733
|
-
EM_ASM({out(" emmalloc.newAllocation sbrk failure")});
|
|
734
|
-
;
|
|
735
|
-
#endif
|
|
736
|
-
return nullptr;
|
|
737
|
-
}
|
|
738
|
-
// Verify the sbrk() assumption, no one else should call it.
|
|
739
|
-
// If this fails, it means we also leak the previous allocation,
|
|
740
|
-
// so we don't even try to handle it.
|
|
741
|
-
assert((char*)extraPtr == (char*)ptr + sbrkSize);
|
|
742
|
-
// After the first allocation, everything must remain aligned forever.
|
|
743
|
-
assert(!lastRegion);
|
|
744
|
-
// We now have a contiguous block of memory from ptr to
|
|
745
|
-
// ptr + sbrkSize + fixedPtr - ptr = fixedPtr + sbrkSize.
|
|
746
|
-
// fixedPtr is aligned and starts a region of the right
|
|
747
|
-
// amount of memory.
|
|
748
|
-
}
|
|
749
|
-
Region* region = (Region*)fixedPtr;
|
|
750
|
-
// Apply globally
|
|
751
|
-
if (!lastRegion) {
|
|
752
|
-
assert(!firstRegion);
|
|
753
|
-
firstRegion = region;
|
|
754
|
-
lastRegion = region;
|
|
755
|
-
} else {
|
|
756
|
-
assert(firstRegion);
|
|
757
|
-
region->prev() = lastRegion;
|
|
758
|
-
lastRegion = region;
|
|
759
|
-
}
|
|
760
|
-
// Success, we have new memory
|
|
761
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
762
|
-
EM_ASM({out(" emmalloc.newAllocation success")});
|
|
763
|
-
;
|
|
764
|
-
#endif
|
|
765
|
-
region->setTotalSize(sbrkSize);
|
|
766
|
-
region->setUsed(1);
|
|
767
|
-
return region;
|
|
768
|
-
}
|
|
769
|
-
|
|
770
|
-
// Allocate new memory. This may reuse part of the last region, only
|
|
771
|
-
// allocating what we need.
|
|
772
|
-
static Region* newAllocation(size_t size) {
|
|
773
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
774
|
-
EM_ASM({out(" emmalloc.newAllocation " + $0)}, size);
|
|
775
|
-
#endif
|
|
776
|
-
assert(size > 0);
|
|
777
|
-
if (lastRegion) {
|
|
778
|
-
// If the last region is free, we can extend it rather than leave it
|
|
779
|
-
// as fragmented free spce between allocated regions. This is also
|
|
780
|
-
// more efficient and simple as well.
|
|
781
|
-
if (!lastRegion->getUsed()) {
|
|
782
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
783
|
-
EM_ASM({out(" emmalloc.newAllocation extending lastRegion at " + $0)}, lastRegion);
|
|
784
|
-
#endif
|
|
785
|
-
// Remove it first, before we adjust the size (which affects which list
|
|
786
|
-
// it should be in). Also mark it as used so extending it doesn't do
|
|
787
|
-
// freelist computations; we'll undo that if we fail.
|
|
788
|
-
lastRegion->setUsed(1);
|
|
789
|
-
removeFromFreeList(lastRegion);
|
|
790
|
-
if (extendLastRegion(size)) {
|
|
791
|
-
return lastRegion;
|
|
792
|
-
} else {
|
|
793
|
-
lastRegion->setUsed(0);
|
|
794
|
-
return nullptr;
|
|
795
|
-
}
|
|
796
|
-
}
|
|
797
|
-
}
|
|
798
|
-
// Otherwise, get a new region.
|
|
799
|
-
return allocateRegion(size);
|
|
800
|
-
}
|
|
801
|
-
|
|
802
|
-
// Internal mirror of public API.
|
|
803
|
-
|
|
804
|
-
static void* emmalloc_malloc(size_t size) {
|
|
805
|
-
// malloc() spec defines malloc(0) => nullptr.
|
|
806
|
-
if (size == 0)
|
|
807
|
-
return nullptr;
|
|
808
|
-
// Look in the freelist first.
|
|
809
|
-
Region* region = tryFromFreeList(size);
|
|
810
|
-
if (!region) {
|
|
811
|
-
// Allocate some new memory otherwise.
|
|
812
|
-
region = newAllocation(size);
|
|
813
|
-
if (!region) {
|
|
814
|
-
// We failed to allocate, sadly.
|
|
815
|
-
return nullptr;
|
|
816
|
-
}
|
|
817
|
-
}
|
|
818
|
-
assert(getAfter(region) <= sbrk(0));
|
|
819
|
-
return getPayload(region);
|
|
820
|
-
}
|
|
821
|
-
|
|
822
|
-
// Core free: release a payload pointer previously returned by
// emmalloc_malloc and friends. free(nullptr) is a no-op per spec.
static void emmalloc_free(void* ptr) {
  if (ptr) {
    stopUsing(fromPayload(ptr));
  }
}
|
|
827
|
-
|
|
828
|
-
static void* emmalloc_calloc(size_t nmemb, size_t size) {
|
|
829
|
-
// TODO If we know no one else is using sbrk(), we can assume that new
|
|
830
|
-
// memory allocations are zero'd out.
|
|
831
|
-
void* ptr = emmalloc_malloc(nmemb * size);
|
|
832
|
-
if (!ptr)
|
|
833
|
-
return nullptr;
|
|
834
|
-
memset(ptr, 0, nmemb * size);
|
|
835
|
-
return ptr;
|
|
836
|
-
}
|
|
837
|
-
|
|
838
|
-
static void* emmalloc_realloc(void* ptr, size_t size) {
|
|
839
|
-
if (!ptr)
|
|
840
|
-
return emmalloc_malloc(size);
|
|
841
|
-
if (!size) {
|
|
842
|
-
emmalloc_free(ptr);
|
|
843
|
-
return nullptr;
|
|
844
|
-
}
|
|
845
|
-
Region* region = fromPayload(ptr);
|
|
846
|
-
assert(region->getUsed());
|
|
847
|
-
// Grow it. First, maybe we can do simple growth in the current region.
|
|
848
|
-
if (size <= getMaxPayload(region)) {
|
|
849
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
850
|
-
EM_ASM({out(" emmalloc.emmalloc_realloc use existing payload space")});
|
|
851
|
-
#endif
|
|
852
|
-
region->setUsed(1);
|
|
853
|
-
// There might be enough left over to split out now.
|
|
854
|
-
possiblySplitRemainder(region, size);
|
|
855
|
-
return ptr;
|
|
856
|
-
}
|
|
857
|
-
// Perhaps right after us is free space we can merge to us.
|
|
858
|
-
Region* next = region->next();
|
|
859
|
-
if (next && !next->getUsed()) {
|
|
860
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
861
|
-
EM_ASM({out(" emmalloc.emmalloc_realloc merge in next")});
|
|
862
|
-
#endif
|
|
863
|
-
removeFromFreeList(next);
|
|
864
|
-
region->incTotalSize(next->getTotalSize());
|
|
865
|
-
if (next != lastRegion) {
|
|
866
|
-
next->next()->prev() = region;
|
|
867
|
-
} else {
|
|
868
|
-
lastRegion = region;
|
|
869
|
-
}
|
|
870
|
-
}
|
|
871
|
-
// We may now be big enough.
|
|
872
|
-
if (size <= getMaxPayload(region)) {
|
|
873
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
874
|
-
EM_ASM({out(" emmalloc.emmalloc_realloc use existing payload space after merge")});
|
|
875
|
-
#endif
|
|
876
|
-
region->setUsed(1);
|
|
877
|
-
// There might be enough left over to split out now.
|
|
878
|
-
possiblySplitRemainder(region, size);
|
|
879
|
-
return ptr;
|
|
880
|
-
}
|
|
881
|
-
// We still aren't big enough. If we are the last, we can extend ourselves - however, that
|
|
882
|
-
// definitely means increasing the total sbrk(), and there may be free space lower down, so
|
|
883
|
-
// this is a tradeoff between speed (avoid the memcpy) and space. It's not clear what's
|
|
884
|
-
// better here; for now, check for free space first.
|
|
885
|
-
Region* newRegion = tryFromFreeList(size);
|
|
886
|
-
if (!newRegion && region == lastRegion) {
|
|
887
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
888
|
-
EM_ASM({out(" emmalloc.emmalloc_realloc extend last region")});
|
|
889
|
-
#endif
|
|
890
|
-
if (extendLastRegion(size)) {
|
|
891
|
-
// It worked. We don't need the formerly free region.
|
|
892
|
-
if (newRegion) {
|
|
893
|
-
stopUsing(newRegion);
|
|
894
|
-
}
|
|
895
|
-
return ptr;
|
|
896
|
-
} else {
|
|
897
|
-
// If this failed, we can also try the normal
|
|
898
|
-
// malloc path, which may find space in a freelist;
|
|
899
|
-
// fall through.
|
|
900
|
-
}
|
|
901
|
-
}
|
|
902
|
-
// We need new space, and a copy
|
|
903
|
-
if (!newRegion) {
|
|
904
|
-
newRegion = newAllocation(size);
|
|
905
|
-
if (!newRegion)
|
|
906
|
-
return nullptr;
|
|
907
|
-
}
|
|
908
|
-
memcpy(getPayload(newRegion), getPayload(region),
|
|
909
|
-
size < getMaxPayload(region) ? size : getMaxPayload(region));
|
|
910
|
-
stopUsing(region);
|
|
911
|
-
return getPayload(newRegion);
|
|
912
|
-
}
|
|
913
|
-
|
|
914
|
-
static struct mallinfo emmalloc_mallinfo() {
|
|
915
|
-
struct mallinfo info;
|
|
916
|
-
info.arena = 0;
|
|
917
|
-
info.ordblks = 0;
|
|
918
|
-
info.smblks = 0;
|
|
919
|
-
info.hblks = 0;
|
|
920
|
-
info.hblkhd = 0;
|
|
921
|
-
info.usmblks = 0;
|
|
922
|
-
info.fsmblks = 0;
|
|
923
|
-
info.uordblks = 0;
|
|
924
|
-
info.ordblks = 0;
|
|
925
|
-
info.keepcost = 0;
|
|
926
|
-
if (firstRegion) {
|
|
927
|
-
info.arena = (char*)sbrk(0) - (char*)firstRegion;
|
|
928
|
-
Region* region = firstRegion;
|
|
929
|
-
while (region) {
|
|
930
|
-
if (region->getUsed()) {
|
|
931
|
-
info.uordblks += getMaxPayload(region);
|
|
932
|
-
} else {
|
|
933
|
-
info.fordblks += getMaxPayload(region);
|
|
934
|
-
info.ordblks++;
|
|
935
|
-
}
|
|
936
|
-
region = region->next();
|
|
937
|
-
}
|
|
938
|
-
}
|
|
939
|
-
return info;
|
|
940
|
-
}
|
|
941
|
-
|
|
942
|
-
// An aligned allocation. This is a rarer allocation path, and is
|
|
943
|
-
// much less optimized - the assumption is that it is used for few
|
|
944
|
-
// large allocations.
|
|
945
|
-
static void* alignedAllocation(size_t size, size_t alignment) {
|
|
946
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
947
|
-
EM_ASM({out(" emmalloc.alignedAllocation")});
|
|
948
|
-
#endif
|
|
949
|
-
assert(alignment > ALIGNMENT);
|
|
950
|
-
assert(alignment % ALIGNMENT == 0);
|
|
951
|
-
// Try from the freelist first. We may be lucky and get something
|
|
952
|
-
// properly aligned.
|
|
953
|
-
// TODO: Perhaps look more carefully, checking alignment as we go,
|
|
954
|
-
// using multiple tries?
|
|
955
|
-
Region* fromFreeList = tryFromFreeList(size + alignment);
|
|
956
|
-
if (fromFreeList && size_t(getPayload(fromFreeList)) % alignment == 0) {
|
|
957
|
-
// Luck has favored us.
|
|
958
|
-
return getPayload(fromFreeList);
|
|
959
|
-
} else if (fromFreeList) {
|
|
960
|
-
stopUsing(fromFreeList);
|
|
961
|
-
}
|
|
962
|
-
// No luck from free list, so do a new allocation which we can
|
|
963
|
-
// force to be aligned.
|
|
964
|
-
#ifdef EMMALLOC_DEBUG_LOG
|
|
965
|
-
EM_ASM({out(" emmalloc.alignedAllocation new allocation")});
|
|
966
|
-
#endif
|
|
967
|
-
// Ensure a region before us, which we may enlarge as necessary.
|
|
968
|
-
if (!lastRegion) {
|
|
969
|
-
// This allocation is not freeable, but there is one at most.
|
|
970
|
-
void* prev = emmalloc_malloc(MIN_REGION_SIZE);
|
|
971
|
-
if (!prev)
|
|
972
|
-
return nullptr;
|
|
973
|
-
}
|
|
974
|
-
// See if we need to enlarge the previous region in order to get
|
|
975
|
-
// us properly aligned. Take into account that our region will
|
|
976
|
-
// start with METADATA_SIZE of space.
|
|
977
|
-
size_t address = size_t(getAfter(lastRegion)) + METADATA_SIZE;
|
|
978
|
-
size_t error = address % alignment;
|
|
979
|
-
if (error != 0) {
|
|
980
|
-
// E.g. if we want alignment 24, and have address 16, then we
|
|
981
|
-
// need to add 8.
|
|
982
|
-
size_t extra = alignment - error;
|
|
983
|
-
assert(extra % ALIGNMENT == 0);
|
|
984
|
-
if (!extendLastRegion(getMaxPayload(lastRegion) + extra)) {
|
|
985
|
-
return nullptr;
|
|
986
|
-
}
|
|
987
|
-
address = size_t(getAfter(lastRegion)) + METADATA_SIZE;
|
|
988
|
-
error = address % alignment;
|
|
989
|
-
assert(error == 0);
|
|
990
|
-
}
|
|
991
|
-
Region* region = allocateRegion(size);
|
|
992
|
-
if (!region)
|
|
993
|
-
return nullptr;
|
|
994
|
-
void* ptr = getPayload(region);
|
|
995
|
-
assert(size_t(ptr) == address);
|
|
996
|
-
assert(size_t(ptr) % alignment == 0);
|
|
997
|
-
return ptr;
|
|
998
|
-
}
|
|
999
|
-
|
|
1000
|
-
// True when `size` is a multiple of sizeof(size_t). The original
// hard-coded `(size & 3) == 0`, which assumes a 4-byte size_t; using
// sizeof(size_t) is identical on wasm32 but correct on any platform.
static int isMultipleOfSizeT(size_t size) { return size % sizeof(size_t) == 0; }
|
|
1001
|
-
|
|
1002
|
-
static int emmalloc_posix_memalign(void** memptr, size_t alignment, size_t size) {
|
|
1003
|
-
*memptr = nullptr;
|
|
1004
|
-
if (!isPowerOf2(alignment) || !isMultipleOfSizeT(alignment)) {
|
|
1005
|
-
return 22; // EINVAL
|
|
1006
|
-
}
|
|
1007
|
-
if (size == 0) {
|
|
1008
|
-
return 0;
|
|
1009
|
-
}
|
|
1010
|
-
if (alignment <= ALIGNMENT) {
|
|
1011
|
-
// Use normal allocation path, which will provide that alignment.
|
|
1012
|
-
*memptr = emmalloc_malloc(size);
|
|
1013
|
-
} else {
|
|
1014
|
-
// Use more sophisticaed alignment-specific allocation path.
|
|
1015
|
-
*memptr = alignedAllocation(size, alignment);
|
|
1016
|
-
}
|
|
1017
|
-
if (!*memptr) {
|
|
1018
|
-
return 12; // ENOMEM
|
|
1019
|
-
}
|
|
1020
|
-
return 0;
|
|
1021
|
-
}
|
|
1022
|
-
|
|
1023
|
-
// Core memalign: thin adapter over emmalloc_posix_memalign that maps
// any error to a nullptr return.
static void* emmalloc_memalign(size_t alignment, size_t size) {
  void* ptr;
  int err = emmalloc_posix_memalign(&ptr, alignment, size);
  // On failure ptr has already been set to nullptr by posix_memalign.
  return err == 0 ? ptr : nullptr;
}
|
|
1030
|
-
|
|
1031
|
-
// Public API. This is a thin wrapper around our mirror of it, adding
|
|
1032
|
-
// logging and validation when debugging. Otherwise it should inline
|
|
1033
|
-
// out.
|
|
1034
|
-
|
|
1035
|
-
extern "C" {
|
|
1036
|
-
|
|
1037
|
-
EMMALLOC_EXPORT
// Public malloc: wraps emmalloc_malloc with optional debug logging and
// heap validation before and after the call.
void* malloc(size_t size) {
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.malloc " + $0)}, size);
#endif
  emmalloc_validate_all();
#ifdef EMMALLOC_DEBUG_LOG
  emmalloc_dump_all();
#endif
#endif
  void* ptr = emmalloc_malloc(size);
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  // Log the result, then dump heap state for inspection.
  EM_ASM({out("emmalloc.malloc ==> " + $0)}, ptr);
  emmalloc_dump_all();
#endif
  emmalloc_validate_all();
#endif
  return ptr;
}
|
|
1060
|
-
|
|
1061
|
-
EMMALLOC_EXPORT
// Public free: wraps emmalloc_free with optional debug logging and
// heap validation before and after the call.
void free(void* ptr) {
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.free " + $0)}, ptr);
#endif
  emmalloc_validate_all();
#ifdef EMMALLOC_DEBUG_LOG
  emmalloc_dump_all();
#endif
#endif
  emmalloc_free(ptr);
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  emmalloc_dump_all();
#endif
  emmalloc_validate_all();
#endif
}
|
|
1080
|
-
|
|
1081
|
-
EMMALLOC_EXPORT
// Public calloc: wraps emmalloc_calloc with optional debug logging and
// heap validation before and after the call.
void* calloc(size_t nmemb, size_t size) {
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.calloc " + $0)}, size);
#endif
  emmalloc_validate_all();
#ifdef EMMALLOC_DEBUG_LOG
  emmalloc_dump_all();
#endif
#endif
  void* ptr = emmalloc_calloc(nmemb, size);
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.calloc ==> " + $0)}, ptr);
  emmalloc_dump_all();
#endif
  emmalloc_validate_all();
#endif
  return ptr;
}
|
|
1104
|
-
|
|
1105
|
-
EMMALLOC_EXPORT
// Public realloc: wraps emmalloc_realloc with optional debug logging
// and heap validation before and after the call.
void* realloc(void* ptr, size_t size) {
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.realloc " + [ $0, $1 ])}, ptr, size);
#endif
  emmalloc_validate_all();
#ifdef EMMALLOC_DEBUG_LOG
  emmalloc_dump_all();
#endif
#endif
  void* newPtr = emmalloc_realloc(ptr, size);
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.realloc ==> " + $0)}, newPtr);
  emmalloc_dump_all();
#endif
  emmalloc_validate_all();
#endif
  return newPtr;
}
|
|
1128
|
-
|
|
1129
|
-
EMMALLOC_EXPORT
// Public posix_memalign: wraps emmalloc_posix_memalign with optional
// debug logging and heap validation before and after the call.
int posix_memalign(void** memptr, size_t alignment, size_t size) {
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.posix_memalign " + [ $0, $1, $2 ])}, memptr, alignment, size);
#endif
  emmalloc_validate_all();
#ifdef EMMALLOC_DEBUG_LOG
  emmalloc_dump_all();
#endif
#endif
  int result = emmalloc_posix_memalign(memptr, alignment, size);
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.posix_memalign ==> " + $0)}, result);
  emmalloc_dump_all();
#endif
  emmalloc_validate_all();
#endif
  return result;
}
|
|
1152
|
-
|
|
1153
|
-
EMMALLOC_EXPORT
// Public memalign: wraps emmalloc_memalign with optional debug logging
// and heap validation before and after the call.
void* memalign(size_t alignment, size_t size) {
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.memalign " + [ $0, $1 ])}, alignment, size);
#endif
  emmalloc_validate_all();
#ifdef EMMALLOC_DEBUG_LOG
  emmalloc_dump_all();
#endif
#endif
  void* ptr = emmalloc_memalign(alignment, size);
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.memalign ==> " + $0)}, ptr);
  emmalloc_dump_all();
#endif
  emmalloc_validate_all();
#endif
  return ptr;
}
|
|
1176
|
-
|
|
1177
|
-
EMMALLOC_EXPORT
// Public mallinfo: wraps emmalloc_mallinfo with optional debug logging
// and heap validation before the call.
struct mallinfo mallinfo() {
#ifdef EMMALLOC_DEBUG
#ifdef EMMALLOC_DEBUG_LOG
  EM_ASM({out("emmalloc.mallinfo")});
#endif
  emmalloc_validate_all();
#ifdef EMMALLOC_DEBUG_LOG
  emmalloc_dump_all();
#endif
#endif
  return emmalloc_mallinfo();
}
|
|
1190
|
-
|
|
1191
|
-
// Under Emscripten, also export malloc/free/memalign under the alias
// names emscripten_builtin_malloc / emscripten_builtin_free /
// emscripten_builtin_memalign. Applications can then override malloc
// and free with their own wrappers while still calling through to the
// original implementations in this file - an easy hook point for
// instrumenting memory allocation.
#if defined(__EMSCRIPTEN__)
extern __typeof(malloc) emscripten_builtin_malloc __attribute__((alias("malloc")));
extern __typeof(free) emscripten_builtin_free __attribute__((alias("free")));
extern __typeof(memalign) emscripten_builtin_memalign __attribute__((alias("memalign")));
#endif
|
|
1201
|
-
|
|
1202
|
-
} // extern "C"
|