@grain/stdlib 0.6.3 → 0.6.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -0
- package/LICENSE +1 -1
- package/array.gr +2 -0
- package/bytes.gr +38 -7
- package/float32.gr +68 -0
- package/float32.md +164 -0
- package/float64.gr +87 -16
- package/float64.md +164 -0
- package/hash.gr +2 -0
- package/int16.gr +85 -0
- package/int16.md +168 -0
- package/int32.gr +107 -0
- package/int32.md +235 -0
- package/int64.gr +107 -0
- package/int64.md +235 -0
- package/int8.gr +1 -1
- package/int8.md +1 -1
- package/map.gr +3 -2
- package/package.json +1 -1
- package/pervasives.md +10 -3
- package/runtime/malloc.gr +269 -135
- package/runtime/malloc.md +3 -10
- package/runtime/numbers.gr +181 -100
- package/runtime/string.gr +1 -0
- package/runtime/string.md +10 -3
- package/set.gr +0 -5
- package/string.gr +291 -72
- package/string.md +108 -0
- package/wasi/file.gr +5 -0
- package/wasi/process.gr +2 -1
package/map.gr
CHANGED
@@ -436,13 +436,14 @@ provide let toArray = (map: Map<a, b>) => {
     use WasmI32.{ (+) as addWasmI32 }
     // Assign the values into the array.
     // We store them directly to prevent GC on uninitialized array data.
-    let
+    let arrayPtr = WasmI32.fromGrain(array)
     let item = (key, value)
     WasmI32.store(
-      addWasmI32(
+      addWasmI32(arrayPtr, untagSimpleNumber(i) * 4n),
       Memory.incRef(WasmI32.fromGrain(item)),
       8n
     )
+    ignore(array)
     i + 1
   }
   reduce(reducer, 0, map)
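The hunk above touches the unsafe interior of `Map.toArray`. For orientation, a minimal usage sketch of the function being patched, assuming the `Map.make`, `Map.set`, and `Map.toArray` signatures of this stdlib line:

```grain
module MapToArraySketch

from "map" include Map

let scores = Map.make()
Map.set("alice", 1, scores)
Map.set("bob", 2, scores)

// toArray produces an array of (key, value) tuples; element order is unspecified.
print(Map.toArray(scores)) // e.g. [> ("alice", 1), ("bob", 2)]
```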
package/package.json
CHANGED
package/pervasives.md
CHANGED
@@ -855,9 +855,16 @@ Returns:

 ### Pervasives.**print**

-<details
-<summary
-
+<details>
+<summary>Added in <code>0.1.0</code></summary>
+<table>
+<thead>
+<tr><th>version</th><th>changes</th></tr>
+</thead>
+<tbody>
+<tr><td><code>0.6.0</code></td><td>Added support for custom suffixes</td></tr>
+</tbody>
+</table>
 </details>

 ```grain
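The table row added above records the 0.6.0 change to `print`. A small sketch of what the custom-suffix form looks like in practice (the optional parameter is assumed here to be named `suffix`, defaulting to a newline):

```grain
module PrintSuffixSketch

// Default behaviour: a trailing newline is appended.
print("done")
// Custom suffix: override the trailing newline.
print("Loading", suffix="... ")
print("finished") // output: "Loading... finished" followed by a newline
```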
package/runtime/malloc.gr
CHANGED
@@ -3,8 +3,6 @@ module Malloc

 /*
  * This module implements a generic memory allocator.
- * The algorithm is quite simple, being based on the memory allocator
- * from pages 185-188 of K&R C (2nd edition).
  */

 from "runtime/unsafe/wasmi32" include WasmI32
@@ -20,6 +18,8 @@ use WasmI32.{
   (>>>),
   (==),
   (!=),
+  (&),
+  (^),
 }
 from "runtime/exception" include Exception

@@ -32,44 +32,64 @@ primitive (||) = "@or"

 primitive heapStart = "@heap.start"

-/* UNDERSTANDING THE STRUCTURE OF THE FREE
- * The original K&R definition for the free list entry type was the following:
+/* UNDERSTANDING THE STRUCTURE OF THE FREE LISTS
  *
- *
- *
- *
- *
- *
- *
- *
+ * `malloc` allocates memory and `free` releases this memory. Two separate free
+ * lists are maintained, one for small blocks of 64 bytes, and one for larger
+ * blocks of multiples of 64 bytes. Each block has an 8-byte header and 8-byte
+ * footer to keep track of block sizes and maintain the free list.
+ *
+ * Most allocations in programs are small, so the separate free lists allow us
+ * to implement `malloc` and `free` in O(1) for small allocations and O(n)
+ * `malloc` and O(1) `free` for large allocations, where `n` is the size of the
+ * free list for large blocks.
+ *
+ * The small blocks are able to service:
+ * - Numbers (with the exception of large BigInts/Rationals)
+ * - Tuples/Arrays up to 8 elements
+ * - Records up to 6 elements
+ * - Variants up to 5 elements
+ * - Closures up to 6 elements
+ * - Bytes/Strings up to length 32
+ *
+ * Blocks in memory look like this:
  *
- *
- *
- *
+ *  8 bytes               8 bytes          64n - 16 bytes    8 bytes          8 bytes
+ * ┌─────────────────────┬────────────────┬─────────────────┬────────────────┬─────────────────────┐
+ * │ <prev block footer> │ <block header> │ <block content> │ <block footer> │ <next block header> │
+ * └─────────────────────┴────────────────┴─────────────────┴────────────────┴─────────────────────┘
  *
- *
- *
- *
- *
- *
+ * Block headers look like this:
+ * ┌───────────────────────┬──────────────┐
+ * │ <prev free block ptr> │ <block size> │
+ * └───────────────────────┴──────────────┘
+ *
+ * Block footers look like this:
+ * ┌───────────────────────┬──────────────┐
+ * │ <next free block ptr> │ <block size> │
+ * └───────────────────────┴──────────────┘
+ *
+ * The size is kept in the header and footer to allow us to quickly combine
+ * free blocks when blocks are freed.
+ *
+ * Pointers to the previous/next free blocks give us doubly-linked free lists,
+ * which makes it possible to remove blocks from the free list in constant
+ * time.
+ *
+ * A block is considered in use when the previous/next pointers are both zero.
  */

 /**
- *
+ * Pointers to the start of the free lists. This is always a multiple of
  * 8, with the exception of its initial value (used as a sentinel).
  */
-let mut
+let mut smallBlockFreePtr = 1n
+let mut largeBlockFreePtr = 1n

 /**
  * Size (in bytes) of entries in the free list.
  */
-let
-
-/**
- * log_2(mallocHeaderSize) (multiplication by the header
- * size is equivalent to left-shifting by this amount)
- */
-let logMallocHeaderSize = 3n
+let _HEADER_FOOTER_SIZE = 8n

 /**
  * The current size (in bytes) of the heap.
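To make the sizing rules in the comment above concrete, here is a plain-`Number` restatement (the allocator itself works on raw `WasmI32` values, and `unitsFor` is a hypothetical helper, not part of this module):

```grain
module UnitSizingSketch

from "number" include Number

// Every allocation pays an 8-byte header plus an 8-byte footer and is then
// rounded up to whole 64-byte units.
let unitSize = 64
let headerFooterSize = 8

let unitsFor = nbytes => Number.ceil((nbytes + headerFooterSize * 2) / unitSize)

print(unitsFor(24)) // 1 — a single small block
print(unitsFor(48)) // 1 — 48 + 16 = 64, still exactly one unit
print(unitsFor(49)) // 2 — now a large (multi-unit) block
```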
@@ -87,9 +107,9 @@ let _BASE = heapStart() + _RESERVED_RUNTIME_SPACE
 /**
  * The start pointer of the heap.
  */
-let _HEAP_START = _BASE +
+let _HEAP_START = _BASE + _HEADER_FOOTER_SIZE

-let
+let _PREV_NEXT_OFFSET = 0n
 let _SIZE_OFFSET = 4n

 /**
@@ -97,33 +117,65 @@ let _SIZE_OFFSET = 4n
  */
 let _PAGE_SIZE = 65536n

-
-
+/**
+ * Size (in bytes) of blocks allocated by the allocator
+ */
+let _UNIT_SIZE = 64n
+
+/**
+ * log_2(_UNIT_SIZE) (multiplication by the header
+ * size is equivalent to left-shifting by this amount)
+ */
+let logUnitSize = 6n
+
+let headerGetPrevious = (headerPtr: WasmI32) => {
+  WasmI32.load(headerPtr, _PREV_NEXT_OFFSET)
+}
+
+let headerSetPrevious = (headerPtr: WasmI32, val: WasmI32) => {
+  WasmI32.store(headerPtr, val, _PREV_NEXT_OFFSET)
+}
+
+let headerGetSize = (headerPtr: WasmI32) => {
+  WasmI32.load(headerPtr, _SIZE_OFFSET)
+}
+
+let headerSetSize = (headerPtr: WasmI32, val: WasmI32) => {
+  WasmI32.store(headerPtr, val, _SIZE_OFFSET)
 }

-
-
+// These functions are no different than the ones above, but exist to make the
+// code much easier to follow
+
+let footerGetNext = (footerPtr: WasmI32) => {
+  WasmI32.load(footerPtr, _PREV_NEXT_OFFSET)
+}
+
+let footerSetNext = (footerPtr: WasmI32, val: WasmI32) => {
+  WasmI32.store(footerPtr, val, _PREV_NEXT_OFFSET)
 }

-let
-  WasmI32.load(
+let footerGetSize = (footerPtr: WasmI32) => {
+  WasmI32.load(footerPtr, _SIZE_OFFSET)
 }

-let
-  WasmI32.store(
+let footerSetSize = (footerPtr: WasmI32, val: WasmI32) => {
+  WasmI32.store(footerPtr, val, _SIZE_OFFSET)
 }

 /**
- * Requests that the heap be grown by the given number of
+ * Requests that the heap be grown by the given number of units.
  *
- * @param
+ * @param nunits: The number of units requested
  * @returns The pointer to the beginning of the extended region if successful or -1 otherwise
  */
-let growHeap = (
+let growHeap = (nunits: WasmI32) => {
   let mut reqSize = 0n
   let mut reqResult = 0n
   let mut origSize = heapSize

+  let nbytes = nunits << logUnitSize
+
   // If the size has not been initialized, do so.
   if (heapSize == 0n) {
     heapSize = memorySize() * _PAGE_SIZE - _HEAP_START
@@ -131,8 +183,7 @@ let growHeap = (nbytes: WasmI32) => {
     // More bytes requested than the initial heap size,
     // so we need to request more anyway.
     reqSize = nbytes - heapSize
-    reqSize = reqSize >>> 16n
-    reqSize += 1n
+    reqSize = (reqSize + _PAGE_SIZE - 1n) >>> 16n
     reqResult = memoryGrow(reqSize)
     if (reqResult == -1n) {
       -1n
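The rewritten request size above is a round-up-to-whole-pages computation. A plain-`Number` illustration of the before/after behaviour (the runtime shifts a `WasmI32` right by 16, i.e. divides by the 64 KiB page size; `oldPages`/`newPages` are hypothetical helpers for this sketch only):

```grain
module PageRoundingSketch

from "number" include Number

let pageSize = 65536

// Old: floor-divide by the page size, then always add one more page.
let oldPages = bytes => Number.floor(bytes / pageSize) + 1
// New: round up to whole pages, so exact multiples no longer over-request.
let newPages = bytes => Number.ceil(bytes / pageSize)

print(oldPages(65536)) // 2
print(newPages(65536)) // 1
print(oldPages(65537)) // 2
print(newPages(65537)) // 2
```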
@@ -161,49 +212,105 @@ let growHeap = (nbytes: WasmI32) => {
   }
 }

+let removeFromFreeList = (blockPtr: WasmI32) => {
+  let blockSize = headerGetSize(blockPtr)
+  let blockFooterPtr = blockPtr + blockSize * _UNIT_SIZE - _HEADER_FOOTER_SIZE
+  let nextPtr = footerGetNext(blockFooterPtr)
+
+  let prevPtr = headerGetPrevious(blockPtr)
+  if (prevPtr == 1n) {
+    // this block was the start of the free list
+    if (blockSize == 1n) {
+      smallBlockFreePtr = nextPtr
+    } else {
+      largeBlockFreePtr = nextPtr
+    }
+
+    headerSetPrevious(nextPtr, prevPtr)
+  } else {
+    let prevSize = headerGetSize(prevPtr)
+    let prevFooterPtr = prevPtr + prevSize * _UNIT_SIZE - _HEADER_FOOTER_SIZE
+    footerSetNext(prevFooterPtr, nextPtr)
+    headerSetPrevious(nextPtr, prevPtr)
+  }
+}
+
 /**
  * Frees the given allocated pointer.
  *
  * @param ap: The pointer to free
  */
 provide let free = (ap: WasmI32) => {
-  let mut blockPtr = ap -
-  let mut
-
-
-
-
-
-
-
-
-
-
-
-
-
+  let mut blockPtr = ap - _HEADER_FOOTER_SIZE
+  let mut blockSize = headerGetSize(blockPtr)
+
+  let nextBlockPtr = blockPtr + blockSize * _UNIT_SIZE
+  if (headerGetPrevious(nextBlockPtr) > 0n) {
+    // adjacent block is free, so merge
+    removeFromFreeList(nextBlockPtr)
+
+    let nextBlockSize = headerGetSize(nextBlockPtr)
+    blockSize += nextBlockSize
+    headerSetSize(blockPtr, blockSize)
+
+    let footerPtr = blockPtr + blockSize * _UNIT_SIZE - _HEADER_FOOTER_SIZE
+    footerSetSize(footerPtr, blockSize)
+  }
+
+  let prevBlockFooterPtr = blockPtr - _HEADER_FOOTER_SIZE
+  if (footerGetNext(prevBlockFooterPtr) > 0n) {
+    // (prev) adjacent block is free, so merge
+    let prevBlockSize = footerGetSize(prevBlockFooterPtr)
+    let prevBlockPtr = blockPtr - prevBlockSize * _UNIT_SIZE
+
+    if (prevBlockSize == 1n) {
+      // Since we merged, this block is already a part of the free list. If
+      // the old block was size 1, it needs to be switched to the large list.
+      removeFromFreeList(prevBlockPtr)
     }

-
-
-
-
-
-
-
-
+    blockPtr = prevBlockPtr
+
+    blockSize += prevBlockSize
+    headerSetSize(blockPtr, blockSize)
+
+    let footerPtr = blockPtr + blockSize * _UNIT_SIZE - _HEADER_FOOTER_SIZE
+    footerSetSize(footerPtr, blockSize)
+    footerSetNext(footerPtr, footerGetNext(prevBlockFooterPtr))
+
+    if (prevBlockSize == 1n) {
+      if (largeBlockFreePtr != 1n) {
+        headerSetPrevious(largeBlockFreePtr, blockPtr)
+      }
+
+      let footerPtr = blockPtr + blockSize * _UNIT_SIZE - _HEADER_FOOTER_SIZE
+      footerSetNext(footerPtr, largeBlockFreePtr)
+      headerSetPrevious(blockPtr, 1n)
+
+      largeBlockFreePtr = blockPtr
     }
-
-
-
-
-
+  } else {
+    if (blockSize == 1n) {
+      if (smallBlockFreePtr != 1n) {
+        headerSetPrevious(smallBlockFreePtr, blockPtr)
+      }
+
+      let footerPtr = blockPtr + _UNIT_SIZE - _HEADER_FOOTER_SIZE
+      footerSetNext(footerPtr, smallBlockFreePtr)
+      headerSetPrevious(blockPtr, 1n)
+
+      smallBlockFreePtr = blockPtr
     } else {
-
+      if (largeBlockFreePtr != 1n) {
+        headerSetPrevious(largeBlockFreePtr, blockPtr)
+      }
+
+      let footerPtr = blockPtr + blockSize * _UNIT_SIZE - _HEADER_FOOTER_SIZE
+      footerSetNext(footerPtr, largeBlockFreePtr)
+      headerSetPrevious(blockPtr, 1n)
+
+      largeBlockFreePtr = blockPtr
     }
-    // Set the free list head to this block
-    freePtr = p
   }
 }

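The new `free` above folds an adjacent free neighbour into the block being released by adding unit counts and rewriting the shared header/footer sizes. A toy plain-`Number` illustration of that bookkeeping (hypothetical values, not runtime code):

```grain
module CoalesceSketch

// Freeing a 1-unit block whose right-hand neighbour is an already-free
// 2-unit block yields one 3-unit block; its header and footer both record
// the combined size so later merges can find the block boundaries quickly.
let freedUnits = 1
let neighbourUnits = 2
let mergedUnits = freedUnits + neighbourUnits
print(mergedUnits * 64) // 192 bytes, filed on the large-block free list
```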
@@ -215,25 +322,48 @@ provide let free = (ap: WasmI32) => {
  * @param nbytes: The number of bytes to try to grow the heap by
  * @returns A pointer to the start of the free list if successful or -1 otherwise
  */
-let morecore = (
+let morecore = (nunits: WasmI32) => {
   let origSize = heapSize
-
+
+  let cp = growHeap(nunits + 1n) // include an extra unit for 4 headers/footers

   // If there was an error, fail
   if (cp == -1n) {
     Exception.panic("OutOfMemory: Maximum memory size exceeded")
   } else {
-    // Set the
-    //
+    // Set up the block. We'll add dummy headers/footers before and after the
+    // block to avoid unnecessary bounds checks elsewhere in the code.
     let grownAmount = heapSize - origSize
-
+    let units = (grownAmount >>> logUnitSize) - 1n
+
+    let dummyFooter = cp
+    footerSetSize(dummyFooter, 0n)
+    footerSetNext(dummyFooter, 0n)
+
+    let blockHeader = dummyFooter + _HEADER_FOOTER_SIZE
+    headerSetSize(blockHeader, units)
+    headerSetPrevious(blockHeader, 0n)
+
+    let blockFooter = blockHeader + units * _UNIT_SIZE - _HEADER_FOOTER_SIZE
+    footerSetSize(blockFooter, units)
+    footerSetNext(blockFooter, 0n)
+
+    let dummyHeader = blockFooter + _HEADER_FOOTER_SIZE
+    headerSetSize(dummyHeader, 0n)
+    headerSetPrevious(dummyHeader, 0n)
+
     // Call free() with the new block to add it to the free list.
-    free(
+    free(blockHeader + _HEADER_FOOTER_SIZE)
+
     // Return the free list pointer.
-
+    largeBlockFreePtr
   }
 }

+let roundBytesToUnits = bytes => {
+  (bytes + _UNIT_SIZE - 1n) >>> logUnitSize
+}
+
 /**
  * Allocates the requested number of bytes, returning a pointer.
  *
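For orientation, this is roughly how unsafe code would exercise the allocator built from the pieces above — a sketch only, using the `Malloc.malloc`/`Malloc.free` exports from this file and the `WasmI32` load/store helpers:

```grain
module MallocRoundTripSketch

from "runtime/malloc" include Malloc
from "runtime/unsafe/wasmi32" include WasmI32

@unsafe
let roundTrip = () => {
  // 32 bytes plus the 16 bytes of header/footer overhead fits in one 64-byte unit.
  let ptr = Malloc.malloc(32n)
  WasmI32.store(ptr, 42n, 0n)
  let value = WasmI32.load(ptr, 0n)
  Malloc.free(ptr)
  value
}
```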
@@ -241,70 +371,74 @@ let morecore = (nbytes: WasmI32) => {
  * @returns The pointer to the allocated region (8-byte aligned) or -1 if the allocation failed
  */
 provide let malloc = (nbytes: WasmI32) => {
-  let mut
-  let mut prevp = freePtr
+  let mut nunits = roundBytesToUnits(nbytes + _HEADER_FOOTER_SIZE * 2n)

-  //
-
-
-
+  // Fast path for small blocks
+  if (nunits == 1n && smallBlockFreePtr != 1n) {
+    let blockPtr = smallBlockFreePtr
+    headerSetPrevious(blockPtr, 0n)
+    let footer = blockPtr + _UNIT_SIZE - _HEADER_FOOTER_SIZE
+    let next = footerGetNext(footer)
+    footerSetNext(footer, 0n)

-
-
-
-
-    prevp = _BASE
-    WasmI32.store(_BASE, 0n, _SIZE_OFFSET)
+    headerSetPrevious(next, 1n)
+    smallBlockFreePtr = next
+
+    return blockPtr + _HEADER_FOOTER_SIZE
   }

-
-
-
-
-
-
-
-
-
-
-    if (size == nbytes) {
-      // It's exactly the right size!
-      setNext(prevp, getNext(p))
-    } else {
-      // Shrink it as needed
-      let newSize = size - nbytes
-      setSize(p, newSize)
-      p += newSize
-      setSize(p, nbytes)
-    }
-    // Update the pointer to the free list.
-    freePtr = prevp
+  // Find a large enough block
+  let mut freeBlockPtr = largeBlockFreePtr
+  while (true) {
+    // Free list is empty; grow the heap
+    if (freeBlockPtr == 1n) {
+      freeBlockPtr = morecore(nunits)
+    }
+
+    let blockSize = headerGetSize(freeBlockPtr)
+    let footerPtr = freeBlockPtr + blockSize * _UNIT_SIZE - _HEADER_FOOTER_SIZE

-
-
-
+    // Perfectly sized block, or one unit larger to avoid leaving size 1 blocks
+    // in the large block free list
+    if (blockSize == nunits || blockSize == nunits + 1n) {
+      let blockPtr = freeBlockPtr
+
+      removeFromFreeList(blockPtr)
+      headerSetPrevious(blockPtr, 0n)
+      footerSetNext(footerPtr, 0n)
+
+      return blockPtr + _HEADER_FOOTER_SIZE
     }

-    //
-    if (
-
-
-
-
-
-
-
+    // Take a chunk of this larger block
+    if (blockSize > nunits) {
+      let blockPtr = freeBlockPtr
+
+      let newSize = blockSize - nunits
+      headerSetSize(blockPtr, newSize)
+      let newFooterPtr = blockPtr + newSize * _UNIT_SIZE - _HEADER_FOOTER_SIZE
+      footerSetSize(newFooterPtr, newSize)
+      footerSetNext(newFooterPtr, footerGetNext(footerPtr))
+
+      let newBlockPtr = newFooterPtr + _HEADER_FOOTER_SIZE
+      headerSetSize(newBlockPtr, nunits)
+      headerSetPrevious(newBlockPtr, 0n)
+      footerSetSize(footerPtr, nunits)
+      footerSetNext(footerPtr, 0n)
+
+      return newBlockPtr + _HEADER_FOOTER_SIZE
     }
+
+    freeBlockPtr = footerGetNext(footerPtr)
   }
-
+
+  return -1n
 }

 /**
- *
- * Used for debugging.
- *
- * @returns The free list pointer
+ * Leaks all memory in all free lists; used for testing.
  */
-provide let
-
+provide let leakAll = () => {
+  smallBlockFreePtr = 1n
+  largeBlockFreePtr = 1n
 }
package/runtime/malloc.md
CHANGED
@@ -46,18 +46,11 @@ Returns:
 |----|-----------|
 |`WasmI32`|The pointer to the allocated region (8-byte aligned) or -1 if the allocation failed|

-### Malloc.**
+### Malloc.**leakAll**

 ```grain
-
+leakAll : () => Void
 ```

-
-Used for debugging.
-
-Returns:
-
-|type|description|
-|----|-----------|
-|`WasmI32`|The free list pointer|
+Leaks all memory in all free lists; used for testing.

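A sketch of how the newly documented `leakAll` might be called from allocator tests (it abandons both free lists, so previously freed blocks are never reused):

```grain
module LeakAllSketch

from "runtime/malloc" include Malloc

// Drop every block currently sitting on the small and large free lists.
// Only useful in tests; ordinary programs would simply waste memory.
Malloc.leakAll()
```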