sab-message-port 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +28 -0
- package/README.md +440 -0
- package/dist/SABMessagePort.min.js +1 -0
- package/package.json +54 -0
- package/src/SABMessagePort.js +802 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
BSD 3-Clause License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026, Shlomi Loubaton
|
|
4
|
+
|
|
5
|
+
Redistribution and use in source and binary forms, with or without
|
|
6
|
+
modification, are permitted provided that the following conditions are met:
|
|
7
|
+
|
|
8
|
+
1. Redistributions of source code must retain the above copyright notice, this
|
|
9
|
+
list of conditions and the following disclaimer.
|
|
10
|
+
|
|
11
|
+
2. Redistributions in binary form must reproduce the above copyright notice,
|
|
12
|
+
this list of conditions and the following disclaimer in the documentation
|
|
13
|
+
and/or other materials provided with the distribution.
|
|
14
|
+
|
|
15
|
+
3. Neither the name of the copyright holder nor the names of its
|
|
16
|
+
contributors may be used to endorse or promote products derived from
|
|
17
|
+
this software without specific prior written permission.
|
|
18
|
+
|
|
19
|
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
20
|
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
21
|
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
22
|
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
23
|
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
24
|
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
25
|
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
26
|
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
27
|
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
28
|
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
package/README.md
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
1
|
+
# sab-message-port
|
|
2
|
+
|
|
3
|
+
High-performance IPC for Web Workers and Node.js worker threads over `SharedArrayBuffer`.
|
|
4
|
+
|
|
5
|
+
Passes JSON messages between threads through shared memory using `Atomics` for synchronization — no serialization through `postMessage`, no copying overhead. Supports both **blocking** reads (via `Atomics.wait` in worker threads) and **non-blocking** async reads (via `Atomics.waitAsync`, safe on the main thread). Large messages are chunked transparently.
|
|
6
|
+
|
|
7
|
+
## Motivation
|
|
8
|
+
|
|
9
|
+
The native `postMessage` API is event-loop driven — a worker can only receive messages by yielding control back to the event loop. This makes it unsuitable for long-running synchronous worker code that needs to communicate mid-execution without returning from its current call stack.
|
|
10
|
+
|
|
11
|
+
`sab-message-port` solves this by providing a **blocking read** (`Atomics.wait`) that lets a worker pause in place, wait for a message, and resume — with no dependence on the event loop. The worker stays in its own synchronous flow while the main thread sends messages asynchronously from the other side.
|
|
12
|
+
|
|
13
|
+
## Install
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
npm install sab-message-port
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
```javascript
|
|
20
|
+
import { SABMessagePort, SABPipe } from 'sab-message-port';
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Quick Start
|
|
24
|
+
|
|
25
|
+
**Main thread:**
|
|
26
|
+
|
|
27
|
+
```javascript
|
|
28
|
+
import { SABMessagePort } from 'sab-message-port';
|
|
29
|
+
|
|
30
|
+
const worker = new Worker('./worker.js', { type: 'module' });
const port = new SABMessagePort();
|
|
31
|
+
port.postInit(worker);
|
|
32
|
+
|
|
33
|
+
port.onmessage = (e) => console.log('from worker:', e.data);
|
|
34
|
+
port.postMessage({ cmd: 'ping' });
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
**Worker:**
|
|
38
|
+
|
|
39
|
+
```javascript
|
|
40
|
+
import { SABMessagePort } from 'sab-message-port';
|
|
41
|
+
|
|
42
|
+
self.onmessage = (e) => {
|
|
43
|
+
if (e.data.type === 'SABMessagePort') {
|
|
44
|
+
const port = SABMessagePort.from(e.data);
|
|
45
|
+
|
|
46
|
+
port.onmessage = (e) => {
|
|
47
|
+
if (e.data.cmd === 'ping') {
|
|
48
|
+
port.postMessage({ reply: 'pong' });
|
|
49
|
+
}
|
|
50
|
+
};
|
|
51
|
+
}
|
|
52
|
+
};
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
## Example: Blocking Reads with Interrupt
|
|
56
|
+
|
|
57
|
+
With native `postMessage`, a worker can only receive messages by returning to the event loop. If the worker is stuck in a long synchronous loop, incoming messages pile up undelivered. `sab-message-port` solves this — `port.read()` blocks in-place via `Atomics.wait`, and `port.tryRead()` checks for signals mid-computation, all without yielding to the event loop.
|
|
58
|
+
|
|
59
|
+
**main.js**
|
|
60
|
+
|
|
61
|
+
```javascript
|
|
62
|
+
import { SABMessagePort } from 'sab-message-port';
|
|
63
|
+
|
|
64
|
+
const worker = new Worker('./worker.js', { type: 'module' });
|
|
65
|
+
const port = new SABMessagePort();
|
|
66
|
+
port.postInit(worker);
|
|
67
|
+
|
|
68
|
+
port.onmessage = ({ data }) => console.log(data);
|
|
69
|
+
|
|
70
|
+
// Start a long task
|
|
71
|
+
port.postMessage({ cmd: 'run', task: 'A', iterations: 5_000_000 });
|
|
72
|
+
|
|
73
|
+
// After 200ms, abort and start a different task
|
|
74
|
+
setTimeout(() => {
|
|
75
|
+
port.postMessage({ cmd: 'abort' });
|
|
76
|
+
port.postMessage({ cmd: 'run', task: 'B', iterations: 2_000_000 });
|
|
77
|
+
}, 200);
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
**worker.js** — entirely synchronous, never yields to the event loop
|
|
81
|
+
|
|
82
|
+
```javascript
|
|
83
|
+
import { SABMessagePort } from 'sab-message-port';
|
|
84
|
+
|
|
85
|
+
self.onmessage = (e) => {
|
|
86
|
+
if (e.data.type !== 'SABMessagePort') return;
|
|
87
|
+
const port = SABMessagePort.from(e.data);
|
|
88
|
+
|
|
89
|
+
while (true) {
|
|
90
|
+
const task = port.read(); // block until a task arrives
|
|
91
|
+
if (task.cmd !== 'run') continue;
|
|
92
|
+
|
|
93
|
+
for (let i = 0; i < task.iterations; i++) {
|
|
94
|
+
/* ... heavy work ... */
|
|
95
|
+
|
|
96
|
+
if (i % 500_000 === 0) {
|
|
97
|
+
port.postMessage({ task: task.task, progress: `${(i / task.iterations * 100) | 0}%` });
|
|
98
|
+
|
|
99
|
+
if (port.tryRead()?.cmd === 'abort') { // non-blocking check
|
|
100
|
+
port.postMessage({ task: task.task, aborted: true });
|
|
101
|
+
break; // → back to port.read()
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
};
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
---
|
|
110
|
+
|
|
111
|
+
## SABMessagePort
|
|
112
|
+
|
|
113
|
+
Bidirectional channel over a single `SharedArrayBuffer`. Both sides can read and write simultaneously — full duplex. API mirrors the native `MessagePort`.
|
|
114
|
+
|
|
115
|
+
All messages must be **JSON-serializable** (they go through `JSON.stringify`/`JSON.parse` internally). Message ordering is **FIFO** — messages are always delivered in the order they were sent.
|
|
116
|
+
|
|
117
|
+
### `new SABMessagePort(side = 'a', sizeKB = 256)`
|
|
118
|
+
|
|
119
|
+
Creates a new bidirectional port.
|
|
120
|
+
|
|
121
|
+
| Parameter | Default | Description |
|
|
122
|
+
|-----------|---------|-------------|
|
|
123
|
+
| `side` | `'a'` | `'a'` (initiator) or `'b'` (responder). Typically only the initiator is created directly; the responder uses `SABMessagePort.from()`. |
|
|
124
|
+
| `sizeKB` | `256` | Total buffer size in KB (split in half between the two directions), or an existing `SharedArrayBuffer`. Each direction gets `sizeKB / 2` KB of buffer space. |
|
|
125
|
+
|
|
126
|
+
```javascript
|
|
127
|
+
const port = new SABMessagePort(); // 256 KB total (128 KB per direction), side 'a'
|
|
128
|
+
const port = new SABMessagePort('a', 512); // 512 KB total (256 KB per direction)
|
|
129
|
+
|
|
130
|
+
// Or pass an existing SharedArrayBuffer
|
|
131
|
+
const sab = new SharedArrayBuffer(256 * 1024);
|
|
132
|
+
const port = new SABMessagePort('a', sab);
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
Throws if `side` is not `'a'` or `'b'`.
|
|
136
|
+
|
|
137
|
+
### `SABMessagePort.from(initMsg)`
|
|
138
|
+
|
|
139
|
+
Creates the responder side (`'b'`) from a received init message. The init message must have `type: 'SABMessagePort'` and a `buffer` property containing the `SharedArrayBuffer`.
|
|
140
|
+
|
|
141
|
+
Throws if `initMsg.type !== 'SABMessagePort'`.
|
|
142
|
+
|
|
143
|
+
```javascript
|
|
144
|
+
// Worker side
|
|
145
|
+
self.onmessage = (e) => {
|
|
146
|
+
if (e.data.type === 'SABMessagePort') {
|
|
147
|
+
const port = SABMessagePort.from(e.data);
|
|
148
|
+
// ready to send and receive
|
|
149
|
+
}
|
|
150
|
+
};
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
### `port.postInit(target = null, extraProps = {})`
|
|
154
|
+
|
|
155
|
+
Sends the shared buffer to the other side via `postMessage`, or returns the arguments for manual sending.
|
|
156
|
+
|
|
157
|
+
| Parameter | Default | Description |
|
|
158
|
+
|-----------|---------|-------------|
|
|
159
|
+
| `target` | `null` | A `Worker`, `MessagePort`, or any object with a `postMessage` method. If `null`, returns the arguments instead of sending. |
|
|
160
|
+
| `extraProps` | `{}` | Additional properties merged into the init message (e.g. `{ channel: 'rpc' }`). |
|
|
161
|
+
|
|
162
|
+
When `target` is provided, calls `target.postMessage(data, transferList)` directly. When `target` is `null`, returns `[data, transferList]` — a two-element array where `data` is the init message object and `transferList` is `[sharedArrayBuffer]`.
|
|
163
|
+
|
|
164
|
+
```javascript
|
|
165
|
+
// Auto-send to worker
|
|
166
|
+
port.postInit(worker, { channel: 'rpc' });
|
|
167
|
+
|
|
168
|
+
// Manual — returns [data, transferList]
|
|
169
|
+
const [data, transfer] = port.postInit(null, { channel: 'rpc' });
|
|
170
|
+
// data = { type: 'SABMessagePort', buffer: SharedArrayBuffer, channel: 'rpc' }
|
|
171
|
+
// transfer = [SharedArrayBuffer]
|
|
172
|
+
worker.postMessage(data, transfer);
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
### `port.postMessage(msg)` → `Promise`
|
|
176
|
+
|
|
177
|
+
Queues a JSON-serializable message for sending. Returns a promise that resolves when the message has been written to the shared buffer.
|
|
178
|
+
|
|
179
|
+
Multiple `postMessage()` calls made before the writer flushes are **batched** into a single payload and sent together. The returned promise resolves when the entire batch containing that message is fully written. This means you can fire off multiple `postMessage()` calls without awaiting — they will be coalesced efficiently.
|
|
180
|
+
|
|
181
|
+
```javascript
|
|
182
|
+
// Fire-and-forget (message is queued and sent asynchronously)
|
|
183
|
+
port.postMessage({ action: 'save', data: [1, 2, 3] });
|
|
184
|
+
|
|
185
|
+
// Or await to know when it's been written to the buffer
|
|
186
|
+
await port.postMessage({ action: 'save', data: [1, 2, 3] });
|
|
187
|
+
|
|
188
|
+
// Batching: these may all be sent as one payload
|
|
189
|
+
port.postMessage({ a: 1 });
|
|
190
|
+
port.postMessage({ b: 2 });
|
|
191
|
+
port.postMessage({ c: 3 });
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
Throws if the port has been closed.
|
|
195
|
+
|
|
196
|
+
### `port.onmessage`
|
|
197
|
+
|
|
198
|
+
Event-driven reader. Mirrors the `MessagePort.onmessage` pattern. Setting a handler starts a continuous async read loop; setting `null` stops it.
|
|
199
|
+
|
|
200
|
+
The handler receives an event object with a `data` property containing the message, matching the Web API convention: `handler({ data: message })`.
|
|
201
|
+
|
|
202
|
+
```javascript
|
|
203
|
+
port.onmessage = (e) => {
|
|
204
|
+
console.log(e.data); // the received message
|
|
205
|
+
};
|
|
206
|
+
|
|
207
|
+
// Stop listening
|
|
208
|
+
port.onmessage = null;
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
**Mutual exclusion:** You cannot call `read()`, `asyncRead()`, or `tryRead()` while an `onmessage` handler is active — doing so throws an error. Set `onmessage = null` first.
|
|
212
|
+
|
|
213
|
+
**Error resilience:** If the handler throws, the error is silently caught and the message loop continues. This ensures one bad message doesn't break the entire channel.
|
|
214
|
+
|
|
215
|
+
**Re-assignment:** Assigning a new handler function replaces the current one immediately within the same loop — no gap in delivery and no duplicate loops.
|
|
216
|
+
|
|
217
|
+
### `await port.asyncRead(timeout = Infinity, maxMessages = 1)` → message | null | Array
|
|
218
|
+
|
|
219
|
+
Async read using `Atomics.waitAsync`. **Safe on the main thread.** Waits for a message or until timeout expires.
|
|
220
|
+
|
|
221
|
+
| Parameter | Default | Description |
|
|
222
|
+
|-----------|---------|-------------|
|
|
223
|
+
| `timeout` | `Infinity` | Maximum time to wait in milliseconds. `Infinity` waits forever. |
|
|
224
|
+
| `maxMessages` | `1` | Maximum number of messages to return. |
|
|
225
|
+
|
|
226
|
+
**Return value depends on `maxMessages`:**
|
|
227
|
+
|
|
228
|
+
- **`maxMessages = 1`** (default): Returns a single message, or `null` if timeout expired with no data.
|
|
229
|
+
- **`maxMessages > 1`**: Returns an **array** of messages ordered **newest-first, oldest-last**, up to `maxMessages` items. Returns an **empty array** `[]` if timeout expired with no data. You can `pop()` from the returned array to process messages in send order (FIFO).
|
|
230
|
+
|
|
231
|
+
If messages are already queued internally from a previous batch, they are returned immediately without waiting.
|
|
232
|
+
|
|
233
|
+
```javascript
|
|
234
|
+
const msg = await port.asyncRead(); // wait forever, returns single message
|
|
235
|
+
const msg = await port.asyncRead(1000); // wait up to 1s, returns message or null
|
|
236
|
+
const msgs = await port.asyncRead(1000, 5); // up to 5 messages, array (newest first, pop for FIFO)
|
|
237
|
+
const msgs = await port.asyncRead(0, 10); // non-blocking, returns whatever is available now
|
|
238
|
+
```
|
|
239
|
+
|
|
240
|
+
Throws if the port has been closed, or if `onmessage` is active.
|
|
241
|
+
|
|
242
|
+
### `port.read(timeout = Infinity, blocking = true, maxMessages = 1)` → message | null | Array
|
|
243
|
+
|
|
244
|
+
Synchronous read using `Atomics.wait`. **Worker threads only** — calling this on the main thread throws (`Atomics.wait` is not allowed on the main thread).
|
|
245
|
+
|
|
246
|
+
| Parameter | Default | Description |
|
|
247
|
+
|-----------|---------|-------------|
|
|
248
|
+
| `timeout` | `Infinity` | Maximum time to wait in milliseconds. Ignored when `blocking` is `false`. |
|
|
249
|
+
| `blocking` | `true` | If `true`, blocks the thread until data arrives or timeout expires. If `false`, returns immediately. |
|
|
250
|
+
| `maxMessages` | `1` | Maximum number of messages to return. |
|
|
251
|
+
|
|
252
|
+
**Return value depends on `maxMessages`:**
|
|
253
|
+
|
|
254
|
+
- **`maxMessages = 1`** (default): Returns a single message, or `null` if no data is available (timeout or non-blocking).
|
|
255
|
+
- **`maxMessages > 1`**: Returns an **array** of messages ordered **newest-first, oldest-last**, up to `maxMessages` items. Returns an **empty array** `[]` if no data is available. You can `pop()` from the returned array to process messages in send order (FIFO).
|
|
256
|
+
|
|
257
|
+
If messages are already queued internally from a previous batch, they are returned immediately without blocking.
|
|
258
|
+
|
|
259
|
+
**Timeout and multipart messages:** The timeout only applies to the initial wait for a message. Once a large (multipart) message begins arriving, the read waits indefinitely for all remaining parts to ensure the message is fully received.
|
|
260
|
+
|
|
261
|
+
```javascript
|
|
262
|
+
const msg = port.read(); // block forever until message
|
|
263
|
+
const msg = port.read(500); // block up to 500ms, null on timeout
|
|
264
|
+
const msg = port.read(0, false); // non-blocking, returns null if empty
|
|
265
|
+
const msgs = port.read(1000, true, 5); // block up to 1s, up to 5 msgs (newest first, pop for FIFO)
|
|
266
|
+
```
|
|
267
|
+
|
|
268
|
+
Throws if the port has been closed, or if `onmessage` is active.
|
|
269
|
+
|
|
270
|
+
### `port.tryRead(maxMessages = 1)` → message | null | Array
|
|
271
|
+
|
|
272
|
+
Non-blocking read. Equivalent to `port.read(0, false, maxMessages)`. Returns immediately with available data.
|
|
273
|
+
|
|
274
|
+
| Parameter | Default | Description |
|
|
275
|
+
|-----------|---------|-------------|
|
|
276
|
+
| `maxMessages` | `1` | Maximum number of messages to return. |
|
|
277
|
+
|
|
278
|
+
**Return value depends on `maxMessages`:**
|
|
279
|
+
|
|
280
|
+
- **`maxMessages = 1`** (default): Returns a single message, or `null`.
|
|
281
|
+
- **`maxMessages > 1`**: Returns an **array** (newest-first, oldest-last), or an **empty array** `[]`. `pop()` to process in FIFO order.
|
|
282
|
+
|
|
283
|
+
```javascript
|
|
284
|
+
const msg = port.tryRead(); // single message or null
|
|
285
|
+
const msgs = port.tryRead(10); // up to 10 messages (array, newest first) or []
|
|
286
|
+
```
|
|
287
|
+
|
|
288
|
+
### `port.close()`
|
|
289
|
+
|
|
290
|
+
Disposes both directions. Unblocks any waiting readers/writers by signaling disposal. After closing, all `postMessage()`, `read()`, `asyncRead()`, and `tryRead()` calls will throw. Calling `close()` multiple times is safe (subsequent calls are no-ops).
|
|
291
|
+
|
|
292
|
+
### `port.buffer` → `SharedArrayBuffer`
|
|
293
|
+
|
|
294
|
+
The underlying shared buffer.
|
|
295
|
+
|
|
296
|
+
---
|
|
297
|
+
|
|
298
|
+
## SABPipe
|
|
299
|
+
|
|
300
|
+
Unidirectional channel — one end writes, the other reads. Used internally by `SABMessagePort`, but useful on its own when you only need one-way communication.
|
|
301
|
+
|
|
302
|
+
All messages must be **JSON-serializable**. Message ordering is **FIFO**.
|
|
303
|
+
|
|
304
|
+
### `new SABPipe(role, sabOrSize = 131072, byteOffset = 0, sectionSize = null)`
|
|
305
|
+
|
|
306
|
+
| Parameter | Default | Description |
|
|
307
|
+
|-----------|---------|-------------|
|
|
308
|
+
| `role` | (required) | `'w'` (writer) or `'r'` (reader). Throws if invalid. |
|
|
309
|
+
| `sabOrSize` | `131072` | Byte size for a new buffer (128 KB), or an existing `SharedArrayBuffer`. |
|
|
310
|
+
| `byteOffset` | `0` | Starting byte offset in the SAB. |
|
|
311
|
+
| `sectionSize` | `null` | Section size in bytes. Defaults to the remaining SAB from `byteOffset`. |
|
|
312
|
+
|
|
313
|
+
The writer and reader must share the same `SharedArrayBuffer` (and same offset/section) to communicate. Role enforcement is strict: the writer can only call `postMessage()`, and the reader can only call `read()`/`asyncRead()`/`tryRead()`/`onmessage`. Calling the wrong method throws.
|
|
314
|
+
|
|
315
|
+
```javascript
|
|
316
|
+
// Writer creates the buffer
|
|
317
|
+
const writer = new SABPipe('w');
|
|
318
|
+
|
|
319
|
+
// Reader attaches to the same buffer
|
|
320
|
+
const reader = new SABPipe('r', writer.buffer);
|
|
321
|
+
```
|
|
322
|
+
|
|
323
|
+
### Writer API
|
|
324
|
+
|
|
325
|
+
#### `writer.postMessage(msg)` → `Promise`
|
|
326
|
+
|
|
327
|
+
Queues a JSON-serializable message for sending. Returns a promise that resolves when the batch is written. Multiple calls are batched — see [`SABMessagePort.postMessage`](#portpostmessagemsg--promise) for details.
|
|
328
|
+
|
|
329
|
+
```javascript
|
|
330
|
+
writer.postMessage({ hello: 'world' });
|
|
331
|
+
```
|
|
332
|
+
|
|
333
|
+
### Reader API
|
|
334
|
+
|
|
335
|
+
All read methods share the same return-value convention:
|
|
336
|
+
|
|
337
|
+
- **`maxMessages = 1`** (default): returns a single message or `null`.
|
|
338
|
+
- **`maxMessages > 1`**: returns an **array** of messages ordered **newest-first, oldest-last**, or an **empty array** `[]` if no data. `pop()` to process in FIFO order.
|
|
339
|
+
|
|
340
|
+
#### `reader.read(timeout = Infinity, blocking = true, maxMessages = 1)`
|
|
341
|
+
|
|
342
|
+
Synchronous read. **Worker threads only** (uses `Atomics.wait`).
|
|
343
|
+
|
|
344
|
+
| Parameter | Default | Description |
|
|
345
|
+
|-----------|---------|-------------|
|
|
346
|
+
| `timeout` | `Infinity` | Max wait time in ms. Ignored when non-blocking. |
|
|
347
|
+
| `blocking` | `true` | If `false`, returns immediately without waiting. |
|
|
348
|
+
| `maxMessages` | `1` | Max messages to return. |
|
|
349
|
+
|
|
350
|
+
Timeout only applies to the initial wait. Multipart messages (large payloads that span multiple chunks) always wait for all parts once the first part arrives.
|
|
351
|
+
|
|
352
|
+
#### `await reader.asyncRead(timeout = Infinity, maxMessages = 1)`
|
|
353
|
+
|
|
354
|
+
Async read using `Atomics.waitAsync`. **Safe on the main thread.**
|
|
355
|
+
|
|
356
|
+
| Parameter | Default | Description |
|
|
357
|
+
|-----------|---------|-------------|
|
|
358
|
+
| `timeout` | `Infinity` | Max wait time in ms. |
|
|
359
|
+
| `maxMessages` | `1` | Max messages to return. |
|
|
360
|
+
|
|
361
|
+
#### `reader.tryRead(maxMessages = 1)`
|
|
362
|
+
|
|
363
|
+
Non-blocking read. Equivalent to `reader.read(0, false, maxMessages)`. Returns immediately.
|
|
364
|
+
|
|
365
|
+
#### `reader.onmessage`
|
|
366
|
+
|
|
367
|
+
Event-driven handler. Setting a function starts a continuous async read loop; setting `null` stops it. Handler receives `{ data: message }`. See [`SABMessagePort.onmessage`](#portonmessage) for full behavior details (mutual exclusion, error resilience, re-assignment).
|
|
368
|
+
|
|
369
|
+
### Shared
|
|
370
|
+
|
|
371
|
+
#### `pipe.close()` / `pipe.destroy()`
|
|
372
|
+
|
|
373
|
+
Disposes the channel and unblocks any waiting readers/writers. After disposal, all read/write operations throw `'SABPipe disposed'`. Safe to call multiple times.
|
|
374
|
+
|
|
375
|
+
#### `pipe.isDisposed()` → `boolean`
|
|
376
|
+
|
|
377
|
+
Returns `true` if the pipe has been disposed (by either side).
|
|
378
|
+
|
|
379
|
+
---
|
|
380
|
+
|
|
381
|
+
## Message Batching
|
|
382
|
+
|
|
383
|
+
When the writer side calls `postMessage()` multiple times in quick succession (without awaiting), messages are **batched** into a single payload and sent together over the shared buffer. On the reader side, these batched messages are unpacked into an internal queue and delivered one at a time.
|
|
384
|
+
|
|
385
|
+
This means a single `read()` or `asyncRead()` call may populate the internal queue with multiple messages. Subsequent reads return immediately from the queue without waiting on the shared buffer. Use `maxMessages > 1` to retrieve multiple queued messages in one call.
|
|
386
|
+
|
|
387
|
+
```javascript
|
|
388
|
+
// Writer side: 3 messages batched into one payload
|
|
389
|
+
writer.postMessage({ id: 0 });
|
|
390
|
+
writer.postMessage({ id: 1 });
|
|
391
|
+
writer.postMessage({ id: 2 });
|
|
392
|
+
|
|
393
|
+
// Reader side: first read waits for data, gets all 3 into the queue
|
|
394
|
+
const msg0 = reader.read(); // { id: 0 } — waited for data
|
|
395
|
+
const msg1 = reader.read(); // { id: 1 } — returned immediately from queue
|
|
396
|
+
const msg2 = reader.read(); // { id: 2 } — returned immediately from queue
|
|
397
|
+
|
|
398
|
+
// Or get all at once (newest first — pop() for FIFO)
|
|
399
|
+
const msgs = reader.read(Infinity, true, 10); // [{ id: 2 }, { id: 1 }, { id: 0 }]
|
|
400
|
+
msgs.pop(); // { id: 0 } — oldest
|
|
401
|
+
msgs.pop(); // { id: 1 }
|
|
402
|
+
msgs.pop(); // { id: 2 } — newest
|
|
403
|
+
```
|
|
404
|
+
|
|
405
|
+
## Large Messages & Chunking
|
|
406
|
+
|
|
407
|
+
Messages larger than the buffer's data section are automatically split into chunks (multipart messages) and reassembled on the reader side. This is fully transparent — no API changes needed regardless of message size.
|
|
408
|
+
|
|
409
|
+
During a multipart read, timeout is suspended: once the first chunk arrives, the reader waits indefinitely for remaining chunks to ensure the full message is received.
|
|
410
|
+
|
|
411
|
+
The maximum single-chunk size is the pipe's section size minus 32 bytes (32 bytes are reserved for control fields). For the default 128 KB (131,072-byte) pipe, that's 131,040 bytes (~128 KB) per chunk.
|
|
412
|
+
|
|
413
|
+
---
|
|
414
|
+
|
|
415
|
+
## Performance
|
|
416
|
+
|
|
417
|
+
Benchmarked on a single machine, Node.js worker threads, 1000 messages (~757 KB total):
|
|
418
|
+
|
|
419
|
+
| Mode | Avg Latency | Throughput |
|
|
420
|
+
|------|-------------|------------|
|
|
421
|
+
| Blocking read (`Atomics.wait`) | ~19 us/msg | ~39 MB/s |
|
|
422
|
+
| Async read (`Atomics.waitAsync`) | ~27 us/msg | ~27 MB/s |
|
|
423
|
+
|
|
424
|
+
Sustained throughput (3 second run, ~500 byte messages):
|
|
425
|
+
|
|
426
|
+
| Mode | Messages/sec | Throughput |
|
|
427
|
+
|------|-------------|------------|
|
|
428
|
+
| Blocking read | ~59,000 msg/s | ~30 MB/s |
|
|
429
|
+
| Async read | ~50,000 msg/s | ~25 MB/s |
|
|
430
|
+
|
|
431
|
+
Blocking reads are faster because `Atomics.wait` wakes with lower latency than the async event loop. Use blocking reads in worker threads for maximum performance; use async reads on the main thread or when you need to interleave with other async work.
|
|
432
|
+
|
|
433
|
+
## Requirements
|
|
434
|
+
|
|
435
|
+
- Node.js >= 16 or any browser with `SharedArrayBuffer` support
|
|
436
|
+
- `SharedArrayBuffer` requires [cross-origin isolation](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SharedArrayBuffer#security_requirements) headers in browsers
|
|
437
|
+
|
|
438
|
+
## License
|
|
439
|
+
|
|
440
|
+
[BSD-3-Clause](LICENSE)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
var p=new TextEncoder,g=new TextDecoder,R=!1,n=R?(...u)=>console.log("[SABPipe]",...u):()=>{},l=class u{static STATUS=0;static RW_SIGNAL=1;static W_DATA_LEN=2;static NUM_PARTS=3;static PART_INDEX=4;static RESERVED_1=5;static RESERVED_2=6;static RESERVED_3=7;static CONTROL_TOP=8;static DATA_OFFSET=32;static STATUS_ACTIVE=0;static STATUS_DISPOSED=-1;static RW_CAN_WRITE=0;static RW_CAN_READ=1;constructor(t,e=131072,s=0,i=null){this._sab=typeof e=="number"?new SharedArrayBuffer(e):e;let r=i??this._sab.byteLength-s;if(this.i32=new Int32Array(this._sab,s,r>>2),this.u8=new Uint8Array(this._sab,s,r),this.maxChunk=r-u.DATA_OFFSET,t==="w")this.isWriter=!0,this.isReader=!1,this._write_queue=this._create_write_queue(),this._read_queue=null,this._payload_in_progress=null,this._writing=!1;else if(t==="r")this.isWriter=!1,this.isReader=!0,this._read_queue=[],this._write_queue=null,this._reading=!1,this._onmessage=null,this._messageLoopActive=!1;else throw new Error("Invalid role parameter: must be 'r' or 'w'");this._max_chunk=r-u.DATA_OFFSET}c=this.constructor;isDisposed(){return this.i32===null||this.i32[this.c.STATUS]===this.c.STATUS_DISPOSED}_checkDisposed(){if(this.isDisposed())throw this.i32=null,this.u8=null,this._sab=null,new Error("SABPipe disposed")}destroy(){if(this.i32!==null){for(let t=0;t<this.c.CONTROL_TOP;t++)Atomics.store(this.i32,t,this.c.STATUS_DISPOSED);for(let t=0;t<this.c.CONTROL_TOP;t++)Atomics.notify(this.i32,t,1/0);this.i32=null,this.u8=null,this._sab=null}}close(){this.destroy()}_create_write_queue(){let t,e,s=new Promise((i,r)=>{t=i,e=r});return{queue:[],finishWritePromise:s,finishWriteResolveFunc:t,finishWriteRejectFunc:e}}_json_to_chunks(t){let e=p.encode(JSON.stringify(t)),s=[],i=Math.ceil(e.length/this._max_chunk)||1;for(let r=0;r<i;r++){let h=e.subarray(r*this._max_chunk,(r+1)*this._max_chunk);s.push(h)}return s}async _asyncWrite(){if(!this.isWriter)throw new Error("Only writer can write");if(this._checkDisposed(),this._writing){n("_write: already 
in progress, returning");return}this._writing=!0;try{if(n("_write: waiting for can write"),await this._waitForCanWrite(),n("_write: can write now"),Atomics.load(this.i32,this.c.RW_SIGNAL)===this.c.STATUS_DISPOSED&&this._checkDisposed(),this._write_queue.queue.length===0)return;this._payload_in_progress=this._write_queue,this._payload_in_progress.chunks=this._json_to_chunks(this._payload_in_progress.queue),this._payload_in_progress.currentPart=0,this._write_queue=this._create_write_queue(),this._payload_in_progress.finishWritePromise.then(()=>{this._write_queue.queue.length>0&&this._asyncWrite().catch(()=>{})});let t=this._payload_in_progress.chunks.length;for(let s=0;s<t;s++){s>0&&(await this._waitForCanWrite(),Atomics.load(this.i32,this.c.RW_SIGNAL)===this.c.STATUS_DISPOSED&&this._checkDisposed());let i=this._payload_in_progress.chunks[s];this.u8.set(i,this.c.DATA_OFFSET),this.i32[this.c.NUM_PARTS]=t,this.i32[this.c.PART_INDEX]=s,this.i32[this.c.W_DATA_LEN]=i.length,n(`_write: signaling RW_CAN_READ, part ${s}/${t}`),Atomics.store(this.i32,this.c.RW_SIGNAL,this.c.RW_CAN_READ),Atomics.notify(this.i32,this.c.RW_SIGNAL,1)}n("_write: all parts sent, resolving promise");let e=this._payload_in_progress;this._payload_in_progress=null,e.finishWriteResolveFunc()}finally{this._writing=!1}}async _waitForCanWrite(){for(;;){let t=Atomics.load(this.i32,this.c.RW_SIGNAL);if(t===this.c.RW_CAN_WRITE)return;t===this.c.STATUS_DISPOSED&&this._checkDisposed();let e=Atomics.waitAsync(this.i32,this.c.RW_SIGNAL,t);e.async&&await e.value}}_read(t=!0,e=1/0){if(!this.isReader)throw new Error("Only reader can read");this._checkDisposed(),n(`_read: starting, blocking=${t}, timeout=${e}`);let s=[],i=1,r=!0;for(;;){let a=Atomics.load(this.i32,this.c.RW_SIGNAL);if(n(`_read: signal=${a}, RW_CAN_READ=${this.c.RW_CAN_READ}`),a===this.c.STATUS_DISPOSED&&this._checkDisposed(),a!==this.c.RW_CAN_READ){if(!t)return n("_read: non-blocking, no data"),!1;let A=r?e:1/0;n(`_read: entering Atomics.wait, 
signal=${a}, timeout=${A}`);let w=Atomics.wait(this.i32,this.c.RW_SIGNAL,a,A);if(n(`_read: Atomics.wait returned ${w}`),w==="timed-out")return n("_read: timed out"),!1;let f=Atomics.load(this.i32,this.c.RW_SIGNAL);if(n(`_read: after wake, newSignal=${f}`),f===this.c.STATUS_DISPOSED&&this._checkDisposed(),f!==this.c.RW_CAN_READ){n("_read: spurious wakeup, retrying");continue}}let o=this.i32[this.c.W_DATA_LEN];i=this.i32[this.c.NUM_PARTS];let _=this.i32[this.c.PART_INDEX],d=this.u8.slice(this.c.DATA_OFFSET,this.c.DATA_OFFSET+o);if(s.push(d),n(`_read: got part ${_}/${i}, len=${o}, signaling RW_CAN_WRITE`),Atomics.store(this.i32,this.c.RW_SIGNAL,this.c.RW_CAN_WRITE),Atomics.notify(this.i32,this.c.RW_SIGNAL,1),r=!1,_>=i-1){n("_read: last part received");break}}let h;if(s.length===1)h=s[0];else{let a=s.reduce((_,d)=>_+d.length,0);h=new Uint8Array(a);let o=0;for(let _ of s)h.set(_,o),o+=_.length}let c=JSON.parse(g.decode(h));return c.reverse(),this._read_queue=c.concat(this._read_queue),!0}async _waitForCanRead(t=1/0){let e=t===1/0?1/0:Date.now()+t;for(;;){let s=Atomics.load(this.i32,this.c.RW_SIGNAL);if(s===this.c.RW_CAN_READ)return!0;s===this.c.STATUS_DISPOSED&&this._checkDisposed();let i=e===1/0?1/0:Math.max(0,e-Date.now());if(i===0)return!1;let r=Atomics.waitAsync(this.i32,this.c.RW_SIGNAL,s);if(r.async){if(i===1/0)await r.value;else if(await Promise.race([r.value,new Promise(c=>setTimeout(()=>c("timed-out"),i))])==="timed-out")return!1}}}async _asyncRead(t=1/0){if(!this.isReader)throw new Error("Only reader can read");if(this._checkDisposed(),this._reading)return n("_asyncRead: already in progress, returning"),!1;this._reading=!0;try{n(`_asyncRead: starting, timeout=${t}`);let e=[],s=!0;for(;;){let h=s?t:1/0;if(n(`_asyncRead: waiting for can read, timeout=${h}`),!await this._waitForCanRead(h))return n("_asyncRead: timed out or no data"),!1;Atomics.load(this.i32,this.c.RW_SIGNAL)===this.c.STATUS_DISPOSED&&this._checkDisposed();let 
a=this.i32[this.c.W_DATA_LEN],o=this.i32[this.c.NUM_PARTS],_=this.i32[this.c.PART_INDEX],d=this.u8.slice(this.c.DATA_OFFSET,this.c.DATA_OFFSET+a);if(e.push(d),n(`_asyncRead: got part ${_}/${o}, len=${a}, signaling RW_CAN_WRITE`),Atomics.store(this.i32,this.c.RW_SIGNAL,this.c.RW_CAN_WRITE),Atomics.notify(this.i32,this.c.RW_SIGNAL,1),s=!1,_>=o-1){n("_asyncRead: last part received");break}}let i;if(e.length===1)i=e[0];else{let h=e.reduce((a,o)=>a+o.length,0);i=new Uint8Array(h);let c=0;for(let a of e)i.set(a,c),c+=a.length}let r=JSON.parse(g.decode(i));return r.reverse(),this._read_queue=r.concat(this._read_queue),!0}finally{this._reading=!1}}postMessage(t){if(!this.isWriter)throw new Error("Only writer can write");this._checkDisposed(),this._write_queue.queue.push(t);let e=this._write_queue.finishWritePromise;return this._asyncWrite().catch(()=>{}),e}read(t=1/0,e=!0,s=1){if(!this.isReader)throw new Error("Only reader can read");if(this._onmessage!==null)throw new Error("Cannot call read while onmessage is active");return this._checkDisposed(),this._read_queue.length>0?this._popMessages(s):(this._read(e,t),this._popMessages(s))}tryRead(t=1){return this.read(0,!1,t)}async asyncRead(t=1/0,e=1){if(!this.isReader)throw new Error("Only reader can read");if(this._onmessage!==null)throw new Error("Cannot call asyncRead while onmessage is active");return this._checkDisposed(),this._read_queue.length>0?this._popMessages(e):(await this._asyncRead(t),this._popMessages(e))}set onmessage(t){if(!this.isReader)throw new Error("Only reader can set onmessage");this._onmessage=t??null,t!==null&&!this._messageLoopActive&&this._messageLoop()}get onmessage(){return this._onmessage}async _messageLoop(){if(!this._messageLoopActive){this._messageLoopActive=!0;try{for(;this._onmessage!==null;){for(;this._read_queue.length>0&&this._onmessage!==null;){let t=this._read_queue.pop();try{this._onmessage({data:t})}catch{}}if(this._onmessage===null)break;await 
this._asyncRead()}}catch{}finally{this._messageLoopActive=!1}}}_popMessages(t){if(t===1)return this._read_queue.length>0?this._read_queue.pop():null;{let e=Math.min(t,this._read_queue.length);return e===0?[]:this._read_queue.splice(-e)}}},y=class u{constructor(t="a",e=256){if(t!=="a"&&t!=="b")throw new Error("side must be 'a' or 'b'");this._sab=typeof e=="number"?new SharedArrayBuffer(e*1024):e;let s=this._sab.byteLength/2;t==="a"?(this._writer=new l("w",this._sab,0,s),this._reader=new l("r",this._sab,s,s)):(this._reader=new l("r",this._sab,0,s),this._writer=new l("w",this._sab,s,s))}static from(t){if(t?.type!=="SABMessagePort")throw new Error("Not a SABMessagePort init message");return new u("b",t.buffer)}postInit(t=null,e={}){let s=[{type:"SABMessagePort",buffer:this._sab,...e},[this._sab]];if(t===null)return s;t.postMessage(...s)}postMessage(t){return this._writer.postMessage(t)}set onmessage(t){this._reader.onmessage=t}get onmessage(){return this._reader.onmessage}asyncRead(t,e){return this._reader.asyncRead(t,e)}read(t,e,s){return this._reader.read(t,e,s)}tryRead(t){return this._reader.tryRead(t)}close(){this._writer.destroy(),this._reader.destroy()}get buffer(){return this._sab}};export{y as SABMessagePort,l as SABPipe};
|
package/package.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "sab-message-port",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Worker IPC over SharedArrayBuffer — blocking reads without the event loop",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"exports": {
|
|
7
|
+
".": {
|
|
8
|
+
"import": "./dist/SABMessagePort.min.js",
|
|
9
|
+
"default": "./dist/SABMessagePort.min.js"
|
|
10
|
+
},
|
|
11
|
+
"./src": "./src/SABMessagePort.js"
|
|
12
|
+
},
|
|
13
|
+
"main": "./dist/SABMessagePort.min.js",
|
|
14
|
+
"sideEffects": false,
|
|
15
|
+
"files": [
|
|
16
|
+
"dist",
|
|
17
|
+
"src",
|
|
18
|
+
"LICENSE",
|
|
19
|
+
"README.md"
|
|
20
|
+
],
|
|
21
|
+
"scripts": {
|
|
22
|
+
"build": "esbuild src/SABMessagePort.js --bundle --format=esm --minify --outfile=dist/SABMessagePort.min.js",
|
|
23
|
+
"test": "node test/test.mjs",
|
|
24
|
+
"prepublishOnly": "npm run build"
|
|
25
|
+
},
|
|
26
|
+
"engines": {
|
|
27
|
+
"node": ">=16.0.0"
|
|
28
|
+
},
|
|
29
|
+
"keywords": [
|
|
30
|
+
"sharedarraybuffer",
|
|
31
|
+
"atomics",
|
|
32
|
+
"worker",
|
|
33
|
+
"ipc",
|
|
34
|
+
"messageport",
|
|
35
|
+
"zero-copy",
|
|
36
|
+
"blocking",
|
|
37
|
+
"web-worker",
|
|
38
|
+
"worker-threads",
|
|
39
|
+
"shared-memory"
|
|
40
|
+
],
|
|
41
|
+
"author": "Shlomi Loubaton",
|
|
42
|
+
"license": "BSD-3-Clause",
|
|
43
|
+
"repository": {
|
|
44
|
+
"type": "git",
|
|
45
|
+
"url": "git+https://github.com/shlomil/sab-message-port.git"
|
|
46
|
+
},
|
|
47
|
+
"homepage": "https://github.com/shlomil/sab-message-port#readme",
|
|
48
|
+
"bugs": {
|
|
49
|
+
"url": "https://github.com/shlomil/sab-message-port/issues"
|
|
50
|
+
},
|
|
51
|
+
"devDependencies": {
|
|
52
|
+
"esbuild": "^0.27.3"
|
|
53
|
+
}
|
|
54
|
+
}
|
|
@@ -0,0 +1,802 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SABPipe — High-level SharedArrayBuffer communication channel.
|
|
3
|
+
*
|
|
4
|
+
* Unified layout for all Lobo SABs (rpc_sab, evt_sab, dbg_sab):
|
|
5
|
+
*
|
|
6
|
+
* Byte offset Int32 index Field
|
|
7
|
+
* ───────────────────────────────────────────────────────────────
|
|
8
|
+
* 0 0 status (STATUS_ACTIVE=0, STATUS_DISPOSED=0xFFFFFFFF)
|
|
9
|
+
* 4 1 rw_signal (0=RW_CAN_WRITE (IDLE state, empty buffer), 1=RW_CAN_READ, STATUS_DISPOSED=0xFFFFFFFF)
|
|
10
|
+
* 8 2 w_data_len (0=no new data, else length of data)
|
|
11
|
+
* 12 3 num_parts (for multipart messages, total number of parts; 1 for single-part)
|
|
12
|
+
* 16 4 part_index
|
|
13
|
+
* 20 5 reserved1
|
|
14
|
+
* 24 6 reserved2
|
|
15
|
+
* 28 7 reserved3
|
|
16
|
+
* 32..131071 — data (JSON payload)
|
|
17
|
+
*
|
|
18
|
+
* RW protocol:
|
|
19
|
+
*
|
|
20
|
+
* Terms:
|
|
21
|
+
* - use the terms "read"/"write" for external API.
|
|
22
|
+
* - fresh-write = new message (not in the middle of multipart session) - previous write was single or last part of multipart session
|
|
23
|
+
* - fresh-read = not in the middle of multipart read session - previous read was single or last part of multipart session
|
|
24
|
+
*
|
|
25
|
+
* Writer low level _asyncWrite():
|
|
26
|
+
* - check role isWriter, check sab is not disposed, throw if so
|
|
27
|
+
* - The payload to write is the entire write_queue:
|
|
28
|
+
* - writer has access to write_queue { queue, finishWritePromise , finishWriteResolveFunc, finishWriteRejectFunc }
|
|
29
|
+
* - No timeouts, all code is async
|
|
30
|
+
* - _payload_in_progress = null is initialized in the class constructor for the use of this function.
|
|
31
|
+
* - Writer writes data (non-blocking, all actions with async):
|
|
32
|
+
* 1. if _payload_in_progress != null then return immediately (without awaiting)
|
|
33
|
+
* 2. waitAsync for rw_signal=0 (RW_CAN_WRITE) if needed
|
|
34
|
+
* 3. if rw_signal=STATUS_DISPOSED, throw error
|
|
35
|
+
* 4. if write_queue is empty, return immediately (nothing to write)
|
|
36
|
+
* 5. if fresh-write then
|
|
37
|
+
* - move write_queue to _payload_in_progress, break it to parts and create new empty write_queue
|
|
38
|
+
* - chain the writing of payloads
|
|
39
|
+
* _payload_in_progress.finishWritePromise.then(() => {if _write_queue.queue.length > 0) _asyncWrite()})
|
|
40
|
+
* so that next payload is written immediately after current one finishes
|
|
41
|
+
* 6. else: select next part of _payload_in_progress to be written
|
|
42
|
+
* 7. write data (JSON stringified payload or parts of it)
|
|
43
|
+
* 8. num_parts=1 (1 for single or >1 for multipart)
|
|
44
|
+
* 9. part_index (0..num_parts-1 for multipart, always 0 for single part)
|
|
45
|
+
* 10. w_data_len (length of data, written to data field)
|
|
46
|
+
* 11. rw_signal=1 (W_CAN_READ)
|
|
47
|
+
* 12. notify rw_signal
|
|
48
|
+
* - 13. if multipart: - writer can now wait (waitAsync only) for rw_signal=0 (RW_CAN_WRITE) before writing next part.
|
|
49
|
+
* - go to step 2 for next part until all parts written
|
|
50
|
+
* 14. tmp = _payload_in_progress
|
|
51
|
+
* 15. _payload_in_progress = null
|
|
52
|
+
* 16. resolve tmp.finishWritePromise resolve only after payload is cleared so new _write invocation
|
|
53
|
+
* don't chain the current payload anymore
|
|
54
|
+
*
|
|
55
|
+
* Reader: (low level _read or _tryRead):
|
|
56
|
+
* - Reader reads data (try Read or blocking Read):
|
|
57
|
+
* 1. wait for rw_signal=1 (RW_CAN_READ) or just check value and continue for non-blocking
|
|
58
|
+
* 2. if rw_signal=STATUS_DISPOSED, throw error
|
|
59
|
+
* 3. read w_data_len to get length of data
|
|
60
|
+
* 4. read data (JSON string) and parse
|
|
61
|
+
* 5. read num_parts and part_index for multipart handling - message reconstruction if needed
|
|
62
|
+
* 6. set rw_signal=0 (RW_CAN_WRITE) to indicate buffer is consumed and ready for next message
|
|
63
|
+
* 7. notify rw_signal
|
|
64
|
+
* 8. if multipart and part_index<num_parts-1, goto [step 1] and repeat for next part until all parts read
|
|
65
|
+
* 9. if multipart, after last part is read reconstruct full payload
|
|
66
|
+
* 10. parse the payload , reverse it, append the current read_queue to it and set it as the new read_queue.
|
|
67
|
+
* - Timeout Support: in blocking-read, reader can specify timeout for waiting on rw_signal in [step 1] iff not in the
|
|
68
|
+
* middle of multipart session (e.g. previous read was a single part or last part in a multipart
|
|
69
|
+
* session). During multipart session, reader waits indefinitely for each part to ensure full message
|
|
70
|
+
* is received.
|
|
71
|
+
*
|
|
72
|
+
*
|
|
73
|
+
*
|
|
74
|
+
* Reader/Writer Disposal: The dispose() method will set rw_signal to STATUS_DISPOSED and notify all waiters, ensuring that they are
|
|
75
|
+
* unblocked and can check the disposed state.
|
|
76
|
+
* After being woken up from waitAsync or wait, reader or writer should check if rw_signal
|
|
77
|
+
* is STATUS_DISPOSED and throw error if so. This ensures that any thread waiting for a message or
|
|
78
|
+
* waiting for buffer availability will be properly notified of disposal and can handle it gracefully.
|
|
79
|
+
*
|
|
80
|
+
* Higher level buffered/Queues read/write methods:
|
|
81
|
+
*
|
|
82
|
+
*
|
|
83
|
+
* async postMessage(jsonMessage) :
|
|
84
|
+
* // write a single message
|
|
85
|
+
* - queue (push) the jsonMessage into write_queue
|
|
86
|
+
* - call low level _asyncWrite() to write the entire queue without await (so main thread can call postMessage multiple times without awaiting)
|
|
87
|
+
* - return write_queue.finishWritePromise that resolves when the message is fully sent
|
|
88
|
+
*
|
|
89
|
+
*
|
|
90
|
+
* read(timeout, blocking = true, max_num_messages=1) -> jsonMessage:
|
|
91
|
+
* // reads (pop) a single message from read queue, or multiple messages if available and requested, up to max_num_messages
|
|
92
|
+
* // if blocking is false, timeout is ignored and method returns immediately with available messages up to max_num_messages or null if no messages available
|
|
93
|
+
* // if max_num_messages!=1 returns an array of messages, or empty array if no messages available,
|
|
94
|
+
* // otherwise returns a single message or null if no messages available
|
|
95
|
+
* // the read_queue is ordered newest first, popping from the end of the array returns the oldest message.
|
|
96
|
+
* // if array is returned, it should take a slice off the end of the read_queue up to max_num_messages and return it, removing those messages from the read_queue
|
|
97
|
+
* - if there are messages in read_queue, return the next max_num_messages immediately (non-blocking)
|
|
98
|
+
* - else if not blocking and rw_signal=RW_CAN_READ call _read() to read the message and return the first max_num_messages messages in the read_queue
|
|
99
|
+
* - else if blocking call _read(timeout) to read the messages and return the first max_num_messages message in the read_queue
|
|
100
|
+
*
|
|
101
|
+
* Implementation notes:
|
|
102
|
+
* - private methods and values are prefixed with _ to indicate they are not part of the public API.
|
|
103
|
+
* - if possible, avoid exposing word indices and bit flags in the public API, use higher level abstractions instead.
|
|
104
|
+
*
|
|
105
|
+
*/
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
// Shared UTF-8 codec instances, reused to avoid per-message allocation.
const _encoder = new TextEncoder();
const _decoder = new TextDecoder();

// Debug tracing — flip MYLOG_ON to true to enable verbose protocol logs.
const MYLOG_ON = false;
const mylog = MYLOG_ON
  ? (...args) => console.log('[SABPipe]', ...args)
  : () => {};
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
// ════════════════════════════════════════════════════════════════
|
|
119
|
+
// SABPipe — Core channel implementation
|
|
120
|
+
// ════════════════════════════════════════════════════════════════
|
|
121
|
+
|
|
122
|
+
export class SABPipe {
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
// Int32 indices into the control header (see the layout diagram above).
static STATUS = 0;      // pipe status word: STATUS_ACTIVE or STATUS_DISPOSED
static RW_SIGNAL = 1;   // handshake word: RW_CAN_WRITE / RW_CAN_READ / STATUS_DISPOSED
static W_DATA_LEN = 2;  // byte length of the chunk currently in the data section
static NUM_PARTS = 3;   // total parts of the in-flight message (1 for single-part)
static PART_INDEX = 4;  // 0-based index of the part currently in the buffer
static RESERVED_1 = 5;
static RESERVED_2 = 6;
static RESERVED_3 = 7;
static CONTROL_TOP = 8; // number of Int32 control fields (status, flags, metadata)
static DATA_OFFSET = 8 * 4; // byte offset where data starts (after control fields)

static STATUS_ACTIVE = 0;
static STATUS_DISPOSED = -1; // 0xFFFFFFFF as signed int32

// rw_signal states: 0 = buffer empty/idle (writer's turn), 1 = data ready (reader's turn)
static RW_CAN_WRITE = 0;
static RW_CAN_READ = 1;
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
constructor(role, sabOrSize = 131072, byteOffset = 0, sectionSize = null) { // 128 KB default
|
|
144
|
+
this._sab = typeof sabOrSize === 'number'
|
|
145
|
+
? new SharedArrayBuffer(sabOrSize)
|
|
146
|
+
: sabOrSize;
|
|
147
|
+
const size = sectionSize ?? (this._sab.byteLength - byteOffset);
|
|
148
|
+
this.i32 = new Int32Array(this._sab, byteOffset, size >> 2);
|
|
149
|
+
this.u8 = new Uint8Array(this._sab, byteOffset, size);
|
|
150
|
+
this.maxChunk = size - SABPipe.DATA_OFFSET;
|
|
151
|
+
if (role === 'w') {
|
|
152
|
+
this.isWriter = true;
|
|
153
|
+
this.isReader = false;
|
|
154
|
+
this._write_queue = this._create_write_queue();
|
|
155
|
+
this._read_queue = null;
|
|
156
|
+
this._payload_in_progress = null;
|
|
157
|
+
this._writing = false;
|
|
158
|
+
} else if (role === 'r') {
|
|
159
|
+
this.isWriter = false;
|
|
160
|
+
this.isReader = true;
|
|
161
|
+
this._read_queue = [];
|
|
162
|
+
this._write_queue = null;
|
|
163
|
+
this._reading = false;
|
|
164
|
+
this._onmessage = null;
|
|
165
|
+
this._messageLoopActive = false;
|
|
166
|
+
} else throw new Error("Invalid role parameter: must be 'r' or 'w'");
|
|
167
|
+
this._max_chunk = size - SABPipe.DATA_OFFSET;
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
c = this.constructor; // for easier access to static fields
|
|
171
|
+
|
|
172
|
+
isDisposed() {
|
|
173
|
+
return this.i32 === null || this.i32[this.c.STATUS] === this.c.STATUS_DISPOSED;
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
_checkDisposed() {
|
|
177
|
+
if (this.isDisposed()) {
|
|
178
|
+
this.i32 = null;
|
|
179
|
+
this.u8 = null;
|
|
180
|
+
this._sab = null;
|
|
181
|
+
throw new Error('SABPipe disposed');
|
|
182
|
+
}
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
destroy() {
|
|
186
|
+
if (this.i32 === null) return; // already destroyed
|
|
187
|
+
|
|
188
|
+
//wake every possible waiting thread (reader or writer) to unblock them on dispose
|
|
189
|
+
for (let i = 0; i < this.c.CONTROL_TOP; i++) {
|
|
190
|
+
Atomics.store(this.i32, i, this.c.STATUS_DISPOSED);
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
for (let i = 0; i < this.c.CONTROL_TOP; i++) {
|
|
194
|
+
Atomics.notify(this.i32, i, Infinity);
|
|
195
|
+
}
|
|
196
|
+
this.i32 = null;
|
|
197
|
+
this.u8 = null;
|
|
198
|
+
this._sab = null;
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
close() {
|
|
202
|
+
this.destroy();
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
_create_write_queue() {
|
|
206
|
+
let resolveFunc, rejectFunc;
|
|
207
|
+
const finishWritePromise = new Promise((resolve, reject) => {
|
|
208
|
+
resolveFunc = resolve;
|
|
209
|
+
rejectFunc = reject;
|
|
210
|
+
});
|
|
211
|
+
return { queue: [], finishWritePromise, finishWriteResolveFunc: resolveFunc, finishWriteRejectFunc: rejectFunc };
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
_json_to_chunks(payload) {
|
|
215
|
+
const bytes = _encoder.encode(JSON.stringify(payload));
|
|
216
|
+
let chunks = [];
|
|
217
|
+
const numParts = Math.ceil(bytes.length / this._max_chunk) || 1;
|
|
218
|
+
for (let i = 0; i < numParts; i++) {
|
|
219
|
+
const chunk = bytes.subarray(i * this._max_chunk, (i + 1) * this._max_chunk);
|
|
220
|
+
chunks.push(chunk);
|
|
221
|
+
}
|
|
222
|
+
return chunks;
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
/**
 * Low-level write implementation. Sends entire write_queue as a single payload.
 * Non-blocking (uses waitAsync only).
 *
 * Concurrency: guarded by this._writing so only one drain runs at a time;
 * messages enqueued while a send is in flight are picked up by the
 * finishWritePromise continuation chained below.
 *
 * NOTE(review): no error path ever calls finishWriteRejectFunc, so if the
 * pipe is disposed mid-send the pending finishWritePromise is left
 * unsettled and a caller awaiting postMessage() will hang — confirm whether
 * rejecting the in-progress payload on disposal is the intended contract.
 */
async _asyncWrite() {
  if (!this.isWriter) throw new Error('Only writer can write');
  this._checkDisposed();

  // Step 1: If already writing, return immediately (chaining handles continuation)
  if (this._writing) {
    mylog('_write: already in progress, returning');
    return;
  }
  this._writing = true;

  try {
    // Step 2: Wait for buffer to be available
    mylog('_write: waiting for can write');
    await this._waitForCanWrite();
    mylog('_write: can write now');

    // Step 3: Check disposed after waking
    if (Atomics.load(this.i32, this.c.RW_SIGNAL) === this.c.STATUS_DISPOSED) {
      this._checkDisposed();
    }

    // Step 4: If queue is empty, nothing to write
    if (this._write_queue.queue.length === 0) {
      return;
    }

    // Step 5: Fresh write - move queue to _payload_in_progress
    this._payload_in_progress = this._write_queue;
    this._payload_in_progress.chunks = this._json_to_chunks(this._payload_in_progress.queue);
    this._payload_in_progress.currentPart = 0;

    // Create new write_queue for incoming messages during send
    this._write_queue = this._create_write_queue();

    // Chain: when this payload finishes, send next batch if any
    this._payload_in_progress.finishWritePromise.then(() => {
      if (this._write_queue.queue.length > 0) {
        this._asyncWrite().catch(() => {}); // catch to prevent unhandled rejection on disposal
      }
    });

    // Send all parts
    const numParts = this._payload_in_progress.chunks.length;

    for (let partIndex = 0; partIndex < numParts; partIndex++) {
      // For parts after the first, wait for reader to consume previous part
      if (partIndex > 0) {
        await this._waitForCanWrite();

        // Check disposed after waking
        if (Atomics.load(this.i32, this.c.RW_SIGNAL) === this.c.STATUS_DISPOSED) {
          this._checkDisposed();
        }
      }

      // Step 7: Write data chunk
      const chunk = this._payload_in_progress.chunks[partIndex];
      this.u8.set(chunk, this.c.DATA_OFFSET);

      // Steps 8-10: Write metadata (regular writes - faster than Atomics)
      this.i32[this.c.NUM_PARTS] = numParts;
      this.i32[this.c.PART_INDEX] = partIndex;
      this.i32[this.c.W_DATA_LEN] = chunk.length;

      // Steps 11-12: Signal reader and notify
      // Atomics.store provides release semantics - ensures all above writes are visible
      mylog(`_write: signaling RW_CAN_READ, part ${partIndex}/${numParts}`);
      Atomics.store(this.i32, this.c.RW_SIGNAL, this.c.RW_CAN_READ);
      Atomics.notify(this.i32, this.c.RW_SIGNAL, 1);
    }

    // Steps 14-16: Clear payload and resolve promise
    // (payload is cleared BEFORE resolving so a chained _asyncWrite sees a
    //  fresh state and doesn't re-chain the finished payload)
    mylog('_write: all parts sent, resolving promise');
    const tmp = this._payload_in_progress;
    this._payload_in_progress = null;
    tmp.finishWriteResolveFunc();
  } finally {
    this._writing = false;
  }
}
|
|
311
|
+
|
|
312
|
+
/**
|
|
313
|
+
* Wait for rw_signal to become RW_CAN_WRITE (0).
|
|
314
|
+
* Uses Atomics.waitAsync for non-blocking async wait.
|
|
315
|
+
*/
|
|
316
|
+
async _waitForCanWrite() {
|
|
317
|
+
while (true) {
|
|
318
|
+
const signal = Atomics.load(this.i32, this.c.RW_SIGNAL);
|
|
319
|
+
|
|
320
|
+
if (signal === this.c.RW_CAN_WRITE) {
|
|
321
|
+
return; // Buffer is available
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
if (signal === this.c.STATUS_DISPOSED) {
|
|
325
|
+
this._checkDisposed();
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
// Wait for signal to change
|
|
329
|
+
const result = Atomics.waitAsync(this.i32, this.c.RW_SIGNAL, signal);
|
|
330
|
+
if (result.async) {
|
|
331
|
+
await result.value;
|
|
332
|
+
}
|
|
333
|
+
}
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
/**
|
|
337
|
+
* Low-level read implementation. Reads from SAB and populates read_queue.
|
|
338
|
+
* @param {boolean} blocking - If true, uses Atomics.wait (worker thread only)
|
|
339
|
+
* @param {number} timeout - Timeout in ms (only for fresh-read, not mid-multipart)
|
|
340
|
+
* @returns {boolean} - true if data was read, false if timeout/no data (non-blocking)
|
|
341
|
+
*/
|
|
342
|
+
_read(blocking = true, timeout = Infinity) {
|
|
343
|
+
if (!this.isReader) throw new Error('Only reader can read');
|
|
344
|
+
this._checkDisposed();
|
|
345
|
+
|
|
346
|
+
mylog(`_read: starting, blocking=${blocking}, timeout=${timeout}`);
|
|
347
|
+
|
|
348
|
+
const chunks = [];
|
|
349
|
+
let numParts = 1;
|
|
350
|
+
let isFirstPart = true;
|
|
351
|
+
|
|
352
|
+
// Read all parts (single loop iteration for single-part messages)
|
|
353
|
+
while (true) {
|
|
354
|
+
// Step 1: Wait for data to be available
|
|
355
|
+
const signal = Atomics.load(this.i32, this.c.RW_SIGNAL);
|
|
356
|
+
mylog(`_read: signal=${signal}, RW_CAN_READ=${this.c.RW_CAN_READ}`);
|
|
357
|
+
|
|
358
|
+
if (signal === this.c.STATUS_DISPOSED) {
|
|
359
|
+
this._checkDisposed();
|
|
360
|
+
}
|
|
361
|
+
|
|
362
|
+
if (signal !== this.c.RW_CAN_READ) {
|
|
363
|
+
if (!blocking) {
|
|
364
|
+
mylog('_read: non-blocking, no data');
|
|
365
|
+
return false;
|
|
366
|
+
}
|
|
367
|
+
|
|
368
|
+
// Blocking: wait for signal
|
|
369
|
+
// Only use timeout on first part (fresh-read)
|
|
370
|
+
const waitTimeout = isFirstPart ? timeout : Infinity;
|
|
371
|
+
mylog(`_read: entering Atomics.wait, signal=${signal}, timeout=${waitTimeout}`);
|
|
372
|
+
const result = Atomics.wait(this.i32, this.c.RW_SIGNAL, signal, waitTimeout);
|
|
373
|
+
mylog(`_read: Atomics.wait returned ${result}`);
|
|
374
|
+
|
|
375
|
+
if (result === 'timed-out') {
|
|
376
|
+
mylog('_read: timed out');
|
|
377
|
+
return false;
|
|
378
|
+
}
|
|
379
|
+
|
|
380
|
+
// Re-check after waking
|
|
381
|
+
const newSignal = Atomics.load(this.i32, this.c.RW_SIGNAL);
|
|
382
|
+
mylog(`_read: after wake, newSignal=${newSignal}`);
|
|
383
|
+
if (newSignal === this.c.STATUS_DISPOSED) {
|
|
384
|
+
this._checkDisposed();
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
if (newSignal !== this.c.RW_CAN_READ) {
|
|
388
|
+
mylog('_read: spurious wakeup, retrying');
|
|
389
|
+
continue; // Spurious wakeup, retry
|
|
390
|
+
}
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
// Steps 3-5: Read metadata (regular reads - safe after Atomics.wait acquire)
|
|
394
|
+
const dataLen = this.i32[this.c.W_DATA_LEN];
|
|
395
|
+
numParts = this.i32[this.c.NUM_PARTS];
|
|
396
|
+
const partIndex = this.i32[this.c.PART_INDEX];
|
|
397
|
+
|
|
398
|
+
// Step 4: Read data chunk (copy to avoid issues when buffer is reused)
|
|
399
|
+
const chunk = this.u8.slice(this.c.DATA_OFFSET, this.c.DATA_OFFSET + dataLen);
|
|
400
|
+
chunks.push(chunk);
|
|
401
|
+
|
|
402
|
+
// Steps 6-7: Signal writer and notify
|
|
403
|
+
// Atomics.store provides release semantics
|
|
404
|
+
mylog(`_read: got part ${partIndex}/${numParts}, len=${dataLen}, signaling RW_CAN_WRITE`);
|
|
405
|
+
Atomics.store(this.i32, this.c.RW_SIGNAL, this.c.RW_CAN_WRITE);
|
|
406
|
+
Atomics.notify(this.i32, this.c.RW_SIGNAL, 1);
|
|
407
|
+
|
|
408
|
+
isFirstPart = false;
|
|
409
|
+
|
|
410
|
+
// Step 8: Check if more parts to read
|
|
411
|
+
if (partIndex >= numParts - 1) {
|
|
412
|
+
mylog('_read: last part received');
|
|
413
|
+
break; // Last part received
|
|
414
|
+
}
|
|
415
|
+
|
|
416
|
+
// Continue loop for next part (no timeout - must complete multipart)
|
|
417
|
+
}
|
|
418
|
+
|
|
419
|
+
// Step 9: Reconstruct payload if multipart
|
|
420
|
+
let payloadBytes;
|
|
421
|
+
if (chunks.length === 1) {
|
|
422
|
+
payloadBytes = chunks[0];
|
|
423
|
+
} else {
|
|
424
|
+
// Combine all chunks
|
|
425
|
+
const totalLen = chunks.reduce((sum, c) => sum + c.length, 0);
|
|
426
|
+
payloadBytes = new Uint8Array(totalLen);
|
|
427
|
+
let offset = 0;
|
|
428
|
+
for (const chunk of chunks) {
|
|
429
|
+
payloadBytes.set(chunk, offset);
|
|
430
|
+
offset += chunk.length;
|
|
431
|
+
}
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
// Step 10: Parse payload, reverse, prepend to read_queue
|
|
435
|
+
const messages = JSON.parse(_decoder.decode(payloadBytes));
|
|
436
|
+
|
|
437
|
+
// messages is the queue array sent by writer
|
|
438
|
+
// Reverse so oldest is at the end (for FIFO pop)
|
|
439
|
+
messages.reverse();
|
|
440
|
+
|
|
441
|
+
// Prepend to read_queue (new messages at front, oldest at end)
|
|
442
|
+
this._read_queue = messages.concat(this._read_queue);
|
|
443
|
+
|
|
444
|
+
return true;
|
|
445
|
+
}
|
|
446
|
+
|
|
447
|
+
/**
|
|
448
|
+
* Wait for rw_signal to become RW_CAN_READ (1).
|
|
449
|
+
* Uses Atomics.waitAsync for non-blocking async wait.
|
|
450
|
+
* @param {number} timeout - Timeout in ms (Infinity = wait forever)
|
|
451
|
+
* @returns {Promise<boolean>} - true if data available, false if timed out
|
|
452
|
+
*/
|
|
453
|
+
async _waitForCanRead(timeout = Infinity) {
|
|
454
|
+
const deadline = timeout === Infinity ? Infinity : Date.now() + timeout;
|
|
455
|
+
while (true) {
|
|
456
|
+
const signal = Atomics.load(this.i32, this.c.RW_SIGNAL);
|
|
457
|
+
|
|
458
|
+
if (signal === this.c.RW_CAN_READ) {
|
|
459
|
+
return true; // Data is available
|
|
460
|
+
}
|
|
461
|
+
|
|
462
|
+
if (signal === this.c.STATUS_DISPOSED) {
|
|
463
|
+
this._checkDisposed();
|
|
464
|
+
}
|
|
465
|
+
|
|
466
|
+
// Calculate remaining time for this wait iteration
|
|
467
|
+
const remaining = deadline === Infinity ? Infinity : Math.max(0, deadline - Date.now());
|
|
468
|
+
if (remaining === 0) return false; // timed out
|
|
469
|
+
|
|
470
|
+
// Wait for signal to change
|
|
471
|
+
// Note: Atomics.waitAsync's timeout doesn't keep the Node.js event loop alive,
|
|
472
|
+
// so we use Promise.race with setTimeout for finite timeouts.
|
|
473
|
+
const result = Atomics.waitAsync(this.i32, this.c.RW_SIGNAL, signal);
|
|
474
|
+
if (result.async) {
|
|
475
|
+
if (remaining === Infinity) {
|
|
476
|
+
await result.value;
|
|
477
|
+
} else {
|
|
478
|
+
const outcome = await Promise.race([
|
|
479
|
+
result.value,
|
|
480
|
+
new Promise(r => setTimeout(() => r('timed-out'), remaining))
|
|
481
|
+
]);
|
|
482
|
+
if (outcome === 'timed-out') return false;
|
|
483
|
+
}
|
|
484
|
+
}
|
|
485
|
+
// loop: re-check signal (handles spurious wakeups and value changes)
|
|
486
|
+
}
|
|
487
|
+
}
|
|
488
|
+
|
|
489
|
+
/**
 * Low-level async read implementation. Reads from SAB and populates read_queue.
 * Uses waitAsync — safe for main thread. Mirrors _read() logic.
 *
 * Protocol per part: wait for RW_CAN_READ, copy metadata + data chunk out of
 * the shared buffer, then hand the buffer back to the writer by storing
 * RW_CAN_WRITE and notifying. Multipart payloads loop until the last part.
 *
 * @param {number} timeout - Timeout in ms (only for fresh-read, not mid-multipart)
 * @returns {Promise<boolean>} - true if data was read, false if timeout/no data
 */
async _asyncRead(timeout = Infinity) {
  if (!this.isReader) throw new Error('Only reader can read');
  this._checkDisposed();

  // Guard: prevent concurrent async reads from interleaving
  // (two readers racing on the same part would corrupt chunk ordering)
  if (this._reading) {
    mylog('_asyncRead: already in progress, returning');
    return false;
  }
  this._reading = true;

  try {
    mylog(`_asyncRead: starting, timeout=${timeout}`);

    const chunks = [];
    let isFirstPart = true;

    // Read all parts (single loop iteration for single-part messages)
    while (true) {
      // Step 1: Wait for data to be available.
      // Only use timeout on the first part (fresh-read); once a multipart
      // transfer has started it must be completed, so later parts wait forever.
      const waitTimeout = isFirstPart ? timeout : Infinity;
      mylog(`_asyncRead: waiting for can read, timeout=${waitTimeout}`);
      const ready = await this._waitForCanRead(waitTimeout);

      if (!ready) {
        mylog('_asyncRead: timed out or no data');
        return false;
      }

      // Step 2: Check disposed after waking (the wait may have been ended
      // by disposal rather than by data)
      if (Atomics.load(this.i32, this.c.RW_SIGNAL) === this.c.STATUS_DISPOSED) {
        this._checkDisposed();
      }

      // Steps 3-5: Read metadata (plain reads are safe here: the Atomics
      // wait/load above provides the acquire ordering)
      const dataLen = this.i32[this.c.W_DATA_LEN];
      const numParts = this.i32[this.c.NUM_PARTS];
      const partIndex = this.i32[this.c.PART_INDEX];

      // Step 6: Read data chunk (slice() copies, so the shared buffer can be
      // safely reused by the writer right after we signal below)
      const chunk = this.u8.slice(this.c.DATA_OFFSET, this.c.DATA_OFFSET + dataLen);
      chunks.push(chunk);

      // Steps 7-8: Signal writer and notify.
      // Atomics.store provides release semantics: the writer may reuse the
      // data region as soon as it observes RW_CAN_WRITE.
      mylog(`_asyncRead: got part ${partIndex}/${numParts}, len=${dataLen}, signaling RW_CAN_WRITE`);
      Atomics.store(this.i32, this.c.RW_SIGNAL, this.c.RW_CAN_WRITE);
      Atomics.notify(this.i32, this.c.RW_SIGNAL, 1);

      isFirstPart = false;

      // Step 9: Check if more parts to read (partIndex is 0-based)
      if (partIndex >= numParts - 1) {
        mylog('_asyncRead: last part received');
        break; // Last part received
      }

      // Continue loop for next part (no timeout - must complete multipart)
    }

    // Step 10: Reconstruct payload if multipart
    let payloadBytes;
    if (chunks.length === 1) {
      payloadBytes = chunks[0];
    } else {
      // Concatenate all chunks into one contiguous byte array
      const totalLen = chunks.reduce((sum, c) => sum + c.length, 0);
      payloadBytes = new Uint8Array(totalLen);
      let offset = 0;
      for (const chunk of chunks) {
        payloadBytes.set(chunk, offset);
        offset += chunk.length;
      }
    }

    // Step 11: Parse payload, reverse, prepend to read_queue.
    // The writer sends its whole queue as one JSON array; reversing puts the
    // oldest message at the end so _popMessages can pop() in FIFO order.
    const messages = JSON.parse(_decoder.decode(payloadBytes));
    messages.reverse();
    this._read_queue = messages.concat(this._read_queue);

    return true;
  } finally {
    // Always release the re-entrancy guard, even on dispose/parse errors
    this._reading = false;
  }
}
|
|
579
|
+
|
|
580
|
+
// ════════════════════════════════════════════════════════════════
|
|
581
|
+
// High-level API
|
|
582
|
+
// ════════════════════════════════════════════════════════════════
|
|
583
|
+
|
|
584
|
+
/**
|
|
585
|
+
* Write a message to the channel.
|
|
586
|
+
* Non-blocking - queues message and returns promise that resolves when sent.
|
|
587
|
+
* @param {*} jsonMessage - JSON-serializable message
|
|
588
|
+
* @returns {Promise} - Resolves when the message batch is fully written
|
|
589
|
+
*/
|
|
590
|
+
postMessage(jsonMessage) {
|
|
591
|
+
if (!this.isWriter) throw new Error('Only writer can write');
|
|
592
|
+
this._checkDisposed();
|
|
593
|
+
|
|
594
|
+
// Push message to queue
|
|
595
|
+
this._write_queue.queue.push(jsonMessage);
|
|
596
|
+
|
|
597
|
+
// Save promise BEFORE _asyncWrite() might replace _write_queue
|
|
598
|
+
const promise = this._write_queue.finishWritePromise;
|
|
599
|
+
|
|
600
|
+
// Trigger send (no await - fire and forget; catch to prevent unhandled rejection on disposal)
|
|
601
|
+
this._asyncWrite().catch(() => {});
|
|
602
|
+
|
|
603
|
+
// Return promise that resolves when this batch is sent
|
|
604
|
+
return promise;
|
|
605
|
+
}
|
|
606
|
+
|
|
607
|
+
/**
|
|
608
|
+
* Read message(s) from the channel.
|
|
609
|
+
* @param {number} timeout - Timeout in ms (ignored if non-blocking)
|
|
610
|
+
* @param {boolean} blocking - If true, blocks until data available (worker thread only)
|
|
611
|
+
* @param {number} max_num_messages - Maximum messages to return
|
|
612
|
+
* @returns {*|null|Array} - Single message/null (max=1) or array of messages (max>1)
|
|
613
|
+
*/
|
|
614
|
+
read(timeout = Infinity, blocking = true, max_num_messages = 1) {
|
|
615
|
+
if (!this.isReader) throw new Error('Only reader can read');
|
|
616
|
+
if (this._onmessage !== null) throw new Error('Cannot call read while onmessage is active');
|
|
617
|
+
this._checkDisposed();
|
|
618
|
+
|
|
619
|
+
// If queue has messages, return immediately
|
|
620
|
+
if (this._read_queue.length > 0) {
|
|
621
|
+
return this._popMessages(max_num_messages);
|
|
622
|
+
}
|
|
623
|
+
|
|
624
|
+
// Try to read from SAB
|
|
625
|
+
this._read(blocking, timeout);
|
|
626
|
+
|
|
627
|
+
// Return from queue (may be empty if timeout or no data)
|
|
628
|
+
return this._popMessages(max_num_messages);
|
|
629
|
+
}
|
|
630
|
+
|
|
631
|
+
/**
|
|
632
|
+
* Non-blocking read - returns immediately with available messages.
|
|
633
|
+
* @param {number} max_num_messages - Maximum messages to return
|
|
634
|
+
* @returns {*|null|Array} - Single message/null (max=1) or array of messages (max>1)
|
|
635
|
+
*/
|
|
636
|
+
tryRead(max_num_messages = 1) {
|
|
637
|
+
return this.read(0, false, max_num_messages);
|
|
638
|
+
}
|
|
639
|
+
|
|
640
|
+
/**
|
|
641
|
+
* Async read message(s) from the channel. Main-thread safe.
|
|
642
|
+
* @param {number} timeout - Timeout in ms
|
|
643
|
+
* @param {number} max_num_messages - Maximum messages to return
|
|
644
|
+
* @returns {Promise<*|null|Array>} - Single message/null (max=1) or array of messages (max>1)
|
|
645
|
+
*/
|
|
646
|
+
async asyncRead(timeout = Infinity, max_num_messages = 1) {
|
|
647
|
+
if (!this.isReader) throw new Error('Only reader can read');
|
|
648
|
+
if (this._onmessage !== null) throw new Error('Cannot call asyncRead while onmessage is active');
|
|
649
|
+
this._checkDisposed();
|
|
650
|
+
|
|
651
|
+
// If queue has messages, return immediately
|
|
652
|
+
if (this._read_queue.length > 0) {
|
|
653
|
+
return this._popMessages(max_num_messages);
|
|
654
|
+
}
|
|
655
|
+
|
|
656
|
+
// Async read from SAB
|
|
657
|
+
await this._asyncRead(timeout);
|
|
658
|
+
|
|
659
|
+
// Return from queue (may be empty if timeout or no data)
|
|
660
|
+
return this._popMessages(max_num_messages);
|
|
661
|
+
}
|
|
662
|
+
|
|
663
|
+
/**
|
|
664
|
+
* Event-driven message handler. Mirrors the Web API MessagePort.onmessage pattern.
|
|
665
|
+
* Setting a handler starts a continuous async read loop; setting null stops it.
|
|
666
|
+
* @param {Function|null} handler - handler(message) called for each received message
|
|
667
|
+
*/
|
|
668
|
+
set onmessage(handler) {
|
|
669
|
+
if (!this.isReader) throw new Error('Only reader can set onmessage');
|
|
670
|
+
this._onmessage = handler ?? null;
|
|
671
|
+
|
|
672
|
+
// Start loop if handler set and loop not already running
|
|
673
|
+
if (handler !== null && !this._messageLoopActive) {
|
|
674
|
+
this._messageLoop(); // fire-and-forget (no await)
|
|
675
|
+
}
|
|
676
|
+
}
|
|
677
|
+
|
|
678
|
+
/**
 * The currently installed message handler, or null when event-driven
 * delivery is inactive.
 * @returns {Function|null}
 */
get onmessage() {
  return this._onmessage;
}
|
|
681
|
+
|
|
682
|
+
/**
|
|
683
|
+
* Internal message loop. Continuously reads messages and dispatches to onmessage handler.
|
|
684
|
+
* Runs until onmessage is set to null or channel is disposed.
|
|
685
|
+
*/
|
|
686
|
+
async _messageLoop() {
|
|
687
|
+
if (this._messageLoopActive) return;
|
|
688
|
+
this._messageLoopActive = true;
|
|
689
|
+
|
|
690
|
+
try {
|
|
691
|
+
while (this._onmessage !== null) {
|
|
692
|
+
// 1. Drain all queued messages first
|
|
693
|
+
while (this._read_queue.length > 0 && this._onmessage !== null) {
|
|
694
|
+
const msg = this._read_queue.pop();
|
|
695
|
+
try {
|
|
696
|
+
this._onmessage({ data: msg });
|
|
697
|
+
} catch (e) {
|
|
698
|
+
// Handler error does not break the loop
|
|
699
|
+
}
|
|
700
|
+
}
|
|
701
|
+
|
|
702
|
+
if (this._onmessage === null) break;
|
|
703
|
+
|
|
704
|
+
// 2. Wait for new data (no timeout — waits until data or disposal)
|
|
705
|
+
await this._asyncRead();
|
|
706
|
+
// _asyncRead populates _read_queue; loop drains it on next iteration
|
|
707
|
+
}
|
|
708
|
+
} catch (err) {
|
|
709
|
+
// Channel disposed — loop stops silently
|
|
710
|
+
} finally {
|
|
711
|
+
this._messageLoopActive = false;
|
|
712
|
+
}
|
|
713
|
+
}
|
|
714
|
+
|
|
715
|
+
/**
|
|
716
|
+
* Pop messages from read_queue.
|
|
717
|
+
* @param {number} max - Maximum messages to pop
|
|
718
|
+
* @returns {*|null|Array} - Single message/null (max=1) or array (max>1)
|
|
719
|
+
*/
|
|
720
|
+
_popMessages(max) {
|
|
721
|
+
if (max === 1) {
|
|
722
|
+
// Return single message or null
|
|
723
|
+
return this._read_queue.length > 0 ? this._read_queue.pop() : null;
|
|
724
|
+
} else {
|
|
725
|
+
// Return array of messages (FIFO - oldest first)
|
|
726
|
+
const count = Math.min(max, this._read_queue.length);
|
|
727
|
+
if (count === 0) return [];
|
|
728
|
+
// Pop from end (oldest messages)
|
|
729
|
+
return this._read_queue.splice(-count);
|
|
730
|
+
}
|
|
731
|
+
}
|
|
732
|
+
}
|
|
733
|
+
|
|
734
|
+
|
|
735
|
+
// ════════════════════════════════════════════════════════════════
|
|
736
|
+
// SABMessagePort — Bidirectional wrapper over two SABPipe instances
|
|
737
|
+
// ════════════════════════════════════════════════════════════════
|
|
738
|
+
|
|
739
|
+
export class SABMessagePort {

  /**
   * Bidirectional message port over a single SharedArrayBuffer, split into
   * two half-duplex SABPipe sections (one per direction).
   * @param {'a'|'b'} side - Which endpoint this instance is. Side 'a' writes
   *   into the first half and reads from the second; side 'b' is mirrored.
   * @param {SharedArrayBuffer|number} sabOrSizeKB - Existing SAB to attach
   *   to, or a size in KB used to allocate a new one.
   * @throws {Error} if side is not 'a' or 'b'
   */
  constructor(side = 'a', sabOrSizeKB = 256) {
    if (side !== 'a' && side !== 'b') throw new Error("side must be 'a' or 'b'");

    this._sab = (typeof sabOrSizeKB === 'number')
      ? new SharedArrayBuffer(sabOrSizeKB * 1024)
      : sabOrSizeKB;

    // NOTE(review): assumes byteLength is even (and aligned as SABPipe
    // requires). A caller-supplied SAB with an odd length would produce a
    // fractional section offset — confirm SABPipe validates this.
    const sectionSize = this._sab.byteLength / 2;

    if (side === 'a') {
      this._writer = new SABPipe('w', this._sab, 0, sectionSize);
      this._reader = new SABPipe('r', this._sab, sectionSize, sectionSize);
    } else {
      this._reader = new SABPipe('r', this._sab, 0, sectionSize);
      this._writer = new SABPipe('w', this._sab, sectionSize, sectionSize);
    }
  }

  /**
   * Construct the 'b' side from the init message produced by postInit().
   * @param {{type: string, buffer: SharedArrayBuffer}} initMsg
   * @returns {SABMessagePort}
   * @throws {Error} if the message is not a SABMessagePort init message
   */
  static from(initMsg) {
    if (initMsg?.type !== 'SABMessagePort') {
      throw new Error('Not a SABMessagePort init message');
    }
    return new SABMessagePort('b', initMsg.buffer);
  }

  /**
   * Send (or build) the init message that lets the other thread construct
   * its side via SABMessagePort.from().
   * @param {{postMessage: Function}|null} target - Worker/port to post to,
   *   or null to just return the [message, transferList] pair.
   * @param {Object} extraProps - Extra properties merged into the init message.
   * @returns {[Object, Array]|undefined} The message pair when target is null.
   */
  postInit(target = null, extraProps = {}) {
    // Fix: SharedArrayBuffers are shared via structured clone, NOT
    // transferred. Placing one in the transfer list (previously
    // `[this._sab]`) throws a DataCloneError/TypeError in both browsers and
    // Node worker_threads, so the transfer list must stay empty.
    const msg = [
      { type: 'SABMessagePort', buffer: this._sab, ...extraProps },
      []
    ];
    if (target === null) return msg;
    target.postMessage(...msg);
  }

  /**
   * Write a message on this side's outgoing pipe.
   * @param {*} jsonMessage - JSON-serializable message
   * @returns {Promise} - Resolves when the message batch is fully written
   */
  postMessage(jsonMessage) {
    return this._writer.postMessage(jsonMessage);
  }

  /** Event-driven receive — mirrors MessagePort.onmessage. */
  set onmessage(handler) { this._reader.onmessage = handler; }
  get onmessage() { return this._reader.onmessage; }

  /**
   * Async read from the incoming pipe (main-thread safe).
   * @returns {Promise<*|null|Array>}
   */
  asyncRead(timeout, max_num_messages) {
    return this._reader.asyncRead(timeout, max_num_messages);
  }

  /**
   * Synchronous (optionally blocking) read from the incoming pipe.
   * @returns {*|null|Array}
   */
  read(timeout, blocking, max_num_messages) {
    return this._reader.read(timeout, blocking, max_num_messages);
  }

  /**
   * Non-blocking read of already-available messages.
   * @returns {*|null|Array}
   */
  tryRead(max_num_messages) {
    return this._reader.tryRead(max_num_messages);
  }

  /** Dispose both directions of the channel. */
  close() {
    this._writer.destroy();
    this._reader.destroy();
  }

  /** The underlying SharedArrayBuffer backing both pipe sections. */
  get buffer() {
    return this._sab;
  }
}
|