@dmop/puru 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.cts CHANGED
@@ -10,46 +10,246 @@ interface SpawnResult<T> {
10
10
  cancel: () => void;
11
11
  }
12
12
 
13
+ /**
14
+ * A Go-style channel for communicating between async tasks and across worker threads.
15
+ *
16
+ * Use `chan<T>(capacity?)` to create a channel. Values must be structured-cloneable
17
+ * (no functions, symbols, or WeakRefs). `null` cannot be sent — `recv()` returns
18
+ * `null` only when the channel is closed.
19
+ *
20
+ * @example
21
+ * const ch = chan<number>(10)
22
+ * await ch.send(42)
23
+ * const value = await ch.recv() // 42
24
+ * ch.close()
25
+ * await ch.recv() // null — channel closed
26
+ *
27
+ * @example
28
+ * // Async iteration ends automatically when the channel is closed
29
+ * for await (const item of ch) {
30
+ * process(item)
31
+ * }
32
+ */
13
33
  interface Channel<T> {
14
34
  send(value: T): Promise<void>;
35
+ /** Resolves with the next value, or `null` if the channel is closed. */
15
36
  recv(): Promise<T | null>;
16
37
  close(): void;
17
38
  [Symbol.asyncIterator](): AsyncIterator<T>;
18
39
  }
19
- declare function chan<T>(capacity?: number): Channel<T>;
40
+ /**
41
+ * Create a Go-style channel for communicating between tasks and across worker threads.
42
+ *
43
+ * Provides backpressure: `send()` blocks when the buffer is full,
44
+ * `recv()` blocks when the buffer is empty. Channel values must be structured-cloneable
45
+ * (no functions, symbols, or WeakRefs). `null` cannot be sent — it signals closure.
46
+ *
47
+ * @param capacity Buffer size. `0` (default) = unbuffered: each `send()` blocks until a `recv()` is ready.
48
+ *
49
+ * @example
50
+ * const ch = chan<string>(5) // buffered channel, capacity 5
51
+ * await ch.send('hello')
52
+ * const msg = await ch.recv() // 'hello'
53
+ * ch.close()
54
+ *
55
+ * @example
56
+ * // Fan-out: multiple workers pulling from the same channel
57
+ * const input = chan<Job>(50)
58
+ * const output = chan<Result>(50)
59
+ *
60
+ * for (let i = 0; i < 4; i++) {
61
+ * spawn(async ({ input, output }) => {
62
+ * for await (const job of input) {
63
+ * await output.send(processJob(job))
64
+ * }
65
+ * }, { channels: { input, output } })
66
+ * }
67
+ */
68
+ declare function chan<T extends NonNullable<unknown>>(capacity?: number): Channel<T>;
20
69
 
70
+ /**
71
+ * Run a function in a worker thread. Returns a handle with the result promise and a cancel function.
72
+ *
73
+ * **Functions must be self-contained** — they are serialized via `.toString()` and sent to a
74
+ * worker thread, so they cannot capture variables from the enclosing scope. Define everything
75
+ * you need inside the function body, or use `task()` to pass arguments.
76
+ *
77
+ * **Two modes:**
78
+ * - Default (exclusive): the function gets a dedicated thread. Best for CPU-bound work (> 5ms).
79
+ * - `{ concurrent: true }`: many tasks share a thread's event loop. Best for async/I/O work.
80
+ *
81
+ * @example
82
+ * // CPU-bound work — define helpers inside the function body
83
+ * const { result } = spawn(() => {
84
+ * function fibonacci(n: number): number {
85
+ * if (n <= 1) return n
86
+ * return fibonacci(n - 1) + fibonacci(n - 2)
87
+ * }
88
+ * return fibonacci(40)
89
+ * })
90
+ * console.log(await result)
91
+ *
92
+ * @example
93
+ * // I/O-bound work — concurrent mode shares threads efficiently
94
+ * const { result } = spawn(() => fetch('https://api.example.com').then(r => r.json()), {
95
+ * concurrent: true,
96
+ * })
97
+ *
98
+ * @example
99
+ * // Cancel a long-running task
100
+ * const { result, cancel } = spawn(() => longRunningTask())
101
+ * setTimeout(cancel, 5000)
102
+ *
103
+ * @example
104
+ * // Cross-thread channels — pass channels via opts.channels
105
+ * const ch = chan<number>(10)
106
+ * spawn(async ({ ch }) => {
107
+ * for (let i = 0; i < 100; i++) await ch.send(i)
108
+ * ch.close()
109
+ * }, { channels: { ch } })
110
+ */
21
111
  declare function spawn<T>(fn: (() => T | Promise<T>) | ((channels: Record<string, Channel<unknown>>) => T | Promise<T>), opts?: {
22
112
  priority?: 'low' | 'normal' | 'high';
23
113
  concurrent?: boolean;
24
114
  channels?: Record<string, Channel<unknown>>;
25
115
  }): SpawnResult<T>;
26
116
 
117
+ /**
118
+ * Structured concurrency: spawn multiple tasks and wait for all to complete.
119
+ *
120
+ * Like `Promise.all`, but tasks run in worker threads across CPU cores. Results are
121
+ * returned in the order tasks were spawned. A shared `AbortSignal` lets long-running
122
+ * tasks observe cooperative cancellation via `cancel()`.
123
+ *
124
+ * For fail-fast behavior (cancel all on first error), use `ErrGroup` instead.
125
+ *
126
+ * @example
127
+ * // CPU-bound parallel work
128
+ * const wg = new WaitGroup()
129
+ * wg.spawn(() => { /* define helpers inside — no closure captures *\/ })
130
+ * wg.spawn(() => { /* another CPU task *\/ })
131
+ * const [r1, r2] = await wg.wait()
132
+ *
133
+ * @example
134
+ * // Mixed CPU + I/O
135
+ * wg.spawn(() => crunchNumbers(), )
136
+ * wg.spawn(() => fetch('https://api.example.com').then(r => r.json()), { concurrent: true })
137
+ * const results = await wg.wait()
138
+ *
139
+ * @example
140
+ * // Tolerate partial failures with waitSettled
141
+ * const settled = await wg.waitSettled()
142
+ * for (const r of settled) {
143
+ * if (r.status === 'fulfilled') use(r.value)
144
+ * else console.error(r.reason)
145
+ * }
146
+ */
27
147
  declare class WaitGroup {
28
148
  private tasks;
29
149
  private controller;
150
+ /**
151
+ * An `AbortSignal` shared across all tasks in this group.
152
+ * Pass it into spawned functions so they can stop early when `cancel()` is called.
153
+ */
30
154
  get signal(): AbortSignal;
155
+ /**
156
+ * Spawns a function on a worker thread and adds it to the group.
157
+ *
158
+ * @throws If the group has already been cancelled.
159
+ */
31
160
  spawn(fn: (() => unknown) | ((channels: Record<string, Channel<unknown>>) => unknown), opts?: {
32
161
  concurrent?: boolean;
33
162
  channels?: Record<string, Channel<unknown>>;
34
163
  }): void;
164
+ /**
165
+ * Waits for all tasks to complete successfully.
166
+ * Rejects as soon as any task throws.
167
+ */
35
168
  wait(): Promise<unknown[]>;
169
+ /**
170
+ * Waits for all tasks to settle (fulfilled or rejected) and returns each outcome.
171
+ * Never rejects — inspect each `PromiseSettledResult` to handle failures individually.
172
+ */
36
173
  waitSettled(): Promise<PromiseSettledResult<unknown>[]>;
174
+ /**
175
+ * Cancels all tasks in the group and signals the shared `AbortSignal`.
176
+ * Already-settled tasks are unaffected.
177
+ */
37
178
  cancel(): void;
38
179
  }
39
180
 
181
+ /**
182
+ * Like `WaitGroup`, but cancels all remaining tasks on the first error.
183
+ *
184
+ * Modeled after Go's `golang.org/x/sync/errgroup`. Use when partial results are useless —
185
+ * if any task fails, there is no point waiting for the rest. Benchmarks show ~3.7x faster
186
+ * failure handling than waiting for all tasks to settle.
187
+ *
188
+ * For "wait for everything regardless of failures", use `WaitGroup` with `waitSettled()`.
189
+ *
190
+ * @example
191
+ * const eg = new ErrGroup()
192
+ * eg.spawn(() => run('fetchUser', userId))
193
+ * eg.spawn(() => run('fetchOrders', userId))
194
+ * eg.spawn(() => run('fetchAnalytics', userId))
195
+ *
196
+ * try {
197
+ * const [user, orders, analytics] = await eg.wait()
198
+ * } catch (err) {
199
+ * // First failure cancelled the rest — no partial data to clean up
200
+ * }
201
+ *
202
+ * @example
203
+ * // Observe cancellation inside a task via the shared signal
204
+ * const eg = new ErrGroup()
205
+ * eg.spawn(() => {
206
+ * // eg.signal is not directly available inside the worker —
207
+ * // use task() to pass arguments and check a channel or AbortSignal instead
208
+ * })
209
+ */
40
210
  declare class ErrGroup {
41
211
  private tasks;
42
212
  private controller;
43
213
  private firstError;
44
214
  private hasError;
45
215
  get signal(): AbortSignal;
46
- spawn(fn: () => unknown, opts?: {
216
+ spawn(fn: (() => unknown) | ((channels: Record<string, Channel<unknown>>) => unknown), opts?: {
47
217
  concurrent?: boolean;
218
+ channels?: Record<string, Channel<unknown>>;
48
219
  }): void;
49
220
  wait(): Promise<unknown[]>;
50
221
  cancel(): void;
51
222
  }
52
223
 
224
+ /**
225
+ * Async mutual exclusion. Serializes access to shared state under concurrency.
226
+ *
227
+ * Prefer `withLock()` over manual `lock()`/`unlock()` — it automatically releases
228
+ * the lock even if the callback throws.
229
+ *
230
+ * Note: `Mutex` operates on the main thread (or whichever thread creates it).
231
+ * Worker threads do not share memory, so this is not useful for cross-thread locking.
232
+ * For cross-thread coordination, use channels instead.
233
+ *
234
+ * @example
235
+ * const mu = new Mutex()
236
+ *
237
+ * // withLock — recommended (auto-unlocks on error)
238
+ * const result = await mu.withLock(async () => {
239
+ * const current = await db.get('counter')
240
+ * await db.set('counter', current + 1)
241
+ * return current + 1
242
+ * })
243
+ *
244
+ * @example
245
+ * // Manual lock/unlock (use withLock instead when possible)
246
+ * await mu.lock()
247
+ * try {
248
+ * // critical section
249
+ * } finally {
250
+ * mu.unlock()
251
+ * }
252
+ */
53
253
  declare class Mutex {
54
254
  private queue;
55
255
  private locked;
@@ -59,6 +259,30 @@ declare class Mutex {
59
259
  get isLocked(): boolean;
60
260
  }
61
261
 
262
+ /**
263
+ * Run a function exactly once, even if called concurrently.
264
+ * All callers await the same promise and receive the same result.
265
+ *
266
+ * Use for lazy, one-time initialization of expensive resources (DB pools, ML models,
267
+ * config, etc.) that must be initialized at most once regardless of concurrent demand.
268
+ *
269
+ * @example
270
+ * const initDB = new Once<DBPool>()
271
+ *
272
+ * async function getDB() {
273
+ * return initDB.do(() => createPool({ max: 10 }))
274
+ * }
275
+ *
276
+ * // Safe under concurrent load — pool is created exactly once
277
+ * const [db1, db2] = await Promise.all([getDB(), getDB()])
278
+ * // db1 === db2 (same pool instance)
279
+ *
280
+ * @example
281
+ * // Check if initialization has already run
282
+ * if (!initDB.done) {
283
+ * console.log('not yet initialized')
284
+ * }
285
+ */
62
286
  declare class Once<T = void> {
63
287
  private promise;
64
288
  private called;
@@ -68,13 +292,98 @@ declare class Once<T = void> {
68
292
  }
69
293
 
70
294
  type SelectCase<T = unknown> = [Promise<T>, (value: T) => void];
295
+ /**
296
+ * Options for `select()`.
297
+ *
298
+ * `default` makes the call non-blocking: if no case is immediately ready,
299
+ * the default handler runs instead of waiting. This mirrors Go's `select { default: ... }`.
300
+ */
71
301
  interface SelectOptions {
72
302
  default?: () => void;
73
303
  }
304
+ /**
305
+ * Wait for the first of multiple promises to resolve, like Go's `select`.
306
+ *
307
+ * Each case is a `[promise, handler]` tuple. The handler for the first settled
308
+ * promise is called with its value. All other handlers are ignored.
309
+ *
310
+ * If `opts.default` is provided, `select` becomes non-blocking: if no promise
311
+ * is already resolved, the default runs immediately (Go's `select { default: ... }`).
312
+ *
313
+ * Commonly used with `ch.recv()`, `after()`, and `spawn().result`.
314
+ *
315
+ * @example
316
+ * // Block until a channel message arrives or timeout after 5s
317
+ * await select([
318
+ * [ch.recv(), (value) => console.log('received', value)],
319
+ * [after(5000), () => console.log('timed out')],
320
+ * ])
321
+ *
322
+ * @example
323
+ * // Non-blocking: check a channel without waiting
324
+ * await select(
325
+ * [[ch.recv(), (value) => process(value)]],
326
+ * { default: () => console.log('channel empty — doing other work') },
327
+ * )
328
+ *
329
+ * @example
330
+ * // Race two worker results against a deadline
331
+ * const { result: fast } = spawn(() => quickSearch(query))
332
+ * const { result: deep } = spawn(() => deepSearch(query))
333
+ *
334
+ * let response: Result
335
+ * await select([
336
+ * [fast, (r) => { response = r }],
337
+ * [after(200), () => { response = { partial: true } }],
338
+ * ])
339
+ */
74
340
  declare function select(cases: SelectCase[], opts?: SelectOptions): Promise<void>;
75
341
 
342
+ /**
343
+ * Returns a promise that resolves after `ms` milliseconds.
344
+ *
345
+ * Designed for use with `select()` to add timeouts to channel operations or
346
+ * race a deadline against worker results. Also works as a simple async delay.
347
+ *
348
+ * @example
349
+ * // Timeout a channel receive after 2 seconds
350
+ * await select([
351
+ * [ch.recv(), (value) => handle(value)],
352
+ * [after(2000), () => handleTimeout()],
353
+ * ])
354
+ *
355
+ * @example
356
+ * // Simple delay
357
+ * await after(500)
358
+ * console.log('500ms later')
359
+ */
76
360
  declare function after(ms: number): Promise<void>;
77
361
 
362
+ /**
363
+ * A repeating timer that ticks at a fixed interval.
364
+ *
365
+ * Implements `AsyncIterable<void>` — use `for await...of` to run work on each tick.
366
+ * Call `stop()` to cancel the ticker and end the iteration.
367
+ *
368
+ * Create with the `ticker(ms)` factory function.
369
+ *
370
+ * @example
371
+ * const t = ticker(1000) // tick every second
372
+ * for await (const _ of t) {
373
+ * await doWork()
374
+ * if (shouldStop) t.stop() // ends the for-await loop
375
+ * }
376
+ *
377
+ * @example
378
+ * // Use with select() to process work on each tick with a timeout
379
+ * const t = ticker(5000)
380
+ * for await (const _ of t) {
381
+ * await select([
382
+ * [spawn(() => checkHealth()).result, (ok) => report(ok)],
383
+ * [after(4000), () => report('timeout')],
384
+ * ])
385
+ * }
386
+ */
78
387
  declare class Ticker {
79
388
  private interval;
80
389
  private resolve;
@@ -85,12 +394,59 @@ declare class Ticker {
85
394
  stop(): void;
86
395
  [Symbol.asyncIterator](): AsyncIterator<void>;
87
396
  }
397
+ /**
398
+ * Create a `Ticker` that fires every `ms` milliseconds.
399
+ *
400
+ * @example
401
+ * const t = ticker(500)
402
+ * for await (const _ of t) {
403
+ * console.log('tick')
404
+ * if (done) t.stop()
405
+ * }
406
+ */
88
407
  declare function ticker(ms: number): Ticker;
89
408
 
90
- type TaskFn = (...args: unknown[]) => unknown;
91
- declare function register(name: string, fn: TaskFn): void;
92
- declare function run<T = unknown>(name: string, ...args: unknown[]): Promise<T>;
409
+ /**
410
+ * Define a reusable task that runs in a worker thread.
411
+ *
412
+ * Returns a typed async function — call it like a regular async function,
413
+ * and it dispatches to the thread pool each time.
414
+ *
415
+ * Use task() when you have the same function to call many times with
416
+ * different arguments. For one-off work, use spawn() instead.
417
+ *
418
+ * Arguments must be JSON-serializable (no functions, symbols, undefined, or BigInt).
419
+ * The function itself must be self-contained — it cannot capture enclosing scope variables.
420
+ *
421
+ * @example
422
+ * const resizeImage = task((src: string, width: number, height: number) => {
423
+ * // runs in a worker thread
424
+ * return processPixels(src, width, height)
425
+ * })
426
+ *
427
+ * const result = await resizeImage('photo.jpg', 800, 600)
428
+ * const [a, b] = await Promise.all([resizeImage('a.jpg', 400, 300), resizeImage('b.jpg', 800, 600)])
429
+ */
430
+ declare function task<TArgs extends unknown[], TReturn>(fn: (...args: TArgs) => TReturn | Promise<TReturn>): (...args: TArgs) => Promise<TReturn>;
93
431
 
432
+ /**
433
+ * Configure the global thread pool. **Must be called before the first `spawn()`.**
434
+ *
435
+ * After the pool is initialized, calling `configure()` throws. Call it once at
436
+ * application startup or in test setup.
437
+ *
438
+ * @example
439
+ * configure({
440
+ * maxThreads: 4, // default: os.availableParallelism()
441
+ * concurrency: 64, // max concurrent tasks per shared worker (default: 64)
442
+ * idleTimeout: 30_000, // kill idle workers after 30s (default: 30_000)
443
+ * adapter: 'auto', // 'auto' | 'node' | 'bun' | 'inline' (default: 'auto')
444
+ * })
445
+ *
446
+ * @example
447
+ * // In tests: run tasks on the main thread with no real workers
448
+ * configure({ adapter: 'inline' })
449
+ */
94
450
  declare function configure(opts: Partial<PuruConfig>): void;
95
451
 
96
452
  interface PoolStats {
@@ -119,10 +475,24 @@ interface PoolStats {
119
475
  }
120
476
  declare function stats(): PoolStats;
121
477
  declare function resize(maxThreads: number): void;
478
+ /**
479
+ * Gracefully shut down the thread pool.
480
+ *
481
+ * Rejects all queued tasks, waits for all workers to terminate, then clears
482
+ * the pool. Safe to call at process exit or at the end of a test suite.
483
+ *
484
+ * ```ts
485
+ * process.on('SIGTERM', async () => {
486
+ * await shutdown()
487
+ * process.exit(0)
488
+ * })
489
+ * ```
490
+ */
491
+ declare function shutdown(): Promise<void>;
122
492
 
123
493
  type Runtime = 'node' | 'deno' | 'bun' | 'browser';
124
494
  type Capability = 'full-threads' | 'single-thread';
125
495
  declare function detectRuntime(): Runtime;
126
496
  declare function detectCapability(): Capability;
127
497
 
128
- export { type Capability, type Channel, ErrGroup, Mutex, Once, type PoolStats, type PuruConfig, type Runtime, type SelectOptions, type SpawnResult, Ticker, WaitGroup, after, chan, configure, detectCapability, detectRuntime, register, resize, run, select, spawn, stats, ticker };
498
+ export { type Capability, type Channel, ErrGroup, Mutex, Once, type PoolStats, type PuruConfig, type Runtime, type SelectOptions, type SpawnResult, Ticker, WaitGroup, after, chan, configure, detectCapability, detectRuntime, resize, select, shutdown, spawn, stats, task, ticker };