@dmop/puru 0.1.10 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/AGENTS.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # puru — Guide for AI Assistants
2
2
 
3
- puru is a thread pool library for JavaScript with Go-style concurrency primitives (channels, WaitGroup, select). It runs functions off the main thread with no worker files and no boilerplate.
3
+ puru is a thread pool library for JavaScript with Go-style concurrency primitives (channels, WaitGroup, ErrGroup, select, context, Mutex, RWMutex, Cond, Timer). It runs functions off the main thread with no worker files and no boilerplate.
4
4
 
5
5
  Full API reference: https://raw.githubusercontent.com/dmop/puru/main/llms-full.txt
6
6
 
@@ -120,6 +120,141 @@ eg.spawn(() => fetch('https://api.example.com/users/1/orders').then((r) => r.jso
120
120
  const [user, orders] = await eg.wait() // throws on first error, cancels the rest
121
121
  ```
122
122
 
123
+ ### ErrGroup with concurrency limit
124
+
125
+ ```typescript
126
+ import { ErrGroup } from '@dmop/puru'
127
+
128
+ const eg = new ErrGroup()
129
+ eg.setLimit(4) // max 4 tasks in flight at once
130
+
131
+ for (const url of urls) {
132
+ eg.spawn(() => fetch(url).then(r => r.json()), { concurrent: true })
133
+ }
134
+
135
+ const results = await eg.wait()
136
+ ```
137
+
138
+ ### Context-integrated spawn (auto-cancel)
139
+
140
+ ```typescript
141
+ import { spawn, background, withTimeout } from '@dmop/puru'
142
+
143
+ // Task auto-cancels when context expires — no manual wiring needed
144
+ const [ctx, cancel] = withTimeout(background(), 5000)
145
+ const { result } = spawn(() => heavyWork(), { ctx })
146
+
147
+ try {
148
+ console.log(await result)
149
+ } finally {
150
+ cancel()
151
+ }
152
+ ```
153
+
154
+ ### Context with WaitGroup / ErrGroup
155
+
156
+ ```typescript
157
+ import { background, withTimeout, WaitGroup } from '@dmop/puru'
158
+
159
+ const [ctx, cancel] = withTimeout(background(), 5000)
160
+
161
+ // Pass context to WaitGroup — all tasks auto-cancel when ctx expires
162
+ const wg = new WaitGroup(ctx)
163
+ wg.spawn(() => { /* CPU work */ return 42 })
164
+ wg.spawn(() => fetch('https://api.example.com/data').then(r => r.json()), { concurrent: true })
165
+
166
+ try {
167
+ const results = await wg.wait()
168
+ } catch {
169
+ console.log('timed out or cancelled')
170
+ } finally {
171
+ cancel()
172
+ }
173
+ ```
174
+
175
+ ### RWMutex (read-write lock)
176
+
177
+ ```typescript
178
+ import { RWMutex } from '@dmop/puru'
179
+
180
+ const rw = new RWMutex()
181
+
182
+ // Many readers can run concurrently
183
+ const data = await rw.withRLock(() => cache.get('config'))
184
+
185
+ // Writers get exclusive access
186
+ await rw.withLock(() => cache.set('config', newValue))
187
+ ```
188
+
189
+ ### Timer (resettable one-shot)
190
+
191
+ ```typescript
192
+ import { Timer, select } from '@dmop/puru'
193
+
194
+ const t = new Timer(5000)
195
+
196
+ // Use with select for cancellable timeouts
197
+ await select([
198
+ [ch.recv(), (v) => { t.stop(); handle(v) }],
199
+ [t.channel, () => console.log('timed out')],
200
+ ])
201
+
202
+ // Reset for debounce patterns
203
+ t.reset(300)
204
+ ```
205
+
206
+ ### Cond (condition variable)
207
+
208
+ ```typescript
209
+ import { Mutex, Cond } from '@dmop/puru'
210
+
211
+ const mu = new Mutex()
212
+ const cond = new Cond(mu)
213
+ let ready = false
214
+
215
+ // Waiter
216
+ await mu.lock()
217
+ while (!ready) await cond.wait()
218
+ mu.unlock()
219
+
220
+ // Signaler
221
+ await mu.lock()
222
+ ready = true
223
+ cond.broadcast() // wake all waiters
224
+ mu.unlock()
225
+ ```
226
+
227
+ ### Directional channels
228
+
229
+ ```typescript
230
+ import { chan } from '@dmop/puru'
231
+ import type { SendOnly, RecvOnly } from '@dmop/puru'
232
+
233
+ const ch = chan<number>(10)
234
+
235
+ async function producer(out: SendOnly<number>) {
236
+ await out.send(42)
237
+ out.close()
238
+ }
239
+
240
+ async function consumer(inp: RecvOnly<number>) {
241
+ for await (const v of inp) console.log(v)
242
+ }
243
+
244
+ producer(ch.sendOnly())
245
+ consumer(ch.recvOnly())
246
+ ```
247
+
248
+ ### Channel inspection
249
+
250
+ ```typescript
251
+ const ch = chan<number>(100)
252
+ await ch.send(1)
253
+ await ch.send(2)
254
+ console.log(ch.len) // 2 (buffered values)
255
+ console.log(ch.cap) // 100 (buffer capacity)
256
+ ```
257
+
123
258
  ### Cross-thread channels (fan-out)
124
259
 
125
260
  ```typescript
package/README.md CHANGED
@@ -1,44 +1,55 @@
1
1
  # puru (プール)
2
2
 
3
- > A thread pool for JavaScript with Go-style concurrency primitives.
4
- >
5
- > Run work off the main thread with inline functions, channels, `WaitGroup`, `ErrGroup`, `select`, `Mutex`, `Once`, and more. No worker files. No boilerplate.
3
+ [![npm version](https://img.shields.io/npm/v/@dmop/puru)](https://www.npmjs.com/package/@dmop/puru)
4
+ [![npm downloads](https://img.shields.io/npm/dm/@dmop/puru)](https://www.npmjs.com/package/@dmop/puru)
5
+ [![bundle size](https://img.shields.io/bundlephobia/minzip/@dmop/puru)](https://bundlephobia.com/package/@dmop/puru)
6
+ [![license](https://img.shields.io/npm/l/@dmop/puru)](LICENSE)
6
7
 
7
- `puru` is for the moment when `Promise.all()` is no longer enough, but raw `worker_threads` feels too low-level.
8
+ **Go-style concurrency for JavaScript.** Worker threads with channels, WaitGroup, select, and context — no worker files, no boilerplate.
8
9
 
9
- - CPU-heavy work: use dedicated worker threads
10
- - Async / I/O-heavy work: share worker threads efficiently with `concurrent: true`
11
- - Coordination: use channels, `WaitGroup`, `ErrGroup`, `select`, `Mutex`, `Once`, and `ticker`
12
- - Ergonomics: write worker logic inline or define reusable typed tasks
13
-
14
- Works on **Node.js >= 20** and **Bun**.
10
+ ```ts
11
+ import { spawn } from '@dmop/puru'
15
12
 
16
- ## Why This Exists
13
+ const { result } = spawn(() => {
14
+ let sum = 0
15
+ for (let i = 0; i < 100_000_000; i++) sum += i
16
+ return sum
17
+ })
17
18
 
18
- JavaScript apps usually hit one of these walls:
19
+ console.log(await result) // runs off the main thread
20
+ ```
19
21
 
20
- - A request handler does 200ms of CPU work and stalls the event loop
21
- - You want worker threads, but you do not want separate worker files and message plumbing
22
- - You need more than raw parallelism: cancellation, fan-out, backpressure, coordination
23
- - You like Go's concurrency model and want something similar in JavaScript
22
+ ## Before / After
24
23
 
25
- `puru` gives you a managed worker pool with a much nicer programming model.
24
+ <table>
25
+ <tr><th>Raw worker_threads</th><th>puru</th></tr>
26
+ <tr>
27
+ <td>
26
28
 
27
- ## Install
29
+ ```ts
30
+ const { Worker } = require('worker_threads')
31
+ const worker = new Worker('./worker.js')
32
+ worker.postMessage({ n: 40 })
33
+ worker.on('message', (result) => {
34
+ console.log(result)
35
+ worker.terminate()
36
+ })
37
+ worker.on('error', reject)
28
38
 
29
- ```bash
30
- npm install @dmop/puru
31
- # or
32
- bun add @dmop/puru
39
+ // worker.js (separate file)
40
+ const { parentPort } = require('worker_threads')
41
+ parentPort.on('message', ({ n }) => {
42
+ parentPort.postMessage(fibonacci(n))
43
+ })
33
44
  ```
34
45
 
35
- ## 30-Second Tour
46
+ </td>
47
+ <td>
36
48
 
37
49
  ```ts
38
- import { spawn, task, WaitGroup, chan } from '@dmop/puru'
50
+ import { spawn } from '@dmop/puru'
39
51
 
40
- // 1. One CPU-heavy task on a dedicated worker
41
- const { result: fib } = spawn(() => {
52
+ const { result } = spawn(() => {
42
53
  function fibonacci(n: number): number {
43
54
  if (n <= 1) return n
44
55
  return fibonacci(n - 1) + fibonacci(n - 2)
@@ -46,162 +57,126 @@ const { result: fib } = spawn(() => {
46
57
  return fibonacci(40)
47
58
  })
48
59
 
49
- // 2. Reusable typed worker function
50
- const resize = task((width: number, height: number) => {
51
- return { width, height, pixels: width * height }
52
- })
53
-
54
- // 3. Structured concurrency
55
- const wg = new WaitGroup()
56
- wg.spawn(() => {
57
- let sum = 0
58
- for (let i = 0; i < 1_000_000; i++) sum += i
59
- return sum
60
- })
61
- wg.spawn(
62
- () => fetch('https://api.example.com/users/1').then((r) => r.json()),
63
- { concurrent: true },
64
- )
65
-
66
- // 4. Channels for coordination
67
- const jobs = chan<number>(10)
68
- spawn(async ({ jobs }) => {
69
- for (let i = 0; i < 10; i++) await jobs.send(i)
70
- jobs.close()
71
- }, { channels: { jobs }, concurrent: true })
72
-
73
- console.log(await fib)
74
- console.log(await resize(800, 600))
75
- console.log(await wg.wait())
60
+ console.log(await result)
76
61
  ```
77
62
 
78
- ## The Big Rule
79
-
80
- Functions passed to `spawn()` are serialized with `.toString()` and executed in a worker.
63
+ </td>
64
+ </tr>
65
+ </table>
81
66
 
82
- That means they **cannot capture variables from the enclosing scope**.
67
+ One file. No message plumbing. Automatic pooling.
83
68
 
84
- ```ts
85
- const x = 42
86
-
87
- spawn(() => x + 1) // ReferenceError at runtime
69
+ ## Install
88
70
 
89
- spawn(() => {
90
- const x = 42
91
- return x + 1
92
- }) // works
71
+ ```bash
72
+ npm install @dmop/puru
93
73
  ```
94
74
 
95
- If you need to pass arguments repeatedly, prefer `task(fn)`.
96
-
97
- ## Why People Reach for puru
98
-
99
- ### Inline worker code
100
-
101
- No separate worker file in the normal case.
75
+ ## Quick Start
102
76
 
103
77
  ```ts
104
- import { spawn } from '@dmop/puru'
78
+ import { spawn, WaitGroup, chan } from '@dmop/puru'
105
79
 
106
- const { result } = spawn(() => {
107
- let sum = 0
108
- for (let i = 0; i < 10_000_000; i++) sum += i
109
- return sum
110
- })
111
- ```
80
+ // CPU work on a dedicated worker
81
+ const { result } = spawn(() => fibonacci(40))
112
82
 
113
- ### Two execution modes
83
+ // Parallel batch — wait for all
84
+ const wg = new WaitGroup()
85
+ wg.spawn(() => crunchData())
86
+ wg.spawn(() => crunchMoreData())
87
+ const [a, b] = await wg.wait()
88
+
89
+ // Cross-thread channels
90
+ const ch = chan<number>(10)
91
+ spawn(async ({ ch }) => {
92
+ for (let i = 0; i < 10; i++) await ch.send(i)
93
+ ch.close()
94
+ }, { channels: { ch } })
95
+
96
+ for await (const item of ch) console.log(item)
97
+ ```
114
98
 
115
- | Mode | Use it for | What happens |
116
- | --- | --- | --- |
117
- | `spawn(fn)` | CPU-bound work | The task gets a dedicated worker |
118
- | `spawn(fn, { concurrent: true })` | Async / I/O-heavy work | Multiple tasks share a worker's event loop |
99
+ ## Performance
119
100
 
120
- This is the key distinction:
101
+ Measured on Apple M1 Pro (8 cores). Full results in [BENCHMARKS.md](docs/BENCHMARKS.md).
121
102
 
122
- - `exclusive` mode is for actual CPU parallelism
123
- - `concurrent` mode is for lots of tasks that mostly `await`
103
+ | Benchmark | Single-threaded | puru | Speedup |
104
+ | --- | --: | --: | --: |
105
+ | Fibonacci (fib(38) x8) | 4,345 ms | 2,131 ms | **2.0x** |
106
+ | Prime counting (2M range) | 335 ms | 77 ms | **4.4x** |
107
+ | 100 concurrent async tasks | 1,140 ms | 16 ms | **73x** |
108
+ | Fan-out pipeline (4 workers) | 176 ms | 51 ms | **3.4x** |
124
109
 
125
- ### More than a worker pool
110
+ Spawn overhead: ~0.1-0.5ms. Use for tasks above ~5ms.
126
111
 
127
- `puru` is not just `spawn()`.
112
+ ## Two Modes
128
113
 
129
- - `chan()` for cross-thread coordination and backpressure
130
- - `WaitGroup` for “run many, wait for all”
131
- - `ErrGroup` for “fail fast, cancel the rest”
132
- - `select()` for first-ready coordination
133
- - `Mutex` for shared resource protection
134
- - `Once` for one-time initialization under concurrency
135
- - `task()` for reusable typed worker functions
114
+ | Mode | Use it for | What happens |
115
+ | --- | --- | --- |
116
+ | `spawn(fn)` | CPU-bound work | Dedicated worker thread |
117
+ | `spawn(fn, { concurrent: true })` | Async / I/O work | Shares a worker's event loop |
136
118
 
137
119
  ## When To Use What
138
120
 
139
- | Situation | Best tool |
121
+ | Situation | Tool |
140
122
  | --- | --- |
141
- | One heavy synchronous task | `spawn(fn)` |
142
- | Same worker logic called many times with different inputs | `task(fn)` |
143
- | Many async tasks that mostly wait on I/O | `spawn(fn, { concurrent: true })` |
144
- | Parallel batch with “wait for everything” | `WaitGroup` |
145
- | Parallel batch where the first failure should cancel the rest | `ErrGroup` |
146
- | Producer/consumer or fan-out/fan-in pipeline | `chan()` |
147
- | Non-blocking coordination between async operations | `select()` |
123
+ | One heavy CPU task | `spawn(fn)` |
124
+ | Same logic, many inputs | `task(fn)` |
125
+ | Wait for all tasks | `WaitGroup` |
126
+ | Fail-fast, cancel the rest | `ErrGroup` (with `setLimit()` for throttling) |
127
+ | Timeouts and cancellation | `context` + `spawn(fn, { ctx })` |
128
+ | Producer/consumer pipelines | `chan()` + `select()` |
148
129
 
149
- ## Why Not Just Use...
130
+ ## The Big Rule
150
131
 
151
- ### `Promise.all()`
132
+ > **Functions passed to `spawn()` cannot capture outer variables.** They are serialized as text and sent to a worker — closures don't survive.
152
133
 
153
- Use `Promise.all()` when work is already cheap and async.
134
+ ```ts
135
+ const x = 42
136
+ spawn(() => x + 1) // ReferenceError at runtime
154
137
 
155
- Use `puru` when:
138
+ spawn(() => {
139
+ const x = 42 // define inside
140
+ return x + 1
141
+ }) // works
142
+ ```
156
143
 
157
- - work is CPU-heavy
158
- - you need the main thread to stay responsive under load
159
- - you want worker coordination primitives, not just promise aggregation
144
+ Use `task(fn)` to pass arguments to reusable worker functions.
160
145
 
161
- ### `worker_threads`
146
+ ## What's Included
162
147
 
163
- Raw `worker_threads` are powerful, but they are low-level:
148
+ **Coordination:** `chan()` &middot; `WaitGroup` &middot; `ErrGroup` &middot; `select()` &middot; `context`
164
149
 
165
- - separate worker entry files
166
- - manual message passing
167
- - manual pooling
168
- - no built-in channels, `WaitGroup`, `ErrGroup`, or `select`
150
+ **Synchronization:** `Mutex` &middot; `RWMutex` &middot; `Once` &middot; `Cond`
169
151
 
170
- `puru` keeps the power and removes most of the ceremony.
152
+ **Timing:** `after()` &middot; `ticker()` &middot; `Timer`
171
153
 
172
- ### Cluster
154
+ **Ergonomics:** `task()` &middot; `configure()` &middot; `stats()` &middot; directional channels &middot; channel `len`/`cap`
173
155
 
174
- Cluster solves a different problem.
156
+ All modeled after Go's concurrency primitives. Full API in [docs/API.md](docs/API.md).
175
157
 
176
- - Cluster: more processes, better request throughput
177
- - `puru`: offload heavy work inside each process
158
+ ## Why Not Just Use...
178
159
 
179
- They work well together.
160
+ **`Promise.all()`** — Great for cheap async work. Use puru when work is CPU-heavy or you need the main thread to stay responsive.
180
161
 
181
- ## Feature Snapshot
162
+ **`worker_threads`** — Powerful but low-level: separate files, manual messaging, manual pooling, no channels/WaitGroup/select. puru keeps the power, removes the ceremony.
182
163
 
183
- | Feature | `puru` |
184
- | --- | --- |
185
- | Inline worker functions | Yes |
186
- | Dedicated CPU workers | Yes |
187
- | Shared-worker async mode | Yes |
188
- | Channels across workers | Yes |
189
- | WaitGroup / ErrGroup | Yes |
190
- | `select` / timers | Yes |
191
- | Mutex / Once | Yes |
192
- | Bun support | Yes |
193
- | TypeScript support | Yes |
164
+ **Cluster** — Cluster adds processes for request throughput. puru offloads heavy work inside each process. They compose well together.
194
165
 
195
- ## Performance
166
+ ## Runtimes
196
167
 
197
- `puru` is designed for real work, not micro-bench tricks.
168
+ | Runtime | Status |
169
+ | --- | --- |
170
+ | Node.js >= 20 | Full support |
171
+ | Bun | Full support |
172
+ | Deno | Planned |
198
173
 
199
- - Spawn overhead is roughly `0.1-0.5ms`
200
- - As a rule of thumb, use worker threads for tasks above `~5ms`
201
- - CPU-bound benchmarks show real speedups from multi-core execution
202
- - Concurrent async benchmarks show large gains when many tasks mostly wait on I/O off the main thread
174
+ ## Testing
203
175
 
204
- Full benchmark tables live in [docs/BENCHMARKS.md](docs/BENCHMARKS.md).
176
+ ```ts
177
+ import { configure } from '@dmop/puru'
178
+ configure({ adapter: 'inline' }) // runs on main thread, no real workers
179
+ ```
205
180
 
206
181
  ## Docs
207
182
 
@@ -210,33 +185,13 @@ Full benchmark tables live in [docs/BENCHMARKS.md](docs/BENCHMARKS.md).
210
185
  - [Production use cases](USE-CASES.md)
211
186
  - [Examples](examples)
212
187
  - [AI assistant guide](AGENTS.md)
213
- - [Full LLM reference](llms-full.txt)
214
-
215
- ## Runtimes
216
-
217
- | Runtime | Support | Notes |
218
- | --- | --- | --- |
219
- | Node.js >= 20 | Full | Uses `worker_threads` |
220
- | Bun | Full | Uses Web Workers |
221
- | Deno | Planned | Not yet implemented |
222
-
223
- ## Testing
224
-
225
- Use the inline adapter to run tasks on the main thread in tests:
226
-
227
- ```ts
228
- import { configure } from '@dmop/puru'
229
-
230
- configure({ adapter: 'inline' })
231
- ```
232
188
 
233
189
  ## Limitations
234
190
 
235
- - `spawn()` functions cannot capture outer variables
236
- - Channel values must be structured-cloneable
237
- - `null` is reserved as the channel closed sentinel
191
+ - `spawn()` functions cannot capture outer variables (see [The Big Rule](#the-big-rule))
192
+ - Channel values must be structured-cloneable (no functions, symbols, WeakRefs)
193
+ - `null` is reserved as the channel-closed sentinel
238
194
  - `task()` arguments must be JSON-serializable
239
- - Channel ops from workers have RPC overhead, so use them for coordination, not ultra-fine-grained inner loops
240
195
 
241
196
  ## License
242
197