@dmop/puru 0.1.10 → 0.1.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/AGENTS.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # puru — Guide for AI Assistants
2
2
 
3
- puru is a thread pool library for JavaScript with Go-style concurrency primitives (channels, WaitGroup, select). It runs functions off the main thread with no worker files and no boilerplate.
3
+ puru is a zero-dependency thread pool library for JavaScript with Go-style concurrency primitives (channels, WaitGroup, ErrGroup, select, context, Mutex, RWMutex, Cond, Timer). It runs functions off the main thread with no worker files and no boilerplate.
4
4
 
5
5
  Full API reference: https://raw.githubusercontent.com/dmop/puru/main/llms-full.txt
6
6
 
@@ -117,7 +117,142 @@ const eg = new ErrGroup()
117
117
  eg.spawn(() => fetch('https://api.example.com/users/1').then((r) => r.json()), { concurrent: true })
118
118
  eg.spawn(() => fetch('https://api.example.com/users/1/orders').then((r) => r.json()), { concurrent: true })
119
119
 
120
- const [user, orders] = await eg.wait() // throws on first error, cancels the rest
120
+ const [user, orders] = await eg.wait() // throws on first error, terminates remaining workers
121
+ ```
122
+
123
+ ### ErrGroup with concurrency limit
124
+
125
+ ```typescript
126
+ import { ErrGroup } from '@dmop/puru'
127
+
128
+ const eg = new ErrGroup()
129
+ eg.setLimit(4) // max 4 tasks in flight at once
130
+
131
+ for (const url of urls) {
132
+ eg.spawn(() => fetch(url).then(r => r.json()), { concurrent: true })
133
+ }
134
+
135
+ const results = await eg.wait()
136
+ ```
137
+
138
+ ### Context-integrated spawn (auto-cancel)
139
+
140
+ ```typescript
141
+ import { spawn, background, withTimeout } from '@dmop/puru'
142
+
143
+ // Task auto-cancels when context expires — no manual wiring needed
144
+ const [ctx, cancel] = withTimeout(background(), 5000)
145
+ const { result } = spawn(() => heavyWork(), { ctx })
146
+
147
+ try {
148
+ console.log(await result)
149
+ } finally {
150
+ cancel()
151
+ }
152
+ ```
153
+
154
+ ### Context with WaitGroup / ErrGroup
155
+
156
+ ```typescript
157
+ import { background, withTimeout, WaitGroup } from '@dmop/puru'
158
+
159
+ const [ctx, cancel] = withTimeout(background(), 5000)
160
+
161
+ // Pass context to WaitGroup — all tasks auto-cancel when ctx expires
162
+ const wg = new WaitGroup(ctx)
163
+ wg.spawn(() => { /* CPU work */ return 42 })
164
+ wg.spawn(() => fetch('https://api.example.com/data').then(r => r.json()), { concurrent: true })
165
+
166
+ try {
167
+ const results = await wg.wait()
168
+ } catch {
169
+ console.log('timed out or cancelled')
170
+ } finally {
171
+ cancel()
172
+ }
173
+ ```
174
+
175
+ ### RWMutex (read-write lock)
176
+
177
+ ```typescript
178
+ import { RWMutex } from '@dmop/puru'
179
+
180
+ const rw = new RWMutex()
181
+
182
+ // Many readers can run concurrently
183
+ const data = await rw.withRLock(() => cache.get('config'))
184
+
185
+ // Writers get exclusive access
186
+ await rw.withLock(() => cache.set('config', newValue))
187
+ ```
188
+
189
+ ### Timer (resettable one-shot)
190
+
191
+ ```typescript
192
+ import { Timer, select } from '@dmop/puru'
193
+
194
+ const t = new Timer(5000)
195
+
196
+ // Use with select for cancellable timeouts
197
+ await select([
198
+ [ch.recv(), (v) => { t.stop(); handle(v) }],
199
+ [t.channel, () => console.log('timed out')],
200
+ ])
201
+
202
+ // Reset for debounce patterns
203
+ t.reset(300)
204
+ ```
205
+
206
+ ### Cond (condition variable)
207
+
208
+ ```typescript
209
+ import { Mutex, Cond } from '@dmop/puru'
210
+
211
+ const mu = new Mutex()
212
+ const cond = new Cond(mu)
213
+ let ready = false
214
+
215
+ // Waiter
216
+ await mu.lock()
217
+ while (!ready) await cond.wait()
218
+ mu.unlock()
219
+
220
+ // Signaler
221
+ await mu.lock()
222
+ ready = true
223
+ cond.broadcast() // wake all waiters
224
+ mu.unlock()
225
+ ```
226
+
227
+ ### Directional channels
228
+
229
+ ```typescript
230
+ import { chan } from '@dmop/puru'
231
+ import type { SendOnly, RecvOnly } from '@dmop/puru'
232
+
233
+ const ch = chan<number>(10)
234
+
235
+ async function producer(out: SendOnly<number>) {
236
+ await out.send(42)
237
+ out.close()
238
+ }
239
+
240
+ async function consumer(inp: RecvOnly<number>) {
241
+ for await (const v of inp) console.log(v)
242
+ }
243
+
244
+ producer(ch.sendOnly())
245
+ consumer(ch.recvOnly())
246
+ ```
247
+
248
+ ### Channel inspection
249
+
250
+ ```typescript
251
+ const ch = chan<number>(100)
252
+ await ch.send(1)
253
+ await ch.send(2)
254
+ console.log(ch.len) // 2 (buffered values)
255
+ console.log(ch.cap) // 100 (buffer capacity)
121
256
  ```
122
257
 
123
258
  ### Cross-thread channels (fan-out)
package/README.md CHANGED
@@ -1,44 +1,56 @@
1
1
  # puru (プール)
2
2
 
3
- > A thread pool for JavaScript with Go-style concurrency primitives.
4
- >
5
- > Run work off the main thread with inline functions, channels, `WaitGroup`, `ErrGroup`, `select`, `Mutex`, `Once`, and more. No worker files. No boilerplate.
3
+ [![npm version](https://img.shields.io/npm/v/@dmop/puru)](https://www.npmjs.com/package/@dmop/puru)
4
+ [![npm downloads](https://img.shields.io/npm/dm/@dmop/puru)](https://www.npmjs.com/package/@dmop/puru)
5
+ [![bundle size](https://img.shields.io/bundlephobia/minzip/@dmop/puru)](https://bundlephobia.com/package/@dmop/puru)
6
+ [![zero dependencies](https://img.shields.io/badge/dependencies-0-brightgreen)](https://www.npmjs.com/package/@dmop/puru?activeTab=dependencies)
7
+ [![license](https://img.shields.io/npm/l/@dmop/puru)](LICENSE)
6
8
 
7
- `puru` is for the moment when `Promise.all()` is no longer enough, but raw `worker_threads` feels too low-level.
9
+ **Go-style concurrency for JavaScript.** Worker threads with channels, WaitGroup, select, and context — zero dependencies, no worker files, no boilerplate.
8
10
 
9
- - CPU-heavy work: use dedicated worker threads
10
- - Async / I/O-heavy work: share worker threads efficiently with `concurrent: true`
11
- - Coordination: use channels, `WaitGroup`, `ErrGroup`, `select`, `Mutex`, `Once`, and `ticker`
12
- - Ergonomics: write worker logic inline or define reusable typed tasks
13
-
14
- Works on **Node.js >= 20** and **Bun**.
11
+ ```ts
12
+ import { spawn } from '@dmop/puru'
15
13
 
16
- ## Why This Exists
14
+ const { result } = spawn(() => {
15
+ let sum = 0
16
+ for (let i = 0; i < 100_000_000; i++) sum += i
17
+ return sum
18
+ })
17
19
 
18
- JavaScript apps usually hit one of these walls:
20
+ console.log(await result) // runs off the main thread
21
+ ```
19
22
 
20
- - A request handler does 200ms of CPU work and stalls the event loop
21
- - You want worker threads, but you do not want separate worker files and message plumbing
22
- - You need more than raw parallelism: cancellation, fan-out, backpressure, coordination
23
- - You like Go's concurrency model and want something similar in JavaScript
23
+ ## Before / After
24
24
 
25
- `puru` gives you a managed worker pool with a much nicer programming model.
25
+ <table>
26
+ <tr><th>Raw worker_threads</th><th>puru</th></tr>
27
+ <tr>
28
+ <td>
26
29
 
27
- ## Install
30
+ ```ts
31
+ const { Worker } = require('worker_threads')
32
+ const worker = new Worker('./worker.js')
33
+ worker.postMessage({ n: 40 })
34
+ worker.on('message', (result) => {
35
+ console.log(result)
36
+ worker.terminate()
37
+ })
38
+ worker.on('error', reject)
28
39
 
29
- ```bash
30
- npm install @dmop/puru
31
- # or
32
- bun add @dmop/puru
40
+ // worker.js (separate file)
41
+ const { parentPort } = require('worker_threads')
42
+ parentPort.on('message', ({ n }) => {
43
+ parentPort.postMessage(fibonacci(n))
44
+ })
33
45
  ```
34
46
 
35
- ## 30-Second Tour
47
+ </td>
48
+ <td>
36
49
 
37
50
  ```ts
38
- import { spawn, task, WaitGroup, chan } from '@dmop/puru'
51
+ import { spawn } from '@dmop/puru'
39
52
 
40
- // 1. One CPU-heavy task on a dedicated worker
41
- const { result: fib } = spawn(() => {
53
+ const { result } = spawn(() => {
42
54
  function fibonacci(n: number): number {
43
55
  if (n <= 1) return n
44
56
  return fibonacci(n - 1) + fibonacci(n - 2)
@@ -46,162 +58,130 @@ const { result: fib } = spawn(() => {
46
58
  return fibonacci(40)
47
59
  })
48
60
 
49
- // 2. Reusable typed worker function
50
- const resize = task((width: number, height: number) => {
51
- return { width, height, pixels: width * height }
52
- })
53
-
54
- // 3. Structured concurrency
55
- const wg = new WaitGroup()
56
- wg.spawn(() => {
57
- let sum = 0
58
- for (let i = 0; i < 1_000_000; i++) sum += i
59
- return sum
60
- })
61
- wg.spawn(
62
- () => fetch('https://api.example.com/users/1').then((r) => r.json()),
63
- { concurrent: true },
64
- )
65
-
66
- // 4. Channels for coordination
67
- const jobs = chan<number>(10)
68
- spawn(async ({ jobs }) => {
69
- for (let i = 0; i < 10; i++) await jobs.send(i)
70
- jobs.close()
71
- }, { channels: { jobs }, concurrent: true })
72
-
73
- console.log(await fib)
74
- console.log(await resize(800, 600))
75
- console.log(await wg.wait())
61
+ console.log(await result)
76
62
  ```
77
63
 
78
- ## The Big Rule
79
-
80
- Functions passed to `spawn()` are serialized with `.toString()` and executed in a worker.
64
+ </td>
65
+ </tr>
66
+ </table>
81
67
 
82
- That means they **cannot capture variables from the enclosing scope**.
68
+ One file. No message plumbing. Automatic pooling.
83
69
 
84
- ```ts
85
- const x = 42
70
+ ## Install
86
71
 
87
- spawn(() => x + 1) // ReferenceError at runtime
72
+ Zero runtime dependencies — just the library itself.
88
73
 
89
- spawn(() => {
90
- const x = 42
91
- return x + 1
92
- }) // works
74
+ ```bash
75
+ npm install @dmop/puru
76
+ # or
77
+ bun add @dmop/puru
93
78
  ```
94
79
 
95
- If you need to pass arguments repeatedly, prefer `task(fn)`.
96
-
97
- ## Why People Reach for puru
98
-
99
- ### Inline worker code
100
-
101
- No separate worker file in the normal case.
80
+ ## Quick Start
102
81
 
103
82
  ```ts
104
- import { spawn } from '@dmop/puru'
83
+ import { spawn, WaitGroup, chan } from '@dmop/puru'
105
84
 
106
- const { result } = spawn(() => {
107
- let sum = 0
108
- for (let i = 0; i < 10_000_000; i++) sum += i
109
- return sum
110
- })
111
- ```
85
+ // CPU work on a dedicated worker
86
+ const { result } = spawn(() => fibonacci(40))
112
87
 
113
- ### Two execution modes
88
+ // Parallel batch — wait for all
89
+ const wg = new WaitGroup()
90
+ wg.spawn(() => crunchData())
91
+ wg.spawn(() => crunchMoreData())
92
+ const [a, b] = await wg.wait()
93
+
94
+ // Cross-thread channels
95
+ const ch = chan<number>(10)
96
+ spawn(async ({ ch }) => {
97
+ for (let i = 0; i < 10; i++) await ch.send(i)
98
+ ch.close()
99
+ }, { channels: { ch } })
100
+
101
+ for await (const item of ch) console.log(item)
102
+ ```
114
103
 
115
- | Mode | Use it for | What happens |
116
- | --- | --- | --- |
117
- | `spawn(fn)` | CPU-bound work | The task gets a dedicated worker |
118
- | `spawn(fn, { concurrent: true })` | Async / I/O-heavy work | Multiple tasks share a worker's event loop |
104
+ ## Performance
119
105
 
120
- This is the key distinction:
106
+ Measured on Apple M1 Pro (8 cores). Full results in [BENCHMARKS.md](docs/BENCHMARKS.md).
121
107
 
122
- - `exclusive` mode is for actual CPU parallelism
123
- - `concurrent` mode is for lots of tasks that mostly `await`
108
+ | Benchmark | Single-threaded | puru | Speedup |
109
+ | --- | --: | --: | --: |
110
+ | Fibonacci (fib(38) x8) | 4,345 ms | 2,131 ms | **2.0x** |
111
+ | Prime counting (2M range) | 335 ms | 77 ms | **4.4x** |
112
+ | 100 concurrent async tasks | 1,140 ms | 16 ms | **73x** |
113
+ | Fan-out pipeline (4 workers) | 176 ms | 51 ms | **3.4x** |
124
114
 
125
- ### More than a worker pool
115
+ Spawn overhead: ~0.1-0.5ms. Use for tasks above ~5ms.
126
116
 
127
- `puru` is not just `spawn()`.
117
+ ## Two Modes
128
118
 
129
- - `chan()` for cross-thread coordination and backpressure
130
- - `WaitGroup` for “run many, wait for all”
131
- - `ErrGroup` for “fail fast, cancel the rest”
132
- - `select()` for first-ready coordination
133
- - `Mutex` for shared resource protection
134
- - `Once` for one-time initialization under concurrency
135
- - `task()` for reusable typed worker functions
119
+ | Mode | Use it for | What happens |
120
+ | --- | --- | --- |
121
+ | `spawn(fn)` | CPU-bound work | Dedicated worker thread |
122
+ | `spawn(fn, { concurrent: true })` | Async / I/O work | Shares a worker's event loop |
136
123
 
137
124
  ## When To Use What
138
125
 
139
- | Situation | Best tool |
126
+ | Situation | Tool |
140
127
  | --- | --- |
141
- | One heavy synchronous task | `spawn(fn)` |
142
- | Same worker logic called many times with different inputs | `task(fn)` |
143
- | Many async tasks that mostly wait on I/O | `spawn(fn, { concurrent: true })` |
144
- | Parallel batch with “wait for everything” | `WaitGroup` |
145
- | Parallel batch where the first failure should cancel the rest | `ErrGroup` |
146
- | Producer/consumer or fan-out/fan-in pipeline | `chan()` |
147
- | Non-blocking coordination between async operations | `select()` |
128
+ | One heavy CPU task | `spawn(fn)` |
129
+ | Same logic, many inputs | `task(fn)` |
130
+ | Wait for all tasks | `WaitGroup` |
131
+ | Fail-fast, cancel the rest | `ErrGroup` (with `setLimit()` for throttling) |
132
+ | Timeouts and cancellation | `context` + `spawn(fn, { ctx })` |
133
+ | Producer/consumer pipelines | `chan()` + `select()` |
148
134
 
149
- ## Why Not Just Use...
135
+ ## The Big Rule
150
136
 
151
- ### `Promise.all()`
137
+ > **Functions passed to `spawn()` cannot capture outer variables.** They are serialized as text and sent to a worker — closures don't survive.
152
138
 
153
- Use `Promise.all()` when work is already cheap and async.
139
+ ```ts
140
+ const x = 42
141
+ spawn(() => x + 1) // ReferenceError at runtime
154
142
 
155
- Use `puru` when:
143
+ spawn(() => {
144
+ const x = 42 // define inside
145
+ return x + 1
146
+ }) // works
147
+ ```
156
148
 
157
- - work is CPU-heavy
158
- - you need the main thread to stay responsive under load
159
- - you want worker coordination primitives, not just promise aggregation
149
+ Use `task(fn)` to pass arguments to reusable worker functions.
160
150
 
161
- ### `worker_threads`
151
+ ## What's Included
162
152
 
163
- Raw `worker_threads` are powerful, but they are low-level:
153
+ **Coordination:** `chan()` &middot; `WaitGroup` &middot; `ErrGroup` &middot; `select()` &middot; `context`
164
154
 
165
- - separate worker entry files
166
- - manual message passing
167
- - manual pooling
168
- - no built-in channels, `WaitGroup`, `ErrGroup`, or `select`
155
+ **Synchronization:** `Mutex` &middot; `RWMutex` &middot; `Once` &middot; `Cond`
169
156
 
170
- `puru` keeps the power and removes most of the ceremony.
157
+ **Timing:** `after()` &middot; `ticker()` &middot; `Timer`
171
158
 
172
- ### Cluster
159
+ **Ergonomics:** `task()` &middot; `configure()` &middot; `stats()` &middot; directional channels &middot; channel `len`/`cap`
173
160
 
174
- Cluster solves a different problem.
161
+ All modeled after Go's concurrency primitives. Full API in [docs/API.md](docs/API.md).
175
162
 
176
- - Cluster: more processes, better request throughput
177
- - `puru`: offload heavy work inside each process
163
+ ## Why Not Just Use...
178
164
 
179
- They work well together.
165
+ **`Promise.all()`** — Great for cheap async work. Use puru when work is CPU-heavy or you need the main thread to stay responsive.
180
166
 
181
- ## Feature Snapshot
167
+ **`worker_threads`** — Powerful but low-level: separate files, manual messaging, manual pooling, no channels/WaitGroup/select. puru keeps the power, removes the ceremony.
182
168
 
183
- | Feature | `puru` |
184
- | --- | --- |
185
- | Inline worker functions | Yes |
186
- | Dedicated CPU workers | Yes |
187
- | Shared-worker async mode | Yes |
188
- | Channels across workers | Yes |
189
- | WaitGroup / ErrGroup | Yes |
190
- | `select` / timers | Yes |
191
- | Mutex / Once | Yes |
192
- | Bun support | Yes |
193
- | TypeScript support | Yes |
169
+ **Cluster** — Adds processes for request throughput. puru offloads heavy work inside each process. They compose well together.
194
170
 
195
- ## Performance
171
+ ## Runtimes
196
172
 
197
- `puru` is designed for real work, not micro-bench tricks.
173
+ | Runtime | Status |
174
+ | --- | --- |
175
+ | Node.js >= 20 | Full support |
176
+ | Bun | Full support |
177
+ | Deno | Planned |
198
178
 
199
- - Spawn overhead is roughly `0.1-0.5ms`
200
- - As a rule of thumb, use worker threads for tasks above `~5ms`
201
- - CPU-bound benchmarks show real speedups from multi-core execution
202
- - Concurrent async benchmarks show large gains when many tasks mostly wait on I/O off the main thread
179
+ ## Testing
203
180
 
204
- Full benchmark tables live in [docs/BENCHMARKS.md](docs/BENCHMARKS.md).
181
+ ```ts
182
+ import { configure } from '@dmop/puru'
183
+ configure({ adapter: 'inline' }) // runs on main thread, no real workers
184
+ ```
205
185
 
206
186
  ## Docs
207
187
 
@@ -210,33 +190,13 @@ Full benchmark tables live in [docs/BENCHMARKS.md](docs/BENCHMARKS.md).
210
190
  - [Production use cases](USE-CASES.md)
211
191
  - [Examples](examples)
212
192
  - [AI assistant guide](AGENTS.md)
213
- - [Full LLM reference](llms-full.txt)
214
-
215
- ## Runtimes
216
-
217
- | Runtime | Support | Notes |
218
- | --- | --- | --- |
219
- | Node.js >= 20 | Full | Uses `worker_threads` |
220
- | Bun | Full | Uses Web Workers |
221
- | Deno | Planned | Not yet implemented |
222
-
223
- ## Testing
224
-
225
- Use the inline adapter to run tasks on the main thread in tests:
226
-
227
- ```ts
228
- import { configure } from '@dmop/puru'
229
-
230
- configure({ adapter: 'inline' })
231
- ```
232
193
 
233
194
  ## Limitations
234
195
 
235
- - `spawn()` functions cannot capture outer variables
236
- - Channel values must be structured-cloneable
237
- - `null` is reserved as the channel closed sentinel
196
+ - `spawn()` functions cannot capture outer variables (see [The Big Rule](#the-big-rule))
197
+ - Channel values must be structured-cloneable (no functions, symbols, WeakRefs)
198
+ - `null` is reserved as the channel-closed sentinel
238
199
  - `task()` arguments must be JSON-serializable
239
- - Channel ops from workers have RPC overhead, so use them for coordination, not ultra-fine-grained inner loops
240
200
 
241
201
  ## License
242
202