@dmop/puru 0.1.4 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/llms-full.txt ADDED
@@ -0,0 +1,286 @@
1
+ # puru (プール) — Full Documentation
2
+
3
+ > A thread pool with Go-style concurrency primitives for JavaScript
4
+
5
+ puru manages a pool of worker threads and provides a simple API to run functions off the main thread. No separate worker files, no manual message passing.
6
+
7
+ Works on Node.js and Bun. Deno support coming soon.
8
+
9
+ puru (プール) means "pool" in Japanese.
10
+
11
+ ## Install
12
+
13
+ npm install @dmop/puru
14
+ bun add @dmop/puru
15
+
16
+ ## Quick Start
17
+
18
+ ```typescript
19
+ import { spawn, chan, WaitGroup, select, after } from '@dmop/puru'
20
+
21
+ // CPU work — runs in a dedicated worker thread
22
+ const { result } = spawn(() => fibonacci(40))
23
+ console.log(await result)
24
+
25
+ // I/O work — many tasks share worker threads
26
+ const wg = new WaitGroup()
27
+ for (const url of urls) {
28
+ wg.spawn(() => fetch(url).then(r => r.json()), { concurrent: true })
29
+ }
30
+ const results = await wg.wait()
31
+ ```
32
+
33
+ ## How It Works
34
+
35
+ puru manages a thread pool — tasks are dispatched onto a fixed set of worker threads.
36
+
37
+ Two modes:
38
+
39
+ - Exclusive (default): 1 task per thread, full core usage. Best for CPU-bound work.
40
+ - Concurrent ({ concurrent: true }): many tasks share a thread's event loop. Best for I/O-bound / async work.
41
+
42
+ CPU-bound work gets a dedicated thread. I/O-bound work shares threads efficiently. The API is inspired by Go's concurrency primitives (channels, WaitGroup, select), but the underlying mechanism is a thread pool — not a green thread scheduler.
43
+
44
+ ## API Reference
45
+
46
+ ### spawn(fn, opts?)
47
+
48
+ Run a function in a worker thread. Returns { result: Promise<T>, cancel: () => void }.
49
+
50
+ ```typescript
51
+ // CPU-bound — exclusive mode (default)
52
+ const { result } = spawn(() => fibonacci(40))
53
+
54
+ // I/O-bound — concurrent mode (many tasks per thread)
55
+ const { result } = spawn(() => fetch(url), { concurrent: true })
56
+
57
+ // With priority
58
+ const { result } = spawn(() => criticalWork(), { priority: 'high' })
59
+
60
+ // Cancel
61
+ const { result, cancel } = spawn(() => longTask())
62
+ setTimeout(cancel, 5000)
63
+ ```
64
+
65
+ Options:
66
+ - priority: 'low' | 'normal' | 'high' (default: 'normal')
67
+ - concurrent: boolean (default: false)
68
+ - channels: Record<string, Channel<unknown>> — pass channels to the worker
69
+
70
+ Exclusive mode (default): the function gets a dedicated thread. Use for CPU-heavy work.
71
+ Concurrent mode ({ concurrent: true }): multiple tasks share a thread's event loop. Use for async/I/O work.
72
+
73
+ Functions must be self-contained — they cannot capture variables from the enclosing scope:
74
+
75
+ ```typescript
76
+ const x = 42
77
+ spawn(() => x + 1) // ReferenceError: x is not defined
78
+ spawn(() => 42 + 1) // works
79
+ ```
80
+
81
+ ### chan(capacity?)
82
+
83
+ Create a channel for communicating between async tasks — including across worker threads.
84
+
85
+ ```typescript
86
+ const ch = chan<number>(10) // buffered, capacity 10
87
+ const ch2 = chan<string>() // unbuffered, capacity 0
88
+
89
+ await ch.send(42)
90
+ const value = await ch.recv() // 42
91
+
92
+ ch.close()
93
+ await ch.recv() // null (closed)
94
+
95
+ // Async iteration
96
+ for await (const value of ch) {
97
+ process(value)
98
+ }
99
+ ```
100
+
101
+ Channels in workers — pass channels to spawn() and use them across worker threads:
102
+
103
+ ```typescript
104
+ const ch = chan<number>(10)
105
+
106
+ // Producer worker
107
+ spawn(async ({ ch }) => {
108
+ for (let i = 0; i < 100; i++) await ch.send(i)
109
+ ch.close()
110
+ }, { channels: { ch } })
111
+
112
+ // Consumer worker
113
+ spawn(async ({ ch }) => {
114
+ for await (const item of ch) process(item)
115
+ }, { channels: { ch } })
116
+
117
+ // Fan-out: multiple workers pulling from the same channel
118
+ const input = chan<Job>(50)
119
+ const output = chan<Result>(50)
120
+
121
+ for (let i = 0; i < 4; i++) {
122
+ spawn(async ({ input, output }) => {
123
+ for await (const job of input) {
124
+ await output.send(processJob(job))
125
+ }
126
+ }, { channels: { input, output } })
127
+ }
128
+ ```
129
+
130
+ ### WaitGroup
131
+
132
+ Structured concurrency. Spawn multiple tasks, wait for all.
133
+
134
+ ```typescript
135
+ const wg = new WaitGroup()
136
+ wg.spawn(() => cpuWork()) // exclusive
137
+ wg.spawn(() => fetchData(), { concurrent: true }) // concurrent
138
+
139
+ const results = await wg.wait() // like Promise.all
140
+ const settled = await wg.waitSettled() // like Promise.allSettled
141
+
142
+ wg.cancel() // cancel all tasks
143
+ ```
144
+
145
+ ### ErrGroup
146
+
147
+ Like WaitGroup, but cancels all remaining tasks on first error (modeled after Go's golang.org/x/sync/errgroup).
148
+
149
+ ```typescript
150
+ const eg = new ErrGroup()
151
+ eg.spawn(() => fetchUser(id))
152
+ eg.spawn(() => fetchOrders(id))
153
+ eg.spawn(() => fetchAnalytics(id))
154
+
155
+ try {
156
+ const [user, orders, analytics] = await eg.wait()
157
+ } catch (err) {
158
+ // First error — all other tasks were cancelled
159
+ console.error('Failed:', err)
160
+ }
161
+ ```
162
+
163
+ ### Mutex
164
+
165
+ Async mutual exclusion. Serialize access to shared resources under concurrency.
166
+
167
+ ```typescript
168
+ const mu = new Mutex()
169
+
170
+ // withLock — recommended (auto-unlocks on error)
171
+ const result = await mu.withLock(async () => {
172
+ return await db.query('UPDATE ...')
173
+ })
174
+
175
+ // Manual lock/unlock
176
+ await mu.lock()
177
+ try { /* critical section */ }
178
+ finally { mu.unlock() }
179
+ ```
180
+
181
+ ### Once<T>
182
+
183
+ Run a function exactly once, even if called concurrently. All callers get the same result.
184
+
185
+ ```typescript
186
+ const once = new Once<DBConnection>()
187
+ const conn = await once.do(() => createExpensiveConnection())
188
+ // Subsequent calls return the cached result
189
+ ```
190
+
191
+ ### select(cases, opts?)
192
+
193
+ Wait for the first of multiple promises to resolve, like Go's select.
194
+
195
+ ```typescript
196
+ // Blocking — waits for first ready
197
+ await select([
198
+ [ch.recv(), (value) => console.log('received', value)],
199
+ [after(5000), () => console.log('timeout')],
200
+ ])
201
+
202
+ // Non-blocking — returns immediately if nothing is ready (Go's select with default)
203
+ await select(
204
+ [[ch.recv(), (value) => process(value)]],
205
+ { default: () => console.log('channel not ready') },
206
+ )
207
+ ```
208
+
209
+ ### after(ms) / ticker(ms)
210
+
211
+ Timers for use with select and async iteration.
212
+
213
+ ```typescript
214
+ await after(1000) // one-shot: resolves after 1 second
215
+
216
+ // Repeating: tick every 500ms
217
+ const t = ticker(500)
218
+ for await (const _ of t) {
219
+ console.log('tick')
220
+ if (shouldStop) t.stop()
221
+ }
222
+ ```
223
+
224
+ ### register(name, fn) / run(name, ...args)
225
+
226
+ Named task registry. Register functions by name, call them by name.
227
+
228
+ ```typescript
229
+ register('resize', (buffer, w, h) => sharp(buffer).resize(w, h).toBuffer())
230
+ const resized = await run('resize', imageBuffer, 800, 600)
231
+ ```
232
+
233
+ ### configure(opts?)
234
+
235
+ Optional global configuration. Must be called before the first spawn().
236
+
237
+ ```typescript
238
+ configure({
239
+ maxThreads: 4, // default: os.availableParallelism()
240
+ concurrency: 64, // max concurrent tasks per shared worker (default: 64)
241
+ idleTimeout: 30_000, // kill idle workers after 30s (default)
242
+ adapter: 'auto', // 'auto' | 'node' | 'bun' | 'inline'
243
+ })
244
+ ```
245
+
246
+ ### stats() / resize(n)
247
+
248
+ ```typescript
249
+ const s = stats() // { totalWorkers, idleWorkers, busyWorkers, queuedTasks, ... }
250
+ resize(8) // scale pool up/down at runtime
251
+ ```
252
+
253
+ ### detectRuntime() / detectCapability()
254
+
255
+ ```typescript
256
+ detectRuntime() // 'node' | 'bun' | 'deno' | 'browser'
257
+ detectCapability() // 'full-threads' | 'single-thread'
258
+ ```
259
+
260
+ ## Runtimes
261
+
262
+ - Node.js >= 20: Full support via worker_threads
263
+ - Bun: Full support via Web Workers (file-based)
264
+ - Deno: Planned
265
+ - Cloudflare Workers: Not supported (no thread support)
266
+ - Vercel Edge: Not supported (no thread support)
267
+
268
+ ## Testing
269
+
270
+ ```typescript
271
+ import { configure } from '@dmop/puru'
272
+ configure({ adapter: 'inline' }) // runs tasks in main thread, no real workers
273
+ ```
274
+
275
+ ## Limitations
276
+
277
+ - Functions passed to spawn() cannot capture variables from the enclosing scope
278
+ - Channel values must be structured-cloneable (no functions, symbols, or WeakRefs)
279
+ - null cannot be sent through a channel (it's the "closed" sentinel)
280
+ - register()/run() args must be JSON-serializable
281
+ - Channel operations from workers have ~0.1-0.5ms RPC overhead per send/recv (fine for coarse-grained coordination, not for per-item micro-operations)
282
+ - Spawn overhead is ~0.1-0.5ms — use spawn for tasks > 5ms; for trivial operations, call directly
283
+
284
+ ## License
285
+
286
+ MIT
package/llms.txt ADDED
@@ -0,0 +1,45 @@
1
+ # puru (プール)
2
+
3
+ > A thread pool with Go-style concurrency primitives for JavaScript
4
+
5
+ puru manages a pool of worker threads and provides a simple API to run functions off the main thread. No separate worker files, no manual message passing.
6
+
7
+ Works on Node.js and Bun. Deno support coming soon.
8
+
9
+ ## Install
10
+
11
+ npm install @dmop/puru
12
+ bun add @dmop/puru
13
+
14
+ ## Core API
15
+
16
+ - spawn(fn, opts?) — run a function in a worker thread, returns { result: Promise<T>, cancel() }
17
+ - chan(capacity?) — Go-style channels for cross-thread communication
18
+ - WaitGroup — spawn multiple tasks, wait for all (like Promise.all but off main thread)
19
+ - ErrGroup — like WaitGroup but cancels all on first error
20
+ - select(cases, opts?) — wait for first of multiple promises (like Go's select)
21
+ - Mutex — async mutual exclusion
22
+ - Once<T> — run a function exactly once, all callers get same result
23
+ - after(ms) / ticker(ms) — timers for use with select and async iteration
24
+ - register(name, fn) / run(name, ...args) — named task registry
25
+ - configure(opts?) — global config (maxThreads, concurrency, idleTimeout, adapter)
26
+ - stats() / resize(n) — pool introspection and runtime scaling
27
+
28
+ ## Two Modes
29
+
30
+ - Exclusive (default): 1 task per thread, for CPU-bound work — spawn(() => heavyWork())
31
+ - Concurrent: many tasks share a thread's event loop, for I/O-bound work — spawn(() => fetch(url), { concurrent: true })
32
+
33
+ ## Key Constraints
34
+
35
+ - Functions passed to spawn() must be self-contained (no closure captures)
36
+ - Channel values must be structured-cloneable (no functions, symbols, WeakRefs)
37
+ - null cannot be sent through channels (it's the "closed" sentinel)
38
+ - register()/run() args must be JSON-serializable
39
+ - Spawn overhead is ~0.1-0.5ms — use spawn for tasks > 5ms; for trivial operations, call directly
40
+
41
+ ## Links
42
+
43
+ - Docs: https://github.com/dmop/puru#readme
44
+ - Full docs for LLMs: https://raw.githubusercontent.com/dmop/puru/main/llms-full.txt
45
+ - AI assistant guide (AGENTS.md): https://raw.githubusercontent.com/dmop/puru/main/AGENTS.md
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@dmop/puru",
3
- "version": "0.1.4",
3
+ "version": "0.1.10",
4
4
  "description": "puru (プール) — A thread pool with Go-style concurrency primitives for JavaScript",
5
5
  "type": "module",
6
6
  "main": "./dist/index.cjs",
@@ -19,16 +19,20 @@
19
19
  }
20
20
  },
21
21
  "files": [
22
- "dist"
22
+ "dist",
23
+ "llms.txt",
24
+ "llms-full.txt",
25
+ "AGENTS.md"
23
26
  ],
24
27
  "engines": {
25
- "node": ">=18.0.0"
28
+ "node": ">=20.0.0"
26
29
  },
27
30
  "scripts": {
28
31
  "build": "tsup",
32
+ "lint": "oxlint . --vitest-plugin --node-plugin --deny-warnings -D no-explicit-any",
29
33
  "test": "vitest run",
30
34
  "test:watch": "vitest",
31
- "typecheck": "tsc --noEmit",
35
+ "typecheck": "npm run build && tsc -p tsconfig.check.json",
32
36
  "bench": "npm run build && npx tsx benchmarks/run-all.ts",
33
37
  "bench:node": "npm run build && npx tsx benchmarks/run-all.ts",
34
38
  "bench:bun": "npm run build && bun benchmarks/run-all.ts",
@@ -43,12 +47,37 @@
43
47
  "bench:errgroup": "npx tsx benchmarks/09-errgroup.ts",
44
48
  "bench:select": "npx tsx benchmarks/10-select-default.ts",
45
49
  "bench:once": "npx tsx benchmarks/11-once-ticker.ts",
46
- "prepublishOnly": "npm run build"
50
+ "size": "npm run build && size-limit",
51
+ "docs": "typedoc",
52
+ "prepublishOnly": "npm run build",
53
+ "prepare": "husky"
47
54
  },
55
+ "size-limit": [
56
+ {
57
+ "path": "dist/index.js",
58
+ "limit": "15 kB",
59
+ "ignore": [
60
+ "os",
61
+ "fs",
62
+ "path",
63
+ "worker_threads",
64
+ "child_process",
65
+ "url"
66
+ ]
67
+ }
68
+ ],
48
69
  "devDependencies": {
70
+ "@changesets/changelog-github": "^0.6.0",
71
+ "@changesets/cli": "^2.30.0",
72
+ "@size-limit/preset-small-lib": "^12.0.1",
49
73
  "@types/node": "^22.0.0",
74
+ "@vitest/coverage-v8": "^3.2.4",
50
75
  "bun-types": "^1.3.11",
76
+ "husky": "^9.1.7",
77
+ "oxlint": "^1.58.0",
78
+ "size-limit": "^12.0.1",
51
79
  "tsup": "^8.5.0",
80
+ "typedoc": "^0.28.18",
52
81
  "typescript": "^5.7.0",
53
82
  "vitest": "^3.1.0"
54
83
  },
@@ -66,6 +95,10 @@
66
95
  "type": "git",
67
96
  "url": "git+https://github.com/dmop/puru.git"
68
97
  },
98
+ "publishConfig": {
99
+ "access": "public",
100
+ "registry": "https://registry.npmjs.org"
101
+ },
69
102
  "homepage": "https://github.com/dmop/puru#readme",
70
103
  "bugs": {
71
104
  "url": "https://github.com/dmop/puru/issues"