@wrongstack/core 0.1.8 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73) hide show
  1. package/dist/agent-bridge-6KPqsFx6.d.ts +33 -0
  2. package/dist/compactor-B4mQZXf2.d.ts +17 -0
  3. package/dist/config-BU9f_5yH.d.ts +193 -0
  4. package/dist/{provider-txgB0Oq9.d.ts → context-BmM2xGUZ.d.ts} +532 -472
  5. package/dist/coordination/index.d.ts +694 -0
  6. package/dist/coordination/index.js +1995 -0
  7. package/dist/coordination/index.js.map +1 -0
  8. package/dist/defaults/index.d.ts +34 -2206
  9. package/dist/defaults/index.js +4116 -3790
  10. package/dist/defaults/index.js.map +1 -1
  11. package/dist/events-BMNaEFZl.d.ts +218 -0
  12. package/dist/execution/index.d.ts +260 -0
  13. package/dist/execution/index.js +1625 -0
  14. package/dist/execution/index.js.map +1 -0
  15. package/dist/index.d.ts +50 -12
  16. package/dist/index.js +6669 -5909
  17. package/dist/index.js.map +1 -1
  18. package/dist/infrastructure/index.d.ts +10 -0
  19. package/dist/infrastructure/index.js +575 -0
  20. package/dist/infrastructure/index.js.map +1 -0
  21. package/dist/input-reader-E-ffP2ee.d.ts +12 -0
  22. package/dist/kernel/index.d.ts +15 -4
  23. package/dist/kernel/index.js.map +1 -1
  24. package/dist/logger-BH6AE0W9.d.ts +24 -0
  25. package/dist/logger-BMQgxvdy.d.ts +12 -0
  26. package/dist/mcp-servers-Dzgg4x1w.d.ts +100 -0
  27. package/dist/memory-CEXuo7sz.d.ts +16 -0
  28. package/dist/mode-CV077NjV.d.ts +27 -0
  29. package/dist/models/index.d.ts +60 -0
  30. package/dist/models/index.js +621 -0
  31. package/dist/models/index.js.map +1 -0
  32. package/dist/models-registry-DqzwpBQy.d.ts +46 -0
  33. package/dist/models-registry-Y2xbog0E.d.ts +95 -0
  34. package/dist/multi-agent-fmkRHtof.d.ts +283 -0
  35. package/dist/observability/index.d.ts +353 -0
  36. package/dist/observability/index.js +691 -0
  37. package/dist/observability/index.js.map +1 -0
  38. package/dist/observability-BhnVLBLS.d.ts +67 -0
  39. package/dist/path-resolver-CPRj4bFY.d.ts +10 -0
  40. package/dist/path-resolver-DBjaoXFq.d.ts +54 -0
  41. package/dist/plugin-DJk6LL8B.d.ts +434 -0
  42. package/dist/renderer-rk_1Swwc.d.ts +158 -0
  43. package/dist/sdd/index.d.ts +206 -0
  44. package/dist/sdd/index.js +864 -0
  45. package/dist/sdd/index.js.map +1 -0
  46. package/dist/secret-scrubber-CicHLN4G.d.ts +31 -0
  47. package/dist/secret-scrubber-DF88luOe.d.ts +54 -0
  48. package/dist/secret-vault-DoISxaKO.d.ts +19 -0
  49. package/dist/security/index.d.ts +30 -0
  50. package/dist/security/index.js +524 -0
  51. package/dist/security/index.js.map +1 -0
  52. package/dist/selector-BbJqiEP4.d.ts +51 -0
  53. package/dist/session-reader-Drq8RvJu.d.ts +150 -0
  54. package/dist/skill-DhfSizKv.d.ts +72 -0
  55. package/dist/storage/index.d.ts +382 -0
  56. package/dist/storage/index.js +1530 -0
  57. package/dist/storage/index.js.map +1 -0
  58. package/dist/{system-prompt-vAB0F54-.d.ts → system-prompt-BC_8ypCG.d.ts} +1 -1
  59. package/dist/task-graph-BITvWt4t.d.ts +160 -0
  60. package/dist/tool-executor-CpuJPYm9.d.ts +97 -0
  61. package/dist/types/index.d.ts +26 -4
  62. package/dist/types/index.js +1787 -4
  63. package/dist/types/index.js.map +1 -1
  64. package/dist/utils/index.d.ts +49 -2
  65. package/dist/utils/index.js +100 -2
  66. package/dist/utils/index.js.map +1 -1
  67. package/package.json +34 -2
  68. package/skills/audit-log/SKILL.md +67 -0
  69. package/skills/bug-hunter/SKILL.md +87 -0
  70. package/skills/refactor-planner/SKILL.md +94 -0
  71. package/skills/security-scanner/SKILL.md +117 -0
  72. package/dist/mode-Pjt5vMS6.d.ts +0 -815
  73. package/dist/session-reader-9sOTgmeC.d.ts +0 -1087
@@ -0,0 +1,694 @@
1
+ import { M as MultiAgentConfig, k as SubagentRunner, g as SubagentConfig, m as TaskSpec, l as TaskResult, d as CoordinatorStatus, e as MultiAgentCoordinator, f as SpawnResult, o as BridgeMessage, A as AgentBridge } from '../multi-agent-fmkRHtof.js';
2
+ export { B as BudgetExceededError, a as BudgetKind, b as BudgetLimits, c as BudgetUsage, S as SubagentBudget } from '../multi-agent-fmkRHtof.js';
3
+ import { u as Tool, q as SessionWriter, o as SessionStore } from '../context-BmM2xGUZ.js';
4
+ import { I as InMemoryAgentBridge } from '../agent-bridge-6KPqsFx6.js';
5
+ export { a as InMemoryBridgeTransport, c as createMessage } from '../agent-bridge-6KPqsFx6.js';
6
+ import { E as EventBus } from '../events-BMNaEFZl.js';
7
+ import { EventEmitter } from 'node:events';
8
+ import { A as Agent, n as AgentInput } from '../plugin-DJk6LL8B.js';
9
+ import '../observability-BhnVLBLS.js';
10
+ import '../renderer-rk_1Swwc.js';
11
+ import '../secret-scrubber-CicHLN4G.js';
12
+ import '../config-BU9f_5yH.js';
13
+ import '../models-registry-Y2xbog0E.js';
14
+ import '../logger-BMQgxvdy.js';
15
+
16
+ /**
17
+ * Single fleet-wide event with subagent attribution. Whatever a child
18
+ * agent emits on its own EventBus gets re-published here, prefixed with
19
+ * `subagentId` so a single subscriber can multiplex across the fleet.
20
+ *
21
+ * The director uses `FleetBus.filter('tool.executed', …)` to see every
22
+ * tool call across the fleet; the TUI uses
23
+ * `FleetBus.subscribe(id, handler)` to render a per-subagent panel.
24
+ */
25
+ interface FleetEvent {
26
+ subagentId: string;
27
+ taskId?: string;
28
+ ts: number;
29
+ type: string;
30
+ payload: unknown;
31
+ }
32
+ type FleetHandler = (event: FleetEvent) => void;
33
+ /**
34
+ * Fan-in for per-subagent EventBuses. Each subagent's bus is plugged in
35
+ * via `attach()`; the FleetBus re-emits every event with subagent
36
+ * attribution. Detachment is automatic via the returned disposer — call
37
+ * it when a subagent terminates so we don't leak listeners.
38
+ *
39
+ * The bus exposes two subscription modes: by `subagentId` (everything
40
+ * from one child) and by `type` (one event-type across the fleet). They
41
+ * compose — if you need a per-subagent + per-type slice, subscribe by
42
+ * type and filter on `event.subagentId` in your handler.
43
+ */
44
+ declare class FleetBus {
45
+ private readonly byId;
46
+ private readonly byType;
47
+ private readonly any;
48
+ /**
49
+ * Hook a subagent's EventBus into the fleet. EventBus is strongly
50
+ * typed and doesn't expose an `onAny` hook, so we subscribe to the
51
+ * canonical set of event types a subagent emits during a run. New
52
+ * event types added to the kernel must be added here too — but the
53
+ * cost is a tiny single line per type, and the explicit list keeps
54
+ * the wire format clear.
55
+ *
56
+ * Returns a disposer that detaches every subscription; call on
57
+ * subagent teardown so the listeners don't outlive the run.
58
+ */
59
+ attach(subagentId: string, bus: EventBus, taskId?: string): () => void;
60
+ /** Subscribe to every event from one subagent. */
61
+ subscribe(subagentId: string, handler: FleetHandler): () => void;
62
+ /** Subscribe to one event type across all subagents. */
63
+ filter(type: string, handler: FleetHandler): () => void;
64
+ /** Subscribe to literally everything. The fleet roll-up uses this. */
65
+ onAny(handler: FleetHandler): () => void;
66
+ emit(event: FleetEvent): void;
67
+ }
68
+ /**
69
+ * Roll-up of token usage + cost across an entire director run. The
70
+ * director's `fleet_status` tool returns this so the model can reason
71
+ * about budget in its next turn ("the researcher already burned $0.40,
72
+ * lean on summaries for the next task").
73
+ */
74
+ interface FleetUsage {
75
+ total: {
76
+ input: number;
77
+ output: number;
78
+ cacheRead: number;
79
+ cacheWrite: number;
80
+ cost: number;
81
+ };
82
+ perSubagent: Record<string, SubagentUsageSnapshot>;
83
+ }
84
+ interface SubagentUsageSnapshot {
85
+ subagentId: string;
86
+ provider?: string;
87
+ model?: string;
88
+ input: number;
89
+ output: number;
90
+ cacheRead: number;
91
+ cacheWrite: number;
92
+ cost: number;
93
+ toolCalls: number;
94
+ iterations: number;
95
+ startedAt: number;
96
+ lastEventAt: number;
97
+ }
98
+ /**
99
+ * Aggregates provider.response + tool.executed events from the FleetBus
100
+ * into a live `FleetUsage` snapshot. Costs are computed by the caller
101
+ * via a `priceLookup(subagentId)` so we don't bake provider-pricing
102
+ * coupling into core; the CLI/tests supply a function that resolves
103
+ * each subagent's per-token rates from the models registry.
104
+ */
105
+ declare class FleetUsageAggregator {
106
+ private readonly bus;
107
+ private readonly priceLookup?;
108
+ private readonly metaLookup?;
109
+ private readonly perSubagent;
110
+ private readonly total;
111
+ constructor(bus: FleetBus, priceLookup?: ((subagentId: string) => {
112
+ input?: number;
113
+ output?: number;
114
+ cacheRead?: number;
115
+ cacheWrite?: number;
116
+ } | undefined) | undefined, metaLookup?: ((subagentId: string) => {
117
+ provider?: string;
118
+ model?: string;
119
+ } | undefined) | undefined);
120
+ /** Live snapshot — safe to call from a tool's execute() body. */
121
+ snapshot(): FleetUsage;
122
+ private ensure;
123
+ private onProviderResponse;
124
+ private onToolExecuted;
125
+ private onIterationStarted;
126
+ }
127
+
128
+ /**
129
+ * Director — high-level orchestrator that owns a `MultiAgentCoordinator`,
130
+ * a `FleetBus`, and a `FleetUsageAggregator`. Exposes a small imperative
131
+ * API (`spawn`, `assign`, `awaitTasks`, `terminate`, `status`, `usage`)
132
+ * that's easy to test, and a `tools()` factory that wraps the same API
133
+ * as agent-callable `Tool`s so an LLM can drive the orchestration.
134
+ *
135
+ * This class is intentionally *not* an `Agent`. It's a coordinator +
136
+ * observability surface. To make it LLM-driven, construct an Agent
137
+ * with `director.tools()` registered. That keeps the construction
138
+ * symmetric with how other agents are built and avoids smuggling a
139
+ * heavy LLM dependency into core just for the director path.
140
+ */
141
+ interface DirectorOptions {
142
+ config: MultiAgentConfig;
143
+ runner?: SubagentRunner;
144
+ /**
145
+ * When set, the director writes a `fleet.json` manifest to this path
146
+ * recording every spawned subagent (id, provider, model, role, task
147
+ * ids). Used by `wstack replay <runId>` to rehydrate a fleet. Pass an
148
+ * absolute file path — the directory must already exist (the
149
+ * director-session factory creates it when used together).
150
+ */
151
+ manifestPath?: string;
152
+ /**
153
+ * Optional roster used by `leaderSystemPrompt()` to render a roles
154
+ * summary into the leader's preamble. Same shape as the roster passed
155
+ * to `tools()` — typically the same value.
156
+ */
157
+ roster?: Record<string, SubagentConfig>;
158
+ /**
159
+ * Override the built-in fleet preamble (see `DEFAULT_DIRECTOR_PREAMBLE`).
160
+ * Pass an empty string to suppress the preamble entirely.
161
+ */
162
+ directorPreamble?: string;
163
+ /**
164
+ * Override the built-in subagent baseline (see
165
+ * `DEFAULT_SUBAGENT_BASELINE`). Pass an empty string to suppress.
166
+ */
167
+ subagentBaseline?: string;
168
+ /**
169
+ * Absolute path to a directory the fleet can use as a shared scratchpad
170
+ * (read + write by every subagent). When set, the director creates it on
171
+ * construction and `subagentSystemPrompt()` automatically injects a
172
+ * "Shared notes" block telling subagents where to drop their findings.
173
+ * This is the cheap fleet-coordination channel — agents don't need each
174
+ * other's transcripts, just each other's conclusions.
175
+ *
176
+ * Convention: under a fleet run rooted at `<sessionsRoot>/<runId>/`,
177
+ * pass `<sessionsRoot>/<runId>/shared/` here.
178
+ */
179
+ sharedScratchpadPath?: string;
180
+ /**
181
+ * Maximum number of spawns this director can perform across its
182
+ * lifetime. Default: unlimited. Acts as a hard fleet-wide cost cap —
183
+ * a runaway leader that keeps spawning workers gets cut off cleanly
184
+ * instead of burning provider tokens until the user kills the
185
+ * process. The N+1-th spawn call rejects with a `DirectorBudgetError`.
186
+ */
187
+ maxSpawns?: number;
188
+ /**
189
+ * Maximum nesting depth for spawns. The director constructed by the
190
+ * user is at depth `spawnDepth` (default 0); any subagent that itself
191
+ * acts as a director would construct its own `Director` with
192
+ * `spawnDepth: parent.spawnDepth + 1`. When `spawnDepth >= maxSpawnDepth`,
193
+ * `spawn()` rejects. Default: 2 (root director can spawn workers; a
194
+ * worker that becomes a sub-director cannot itself spawn further).
195
+ * This stops infinite recursive director chains from a hostile or
196
+ * confused prompt.
197
+ */
198
+ maxSpawnDepth?: number;
199
+ /**
200
+ * Current spawn-chain depth for this director instance. Defaults to 0.
201
+ * A nested director should pass `parent.spawnDepth + 1`. Together with
202
+ * `maxSpawnDepth` this bounds the chain.
203
+ */
204
+ spawnDepth?: number;
205
+ }
206
+ /**
207
+ * Thrown by `Director.spawn()` when a configured spawn cap (`maxSpawns`,
208
+ * `maxSpawnDepth`) is hit. Distinct error class so callers — including
209
+ * the `spawn_subagent` tool surface — can recognize the budget case and
210
+ * report it cleanly instead of treating it like an unexpected failure.
211
+ */
212
+ declare class DirectorBudgetError extends Error {
213
+ readonly kind: 'max_spawns' | 'max_spawn_depth';
214
+ readonly limit: number;
215
+ readonly observed: number;
216
+ constructor(kind: 'max_spawns' | 'max_spawn_depth', limit: number, observed: number);
217
+ }
218
+ declare class Director {
219
+ readonly id: string;
220
+ readonly fleet: FleetBus;
221
+ readonly usage: FleetUsageAggregator;
222
+ /**
223
+ * Director-side bridge endpoint. Subagents are wired to the same
224
+ * in-memory transport so the director can `ask()` them synchronously
225
+ * and they can `send()` progress back. Exposed so external code (e.g.
226
+ * the TUI) can subscribe to inbound messages.
227
+ */
228
+ readonly bridge: InMemoryAgentBridge;
229
+ private readonly transport;
230
+ private readonly coordinator;
231
+ /** Resolves with the matching `TaskResult` the first time the
232
+ * coordinator emits `task.completed` for a given task id. Each entry
233
+ * is created lazily on first poll/await and cleared once consumed. */
234
+ private readonly taskWaiters;
235
+ /** Cache of completed results in case the consumer asks AFTER the
236
+ * coordinator already fired the event — `awaitTasks(['t-1'])` after
237
+ * t-1 finished should resolve immediately, not hang. */
238
+ private readonly completed;
239
+ /** Per-subagent provider/model metadata, captured at spawn time so the
240
+ * FleetUsageAggregator's metaLookup can surface readable rows. */
241
+ private readonly subagentMeta;
242
+ private readonly priceLookups;
243
+ /** Bridge endpoints we created per subagent (so we can `stop()` them
244
+ * on shutdown and free transport subscriptions). */
245
+ private readonly subagentBridges;
246
+ /** Tracks per-spawn config + assigned task ids for manifest writing. */
247
+ private readonly manifestEntries;
248
+ private readonly manifestPath?;
249
+ private readonly roster?;
250
+ private readonly directorPreamble;
251
+ private readonly subagentBaseline;
252
+ /** Absolute path to the fleet's shared scratchpad directory, or null
253
+ * when none was configured. Exposed as a readonly getter for callers
254
+ * that need to surface the path to the user (e.g. the CLI logging
255
+ * the location after `--director` boots). */
256
+ readonly sharedScratchpadPath: string | null;
257
+ /** Spawn cap (lifetime total). Infinity means unlimited. */
258
+ readonly maxSpawns: number;
259
+ /** Nesting cap. The N-th director in a chain has `spawnDepth = N-1`. */
260
+ readonly maxSpawnDepth: number;
261
+ /** This director's position in a director chain. Root director = 0. */
262
+ readonly spawnDepth: number;
263
+ /** Live spawn counter for `maxSpawns` enforcement. */
264
+ private spawnCount;
265
+ constructor(opts: DirectorOptions);
266
+ /**
267
+ * Spawn a subagent. Identical to the coordinator's `spawn()` but
268
+ * captures provider/model metadata for the usage aggregator and
269
+ * lets the FleetBus attach to the runner's EventBus when the task
270
+ * actually runs (see `attachSubagentBus`).
271
+ *
272
+ * Caller-supplied `priceLookup` is optional but recommended — without
273
+ * it the `cost` column in `usage.snapshot()` stays at 0.
274
+ */
275
+ spawn(config: SubagentConfig, priceLookup?: {
276
+ input?: number;
277
+ output?: number;
278
+ cacheRead?: number;
279
+ cacheWrite?: number;
280
+ }): Promise<string>;
281
+ /**
282
+ * Synchronously ask a subagent something via the bridge. Sends a
283
+ * `task` message addressed to the subagent and awaits a matching
284
+ * reply (matched by message id). Subagent runners that handle these
285
+ * requests subscribe to `ctx.bridge` and reply with a message whose
286
+ * `id` equals the incoming request's id (see `InMemoryAgentBridge`'s
287
+ * `request<T>` implementation).
288
+ *
289
+ * Returns the response payload directly (the bridge wrapper is
290
+ * unwrapped for ergonomics). Times out after `timeoutMs` (default
291
+ * matches the bridge's own default of 30s) — surface those rejections
292
+ * to the caller as actionable errors instead of letting tools hang.
293
+ */
294
+ ask<T = unknown>(subagentId: string, payload: unknown, timeoutMs?: number): Promise<T>;
295
+ /**
296
+ * Read completed task results and format them as a structured text
297
+ * block the director's LLM can paste into its own context. The
298
+ * Director keeps every completed `TaskResult` in `completed` so this
299
+ * is a pure read — no bridge round-trip, cheap to call.
300
+ *
301
+ * The returned string is intentionally markdown-flavored: headers per
302
+ * subagent, a one-line meta row (iter / tools / ms), and the task's
303
+ * result text. Pass `style: 'json'` for a programmatic shape instead
304
+ * (useful when the director model is doing structured-output work).
305
+ */
306
+ rollUp(taskIds: string[], style?: 'markdown' | 'json'): string;
307
+ /**
308
+ * Write the fleet manifest to `manifestPath`. Returns the path written
309
+ * or null when no path was configured. Captures every spawn + its
310
+ * assigned tasks — paired with per-subagent JSONLs, this is enough to
311
+ * replay an entire director run.
312
+ */
313
+ writeManifest(): Promise<string | null>;
314
+ /**
315
+ * Tear down the director: stop every subagent, close every bridge
316
+ * endpoint, and (when configured) write the final manifest. Idempotent
317
+ * — calling shutdown twice is a no-op on the second invocation.
318
+ */
319
+ shutdown(): Promise<void>;
320
+ /**
321
+ * Hand a task to the coordinator. Returns the assigned task id so
322
+ * callers can wait on it via `awaitTasks([id])`. The coordinator's
323
+ * concurrency limit applies — the task may queue before running.
324
+ */
325
+ assign(task: TaskSpec): Promise<string>;
326
+ /**
327
+ * Block until every task id resolves. Returns results in the same
328
+ * order as the input. If any task hasn't completed by the time this
329
+ * is called, the promise hangs until it does — pair with a timeout
330
+ * at the caller if that's a concern. Resolves immediately for ids
331
+ * whose results were already cached.
332
+ */
333
+ awaitTasks(taskIds: string[]): Promise<TaskResult[]>;
334
+ terminate(subagentId: string): Promise<void>;
335
+ terminateAll(): Promise<void>;
336
+ status(): CoordinatorStatus;
337
+ /**
338
+ * Subscribe to coordinator events. Currently only `task.completed` is
339
+ * exposed (the others are internal lifecycle). Returns an unsubscribe
340
+ * function. External callers (e.g. the CLI's `MultiAgentHost`) use this
341
+ * to drive their own pending/results tracking without poking the
342
+ * coordinator directly.
343
+ */
344
+ on(event: 'task.completed', handler: (payload: {
345
+ task: TaskSpec;
346
+ result: TaskResult;
347
+ }) => void): () => void;
348
+ /**
349
+ * Snapshot of every task that has resolved (success, failed, timeout,
350
+ * stopped) since the director started. Returned in completion order
351
+ * via the internal map's iteration order. Used by `/fleet status` to
352
+ * paint the completed table without reaching into private state.
353
+ */
354
+ completedResults(): TaskResult[];
355
+ snapshot(): FleetUsage;
356
+ /**
357
+ * Compose the leader/director-agent system prompt: fleet preamble +
358
+ * (optional) roster summary + user base prompt. Pass the result to your
359
+ * leader Agent's `ctx.systemPrompt` when constructing it.
360
+ *
361
+ * `basePrompt` defaults to `config.leaderSystemPrompt` so callers can
362
+ * use the no-arg form when the multi-agent config already carries it.
363
+ */
364
+ leaderSystemPrompt(basePrompt?: string): string;
365
+ /**
366
+ * Compose a subagent's system prompt for a given `SubagentConfig`:
367
+ * baseline + role + task + per-spawn override. Returned by value — does
368
+ * not mutate the config. Factories (the user-supplied `AgentFactory`)
369
+ * should call this when building each subagent's Agent so the bridge
370
+ * contract, role context, and override are all surfaced.
371
+ *
372
+ * When `taskBrief` is omitted the Task section is dropped. Pass the
373
+ * actual task description here to reinforce it in the system prompt
374
+ * (the runner already passes it as user input — duplicating in the
375
+ * system prompt is optional but improves anchoring on small models).
376
+ */
377
+ subagentSystemPrompt(config: SubagentConfig, taskBrief?: string): string;
378
+ /**
379
+ * Build the tool set the LLM-driven director uses to orchestrate.
380
+ * Returns an array of `Tool` definitions; register these on the
381
+ * director's `Agent` to expose `spawn_subagent`, `assign_task`, etc.
382
+ * Each tool's `execute()` delegates straight to the matching method
383
+ * above.
384
+ *
385
+ * Tools all carry `permission: 'auto'` — the *user* has already
386
+ * approved running the director when they kicked off the run, so
387
+ * gating individual orchestration calls behind a confirm prompt
388
+ * would just be noise. The actual subagent tools they spawn are
389
+ * still permission-checked normally.
390
+ */
391
+ tools(roster?: Record<string, SubagentConfig>): Tool[];
392
+ }
393
+
394
+ interface MultiAgentCoordinatorOptions {
395
+ /**
396
+ * Callback that executes a task on behalf of a subagent. Required for
397
+ * `assign()` to actually run anything — without it, tasks queue forever.
398
+ * The coordinator provides per-subagent isolation (own budget, own signal,
399
+ * own bridge) and enforces timeout + concurrency.
400
+ */
401
+ runner?: SubagentRunner;
402
+ }
403
+ declare class DefaultMultiAgentCoordinator extends EventEmitter implements MultiAgentCoordinator {
404
+ readonly coordinatorId: string;
405
+ readonly config: MultiAgentConfig;
406
+ private readonly runner?;
407
+ private readonly subagents;
408
+ private pendingTasks;
409
+ private completedResults;
410
+ private totalIterations;
411
+ private inFlight;
412
+ constructor(config: MultiAgentConfig, options?: MultiAgentCoordinatorOptions);
413
+ spawn(subagent: SubagentConfig): Promise<SpawnResult>;
414
+ assign(task: TaskSpec): Promise<void>;
415
+ delegate(to: string, msg: BridgeMessage): Promise<void>;
416
+ /**
417
+ * Wire up the communication bridge for a subagent. Call after spawn() once
418
+ * the caller has created the bidirectional connection.
419
+ */
420
+ setSubagentBridge(subagentId: string, bridge: AgentBridge): void;
421
+ stop(subagentId: string): Promise<void>;
422
+ stopAll(): Promise<void>;
423
+ getStatus(): CoordinatorStatus;
424
+ /** Expose snapshot of completed results — useful for callers awaiting all done. */
425
+ results(): readonly TaskResult[];
426
+ /**
427
+ * Manual completion — for callers that drive subagents without a runner
428
+ * (e.g. external orchestrators). When a runner is configured the coordinator
429
+ * calls this itself.
430
+ */
431
+ completeTask(result: TaskResult): void;
432
+ private tryDispatchNext;
433
+ private canDispatch;
434
+ private findIdleSubagent;
435
+ private runDispatched;
436
+ private executeWithTimeout;
437
+ private recordCompletion;
438
+ private isDone;
439
+ }
440
+
441
+ /**
442
+ * Caller-supplied factory that builds an isolated `Agent` for a subagent.
443
+ * The factory MUST construct a fresh `Context` per call — sharing context
444
+ * between subagents defeats isolation. Each Agent should also use either
445
+ * its own `EventBus` or a forwarded view, so per-subagent metrics can be
446
+ * attributed correctly.
447
+ */
448
+ type AgentFactory = (config: SubagentConfig) => Promise<AgentFactoryResult>;
449
+ interface AgentFactoryResult {
450
+ agent: Agent;
451
+ /** Event bus the factory wired to this agent — required for budget hookup. */
452
+ events: EventBus;
453
+ }
454
+ interface AgentRunnerOptions {
455
+ factory: AgentFactory;
456
+ /**
457
+ * Format a TaskSpec into the user input the agent will receive. Defaults
458
+ * to `task.description ?? ''`. Override when subagents expect structured
459
+ * input (e.g. JSON contracts, role-prefixed prompts).
460
+ */
461
+ formatTaskInput?: (task: TaskSpec, config: SubagentConfig) => AgentInput;
462
+ }
463
+ /**
464
+ * Builds a `SubagentRunner` that drives a real `Agent` per task while honoring
465
+ * the coordinator's budget and abort signal. This is the production adapter —
466
+ * the coordinator's `runner` option in CLI/TUI assemblies points here.
467
+ *
468
+ * Lifecycle per task:
469
+ * 1. factory(config) → fresh Agent + EventBus.
470
+ * 2. Subscribe to events to feed the budget (tool calls, token usage).
471
+ * 3. Call agent.run(input, { signal }) — the coordinator's signal cancels.
472
+ * 4. Map RunResult.status onto a `SubagentRunOutcome` or throw on failure.
473
+ * 5. Unsubscribe and let the factory's resources be GC'd.
474
+ *
475
+ * The budget is checked synchronously from event handlers — a runaway agent
476
+ * that crosses its tool-call limit triggers `BudgetExceededError`, which the
477
+ * coordinator surfaces as `status: 'failed'` on the task result.
478
+ */
479
+ declare function makeAgentSubagentRunner(opts: AgentRunnerOptions): SubagentRunner;
480
+
481
+ /**
482
+ * Per-subagent session factory.
483
+ *
484
+ * Director runs produce many parallel transcripts — one per spawned
485
+ * subagent — and we want them all rooted under the same director-run
486
+ * directory so a future `wstack replay <runId>` can rehydrate the whole
487
+ * fleet from a single tree.
488
+ *
489
+ * The factory builds (or accepts) a `SessionStore` whose `dir` points at
490
+ * `<sessionsRoot>/<directorRunId>/`, and returns a small `create()`
491
+ * function that the orchestration layer calls per-spawn. Each call
492
+ * yields a fresh `SessionWriter` whose JSONL file lives in that
493
+ * directory, named by either the caller-supplied `subagentId` (preferred,
494
+ * so the file name is human-readable) or a derived id.
495
+ *
496
+ * **Why a thin factory instead of plumbing options through every spawn
497
+ * site?** Because the director is the only caller that needs this
498
+ * isolation pattern, and shoving `sessionStore` options into
499
+ * `SubagentConfig` would leak storage details into a config shape that
500
+ * agents and the coordinator have no business knowing about.
501
+ */
502
+ interface DirectorSessionFactoryOptions {
503
+ /**
504
+ * Either a parent directory where `<directorRunId>/` will be created,
505
+ * or a pre-built `SessionStore` whose `dir` already points at the
506
+ * director run directory. Tests pass an in-memory store for isolation;
507
+ * production code passes the path under `~/.wrongstack/sessions/`.
508
+ */
509
+ store?: SessionStore;
510
+ sessionsRoot?: string;
511
+ /**
512
+ * Director run id — namespaces all subagent JSONLs under one folder.
513
+ * Defaults to a timestamped id; supplied explicitly when resuming a
514
+ * prior fleet manifest.
515
+ */
516
+ directorRunId?: string;
517
+ }
518
+ interface DirectorSessionFactory {
519
+ /** Absolute directory where this director run's transcripts live. */
520
+ readonly dir: string;
521
+ /** The director run id used to namespace the directory. */
522
+ readonly directorRunId: string;
523
+ /**
524
+ * Create a fresh `SessionWriter` for the named subagent. Each
525
+ * subagent gets its own JSONL file. The writer's `id` matches the
526
+ * supplied `subagentId` so disk paths line up with in-memory ids.
527
+ */
528
+ createSubagentSession(args: {
529
+ subagentId: string;
530
+ provider?: string;
531
+ model?: string;
532
+ title?: string;
533
+ }): Promise<SessionWriter>;
534
+ }
535
+ /**
536
+ * Build a `DirectorSessionFactory`. Pass either a pre-configured
537
+ * `SessionStore` (tests) or a `sessionsRoot` path (production). When
538
+ * neither is supplied the factory throws — there's no sane default for
539
+ * "where do these JSONLs live".
540
+ */
541
+ declare function makeDirectorSessionFactory(opts: DirectorSessionFactoryOptions): DirectorSessionFactory;
542
+
543
+ /**
544
+ * System-prompt composition helpers for the Director ecosystem.
545
+ *
546
+ * Two callers need composed prompts:
547
+ *
548
+ * 1. The **leader** (the director's own Agent) — needs a preamble that
549
+ * explains the fleet protocol: when to spawn, when to await, how to
550
+ * roll up, and the eight orchestration tools it owns.
551
+ *
552
+ * 2. Each **subagent** — needs a baseline that explains it has a parent
553
+ * it can call via the bridge, a role-specific block, the task brief,
554
+ * and finally any per-spawn `systemPromptOverride` from `SubagentConfig`.
555
+ *
556
+ * Both composers are pure functions: feed them parts, they return a string.
557
+ * No I/O, no side effects, no implicit defaults beyond the ones exported
558
+ * here. Callers (CLI multi-agent factory, Director itself) decide which
559
+ * parts to fill in — that keeps the composition seam visible and testable.
560
+ */
561
+ /**
562
+ * Default fleet-protocol preamble injected at the **front** of the
563
+ * director-agent's system prompt. Kept deliberately short — long preambles
564
+ * crowd out the user's leader prompt and the LLM stops attending. The tool
565
+ * descriptions live on the tool definitions themselves; this preamble only
566
+ * teaches *when* to reach for them.
567
+ */
568
+ declare const DEFAULT_DIRECTOR_PREAMBLE = "You are the Director of a multi-agent fleet. You orchestrate worker\nsubagents by spawning them, assigning tasks, awaiting completions, and\nrolling up their outputs into your next decision.\n\nCore fleet tools available to you:\n - spawn_subagent \u2014 create a worker with a chosen provider / model / role\n - assign_task \u2014 hand a piece of work to a specific subagent\n - await_tasks \u2014 block until named task ids complete (parallel-safe)\n - ask_subagent \u2014 synchronously query a running subagent via the bridge\n - roll_up \u2014 aggregate finished tasks into a markdown/json summary\n - terminate_subagent \u2014 abort a stuck worker (use sparingly)\n - fleet_status \u2014 snapshot of all subagents and pending tasks\n - fleet_usage \u2014 token + cost breakdown per subagent and total\n\nWorking rules:\n 1. Decompose first. Before spawning, decide which sub-tasks are\n independent and can run in parallel. Sequential work doesn't need a\n subagent \u2014 do it yourself.\n 2. Match worker to job. Cheap/fast model for triage, capable model for\n synthesis. Different providers per sibling is allowed and encouraged.\n 3. Always pair an assign with an await. Don't fire-and-forget; you owe\n the user a single coherent answer at the end.\n 4. Roll up before deciding. After await_tasks resolves, call roll_up so\n the results are folded back into your context in a compact form.\n 5. Budget is real. Check fleet_usage periodically. If a subagent is\n thrashing, terminate it rather than letting cost climb silently.\n 6. Never claim a subagent's work as your own without verifying it. If a\n result looks wrong, ask_subagent for clarification before passing it\n to the user.";
569
/**
 * Default baseline prepended to every subagent's system prompt. Tells the
 * subagent its place in the hierarchy and the bridge contract — without
 * this, a subagent has no way to know it *can* ask the parent for
 * clarification, and it will hallucinate answers when context is missing.
 *
 * Bridge contract: subagents may `send` progress and `request` answers, but
 * MAY NOT exfiltrate the parent's full system prompt or tools list. The
 * baseline reinforces this in plain text — the actual enforcement is at
 * the bridge transport layer, so this text is guidance, not a security
 * boundary.
 *
 * Override (or suppress with an empty string) via
 * `SubagentPromptParts.baseline`.
 */
declare const DEFAULT_SUBAGENT_BASELINE = "You are a subagent operating under a Director. You were spawned to handle\na specific slice of a larger plan \u2014 do that slice well and report back.\n\nBridge contract:\n - You have a parent (the Director). You may call `request` on the\n parent bridge to ask a clarifying question. Use this sparingly; the\n parent is also working.\n - You MAY NOT request the parent's system prompt, tool list, or other\n subagents' context. Those are not yours to read.\n - Your final task output is what the Director sees. Be concise,\n structured, and self-contained \u2014 assume the Director will paste your\n output into its own context.";
581
/**
 * Parts the leader-prompt composer (`composeDirectorPrompt`) accepts.
 * All fields are optional; omitted or empty parts are skipped entirely.
 */
interface DirectorPromptParts {
  /**
   * The user's existing leader system prompt — typically what was passed
   * via `MultiAgentConfig.leaderSystemPrompt`. Placed last so the
   * project-specific prompt sits closest to the conversation.
   */
  basePrompt?: string;
  /**
   * Override the built-in fleet preamble (`DEFAULT_DIRECTOR_PREAMBLE`).
   * Pass an empty string to suppress the preamble entirely.
   */
  directorPreamble?: string;
  /**
   * Optional roster summary block — a short list of pre-configured roles
   * the director can spawn (e.g. "researcher, coder, reviewer"). Helps
   * small models discover the available shapes without scanning tools.
   * See `rosterSummaryFromConfigs` for a ready-made renderer.
   */
  rosterSummary?: string;
}
593
/**
 * Compose the leader/director's system prompt. Order:
 * 1. Director preamble (fleet protocol)
 * 2. Roster summary (optional, when provided)
 * 3. User base prompt (the per-project leader prompt)
 *
 * Sections are separated by a blank line. Empty parts are skipped so the
 * output never contains stray blank-line runs.
 *
 * @param parts - Optional prompt sections; see {@link DirectorPromptParts}.
 *   Calling with no argument yields the default preamble alone.
 * @returns The composed system prompt string.
 */
declare function composeDirectorPrompt(parts?: DirectorPromptParts): string;
603
/**
 * Parts the subagent-prompt composer (`composeSubagentPrompt`) accepts.
 * Layered from generic to specific; later layers override earlier ones
 * when they conflict.
 */
interface SubagentPromptParts {
  /**
   * Base persona/identity for *every* subagent. Defaults to the bridge
   * contract baseline (`DEFAULT_SUBAGENT_BASELINE`). Pass an empty
   * string to suppress it.
   */
  baseline?: string;
  /** Role-specific block, e.g. "You are a code reviewer. Focus on…". */
  role?: string;
  /**
   * Task brief — usually the same string the runner passes as user input,
   * but exposed here in case the factory wants it duplicated in the
   * system prompt for reinforcement.
   */
  task?: string;
  /**
   * Absolute path to a shared scratchpad directory the whole fleet can
   * read/write. When set, the composer adds a "Shared notes" block that
   * tells the subagent where to drop findings and where to look for
   * sibling output. This is the cheap fleet-coordination channel —
   * agents don't need each other's transcripts, just each other's
   * conclusions. Falls between `task` and `override` so the override
   * can still narrow or replace it.
   */
  sharedScratchpad?: string;
  /**
   * Final per-spawn override from `SubagentConfig.systemPromptOverride`.
   * Added last so it wins on conflict — that's by design: the spawn site
   * knows the most about what this specific subagent should do.
   */
  override?: string;
}
630
/**
 * Compose a subagent's system prompt. Order:
 * 1. Baseline (bridge contract)
 * 2. Role
 * 3. Task brief
 * 4. Per-spawn override
 *
 * Same blank-line-separated joining as the director composer; empty
 * parts are skipped.
 *
 * Layering rationale: the baseline never needs to change between
 * subagents; the role is the "what kind of worker is this"; the task is
 * the "what should you do *now*"; the override is the spawn-site escape
 * hatch ("…and respond only in JSON"). Putting override last means it
 * never gets squashed by something earlier in the chain.
 *
 * @param parts - Optional prompt layers; see {@link SubagentPromptParts}.
 *   Calling with no argument yields the default baseline alone.
 * @returns The composed system prompt string.
 */
declare function composeSubagentPrompt(parts?: SubagentPromptParts): string;
646
/**
 * Render a short bullet list summarising a roster — useful for stuffing
 * into `composeDirectorPrompt({ rosterSummary })` so the director model
 * can see available roles without scanning tool descriptions.
 *
 * Each entry: `- <role-id>: <name>[ (provider/model)] — <prompt-headline>`
 * The prompt headline is the first non-empty line of `config.prompt`,
 * truncated to 80 chars. An entry is skipped entirely when its role has
 * no prompt.
 *
 * @param roster - Map from role id to a minimal role config (name plus
 *   optional provider, model, prompt, and role fields).
 * @returns The rendered bullet list, one line per summarised role.
 */
declare function rosterSummaryFromConfigs(roster: Record<string, {
  name: string;
  provider?: string;
  model?: string;
  prompt?: string;
  role?: string;
}>): string;
662
+
663
/**
 * Pre-built subagent role configurations for the WrongStack fleet.
 * These can be passed to `MultiAgentHost.spawn()` or used as templates
 * for the director's roster.
 */

/**
 * Audit Log Agent — analyzes session logs, event streams, and traces.
 * Use for: post-mortems, trend analysis, operational insights.
 */
declare const AUDIT_LOG_AGENT: SubagentConfig;
/**
 * Bug Hunter Agent — systematic bug and code smell detection.
 * Use for: pre-refactoring health checks, code review, regression prevention.
 */
declare const BUG_HUNTER_AGENT: SubagentConfig;
/**
 * Refactor Planner Agent — structured refactoring planning.
 * Use for: large rewrites, technical debt reduction, architecture improvements.
 */
declare const REFACTOR_PLANNER_AGENT: SubagentConfig;
/**
 * Security Scanner Agent — vulnerability and secret detection.
 * Use for: CI checks, pre-release audits, dependency vulnerability scanning.
 */
declare const SECURITY_SCANNER_AGENT: SubagentConfig;
/**
 * All pre-built agents in a map for easy lookup by role.
 * NOTE(review): key semantics (role id vs. agent name) are not visible
 * here — confirm against the implementing module.
 */
declare const FLEET_ROSTER: Record<string, SubagentConfig>;
/** Quick-access list for spawning all pre-built agents at once. */
declare const ALL_FLEET_AGENTS: SubagentConfig[];
693
+
694
// Public surface of the multi-agent module: prompt composers and their part
// types, the default preamble/baseline constants, the pre-built fleet roster,
// and the coordinator/bridge/usage machinery declared earlier in this file.
export { ALL_FLEET_AGENTS, AUDIT_LOG_AGENT, type AgentFactory, type AgentFactoryResult, type AgentRunnerOptions, BUG_HUNTER_AGENT, DEFAULT_DIRECTOR_PREAMBLE, DEFAULT_SUBAGENT_BASELINE, DefaultMultiAgentCoordinator, Director, DirectorBudgetError, type DirectorPromptParts, type DirectorSessionFactory, type DirectorSessionFactoryOptions, FLEET_ROSTER, FleetBus, type FleetEvent, type FleetHandler, type FleetUsage, FleetUsageAggregator, InMemoryAgentBridge, type MultiAgentCoordinatorOptions, REFACTOR_PLANNER_AGENT, SECURITY_SCANNER_AGENT, type SubagentPromptParts, type SubagentUsageSnapshot, composeDirectorPrompt, composeSubagentPrompt, makeAgentSubagentRunner, makeDirectorSessionFactory, rosterSummaryFromConfigs };