@wrongstack/core 0.1.7 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
1
1
  import { g as Logger, f as LogLevel, P as PathResolver, r as ModelsRegistry, O as EventBus, z as ResolvedModel, j as MemoryStore, i as MemoryScope, S as SecretScrubber, t as PermissionPolicy, I as InputReader, s as PermissionDecision, B as RetryPolicy, E as ErrorHandler, a as Compactor, R as RecoveryDecision, H as SkillLoader, J as SkillManifest, G as SkillEntry, b as Config, c as ConfigLoader, d as ConfigStore, C as CompactReport, a0 as MiddlewareHandler, p as ModelsDevPayload, A as ResolvedProvider, W as WireFamily, n as ModeStore, l as ModeConfig, k as Mode, M as MCPServerConfig } from '../mode-Pjt5vMS6.js';
2
2
  import { u as TokenCounter, U as Usage, C as CacheStats, p as SessionStore, o as SessionMetadata, r as SessionWriter, l as ResumedSession, S as SessionData, q as SessionSummary, c as ContentBlock, v as Tool, a0 as Context, i as ProviderError, h as Provider, M as Message, F as ToolUseBlock, B as ToolResultBlock, n as SessionEvent } from '../provider-txgB0Oq9.js';
3
- import { h as AttachmentStore, A as AddAttachmentInput, g as AttachmentRef, d as Attachment, S as SecretVault, x as MultiAgentCoordinator, w as MultiAgentConfig, a9 as SubagentRunner, a5 as SubagentConfig, X as SpawnResult, al as TaskSpec, B as BridgeMessage, a as AgentBridge, l as CoordinatorStatus, aj as TaskResult, aC as Agent, aE as AgentInput, j as BridgeTransport, b as AgentBridgeConfig, o as DoneCondition, aM as RunResult, a4 as Specification, Y as SpecAnalysis, a3 as SpecValidationResult, af as TaskGraph, ag as TaskNode, ae as TaskFilter, ak as TaskSort, ai as TaskProgress, an as TaskType, ah as TaskPriority, at as ToolExecutorOptions, au as ToolExecutorStrategy, ao as ToolBatchResult, O as SessionReader, n as DefaultSessionReaderOptions, N as SessionQuery, T as SessionSummaryLite, R as SessionSearchQuery, Q as SessionSearchHit, L as SessionExportOptions, u as MetricsSink, s as MetricLabels, v as MetricsSnapshot, q as HealthRegistry, H as HealthCheck, c as AggregateHealth, aw as Tracer, W as Span } from '../session-reader-7AutWHut.js';
4
- export { aG as BudgetExceededError, aH as BudgetKind, aI as BudgetLimits, aJ as BudgetUsage, aN as SubagentBudget } from '../session-reader-7AutWHut.js';
3
+ import { h as AttachmentStore, A as AddAttachmentInput, g as AttachmentRef, d as Attachment, S as SecretVault, x as MultiAgentCoordinator, w as MultiAgentConfig, a9 as SubagentRunner, a5 as SubagentConfig, X as SpawnResult, al as TaskSpec, B as BridgeMessage, a as AgentBridge, l as CoordinatorStatus, aj as TaskResult, aC as Agent, aE as AgentInput, j as BridgeTransport, b as AgentBridgeConfig, o as DoneCondition, aM as RunResult, a4 as Specification, Y as SpecAnalysis, a3 as SpecValidationResult, af as TaskGraph, ag as TaskNode, ae as TaskFilter, ak as TaskSort, ai as TaskProgress, an as TaskType, ah as TaskPriority, at as ToolExecutorOptions, au as ToolExecutorStrategy, ao as ToolBatchResult, O as SessionReader, n as DefaultSessionReaderOptions, N as SessionQuery, T as SessionSummaryLite, R as SessionSearchQuery, Q as SessionSearchHit, L as SessionExportOptions, u as MetricsSink, s as MetricLabels, v as MetricsSnapshot, q as HealthRegistry, H as HealthCheck, c as AggregateHealth, aw as Tracer, W as Span } from '../session-reader-9sOTgmeC.js';
4
+ export { aG as BudgetExceededError, aH as BudgetKind, aI as BudgetLimits, aJ as BudgetUsage, aN as SubagentBudget } from '../session-reader-9sOTgmeC.js';
5
5
  import { a as WstackPaths } from '../wstack-paths-BGu2INTm.js';
6
6
  import { EventEmitter } from 'node:events';
7
7
 
@@ -882,6 +882,118 @@ interface AgentRunnerOptions {
882
882
  */
883
883
  declare function makeAgentSubagentRunner(opts: AgentRunnerOptions): SubagentRunner;
884
884
 
885
+ /**
886
+ * Single fleet-wide event with subagent attribution. Whatever a child
887
+ * agent emits on its own EventBus gets re-published here, prefixed with
888
+ * `subagentId` so a single subscriber can multiplex across the fleet.
889
+ *
890
+ * The director uses `FleetBus.filter('tool.executed', …)` to see every
891
+ * tool call across the fleet; the TUI uses
892
+ * `FleetBus.subscribe(id, handler)` to render a per-subagent panel.
893
+ */
894
+ interface FleetEvent {
895
+ subagentId: string;
896
+ taskId?: string;
897
+ ts: number;
898
+ type: string;
899
+ payload: unknown;
900
+ }
901
+ type FleetHandler = (event: FleetEvent) => void;
902
+ /**
903
+ * Fan-in for per-subagent EventBuses. Each subagent's bus is plugged in
904
+ * via `attach()`; the FleetBus re-emits every event with subagent
905
+ * attribution. Detachment is automatic via the returned disposer — call
906
+ * it when a subagent terminates so we don't leak listeners.
907
+ *
908
+ * The bus exposes two subscription modes: by `subagentId` (everything
909
+ * from one child) and by `type` (one event-type across the fleet). They
910
+ * compose — if you need a per-subagent + per-type slice, subscribe by
911
+ * type and filter on `event.subagentId` in your handler.
912
+ */
913
+ declare class FleetBus {
914
+ private readonly byId;
915
+ private readonly byType;
916
+ private readonly any;
917
+ /**
918
+ * Hook a subagent's EventBus into the fleet. EventBus is strongly
919
+ * typed and doesn't expose an `onAny` hook, so we subscribe to the
920
+ * canonical set of event types a subagent emits during a run. New
921
+ * event types added to the kernel must be added here too — but the
922
+ * cost is a tiny single line per type, and the explicit list keeps
923
+ * the wire format clear.
924
+ *
925
+ * Returns a disposer that detaches every subscription; call on
926
+ * subagent teardown so the listeners don't outlive the run.
927
+ */
928
+ attach(subagentId: string, bus: EventBus, taskId?: string): () => void;
929
+ /** Subscribe to every event from one subagent. */
930
+ subscribe(subagentId: string, handler: FleetHandler): () => void;
931
+ /** Subscribe to one event type across all subagents. */
932
+ filter(type: string, handler: FleetHandler): () => void;
933
+ /** Subscribe to literally everything. The fleet roll-up uses this. */
934
+ onAny(handler: FleetHandler): () => void;
935
+ emit(event: FleetEvent): void;
936
+ }
937
+ /**
938
+ * Roll-up of token usage + cost across an entire director run. The
939
+ * director's `fleet_status` tool returns this so the model can reason
940
+ * about budget in its next turn ("the researcher already burned $0.40,
941
+ * lean on summaries for the next task").
942
+ */
943
+ interface FleetUsage {
944
+ total: {
945
+ input: number;
946
+ output: number;
947
+ cacheRead: number;
948
+ cacheWrite: number;
949
+ cost: number;
950
+ };
951
+ perSubagent: Record<string, SubagentUsageSnapshot>;
952
+ }
953
+ interface SubagentUsageSnapshot {
954
+ subagentId: string;
955
+ provider?: string;
956
+ model?: string;
957
+ input: number;
958
+ output: number;
959
+ cacheRead: number;
960
+ cacheWrite: number;
961
+ cost: number;
962
+ toolCalls: number;
963
+ iterations: number;
964
+ startedAt: number;
965
+ lastEventAt: number;
966
+ }
967
+ /**
968
+ * Aggregates provider.response + tool.executed events from the FleetBus
969
+ * into a live `FleetUsage` snapshot. Costs are computed by the caller
970
+ * via a `priceLookup(subagentId)` so we don't bake provider-pricing
971
+ * coupling into core; the CLI/tests supply a function that resolves
972
+ * each subagent's per-token rates from the models registry.
973
+ */
974
+ declare class FleetUsageAggregator {
975
+ private readonly bus;
976
+ private readonly priceLookup?;
977
+ private readonly metaLookup?;
978
+ private readonly perSubagent;
979
+ private readonly total;
980
+ constructor(bus: FleetBus, priceLookup?: ((subagentId: string) => {
981
+ input?: number;
982
+ output?: number;
983
+ cacheRead?: number;
984
+ cacheWrite?: number;
985
+ } | undefined) | undefined, metaLookup?: ((subagentId: string) => {
986
+ provider?: string;
987
+ model?: string;
988
+ } | undefined) | undefined);
989
+ /** Live snapshot — safe to call from a tool's execute() body. */
990
+ snapshot(): FleetUsage;
991
+ private ensure;
992
+ private onProviderResponse;
993
+ private onToolExecuted;
994
+ private onIterationStarted;
995
+ }
996
+
885
997
  /**
886
998
  * In-memory pub/sub transport for agent-to-agent messaging.
887
999
  * Subscribers register by agentId and receive messages via callback.
@@ -910,6 +1022,382 @@ declare class InMemoryAgentBridge implements AgentBridge {
910
1022
  }
911
1023
  declare function createMessage<T = unknown>(type: BridgeMessage['type'], from: string, payload: T, to?: string): BridgeMessage<T>;
912
1024
 
1025
+ /**
1026
+ * Director — high-level orchestrator that owns a `MultiAgentCoordinator`,
1027
+ * a `FleetBus`, and a `FleetUsageAggregator`. Exposes a small imperative
1028
+ * API (`spawn`, `assign`, `awaitTasks`, `terminate`, `status`, `usage`)
1029
+ * that's easy to test, and a `tools()` factory that wraps the same API
1030
+ * as agent-callable `Tool`s so an LLM can drive the orchestration.
1031
+ *
1032
+ * This class is intentionally *not* an `Agent`. It's a coordinator +
1033
+ * observability surface. To make it LLM-driven, construct an Agent
1034
+ * with `director.tools()` registered. That keeps the construction
1035
+ * symmetric with how other agents are built and avoids smuggling a
1036
+ * heavy LLM dependency into core just for the director path.
1037
+ */
1038
+ interface DirectorOptions {
1039
+ config: MultiAgentConfig;
1040
+ runner?: SubagentRunner;
1041
+ /**
1042
+ * When set, the director writes a `fleet.json` manifest to this path
1043
+ * recording every spawned subagent (id, provider, model, role, task
1044
+ * ids). Used by `wstack replay <runId>` to rehydrate a fleet. Pass an
1045
+ * absolute file path — the directory must already exist (the
1046
+ * director-session factory creates it when used together).
1047
+ */
1048
+ manifestPath?: string;
1049
+ /**
1050
+ * Optional roster used by `leaderSystemPrompt()` to render a roles
1051
+ * summary into the leader's preamble. Same shape as the roster passed
1052
+ * to `tools()` — typically the same value.
1053
+ */
1054
+ roster?: Record<string, SubagentConfig>;
1055
+ /**
1056
+ * Override the built-in fleet preamble (see `DEFAULT_DIRECTOR_PREAMBLE`).
1057
+ * Pass an empty string to suppress the preamble entirely.
1058
+ */
1059
+ directorPreamble?: string;
1060
+ /**
1061
+ * Override the built-in subagent baseline (see
1062
+ * `DEFAULT_SUBAGENT_BASELINE`). Pass an empty string to suppress.
1063
+ */
1064
+ subagentBaseline?: string;
1065
+ }
1066
+ declare class Director {
1067
+ readonly id: string;
1068
+ readonly fleet: FleetBus;
1069
+ readonly usage: FleetUsageAggregator;
1070
+ /**
1071
+ * Director-side bridge endpoint. Subagents are wired to the same
1072
+ * in-memory transport so the director can `ask()` them synchronously
1073
+ * and they can `send()` progress back. Exposed so external code (e.g.
1074
+ * the TUI) can subscribe to inbound messages.
1075
+ */
1076
+ readonly bridge: InMemoryAgentBridge;
1077
+ private readonly transport;
1078
+ private readonly coordinator;
1079
+ /** Resolves with the matching `TaskResult` the first time the
1080
+ * coordinator emits `task.completed` for a given task id. Each entry
1081
+ * is created lazily on first poll/await and cleared once consumed. */
1082
+ private readonly taskWaiters;
1083
+ /** Cache of completed results in case the consumer asks AFTER the
1084
+ * coordinator already fired the event — `awaitTasks(['t-1'])` after
1085
+ * t-1 finished should resolve immediately, not hang. */
1086
+ private readonly completed;
1087
+ /** Per-subagent provider/model metadata, captured at spawn time so the
1088
+ * FleetUsageAggregator's metaLookup can surface readable rows. */
1089
+ private readonly subagentMeta;
1090
+ private readonly priceLookups;
1091
+ /** Bridge endpoints we created per subagent (so we can `stop()` them
1092
+ * on shutdown and free transport subscriptions). */
1093
+ private readonly subagentBridges;
1094
+ /** Tracks per-spawn config + assigned task ids for manifest writing. */
1095
+ private readonly manifestEntries;
1096
+ private readonly manifestPath?;
1097
+ private readonly roster?;
1098
+ private readonly directorPreamble;
1099
+ private readonly subagentBaseline;
1100
+ constructor(opts: DirectorOptions);
1101
+ /**
1102
+ * Spawn a subagent. Identical to the coordinator's `spawn()` but
1103
+ * captures provider/model metadata for the usage aggregator and
1104
+ * lets the FleetBus attach to the runner's EventBus when the task
1105
+ * actually runs (see `attachSubagentBus`).
1106
+ *
1107
+ * Caller-supplied `priceLookup` is optional but recommended — without
1108
+ * it the `cost` column in `usage.snapshot()` stays at 0.
1109
+ */
1110
+ spawn(config: SubagentConfig, priceLookup?: {
1111
+ input?: number;
1112
+ output?: number;
1113
+ cacheRead?: number;
1114
+ cacheWrite?: number;
1115
+ }): Promise<string>;
1116
+ /**
1117
+ * Synchronously ask a subagent something via the bridge. Sends a
1118
+ * `task` message addressed to the subagent and awaits a matching
1119
+ * reply (matched by message id). Subagent runners that handle these
1120
+ * requests subscribe to `ctx.bridge` and reply with a message whose
1121
+ * `id` equals the incoming request's id (see `InMemoryAgentBridge`'s
1122
+ * `request<T>` implementation).
1123
+ *
1124
+ * Returns the response payload directly (the bridge wrapper is
1125
+ * unwrapped for ergonomics). Times out after `timeoutMs` (default
1126
+ * matches the bridge's own default of 30s) — surface those rejections
1127
+ * to the caller as actionable errors instead of letting tools hang.
1128
+ */
1129
+ ask<T = unknown>(subagentId: string, payload: unknown, timeoutMs?: number): Promise<T>;
1130
+ /**
1131
+ * Read completed task results and format them as a structured text
1132
+ * block the director's LLM can paste into its own context. The
1133
+ * Director keeps every completed `TaskResult` in `completed` so this
1134
+ * is a pure read — no bridge round-trip, cheap to call.
1135
+ *
1136
+ * The returned string is intentionally markdown-flavored: headers per
1137
+ * subagent, a one-line meta row (iter / tools / ms), and the task's
1138
+ * result text. Pass `style: 'json'` for a programmatic shape instead
1139
+ * (useful when the director model is doing structured-output work).
1140
+ */
1141
+ rollUp(taskIds: string[], style?: 'markdown' | 'json'): string;
1142
+ /**
1143
+ * Write the fleet manifest to `manifestPath`. Returns the path written
1144
+ * or null when no path was configured. Captures every spawn + its
1145
+ * assigned tasks — paired with per-subagent JSONLs, this is enough to
1146
+ * replay an entire director run.
1147
+ */
1148
+ writeManifest(): Promise<string | null>;
1149
+ /**
1150
+ * Tear down the director: stop every subagent, close every bridge
1151
+ * endpoint, and (when configured) write the final manifest. Idempotent
1152
+ * — calling shutdown twice is a no-op on the second invocation.
1153
+ */
1154
+ shutdown(): Promise<void>;
1155
+ /**
1156
+ * Hand a task to the coordinator. Returns the assigned task id so
1157
+ * callers can wait on it via `awaitTasks([id])`. The coordinator's
1158
+ * concurrency limit applies — the task may queue before running.
1159
+ */
1160
+ assign(task: TaskSpec): Promise<string>;
1161
+ /**
1162
+ * Block until every task id resolves. Returns results in the same
1163
+ * order as the input. If any task hasn't completed by the time this
1164
+ * is called, the promise hangs until it does — pair with a timeout
1165
+ * at the caller if that's a concern. Resolves immediately for ids
1166
+ * whose results were already cached.
1167
+ */
1168
+ awaitTasks(taskIds: string[]): Promise<TaskResult[]>;
1169
+ terminate(subagentId: string): Promise<void>;
1170
+ terminateAll(): Promise<void>;
1171
+ status(): CoordinatorStatus;
1172
+ /**
1173
+ * Subscribe to coordinator events. Currently only `task.completed` is
1174
+ * exposed (the others are internal lifecycle). Returns an unsubscribe
1175
+ * function. External callers (e.g. the CLI's `MultiAgentHost`) use this
1176
+ * to drive their own pending/results tracking without poking the
1177
+ * coordinator directly.
1178
+ */
1179
+ on(event: 'task.completed', handler: (payload: {
1180
+ task: TaskSpec;
1181
+ result: TaskResult;
1182
+ }) => void): () => void;
1183
+ /**
1184
+ * Snapshot of every task that has resolved (success, failed, timeout,
1185
+ * stopped) since the director started. Returned in completion order
1186
+ * via the internal map's iteration order. Used by `/fleet status` to
1187
+ * paint the completed table without reaching into private state.
1188
+ */
1189
+ completedResults(): TaskResult[];
1190
+ snapshot(): FleetUsage;
1191
+ /**
1192
+ * Compose the leader/director-agent system prompt: fleet preamble +
1193
+ * (optional) roster summary + user base prompt. Pass the result to your
1194
+ * leader Agent's `ctx.systemPrompt` when constructing it.
1195
+ *
1196
+ * `basePrompt` defaults to `config.leaderSystemPrompt` so callers can
1197
+ * use the no-arg form when the multi-agent config already carries it.
1198
+ */
1199
+ leaderSystemPrompt(basePrompt?: string): string;
1200
+ /**
1201
+ * Compose a subagent's system prompt for a given `SubagentConfig`:
1202
+ * baseline + role + task + per-spawn override. Returned by value — does
1203
+ * not mutate the config. Factories (the user-supplied `AgentFactory`)
1204
+ * should call this when building each subagent's Agent so the bridge
1205
+ * contract, role context, and override are all surfaced.
1206
+ *
1207
+ * When `taskBrief` is omitted the Task section is dropped. Pass the
1208
+ * actual task description here to reinforce it in the system prompt
1209
+ * (the runner already passes it as user input — duplicating in the
1210
+ * system prompt is optional but improves anchoring on small models).
1211
+ */
1212
+ subagentSystemPrompt(config: SubagentConfig, taskBrief?: string): string;
1213
+ /**
1214
+ * Build the tool set the LLM-driven director uses to orchestrate.
1215
+ * Returns an array of `Tool` definitions; register these on the
1216
+ * director's `Agent` to expose `spawn_subagent`, `assign_task`, etc.
1217
+ * Each tool's `execute()` delegates straight to the matching method
1218
+ * above.
1219
+ *
1220
+ * Tools all carry `permission: 'auto'` — the *user* has already
1221
+ * approved running the director when they kicked off the run, so
1222
+ * gating individual orchestration calls behind a confirm prompt
1223
+ * would just be noise. The actual subagent tools they spawn are
1224
+ * still permission-checked normally.
1225
+ */
1226
+ tools(roster?: Record<string, SubagentConfig>): Tool[];
1227
+ }
1228
+
1229
+ /**
1230
+ * Per-subagent session factory.
1231
+ *
1232
+ * Director runs produce many parallel transcripts — one per spawned
1233
+ * subagent — and we want them all rooted under the same director-run
1234
+ * directory so a future `wstack replay <runId>` can rehydrate the whole
1235
+ * fleet from a single tree.
1236
+ *
1237
+ * The factory builds (or accepts) a `SessionStore` whose `dir` points at
1238
+ * `<sessionsRoot>/<directorRunId>/`, and returns a small `create()`
1239
+ * function that the orchestration layer calls per-spawn. Each call
1240
+ * yields a fresh `SessionWriter` whose JSONL file lives in that
1241
+ * directory, named by either the caller-supplied `subagentId` (preferred,
1242
+ * so the file name is human-readable) or a derived id.
1243
+ *
1244
+ * **Why a thin factory instead of plumbing options through every spawn
1245
+ * site?** Because the director is the only caller that needs this
1246
+ * isolation pattern, and shoving `sessionStore` options into
1247
+ * `SubagentConfig` would leak storage details into a config shape that
1248
+ * agents and the coordinator have no business knowing about.
1249
+ */
1250
+ interface DirectorSessionFactoryOptions {
1251
+ /**
1252
+ * Either a parent directory where `<directorRunId>/` will be created,
1253
+ * or a pre-built `SessionStore` whose `dir` already points at the
1254
+ * director run directory. Tests pass an in-memory store for isolation;
1255
+ * production code passes the path under `~/.wrongstack/sessions/`.
1256
+ */
1257
+ store?: SessionStore;
1258
+ sessionsRoot?: string;
1259
+ /**
1260
+ * Director run id — namespaces all subagent JSONLs under one folder.
1261
+ * Defaults to a timestamped id; supplied explicitly when resuming a
1262
+ * prior fleet manifest.
1263
+ */
1264
+ directorRunId?: string;
1265
+ }
1266
+ interface DirectorSessionFactory {
1267
+ /** Absolute directory where this director run's transcripts live. */
1268
+ readonly dir: string;
1269
+ /** The director run id used to namespace the directory. */
1270
+ readonly directorRunId: string;
1271
+ /**
1272
+ * Create a fresh `SessionWriter` for the named subagent. Each
1273
+ * subagent gets its own JSONL file. The writer's `id` matches the
1274
+ * supplied `subagentId` so disk paths line up with in-memory ids.
1275
+ */
1276
+ createSubagentSession(args: {
1277
+ subagentId: string;
1278
+ provider?: string;
1279
+ model?: string;
1280
+ title?: string;
1281
+ }): Promise<SessionWriter>;
1282
+ }
1283
+ /**
1284
+ * Build a `DirectorSessionFactory`. Pass either a pre-configured
1285
+ * `SessionStore` (tests) or a `sessionsRoot` path (production). When
1286
+ * neither is supplied the factory throws — there's no sane default for
1287
+ * "where do these JSONLs live".
1288
+ */
1289
+ declare function makeDirectorSessionFactory(opts: DirectorSessionFactoryOptions): DirectorSessionFactory;
1290
+
1291
+ /**
1292
+ * System-prompt composition helpers for the Director ecosystem.
1293
+ *
1294
+ * Two callers need composed prompts:
1295
+ *
1296
+ * 1. The **leader** (the director's own Agent) — needs a preamble that
1297
+ * explains the fleet protocol: when to spawn, when to await, how to
1298
+ * roll up, and the eight orchestration tools it owns.
1299
+ *
1300
+ * 2. Each **subagent** — needs a baseline that explains it has a parent
1301
+ * it can call via the bridge, a role-specific block, the task brief,
1302
+ * and finally any per-spawn `systemPromptOverride` from `SubagentConfig`.
1303
+ *
1304
+ * Both composers are pure functions: feed them parts, they return a string.
1305
+ * No I/O, no side effects, no implicit defaults beyond the ones exported
1306
+ * here. Callers (CLI multi-agent factory, Director itself) decide which
1307
+ * parts to fill in — that keeps the composition seam visible and testable.
1308
+ */
1309
+ /**
1310
+ * Default fleet-protocol preamble injected at the **front** of the
1311
+ * director-agent's system prompt. Kept deliberately short — long preambles
1312
+ * crowd out the user's leader prompt and the LLM stops attending. The tool
1313
+ * descriptions live on the tool definitions themselves; this preamble only
1314
+ * teaches *when* to reach for them.
1315
+ */
1316
+ declare const DEFAULT_DIRECTOR_PREAMBLE = "You are the Director of a multi-agent fleet. You orchestrate worker\nsubagents by spawning them, assigning tasks, awaiting completions, and\nrolling up their outputs into your next decision.\n\nCore fleet tools available to you:\n - spawn_subagent \u2014 create a worker with a chosen provider / model / role\n - assign_task \u2014 hand a piece of work to a specific subagent\n - await_tasks \u2014 block until named task ids complete (parallel-safe)\n - ask_subagent \u2014 synchronously query a running subagent via the bridge\n - roll_up \u2014 aggregate finished tasks into a markdown/json summary\n - terminate_subagent \u2014 abort a stuck worker (use sparingly)\n - fleet_status \u2014 snapshot of all subagents and pending tasks\n - fleet_usage \u2014 token + cost breakdown per subagent and total\n\nWorking rules:\n 1. Decompose first. Before spawning, decide which sub-tasks are\n independent and can run in parallel. Sequential work doesn't need a\n subagent \u2014 do it yourself.\n 2. Match worker to job. Cheap/fast model for triage, capable model for\n synthesis. Different providers per sibling is allowed and encouraged.\n 3. Always pair an assign with an await. Don't fire-and-forget; you owe\n the user a single coherent answer at the end.\n 4. Roll up before deciding. After await_tasks resolves, call roll_up so\n the results are folded back into your context in a compact form.\n 5. Budget is real. Check fleet_usage periodically. If a subagent is\n thrashing, terminate it rather than letting cost climb silently.\n 6. Never claim a subagent's work as your own without verifying it. If a\n result looks wrong, ask_subagent for clarification before passing it\n to the user.";
1317
+ /**
1318
+ * Default baseline prepended to every subagent's system prompt. Tells the
1319
+ * subagent its place in the hierarchy and the bridge contract — without
1320
+ * this, a subagent has no way to know it *can* ask the parent for
1321
+ * clarification, and it will hallucinate answers when context is missing.
1322
+ *
1323
+ * Bridge contract: subagents may `send` progress and `request` answers, but
1324
+ * MAY NOT exfiltrate the parent's full system prompt or tools list. The
1325
+ * baseline reinforces this in plain text — the actual enforcement is at
1326
+ * the bridge transport layer.
1327
+ */
1328
+ declare const DEFAULT_SUBAGENT_BASELINE = "You are a subagent operating under a Director. You were spawned to handle\na specific slice of a larger plan \u2014 do that slice well and report back.\n\nBridge contract:\n - You have a parent (the Director). You may call `request` on the\n parent bridge to ask a clarifying question. Use this sparingly; the\n parent is also working.\n - You MAY NOT request the parent's system prompt, tool list, or other\n subagents' context. Those are not yours to read.\n - Your final task output is what the Director sees. Be concise,\n structured, and self-contained \u2014 assume the Director will paste your\n output into its own context.";
1329
+ /** Parts the leader-prompt composer accepts. All optional. */
1330
+ interface DirectorPromptParts {
1331
+ /** The user's existing leader system prompt — typically what was passed
1332
+ * via `MultiAgentConfig.leaderSystemPrompt`. */
1333
+ basePrompt?: string;
1334
+ /** Override the built-in fleet preamble. Pass empty string to suppress. */
1335
+ directorPreamble?: string;
1336
+ /** Optional roster summary block — a short list of pre-configured roles
1337
+ * the director can spawn (e.g. "researcher, coder, reviewer"). Helps
1338
+ * small models discover the available shapes without scanning tools. */
1339
+ rosterSummary?: string;
1340
+ }
1341
+ /**
1342
+ * Compose the leader/director's system prompt. Order:
1343
+ * 1. Director preamble (fleet protocol)
1344
+ * 2. Roster summary (optional, when provided)
1345
+ * 3. User base prompt (the per-project leader prompt)
1346
+ *
1347
+ * Sections are separated by a blank line. Empty parts are skipped so the
1348
+ * output never contains stray blank-line runs.
1349
+ */
1350
+ declare function composeDirectorPrompt(parts?: DirectorPromptParts): string;
1351
+ /** Parts the subagent-prompt composer accepts. Layered from generic to
1352
+ * specific; later layers override earlier ones when they conflict. */
1353
+ interface SubagentPromptParts {
1354
+ /** Base persona/identity for *every* subagent. Defaults to the bridge
1355
+ * contract baseline. Pass empty string to suppress. */
1356
+ baseline?: string;
1357
+ /** Role-specific block, e.g. "You are a code reviewer. Focus on…". */
1358
+ role?: string;
1359
+ /** Task brief — usually the same string the runner passes as user input,
1360
+ * but exposed here in case the factory wants it duplicated in the
1361
+ * system prompt for reinforcement. */
1362
+ task?: string;
1363
+ /** Final per-spawn override from `SubagentConfig.systemPromptOverride`.
1364
+ * Added last so it wins on conflict — that's by design: the spawn site
1365
+ * knows the most about what this specific subagent should do. */
1366
+ override?: string;
1367
+ }
1368
+ /**
1369
+ * Compose a subagent's system prompt. Order:
1370
+ * 1. Baseline (bridge contract)
1371
+ * 2. Role
1372
+ * 3. Task brief
1373
+ * 4. Per-spawn override
1374
+ *
1375
+ * Same blank-line-separated joining as the director composer.
1376
+ *
1377
+ * Layering rationale: the baseline never needs to change between
1378
+ * subagents; the role is the "what kind of worker is this"; the task is
1379
+ * the "what should you do *now*"; the override is the spawn-site escape
1380
+ * hatch ("…and respond only in JSON"). Putting override last means it
1381
+ * never gets squashed by something earlier in the chain.
1382
+ */
1383
+ declare function composeSubagentPrompt(parts?: SubagentPromptParts): string;
1384
+ /**
1385
+ * Render a short bullet list summarising a roster — useful for stuffing
1386
+ * into `composeDirectorPrompt({ rosterSummary })` so the director model
1387
+ * can see available roles without scanning tool descriptions.
1388
+ *
1389
+ * Each entry: `- <role-id>: <name>[ (provider/model)] — <prompt-headline>`
1390
+ * The prompt headline is the first non-empty line of `config.prompt`,
1391
+ * truncated to 80 chars. Skipped entirely when the role has no prompt.
1392
+ */
1393
+ declare function rosterSummaryFromConfigs(roster: Record<string, {
1394
+ name: string;
1395
+ provider?: string;
1396
+ model?: string;
1397
+ prompt?: string;
1398
+ role?: string;
1399
+ }>): string;
1400
+
913
1401
  type AutonomousResult = RunResult & {
914
1402
  toolCalls: number;
915
1403
  reason?: string;
@@ -950,6 +1438,7 @@ declare class AutonomousRunner {
950
1438
  private readonly doneChecker;
951
1439
  constructor(opts: AutonomousRunnerOptions);
952
1440
  run(): Promise<AutonomousResult>;
1441
+ private runLoop;
953
1442
  stop(): void;
954
1443
  }
955
1444
 
@@ -1714,4 +2203,4 @@ declare const sentinelServer: () => MCPServerConfig;
/**
 * Everything bundled — the full set of built-in MCP servers, keyed by
 * server id. Useful for `wstack mcp add --all`.
 */
declare const allServers: () => Record<string, MCPServerConfig>;
1717
- export { type AbandonedSession, type AgentFactory, type AgentFactoryResult, type AgentRunnerOptions, type AttachmentStoreOptions, AutoCompactionMiddleware, AutonomousRunner, type AutonomousRunnerOptions, type CompactorOptions, type ConfigLoaderOptions, type ConfigMigration, ConfigMigrationError, type ConfigSource, type ContextManagerAction, type ContextManagerInput, type ContextManagerResult, type ContextManagerToolOptions, DEFAULT_CONFIG_MIGRATIONS, DefaultAttachmentStore, DefaultConfigLoader, DefaultConfigStore, DefaultErrorHandler, DefaultHealthRegistry, DefaultLogger, type DefaultLoggerOptions, DefaultMemoryStore, DefaultModeStore, DefaultModelsRegistry, type DefaultModelsRegistryOptions, DefaultMultiAgentCoordinator, DefaultPathResolver, DefaultPermissionPolicy, DefaultRetryPolicy, DefaultSecretScrubber, DefaultSecretVault, DefaultSessionReader, DefaultSessionStore, DefaultSkillLoader, DefaultTaskStore, DefaultTokenCounter, type DoneCheckResult, DoneConditionChecker, type GeneratedTask, HybridCompactor, InMemoryAgentBridge, InMemoryBridgeTransport, InMemoryMetricsSink, IntelligentCompactor, type IntelligentCompactorOptions, LLMSelector, type LLMSelectorOptions, type MemoryStoreOptions, type MetricsServerHandle, type MetricsServerOptions, type MigrationContext, type MigrationResult, type ModeLoaderOptions, type MultiAgentCoordinatorOptions, NoopMetricsSink, NoopTracer, OTelTracer, type OtlpMetricsExporterHandle, type OtlpMetricsExporterOptions, type OtlpTraceExporterHandle, type OtlpTraceExporterOptions, PROMETHEUS_CONTENT_TYPE, type PermissionPolicyOptions, type PersistedQueueItem, QueueStore, RecoveryLock, type RecoveryLockOptions, type SecretVaultOptions, SelectiveCompactor, type SelectiveCompactorOptions, type SessionStoreOptions, type SkillLoaderOptions, SpecDrivenDev, type SpecDrivenDevOptions, SpecParser, TaskFlow, type TaskFlowEventMap, type TaskFlowEventName, type TaskFlowExecutionContext, type TaskFlowOptions, type TaskFlowPhase, TaskGenerator, type 
TaskGeneratorOptions, type TaskStore, TaskTracker, type TaskTrackerOptions, type TaskTransition, ToolExecutor, allServers, awsServer, blockServer, braveSearchServer, buildOtlpMetricsRequest, buildOtlpTracesRequest, classifyFamily, context7Server, contextManagerTool, createContextManagerTool, createMessage, decryptConfigSecrets, encryptConfigSecrets, everArtServer, filesystemServer, githubServer, googleMapsServer, loadProjectModes, loadUserModes, makeAgentSubagentRunner, migratePlaintextSecrets, renderPrometheus, rewriteConfigEncrypted, runConfigMigrations, sentinelServer, slackServer, startMetricsServer, startOtlpMetricsExporter, startOtlpTraceExporter, wireMetricsToEvents };
2206
// Public API surface of the bundle. A single flat export list: runtime
// values (classes, factory functions, constants) are exported plainly,
// while type-only symbols use the `type` modifier so they erase at
// compile time and add nothing to the runtime bundle.
export { type AbandonedSession, type AgentFactory, type AgentFactoryResult, type AgentRunnerOptions, type AttachmentStoreOptions, AutoCompactionMiddleware, AutonomousRunner, type AutonomousRunnerOptions, type CompactorOptions, type ConfigLoaderOptions, type ConfigMigration, ConfigMigrationError, type ConfigSource, type ContextManagerAction, type ContextManagerInput, type ContextManagerResult, type ContextManagerToolOptions, DEFAULT_CONFIG_MIGRATIONS, DEFAULT_DIRECTOR_PREAMBLE, DEFAULT_SUBAGENT_BASELINE, DefaultAttachmentStore, DefaultConfigLoader, DefaultConfigStore, DefaultErrorHandler, DefaultHealthRegistry, DefaultLogger, type DefaultLoggerOptions, DefaultMemoryStore, DefaultModeStore, DefaultModelsRegistry, type DefaultModelsRegistryOptions, DefaultMultiAgentCoordinator, DefaultPathResolver, DefaultPermissionPolicy, DefaultRetryPolicy, DefaultSecretScrubber, DefaultSecretVault, DefaultSessionReader, DefaultSessionStore, DefaultSkillLoader, DefaultTaskStore, DefaultTokenCounter, Director, type DirectorPromptParts, type DirectorSessionFactory, type DirectorSessionFactoryOptions, type DoneCheckResult, DoneConditionChecker, FleetBus, type FleetEvent, type FleetHandler, type FleetUsage, FleetUsageAggregator, type GeneratedTask, HybridCompactor, InMemoryAgentBridge, InMemoryBridgeTransport, InMemoryMetricsSink, IntelligentCompactor, type IntelligentCompactorOptions, LLMSelector, type LLMSelectorOptions, type MemoryStoreOptions, type MetricsServerHandle, type MetricsServerOptions, type MigrationContext, type MigrationResult, type ModeLoaderOptions, type MultiAgentCoordinatorOptions, NoopMetricsSink, NoopTracer, OTelTracer, type OtlpMetricsExporterHandle, type OtlpMetricsExporterOptions, type OtlpTraceExporterHandle, type OtlpTraceExporterOptions, PROMETHEUS_CONTENT_TYPE, type PermissionPolicyOptions, type PersistedQueueItem, QueueStore, RecoveryLock, type RecoveryLockOptions, type SecretVaultOptions, SelectiveCompactor, type SelectiveCompactorOptions, type SessionStoreOptions, type SkillLoaderOptions, SpecDrivenDev, type SpecDrivenDevOptions, SpecParser, type SubagentPromptParts, type SubagentUsageSnapshot, TaskFlow, type TaskFlowEventMap, type TaskFlowEventName, type TaskFlowExecutionContext, type TaskFlowOptions, type TaskFlowPhase, TaskGenerator, type TaskGeneratorOptions, type TaskStore, TaskTracker, type TaskTrackerOptions, type TaskTransition, ToolExecutor, allServers, awsServer, blockServer, braveSearchServer, buildOtlpMetricsRequest, buildOtlpTracesRequest, classifyFamily, composeDirectorPrompt, composeSubagentPrompt, context7Server, contextManagerTool, createContextManagerTool, createMessage, decryptConfigSecrets, encryptConfigSecrets, everArtServer, filesystemServer, githubServer, googleMapsServer, loadProjectModes, loadUserModes, makeAgentSubagentRunner, makeDirectorSessionFactory, migratePlaintextSecrets, renderPrometheus, rewriteConfigEncrypted, rosterSummaryFromConfigs, runConfigMigrations, sentinelServer, slackServer, startMetricsServer, startOtlpMetricsExporter, startOtlpTraceExporter, wireMetricsToEvents };