@bluecopa/harness 0.0.0-snapshot.137

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2863 @@
1
+ import { tool, generateText, stepCountIs, streamText } from 'ai';
2
+ import { anthropic } from '@ai-sdk/anthropic';
3
+ import { z } from 'zod';
4
+ import { randomUUID } from 'crypto';
5
+ import * as fs from 'fs/promises';
6
+ import * as path from 'path';
7
+
8
+ // src/arc/loop.ts
9
+
10
+ // src/agent/types.ts
11
/**
 * Flatten message content into plain text.
 * String content is returned unchanged; structured content keeps only
 * the `text` parts, joined with newlines.
 */
function getTextContent(content) {
  if (typeof content === "string") {
    return content;
  }
  const texts = [];
  for (const part of content) {
    if (part.type === "text") {
      texts.push(part.text);
    }
  }
  return texts.join("\n");
}
17
+
18
+ // src/arc/message-convert.ts
19
/**
 * Convert internal chat history into the model-message shape the `ai`
 * SDK expects.
 *
 * - Leading system messages stay `system`; system messages appearing
 *   after any non-system message are demoted to user messages tagged
 *   "[System] ...".
 * - Assistant messages with tool calls become multi-part content
 *   (optional text part plus one tool-call part per call), carrying
 *   provider metadata through as providerOptions.
 * - Tool messages with recorded results become tool-result parts; tool
 *   messages without results fall back to a "[Tool result]: ..." user
 *   message.
 * - Messages with any other role are dropped.
 */
function toModelMessages(messages) {
  const converted = [];
  let pastPreamble = false; // set once the first non-system message is seen

  for (const msg of messages) {
    if (msg.role === "system") {
      const text = typeof msg.content === "string" ? msg.content : getTextContent(msg.content);
      // Mid-conversation system content is demoted to a tagged user turn.
      converted.push(
        pastPreamble
          ? { role: "user", content: `[System] ${text}` }
          : { role: "system", content: text }
      );
      continue;
    }

    pastPreamble = true;

    if (msg.role === "user") {
      converted.push({ role: "user", content: msg.content });
    } else if (msg.role === "assistant") {
      const text = typeof msg.content === "string" ? msg.content : getTextContent(msg.content);
      const calls = msg.toolCalls ?? [];
      if (calls.length === 0) {
        converted.push({ role: "assistant", content: text });
      } else {
        const parts = text ? [{ type: "text", text }] : [];
        for (const call of calls) {
          const callPart = {
            type: "tool-call",
            toolCallId: call.toolCallId,
            toolName: call.toolName,
            input: call.args
          };
          if (call.providerMetadata) {
            callPart.providerOptions = call.providerMetadata;
          }
          parts.push(callPart);
        }
        const entry = { role: "assistant", content: parts };
        if (msg.providerMetadata) {
          entry.providerOptions = msg.providerMetadata;
        }
        converted.push(entry);
      }
    } else if (msg.role === "tool") {
      const results = msg.toolResults ?? [];
      if (results.length > 0) {
        converted.push({
          role: "tool",
          content: results.map((res) => ({
            type: "tool-result",
            toolCallId: res.toolCallId,
            toolName: res.toolName,
            output: res.isError
              ? { type: "error-text", value: res.result }
              : { type: "text", value: res.result }
          }))
        });
      } else {
        const text = typeof msg.content === "string" ? msg.content : getTextContent(msg.content);
        converted.push({ role: "user", content: `[Tool result]: ${text}` });
      }
    }
    // Any other role is intentionally dropped.
  }

  return converted;
}
84
+
85
+ // src/arc/utils.ts
86
/**
 * Cap a string at `max` characters, appending a marker (on its own line)
 * that reports how many characters were dropped. Note the returned
 * string may exceed `max` by the marker's length.
 */
function truncate(value, max) {
  if (value.length <= max) {
    return value;
  }
  const dropped = value.length - max;
  return `${value.slice(0, max)}\n...[truncated ${dropped} chars]`;
}
91
/** Collapse whitespace runs to single spaces, trim, then truncate to `max`. */
function truncateInline(value, max) {
  const flattened = value.replace(/\s+/g, " ").trim();
  return truncate(flattened, max);
}
94
/**
 * Heuristic: does this string look like a filesystem path?
 * Rejects empty strings and anything containing whitespace; accepts
 * absolute/relative prefixes, any embedded slash, or a trailing
 * file extension.
 */
function looksLikePath(value) {
  if (!value) return false;
  if (/\s/.test(value)) return false;
  if (value.startsWith("/") || value.startsWith("./") || value.startsWith("../")) {
    return true;
  }
  if (value.includes("/")) return true;
  return /\.[A-Za-z0-9]+$/.test(value);
}
98
+
99
+ // src/arc/trace-utils.ts
100
/**
 * Deep-copy a value for trace storage so later mutation cannot alter the
 * recorded trace. Falls back to a JSON round-trip (with a replacer that
 * handles BigInt, Error, and typed arrays) when structuredClone rejects
 * the value.
 */
function cloneForTrace(value) {
  let snapshot;
  try {
    snapshot = structuredClone(value);
  } catch {
    snapshot = JSON.parse(JSON.stringify(value, traceJsonReplacer));
  }
  return snapshot;
}
107
/**
 * JSON.stringify replacer for trace snapshots: stringifies BigInt,
 * reduces Error to {name, message, stack}, and replaces Uint8Array-like
 * buffers with a small descriptor instead of their raw bytes.
 */
function traceJsonReplacer(_key, value) {
  if (typeof value === "bigint") {
    return value.toString();
  }
  if (value instanceof Error) {
    const { name, message, stack } = value;
    return { name, message, stack };
  }
  if (value instanceof Uint8Array) {
    return {
      type: value.constructor.name,
      byteLength: value.byteLength
    };
  }
  return value;
}
124
/**
 * Coerce loosely-typed tool arguments into a well-formed ReadEpisode
 * request: `id` is always stringified; unknown `detail` values are
 * dropped; optional fields are included only when correctly typed.
 */
function normalizeReadEpisodeArgs(args) {
  const normalized = { id: String(args.id ?? "") };
  const { detail, artifactKey, maxTokens } = args;
  if (detail === "summary" || detail === "trace" || detail === "artifacts") {
    normalized.detail = detail;
  }
  if (typeof artifactKey === "string") {
    normalized.artifactKey = artifactKey;
  }
  if (typeof maxTokens === "number") {
    normalized.maxTokens = maxTokens;
  }
  return normalized;
}
134
+
135
+ // src/arc/episode-projection.ts
136
// Character budgets for hydrated episode traces (code elsewhere in this
// module budgets ~4 chars per token).
var DEFAULT_TRACE_MAX_CHARS = 80000; // cap for a full ReadEpisode response
var TRACE_TOOL_RESULT_LIMIT = 8000; // cap per tool result inside a trace
138
/**
 * Render a dispatch record as a compact text block for prompt inclusion.
 * Compact mode emits only the header lines; full mode adds the artifact
 * output, touched files (max 10), recent actions (last 10), and — budget
 * permitting — the worker's final text output. The whole result is
 * capped at options.maxChars (default 4000).
 */
function formatDispatchForPrompt(record, options = {}) {
  const maxChars = options.maxChars ?? 4e3;
  const { artifact, tuple } = record;
  const lines = [
    `Dispatch ${tuple.id} [${artifact.status}]`,
    `steps: ${artifact.stepsUsed}/${tuple.steps}`,
    `summary: ${artifact.summary}`,
    `instruction: ${truncateInline(tuple.instruction, options.compact ? 220 : 800)}`
  ];
  if (options.compact) {
    return truncate(lines.join("\n"), maxChars);
  }
  if (artifact.output) {
    lines.push(`output: ${artifact.output}`);
  }
  const touched = (record.workerResult?.artifacts ?? []).filter(
    (entry) => entry.action === "produced" || entry.action === "modified"
  );
  if (touched.length > 0) {
    lines.push("files_touched:");
    for (const entry of touched.slice(0, 10)) {
      lines.push(` - ${entry.action} ${entry.kind} ${truncateInline(entry.uri, 180)}`);
    }
  }
  const actions = artifact.actions ?? record.workerResult?.actions ?? [];
  if (actions.length > 0) {
    lines.push("actions:");
    for (const action of actions.slice(-10)) {
      lines.push(` - ${truncateInline(action, 200)}`);
    }
  }
  const textOutput = artifact.textOutput ?? record.workerResult?.lastMessage;
  if (textOutput) {
    // Spend whatever budget remains (minus slack) on the worker output,
    // but never less than 500 chars.
    const budget = Math.max(500, maxChars - lines.join("\n").length - 100);
    lines.push(`worker_output:\n${truncate(textOutput, budget)}`);
  }
  return truncate(lines.join("\n"), maxChars);
}
175
/**
 * Build the OODA snapshot fed to the orchestrator each turn: one
 * observation per recent dispatch (last 5), derived beliefs, approaches
 * already disproven by failed/incomplete dispatches, and the current
 * decision-pressure counters.
 */
function buildOodaSnapshot(input) {
  const recent = input.dispatches.slice(-5);

  const observations = [];
  for (const record of recent) {
    const fields = [`${record.tuple.id}: ${record.artifact.status}`, record.artifact.summary];
    observations.push(fields.filter(Boolean).join("; "));
  }

  const disprovenApproaches = recent
    .filter((r) => r.artifact.status === "failed" || r.artifact.status === "incomplete")
    .map((r) => `${r.tuple.id}: ${truncateInline(r.tuple.instruction, 180)} -> ${r.artifact.status}`);

  const beliefs = [];
  if (input.allIncomplete) {
    beliefs.push("No dispatch has produced a complete artifact yet.");
  }
  if (beliefs.length === 0) {
    beliefs.push("No unresolved issue detected.");
  }

  return {
    observations: observations.length > 0 ? observations : ["No completed dispatches yet."],
    beliefs,
    disprovenApproaches,
    blockers: [],
    decisionPressure: {
      turn: input.turn,
      maxTurns: input.maxTurns,
      turnsRemaining: input.turnsRemaining,
      dispatchCount: input.dispatchCount,
      allIncomplete: input.allIncomplete
    }
  };
}
212
/** Render an OODA snapshot as the "## OODA State" prompt section. */
function formatOodaSnapshotForPrompt(snapshot) {
  const pressure = snapshot.decisionPressure;
  const lines = [
    "## OODA State",
    "decision_pressure:",
    ` turn: ${pressure.turn}`,
    ` max_turns: ${pressure.maxTurns}`,
    ` turns_remaining: ${pressure.turnsRemaining}`,
    ` dispatches_so_far: ${pressure.dispatchCount}`,
    ` all_incomplete: ${pressure.allIncomplete}`,
    "observations:",
    formatList(snapshot.observations),
    "beliefs:",
    formatList(snapshot.beliefs),
    "disproven_approaches:",
    formatList(snapshot.disprovenApproaches),
    "blockers:",
    formatList(snapshot.blockers)
  ];
  return lines.join("\n");
}
231
/**
 * Produce the full ReadEpisode response for a dispatch: the dispatch
 * projection followed by the hydrated worker transcript (or a
 * placeholder when the transcript is missing), capped at
 * DEFAULT_TRACE_MAX_CHARS.
 */
async function formatReadEpisodeResult(record, transcriptStore) {
  const transcript = await transcriptStore.get(record.transcriptId);
  const traceSection = transcript
    ? formatTranscriptTrace(transcript.messages, TRACE_TOOL_RESULT_LIMIT)
    : "(transcript not found)";
  const sections = [
    `Dispatch ${record.tuple.id}`,
    "",
    formatDispatchForPrompt(record, { maxChars: 12e3 }),
    "",
    "--- Trace ---",
    traceSection
  ];
  return truncate(sections.join("\n"), DEFAULT_TRACE_MAX_CHARS);
}
244
/**
 * Render transcript messages as a readable trace: one section per
 * message (1-based numbering) with its role, text content, tool calls,
 * and tool results truncated to `toolResultLimit` chars each.
 */
function formatTranscriptTrace(messages, toolResultLimit) {
  const sections = [];
  for (let i = 0; i < messages.length; i++) {
    const message = messages[i];
    const lines = [`message ${i + 1}: ${message.role}`];
    const text = getTextContent(message.content);
    if (text) {
      lines.push(text);
    }
    if (message.toolCalls?.length) {
      lines.push("tool_calls:");
      for (const call of message.toolCalls) {
        lines.push(`- ${call.toolName} ${JSON.stringify(call.args)}`);
      }
    }
    if (message.toolResults?.length) {
      lines.push("tool_results:");
      for (const result of message.toolResults) {
        const status = result.isError ? "error" : "ok";
        lines.push(`- ${result.toolName} ${status} ${truncate(result.result, toolResultLimit)}`);
      }
    }
    sections.push(lines.join("\n"));
  }
  return sections.join("\n\n");
}
267
/** Render items as "- item" bullet lines, or "- none" for an empty list. */
function formatList(items) {
  if (items.length === 0) {
    return "- none";
  }
  return items.map((item) => `- ${item}`).join("\n");
}
271
+
272
+ // src/arc/lcm/tools.ts
273
/**
 * Expand a summary DAG node back toward its sources.
 *
 * Rollup nodes (depth > 0): returns the concatenated child summaries.
 * Leaf nodes (depth 0): returns the stored conversation as an indexed
 * trace, budgeted at ~4 chars per token split evenly across messages;
 * falls back to the original worker transcript (via the dispatch
 * record), then to the node's own summary.
 */
async function expandSummary(deps, summaryId, maxTokens) {
  const node = deps.summaryDAG.getNode(summaryId);
  if (!node) return `Summary node not found: ${summaryId}`;

  if (node.depth !== 0) {
    const children = node.sourceIds
      .map((id) => deps.summaryDAG.getNode(id))
      .filter((child) => !!child);
    return children
      .map((child) => `### ${child.id} (d${child.depth})\n${child.summary}`)
      .join("\n\n");
  }

  const conversationId = node.sourceConversationIds[0];
  if (!conversationId) return node.summary;

  const messages = deps.messageStore.getConversation(conversationId);
  if (messages.length === 0) {
    // No raw messages stored: try hydrating the original worker transcript.
    const record = deps.findDispatchRecord(summaryId);
    if (record) {
      const transcript = await deps.transcriptStore.get(record.transcriptId);
      if (transcript) {
        return formatTranscriptTrace(transcript.messages, 8e3);
      }
    }
    return node.summary;
  }

  const perMessage = Math.floor((maxTokens * 4) / messages.length);
  const trace = messages
    .map((m) => `[${m.role}:${m.index}] ${m.content.slice(0, perMessage)}`)
    .join("\n");
  return truncate(trace, maxTokens * 4);
}
299
/**
 * Describe one summary node (when summaryId is given) or give an
 * overview of the whole summary DAG: node counts per depth, total
 * tokens, and a one-line entry per node.
 */
function describeSummary(deps, summaryId) {
  if (summaryId) {
    const node = deps.summaryDAG.getNode(summaryId);
    if (!node) return `Summary node not found: ${summaryId}`;
    const lines = [
      `ID: ${node.id}`,
      `Depth: ${node.depth}`,
      `Sources: ${node.sourceIds.join(", ")}`,
      `Conversations: ${node.sourceConversationIds.join(", ")}`,
      `Artifacts: ${node.artifacts.join(", ") || "none"}`,
      `Operations: ${node.operations.join(", ") || "none"}`,
      `Outcome: ${node.outcome}`,
      `Tokens: ${node.tokenCount}`,
      `Summary:\n${node.summary}`
    ];
    return lines.join("\n");
  }

  const allNodes = deps.summaryDAG.getAllNodes();
  if (allNodes.length === 0) return "DAG is empty \u2014 no dispatches completed yet.";

  const countsByDepth = new Map();
  let totalTokens = 0;
  for (const node of allNodes) {
    countsByDepth.set(node.depth, (countsByDepth.get(node.depth) ?? 0) + 1);
    totalTokens += node.tokenCount;
  }

  const depthLines = [...countsByDepth.entries()]
    .sort(([a], [b]) => a - b)
    .map(([depth, count]) => ` d${depth}: ${count} nodes`);
  const nodeLines = allNodes.map(
    (n) => ` ${n.id}: d${n.depth}, ${n.outcome}, ${n.tokenCount} tokens`
  );

  return [
    `DAG Overview: ${allNodes.length} total nodes, ~${totalTokens} tokens`,
    "Nodes by depth:",
    ...depthLines,
    "",
    "All nodes:",
    ...nodeLines
  ].join("\n");
}
335
/**
 * Answer a recall query by searching the vector index (top 5) and
 * excerpting each matching transcript: its instruction plus the first
 * 300 chars of its final message.
 */
async function recallDirect(deps, query) {
  const transcriptIds = await deps.vectorIndex.search(query, 5);
  if (transcriptIds.length === 0) {
    return "No relevant history found.";
  }

  const loaded = await Promise.all(
    transcriptIds.map(async (id) => ({ id, transcript: await deps.transcriptStore.get(id) }))
  );

  const excerpts = [];
  for (const { id, transcript } of loaded) {
    if (!transcript) continue;
    const lastEntry = transcript.messages[transcript.messages.length - 1];
    const lastMsg = lastEntry ? getTextContent(lastEntry.content).slice(0, 300) : "(no messages)";
    excerpts.push(`**${id}**: ${transcript.instruction}\nLast result: ${lastMsg}`);
  }

  if (excerpts.length === 0) {
    return "No history available yet.";
  }
  return excerpts.join("\n\n");
}
356
/**
 * Read one stored message at full fidelity: its content plus any tool
 * calls and tool results, one per line. Returns null when the message
 * does not exist.
 */
function historyRead(deps, conversationId, messageIndex) {
  const msg = deps.messageStore.getMessage(conversationId, messageIndex);
  if (!msg) return null;
  const lines = [msg.content];
  for (const tc of msg.toolCalls ?? []) {
    lines.push(`[tool_call] ${tc.toolName} ${JSON.stringify(tc.args)}`);
  }
  for (const tr of msg.toolResults ?? []) {
    lines.push(`[tool_result] ${tr.toolName}: ${tr.result}`);
  }
  return lines.join("\n");
}
372
// --- history_* tool definitions (schemas only; execution is wired up in
// buildHistoryToolEntries) ---

/** Literal-text search across every stored conversation. */
var history_search = tool({
  description: "Search across all stored messages from all conversations (orchestrator + workers). Returns matching excerpts with context. Use to find prior commands, errors, thread IDs, file paths, or any text across the full run history.",
  inputSchema: z.object({
    pattern: z.string().max(500).describe("Search pattern (literal text match) to search for across all messages"),
    conversationId: z.string().max(200).optional().describe('Optional: limit search to a specific conversation (e.g., "orchestrator" or "worker_<tupleId>")'),
    maxResults: z.number().min(1).max(100).optional().describe("Maximum number of results to return (default: 20)")
  })
});

/** Re-expand a compacted summary node into its sources. */
var history_expand = tool({
  description: "Expand a compacted summary back to source messages or child summaries. For leaf nodes: returns the original worker transcript. For rollup nodes: returns the child summaries. Use when a ghost cue or summary reference needs full detail.",
  inputSchema: z.object({
    summaryId: z.string().max(200).describe("The summary node ID to expand (e.g., 'summary_d0_tuple_abc')"),
    maxTokens: z.number().min(100).max(32e3).optional().describe("Approximate maximum tokens to return (default: 8000)")
  })
});

/** Summary-node metadata, or a whole-history overview when no ID is given. */
var history_overview = tool({
  description: "Show summary node metadata (sources, depth, artifacts, operations) or history overview. With summaryId: shows that node's details. Without: shows how many nodes at each depth, what's in context vs compacted.",
  inputSchema: z.object({
    summaryId: z.string().max(200).optional().describe("Optional summary node ID. Omit to get full history overview.")
  })
});

/** Full-fidelity read of one message from run history. */
var history_read = tool({
  description: "Read a specific message from run history at full fidelity. Use after history_search returns a [conversationId:messageIndex] reference and you need the complete, untruncated content.",
  inputSchema: z.object({
    conversationId: z.string().max(200).describe('Conversation ID (e.g., "orchestrator" or "worker_tuple_abc")'),
    messageIndex: z.number().min(0).describe("Message index within the conversation")
  })
});
404
+
405
+ // src/arc/tools.ts
406
// Schema for a single expected artifact a worker should produce.
var expectedArtifactSchema = z.object({
  type: z.enum(["file", "directory", "value", "unknown"]).describe("What kind of artifact or result is expected"),
  path: z.string().optional().describe("Concrete path when type is file or directory"),
  description: z.string().optional().describe("Short natural-language description of the artifact")
});

// Expected-output contract: either the legacy string form or a structured
// object (which also accepts fields from an earlier structured shape).
var expectedOutputSchema = z.union([
  z.string().describe('Legacy format: file path, "file:path", "value:description", or natural-language description'),
  z.object({
    artifacts: z.array(expectedArtifactSchema).optional().describe("Concrete artifacts or values the worker should produce"),
    successCriteria: z.array(z.string()).optional().describe("Observable conditions that mean the dispatch succeeded"),
    verification: z.string().optional().describe("How the worker should verify the result"),
    description: z.string().optional().describe("Short summary of the expected result when artifacts are not known upfront"),
    // Compatibility with an earlier structured shape.
    kind: z.enum(["file", "files", "value", "unknown"]).optional(),
    path: z.string().optional(),
    paths: z.array(z.string()).optional()
  }).describe("Structured expected-output contract")
]);
428
/** Orchestrator tool: fan a task out to a stateless worker. */
var dispatch = tool({
  description: "Dispatch a worker to accomplish a task. Workers are stateless and see only the instruction, input files, and tools. Call multiple times in one response for parallel execution.",
  inputSchema: z.object({
    instruction: z.string().describe("What the worker should do"),
    inputs: z.array(z.string()).optional().describe("Artifact IDs to provide as context"),
    expectedOutput: expectedOutputSchema.describe("Expected artifacts, success criteria, and verification contract"),
    tier: z.enum(["fast", "strong"]).optional().describe("Worker model tier. 'fast' for simple lookups, reads, searches, and summaries. 'strong' (default) for complex reasoning, coding, and multi-step tasks.")
  })
});

/** Orchestrator tool: natural-language query over project history. */
var recall = tool({
  description: "Query project history. Use to check past decisions, approaches, or errors.",
  inputSchema: z.object({
    query: z.string().describe("Natural language question about project history")
  })
});

// Input schema shared by the ReadEpisode tool definition(s).
var readEpisodeInputSchema = z.object({
  id: z.string().describe("Episode, tuple, or transcript ID to read"),
  detail: z.enum(["summary", "trace", "artifacts"]).optional().describe("summary is compact; trace hydrates the worker transcript; artifacts returns produced/observed artifact state"),
  artifactKey: z.string().optional().describe("Optional artifact handle to focus on when detail is artifacts"),
  maxTokens: z.number().optional().describe("Approximate maximum tokens to return")
});
453
/**
 * Worker/orchestrator tool: re-read a prior episode from this run.
 *
 * Fix: an anonymous `tool({...})` expression statement (same schema,
 * slightly different description) used to precede this declaration with
 * its result discarded; the AI SDK's `tool()` only builds and returns a
 * definition object, so that statement was dead code and is removed.
 */
var ReadEpisode = tool({
  description: "Read a prior episode from this task run. Use when the provided episode projection is insufficient and exact prior trace or artifacts are needed.",
  inputSchema: readEpisodeInputSchema
});
461
/** Orchestrator tool: finish the task and hand back the final result. */
var done = tool({
  description: "Mark task complete and return final output.",
  inputSchema: z.object({
    output: z.string().describe("Artifact ID or summary to return")
  })
});

// --- shared scratch-pad tools: notes visible to future workers ---

var ScratchPad_Write = tool({
  description: "Write a note to the shared scratch pad. Future workers will see it listed and can read it.",
  inputSchema: z.object({
    key: z.string().describe("Note name (e.g., 'network-findings', 'port-scan-results')"),
    content: z.string().max(1e5).describe("Note content (max 100KB)")
  })
});

var ScratchPad_Read = tool({
  description: "Read a note from the shared scratch pad.",
  inputSchema: z.object({
    key: z.string().describe("Note name to read")
  })
});

var ScratchPad_List = tool({
  description: "List all notes in the shared scratch pad.",
  inputSchema: z.object({})
});

/** Stream content straight to the user's terminal. */
var Show = tool({
  description: "Show content directly to the user's terminal. Use when the user asked to see a file, output, or result. The content is displayed immediately without waiting for your full response.",
  inputSchema: z.object({
    content: z.string().describe("The content to display (file contents, command output, etc.)"),
    language: z.string().optional().describe("Language hint for syntax highlighting (e.g. 'python', 'typescript')")
  })
});
491
/** Build a successful tool-execution result. */
function ok(output) {
  return { success: true, output };
}

/** Build a failed tool-execution result (output left empty). */
function err(error) {
  return { success: false, output: "", error };
}

// Identity pass-through — presumably a type-cast helper from the original
// TypeScript source (confirm against the TS sources).
var s = (t) => t;
498
/**
 * Tool-registry entries for the history_* tools. Each entry returns an
 * error result (rather than throwing) when its backing dependency is
 * not provided in `deps`.
 */
function buildHistoryToolEntries(deps) {
  const entries = new Map();

  entries.set("history_search", {
    name: "history_search",
    schema: s(history_search),
    execute: async (_p, args) => {
      if (!deps.historySearch) return err("history_search unavailable");
      const pattern = String(args.pattern ?? "");
      const conversationId = args.conversationId;
      const maxResults = args.maxResults ?? 20;
      const options = { maxResults };
      if (conversationId) options.conversationId = conversationId;
      const results = deps.historySearch(pattern, options);
      if (results.length === 0) return ok(`No matches for "${pattern}"`);
      const lines = results.map((r) => `[${r.conversationId}:${r.messageIndex}] ${r.matchContext}`);
      return ok(lines.join("\n"));
    }
  });

  entries.set("history_expand", {
    name: "history_expand",
    schema: s(history_expand),
    execute: async (_p, args) => {
      if (!deps.historyExpand) return err("history_expand unavailable");
      const summaryId = String(args.summaryId ?? "");
      const maxTokens = args.maxTokens ?? 8e3;
      return ok(await deps.historyExpand(summaryId, maxTokens));
    }
  });

  entries.set("history_overview", {
    name: "history_overview",
    schema: s(history_overview),
    execute: async (_p, args) => {
      if (!deps.historyOverview) return err("history_overview unavailable");
      return ok(deps.historyOverview(args.summaryId));
    }
  });

  entries.set("history_read", {
    name: "history_read",
    schema: s(history_read),
    execute: async (_p, args) => {
      if (!deps.historyRead) return err("history_read unavailable");
      const conversationId = String(args.conversationId ?? "");
      const messageIndex = Number(args.messageIndex ?? 0);
      const content = deps.historyRead(conversationId, messageIndex);
      if (content === null) return ok(`No message found: ${conversationId}:${messageIndex}`);
      return ok(content);
    }
  });

  return entries;
}
541
/**
 * Tool-registry entries available to workers: ReadEpisode, Show, the
 * ScratchPad trio, plus every history_* entry. Entries whose backing
 * dependency is missing return an error result instead of throwing.
 */
function buildWorkerToolEntries(deps) {
  const entries = new Map();

  entries.set("ReadEpisode", {
    name: "ReadEpisode",
    schema: s(ReadEpisode),
    execute: async (_p, args) => {
      if (!deps.readEpisode) return err("ReadEpisode unavailable");
      return ok(await deps.readEpisode(normalizeReadEpisodeArgs(args)));
    }
  });

  // Show's execute is a no-op acknowledgment; the display itself
  // presumably happens outside this registry — confirm against callers.
  entries.set("Show", {
    name: "Show",
    schema: s(Show),
    execute: async () => ok("")
  });

  entries.set("ScratchPad_Write", {
    name: "ScratchPad_Write",
    schema: s(ScratchPad_Write),
    execute: async (_p, args) => {
      if (!deps.scratchPad) return err("ScratchPad unavailable");
      await deps.scratchPad.write(String(args.key ?? ""), String(args.content ?? ""));
      return ok(`Written to scratch pad: ${args.key}`);
    }
  });

  entries.set("ScratchPad_Read", {
    name: "ScratchPad_Read",
    schema: s(ScratchPad_Read),
    execute: async (_p, args) => {
      if (!deps.scratchPad) return err("ScratchPad unavailable");
      const note = await deps.scratchPad.read(String(args.key ?? ""));
      return note === null ? ok(`No note found with key: ${args.key}`) : ok(note);
    }
  });

  entries.set("ScratchPad_List", {
    name: "ScratchPad_List",
    schema: s(ScratchPad_List),
    execute: async () => {
      if (!deps.scratchPad) return err("ScratchPad unavailable");
      const keys = await deps.scratchPad.list();
      return keys.length === 0 ? ok("Scratch pad is empty.") : ok(keys.join("\n"));
    }
  });

  for (const [name, entry] of buildHistoryToolEntries(deps)) {
    entries.set(name, entry);
  }
  return entries;
}
585
/**
 * Tool-registry entries for the orchestrator: `recall` and `AskUser`
 * (each registered only when its callback is provided in `deps`) plus
 * every history_* entry.
 */
function buildOrchestratorToolEntries(deps) {
  const entries = new Map();

  if (deps.recall) {
    const recallFn = deps.recall;
    entries.set("recall", {
      name: "recall",
      schema: s(recall),
      execute: async (_p, args) => ok(await recallFn(String(args.query ?? "")))
    });
  }

  if (deps.askUser) {
    const askUserFn = deps.askUser;
    const askUserTool = tool({
      description: "Ask the user a clarifying question and wait for their response.",
      inputSchema: z.object({
        question: z.string().describe("The question to ask the user"),
        options: z.array(z.string()).optional().describe("Optional choices to present")
      })
    });
    entries.set("AskUser", {
      name: "AskUser",
      schema: s(askUserTool),
      execute: async (_p, args) => {
        const question = String(args.question ?? "");
        const options = Array.isArray(args.options) ? args.options.map(String) : undefined;
        return ok(await askUserFn(question, options));
      }
    });
  }

  for (const [name, entry] of buildHistoryToolEntries(deps)) {
    entries.set(name, entry);
  }
  return entries;
}
618
/**
 * Normalize an expected-output contract (structured object or legacy
 * string) into { artifacts, successCriteria?, verification?, description? }.
 *
 * Legacy strings understand the "value:" and "file:" prefixes, bare
 * path-looking strings become file artifacts, and anything else becomes
 * an unknown artifact carrying the text as its description.
 */
function normalizeExpectedOutput(input) {
  if (input && typeof input === "object" && !Array.isArray(input)) {
    const record = input;
    const artifacts = normalizeExpectedArtifacts(record);
    const successCriteria = Array.isArray(record.successCriteria)
      ? record.successCriteria.filter((item) => typeof item === "string")
      : undefined;
    const verification = typeof record.verification === "string" ? record.verification : undefined;
    const description = typeof record.description === "string" ? record.description : undefined;

    const result = {
      artifacts:
        artifacts.length > 0
          ? artifacts
          : [{ type: "unknown", ...(description ? { description } : {}) }]
    };
    if (successCriteria && successCriteria.length > 0) result.successCriteria = successCriteria;
    if (verification) result.verification = verification;
    if (description) result.description = description;
    return result;
  }

  const raw = typeof input === "string" ? input.trim() : "";
  if (!raw) {
    return { artifacts: [{ type: "unknown", description: "unspecified output" }] };
  }
  if (raw.startsWith("value:")) {
    const detail = raw.slice("value:".length).trim();
    return {
      artifacts: [{ type: "value", description: detail || "value output" }],
      description: raw
    };
  }
  if (raw.startsWith("file:")) {
    return {
      artifacts: [{ type: "file", path: raw.slice("file:".length).trim() }],
      description: raw
    };
  }
  if (looksLikePath(raw)) {
    return { artifacts: [{ type: "file", path: raw }], description: raw };
  }
  return { artifacts: [{ type: "unknown", description: raw }], description: raw };
}
668
/**
 * Pull a normalized artifact list out of a structured expected-output
 * record. A non-empty `artifacts` array wins; otherwise the legacy
 * kind/path/paths fields are translated; anything else yields [].
 */
function normalizeExpectedArtifacts(record) {
  if (Array.isArray(record.artifacts)) {
    const direct = record.artifacts.flatMap((item) => normalizeExpectedArtifact(item));
    if (direct.length > 0) return direct;
  }

  const kind = typeof record.kind === "string" ? record.kind : undefined;
  switch (kind) {
    case "file":
      return typeof record.path === "string" ? [{ type: "file", path: record.path }] : [];
    case "files":
      if (!Array.isArray(record.paths)) return [];
      return record.paths
        .filter((entry) => typeof entry === "string")
        .map((entry) => ({ type: "file", path: entry }));
    case "value":
    case "unknown": {
      const artifact = { type: kind };
      if (typeof record.description === "string") artifact.description = record.description;
      return [artifact];
    }
    default:
      return [];
  }
}
696
/**
 * Normalize one raw artifact entry. Returns a single-element array with
 * only the validated fields (type, optional string path/description), or
 * [] when the entry is not an object or has an unrecognized type —
 * suitable for use with flatMap.
 */
function normalizeExpectedArtifact(item) {
  if (!item || typeof item !== "object" || Array.isArray(item)) return [];
  const { type, path: artifactPath, description } = item;
  const validType =
    type === "file" || type === "directory" || type === "value" || type === "unknown";
  if (!validType) return [];
  const artifact = { type };
  if (typeof artifactPath === "string") artifact.path = artifactPath;
  if (typeof description === "string") artifact.description = description;
  return [artifact];
}
710
+
711
+ // src/arc/tool-registry.ts
712
/**
 * Collect the schema of every registry entry that declares one, keyed
 * by tool name.
 */
function toolSchemasFromRegistry(registry) {
  return Object.fromEntries(
    [...registry]
      .filter(([, meta]) => meta.schema)
      .map(([name, meta]) => [name, meta.schema])
  );
}
719
+
720
+ // src/arc/stores/memory-stores.ts
721
/**
 * In-memory transcript store: keeps transcripts in insertion order and
 * indexes them by id for O(1) lookup.
 */
var MemoryTranscriptStore = class {
  transcripts = [];
  byId = new Map();

  /** Store a transcript; a repeated id overwrites the byId entry. */
  async append(transcript) {
    this.transcripts.push(transcript);
    this.byId.set(transcript.id, transcript);
  }

  /** All transcripts in insertion order (defensive copy). */
  async getAll() {
    return this.transcripts.slice();
  }

  /** Transcript by id, or null when unknown. */
  async get(id) {
    return this.byId.get(id) ?? null;
  }
};
735
/**
 * Naive in-memory "vector" index backed by case-insensitive substring
 * matching. load()/save() are no-ops for this implementation.
 */
var MemoryVectorIndex = class {
  entries = [];

  /** Register a document's text under an id. */
  async add(id, text) {
    this.entries.push({ id, text });
  }

  /**
   * Return the ids of up to `k` entries whose text contains `query`
   * (case-insensitive), in insertion order.
   *
   * Fix: the previous scored-sort implementation kept zero-score
   * (non-matching) entries, padding results up to `k`; a query with no
   * matches therefore returned unrelated ids, and callers treating an
   * empty result as "no relevant history" received bogus hits.
   * Non-matching entries are now excluded.
   */
  async search(query, k) {
    const needle = query.toLowerCase();
    return this.entries
      .filter((entry) => entry.text.toLowerCase().includes(needle))
      .slice(0, k)
      .map((entry) => entry.id);
  }

  /** No-op: the in-memory index has nothing to load. */
  async load() {
  }

  /** No-op: the in-memory index persists nothing. */
  async save() {
  }
};
754
/** In-memory scratch pad: named notes in a key -> content map. */
var MemoryScratchPad = class {
  notes = new Map();

  /** Create or overwrite a note. */
  async write(key, content) {
    this.notes.set(key, content);
  }

  /** Note content, or null when the key is unknown. */
  async read(key) {
    return this.notes.get(key) ?? null;
  }

  /** All note keys, in insertion order. */
  async list() {
    return [...this.notes.keys()];
  }

  /** Remove every note. */
  async clear() {
    this.notes.clear();
  }
};
769
/** In-memory artifact store keyed by artifact id. */
var MemoryArtifactStore = class {
  artifacts = new Map();

  /** Create or overwrite an artifact. */
  async set(id, artifact) {
    this.artifacts.set(id, artifact);
  }

  /** Artifact by id, or null when unknown. */
  async get(id) {
    return this.artifacts.get(id) ?? null;
  }

  /** All artifacts as a plain id -> artifact object. */
  async getAll() {
    return Object.fromEntries(this.artifacts);
  }
};
785
/**
 * In-memory session store: snapshots and metadata are kept in separate
 * maps keyed by session id.
 */
var MemorySessionStore = class {
  snapshots = new Map();
  metas = new Map();

  /** Session snapshot by id, or null when unknown. */
  async load(id) {
    return this.snapshots.get(id) ?? null;
  }

  /** Create or overwrite a session snapshot. */
  async save(id, snapshot) {
    this.snapshots.set(id, snapshot);
  }

  /** Session metadata by id, or null when unknown. */
  async getMeta(id) {
    return this.metas.get(id) ?? null;
  }

  /** Create or overwrite session metadata. */
  async saveMeta(id, meta) {
    this.metas.set(id, meta);
  }

  /** All session metas, most recently active first. */
  async list() {
    const all = [...this.metas.values()];
    all.sort((a, b) => b.lastActiveAt - a.lastActiveAt);
    return all;
  }
};
804
+
805
+ // src/arc/lcm/message-store.ts
806
/**
 * In-memory message store: a flat append-only log plus per-conversation
 * buckets, with literal-text grep across all stored content.
 */
var MemoryMessageStore = class _MemoryMessageStore {
  messages = [];
  byConversation = new Map();

  /** Append a message to the log and to its conversation bucket. */
  append(message) {
    this.messages.push(message);
    const bucket = this.byConversation.get(message.conversationId);
    if (bucket) {
      bucket.push(message);
    } else {
      this.byConversation.set(message.conversationId, [message]);
    }
  }

  /** Messages for one conversation (empty array when unknown). */
  getConversation(conversationId) {
    return this.byConversation.get(conversationId) ?? [];
  }

  /** Message with the given per-conversation index, or null. */
  getMessage(conversationId, index) {
    const bucket = this.byConversation.get(conversationId);
    if (!bucket) return null;
    return bucket.find((m) => m.index === index) ?? null;
  }

  /** Snapshot of the full log, in append order. */
  serialize() {
    return this.messages.slice();
  }

  /** Replace all state with the given messages. */
  loadFrom(messages) {
    this.messages.length = 0;
    this.byConversation.clear();
    for (const msg of messages) {
      this.append(msg);
    }
  }

  /** Build a store pre-populated from serialized messages. */
  static hydrate(messages) {
    const store = new _MemoryMessageStore();
    store.loadFrom(messages);
    return store;
  }

  /**
   * Literal-text search (pattern is regex-escaped, case-insensitive)
   * over message content, tool calls, and tool results. Returns at most
   * one hit per message, each with ~80 chars of surrounding context,
   * capped at maxResults (default 50).
   */
  grep(pattern, opts) {
    const maxResults = opts?.maxResults ?? 50;
    const re = new RegExp(escapeRegex(pattern), "gi");
    const scope = opts?.conversationId
      ? this.byConversation.get(opts.conversationId) ?? []
      : this.messages;
    const hits = [];
    for (const msg of scope) {
      if (hits.length >= maxResults) break;
      const haystack = buildSearchText(msg);
      re.lastIndex = 0; // /g/ regexes are stateful; reset before each exec
      const match = re.exec(haystack);
      if (!match) continue;
      const start = Math.max(0, match.index - 80);
      const end = Math.min(haystack.length, match.index + match[0].length + 80);
      hits.push({
        messageId: msg.id,
        conversationId: msg.conversationId,
        messageIndex: msg.index,
        excerpt: match[0],
        matchContext: haystack.slice(start, end)
      });
    }
    return hits;
  }
};
862
// Flatten a stored message's text, tool invocations, and tool outputs into a
// single newline-joined string that grep() can scan.
function buildSearchText(msg) {
  const callLines = msg.toolCalls ? msg.toolCalls.map((tc) => `${tc.toolName} ${JSON.stringify(tc.args)}`) : [];
  const resultLines = msg.toolResults ? msg.toolResults.map((tr) => `${tr.toolName}: ${tr.result}`) : [];
  return [msg.content, ...callLines, ...resultLines].join("\n");
}
876
// Backslash-escape every character that has special meaning inside a RegExp,
// so arbitrary user text can be matched literally.
function escapeRegex(text) {
  const specials = /[.*+?^${}()|[\]\\]/g;
  return text.replace(specials, "\\$&");
}
879
+
880
+ // src/arc/lcm/summary-dag.ts
881
// In-memory DAG of summary nodes: depth-0 leaves are episode summaries and
// higher depths are rollups whose `sourceIds` point at the nodes they cover.
var MemorySummaryDAG = class _MemorySummaryDAG {
  // All nodes, keyed by node id.
  nodes = /* @__PURE__ */ new Map();
  /** Tracks which source IDs have been covered by a parent node */
  coveredBy = /* @__PURE__ */ new Map();
  /** Plain-data snapshot suitable for persistence; see hydrate/loadFrom. */
  serialize() {
    return {
      nodes: [...this.nodes.values()],
      coveredBy: [...this.coveredBy.entries()]
    };
  }
  /** Replace all state with a previously serialized snapshot. */
  loadFrom(data) {
    this.nodes.clear();
    this.coveredBy.clear();
    for (const node of data.nodes) this.nodes.set(node.id, node);
    for (const [id, parent] of data.coveredBy) this.coveredBy.set(id, parent);
  }
  /** Build a DAG pre-populated from a serialized snapshot. */
  static hydrate(data) {
    const dag = new _MemorySummaryDAG();
    dag.loadFrom(data);
    return dag;
  }
  /** Insert a new node (typically a depth-0 episode summary). */
  addLeaf(node) {
    this.nodes.set(node.id, node);
  }
  /**
   * Compaction pass: at each depth (oldest nodes first), if at least
   * `minChildren` uncovered nodes together exceed `softTokenBudget` tokens,
   * group them into rollups of exactly `minChildren` children each.
   * Leftover nodes (< minChildren) stay uncovered. Returns new rollups.
   */
  compact(opts) {
    const minChildren = opts?.minChildren ?? 4;
    const softTokenBudget = opts?.softTokenBudget ?? 8e3;
    const created = [];
    const maxDepth = Math.max(0, ...Array.from(this.nodes.values()).map((n) => n.depth));
    for (let depth = 0; depth <= maxDepth; depth++) {
      const nodesAtDepth = Array.from(this.nodes.values()).filter((n) => n.depth === depth).sort((a, b) => a.createdAt - b.createdAt);
      const uncovered = nodesAtDepth.filter((n) => !this.coveredBy.has(n.id));
      const totalTokens = uncovered.reduce((sum, n) => sum + n.tokenCount, 0);
      if (uncovered.length >= minChildren && totalTokens > softTokenBudget) {
        // Full batches only: i + minChildren must fit within uncovered.
        for (let i = 0; i + minChildren <= uncovered.length; i += minChildren) {
          const batch = uncovered.slice(i, i + minChildren);
          const rollup = buildRollupNode(batch, depth + 1);
          this.nodes.set(rollup.id, rollup);
          for (const child of batch) {
            this.coveredBy.set(child.id, rollup.id);
          }
          created.push(rollup);
        }
      }
    }
    return created;
  }
  /** Look up a node by id; null when unknown. */
  getNode(id) {
    return this.nodes.get(id) ?? null;
  }
  /**
   * All transitive source ids reachable from `id`, deduplicated.
   * `visited` guards against cycles in malformed data.
   */
  getLineage(id, visited = /* @__PURE__ */ new Set()) {
    if (visited.has(id)) return [];
    visited.add(id);
    const node = this.nodes.get(id);
    if (!node) return [];
    const result = [...node.sourceIds];
    for (const sourceId of node.sourceIds) {
      const child = this.nodes.get(sourceId);
      // NOTE(review): depth >= 0 appears to always hold for stored nodes;
      // presumably a guard against malformed entries — confirm.
      if (child && child.depth >= 0) {
        result.push(...this.getLineage(sourceId, visited));
      }
    }
    return [...new Set(result)];
  }
  /**
   * Select a token-budgeted "frontier" of nodes, preferring deeper (more
   * aggregated) nodes; choosing a node marks its whole lineage as covered so
   * shallower duplicates are skipped. Frontier is returned oldest-first.
   */
  getFrontier(budget) {
    const allNodes = Array.from(this.nodes.values());
    if (allNodes.length === 0) return { frontier: [], coveredIds: /* @__PURE__ */ new Set() };
    const maxDepth = Math.max(...allNodes.map((n) => n.depth));
    const frontier = [];
    const coveredIds = /* @__PURE__ */ new Set();
    let tokensUsed = 0;
    // Memoize lineage walks — they can repeat across sibling rollups.
    const lineageCache = /* @__PURE__ */ new Map();
    const cachedLineage = (id) => {
      let result = lineageCache.get(id);
      if (!result) {
        result = this.getLineage(id);
        lineageCache.set(id, result);
      }
      return result;
    };
    // Deepest first: a rollup stands in for everything beneath it.
    for (let depth = maxDepth; depth >= 0; depth--) {
      const nodesAtDepth = allNodes.filter((n) => n.depth === depth).sort((a, b) => a.createdAt - b.createdAt);
      for (const node of nodesAtDepth) {
        if (coveredIds.has(node.id)) continue;
        if (tokensUsed + node.tokenCount <= budget) {
          frontier.push(node);
          tokensUsed += node.tokenCount;
          for (const sourceId of cachedLineage(node.id)) {
            coveredIds.add(sourceId);
          }
        }
      }
    }
    frontier.sort((a, b) => a.createdAt - b.createdAt);
    return { frontier, coveredIds };
  }
  /** Every node in the DAG (no particular order guaranteed by Map values). */
  getAllNodes() {
    return Array.from(this.nodes.values());
  }
};
981
// Merge a batch of summary nodes into one higher-level rollup node that
// aggregates their artifacts, operations, conversations, and outcomes.
function buildRollupNode(children, depth) {
  const dedupe = (values) => [...new Set(values)];
  const allArtifacts = dedupe(children.flatMap((c) => c.artifacts));
  const allOperations = dedupe(children.flatMap((c) => c.operations));
  const allConversationIds = dedupe(children.flatMap((c) => c.sourceConversationIds));
  // Tally child outcomes into a line like "2 complete, 1 failed".
  let complete = 0;
  let incomplete = 0;
  let failed = 0;
  for (const child of children) {
    if (child.outcome === "complete") complete++;
    else if (child.outcome === "incomplete") incomplete++;
    else if (child.outcome === "failed") failed++;
  }
  const outcomeParts = [];
  if (complete) outcomeParts.push(`${complete} complete`);
  if (incomplete) outcomeParts.push(`${incomplete} incomplete`);
  if (failed) outcomeParts.push(`${failed} failed`);
  const aggregateOutcome = outcomeParts.length > 0 ? outcomeParts.join(", ") : "unknown";
  // Rollup ids embed the child span: summary_d<depth>_<firstOrdinal>_<lastOrdinal>.
  const stripPrefix = (child) => child?.id.replace(/^summary_d\d+_/, "") ?? "0";
  const lastChild = children[children.length - 1];
  const id = `summary_d${depth}_${stripPrefix(children[0])}_${stripPrefix(lastChild)}`;
  const summaryLines = [`Rollup [${children.length} episodes]: ${aggregateOutcome}.`];
  if (allArtifacts.length > 0) {
    summaryLines.push(`Produced: ${allArtifacts.join(", ")}`);
  }
  if (allOperations.length > 0) {
    summaryLines.push(`Active threads: ${allOperations.join(", ")}`);
  }
  const summary = summaryLines.join("\n");
  return {
    id,
    depth,
    sourceIds: children.map((c) => c.id),
    sourceConversationIds: allConversationIds,
    summary,
    artifacts: allArtifacts,
    operations: allOperations,
    outcome: aggregateOutcome,
    // Same ~4-chars-per-token heuristic used elsewhere in this module.
    tokenCount: Math.ceil(summary.length / 4),
    createdAt: Date.now()
  };
}
1022
+
1023
+ // src/arc/lcm/context-assembler.ts
1024
/** Rough token estimate for budgeting: roughly 4 characters per token. */
function estimateTokens(text) {
  const CHARS_PER_TOKEN = 4;
  return Math.ceil(text.length / CHARS_PER_TOKEN);
}
1027
// Assemble the orchestrator's model context under a token budget: task
// framing, optional OODA snapshot, compacted-history frontier + ghost cues,
// and finally a tail of the freshest conversation messages.
function assembleContext(opts) {
  const {
    conversationId,
    store,
    dag,
    budget,
    freshTailSize,
    taskContext,
    ooda
  } = opts;
  const messages = [];
  let tokensUsed = 0;
  // Append a user-role text message and charge its estimated token cost.
  const pushUserText = (text) => {
    messages.push({ role: "user", content: text });
    tokensUsed += estimateTokens(text);
  };
  pushUserText(taskContext);
  if (ooda) {
    pushUserText(formatOodaSnapshotForPrompt(ooda));
  }
  // Of the remaining budget, 40% is reserved for the fresh tail and the rest
  // goes to the summary frontier.
  const remainingBudget = budget - tokensUsed;
  const tailBudget = Math.floor(remainingBudget * 0.4);
  const frontierBudget = remainingBudget - tailBudget;
  const { ghostCues, frontierText, ghostCueText } = buildFrontierAndGhostCues(
    dag,
    frontierBudget
  );
  if (frontierText) {
    pushUserText(frontierText);
  }
  if (ghostCueText) {
    pushUserText(ghostCueText);
  }
  // Append the most recent messages until the overall budget would overflow.
  // NOTE(review): slice(-0) === slice(0), so freshTailSize = 0 selects the
  // WHOLE conversation rather than none — confirm that is intended.
  const conversation = store.getConversation(conversationId);
  for (const msg of conversation.slice(-freshTailSize)) {
    const msgTokens = estimateTokens(msg.content);
    if (tokensUsed + msgTokens > budget) break;
    messages.push(storedToAgentMessage(msg));
    tokensUsed += msgTokens;
  }
  const result = { messages, ghostCues, tokenEstimate: tokensUsed };
  if (frontierText != null) result.frontierText = frontierText;
  if (ghostCueText != null) result.ghostCueText = ghostCueText;
  return result;
}
1081
// Assemble a worker's context: the shared DAG/task context (via
// assembleContext, 85% of budget) plus retrieval extras (15%): prior
// transcripts matched by the vector index and the scratch-pad key listing.
// Retrieval text is folded into the returned frontierText.
async function assembleWorkerContext(opts) {
  const { vectorIndex, transcriptStore, scratchPad, instruction, budget } = opts;
  // Reserve 15% of the budget for retrieval sections; the rest for the DAG.
  const retrievalBudget = Math.floor(budget * 0.15);
  const dagBudget = budget - retrievalBudget;
  const base = assembleContext({
    conversationId: "__worker__",
    store: opts.store,
    dag: opts.dag,
    budget: dagBudget,
    freshTailSize: 0,
    taskContext: opts.taskContext
  });
  let retrievalTokensUsed = 0;
  const retrievalSections = [];
  if (vectorIndex && transcriptStore) {
    // Top-3 prior transcripts most similar to the current instruction.
    const transcriptIds = await vectorIndex.search(instruction, 3);
    if (transcriptIds.length > 0) {
      const transcripts = await Promise.all(
        transcriptIds.map((id) => transcriptStore.get(id).then((t) => ({ id, t })))
      );
      const summaries = [];
      for (const { id, t: transcript } of transcripts) {
        if (!transcript) continue;
        // Summarize each transcript as its instruction plus the first 200
        // chars of its final message.
        const lastEntry = transcript.messages[transcript.messages.length - 1];
        const lastMsg = lastEntry ? getTextContent(lastEntry.content) : "";
        const summary = `- **${id}**: ${transcript.instruction}
  Result: ${lastMsg.slice(0, 200)}`;
        const tokens = estimateTokens(summary);
        // Stop adding summaries once the retrieval budget would overflow.
        if (retrievalTokensUsed + tokens > retrievalBudget) break;
        summaries.push(summary);
        retrievalTokensUsed += tokens;
      }
      if (summaries.length > 0) {
        retrievalSections.push(`## Relevant Prior Work
${summaries.join("\n")}`);
      }
    }
  }
  if (scratchPad) {
    // List note keys only — the worker reads bodies via ScratchPad_Read.
    const keys = await scratchPad.list();
    if (keys.length > 0) {
      const section = `## Scratch Notes
Available notes (use ScratchPad_Read to view): ${keys.join(", ")}`;
      const tokens = estimateTokens(section);
      if (retrievalTokensUsed + tokens <= retrievalBudget) {
        retrievalSections.push(section);
        retrievalTokensUsed += tokens;
      }
    }
  }
  if (retrievalSections.length > 0) {
    // Merge retrieval text after any existing frontier text.
    const retrievalText = retrievalSections.join("\n\n");
    const frontierText = base.frontierText ? `${base.frontierText}

${retrievalText}` : retrievalText;
    return {
      ...base,
      frontierText,
      tokenEstimate: base.tokenEstimate + retrievalTokensUsed
    };
  }
  return base;
}
1144
// Render the DAG's budgeted frontier as markdown and emit "ghost cues" for
// uncovered depth-0 episodes that fell outside it, so the agent can hydrate
// them on demand via history tools.
function buildFrontierAndGhostCues(dag, frontierBudget) {
  const ghostCues = [];
  const allNodes = dag.getAllNodes();
  if (allNodes.length === 0) {
    return { ghostCues, frontierText: null, ghostCueText: null };
  }
  const { frontier, coveredIds } = dag.getFrontier(frontierBudget);
  const frontierIds = new Set(frontier.map((n) => n.id));
  let frontierText = null;
  if (frontier.length > 0) {
    const sections = frontier.map((node) => {
      const depthLabel = node.depth === 0 ? "episode" : node.depth === 1 ? "rollup" : `rollup-d${node.depth}`;
      return `### ${node.id} (${depthLabel})
${node.summary}`;
    });
    frontierText = `## Prior Work (${frontier.length} episodes)
${sections.join("\n\n")}`;
  }
  const expandTool = "history_expand";
  const grepTool = "history_search";
  for (const node of allNodes) {
    const inFrontier = frontierIds.has(node.id);
    const covered = coveredIds.has(node.id);
    // Only uncovered depth-0 episodes become ghost cues.
    if (inFrontier || covered || node.depth !== 0) continue;
    ghostCues.push({
      summaryId: node.id,
      conversationIds: node.sourceConversationIds,
      depth: node.depth,
      label: `[compacted \u2014 use ${expandTool}('${node.id}') for detail]`
    });
  }
  let ghostCueText = null;
  if (ghostCues.length > 0) {
    // Cap the listing at 10 cues; note how many were omitted.
    const maxCues = 10;
    const shown = ghostCues.slice(0, maxCues).map((cue) => `- ${cue.summaryId}: ${cue.label}`);
    const overflow = ghostCues.length > maxCues ? `
... and ${ghostCues.length - maxCues} more` : "";
    ghostCueText = `## Compacted History
${shown.join("\n")}${overflow}

Use ${grepTool} to search across all stored messages, or ${expandTool} to hydrate a specific summary.`;
  }
  return { ghostCues, frontierText, ghostCueText };
}
1185
// Render a summary node as a markdown section headed by its id and a
// human-readable depth label (episode / rollup / rollup-dN).
function formatSummaryForContext(node) {
  let depthLabel;
  if (node.depth === 0) {
    depthLabel = "episode";
  } else if (node.depth === 1) {
    depthLabel = "rollup";
  } else {
    depthLabel = `rollup-d${node.depth}`;
  }
  return [`### ${node.id} (${depthLabel})`, node.summary].join("\n");
}
1190
// Project a stored message down to the agent-message shape, dropping storage
// fields (id, conversationId, index) and keeping tool data only when present.
function storedToAgentMessage(msg) {
  const agentMessage = { role: msg.role, content: msg.content };
  if (msg.toolCalls) {
    agentMessage.toolCalls = msg.toolCalls;
  }
  if (msg.toolResults) {
    agentMessage.toolResults = msg.toolResults;
  }
  return agentMessage;
}
1198
+
1199
+ // src/arc/prompts.ts
1200
// System prompt for the top-level orchestrator loop: dispatch (possibly
// parallel) workers, pick model tiers, reuse compacted history, and verify
// worker output before calling done.
var ORCHESTRATOR_SYSTEM_PROMPT = `You accomplish tasks by dispatching workers and synthesizing results.

Dispatch workers for anything requiring tool use. Answer directly with done for questions you can answer from knowledge or prior results.

You can dispatch multiple workers in a single turn \u2014 they run in parallel. Two strategies:
- Decompose: different subtasks in parallel, collate results.
- Explore: same problem, different approaches, take the best.

Each dispatch accepts an optional tier parameter:
- tier: "fast" \u2014 cheap, fast model for simple lookups, file reads, searches, and summarization.
- tier: "strong" (default) \u2014 full-capability model for coding, debugging, complex reasoning, and multi-step tasks.
Use fast when the task is straightforward and doesn't require deep reasoning.

Workers are stateless \u2014 they see only their instruction, input artifacts, and tools. They also have access to history and scratch pad tools, so you don't need to repeat everything.

Older context gets compacted into summaries. Use history_search to find specific prior results, history_read for full content, and history_expand to restore compacted summaries.

Before dispatching, check what prior workers attempted. Do not repeat failed approaches \u2014 change strategy or decompose differently.

Explain your reasoning before each tool call.

Before calling done, use readEpisode to verify the actual worker output \u2014 summaries may be truncated or stale. Call done only after confirming the result matches the task requirements. Re-dispatching costs turns \u2014 only follow up if the result is incomplete, failed, or clearly wrong.`;
// System prompt for stateless dispatched workers: consult scratch pad and
// history first, save progress when low on steps, verify before finishing.
var WORKER_SYSTEM_PROMPT = `Complete the instruction using the available tools. Workers are stateless and cannot interact with the user \u2014 assume reasonable defaults and proceed.

Before starting, check ScratchPad_List for notes from prior workers and use history_search to find relevant prior attempts.

If you are running low on steps, stop and write your progress to ScratchPad_Write \u2014 this is more valuable than one more attempt.

Before finishing, verify your work against the expected output contract. If verification fails, fix it.

Explain your reasoning before each tool call.`;
1231
+
1232
+ // src/arc/shared-types.ts
1233
// Resolve a toolChoice configuration: absent -> "auto"; a function is called
// with the current turn number; anything else is passed through unchanged.
function resolveToolChoice(config, turn) {
  if (!config) {
    return "auto";
  }
  if (typeof config === "function") {
    return config(turn);
  }
  return config;
}
1237
+
1238
+ // src/vercel-agent-loop.ts
1239
// Convert harness agent messages into AI SDK model messages: assistant tool
// calls become tool-call parts, tool results become tool-result parts, and a
// tool message without structured results degrades to a tagged user message.
function toModelMessages2(messages) {
  const out = [];
  const asText = (content) => typeof content === "string" ? content : getTextContent(content);
  for (const msg of messages) {
    switch (msg.role) {
      case "system":
        out.push({ role: "system", content: asText(msg.content) });
        break;
      case "user":
        out.push({ role: "user", content: msg.content });
        break;
      case "assistant": {
        const textContent = asText(msg.content);
        const calls = msg.toolCalls ?? [];
        if (calls.length > 0) {
          const parts = [];
          if (textContent) {
            parts.push({ type: "text", text: textContent });
          }
          for (const tc of calls) {
            const part = {
              type: "tool-call",
              toolCallId: tc.toolCallId,
              toolName: tc.toolName,
              input: tc.args
            };
            if (tc.providerMetadata) {
              part.providerOptions = tc.providerMetadata;
            }
            parts.push(part);
          }
          out.push({ role: "assistant", content: parts });
        } else {
          out.push({ role: "assistant", content: textContent });
        }
        break;
      }
      case "tool": {
        const results = msg.toolResults ?? [];
        if (results.length > 0) {
          const parts = results.map((tr) => ({
            type: "tool-result",
            toolCallId: tr.toolCallId,
            toolName: tr.toolName,
            output: tr.isError ? { type: "error-text", value: tr.result } : { type: "text", value: tr.result }
          }));
          out.push({ role: "tool", content: parts });
        } else {
          // No structured results: fall back to a tagged user message.
          out.push({ role: "user", content: `[Tool result]: ${asText(msg.content)}` });
        }
        break;
      }
      default:
        // Unknown roles are dropped, matching the original pass-over.
        break;
    }
  }
  return out;
}
1293
// Single-step agent loop built on the Vercel AI SDK: each nextAction /
// streamAction call performs exactly one model step (stopWhen: stepCountIs(1))
// and maps the SDK result onto the harness action shapes (tool / tool_batch /
// final).
var VercelAgentLoop = class _VercelAgentLoop {
  // Model id handed to createModel (default from HARNESS_MODEL env).
  model;
  // Factory turning a model id into an AI SDK model instance.
  createModel;
  // Either a plain string or an array of { text, cacheControl } blocks.
  systemPrompt;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  tools;
  // Names of configured tools; calls naming anything else are dropped.
  validToolNames;
  // Static tool choice, or a function of the 0-based turn number.
  toolChoiceConfig;
  providerOptions;
  // Optional per-step hook that can override model / active tool subset.
  prepareStep;
  /** Track tool names called across steps for prepareStep context. */
  toolCallHistory = [];
  step = 0;
  /** Last step's token usage — read after nextAction/streamAction completes. */
  lastUsage;
  constructor(config = {}) {
    this.toolChoiceConfig = config.toolChoice;
    this.model = config.model ?? process.env.HARNESS_MODEL ?? "claude-sonnet-4-5";
    this.createModel = config.createModel ?? anthropic;
    this.tools = config.tools ?? {};
    this.validToolNames = new Set(Object.keys(this.tools));
    this.providerOptions = config.providerOptions;
    this.prepareStep = config.prepareStep;
    this.systemPrompt = config.systemPrompt ?? [
      "You are an agent that accomplishes tasks using tools.",
      "You may call multiple independent tools in a single turn.",
      "Use tools when shell or filesystem access is required.",
      "Use TextEditor for file changes. Use ReadOnlyBash only for read-only inspection or verification commands.",
      "Avoid rewriting the same file multiple times unless a previous run returned an error that requires a fix.",
      "When the task is fully complete, respond with a brief text summary (no tool call)."
    ].join(" ");
    if (config.apiKey) {
      // Side effect: exposes the key process-wide for the default provider.
      process.env.ANTHROPIC_API_KEY = config.apiKey;
    }
  }
  /** Build the `system` parameter for generateText/streamText. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  buildSystemParam() {
    if (typeof this.systemPrompt === "string") return this.systemPrompt;
    // Structured prompt: forward cacheControl under both provider namespaces.
    return this.systemPrompt.map((block) => ({
      role: "system",
      content: block.text,
      ...block.cacheControl ? {
        providerOptions: {
          anthropic: { cacheControl: block.cacheControl },
          openrouter: { cacheControl: block.cacheControl }
        }
      } : {}
    }));
  }
  /** Resolve model + tools for this step via prepareStep callback. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  resolveStep(stepNumber) {
    if (!this.prepareStep) {
      return {
        model: this.model,
        tools: this.tools,
        validNames: this.validToolNames
      };
    }
    const overrides = this.prepareStep({
      stepNumber,
      toolCallHistory: this.toolCallHistory
    });
    if (!overrides) {
      return {
        model: this.model,
        tools: this.tools,
        validNames: this.validToolNames
      };
    }
    const model = overrides.model ?? this.model;
    let tools = this.tools;
    let validNames = this.validToolNames;
    if (overrides.activeTools) {
      // Restrict the tool set to the requested subset for this step.
      const allowed = new Set(overrides.activeTools);
      tools = Object.fromEntries(
        Object.entries(this.tools).filter(([k]) => allowed.has(k))
      );
      validNames = new Set(Object.keys(tools));
    }
    return { model, tools, validNames };
  }
  /** Extract StepUsage from AI SDK usage object. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  static extractUsage(usage) {
    if (!usage) return void 0;
    const u = {};
    if (usage.inputTokens != null) u.inputTokens = usage.inputTokens;
    if (usage.outputTokens != null) u.outputTokens = usage.outputTokens;
    // Cache/reasoning counts may be nested in *TokenDetails or sit on the
    // top-level usage object depending on SDK version — accept both.
    const inputDetails = usage.inputTokenDetails ?? usage;
    const outputDetails = usage.outputTokenDetails ?? usage;
    if (inputDetails.cacheReadTokens != null)
      u.cacheReadTokens = inputDetails.cacheReadTokens;
    if (inputDetails.cacheWriteTokens != null)
      u.cacheWriteTokens = inputDetails.cacheWriteTokens;
    if (outputDetails.reasoningTokens != null)
      u.reasoningTokens = outputDetails.reasoningTokens;
    return Object.keys(u).length > 0 ? u : void 0;
  }
  /**
   * Run one model step and return the resulting action: a single tool call,
   * a tool_batch, or a final text answer. Calls naming tools outside the
   * active set are dropped; if none survive, a final "Unknown tool" action
   * is returned.
   */
  async nextAction(messages, signal) {
    const currentStep = this.step++;
    // prepareStep receives a 1-based step number; toolChoice the 0-based turn.
    const { model, tools, validNames } = this.resolveStep(currentStep + 1);
    const result = await generateText({
      model: this.createModel(model),
      tools,
      toolChoice: resolveToolChoice(this.toolChoiceConfig, currentStep),
      system: this.buildSystemParam(),
      messages: toModelMessages2(messages),
      stopWhen: stepCountIs(1),
      ...signal ? { abortSignal: signal } : {},
      ...this.providerOptions ? { providerOptions: this.providerOptions } : {}
    });
    this.lastUsage = _VercelAgentLoop.extractUsage(result.usage);
    if (result.toolCalls && result.toolCalls.length > 0) {
      // Any text emitted alongside tool calls is surfaced as the rationale.
      const publicRationale = result.text?.trim() || void 0;
      const validCalls = [];
      for (const call of result.toolCalls) {
        const name = call.toolName;
        if (validNames.has(name)) {
          const toolCallId = call.toolCallId;
          const providerMetadata = call.providerMetadata ?? call.experimental_providerMetadata;
          validCalls.push({
            type: "tool",
            name,
            args: call.input,
            ...toolCallId != null ? { toolCallId } : {},
            ...providerMetadata != null ? { providerMetadata } : {}
          });
          this.toolCallHistory.push(name);
        }
      }
      if (validCalls.length === 0) {
        return {
          type: "final",
          content: `Unknown tool: ${result.toolCalls[0].toolName}`
        };
      }
      if (validCalls.length === 1) {
        const [call] = validCalls;
        return publicRationale ? { ...call, publicRationale } : call;
      }
      return {
        type: "tool_batch",
        calls: validCalls,
        ...publicRationale ? { publicRationale } : {}
      };
    }
    const text = result.text?.trim();
    return { type: "final", content: text || "Done." };
  }
  /**
   * Streaming variant of nextAction: yields text_delta events as text
   * arrives and a tool_start event per valid tool call; usage is captured
   * into lastUsage once the stream completes.
   */
  async *streamAction(messages) {
    const currentStep = this.step++;
    const { model, tools, validNames } = this.resolveStep(currentStep + 1);
    const result = streamText({
      model: this.createModel(model),
      tools,
      toolChoice: resolveToolChoice(this.toolChoiceConfig, currentStep),
      system: this.buildSystemParam(),
      messages: toModelMessages2(messages),
      stopWhen: stepCountIs(1),
      ...this.providerOptions ? { providerOptions: this.providerOptions } : {}
    });
    // Accumulates streamed tool-input JSON keyed by tool-call id.
    // NOTE(review): the accumulated text is never read back — tool_start uses
    // args/input from the final tool-call event; confirm this buffer is
    // intentionally write-only.
    const toolArgs = /* @__PURE__ */ new Map();
    for await (const part of result.fullStream) {
      if (part.type === "text-delta") {
        yield { type: "text_delta", text: part.text };
      }
      if (part.type === "tool-input-start") {
        toolArgs.set(part.id, "");
      }
      if (part.type === "tool-input-delta") {
        toolArgs.set(part.id, (toolArgs.get(part.id) ?? "") + part.delta);
      }
      if (part.type === "tool-call") {
        const name = part.toolName;
        if (validNames.has(name)) {
          const p = part;
          const args = p.args ?? p.input ?? {};
          const toolCallId = p.toolCallId;
          yield {
            type: "tool_start",
            name,
            args,
            ...toolCallId != null ? { toolCallId } : {}
          };
          this.toolCallHistory.push(name);
        }
      }
    }
    try {
      const usage = await result.usage;
      this.lastUsage = _VercelAgentLoop.extractUsage(usage);
    } catch {
      // Stream failed before usage resolved; leave usage unset.
      this.lastUsage = void 0;
    }
  }
};
1491
+
1492
+ // src/arc/worker.ts
1493
// Build a short human-readable preview of a tool call's argument values for
// progress events and the action log. Each value is capped at 200 chars.
// Fix: the previous String(value) rendered objects as "[object Object]" and
// arrays as bare comma-joins, losing all information — serialize them instead
// (with a String() fallback for circular structures JSON.stringify rejects).
function summarizeArgs(args) {
  const preview = (value) => {
    if (typeof value === "object" && value !== null) {
      try {
        return JSON.stringify(value).slice(0, 200);
      } catch {
        return String(value).slice(0, 200);
      }
    }
    return String(value).slice(0, 200);
  };
  return Object.values(args).map(preview).join(", ");
}
1496
+ async function runWorker(config) {
1497
+ const {
1498
+ task,
1499
+ instruction,
1500
+ expectedOutput,
1501
+ lcmContext,
1502
+ inputArtifacts,
1503
+ tools,
1504
+ toolRegistry,
1505
+ maxSteps,
1506
+ toolProvider,
1507
+ createModel,
1508
+ model,
1509
+ workDir,
1510
+ signal,
1511
+ systemPromptPrefix,
1512
+ systemPromptSuffix,
1513
+ providerOptions,
1514
+ orchestratorContext,
1515
+ tupleId,
1516
+ onProgress,
1517
+ onTrace,
1518
+ hookRunner
1519
+ } = config;
1520
+ const traceTupleId = tupleId ?? "worker";
1521
+ const systemPrompt = [];
1522
+ if (systemPromptPrefix) {
1523
+ systemPrompt.push({
1524
+ text: systemPromptPrefix,
1525
+ cacheControl: { type: "ephemeral" }
1526
+ });
1527
+ }
1528
+ systemPrompt.push({
1529
+ text: WORKER_SYSTEM_PROMPT,
1530
+ cacheControl: { type: "ephemeral" }
1531
+ });
1532
+ if (systemPromptSuffix) {
1533
+ systemPrompt.push({
1534
+ text: systemPromptSuffix,
1535
+ cacheControl: { type: "ephemeral" }
1536
+ });
1537
+ }
1538
+ let userContent = "";
1539
+ if (orchestratorContext) {
1540
+ userContent += `## Orchestrator Context
1541
+ ${orchestratorContext}
1542
+
1543
+ `;
1544
+ }
1545
+ if (task) {
1546
+ userContent += `## Original Task
1547
+ ${task}
1548
+
1549
+ `;
1550
+ }
1551
+ userContent += [
1552
+ `## Workspace`,
1553
+ `Current workspace root: ${workDir}`,
1554
+ `## Worker Budget`,
1555
+ `max_steps: ${maxSteps}`,
1556
+ ""
1557
+ ].join("\n");
1558
+ if (expectedOutput) {
1559
+ userContent += `## Expected Output Contract
1560
+ ${formatExpectedOutput(expectedOutput)}
1561
+
1562
+ `;
1563
+ }
1564
+ if (lcmContext) {
1565
+ if (lcmContext.frontierText) {
1566
+ userContent += `${lcmContext.frontierText}
1567
+
1568
+ `;
1569
+ }
1570
+ if (lcmContext.ghostCueText) {
1571
+ userContent += `${lcmContext.ghostCueText}
1572
+
1573
+ `;
1574
+ }
1575
+ }
1576
+ userContent += `## Instruction
1577
+ ${instruction}`;
1578
+ if (inputArtifacts.size > 0) {
1579
+ userContent += "\n\n## Input Files\n";
1580
+ for (const [id, content] of inputArtifacts) {
1581
+ userContent += `
1582
+ ### ${id}
1583
+ \`\`\`
1584
+ ${content}
1585
+ \`\`\`
1586
+ `;
1587
+ }
1588
+ }
1589
+ const allTools = tools;
1590
+ const loop = new VercelAgentLoop({
1591
+ model,
1592
+ createModel,
1593
+ systemPrompt,
1594
+ tools: allTools,
1595
+ ...providerOptions ? { providerOptions } : {}
1596
+ });
1597
+ const messages = [{ role: "user", content: userContent }];
1598
+ let stepsUsed = 0;
1599
+ let lastMessage = "";
1600
+ let output = null;
1601
+ let status = "incomplete";
1602
+ const actions = [];
1603
+ const artifacts = [];
1604
+ while (stepsUsed < maxSteps) {
1605
+ if (signal?.aborted) {
1606
+ status = "failed";
1607
+ lastMessage = "Worker aborted";
1608
+ break;
1609
+ }
1610
+ if (toolProvider.activeThreads) {
1611
+ const updates = toolProvider.activeThreads();
1612
+ if (updates.length > 0) {
1613
+ const lines = updates.map((u) => {
1614
+ const threadStatus = u.error ? `Error: ${u.error}` : u.isComplete ? `Completed (exit code: ${u.exitCode})` : "Running";
1615
+ const parts = [`[auto] Thread ${u.threadId}: ${threadStatus}`];
1616
+ if (u.output) parts.push(u.output);
1617
+ return parts.join("\n");
1618
+ });
1619
+ messages.push({
1620
+ role: "user",
1621
+ content: `[System: background thread updates]
1622
+ ${lines.join("\n\n")}`
1623
+ });
1624
+ }
1625
+ }
1626
+ const step = stepsUsed + 1;
1627
+ const modelStartedAt = Date.now();
1628
+ onProgress?.({ kind: "model_start", step, maxSteps });
1629
+ onTrace?.({
1630
+ scope: "worker",
1631
+ phase: "model_input",
1632
+ tupleId: traceTupleId,
1633
+ step,
1634
+ model,
1635
+ system: systemPrompt.map((b) => b.text).join("\n\n"),
1636
+ messages: cloneForTrace(messages),
1637
+ toolNames: Object.keys(allTools)
1638
+ });
1639
+ let action;
1640
+ try {
1641
+ action = await loop.nextAction(messages, signal);
1642
+ } catch (error) {
1643
+ if (signal?.aborted) {
1644
+ status = "interrupted";
1645
+ lastMessage = "Worker interrupted by user";
1646
+ break;
1647
+ }
1648
+ onProgress?.({
1649
+ kind: "model_error",
1650
+ step,
1651
+ durationMs: Date.now() - modelStartedAt,
1652
+ error: summarizeError(error)
1653
+ });
1654
+ throw error;
1655
+ }
1656
+ stepsUsed++;
1657
+ onTrace?.({
1658
+ scope: "worker",
1659
+ phase: "model_output",
1660
+ tupleId: traceTupleId,
1661
+ step,
1662
+ action: cloneForTrace(action)
1663
+ });
1664
+ const actionToolNames = toolNamesForAction(action);
1665
+ const publicRationale = publicRationaleForAction(action);
1666
+ const missingPublicRationale = actionToolNames.length > 0 && !publicRationale;
1667
+ if (missingPublicRationale) {
1668
+ onTrace?.({
1669
+ scope: "worker",
1670
+ phase: "public_rationale_missing",
1671
+ tupleId: traceTupleId,
1672
+ step,
1673
+ toolNames: actionToolNames
1674
+ });
1675
+ }
1676
+ onProgress?.({
1677
+ kind: "model_complete",
1678
+ step,
1679
+ actionType: action.type,
1680
+ durationMs: Date.now() - modelStartedAt,
1681
+ ...actionToolNames.length > 0 ? { toolNames: actionToolNames } : {},
1682
+ ...publicRationale ? { publicRationale } : {},
1683
+ ...missingPublicRationale ? { missingPublicRationale } : {},
1684
+ ...action.type === "final" ? { outputSummary: summarizeText(action.content, 240) } : {},
1685
+ ...loop.lastUsage?.inputTokens != null ? { inputTokens: loop.lastUsage.inputTokens } : {},
1686
+ ...loop.lastUsage?.outputTokens != null ? { outputTokens: loop.lastUsage.outputTokens } : {}
1687
+ });
1688
+ if (action.type === "final") {
1689
+ messages.push({ role: "assistant", content: action.content });
1690
+ lastMessage = action.content;
1691
+ status = "complete";
1692
+ output ??= action.content;
1693
+ break;
1694
+ }
1695
+ if (action.type === "tool" || action.type === "tool_batch") {
1696
+ const calls = action.type === "tool" ? [action] : action.calls;
1697
+ const callsWithIds = calls.map((call) => ({
1698
+ ...call,
1699
+ toolCallId: call.toolCallId ?? `call_${Math.random().toString(36).slice(2)}`
1700
+ }));
1701
+ messages.push({
1702
+ role: "assistant",
1703
+ content: publicRationale ?? "",
1704
+ toolCalls: callsWithIds.map((c) => ({
1705
+ toolCallId: c.toolCallId,
1706
+ toolName: c.name,
1707
+ args: c.args,
1708
+ ...c.providerMetadata ? { providerMetadata: c.providerMetadata } : {}
1709
+ }))
1710
+ });
1711
+ const results = [];
1712
+ for (const call of callsWithIds) {
1713
+ const toolCallId = call.toolCallId;
1714
+ const argSummary = summarizeArgs(call.args);
1715
+ const toolStartedAt = Date.now();
1716
+ onTrace?.({
1717
+ scope: "worker",
1718
+ phase: "tool_call",
1719
+ tupleId: traceTupleId,
1720
+ step: stepsUsed,
1721
+ toolCallId,
1722
+ toolName: call.name,
1723
+ args: cloneForTrace(call.args)
1724
+ });
1725
+ onProgress?.({
1726
+ kind: "tool_start",
1727
+ step: stepsUsed,
1728
+ toolCallId,
1729
+ toolName: call.name,
1730
+ ...argSummary ? { argsSummary: argSummary } : {}
1731
+ });
1732
+ if (hookRunner) {
1733
+ const pre = await hookRunner.run({ event: "PreToolUse", toolName: call.name, input: call.args });
1734
+ if (!pre.allow) {
1735
+ const blockedResult = { success: false, output: "", error: pre.reason ?? "blocked by pre-hook" };
1736
+ results.push({ toolCallId, toolName: call.name, result: toolResultText(blockedResult), isError: true, durationMs: Date.now() - toolStartedAt });
1737
+ actions.push(`${call.name}${argSummary ? `: ${argSummary}` : ""} \u2192 ERROR: ${pre.reason ?? "blocked by pre-hook"}`);
1738
+ continue;
1739
+ }
1740
+ }
1741
+ let result2;
1742
+ try {
1743
+ const meta = toolRegistry.get(call.name);
1744
+ if (meta?.execute) {
1745
+ result2 = await meta.execute(toolProvider, call.args, workDir);
1746
+ } else {
1747
+ result2 = { success: false, output: "", error: `Unknown tool: ${call.name}` };
1748
+ }
1749
+ } catch (error) {
1750
+ onProgress?.({
1751
+ kind: "tool_error",
1752
+ step: stepsUsed,
1753
+ toolCallId,
1754
+ toolName: call.name,
1755
+ durationMs: Date.now() - toolStartedAt,
1756
+ error: summarizeError(error)
1757
+ });
1758
+ throw error;
1759
+ }
1760
+ if (hookRunner) {
1761
+ await hookRunner.run({ event: "PostToolUse", toolName: call.name, input: call.args, output: result2 });
1762
+ }
1763
+ const resultText = toolResultText(result2);
1764
+ const exitCode = result2.metadata?.exitCode;
1765
+ onTrace?.({
1766
+ scope: "worker",
1767
+ phase: "tool_result",
1768
+ tupleId: traceTupleId,
1769
+ step: stepsUsed,
1770
+ toolCallId,
1771
+ toolName: call.name,
1772
+ result: cloneForTrace(result2),
1773
+ resultText
1774
+ });
1775
+ onProgress?.({
1776
+ kind: "tool_complete",
1777
+ step: stepsUsed,
1778
+ toolCallId,
1779
+ toolName: call.name,
1780
+ success: result2.success,
1781
+ durationMs: Date.now() - toolStartedAt,
1782
+ outputSummary: summarizeText(resultText, 300),
1783
+ output: resultText,
1784
+ ...exitCode !== void 0 ? { exitCode } : {}
1785
+ });
1786
+ results.push({
1787
+ toolCallId,
1788
+ toolName: call.name,
1789
+ result: resultText,
1790
+ isError: !result2.success,
1791
+ durationMs: Date.now() - toolStartedAt
1792
+ });
1793
+ if (result2.success && result2.artifact) {
1794
+ artifacts.push(result2.artifact);
1795
+ if (result2.artifact.action !== "observed" && result2.artifact.kind === "file") {
1796
+ output = result2.artifact.uri;
1797
+ }
1798
+ }
1799
+ const resultSummary = summarizeText(resultText, 150);
1800
+ actions.push(
1801
+ `${call.name}${argSummary ? `: ${argSummary}` : ""} \u2192 ${result2.success ? resultSummary : `ERROR: ${resultSummary}`}`
1802
+ );
1803
+ }
1804
+ messages.push({
1805
+ role: "tool",
1806
+ content: results.map((r) => `[${r.toolName}]: ${r.result}`).join("\n"),
1807
+ toolResults: results
1808
+ });
1809
+ }
1810
+ }
1811
+ if (status === "incomplete" && stepsUsed >= maxSteps) {
1812
+ lastMessage = lastMessage || "Worker ran out of steps";
1813
+ }
1814
+ onProgress?.({
1815
+ kind: "worker_result",
1816
+ status,
1817
+ stepsUsed,
1818
+ summary: summarizeText(lastMessage, 300)
1819
+ });
1820
+ const result = {
1821
+ transcript: messages,
1822
+ output,
1823
+ status,
1824
+ stepsUsed,
1825
+ lastMessage,
1826
+ actions,
1827
+ artifacts
1828
+ };
1829
+ onTrace?.({
1830
+ scope: "worker",
1831
+ phase: "worker_result",
1832
+ tupleId: traceTupleId,
1833
+ result: cloneForTrace(result)
1834
+ });
1835
+ return result;
1836
+ }
1837
// Render a tool result as the text the model (or transcript) should see.
// Precedence: explicit model-facing override, then raw output on success,
// then error + output combined, then a generic fallback.
function toolResultText(result) {
  const { modelOutput, success, output, error } = result;
  if (modelOutput) return modelOutput;
  if (success) return output;
  const combined = [error, output].filter(Boolean).join("\n");
  return combined === "" ? "Unknown error" : combined;
}
1842
// List the tool names an action will invoke; empty for non-tool actions.
function toolNamesForAction(action) {
  switch (action.type) {
    case "tool":
      return [action.name];
    case "tool_batch":
      return action.calls.map(({ name }) => name);
    default:
      return [];
  }
}
1848
// Extract the user-visible rationale attached to a tool-invoking action.
// Non-tool actions carry no public rationale.
function publicRationaleForAction(action) {
  const isToolAction = action.type === "tool" || action.type === "tool_batch";
  return isToolAction ? action.publicRationale : undefined;
}
1853
// Pretty-print the expected-output contract (2-space indent) for
// embedding into worker prompts.
function formatExpectedOutput(expectedOutput) {
  return JSON.stringify(expectedOutput, undefined, 2);
}
1856
// Collapse all whitespace runs to single spaces, trim, and clip to `max`
// characters (appending "..." when clipped).
function summarizeText(value, max) {
  const normalized = value.replace(/\s+/g, " ").trim();
  if (normalized.length <= max) {
    return normalized;
  }
  return `${normalized.slice(0, max)}...`;
}
1860
// One-line error rendering: message for Error instances, stringified
// value for anything else thrown.
function summarizeError(error) {
  if (error instanceof Error) {
    return error.message;
  }
  return String(error);
}
1863
+
1864
+ // src/arc/lcm/compactor.ts
1865
/**
 * Build a depth-0 (leaf) summary node for the summary DAG from one
 * completed dispatch record. The summary text concatenates status +
 * instruction, step usage, the worker's short summary, any produced or
 * modified artifact URIs, and the last few recorded actions.
 */
function buildLeafSummary(record) {
  const { tuple, artifact, workerResult, transcriptId, completedAt } = record;
  const conversationId = `worker_${tuple.id}`;
  const changedActions = new Set(["produced", "modified"]);
  const artifactUris = (workerResult?.artifacts ?? [])
    .filter((entry) => changedActions.has(entry.action))
    .map((entry) => entry.uri);
  const parts = [
    `[${artifact.status}] ${tuple.instruction}`,
    `steps: ${artifact.stepsUsed}/${tuple.steps}`,
    artifact.summary
  ];
  if (artifactUris.length > 0) {
    parts.push(`produced: ${artifactUris.join(", ")}`);
  }
  const actions = artifact.actions ?? workerResult?.actions ?? [];
  if (actions.length > 0) {
    // Only the tail of the action log — keeps the leaf compact.
    parts.push(`actions: ${actions.slice(-5).join("; ")}`);
  }
  const summary = parts.join("\n");
  return {
    id: `summary_d0_${tuple.id}`,
    depth: 0,
    sourceIds: [transcriptId],
    sourceConversationIds: [conversationId],
    summary,
    artifacts: artifactUris,
    operations: [],
    outcome: artifact.status,
    // Rough token estimate: ~4 characters per token.
    tokenCount: Math.ceil(summary.length / 4),
    createdAt: completedAt
  };
}
1896
+
1897
+ // src/arc/dispatcher.ts
1898
/**
 * Construct a dispatch tuple from the orchestrator's `dispatch` tool args.
 * Unknown tiers are dropped; instruction/inputs are coerced to strings.
 */
function buildTuple(deps, args, orchestratorContext) {
  const requestedTier = args.tier;
  const tier =
    requestedTier === "fast" || requestedTier === "strong" ? requestedTier : undefined;
  const inputs = Array.isArray(args.inputs) ? args.inputs.map(String) : [];
  return {
    id: `tuple_${randomUUID().slice(0, 8)}`,
    instruction: String(args.instruction ?? ""),
    inputs,
    expectedOutput: normalizeExpectedOutput(args.expectedOutput),
    tools: deps.allWorkerToolNames,
    steps: deps.maxStepsPerWorker,
    ...(tier !== undefined ? { tier } : {}),
    orchestratorContext
  };
}
1911
// Pick the model for a worker: tier-specific mapping when configured,
// otherwise the default worker model. Missing tier defaults to "strong".
function resolveWorkerModel(deps, tuple) {
  const tier = tuple.tier == null ? "strong" : tuple.tier;
  const mapped = deps.workerModelMap?.[tier];
  return mapped ?? deps.workerModel;
}
1915
/**
 * Resolve artifact ids to their best-available textual content.
 * Per artifact the preference order is: the file at `output` (read via the
 * tool provider), then the inline `textOutput`, then the short `summary`.
 * Ids that resolve to nothing are omitted from the returned Map.
 */
async function getInputArtifacts(artifactStore, toolProvider, inputs) {
  const resolved = new Map();
  for (const id of inputs) {
    const artifact = await artifactStore.get(id);
    if (artifact == null) continue;
    if (artifact.output) {
      let fileResult = null;
      try {
        fileResult = await toolProvider.readFile(artifact.output);
      } catch {
        // Best-effort read: an unreadable file falls through to the
        // text fallbacks below.
      }
      if (fileResult?.success) {
        resolved.set(id, fileResult.output);
        continue;
      }
    }
    if (artifact.textOutput) {
      resolved.set(id, artifact.textOutput);
      continue;
    }
    if (artifact.summary) {
      resolved.set(id, artifact.summary);
    }
  }
  return resolved;
}
1939
/**
 * Dispatch one or more worker tuples in parallel and stream loop events.
 *
 * Runs in three phases:
 *  1. Build a tuple per `dispatch` tool call, emitting dispatch + trace events.
 *  2. Assemble per-worker context and start every worker concurrently;
 *     worker progress/trace callbacks push into a shared event queue.
 *  3. Poll-drain the queue until all worker promises settle, then record
 *     each result (or a synthetic failure for a rejected promise).
 *
 * @param deps   Dispatcher dependencies (stores, models, tool registries).
 * @param state  Mutable dispatch state (turn, counters, dispatch records).
 * @param calls  The orchestrator's `dispatch` tool calls ({ toolCall, args }).
 * @param publicRationale Orchestrator rationale, passed to tuples as
 *                        orchestrator context.
 * @param signal Abort signal propagated into each worker run.
 */
async function* runParallelDispatches(deps, state, calls, publicRationale, signal) {
  // Phase 1: build tuples and announce each dispatch before any worker starts.
  const tuples = [];
  for (const { args } of calls) {
    state.dispatchCount++;
    const tuple = buildTuple(deps, args, publicRationale);
    tuples.push(tuple);
    yield { type: "dispatch_full", tuple };
    yield {
      type: "dispatch",
      tupleId: tuple.id,
      instruction: tuple.instruction
    };
    yield {
      type: "trace",
      trace: {
        scope: "orchestrator",
        phase: "tool_call",
        turn: state.turn,
        toolName: "dispatch",
        args: cloneForTrace(args)
      }
    };
  }
  // Shared queue: worker callbacks push events here; the drain loop below
  // yields them. Safe without locking — JS callbacks run on one event loop.
  const queuedEvents = [];
  const workerPromises = [];
  // Phase 2: start one async worker run per tuple.
  for (const tuple of tuples) {
    const sections = [
      `## Original Task
${deps.task}`,
      `## Workspace
Current workspace root: ${deps.workDir}`,
      `## Worker Budget
max_steps: ${tuple.steps}`,
      tuple.expectedOutput ? `## Expected Output Contract
${JSON.stringify(tuple.expectedOutput, null, 2)}` : ""
    ];
    // Include failed/incomplete prior attempts (char-budgeted) so a
    // corrective worker can see what was already tried.
    const priorWork = await buildPriorWorkContext(
      state.dispatchRecords,
      deps.transcriptStore,
      4e3
    );
    if (priorWork) sections.push(priorWork);
    const workerTaskContext = sections.filter(Boolean).join("\n\n");
    const workerContext = await assembleWorkerContext({
      store: deps.messageStore,
      dag: deps.summaryDAG,
      budget: 12e3,
      taskContext: workerTaskContext,
      instruction: tuple.instruction,
      vectorIndex: deps.vectorIndex,
      transcriptStore: deps.transcriptStore,
      scratchPad: deps.scratchPad
    });
    const progressEvents = [];
    const { hookRunner } = deps;
    // Immediately-invoked async fn so all workers run concurrently;
    // results are collected via Promise.allSettled below.
    const promise = (async () => {
      // BeforeWorker hook can veto the run entirely.
      if (hookRunner) {
        const pre = await hookRunner.run({ event: "BeforeWorker", metadata: { tupleId: tuple.id, instruction: tuple.instruction } });
        if (!pre.allow) {
          const failResult = {
            transcript: [],
            output: null,
            status: "failed",
            stepsUsed: 0,
            lastMessage: pre.reason ?? "blocked by BeforeWorker hook",
            actions: [],
            artifacts: []
          };
          return { tuple, run: { result: failResult, progress: progressEvents } };
        }
      }
      const result = await runWorker({
        task: deps.task,
        instruction: tuple.instruction,
        expectedOutput: tuple.expectedOutput,
        lcmContext: workerContext,
        inputArtifacts: await getInputArtifacts(deps.artifactStore, deps.toolProvider, tuple.inputs),
        tools: deps.workerTools,
        toolRegistry: deps.workerToolRegistry,
        maxSteps: tuple.steps,
        toolProvider: deps.toolProvider,
        createModel: deps.createModel,
        model: resolveWorkerModel(deps, tuple),
        workDir: deps.workDir,
        signal,
        systemPromptSuffix: deps.workerSystemPromptSuffix,
        providerOptions: deps.providerOptions,
        orchestratorContext: tuple.orchestratorContext,
        tupleId: tuple.id,
        hookRunner,
        // Worker events are queued (not yielded directly) because this
        // callback fires outside the generator's own control flow.
        onProgress: (progress) => {
          progressEvents.push(progress);
          queuedEvents.push({
            type: "worker_progress",
            tupleId: tuple.id,
            progress
          });
        },
        onTrace: (trace) => {
          queuedEvents.push({ type: "trace", trace });
        }
      });
      if (hookRunner) {
        await hookRunner.run({ event: "AfterWorker", metadata: { tupleId: tuple.id, status: result.status } });
      }
      return { tuple, run: { result, progress: progressEvents } };
    })();
    workerPromises.push(promise);
  }
  // Phase 3: drain queued events while workers run, polling every 100ms.
  const allSettled = Promise.allSettled(workerPromises);
  let settled = false;
  allSettled.then(() => {
    settled = true;
  });
  while (!settled || queuedEvents.length > 0) {
    while (queuedEvents.length > 0) {
      const event = queuedEvents.shift();
      if (event) yield event;
    }
    if (!settled) {
      await sleep(100);
    }
  }
  const results = await allSettled;
  for (let i = 0; i < results.length; i++) {
    const result = results[i];
    if (result.status === "rejected") {
      // Worker promise threw: emit a synthetic failure and tell the
      // orchestrator, but keep processing the remaining workers.
      const tuple2 = tuples[i];
      const error = result.reason instanceof Error ? result.reason.message : String(result.reason);
      yield {
        type: "worker_complete",
        tupleId: tuple2.id,
        status: "failed",
        summary: `Worker error: ${error}`,
        stepsUsed: 0
      };
      deps.appendOrchestratorMessage("user", `[result] Status: failed
Summary: Worker error: ${error}`);
      continue;
    }
    const { tuple, run } = result.value;
    // Persist + report each successful settlement in dispatch order.
    yield* recordDispatchResult(deps, state, tuple, run);
  }
}
2083
/**
 * Persist one finished worker run and yield the resulting loop events.
 *
 * In order: store the artifact, emit `worker_complete`, append the full
 * transcript, mirror each transcript message into the LCM message store,
 * record the dispatch, add + compact a leaf summary in the DAG, emit a
 * trace event, kick off a best-effort embedding, and append result
 * messages (plus failure/incomplete guidance) to the orchestrator thread.
 */
async function* recordDispatchResult(deps, state, tuple, workerRun) {
  const workerResult = workerRun.result;
  // Condensed artifact record keyed by tuple id; summary is capped at 200 chars.
  const artifact = {
    id: tuple.id,
    tupleId: tuple.id,
    output: workerResult.output,
    textOutput: workerResult.lastMessage,
    status: workerResult.status,
    summary: workerResult.lastMessage.slice(0, 200),
    stepsUsed: workerResult.stepsUsed,
    actions: workerResult.actions,
    instruction: tuple.instruction
  };
  await deps.artifactStore.set(tuple.id, artifact);
  state.lastArtifact = artifact;
  yield {
    type: "worker_complete",
    tupleId: tuple.id,
    status: artifact.status,
    summary: artifact.summary,
    stepsUsed: artifact.stepsUsed,
    actions: artifact.actions
  };
  const transcript = {
    id: `transcript_${tuple.id}`,
    tupleId: tuple.id,
    instruction: tuple.instruction,
    messages: workerResult.transcript,
    timestamp: Date.now()
  };
  await deps.transcriptStore.append(transcript);
  // Mirror the worker transcript into the LCM message store under a
  // per-worker conversation id so later recall/expand can find it.
  const workerConversationId = `worker_${tuple.id}`;
  for (const [idx, msg] of workerResult.transcript.entries()) {
    deps.messageStore.append({
      id: `${workerConversationId}_msg_${idx}`,
      conversationId: workerConversationId,
      index: idx,
      // Collapse any unknown role to "user"; keep tool/assistant as-is.
      role: msg.role === "tool" ? "tool" : msg.role === "assistant" ? "assistant" : "user",
      content: getTextContent(msg.content),
      ...msg.toolCalls ? { toolCalls: msg.toolCalls } : {},
      ...msg.toolResults ? { toolResults: msg.toolResults } : {},
      timestamp: transcript.timestamp
    });
  }
  const dispatchRecord = {
    tuple,
    artifact,
    transcriptId: transcript.id,
    progress: workerRun.progress,
    completedAt: transcript.timestamp
  };
  // Attached separately — presumably kept out of the literal so snapshot
  // serialization can strip it; confirm against stripDispatchForSnapshot.
  dispatchRecord.workerResult = workerResult;
  state.dispatchRecords.push(dispatchRecord);
  // Feed the summary DAG: add a depth-0 leaf, then let it compact.
  const leafSummary = buildLeafSummary(dispatchRecord);
  deps.summaryDAG.addLeaf(leafSummary);
  deps.summaryDAG.compact();
  yield {
    type: "trace",
    trace: {
      scope: "orchestrator",
      phase: "tool_result",
      turn: state.turn,
      toolName: "dispatch",
      args: cloneForTrace({ instruction: tuple.instruction }),
      result: {
        tuple: cloneForTrace(tuple),
        artifact: cloneForTrace(artifact),
        transcript: cloneForTrace(transcript),
        progress: cloneForTrace(workerRun.progress),
        workerResult: cloneForTrace(workerResult)
      }
    }
  };
  const textToEmbed = `${tuple.instruction}
${workerResult.lastMessage}`;
  // Fire-and-forget embedding: indexing failures are deliberately ignored.
  deps.vectorIndex.add(transcript.id, textToEmbed).catch(() => {
  });
  const actionsText = artifact.actions?.length ? `
Actions:
${artifact.actions.map((a) => `- ${a}`).join("\n")}` : "";
  // Only repeat the full text output when it adds something beyond the summary.
  const textOutput = artifact.textOutput && artifact.textOutput !== artifact.summary ? `
Text output:
${artifact.textOutput}` : "";
  deps.appendOrchestratorMessage("assistant", `[dispatch] ${tuple.instruction}`);
  deps.appendOrchestratorMessage("user", `[result] Status: ${artifact.status}
Summary: ${artifact.summary}${textOutput}${actionsText}`);
  // Steer the orchestrator's next turn based on terminal worker status.
  if (artifact.status === "failed") {
    deps.appendOrchestratorMessage(
      "user",
      `\u26A0 Worker failed. Review the output and dispatch a corrective worker.`
    );
  } else if (artifact.status === "incomplete") {
    const actionsSummary = (artifact.actions ?? []).slice(-5).join("\n ");
    deps.appendOrchestratorMessage(
      "user",
      `Worker ran out of steps but made progress. Actions:
${actionsSummary}
Check if the result is already sufficient. Redispatch to finish \u2014 the new worker will automatically see prior attempt history.`
    );
  }
}
2184
/**
 * Build a "## Prior Worker History" prompt section from past dispatches
 * that ended incomplete or failed, for injection into a redispatched
 * worker's context.
 *
 * Walks records newest-first under a rough character budget. Within each
 * transcript, messages are visited newest-first but spliced in after the
 * header, so each section still reads in chronological order. Returns
 * null when nothing relevant (or nothing fits).
 */
async function buildPriorWorkContext(dispatchRecords, transcriptStore, budgetChars) {
  const relevant = dispatchRecords.filter(
    (r) => r.artifact.status === "incomplete" || r.artifact.status === "failed"
  );
  if (relevant.length === 0) return null;
  const sections = [];
  let charsUsed = 0;
  // slice() before reverse(): avoid mutating the caller's array.
  for (const record of relevant.slice().reverse()) {
    const transcript = await transcriptStore.get(record.transcriptId);
    if (!transcript || transcript.messages.length === 0) continue;
    const header = `### Prior Attempt: ${record.tuple.id} [${record.artifact.status}]
Instruction: ${record.tuple.instruction}`;
    // Budget is checked after counting the header, so a header that would
    // overflow stops the walk without emitting a partial section.
    charsUsed += header.length;
    if (charsUsed > budgetChars) break;
    const lines = [header];
    const msgs = transcript.messages;
    for (let i = msgs.length - 1; i >= 0 && charsUsed < budgetChars; i--) {
      const msg = msgs[i];
      const text = getTextContent(msg.content);
      // Compact tool-call rendering: per-arg cap 100 chars, whole arg list 200.
      const toolCalls = msg.toolCalls?.map(
        (tc) => `${tc.toolName}(${Object.values(tc.args).map((v) => String(v).slice(0, 100)).join(", ").slice(0, 200)})`
      );
      const toolResults = msg.toolResults?.map(
        (tr) => `${tr.toolName}: ${tr.isError ? "ERROR " : ""}${tr.result.slice(0, 300)}`
      );
      const parts = [];
      if (text) parts.push(text.slice(0, 500));
      if (toolCalls?.length) parts.push(`Tools: ${toolCalls.join("; ")}`);
      if (toolResults?.length) parts.push(toolResults.join("\n"));
      if (parts.length === 0) continue;
      const entry = `[${msg.role}] ${parts.join("\n")}`;
      // NOTE(review): the entry is counted and inserted even when it pushes
      // charsUsed past the budget — the cap may overshoot by one entry.
      charsUsed += entry.length;
      // Insert just after the header to keep chronological order.
      lines.splice(1, 0, entry);
    }
    sections.push(lines.join("\n"));
  }
  if (sections.length === 0) return null;
  return `## Prior Worker History
${sections.join("\n\n")}`;
}
2224
// Resolve after roughly `ms` milliseconds (setTimeout-based delay).
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
2227
+
2228
+ // src/arc/loop.ts
2229
// Orchestrator system prompt wrapped as a system message, with provider
// options that request ephemeral prompt caching for the Anthropic and
// OpenRouter providers (cacheControl: ephemeral).
var ORCHESTRATOR_SYSTEM_CACHED = [
  {
    role: "system",
    content: ORCHESTRATOR_SYSTEM_PROMPT,
    providerOptions: {
      anthropic: { cacheControl: { type: "ephemeral" } },
      openrouter: { cacheControl: { type: "ephemeral" } }
    }
  }
];
// Default sliding-window size for orchestrator messages.
var DEFAULT_WINDOW_SIZE = 10;
// Default cap on orchestrator turns per task.
var DEFAULT_MAX_TURNS = 12;
// Default step budget handed to each dispatched worker.
var DEFAULT_MAX_STEPS_PER_WORKER = 15;
// Default context budget (24000) — unit not shown here; presumably tokens,
// matching the per-worker budget of 12e3 used in runParallelDispatches.
var DEFAULT_CONTEXT_BUDGET = 24e3;
// Default number of most-recent messages kept verbatim (6).
var DEFAULT_FRESH_TAIL_SIZE = 6;
// Control-flow tool schemas (dispatch/done) handled by the loop itself
// rather than executed through the tool registry.
var controlFlowTools = { dispatch, done };
2245
+ var ArcLoop = class {
2246
+ config;
2247
+ transcriptStore;
2248
+ vectorIndex;
2249
+ scratchPad;
2250
+ artifactStore;
2251
+ // LCM stores — mutable for session hydration
2252
+ messageStore = new MemoryMessageStore();
2253
+ summaryDAG = new MemorySummaryDAG();
2254
+ createModel;
2255
+ windowSize;
2256
+ model;
2257
+ /** Orchestrator tool schemas (for the model) */
2258
+ orchestratorToolSchemas;
2259
+ /** Orchestrator tool registry (for execute) — excludes dispatch/done (control flow) */
2260
+ orchestratorToolRegistry;
2261
+ /** Dispatcher deps + mutable state — shared with dispatcher.ts functions */
2262
+ dispatchDeps;
2263
+ dispatchState;
2264
+ orchestratorMessageIndex = 0;
2265
+ turn = 0;
2266
+ maxTurns;
2267
+ /** Per-turn abort controller — cancelled by interrupt(), refreshed each turn. */
2268
+ turnController = new AbortController();
2269
+ /** Resolver for the next task — set when the loop is waiting between tasks. */
2270
+ taskResolve = null;
2271
+ constructor(config) {
2272
+ this.config = config;
2273
+ this.maxTurns = config.maxTurns ?? DEFAULT_MAX_TURNS;
2274
+ this.createModel = config.createModel ?? anthropic;
2275
+ this.transcriptStore = config.transcriptStore ?? new MemoryTranscriptStore();
2276
+ this.artifactStore = config.artifactStore ?? new MemoryArtifactStore();
2277
+ this.vectorIndex = config.vectorIndex ?? new MemoryVectorIndex();
2278
+ this.scratchPad = config.scratchPad ?? new MemoryScratchPad();
2279
+ this.windowSize = config.orchestratorWindowSize ?? DEFAULT_WINDOW_SIZE;
2280
+ this.model = config.model;
2281
+ const lcmDeps = {
2282
+ messageStore: this.messageStore,
2283
+ summaryDAG: this.summaryDAG,
2284
+ vectorIndex: this.vectorIndex,
2285
+ transcriptStore: this.transcriptStore,
2286
+ findDispatchRecord: (summaryId) => this.findEpisodeRecordBySummaryId(summaryId)
2287
+ };
2288
+ const toolDeps = {
2289
+ readEpisode: (args) => this.readEpisode(args),
2290
+ historySearch: (pattern, opts) => this.messageStore.grep(pattern, opts),
2291
+ historyExpand: (summaryId, maxTokens) => expandSummary(lcmDeps, summaryId, maxTokens ?? 8e3),
2292
+ historyOverview: (summaryId) => describeSummary(lcmDeps, summaryId),
2293
+ historyRead: (conversationId, messageIndex) => historyRead(lcmDeps, conversationId, messageIndex),
2294
+ scratchPad: this.scratchPad,
2295
+ recall: (query) => recallDirect(lcmDeps, query),
2296
+ ...config.askUser ? { askUser: config.askUser } : {}
2297
+ };
2298
+ this.orchestratorToolRegistry = buildOrchestratorToolEntries(toolDeps);
2299
+ const workerArcTools = buildWorkerToolEntries(toolDeps);
2300
+ this.orchestratorToolSchemas = {
2301
+ ...controlFlowTools,
2302
+ ...toolSchemasFromRegistry(this.orchestratorToolRegistry)
2303
+ };
2304
+ const agentToolRegistry = config.tools ?? /* @__PURE__ */ new Map();
2305
+ const workerToolRegistry = new Map(agentToolRegistry);
2306
+ for (const [name, tool3] of workerArcTools) {
2307
+ workerToolRegistry.set(name, tool3);
2308
+ }
2309
+ const workerTools = {
2310
+ ...toolSchemasFromRegistry(agentToolRegistry),
2311
+ ...toolSchemasFromRegistry(workerArcTools)
2312
+ };
2313
+ this.dispatchState = {
2314
+ turn: 0,
2315
+ dispatchCount: 0,
2316
+ dispatchRecords: [],
2317
+ lastArtifact: null
2318
+ };
2319
+ this.dispatchDeps = {
2320
+ task: config.task,
2321
+ workDir: config.workDir,
2322
+ maxStepsPerWorker: config.maxStepsPerWorker ?? DEFAULT_MAX_STEPS_PER_WORKER,
2323
+ createModel: this.createModel,
2324
+ workerModel: config.workerModel,
2325
+ workerModelMap: config.workerModelMap,
2326
+ toolProvider: config.toolProvider,
2327
+ workerSystemPromptSuffix: config.workerSystemPromptSuffix,
2328
+ providerOptions: config.providerOptions,
2329
+ hookRunner: config.hookRunner,
2330
+ messageStore: this.messageStore,
2331
+ summaryDAG: this.summaryDAG,
2332
+ vectorIndex: this.vectorIndex,
2333
+ transcriptStore: this.transcriptStore,
2334
+ artifactStore: this.artifactStore,
2335
+ scratchPad: this.scratchPad,
2336
+ workerTools,
2337
+ workerToolRegistry,
2338
+ allWorkerToolNames: Object.keys(workerTools),
2339
+ appendOrchestratorMessage: (role, content) => this.appendOrchestratorMessage(role, content)
2340
+ };
2341
+ }
2342
+ /**
2343
+ * Interrupt the current turn — cancels in-flight model calls and workers.
2344
+ * The orchestrator loop stays alive and will prompt for user steering.
2345
+ */
2346
+ interrupt() {
2347
+ this.turnController.abort();
2348
+ }
2349
+ /** True when the loop is waiting for the next task (between done boundaries). */
2350
+ get idle() {
2351
+ return this.taskResolve !== null;
2352
+ }
2353
+ /**
2354
+ * Push a follow-up task into the loop. The orchestrator sees it as
2355
+ * a new user message with full conversational context from prior tasks.
2356
+ */
2357
+ pushTask(task) {
2358
+ if (!this.taskResolve) return false;
2359
+ const resolve = this.taskResolve;
2360
+ this.taskResolve = null;
2361
+ resolve(task);
2362
+ return true;
2363
+ }
2364
+ waitForNextTask(signal) {
2365
+ if (signal?.aborted) return Promise.resolve(null);
2366
+ return new Promise((resolve) => {
2367
+ const onAbort = () => {
2368
+ this.taskResolve = null;
2369
+ resolve(null);
2370
+ };
2371
+ signal?.addEventListener("abort", onAbort, { once: true });
2372
+ this.taskResolve = (task) => {
2373
+ signal?.removeEventListener("abort", onAbort);
2374
+ resolve(task);
2375
+ };
2376
+ });
2377
+ }
2378
+ /** Save session snapshot + update meta if a session store is configured. */
2379
+ async saveSession() {
2380
+ const { sessionStore, sessionId } = this.config;
2381
+ if (!sessionStore || !sessionId) return null;
2382
+ try {
2383
+ const dagSnapshot = this.summaryDAG.serialize();
2384
+ await sessionStore.save(sessionId, {
2385
+ messages: this.messageStore.serialize(),
2386
+ dispatches: this.dispatchState.dispatchRecords.map(stripDispatchForSnapshot),
2387
+ dagNodes: dagSnapshot.nodes,
2388
+ dagCoveredBy: dagSnapshot.coveredBy,
2389
+ turn: this.turn,
2390
+ dispatchCount: this.dispatchState.dispatchCount,
2391
+ orchestratorMessageIndex: this.orchestratorMessageIndex
2392
+ });
2393
+ const existing = await sessionStore.getMeta(sessionId);
2394
+ await sessionStore.saveMeta(sessionId, {
2395
+ id: sessionId,
2396
+ slug: existing?.slug ?? sessionId.slice(0, 8),
2397
+ createdAt: existing?.createdAt ?? Date.now(),
2398
+ lastActiveAt: Date.now(),
2399
+ taskCount: (existing?.taskCount ?? 0) + 1,
2400
+ summary: truncate(this.config.task, 120)
2401
+ });
2402
+ return { type: "session_saved", sessionId };
2403
+ } catch (err2) {
2404
+ const msg = err2 instanceof Error ? err2.message : String(err2);
2405
+ return { type: "trace", trace: { scope: "orchestrator", phase: "tool_result", turn: this.turn, toolName: "session_save", args: {}, result: { error: msg } } };
2406
+ }
2407
+ }
2408
+ /** Reset per-task state while keeping full conversation history. */
2409
+ async resetForNewTask(task) {
2410
+ this.config = { ...this.config, task };
2411
+ this.dispatchDeps.task = task;
2412
+ this.turn = 0;
2413
+ this.dispatchState.turn = 0;
2414
+ this.dispatchState.dispatchCount = 0;
2415
+ this.dispatchState.lastArtifact = null;
2416
+ await this.scratchPad.clear();
2417
+ }
2418
+ /**
2419
+ * Stream events from the orchestration loop.
2420
+ */
2421
+ async *stream(signal) {
2422
+ await this.vectorIndex.load();
2423
+ if (this.config.sessionStore && this.config.sessionId) {
2424
+ const snapshot = await this.config.sessionStore.load(this.config.sessionId);
2425
+ if (snapshot) {
2426
+ this.messageStore.loadFrom(snapshot.messages);
2427
+ this.summaryDAG.loadFrom({ nodes: snapshot.dagNodes, coveredBy: snapshot.dagCoveredBy });
2428
+ this.dispatchState.dispatchRecords.length = 0;
2429
+ this.dispatchState.dispatchRecords.push(...snapshot.dispatches);
2430
+ this.turn = Math.max(0, Math.min(snapshot.turn, this.maxTurns));
2431
+ this.dispatchState.dispatchCount = Math.max(0, snapshot.dispatchCount);
2432
+ const orchMsgs = this.messageStore.getConversation("orchestrator");
2433
+ this.orchestratorMessageIndex = orchMsgs.length;
2434
+ }
2435
+ }
2436
+ taskLoop: while (true) {
2437
+ if (signal?.aborted) break;
2438
+ while (this.turn < this.maxTurns) {
2439
+ if (signal?.aborted) break;
2440
+ this.turn++;
2441
+ this.dispatchState.turn = this.turn;
2442
+ this.turnController = new AbortController();
2443
+ if (signal) {
2444
+ if (signal.aborted) {
2445
+ this.turnController.abort();
2446
+ break;
2447
+ }
2448
+ signal.addEventListener("abort", () => this.turnController.abort(), { once: true });
2449
+ }
2450
+ const turnSignal = this.turnController.signal;
2451
+ const context = await this.buildContext();
2452
+ const { messages: orchestratorMessages, contextTokens } = this.buildOrchestratorMessages(context);
2453
+ const modelMessages = toModelMessages(orchestratorMessages);
2454
+ yield { type: "orchestrator_turn", turn: this.turn, contextTokens };
2455
+ yield {
2456
+ type: "trace",
2457
+ trace: {
2458
+ scope: "orchestrator",
2459
+ phase: "model_input",
2460
+ turn: this.turn,
2461
+ model: this.model,
2462
+ system: ORCHESTRATOR_SYSTEM_PROMPT,
2463
+ messages: cloneForTrace(orchestratorMessages),
2464
+ modelMessages: cloneForTrace(modelMessages),
2465
+ toolNames: Object.keys(this.orchestratorToolSchemas)
2466
+ }
2467
+ };
2468
+ let result;
2469
+ try {
2470
+ result = await generateText({
2471
+ model: this.createModel(this.model),
2472
+ system: ORCHESTRATOR_SYSTEM_CACHED,
2473
+ tools: this.orchestratorToolSchemas,
2474
+ messages: modelMessages,
2475
+ toolChoice: "auto",
2476
+ abortSignal: turnSignal,
2477
+ ...this.config.providerOptions ? { providerOptions: this.config.providerOptions } : {}
2478
+ });
2479
+ } catch (err2) {
2480
+ if (turnSignal.aborted && !signal?.aborted) {
2481
+ yield* this.handleInterrupt();
2482
+ continue;
2483
+ }
2484
+ throw err2;
2485
+ }
2486
+ yield {
2487
+ type: "orchestrator_usage",
2488
+ turn: this.turn,
2489
+ ...result.usage?.inputTokens != null ? { inputTokens: result.usage.inputTokens } : {},
2490
+ ...result.usage?.outputTokens != null ? { outputTokens: result.usage.outputTokens } : {}
2491
+ };
2492
+ yield {
2493
+ type: "trace",
2494
+ trace: {
2495
+ scope: "orchestrator",
2496
+ phase: "model_output",
2497
+ turn: this.turn,
2498
+ text: String(result.text ?? ""),
2499
+ toolCalls: traceToolCalls(result.toolCalls ?? [])
2500
+ }
2501
+ };
2502
+ if (result.toolCalls && result.toolCalls.length > 0) {
2503
+ const publicRationale = result.text?.trim() || "";
2504
+ if (publicRationale) {
2505
+ yield { type: "text_delta", text: publicRationale + "\n" };
2506
+ this.appendOrchestratorMessage("assistant", publicRationale);
2507
+ } else {
2508
+ yield {
2509
+ type: "trace",
2510
+ trace: {
2511
+ scope: "orchestrator",
2512
+ phase: "public_rationale_missing",
2513
+ turn: this.turn,
2514
+ toolCalls: traceToolCalls(result.toolCalls ?? [])
2515
+ }
2516
+ };
2517
+ }
2518
+ const dispatchCalls = [];
2519
+ const nonDispatchCalls = [];
2520
+ for (const toolCall of result.toolCalls) {
2521
+ const args = toolCall.args ?? toolCall.input ?? {};
2522
+ if (toolCall.toolName === "dispatch") {
2523
+ dispatchCalls.push({ toolCall, args });
2524
+ } else {
2525
+ nonDispatchCalls.push({ toolCall, args });
2526
+ }
2527
+ }
2528
+ for (const { toolCall, args } of nonDispatchCalls) {
2529
+ yield {
2530
+ type: "trace",
2531
+ trace: {
2532
+ scope: "orchestrator",
2533
+ phase: "tool_call",
2534
+ turn: this.turn,
2535
+ toolName: toolCall.toolName,
2536
+ args: cloneForTrace(args)
2537
+ }
2538
+ };
2539
+ if (toolCall.toolName === "done") {
2540
+ const output = String(args.output ?? "");
2541
+ yield {
2542
+ type: "trace",
2543
+ trace: {
2544
+ scope: "orchestrator",
2545
+ phase: "tool_result",
2546
+ turn: this.turn,
2547
+ toolName: toolCall.toolName,
2548
+ args: cloneForTrace(args),
2549
+ result: { output, accepted: true }
2550
+ }
2551
+ };
2552
+ this.appendOrchestratorMessage("assistant", `[done] ${output}`);
2553
+ yield { type: "done", output };
2554
+ const savedEvent2 = await this.saveSession();
2555
+ if (savedEvent2) yield savedEvent2;
2556
+ const nextTask2 = await this.waitForNextTask(signal);
2557
+ if (nextTask2 == null) return;
2558
+ this.appendOrchestratorMessage("user", nextTask2);
2559
+ await this.resetForNewTask(nextTask2);
2560
+ continue taskLoop;
2561
+ }
2562
+ const registryTool = this.orchestratorToolRegistry.get(toolCall.toolName);
2563
+ if (registryTool?.execute) {
2564
+ if (toolCall.toolName === "AskUser") {
2565
+ yield { type: "ask_user", question: String(args.question ?? ""), options: Array.isArray(args.options) ? args.options.map(String) : void 0 };
2566
+ }
2567
+ const toolResult = await registryTool.execute(this.config.toolProvider, args, this.config.workDir);
2568
+ const resultText = toolResult.success ? toolResult.output : toolResult.error ?? toolResult.output;
2569
+ yield {
2570
+ type: "trace",
2571
+ trace: {
2572
+ scope: "orchestrator",
2573
+ phase: "tool_result",
2574
+ turn: this.turn,
2575
+ toolName: toolCall.toolName,
2576
+ args: cloneForTrace(args),
2577
+ result: cloneForTrace(toolResult)
2578
+ }
2579
+ };
2580
+ if (toolCall.toolName === "recall") {
2581
+ yield { type: "recall", query: String(args.query ?? ""), answer: resultText };
2582
+ }
2583
+ this.appendOrchestratorMessage("assistant", `[${toolCall.toolName}] ${truncate(JSON.stringify(args), 200)}`);
2584
+ this.appendOrchestratorMessage("user", `[${toolCall.toolName} result]
2585
+ ${resultText}`);
2586
+ }
2587
+ }
2588
+ if (dispatchCalls.length > 0) {
2589
+ yield* runParallelDispatches(this.dispatchDeps, this.dispatchState, dispatchCalls, publicRationale, turnSignal);
2590
+ if (turnSignal.aborted && !signal?.aborted) {
2591
+ yield* this.handleInterrupt();
2592
+ }
2593
+ }
2594
+ } else {
2595
+ const text = result.text?.trim() ?? "";
2596
+ if (text) {
2597
+ yield { type: "text_delta", text };
2598
+ this.appendOrchestratorMessage("assistant", text);
2599
+ }
2600
+ }
2601
+ }
2602
+ await this.vectorIndex.save();
2603
+ if (signal?.aborted) {
2604
+ yield { type: "done", output: "[aborted]" };
2605
+ break;
2606
+ }
2607
+ this.appendOrchestratorMessage("assistant", "[done] max turns reached");
2608
+ yield { type: "done", output: "[max turns reached]" };
2609
+ const savedEvent = await this.saveSession();
2610
+ if (savedEvent) yield savedEvent;
2611
+ const nextTask = await this.waitForNextTask(signal);
2612
+ if (nextTask == null) break;
2613
+ this.appendOrchestratorMessage("user", nextTask);
2614
+ await this.resetForNewTask(nextTask);
2615
+ }
2616
+ await this.vectorIndex.save();
2617
+ }
2618
+ /**
2619
+ * Run a single task to completion (for headless/test use).
2620
+ * Breaks after the first `done` event — does not wait for follow-up tasks.
2621
+ */
2622
+ async run(signal) {
2623
+ const events = [];
2624
+ let output = "";
2625
+ for await (const event of this.stream(signal)) {
2626
+ events.push(event);
2627
+ if (event.type === "done") {
2628
+ output = event.output;
2629
+ break;
2630
+ }
2631
+ }
2632
+ return { output, events };
2633
+ }
2634
+ /** Append a message to the LCM message store (single source of truth) */
2635
+ appendOrchestratorMessage(role, content) {
2636
+ this.messageStore.append({
2637
+ id: `orchestrator_msg_${this.orchestratorMessageIndex}`,
2638
+ conversationId: "orchestrator",
2639
+ index: this.orchestratorMessageIndex++,
2640
+ role,
2641
+ content,
2642
+ timestamp: Date.now()
2643
+ });
2644
+ }
2645
+ findEpisodeRecordBySummaryId(summaryId) {
2646
+ const tupleId = summaryId.replace(/^summary_d0_/, "");
2647
+ return this.dispatchState.dispatchRecords.find((r) => r.tuple.id === tupleId);
2648
+ }
2649
+ async buildContext() {
2650
+ const artifacts = await this.artifactStore.getAll();
2651
+ const artifactStatusCounts = { complete: 0, incomplete: 0, failed: 0, interrupted: 0 };
2652
+ for (const a of Object.values(artifacts)) artifactStatusCounts[a.status]++;
2653
+ const artifactCount = Object.keys(artifacts).length;
2654
+ const allIncomplete = artifactCount > 0 && artifactStatusCounts.complete === 0;
2655
+ return {
2656
+ task: this.config.task,
2657
+ artifacts,
2658
+ lastResult: this.dispatchState.lastArtifact,
2659
+ recentTurns: this.messageStore.getConversation("orchestrator").slice(-this.windowSize).map((msg) => ({ role: msg.role, content: msg.content })),
2660
+ turn: this.turn,
2661
+ maxTurns: this.maxTurns,
2662
+ turnsRemaining: Math.max(0, this.maxTurns - this.turn),
2663
+ dispatchCount: this.dispatchState.dispatchCount,
2664
+ artifactStatusCounts,
2665
+ allIncomplete,
2666
+ dispatches: [...this.dispatchState.dispatchRecords],
2667
+ ooda: buildOodaSnapshot({
2668
+ turn: this.turn,
2669
+ maxTurns: this.maxTurns,
2670
+ turnsRemaining: Math.max(0, this.maxTurns - this.turn),
2671
+ dispatchCount: this.dispatchState.dispatchCount,
2672
+ allIncomplete,
2673
+ dispatches: this.dispatchState.dispatchRecords
2674
+ }),
2675
+ // LCM additions
2676
+ messageStore: this.messageStore,
2677
+ summaryDAG: this.summaryDAG
2678
+ };
2679
+ }
2680
  /**
   * Build the model-facing message list for the next orchestrator turn.
   * Two paths: (1) a caller-supplied `formatOrchestratorContext` hook takes
   * over message construction (task/OODA messages are prepended and tokens
   * are estimated at ~4 chars/token); (2) otherwise `assembleContext` builds
   * the window from the message store + summary DAG, and the last dispatch
   * result (if any) is appended as a trailing user message.
   * @param context - snapshot from buildContext()
   * @returns {{messages: Array, contextTokens: number}}
   */
  buildOrchestratorMessages(context) {
    const customContextMessages = this.config.formatOrchestratorContext?.(context);
    if (customContextMessages) {
      const taskContextMsg = {
        role: "user",
        content: this.buildTaskContextText(context)
      };
      const oodaMsg = {
        role: "user",
        content: formatOodaSnapshotForPrompt(context.ooda)
      };
      const messages = [taskContextMsg, oodaMsg, ...customContextMessages];
      // Rough token estimate: ceil(length / 4) per string message;
      // non-string contents contribute zero.
      const contextTokens = messages.reduce(
        (sum, m) => sum + Math.ceil((typeof m.content === "string" ? m.content : "").length / 4),
        0
      );
      return { messages, contextTokens };
    }
    const taskContext = this.buildTaskContextText(context);
    const assembled = assembleContext({
      conversationId: "orchestrator",
      store: this.messageStore,
      dag: this.summaryDAG,
      budget: DEFAULT_CONTEXT_BUDGET,
      freshTailSize: DEFAULT_FRESH_TAIL_SIZE,
      taskContext,
      ooda: context.ooda
    });
    // Surface the most recent dispatch artifact (and the instruction that
    // produced it) so the model sees the latest outcome last.
    if (context.lastResult) {
      let content = `## Last Result
Artifact ${context.lastResult.id}: [${context.lastResult.status}]
${context.lastResult.summary}`;
      if (context.lastResult.instruction) {
        content += `
Your dispatch instruction was: "${context.lastResult.instruction}"`;
      }
      assembled.messages.push({ role: "user", content });
    }
    return { messages: assembled.messages, contextTokens: assembled.tokenEstimate };
  }
2720
  /**
   * Render the task/workspace/budget/progress header injected at the top of
   * the orchestrator prompt. Template continuation lines are intentionally
   * unindented: they are literal prompt text.
   * NOTE(review): artifactStatusCounts also tracks `interrupted`, which is
   * not reported here — confirm whether that omission is intentional.
   * @param context - snapshot from buildContext()
   * @returns {string} markdown-ish prompt section
   */
  buildTaskContextText(context) {
    return `## Task
${context.task}

## Workspace
root: ${this.config.workDir}

## Budget
turn: ${context.turn}
max_turns: ${context.maxTurns}
turns_remaining: ${context.turnsRemaining}
dispatches_so_far: ${context.dispatchCount}

## Progress
artifact_status_counts: complete=${context.artifactStatusCounts.complete}, incomplete=${context.artifactStatusCounts.incomplete}, failed=${context.artifactStatusCounts.failed}
all_incomplete: ${context.allIncomplete}`;
  }
2737
+ async readEpisode(args) {
2738
+ const record = this.dispatchState.dispatchRecords.find(
2739
+ (r) => r.tuple.id === args.id || r.transcriptId === args.id || r.artifact.id === args.id
2740
+ );
2741
+ if (!record) {
2742
+ return `Episode not found: ${args.id}`;
2743
+ }
2744
+ return formatReadEpisodeResult(record, this.transcriptStore);
2745
+ }
2746
+ /**
2747
+ * Handle a turn interrupt: prompt user for steering, inject into context.
2748
+ */
2749
+ async *handleInterrupt() {
2750
+ this.appendOrchestratorMessage("user", "[system] Turn interrupted by user.");
2751
+ if (this.config.askUser) {
2752
+ const question = "Turn interrupted. How would you like to proceed?";
2753
+ yield { type: "ask_user", question };
2754
+ const answer = await this.config.askUser(question);
2755
+ this.appendOrchestratorMessage("user", `[user steering] ${answer}`);
2756
+ }
2757
+ }
2758
+ };
2759
/**
 * Produce a snapshot-safe copy of a dispatch record: the (potentially large)
 * `workerResult` is dropped entirely and `progress` is reset to an empty
 * array. The input record is not mutated.
 * @param {object} record - dispatch record
 * @returns {object} shallow copy without workerResult, with progress: []
 */
function stripDispatchForSnapshot(record) {
  const snapshot = {};
  for (const [key, value] of Object.entries(record)) {
    if (key !== "workerResult" && key !== "progress") {
      snapshot[key] = value;
    }
  }
  snapshot.progress = [];
  return snapshot;
}
2763
/**
 * Normalize raw tool-call objects into a compact, trace-safe shape.
 * Tolerates malformed entries: a non-object call becomes `{toolName: "", args: {}}`,
 * args fall back from `args` to `input` to `{}`, and non-plain-object args
 * (arrays, primitives) collapse to an empty object. `toolCallId` is included
 * only when it is a string.
 * @param {unknown[]} toolCalls
 * @returns {Array<{toolName: string, toolCallId?: string, args: object}>}
 */
function traceToolCalls(toolCalls) {
  const normalized = [];
  for (const call of toolCalls) {
    const record = call && typeof call === "object" ? call : {};
    const rawArgs = record["args"] ?? record["input"] ?? {};
    const isPlainArgs = rawArgs && typeof rawArgs === "object" && !Array.isArray(rawArgs);
    const entry = { toolName: String(record["toolName"] ?? "") };
    const id = record["toolCallId"];
    if (typeof id === "string") entry.toolCallId = id;
    entry.args = cloneForTrace(isPlainArgs ? rawArgs : {});
    normalized.push(entry);
  }
  return normalized;
}
2776
/**
 * Filesystem-backed transcript store.
 * Each transcript is persisted as `<dir>/<id>.json`; `<dir>/index.json`
 * keeps the ordered list of transcript ids. The index is lazily loaded on
 * first use, and a missing or unreadable index starts out empty.
 */
var FsTranscriptStore = class {
  dir;
  indexPath;
  index = [];
  loaded = false;
  /** @param {string} dir - directory holding transcript files and the index */
  constructor(dir) {
    this.dir = dir;
    this.indexPath = path.join(dir, "index.json");
  }
  /** Persist a transcript to its own file and record its id in the index. */
  async append(transcript) {
    await this.ensureLoaded();
    await fs.mkdir(this.dir, { recursive: true });
    await fs.writeFile(path.join(this.dir, `${transcript.id}.json`), JSON.stringify(transcript, null, 2));
    this.index.push(transcript.id);
    // The full index is rewritten on every append to keep it durable.
    await fs.writeFile(this.indexPath, JSON.stringify(this.index, null, 2));
  }
  /** Load every indexed transcript, skipping any whose file is missing or corrupt. */
  async getAll() {
    await this.ensureLoaded();
    const results = [];
    for (const id of this.index) {
      const entry = await this.get(id);
      if (entry) results.push(entry);
    }
    return results;
  }
  /** Read one transcript by id; returns null when absent or unparsable. */
  async get(id) {
    try {
      const raw = await fs.readFile(path.join(this.dir, `${id}.json`), "utf-8");
      return JSON.parse(raw);
    } catch {
      return null;
    }
  }
  /** Lazily read index.json exactly once; any error yields an empty index. */
  async ensureLoaded() {
    if (this.loaded) return;
    try {
      this.index = JSON.parse(await fs.readFile(this.indexPath, "utf-8"));
    } catch {
      this.index = [];
    }
    this.loaded = true;
  }
};
2824
/**
 * Filesystem-backed artifact store: every artifact lives in one JSON file
 * keyed by artifact id. The map is lazily loaded on first access and the
 * whole file is rewritten on each `set`.
 */
var FsArtifactStore = class {
  filePath;
  artifacts = {};
  loaded = false;
  /** @param {string} filePath - path of the JSON file holding all artifacts */
  constructor(filePath) {
    this.filePath = filePath;
  }
  /** Insert or replace one artifact, then persist the full map to disk. */
  async set(id, artifact) {
    await this.ensureLoaded();
    this.artifacts[id] = artifact;
    await this.save();
  }
  /** Fetch one artifact by id, or null when unknown. */
  async get(id) {
    await this.ensureLoaded();
    return this.artifacts[id] ?? null;
  }
  /** Return a shallow copy of the whole artifact map. */
  async getAll() {
    await this.ensureLoaded();
    return { ...this.artifacts };
  }
  /** Lazily read the backing file exactly once; any error yields an empty map. */
  async ensureLoaded() {
    if (this.loaded) return;
    try {
      this.artifacts = JSON.parse(await fs.readFile(this.filePath, "utf-8"));
    } catch {
      this.artifacts = {};
    }
    this.loaded = true;
  }
  /** Write the artifact map to disk, creating parent directories as needed. */
  async save() {
    await fs.mkdir(path.dirname(this.filePath), { recursive: true });
    await fs.writeFile(this.filePath, JSON.stringify(this.artifacts, null, 2));
  }
};
2860
+
2861
+ export { ArcLoop, FsArtifactStore, FsTranscriptStore, MemoryArtifactStore, MemoryMessageStore, MemoryScratchPad, MemorySessionStore, MemorySummaryDAG, MemoryTranscriptStore, MemoryVectorIndex, cloneForTrace, formatDispatchForPrompt };
2862
//# sourceMappingURL=index.js.map