@flue/sdk 0.3.0 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +35 -9
- package/dist/agent-BB4lwAd5.mjs +453 -0
- package/dist/client.d.mts +26 -0
- package/dist/client.mjs +78 -0
- package/dist/cloudflare/index.d.mts +35 -0
- package/dist/cloudflare/index.mjs +241 -0
- package/dist/command-helpers-DdAfbnom.d.mts +21 -0
- package/dist/command-helpers-hTZKWK13.mjs +37 -0
- package/dist/index.d.mts +116 -0
- package/dist/index.mjs +1664 -0
- package/dist/internal.d.mts +29 -0
- package/dist/internal.mjs +39 -0
- package/dist/mcp-BVF-sOBZ.d.mts +22 -0
- package/dist/mcp-DOgMtp8y.mjs +285 -0
- package/dist/node/index.d.mts +14 -0
- package/dist/node/index.mjs +75 -0
- package/dist/sandbox.d.mts +29 -0
- package/dist/sandbox.mjs +132 -0
- package/dist/session-DukL3zwF.mjs +1303 -0
- package/dist/types-T8pE1xIS.d.mts +461 -0
- package/package.json +11 -3
|
@@ -0,0 +1,1303 @@
|
|
|
1
|
+
import { i as loadSkillByPath, n as createTools, t as BUILTIN_TOOL_NAMES } from "./agent-BB4lwAd5.mjs";
|
|
2
|
+
import { completeSimple, isContextOverflow } from "@mariozechner/pi-ai";
|
|
3
|
+
import { Agent } from "@mariozechner/pi-agent-core";
|
|
4
|
+
import { toJsonSchema } from "@valibot/to-json-schema";
|
|
5
|
+
import * as v from "valibot";
|
|
6
|
+
|
|
7
|
+
//#region src/compaction.ts
|
|
8
|
+
// Defaults for automatic context compaction: compaction triggers once the
// context comes within `reserveTokens` of the model's context window, and
// roughly `keepRecentTokens` worth of recent messages are retained verbatim.
const DEFAULT_COMPACTION_SETTINGS = {
	enabled: true,
	reserveTokens: 16384,
	keepRecentTokens: 2e4
};
|
|
13
|
+
/**
 * Total context tokens represented by a usage record.
 * Prefers the provider-reported `totalTokens`; when that is absent (or zero),
 * falls back to summing the individual components.
 */
function calculateContextTokens(usage) {
	const { totalTokens, input, output, cacheRead, cacheWrite } = usage;
	if (totalTokens) return totalTokens;
	return input + output + cacheRead + cacheWrite;
}
|
|
16
|
+
/**
 * Usage record of an assistant message, or undefined.
 * Only messages that completed normally count: aborted/errored responses and
 * messages without a truthy `usage` field yield undefined.
 */
function getAssistantUsage(msg) {
	if (msg.role !== "assistant" || !("usage" in msg)) return undefined;
	const { stopReason, usage } = msg;
	if (stopReason === "aborted" || stopReason === "error") return undefined;
	return usage || undefined;
}
|
|
22
|
+
/**
 * Scans the history backwards for the most recent assistant message carrying
 * valid usage data. Returns `{ usage, index }`, or undefined when none exists.
 */
function getLastAssistantUsageInfo(messages) {
	let i = messages.length;
	while (i-- > 0) {
		const usage = getAssistantUsage(messages[i]);
		if (usage) return { usage, index: i };
	}
	return undefined;
}
|
|
32
|
+
/** chars/4 heuristic. Conservative (overestimates). */
function estimateTokens(message) {
	const toTokens = (chars) => Math.ceil(chars / 4);
	if (message.role === "user") {
		const { content } = message;
		if (typeof content === "string") return toTokens(content.length);
		let total = 0;
		if (Array.isArray(content)) {
			for (const block of content) {
				if (block.type === "text") total += block.text.length;
			}
		}
		return toTokens(total);
	}
	if (message.role === "assistant") {
		let total = 0;
		for (const block of message.content) {
			if (block.type === "text") total += block.text.length;
			else if (block.type === "thinking") total += block.thinking.length;
			else if (block.type === "toolCall") total += block.name.length + JSON.stringify(block.arguments).length;
		}
		return toTokens(total);
	}
	if (message.role === "toolResult") {
		let total = 0;
		for (const block of message.content) {
			if (block.type === "text") total += block.text.length;
			// Flat per-image estimate (4800 chars ≈ 1200 tokens).
			else if (block.type === "image") total += 4800;
		}
		return toTokens(total);
	}
	// Unknown roles contribute nothing.
	return 0;
}
|
|
60
|
+
/**
 * Estimates the current context size.
 * When the history contains real usage data (last valid assistant message),
 * that figure is used and only the messages after it are estimated; otherwise
 * the whole history is estimated with the chars/4 heuristic.
 */
function estimateContextTokens(messages) {
	const usageInfo = getLastAssistantUsageInfo(messages);
	if (!usageInfo) {
		const estimated = messages.reduce((sum, message) => sum + estimateTokens(message), 0);
		return {
			tokens: estimated,
			usageTokens: 0,
			trailingTokens: estimated,
			lastUsageIndex: null
		};
	}
	const usageTokens = calculateContextTokens(usageInfo.usage);
	const trailingTokens = messages
		.slice(usageInfo.index + 1)
		.reduce((sum, message) => sum + estimateTokens(message), 0);
	return {
		tokens: usageTokens + trailingTokens,
		usageTokens,
		trailingTokens,
		lastUsageIndex: usageInfo.index
	};
}
|
|
82
|
+
/**
 * True when compaction is enabled and the context has grown past the window
 * minus the reserved headroom.
 */
function shouldCompact(contextTokens, contextWindow, settings) {
	return settings.enabled ? contextTokens > contextWindow - settings.reserveTokens : false;
}
|
|
86
|
+
/** Fresh accumulator for file paths touched by read/write/edit tool calls. */
function createFileOps() {
	const read = new Set();
	const written = new Set();
	const edited = new Set();
	return { read, written, edited };
}
|
|
93
|
+
/**
 * Records file paths touched by read/write/edit tool calls in an assistant
 * message into `fileOps`. Other roles, other tools, and calls without a
 * string `path` argument are ignored.
 */
function extractFileOpsFromMessage(message, fileOps) {
	if (message.role !== "assistant") return;
	if (!Array.isArray(message.content)) return;
	// Map each file-touching builtin tool to its accumulator set.
	const targetByTool = new Map([
		["read", fileOps.read],
		["write", fileOps.written],
		["edit", fileOps.edited]
	]);
	for (const block of message.content) {
		if (block.type !== "toolCall") continue;
		const args = block.arguments;
		if (!args || typeof args.path !== "string" || args.path === "") continue;
		targetByTool.get(block.name)?.add(args.path);
	}
}
|
|
116
|
+
/**
 * Partitions tracked paths into read-only vs modified lists, both sorted.
 * A file that was both read and written/edited counts only as modified.
 */
function computeFileLists(fileOps) {
	const modified = new Set();
	for (const f of fileOps.edited) modified.add(f);
	for (const f of fileOps.written) modified.add(f);
	const readFiles = [...fileOps.read].filter((f) => !modified.has(f));
	readFiles.sort();
	return { readFiles, modifiedFiles: [...modified].sort() };
}
|
|
123
|
+
/**
 * Renders <read-files>/<modified-files> sections for appending to a summary.
 * Returns "" when both lists are empty; otherwise the sections preceded by a
 * blank line.
 */
function formatFileOperations(readFiles, modifiedFiles) {
	const section = (tag, files) => `<${tag}>\n${files.join("\n")}\n</${tag}>`;
	const sections = [];
	if (readFiles.length > 0) sections.push(section("read-files", readFiles));
	if (modifiedFiles.length > 0) sections.push(section("modified-files", modifiedFiles));
	return sections.length === 0 ? "" : `\n\n${sections.join("\n\n")}`;
}
|
|
130
|
+
// Cap applied to each serialized tool result before summarization.
const TOOL_RESULT_MAX_CHARS = 2e3;
/** Caps text at maxChars, appending a note about how many characters were dropped. */
function truncateForSummary(text, maxChars) {
	const overflow = text.length - maxChars;
	if (overflow <= 0) return text;
	return `${text.slice(0, maxChars)}\n\n[... ${overflow} more characters truncated]`;
}
|
|
136
|
+
/** Serialize messages to text so the summarization model doesn't treat it as a conversation to continue. */
function serializeConversation(messages) {
	const parts = [];
	for (const msg of messages) {
		switch (msg.role) {
			case "user": {
				const text = typeof msg.content === "string"
					? msg.content
					: msg.content.filter((c) => c.type === "text").map((c) => c.text).join("");
				if (text) parts.push(`[User]: ${text}`);
				break;
			}
			case "assistant": {
				const texts = [];
				const thoughts = [];
				const calls = [];
				for (const block of msg.content) {
					if (block.type === "text") texts.push(block.text);
					else if (block.type === "thinking") thoughts.push(block.thinking);
					else if (block.type === "toolCall") {
						const rendered = Object.entries(block.arguments)
							.map(([key, value]) => `${key}=${JSON.stringify(value)}`)
							.join(", ");
						calls.push(`${block.name}(${rendered})`);
					}
				}
				// Emit in a fixed order: thinking, visible text, then tool calls.
				if (thoughts.length > 0) parts.push(`[Assistant thinking]: ${thoughts.join("\n")}`);
				if (texts.length > 0) parts.push(`[Assistant]: ${texts.join("\n")}`);
				if (calls.length > 0) parts.push(`[Assistant tool calls]: ${calls.join("; ")}`);
				break;
			}
			case "toolResult": {
				const text = msg.content.filter((c) => c.type === "text").map((c) => c.text).join("");
				if (text) parts.push(`[Tool result]: ${truncateForSummary(text, TOOL_RESULT_MAX_CHARS)}`);
				break;
			}
		}
	}
	return parts.join("\n\n");
}
|
|
164
|
+
const SUMMARIZATION_SYSTEM_PROMPT = "You are a context summarization assistant. Your task is to read a conversation between a user and an AI coding assistant, then produce a structured summary following the exact format specified.\n\nDo NOT continue the conversation. Do NOT respond to any questions in the conversation. ONLY output the structured summary.";
|
|
165
|
+
const SUMMARIZATION_PROMPT = `The messages above are a conversation to summarize. Create a structured context checkpoint summary that another LLM will use to continue the work.
|
|
166
|
+
|
|
167
|
+
Use this EXACT format:
|
|
168
|
+
|
|
169
|
+
## Goal
|
|
170
|
+
[What is the user trying to accomplish? Can be multiple items if the session covers different tasks.]
|
|
171
|
+
|
|
172
|
+
## Constraints & Preferences
|
|
173
|
+
- [Any constraints, preferences, or requirements mentioned by user]
|
|
174
|
+
- [Or "(none)" if none were mentioned]
|
|
175
|
+
|
|
176
|
+
## Progress
|
|
177
|
+
### Done
|
|
178
|
+
- [x] [Completed tasks/changes]
|
|
179
|
+
|
|
180
|
+
### In Progress
|
|
181
|
+
- [ ] [Current work]
|
|
182
|
+
|
|
183
|
+
### Blocked
|
|
184
|
+
- [Issues preventing progress, if any]
|
|
185
|
+
|
|
186
|
+
## Key Decisions
|
|
187
|
+
- **[Decision]**: [Brief rationale]
|
|
188
|
+
|
|
189
|
+
## Next Steps
|
|
190
|
+
1. [Ordered list of what should happen next]
|
|
191
|
+
|
|
192
|
+
## Critical Context
|
|
193
|
+
- [Any data, examples, or references needed to continue]
|
|
194
|
+
- [Or "(none)" if not applicable]
|
|
195
|
+
|
|
196
|
+
Keep each section concise. Preserve exact file paths, function names, and error messages.`;
|
|
197
|
+
const UPDATE_SUMMARIZATION_PROMPT = `The messages above are NEW conversation messages to incorporate into the existing summary provided in <previous-summary> tags.
|
|
198
|
+
|
|
199
|
+
Update the existing structured summary with new information. RULES:
|
|
200
|
+
- PRESERVE all existing information from the previous summary
|
|
201
|
+
- ADD new progress, decisions, and context from the new messages
|
|
202
|
+
- UPDATE the Progress section: move items from "In Progress" to "Done" when completed
|
|
203
|
+
- UPDATE "Next Steps" based on what was accomplished
|
|
204
|
+
- PRESERVE exact file paths, function names, and error messages
|
|
205
|
+
- If something is no longer relevant, you may remove it
|
|
206
|
+
|
|
207
|
+
Use this EXACT format:
|
|
208
|
+
|
|
209
|
+
## Goal
|
|
210
|
+
[Preserve existing goals, add new ones if the task expanded]
|
|
211
|
+
|
|
212
|
+
## Constraints & Preferences
|
|
213
|
+
- [Preserve existing, add new ones discovered]
|
|
214
|
+
|
|
215
|
+
## Progress
|
|
216
|
+
### Done
|
|
217
|
+
- [x] [Include previously done items AND newly completed items]
|
|
218
|
+
|
|
219
|
+
### In Progress
|
|
220
|
+
- [ ] [Current work - update based on progress]
|
|
221
|
+
|
|
222
|
+
### Blocked
|
|
223
|
+
- [Current blockers - remove if resolved]
|
|
224
|
+
|
|
225
|
+
## Key Decisions
|
|
226
|
+
- **[Decision]**: [Brief rationale] (preserve all previous, add new)
|
|
227
|
+
|
|
228
|
+
## Next Steps
|
|
229
|
+
1. [Update based on current state]
|
|
230
|
+
|
|
231
|
+
## Critical Context
|
|
232
|
+
- [Preserve important context, add new if needed]
|
|
233
|
+
|
|
234
|
+
Keep each section concise. Preserve exact file paths, function names, and error messages.`;
|
|
235
|
+
const TURN_PREFIX_SUMMARIZATION_PROMPT = `This is the PREFIX of a turn that was too large to keep. The SUFFIX (recent work) is retained.
|
|
236
|
+
|
|
237
|
+
Summarize the prefix to provide context for the retained suffix:
|
|
238
|
+
|
|
239
|
+
## Original Request
|
|
240
|
+
[What did the user ask for in this turn?]
|
|
241
|
+
|
|
242
|
+
## Early Progress
|
|
243
|
+
- [Key decisions and work done in the prefix]
|
|
244
|
+
|
|
245
|
+
## Context for Suffix
|
|
246
|
+
- [Information needed to understand the retained recent work]
|
|
247
|
+
|
|
248
|
+
Be concise. Focus on what's needed to understand the kept suffix.`;
|
|
249
|
+
/** Valid cut points: user or assistant messages. Never cut at toolResult. */
function findValidCutPoints(messages, start, end) {
	const indices = [];
	messages.slice(start, end).forEach((msg, offset) => {
		if (msg.role === "user" || msg.role === "assistant") indices.push(start + offset);
	});
	return indices;
}
|
|
258
|
+
/**
 * Walks backwards from `index` down to `start` (inclusive) looking for the
 * user message that began the current turn. Returns -1 when none is in range.
 */
function findTurnStartIndex(messages, index, start) {
	let i = index;
	while (i >= start) {
		if (messages[i].role === "user") return i;
		i--;
	}
	return -1;
}
|
|
262
|
+
/**
 * Chooses the first kept message so the retained suffix holds roughly
 * `keepRecentTokens`. Accumulates estimated tokens from the end of the range;
 * once the budget is reached, snaps the cut forward to the nearest valid cut
 * point (user/assistant message). When the cut lands mid-turn (on an assistant
 * message), also reports the turn's starting user message so the prefix can be
 * summarized separately.
 */
function findCutPoint(messages, start, end, keepRecentTokens) {
	const cutPoints = findValidCutPoints(messages, start, end);
	if (cutPoints.length === 0) {
		return { firstKeptIndex: start, turnStartIndex: -1, isSplitTurn: false };
	}
	// Default: keep everything from the earliest valid cut point.
	let cutIndex = cutPoints[0];
	let accumulated = 0;
	for (let i = end - 1; i >= start; i--) {
		accumulated += estimateTokens(messages[i]);
		if (accumulated < keepRecentTokens) continue;
		const snapped = cutPoints.find((p) => p >= i);
		if (snapped !== undefined) cutIndex = snapped;
		break;
	}
	const cutsAtUser = messages[cutIndex].role === "user";
	const turnStartIndex = cutsAtUser ? -1 : findTurnStartIndex(messages, cutIndex, start);
	return {
		firstKeptIndex: cutIndex,
		turnStartIndex,
		isSplitTurn: !cutsAtUser && turnStartIndex !== -1
	};
}
|
|
290
|
+
/**
 * Pure function — no I/O. Finds cut point, extracts messages to summarize, tracks file ops.
 *
 * Returns undefined when there is nothing to compact: empty history, or the
 * chosen cut point does not advance past the previous compaction boundary.
 * File lists from the previous compaction's details are carried forward so the
 * cumulative read/modified lists survive repeated compactions.
 */
function prepareCompaction(messages, settings, previousCompaction) {
	if (messages.length === 0) return void 0;
	// Never re-summarize messages already covered by a previous compaction.
	const boundaryStart = previousCompaction ? previousCompaction.firstKeptIndex : 0;
	const boundaryEnd = messages.length;
	const tokensBefore = estimateContextTokens(messages).tokens;
	const cutPoint = findCutPoint(messages, boundaryStart, boundaryEnd, settings.keepRecentTokens);
	// Cut did not advance past the previous boundary — compaction gains nothing.
	if (cutPoint.firstKeptIndex <= boundaryStart) return void 0;
	// For a split turn, ordinary history ends at the turn's start; the turn
	// prefix is summarized separately from the older history.
	const historyEnd = cutPoint.isSplitTurn ? cutPoint.turnStartIndex : cutPoint.firstKeptIndex;
	const messagesToSummarize = messages.slice(boundaryStart, historyEnd);
	const turnPrefixMessages = cutPoint.isSplitTurn ? messages.slice(cutPoint.turnStartIndex, cutPoint.firstKeptIndex) : [];
	const fileOps = createFileOps();
	// Seed the accumulators with file lists carried over from the previous compaction.
	if (previousCompaction?.details) {
		for (const f of previousCompaction.details.readFiles ?? []) fileOps.read.add(f);
		// Previously-modified files go into `edited`; computeFileLists treats
		// edited and written identically when building the modified list.
		for (const f of previousCompaction.details.modifiedFiles ?? []) fileOps.edited.add(f);
	}
	for (const msg of messagesToSummarize) extractFileOpsFromMessage(msg, fileOps);
	for (const msg of turnPrefixMessages) extractFileOpsFromMessage(msg, fileOps);
	return {
		firstKeptIndex: cutPoint.firstKeptIndex,
		messagesToSummarize,
		turnPrefixMessages,
		isSplitTurn: cutPoint.isSplitTurn,
		tokensBefore,
		previousSummary: previousCompaction?.summary,
		fileOps,
		settings
	};
}
|
|
319
|
+
/**
 * Generates a structured context summary via a single LLM completion.
 * Uses the incremental-update prompt when a previous summary exists,
 * otherwise the full summarization prompt.
 *
 * The summary's maxTokens is capped at 80% of the reserve budget (at most
 * 16k) so the summary itself fits inside the reserved headroom.
 * Throws when the completion reports an "error" stop reason.
 */
async function generateSummary(currentMessages, model, reserveTokens, apiKey, signal, previousSummary) {
	const maxTokens = Math.min(Math.floor(.8 * reserveTokens), 16e3);
	const basePrompt = previousSummary ? UPDATE_SUMMARIZATION_PROMPT : SUMMARIZATION_PROMPT;
	// Conversation is serialized to plain text so the model summarizes it
	// instead of treating it as a conversation to continue.
	let promptText = `<conversation>\n${serializeConversation(currentMessages)}\n</conversation>\n\n`;
	if (previousSummary) promptText += `<previous-summary>\n${previousSummary}\n</previous-summary>\n\n`;
	promptText += basePrompt;
	const summarizationMessages = [{
		role: "user",
		content: [{
			type: "text",
			text: promptText
		}],
		timestamp: Date.now()
	}];
	const completionOptions = {
		maxTokens,
		signal
	};
	if (apiKey) completionOptions.apiKey = apiKey;
	if (model.reasoning) completionOptions.reasoning = "high";
	const response = await completeSimple(model, {
		systemPrompt: SUMMARIZATION_SYSTEM_PROMPT,
		messages: summarizationMessages
	}, completionOptions);
	if (response.stopReason === "error") throw new Error(`Summarization failed: ${response.errorMessage || "Unknown error"}`);
	// Concatenate all text blocks of the response; other block types are ignored.
	return response.content.filter((c) => c.type === "text").map((c) => c.text).join("\n");
}
|
|
346
|
+
/**
 * Summarizes the prefix of a turn that was too large to keep in full
 * (the recent suffix of the turn is retained verbatim elsewhere).
 *
 * Budget is smaller than the main summary: 50% of the reserve (at most 16k).
 * Throws when the completion reports an "error" stop reason.
 */
async function generateTurnPrefixSummary(messages, model, reserveTokens, apiKey, signal) {
	const maxTokens = Math.min(Math.floor(.5 * reserveTokens), 16e3);
	const summarizationMessages = [{
		role: "user",
		content: [{
			type: "text",
			text: `<conversation>\n${serializeConversation(messages)}\n</conversation>\n\n${TURN_PREFIX_SUMMARIZATION_PROMPT}`
		}],
		timestamp: Date.now()
	}];
	const completionOptions = {
		maxTokens,
		signal
	};
	if (apiKey) completionOptions.apiKey = apiKey;
	const response = await completeSimple(model, {
		systemPrompt: SUMMARIZATION_SYSTEM_PROMPT,
		messages: summarizationMessages
	}, completionOptions);
	if (response.stopReason === "error") throw new Error(`Turn prefix summarization failed: ${response.errorMessage || "Unknown error"}`);
	// Concatenate all text blocks of the response; other block types are ignored.
	return response.content.filter((c) => c.type === "text").map((c) => c.text).join("\n");
}
|
|
368
|
+
/**
 * Runs the summarization for a prepared compaction and assembles the final
 * compaction record (summary text + file-operation details).
 *
 * Split turns run two summarizations in parallel: the older history (or a
 * stub when there is none) and the oversized turn's prefix. The combined
 * summary always ends with the formatted read/modified file lists.
 * Errors from the underlying completions propagate to the caller.
 */
async function compact(preparation, model, apiKey, signal) {
	const { firstKeptIndex, messagesToSummarize, turnPrefixMessages, isSplitTurn, tokensBefore, previousSummary, fileOps, settings } = preparation;
	let summary;
	if (isSplitTurn && turnPrefixMessages.length > 0) {
		// History and turn-prefix summaries are independent — run them in parallel.
		const [historyResult, turnPrefixResult] = await Promise.all([messagesToSummarize.length > 0 ? generateSummary(messagesToSummarize, model, settings.reserveTokens, apiKey, signal, previousSummary) : Promise.resolve("No prior history."), generateTurnPrefixSummary(turnPrefixMessages, model, settings.reserveTokens, apiKey, signal)]);
		summary = `${historyResult}\n\n---\n\n**Turn Context (split turn):**\n\n${turnPrefixResult}`;
	} else summary = await generateSummary(messagesToSummarize, model, settings.reserveTokens, apiKey, signal, previousSummary);
	const { readFiles, modifiedFiles } = computeFileLists(fileOps);
	summary += formatFileOperations(readFiles, modifiedFiles);
	return {
		summary,
		firstKeptIndex,
		tokensBefore,
		details: {
			readFiles,
			modifiedFiles
		}
	};
}
|
|
387
|
+
|
|
388
|
+
//#endregion
|
|
389
|
+
//#region src/result.ts
|
|
390
|
+
const HEADLESS_PREAMBLE = "You are running in headless mode with no human operator. Work autonomously — never ask questions, never wait for user input. Make your best judgment and proceed independently.";
|
|
391
|
+
/**
 * Renders the result-format instructions: the JSON schema (with the `$schema`
 * meta key stripped) followed by delimiter examples for object and string
 * results.
 */
function buildResultInstructions(schema) {
	const { $schema: _ignored, ...jsonSchema } = toJsonSchema(schema, { errorMode: "ignore" });
	const schemaJson = JSON.stringify(jsonSchema, null, 2);
	return `\n\`\`\`json\n${schemaJson}\n\`\`\`\n\nExample: (Object)\n---RESULT_START---\n{"key": "value"}\n---RESULT_END---\n\nExample: (String)\n---RESULT_START---\nHello, world!\n---RESULT_END---`;
}
|
410
|
+
/** Follow-up prompt used when the LLM forgets to include RESULT_START/RESULT_END delimiters. */
function buildResultExtractionPrompt(schema) {
	const header = "Your task is complete. Now respond with ONLY your final result.\n" +
		"No explanation, no preamble — just the result in the following format, conforming to this schema:\n";
	return header + buildResultInstructions(schema);
}
|
418
|
+
/**
 * Assembles the headless-mode prompt for a skill: preamble, the skill's
 * instructions, optional JSON arguments, and (when a schema is given) the
 * result-format contract.
 */
function buildSkillPrompt(skillInstructions, args, schema) {
	const parts = [HEADLESS_PREAMBLE, "", skillInstructions];
	const hasArgs = args && Object.keys(args).length > 0;
	if (hasArgs) parts.push(`\nArguments:\n${JSON.stringify(args, null, 2)}`);
	if (schema) {
		parts.push(
			"When complete, you MUST output your result between these exact delimiters conforming to this schema:",
			buildResultInstructions(schema)
		);
	}
	return parts.join("\n");
}
|
431
|
+
/**
 * Assembles a headless-mode prompt from free text, appending the
 * result-format contract when a schema is given.
 */
function buildPromptText(text, schema) {
	const parts = [HEADLESS_PREAMBLE, "", text];
	if (schema) {
		parts.push(
			"When complete, you MUST output your result between these exact delimiters conforming to this schema:",
			buildResultInstructions(schema)
		);
	}
	return parts.join("\n");
}
|
443
|
+
/** Extract the last ---RESULT_START---/---RESULT_END--- block from agent text and validate against schema. */
function extractResult(text, schema) {
	const resultBlock = extractLastResultBlock(text);
	if (resultBlock === null) {
		throw new ResultExtractionError("No ---RESULT_START--- / ---RESULT_END--- block found in the assistant response.", text);
	}
	// Object/array schemas expect JSON; anything else is validated as raw text.
	const expectsJson = schema.type === "object" || schema.type === "array";
	let result = resultBlock;
	if (expectsJson) {
		try {
			result = JSON.parse(resultBlock);
		} catch {
			throw new ResultExtractionError("Result block contains invalid JSON for the expected schema.", resultBlock);
		}
	}
	const parsed = v.safeParse(schema, result);
	if (parsed.success) return parsed.output;
	const issues = parsed.issues.map((issue) => issue.message).join(", ");
	throw new ResultExtractionError(`Result does not match the expected schema: ${issues}`, resultBlock);
}
|
457
|
+
/** Last delimited result block in `text`, trimmed; null when no block exists. */
function extractLastResultBlock(text) {
	let lastBlock = null;
	for (const match of text.matchAll(/---RESULT_START---\s*\n([\s\S]*?)---RESULT_END---/g)) {
		lastBlock = match[1]?.trim() ?? null;
	}
	return lastBlock;
}
|
463
|
+
/**
 * Thrown when the agent's final output lacks a valid, schema-conforming
 * result block. `rawOutput` carries the offending text for diagnostics.
 */
var ResultExtractionError = class extends Error {
	constructor(message, rawOutput) {
		super(message);
		this.name = "ResultExtractionError";
		this.rawOutput = rawOutput;
	}
};
|
470
|
+
|
|
471
|
+
//#endregion
|
|
472
|
+
//#region src/env-utils.ts
|
|
473
|
+
/**
 * Creates a command-scoped environment. Environments exposing `scope()` get a
 * scoped copy; otherwise the env is returned as-is — and requesting commands
 * is an error, since nothing can execute them.
 */
async function createScopedEnv(env, commands) {
	if (env.scope) {
		return env.scope({ commands });
	}
	if (commands.length === 0) {
		return env;
	}
	throw new Error("[flue] Cannot use commands: this environment does not support scoped command execution. Commands are only available in BashFactory sandbox mode. Remote sandboxes handle command execution at the platform level.");
}
|
|
478
|
+
/**
 * Merges default and per-call commands by name; per-call entries override
 * defaults while keeping the default's position in the list.
 */
function mergeCommands(defaults, perCall) {
	if (!perCall || perCall.length === 0) return defaults;
	if (defaults.length === 0) return perCall;
	const byName = new Map([...defaults, ...perCall].map((cmd) => [cmd.name, cmd]));
	return [...byName.values()];
}
|
|
486
|
+
|
|
487
|
+
//#endregion
|
|
488
|
+
//#region src/roles.ts
|
|
489
|
+
/**
 * Throws with a helpful listing when `roleName` is given but not present in
 * the registry; a missing/empty role name or a registered role is a no-op.
 */
function assertRoleExists(roles, roleName) {
	if (!roleName || roles[roleName]) return;
	const available = Object.keys(roles);
	const list = available.length === 0 ? "(none defined)" : available.join(", ");
	throw new Error(`[flue] Role "${roleName}" not registered. Available roles: ${list}. Define roles as markdown files under \`.flue/roles/\`.`);
}
|
496
|
+
/**
 * Resolution order: per-call role, then session role, then agent role.
 * The winning role is validated against the registry before being returned.
 */
function resolveEffectiveRole(options) {
	const { callRole, sessionRole, agentRole, roles } = options;
	const role = callRole ?? sessionRole ?? agentRole;
	assertRoleExists(roles, role);
	return role;
}
|
|
501
|
+
/** Model configured for a role, if any; validates the role name first. */
function resolveRoleModel(roles, roleName) {
	assertRoleExists(roles, roleName);
	if (!roleName) return void 0;
	return roles[roleName]?.model;
}
|
|
505
|
+
|
|
506
|
+
//#endregion
|
|
507
|
+
//#region src/session-history.ts
|
|
508
|
+
/**
 * Append-only, branching session history. Entries form a tree via `parentId`;
 * `leafId` marks the tip of the active branch. Entry types: "message",
 * "compaction" (summary replacing older history), and "branch_summary"
 * (summary of an abandoned branch).
 */
var SessionHistory = class SessionHistory {
	// All entries ever appended (including non-active branches).
	entries;
	// Entry lookup by id.
	byId;
	// Tip of the active branch; null for an empty history.
	leafId;
	constructor(entries, leafId) {
		// Defensive copy — callers keep no handle on internal state.
		this.entries = [...entries];
		this.leafId = leafId;
		this.byId = new Map(this.entries.map((entry) => [entry.id, entry]));
	}
	/** Fresh history with no entries. */
	static empty() {
		return new SessionHistory([], null);
	}
	/** Restores a history from persisted data; null/undefined yields an empty history. */
	static fromData(data) {
		if (!data) return SessionHistory.empty();
		return new SessionHistory(data.entries, data.leafId);
	}
	getLeafId() {
		return this.leafId;
	}
	/** Entries from root to the active leaf, in chronological order. */
	getActivePath() {
		const path = [];
		let current = this.leafId ? this.byId.get(this.leafId) : void 0;
		while (current) {
			path.push(current);
			current = current.parentId ? this.byId.get(current.parentId) : void 0;
		}
		return path.reverse();
	}
	/**
	 * Builds the LLM context for the active path. When a compaction exists,
	 * the context is: the compaction's summary (as a synthetic user message),
	 * then the kept entries preceding the compaction, then everything after it.
	 */
	buildContextEntries() {
		const path = this.getActivePath();
		const latestCompactionIndex = findLatestCompactionIndex(path);
		if (latestCompactionIndex === -1) return pathToContextEntries(path);
		const compaction = path[latestCompactionIndex];
		const firstKeptIndex = path.findIndex((entry) => entry.id === compaction.firstKeptEntryId);
		// If the kept-entry id is not on the active path, keep nothing before the compaction.
		const keptStart = firstKeptIndex >= 0 ? firstKeptIndex : latestCompactionIndex + 1;
		const context = [{
			message: createContextSummaryMessage(compaction.summary, compaction.timestamp),
			entry: compaction
		}];
		context.push(...pathToContextEntries(path.slice(keptStart, latestCompactionIndex)));
		context.push(...pathToContextEntries(path.slice(latestCompactionIndex + 1)));
		return context;
	}
	/** Messages only (no entry metadata) of the built context. */
	buildContext() {
		return this.buildContextEntries().map((entry) => entry.message);
	}
	/** Most recent compaction entry on the active path, or undefined. */
	getLatestCompaction() {
		const path = this.getActivePath();
		for (let i = path.length - 1; i >= 0; i--) {
			const entry = path[i];
			if (entry.type === "compaction") return entry;
		}
	}
	/** Appends a message entry at the leaf; returns the new entry's id. */
	appendMessage(message, source) {
		const entry = {
			type: "message",
			id: generateEntryId(this.byId),
			parentId: this.leafId,
			timestamp: (/* @__PURE__ */ new Date()).toISOString(),
			message,
			source
		};
		this.appendEntry(entry);
		return entry.id;
	}
	/** Appends several messages in order; returns the new entry ids. */
	appendMessages(messages, source) {
		return messages.map((message) => this.appendMessage(message, source));
	}
	/**
	 * Appends a compaction entry. `firstKeptEntryId` must reference an existing
	 * entry — the summary replaces everything before it in future contexts.
	 */
	appendCompaction(input) {
		if (!this.byId.has(input.firstKeptEntryId)) throw new Error(`[flue] Cannot compact: entry "${input.firstKeptEntryId}" does not exist.`);
		const entry = {
			type: "compaction",
			id: generateEntryId(this.byId),
			parentId: this.leafId,
			timestamp: (/* @__PURE__ */ new Date()).toISOString(),
			summary: input.summary,
			firstKeptEntryId: input.firstKeptEntryId,
			tokensBefore: input.tokensBefore,
			details: input.details
		};
		this.appendEntry(entry);
		return entry.id;
	}
	/** Appends a summary of an abandoned branch (rooted at `fromId`); returns the new entry's id. */
	appendBranchSummary(summary, fromId, details) {
		const entry = {
			type: "branch_summary",
			id: generateEntryId(this.byId),
			parentId: this.leafId,
			timestamp: (/* @__PURE__ */ new Date()).toISOString(),
			fromId,
			summary,
			details
		};
		this.appendEntry(entry);
		return entry.id;
	}
	/**
	 * Removes the leaf entry iff it is a message entry wrapping exactly this
	 * `message` object (reference equality). Returns true when removed.
	 */
	removeLeafMessage(message) {
		if (!this.leafId) return false;
		const leaf = this.byId.get(this.leafId);
		if (!leaf || leaf.type !== "message" || leaf.message !== message) return false;
		this.byId.delete(leaf.id);
		this.entries = this.entries.filter((entry) => entry.id !== leaf.id);
		this.leafId = leaf.parentId;
		return true;
	}
	/** Serializes to the persisted form (schema version 2). */
	toData(metadata, createdAt, updatedAt) {
		return {
			version: 2,
			entries: [...this.entries],
			leafId: this.leafId,
			metadata,
			createdAt,
			updatedAt
		};
	}
	/** Internal: appends an entry and advances the leaf pointer. */
	appendEntry(entry) {
		this.entries.push(entry);
		this.byId.set(entry.id, entry);
		this.leafId = entry.id;
	}
};
|
|
629
|
+
/**
 * Converts an active-path slice into context entries: message entries pass
 * through; branch summaries become synthetic user messages; compaction
 * entries are dropped (handled separately by buildContextEntries).
 */
function pathToContextEntries(path) {
	const context = [];
	for (const entry of path) {
		if (entry.type === "message") {
			context.push({ message: entry.message, entry });
		} else if (entry.type === "branch_summary") {
			context.push({
				message: createUserContextMessage(`[Branch Summary]\n\n${entry.summary}`, entry.timestamp),
				entry
			});
		}
	}
	return context;
}
|
|
641
|
+
/** Index of the last compaction entry in `path`, or -1 when there is none. */
function findLatestCompactionIndex(path) {
	const offsetFromEnd = [...path].reverse().findIndex((entry) => entry.type === "compaction");
	return offsetFromEnd === -1 ? -1 : path.length - 1 - offsetFromEnd;
}
|
|
645
|
+
/** Wraps a compaction summary as a user message, prefixing the "[Context Summary]" header when absent. */
function createContextSummaryMessage(summary, timestamp) {
	const text = summary.startsWith("[Context Summary]") ? summary : `[Context Summary]\n\n${summary}`;
	return createUserContextMessage(text, timestamp);
}
|
|
648
|
+
/** Builds a synthetic user message with a single text block, timestamped from an ISO string. */
function createUserContextMessage(text, timestamp) {
	const content = [{ type: "text", text }];
	return {
		role: "user",
		content,
		timestamp: new Date(timestamp).getTime()
	};
}
|
|
658
|
+
/** Generate a short (8-char) id not already present in `byId`; falls back to a full UUID after 100 collisions. */
function generateEntryId(byId) {
	let attempts = 100;
	while (attempts-- > 0) {
		const candidate = crypto.randomUUID().slice(0, 8);
		if (!byId.has(candidate)) return candidate;
	}
	// Astronomically unlikely: every short candidate collided — use a full UUID instead.
	return crypto.randomUUID();
}
|
|
665
|
+
|
|
666
|
+
//#endregion
|
|
667
|
+
//#region src/session.ts
|
|
668
|
+
/** Internal session implementation. Not exported publicly — wrapped by FlueSession. */
|
|
669
|
+
// Cap on the shell transcript text injected into history (50 KiB); leading output beyond this is trimmed.
const MAX_SHELL_HISTORY_CHARS = 50 * 1024;
// Maximum nesting of task sub-sessions before runTask() refuses to spawn another.
const MAX_TASK_DEPTH = 4;
|
|
671
|
+
/** In-memory session store. Sessions persist for the lifetime of the process. */
var InMemorySessionStore = class {
	store = /* @__PURE__ */ new Map();
	/** Persist `data` under `id`, overwriting any previous value. */
	async save(id, data) {
		this.store.set(id, data);
	}
	/** Fetch the data saved under `id`, or `null` when nothing is stored. */
	async load(id) {
		const data = this.store.get(id);
		return data === undefined ? null : data;
	}
	/** Remove `id` from the store; silently succeeds when absent. */
	async delete(id) {
		this.store.delete(id);
	}
};
|
|
684
|
+
var Session = class {
	// Unique session id; stamped onto every emitted event.
	id;
	// Arbitrary persisted metadata (also carries the `taskSessions` bookkeeping list).
	metadata;
	get role() {
		return this.sessionRole;
	}
	// Underlying pi-agent-core Agent driving the LLM loop.
	harness;
	// Key under which this session's data is saved in `store`.
	storageKey;
	// Agent-wide configuration (model, roles, skills, compaction, systemPrompt, ...).
	config;
	// Execution environment used for shell commands and tool creation.
	env;
	// Persistence backend (save/load/delete by storage key).
	store;
	// Branching message history; source of truth for harness context rebuilds.
	history;
	// ISO timestamp set on first save; preserved across loads.
	createdAt;
	// Effective compaction settings (config values with defaults filled in).
	compactionSettings;
	// Guards against repeated overflow-recovery compaction attempts.
	overflowRecoveryAttempted = false;
	// Set while a compaction is running so abort() can cancel it.
	compactionAbortController;
	// Optional subscriber for session events (emit() adds sessionId).
	eventCallback;
	// Session-level command allowlist merged into each call's scope.
	agentCommands;
	// Session-level custom tool definitions.
	agentTools;
	// True once close()/delete() has run; all operations then throw.
	deleted;
	deleted = false;
	// Name of the operation currently running (enforces one-at-a-time).
	activeOperation;
	// Child task sessions currently in flight; aborted with the parent.
	activeTasks = /* @__PURE__ */ new Set();
	// Role bound to this session (exposed via the `role` getter).
	sessionRole;
	// Nesting depth for task sub-sessions (bounded by MAX_TASK_DEPTH).
	taskDepth;
	// Factory for child task sessions; absent when tasks are disallowed.
	createTaskSession;
	// Hook invoked when the session is closed or deleted.
	onDelete;
|
|
710
|
+
/**
 * Wire up a session: copy options, restore persisted history/metadata,
 * resolve compaction settings, build the initial tool set, and subscribe
 * to harness events so they are re-emitted as session events.
 */
constructor(options) {
	this.id = options.id;
	this.storageKey = options.storageKey;
	this.config = options.config;
	this.env = options.env;
	this.store = options.store;
	this.agentCommands = options.agentCommands ?? [];
	this.agentTools = options.agentTools ?? [];
	this.sessionRole = options.sessionRole;
	this.taskDepth = options.taskDepth ?? 0;
	this.createTaskSession = options.createTaskSession;
	this.onDelete = options.onDelete;
	// Restore persisted state when re-opening an existing session.
	this.metadata = options.existingData?.metadata ?? {};
	this.createdAt = options.existingData?.createdAt;
	this.history = SessionHistory.fromData(options.existingData);
	// Per-setting fallback to defaults so a partial config object works.
	const cc = this.config.compaction;
	this.compactionSettings = {
		enabled: cc?.enabled ?? DEFAULT_COMPACTION_SETTINGS.enabled,
		reserveTokens: cc?.reserveTokens ?? DEFAULT_COMPACTION_SETTINGS.reserveTokens,
		keepRecentTokens: cc?.keepRecentTokens ?? DEFAULT_COMPACTION_SETTINGS.keepRecentTokens
	};
	const systemPrompt = this.config.systemPrompt;
	// Fail fast on unknown role names (both the agent default and this session's role).
	assertRoleExists(this.config.roles, this.config.role);
	assertRoleExists(this.config.roles, this.sessionRole);
	const tools = [...this.createBuiltinTools(this.env, this.agentCommands, []), ...this.createCustomTools(this.agentTools)];
	// Replay the saved history into the harness's starting context.
	const previousMessages = this.history.buildContext();
	this.harness = new Agent({
		initialState: {
			systemPrompt,
			model: this.config.model,
			tools,
			messages: previousMessages
		},
		toolExecution: "parallel"
	});
	this.eventCallback = options.onAgentEvent;
	// Translate harness events into this session's event vocabulary.
	this.harness.subscribe(async (event) => {
		switch (event.type) {
			case "agent_start":
				this.emit({ type: "agent_start" });
				break;
			case "message_update": {
				// Only streaming text deltas are forwarded; other message events are dropped.
				const aEvent = event.assistantMessageEvent;
				if (aEvent.type === "text_delta") this.emit({
					type: "text_delta",
					text: aEvent.delta
				});
				break;
			}
			case "tool_execution_start":
				this.emit({
					type: "tool_start",
					toolName: event.toolName,
					toolCallId: event.toolCallId,
					args: event.args
				});
				break;
			case "tool_execution_end":
				this.emit({
					type: "tool_end",
					toolName: event.toolName,
					toolCallId: event.toolCallId,
					isError: event.isError,
					result: event.result
				});
				break;
			case "turn_end":
				this.emit({ type: "turn_end" });
				break;
			// agent_end is intentionally ignored; runOperation() emits "idle" instead.
			case "agent_end": break;
		}
	});
}
|
|
783
|
+
/**
 * Send a user prompt through the agent and return either free text or,
 * when `options.result` provides a schema, a structured result extracted
 * (with one retry) from the assistant's reply. Runs exclusively — only one
 * operation may be active per session.
 */
async prompt(text, options) {
	return this.runOperation("prompt", async () => {
		const role = this.resolveEffectiveRole(options?.role);
		const schema = options?.result;
		// Embeds result-format instructions into the prompt when a schema is given.
		const fullPrompt = buildPromptText(text, schema);
		const effectiveCommands = mergeCommands(this.agentCommands, options?.commands);
		// Temporarily swaps model/system prompt/tools on the harness for this call only.
		return this.withScopedRuntime({
			commands: effectiveCommands,
			tools: options?.tools ?? [],
			role,
			model: options?.model,
			callSite: "this prompt() call"
		}, async () => {
			// Remember where the transcript ended so only new messages get persisted.
			const beforeLength = this.harness.state.messages.length;
			await this.harness.prompt(fullPrompt);
			await this.harness.waitForIdle();
			await this.syncHarnessMessagesSince(beforeLength, "prompt");
			// May trigger threshold or overflow compaction after the turn.
			await this.checkLatestAssistantForCompaction();
			this.throwIfError("prompt");
			if (schema) return this.extractResultWithRetry(schema);
			return { text: this.getAssistantText() };
		});
	});
}
|
|
807
|
+
/**
 * Execute a named (or path-referenced) skill as a prompt. The skill is looked
 * up in `config.skills` first; names containing "/" or ending in .md/.markdown
 * are additionally tried as file paths via loadSkillByPath. Mirrors prompt():
 * structured result when `options.result` is given, otherwise assistant text.
 */
async skill(name, options) {
	return this.runOperation("skill", async () => {
		const role = this.resolveEffectiveRole(options?.role);
		let registeredSkill = this.config.skills[name];
		// Path-style fallback for skills not registered by name.
		if (!registeredSkill && (name.includes("/") || /\.(md|markdown)$/i.test(name))) {
			const loaded = await loadSkillByPath(this.env, this.env.cwd, name);
			if (loaded) registeredSkill = loaded;
		}
		if (!registeredSkill) {
			const available = Object.keys(this.config.skills).join(", ") || "(none)";
			throw new Error(`Skill "${name}" not registered. Available: ${available}. Skills can also be referenced by relative path under .agents/skills/ (e.g. "triage/reproduce.md").`);
		}
		const schema = options?.result;
		// Combines the skill's instructions with caller args and result-format hints.
		const skillPrompt = buildSkillPrompt(registeredSkill.instructions, options?.args, schema);
		const effectiveCommands = mergeCommands(this.agentCommands, options?.commands);
		return this.withScopedRuntime({
			commands: effectiveCommands,
			tools: options?.tools ?? [],
			role,
			model: options?.model,
			callSite: `this skill("${name}") call`
		}, async () => {
			const beforeLength = this.harness.state.messages.length;
			await this.harness.prompt(skillPrompt);
			await this.harness.waitForIdle();
			await this.syncHarnessMessagesSince(beforeLength, "skill");
			await this.checkLatestAssistantForCompaction();
			this.throwIfError(`skill("${name}")`);
			if (schema) return this.extractResultWithRetry(schema);
			return { text: this.getAssistantText() };
		});
	});
}
|
|
840
|
+
async task(text, options) {
|
|
841
|
+
return (await this.runTask(text, options, void 0)).output;
|
|
842
|
+
}
|
|
843
|
+
/**
 * Run a shell command in a command-scoped environment, record a transcript of
 * it as a user message in the history (so the model sees prior shell output),
 * and return { stdout, stderr, exitCode }. Exclusive with other operations.
 */
async shell(command, options) {
	return this.runOperation("shell", async () => {
		const effectiveCommands = mergeCommands(this.agentCommands, options?.commands);
		const result = await (await createScopedEnv(this.env, effectiveCommands)).exec(command, {
			env: options?.env,
			cwd: options?.cwd
		});
		// Normalize to the public ShellResult shape.
		const shellResult = {
			stdout: result.stdout,
			stderr: result.stderr,
			exitCode: result.exitCode
		};
		const message = this.createShellMessage(command, shellResult, options);
		this.history.appendMessage(message, "shell");
		// Rebuild the harness context so the next prompt sees the shell transcript.
		this.harness.state.messages = this.history.buildContext();
		await this.save();
		return shellResult;
	});
}
|
|
862
|
+
abort() {
|
|
863
|
+
this.harness.abort();
|
|
864
|
+
this.compactionAbortController?.abort();
|
|
865
|
+
for (const task of this.activeTasks) task.abort();
|
|
866
|
+
}
|
|
867
|
+
close() {
|
|
868
|
+
if (this.deleted) return;
|
|
869
|
+
this.deleted = true;
|
|
870
|
+
this.abort();
|
|
871
|
+
this.onDelete?.();
|
|
872
|
+
}
|
|
873
|
+
/**
 * Abort, mark deleted, and remove this session — plus every descendant task
 * session recorded in metadata.taskSessions — from storage. Idempotent.
 */
async delete() {
	if (this.deleted) return;
	this.deleted = true;
	this.abort();
	// Depth-first removal of the whole task-session tree rooted here.
	await deleteSessionTree(this.store, this.storageKey);
	this.onDelete?.();
}
|
|
880
|
+
/**
 * Resolve the role to use for a call by delegating to the module-level helper
 * with this session's role context. Presumably call-level overrides
 * session-level which overrides the agent default — confirm in the helper.
 */
resolveEffectiveRole(callRole) {
	return resolveEffectiveRole({
		roles: this.config.roles,
		agentRole: this.config.role,
		sessionRole: this.sessionRole,
		callRole
	});
}
|
|
888
|
+
/**
 * Precedence: call-level > role-level > agent-level default.
 * NOTE: role-level and call-level overrides only take effect when
 * `config.resolveModel` is provided; otherwise the agent default is used.
 * Throws (via requireModel) when no model can be resolved at all.
 */
resolveModelForCall(promptModel, roleName, callSite) {
	let model = this.config.model;
	const roleModel = resolveRoleModel(this.config.roles, roleName);
	if (roleModel && this.config.resolveModel) model = this.config.resolveModel(roleModel);
	if (promptModel && this.config.resolveModel) model = this.config.resolveModel(promptModel);
	return this.requireModel(model, callSite);
}
|
|
896
|
+
/**
 * Throws a clear, actionable error when no model is configured for a call.
 * Use with the resolved model (post-precedence) to guarantee we never hand
 * `undefined` to the underlying agent.
 * @param model the resolved model (may be undefined)
 * @param callSite human-readable description used in the error message
 */
requireModel(model, callSite) {
	if (model) return model;
	throw new Error(`[flue] No model configured for ${callSite}. Pass \`{ model: "provider/model-id" }\` to \`init()\` for an agent-wide default, or to this prompt()/skill() call for a one-off override.`);
}
|
|
905
|
+
buildSystemPrompt(roleName) {
|
|
906
|
+
const parts = [this.config.systemPrompt];
|
|
907
|
+
if (!roleName) return parts.join("\n\n");
|
|
908
|
+
const role = this.config.roles[roleName];
|
|
909
|
+
if (!role) return parts.join("\n\n");
|
|
910
|
+
parts.push(`<role name="${role.name}">\n${role.instructions}\n</role>`);
|
|
911
|
+
return parts.filter(Boolean).join("\n\n");
|
|
912
|
+
}
|
|
913
|
+
/**
 * Adapt user-supplied tool definitions to the harness's tool interface.
 * Validates names first (no duplicates, no built-in collisions), then wraps
 * each `execute(params, signal) -> string` as a single-text-block result.
 */
createCustomTools(tools) {
	this.validateCustomToolNames(tools);
	return tools.map((toolDef) => ({
		name: toolDef.name,
		label: toolDef.name,
		description: toolDef.description,
		parameters: toolDef.parameters,
		async execute(_toolCallId, params, signal) {
			// Pre-check only; the tool itself is responsible for honoring `signal` mid-run.
			if (signal?.aborted) throw new Error("Operation aborted");
			return {
				content: [{
					type: "text",
					text: await toolDef.execute(params, signal)
				}],
				details: { customTool: toolDef.name }
			};
		}
	}));
}
|
|
932
|
+
/**
 * Reject custom tools whose names collide with a built-in tool or with each
 * other. Throws on the first violation; no-op when all names are unique.
 */
validateCustomToolNames(tools) {
	const names = /* @__PURE__ */ new Set();
	for (const toolDef of tools) {
		if (BUILTIN_TOOL_NAMES.has(toolDef.name)) throw new Error(`[flue] Custom tool "${toolDef.name}" conflicts with a built-in tool. Built-in tools: ${[...BUILTIN_TOOL_NAMES].join(", ")}`);
		if (names.has(toolDef.name)) throw new Error(`[flue] Duplicate custom tool name "${toolDef.name}". Tool names must be unique.`);
		names.add(toolDef.name);
	}
}
|
|
940
|
+
/**
 * Build the built-in tool set for `env`, wiring the "task" tool back into this
 * session so the spawned sub-session inherits commands/tools/role/model scope.
 */
createBuiltinTools(env, commands, tools, role, model) {
	return createTools(env, {
		roles: this.config.roles,
		task: (params, signal) => this.runTaskForTool(params, commands, tools, role, model, signal)
	});
}
|
|
946
|
+
/**
 * Run `fn` with call-scoped model, system prompt, and tools installed on the
 * harness, restoring the previous values afterwards (even on throw).
 * Order matters: previous state is captured before any assignment so the
 * finally block always restores a consistent snapshot.
 */
async withScopedRuntime(options, fn) {
	// Session-level custom tools are always included; call-level tools are appended.
	const customTools = this.createCustomTools([...this.agentTools, ...options.tools]);
	const scopedEnv = await createScopedEnv(this.env, options.commands);
	const previousTools = this.harness.state.tools;
	const previousModel = this.harness.state.model;
	const previousSystemPrompt = this.harness.state.systemPrompt;
	this.harness.state.model = this.resolveModelForCall(options.model, options.role, options.callSite);
	this.harness.state.systemPrompt = this.buildSystemPrompt(options.role);
	this.harness.state.tools = [...this.createBuiltinTools(scopedEnv, options.commands, options.tools, options.role, options.model), ...customTools];
	try {
		return await fn();
	} finally {
		this.harness.state.tools = previousTools;
		this.harness.state.model = previousModel;
		this.harness.state.systemPrompt = previousSystemPrompt;
	}
}
|
|
963
|
+
/**
 * Adapter invoked by the built-in "task" tool: runs a child task session and
 * shapes its result as a tool result (text block + provenance details).
 * The tool call's explicit role wins over the role inherited from the caller.
 */
async runTaskForTool(params, commands, tools, inheritedRole, inheritedModel, signal) {
	const result = await this.runTask(params.prompt, {
		role: params.role ?? inheritedRole,
		inheritedModel,
		cwd: params.cwd,
		commands,
		tools
	}, signal);
	return {
		content: [{
			type: "text",
			// Placeholder keeps the tool result non-empty for the model.
			text: result.text || "(task completed with no text)"
		}],
		details: {
			taskId: result.taskId,
			sessionId: result.sessionId,
			messageId: result.messageId,
			role: result.role,
			cwd: result.cwd
		}
	};
}
|
|
985
|
+
/**
 * Spawn a child task session, prompt it with `text`, and return a TaskResult
 * (output, text, ids, role, cwd). Emits task_start/task_end events, records
 * the child in metadata for cascading deletes, wires abort propagation from
 * `signal`, and always closes the child in the finally block.
 * @throws when the session is deleted, task creation is unsupported,
 *         MAX_TASK_DEPTH is exceeded, or the signal is already aborted.
 */
async runTask(text, options, signal) {
	this.assertActive();
	if (!this.createTaskSession) throw new Error("[flue] This session cannot create task sessions.");
	if (this.taskDepth >= MAX_TASK_DEPTH) throw new Error(`[flue] Maximum task depth (${MAX_TASK_DEPTH}) exceeded.`);
	if (signal?.aborted) throw new Error("Operation aborted");
	const taskId = crypto.randomUUID();
	// Role shown in the task_start event before full resolution happens below.
	const requestedRole = options?.role ?? this.sessionRole ?? this.config.role;
	let child;
	let abortListener;
	this.emit({
		type: "task_start",
		taskId,
		prompt: text,
		role: requestedRole,
		cwd: options?.cwd,
		parentSessionId: this.id
	});
	try {
		const role = this.resolveEffectiveRole(options?.role);
		const commands = mergeCommands(this.agentCommands, options?.commands);
		child = await this.createTaskSession({
			parentSessionId: this.id,
			taskId,
			parentEnv: this.env,
			cwd: options?.cwd,
			role,
			commands,
			depth: this.taskDepth + 1
		});
		// Persist the link first so delete() can cascade even if the task fails.
		await this.recordTaskSession(child.id, child.storageKey, taskId);
		this.activeTasks.add(child);
		if (signal) {
			abortListener = () => child?.abort();
			signal.addEventListener("abort", abortListener, { once: true });
			// Re-check: the signal may have fired between the top guard and now.
			if (signal.aborted) throw new Error("Operation aborted");
		}
		const schema = options?.result;
		const roleModel = resolveRoleModel(this.config.roles, role);
		// A role-specific model suppresses inheritance of the parent's call model.
		const childOptions = {
			model: options?.model ?? (roleModel ? void 0 : options?.inheritedModel),
			tools: options?.tools
		};
		if (schema) childOptions.result = schema;
		const output = await child.prompt(text, childOptions);
		const taskResult = {
			output,
			text: typeof output?.text === "string" ? output.text : child.getAssistantText(),
			taskId,
			sessionId: child.id,
			messageId: child.getLatestAssistantMessageId(),
			role,
			cwd: options?.cwd
		};
		this.emit({
			type: "task_end",
			taskId,
			isError: false,
			result: taskResult.text,
			parentSessionId: this.id
		});
		return taskResult;
	} catch (error) {
		// Failure is reported both as a task_end and a session-level error event.
		this.emit({
			type: "task_end",
			taskId,
			isError: true,
			result: getErrorMessage(error),
			parentSessionId: this.id
		});
		this.emit({
			type: "error",
			error: getErrorMessage(error)
		});
		throw error;
	} finally {
		if (signal && abortListener) signal.removeEventListener("abort", abortListener);
		if (child) {
			this.activeTasks.delete(child);
			child.close();
		}
	}
}
|
|
1067
|
+
/**
 * Run `fn` exclusively, converting any thrown error into an "error" event
 * (then rethrowing) and always emitting "idle" when the operation settles.
 */
async runOperation(operation, fn) {
	return this.runExclusive(operation, async () => {
		try {
			return await fn();
		} catch (error) {
			this.emit({
				type: "error",
				error: getErrorMessage(error)
			});
			throw error;
		} finally {
			this.emit({ type: "idle" });
		}
	});
}
|
|
1082
|
+
/**
 * Enforce one-operation-at-a-time per session: throws when another operation
 * is active, otherwise records `operation` for the duration of `fn`.
 */
async runExclusive(operation, fn) {
	this.assertActive();
	if (this.activeOperation) throw new Error(`[flue] Session "${this.id}" is already running ${this.activeOperation}. Start another session for parallel conversation branches.`);
	this.activeOperation = operation;
	try {
		return await fn();
	} finally {
		this.activeOperation = void 0;
	}
}
|
|
1092
|
+
emit(event) {
|
|
1093
|
+
this.eventCallback?.({
|
|
1094
|
+
...event,
|
|
1095
|
+
sessionId: this.id
|
|
1096
|
+
});
|
|
1097
|
+
}
|
|
1098
|
+
assertActive() {
|
|
1099
|
+
if (this.deleted) throw new Error(`[flue] Session "${this.id}" has been deleted.`);
|
|
1100
|
+
}
|
|
1101
|
+
/**
 * Wrap a shell invocation and its result as a user message for the history.
 * cwd and the *names* of overridden env vars (values are deliberately not
 * recorded) are appended to the command line of the transcript.
 */
createShellMessage(command, result, options) {
	return {
		role: "user",
		content: [{
			type: "text",
			text: formatShellHistory(command, result, options?.cwd ? `\ncwd: ${options.cwd}` : "", options?.env ? `\nenv: ${Object.keys(options.env).sort().join(", ")}` : "")
		}],
		timestamp: Date.now()
	};
}
|
|
1111
|
+
async syncHarnessMessagesSince(index, source) {
|
|
1112
|
+
const messages = this.harness.state.messages.slice(index);
|
|
1113
|
+
if (messages.length === 0) return;
|
|
1114
|
+
this.history.appendMessages(messages, source);
|
|
1115
|
+
await this.save();
|
|
1116
|
+
}
|
|
1117
|
+
/**
 * Persist the current history + metadata snapshot. The first save also fixes
 * `createdAt`; `updatedAt` is always the current time.
 */
async save() {
	const now = (/* @__PURE__ */ new Date()).toISOString();
	const data = this.history.toData(this.metadata, this.createdAt ?? now, now);
	if (!this.createdAt) this.createdAt = now;
	await this.store.save(this.storageKey, data);
}
|
|
1123
|
+
/**
 * Record a child task session in metadata.taskSessions (used by delete() to
 * cascade) and persist. Idempotent per sessionId; tolerates a metadata field
 * that was corrupted into a non-array.
 */
async recordTaskSession(sessionId, storageKey, taskId) {
	const taskSessions = Array.isArray(this.metadata.taskSessions) ? this.metadata.taskSessions : [];
	if (!taskSessions.some((task) => task?.sessionId === sessionId)) {
		taskSessions.push({
			sessionId,
			taskId,
			storageKey
		});
		this.metadata.taskSessions = taskSessions;
		await this.save();
	}
}
|
|
1135
|
+
async checkLatestAssistantForCompaction() {
|
|
1136
|
+
const messages = this.harness.state.messages;
|
|
1137
|
+
const lastMsg = messages[messages.length - 1];
|
|
1138
|
+
if (lastMsg?.role === "assistant") await this.checkCompaction(lastMsg);
|
|
1139
|
+
}
|
|
1140
|
+
/**
 * Decide whether to compact after an assistant turn. Two triggers:
 * 1) hard context overflow — drop the failed assistant message, compact, and
 *    retry once (guarded by overflowRecoveryAttempted);
 * 2) token usage crossing the configured threshold — compact without retry.
 * Aborted turns are skipped entirely; errored turns skip the threshold path.
 */
async checkCompaction(assistantMessage) {
	if (!this.compactionSettings.enabled) return;
	if (assistantMessage.stopReason === "aborted") return;
	const contextWindow = this.harness.state.model.contextWindow ?? 0;
	if (isContextOverflow(assistantMessage, contextWindow)) {
		// Only one overflow recovery per session lifetime — prevents retry loops.
		if (this.overflowRecoveryAttempted) return;
		this.overflowRecoveryAttempted = true;
		console.error(`[flue:compaction] Overflow detected, compacting and retrying...`);
		const messages = this.harness.state.messages;
		const lastMsg = messages[messages.length - 1];
		if (lastMsg && lastMsg.role === "assistant") {
			// Remove the overflowed assistant message from both harness and history before compacting.
			this.harness.state.messages = messages.slice(0, -1);
			this.history.removeLeafMessage(lastMsg);
			await this.save();
		}
		await this.runCompaction("overflow", true);
		return;
	}
	if (assistantMessage.stopReason === "error") return;
	const contextTokens = calculateContextTokens(assistantMessage.usage);
	if (shouldCompact(contextTokens, contextWindow, this.compactionSettings)) {
		console.error(`[flue:compaction] Threshold reached — ${contextTokens} tokens used, window ${contextWindow}, reserve ${this.compactionSettings.reserveTokens}, triggering compaction`);
		await this.runCompaction("threshold", false);
	}
}
|
|
1165
|
+
/**
 * Summarize older context into a compaction entry and rebuild the harness
 * context from history. Emits compaction_start/compaction_end, is cancellable
 * via compactionAbortController, and — when `willRetry` is set (overflow
 * recovery) — continues the interrupted agent run afterwards. Failures are
 * logged but never propagated: compaction is best-effort.
 */
async runCompaction(reason, willRetry) {
	this.compactionAbortController = new AbortController();
	const messagesBefore = this.harness.state.messages.length;
	try {
		const model = this.harness.state.model;
		const contextEntries = this.history.buildContextEntries();
		const messages = contextEntries.map((entry) => entry.message);
		const latestCompaction = this.history.getLatestCompaction();
		// NOTE(review): firstKeptIndex is hard-coded to 1 for the prior-compaction
		// hint — presumably the prior summary always occupies index 0; confirm
		// against prepareCompaction's contract.
		const preparation = prepareCompaction(messages, this.compactionSettings, latestCompaction ? {
			summary: latestCompaction.summary,
			firstKeptIndex: 1,
			details: latestCompaction.details
		} : void 0);
		if (!preparation) {
			console.error(`[flue:compaction] Nothing to compact (no valid cut point found)`);
			return;
		}
		// The cut point must land on a real message entry so it can be referenced by id.
		const firstKeptEntry = contextEntries[preparation.firstKeptIndex]?.entry;
		if (!firstKeptEntry || firstKeptEntry.type !== "message") {
			console.error(`[flue:compaction] Nothing to compact (first kept message has no entry)`);
			return;
		}
		console.error(`[flue:compaction] Summarizing ${preparation.messagesToSummarize.length} messages` + (preparation.isSplitTurn ? ` (split turn: ${preparation.turnPrefixMessages.length} prefix messages)` : "") + `, keeping messages from index ${preparation.firstKeptIndex}`);
		const estimatedTokens = preparation.tokensBefore;
		this.emit({
			type: "compaction_start",
			reason,
			estimatedTokens
		});
		const result = await compact(preparation, model, void 0, this.compactionAbortController.signal);
		// Abort between request and response: discard the result entirely.
		if (this.compactionAbortController.signal.aborted) return;
		this.history.appendCompaction({
			summary: result.summary,
			firstKeptEntryId: firstKeptEntry.id,
			tokensBefore: result.tokensBefore,
			details: result.details
		});
		this.harness.state.messages = this.history.buildContext();
		const messagesAfter = this.harness.state.messages.length;
		console.error(`[flue:compaction] Complete — messages: ${messagesBefore} → ${messagesAfter}, tokens before: ${result.tokensBefore}`);
		this.emit({
			type: "compaction_end",
			messagesBefore,
			messagesAfter
		});
		await this.save();
		if (willRetry) {
			// Drop a trailing errored assistant message so continue() resumes cleanly.
			const msgs = this.harness.state.messages;
			const lastMsg = msgs[msgs.length - 1];
			if (lastMsg?.role === "assistant" && lastMsg.stopReason === "error") this.harness.state.messages = msgs.slice(0, -1);
			console.error(`[flue:compaction] Retrying after overflow recovery...`);
			const beforeRetry = this.harness.state.messages.length;
			await this.harness.continue();
			await this.harness.waitForIdle();
			await this.syncHarnessMessagesSince(beforeRetry, "retry");
		}
	} catch (error) {
		const errorMessage = error instanceof Error ? error.message : String(error);
		console.error(`[flue:compaction] Failed: ${errorMessage}`);
	} finally {
		this.compactionAbortController = void 0;
	}
}
|
|
1228
|
+
throwIfError(context) {
|
|
1229
|
+
const errorMsg = this.harness.state.errorMessage;
|
|
1230
|
+
if (errorMsg) throw new Error(`[flue] ${context} failed: ${errorMsg}`);
|
|
1231
|
+
}
|
|
1232
|
+
getAssistantText() {
|
|
1233
|
+
const messages = this.harness.state.messages;
|
|
1234
|
+
for (let i = messages.length - 1; i >= 0; i--) {
|
|
1235
|
+
const msg = messages[i];
|
|
1236
|
+
if (msg.role !== "assistant") continue;
|
|
1237
|
+
const content = msg.content;
|
|
1238
|
+
if (!Array.isArray(content)) continue;
|
|
1239
|
+
const textParts = [];
|
|
1240
|
+
for (const block of content) if (block.type === "text") textParts.push(block.text);
|
|
1241
|
+
return textParts.join("\n");
|
|
1242
|
+
}
|
|
1243
|
+
return "";
|
|
1244
|
+
}
|
|
1245
|
+
getLatestAssistantMessageId() {
|
|
1246
|
+
const path = this.history.getActivePath();
|
|
1247
|
+
for (let i = path.length - 1; i >= 0; i--) {
|
|
1248
|
+
const entry = path[i];
|
|
1249
|
+
if (entry.type === "message" && entry.message.role === "assistant") return entry.id;
|
|
1250
|
+
}
|
|
1251
|
+
}
|
|
1252
|
+
/**
 * Extract a schema-conforming result from the latest assistant text. If the
 * model omitted the RESULT_START marker, send one follow-up prompt asking for
 * the result format and extract again (no second retry). Any other extraction
 * failure, or a non-extraction error, is rethrown as-is.
 */
async extractResultWithRetry(schema) {
	const text = this.getAssistantText();
	try {
		return extractResult(text, schema);
	} catch (err) {
		if (!(err instanceof ResultExtractionError)) throw err;
		// Only the "missing RESULT_START marker" failure is worth a retry prompt.
		if (!err.message.includes("RESULT_START")) throw err;
		const followUpPrompt = buildResultExtractionPrompt(schema);
		const beforeRetry = this.harness.state.messages.length;
		await this.harness.prompt(followUpPrompt);
		await this.harness.waitForIdle();
		await this.syncHarnessMessagesSince(beforeRetry, "retry");
		await this.checkLatestAssistantForCompaction();
		return extractResult(this.getAssistantText(), schema);
	}
}
|
|
1268
|
+
};
|
|
1269
|
+
/**
 * Collapse a POSIX-style path to an absolute, normalized form: resolves "."
 * and "..", strips empty segments (duplicate slashes), and always returns a
 * leading "/". ".." segments that climb past the root are silently ignored.
 */
function normalizePath(p) {
	const stack = [];
	for (const segment of p.split("/")) {
		switch (segment) {
			case "":
			case ".":
				break;
			case "..":
				stack.pop();
				break;
			default:
				stack.push(segment);
		}
	}
	return `/${stack.join("/")}`;
}
|
|
1279
|
+
/**
 * Recursively delete a session and every descendant task session recorded in
 * its metadata.taskSessions links. Children are deleted before their parent;
 * `seen` guards against cycles in the stored links.
 */
async function deleteSessionTree(store, storageKey, seen = /* @__PURE__ */ new Set()) {
	if (seen.has(storageKey)) return;
	seen.add(storageKey);
	const data = await store.load(storageKey);
	const children = data?.metadata?.taskSessions;
	if (Array.isArray(children)) {
		for (const child of children) {
			// Tolerate malformed entries — only string storage keys are followed.
			if (typeof child?.storageKey === "string") {
				await deleteSessionTree(store, child.storageKey, seen);
			}
		}
	}
	await store.delete(storageKey);
}
|
|
1287
|
+
/**
 * Render a shell invocation and its result as an XML-ish transcript, omitting
 * empty stdout/stderr sections, then trim it to the shell-history cap.
 */
function formatShellHistory(command, result, cwdLine, envLine) {
	const parts = [
		`<shell_command>\n$ ${command}${cwdLine}${envLine}\n</shell_command>`,
		`<shell_result exitCode="${result.exitCode}">`
	];
	if (result.stdout) parts.push(`<stdout>\n${result.stdout}\n</stdout>`);
	if (result.stderr) parts.push(`<stderr>\n${result.stderr}\n</stderr>`);
	parts.push("</shell_result>");
	return truncateShellHistory(parts.join("\n"));
}
|
|
1294
|
+
/** Keep only the trailing MAX_SHELL_HISTORY_CHARS of `text`, noting how much leading output was dropped. */
function truncateShellHistory(text) {
	if (text.length <= MAX_SHELL_HISTORY_CHARS) return text;
	const omitted = text.length - MAX_SHELL_HISTORY_CHARS;
	return `[Shell output truncated: ${omitted} leading characters omitted]\n` + text.slice(omitted);
}
|
|
1298
|
+
/** Best-effort human-readable message for any thrown value. */
function getErrorMessage(error) {
	if (error instanceof Error) return error.message;
	return String(error);
}
|
|
1301
|
+
|
|
1302
|
+
//#endregion
|
|
1303
|
+
// Bundler-minified re-export aliases (single-letter public names are generated by the build).
export { assertRoleExists as a, normalizePath as i, Session as n, createScopedEnv as o, deleteSessionTree as r, mergeCommands as s, InMemorySessionStore as t };
|