@flue/sdk 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +200 -0
- package/README.md +194 -0
- package/dist/client.d.mts +30 -0
- package/dist/client.mjs +62 -0
- package/dist/cloudflare/index.d.mts +36 -0
- package/dist/cloudflare/index.mjs +206 -0
- package/dist/index.d.mts +26 -0
- package/dist/index.mjs +775 -0
- package/dist/sandbox.d.mts +28 -0
- package/dist/sandbox.mjs +101 -0
- package/dist/session-BD0MEuO3.mjs +1300 -0
- package/dist/types-xNvqlohs.d.mts +327 -0
- package/package.json +50 -0
|
@@ -0,0 +1,1300 @@
|
|
|
1
|
+
import { Agent } from "@mariozechner/pi-agent-core";
|
|
2
|
+
import { Type, completeSimple, isContextOverflow } from "@mariozechner/pi-ai";
|
|
3
|
+
import { toJsonSchema } from "@valibot/to-json-schema";
|
|
4
|
+
import * as v from "valibot";
|
|
5
|
+
|
|
6
|
+
//#region src/context.ts
|
|
7
|
+
/**
 * Parse a file with optional `---`-delimited YAML frontmatter.
 * Only flat `key: value` pairs are recognized; nested YAML is ignored.
 * @param {string} content - Raw file contents.
 * @param {string} defaultName - Name used when frontmatter has no `name` key.
 * @returns {{name: string, description: string, body: string, frontmatter: Record<string, string>}}
 */
function parseFrontmatterFile(content, defaultName) {
  const match = content.match(/^---\s*\n([\s\S]*?)\n---\s*\n([\s\S]*)$/);
  if (!match) {
    // No frontmatter block at all: the whole file is the body.
    return { name: defaultName, description: "", body: content.trim(), frontmatter: {} };
  }
  const [, rawFrontmatter = "", rawBody = ""] = match;
  const frontmatter = {};
  for (const line of rawFrontmatter.split("\n")) {
    const kv = line.match(/^(\w+):\s*(.+)$/);
    if (kv?.[1] && kv[2]) frontmatter[kv[1]] = kv[2].trim();
  }
  return {
    name: frontmatter.name || defaultName,
    description: frontmatter.description || "",
    body: rawBody.trim(),
    frontmatter
  };
}
|
|
30
|
+
/**
 * Read AGENTS.md (and CLAUDE.md if present) from a directory.
 * Returns the trimmed contents of whichever exist, concatenated with blank lines.
 * BUG FIX: the non-trailing-slash branch appended a corrupted literal
 * (`$(unknown)`) instead of interpolating the filename, so AGENTS.md/CLAUDE.md
 * were never found for paths not ending in "/".
 * @param {{exists(p: string): Promise<boolean>, readFile(p: string): Promise<string>}} env
 * @param {string} basePath - Directory to look in (with or without trailing slash).
 * @returns {Promise<string>} Concatenated file contents, "" when neither file exists.
 */
async function readAgentsMd(env, basePath) {
  const parts = [];
  for (const filename of ["AGENTS.md", "CLAUDE.md"]) {
    // Join without producing a double slash when basePath already ends in "/".
    const filePath = basePath.endsWith("/") ? basePath + filename : `${basePath}/${filename}`;
    if (await env.exists(filePath)) {
      const content = await env.readFile(filePath);
      parts.push(content.trim());
    }
  }
  return parts.join("\n\n");
}
|
|
42
|
+
/** Discover skills from .agents/skills/<name>/SKILL.md under basePath. */
async function discoverLocalSkills(env, basePath) {
  const sep = basePath.endsWith("/") ? "" : "/";
  const skillsDir = `${basePath}${sep}.agents/skills`;
  if (!await env.exists(skillsDir)) return {};
  const skills = {};
  for (const entry of await env.readdir(skillsDir)) {
    const skillDir = `${skillsDir}/${entry}`;
    let isDir = false;
    try {
      isDir = (await env.stat(skillDir)).isDirectory;
    } catch {
      // Entry vanished or is unreadable — treat as not-a-directory and skip.
    }
    if (!isDir) continue;
    const skillMdPath = `${skillDir}/SKILL.md`;
    if (!await env.exists(skillMdPath)) continue;
    const raw = await env.readFile(skillMdPath);
    // The directory name is the fallback skill name when frontmatter omits one.
    const { name, description, body } = parseFrontmatterFile(raw, entry);
    skills[name] = { name, description, instructions: body };
  }
  return skills;
}
|
|
66
|
+
/**
 * Assemble the system prompt from AGENTS.md content, a skills index, and
 * optional environment info (date, cwd, directory listing).
 */
function composeSystemPrompt(agentsMd, skills, env) {
  const sections = [];
  if (agentsMd) sections.push(agentsMd);
  const skillList = Object.values(skills);
  if (skillList.length > 0) {
    sections.push("", "## Available Skills", "");
    for (const { name, description } of skillList) {
      const suffix = description ? ` - ${description}` : "";
      sections.push(`- **${name}**${suffix}`);
    }
  }
  if (env) {
    const today = new Date().toLocaleDateString("en-US", {
      weekday: "short",
      year: "numeric",
      month: "short",
      day: "numeric"
    });
    sections.push("", `Date: ${today}`, `Working directory: ${env.cwd}`);
    const listing = env.directoryListing;
    if (listing && listing.length > 0) {
      sections.push("", "Directory structure:", listing.join("\n"));
    }
  }
  return sections.join("\n");
}
|
|
90
|
+
/** Discover AGENTS.md, local skills, and directory listing from the session's cwd. */
async function discoverSessionContext(env) {
  const { cwd } = env;
  const agentsMd = await readAgentsMd(env, cwd);
  const skills = await discoverLocalSkills(env, cwd);
  let directoryListing;
  try {
    directoryListing = await env.readdir(cwd);
  } catch {
    // Listing is best-effort; leave undefined when cwd cannot be read.
  }
  const systemPrompt = composeSystemPrompt(agentsMd, skills, { cwd, directoryListing });
  return { systemPrompt, skills };
}
|
|
107
|
+
|
|
108
|
+
//#endregion
|
|
109
|
+
//#region src/agent.ts
|
|
110
|
+
// Truncation limits shared by the read/bash tools: at most 2000 lines or 50 KB.
const MAX_READ_LINES = 2e3;
const MAX_READ_BYTES = 50 * 1024;
// grep output caps: maximum matches returned and maximum characters per matched line.
const MAX_GREP_MATCHES = 100;
const MAX_GREP_LINE_LENGTH = 500;
// Maximum number of file paths returned by the glob tool.
const MAX_GLOB_RESULTS = 1e3;
// Names of the built-in tools produced by createTools(). Consumers of this set
// are not visible in this chunk — presumably used to distinguish built-ins from
// custom tools elsewhere in the module.
const BUILTIN_TOOL_NAMES = new Set([
  "read",
  "write",
  "edit",
  "bash",
  "grep",
  "glob"
]);
|
|
123
|
+
/** Instantiate the six built-in tools bound to the given execution environment. */
function createTools(env) {
  const factories = [
    createReadTool,
    createWriteTool,
    createEditTool,
    createBashTool,
    createGrepTool,
    createGlobTool
  ];
  return factories.map((factory) => factory(env));
}
|
|
133
|
+
// Factory for the built-in "read" tool: reads a file (with optional 1-indexed
// offset/limit windowing) or lists a directory, truncating large output.
function createReadTool(env) {
  return {
    name: "read",
    label: "Read File",
    description: "Read a file or list a directory. For files, output is truncated to 2000 lines or 50KB — use offset/limit for large files. For directories, returns the list of entries.",
    parameters: Type.Object({
      path: Type.String({ description: "Path to the file to read" }),
      offset: Type.Optional(Type.Number({ description: "Line number to start from (1-indexed)" })),
      limit: Type.Optional(Type.Number({ description: "Maximum number of lines to read" }))
    }),
    async execute(_toolCallId, params, signal) {
      throwIfAborted(signal);
      // Directory case first. stat may throw (e.g. nonexistent path) — swallow
      // and fall through so the readFile error surfaces as the real failure.
      try {
        if ((await env.stat(params.path)).isDirectory) {
          const entries = await env.readdir(params.path);
          return {
            content: [{
              type: "text",
              text: entries.join("\n") || "(empty directory)"
            }],
            details: {
              path: params.path,
              isDirectory: true,
              entries: entries.length
            }
          };
        }
      } catch {}
      const allLines = (await env.readFile(params.path)).split("\n");
      // offset is 1-indexed; clamp to the start of the file.
      const startLine = params.offset ? Math.max(0, params.offset - 1) : 0;
      if (startLine >= allLines.length) throw new Error(`Offset ${params.offset} is beyond end of file (${allLines.length} lines total)`);
      const endLine = params.limit ? startLine + params.limit : allLines.length;
      const { text: truncatedText, wasTruncated } = truncateHead(allLines.slice(startLine, endLine), MAX_READ_LINES, MAX_READ_BYTES);
      let output = truncatedText;
      if (wasTruncated) {
        // Tell the caller exactly where truncation cut off and how to continue.
        const shownEnd = startLine + truncatedText.split("\n").length;
        output += `\n\n[Showing lines ${startLine + 1}-${shownEnd} of ${allLines.length}. Use offset=${shownEnd + 1} to continue.]`;
      }
      return {
        content: [{
          type: "text",
          text: output
        }],
        details: {
          path: params.path,
          lines: allLines.length
        }
      };
    }
  };
}
|
|
184
|
+
/** Factory for the built-in "write" tool: writes a file, creating parent directories as needed. */
function createWriteTool(env) {
  const parameters = Type.Object({
    path: Type.String({ description: "Path to the file to write" }),
    content: Type.String({ description: "Content to write to the file" })
  });
  return {
    name: "write",
    label: "Write File",
    description: "Write content to a file. Creates the file and parent directories if they do not exist.",
    parameters,
    async execute(_toolCallId, params, signal) {
      throwIfAborted(signal);
      const resolved = env.resolvePath(params.path);
      // Strip the final path segment to get the parent directory.
      const dir = resolved.replace(/\/[^/]*$/, "");
      const hasParent = dir !== "" && dir !== resolved;
      if (hasParent) await env.mkdir(dir, { recursive: true });
      await env.writeFile(resolved, params.content);
      const size = params.content.length;
      return {
        content: [{ type: "text", text: `Successfully wrote ${size} bytes to ${params.path}` }],
        details: { path: params.path, size }
      };
    }
  };
}
|
|
212
|
+
/**
 * Factory for the built-in "edit" tool: exact-text replacement in a file.
 * BUG FIX: the replacement text is now supplied via a callback so that
 * `$`-sequences in newText (e.g. "$&", "$1", "$`") are inserted literally
 * instead of being interpreted as String.prototype.replace/replaceAll special
 * replacement patterns, which silently corrupted edits containing "$".
 */
function createEditTool(env) {
  return {
    name: "edit",
    label: "Edit File",
    description: "Edit a file using exact text replacement. The oldText must match a unique region of the file. Use replaceAll to replace all occurrences.",
    parameters: Type.Object({
      path: Type.String({ description: "Path to the file to edit" }),
      oldText: Type.String({ description: "Exact text to find (must be unique)" }),
      newText: Type.String({ description: "Replacement text" }),
      replaceAll: Type.Optional(Type.Boolean({ description: "Replace all occurrences" }))
    }),
    async execute(_toolCallId, params, signal) {
      throwIfAborted(signal);
      const content = await env.readFile(params.path);
      if (params.replaceAll) {
        // Callback form prevents $-pattern substitution in newText.
        const newContent = content.replaceAll(params.oldText, () => params.newText);
        if (newContent === content) throw new Error(`Could not find the text in ${params.path}. No changes made.`);
        await env.writeFile(params.path, newContent);
        const count = countOccurrences(content, params.oldText);
        return {
          content: [{ type: "text", text: `Replaced ${count} occurrences in ${params.path}` }],
          details: { path: params.path, replacements: count }
        };
      }
      const occurrences = countOccurrences(content, params.oldText);
      if (occurrences === 0) throw new Error(`Could not find the exact text in ${params.path}. Make sure your oldText matches exactly, including whitespace and indentation.`);
      if (occurrences > 1) throw new Error(`Found ${occurrences} occurrences of the text in ${params.path}. Provide more surrounding context to make the match unique, or use replaceAll.`);
      // Callback form here too — same $-pattern hazard applies to replace().
      const newContent = content.replace(params.oldText, () => params.newText);
      await env.writeFile(params.path, newContent);
      return {
        content: [{ type: "text", text: `Successfully edited ${params.path}` }],
        details: { path: params.path }
      };
    }
  };
}
|
|
257
|
+
/** Factory for the built-in "bash" tool: runs a shell command via env.exec. */
function createBashTool(env) {
  return {
    name: "bash",
    label: "Run Command",
    description: "Execute a bash command. Returns stdout and stderr. Output is truncated to the last 2000 lines or 50KB.",
    parameters: Type.Object({
      command: Type.String({ description: "Bash command to execute" }),
      timeout: Type.Optional(Type.Number({ description: "Timeout in seconds" }))
    }),
    async execute(_toolCallId, params, signal) {
      throwIfAborted(signal);
      const result = await env.exec(params.command);
      return formatBashResult(result, params.command);
    }
  };
}
|
|
272
|
+
/**
 * Render an exec result as tool output, keeping only the tail of large output.
 * Throws (with the captured output) when the command exited nonzero.
 */
function formatBashResult(result, command) {
  const combined = result.stderr ? `${result.stdout}\n${result.stderr}` : result.stdout;
  const { text: output } = truncateTail(combined.trim(), MAX_READ_LINES, MAX_READ_BYTES);
  if (result.exitCode !== 0) {
    throw new Error(`${output}\n\nCommand exited with code ${result.exitCode}`);
  }
  return {
    content: [{ type: "text", text: output === "" ? "(no output)" : output }],
    details: { command, exitCode: result.exitCode }
  };
}
|
|
286
|
+
/**
 * Factory for the built-in "grep" tool: regex search via the system grep.
 * BUG FIX: escapeShellArg escapes characters for a double-quoted shell context,
 * but the search path was interpolated UNQUOTED, so paths containing spaces or
 * glob characters were word-split/expanded by the shell. The path is now
 * wrapped in double quotes like the other arguments.
 */
function createGrepTool(env) {
  return {
    name: "grep",
    label: "Search Files",
    description: "Search file contents for a regex pattern. Returns matching lines with file paths and line numbers.",
    parameters: Type.Object({
      pattern: Type.String({ description: "Search pattern (regex)" }),
      path: Type.Optional(Type.String({ description: "Directory or file to search (default: .)" })),
      include: Type.Optional(Type.String({ description: "Glob filter, e.g. \"*.ts\"" }))
    }),
    async execute(_toolCallId, params, signal) {
      throwIfAborted(signal);
      const searchPath = params.path || ".";
      const includeFlag = params.include ? `--include="${escapeShellArg(params.include)}" ` : "";
      const cmd = `grep -rn ${includeFlag}"${escapeShellArg(params.pattern)}" "${escapeShellArg(searchPath)}"`;
      const result = await env.exec(cmd);
      // grep exit status 1 means "no matches", which is not an error.
      if (result.exitCode === 1 && !result.stdout.trim()) return {
        content: [{ type: "text", text: "No matches found." }],
        details: { matchCount: 0 }
      };
      if (result.exitCode > 1) throw new Error(`grep failed: ${result.stderr}`);
      const lines = result.stdout.trim().split("\n");
      // Cap both the number of matches and the width of each matched line.
      let finalOutput = lines.slice(0, MAX_GREP_MATCHES).map((line) => line.length > MAX_GREP_LINE_LENGTH ? line.slice(0, MAX_GREP_LINE_LENGTH) + "..." : line).join("\n");
      if (lines.length > MAX_GREP_MATCHES) finalOutput += `\n\n[Showing ${MAX_GREP_MATCHES} of ${lines.length} matches. Narrow your search.]`;
      return {
        content: [{ type: "text", text: finalOutput }],
        details: { matchCount: Math.min(lines.length, MAX_GREP_MATCHES) }
      };
    }
  };
}
|
|
323
|
+
/**
 * Factory for the built-in "glob" tool: file search via `find -name`.
 * BUG FIX: the directory argument was interpolated UNQUOTED even though
 * escapeShellArg targets a double-quoted context, so paths with spaces were
 * word-split by the shell; it is now quoted like the pattern argument.
 * NOTE(review): `find -name` matches basenames only — "**"-style patterns are
 * not expanded the way the description implies; preserved as-is.
 */
function createGlobTool(env) {
  return {
    name: "glob",
    label: "Find Files",
    description: "Find files by glob pattern. Returns matching file paths.",
    parameters: Type.Object({
      pattern: Type.String({ description: "Glob pattern, e.g. \"**/*.ts\"" }),
      path: Type.Optional(Type.String({ description: "Directory to search in (default: .)" }))
    }),
    async execute(_toolCallId, params, signal) {
      throwIfAborted(signal);
      const cmd = `find "${escapeShellArg(params.path || ".")}" -type f -name "${escapeShellArg(params.pattern)}" 2>/dev/null | head -${MAX_GLOB_RESULTS}`;
      const result = await env.exec(cmd);
      const noMatches = {
        content: [{ type: "text", text: "No files found matching pattern." }],
        details: { matchCount: 0 }
      };
      if (result.exitCode !== 0 && !result.stdout.trim()) return noMatches;
      const paths = result.stdout.trim().split("\n").filter(Boolean);
      if (paths.length === 0) return noMatches;
      return {
        content: [{ type: "text", text: paths.join("\n") }],
        details: { matchCount: paths.length }
      };
    }
  };
}
|
|
361
|
+
/** Throw when the (optional) abort signal has fired; no-op otherwise. */
function throwIfAborted(signal) {
  if (signal === undefined || signal === null) return;
  if (signal.aborted) throw new Error("Operation aborted");
}
|
|
364
|
+
/**
 * Count non-overlapping occurrences of substr in str.
 * BUG FIX: an empty needle made the scan loop forever (indexOf("", pos + 0)
 * never advances); it is now treated as zero occurrences.
 * @param {string} str
 * @param {string} substr
 * @returns {number}
 */
function countOccurrences(str, substr) {
  if (substr.length === 0) return 0;
  let count = 0;
  let pos = str.indexOf(substr, 0);
  while (pos !== -1) {
    count++;
    // Advance past the whole match so occurrences never overlap.
    pos = str.indexOf(substr, pos + substr.length);
  }
  return count;
}
|
|
373
|
+
/**
 * Escape a string for interpolation inside a double-quoted shell argument:
 * backslash, double quote, dollar, and backtick each get a leading backslash.
 */
function escapeShellArg(arg) {
  return arg.replace(/[\\"$`]/g, (ch) => `\\${ch}`);
}
|
|
376
|
+
/**
 * Keep the leading portion of `lines`, stopping at maxLines lines or once the
 * joined text (including "\n" separators) would exceed maxBytes.
 * @returns {{text: string, wasTruncated: boolean}}
 */
function truncateHead(lines, maxLines, maxBytes) {
  const kept = [];
  let size = 0;
  let wasTruncated = false;
  for (const line of lines) {
    if (kept.length >= maxLines) {
      wasTruncated = true;
      break;
    }
    // Every line after the first costs one extra byte for its "\n" separator.
    const addition = kept.length === 0 ? line.length : line.length + 1;
    if (size + addition > maxBytes) {
      wasTruncated = true;
      break;
    }
    kept.push(line);
    size += addition;
  }
  return { text: kept.join("\n"), wasTruncated };
}
|
|
398
|
+
/**
 * Keep the trailing portion of `text`: the last maxLines lines, further sliced
 * to the last maxBytes characters if still too large.
 * @returns {{text: string, wasTruncated: boolean}}
 */
function truncateTail(text, maxLines, maxBytes) {
  const lines = text.split("\n");
  const fits = lines.length <= maxLines && text.length <= maxBytes;
  if (fits) return { text, wasTruncated: false };
  let tail = lines.slice(-maxLines).join("\n");
  // Byte cap may still cut mid-line; that is acceptable for tail output.
  if (tail.length > maxBytes) tail = tail.slice(-maxBytes);
  return { text: tail, wasTruncated: true };
}
|
|
411
|
+
|
|
412
|
+
//#endregion
|
|
413
|
+
//#region src/compaction.ts
|
|
414
|
+
// Default auto-compaction policy: compact when the context comes within
// reserveTokens of the model's window, keeping roughly keepRecentTokens of the
// most recent history verbatim (see shouldCompact/prepareCompaction).
const DEFAULT_COMPACTION_SETTINGS = {
  enabled: true,
  reserveTokens: 16384,
  keepRecentTokens: 2e4
};
|
|
419
|
+
/** Total context tokens from a usage record; sums the components when totalTokens is absent/zero. */
function calculateContextTokens(usage) {
  if (usage.totalTokens) return usage.totalTokens;
  return usage.input + usage.output + usage.cacheRead + usage.cacheWrite;
}
|
|
422
|
+
/**
 * Return the usage record of a completed assistant message, or undefined for
 * non-assistant messages and for aborted/errored turns (their usage is unreliable).
 */
function getAssistantUsage(msg) {
  if (msg.role !== "assistant" || !("usage" in msg)) return undefined;
  if (msg.stopReason === "aborted" || msg.stopReason === "error") return undefined;
  return msg.usage ? msg.usage : undefined;
}
|
|
428
|
+
/** Walk messages from the end; return the usage (and index) of the last completed assistant turn. */
function getLastAssistantUsageInfo(messages) {
  let index = messages.length;
  while (index-- > 0) {
    const usage = getAssistantUsage(messages[index]);
    if (usage) return { usage, index };
  }
  return undefined;
}
|
|
438
|
+
/** chars/4 heuristic. Conservative (overestimates). */
function estimateTokens(message) {
  let chars = 0;
  if (message.role === "user") {
    const { content } = message;
    if (typeof content === "string") {
      chars = content.length;
    } else if (Array.isArray(content)) {
      for (const block of content) {
        if (block.type === "text") chars += block.text.length;
      }
    }
  } else if (message.role === "assistant") {
    for (const block of message.content) {
      if (block.type === "text") chars += block.text.length;
      else if (block.type === "thinking") chars += block.thinking.length;
      else if (block.type === "toolCall") chars += block.name.length + JSON.stringify(block.arguments).length;
    }
  } else if (message.role === "toolResult") {
    for (const block of message.content) {
      if (block.type === "text") chars += block.text.length;
      // Flat charge per image block (~1200 tokens).
      else if (block.type === "image") chars += 4800;
    }
  } else {
    // Unknown roles contribute nothing.
    return 0;
  }
  return Math.ceil(chars / 4);
}
|
|
466
|
+
/**
 * Estimate the live context size. Anchors on the last reliable assistant usage
 * report and adds a chars/4 estimate for messages after it; falls back to a
 * pure estimate when no usage report exists.
 * @returns {{tokens: number, usageTokens: number, trailingTokens: number, lastUsageIndex: number | null}}
 */
function estimateContextTokens(messages) {
  const usageInfo = getLastAssistantUsageInfo(messages);
  if (!usageInfo) {
    const estimated = messages.reduce((sum, message) => sum + estimateTokens(message), 0);
    return {
      tokens: estimated,
      usageTokens: 0,
      trailingTokens: estimated,
      lastUsageIndex: null
    };
  }
  const usageTokens = calculateContextTokens(usageInfo.usage);
  const trailingTokens = messages
    .slice(usageInfo.index + 1)
    .reduce((sum, message) => sum + estimateTokens(message), 0);
  return {
    tokens: usageTokens + trailingTokens,
    usageTokens,
    trailingTokens,
    lastUsageIndex: usageInfo.index
  };
}
|
|
488
|
+
/** True when compaction is enabled and the context has grown past window minus reserve. */
function shouldCompact(contextTokens, contextWindow, settings) {
  if (!settings.enabled) return false;
  const threshold = contextWindow - settings.reserveTokens;
  return contextTokens > threshold;
}
|
|
492
|
+
/** Fresh, empty tracker of file paths touched by the read/write/edit tools. */
function createFileOps() {
  const read = new Set();
  const written = new Set();
  const edited = new Set();
  return { read, written, edited };
}
|
|
499
|
+
/**
 * Record the file paths referenced by read/write/edit tool calls of an
 * assistant message into the given fileOps tracker. Mutates fileOps.
 */
function extractFileOpsFromMessage(message, fileOps) {
  if (message.role !== "assistant") return;
  const { content } = message;
  if (!Array.isArray(content)) return;
  for (const block of content) {
    if (block.type !== "toolCall" || !block.arguments) continue;
    const { path } = block.arguments;
    // Only non-empty string paths count.
    if (typeof path !== "string" || path === "") continue;
    if (block.name === "read") fileOps.read.add(path);
    else if (block.name === "write") fileOps.written.add(path);
    else if (block.name === "edit") fileOps.edited.add(path);
  }
}
|
|
522
|
+
/** Split tracked file ops into read-only vs. modified path lists (both sorted). */
function computeFileLists(fileOps) {
  const modified = new Set([...fileOps.edited, ...fileOps.written]);
  const readFiles = [...fileOps.read].filter((path) => !modified.has(path)).sort();
  const modifiedFiles = [...modified].sort();
  return { readFiles, modifiedFiles };
}
|
|
529
|
+
/** Render read/modified file lists as tagged sections; "" when both are empty. */
function formatFileOperations(readFiles, modifiedFiles) {
  const sections = [];
  if (readFiles.length > 0) {
    sections.push(`<read-files>\n${readFiles.join("\n")}\n</read-files>`);
  }
  if (modifiedFiles.length > 0) {
    sections.push(`<modified-files>\n${modifiedFiles.join("\n")}\n</modified-files>`);
  }
  return sections.length === 0 ? "" : `\n\n${sections.join("\n\n")}`;
}
|
|
536
|
+
// Per-tool-result character cap applied when serializing a conversation for summarization.
const TOOL_RESULT_MAX_CHARS = 2e3;
|
|
537
|
+
/** Cap text at maxChars, appending a note saying how many characters were dropped. */
function truncateForSummary(text, maxChars) {
  if (text.length <= maxChars) return text;
  const dropped = text.length - maxChars;
  const head = text.slice(0, maxChars);
  return `${head}\n\n[... ${dropped} more characters truncated]`;
}
|
|
542
|
+
/** Serialize messages to text so the summarization model doesn't treat it as a conversation to continue. */
function serializeConversation(messages) {
  const parts = [];
  // Single pass over the transcript; each role renders to a tagged paragraph.
  for (const msg of messages) if (msg.role === "user") {
    const { content } = msg;
    // User content may be a plain string or an array of content blocks.
    const text = typeof content === "string" ? content : content.filter((c) => c.type === "text").map((c) => c.text).join("");
    if (text) parts.push(`[User]: ${text}`);
  } else if (msg.role === "assistant") {
    const { content } = msg;
    const textParts = [];
    const thinkingParts = [];
    const toolCalls = [];
    for (const block of content) if (block.type === "text") textParts.push(block.text);
    else if (block.type === "thinking") thinkingParts.push(block.thinking);
    else if (block.type === "toolCall") {
      // Render tool calls as name(arg=value, ...) so intent stays compact.
      const argsStr = Object.entries(block.arguments).map(([k, v]) => `${k}=${JSON.stringify(v)}`).join(", ");
      toolCalls.push(`${block.name}(${argsStr})`);
    }
    if (thinkingParts.length > 0) parts.push(`[Assistant thinking]: ${thinkingParts.join("\n")}`);
    if (textParts.length > 0) parts.push(`[Assistant]: ${textParts.join("\n")}`);
    if (toolCalls.length > 0) parts.push(`[Assistant tool calls]: ${toolCalls.join("; ")}`);
  } else if (msg.role === "toolResult") {
    const { content } = msg;
    const text = content.filter((c) => c.type === "text").map((c) => c.text).join("");
    // Cap tool results so a single huge output cannot dominate the summary input.
    if (text) parts.push(`[Tool result]: ${truncateForSummary(text, TOOL_RESULT_MAX_CHARS)}`);
  }
  return parts.join("\n\n");
}
|
|
570
|
+
const SUMMARIZATION_SYSTEM_PROMPT = "You are a context summarization assistant. Your task is to read a conversation between a user and an AI coding assistant, then produce a structured summary following the exact format specified.\n\nDo NOT continue the conversation. Do NOT respond to any questions in the conversation. ONLY output the structured summary.";
|
|
571
|
+
const SUMMARIZATION_PROMPT = `The messages above are a conversation to summarize. Create a structured context checkpoint summary that another LLM will use to continue the work.
|
|
572
|
+
|
|
573
|
+
Use this EXACT format:
|
|
574
|
+
|
|
575
|
+
## Goal
|
|
576
|
+
[What is the user trying to accomplish? Can be multiple items if the session covers different tasks.]
|
|
577
|
+
|
|
578
|
+
## Constraints & Preferences
|
|
579
|
+
- [Any constraints, preferences, or requirements mentioned by user]
|
|
580
|
+
- [Or "(none)" if none were mentioned]
|
|
581
|
+
|
|
582
|
+
## Progress
|
|
583
|
+
### Done
|
|
584
|
+
- [x] [Completed tasks/changes]
|
|
585
|
+
|
|
586
|
+
### In Progress
|
|
587
|
+
- [ ] [Current work]
|
|
588
|
+
|
|
589
|
+
### Blocked
|
|
590
|
+
- [Issues preventing progress, if any]
|
|
591
|
+
|
|
592
|
+
## Key Decisions
|
|
593
|
+
- **[Decision]**: [Brief rationale]
|
|
594
|
+
|
|
595
|
+
## Next Steps
|
|
596
|
+
1. [Ordered list of what should happen next]
|
|
597
|
+
|
|
598
|
+
## Critical Context
|
|
599
|
+
- [Any data, examples, or references needed to continue]
|
|
600
|
+
- [Or "(none)" if not applicable]
|
|
601
|
+
|
|
602
|
+
Keep each section concise. Preserve exact file paths, function names, and error messages.`;
|
|
603
|
+
const UPDATE_SUMMARIZATION_PROMPT = `The messages above are NEW conversation messages to incorporate into the existing summary provided in <previous-summary> tags.
|
|
604
|
+
|
|
605
|
+
Update the existing structured summary with new information. RULES:
|
|
606
|
+
- PRESERVE all existing information from the previous summary
|
|
607
|
+
- ADD new progress, decisions, and context from the new messages
|
|
608
|
+
- UPDATE the Progress section: move items from "In Progress" to "Done" when completed
|
|
609
|
+
- UPDATE "Next Steps" based on what was accomplished
|
|
610
|
+
- PRESERVE exact file paths, function names, and error messages
|
|
611
|
+
- If something is no longer relevant, you may remove it
|
|
612
|
+
|
|
613
|
+
Use this EXACT format:
|
|
614
|
+
|
|
615
|
+
## Goal
|
|
616
|
+
[Preserve existing goals, add new ones if the task expanded]
|
|
617
|
+
|
|
618
|
+
## Constraints & Preferences
|
|
619
|
+
- [Preserve existing, add new ones discovered]
|
|
620
|
+
|
|
621
|
+
## Progress
|
|
622
|
+
### Done
|
|
623
|
+
- [x] [Include previously done items AND newly completed items]
|
|
624
|
+
|
|
625
|
+
### In Progress
|
|
626
|
+
- [ ] [Current work - update based on progress]
|
|
627
|
+
|
|
628
|
+
### Blocked
|
|
629
|
+
- [Current blockers - remove if resolved]
|
|
630
|
+
|
|
631
|
+
## Key Decisions
|
|
632
|
+
- **[Decision]**: [Brief rationale] (preserve all previous, add new)
|
|
633
|
+
|
|
634
|
+
## Next Steps
|
|
635
|
+
1. [Update based on current state]
|
|
636
|
+
|
|
637
|
+
## Critical Context
|
|
638
|
+
- [Preserve important context, add new if needed]
|
|
639
|
+
|
|
640
|
+
Keep each section concise. Preserve exact file paths, function names, and error messages.`;
|
|
641
|
+
const TURN_PREFIX_SUMMARIZATION_PROMPT = `This is the PREFIX of a turn that was too large to keep. The SUFFIX (recent work) is retained.
|
|
642
|
+
|
|
643
|
+
Summarize the prefix to provide context for the retained suffix:
|
|
644
|
+
|
|
645
|
+
## Original Request
|
|
646
|
+
[What did the user ask for in this turn?]
|
|
647
|
+
|
|
648
|
+
## Early Progress
|
|
649
|
+
- [Key decisions and work done in the prefix]
|
|
650
|
+
|
|
651
|
+
## Context for Suffix
|
|
652
|
+
- [Information needed to understand the retained recent work]
|
|
653
|
+
|
|
654
|
+
Be concise. Focus on what's needed to understand the kept suffix.`;
|
|
655
|
+
/** Valid cut points: user or assistant messages. Never cut at toolResult. */
function findValidCutPoints(messages, start, end) {
  const cutPoints = [];
  for (let i = start; i < end; i++) {
    const { role } = messages[i];
    const isBoundary = role === "user" || role === "assistant";
    if (isBoundary) cutPoints.push(i);
  }
  return cutPoints;
}
|
|
664
|
+
/** Scan backwards from index to start for the user message that opened the turn; -1 if none. */
function findTurnStartIndex(messages, index, start) {
  let i = index;
  while (i >= start) {
    if (messages[i].role === "user") return i;
    i--;
  }
  return -1;
}
|
|
668
|
+
// Choose where to cut the history for compaction: keep roughly the most recent
// keepRecentTokens worth of messages, cutting only at a user/assistant boundary.
function findCutPoint(messages, start, end, keepRecentTokens) {
  const cutPoints = findValidCutPoints(messages, start, end);
  // No valid boundary: report firstKeptIndex = start, which callers treat as "nothing to compact".
  if (cutPoints.length === 0) return {
    firstKeptIndex: start,
    turnStartIndex: -1,
    isSplitTurn: false
  };
  let accumulatedTokens = 0;
  let cutIndex = cutPoints[0];
  // Walk backwards accumulating estimated tokens until the recent-keep budget
  // is reached, then snap forward to the first valid cut point at or after
  // that message so the kept suffix never starts inside a toolResult.
  for (let i = end - 1; i >= start; i--) {
    const messageTokens = estimateTokens(messages[i]);
    accumulatedTokens += messageTokens;
    if (accumulatedTokens >= keepRecentTokens) {
      for (let c = 0; c < cutPoints.length; c++) if (cutPoints[c] >= i) {
        cutIndex = cutPoints[c];
        break;
      }
      break;
    }
  }
  // Cutting at an assistant message splits a turn in half; record where that
  // turn began so the prefix can be summarized separately.
  const isUserMessage = messages[cutIndex].role === "user";
  const turnStartIndex = isUserMessage ? -1 : findTurnStartIndex(messages, cutIndex, start);
  return {
    firstKeptIndex: cutIndex,
    turnStartIndex,
    isSplitTurn: !isUserMessage && turnStartIndex !== -1
  };
}
|
|
696
|
+
/** Pure function — no I/O. Finds cut point, extracts messages to summarize, tracks file ops. */
function prepareCompaction(messages, settings, previousCompaction) {
  if (messages.length === 0) return void 0;
  // A previous compaction anchors the summarizable region: never re-summarize before it.
  const boundaryStart = previousCompaction ? previousCompaction.firstKeptIndex : 0;
  const boundaryEnd = messages.length;
  const tokensBefore = estimateContextTokens(messages).tokens;
  const cutPoint = findCutPoint(messages, boundaryStart, boundaryEnd, settings.keepRecentTokens);
  // Cut lands at (or before) the boundary — compaction would remove nothing.
  if (cutPoint.firstKeptIndex <= boundaryStart) return void 0;
  // When a turn is split, its prefix is summarized separately from the older history.
  const historyEnd = cutPoint.isSplitTurn ? cutPoint.turnStartIndex : cutPoint.firstKeptIndex;
  const messagesToSummarize = messages.slice(boundaryStart, historyEnd);
  const turnPrefixMessages = cutPoint.isSplitTurn ? messages.slice(cutPoint.turnStartIndex, cutPoint.firstKeptIndex) : [];
  const fileOps = createFileOps();
  // Carry file lists forward from the previous compaction so they accumulate
  // across successive compactions (previously-modified files land in `edited`).
  if (previousCompaction?.details) {
    for (const f of previousCompaction.details.readFiles ?? []) fileOps.read.add(f);
    for (const f of previousCompaction.details.modifiedFiles ?? []) fileOps.edited.add(f);
  }
  for (const msg of messagesToSummarize) extractFileOpsFromMessage(msg, fileOps);
  for (const msg of turnPrefixMessages) extractFileOpsFromMessage(msg, fileOps);
  return {
    firstKeptIndex: cutPoint.firstKeptIndex,
    messagesToSummarize,
    turnPrefixMessages,
    isSplitTurn: cutPoint.isSplitTurn,
    tokensBefore,
    previousSummary: previousCompaction?.summary,
    fileOps,
    settings
  };
}
|
|
725
|
+
/**
 * Ask the summarization model for a history summary of `currentMessages`.
 * Uses the update-style prompt when a previous summary is supplied, caps the
 * output at 80% of the compaction token reserve (never more than 16000), and
 * requests high reasoning effort when the model supports reasoning.
 * @throws {Error} when the completion ends with an error stop reason.
 */
async function generateSummary(currentMessages, model, reserveTokens, apiKey, signal, previousSummary) {
	const tokenBudget = Math.min(Math.floor(.8 * reserveTokens), 16e3);
	const instructions = previousSummary ? UPDATE_SUMMARIZATION_PROMPT : SUMMARIZATION_PROMPT;
	const sections = [`<conversation>\n${serializeConversation(currentMessages)}\n</conversation>\n\n`];
	if (previousSummary) sections.push(`<previous-summary>\n${previousSummary}\n</previous-summary>\n\n`);
	sections.push(instructions);
	const opts = {
		maxTokens: tokenBudget,
		signal
	};
	if (apiKey) opts.apiKey = apiKey;
	if (model.reasoning) opts.reasoning = "high";
	const response = await completeSimple(model, {
		systemPrompt: SUMMARIZATION_SYSTEM_PROMPT,
		messages: [{
			role: "user",
			content: [{
				type: "text",
				text: sections.join("")
			}],
			timestamp: Date.now()
		}]
	}, opts);
	if (response.stopReason === "error") throw new Error(`Summarization failed: ${response.errorMessage || "Unknown error"}`);
	const textBlocks = response.content.filter((c) => c.type === "text");
	return textBlocks.map((c) => c.text).join("\n");
}
|
|
752
|
+
/**
 * Summarize the already-elapsed portion of a split turn so the in-flight turn
 * keeps its context after compaction. Output is capped at 50% of the
 * compaction token reserve (never more than 16000).
 * NOTE(review): unlike generateSummary, no reasoning effort is requested here —
 * presumably intentional to keep prefix summaries cheap; confirm.
 * @throws {Error} when the completion ends with an error stop reason.
 */
async function generateTurnPrefixSummary(messages, model, reserveTokens, apiKey, signal) {
	const tokenBudget = Math.min(Math.floor(.5 * reserveTokens), 16e3);
	const opts = {
		maxTokens: tokenBudget,
		signal
	};
	if (apiKey) opts.apiKey = apiKey;
	const response = await completeSimple(model, {
		systemPrompt: SUMMARIZATION_SYSTEM_PROMPT,
		messages: [{
			role: "user",
			content: [{
				type: "text",
				text: `<conversation>\n${serializeConversation(messages)}\n</conversation>\n\n${TURN_PREFIX_SUMMARIZATION_PROMPT}`
			}],
			timestamp: Date.now()
		}]
	}, opts);
	if (response.stopReason === "error") throw new Error(`Turn prefix summarization failed: ${response.errorMessage || "Unknown error"}`);
	const textBlocks = response.content.filter((c) => c.type === "text");
	return textBlocks.map((c) => c.text).join("\n");
}
|
|
774
|
+
/**
 * Execute a prepared compaction: generate the summary text (history and, for a
 * split turn, the turn-prefix summary in parallel), append the tracked file
 * operations, and return the compaction result for the session to apply.
 */
async function compact(preparation, model, apiKey, signal) {
	const { firstKeptIndex, messagesToSummarize, turnPrefixMessages, isSplitTurn, tokensBefore, previousSummary, fileOps, settings } = preparation;
	let summary;
	if (isSplitTurn && turnPrefixMessages.length > 0) {
		// Run both summaries concurrently — they are independent LLM calls.
		const historyTask = messagesToSummarize.length > 0
			? generateSummary(messagesToSummarize, model, settings.reserveTokens, apiKey, signal, previousSummary)
			: Promise.resolve("No prior history.");
		const prefixTask = generateTurnPrefixSummary(turnPrefixMessages, model, settings.reserveTokens, apiKey, signal);
		const [historyResult, turnPrefixResult] = await Promise.all([historyTask, prefixTask]);
		summary = `${historyResult}\n\n---\n\n**Turn Context (split turn):**\n\n${turnPrefixResult}`;
	} else {
		summary = await generateSummary(messagesToSummarize, model, settings.reserveTokens, apiKey, signal, previousSummary);
	}
	const { readFiles, modifiedFiles } = computeFileLists(fileOps);
	summary += formatFileOperations(readFiles, modifiedFiles);
	return {
		summary,
		firstKeptIndex,
		tokensBefore,
		details: {
			readFiles,
			modifiedFiles
		}
	};
}
|
|
793
|
+
/**
 * Build the post-compaction message list: a single synthetic user message
 * carrying the summary, followed by every message from `firstKeptIndex` on.
 */
function buildCompactedMessages(messages, result) {
	const summaryMessage = {
		role: "user",
		content: [{
			type: "text",
			text: `[Context Summary]\n\n${result.summary}`
		}],
		timestamp: Date.now()
	};
	const kept = messages.slice(result.firstKeptIndex);
	return [summaryMessage, ...kept];
}
|
|
803
|
+
|
|
804
|
+
//#endregion
//#region src/result.ts
// Preamble prepended to every prompt/skill text: forces fully autonomous,
// non-interactive behavior (see buildPromptText / buildSkillPrompt below).
const HEADLESS_PREAMBLE = "You are running in headless mode with no human operator. Work autonomously — never ask questions, never wait for user input. Make your best judgment and proceed independently.";
|
|
807
|
+
/**
 * Render result-format instructions for a valibot schema: the JSON schema
 * (minus its `$schema` meta key) in a fenced code block, plus examples of the
 * delimiter format the agent must emit.
 */
function buildResultInstructions(schema) {
	const jsonSchema = toJsonSchema(schema, { errorMode: "ignore" });
	// Strip the "$schema" meta property — only the shape matters to the model.
	const { $schema: _omitted, ...bareSchema } = jsonSchema;
	const lines = [
		"",
		"```json",
		JSON.stringify(bareSchema, null, 2),
		"```",
		"",
		"Example: (Object)",
		"---RESULT_START---",
		"{\"key\": \"value\"}",
		"---RESULT_END---",
		"",
		"Example: (String)",
		"---RESULT_START---",
		"Hello, world!",
		"---RESULT_END---"
	];
	return lines.join("\n");
}
|
|
826
|
+
/** Follow-up prompt used when the LLM forgets to include RESULT_START/RESULT_END delimiters. */
function buildResultExtractionPrompt(schema) {
	const header = "Your task is complete. Now respond with ONLY your final result.";
	const directive = "No explanation, no preamble — just the result in the following format, conforming to this schema:";
	return `${header}\n${directive}\n${buildResultInstructions(schema)}`;
}
|
|
834
|
+
/**
 * Assemble the full prompt for a skill invocation: headless preamble, the
 * skill's instructions, optional JSON-rendered arguments, and — when a result
 * schema is supplied — the delimiter/schema instructions for the final result.
 */
function buildSkillPrompt(skillInstructions, args, schema) {
	const sections = [
		HEADLESS_PREAMBLE,
		"",
		skillInstructions
	];
	const hasArgs = args && Object.keys(args).length > 0;
	if (hasArgs) sections.push(`\nArguments:\n${JSON.stringify(args, null, 2)}`);
	if (schema) {
		sections.push(
			"When complete, you MUST output your result between these exact delimiters conforming to this schema:",
			buildResultInstructions(schema)
		);
	}
	return sections.join("\n");
}
|
|
847
|
+
/**
 * Assemble the full prompt for a plain text invocation: headless preamble,
 * the caller's text, and — when a result schema is supplied — the
 * delimiter/schema instructions for the final result.
 */
function buildPromptText(text, schema) {
	const sections = [
		HEADLESS_PREAMBLE,
		"",
		text
	];
	if (schema) {
		sections.push(
			"When complete, you MUST output your result between these exact delimiters conforming to this schema:",
			buildResultInstructions(schema)
		);
	}
	return sections.join("\n");
}
|
|
859
|
+
/** Extract the last ---RESULT_START---/---RESULT_END--- block from agent text and validate against schema. */
function extractResult(text, schema) {
	const rawBlock = extractLastResultBlock(text);
	if (rawBlock === null) {
		throw new ResultExtractionError("No ---RESULT_START--- / ---RESULT_END--- block found in the assistant response.", text);
	}
	// Object/array schemas carry JSON payloads; everything else is validated
	// as the raw string.
	let candidate = rawBlock;
	const expectsJson = schema.type === "object" || schema.type === "array";
	if (expectsJson) {
		try {
			candidate = JSON.parse(rawBlock);
		} catch {
			throw new ResultExtractionError("Result block contains invalid JSON for the expected schema.", rawBlock);
		}
	}
	const validation = v.safeParse(schema, candidate);
	if (validation.success) return validation.output;
	const reasons = validation.issues.map((i) => i.message).join(", ");
	throw new ResultExtractionError(`Result does not match the expected schema: ${reasons}`, rawBlock);
}
|
|
873
|
+
/**
 * Find the LAST ---RESULT_START---/---RESULT_END--- delimited block in `text`
 * and return its trimmed contents, or null when no complete block exists.
 * The last match wins so a model that restates or corrects its result is
 * honored.
 */
function extractLastResultBlock(text) {
	// `\s*` (instead of the stricter `\s*\n`) tolerates the payload starting on
	// the same line as the opening delimiter — some models skip the newline
	// shown in the prompt examples, which previously forced a retry round-trip.
	// Newline-delimited blocks still match identically since the capture is
	// trimmed either way.
	const matches = text.matchAll(/---RESULT_START---\s*([\s\S]*?)---RESULT_END---/g);
	let lastMatch = null;
	for (const match of matches) lastMatch = match[1]?.trim() ?? null;
	return lastMatch;
}
|
|
879
|
+
/**
 * Thrown when the assistant output lacks a result block or the block fails
 * JSON parsing / schema validation. `rawOutput` preserves the offending text
 * for diagnostics.
 */
var ResultExtractionError = class extends Error {
	constructor(message, rawOutput) {
		super(message);
		this.name = "ResultExtractionError";
		this.rawOutput = rawOutput;
	}
};
|
|
886
|
+
|
|
887
|
+
//#endregion
|
|
888
|
+
//#region src/session.ts
|
|
889
|
+
/** Internal session implementation. Not exported publicly — wrapped by FlueSession. */
|
|
890
|
+
/** In-memory session store. Sessions persist for the lifetime of the process. */
var InMemorySessionStore = class {
	// Backing map: session id -> session data.
	store = /* @__PURE__ */ new Map();
	/** Persist (or overwrite) session data under `id`. */
	async save(id, data) {
		this.store.set(id, data);
	}
	/** Fetch session data for `id`; null when no entry exists. */
	async load(id) {
		const entry = this.store.get(id);
		return entry === undefined ? null : entry;
	}
	/** Remove the session entry for `id` (no-op when absent). */
	async delete(id) {
		this.store.delete(id);
	}
};
|
|
903
|
+
/**
 * Core session: wraps a pi-agent-core Agent, adds role/model resolution,
 * skill/command/custom-tool plumbing, persistence via a SessionStore, and
 * automatic context compaction (threshold-based and overflow recovery).
 */
var Session = class Session {
	// Unique session identifier (also the persistence key).
	id;
	// Arbitrary caller-owned metadata, persisted with the session.
	metadata;
	// The underlying pi-agent-core Agent instance.
	agent;
	// Session configuration (systemPrompt, model, skills, roles, compaction, resolveModel).
	config;
	// Execution environment (exec + filesystem + path resolution + commandSupport).
	env;
	// Persistence backend (save/load/delete by id).
	store;
	// ISO timestamp of first save; undefined until the session is persisted.
	createdAt;
	// Effective compaction settings (config values with defaults filled in).
	compactionSettings;
	// Result of the most recent compaction (summary, firstKeptIndex, details), if any.
	lastCompaction;
	// Guards against repeated overflow-compact-retry loops: only one attempt is made.
	overflowRecoveryAttempted = false;
	// Abort controller for an in-flight compaction's summarization calls.
	compactionAbortController;
	// Optional observer callback receiving simplified session events.
	eventCallback;
	// Snapshot of the built-in tools, used to reset after custom-tool registration.
	builtinTools;
	/**
	 * @param existingData Previously persisted session data to restore, or null/undefined for a fresh session.
	 * @param onAgentEvent Optional event callback; receives agent/tool/compaction/task lifecycle events.
	 */
	constructor(id, config, env, store, existingData, onAgentEvent) {
		this.id = id;
		this.config = config;
		this.env = env;
		this.store = store;
		this.metadata = existingData?.metadata ?? {};
		this.createdAt = existingData?.createdAt;
		this.lastCompaction = existingData?.lastCompaction;
		// Merge configured compaction settings over defaults.
		const cc = config.compaction;
		this.compactionSettings = {
			enabled: cc?.enabled ?? DEFAULT_COMPACTION_SETTINGS.enabled,
			reserveTokens: cc?.reserveTokens ?? DEFAULT_COMPACTION_SETTINGS.reserveTokens,
			keepRecentTokens: cc?.keepRecentTokens ?? DEFAULT_COMPACTION_SETTINGS.keepRecentTokens
		};
		const systemPrompt = config.systemPrompt;
		const tools = createTools(env);
		this.builtinTools = tools;
		const previousMessages = existingData?.messages ?? [];
		this.agent = new Agent({
			initialState: {
				systemPrompt,
				model: config.model,
				tools,
				messages: previousMessages
			},
			toolExecution: "parallel"
		});
		this.eventCallback = onAgentEvent;
		const emit = onAgentEvent;
		// Translate raw agent events into the simplified public event shape.
		this.agent.subscribe(async (event) => {
			switch (event.type) {
				case "agent_start":
					emit?.({ type: "agent_start" });
					break;
				case "message_update": {
					// Only streaming text deltas are forwarded.
					const aEvent = event.assistantMessageEvent;
					if (aEvent.type === "text_delta") emit?.({
						type: "text_delta",
						text: aEvent.delta
					});
					break;
				}
				case "tool_execution_start":
					emit?.({
						type: "tool_start",
						toolName: event.toolName,
						toolCallId: event.toolCallId,
						args: event.args
					});
					break;
				case "tool_execution_end":
					emit?.({
						type: "tool_end",
						toolName: event.toolName,
						toolCallId: event.toolCallId,
						isError: event.isError,
						result: event.result
					});
					break;
				case "turn_end":
					emit?.({ type: "turn_end" });
					break;
				case "agent_end": {
					// After each agent run, check whether compaction is needed
					// before signaling completion.
					const messages = this.agent.state.messages;
					const lastMsg = messages[messages.length - 1];
					if (lastMsg?.role === "assistant") await this.checkCompaction(lastMsg);
					emit?.({ type: "done" });
					break;
				}
			}
		});
	}
	/**
	 * Run a user prompt through the agent.
	 * Resolves the model for this call, injects role instructions, registers any
	 * temporary commands/tools for the duration of the call, and — when a result
	 * schema is given — extracts and validates the delimited result (with one retry).
	 * @returns The validated result (when `options.result` is set) or `{ text }`.
	 */
	async prompt(text, options) {
		this.resolveModelForCall(options?.model, options?.role);
		const promptWithRole = this.injectRoleInstructions(text, options?.role);
		const schema = options?.result;
		const fullPrompt = buildPromptText(promptWithRole, schema);
		if (options?.commands) this.assertCommandSupport(options.commands);
		const registeredCommandNames = options?.commands ? this.registerCommands(options.commands) : [];
		const registeredToolNames = options?.tools ? this.registerCustomTools(options.tools) : [];
		try {
			await this.agent.prompt(fullPrompt);
			await this.agent.waitForIdle();
			this.throwIfError("prompt");
			await this.save();
			if (schema) return this.extractResultWithRetry(schema);
			return { text: this.getAssistantText() };
		} finally {
			// Commands/tools registered for this call never outlive it.
			this.unregisterCommands(registeredCommandNames);
			if (registeredToolNames.length > 0) this.unregisterCustomTools();
		}
	}
	/**
	 * Invoke a registered skill by name. Same lifecycle as prompt() but the
	 * prompt text is built from the skill's instructions plus optional args.
	 * @throws {Error} when the skill name is not registered.
	 */
	async skill(name, options) {
		const registeredSkill = this.config.skills[name];
		if (!registeredSkill) throw new Error(`Skill "${name}" not registered. Available: ${Object.keys(this.config.skills).join(", ") || "(none)"}`);
		this.resolveModelForCall(options?.model, options?.role);
		const schema = options?.result;
		const skillPrompt = buildSkillPrompt(registeredSkill.instructions, options?.args, schema);
		const promptWithRole = this.injectRoleInstructions(skillPrompt, options?.role);
		if (options?.commands) this.assertCommandSupport(options.commands);
		const registeredCommandNames = options?.commands ? this.registerCommands(options.commands) : [];
		const registeredToolNames = options?.tools ? this.registerCustomTools(options.tools) : [];
		try {
			await this.agent.prompt(promptWithRole);
			await this.agent.waitForIdle();
			this.throwIfError(`skill("${name}")`);
			await this.save();
			if (schema) return this.extractResultWithRetry(schema);
			return { text: this.getAssistantText() };
		} finally {
			this.unregisterCommands(registeredCommandNames);
			if (registeredToolNames.length > 0) this.unregisterCustomTools();
		}
	}
	/**
	 * Execute a shell command directly in the environment (no LLM involved).
	 * Temporary commands may be registered for the duration of the call.
	 * @returns {{ stdout, stderr, exitCode }}
	 */
	async shell(command, options) {
		if (options?.commands) this.assertCommandSupport(options.commands);
		const registeredNames = options?.commands ? this.registerCommands(options.commands) : [];
		try {
			const result = await this.env.exec(command, {
				env: options?.env,
				cwd: options?.cwd
			});
			return {
				stdout: result.stdout,
				stderr: result.stderr,
				exitCode: result.exitCode
			};
		} finally {
			this.unregisterCommands(registeredNames);
		}
	}
	/**
	 * Run an isolated sub-task in a workspace subdirectory: builds a scoped
	 * environment whose file operations are re-rooted at the workspace, discovers
	 * local context (system prompt, skills) there, and runs a one-shot child
	 * Session that is destroyed when the task completes.
	 * @throws {Error} when `options.workspace` is missing.
	 */
	async task(prompt, options) {
		if (!options?.workspace) throw new Error("[flue] task() requires a workspace option.");
		const taskCwd = options.workspace.startsWith("/") ? options.workspace : normalizePath(this.env.cwd + "/" + options.workspace);
		// Resolve paths relative to the task workspace instead of the parent cwd.
		function taskResolvePath(p) {
			if (p.startsWith("/")) return normalizePath(p);
			if (taskCwd === "/") return normalizePath("/" + p);
			return normalizePath(taskCwd + "/" + p);
		}
		const parentEnv = this.env;
		// Delegate everything to the parent environment, but re-rooted at taskCwd.
		// cleanup is a no-op: the parent owns the real environment's lifetime.
		const taskEnv = {
			exec: (cmd, opts) => parentEnv.exec(cmd, {
				cwd: opts?.cwd ?? taskCwd,
				env: opts?.env
			}),
			readFile: (p) => parentEnv.readFile(taskResolvePath(p)),
			readFileBuffer: (p) => parentEnv.readFileBuffer(taskResolvePath(p)),
			writeFile: (p, c) => parentEnv.writeFile(taskResolvePath(p), c),
			stat: (p) => parentEnv.stat(taskResolvePath(p)),
			readdir: (p) => parentEnv.readdir(taskResolvePath(p)),
			exists: (p) => parentEnv.exists(taskResolvePath(p)),
			mkdir: (p, o) => parentEnv.mkdir(taskResolvePath(p), o),
			rm: (p, o) => parentEnv.rm(taskResolvePath(p), o),
			cwd: taskCwd,
			resolvePath: taskResolvePath,
			commandSupport: parentEnv.commandSupport,
			cleanup: async () => {}
		};
		const localContext = await discoverSessionContext(taskEnv);
		// Model precedence for the task: explicit option > role model > parent default.
		let taskModel = this.config.model;
		const taskRole = options?.role ? this.config.roles[options.role] : void 0;
		if (taskRole?.model && this.config.resolveModel) taskModel = this.config.resolveModel(taskRole.model);
		if (options?.model && this.config.resolveModel) taskModel = this.config.resolveModel(options.model);
		const taskConfig = {
			systemPrompt: localContext.systemPrompt,
			skills: localContext.skills,
			roles: this.config.roles,
			model: taskModel,
			resolveModel: this.config.resolveModel,
			compaction: this.config.compaction
		};
		this.eventCallback?.({
			type: "task_start",
			workspace: taskCwd
		});
		// The child session is memory-only and shares the parent's event callback.
		const taskStore = new InMemorySessionStore();
		const taskSession = new Session(`${this.id}:task:${Date.now()}`, taskConfig, taskEnv, taskStore, null, this.eventCallback);
		try {
			const promptOpts = { role: options?.role };
			if (options?.result) promptOpts.result = options.result;
			return await taskSession.prompt(prompt, promptOpts);
		} finally {
			this.eventCallback?.({ type: "task_end" });
			await taskSession.destroy();
		}
	}
	/** Abort the agent's current run, if any. */
	abort() {
		this.agent.abort();
	}
	/** Abort, delete persisted state, and clean up the environment. */
	async destroy() {
		this.agent.abort();
		await this.store.delete(this.id);
		await this.env.cleanup();
	}
	/** Precedence: prompt-level > role-level > agent-level default. */
	resolveModelForCall(promptModel, roleName) {
		let model = this.config.model;
		if (roleName && this.config.roles[roleName]?.model && this.config.resolveModel) model = this.config.resolveModel(this.config.roles[roleName].model);
		if (promptModel && this.config.resolveModel) model = this.config.resolveModel(promptModel);
		this.agent.state.model = model;
	}
	/** Prepend a <role> block with the role's instructions; unknown/absent roles pass text through unchanged. */
	injectRoleInstructions(text, roleName) {
		if (!roleName) return text;
		const role = this.config.roles[roleName];
		if (!role) return text;
		return `<role>\n${role.instructions}\n</role>\n\n${text}`;
	}
	/** Throw early when commands are requested but the environment cannot register them. */
	assertCommandSupport(commands) {
		if (commands.length === 0) return;
		if (!this.env.commandSupport) throw new Error("[flue] Cannot use commands: this environment does not support command registration. Commands are only available in isolate sandbox mode. Remote sandboxes handle command execution at the platform level.");
	}
	/**
	 * Register temporary commands with the environment.
	 * @returns The registered names (for later unregisterCommands); empty when unsupported.
	 */
	registerCommands(commands) {
		if (!this.env.commandSupport || commands.length === 0) return [];
		const names = [];
		for (const cmd of commands) {
			this.env.commandSupport.register(cmd);
			names.push(cmd.name);
		}
		return names;
	}
	/** Unregister previously registered commands by name (no-op when unsupported or empty). */
	unregisterCommands(names) {
		if (!this.env.commandSupport || names.length === 0) return;
		for (const name of names) this.env.commandSupport.unregister(name);
	}
	/**
	 * Add caller-supplied tools to the agent for the duration of a call.
	 * Validates against built-in name collisions and duplicates first, then
	 * adapts each definition to the agent's tool interface (text-only results).
	 * @returns The registered tool names.
	 * @throws {Error} on a name conflict or duplicate.
	 */
	registerCustomTools(tools) {
		const names = [];
		for (const toolDef of tools) {
			if (BUILTIN_TOOL_NAMES.has(toolDef.name)) throw new Error(`[flue] Custom tool "${toolDef.name}" conflicts with a built-in tool. Built-in tools: ${[...BUILTIN_TOOL_NAMES].join(", ")}`);
			if (names.includes(toolDef.name)) throw new Error(`[flue] Duplicate custom tool name "${toolDef.name}". Tool names must be unique.`);
			names.push(toolDef.name);
		}
		const agentTools = tools.map((toolDef) => ({
			name: toolDef.name,
			label: toolDef.name,
			description: toolDef.description,
			parameters: toolDef.parameters,
			async execute(_toolCallId, params, signal) {
				if (signal?.aborted) throw new Error("Operation aborted");
				return {
					content: [{
						type: "text",
						text: await toolDef.execute(params)
					}],
					details: { customTool: toolDef.name }
				};
			}
		}));
		this.agent.state.tools = [...this.agent.state.tools, ...agentTools];
		return names;
	}
	/** Restore the agent's tool list to the built-in set, dropping all custom tools. */
	unregisterCustomTools() {
		this.agent.state.tools = [...this.builtinTools];
	}
	/** Persist the current messages/metadata/compaction state; sets createdAt on first save. */
	async save() {
		const now = (/* @__PURE__ */ new Date()).toISOString();
		const data = {
			messages: this.agent.state.messages,
			metadata: this.metadata,
			createdAt: this.createdAt ?? now,
			updatedAt: now,
			lastCompaction: this.lastCompaction
		};
		if (!this.createdAt) this.createdAt = now;
		await this.store.save(this.id, data);
	}
	/**
	 * Decide whether to compact after an agent run.
	 * - Context overflow: drop the failed assistant message, compact, retry (once).
	 * - Otherwise: compact when the token threshold is reached (usage from the
	 *   assistant message, or an estimate when the run errored).
	 * Aborted runs are ignored.
	 */
	async checkCompaction(assistantMessage) {
		if (!this.compactionSettings.enabled) return;
		if (assistantMessage.stopReason === "aborted") return;
		const contextWindow = this.agent.state.model.contextWindow ?? 0;
		if (isContextOverflow(assistantMessage, contextWindow)) {
			// Only one overflow-recovery attempt per session, to avoid loops.
			if (this.overflowRecoveryAttempted) return;
			this.overflowRecoveryAttempted = true;
			console.error(`[flue:compaction] Overflow detected, compacting and retrying...`);
			const messages = this.agent.state.messages;
			const lastMsg = messages[messages.length - 1];
			// Drop the overflowed assistant message before compacting.
			if (lastMsg && lastMsg.role === "assistant") this.agent.state.messages = messages.slice(0, -1);
			await this.runCompaction("overflow", true);
			return;
		}
		let contextTokens;
		if (assistantMessage.stopReason === "error") {
			// No usage data on error — estimate from the message history; bail
			// when no prior usage anchor exists.
			const estimate = estimateContextTokens(this.agent.state.messages);
			if (estimate.lastUsageIndex === null) return;
			contextTokens = estimate.tokens;
		} else contextTokens = calculateContextTokens(assistantMessage.usage);
		if (shouldCompact(contextTokens, contextWindow, this.compactionSettings)) {
			console.error(`[flue:compaction] Threshold reached — ${contextTokens} tokens used, window ${contextWindow}, reserve ${this.compactionSettings.reserveTokens}, triggering compaction`);
			await this.runCompaction("threshold", false);
		}
	}
	/**
	 * Run a full compaction cycle: prepare, summarize, swap in the compacted
	 * message list, persist, and (for overflow recovery) continue the agent.
	 * Failures are logged and swallowed — compaction is best-effort and must not
	 * crash the session.
	 * @param reason "overflow" or "threshold" (forwarded to compaction_start event).
	 * @param willRetry When true, re-run the agent after compaction (overflow recovery).
	 */
	async runCompaction(reason, willRetry) {
		this.compactionAbortController = new AbortController();
		const messagesBefore = this.agent.state.messages.length;
		try {
			const model = this.agent.state.model;
			const messages = this.agent.state.messages;
			const preparation = prepareCompaction(messages, this.compactionSettings, this.lastCompaction);
			if (!preparation) {
				console.error(`[flue:compaction] Nothing to compact (no valid cut point found)`);
				return;
			}
			console.error(`[flue:compaction] Summarizing ${preparation.messagesToSummarize.length} messages` + (preparation.isSplitTurn ? ` (split turn: ${preparation.turnPrefixMessages.length} prefix messages)` : "") + `, keeping messages from index ${preparation.firstKeptIndex}`);
			const estimatedTokens = preparation.tokensBefore;
			this.eventCallback?.({
				type: "compaction_start",
				reason,
				estimatedTokens
			});
			const result = await compact(preparation, model, void 0, this.compactionAbortController.signal);
			if (this.compactionAbortController.signal.aborted) return;
			const newMessages = buildCompactedMessages(messages, result);
			this.agent.state.messages = newMessages;
			const messagesAfter = newMessages.length;
			console.error(`[flue:compaction] Complete — messages: ${messagesBefore} → ${messagesAfter}, tokens before: ${result.tokensBefore}`);
			this.eventCallback?.({
				type: "compaction_end",
				messagesBefore,
				messagesAfter
			});
			// After buildCompactedMessages, the summary sits at index 0 and kept
			// messages start at index 1 — hence firstKeptIndex: 1 here.
			this.lastCompaction = {
				summary: result.summary,
				firstKeptIndex: 1,
				details: result.details
			};
			await this.save();
			if (willRetry) {
				// Drop a trailing errored assistant message before continuing.
				const msgs = this.agent.state.messages;
				const lastMsg = msgs[msgs.length - 1];
				if (lastMsg?.role === "assistant" && lastMsg.stopReason === "error") this.agent.state.messages = msgs.slice(0, -1);
				console.error(`[flue:compaction] Retrying after overflow recovery...`);
				await this.agent.continue();
			}
		} catch (error) {
			const errorMessage = error instanceof Error ? error.message : String(error);
			console.error(`[flue:compaction] Failed: ${errorMessage}`);
		} finally {
			this.compactionAbortController = void 0;
		}
	}
	/** Surface the agent's recorded error (if any) as a thrown Error tagged with the calling context. */
	throwIfError(context) {
		const errorMsg = this.agent.state.errorMessage;
		if (errorMsg) throw new Error(`[flue] ${context} failed: ${errorMsg}`);
	}
	/** Return the text content of the most recent assistant message, or "" when none exists. */
	getAssistantText() {
		const messages = this.agent.state.messages;
		for (let i = messages.length - 1; i >= 0; i--) {
			const msg = messages[i];
			if (msg.role !== "assistant") continue;
			const content = msg.content;
			if (!Array.isArray(content)) continue;
			const textParts = [];
			for (const block of content) if (block.type === "text") textParts.push(block.text);
			return textParts.join("\n");
		}
		return "";
	}
	/**
	 * Extract and validate the delimited result from the latest assistant text.
	 * When the delimiters are missing entirely, prompt the agent once more with
	 * an explicit extraction request and try again; all other extraction errors
	 * propagate unchanged.
	 */
	async extractResultWithRetry(schema) {
		const text = this.getAssistantText();
		try {
			return extractResult(text, schema);
		} catch (err) {
			if (!(err instanceof ResultExtractionError)) throw err;
			// Only retry for missing delimiters — schema/JSON failures are final.
			if (!err.message.includes("RESULT_START")) throw err;
			const followUpPrompt = buildResultExtractionPrompt(schema);
			await this.agent.prompt(followUpPrompt);
			await this.agent.waitForIdle();
			await this.save();
			return extractResult(this.getAssistantText(), schema);
		}
	}
};
|
|
1288
|
+
/**
 * Collapse ".", "..", and duplicate slashes in a path.
 * Always returns an absolute path; ".." segments at the root are discarded.
 */
function normalizePath(p) {
	const stack = [];
	for (const segment of p.split("/")) {
		if (segment === "" || segment === ".") continue;
		if (segment === "..") {
			stack.pop();
		} else {
			stack.push(segment);
		}
	}
	return `/${stack.join("/")}`;
}
|
|
1298
|
+
|
|
1299
|
+
//#endregion
|
|
1300
|
+
export { createTools as a, BUILTIN_TOOL_NAMES as i, Session as n, discoverSessionContext as o, normalizePath as r, parseFrontmatterFile as s, InMemorySessionStore as t };
|