@flue/sdk 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,943 +0,0 @@
1
- import { i as loadSkillByPath, n as createTools, r as discoverSessionContext, t as BUILTIN_TOOL_NAMES } from "./agent-BYG0nVbQ.mjs";
2
- import { completeSimple, isContextOverflow } from "@mariozechner/pi-ai";
3
- import { Agent } from "@mariozechner/pi-agent-core";
4
- import { toJsonSchema } from "@valibot/to-json-schema";
5
- import * as v from "valibot";
6
-
7
//#region src/compaction.ts

/** Fallback compaction settings used when the session config omits a field. */
const DEFAULT_COMPACTION_SETTINGS = {
  enabled: true,
  reserveTokens: 16384,
  keepRecentTokens: 20000,
};

/**
 * Total context size reported by a usage record. Prefers the provider's
 * `totalTokens` when present and non-zero; otherwise sums the four parts.
 */
function calculateContextTokens(usage) {
  const summed = usage.input + usage.output + usage.cacheRead + usage.cacheWrite;
  return usage.totalTokens || summed;
}

/**
 * Usage data for a successfully completed assistant message.
 * Returns `undefined` for non-assistant messages, aborted/errored turns,
 * or messages without a usage record.
 */
function getAssistantUsage(msg) {
  if (msg.role !== "assistant" || !("usage" in msg)) return undefined;
  const { stopReason, usage } = msg;
  if (stopReason === "aborted" || stopReason === "error") return undefined;
  return usage || undefined;
}

/**
 * Walks backwards through `messages` for the most recent assistant message
 * carrying trustworthy usage data. Returns `{ usage, index }` or `undefined`.
 */
function getLastAssistantUsageInfo(messages) {
  for (let idx = messages.length - 1; idx >= 0; idx--) {
    const usage = getAssistantUsage(messages[idx]);
    if (usage) return { usage, index: idx };
  }
  return undefined;
}
32
/** chars/4 heuristic. Conservative (overestimates). */
function estimateTokens(message) {
  const toTokens = (chars) => Math.ceil(chars / 4);
  if (message.role === "user") {
    const { content } = message;
    if (typeof content === "string") return toTokens(content.length);
    let chars = 0;
    if (Array.isArray(content)) {
      for (const block of content) {
        if (block.type === "text") chars += block.text.length;
      }
    }
    return toTokens(chars);
  }
  if (message.role === "assistant") {
    let chars = 0;
    for (const block of message.content) {
      if (block.type === "text") chars += block.text.length;
      else if (block.type === "thinking") chars += block.thinking.length;
      else if (block.type === "toolCall") chars += block.name.length + JSON.stringify(block.arguments).length;
    }
    return toTokens(chars);
  }
  if (message.role === "toolResult") {
    let chars = 0;
    for (const block of message.content) {
      if (block.type === "text") chars += block.text.length;
      else if (block.type === "image") chars += 4800; // flat per-image char estimate
    }
    return toTokens(chars);
  }
  return 0; // unknown roles contribute nothing
}

/**
 * Estimate the total context size of `messages`: exact usage from the last
 * completed assistant message plus a chars/4 estimate for anything after it.
 * Falls back to a pure estimate when no usage record exists.
 */
function estimateContextTokens(messages) {
  const usageInfo = getLastAssistantUsageInfo(messages);
  if (!usageInfo) {
    const estimated = messages.reduce((sum, m) => sum + estimateTokens(m), 0);
    return {
      tokens: estimated,
      usageTokens: 0,
      trailingTokens: estimated,
      lastUsageIndex: null,
    };
  }
  const usageTokens = calculateContextTokens(usageInfo.usage);
  let trailingTokens = 0;
  for (const msg of messages.slice(usageInfo.index + 1)) trailingTokens += estimateTokens(msg);
  return {
    tokens: usageTokens + trailingTokens,
    usageTokens,
    trailingTokens,
    lastUsageIndex: usageInfo.index,
  };
}

/** True when the context has grown past `contextWindow - reserveTokens`. */
function shouldCompact(contextTokens, contextWindow, settings) {
  if (!settings.enabled) return false;
  const threshold = contextWindow - settings.reserveTokens;
  return contextTokens > threshold;
}
86
/** Fresh, empty per-category file-path tracker. */
function createFileOps() {
  return {
    read: new Set(),
    written: new Set(),
    edited: new Set(),
  };
}

/** Record the paths of read/write/edit tool calls in an assistant message into `fileOps`. */
function extractFileOpsFromMessage(message, fileOps) {
  if (message.role !== "assistant") return;
  if (!Array.isArray(message.content)) return;
  for (const block of message.content) {
    if (block.type !== "toolCall") continue;
    const args = block.arguments;
    const path = args && typeof args.path === "string" ? args.path : undefined;
    if (!path) continue;
    if (block.name === "read") fileOps.read.add(path);
    else if (block.name === "write") fileOps.written.add(path);
    else if (block.name === "edit") fileOps.edited.add(path);
  }
}

/**
 * Split tracked paths into sorted read-only vs. modified lists.
 * A path counts as read-only only if it was never written or edited.
 */
function computeFileLists(fileOps) {
  const modified = new Set([...fileOps.edited, ...fileOps.written]);
  const readOnly = [...fileOps.read].filter((f) => !modified.has(f));
  return {
    readFiles: readOnly.sort(),
    modifiedFiles: [...modified].sort(),
  };
}

/** Render the file lists as tagged sections; empty string when both are empty. */
function formatFileOperations(readFiles, modifiedFiles) {
  const sections = [];
  if (readFiles.length > 0) sections.push(`<read-files>\n${readFiles.join("\n")}\n</read-files>`);
  if (modifiedFiles.length > 0) sections.push(`<modified-files>\n${modifiedFiles.join("\n")}\n</modified-files>`);
  return sections.length === 0 ? "" : `\n\n${sections.join("\n\n")}`;
}
130
/** Max characters of a single tool result included in the serialized transcript. */
const TOOL_RESULT_MAX_CHARS = 2000;

/** Clip `text` to `maxChars`, appending a note with the truncated character count. */
function truncateForSummary(text, maxChars) {
  if (text.length <= maxChars) return text;
  const dropped = text.length - maxChars;
  return `${text.slice(0, maxChars)}\n\n[... ${dropped} more characters truncated]`;
}

/** Serialize messages to text so the summarization model doesn't treat it as a conversation to continue. */
function serializeConversation(messages) {
  const parts = [];
  const textOf = (blocks) => blocks.filter((c) => c.type === "text").map((c) => c.text).join("");
  for (const msg of messages) {
    switch (msg.role) {
      case "user": {
        const text = typeof msg.content === "string" ? msg.content : textOf(msg.content);
        if (text) parts.push(`[User]: ${text}`);
        break;
      }
      case "assistant": {
        const textParts = [];
        const thinkingParts = [];
        const toolCalls = [];
        for (const block of msg.content) {
          if (block.type === "text") textParts.push(block.text);
          else if (block.type === "thinking") thinkingParts.push(block.thinking);
          else if (block.type === "toolCall") {
            const argsStr = Object.entries(block.arguments).map(([k, v]) => `${k}=${JSON.stringify(v)}`).join(", ");
            toolCalls.push(`${block.name}(${argsStr})`);
          }
        }
        if (thinkingParts.length > 0) parts.push(`[Assistant thinking]: ${thinkingParts.join("\n")}`);
        if (textParts.length > 0) parts.push(`[Assistant]: ${textParts.join("\n")}`);
        if (toolCalls.length > 0) parts.push(`[Assistant tool calls]: ${toolCalls.join("; ")}`);
        break;
      }
      case "toolResult": {
        const text = textOf(msg.content);
        if (text) parts.push(`[Tool result]: ${truncateForSummary(text, TOOL_RESULT_MAX_CHARS)}`);
        break;
      }
    }
  }
  return parts.join("\n\n");
}
164
// System prompt for all summarization calls: forbids the model from
// continuing the conversation instead of summarizing it.
const SUMMARIZATION_SYSTEM_PROMPT = "You are a context summarization assistant. Your task is to read a conversation between a user and an AI coding assistant, then produce a structured summary following the exact format specified.\n\nDo NOT continue the conversation. Do NOT respond to any questions in the conversation. ONLY output the structured summary.";
// User prompt for a first-time (from-scratch) context checkpoint summary.
const SUMMARIZATION_PROMPT = `The messages above are a conversation to summarize. Create a structured context checkpoint summary that another LLM will use to continue the work.

Use this EXACT format:

## Goal
[What is the user trying to accomplish? Can be multiple items if the session covers different tasks.]

## Constraints & Preferences
- [Any constraints, preferences, or requirements mentioned by user]
- [Or "(none)" if none were mentioned]

## Progress
### Done
- [x] [Completed tasks/changes]

### In Progress
- [ ] [Current work]

### Blocked
- [Issues preventing progress, if any]

## Key Decisions
- **[Decision]**: [Brief rationale]

## Next Steps
1. [Ordered list of what should happen next]

## Critical Context
- [Any data, examples, or references needed to continue]
- [Or "(none)" if not applicable]

Keep each section concise. Preserve exact file paths, function names, and error messages.`;
// User prompt for incremental compaction: folds NEW messages into an
// existing summary supplied in <previous-summary> tags.
const UPDATE_SUMMARIZATION_PROMPT = `The messages above are NEW conversation messages to incorporate into the existing summary provided in <previous-summary> tags.

Update the existing structured summary with new information. RULES:
- PRESERVE all existing information from the previous summary
- ADD new progress, decisions, and context from the new messages
- UPDATE the Progress section: move items from "In Progress" to "Done" when completed
- UPDATE "Next Steps" based on what was accomplished
- PRESERVE exact file paths, function names, and error messages
- If something is no longer relevant, you may remove it

Use this EXACT format:

## Goal
[Preserve existing goals, add new ones if the task expanded]

## Constraints & Preferences
- [Preserve existing, add new ones discovered]

## Progress
### Done
- [x] [Include previously done items AND newly completed items]

### In Progress
- [ ] [Current work - update based on progress]

### Blocked
- [Current blockers - remove if resolved]

## Key Decisions
- **[Decision]**: [Brief rationale] (preserve all previous, add new)

## Next Steps
1. [Update based on current state]

## Critical Context
- [Preserve important context, add new if needed]

Keep each section concise. Preserve exact file paths, function names, and error messages.`;
// User prompt used when a single turn is split at a non-user cut point:
// summarizes the dropped prefix so the retained suffix stays intelligible.
const TURN_PREFIX_SUMMARIZATION_PROMPT = `This is the PREFIX of a turn that was too large to keep. The SUFFIX (recent work) is retained.

Summarize the prefix to provide context for the retained suffix:

## Original Request
[What did the user ask for in this turn?]

## Early Progress
- [Key decisions and work done in the prefix]

## Context for Suffix
- [Information needed to understand the retained recent work]

Be concise. Focus on what's needed to understand the kept suffix.`;
249
/** Valid cut points: user or assistant messages. Never cut at toolResult. */
function findValidCutPoints(messages, start, end) {
  const indices = [];
  for (let idx = start; idx < end; idx++) {
    const { role } = messages[idx];
    if (role === "user" || role === "assistant") {
      indices.push(idx);
    }
  }
  return indices;
}

/**
 * Index of the user message that opens the turn containing `index`,
 * scanning no further back than `start`; -1 when none is found.
 */
function findTurnStartIndex(messages, index, start) {
  for (let idx = index; idx >= start; idx--) {
    if (messages[idx].role === "user") return idx;
  }
  return -1;
}
262
/**
 * Pick the message index where compaction cuts the history, keeping roughly
 * `keepRecentTokens` worth of recent messages (estimated via chars/4).
 * Returns { firstKeptIndex, turnStartIndex, isSplitTurn }; when the chosen
 * cut lands on an assistant message the turn is "split" and turnStartIndex
 * points at the user message that opened it.
 */
function findCutPoint(messages, start, end, keepRecentTokens) {
	const cutPoints = findValidCutPoints(messages, start, end);
	// No user/assistant message in range: nothing to cut at.
	if (cutPoints.length === 0) return {
		firstKeptIndex: start,
		turnStartIndex: -1,
		isSplitTurn: false
	};
	let accumulatedTokens = 0;
	// Default: keep everything from the earliest valid cut point.
	let cutIndex = cutPoints[0];
	// Walk backwards, accumulating estimated tokens, until the keep budget
	// is reached; then snap forward to the first valid cut point at or
	// after that position (never cutting at a toolResult).
	for (let i = end - 1; i >= start; i--) {
		const messageTokens = estimateTokens(messages[i]);
		accumulatedTokens += messageTokens;
		if (accumulatedTokens >= keepRecentTokens) {
			for (let c = 0; c < cutPoints.length; c++) if (cutPoints[c] >= i) {
				cutIndex = cutPoints[c];
				break;
			}
			break;
		}
	}
	// Cutting at a user message starts a fresh turn; cutting at an
	// assistant message splits the turn it belongs to.
	const isUserMessage = messages[cutIndex].role === "user";
	const turnStartIndex = isUserMessage ? -1 : findTurnStartIndex(messages, cutIndex, start);
	return {
		firstKeptIndex: cutIndex,
		turnStartIndex,
		isSplitTurn: !isUserMessage && turnStartIndex !== -1
	};
}
290
/** Pure function — no I/O. Finds cut point, extracts messages to summarize, tracks file ops. */
function prepareCompaction(messages, settings, previousCompaction) {
	if (messages.length === 0) return void 0;
	// Never re-summarize messages already covered by a previous compaction.
	const boundaryStart = previousCompaction ? previousCompaction.firstKeptIndex : 0;
	const boundaryEnd = messages.length;
	const tokensBefore = estimateContextTokens(messages).tokens;
	const cutPoint = findCutPoint(messages, boundaryStart, boundaryEnd, settings.keepRecentTokens);
	// Cut did not advance past the previous boundary: compaction would be a no-op.
	if (cutPoint.firstKeptIndex <= boundaryStart) return void 0;
	// For a split turn, the "history" ends where the split turn begins; the
	// turn prefix (turn start .. cut) is summarized separately.
	const historyEnd = cutPoint.isSplitTurn ? cutPoint.turnStartIndex : cutPoint.firstKeptIndex;
	const messagesToSummarize = messages.slice(boundaryStart, historyEnd);
	const turnPrefixMessages = cutPoint.isSplitTurn ? messages.slice(cutPoint.turnStartIndex, cutPoint.firstKeptIndex) : [];
	// Seed file ops with the previous compaction's lists so they accumulate
	// across compactions (previous modified files land in `edited`).
	const fileOps = createFileOps();
	if (previousCompaction?.details) {
		for (const f of previousCompaction.details.readFiles ?? []) fileOps.read.add(f);
		for (const f of previousCompaction.details.modifiedFiles ?? []) fileOps.edited.add(f);
	}
	for (const msg of messagesToSummarize) extractFileOpsFromMessage(msg, fileOps);
	for (const msg of turnPrefixMessages) extractFileOpsFromMessage(msg, fileOps);
	return {
		firstKeptIndex: cutPoint.firstKeptIndex,
		messagesToSummarize,
		turnPrefixMessages,
		isSplitTurn: cutPoint.isSplitTurn,
		tokensBefore,
		previousSummary: previousCompaction?.summary,
		fileOps,
		settings
	};
}
319
/**
 * Produce (or incrementally update) a structured history summary via a
 * one-shot completion. Throws when the completion ends in an error.
 */
async function generateSummary(currentMessages, model, reserveTokens, apiKey, signal, previousSummary) {
  // Cap summary output at 80% of the reserve budget, never above 16k tokens.
  const maxTokens = Math.min(Math.floor(0.8 * reserveTokens), 16000);
  const sections = [`<conversation>\n${serializeConversation(currentMessages)}\n</conversation>\n\n`];
  if (previousSummary) sections.push(`<previous-summary>\n${previousSummary}\n</previous-summary>\n\n`);
  // With a previous summary we ask for an incremental update; otherwise a fresh one.
  sections.push(previousSummary ? UPDATE_SUMMARIZATION_PROMPT : SUMMARIZATION_PROMPT);
  const request = {
    systemPrompt: SUMMARIZATION_SYSTEM_PROMPT,
    messages: [{
      role: "user",
      content: [{ type: "text", text: sections.join("") }],
      timestamp: Date.now(),
    }],
  };
  const completionOptions = { maxTokens, signal };
  if (apiKey) completionOptions.apiKey = apiKey;
  if (model.reasoning) completionOptions.reasoning = "high";
  const response = await completeSimple(model, request, completionOptions);
  if (response.stopReason === "error") throw new Error(`Summarization failed: ${response.errorMessage || "Unknown error"}`);
  return response.content.filter((c) => c.type === "text").map((c) => c.text).join("\n");
}
346
/**
 * Summarize the dropped prefix of a split turn. Smaller budget than the
 * full-history summary: 50% of the reserve, capped at 16k tokens.
 */
async function generateTurnPrefixSummary(messages, model, reserveTokens, apiKey, signal) {
  const maxTokens = Math.min(Math.floor(0.5 * reserveTokens), 16000);
  const text = `<conversation>\n${serializeConversation(messages)}\n</conversation>\n\n${TURN_PREFIX_SUMMARIZATION_PROMPT}`;
  const completionOptions = { maxTokens, signal };
  if (apiKey) completionOptions.apiKey = apiKey;
  // NOTE(review): unlike generateSummary, this never escalates `reasoning`
  // for reasoning-capable models — assumed intentional for the cheaper pass.
  const response = await completeSimple(model, {
    systemPrompt: SUMMARIZATION_SYSTEM_PROMPT,
    messages: [{
      role: "user",
      content: [{ type: "text", text }],
      timestamp: Date.now(),
    }],
  }, completionOptions);
  if (response.stopReason === "error") throw new Error(`Turn prefix summarization failed: ${response.errorMessage || "Unknown error"}`);
  return response.content.filter((c) => c.type === "text").map((c) => c.text).join("\n");
}
368
/**
 * Execute a prepared compaction: generate the summary (in parallel with the
 * turn-prefix summary when the cut split a turn), append the file-operation
 * sections, and return the compaction result record.
 */
async function compact(preparation, model, apiKey, signal) {
  const { firstKeptIndex, messagesToSummarize, turnPrefixMessages, isSplitTurn, tokensBefore, previousSummary, fileOps, settings } = preparation;
  let summary;
  if (isSplitTurn && turnPrefixMessages.length > 0) {
    // Both summaries are independent — run them concurrently.
    const historyTask = messagesToSummarize.length > 0
      ? generateSummary(messagesToSummarize, model, settings.reserveTokens, apiKey, signal, previousSummary)
      : Promise.resolve("No prior history.");
    const prefixTask = generateTurnPrefixSummary(turnPrefixMessages, model, settings.reserveTokens, apiKey, signal);
    const [historyResult, turnPrefixResult] = await Promise.all([historyTask, prefixTask]);
    summary = `${historyResult}\n\n---\n\n**Turn Context (split turn):**\n\n${turnPrefixResult}`;
  } else {
    summary = await generateSummary(messagesToSummarize, model, settings.reserveTokens, apiKey, signal, previousSummary);
  }
  const { readFiles, modifiedFiles } = computeFileLists(fileOps);
  summary += formatFileOperations(readFiles, modifiedFiles);
  return {
    summary,
    firstKeptIndex,
    tokensBefore,
    details: { readFiles, modifiedFiles },
  };
}
387
/**
 * Rebuild the message list after compaction: a synthetic user message
 * carrying the summary, followed by the kept tail of the original history.
 */
function buildCompactedMessages(messages, result) {
  const summaryMessage = {
    role: "user",
    content: [{
      type: "text",
      text: `[Context Summary]\n\n${result.summary}`,
    }],
    timestamp: Date.now(),
  };
  const kept = messages.slice(result.firstKeptIndex);
  return [summaryMessage, ...kept];
}
397
-
398
//#endregion
//#region src/result.ts
/** Standing instructions prepended to every headless prompt. */
const HEADLESS_PREAMBLE = "You are running in headless mode with no human operator. Work autonomously — never ask questions, never wait for user input. Make your best judgment and proceed independently.";

/** Render the result-format instructions: JSON schema plus delimiter examples. */
function buildResultInstructions(schema) {
  // Strip the $schema meta key so the model sees only the shape itself.
  const { $schema: _, ...schemaWithoutMeta } = toJsonSchema(schema, { errorMode: "ignore" });
  const lines = [
    "",
    "```json",
    JSON.stringify(schemaWithoutMeta, null, 2),
    "```",
    "",
    "Example: (Object)",
    "---RESULT_START---",
    "{\"key\": \"value\"}",
    "---RESULT_END---",
    "",
    "Example: (String)",
    "---RESULT_START---",
    "Hello, world!",
    "---RESULT_END---",
  ];
  return lines.join("\n");
}

/** Follow-up prompt used when the LLM forgets to include RESULT_START/RESULT_END delimiters. */
function buildResultExtractionPrompt(schema) {
  const lines = [
    "Your task is complete. Now respond with ONLY your final result.",
    "No explanation, no preamble — just the result in the following format, conforming to this schema:",
    buildResultInstructions(schema),
  ];
  return lines.join("\n");
}

/** Compose the full headless prompt for a skill invocation. */
function buildSkillPrompt(skillInstructions, args, schema) {
  const parts = [HEADLESS_PREAMBLE, "", skillInstructions];
  const hasArgs = args && Object.keys(args).length > 0;
  if (hasArgs) parts.push(`\nArguments:\n${JSON.stringify(args, null, 2)}`);
  if (schema) {
    parts.push("When complete, you MUST output your result between these exact delimiters conforming to this schema:");
    parts.push(buildResultInstructions(schema));
  }
  return parts.join("\n");
}

/** Compose the full headless prompt for a free-form text prompt. */
function buildPromptText(text, schema) {
  const parts = [HEADLESS_PREAMBLE, "", text];
  if (schema) {
    parts.push("When complete, you MUST output your result between these exact delimiters conforming to this schema:");
    parts.push(buildResultInstructions(schema));
  }
  return parts.join("\n");
}
453
/** Extract the last ---RESULT_START---/---RESULT_END--- block from agent text and validate against schema. */
function extractResult(text, schema) {
  const resultBlock = extractLastResultBlock(text);
  if (resultBlock === null) {
    throw new ResultExtractionError("No ---RESULT_START--- / ---RESULT_END--- block found in the assistant response.", text);
  }
  let result = resultBlock;
  // Structured schemas expect JSON inside the delimiters; strings pass through raw.
  const expectsJson = schema.type === "object" || schema.type === "array";
  if (expectsJson) {
    try {
      result = JSON.parse(resultBlock);
    } catch {
      throw new ResultExtractionError("Result block contains invalid JSON for the expected schema.", resultBlock);
    }
  }
  const parsed = v.safeParse(schema, result);
  if (!parsed.success) {
    const issues = parsed.issues.map((i) => i.message).join(", ");
    throw new ResultExtractionError(`Result does not match the expected schema: ${issues}`, resultBlock);
  }
  return parsed.output;
}

/** Last delimiter-wrapped block in `text`, trimmed; null when none is present. */
function extractLastResultBlock(text) {
  let lastMatch = null;
  for (const match of text.matchAll(/---RESULT_START---\s*\n([\s\S]*?)---RESULT_END---/g)) {
    lastMatch = match[1]?.trim() ?? null;
  }
  return lastMatch;
}

/** Error thrown when the assistant's final result cannot be extracted or validated. */
var ResultExtractionError = class extends Error {
  constructor(message, rawOutput) {
    super(message);
    this.rawOutput = rawOutput;
    this.name = "ResultExtractionError";
  }
};
480
-
481
//#endregion
//#region src/session.ts
/** Internal session implementation. Not exported publicly — wrapped by FlueSession. */
/** In-memory session store. Sessions persist for the lifetime of the process. */
var InMemorySessionStore = class {
  store = new Map();

  /** Persist `data` under `id`, overwriting any previous snapshot. */
  async save(id, data) {
    this.store.set(id, data);
  }

  /** Load the snapshot for `id`; resolves to null when absent. */
  async load(id) {
    const data = this.store.get(id);
    return data ?? null;
  }

  /** Remove the snapshot for `id` (no-op when absent). */
  async delete(id) {
    this.store.delete(id);
  }
};
497
var Session = class Session {
	id; // unique session identifier; also the persistence key in `store`
	metadata; // caller metadata, restored from existingData in the constructor
	agent; // underlying pi-agent-core Agent driving the conversation
	config; // session config: model, roles, skills, compaction, resolveModel
	env; // execution environment (fs ops, exec, command support, cleanup)
	store; // persistence backend implementing save/load/delete
	createdAt; // creation timestamp, restored from existingData
	compactionSettings; // config.compaction merged over DEFAULT_COMPACTION_SETTINGS
	lastCompaction; // most recent compaction result, restored from existingData
	overflowRecoveryAttempted = false; // presumably guards repeated context-overflow recovery — used outside this view, confirm
	compactionAbortController; // NOTE(review): used outside the visible span; presumably cancels in-flight compaction
	eventCallback; // optional subscriber for session-level events
	builtinTools; // tools produced by createTools(env)
	sessionCommands; // commands registered around every prompt/skill/shell call
512
	/**
	 * Build a session: restore persisted state from `existingData` (if any),
	 * merge compaction settings over defaults, create the built-in tools and
	 * the underlying Agent, then bridge agent events to `onAgentEvent`.
	 */
	constructor(id, config, env, store, existingData, onAgentEvent, sessionCommands) {
		this.id = id;
		this.config = config;
		this.env = env;
		this.store = store;
		this.sessionCommands = sessionCommands ?? [];
		// Restore persisted state when resuming an existing session.
		this.metadata = existingData?.metadata ?? {};
		this.createdAt = existingData?.createdAt;
		this.lastCompaction = existingData?.lastCompaction;
		// Per-field merge: any setting omitted from config falls back to the default.
		const cc = config.compaction;
		this.compactionSettings = {
			enabled: cc?.enabled ?? DEFAULT_COMPACTION_SETTINGS.enabled,
			reserveTokens: cc?.reserveTokens ?? DEFAULT_COMPACTION_SETTINGS.reserveTokens,
			keepRecentTokens: cc?.keepRecentTokens ?? DEFAULT_COMPACTION_SETTINGS.keepRecentTokens
		};
		const systemPrompt = config.systemPrompt;
		const tools = createTools(env);
		this.builtinTools = tools;
		const previousMessages = existingData?.messages ?? [];
		this.agent = new Agent({
			initialState: {
				systemPrompt,
				model: config.model,
				tools,
				messages: previousMessages
			},
			toolExecution: "parallel"
		});
		this.eventCallback = onAgentEvent;
		const emit = onAgentEvent;
		// Translate low-level agent events into the session's public event shape.
		this.agent.subscribe(async (event) => {
			switch (event.type) {
				case "agent_start":
					emit?.({ type: "agent_start" });
					break;
				case "message_update": {
					// Only streaming text deltas are forwarded; other message events are dropped.
					const aEvent = event.assistantMessageEvent;
					if (aEvent.type === "text_delta") emit?.({
						type: "text_delta",
						text: aEvent.delta
					});
					break;
				}
				case "tool_execution_start":
					emit?.({
						type: "tool_start",
						toolName: event.toolName,
						toolCallId: event.toolCallId,
						args: event.args
					});
					break;
				case "tool_execution_end":
					emit?.({
						type: "tool_end",
						toolName: event.toolName,
						toolCallId: event.toolCallId,
						isError: event.isError,
						result: event.result
					});
					break;
				case "turn_end":
					emit?.({ type: "turn_end" });
					break;
				case "agent_end": {
					// After each run, give the last assistant message a chance to
					// trigger compaction (checkCompaction is defined later in this
					// class — not visible in this span).
					const messages = this.agent.state.messages;
					const lastMsg = messages[messages.length - 1];
					if (lastMsg?.role === "assistant") await this.checkCompaction(lastMsg);
					emit?.({ type: "done" });
					break;
				}
			}
		});
	}
585
	/**
	 * Send a free-form prompt to the agent and wait for the run to finish.
	 * Resolves the model (prompt > role > default), injects role instructions,
	 * registers per-call commands/tools for the duration of the run, persists
	 * the session, and returns either `{ text }` or a schema-validated result.
	 */
	async prompt(text, options) {
		// Fail fast on typos before any LLM work starts.
		this.assertRoleExists(options?.role);
		this.resolveModelForCall(options?.model, options?.role);
		const promptWithRole = this.injectRoleInstructions(text, options?.role);
		const schema = options?.result;
		const fullPrompt = buildPromptText(promptWithRole, schema);
		const effectiveCommands = this.mergeCommands(options?.commands);
		if (effectiveCommands.length > 0) this.assertCommandSupport(effectiveCommands);
		const registeredCommandNames = this.registerCommands(effectiveCommands);
		const registeredToolNames = options?.tools ? this.registerCustomTools(options.tools) : [];
		try {
			await this.agent.prompt(fullPrompt);
			await this.agent.waitForIdle();
			// throwIfError / save / extractResultWithRetry / getAssistantText are
			// defined elsewhere in this class (not visible in this span).
			this.throwIfError("prompt");
			await this.save();
			if (schema) return this.extractResultWithRetry(schema);
			return { text: this.getAssistantText() };
		} finally {
			// Always roll back per-call registrations, even on error.
			this.unregisterCommands(registeredCommandNames);
			if (registeredToolNames.length > 0) this.unregisterCustomTools();
		}
	}
607
	/**
	 * Run a registered skill by name. Names containing "/" or ending in
	 * .md/.markdown may also be loaded by relative path via loadSkillByPath.
	 * Otherwise mirrors prompt(): model/role resolution, per-call command and
	 * tool registration, persistence, and optional schema-validated result.
	 */
	async skill(name, options) {
		this.assertRoleExists(options?.role);
		let registeredSkill = this.config.skills[name];
		// Path-looking names fall back to on-disk skill discovery.
		if (!registeredSkill && (name.includes("/") || /\.(md|markdown)$/i.test(name))) {
			const loaded = await loadSkillByPath(this.env, this.env.cwd, name);
			if (loaded) registeredSkill = loaded;
		}
		if (!registeredSkill) {
			const available = Object.keys(this.config.skills).join(", ") || "(none)";
			throw new Error(`Skill "${name}" not registered. Available: ${available}. Skills can also be referenced by relative path under .agents/skills/ (e.g. "triage/reproduce.md").`);
		}
		this.resolveModelForCall(options?.model, options?.role);
		const schema = options?.result;
		const skillPrompt = buildSkillPrompt(registeredSkill.instructions, options?.args, schema);
		const promptWithRole = this.injectRoleInstructions(skillPrompt, options?.role);
		const effectiveCommands = this.mergeCommands(options?.commands);
		if (effectiveCommands.length > 0) this.assertCommandSupport(effectiveCommands);
		const registeredCommandNames = this.registerCommands(effectiveCommands);
		const registeredToolNames = options?.tools ? this.registerCustomTools(options.tools) : [];
		try {
			await this.agent.prompt(promptWithRole);
			await this.agent.waitForIdle();
			this.throwIfError(`skill("${name}")`);
			await this.save();
			if (schema) return this.extractResultWithRetry(schema);
			return { text: this.getAssistantText() };
		} finally {
			// Always roll back per-call registrations, even on error.
			this.unregisterCommands(registeredCommandNames);
			if (registeredToolNames.length > 0) this.unregisterCustomTools();
		}
	}
638
- async shell(command, options) {
639
- const effectiveCommands = this.mergeCommands(options?.commands);
640
- if (effectiveCommands.length > 0) this.assertCommandSupport(effectiveCommands);
641
- const registeredNames = this.registerCommands(effectiveCommands);
642
- try {
643
- const result = await this.env.exec(command, {
644
- env: options?.env,
645
- cwd: options?.cwd
646
- });
647
- return {
648
- stdout: result.stdout,
649
- stderr: result.stderr,
650
- exitCode: result.exitCode
651
- };
652
- } finally {
653
- this.unregisterCommands(registeredNames);
654
- }
655
- }
656
	/**
	 * Run a one-shot sub-session scoped to a workspace directory. Builds a
	 * path-rebased view of the parent environment, discovers local context
	 * (system prompt + skills) from the workspace, and runs `prompt` in a
	 * throwaway Session that is destroyed when done.
	 * Note: `normalizePath` is defined elsewhere in this module (outside the
	 * visible span).
	 */
	async task(prompt, options) {
		this.assertRoleExists(options?.role);
		if (!options?.workspace) throw new Error("[flue] task() requires a workspace option.");
		// Absolute workspace paths are used as-is; relative ones resolve against the parent cwd.
		const taskCwd = options.workspace.startsWith("/") ? options.workspace : normalizePath(this.env.cwd + "/" + options.workspace);
		function taskResolvePath(p) {
			if (p.startsWith("/")) return normalizePath(p);
			if (taskCwd === "/") return normalizePath("/" + p);
			return normalizePath(taskCwd + "/" + p);
		}
		const parentEnv = this.env;
		// Delegate every operation to the parent env, rebasing paths onto taskCwd.
		const taskEnv = {
			exec: (cmd, opts) => parentEnv.exec(cmd, {
				cwd: opts?.cwd ?? taskCwd,
				env: opts?.env
			}),
			readFile: (p) => parentEnv.readFile(taskResolvePath(p)),
			readFileBuffer: (p) => parentEnv.readFileBuffer(taskResolvePath(p)),
			writeFile: (p, c) => parentEnv.writeFile(taskResolvePath(p), c),
			stat: (p) => parentEnv.stat(taskResolvePath(p)),
			readdir: (p) => parentEnv.readdir(taskResolvePath(p)),
			exists: (p) => parentEnv.exists(taskResolvePath(p)),
			mkdir: (p, o) => parentEnv.mkdir(taskResolvePath(p), o),
			rm: (p, o) => parentEnv.rm(taskResolvePath(p), o),
			cwd: taskCwd,
			resolvePath: taskResolvePath,
			commandSupport: parentEnv.commandSupport,
			// The parent env owns the real resources; the task view cleans up nothing.
			cleanup: async () => {}
		};
		const localContext = await discoverSessionContext(taskEnv);
		// Model precedence for the sub-session: per-call > role > session default.
		let taskModel = this.config.model;
		const taskRole = options?.role ? this.config.roles[options.role] : void 0;
		if (taskRole?.model && this.config.resolveModel) taskModel = this.config.resolveModel(taskRole.model);
		if (options?.model && this.config.resolveModel) taskModel = this.config.resolveModel(options.model);
		const taskConfig = {
			systemPrompt: localContext.systemPrompt,
			skills: localContext.skills,
			roles: this.config.roles,
			model: this.requireModel(taskModel, "this task() call"),
			resolveModel: this.config.resolveModel,
			compaction: this.config.compaction
		};
		this.eventCallback?.({
			type: "task_start",
			workspace: taskCwd
		});
		// Sub-session state is in-memory only and discarded on destroy().
		const taskStore = new InMemorySessionStore();
		const taskSession = new Session(`${this.id}:task:${Date.now()}`, taskConfig, taskEnv, taskStore, null, this.eventCallback);
		try {
			const promptOpts = { role: options?.role };
			if (options?.result) promptOpts.result = options.result;
			return await taskSession.prompt(prompt, promptOpts);
		} finally {
			this.eventCallback?.({ type: "task_end" });
			await taskSession.destroy();
		}
	}
712
	/** Abort the agent's in-flight run, if any. */
	abort() {
		this.agent.abort();
	}
715
	/**
	 * Tear the session down: abort any in-flight run, delete the persisted
	 * snapshot from the store, then clean up the environment's resources.
	 */
	async destroy() {
		this.agent.abort();
		await this.store.delete(this.id);
		await this.env.cleanup();
	}
720
- /** Precedence: prompt-level > role-level > agent-level default. */
721
- resolveModelForCall(promptModel, roleName) {
722
- let model = this.config.model;
723
- if (roleName && this.config.roles[roleName]?.model && this.config.resolveModel) model = this.config.resolveModel(this.config.roles[roleName].model);
724
- if (promptModel && this.config.resolveModel) model = this.config.resolveModel(promptModel);
725
- this.agent.state.model = this.requireModel(model, "this prompt()/skill()/task() call");
726
- }
727
- /**
728
- * Throws a clear, actionable error when no model is configured for a call.
729
- * Use with the resolved model (post-precedence) to guarantee we never hand
730
- * `undefined` to the underlying agent.
731
- */
732
- requireModel(model, callSite) {
733
- if (model) return model;
734
- throw new Error(`[flue] No model configured for ${callSite}. Pass \`{ model: "provider/model-id" }\` to \`init()\` for a session-wide default, or to this prompt()/skill()/task() call for a one-off override.`);
735
- }
736
- /**
737
- * Throws a clear error when a caller references a role that isn't registered.
738
- * Roles are loaded from `.flue/roles/` at build time. Called eagerly at the top
739
- * of prompt()/skill()/task() so typos surface before any LLM work begins.
740
- */
741
- assertRoleExists(roleName) {
742
- if (!roleName) return;
743
- if (this.config.roles[roleName]) return;
744
- const available = Object.keys(this.config.roles);
745
- const list = available.length > 0 ? available.join(", ") : "(none defined)";
746
- throw new Error(`[flue] Role "${roleName}" not registered. Available roles: ${list}. Define roles as markdown files under \`.flue/roles/\`.`);
747
- }
748
- injectRoleInstructions(text, roleName) {
749
- if (!roleName) return text;
750
- const role = this.config.roles[roleName];
751
- if (!role) return text;
752
- return `<role>\n${role.instructions}\n</role>\n\n${text}`;
753
- }
754
- assertCommandSupport(commands) {
755
- if (commands.length === 0) return;
756
- if (!this.env.commandSupport) throw new Error("[flue] Cannot use commands: this environment does not support command registration. Commands are only available in isolate sandbox mode. Remote sandboxes handle command execution at the platform level.");
757
- }
758
- /**
759
- * Merge session-wide `commands` (from init()) with per-call commands. When
760
- * both define a command with the same name, the per-call entry wins for
761
- * that call.
762
- */
763
- mergeCommands(perCall) {
764
- if (!perCall || perCall.length === 0) return this.sessionCommands;
765
- if (this.sessionCommands.length === 0) return perCall;
766
- const byName = /* @__PURE__ */ new Map();
767
- for (const cmd of this.sessionCommands) byName.set(cmd.name, cmd);
768
- for (const cmd of perCall) byName.set(cmd.name, cmd);
769
- return Array.from(byName.values());
770
- }
771
- registerCommands(commands) {
772
- if (!this.env.commandSupport || commands.length === 0) return [];
773
- const names = [];
774
- for (const cmd of commands) {
775
- this.env.commandSupport.register(cmd);
776
- names.push(cmd.name);
777
- }
778
- return names;
779
- }
780
- unregisterCommands(names) {
781
- if (!this.env.commandSupport || names.length === 0) return;
782
- for (const name of names) this.env.commandSupport.unregister(name);
783
- }
784
- registerCustomTools(tools) {
785
- const names = [];
786
- for (const toolDef of tools) {
787
- if (BUILTIN_TOOL_NAMES.has(toolDef.name)) throw new Error(`[flue] Custom tool "${toolDef.name}" conflicts with a built-in tool. Built-in tools: ${[...BUILTIN_TOOL_NAMES].join(", ")}`);
788
- if (names.includes(toolDef.name)) throw new Error(`[flue] Duplicate custom tool name "${toolDef.name}". Tool names must be unique.`);
789
- names.push(toolDef.name);
790
- }
791
- const agentTools = tools.map((toolDef) => ({
792
- name: toolDef.name,
793
- label: toolDef.name,
794
- description: toolDef.description,
795
- parameters: toolDef.parameters,
796
- async execute(_toolCallId, params, signal) {
797
- if (signal?.aborted) throw new Error("Operation aborted");
798
- return {
799
- content: [{
800
- type: "text",
801
- text: await toolDef.execute(params)
802
- }],
803
- details: { customTool: toolDef.name }
804
- };
805
- }
806
- }));
807
- this.agent.state.tools = [...this.agent.state.tools, ...agentTools];
808
- return names;
809
- }
810
- unregisterCustomTools() {
811
- this.agent.state.tools = [...this.builtinTools];
812
- }
813
- async save() {
814
- const now = (/* @__PURE__ */ new Date()).toISOString();
815
- const data = {
816
- messages: this.agent.state.messages,
817
- metadata: this.metadata,
818
- createdAt: this.createdAt ?? now,
819
- updatedAt: now,
820
- lastCompaction: this.lastCompaction
821
- };
822
- if (!this.createdAt) this.createdAt = now;
823
- await this.store.save(this.id, data);
824
- }
825
- async checkCompaction(assistantMessage) {
826
- if (!this.compactionSettings.enabled) return;
827
- if (assistantMessage.stopReason === "aborted") return;
828
- const contextWindow = this.agent.state.model.contextWindow ?? 0;
829
- if (isContextOverflow(assistantMessage, contextWindow)) {
830
- if (this.overflowRecoveryAttempted) return;
831
- this.overflowRecoveryAttempted = true;
832
- console.error(`[flue:compaction] Overflow detected, compacting and retrying...`);
833
- const messages = this.agent.state.messages;
834
- const lastMsg = messages[messages.length - 1];
835
- if (lastMsg && lastMsg.role === "assistant") this.agent.state.messages = messages.slice(0, -1);
836
- await this.runCompaction("overflow", true);
837
- return;
838
- }
839
- let contextTokens;
840
- if (assistantMessage.stopReason === "error") {
841
- const estimate = estimateContextTokens(this.agent.state.messages);
842
- if (estimate.lastUsageIndex === null) return;
843
- contextTokens = estimate.tokens;
844
- } else contextTokens = calculateContextTokens(assistantMessage.usage);
845
- if (shouldCompact(contextTokens, contextWindow, this.compactionSettings)) {
846
- console.error(`[flue:compaction] Threshold reached — ${contextTokens} tokens used, window ${contextWindow}, reserve ${this.compactionSettings.reserveTokens}, triggering compaction`);
847
- await this.runCompaction("threshold", false);
848
- }
849
- }
850
- async runCompaction(reason, willRetry) {
851
- this.compactionAbortController = new AbortController();
852
- const messagesBefore = this.agent.state.messages.length;
853
- try {
854
- const model = this.agent.state.model;
855
- const messages = this.agent.state.messages;
856
- const preparation = prepareCompaction(messages, this.compactionSettings, this.lastCompaction);
857
- if (!preparation) {
858
- console.error(`[flue:compaction] Nothing to compact (no valid cut point found)`);
859
- return;
860
- }
861
- console.error(`[flue:compaction] Summarizing ${preparation.messagesToSummarize.length} messages` + (preparation.isSplitTurn ? ` (split turn: ${preparation.turnPrefixMessages.length} prefix messages)` : "") + `, keeping messages from index ${preparation.firstKeptIndex}`);
862
- const estimatedTokens = preparation.tokensBefore;
863
- this.eventCallback?.({
864
- type: "compaction_start",
865
- reason,
866
- estimatedTokens
867
- });
868
- const result = await compact(preparation, model, void 0, this.compactionAbortController.signal);
869
- if (this.compactionAbortController.signal.aborted) return;
870
- const newMessages = buildCompactedMessages(messages, result);
871
- this.agent.state.messages = newMessages;
872
- const messagesAfter = newMessages.length;
873
- console.error(`[flue:compaction] Complete — messages: ${messagesBefore} → ${messagesAfter}, tokens before: ${result.tokensBefore}`);
874
- this.eventCallback?.({
875
- type: "compaction_end",
876
- messagesBefore,
877
- messagesAfter
878
- });
879
- this.lastCompaction = {
880
- summary: result.summary,
881
- firstKeptIndex: 1,
882
- details: result.details
883
- };
884
- await this.save();
885
- if (willRetry) {
886
- const msgs = this.agent.state.messages;
887
- const lastMsg = msgs[msgs.length - 1];
888
- if (lastMsg?.role === "assistant" && lastMsg.stopReason === "error") this.agent.state.messages = msgs.slice(0, -1);
889
- console.error(`[flue:compaction] Retrying after overflow recovery...`);
890
- await this.agent.continue();
891
- }
892
- } catch (error) {
893
- const errorMessage = error instanceof Error ? error.message : String(error);
894
- console.error(`[flue:compaction] Failed: ${errorMessage}`);
895
- } finally {
896
- this.compactionAbortController = void 0;
897
- }
898
- }
899
- throwIfError(context) {
900
- const errorMsg = this.agent.state.errorMessage;
901
- if (errorMsg) throw new Error(`[flue] ${context} failed: ${errorMsg}`);
902
- }
903
- getAssistantText() {
904
- const messages = this.agent.state.messages;
905
- for (let i = messages.length - 1; i >= 0; i--) {
906
- const msg = messages[i];
907
- if (msg.role !== "assistant") continue;
908
- const content = msg.content;
909
- if (!Array.isArray(content)) continue;
910
- const textParts = [];
911
- for (const block of content) if (block.type === "text") textParts.push(block.text);
912
- return textParts.join("\n");
913
- }
914
- return "";
915
- }
916
- async extractResultWithRetry(schema) {
917
- const text = this.getAssistantText();
918
- try {
919
- return extractResult(text, schema);
920
- } catch (err) {
921
- if (!(err instanceof ResultExtractionError)) throw err;
922
- if (!err.message.includes("RESULT_START")) throw err;
923
- const followUpPrompt = buildResultExtractionPrompt(schema);
924
- await this.agent.prompt(followUpPrompt);
925
- await this.agent.waitForIdle();
926
- await this.save();
927
- return extractResult(this.getAssistantText(), schema);
928
- }
929
- }
930
- };
931
- function normalizePath(p) {
932
- const parts = p.split("/");
933
- const result = [];
934
- for (const part of parts) {
935
- if (part === "." || part === "") continue;
936
- if (part === "..") result.pop();
937
- else result.push(part);
938
- }
939
- return "/" + result.join("/");
940
- }
941
-
942
- //#endregion
943
- export { Session as n, normalizePath as r, InMemorySessionStore as t };