goatchain 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +529 -0
- package/cli/args.mjs +113 -0
- package/cli/clack.mjs +111 -0
- package/cli/clipboard.mjs +320 -0
- package/cli/files.mjs +247 -0
- package/cli/index.mjs +299 -0
- package/cli/itermPaste.mjs +147 -0
- package/cli/persist.mjs +205 -0
- package/cli/repl.mjs +3141 -0
- package/cli/sdk.mjs +341 -0
- package/cli/sessionTransfer.mjs +118 -0
- package/cli/turn.mjs +751 -0
- package/cli/ui.mjs +138 -0
- package/cli.mjs +5 -0
- package/dist/index.cjs +4860 -0
- package/dist/index.d.cts +3479 -0
- package/dist/index.d.ts +3479 -0
- package/dist/index.js +4795 -0
- package/package.json +68 -0
package/dist/index.cjs
ADDED
@@ -0,0 +1,4860 @@
+//#region rolldown:runtime
+var __create = Object.create;
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
+    key = keys[i];
+    if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, {
+      get: ((k) => from[k]).bind(null, key),
+      enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
+    });
+  }
+  return to;
+};
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
+  value: mod,
+  enumerable: true
+}) : target, mod));
+
+//#endregion
+let node_fs = require("node:fs");
+node_fs = __toESM(node_fs);
+let node_path = require("node:path");
+node_path = __toESM(node_path);
+let node_module = require("node:module");
+node_module = __toESM(node_module);
+let openai = require("openai");
+openai = __toESM(openai);
+let node_child_process = require("node:child_process");
+node_child_process = __toESM(node_child_process);
+let node_process = require("node:process");
+node_process = __toESM(node_process);
+let node_fs_promises = require("node:fs/promises");
+node_fs_promises = __toESM(node_fs_promises);
+
+//#region src/agent/checkpointMiddleware.ts
+/**
+ * Convert AgentLoopState to AgentLoopCheckpoint
+ */
+function toLoopCheckpoint(state, options) {
+  let phase = options?.phase;
+  if (!phase) if (!state.shouldContinue) phase = "completed";
+  else if (state.pendingToolCalls.length > 0) phase = "tool_execution";
+  else phase = "llm_call";
+  let status = options?.status;
+  if (!status) if (phase === "completed") status = `Completed: ${state.stopReason ?? "unknown"}`;
+  else if (phase === "tool_execution") status = `Executing tools: ${state.pendingToolCalls.map((tc) => tc.toolCall.function.name).join(", ")}`;
+  else if (phase === "approval_pending") status = "Waiting for user approval";
+  else status = `Iteration ${state.iteration}: Calling LLM`;
+  return {
+    sessionId: state.sessionId,
+    agentId: state.agentId,
+    agentName: options?.agentName,
+    iteration: state.iteration,
+    phase,
+    status,
+    modelConfig: options?.modelConfig,
+    requestParams: options?.requestParams,
+    messages: [...state.messages],
+    pendingToolCalls: state.pendingToolCalls.map((tc) => ({
+      toolCall: { ...tc.toolCall },
+      result: tc.result,
+      isError: tc.isError
+    })),
+    currentResponse: state.currentResponse,
+    currentThinking: state.currentThinking,
+    shouldContinue: state.shouldContinue,
+    stopReason: state.stopReason,
+    lastModelStopReason: state.lastModelStopReason,
+    usage: { ...state.usage },
+    metadata: { ...state.metadata },
+    savedAt: Date.now()
+  };
+}
+/**
+ * Restore AgentLoopState from AgentLoopCheckpoint
+ *
+ * Note: agentName, phase, and status are display-only fields and are not
+ * part of AgentLoopState. They will be recalculated when a new checkpoint is saved.
+ */
+function fromLoopCheckpoint(checkpoint) {
+  return {
+    sessionId: checkpoint.sessionId,
+    agentId: checkpoint.agentId,
+    iteration: checkpoint.iteration,
+    messages: [...checkpoint.messages],
+    pendingToolCalls: checkpoint.pendingToolCalls.map((tc) => ({
+      toolCall: { ...tc.toolCall },
+      result: tc.result,
+      isError: tc.isError
+    })),
+    currentResponse: checkpoint.currentResponse,
+    currentThinking: checkpoint.currentThinking,
+    shouldContinue: checkpoint.shouldContinue,
+    stopReason: checkpoint.stopReason,
+    lastModelStopReason: checkpoint.lastModelStopReason,
+    usage: { ...checkpoint.usage },
+    metadata: { ...checkpoint.metadata }
+  };
+}
+
+//#endregion
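
Not part of the published dist file above or below — a minimal illustrative sketch of how the two checkpoint helpers round-trip loop state, assuming `toLoopCheckpoint` and `fromLoopCheckpoint` are exported from the package entry point (the export list is not shown in this diff).

```js
// Illustrative sketch only; assumes these helpers are exported from "goatchain".
const { toLoopCheckpoint, fromLoopCheckpoint } = require("goatchain");

// A loop state shaped like the fields toLoopCheckpoint reads above.
const state = {
  sessionId: "session-1",
  agentId: "agent-1",
  iteration: 2,
  messages: [{ role: "system", content: "You are helpful." }],
  pendingToolCalls: [],
  currentResponse: "",
  shouldContinue: true,
  usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
  metadata: {}
};

// phase/status are derived when not supplied: still running with no pending tools -> "llm_call".
const checkpoint = toLoopCheckpoint(state, { agentName: "demo" });
console.log(checkpoint.phase, checkpoint.status); // llm_call Iteration 2: Calling LLM

// Restoring drops the display-only fields (agentName, phase, status) by design.
const restored = fromLoopCheckpoint(checkpoint);
console.log(restored.iteration); // 2
```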
+//#region src/agent/errors.ts
+/**
+ * Error thrown when agent execution is aborted via AbortSignal
+ */
+var AgentAbortError = class extends Error {
+  constructor(message = "Agent execution aborted") {
+    super(message);
+    this.name = "AgentAbortError";
+  }
+};
+/**
+ * Error thrown when agent exceeds maximum iterations
+ */
+var AgentMaxIterationsError = class extends Error {
+  iterations;
+  constructor(iterations, message) {
+    super(message ?? `Agent exceeded maximum iterations (${iterations})`);
+    this.name = "AgentMaxIterationsError";
+    this.iterations = iterations;
+  }
+};
+/**
+ * Internal control-flow error used to stop streaming when execution is paused.
+ *
+ * This is caught by the Agent and is not intended to be shown to end users.
+ */
+var AgentPauseError = class extends Error {
+  constructor(message = "Agent execution paused") {
+    super(message);
+    this.name = "AgentPauseError";
+  }
+};
+/**
+ * Check if the abort signal has been triggered and throw if so.
+ *
+ * @param signal - AbortSignal to check
+ * @param context - Optional context for error message
+ * @throws AgentAbortError if signal is aborted
+ */
+function ensureNotAborted(signal, context) {
+  if (!signal?.aborted) return;
+  const reason = signal?.reason;
+  if (reason instanceof Error) throw reason;
+  throw new AgentAbortError(typeof reason === "string" ? reason : context ? `${context} aborted` : "Agent execution aborted");
+}
+
+//#endregion
+//#region src/agent/middleware.ts
+/**
+ * Compose multiple middleware functions into a single function.
+ *
+ * This implements an immutable onion model where each middleware:
+ * - Receives state from the previous middleware (or initial state)
+ * - Can transform state before passing to next()
+ * - Returns the result from downstream processing
+ *
+ * Execution order for each iteration:
+ * ```
+ * outer:before → inner:before → exec (model.stream) → inner:after → outer:after
+ * ```
+ *
+ * State flow:
+ * ```
+ * initialState → outer(transform?) → inner(transform?) → exec → result
+ * ```
+ *
+ * @param middleware - Array of middleware functions
+ * @returns Composed middleware function that can be invoked per iteration
+ *
+ * @example
+ * ```ts
+ * const composed = compose([
+ *   async (state, next) => {
+ *     console.log('outer:before');
+ *     const result = await next(state);
+ *     console.log('outer:after');
+ *     return result;
+ *   },
+ *   async (state, next) => {
+ *     console.log('inner:before');
+ *     // Transform state before passing down
+ *     const modified = { ...state, messages: compress(state.messages) };
+ *     const result = await next(modified);
+ *     console.log('inner:after');
+ *     return result;
+ *   },
+ * ]);
+ *
+ * // Execute with core function
+ * const result = await composed(initialState, async (s) => {
+ *   // Core execution receives transformed state
+ *   await model.stream(s.messages);
+ *   return s;
+ * });
+ * ```
+ */
+function compose(middleware) {
+  return (initialState, exec) => {
+    let index = -1;
+    const dispatch = async (i, currentState) => {
+      if (i <= index) throw new Error("next() called multiple times");
+      index = i;
+      if (i === middleware.length) return exec ? await exec(currentState) : currentState;
+      const fn = middleware[i];
+      return fn(currentState, (nextState) => dispatch(i + 1, nextState));
+    };
+    return dispatch(0, initialState);
+  };
+}
+
+//#endregion
+//#region src/agent/types.ts
+/**
+ * Create initial AgentLoopState from AgentInput
+ */
+function createInitialLoopState(input, agentId, systemPrompt) {
+  const messages = [
+    {
+      role: "system",
+      content: systemPrompt
+    },
+    ...input.messages ?? [],
+    {
+      role: "user",
+      content: input.input
+    }
+  ];
+  return {
+    sessionId: input.sessionId,
+    agentId,
+    messages,
+    iteration: 0,
+    pendingToolCalls: [],
+    currentResponse: "",
+    shouldContinue: true,
+    usage: {
+      promptTokens: 0,
+      completionTokens: 0,
+      totalTokens: 0
+    },
+    metadata: {}
+  };
+}
+/**
+ * Create initial AgentLoopState from an existing message history (no new user message appended).
+ */
+function createInitialLoopStateFromMessages(input, agentId, systemPrompt) {
+  const normalized = Array.isArray(input.messages) ? input.messages : [];
+  const messages = [{
+    role: "system",
+    content: systemPrompt
+  }, ...normalized.length > 0 && normalized[0]?.role === "system" ? normalized.slice(1) : normalized];
+  return {
+    sessionId: input.sessionId,
+    agentId,
+    messages,
+    iteration: 0,
+    pendingToolCalls: [],
+    currentResponse: "",
+    shouldContinue: true,
+    usage: {
+      promptTokens: 0,
+      completionTokens: 0,
+      totalTokens: 0
+    },
+    metadata: {}
+  };
+}
+
+//#endregion
+//#region src/agent/agent.ts
+/** Default maximum iterations to prevent infinite loops */
+const DEFAULT_MAX_ITERATIONS = 10;
+/**
+ * Agent class - the main orchestrator.
+ *
+ * Agent is a blueprint/configuration that can handle multiple sessions.
+ * It composes model, tools, state store, and session manager.
+ *
+ * Supports middleware pattern for extensible hooks at each loop iteration.
+ */
+var Agent = class Agent {
+  id;
+  name;
+  systemPrompt;
+  createdAt;
+  _model;
+  _tools;
+  _stateStore;
+  _sessionManager;
+  _metadata;
+  _middlewares = [];
+  _stats = {
+    updatedAt: Date.now(),
+    totalSessions: 0,
+    activeSessions: 0,
+    totalUsage: {
+      promptTokens: 0,
+      completionTokens: 0,
+      totalTokens: 0
+    }
+  };
+  constructor(options) {
+    this.id = options.id ?? crypto.randomUUID();
+    this.name = options.name;
+    this.systemPrompt = options.systemPrompt;
+    this.createdAt = Date.now();
+    this._model = options.model;
+    this._tools = options.tools;
+    this._stateStore = options.stateStore;
+    this._sessionManager = options.sessionManager;
+  }
+  /**
+   * Get the current model
+   */
+  get model() {
+    return this._model;
+  }
+  /**
+   * Get the tool registry
+   */
+  get tools() {
+    return this._tools;
+  }
+  /**
+   * Get the state store
+   */
+  get stateStore() {
+    return this._stateStore;
+  }
+  /**
+   * Get the session manager
+   */
+  get sessionManager() {
+    return this._sessionManager;
+  }
+  /**
+   * Get runtime statistics
+   */
+  get stats() {
+    return { ...this._stats };
+  }
+  /**
+   * Get metadata
+   */
+  get metadata() {
+    return this._metadata;
+  }
+  /**
+   * Set metadata
+   */
+  set metadata(value) {
+    this._metadata = value;
+  }
+  /**
+   * Add middleware to the agent.
+   *
+   * Middleware runs at each loop iteration and can intercept/transform the loop state.
+   * Middleware follows an immutable pattern - it receives state and returns new state via next().
+   *
+   * @param fn - Middleware function
+   * @returns this for chaining
+   *
+   * @example
+   * ```ts
+   * // Logging middleware (pass-through)
+   * agent.use(async (state, next) => {
+   *   console.log('Before iteration', state.iteration);
+   *   const result = await next(state);
+   *   console.log('After iteration', state.iteration);
+   *   return result;
+   * });
+   *
+   * // Transforming middleware (e.g., compression)
+   * agent.use(async (state, next) => {
+   *   const compressed = { ...state, messages: compress(state.messages) };
+   *   return next(compressed);
+   * });
+   * ```
+   */
+  use(fn) {
+    this._middlewares.push(fn);
+    return this;
+  }
+  /**
+   * Switch to a different model
+   *
+   * @param model - New model to use
+   */
+  setModel(model) {
+    this._model = model;
+    this._stats.updatedAt = Date.now();
+  }
+  /**
+   * Update session counts
+   *
+   * @param total - Total sessions
+   * @param active - Active sessions
+   */
+  updateSessionCounts(total, active) {
+    this._stats.totalSessions = total;
+    this._stats.activeSessions = active;
+    this._stats.updatedAt = Date.now();
+  }
+  /**
+   * Add to total usage statistics
+   *
+   * @param usage - Usage to add
+   * @param usage.promptTokens - Number of prompt tokens
+   * @param usage.completionTokens - Number of completion tokens
+   * @param usage.totalTokens - Total number of tokens
+   */
+  addUsage(usage) {
+    this._stats.totalUsage.promptTokens += usage.promptTokens;
+    this._stats.totalUsage.completionTokens += usage.completionTokens;
+    this._stats.totalUsage.totalTokens += usage.totalTokens;
+    this._stats.updatedAt = Date.now();
+  }
+  /**
+   * Create a snapshot of the agent for persistence
+   *
+   * @returns Agent snapshot
+   */
+  toSnapshot() {
+    return {
+      id: this.id,
+      name: this.name,
+      createdAt: this.createdAt,
+      config: {
+        systemPrompt: this.systemPrompt,
+        model: { modelId: this._model.modelId },
+        tools: this._tools?.list().map((t) => t.name) ?? []
+      },
+      stats: { ...this._stats },
+      metadata: this._metadata
+    };
+  }
+  /**
+   * Restore agent state from a snapshot
+   *
+   * Note: This restores mutable state (stats, metadata) from a snapshot.
+   * The model and tools must be provided separately as they contain runtime instances.
+   *
+   * @param snapshot - Agent snapshot to restore from
+   */
+  restoreFromSnapshot(snapshot) {
+    this._stats = {
+      updatedAt: snapshot.stats.updatedAt,
+      totalSessions: snapshot.stats.totalSessions,
+      activeSessions: snapshot.stats.activeSessions,
+      totalUsage: { ...snapshot.stats.totalUsage }
+    };
+    this._metadata = snapshot.metadata;
+  }
+  /**
+   * Execute model stream and collect events.
+   * This is the core execution that middleware wraps around.
+   */
+  async executeModelStream(state, signal, tools, modelOverride) {
+    const events = [];
+    const toolCallAccumulator = /* @__PURE__ */ new Map();
+    const ensureToolCall = (callId) => {
+      const existing = toolCallAccumulator.get(callId);
+      if (existing) return existing;
+      const created = {
+        argsText: "",
+        started: false
+      };
+      toolCallAccumulator.set(callId, created);
+      return created;
+    };
+    const emitToolCallStartIfNeeded = (callId, toolName) => {
+      const entry = ensureToolCall(callId);
+      if (toolName && !entry.toolName) entry.toolName = toolName;
+      if (!entry.started) {
+        entry.started = true;
+        events.push({
+          type: "tool_call_start",
+          callId,
+          toolName: entry.toolName
+        });
+      }
+    };
+    const emitToolCallDelta = (callId, toolName, argsTextDelta) => {
+      const entry = ensureToolCall(callId);
+      if (toolName && !entry.toolName) entry.toolName = toolName;
+      if (typeof argsTextDelta === "string" && argsTextDelta.length > 0) entry.argsText += argsTextDelta;
+      emitToolCallStartIfNeeded(callId, entry.toolName);
+      events.push({
+        type: "tool_call_delta",
+        callId,
+        toolName: entry.toolName,
+        argsTextDelta
+      });
+    };
+    const finalizeToolCalls = () => {
+      for (const [callId, entry] of toolCallAccumulator) {
+        if (!entry.toolName) continue;
+        const toolCall = {
+          id: callId,
+          type: "function",
+          function: {
+            name: entry.toolName,
+            arguments: entry.argsText
+          }
+        };
+        if (this._tools) state.pendingToolCalls.push({ toolCall });
+        events.push({
+          type: "tool_call_end",
+          toolCall
+        });
+      }
+      toolCallAccumulator.clear();
+    };
+    const streamArgs = modelOverride ? {
+      model: modelOverride,
+      messages: state.messages,
+      tools
+    } : {
+      messages: state.messages,
+      tools
+    };
+    for await (const event of this._model.stream(streamArgs)) {
+      ensureNotAborted(signal, "Agent streaming");
+      if (event.type === "delta") {
+        if (event.chunk.kind === "text") {
+          state.currentResponse += event.chunk.text;
+          events.push({
+            type: "text_delta",
+            delta: event.chunk.text
+          });
+        } else if (event.chunk.kind === "thinking_start") events.push({ type: "thinking_start" });
+        else if (event.chunk.kind === "thinking_delta") {
+          state.currentThinking = (state.currentThinking ?? "") + event.chunk.text;
+          events.push({
+            type: "thinking_delta",
+            delta: event.chunk.text
+          });
+        } else if (event.chunk.kind === "thinking_end") events.push({ type: "thinking_end" });
+        else if (event.chunk.kind === "tool_call_delta") emitToolCallDelta(event.chunk.callId, event.chunk.toolId, event.chunk.argsTextDelta);
+      } else if (event.type === "response_end") {
+        finalizeToolCalls();
+        state.lastModelStopReason = event.stopReason;
+        const raw = event.usage;
+        if (raw && typeof raw === "object") {
+          const usage = raw;
+          if (usage.prompt_tokens || usage.completion_tokens || usage.total_tokens) {
+            const normalized = {
+              promptTokens: usage.prompt_tokens ?? 0,
+              completionTokens: usage.completion_tokens ?? 0,
+              totalTokens: usage.total_tokens ?? 0
+            };
+            state.usage.promptTokens += normalized.promptTokens;
+            state.usage.completionTokens += normalized.completionTokens;
+            state.usage.totalTokens += normalized.totalTokens;
+            this.addUsage(normalized);
+            events.push({
+              type: "usage",
+              usage: normalized
+            });
+          }
+        }
+      } else if (event.type === "error") {
+        const code = event.error?.code ?? "model_error";
+        const message = event.error?.message ?? "Model error";
+        const err = /* @__PURE__ */ new Error(`${code}: ${message}`);
+        err.code = code;
+        state.error = err;
+        state.shouldContinue = false;
+        state.stopReason = "error";
+        events.push({
+          type: "error",
+          error: err
+        });
+      } else if (event.type === "text_delta") {
+        state.currentResponse += event.delta;
+        events.push({
+          type: "text_delta",
+          delta: event.delta
+        });
+      } else if (event.type === "thinking_end" || event.type === "thinking_start") events.push({ type: event.type });
+      else if (event.type === "thinking_delta") {
+        state.currentThinking = (state.currentThinking ?? "") + event.content;
+        events.push({
+          type: "thinking_delta",
+          delta: event.content
+        });
+      } else if (event.type === "tool_call" && this._tools) {
+        const toolCall = event.toolCall;
+        events.push({
+          type: "tool_call_start",
+          callId: toolCall.id,
+          toolName: toolCall.function.name
+        });
+        state.pendingToolCalls.push({ toolCall });
+        events.push({
+          type: "tool_call_end",
+          toolCall
+        });
+      } else if (event.type === "usage") {
+        state.usage.promptTokens += event.usage.promptTokens;
+        state.usage.completionTokens += event.usage.completionTokens;
+        state.usage.totalTokens += event.usage.totalTokens;
+        this.addUsage(event.usage);
+        events.push({
+          type: "usage",
+          usage: event.usage
+        });
+      }
+    }
+    if (toolCallAccumulator.size > 0) finalizeToolCalls();
+    return events;
+  }
+  /**
+   * Execute a single tool call and return the result event.
+   */
+  async executeToolCall(tc, ctx, signal) {
+    ensureNotAborted(signal, `Tool execution: ${tc.toolCall.function.name}`);
+    const tool = this._tools?.get(tc.toolCall.function.name);
+    if (!tool) {
+      tc.result = `Tool not found: ${tc.toolCall.function.name}`;
+      tc.isError = true;
+      return {
+        type: "tool_result",
+        tool_call_id: tc.toolCall.id,
+        result: tc.result,
+        isError: true
+      };
+    }
+    try {
+      const args = typeof tc.toolCall.function.arguments === "string" ? JSON.parse(tc.toolCall.function.arguments) : tc.toolCall.function.arguments;
+      tc.result = await tool.execute(args, ctx);
+      tc.isError = false;
+      return {
+        type: "tool_result",
+        tool_call_id: tc.toolCall.id,
+        result: tc.result
+      };
+    } catch (error) {
+      tc.result = error instanceof Error ? error.message : String(error);
+      tc.isError = true;
+      return {
+        type: "tool_result",
+        tool_call_id: tc.toolCall.id,
+        result: tc.result,
+        isError: true
+      };
+    }
+  }
+  createToolExecutionContext(state, signal, input) {
+    return {
+      sessionId: state.sessionId,
+      agentId: state.agentId,
+      context: input?.context,
+      signal,
+      capabilities: input?.capabilities,
+      usage: state.usage,
+      metadata: {
+        ...state.metadata ?? {},
+        ...input?.metadata ?? {}
+      }
+    };
+  }
+  /**
+   * Merge execution results from processedState back to original state.
+   *
+   * This preserves original messages (for checkpoint) while keeping:
+   * - currentResponse, currentThinking (from LLM)
+   * - pendingToolCalls (from LLM)
+   * - usage (accumulated)
+   * - metadata (middleware may update this)
+   *
+   * Note: If processedState shares references with state (shallow copy),
+   * some fields are already synced. This method ensures all fields are merged.
+   */
+  mergeStateResults(state, processedState) {
+    state.currentResponse = processedState.currentResponse;
+    state.currentThinking = processedState.currentThinking;
+    state.pendingToolCalls = processedState.pendingToolCalls;
+    state.usage = processedState.usage;
+    state.metadata = processedState.metadata;
+  }
+  /**
+   * Add tool result to message history.
+   */
+  addToolResultToHistory(state, tc) {
+    state.messages.push({
+      role: "tool",
+      tool_call_id: tc.toolCall.id,
+      content: typeof tc.result === "string" ? tc.result : JSON.stringify(tc.result)
+    });
+  }
+  /**
+   * Add assistant message with tool calls to history.
+   */
+  addAssistantMessageWithToolCalls(state) {
+    state.messages.push({
+      role: "assistant",
+      content: state.currentResponse || "",
+      tool_calls: state.pendingToolCalls.map((tc) => tc.toolCall)
+    });
+  }
+  /**
+   * Add final assistant response to history.
+   */
+  addFinalAssistantMessage(state) {
+    if (state.currentResponse) state.messages.push({
+      role: "assistant",
+      content: state.currentResponse
+    });
+  }
+  /**
+   * Stream agent execution with proper agentic loop.
+   *
+   * The execution follows an immutable middleware pattern with checkpoint support:
+   * ```
+   * checkpoint:before (save original state)
+   *   → middleware chain (transforms state immutably)
+   *   → exec (model.stream with processed state)
+   *   → merge results back (except messages)
+   * checkpoint:after (save original state with original messages)
+   * ```
+   *
+   * Key invariant: checkpoint always saves the original messages, while
+   * LLM receives potentially transformed messages (e.g., compressed).
+   */
+  async *streamWithState(state, input) {
+    const maxIterations = input.maxIterations ?? DEFAULT_MAX_ITERATIONS;
+    const signal = input.signal;
+    const tools = this._tools?.toOpenAIFormat();
+    const runUserMiddleware = compose(this._middlewares);
+    const toolContext = this.createToolExecutionContext(state, signal, input.toolContext);
+    const stateStore = this._stateStore;
+    const savePoint = stateStore?.savePoint ?? "before";
+    const deleteOnComplete = stateStore?.deleteOnComplete ?? true;
+    const checkpointModelConfig = input.model ? {
+      modelId: input.model.modelId,
+      provider: input.model.provider
+    } : { modelId: this._model.modelId };
+    while (state.shouldContinue) {
+      ensureNotAborted(signal, `Agent iteration ${state.iteration}`);
+      if (state.iteration >= maxIterations) {
+        state.shouldContinue = false;
+        state.stopReason = "max_iterations";
+        yield {
+          type: "done",
+          finalResponse: state.currentResponse,
+          stopReason: "max_iterations",
+          modelStopReason: state.lastModelStopReason
+        };
+        break;
+      }
+      yield {
+        type: "iteration_start",
+        iteration: state.iteration
+      };
+      if (state.pendingToolCalls.length > 0) {
+        try {
+          yield* this.handleToolCalls(state, toolContext, {
+            signal,
+            toolContextInput: input.toolContext,
+            stateStore,
+            checkpointModelConfig,
+            requestParams: input.requestParams
+          });
+        } catch (error) {
+          if (error instanceof AgentPauseError) return;
+          throw error;
+        }
+        continue;
+      }
+      state.currentResponse = "";
+      state.currentThinking = void 0;
+      state.pendingToolCalls = [];
+      state.lastModelStopReason = void 0;
+      if (stateStore && (savePoint === "before" || savePoint === "both")) {
+        const checkpoint = toLoopCheckpoint(state, {
+          agentName: this.name,
+          modelConfig: checkpointModelConfig,
+          requestParams: input.requestParams
+        });
+        await stateStore.saveCheckpoint(checkpoint);
+      }
+      let collectedEvents = [];
+      const processedState = await runUserMiddleware(state, async (middlewareState) => {
+        ensureNotAborted(signal, "Agent model call");
+        collectedEvents = await this.executeModelStream(middlewareState, signal, tools, input.model);
+        return middlewareState;
+      });
+      this.mergeStateResults(state, processedState);
+      if (stateStore && (savePoint === "after" || savePoint === "both")) {
+        const checkpoint = toLoopCheckpoint(state, {
+          agentName: this.name,
+          modelConfig: checkpointModelConfig,
+          requestParams: input.requestParams
+        });
+        await stateStore.saveCheckpoint(checkpoint);
+      }
+      for (const event of collectedEvents) yield event;
+      ensureNotAborted(signal, `Agent iteration ${state.iteration}`);
+      if (state.stopReason === "error") {
+        yield {
+          type: "iteration_end",
+          iteration: state.iteration,
+          willContinue: false,
+          toolCallCount: state.pendingToolCalls.length
+        };
+        yield {
+          type: "done",
+          finalResponse: state.currentResponse,
+          stopReason: "error",
+          modelStopReason: state.lastModelStopReason
+        };
+        break;
+      }
+      if (state.pendingToolCalls.length > 0) try {
+        yield* this.handleToolCalls(state, toolContext, {
+          signal,
+          toolContextInput: input.toolContext,
+          stateStore,
+          checkpointModelConfig,
+          requestParams: input.requestParams
+        });
+      } catch (error) {
+        if (error instanceof AgentPauseError) return;
+        throw error;
+      }
+      else {
+        yield* this.handleFinalResponse(state);
+        if (stateStore && deleteOnComplete && state.stopReason === "final_response") await stateStore.deleteCheckpoint(state.sessionId);
+      }
+    }
+  }
+  async *stream(input) {
+    const state = createInitialLoopState(input, this.id, this.systemPrompt);
+    yield* this.streamWithState(state, {
+      maxIterations: input.maxIterations,
+      signal: input.signal,
+      model: input.model,
+      toolContext: input.toolContext,
+      requestParams: input.requestParams
+    });
+  }
+  /**
+   * Stream agent execution starting from an existing message history (no new user message appended).
+   *
+   * Useful for orchestration patterns like "handoff", where a different agent should
+   * continue the same conversation state under a different system prompt.
+   */
+  async *streamFromMessages(input) {
+    const state = createInitialLoopStateFromMessages(input, this.id, this.systemPrompt);
+    yield* this.streamWithState(state, {
+      maxIterations: input.maxIterations,
+      signal: input.signal,
+      model: input.model,
+      toolContext: input.toolContext,
+      requestParams: input.requestParams
+    });
+  }
+  /**
+   * Stream agent execution from a saved checkpoint.
+   *
+   * This allows resuming interrupted executions from the exact state
+   * where they were saved (e.g., during tool approval waiting).
+   *
+   * @param input - Input containing the checkpoint to resume from
+   *
+   * @example
+   * ```typescript
+   * // Load checkpoint from store
+   * const checkpoint = await store.loadLoopCheckpoint(sessionId)
+   *
+   * if (checkpoint) {
+   *   // Resume execution from checkpoint
+   *   for await (const event of agent.streamFromCheckpoint({
+   *     checkpoint,
+   *     maxIterations: 10,
+   *   })) {
+   *     console.log(event)
+   *   }
+   * }
+   * ```
+   */
+  async *streamFromCheckpoint(input) {
+    const state = fromLoopCheckpoint(input.checkpoint);
+    const requestParams = input.requestParams ?? input.checkpoint.requestParams;
+    yield* this.streamWithState(state, {
+      maxIterations: input.maxIterations,
+      signal: input.signal,
+      model: input.model,
+      toolContext: input.toolContext,
+      requestParams
+    });
+  }
+  /**
+   * Risk levels that require user approval before execution.
+   */
+  static APPROVAL_REQUIRED_LEVELS = new Set(["high", "critical"]);
+  /**
+   * Check if a tool requires approval based on its risk level.
+   */
+  requiresApproval(riskLevel) {
+    return Agent.APPROVAL_REQUIRED_LEVELS.has(riskLevel);
+  }
+  /**
+   * Handle tool calls: execute tools, yield results, continue loop.
+   */
+  async *handleToolCalls(state, toolContext, options) {
+    const toolCallCount = state.pendingToolCalls.length;
+    const signal = options?.signal;
+    const toolContextInput = options?.toolContextInput;
+    const onToolApproval = toolContextInput?.onToolApproval;
+    const approval = toolContextInput?.approval;
+    const approvalStrategy = approval?.strategy ?? "high_risk";
+    const approvalEnabled = approval?.autoApprove !== true && (Boolean(onToolApproval) || approval?.strategy != null || approval?.decisions != null);
+    const approvalDecisions = approval?.decisions ?? {};
+    const shouldRequireApproval = (riskLevel) => {
+      if (!approvalEnabled) return false;
+      if (approvalStrategy === "all") return true;
+      return this.requiresApproval(riskLevel);
+    };
+    const hasAssistantToolCallMessage = (toolCallId) => {
+      return state.messages.some((m) => {
+        if (!m || typeof m !== "object" || m.role !== "assistant") return false;
+        const tc = m.tool_calls;
+        return Array.isArray(tc) && tc.some((c) => c?.id === toolCallId);
+      });
+    };
+    const addAssistantMessageIfNeeded = (toolCallId) => {
+      if (!hasAssistantToolCallMessage(toolCallId)) this.addAssistantMessageWithToolCalls(state);
+    };
+    const alreadyHasToolResultMessage = (toolCallId) => {
+      return state.messages.some((m) => m && typeof m === "object" && m.role === "tool" && m.tool_call_id === toolCallId);
+    };
+    for (const tc of state.pendingToolCalls) {
+      if (alreadyHasToolResultMessage(tc.toolCall.id)) continue;
+      const tool = this._tools?.get(tc.toolCall.function.name);
+      const riskLevel = tool?.riskLevel ?? "safe";
+      if (tool && shouldRequireApproval(riskLevel)) {
+        let args = {};
+        try {
+          args = typeof tc.toolCall.function.arguments === "string" ? JSON.parse(tc.toolCall.function.arguments) : tc.toolCall.function.arguments;
+        } catch {
+          args = { _raw: tc.toolCall.function.arguments };
+        }
+        const toolName = tc.toolCall.function.name;
+        let approvalResult = approvalDecisions[tc.toolCall.id];
+        if (!approvalResult && onToolApproval) {
+          yield {
+            type: "tool_approval_requested",
+            tool_call_id: tc.toolCall.id,
+            toolName,
+            riskLevel,
+            args
+          };
+          approvalResult = await onToolApproval({
+            toolName,
+            toolCall: tc.toolCall,
+            riskLevel,
+            args
+          });
+        }
+        if (!approvalResult) {
+          const checkpoint = toLoopCheckpoint(state, {
+            agentName: this.name,
+            phase: "approval_pending",
+            status: `Waiting for approval: ${toolName}`,
+            modelConfig: options?.checkpointModelConfig,
+            requestParams: options?.requestParams
+          });
+          if (options?.stateStore) await options.stateStore.saveCheckpoint(checkpoint);
+          yield {
+            type: "requires_action",
+            kind: "tool_approval",
+            tool_call_id: tc.toolCall.id,
+            toolName,
+            riskLevel,
+            args,
+            checkpoint: options?.stateStore ? void 0 : checkpoint,
+            checkpointRef: options?.stateStore ? {
+              sessionId: checkpoint.sessionId,
+              agentId: checkpoint.agentId
+            } : void 0
+          };
+          throw new AgentPauseError();
+        }
+        if ("pending" in approvalResult && approvalResult.pending) {
+          const checkpoint = toLoopCheckpoint(state, {
+            agentName: this.name,
+            phase: "approval_pending",
+            status: approvalResult.reason ? `Waiting for approval: ${toolName} (${approvalResult.reason})` : `Waiting for approval: ${toolName}`,
+            modelConfig: options?.checkpointModelConfig,
+            requestParams: options?.requestParams
+          });
+          if (options?.stateStore) await options.stateStore.saveCheckpoint(checkpoint);
+          yield {
+            type: "requires_action",
+            kind: "tool_approval",
+            tool_call_id: tc.toolCall.id,
+            toolName,
+            riskLevel,
+            args,
+            checkpoint: options?.stateStore ? void 0 : checkpoint,
+            checkpointRef: options?.stateStore ? {
+              sessionId: checkpoint.sessionId,
+              agentId: checkpoint.agentId
+            } : void 0
+          };
+          throw new AgentPauseError();
+        }
+        if (!("approved" in approvalResult) || !approvalResult.approved) {
+          const reason = approvalResult.reason ?? "User denied approval";
+          tc.result = `Tool execution skipped: ${reason}`;
+          tc.isError = true;
+          addAssistantMessageIfNeeded(tc.toolCall.id);
+          yield {
+            type: "tool_skipped",
+            tool_call_id: tc.toolCall.id,
+            toolName: tc.toolCall.function.name,
+            reason
+          };
+          yield {
+            type: "tool_result",
+            tool_call_id: tc.toolCall.id,
+            result: tc.result,
+            isError: true
+          };
+          this.addToolResultToHistory(state, tc);
+          continue;
+        }
+      }
+      addAssistantMessageIfNeeded(tc.toolCall.id);
+      yield await this.executeToolCall(tc, toolContext, signal);
+      this.addToolResultToHistory(state, tc);
+    }
+    state.pendingToolCalls = [];
+    state.iteration++;
+    yield {
+      type: "iteration_end",
+      iteration: state.iteration - 1,
+      willContinue: true,
+      toolCallCount
+    };
+  }
+  /**
+   * Handle final response: add to history, emit done.
+   */
+  async *handleFinalResponse(state) {
+    state.shouldContinue = false;
+    state.stopReason = "final_response";
+    this.addFinalAssistantMessage(state);
+    yield {
+      type: "iteration_end",
+      iteration: state.iteration,
+      willContinue: false,
+      toolCallCount: 0
+    };
+    yield {
+      type: "done",
+      finalResponse: state.currentResponse,
+      stopReason: "final_response",
+      modelStopReason: state.lastModelStopReason
+    };
+  }
+};
+
+//#endregion
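
Not part of the published dist file — a minimal sketch of driving the Agent loop above end to end, assuming `Agent` and `InMemoryStateStore` are exported from the package entry point (the export list is not shown in this diff). The stub model is hypothetical: anything with a `modelId` and an async-generator `stream()` yielding the event shapes consumed by `executeModelStream` should work.

```js
// Illustrative sketch only; export names are assumptions based on the code above.
const { Agent, InMemoryStateStore } = require("goatchain");

// Hypothetical stub model satisfying the modelId + stream() contract used above.
const stubModel = {
  modelId: "stub-model",
  async *stream({ messages }) {
    // Emit two of the event shapes handled in executeModelStream.
    yield { type: "text_delta", delta: `Echo: ${messages.at(-1).content}` };
    yield { type: "response_end", stopReason: "stop" };
  },
};

const agent = new Agent({
  name: "DemoAgent",
  systemPrompt: "You are a demo agent.",
  model: stubModel,
  stateStore: new InMemoryStateStore(),
});

// Pass-through logging middleware, per the use() docs above.
agent.use(async (state, next) => {
  console.log("iteration", state.iteration);
  return next(state);
});

(async () => {
  for await (const event of agent.stream({ sessionId: "s1", input: "hi" })) {
    if (event.type === "text_delta") process.stdout.write(event.delta);
    if (event.type === "done") console.log("\nstopReason:", event.stopReason);
  }
})();
```

With no tools registered, the loop runs one iteration, checkpoints via the state store before the model call, and finishes with stopReason "final_response".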
+//#region src/state/types.ts
+/**
+ * Predefined state keys for common data types.
+ */
+const StateKeys = {
+  CHECKPOINT: "checkpoint",
+  COMPRESSION: "compression",
+  SESSION: "session",
+  COMPRESSION_SNAPSHOT: "compression-snapshot"
+};
+
+//#endregion
+//#region src/state/stateStore.ts
+/**
+ * Abstract base class for state storage.
+ *
+ * StateStore provides a session-centric storage abstraction where all data
+ * is organized by sessionId. Each session can have multiple keys storing
+ * different types of data (checkpoint, compression state, session info, etc.).
+ *
+ * Subclasses only need to implement the low-level storage primitives:
+ * - _write: Write data to a path
+ * - _read: Read data from a path
+ * - _delete: Delete data at a path
+ * - _exists: Check if data exists at a path
+ * - _list: List all paths with a given prefix
+ *
+ * The base class provides:
+ * - High-level API for session-based storage (save, load, delete, etc.)
+ * - Convenience methods for checkpoint operations
+ * - JSON serialization/deserialization
+ *
+ * @example
+ * ```typescript
+ * // Using with an agent
+ * const store = new FileStateStore({ dir: './state' })
+ *
+ * const agent = new Agent({
+ *   name: 'MyAgent',
+ *   stateStore: store,
+ *   // ...
+ * })
+ *
+ * // Manual state operations
+ * await store.save(sessionId, 'custom-key', { myData: 123 })
+ * const data = await store.load(sessionId, 'custom-key')
+ * ```
+ */
+var StateStore = class {
+  /**
+   * When to save checkpoints during agent execution.
+   */
+  savePoint;
+  /**
+   * Whether to delete checkpoint after successful completion.
+   */
+  deleteOnComplete;
+  constructor(options) {
+    this.savePoint = options?.savePoint ?? "before";
+    this.deleteOnComplete = options?.deleteOnComplete ?? true;
+  }
+  /**
+   * Save data for a session under a specific key.
+   *
+   * @param sessionId - Session identifier
+   * @param key - Data key (e.g., 'checkpoint', 'compression', or custom keys)
+   * @param data - Data to save (will be JSON serialized)
+   */
+  async save(sessionId, key, data) {
+    const path$5 = this.buildPath(sessionId, key);
+    const serialized = JSON.stringify(data, null, 2);
+    await this._write(path$5, serialized);
+  }
+  /**
+   * Load data for a session by key.
+   *
+   * @param sessionId - Session identifier
+   * @param key - Data key
+   * @returns The data or undefined if not found
+   */
+  async load(sessionId, key) {
+    const path$5 = this.buildPath(sessionId, key);
+    const content = await this._read(path$5);
+    if (content === void 0) return;
+    try {
+      return JSON.parse(content);
+    } catch {
+      return;
+    }
+  }
+  /**
+   * Delete data for a session by key.
+   *
+   * @param sessionId - Session identifier
+   * @param key - Data key
+   */
+  async delete(sessionId, key) {
+    const path$5 = this.buildPath(sessionId, key);
+    await this._delete(path$5);
+  }
+  /**
+   * Delete all data for a session.
+   *
+   * @param sessionId - Session identifier
+   */
+  async deleteSession(sessionId) {
+    const prefix = this.buildPrefix(sessionId);
+    const paths = await this._list(prefix);
+    await Promise.all(paths.map((path$5) => this._delete(path$5)));
+  }
+  /**
+   * List all keys for a session.
+   *
+   * @param sessionId - Session identifier
+   * @returns Array of keys
+   */
+  async listKeys(sessionId) {
+    const prefix = this.buildPrefix(sessionId);
+    return (await this._list(prefix)).map((path$5) => this.extractKey(sessionId, path$5));
+  }
+  /**
+   * Check if data exists for a session key.
+   *
+   * @param sessionId - Session identifier
+   * @param key - Data key
+   * @returns True if data exists
+   */
+  async exists(sessionId, key) {
+    const path$5 = this.buildPath(sessionId, key);
+    return this._exists(path$5);
+  }
+  /**
+   * Save an agent loop checkpoint.
+   *
+   * This is a convenience method that saves the checkpoint under the
+   * predefined CHECKPOINT key with additional metadata.
+   *
+   * @param checkpoint - Checkpoint to save
+   */
+  async saveCheckpoint(checkpoint) {
+    const wrapped = {
+      _meta: {
+        description: "GoatChain Agent Loop Checkpoint - DO NOT EDIT MANUALLY",
+        savedAt: (/* @__PURE__ */ new Date()).toISOString(),
+        agentId: checkpoint.agentId,
+        agentName: checkpoint.agentName,
+        sessionId: checkpoint.sessionId,
+        iteration: checkpoint.iteration,
+        phase: checkpoint.phase,
+        status: checkpoint.status,
+        messageCount: checkpoint.messages.length,
+        toolCallsPending: checkpoint.pendingToolCalls?.length ?? 0
+      },
+      checkpoint
+    };
+    await this.save(checkpoint.sessionId, StateKeys.CHECKPOINT, wrapped);
+  }
+  /**
+   * Load an agent loop checkpoint by session ID.
+   *
+   * @param sessionId - Session identifier
+   * @returns Checkpoint or undefined if not found
+   */
+  async loadCheckpoint(sessionId) {
+    return (await this.load(sessionId, StateKeys.CHECKPOINT))?.checkpoint;
+  }
+  /**
+   * Delete an agent loop checkpoint.
+   *
+   * @param sessionId - Session identifier
+   */
+  async deleteCheckpoint(sessionId) {
+    await this.delete(sessionId, StateKeys.CHECKPOINT);
+  }
+  /**
+   * List all checkpoints across all sessions.
+   *
+   * @returns Array of checkpoints
+   */
+  async listCheckpoints() {
+    const sessionIds = await this.listSessions();
+    const checkpoints = [];
+    for (const sessionId of sessionIds) {
+      const checkpoint = await this.loadCheckpoint(sessionId);
+      if (checkpoint) checkpoints.push(checkpoint);
+    }
+    return checkpoints;
+  }
+  /**
+   * List all session IDs that have stored data.
+   *
+   * @returns Array of session IDs
+   */
+  async listSessions() {
+    const allPaths = await this._list("");
+    const sessionIds = /* @__PURE__ */ new Set();
+    for (const path$5 of allPaths) {
+      const sessionId = this.extractSessionId(path$5);
+      if (sessionId) sessionIds.add(sessionId);
+    }
+    return Array.from(sessionIds);
+  }
+  /**
+   * Build a storage path from sessionId and key.
+   * Default format: `{sessionId}/{key}`
+   *
+   * Subclasses can override this for different path formats.
+   */
+  buildPath(sessionId, key) {
+    return `${sessionId}/${key}`;
+  }
+  /**
+   * Build a prefix for listing all data under a session.
+   * Default format: `{sessionId}/`
+   */
+  buildPrefix(sessionId) {
+    return `${sessionId}/`;
+  }
+  /**
+   * Extract the key from a full path.
+   */
+  extractKey(sessionId, path$5) {
+    const prefix = this.buildPrefix(sessionId);
+    return path$5.startsWith(prefix) ? path$5.slice(prefix.length) : path$5;
+  }
+  /**
+   * Extract the sessionId from a full path.
+   */
+  extractSessionId(path$5) {
+    const parts = path$5.split("/");
+    return parts.length > 0 ? parts[0] : void 0;
+  }
+};
+
+//#endregion
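
Not part of the published dist file — a sketch of plugging a custom backend into the StateStore base class above, assuming `StateStore` is exported from the package entry point. Only the five underscore-prefixed primitives are implemented; the session-level API and checkpoint helpers come from the base class.

```js
// Illustrative sketch only; the export name is an assumption based on the code above.
const { StateStore } = require("goatchain");

class MapStateStore extends StateStore {
  constructor(options) {
    super(options);
    this.entries = new Map(); // stands in for an external KV service
  }
  async _write(path, data) { this.entries.set(path, data); }
  async _read(path) { return this.entries.get(path); }
  async _delete(path) { this.entries.delete(path); }
  async _exists(path) { return this.entries.has(path); }
  async _list(prefix) {
    return [...this.entries.keys()].filter((k) => prefix === "" || k.startsWith(prefix));
  }
}

(async () => {
  const store = new MapStateStore({ savePoint: "both", deleteOnComplete: false });
  await store.save("session-1", "custom-key", { myData: 123 });
  console.log(await store.load("session-1", "custom-key")); // { myData: 123 }
  console.log(await store.listKeys("session-1"));           // [ 'custom-key' ]
  console.log(await store.listSessions());                  // [ 'session-1' ]
})();
```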
|
|
1308
|
+
//#region src/state/FileStateStore.ts
|
|
1309
|
+
/**
|
|
1310
|
+
* File-based implementation of StateStore.
|
|
1311
|
+
*
|
|
1312
|
+
* Stores state data as JSON files in a directory structure organized by session:
|
|
1313
|
+
*
|
|
1314
|
+
* ```
|
|
1315
|
+
* <baseDir>/
|
|
1316
|
+
* <sessionId>/
|
|
1317
|
+
* checkpoint.json
|
|
1318
|
+
* compression.json
|
|
1319
|
+
* session.json
|
|
1320
|
+
* custom-key.json
|
|
1321
|
+
* ```
|
|
1322
|
+
*
|
|
1323
|
+
* @example
|
|
1324
|
+
* ```typescript
|
|
1325
|
+
* const store = new FileStateStore({
|
|
1326
|
+
* dir: './state',
|
|
1327
|
+
* savePoint: 'before',
|
|
1328
|
+
* deleteOnComplete: true,
|
|
1329
|
+
* })
|
|
1330
|
+
*
|
|
1331
|
+
* const agent = new Agent({
|
|
1332
|
+
* name: 'MyAgent',
|
|
1333
|
+
* stateStore: store,
|
|
1334
|
+
* // ...
|
|
1335
|
+
* })
|
|
1336
|
+
* ```
|
|
1337
|
+
*/
|
|
1338
|
+
var FileStateStore = class extends StateStore {
|
|
1339
|
+
baseDir;
|
|
1340
|
+
constructor(options) {
|
|
1341
|
+
super(options);
|
|
1342
|
+
this.baseDir = node_path.default.resolve(options.dir);
|
|
1343
|
+
this.ensureDir(this.baseDir);
|
|
1344
|
+
}
|
|
1345
|
+
async _write(storagePath, data) {
|
|
1346
|
+
const filePath = this.toFilePath(storagePath);
|
|
1347
|
+
this.ensureDir(node_path.default.dirname(filePath));
|
|
1348
|
+
(0, node_fs.writeFileSync)(filePath, data, "utf-8");
|
|
1349
|
+
}
|
|
1350
|
+
async _read(storagePath) {
|
|
1351
|
+
const filePath = this.toFilePath(storagePath);
|
|
1352
|
+
try {
|
|
1353
|
+
if (!(0, node_fs.existsSync)(filePath)) return;
|
|
1354
|
+
return (0, node_fs.readFileSync)(filePath, "utf-8");
|
|
1355
|
+
} catch {
|
|
1356
|
+
return;
|
|
1357
|
+
}
|
|
1358
|
+
}
|
|
1359
|
+
async _delete(storagePath) {
|
|
1360
|
+
const filePath = this.toFilePath(storagePath);
|
|
1361
|
+
if ((0, node_fs.existsSync)(filePath)) (0, node_fs.rmSync)(filePath);
|
|
1362
|
+
const sessionDir = node_path.default.dirname(filePath);
|
|
1363
|
+
if ((0, node_fs.existsSync)(sessionDir)) try {
|
|
1364
|
+
if ((0, node_fs.readdirSync)(sessionDir).length === 0) (0, node_fs.rmSync)(sessionDir, { recursive: true });
|
|
1365
|
+
} catch {}
|
|
1366
|
+
}
|
|
1367
|
+
async _exists(storagePath) {
|
|
1368
|
+
return (0, node_fs.existsSync)(this.toFilePath(storagePath));
|
|
1369
|
+
}
|
|
1370
|
+
async _list(prefix) {
|
|
1371
|
+
const results = [];
|
|
1372
|
+
if (!(0, node_fs.existsSync)(this.baseDir)) return results;
|
|
1373
|
+
const sessionDirs = (0, node_fs.readdirSync)(this.baseDir, { withFileTypes: true }).filter((d) => d.isDirectory()).map((d) => d.name);
|
|
1374
|
+
for (const sessionId of sessionDirs) {
|
|
1375
|
+
if (prefix && !sessionId.startsWith(prefix.split("/")[0])) continue;
|
|
1376
|
+
const sessionDir = node_path.default.join(this.baseDir, sessionId);
|
|
1377
|
+
const files = this.listJsonFiles(sessionDir);
|
|
1378
|
+
for (const file of files) {
|
|
1379
|
+
const storagePath = `${sessionId}/${node_path.default.basename(file, ".json")}`;
|
|
1380
|
+
if (!prefix || storagePath.startsWith(prefix)) results.push(storagePath);
|
|
1381
|
+
}
|
|
1382
|
+
}
|
|
1383
|
+
return results;
|
|
1384
|
+
}
|
|
1385
|
+
/**
|
|
1386
|
+
* Convert storage path to file system path.
|
|
1387
|
+
* Storage path: `{sessionId}/{key}`
|
|
1388
|
+
* File path: `{baseDir}/{sessionId}/{key}.json`
|
|
1389
|
+
*/
|
|
1390
|
+
toFilePath(storagePath) {
|
|
1391
|
+
return node_path.default.join(this.baseDir, `${storagePath}.json`);
|
|
1392
|
+
}
|
|
1393
|
+
/**
|
|
1394
|
+
* Ensure a directory exists.
|
|
1395
|
+
*/
|
|
1396
|
+
ensureDir(dir) {
|
|
1397
|
+
if (!(0, node_fs.existsSync)(dir)) (0, node_fs.mkdirSync)(dir, { recursive: true });
|
|
1398
|
+
}
|
|
1399
|
+
/**
|
|
1400
|
+
* List all JSON files in a directory.
|
|
1401
|
+
*/
|
|
1402
|
+
listJsonFiles(dir) {
|
|
1403
|
+
if (!(0, node_fs.existsSync)(dir)) return [];
|
|
1404
|
+
return (0, node_fs.readdirSync)(dir).filter((f) => f.endsWith(".json")).map((f) => node_path.default.join(dir, f));
|
|
1405
|
+
}
|
|
1406
|
+
/**
|
|
1407
|
+
* Get the base directory path.
|
|
1408
|
+
*/
|
|
1409
|
+
getBaseDir() {
|
|
1410
|
+
return this.baseDir;
|
|
1411
|
+
}
|
|
1412
|
+
/**
|
|
1413
|
+
* Clear all state data from the store.
|
|
1414
|
+
* WARNING: This will delete all files in the base directory.
|
|
1415
|
+
*/
|
|
1416
|
+
clear() {
|
|
1417
|
+
if ((0, node_fs.existsSync)(this.baseDir)) {
|
|
1418
|
+
(0, node_fs.rmSync)(this.baseDir, { recursive: true });
|
|
1419
|
+
(0, node_fs.mkdirSync)(this.baseDir, { recursive: true });
|
|
1420
|
+
}
|
|
1421
|
+
}
|
|
1422
|
+
};
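/*
 * Usage sketch for FileStateStore (illustrative values only; `dir`, `savePoint` and
 * `deleteOnComplete` mirror the JSDoc example above, while the session id and key
 * names here are hypothetical; save/load follow the StateStore base class as used
 * elsewhere in this bundle):
 *
 *   const store = new FileStateStore({ dir: "./.state", savePoint: "before", deleteOnComplete: true });
 *   await store.save("session-1", "checkpoint", { step: 1 });
 *   // persisted at ./.state/session-1/checkpoint.json (see toFilePath below)
 *   const data = await store.load("session-1", "checkpoint");
 */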
|
|
1423
|
+
|
|
1424
|
+
//#endregion
|
|
1425
|
+
//#region src/state/InMemoryStateStore.ts
|
|
1426
|
+
/**
|
|
1427
|
+
* In-memory implementation of StateStore.
|
|
1428
|
+
*
|
|
1429
|
+
* Useful for development, testing, and short-lived applications.
|
|
1430
|
+
* Data is lost when the process exits.
|
|
1431
|
+
*
|
|
1432
|
+
* @example
|
|
1433
|
+
* ```typescript
|
|
1434
|
+
* const store = new InMemoryStateStore({
|
|
1435
|
+
* savePoint: 'before',
|
|
1436
|
+
* deleteOnComplete: true,
|
|
1437
|
+
* })
|
|
1438
|
+
*
|
|
1439
|
+
* const agent = new Agent({
|
|
1440
|
+
* name: 'MyAgent',
|
|
1441
|
+
* stateStore: store,
|
|
1442
|
+
* // ...
|
|
1443
|
+
* })
|
|
1444
|
+
* ```
|
|
1445
|
+
*/
|
|
1446
|
+
var InMemoryStateStore = class extends StateStore {
|
|
1447
|
+
/**
|
|
1448
|
+
* Internal storage: path -> data
|
|
1449
|
+
*/
|
|
1450
|
+
store = /* @__PURE__ */ new Map();
|
|
1451
|
+
constructor(options) {
|
|
1452
|
+
super(options);
|
|
1453
|
+
}
|
|
1454
|
+
async _write(path$5, data) {
|
|
1455
|
+
this.store.set(path$5, data);
|
|
1456
|
+
}
|
|
1457
|
+
async _read(path$5) {
|
|
1458
|
+
return this.store.get(path$5);
|
|
1459
|
+
}
|
|
1460
|
+
async _delete(path$5) {
|
|
1461
|
+
this.store.delete(path$5);
|
|
1462
|
+
}
|
|
1463
|
+
async _exists(path$5) {
|
|
1464
|
+
return this.store.has(path$5);
|
|
1465
|
+
}
|
|
1466
|
+
async _list(prefix) {
|
|
1467
|
+
const paths = [];
|
|
1468
|
+
for (const key of this.store.keys()) if (prefix === "" || key.startsWith(prefix)) paths.push(key);
|
|
1469
|
+
return paths;
|
|
1470
|
+
}
|
|
1471
|
+
/**
|
|
1472
|
+
* Clear all data from the store.
|
|
1473
|
+
* Useful for testing.
|
|
1474
|
+
*/
|
|
1475
|
+
clear() {
|
|
1476
|
+
this.store.clear();
|
|
1477
|
+
}
|
|
1478
|
+
/**
|
|
1479
|
+
* Get statistics about the store.
|
|
1480
|
+
*/
|
|
1481
|
+
stats() {
|
|
1482
|
+
const sessionIds = /* @__PURE__ */ new Set();
|
|
1483
|
+
for (const key of this.store.keys()) {
|
|
1484
|
+
const sessionId = this.extractSessionId(key);
|
|
1485
|
+
if (sessionId) sessionIds.add(sessionId);
|
|
1486
|
+
}
|
|
1487
|
+
return {
|
|
1488
|
+
entryCount: this.store.size,
|
|
1489
|
+
sessionCount: sessionIds.size
|
|
1490
|
+
};
|
|
1491
|
+
}
|
|
1492
|
+
};
|
|
1493
|
+
|
|
1494
|
+
//#endregion
|
|
1495
|
+
//#region src/agent/tokenCounter.ts
|
|
1496
|
+
let cachedTiktoken;
|
|
1497
|
+
function loadTiktoken() {
|
|
1498
|
+
if (cachedTiktoken !== void 0) return cachedTiktoken;
|
|
1499
|
+
try {
|
|
1500
|
+
const mod = (0, node_module.createRequire)(require("url").pathToFileURL(__filename).href)("tiktoken");
|
|
1501
|
+
cachedTiktoken = mod;
|
|
1502
|
+
return mod;
|
|
1503
|
+
} catch {
|
|
1504
|
+
cachedTiktoken = null;
|
|
1505
|
+
return null;
|
|
1506
|
+
}
|
|
1507
|
+
}
|
|
1508
|
+
/**
|
|
1509
|
+
* Default model to use for token encoding.
|
|
1510
|
+
*/
|
|
1511
|
+
const DEFAULT_MODEL = "gpt-4o";
|
|
1512
|
+
/**
|
|
1513
|
+
* Count tokens in a string using the specified model's encoding.
|
|
1514
|
+
*
|
|
1515
|
+
* Creates a new encoder for each call and immediately frees it
|
|
1516
|
+
* to avoid memory leaks in long-running processes.
|
|
1517
|
+
*
|
|
1518
|
+
* @param text - The text to count tokens for
|
|
1519
|
+
* @param model - Optional model name for encoding (default: gpt-4o)
|
|
1520
|
+
* @returns Number of tokens
|
|
1521
|
+
*/
|
|
1522
|
+
function countTokens(text, model) {
|
|
1523
|
+
if (!text) return 0;
|
|
1524
|
+
const tk = loadTiktoken();
|
|
1525
|
+
if (!tk) return Math.ceil(text.length / 4);
|
|
1526
|
+
let encoder = null;
|
|
1527
|
+
try {
|
|
1528
|
+
encoder = model ? tk.encoding_for_model(model) : tk.encoding_for_model(DEFAULT_MODEL);
|
|
1529
|
+
return encoder.encode(text).length;
|
|
1530
|
+
} catch (_error) {
|
|
1531
|
+
console.error("Error encoding text:", _error);
|
|
1532
|
+
try {
|
|
1533
|
+
encoder = tk.get_encoding("cl100k_base");
|
|
1534
|
+
return encoder.encode(text).length;
|
|
1535
|
+
} catch {
|
|
1536
|
+
return Math.ceil(text.length / 4);
|
|
1537
|
+
}
|
|
1538
|
+
} finally {
|
|
1539
|
+
encoder?.free();
|
|
1540
|
+
}
|
|
1541
|
+
}
|
|
1542
|
+
/**
|
|
1543
|
+
* Count tokens in a message content.
|
|
1544
|
+
* Handles both string content and array content (for multimodal messages).
|
|
1545
|
+
*
|
|
1546
|
+
* @param content - Message content (string or array)
|
|
1547
|
+
* @param model - Optional model name for encoding
|
|
1548
|
+
* @returns Number of tokens
|
|
1549
|
+
*/
|
|
1550
|
+
function countContentTokens(content, model) {
|
|
1551
|
+
if (typeof content === "string") return countTokens(content, model);
|
|
1552
|
+
if (Array.isArray(content)) return content.reduce((sum, part) => {
|
|
1553
|
+
if (typeof part === "object" && part !== null && "text" in part) return sum + countTokens(String(part.text), model);
|
|
1554
|
+
return sum;
|
|
1555
|
+
}, 0);
|
|
1556
|
+
return 0;
|
|
1557
|
+
}
|
|
1558
|
+
/**
|
|
1559
|
+
* Count tokens for a single message by serializing it to JSON.
|
|
1560
|
+
*
|
|
1561
|
+
* This method is more accurate as it accounts for JSON structure overhead
|
|
1562
|
+
* that the actual API will see.
|
|
1563
|
+
*
|
|
1564
|
+
* @param message - The message to count tokens for
|
|
1565
|
+
* @param model - Optional model name for encoding
|
|
1566
|
+
* @returns Number of tokens
|
|
1567
|
+
*/
|
|
1568
|
+
function countMessageTokens(message, model) {
|
|
1569
|
+
try {
|
|
1570
|
+
return countTokens(JSON.stringify(message), model);
|
|
1571
|
+
} catch {
|
|
1572
|
+
return countMessageTokensManual(message, model);
|
|
1573
|
+
}
|
|
1574
|
+
}
|
|
1575
|
+
/**
|
|
1576
|
+
* Manual token counting for a single message (fallback method).
|
|
1577
|
+
* Includes role overhead and content tokens.
|
|
1578
|
+
*/
|
|
1579
|
+
function countMessageTokensManual(message, model) {
|
|
1580
|
+
let tokens = 4;
|
|
1581
|
+
tokens += countContentTokens(message.content, model);
|
|
1582
|
+
if (message.role === "assistant" && message.tool_calls) for (const tc of message.tool_calls) {
|
|
1583
|
+
tokens += countTokens(tc.function.name, model);
|
|
1584
|
+
const args = typeof tc.function.arguments === "string" ? tc.function.arguments : JSON.stringify(tc.function.arguments);
|
|
1585
|
+
tokens += countTokens(args, model);
|
|
1586
|
+
tokens += 10;
|
|
1587
|
+
}
|
|
1588
|
+
if (message.role === "tool" && message.name) tokens += countTokens(message.name, model);
|
|
1589
|
+
if (message.role === "assistant" && message.reasoning_content) tokens += countTokens(message.reasoning_content, model);
|
|
1590
|
+
return tokens;
|
|
1591
|
+
}
|
|
1592
|
+
/**
|
|
1593
|
+
* Count total tokens in a message array.
|
|
1594
|
+
*
|
|
1595
|
+
* Serializes the entire messages array to JSON for accurate counting,
|
|
1596
|
+
* as this reflects what the actual API will receive.
|
|
1597
|
+
*
|
|
1598
|
+
* @param messages - Array of messages
|
|
1599
|
+
* @param model - Optional model name for encoding
|
|
1600
|
+
* @returns Total number of tokens
|
|
1601
|
+
*/
|
|
1602
|
+
function countMessagesTokens(messages, model) {
|
|
1603
|
+
if (!messages || messages.length === 0) return 0;
|
|
1604
|
+
try {
|
|
1605
|
+
return countTokens(JSON.stringify(messages), model);
|
|
1606
|
+
} catch {
|
|
1607
|
+
return messages.reduce((sum, msg) => sum + countMessageTokens(msg, model), 3);
|
|
1608
|
+
}
|
|
1609
|
+
}
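/*
 * Usage sketch for the token counting helpers above (illustrative messages; when
 * the optional "tiktoken" module cannot be loaded, countTokens falls back to the
 * ~4 characters-per-token estimate implemented above):
 *
 *   const single = countMessageTokens({ role: "user", content: "hello" });
 *   const total = countMessagesTokens([
 *     { role: "system", content: "You are helpful." },
 *     { role: "user", content: "hello" },
 *   ], "gpt-4o");
 */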
|
|
1610
|
+
|
|
1611
|
+
//#endregion
|
|
1612
|
+
//#region src/agent/contextCompressionMiddleware.ts
|
|
1613
|
+
/**
|
|
1614
|
+
* Placeholder text for cleared tool outputs.
|
|
1615
|
+
*/
|
|
1616
|
+
const CLEARED_TOOL_OUTPUT = "[Old tool result content cleared]";
|
|
1617
|
+
/**
|
|
1618
|
+
* Default summary generation prompt.
|
|
1619
|
+
*/
|
|
1620
|
+
const DEFAULT_SUMMARY_PROMPT = `You are Claude Code, Anthropic's official CLI for Claude.
|
|
1621
|
+
|
|
1622
|
+
You are a helpful AI assistant tasked with summarizing conversations.
|
|
1623
|
+
|
|
1624
|
+
Your task is to create a detailed summary of the conversation so far, paying close attention to the user's explicit requests and your previous actions.
|
|
1625
|
+
This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context.
|
|
1626
|
+
|
|
1627
|
+
Before providing your final summary, wrap your analysis in <analysis> tags to organize your thoughts and ensure you've covered all necessary points. In your analysis process:
|
|
1628
|
+
|
|
1629
|
+
1. Chronologically analyze each message and section of the conversation. For each section thoroughly identify:
|
|
1630
|
+
- The user's explicit requests and intents
|
|
1631
|
+
- Your approach to addressing the user's requests
|
|
1632
|
+
- Key decisions, technical concepts and code patterns
|
|
1633
|
+
- Specific details like:
|
|
1634
|
+
- file names
|
|
1635
|
+
- full code snippets
|
|
1636
|
+
- function signatures
|
|
1637
|
+
- file edits
|
|
1638
|
+
- Errors that you ran into and how you fixed them
|
|
1639
|
+
- Pay special attention to specific user feedback that you received, especially if the user told you to do something differently.
|
|
1640
|
+
2. Double-check for technical accuracy and completeness, addressing each required element thoroughly.
|
|
1641
|
+
|
|
1642
|
+
Your summary should include the following sections:
|
|
1643
|
+
|
|
1644
|
+
1. Primary Request and Intent: Capture all of the user's explicit requests and intents in detail
|
|
1645
|
+
2. Key Technical Concepts: List all important technical concepts, technologies, and frameworks discussed.
|
|
1646
|
+
3. Files and Code Sections: Enumerate specific files and code sections examined, modified, or created. Pay special attention to the most recent messages and include full code snippets where applicable and include a summary of why this file read or edit is important.
|
|
1647
|
+
4. Errors and fixes: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received, especially if the user told you to do something differently.
|
|
1648
|
+
5. Problem Solving: Document problems solved and any ongoing troubleshooting efforts.
|
|
1649
|
+
6. All user messages: List ALL user messages that are not tool results. These are critical for understanding the users' feedback and changing intent.
|
|
1650
|
+
7. Pending Tasks: Outline any pending tasks that you have explicitly been asked to work on.
|
|
1651
|
+
8. Current Work: Describe in detail precisely what was being worked on immediately before this summary request, paying special attention to the most recent messages from both user and assistant. Include file names and code snippets where applicable.
|
|
1652
|
+
9. Optional Next Step: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's most recent explicit requests, and the task you were working on immediately before this summary request. If your last task was concluded, then only list next steps if they are explicitly in line with the user's request. Do not start on tangential requests or really old requests that were already completed without confirming with the user first.
|
|
1653
|
+
If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off. This should be verbatim to ensure there's no drift in task interpretation.
|
|
1654
|
+
|
|
1655
|
+
Here's an example of how your output should be structured:
|
|
1656
|
+
|
|
1657
|
+
<example>
|
|
1658
|
+
<analysis>
|
|
1659
|
+
[Your thought process, ensuring all points are covered thoroughly and accurately]
|
|
1660
|
+
</analysis>
|
|
1661
|
+
|
|
1662
|
+
<summary>
|
|
1663
|
+
1. Primary Request and Intent:
|
|
1664
|
+
[Detailed description]
|
|
1665
|
+
|
|
1666
|
+
2. Key Technical Concepts:
|
|
1667
|
+
- [Concept 1]
|
|
1668
|
+
- [Concept 2]
|
|
1669
|
+
- [...]
|
|
1670
|
+
|
|
1671
|
+
3. Files and Code Sections:
|
|
1672
|
+
- [File Name 1]
|
|
1673
|
+
- [Summary of why this file is important]
|
|
1674
|
+
- [Summary of the changes made to this file, if any]
|
|
1675
|
+
- [Important Code Snippet]
|
|
1676
|
+
- [File Name 2]
|
|
1677
|
+
- [Important Code Snippet]
|
|
1678
|
+
- [...]
|
|
1679
|
+
|
|
1680
|
+
4. Errors and fixes:
|
|
1681
|
+
- [Detailed description of error 1]:
|
|
1682
|
+
- [How you fixed the error]
|
|
1683
|
+
- [User feedback on the error if any]
|
|
1684
|
+
- [...]
|
|
1685
|
+
|
|
1686
|
+
5. Problem Solving:
|
|
1687
|
+
[Description of solved problems and ongoing troubleshooting]
|
|
1688
|
+
|
|
1689
|
+
6. All user messages:
|
|
1690
|
+
- [Detailed non tool use user message]
|
|
1691
|
+
- [...]
|
|
1692
|
+
|
|
1693
|
+
7. Pending Tasks:
|
|
1694
|
+
- [Task 1]
|
|
1695
|
+
- [Task 2]
|
|
1696
|
+
- [...]
|
|
1697
|
+
|
|
1698
|
+
8. Current Work:
|
|
1699
|
+
[Precise description of current work]
|
|
1700
|
+
|
|
1701
|
+
9. Optional Next Step:
|
|
1702
|
+
[Optional Next step to take]
|
|
1703
|
+
|
|
1704
|
+
</summary>
|
|
1705
|
+
</example>
|
|
1706
|
+
|
|
1707
|
+
Please provide your summary based on the conversation so far, following this structure and ensuring precision and thoroughness in your response.
|
|
1708
|
+
|
|
1709
|
+
There may be additional summarization instructions provided in the included context. If so, remember to follow these instructions when creating the above summary. Examples of instructions include:
|
|
1710
|
+
<example>
|
|
1711
|
+
## Compact Instructions
|
|
1712
|
+
When summarizing the conversation focus on typescript code changes and also remember the mistakes you made and how you fixed them.
|
|
1713
|
+
</example>
|
|
1714
|
+
|
|
1715
|
+
<example>
|
|
1716
|
+
# Summary instructions
|
|
1717
|
+
When you are using compact - please focus on test output and code changes. Include file reads verbatim.
|
|
1718
|
+
</example>
|
|
1719
|
+
|
|
1720
|
+
Existing summary (may be empty):
|
|
1721
|
+
{{existingSummary}}
|
|
1722
|
+
|
|
1723
|
+
Messages to summarize:
|
|
1724
|
+
{{toolOutputs}}
|
|
1725
|
+
`;
|
|
1726
|
+
/**
|
|
1727
|
+
* Create a context compression middleware.
|
|
1728
|
+
*
|
|
1729
|
+
* This middleware automatically compresses conversation history when it exceeds
|
|
1730
|
+
* the configured token limit. It follows an immutable pattern - original messages
|
|
1731
|
+
* are never modified. Instead, compressed messages are passed to the LLM while
|
|
1732
|
+
* the original messages remain unchanged (preserved for checkpoint).
|
|
1733
|
+
*
|
|
1734
|
+
* Compression strategy:
|
|
1735
|
+
* 1. Preserve message structure (all messages remain, including tool calls)
|
|
1736
|
+
* 2. Protect recent N turns completely
|
|
1737
|
+
* 3. Protect recent tool outputs up to token limit
|
|
1738
|
+
* 4. Clear old tool outputs with placeholder: "[Old tool result content cleared]"
|
|
1739
|
+
* 5. Optionally generate a summary of cleared content
|
|
1740
|
+
*
|
|
1741
|
+
* @param options - Compression configuration options
|
|
1742
|
+
* @returns Middleware function
|
|
1743
|
+
*
|
|
1744
|
+
* @example
|
|
1745
|
+
* ```ts
|
|
1746
|
+
* const agent = new Agent({ model, ... })
|
|
1747
|
+
*
|
|
1748
|
+
* // Basic usage - just clear old tool outputs
|
|
1749
|
+
* agent.use(createContextCompressionMiddleware({
|
|
1750
|
+
* contextLimit: 128000,
|
|
1751
|
+
* outputLimit: 4096,
|
|
1752
|
+
* }))
|
|
1753
|
+
*
|
|
1754
|
+
* // With summary generation and persistence
|
|
1755
|
+
* agent.use(createContextCompressionMiddleware({
|
|
1756
|
+
* contextLimit: 128000,
|
|
1757
|
+
* outputLimit: 4096,
|
|
1758
|
+
* enableSummary: true,
|
|
1759
|
+
* getModel: () => agent.model,
|
|
1760
|
+
* getStateStore: () => agent.stateStore,
|
|
1761
|
+
* }))
|
|
1762
|
+
* ```
|
|
1763
|
+
*/
|
|
1764
|
+
function createContextCompressionMiddleware(options) {
|
|
1765
|
+
const { contextLimit, outputLimit, protectedTurns = 2, protectedToolTokens = 4e4, trimToolOutputThreshold = 2e4, enableSummary = false, model, getModel, summaryPrompt = DEFAULT_SUMMARY_PROMPT, stateStore, getStateStore, persistClearedContent = false, onCompressionStart, onCompressionEnd } = options;
|
|
1766
|
+
if (enableSummary && !model && !getModel) throw new Error("ContextCompressionMiddleware: \"model\" or \"getModel\" required when enableSummary is true");
|
|
1767
|
+
const compressionThreshold = contextLimit - outputLimit;
|
|
1768
|
+
return async (state, next) => {
|
|
1769
|
+
const store = stateStore ?? getStateStore?.();
|
|
1770
|
+
let compressionState;
|
|
1771
|
+
if (store) compressionState = await store.load(state.sessionId, StateKeys.COMPRESSION);
|
|
1772
|
+
let messagesWithSummary = state.messages;
|
|
1773
|
+
if (compressionState?.summary) {
|
|
1774
|
+
if (!messagesWithSummary.some((m) => m.role === "user" && typeof m.content === "string" && m.content.startsWith("[Context from cleared tool outputs]"))) {
|
|
1775
|
+
const systemIndex = messagesWithSummary.findIndex((m) => m.role === "system");
|
|
1776
|
+
const insertIndex = systemIndex >= 0 ? systemIndex + 1 : 0;
|
|
1777
|
+
messagesWithSummary = [
|
|
1778
|
+
...messagesWithSummary.slice(0, insertIndex),
|
|
1779
|
+
{
|
|
1780
|
+
role: "user",
|
|
1781
|
+
content: `[Context from cleared tool outputs]\n${compressionState.summary}`
|
|
1782
|
+
},
|
|
1783
|
+
...messagesWithSummary.slice(insertIndex)
|
|
1784
|
+
];
|
|
1785
|
+
console.log(`📋 [Compression] Auto-loaded summary (${compressionState.summary.length} chars) into messages at index ${insertIndex}`);
|
|
1786
|
+
}
|
|
1787
|
+
}
|
|
1788
|
+
if (countMessagesTokens(messagesWithSummary) > compressionThreshold) {
|
|
1789
|
+
const summaryModel = enableSummary ? model ?? getModel?.() : void 0;
|
|
1790
|
+
const { compressedMessages, stats, summary } = await compressMessagesImmutable(messagesWithSummary, {
|
|
1791
|
+
protectedTurns,
|
|
1792
|
+
protectedToolTokens,
|
|
1793
|
+
trimToolOutputThreshold,
|
|
1794
|
+
targetTokens: compressionThreshold,
|
|
1795
|
+
enableSummary,
|
|
1796
|
+
summaryPrompt
|
|
1797
|
+
}, summaryModel, compressionState?.summary, onCompressionStart, onCompressionEnd);
|
|
1798
|
+
const processedState = {
|
|
1799
|
+
...state,
|
|
1800
|
+
messages: compressedMessages,
|
|
1801
|
+
metadata: {
|
|
1802
|
+
...state.metadata,
|
|
1803
|
+
lastCompression: stats
|
|
1804
|
+
}
|
|
1805
|
+
};
|
|
1806
|
+
if (store) {
|
|
1807
|
+
const now = Date.now();
|
|
1808
|
+
const newCompressionState = {
|
|
1809
|
+
lastStats: stats,
|
|
1810
|
+
history: [...compressionState?.history ?? [], stats],
|
|
1811
|
+
summary,
|
|
1812
|
+
updatedAt: now
|
|
1813
|
+
};
|
|
1814
|
+
await store.save(state.sessionId, StateKeys.COMPRESSION, newCompressionState);
|
|
1815
|
+
if (persistClearedContent) {
|
|
1816
|
+
const snapshot = {
|
|
1817
|
+
messages: compressedMessages.map((msg) => ({
|
|
1818
|
+
role: msg.role,
|
|
1819
|
+
content: msg.content,
|
|
1820
|
+
tool_call_id: msg.tool_call_id,
|
|
1821
|
+
name: msg.name,
|
|
1822
|
+
tool_calls: "tool_calls" in msg ? msg.tool_calls : void 0
|
|
1823
|
+
})),
|
|
1824
|
+
stats,
|
|
1825
|
+
timestamp: now
|
|
1826
|
+
};
|
|
1827
|
+
const snapshotKey = `${StateKeys.COMPRESSION_SNAPSHOT}-${now}`;
|
|
1828
|
+
await store.save(state.sessionId, snapshotKey, snapshot);
|
|
1829
|
+
}
|
|
1830
|
+
}
|
|
1831
|
+
return next(processedState);
|
|
1832
|
+
}
|
|
1833
|
+
if (messagesWithSummary !== state.messages) return next({
|
|
1834
|
+
...state,
|
|
1835
|
+
messages: messagesWithSummary
|
|
1836
|
+
});
|
|
1837
|
+
return next(state);
|
|
1838
|
+
};
|
|
1839
|
+
}
|
|
1840
|
+
/**
|
|
1841
|
+
* Compress messages immutably by creating new messages array with cleared tool outputs.
|
|
1842
|
+
*
|
|
1843
|
+
* Strategy:
|
|
1844
|
+
* 1. Identify protected turns (recent N turns are fully protected)
|
|
1845
|
+
* 2. Identify protected tool outputs (recent outputs up to token limit)
|
|
1846
|
+
* 3. Create new messages with unprotected tool outputs cleared
|
|
1847
|
+
* 4. Trim protected but large tool outputs
|
|
1848
|
+
* 5. Optionally generate summary of cleared content
|
|
1849
|
+
*
|
|
1850
|
+
* This function never modifies the input messages array.
|
|
1851
|
+
*/
|
|
1852
|
+
async function compressMessagesImmutable(originalMessages, options, model, existingSummary, onStart, onEnd) {
|
|
1853
|
+
const { protectedTurns, protectedToolTokens, trimToolOutputThreshold, enableSummary, summaryPrompt } = options;
|
|
1854
|
+
const tokensBefore = countMessagesTokens(originalMessages);
|
|
1855
|
+
console.log(`\n🗜️ [Compression] Starting compression process...`);
|
|
1856
|
+
console.log(` 📊 Tokens before: ${tokensBefore.toLocaleString()}`);
|
|
1857
|
+
console.log(` 🎯 Target tokens: ${options.targetTokens.toLocaleString()}`);
|
|
1858
|
+
const protectedBoundary = findProtectedBoundary(originalMessages, protectedTurns);
|
|
1859
|
+
console.log(` 🛡️ Protected turns boundary: index ${protectedBoundary} (protecting ${protectedTurns} turns)`);
|
|
1860
|
+
const { protectedToolIds, unprotectedToolMessages } = categorizeToolMessages(originalMessages, protectedBoundary, protectedToolTokens);
|
|
1861
|
+
console.log(` 🔒 Protected tool outputs: ${protectedToolIds.size}`);
|
|
1862
|
+
console.log(` 🗑️ Tool outputs to clear: ${unprotectedToolMessages.length}`);
|
|
1863
|
+
const initialStats = {
|
|
1864
|
+
tokensBefore,
|
|
1865
|
+
tokensAfter: 0,
|
|
1866
|
+
clearedToolOutputs: unprotectedToolMessages.length,
|
|
1867
|
+
trimmedToolOutputs: 0,
|
|
1868
|
+
summaryGenerated: false,
|
|
1869
|
+
timestamp: Date.now()
|
|
1870
|
+
};
|
|
1871
|
+
onStart?.(initialStats);
|
|
1872
|
+
const clearedContents = [];
|
|
1873
|
+
let trimmedCount = 0;
|
|
1874
|
+
const compressedMessages = originalMessages.map((msg) => {
|
|
1875
|
+
if (msg.role === "tool") {
|
|
1876
|
+
const toolMsg = msg;
|
|
1877
|
+
if (!protectedToolIds.has(toolMsg.tool_call_id)) {
|
|
1878
|
+
const originalContent = typeof toolMsg.content === "string" ? toolMsg.content : JSON.stringify(toolMsg.content);
|
|
1879
|
+
clearedContents.push({
|
|
1880
|
+
name: toolMsg.name,
|
|
1881
|
+
content: originalContent
|
|
1882
|
+
});
|
|
1883
|
+
return {
|
|
1884
|
+
...toolMsg,
|
|
1885
|
+
content: CLEARED_TOOL_OUTPUT
|
|
1886
|
+
};
|
|
1887
|
+
} else if (countContentTokens(toolMsg.content) > trimToolOutputThreshold) {
|
|
1888
|
+
trimmedCount++;
|
|
1889
|
+
return trimToolOutput(toolMsg, trimToolOutputThreshold);
|
|
1890
|
+
}
|
|
1891
|
+
}
|
|
1892
|
+
return msg;
|
|
1893
|
+
});
|
|
1894
|
+
let newSummary = existingSummary;
|
|
1895
|
+
if (enableSummary && model && clearedContents.length > 0) {
|
|
1896
|
+
console.log(`\n📝 [Compression] Starting summary generation for ${clearedContents.length} cleared tool outputs...`);
|
|
1897
|
+
const generatedSummary = (await generateSummary(clearedContents, model, summaryPrompt, existingSummary)).trim();
|
|
1898
|
+
if (generatedSummary.length > 0) {
|
|
1899
|
+
console.log(`✅ [Compression] Summary generated (${generatedSummary.length} chars)`);
|
|
1900
|
+
newSummary = generatedSummary;
|
|
1901
|
+
const existingSummaryIndex = compressedMessages.findIndex((m) => m.role === "user" && typeof m.content === "string" && m.content.startsWith("[Context from cleared tool outputs]"));
|
|
1902
|
+
if (existingSummaryIndex >= 0) {
|
|
1903
|
+
console.log(`🗑️ [Compression] Removing existing summary message at index ${existingSummaryIndex}`);
|
|
1904
|
+
compressedMessages.splice(existingSummaryIndex, 1);
|
|
1905
|
+
}
|
|
1906
|
+
const systemIndex = compressedMessages.findIndex((m) => m.role === "system");
|
|
1907
|
+
const insertIndex = systemIndex >= 0 ? systemIndex + 1 : 0;
|
|
1908
|
+
console.log(`📌 [Compression] Inserting summary message at index ${insertIndex} (after system message)`);
|
|
1909
|
+
compressedMessages.splice(insertIndex, 0, {
|
|
1910
|
+
role: "user",
|
|
1911
|
+
content: `[Context from cleared tool outputs]\n${newSummary}`
|
|
1912
|
+
});
|
|
1913
|
+
}
|
|
1914
|
+
}
|
|
1915
|
+
let mergedMessages = mergeAdjacentUserMessages(compressedMessages);
|
|
1916
|
+
const tokensAfter = countMessagesTokens(mergedMessages);
|
|
1917
|
+
if (enableSummary && newSummary && tokensAfter > options.targetTokens) {
|
|
1918
|
+
const summaryMessageIndex = compressedMessages.findIndex((m) => m.role === "user" && typeof m.content === "string" && m.content.startsWith("[Context from cleared tool outputs]"));
|
|
1919
|
+
if (summaryMessageIndex >= 0) {
|
|
1920
|
+
const summaryMessage = compressedMessages[summaryMessageIndex];
|
|
1921
|
+
const summaryContent = typeof summaryMessage.content === "string" ? summaryMessage.content : "";
|
|
1922
|
+
const summaryTokens = countContentTokens(summaryContent);
|
|
1923
|
+
const excessTokens = tokensAfter - options.targetTokens;
|
|
1924
|
+
if (summaryTokens > excessTokens) {
|
|
1925
|
+
console.log(` ⚠️ [Compression] Summary too large (${summaryTokens} tokens), truncating...`);
|
|
1926
|
+
const targetSummaryTokens = Math.max(100, summaryTokens - excessTokens - 100);
|
|
1927
|
+
const targetSummaryChars = targetSummaryTokens * 4;
|
|
1928
|
+
const truncatedSummary = summaryContent.slice(0, targetSummaryChars - 43) + "\n\n[... summary truncated due to length ...]";
|
|
1929
|
+
compressedMessages[summaryMessageIndex] = {
|
|
1930
|
+
...summaryMessage,
|
|
1931
|
+
content: truncatedSummary
|
|
1932
|
+
};
|
|
1933
|
+
console.log(` ✂️ [Compression] Summary truncated from ${summaryTokens} to ~${targetSummaryTokens} tokens`);
|
|
1934
|
+
}
|
|
1935
|
+
}
|
|
1936
|
+
}
|
|
1937
|
+
mergedMessages = mergeAdjacentUserMessages(compressedMessages);
|
|
1938
|
+
const finalTokensAfter = countMessagesTokens(mergedMessages);
|
|
1939
|
+
const tokensSaved = tokensBefore - finalTokensAfter;
|
|
1940
|
+
const compressionRatio = (tokensSaved / tokensBefore * 100).toFixed(1);
|
|
1941
|
+
console.log(`\n✅ [Compression] Compression completed:`);
|
|
1942
|
+
console.log(` 📊 Tokens: ${tokensBefore.toLocaleString()} → ${finalTokensAfter.toLocaleString()} (saved ${tokensSaved.toLocaleString()}, ${compressionRatio}%)`);
|
|
1943
|
+
console.log(` 🗑️ Cleared: ${clearedContents.length} tool outputs`);
|
|
1944
|
+
console.log(` ✂️ Trimmed: ${trimmedCount} tool outputs`);
|
|
1945
|
+
console.log(` 📝 Summary: ${enableSummary && clearedContents.length > 0 ? "Generated" : "Not generated"}`);
|
|
1946
|
+
if (finalTokensAfter > options.targetTokens) console.log(` ⚠️ [Compression] WARNING: Still exceeds target by ${finalTokensAfter - options.targetTokens} tokens`);
|
|
1947
|
+
const finalStats = {
|
|
1948
|
+
tokensBefore,
|
|
1949
|
+
tokensAfter: finalTokensAfter,
|
|
1950
|
+
clearedToolOutputs: clearedContents.length,
|
|
1951
|
+
trimmedToolOutputs: trimmedCount,
|
|
1952
|
+
summaryGenerated: enableSummary && clearedContents.length > 0,
|
|
1953
|
+
timestamp: Date.now()
|
|
1954
|
+
};
|
|
1955
|
+
onEnd?.(finalStats);
|
|
1956
|
+
return {
|
|
1957
|
+
compressedMessages: mergedMessages,
|
|
1958
|
+
stats: finalStats,
|
|
1959
|
+
summary: newSummary,
|
|
1960
|
+
clearedContents
|
|
1961
|
+
};
|
|
1962
|
+
}
|
|
1963
|
+
/**
|
|
1964
|
+
* Find the message index that marks the start of protected turns.
|
|
1965
|
+
* Messages at or after this index are protected.
|
|
1966
|
+
*/
|
|
1967
|
+
function findProtectedBoundary(messages, protectedTurns) {
|
|
1968
|
+
if (protectedTurns <= 0) return messages.length;
|
|
1969
|
+
let turnCount = 0;
|
|
1970
|
+
for (let i = messages.length - 1; i >= 0; i--) if (messages[i].role === "user") {
|
|
1971
|
+
turnCount++;
|
|
1972
|
+
if (turnCount >= protectedTurns) return i;
|
|
1973
|
+
}
|
|
1974
|
+
return 0;
|
|
1975
|
+
}
|
|
1976
|
+
/**
|
|
1977
|
+
* Categorize tool messages into protected and unprotected groups.
|
|
1978
|
+
*/
|
|
1979
|
+
function categorizeToolMessages(messages, protectedBoundary, protectedToolTokens) {
|
|
1980
|
+
const protectedToolIds = /* @__PURE__ */ new Set();
|
|
1981
|
+
const unprotectedToolMessages = [];
|
|
1982
|
+
for (let i = protectedBoundary; i < messages.length; i++) {
|
|
1983
|
+
const msg = messages[i];
|
|
1984
|
+
if (msg.role === "tool") protectedToolIds.add(msg.tool_call_id);
|
|
1985
|
+
}
|
|
1986
|
+
let toolTokens = 0;
|
|
1987
|
+
for (let i = protectedBoundary - 1; i >= 0; i--) {
|
|
1988
|
+
const msg = messages[i];
|
|
1989
|
+
if (msg.role === "tool") {
|
|
1990
|
+
const toolMsg = msg;
|
|
1991
|
+
const msgTokens = countContentTokens(toolMsg.content);
|
|
1992
|
+
if (toolTokens + msgTokens <= protectedToolTokens) {
|
|
1993
|
+
protectedToolIds.add(toolMsg.tool_call_id);
|
|
1994
|
+
toolTokens += msgTokens;
|
|
1995
|
+
}
|
|
1996
|
+
}
|
|
1997
|
+
}
|
|
1998
|
+
for (let i = 0; i < protectedBoundary; i++) {
|
|
1999
|
+
const msg = messages[i];
|
|
2000
|
+
if (msg.role === "tool") {
|
|
2001
|
+
const toolMsg = msg;
|
|
2002
|
+
if (!protectedToolIds.has(toolMsg.tool_call_id)) unprotectedToolMessages.push(toolMsg);
|
|
2003
|
+
}
|
|
2004
|
+
}
|
|
2005
|
+
return {
|
|
2006
|
+
protectedToolIds,
|
|
2007
|
+
unprotectedToolMessages
|
|
2008
|
+
};
|
|
2009
|
+
}
|
|
2010
|
+
/**
|
|
2011
|
+
* Trim a tool message output to fit within token limit.
|
|
2012
|
+
*/
|
|
2013
|
+
function trimToolOutput(msg, maxTokens) {
|
|
2014
|
+
const content = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
|
|
2015
|
+
const maxChars = maxTokens * 4;
|
|
2016
|
+
const truncationMarker = "\n\n[... content truncated due to length ...]";
|
|
2017
|
+
if (content.length <= maxChars) return msg;
|
|
2018
|
+
const truncatedContent = content.slice(0, maxChars - 43) + truncationMarker;
|
|
2019
|
+
return {
|
|
2020
|
+
...msg,
|
|
2021
|
+
content: truncatedContent
|
|
2022
|
+
};
|
|
2023
|
+
}
|
|
2024
|
+
function mergeAdjacentUserMessages(messages) {
|
|
2025
|
+
const merged = [];
|
|
2026
|
+
for (const msg of messages) {
|
|
2027
|
+
if (msg.role !== "user") {
|
|
2028
|
+
merged.push(msg);
|
|
2029
|
+
continue;
|
|
2030
|
+
}
|
|
2031
|
+
const last = merged[merged.length - 1];
|
|
2032
|
+
if (last && last.role === "user") {
|
|
2033
|
+
if (last.name === msg.name) {
|
|
2034
|
+
merged[merged.length - 1] = {
|
|
2035
|
+
...last,
|
|
2036
|
+
content: mergeUserContent(last.content, msg.content)
|
|
2037
|
+
};
|
|
2038
|
+
continue;
|
|
2039
|
+
}
|
|
2040
|
+
}
|
|
2041
|
+
merged.push(msg);
|
|
2042
|
+
}
|
|
2043
|
+
return merged;
|
|
2044
|
+
}
|
|
2045
|
+
function mergeUserContent(left, right) {
|
|
2046
|
+
return [...toContentBlocks(left), ...toContentBlocks(right)];
|
|
2047
|
+
}
|
|
2048
|
+
function toContentBlocks(content) {
|
|
2049
|
+
if (typeof content === "string") return content.trim().length > 0 ? [{
|
|
2050
|
+
type: "text",
|
|
2051
|
+
text: content
|
|
2052
|
+
}] : [];
|
|
2053
|
+
if (Array.isArray(content)) {
|
|
2054
|
+
const blocks = [];
|
|
2055
|
+
const items = content;
|
|
2056
|
+
for (const item of items) {
|
|
2057
|
+
if (typeof item === "string") {
|
|
2058
|
+
const trimmed = item.trim();
|
|
2059
|
+
if (trimmed.length > 0) blocks.push({
|
|
2060
|
+
type: "text",
|
|
2061
|
+
text: trimmed
|
|
2062
|
+
});
|
|
2063
|
+
continue;
|
|
2064
|
+
}
|
|
2065
|
+
if (item && typeof item === "object" && "type" in item) {
|
|
2066
|
+
const block = item;
|
|
2067
|
+
if (block.type === "text") {
|
|
2068
|
+
const text = block.text;
|
|
2069
|
+
if (typeof text === "string" && text.trim().length === 0) continue;
|
|
2070
|
+
}
|
|
2071
|
+
blocks.push(block);
|
|
2072
|
+
continue;
|
|
2073
|
+
}
|
|
2074
|
+
if (item !== null && item !== void 0) {
|
|
2075
|
+
const text = String(item);
|
|
2076
|
+
if (text.trim().length > 0) blocks.push({
|
|
2077
|
+
type: "text",
|
|
2078
|
+
text
|
|
2079
|
+
});
|
|
2080
|
+
}
|
|
2081
|
+
}
|
|
2082
|
+
return blocks;
|
|
2083
|
+
}
|
|
2084
|
+
if (content && typeof content === "object" && "type" in content) {
|
|
2085
|
+
const block = content;
|
|
2086
|
+
if (block.type === "text") {
|
|
2087
|
+
const text = block.text;
|
|
2088
|
+
if (typeof text === "string" && text.trim().length === 0) return [];
|
|
2089
|
+
}
|
|
2090
|
+
return [block];
|
|
2091
|
+
}
|
|
2092
|
+
const fallback = String(content);
|
|
2093
|
+
return fallback.trim().length > 0 ? [{
|
|
2094
|
+
type: "text",
|
|
2095
|
+
text: fallback
|
|
2096
|
+
}] : [];
|
|
2097
|
+
}
|
|
2098
|
+
/**
|
|
2099
|
+
* Generate a summary of cleared tool outputs using the model.
|
|
2100
|
+
*/
|
|
2101
|
+
async function generateSummary(clearedContents, model, summaryPrompt, existingSummary) {
|
|
2102
|
+
const formattedContent = clearedContents.map(({ name, content }) => {
|
|
2103
|
+
return `## ${name ?? "unknown_tool"}\n${content.length > 5e3 ? `${content.slice(0, 5e3)}...[truncated for summary]` : content}`;
|
|
2104
|
+
}).join("\n\n---\n\n");
|
|
2105
|
+
const fullPrompt = buildSummaryPrompt(summaryPrompt, formattedContent, existingSummary);
|
|
2106
|
+
const promptTokens = Math.ceil(fullPrompt.length / 4);
|
|
2107
|
+
console.log(` 🤖 [Summary] Calling model (${model.modelId || "unknown"}) to generate summary...`);
|
|
2108
|
+
console.log(` 📊 [Summary] Input: ${clearedContents.length} tool outputs, ~${promptTokens} tokens`);
|
|
2109
|
+
console.log(` 📥 [Summary] Input content preview (first 500 chars):`);
|
|
2110
|
+
console.log(` ${formattedContent.slice(0, 500)}${formattedContent.length > 500 ? "..." : ""}`);
|
|
2111
|
+
const startTime = Date.now();
|
|
2112
|
+
const response = await model.invoke([{
|
|
2113
|
+
role: "system",
|
|
2114
|
+
content: "You are a helpful assistant that creates concise summaries."
|
|
2115
|
+
}, {
|
|
2116
|
+
role: "user",
|
|
2117
|
+
content: fullPrompt
|
|
2118
|
+
}], { maxTokens: 1e3 });
|
|
2119
|
+
const duration = Date.now() - startTime;
|
|
2120
|
+
const summary = typeof response.message.content === "string" ? response.message.content : JSON.stringify(response.message.content);
|
|
2121
|
+
console.log(` ⏱️ [Summary] Model call completed in ${duration}ms`);
|
|
2122
|
+
console.log(` 📄 [Summary] Generated summary (${summary.length} chars):`);
|
|
2123
|
+
console.log(` ${summary}`);
|
|
2124
|
+
console.log(` 📊 [Summary] Compression ratio: ${formattedContent.length} → ${summary.length} chars (${((1 - summary.length / formattedContent.length) * 100).toFixed(1)}% reduction)`);
|
|
2125
|
+
return summary;
|
|
2126
|
+
}
|
|
2127
|
+
function buildSummaryPrompt(summaryPrompt, formattedContent, existingSummary) {
|
|
2128
|
+
const hasToolOutputs = summaryPrompt.includes("{{toolOutputs}}");
|
|
2129
|
+
const hasExistingSummary = summaryPrompt.includes("{{existingSummary}}");
|
|
2130
|
+
const baseSummary = existingSummary ?? "";
|
|
2131
|
+
if (hasToolOutputs || hasExistingSummary) return summaryPrompt.split("{{toolOutputs}}").join(formattedContent).split("{{existingSummary}}").join(baseSummary);
|
|
2132
|
+
return `${summaryPrompt}\n\n${baseSummary.length > 0 ? `Existing summary:\n${baseSummary}\n\nTool outputs to summarize:\n${formattedContent}` : `Tool outputs to summarize:\n${formattedContent}`}`;
|
|
2133
|
+
}
|
|
2134
|
+
/**
|
|
2135
|
+
* Manually compress a session by generating a summary from full message history.
|
|
2136
|
+
*
|
|
2137
|
+
* This function extracts all tool outputs from the full messages and generates
|
|
2138
|
+
* a new summary based on the complete context. The summary is saved to
|
|
2139
|
+
* CompressionState and will be automatically loaded by the middleware on next run.
|
|
2140
|
+
*
|
|
2141
|
+
* @param options - Manual compression options
|
|
2142
|
+
* @returns Compression result with generated summary
|
|
2143
|
+
*
|
|
2144
|
+
* @example
|
|
2145
|
+
* ```ts
|
|
2146
|
+
* const result = await compressSessionManually({
|
|
2147
|
+
* sessionId: 'session-123',
|
|
2148
|
+
* fullMessages: allMessages,
|
|
2149
|
+
* model: myModel,
|
|
2150
|
+
* stateStore: myStore,
|
|
2151
|
+
* })
|
|
2152
|
+
* console.log(`Generated summary: ${result.summary}`)
|
|
2153
|
+
* ```
|
|
2154
|
+
*/
|
|
2155
|
+
async function compressSessionManually(options) {
|
|
2156
|
+
const { sessionId, fullMessages, model, stateStore, summaryPrompt = DEFAULT_SUMMARY_PROMPT } = options;
|
|
2157
|
+
console.log(`\n📝 [Manual Compression] Starting manual compression for session ${sessionId}...`);
|
|
2158
|
+
console.log(` 📊 Total messages: ${fullMessages.length}`);
|
|
2159
|
+
const toolOutputs = [];
|
|
2160
|
+
for (const msg of fullMessages) if (msg.role === "tool") {
|
|
2161
|
+
const toolMsg = msg;
|
|
2162
|
+
const content = typeof toolMsg.content === "string" ? toolMsg.content : JSON.stringify(toolMsg.content);
|
|
2163
|
+
toolOutputs.push({
|
|
2164
|
+
name: toolMsg.name,
|
|
2165
|
+
content
|
|
2166
|
+
});
|
|
2167
|
+
}
|
|
2168
|
+
console.log(` 🔧 Tool outputs found: ${toolOutputs.length}`);
|
|
2169
|
+
if (toolOutputs.length === 0) {
|
|
2170
|
+
console.log(` ⚠️ [Manual Compression] No tool outputs found, nothing to summarize`);
|
|
2171
|
+
return {
|
|
2172
|
+
summary: "",
|
|
2173
|
+
messageCount: fullMessages.length,
|
|
2174
|
+
toolOutputCount: 0
|
|
2175
|
+
};
|
|
2176
|
+
}
|
|
2177
|
+
console.log(` 🤖 [Manual Compression] Generating summary from ${toolOutputs.length} tool outputs...`);
|
|
2178
|
+
const summary = (await generateSummary(toolOutputs, model, summaryPrompt, void 0)).trim();
|
|
2179
|
+
if (summary.length === 0) {
|
|
2180
|
+
console.log(` ⚠️ [Manual Compression] Summary generation returned empty result`);
|
|
2181
|
+
return {
|
|
2182
|
+
summary: "",
|
|
2183
|
+
messageCount: fullMessages.length,
|
|
2184
|
+
toolOutputCount: toolOutputs.length
|
|
2185
|
+
};
|
|
2186
|
+
}
|
|
2187
|
+
console.log(` ✅ [Manual Compression] Summary generated (${summary.length} chars)`);
|
|
2188
|
+
const existingState = await stateStore.load(sessionId, StateKeys.COMPRESSION);
|
|
2189
|
+
const now = Date.now();
|
|
2190
|
+
const newCompressionState = {
|
|
2191
|
+
lastStats: existingState?.lastStats,
|
|
2192
|
+
history: existingState?.history ?? [],
|
|
2193
|
+
summary,
|
|
2194
|
+
updatedAt: now
|
|
2195
|
+
};
|
|
2196
|
+
await stateStore.save(sessionId, StateKeys.COMPRESSION, newCompressionState);
|
|
2197
|
+
console.log(` 💾 [Manual Compression] Summary saved to CompressionState`);
|
|
2198
|
+
return {
|
|
2199
|
+
summary,
|
|
2200
|
+
messageCount: fullMessages.length,
|
|
2201
|
+
toolOutputCount: toolOutputs.length
|
|
2202
|
+
};
|
|
2203
|
+
}
|
|
2204
|
+
|
|
2205
|
+
//#endregion
|
|
2206
|
+
//#region src/model/base.ts
|
|
2207
|
+
/**
|
|
2208
|
+
* Abstract base class for LLM model providers.
|
|
2209
|
+
*
|
|
2210
|
+
* Implement this class to create custom model integrations
|
|
2211
|
+
* (e.g., OpenAI, Anthropic, local models).
|
|
2212
|
+
*/
|
|
2213
|
+
var BaseModel = class {};
|
|
2214
|
+
|
|
2215
|
+
//#endregion
|
|
2216
|
+
//#region src/model/adapter.ts
|
|
2217
|
+
function computeFeatureGrants(options) {
|
|
2218
|
+
const grants = {};
|
|
2219
|
+
const reqs = options.featureRequests || {};
|
|
2220
|
+
for (const [featureId, request] of Object.entries(reqs)) if (options.supportsFeature ? options.supportsFeature(featureId) : false) grants[featureId] = {
|
|
2221
|
+
granted: true,
|
|
2222
|
+
effectiveConfig: request?.config || {}
|
|
2223
|
+
};
|
|
2224
|
+
else grants[featureId] = {
|
|
2225
|
+
granted: false,
|
|
2226
|
+
reason: "feature_not_supported"
|
|
2227
|
+
};
|
|
2228
|
+
return grants;
|
|
2229
|
+
}
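/*
 * Sketch of computeFeatureGrants (the "vision" feature id and its config are
 * hypothetical; the result shape mirrors the two branches above):
 *
 *   const grants = computeFeatureGrants({
 *     modelId: "gpt-4o",
 *     featureRequests: { vision: { config: { detail: "low" } } },
 *     supportsFeature: (id) => id === "vision",
 *   });
 *   // grants.vision -> { granted: true, effectiveConfig: { detail: "low" } }
 *   // unsupported features instead yield { granted: false, reason: "feature_not_supported" }
 */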
|
|
2230
|
+
|
|
2231
|
+
//#endregion
|
|
2232
|
+
//#region src/model/errors.ts
|
|
2233
|
+
/**
|
|
2234
|
+
* Model error class.
|
|
2235
|
+
* Follows SRP: only handles error representation.
|
|
2236
|
+
*/
|
|
2237
|
+
var ModelError = class extends Error {
|
|
2238
|
+
code;
|
|
2239
|
+
retryable;
|
|
2240
|
+
status;
|
|
2241
|
+
constructor(message, options) {
|
|
2242
|
+
super(message);
|
|
2243
|
+
this.name = "ModelError";
|
|
2244
|
+
this.code = options.code;
|
|
2245
|
+
this.retryable = options.retryable ?? false;
|
|
2246
|
+
this.status = options.status;
|
|
2247
|
+
}
|
|
2248
|
+
};
|
|
2249
|
+
|
|
2250
|
+
//#endregion
|
|
2251
|
+
//#region src/model/health.ts
|
|
2252
|
+
/**
|
|
2253
|
+
* Model health manager.
|
|
2254
|
+
* Follows SRP: only handles health state tracking.
|
|
2255
|
+
*/
|
|
2256
|
+
var ModelHealth = class {
|
|
2257
|
+
state = /* @__PURE__ */ new Map();
|
|
2258
|
+
get(ref) {
|
|
2259
|
+
return this.state.get(this.keyOf(ref)) || {
|
|
2260
|
+
failures: 0,
|
|
2261
|
+
nextRetryAt: 0
|
|
2262
|
+
};
|
|
2263
|
+
}
|
|
2264
|
+
markSuccess(ref) {
|
|
2265
|
+
this.state.set(this.keyOf(ref), {
|
|
2266
|
+
failures: 0,
|
|
2267
|
+
nextRetryAt: 0
|
|
2268
|
+
});
|
|
2269
|
+
}
|
|
2270
|
+
markFailure(ref, options) {
|
|
2271
|
+
const failures = this.get(ref).failures + 1;
|
|
2272
|
+
const delay = Math.min(options.maxDelayMs, options.baseDelayMs * 2 ** Math.min(8, failures - 1));
|
|
2273
|
+
this.state.set(this.keyOf(ref), {
|
|
2274
|
+
failures,
|
|
2275
|
+
nextRetryAt: options.now + delay,
|
|
2276
|
+
lastError: {
|
|
2277
|
+
code: options.code,
|
|
2278
|
+
message: options.message
|
|
2279
|
+
}
|
|
2280
|
+
});
|
|
2281
|
+
}
|
|
2282
|
+
isAvailable(ref, now) {
|
|
2283
|
+
return this.get(ref).nextRetryAt <= now;
|
|
2284
|
+
}
|
|
2285
|
+
keyOf(ref) {
|
|
2286
|
+
return `${ref.provider}:${ref.modelId}`;
|
|
2287
|
+
}
|
|
2288
|
+
};
|
|
2289
|
+
/**
|
|
2290
|
+
* In-memory implementation for backward compatibility.
|
|
2291
|
+
*/
|
|
2292
|
+
var InMemoryModelHealth = class extends ModelHealth {};
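/*
 * Sketch of the health tracking above (provider/model ids and the error code are
 * illustrative; markFailure doubles the backoff per consecutive failure, capped
 * at maxDelayMs):
 *
 *   const health = new InMemoryModelHealth();
 *   const ref = { provider: "openai", modelId: "gpt-4o" };
 *   health.markFailure(ref, { now: Date.now(), baseDelayMs: 500, maxDelayMs: 30000, code: "rate_limited", message: "429" });
 *   health.isAvailable(ref, Date.now()); // false until nextRetryAt has passed
 *   health.markSuccess(ref);             // clears failures and the retry window
 */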
|
|
2293
|
+
|
|
2294
|
+
//#endregion
|
|
2295
|
+
//#region src/model/router.ts
|
|
2296
|
+
/**
|
|
2297
|
+
* Model router.
|
|
2298
|
+
* Follows SRP: only handles routing logic and fallback selection.
|
|
2299
|
+
*/
|
|
2300
|
+
var ModelRouter = class {
|
|
2301
|
+
constructor(health, fallbackOrder) {
|
|
2302
|
+
this.health = health;
|
|
2303
|
+
this.fallbackOrder = fallbackOrder;
|
|
2304
|
+
}
|
|
2305
|
+
/**
|
|
2306
|
+
* Select available models based on health and feature requirements.
|
|
2307
|
+
*/
|
|
2308
|
+
select(options) {
|
|
2309
|
+
const { now, requiredFeatures, isFeatureSupported } = options;
|
|
2310
|
+
const available = this.fallbackOrder.filter((ref) => this.health.isAvailable(ref, now));
|
|
2311
|
+
const withFeatures = this.filterByFeatures(available.length ? available : this.fallbackOrder, requiredFeatures, isFeatureSupported);
|
|
2312
|
+
return withFeatures.length ? withFeatures : this.fallbackOrder;
|
|
2313
|
+
}
|
|
2314
|
+
filterByFeatures(candidates, requiredFeatures, isFeatureSupported) {
|
|
2315
|
+
if (!requiredFeatures) return candidates;
|
|
2316
|
+
const requiredIds = Object.entries(requiredFeatures).filter(([, v]) => v?.required).map(([k]) => k);
|
|
2317
|
+
if (!requiredIds.length) return candidates;
|
|
2318
|
+
if (!isFeatureSupported) return [];
|
|
2319
|
+
return candidates.filter((ref) => requiredIds.every((id) => isFeatureSupported(ref, id)));
|
|
2320
|
+
}
|
|
2321
|
+
};
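/*
 * Sketch of ModelRouter.select (model refs are illustrative): healthy models from
 * the fallback order are preferred; when none are currently available the full
 * order is returned so a call can still be attempted:
 *
 *   const router = new ModelRouter(new InMemoryModelHealth(), [
 *     { provider: "openai", modelId: "gpt-4o" },
 *     { provider: "openai", modelId: "gpt-4o-mini" },
 *   ]);
 *   const candidates = router.select({ now: Date.now() });
 */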
|
|
2322
|
+
|
|
2323
|
+
//#endregion
|
|
2324
|
+
//#region src/model/utils/http.ts
|
|
2325
|
+
/**
|
|
2326
|
+
* HTTP utility functions.
|
|
2327
|
+
* Follows SRP: only handles HTTP-related utilities.
|
|
2328
|
+
*/
|
|
2329
|
+
var HttpUtils = class {
|
|
2330
|
+
/**
|
|
2331
|
+
* Determine if an HTTP status code is retryable.
|
|
2332
|
+
* Retryable: 408, 409, 429, 5xx
|
|
2333
|
+
*/
|
|
2334
|
+
static isRetryableStatus(status) {
|
|
2335
|
+
return status === 408 || status === 409 || status === 429 || status >= 500 && status <= 599;
|
|
2336
|
+
}
|
|
2337
|
+
/**
|
|
2338
|
+
* Sleep for a specified duration, with abort signal support.
|
|
2339
|
+
*/
|
|
2340
|
+
static async sleep(ms, signal) {
|
|
2341
|
+
if (ms <= 0) return Promise.resolve();
|
|
2342
|
+
return new Promise((resolve, reject) => {
|
|
2343
|
+
const timer = setTimeout(() => {
|
|
2344
|
+
cleanup();
|
|
2345
|
+
resolve();
|
|
2346
|
+
}, ms);
|
|
2347
|
+
function cleanup() {
|
|
2348
|
+
clearTimeout(timer);
|
|
2349
|
+
if (signal) signal.removeEventListener("abort", onAbort);
|
|
2350
|
+
}
|
|
2351
|
+
function onAbort() {
|
|
2352
|
+
cleanup();
|
|
2353
|
+
reject(/* @__PURE__ */ new Error("Aborted"));
|
|
2354
|
+
}
|
|
2355
|
+
if (signal) {
|
|
2356
|
+
if (signal.aborted) return onAbort();
|
|
2357
|
+
signal.addEventListener("abort", onAbort);
|
|
2358
|
+
}
|
|
2359
|
+
});
|
|
2360
|
+
}
|
|
2361
|
+
};
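/*
 * Sketch of the HTTP helpers above:
 *
 *   HttpUtils.isRetryableStatus(429); // true  (408, 409, 429 and 5xx are retryable)
 *   HttpUtils.isRetryableStatus(404); // false
 *   const controller = new AbortController();
 *   await HttpUtils.sleep(250, controller.signal); // resolves after ~250ms, rejects if aborted
 */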
|
|
2362
|
+
|
|
2363
|
+
//#endregion
|
|
2364
|
+
//#region src/model/utils/id.ts
|
|
2365
|
+
function createRequestId(prefix = "req") {
|
|
2366
|
+
const rand = Math.random().toString(16).slice(2);
|
|
2367
|
+
return `${prefix}_${Date.now().toString(16)}_${rand}`;
|
|
2368
|
+
}
|
|
2369
|
+
|
|
2370
|
+
//#endregion
|
|
2371
|
+
//#region src/model/utils/retry.ts
|
|
2372
|
+
/**
|
|
2373
|
+
* Retry policy for model requests with exponential backoff and jitter.
|
|
2374
|
+
*
|
|
2375
|
+
* Implements industry-standard retry patterns:
|
|
2376
|
+
* - Exponential backoff: prevents overwhelming recovering services
|
|
2377
|
+
* - Jitter: prevents thundering herd when many clients retry simultaneously
|
|
2378
|
+
* - Max delay cap: prevents unbounded wait times
|
|
2379
|
+
*
|
|
2380
|
+
* @example
|
|
2381
|
+
* ```ts
|
|
2382
|
+
* // Default: exponential backoff with equal jitter
|
|
2383
|
+
* const policy = new RetryPolicy()
|
|
2384
|
+
*
|
|
2385
|
+
* // Custom: 5 attempts, 1s base, 60s max, full jitter
|
|
2386
|
+
* const policy = new RetryPolicy({
|
|
2387
|
+
* maxAttempts: 5,
|
|
2388
|
+
* baseDelayMs: 1000,
|
|
2389
|
+
* maxDelayMs: 60000,
|
|
2390
|
+
* jitter: 'full',
|
|
2391
|
+
* })
|
|
2392
|
+
*
|
|
2393
|
+
* // Usage
|
|
2394
|
+
* for (let attempt = 1; attempt <= policy.maxAttempts; attempt++) {
|
|
2395
|
+
* try {
|
|
2396
|
+
* await makeRequest()
|
|
2397
|
+
* break
|
|
2398
|
+
* } catch (err) {
|
|
2399
|
+
* if (!policy.canRetry(attempt)) throw err
|
|
2400
|
+
* await sleep(policy.getDelay(attempt))
|
|
2401
|
+
* }
|
|
2402
|
+
* }
|
|
2403
|
+
* ```
|
|
2404
|
+
*/
|
|
2405
|
+
var RetryPolicy = class RetryPolicy {
|
|
2406
|
+
maxAttempts;
|
|
2407
|
+
baseDelayMs;
|
|
2408
|
+
maxDelayMs;
|
|
2409
|
+
strategy;
|
|
2410
|
+
jitter;
|
|
2411
|
+
multiplier;
|
|
2412
|
+
_previousDelay;
|
|
2413
|
+
constructor(options = {}) {
|
|
2414
|
+
this.maxAttempts = options.maxAttempts ?? 3;
|
|
2415
|
+
this.baseDelayMs = options.baseDelayMs ?? 500;
|
|
2416
|
+
this.maxDelayMs = options.maxDelayMs ?? 3e4;
|
|
2417
|
+
this.strategy = options.strategy ?? "exponential";
|
|
2418
|
+
this.jitter = options.jitter ?? "equal";
|
|
2419
|
+
this.multiplier = options.multiplier ?? 2;
|
|
2420
|
+
this._previousDelay = this.baseDelayMs;
|
|
2421
|
+
}
|
|
2422
|
+
/**
|
|
2423
|
+
* Calculate delay for a given attempt.
|
|
2424
|
+
*
|
|
2425
|
+
* @param attempt - Current attempt number (1-indexed)
|
|
2426
|
+
* @returns Delay in milliseconds with jitter applied
|
|
2427
|
+
*/
|
|
2428
|
+
getDelay(attempt) {
|
|
2429
|
+
let delay;
|
|
2430
|
+
switch (this.strategy) {
|
|
2431
|
+
case "exponential":
|
|
2432
|
+
delay = this.baseDelayMs * this.multiplier ** (attempt - 1);
|
|
2433
|
+
break;
|
|
2434
|
+
case "linear":
|
|
2435
|
+
delay = this.baseDelayMs * attempt;
|
|
2436
|
+
break;
|
|
2437
|
+
case "fixed":
|
|
2438
|
+
delay = this.baseDelayMs;
|
|
2439
|
+
break;
|
|
2440
|
+
}
|
|
2441
|
+
delay = Math.min(delay, this.maxDelayMs);
|
|
2442
|
+
delay = this.applyJitter(delay);
|
|
2443
|
+
this._previousDelay = delay;
|
|
2444
|
+
return Math.floor(delay);
|
|
2445
|
+
}
|
|
2446
|
+
/**
|
|
2447
|
+
* Apply jitter to the calculated delay.
|
|
2448
|
+
*/
|
|
2449
|
+
applyJitter(delay) {
|
|
2450
|
+
switch (this.jitter) {
|
|
2451
|
+
case "full": return Math.random() * delay;
|
|
2452
|
+
case "equal": return delay / 2 + Math.random() * delay / 2;
|
|
2453
|
+
case "decorrelated": return Math.min(this.maxDelayMs, Math.random() * (this._previousDelay * 3 - this.baseDelayMs) + this.baseDelayMs);
|
|
2454
|
+
case "none":
|
|
2455
|
+
default: return delay;
|
|
2456
|
+
}
|
|
2457
|
+
}
|
|
2458
|
+
/**
|
|
2459
|
+
* Check if another retry is allowed.
|
|
2460
|
+
*
|
|
2461
|
+
* @param attempt - Current attempt number (1-indexed)
|
|
2462
|
+
* @returns true if more retries are allowed
|
|
2463
|
+
*/
|
|
2464
|
+
canRetry(attempt) {
|
|
2465
|
+
return attempt < this.maxAttempts;
|
|
2466
|
+
}
|
|
2467
|
+
/**
|
|
2468
|
+
* Reset internal state (useful for decorrelated jitter).
|
|
2469
|
+
*/
|
|
2470
|
+
reset() {
|
|
2471
|
+
this._previousDelay = this.baseDelayMs;
|
|
2472
|
+
}
|
|
2473
|
+
/**
|
|
2474
|
+
* Get a human-readable description of the policy.
|
|
2475
|
+
*/
|
|
2476
|
+
toString() {
|
|
2477
|
+
return `RetryPolicy(${this.strategy}, max=${this.maxAttempts}, base=${this.baseDelayMs}ms, cap=${this.maxDelayMs}ms, jitter=${this.jitter})`;
|
|
2478
|
+
}
|
|
2479
|
+
/**
|
|
2480
|
+
* Create default retry policy for API calls.
|
|
2481
|
+
* - 3 attempts
|
|
2482
|
+
* - 500ms base delay
|
|
2483
|
+
* - 30s max delay
|
|
2484
|
+
* - Exponential backoff with equal jitter
|
|
2485
|
+
*/
|
|
2486
|
+
static default = new RetryPolicy();
|
|
2487
|
+
/**
|
|
2488
|
+
* Create aggressive retry policy for critical operations.
|
|
2489
|
+
* - 5 attempts
|
|
2490
|
+
* - 1s base delay
|
|
2491
|
+
* - 60s max delay
|
|
2492
|
+
*/
|
|
2493
|
+
static aggressive = new RetryPolicy({
|
|
2494
|
+
maxAttempts: 5,
|
|
2495
|
+
baseDelayMs: 1e3,
|
|
2496
|
+
maxDelayMs: 6e4
|
|
2497
|
+
});
|
|
2498
|
+
/**
|
|
2499
|
+
* Create gentle retry policy for rate-limited APIs.
|
|
2500
|
+
* - 3 attempts
|
|
2501
|
+
* - 2s base delay
|
|
2502
|
+
* - 30s max delay
|
|
2503
|
+
* - Full jitter for better spread
|
|
2504
|
+
*/
|
|
2505
|
+
static gentle = new RetryPolicy({
|
|
2506
|
+
maxAttempts: 3,
|
|
2507
|
+
baseDelayMs: 2e3,
|
|
2508
|
+
maxDelayMs: 3e4,
|
|
2509
|
+
jitter: "full"
|
|
2510
|
+
});
|
|
2511
|
+
};
|
|
2512
|
+
|
|
2513
|
+
//#endregion
|
|
2514
|
+
//#region src/model/createModel.ts
|
|
2515
|
+
function createModel(options) {
|
|
2516
|
+
const adapterByProvider = new Map(options.adapters.map((a) => [a.provider, a]));
|
|
2517
|
+
const health = options.health ?? new InMemoryModelHealth();
|
|
2518
|
+
const initialFallbackOrder = options.routing?.fallbackOrder ?? (() => {
|
|
2519
|
+
const derived = [];
|
|
2520
|
+
for (const adapter of options.adapters) if (adapter.defaultModelId) derived.push({
|
|
2521
|
+
provider: adapter.provider,
|
|
2522
|
+
modelId: adapter.defaultModelId
|
|
2523
|
+
});
|
|
2524
|
+
if (derived.length === 0) throw new ModelError("No routing configuration and no adapter with defaultModelId provided. Either provide options.routing.fallbackOrder or set defaultModelId on your adapters.", {
|
|
2525
|
+
code: "missing_routing",
|
|
2526
|
+
retryable: false
|
|
2527
|
+
});
|
|
2528
|
+
return derived;
|
|
2529
|
+
})();
|
|
2530
|
+
const router = new ModelRouter(health, initialFallbackOrder);
|
|
2531
|
+
const retryPolicy = new RetryPolicy({
|
|
2532
|
+
maxAttempts: options.retry?.maxAttemptsPerModel ?? 3,
|
|
2533
|
+
baseDelayMs: options.retry?.baseDelayMs ?? 500,
|
|
2534
|
+
maxDelayMs: options.retry?.maxDelayMs ?? 3e4,
|
|
2535
|
+
strategy: options.retry?.strategy ?? "exponential",
|
|
2536
|
+
jitter: options.retry?.jitter ?? "equal"
|
|
2537
|
+
});
|
|
2538
|
+
const defaultTimeoutMs = options.timeoutMs ?? 6e4;
|
|
2539
|
+
function getAdapter(ref) {
|
|
2540
|
+
const adapter = adapterByProvider.get(ref.provider);
|
|
2541
|
+
if (!adapter) throw new ModelError(`No adapter for provider: ${ref.provider}`, {
|
|
2542
|
+
code: "adapter_missing",
|
|
2543
|
+
retryable: false
|
|
2544
|
+
});
|
|
2545
|
+
return adapter;
|
|
2546
|
+
}
|
|
2547
|
+
function isFeatureSupported(ref, featureId) {
|
|
2548
|
+
const adapter = adapterByProvider.get(ref.provider);
|
|
2549
|
+
if (!adapter?.supportsFeature) return false;
|
|
2550
|
+
return adapter.supportsFeature(ref.modelId, featureId);
|
|
2551
|
+
}
|
|
2552
|
+
async function* stream(requestIn) {
|
|
2553
|
+
const now = Date.now();
|
|
2554
|
+
const requestId = requestIn.requestId || createRequestId("req");
|
|
2555
|
+
const order = requestIn.model ? [requestIn.model] : router.select({
|
|
2556
|
+
now,
|
|
2557
|
+
requiredFeatures: requestIn.features,
|
|
2558
|
+
isFeatureSupported
|
|
2559
|
+
});
|
|
2560
|
+
let lastError;
|
|
2561
|
+
const toolCallAccumulator = /* @__PURE__ */ new Map();
|
|
2562
|
+
for (const modelRef of order) {
|
|
2563
|
+
const adapter = getAdapter(modelRef);
|
|
2564
|
+
const timeoutMs = requestIn.timeoutMs ?? defaultTimeoutMs;
|
|
2565
|
+
const featureGrants = computeFeatureGrants({
|
|
2566
|
+
modelId: modelRef.modelId,
|
|
2567
|
+
featureRequests: requestIn.features,
|
|
2568
|
+
supportsFeature: (fid) => adapter.supportsFeature?.(modelRef.modelId, fid) ?? false
|
|
2569
|
+
});
|
|
2570
|
+
if (Object.entries(requestIn.features || {}).some(([fid, req$1]) => req$1?.required && !featureGrants[fid]?.granted)) continue;
|
|
2571
|
+
const controller = new AbortController();
|
|
2572
|
+
const signal = requestIn.signal;
|
|
2573
|
+
const timeout = setTimeout(() => controller.abort(), timeoutMs);
|
|
2574
|
+
const abortListener = () => controller.abort();
|
|
2575
|
+
if (signal) if (signal.aborted) controller.abort();
|
|
2576
|
+
else signal.addEventListener("abort", abortListener);
|
|
2577
|
+
const req = {
|
|
2578
|
+
requestId,
|
|
2579
|
+
model: modelRef,
|
|
2580
|
+
input: requestIn.input,
|
|
2581
|
+
messages: requestIn.messages,
|
|
2582
|
+
tools: requestIn.tools,
|
|
2583
|
+
features: requestIn.features,
|
|
2584
|
+
stream: requestIn.stream ?? true,
|
|
2585
|
+
signal: controller.signal,
|
|
2586
|
+
timeoutMs
|
|
2587
|
+
};
|
|
2588
|
+
try {
|
|
2589
|
+
for (let attempt = 1; attempt <= retryPolicy.maxAttempts; attempt++) try {
|
|
2590
|
+
for await (const ev of adapter.stream({
|
|
2591
|
+
request: req,
|
|
2592
|
+
featureGrants,
|
|
2593
|
+
featureRequests: requestIn.features
|
|
2594
|
+
})) if (ev.type === "delta") {
|
|
2595
|
+
if (ev.chunk.kind === "text") yield {
|
|
2596
|
+
type: "text_delta",
|
|
2597
|
+
delta: ev.chunk.text
|
|
2598
|
+
};
|
|
2599
|
+
else if (ev.chunk.kind === "thinking_delta") yield {
|
|
2600
|
+
type: ev.chunk.kind,
|
|
2601
|
+
content: ev.chunk.text
|
|
2602
|
+
};
|
|
2603
|
+
else if (ev.chunk.kind === "thinking_end" || ev.chunk.kind === "thinking_start") yield { type: ev.chunk.kind };
|
|
2604
|
+
else if (ev.chunk.kind === "tool_call_delta") {
|
|
2605
|
+
const existing = toolCallAccumulator.get(ev.chunk.callId) ?? { argsText: "" };
|
|
2606
|
+
if (ev.chunk.toolId) existing.toolId = ev.chunk.toolId;
|
|
2607
|
+
if (ev.chunk.argsTextDelta) existing.argsText += ev.chunk.argsTextDelta;
|
|
2608
|
+
toolCallAccumulator.set(ev.chunk.callId, existing);
|
|
2609
|
+
}
|
|
2610
|
+
} else if (ev.type === "response_end") {
|
|
2611
|
+
for (const [callId, data] of toolCallAccumulator) if (data.toolId) yield {
|
|
2612
|
+
type: "tool_call",
|
|
2613
|
+
toolCall: {
|
|
2614
|
+
id: callId,
|
|
2615
|
+
type: "function",
|
|
2616
|
+
function: {
|
|
2617
|
+
name: data.toolId,
|
|
2618
|
+
arguments: data.argsText
|
|
2619
|
+
}
|
|
2620
|
+
}
|
|
2621
|
+
};
|
|
2622
|
+
toolCallAccumulator.clear();
|
|
2623
|
+
if (ev.usage && typeof ev.usage === "object") {
|
|
2624
|
+
const usage = ev.usage;
|
|
2625
|
+
if (usage.prompt_tokens || usage.completion_tokens || usage.total_tokens) yield {
|
|
2626
|
+
type: "usage",
|
|
2627
|
+
usage: {
|
|
2628
|
+
promptTokens: usage.prompt_tokens ?? 0,
|
|
2629
|
+
completionTokens: usage.completion_tokens ?? 0,
|
|
2630
|
+
totalTokens: usage.total_tokens ?? 0
|
|
2631
|
+
}
|
|
2632
|
+
};
|
|
2633
|
+
}
|
|
2634
|
+
yield { type: "done" };
|
|
2635
|
+
} else yield ev;
|
|
2636
|
+
health.markSuccess(modelRef);
|
|
2637
|
+
return;
|
|
2638
|
+
} catch (err) {
|
|
2639
|
+
lastError = err;
|
|
2640
|
+
const modelError = normalizeModelError(err);
|
|
2641
|
+
if (!modelError.retryable || !retryPolicy.canRetry(attempt)) throw modelError;
|
|
2642
|
+
const delayMs = retryPolicy.getDelay(attempt);
|
|
2643
|
+
options.retry?.onRetry?.({
|
|
2644
|
+
attempt,
|
|
2645
|
+
maxAttempts: retryPolicy.maxAttempts,
|
|
2646
|
+
delayMs,
|
|
2647
|
+
error: {
|
|
2648
|
+
code: modelError.code,
|
|
2649
|
+
message: modelError.message,
|
|
2650
|
+
retryable: modelError.retryable
|
|
2651
|
+
},
|
|
2652
|
+
model: modelRef,
|
|
2653
|
+
request: req
|
|
2654
|
+
});
|
|
2655
|
+
await HttpUtils.sleep(delayMs, controller.signal);
|
|
2656
|
+
}
|
|
2657
|
+
} catch (err) {
|
|
2658
|
+
const modelError = normalizeModelError(err);
|
|
2659
|
+
health.markFailure(modelRef, {
|
|
2660
|
+
now: Date.now(),
|
|
2661
|
+
baseDelayMs: retryPolicy.baseDelayMs,
|
|
2662
|
+
maxDelayMs: retryPolicy.maxDelayMs,
|
|
2663
|
+
code: modelError.code,
|
|
2664
|
+
message: modelError.message
|
|
2665
|
+
});
|
|
2666
|
+
yield {
|
|
2667
|
+
type: "error",
|
|
2668
|
+
requestId,
|
|
2669
|
+
error: {
|
|
2670
|
+
code: modelError.code,
|
|
2671
|
+
message: modelError.message,
|
|
2672
|
+
retryable: modelError.retryable
|
|
2673
|
+
}
|
|
2674
|
+
};
|
|
2675
|
+
lastError = modelError;
|
|
2676
|
+
continue;
|
|
2677
|
+
} finally {
|
|
2678
|
+
clearTimeout(timeout);
|
|
2679
|
+
if (signal) signal.removeEventListener("abort", abortListener);
|
|
2680
|
+
}
|
|
2681
|
+
}
|
|
2682
|
+
throw normalizeModelError(lastError || new ModelError("All models failed", { code: "all_models_failed" }));
|
|
2683
|
+
}
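// Illustrative sketch (not part of the published bundle): the retry loop above only relies
// on `retryPolicy` exposing `maxAttempts`, `canRetry(attempt)`, `getDelay(attempt)`,
// `baseDelayMs` and `maxDelayMs`. The object below is one minimal exponential-backoff shape
// that would satisfy those call sites; the real policy is constructed elsewhere in the
// package, so the names and defaults here are assumptions.
const exampleRetryPolicy = {
  maxAttempts: 3,
  baseDelayMs: 500,
  maxDelayMs: 8e3,
  canRetry(attempt) {
    return attempt < this.maxAttempts;
  },
  getDelay(attempt) {
    // Exponential backoff capped at maxDelayMs: 500ms, 1s, 2s, ...
    return Math.min(this.baseDelayMs * 2 ** (attempt - 1), this.maxDelayMs);
  }
};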
|
|
2684
|
+
async function run(request) {
|
|
2685
|
+
let text = "";
|
|
2686
|
+
let stopReason = "error";
|
|
2687
|
+
let usage;
|
|
2688
|
+
let model;
|
|
2689
|
+
let requestId = request.requestId || "";
|
|
2690
|
+
let featureGrants = {};
|
|
2691
|
+
for await (const ev of stream({
|
|
2692
|
+
...request,
|
|
2693
|
+
stream: true
|
|
2694
|
+
})) if (ev.type === "response_start") {
|
|
2695
|
+
requestId = ev.requestId;
|
|
2696
|
+
model = ev.model;
|
|
2697
|
+
featureGrants = ev.featureGrants;
|
|
2698
|
+
} else if (ev.type === "delta" && ev.chunk.kind === "text") text += ev.chunk.text;
|
|
2699
|
+
else if (ev.type === "text_delta") text += ev.delta;
|
|
2700
|
+
else if (ev.type === "response_end") {
|
|
2701
|
+
stopReason = ev.stopReason;
|
|
2702
|
+
usage = ev.usage;
|
|
2703
|
+
} else if (ev.type === "error") stopReason = "error";
|
|
2704
|
+
if (!model) throw new ModelError("Missing response_start from adapter", {
|
|
2705
|
+
code: "protocol_error",
|
|
2706
|
+
retryable: false
|
|
2707
|
+
});
|
|
2708
|
+
return {
|
|
2709
|
+
requestId,
|
|
2710
|
+
model,
|
|
2711
|
+
text,
|
|
2712
|
+
stopReason,
|
|
2713
|
+
usage,
|
|
2714
|
+
featureGrants
|
|
2715
|
+
};
|
|
2716
|
+
}
|
|
2717
|
+
return {
|
|
2718
|
+
get modelId() {
|
|
2719
|
+
return initialFallbackOrder[0]?.modelId ?? "unknown";
|
|
2720
|
+
},
|
|
2721
|
+
setModelId(modelId) {
|
|
2722
|
+
const primary = initialFallbackOrder[0];
|
|
2723
|
+
if (!primary) throw new ModelError("No primary model to update", {
|
|
2724
|
+
code: "no_primary_model",
|
|
2725
|
+
retryable: false
|
|
2726
|
+
});
|
|
2727
|
+
initialFallbackOrder[0] = {
|
|
2728
|
+
provider: primary.provider,
|
|
2729
|
+
modelId
|
|
2730
|
+
};
|
|
2731
|
+
},
|
|
2732
|
+
invoke: async (messages, options$1) => {
|
|
2733
|
+
const result = await run({
|
|
2734
|
+
messages,
|
|
2735
|
+
tools: options$1?.tools
|
|
2736
|
+
});
|
|
2737
|
+
return {
|
|
2738
|
+
message: {
|
|
2739
|
+
role: "assistant",
|
|
2740
|
+
content: result.text
|
|
2741
|
+
},
|
|
2742
|
+
usage: result.usage
|
|
2743
|
+
};
|
|
2744
|
+
},
|
|
2745
|
+
stream: ((messagesOrRequest, options$1) => {
|
|
2746
|
+
if (Array.isArray(messagesOrRequest)) return stream({
|
|
2747
|
+
messages: messagesOrRequest,
|
|
2748
|
+
tools: options$1?.tools
|
|
2749
|
+
});
|
|
2750
|
+
return stream(messagesOrRequest);
|
|
2751
|
+
}),
|
|
2752
|
+
run
|
|
2753
|
+
};
|
|
2754
|
+
}
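// Hedged usage sketch (not part of the published bundle): one way to consume the client
// object returned by the factory above. `client` stands in for whatever that factory
// returns; the `{ role, content }` message shape follows the OpenAI-style convention used
// throughout this file, and the wrapper function itself is invented for illustration.
async function exampleClientUsage(client) {
  // Aggregated call: resolves with { requestId, model, text, stopReason, usage, featureGrants }.
  const result = await client.run({ messages: [{ role: "user", content: "Hello" }] });
  console.log(result.text, result.usage);
  // The same request surfaced as incremental events via the streaming entry point.
  for await (const ev of client.stream([{ role: "user", content: "Hello" }])) {
    if (ev.type === "text_delta") process.stdout.write(ev.delta);
    else if (ev.type === "tool_call") console.log("tool call:", ev.toolCall.function.name);
  }
}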
function normalizeModelError(err) {
  if (err instanceof ModelError) return err;
  if (err && typeof err === "object") {
    const status = typeof err.status === "number" ? err.status : void 0;
    const code = typeof err.code === "string" ? err.code : status ? `http_${status}` : "unknown_error";
    return new ModelError(typeof err.message === "string" ? err.message : "Unknown error", {
      code,
      retryable: status ? HttpUtils.isRetryableStatus(status) : false,
      status
    });
  }
  return new ModelError(String(err || "Unknown error"), {
    code: "unknown_error",
    retryable: false
  });
}

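// Small illustration (not part of the published bundle): an HTTP-style error object run
// through the function above produces a ModelError constructed with code "http_429",
// status 429, and a retryable flag taken from HttpUtils.isRetryableStatus(429).
const exampleNormalizedError = normalizeModelError({ status: 429, message: "rate limited" });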
//#endregion
//#region src/model/openai/createOpenAIAdapter.ts
/**
 * Convert Message content to OpenAI format
 * Handles string, ContentBlock, and ContentBlock[]
 */
function toOpenAIContent(content) {
  if (typeof content === "string") return content.trim().length === 0 ? "" : content;
  if (content === null || content === void 0) return "";
  if (typeof content === "object" && "type" in content) {
    const block = content;
    if (block.type === "text" && block.text) return block.text;
    if (block.type === "image" && block.source?.data) return [{
      type: "image_url",
      image_url: { url: block.source.data }
    }];
  }
  if (Array.isArray(content)) {
    const result = [];
    for (const item of content) if (typeof item === "string") result.push({
      type: "text",
      text: item
    });
    else if (item && typeof item === "object" && "type" in item) {
      const block = item;
      if (block.type === "text" && block.text) result.push({
        type: "text",
        text: block.text
      });
      else if (block.type === "image" && block.source?.data) result.push({
        type: "image_url",
        image_url: { url: block.source.data }
      });
    }
    return result.length > 0 ? result : "";
  }
  return String(content);
}
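// Illustration (not part of the published bundle): the mapping performed by toOpenAIContent
// above on a mixed MCP-style content array. The data URL is a placeholder, not a value
// taken from the package.
const exampleOpenAIContent = toOpenAIContent([
  { type: "text", text: "Describe this image" },
  { type: "image", source: { data: "data:image/png;base64,AAAA" } }
]);
// => [
//   { type: "text", text: "Describe this image" },
//   { type: "image_url", image_url: { url: "data:image/png;base64,AAAA" } }
// ]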
/**
 * Convert Message to OpenAI format.
 *
 * The main conversion is for the `content` field, which may be in MCP format
 * (ContentBlock) and needs to be converted to OpenAI format (string or array).
 * Other fields like tool_call_id and tool_calls are already OpenAI-compatible.
 */
function toOpenAIMessage(message) {
  return {
    ...message,
    content: toOpenAIContent(message.content)
  };
}
function createOpenAIAdapter(options = {}) {
|
|
2824
|
+
const baseUrl = options.baseUrl;
|
|
2825
|
+
const apiKeySecretName = options.apiKeySecretName || "OPENAI_API_KEY";
|
|
2826
|
+
const secretProvider = options.secretProvider;
|
|
2827
|
+
function getApiKey() {
|
|
2828
|
+
const key = secretProvider?.get(apiKeySecretName);
|
|
2829
|
+
if (!key) throw new ModelError(`Missing secret: ${apiKeySecretName}`, {
|
|
2830
|
+
code: "missing_api_key",
|
|
2831
|
+
retryable: false
|
|
2832
|
+
});
|
|
2833
|
+
return key;
|
|
2834
|
+
}
|
|
2835
|
+
function createClient() {
|
|
2836
|
+
const apiKey = getApiKey();
|
|
2837
|
+
if (typeof baseUrl === "string" && /\/chat\/completions\/?$/.test(baseUrl)) return new openai.default({
|
|
2838
|
+
apiKey,
|
|
2839
|
+
organization: options.organization,
|
|
2840
|
+
project: options.project,
|
|
2841
|
+
baseURL: "https://api.openai.com/v1",
|
|
2842
|
+
fetch: (input, init) => {
|
|
2843
|
+
return fetch(baseUrl, init);
|
|
2844
|
+
}
|
|
2845
|
+
});
|
|
2846
|
+
return new openai.default({
|
|
2847
|
+
apiKey,
|
|
2848
|
+
organization: options.organization,
|
|
2849
|
+
project: options.project,
|
|
2850
|
+
baseURL: baseUrl || void 0
|
|
2851
|
+
});
|
|
2852
|
+
}
|
|
2853
|
+
function toModelRef(request) {
|
|
2854
|
+
return {
|
|
2855
|
+
provider: "openai",
|
|
2856
|
+
modelId: request.model.modelId
|
|
2857
|
+
};
|
|
2858
|
+
}
|
|
2859
|
+
function toModelError(err) {
|
|
2860
|
+
if (err instanceof ModelError) return err;
|
|
2861
|
+
const status = typeof err?.status === "number" ? err.status : void 0;
|
|
2862
|
+
return new ModelError(typeof err?.message === "string" ? err.message : "OpenAI error", {
|
|
2863
|
+
code: typeof err?.code === "string" ? err.code : status ? `openai_http_${status}` : "openai_error",
|
|
2864
|
+
retryable: status ? HttpUtils.isRetryableStatus(status) : false,
|
|
2865
|
+
status
|
|
2866
|
+
});
|
|
2867
|
+
}
|
|
2868
|
+
function supportsFeature(_modelId, _featureId) {
|
|
2869
|
+
return false;
|
|
2870
|
+
}
|
|
2871
|
+
async function* stream(args) {
|
|
2872
|
+
const { request, featureGrants } = args;
|
|
2873
|
+
const requestId = request.requestId || createRequestId("req");
|
|
2874
|
+
const client = createClient();
|
|
2875
|
+
const model = request.model.modelId;
|
|
2876
|
+
const shouldStream = request.stream !== false;
|
|
2877
|
+
const messages = [];
|
|
2878
|
+
if (request.messages && Array.isArray(request.messages)) messages.push(...request.messages.map(toOpenAIMessage));
|
|
2879
|
+
else {
|
|
2880
|
+
if (typeof request.instructions === "string" && request.instructions.length > 0) messages.push({
|
|
2881
|
+
role: "system",
|
|
2882
|
+
content: request.instructions
|
|
2883
|
+
});
|
|
2884
|
+
messages.push({
|
|
2885
|
+
role: "user",
|
|
2886
|
+
content: request.input
|
|
2887
|
+
});
|
|
2888
|
+
}
|
|
2889
|
+
const reasoningEffort = (() => {
|
|
2890
|
+
const effort = request.reasoning?.effort;
|
|
2891
|
+
if (typeof effort !== "string") return void 0;
|
|
2892
|
+
if (effort === "none" || effort === "minimal" || effort === "low" || effort === "medium" || effort === "high" || effort === "xhigh") return effort;
|
|
2893
|
+
})();
|
|
2894
|
+
const body = {
|
|
2895
|
+
model,
|
|
2896
|
+
messages,
|
|
2897
|
+
stream: shouldStream,
|
|
2898
|
+
stream_options: shouldStream ? { include_usage: true } : void 0,
|
|
2899
|
+
metadata: request.metadata ?? void 0,
|
|
2900
|
+
reasoning_effort: reasoningEffort,
|
|
2901
|
+
max_completion_tokens: request.maxOutputTokens ?? void 0,
|
|
2902
|
+
temperature: request.temperature ?? void 0,
|
|
2903
|
+
top_p: request.topP ?? void 0,
|
|
2904
|
+
presence_penalty: request.presencePenalty ?? void 0,
|
|
2905
|
+
frequency_penalty: request.frequencyPenalty ?? void 0,
|
|
2906
|
+
seed: request.seed ?? void 0
|
|
2907
|
+
};
|
|
2908
|
+
if (request.tools && Array.isArray(request.tools) && request.tools.length > 0) body.tools = request.tools;
|
|
2909
|
+
try {
|
|
2910
|
+
yield {
|
|
2911
|
+
type: "response_start",
|
|
2912
|
+
requestId,
|
|
2913
|
+
model: toModelRef(request),
|
|
2914
|
+
featureGrants
|
|
2915
|
+
};
|
|
2916
|
+
if (!shouldStream) {
|
|
2917
|
+
const resp = await client.chat.completions.create({
|
|
2918
|
+
...body,
|
|
2919
|
+
stream: false
|
|
2920
|
+
}, {
|
|
2921
|
+
signal: request.signal,
|
|
2922
|
+
timeout: request.timeoutMs
|
|
2923
|
+
});
|
|
2924
|
+
const choice = Array.isArray(resp?.choices) ? resp.choices[0] : void 0;
|
|
2925
|
+
const message = choice?.message;
|
|
2926
|
+
const reasoningContent = typeof message?.reasoning_content === "string" ? message.reasoning_content : "";
|
|
2927
|
+
if (reasoningContent.length > 0) {
|
|
2928
|
+
yield {
|
|
2929
|
+
type: "delta",
|
|
2930
|
+
requestId,
|
|
2931
|
+
chunk: { kind: "thinking_start" }
|
|
2932
|
+
};
|
|
2933
|
+
yield {
|
|
2934
|
+
type: "delta",
|
|
2935
|
+
requestId,
|
|
2936
|
+
chunk: {
|
|
2937
|
+
kind: "thinking_delta",
|
|
2938
|
+
text: reasoningContent
|
|
2939
|
+
}
|
|
2940
|
+
};
|
|
2941
|
+
yield {
|
|
2942
|
+
type: "delta",
|
|
2943
|
+
requestId,
|
|
2944
|
+
chunk: { kind: "thinking_end" }
|
|
2945
|
+
};
|
|
2946
|
+
}
|
|
2947
|
+
const text = typeof message?.content === "string" ? message.content : "";
|
|
2948
|
+
if (text.length > 0) yield {
|
|
2949
|
+
type: "delta",
|
|
2950
|
+
requestId,
|
|
2951
|
+
chunk: {
|
|
2952
|
+
kind: "text",
|
|
2953
|
+
text
|
|
2954
|
+
}
|
|
2955
|
+
};
|
|
2956
|
+
const toolCalls = Array.isArray(message?.tool_calls) ? message.tool_calls : [];
|
|
2957
|
+
for (let i = 0; i < toolCalls.length; i++) {
|
|
2958
|
+
const tc = toolCalls[i];
|
|
2959
|
+
const callId = typeof tc?.id === "string" ? tc.id : `call_${i}`;
|
|
2960
|
+
const toolId = typeof tc?.function?.name === "string" ? tc.function.name : void 0;
|
|
2961
|
+
const args$1 = typeof tc?.function?.arguments === "string" ? tc.function.arguments : void 0;
|
|
2962
|
+
if (args$1 || toolId) yield {
|
|
2963
|
+
type: "delta",
|
|
2964
|
+
requestId,
|
|
2965
|
+
chunk: {
|
|
2966
|
+
kind: "tool_call_delta",
|
|
2967
|
+
callId,
|
|
2968
|
+
toolId,
|
|
2969
|
+
argsTextDelta: args$1
|
|
2970
|
+
}
|
|
2971
|
+
};
|
|
2972
|
+
}
|
|
2973
|
+
yield {
|
|
2974
|
+
type: "response_end",
|
|
2975
|
+
requestId,
|
|
2976
|
+
stopReason: toStopReason(choice?.finish_reason),
|
|
2977
|
+
usage: resp?.usage
|
|
2978
|
+
};
|
|
2979
|
+
return;
|
|
2980
|
+
}
|
|
2981
|
+
const stream$1 = await client.chat.completions.create({
|
|
2982
|
+
...body,
|
|
2983
|
+
stream: true
|
|
2984
|
+
}, {
|
|
2985
|
+
signal: request.signal,
|
|
2986
|
+
timeout: request.timeoutMs
|
|
2987
|
+
});
|
|
2988
|
+
let finishReason = null;
|
|
2989
|
+
let lastUsage = void 0;
|
|
2990
|
+
const callIdByIndex = /* @__PURE__ */ new Map();
|
|
2991
|
+
let inThinkingPhase = false;
|
|
2992
|
+
for await (const chunk of stream$1) {
|
|
2993
|
+
if (chunk?.usage != null) lastUsage = chunk.usage;
|
|
2994
|
+
const choices = Array.isArray(chunk?.choices) ? chunk.choices : [];
|
|
2995
|
+
for (const choice of choices) {
|
|
2996
|
+
if (choice?.finish_reason != null) finishReason = choice.finish_reason;
|
|
2997
|
+
const delta = choice?.delta;
|
|
2998
|
+
const reasoningContent = delta?.reasoning_content;
|
|
2999
|
+
if (typeof reasoningContent === "string" && reasoningContent.length > 0) {
|
|
3000
|
+
if (!inThinkingPhase) {
|
|
3001
|
+
inThinkingPhase = true;
|
|
3002
|
+
yield {
|
|
3003
|
+
type: "delta",
|
|
3004
|
+
requestId,
|
|
3005
|
+
chunk: { kind: "thinking_start" }
|
|
3006
|
+
};
|
|
3007
|
+
}
|
|
3008
|
+
yield {
|
|
3009
|
+
type: "delta",
|
|
3010
|
+
requestId,
|
|
3011
|
+
chunk: {
|
|
3012
|
+
kind: "thinking_delta",
|
|
3013
|
+
text: reasoningContent
|
|
3014
|
+
}
|
|
3015
|
+
};
|
|
3016
|
+
}
|
|
3017
|
+
const content = delta?.content;
|
|
3018
|
+
if (typeof content === "string" && content.length > 0) {
|
|
3019
|
+
if (inThinkingPhase) {
|
|
3020
|
+
inThinkingPhase = false;
|
|
3021
|
+
yield {
|
|
3022
|
+
type: "delta",
|
|
3023
|
+
requestId,
|
|
3024
|
+
chunk: { kind: "thinking_end" }
|
|
3025
|
+
};
|
|
3026
|
+
}
|
|
3027
|
+
yield {
|
|
3028
|
+
type: "delta",
|
|
3029
|
+
requestId,
|
|
3030
|
+
chunk: {
|
|
3031
|
+
kind: "text",
|
|
3032
|
+
text: content
|
|
3033
|
+
}
|
|
3034
|
+
};
|
|
3035
|
+
}
|
|
3036
|
+
const toolCalls = Array.isArray(delta?.tool_calls) ? delta.tool_calls : [];
|
|
3037
|
+
for (const tc of toolCalls) {
|
|
3038
|
+
const index = typeof tc?.index === "number" ? tc.index : 0;
|
|
3039
|
+
const callId = callIdByIndex.get(index) || (typeof tc?.id === "string" ? tc.id : `call_${index}`);
|
|
3040
|
+
callIdByIndex.set(index, callId);
|
|
3041
|
+
const toolId = typeof tc?.function?.name === "string" ? tc.function.name : void 0;
|
|
3042
|
+
const argsDelta = typeof tc?.function?.arguments === "string" ? tc.function.arguments : void 0;
|
|
3043
|
+
if (toolId || argsDelta) yield {
|
|
3044
|
+
type: "delta",
|
|
3045
|
+
requestId,
|
|
3046
|
+
chunk: {
|
|
3047
|
+
kind: "tool_call_delta",
|
|
3048
|
+
callId,
|
|
3049
|
+
toolId,
|
|
3050
|
+
argsTextDelta: argsDelta
|
|
3051
|
+
}
|
|
3052
|
+
};
|
|
3053
|
+
}
|
|
3054
|
+
}
|
|
3055
|
+
}
|
|
3056
|
+
if (inThinkingPhase) yield {
|
|
3057
|
+
type: "delta",
|
|
3058
|
+
requestId,
|
|
3059
|
+
chunk: { kind: "thinking_end" }
|
|
3060
|
+
};
|
|
3061
|
+
yield {
|
|
3062
|
+
type: "response_end",
|
|
3063
|
+
requestId,
|
|
3064
|
+
stopReason: toStopReason(finishReason),
|
|
3065
|
+
usage: lastUsage
|
|
3066
|
+
};
|
|
3067
|
+
} catch (err) {
|
|
3068
|
+
if (err?.name === "AbortError") throw new ModelError("Aborted", {
|
|
3069
|
+
code: "aborted",
|
|
3070
|
+
retryable: false
|
|
3071
|
+
});
|
|
3072
|
+
throw toModelError(err);
|
|
3073
|
+
}
|
|
3074
|
+
}
|
|
3075
|
+
return {
|
|
3076
|
+
provider: "openai",
|
|
3077
|
+
defaultModelId: options.defaultModelId,
|
|
3078
|
+
supportsFeature,
|
|
3079
|
+
stream
|
|
3080
|
+
};
|
|
3081
|
+
}
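// Hedged usage sketch (not part of the published bundle): driving the adapter returned by
// createOpenAIAdapter directly. The secretProvider below is a stand-in that reads the key
// from the environment, the model id is an arbitrary OpenAI chat model, and in the package
// the adapter is normally consumed through the fallback client rather than by hand.
async function exampleAdapterUsage() {
  const adapter = createOpenAIAdapter({
    secretProvider: { get: (name) => process.env[name] }
  });
  const events = adapter.stream({
    request: {
      model: { provider: "openai", modelId: "gpt-4o-mini" },
      messages: [{ role: "user", content: "Say hi" }],
      stream: true
    },
    featureGrants: {}
  });
  for await (const ev of events) {
    if (ev.type === "delta" && ev.chunk.kind === "text") process.stdout.write(ev.chunk.text);
    else if (ev.type === "response_end") console.log("\nstop reason:", ev.stopReason);
  }
}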
function toStopReason(finishReason) {
  if (finishReason === "tool_calls" || finishReason === "function_call") return "tool_call";
  if (finishReason === "length") return "length";
  if (finishReason === "content_filter") return "error";
  if (finishReason === "stop" || finishReason == null) return "final";
  return "final";
}

//#endregion
//#region src/session/base.ts
/**
|
|
3093
|
+
* Abstract base class for a session.
|
|
3094
|
+
*
|
|
3095
|
+
* A session represents a single conversation instance with an agent.
|
|
3096
|
+
* One agent can have multiple concurrent sessions.
|
|
3097
|
+
*/
|
|
3098
|
+
var BaseSession = class {
|
|
3099
|
+
/**
|
|
3100
|
+
* Set model override for this session
|
|
3101
|
+
*
|
|
3102
|
+
* @param model - Model configuration to use for this session
|
|
3103
|
+
*/
|
|
3104
|
+
setModelOverride(model) {
|
|
3105
|
+
if (!this.configOverride) this.configOverride = {};
|
|
3106
|
+
this.configOverride.model = model;
|
|
3107
|
+
this.updatedAt = Date.now();
|
|
3108
|
+
}
|
|
3109
|
+
/**
|
|
3110
|
+
* Clear model override (use agent's default model)
|
|
3111
|
+
*/
|
|
3112
|
+
clearModelOverride() {
|
|
3113
|
+
if (this.configOverride) {
|
|
3114
|
+
delete this.configOverride.model;
|
|
3115
|
+
this.updatedAt = Date.now();
|
|
3116
|
+
}
|
|
3117
|
+
}
|
|
3118
|
+
/**
|
|
3119
|
+
* Set system prompt override for this session
|
|
3120
|
+
*
|
|
3121
|
+
* @param systemPrompt - System prompt to use for this session
|
|
3122
|
+
*/
|
|
3123
|
+
setSystemPromptOverride(systemPrompt) {
|
|
3124
|
+
if (!this.configOverride) this.configOverride = {};
|
|
3125
|
+
this.configOverride.systemPromptOverride = systemPrompt;
|
|
3126
|
+
this.updatedAt = Date.now();
|
|
3127
|
+
}
|
|
3128
|
+
/**
|
|
3129
|
+
* Clear system prompt override
|
|
3130
|
+
*/
|
|
3131
|
+
clearSystemPromptOverride() {
|
|
3132
|
+
if (this.configOverride) {
|
|
3133
|
+
delete this.configOverride.systemPromptOverride;
|
|
3134
|
+
this.updatedAt = Date.now();
|
|
3135
|
+
}
|
|
3136
|
+
}
|
|
3137
|
+
/**
|
|
3138
|
+
* Disable specific tools for this session
|
|
3139
|
+
*
|
|
3140
|
+
* @param toolNames - Names of tools to disable
|
|
3141
|
+
*/
|
|
3142
|
+
disableTools(toolNames) {
|
|
3143
|
+
if (!this.configOverride) this.configOverride = {};
|
|
3144
|
+
this.configOverride.disabledTools = [...this.configOverride.disabledTools ?? [], ...toolNames];
|
|
3145
|
+
this.updatedAt = Date.now();
|
|
3146
|
+
}
|
|
3147
|
+
/**
|
|
3148
|
+
* Re-enable all tools for this session
|
|
3149
|
+
*/
|
|
3150
|
+
enableAllTools() {
|
|
3151
|
+
if (this.configOverride) {
|
|
3152
|
+
delete this.configOverride.disabledTools;
|
|
3153
|
+
this.updatedAt = Date.now();
|
|
3154
|
+
}
|
|
3155
|
+
}
|
|
3156
|
+
/**
|
|
3157
|
+
* Update session status
|
|
3158
|
+
*
|
|
3159
|
+
* @param status - New status
|
|
3160
|
+
* @param errorMessage - Error message (if status is 'error')
|
|
3161
|
+
*/
|
|
3162
|
+
setStatus(status, errorMessage) {
|
|
3163
|
+
this.status = status;
|
|
3164
|
+
this.errorMessage = errorMessage;
|
|
3165
|
+
this.updatedAt = Date.now();
|
|
3166
|
+
}
|
|
3167
|
+
/**
|
|
3168
|
+
* Mark session as active (update lastActiveAt)
|
|
3169
|
+
*/
|
|
3170
|
+
markActive() {
|
|
3171
|
+
this.lastActiveAt = Date.now();
|
|
3172
|
+
this.updatedAt = Date.now();
|
|
3173
|
+
}
|
|
3174
|
+
/**
|
|
3175
|
+
* Add a message to the conversation history
|
|
3176
|
+
*
|
|
3177
|
+
* @param message - Message to add
|
|
3178
|
+
*/
|
|
3179
|
+
addMessage(message) {
|
|
3180
|
+
this.messages.push(message);
|
|
3181
|
+
this.markActive();
|
|
3182
|
+
}
|
|
3183
|
+
/**
|
|
3184
|
+
* Get preview of the last message (truncated for display)
|
|
3185
|
+
*
|
|
3186
|
+
* @param maxLength - Maximum length of preview
|
|
3187
|
+
* @returns Preview string or undefined if no messages
|
|
3188
|
+
*/
|
|
3189
|
+
getLastMessagePreview(maxLength = 100) {
|
|
3190
|
+
if (this.messages.length === 0) return void 0;
|
|
3191
|
+
const lastMessage = this.messages[this.messages.length - 1];
|
|
3192
|
+
const content = typeof lastMessage.content === "string" ? lastMessage.content : JSON.stringify(lastMessage.content);
|
|
3193
|
+
return content.length > maxLength ? `${content.substring(0, maxLength)}...` : content;
|
|
3194
|
+
}
|
|
3195
|
+
/**
|
|
3196
|
+
* Add usage statistics
|
|
3197
|
+
*
|
|
3198
|
+
* @param usage - Usage to add
|
|
3199
|
+
* @param usage.promptTokens - Number of prompt tokens
|
|
3200
|
+
* @param usage.completionTokens - Number of completion tokens
|
|
3201
|
+
* @param usage.totalTokens - Total number of tokens
|
|
3202
|
+
*/
|
|
3203
|
+
addUsage(usage) {
|
|
3204
|
+
this.usage.promptTokens += usage.promptTokens;
|
|
3205
|
+
this.usage.completionTokens += usage.completionTokens;
|
|
3206
|
+
this.usage.totalTokens += usage.totalTokens;
|
|
3207
|
+
this.updatedAt = Date.now();
|
|
3208
|
+
}
|
|
3209
|
+
/**
|
|
3210
|
+
* Record a response with its duration
|
|
3211
|
+
*
|
|
3212
|
+
* @param durationMs - Response duration in milliseconds
|
|
3213
|
+
*/
|
|
3214
|
+
recordResponse(durationMs) {
|
|
3215
|
+
const currentTotal = (this.avgResponseTime ?? 0) * this.responseCount;
|
|
3216
|
+
this.responseCount++;
|
|
3217
|
+
this.avgResponseTime = (currentTotal + durationMs) / this.responseCount;
|
|
3218
|
+
this.updatedAt = Date.now();
|
|
3219
|
+
}
|
|
3220
|
+
/**
|
|
3221
|
+
* Increment tool call count
|
|
3222
|
+
*/
|
|
3223
|
+
incrementToolCallCount() {
|
|
3224
|
+
this.toolCallCount++;
|
|
3225
|
+
this.updatedAt = Date.now();
|
|
3226
|
+
}
|
|
3227
|
+
/**
|
|
3228
|
+
* Create a snapshot of the session for persistence
|
|
3229
|
+
*
|
|
3230
|
+
* @returns Session snapshot
|
|
3231
|
+
*/
|
|
3232
|
+
toSnapshot() {
|
|
3233
|
+
const state = {
|
|
3234
|
+
status: this.status,
|
|
3235
|
+
updatedAt: this.updatedAt,
|
|
3236
|
+
lastActiveAt: this.lastActiveAt,
|
|
3237
|
+
title: this.title,
|
|
3238
|
+
errorMessage: this.errorMessage
|
|
3239
|
+
};
|
|
3240
|
+
const context = {
|
|
3241
|
+
messages: [...this.messages],
|
|
3242
|
+
messageCount: this.messages.length,
|
|
3243
|
+
lastMessagePreview: this.getLastMessagePreview(),
|
|
3244
|
+
toolCallCount: this.toolCallCount
|
|
3245
|
+
};
|
|
3246
|
+
const stats = {
|
|
3247
|
+
usage: { ...this.usage },
|
|
3248
|
+
responseCount: this.responseCount,
|
|
3249
|
+
avgResponseTime: this.avgResponseTime
|
|
3250
|
+
};
|
|
3251
|
+
return {
|
|
3252
|
+
id: this.id,
|
|
3253
|
+
agentId: this.agentId,
|
|
3254
|
+
createdAt: this.createdAt,
|
|
3255
|
+
state,
|
|
3256
|
+
configOverride: this.configOverride ? { ...this.configOverride } : void 0,
|
|
3257
|
+
context,
|
|
3258
|
+
stats,
|
|
3259
|
+
metadata: this.metadata ? { ...this.metadata } : void 0
|
|
3260
|
+
};
|
|
3261
|
+
}
|
|
3262
|
+
/**
|
|
3263
|
+
* Restore session state from a snapshot
|
|
3264
|
+
*
|
|
3265
|
+
* @param snapshot - Session snapshot to restore from
|
|
3266
|
+
*/
|
|
3267
|
+
restoreFromSnapshot(snapshot) {
|
|
3268
|
+
this.status = snapshot.state.status;
|
|
3269
|
+
this.updatedAt = snapshot.state.updatedAt;
|
|
3270
|
+
this.lastActiveAt = snapshot.state.lastActiveAt;
|
|
3271
|
+
this.title = snapshot.state.title;
|
|
3272
|
+
this.errorMessage = snapshot.state.errorMessage;
|
|
3273
|
+
this.configOverride = snapshot.configOverride ? { ...snapshot.configOverride } : void 0;
|
|
3274
|
+
this.messages = [...snapshot.context.messages];
|
|
3275
|
+
this.toolCallCount = snapshot.context.toolCallCount;
|
|
3276
|
+
this.usage = { ...snapshot.stats.usage };
|
|
3277
|
+
this.responseCount = snapshot.stats.responseCount;
|
|
3278
|
+
this.avgResponseTime = snapshot.stats.avgResponseTime;
|
|
3279
|
+
this.metadata = snapshot.metadata ? { ...snapshot.metadata } : {};
|
|
3280
|
+
}
|
|
3281
|
+
};
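// Hedged sketch (not part of the published bundle): a minimal in-memory subclass of
// BaseSession showing the snapshot round trip. The bundled base class only carries
// behavior, so the field initialisation below is an assumption made for the example,
// as is the "idle" status string.
class ExampleSession extends BaseSession {
  constructor(id, agentId) {
    super();
    this.id = id;
    this.agentId = agentId;
    this.createdAt = Date.now();
    this.updatedAt = Date.now();
    this.lastActiveAt = Date.now();
    this.status = "idle";
    this.messages = [];
    this.toolCallCount = 0;
    this.responseCount = 0;
    this.usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
  }
}
const exampleSession = new ExampleSession("sess_example", "agent_example");
exampleSession.addMessage({ role: "user", content: "Hello" });
exampleSession.addUsage({ promptTokens: 12, completionTokens: 34, totalTokens: 46 });
const exampleSnapshot = exampleSession.toSnapshot();
// exampleSnapshot.context.messageCount === 1; the same state can later be replayed with
// exampleSession.restoreFromSnapshot(exampleSnapshot).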

//#endregion
//#region src/session/manager.ts
/**
 * Abstract base class for session management.
 *
 * Handles session lifecycle: creation, retrieval, listing, and destruction.
 * Implement this class for different storage backends (e.g., Redis, SQLite, in-memory).
 */
var BaseSessionManager = class {};

//#endregion
//#region src/tool/base.ts
/**
 * Helper to create a text content block for CallToolResult
 */
function textContent(text) {
  return { content: [{
    type: "text",
    text
  }] };
}
/**
 * Helper to create an error result for CallToolResult
 */
function errorContent(error) {
  return {
    content: [{
      type: "text",
      text: error
    }],
    isError: true
  };
}
/**
 * Helper to create an image content block for CallToolResult
 */
function imageContent(data, mimeType) {
  return { content: [{
    type: "image",
    data,
    mimeType
  }] };
}
/**
|
|
3327
|
+
* Abstract base class for tools.
|
|
3328
|
+
*
|
|
3329
|
+
* Tools are capabilities that the agent can invoke during execution.
|
|
3330
|
+
* Implement this class to create custom tools.
|
|
3331
|
+
*
|
|
3332
|
+
* All tool execute() methods must return MCP SDK-compliant CallToolResult:
|
|
3333
|
+
* - content: array of ContentBlock (TextContent, ImageContent, etc.)
|
|
3334
|
+
* - isError: optional boolean to indicate error
|
|
3335
|
+
* - structuredContent: optional structured data object
|
|
3336
|
+
*/
|
|
3337
|
+
var BaseTool = class {
|
|
3338
|
+
/**
|
|
3339
|
+
* Risk level of the tool, used to determine if user approval is required.
|
|
3340
|
+
*
|
|
3341
|
+
* - 'safe': No risk, read-only operations (default)
|
|
3342
|
+
* - 'low': Low risk, minimal side effects
|
|
3343
|
+
* - 'medium': Medium risk, reversible changes
|
|
3344
|
+
* - 'high': High risk, file modifications
|
|
3345
|
+
* - 'critical': Critical risk, arbitrary command execution
|
|
3346
|
+
*/
|
|
3347
|
+
riskLevel = "safe";
|
|
3348
|
+
};
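// Hedged sketch (not part of the published bundle): a minimal custom tool built on BaseTool
// that returns MCP-style CallToolResult values through the textContent/errorContent helpers
// above. The tool name, schema and behavior are invented for the example.
var ExampleEchoTool = class extends BaseTool {
  name = "Echo";
  description = "Echoes the provided text back to the caller.";
  riskLevel = "safe";
  parameters = {
    type: "object",
    properties: { text: {
      type: "string",
      description: "Text to echo back"
    } },
    required: ["text"]
  };
  async execute(args) {
    if (typeof args.text !== "string" || args.text.length === 0) return errorContent("text must be a non-empty string");
    return textContent(args.text);
  }
};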
|
|
3349
|
+
|
|
3350
|
+
//#endregion
|
|
3351
|
+
//#region src/tool/builtin/glob.ts
|
|
3352
|
+
/**
|
|
3353
|
+
* Maximum number of files to return
|
|
3354
|
+
*/
|
|
3355
|
+
const MAX_FILES = 1e3;
|
|
3356
|
+
/**
|
|
3357
|
+
* Maximum depth for recursive search
|
|
3358
|
+
*/
|
|
3359
|
+
const MAX_DEPTH = 20;
|
|
3360
|
+
/**
|
|
3361
|
+
* Convert glob pattern to RegExp
|
|
3362
|
+
*
|
|
3363
|
+
* Supports:
|
|
3364
|
+
* - `*` matches any sequence of characters except `/`
|
|
3365
|
+
* - `**` matches any sequence of characters including `/`
|
|
3366
|
+
* - `?` matches any single character except `/`
|
|
3367
|
+
* - `{a,b}` matches either `a` or `b`
|
|
3368
|
+
* - `[abc]` matches any character in the brackets
|
|
3369
|
+
*/
|
|
3370
|
+
function globToRegex(pattern) {
|
|
3371
|
+
let regexStr = "";
|
|
3372
|
+
let i = 0;
|
|
3373
|
+
while (i < pattern.length) {
|
|
3374
|
+
const char = pattern[i];
|
|
3375
|
+
if (char === "*") if (pattern[i + 1] === "*") if (pattern[i + 2] === "/") {
|
|
3376
|
+
regexStr += "(?:.*\\/)?";
|
|
3377
|
+
i += 3;
|
|
3378
|
+
} else {
|
|
3379
|
+
regexStr += ".*";
|
|
3380
|
+
i += 2;
|
|
3381
|
+
}
|
|
3382
|
+
else {
|
|
3383
|
+
regexStr += "[^/]*";
|
|
3384
|
+
i++;
|
|
3385
|
+
}
|
|
3386
|
+
else if (char === "?") {
|
|
3387
|
+
regexStr += "[^/]";
|
|
3388
|
+
i++;
|
|
3389
|
+
} else if (char === "{") {
|
|
3390
|
+
const end = pattern.indexOf("}", i);
|
|
3391
|
+
if (end !== -1) {
|
|
3392
|
+
const options = pattern.slice(i + 1, end).split(",");
|
|
3393
|
+
regexStr += `(?:${options.map((o) => escapeRegex(o)).join("|")})`;
|
|
3394
|
+
i = end + 1;
|
|
3395
|
+
} else {
|
|
3396
|
+
regexStr += "\\{";
|
|
3397
|
+
i++;
|
|
3398
|
+
}
|
|
3399
|
+
} else if (char === "[") {
|
|
3400
|
+
const end = pattern.indexOf("]", i);
|
|
3401
|
+
if (end !== -1) {
|
|
3402
|
+
regexStr += pattern.slice(i, end + 1);
|
|
3403
|
+
i = end + 1;
|
|
3404
|
+
} else {
|
|
3405
|
+
regexStr += "\\[";
|
|
3406
|
+
i++;
|
|
3407
|
+
}
|
|
3408
|
+
} else if (char === ".") {
|
|
3409
|
+
regexStr += "\\.";
|
|
3410
|
+
i++;
|
|
3411
|
+
} else if (char === "/") {
|
|
3412
|
+
regexStr += "\\/";
|
|
3413
|
+
i++;
|
|
3414
|
+
} else if ("()[]{}^$+|\\".includes(char)) {
|
|
3415
|
+
regexStr += `\\${char}`;
|
|
3416
|
+
i++;
|
|
3417
|
+
} else {
|
|
3418
|
+
regexStr += char;
|
|
3419
|
+
i++;
|
|
3420
|
+
}
|
|
3421
|
+
}
|
|
3422
|
+
return /* @__PURE__ */ new RegExp(`^${regexStr}$`);
|
|
3423
|
+
}
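// Illustration (not part of the published bundle): concrete patterns run through globToRegex
// above; the results follow directly from the conversion rules documented in the comment.
// Note that GlobTool.execute below prefixes bare patterns with "**/" before calling this.
const exampleTsGlob = globToRegex("**/*.ts");
// exampleTsGlob.test("src/agent/loop.ts") === true
// exampleTsGlob.test("src/agent/loop.tsx") === false
const exampleBraceGlob = globToRegex("*.{ts,tsx}");
// exampleBraceGlob.test("index.tsx") === true
// exampleBraceGlob.test("nested/index.tsx") === false (a single "*" never crosses "/")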
|
|
3424
|
+
/**
|
|
3425
|
+
* Escape special regex characters
|
|
3426
|
+
*/
|
|
3427
|
+
function escapeRegex(str) {
|
|
3428
|
+
return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
|
3429
|
+
}
|
|
3430
|
+
/**
|
|
3431
|
+
* Tool for finding files matching glob patterns.
|
|
3432
|
+
*
|
|
3433
|
+
* This tool provides fast file pattern matching that works with any codebase size,
|
|
3434
|
+
* returning matching file paths sorted by modification time.
|
|
3435
|
+
*
|
|
3436
|
+
* @example
|
|
3437
|
+
* ```typescript
|
|
3438
|
+
* const globTool = new GlobTool()
|
|
3439
|
+
* const result = await globTool.execute({
|
|
3440
|
+
* pattern: '**\/*.ts',
|
|
3441
|
+
* path: './src'
|
|
3442
|
+
* })
|
|
3443
|
+
* ```
|
|
3444
|
+
*/
|
|
3445
|
+
var GlobTool = class extends BaseTool {
|
|
3446
|
+
name = "Glob";
|
|
3447
|
+
description = `Fast file pattern matching tool that works with any codebase size.
|
|
3448
|
+
|
|
3449
|
+
Usage notes:
|
|
3450
|
+
- Supports glob patterns like "**/*.js" or "src/**/*.ts"
|
|
3451
|
+
- Returns matching file paths sorted by modification time (newest first)
|
|
3452
|
+
- Use this tool when you need to find files by name patterns
|
|
3453
|
+
- You can call multiple tools in a single response for parallel searches
|
|
3454
|
+
|
|
3455
|
+
Supported patterns:
|
|
3456
|
+
- \`*\` matches any sequence of characters except path separator
|
|
3457
|
+
- \`**\` matches any sequence of characters including path separator
|
|
3458
|
+
- \`?\` matches any single character
|
|
3459
|
+
- \`{a,b}\` matches either a or b
|
|
3460
|
+
- \`[abc]\` matches any character in brackets`;
|
|
3461
|
+
parameters = {
|
|
3462
|
+
type: "object",
|
|
3463
|
+
properties: {
|
|
3464
|
+
pattern: {
|
|
3465
|
+
type: "string",
|
|
3466
|
+
description: "The glob pattern to match files against"
|
|
3467
|
+
},
|
|
3468
|
+
path: {
|
|
3469
|
+
type: "string",
|
|
3470
|
+
description: "The directory to search in. If not specified, the current working directory will be used. IMPORTANT: Omit this field to use the default directory. DO NOT enter \"undefined\" or \"null\" - simply omit it for the default behavior."
|
|
3471
|
+
}
|
|
3472
|
+
},
|
|
3473
|
+
required: ["pattern"]
|
|
3474
|
+
};
|
|
3475
|
+
/** Current working directory for search */
|
|
3476
|
+
cwd;
|
|
3477
|
+
constructor(options) {
|
|
3478
|
+
super();
|
|
3479
|
+
this.cwd = options?.cwd ?? node_process.default.cwd();
|
|
3480
|
+
}
|
|
3481
|
+
/**
|
|
3482
|
+
* Set the current working directory
|
|
3483
|
+
*/
|
|
3484
|
+
setCwd(cwd) {
|
|
3485
|
+
this.cwd = cwd;
|
|
3486
|
+
}
|
|
3487
|
+
/**
|
|
3488
|
+
* Get the current working directory
|
|
3489
|
+
*/
|
|
3490
|
+
getCwd() {
|
|
3491
|
+
return this.cwd;
|
|
3492
|
+
}
|
|
3493
|
+
/**
|
|
3494
|
+
* Execute glob pattern matching
|
|
3495
|
+
*
|
|
3496
|
+
* @param args - Glob arguments
|
|
3497
|
+
* @returns MCP-compliant CallToolResult with matching files
|
|
3498
|
+
*/
|
|
3499
|
+
async execute(args) {
|
|
3500
|
+
const { pattern, path: searchPath } = this.validateArgs(args);
|
|
3501
|
+
const baseDir = searchPath ? node_path.default.isAbsolute(searchPath) ? searchPath : node_path.default.resolve(this.cwd, searchPath) : this.cwd;
|
|
3502
|
+
try {
|
|
3503
|
+
if (!(await (0, node_fs_promises.stat)(baseDir)).isDirectory()) return errorContent(`Path is not a directory: ${baseDir}`);
|
|
3504
|
+
} catch (error) {
|
|
3505
|
+
if (error.code === "ENOENT") return errorContent(`Directory not found: ${baseDir}`);
|
|
3506
|
+
throw error;
|
|
3507
|
+
}
|
|
3508
|
+
let normalizedPattern = pattern;
|
|
3509
|
+
if (!pattern.startsWith("**/") && !pattern.startsWith("/") && !pattern.startsWith("./")) normalizedPattern = `**/${pattern}`;
|
|
3510
|
+
const regex = globToRegex(normalizedPattern);
|
|
3511
|
+
const files = [];
|
|
3512
|
+
await this.walkDirectory(baseDir, "", regex, files, 0);
|
|
3513
|
+
files.sort((a, b) => b.mtime - a.mtime);
|
|
3514
|
+
const truncated = files.length > MAX_FILES;
|
|
3515
|
+
const resultFiles = files.slice(0, MAX_FILES).map((f) => f.path);
|
|
3516
|
+
const result = {
|
|
3517
|
+
files: resultFiles,
|
|
3518
|
+
totalMatches: files.length,
|
|
3519
|
+
truncated
|
|
3520
|
+
};
|
|
3521
|
+
return {
|
|
3522
|
+
content: [{
|
|
3523
|
+
type: "text",
|
|
3524
|
+
text: resultFiles.length > 0 ? `Found ${files.length} file${files.length !== 1 ? "s" : ""}${truncated ? ` (showing first ${MAX_FILES})` : ""}:\n${resultFiles.join("\n")}` : `No files found matching pattern: ${pattern}`
|
|
3525
|
+
}],
|
|
3526
|
+
structuredContent: result
|
|
3527
|
+
};
|
|
3528
|
+
}
|
|
3529
|
+
/**
|
|
3530
|
+
* Validate and parse arguments
|
|
3531
|
+
*/
|
|
3532
|
+
validateArgs(args) {
|
|
3533
|
+
const pattern = args.pattern;
|
|
3534
|
+
if (typeof pattern !== "string" || !pattern.trim()) throw new Error("Pattern is required and must be a non-empty string");
|
|
3535
|
+
let searchPath;
|
|
3536
|
+
if (args.path !== void 0 && args.path !== null && args.path !== "undefined" && args.path !== "null") {
|
|
3537
|
+
if (typeof args.path !== "string") throw new TypeError("Path must be a string");
|
|
3538
|
+
searchPath = args.path.trim() || void 0;
|
|
3539
|
+
}
|
|
3540
|
+
return {
|
|
3541
|
+
pattern: pattern.trim(),
|
|
3542
|
+
path: searchPath
|
|
3543
|
+
};
|
|
3544
|
+
}
|
|
3545
|
+
/**
|
|
3546
|
+
* Recursively walk directory and collect matching files
|
|
3547
|
+
*/
|
|
3548
|
+
async walkDirectory(baseDir, relativePath, pattern, files, depth) {
|
|
3549
|
+
if (depth > MAX_DEPTH) return;
|
|
3550
|
+
const currentDir = relativePath ? node_path.default.join(baseDir, relativePath) : baseDir;
|
|
3551
|
+
let entries;
|
|
3552
|
+
try {
|
|
3553
|
+
entries = await (0, node_fs_promises.readdir)(currentDir, { withFileTypes: true });
|
|
3554
|
+
} catch {
|
|
3555
|
+
return;
|
|
3556
|
+
}
|
|
3557
|
+
for (const entry of entries) {
|
|
3558
|
+
if (entry.name.startsWith(".") || entry.name === "node_modules") continue;
|
|
3559
|
+
const entryRelativePath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
|
|
3560
|
+
if (entry.isDirectory()) await this.walkDirectory(baseDir, entryRelativePath, pattern, files, depth + 1);
|
|
3561
|
+
else if (entry.isFile()) {
|
|
3562
|
+
if (pattern.test(entryRelativePath)) try {
|
|
3563
|
+
const stats = await (0, node_fs_promises.stat)(node_path.default.join(currentDir, entry.name));
|
|
3564
|
+
files.push({
|
|
3565
|
+
path: entryRelativePath,
|
|
3566
|
+
mtime: stats.mtimeMs
|
|
3567
|
+
});
|
|
3568
|
+
} catch {}
|
|
3569
|
+
}
|
|
3570
|
+
}
|
|
3571
|
+
}
|
|
3572
|
+
};
|
|
3573
|
+
|
|
3574
|
+
//#endregion
|
|
3575
|
+
//#region src/tool/builtin/grep.ts
|
|
3576
|
+
/**
|
|
3577
|
+
* Maximum output length before truncation (in characters)
|
|
3578
|
+
*/
|
|
3579
|
+
const MAX_OUTPUT_LENGTH = 5e4;
|
|
3580
|
+
/**
|
|
3581
|
+
* Default command timeout (60 seconds)
|
|
3582
|
+
*/
|
|
3583
|
+
const DEFAULT_TIMEOUT = 6e4;
|
|
3584
|
+
/**
|
|
3585
|
+
* Tool for searching files using ripgrep.
|
|
3586
|
+
*
|
|
3587
|
+
* A powerful search tool built on ripgrep that supports regex patterns,
|
|
3588
|
+
* multiple output modes, and various filtering options.
|
|
3589
|
+
*
|
|
3590
|
+
* @example
|
|
3591
|
+
* ```typescript
|
|
3592
|
+
* const grepTool = new GrepTool()
|
|
3593
|
+
* const result = await grepTool.execute({
|
|
3594
|
+
* pattern: 'function\\s+\\w+',
|
|
3595
|
+
* path: './src',
|
|
3596
|
+
* type: 'ts'
|
|
3597
|
+
* })
|
|
3598
|
+
* ```
|
|
3599
|
+
*/
|
|
3600
|
+
var GrepTool = class extends BaseTool {
|
|
3601
|
+
name = "Grep";
|
|
3602
|
+
description = `A powerful search tool built on ripgrep.
|
|
3603
|
+
|
|
3604
|
+
Usage notes:
|
|
3605
|
+
- Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")
|
|
3606
|
+
- Filter files with glob parameter (e.g., "*.js", "**/*.tsx") or type parameter (e.g., "js", "py", "rust")
|
|
3607
|
+
- Output modes: "content" shows matching lines, "files_with_matches" shows only file paths (default), "count" shows match counts
|
|
3608
|
+
- Pattern syntax: Uses ripgrep (not grep) - literal braces need escaping (use \`interface\\{\\}\` to find \`interface{}\` in Go code)
|
|
3609
|
+
- Multiline matching: By default patterns match within single lines only. For cross-line patterns, use multiline: true`;
|
|
3610
|
+
parameters = {
|
|
3611
|
+
type: "object",
|
|
3612
|
+
properties: {
|
|
3613
|
+
pattern: {
|
|
3614
|
+
type: "string",
|
|
3615
|
+
description: "The regular expression pattern to search for in file contents"
|
|
3616
|
+
},
|
|
3617
|
+
path: {
|
|
3618
|
+
type: "string",
|
|
3619
|
+
description: "File or directory to search in. Defaults to current working directory."
|
|
3620
|
+
},
|
|
3621
|
+
glob: {
|
|
3622
|
+
type: "string",
|
|
3623
|
+
description: "Glob pattern to filter files (e.g. \"*.js\", \"*.{ts,tsx}\")"
|
|
3624
|
+
},
|
|
3625
|
+
output_mode: {
|
|
3626
|
+
type: "string",
|
|
3627
|
+
enum: [
|
|
3628
|
+
"content",
|
|
3629
|
+
"files_with_matches",
|
|
3630
|
+
"count"
|
|
3631
|
+
],
|
|
3632
|
+
description: "Output mode: \"content\" shows matching lines, \"files_with_matches\" shows file paths (default), \"count\" shows match counts."
|
|
3633
|
+
},
|
|
3634
|
+
"-B": {
|
|
3635
|
+
type: "number",
|
|
3636
|
+
description: "Number of lines to show before each match. Requires output_mode: \"content\"."
|
|
3637
|
+
},
|
|
3638
|
+
"-A": {
|
|
3639
|
+
type: "number",
|
|
3640
|
+
description: "Number of lines to show after each match. Requires output_mode: \"content\"."
|
|
3641
|
+
},
|
|
3642
|
+
"-C": {
|
|
3643
|
+
type: "number",
|
|
3644
|
+
description: "Number of lines to show before and after each match. Requires output_mode: \"content\"."
|
|
3645
|
+
},
|
|
3646
|
+
"-n": {
|
|
3647
|
+
type: "boolean",
|
|
3648
|
+
description: "Show line numbers in output. Requires output_mode: \"content\". Defaults to true."
|
|
3649
|
+
},
|
|
3650
|
+
"-i": {
|
|
3651
|
+
type: "boolean",
|
|
3652
|
+
description: "Case insensitive search"
|
|
3653
|
+
},
|
|
3654
|
+
type: {
|
|
3655
|
+
type: "string",
|
|
3656
|
+
description: "File type to search (e.g., js, py, rust, go, java). More efficient than glob for standard file types."
|
|
3657
|
+
},
|
|
3658
|
+
head_limit: {
|
|
3659
|
+
type: "number",
|
|
3660
|
+
description: "Limit output to first N lines/entries. Defaults to 0 (unlimited)."
|
|
3661
|
+
},
|
|
3662
|
+
offset: {
|
|
3663
|
+
type: "number",
|
|
3664
|
+
description: "Skip first N lines/entries before applying head_limit. Defaults to 0."
|
|
3665
|
+
},
|
|
3666
|
+
multiline: {
|
|
3667
|
+
type: "boolean",
|
|
3668
|
+
description: "Enable multiline mode where . matches newlines and patterns can span lines. Default: false."
|
|
3669
|
+
}
|
|
3670
|
+
},
|
|
3671
|
+
required: ["pattern"]
|
|
3672
|
+
};
|
|
3673
|
+
/** Current working directory for search */
|
|
3674
|
+
cwd;
|
|
3675
|
+
/** Path to ripgrep binary */
|
|
3676
|
+
rgPath;
|
|
3677
|
+
constructor(options) {
|
|
3678
|
+
super();
|
|
3679
|
+
this.cwd = options?.cwd ?? node_process.default.cwd();
|
|
3680
|
+
this.rgPath = options?.rgPath ?? "rg";
|
|
3681
|
+
}
|
|
3682
|
+
/**
|
|
3683
|
+
* Set the current working directory
|
|
3684
|
+
*/
|
|
3685
|
+
setCwd(cwd) {
|
|
3686
|
+
this.cwd = cwd;
|
|
3687
|
+
}
|
|
3688
|
+
/**
|
|
3689
|
+
* Get the current working directory
|
|
3690
|
+
*/
|
|
3691
|
+
getCwd() {
|
|
3692
|
+
return this.cwd;
|
|
3693
|
+
}
|
|
3694
|
+
/**
|
|
3695
|
+
* Execute grep search
|
|
3696
|
+
*
|
|
3697
|
+
* @param args - Grep arguments
|
|
3698
|
+
* @returns MCP-compliant CallToolResult with search results
|
|
3699
|
+
*/
|
|
3700
|
+
async execute(args) {
|
|
3701
|
+
const validatedArgs = this.validateArgs(args);
|
|
3702
|
+
const rgArgs = this.buildRgArgs(validatedArgs);
|
|
3703
|
+
const result = await this.runRipgrep(rgArgs, validatedArgs);
|
|
3704
|
+
let text = result.output || "[No matches found]";
|
|
3705
|
+
if (result.timedOut) text += "\n[Search timed out]";
|
|
3706
|
+
if (result.matchCount !== void 0) text = `Found ${result.matchCount} match${result.matchCount !== 1 ? "es" : ""}\n${text}`;
|
|
3707
|
+
return {
|
|
3708
|
+
content: [{
|
|
3709
|
+
type: "text",
|
|
3710
|
+
text
|
|
3711
|
+
}],
|
|
3712
|
+
structuredContent: result,
|
|
3713
|
+
isError: result.exitCode !== 0 && result.exitCode !== 1
|
|
3714
|
+
};
|
|
3715
|
+
}
|
|
3716
|
+
/**
|
|
3717
|
+
* Validate and parse arguments
|
|
3718
|
+
*/
|
|
3719
|
+
validateArgs(args) {
|
|
3720
|
+
const pattern = args.pattern;
|
|
3721
|
+
if (typeof pattern !== "string" || !pattern.trim()) throw new Error("Pattern is required and must be a non-empty string");
|
|
3722
|
+
const result = { pattern: pattern.trim() };
|
|
3723
|
+
if (args.path !== void 0 && args.path !== null && args.path !== "") {
|
|
3724
|
+
if (typeof args.path !== "string") throw new TypeError("Path must be a string");
|
|
3725
|
+
result.path = args.path.trim();
|
|
3726
|
+
}
|
|
3727
|
+
if (args.glob !== void 0 && args.glob !== null && args.glob !== "") {
|
|
3728
|
+
if (typeof args.glob !== "string") throw new TypeError("Glob must be a string");
|
|
3729
|
+
result.glob = args.glob.trim();
|
|
3730
|
+
}
|
|
3731
|
+
if (args.type !== void 0 && args.type !== null && args.type !== "") {
|
|
3732
|
+
if (typeof args.type !== "string") throw new TypeError("Type must be a string");
|
|
3733
|
+
result.type = args.type.trim();
|
|
3734
|
+
}
|
|
3735
|
+
if (args.output_mode !== void 0) {
|
|
3736
|
+
const validModes = [
|
|
3737
|
+
"content",
|
|
3738
|
+
"files_with_matches",
|
|
3739
|
+
"count"
|
|
3740
|
+
];
|
|
3741
|
+
if (!validModes.includes(args.output_mode)) throw new Error(`Invalid output_mode. Must be one of: ${validModes.join(", ")}`);
|
|
3742
|
+
result.output_mode = args.output_mode;
|
|
3743
|
+
}
|
|
3744
|
+
for (const arg of [
|
|
3745
|
+
"-B",
|
|
3746
|
+
"-A",
|
|
3747
|
+
"-C",
|
|
3748
|
+
"head_limit",
|
|
3749
|
+
"offset"
|
|
3750
|
+
]) if (args[arg] !== void 0 && args[arg] !== null) {
|
|
3751
|
+
if (typeof args[arg] !== "number") throw new TypeError(`${arg} must be a number`);
|
|
3752
|
+
result[arg] = Math.max(0, Math.floor(args[arg]));
|
|
3753
|
+
}
|
|
3754
|
+
for (const arg of [
|
|
3755
|
+
"-n",
|
|
3756
|
+
"-i",
|
|
3757
|
+
"multiline"
|
|
3758
|
+
]) if (args[arg] !== void 0 && args[arg] !== null) result[arg] = Boolean(args[arg]);
|
|
3759
|
+
return result;
|
|
3760
|
+
}
|
|
3761
|
+
/**
|
|
3762
|
+
* Build ripgrep command arguments
|
|
3763
|
+
*/
|
|
3764
|
+
buildRgArgs(args) {
|
|
3765
|
+
const rgArgs = [];
|
|
3766
|
+
const outputMode = args.output_mode ?? "files_with_matches";
|
|
3767
|
+
if (outputMode === "files_with_matches") rgArgs.push("-l");
|
|
3768
|
+
else if (outputMode === "count") rgArgs.push("-c");
|
|
3769
|
+
if (outputMode === "content") {
|
|
3770
|
+
if (args["-n"] !== false) rgArgs.push("-n");
|
|
3771
|
+
if (args["-B"] !== void 0 && args["-B"] > 0) rgArgs.push("-B", String(args["-B"]));
|
|
3772
|
+
if (args["-A"] !== void 0 && args["-A"] > 0) rgArgs.push("-A", String(args["-A"]));
|
|
3773
|
+
if (args["-C"] !== void 0 && args["-C"] > 0) rgArgs.push("-C", String(args["-C"]));
|
|
3774
|
+
}
|
|
3775
|
+
if (args["-i"]) rgArgs.push("-i");
|
|
3776
|
+
if (args.multiline) rgArgs.push("-U", "--multiline-dotall");
|
|
3777
|
+
if (args.type) rgArgs.push("--type", args.type);
|
|
3778
|
+
if (args.glob) rgArgs.push("--glob", args.glob);
|
|
3779
|
+
rgArgs.push("--color", "never");
|
|
3780
|
+
rgArgs.push("--no-heading");
|
|
3781
|
+
rgArgs.push("--regexp", args.pattern);
|
|
3782
|
+
if (args.path) rgArgs.push("--", args.path);
|
|
3783
|
+
return rgArgs;
|
|
3784
|
+
}
|
|
3785
|
+
/**
|
|
3786
|
+
* Run ripgrep command
|
|
3787
|
+
*/
|
|
3788
|
+
runRipgrep(rgArgs, args) {
|
|
3789
|
+
return new Promise((resolve) => {
|
|
3790
|
+
let output = "";
|
|
3791
|
+
let timedOut = false;
|
|
3792
|
+
let truncated = false;
|
|
3793
|
+
const child = (0, node_child_process.spawn)(this.rgPath, rgArgs, {
|
|
3794
|
+
cwd: this.cwd,
|
|
3795
|
+
env: node_process.default.env,
|
|
3796
|
+
stdio: [
|
|
3797
|
+
"pipe",
|
|
3798
|
+
"pipe",
|
|
3799
|
+
"pipe"
|
|
3800
|
+
]
|
|
3801
|
+
});
|
|
3802
|
+
const timeoutId = setTimeout(() => {
|
|
3803
|
+
timedOut = true;
|
|
3804
|
+
child.kill("SIGTERM");
|
|
3805
|
+
setTimeout(() => {
|
|
3806
|
+
if (!child.killed) child.kill("SIGKILL");
|
|
3807
|
+
}, 5e3);
|
|
3808
|
+
}, DEFAULT_TIMEOUT);
|
|
3809
|
+
child.stdout?.on("data", (data) => {
|
|
3810
|
+
const str = data.toString();
|
|
3811
|
+
if (output.length + str.length > MAX_OUTPUT_LENGTH) {
|
|
3812
|
+
output += str.slice(0, MAX_OUTPUT_LENGTH - output.length);
|
|
3813
|
+
truncated = true;
|
|
3814
|
+
} else output += str;
|
|
3815
|
+
});
|
|
3816
|
+
child.stderr?.on("data", (data) => {
|
|
3817
|
+
const str = data.toString();
|
|
3818
|
+
if (str.includes("error:")) output += `\n[stderr]: ${str}`;
|
|
3819
|
+
});
|
|
3820
|
+
child.on("close", (code) => {
|
|
3821
|
+
clearTimeout(timeoutId);
|
|
3822
|
+
let finalOutput = output;
|
|
3823
|
+
if (args.offset || args.head_limit) {
|
|
3824
|
+
const lines = output.split("\n").filter((line) => line.trim());
|
|
3825
|
+
const offset = args.offset ?? 0;
|
|
3826
|
+
const limit = args.head_limit ?? lines.length;
|
|
3827
|
+
finalOutput = lines.slice(offset, offset + limit).join("\n");
|
|
3828
|
+
}
|
|
3829
|
+
let matchCount;
|
|
3830
|
+
if (args.output_mode === "count") matchCount = finalOutput.split("\n").filter((line) => line.trim()).reduce((sum, line) => {
|
|
3831
|
+
const match = line.match(/:(\d+)$/);
|
|
3832
|
+
return sum + (match ? Number.parseInt(match[1], 10) : 0);
|
|
3833
|
+
}, 0);
|
|
3834
|
+
resolve({
|
|
3835
|
+
exitCode: code,
|
|
3836
|
+
output: truncated ? `${finalOutput}\n... [output truncated]` : finalOutput,
|
|
3837
|
+
truncated,
|
|
3838
|
+
timedOut,
|
|
3839
|
+
matchCount
|
|
3840
|
+
});
|
|
3841
|
+
});
|
|
3842
|
+
child.on("error", (error) => {
|
|
3843
|
+
clearTimeout(timeoutId);
|
|
3844
|
+
if (error?.code === "ENOENT") {
|
|
3845
|
+
this.runSystemGrep(args).then(resolve);
|
|
3846
|
+
return;
|
|
3847
|
+
}
|
|
3848
|
+
resolve({
|
|
3849
|
+
exitCode: 1,
|
|
3850
|
+
engine: "rg",
|
|
3851
|
+
output: `Failed to execute ripgrep: ${error.message}. Make sure 'rg' is installed and in PATH.`,
|
|
3852
|
+
truncated: false,
|
|
3853
|
+
timedOut: false
|
|
3854
|
+
});
|
|
3855
|
+
});
|
|
3856
|
+
});
|
|
3857
|
+
}
|
|
3858
|
+
runSystemGrep(args) {
|
|
3859
|
+
return new Promise((resolve) => {
|
|
3860
|
+
let output = "";
|
|
3861
|
+
let timedOut = false;
|
|
3862
|
+
let truncated = false;
|
|
3863
|
+
const grepArgs = [];
|
|
3864
|
+
grepArgs.push("-R", "-E", "-I");
|
|
3865
|
+
const mode = args.output_mode ?? "files_with_matches";
|
|
3866
|
+
if (mode === "files_with_matches") grepArgs.push("-l");
|
|
3867
|
+
else if (mode === "count") grepArgs.push("-c");
|
|
3868
|
+
else grepArgs.push("-n");
|
|
3869
|
+
if (mode === "content") {
|
|
3870
|
+
if (args["-B"] !== void 0 && args["-B"] > 0) grepArgs.push("-B", String(args["-B"]));
|
|
3871
|
+
if (args["-A"] !== void 0 && args["-A"] > 0) grepArgs.push("-A", String(args["-A"]));
|
|
3872
|
+
if (args["-C"] !== void 0 && args["-C"] > 0) grepArgs.push("-C", String(args["-C"]));
|
|
3873
|
+
}
|
|
3874
|
+
if (args["-i"]) grepArgs.push("-i");
|
|
3875
|
+
grepArgs.push(args.pattern);
|
|
3876
|
+
if (args.path) grepArgs.push(args.path);
|
|
3877
|
+
else grepArgs.push(".");
|
|
3878
|
+
const child = (0, node_child_process.spawn)("grep", grepArgs, {
|
|
3879
|
+
cwd: this.cwd,
|
|
3880
|
+
env: node_process.default.env,
|
|
3881
|
+
stdio: [
|
|
3882
|
+
"pipe",
|
|
3883
|
+
"pipe",
|
|
3884
|
+
"pipe"
|
|
3885
|
+
]
|
|
3886
|
+
});
|
|
3887
|
+
const timeoutId = setTimeout(() => {
|
|
3888
|
+
timedOut = true;
|
|
3889
|
+
child.kill("SIGTERM");
|
|
3890
|
+
setTimeout(() => {
|
|
3891
|
+
if (!child.killed) child.kill("SIGKILL");
|
|
3892
|
+
}, 5e3);
|
|
3893
|
+
}, DEFAULT_TIMEOUT);
|
|
3894
|
+
child.stdout?.on("data", (data) => {
|
|
3895
|
+
const str = data.toString();
|
|
3896
|
+
if (output.length + str.length > MAX_OUTPUT_LENGTH) {
|
|
3897
|
+
output += str.slice(0, MAX_OUTPUT_LENGTH - output.length);
|
|
3898
|
+
truncated = true;
|
|
3899
|
+
} else output += str;
|
|
3900
|
+
});
|
|
3901
|
+
child.stderr?.on("data", (data) => {
|
|
3902
|
+
const str = data.toString();
|
|
3903
|
+
if (str.trim()) output += `\n[stderr]: ${str}`;
|
|
3904
|
+
});
|
|
3905
|
+
child.on("close", (code) => {
|
|
3906
|
+
clearTimeout(timeoutId);
|
|
3907
|
+
let finalOutput = output;
|
|
3908
|
+
if (args.offset || args.head_limit) {
|
|
3909
|
+
const lines = output.split("\n").filter((line) => line.trim());
|
|
3910
|
+
const offset = args.offset ?? 0;
|
|
3911
|
+
const limit = args.head_limit ?? lines.length;
|
|
3912
|
+
finalOutput = lines.slice(offset, offset + limit).join("\n");
|
|
3913
|
+
}
|
|
3914
|
+
let matchCount;
|
|
3915
|
+
if (mode === "count") matchCount = finalOutput.split("\n").filter((line) => line.trim()).reduce((sum, line) => {
|
|
3916
|
+
const last = line.split(":").pop();
|
|
3917
|
+
const n = last ? Number.parseInt(last, 10) : NaN;
|
|
3918
|
+
return sum + (Number.isFinite(n) ? n : 0);
|
|
3919
|
+
}, 0);
|
|
3920
|
+
resolve({
|
|
3921
|
+
exitCode: code,
|
|
3922
|
+
engine: "grep",
|
|
3923
|
+
output: truncated ? `${finalOutput}\n... [output truncated]` : finalOutput,
|
|
3924
|
+
truncated,
|
|
3925
|
+
timedOut,
|
|
3926
|
+
matchCount
|
|
3927
|
+
});
|
|
3928
|
+
});
|
|
3929
|
+
child.on("error", (error) => {
|
|
3930
|
+
clearTimeout(timeoutId);
|
|
3931
|
+
resolve({
|
|
3932
|
+
exitCode: 1,
|
|
3933
|
+
engine: "grep",
|
|
3934
|
+
output: `Failed to execute grep: ${error.message}.`,
|
|
3935
|
+
truncated: false,
|
|
3936
|
+
timedOut: false
|
|
3937
|
+
});
|
|
3938
|
+
});
|
|
3939
|
+
});
|
|
3940
|
+
}
|
|
3941
|
+
};
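// Illustration (not part of the published bundle): the ripgrep argv produced by
// GrepTool.buildRgArgs for one sample call. The pattern and glob values are arbitrary inputs
// chosen for the example.
const exampleRgArgs = new GrepTool().buildRgArgs({
  pattern: "TODO",
  glob: "*.ts",
  output_mode: "content",
  "-C": 2,
  "-i": true
});
// => ["-n", "-C", "2", "-i", "--glob", "*.ts", "--color", "never", "--no-heading", "--regexp", "TODO"]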
|
|
3942
|
+
|
|
3943
|
+
//#endregion
|
|
3944
|
+
//#region src/tool/builtin/read.ts
|
|
3945
|
+
/**
|
|
3946
|
+
* Default maximum number of lines to read
|
|
3947
|
+
*/
|
|
3948
|
+
const DEFAULT_LINE_LIMIT = 2e3;
|
|
3949
|
+
/**
|
|
3950
|
+
* Maximum characters per line before truncation
|
|
3951
|
+
*/
|
|
3952
|
+
const MAX_LINE_LENGTH = 2e3;
|
|
3953
|
+
/**
|
|
3954
|
+
* Known binary/image file extensions
|
|
3955
|
+
*/
|
|
3956
|
+
const BINARY_EXTENSIONS = new Set([
|
|
3957
|
+
".png",
|
|
3958
|
+
".jpg",
|
|
3959
|
+
".jpeg",
|
|
3960
|
+
".gif",
|
|
3961
|
+
".bmp",
|
|
3962
|
+
".ico",
|
|
3963
|
+
".webp",
|
|
3964
|
+
".svg",
|
|
3965
|
+
".pdf",
|
|
3966
|
+
".zip",
|
|
3967
|
+
".tar",
|
|
3968
|
+
".gz",
|
|
3969
|
+
".rar",
|
|
3970
|
+
".7z",
|
|
3971
|
+
".exe",
|
|
3972
|
+
".dll",
|
|
3973
|
+
".so",
|
|
3974
|
+
".dylib",
|
|
3975
|
+
".mp3",
|
|
3976
|
+
".mp4",
|
|
3977
|
+
".avi",
|
|
3978
|
+
".mov",
|
|
3979
|
+
".wav",
|
|
3980
|
+
".woff",
|
|
3981
|
+
".woff2",
|
|
3982
|
+
".ttf",
|
|
3983
|
+
".eot"
|
|
3984
|
+
]);
|
|
3985
|
+
/**
|
|
3986
|
+
* MIME types for common file extensions
|
|
3987
|
+
*/
|
|
3988
|
+
const MIME_TYPES = {
|
|
3989
|
+
".png": "image/png",
|
|
3990
|
+
".jpg": "image/jpeg",
|
|
3991
|
+
".jpeg": "image/jpeg",
|
|
3992
|
+
".gif": "image/gif",
|
|
3993
|
+
".webp": "image/webp",
|
|
3994
|
+
".svg": "image/svg+xml",
|
|
3995
|
+
".pdf": "application/pdf",
|
|
3996
|
+
".json": "application/json",
|
|
3997
|
+
".js": "text/javascript",
|
|
3998
|
+
".ts": "text/typescript",
|
|
3999
|
+
".html": "text/html",
|
|
4000
|
+
".css": "text/css",
|
|
4001
|
+
".md": "text/markdown",
|
|
4002
|
+
".txt": "text/plain"
|
|
4003
|
+
};
|
|
4004
|
+
/**
|
|
4005
|
+
* Tool for reading files from the filesystem.
|
|
4006
|
+
*
|
|
4007
|
+
* This tool reads files with line number formatting, supporting
|
|
4008
|
+
* offset/limit for large files and detecting binary content.
|
|
4009
|
+
*
|
|
4010
|
+
* @example
|
|
4011
|
+
* ```typescript
|
|
4012
|
+
* const readTool = new ReadTool()
|
|
4013
|
+
* const result = await readTool.execute({
|
|
4014
|
+
* file_path: '/path/to/file.ts',
|
|
4015
|
+
* offset: 100,
|
|
4016
|
+
* limit: 50
|
|
4017
|
+
* })
|
|
4018
|
+
* ```
|
|
4019
|
+
*
|
|
4020
|
+
* @example Restrict reads to a specific directory
|
|
4021
|
+
* ```typescript
|
|
4022
|
+
 * const readTool = new ReadTool({
 *   cwd: '/app/output',
 *   allowedDirectory: '/app/output'
 * })
|
|
4026
|
+
* // All paths will be resolved relative to /app/output
|
|
4027
|
+
* // Absolute paths and path traversal (../) will be blocked
|
|
4028
|
+
* ```
|
|
4029
|
+
*/
|
|
4030
|
+
var ReadTool = class extends BaseTool {
|
|
4031
|
+
name = "Read";
|
|
4032
|
+
/** Current working directory for resolving relative paths */
|
|
4033
|
+
_cwd;
|
|
4034
|
+
/** Allowed directory for file operations (if set, restricts reads to this directory) */
|
|
4035
|
+
_allowedDirectory;
|
|
4036
|
+
constructor(options) {
|
|
4037
|
+
super();
|
|
4038
|
+
this._cwd = options?.cwd ?? node_process.default.cwd();
|
|
4039
|
+
this._allowedDirectory = options?.allowedDirectory;
|
|
4040
|
+
}
|
|
4041
|
+
/**
|
|
4042
|
+
* Dynamic description that includes allowed directory info if configured
|
|
4043
|
+
*/
|
|
4044
|
+
get description() {
|
|
4045
|
+
const baseDescription = `Reads a file from the local filesystem. You can access any file directly by using this tool.
|
|
4046
|
+
|
|
4047
|
+
Usage notes:
|
|
4048
|
+
- The file_path parameter must be an absolute path, not a relative path
|
|
4049
|
+
- By default, it reads up to ${DEFAULT_LINE_LIMIT} lines starting from the beginning
|
|
4050
|
+
- You can optionally specify a line offset and limit for large files
|
|
4051
|
+
- Any lines longer than ${MAX_LINE_LENGTH} characters will be truncated
|
|
4052
|
+
- Results are returned with line numbers (like cat -n format)
|
|
4053
|
+
- Can read images (PNG, JPG, etc.), PDFs, and Jupyter notebooks
|
|
4054
|
+
- You can call multiple tools in parallel to read multiple files at once`;
|
|
4055
|
+
if (this._allowedDirectory) return `${baseDescription}
|
|
4056
|
+
- IMPORTANT: Files can ONLY be read from within: ${this._allowedDirectory}
|
|
4057
|
+
- Use absolute paths starting with ${this._allowedDirectory}/ (e.g., ${this._allowedDirectory}/filename.html)`;
|
|
4058
|
+
return baseDescription;
|
|
4059
|
+
}
|
|
4060
|
+
/**
|
|
4061
|
+
* Dynamic parameters that include allowed directory info if configured
|
|
4062
|
+
*/
|
|
4063
|
+
get parameters() {
|
|
4064
|
+
return {
|
|
4065
|
+
type: "object",
|
|
4066
|
+
properties: {
|
|
4067
|
+
file_path: {
|
|
4068
|
+
type: "string",
|
|
4069
|
+
description: this._allowedDirectory ? `The absolute path to the file to read (must be within ${this._allowedDirectory})` : "The absolute path to the file to read"
|
|
4070
|
+
},
|
|
4071
|
+
offset: {
|
|
4072
|
+
type: "number",
|
|
4073
|
+
description: "The line number to start reading from (1-based). Only provide if the file is too large to read at once."
|
|
4074
|
+
},
|
|
4075
|
+
limit: {
|
|
4076
|
+
type: "number",
|
|
4077
|
+
description: "The number of lines to read. Only provide if the file is too large to read at once."
|
|
4078
|
+
}
|
|
4079
|
+
},
|
|
4080
|
+
required: ["file_path"]
|
|
4081
|
+
};
|
|
4082
|
+
}
|
|
4083
|
+
/**
|
|
4084
|
+
* Set the current working directory
|
|
4085
|
+
*/
|
|
4086
|
+
setCwd(cwd) {
|
|
4087
|
+
this._cwd = cwd;
|
|
4088
|
+
}
|
|
4089
|
+
/**
|
|
4090
|
+
* Get the current working directory
|
|
4091
|
+
*/
|
|
4092
|
+
getCwd() {
|
|
4093
|
+
return this._cwd;
|
|
4094
|
+
}
|
|
4095
|
+
/**
|
|
4096
|
+
* Set the allowed directory for file operations
|
|
4097
|
+
*/
|
|
4098
|
+
setAllowedDirectory(dir) {
|
|
4099
|
+
this._allowedDirectory = dir;
|
|
4100
|
+
}
|
|
4101
|
+
/**
|
|
4102
|
+
* Get the allowed directory for file operations
|
|
4103
|
+
*/
|
|
4104
|
+
getAllowedDirectory() {
|
|
4105
|
+
return this._allowedDirectory;
|
|
4106
|
+
}
|
|
4107
|
+
/**
|
|
4108
|
+
* Execute file read
|
|
4109
|
+
*
|
|
4110
|
+
* @param args - Read arguments
|
|
4111
|
+
* @returns MCP-compliant CallToolResult with file content
|
|
4112
|
+
*/
|
|
4113
|
+
async execute(args) {
|
|
4114
|
+
const { file_path, offset, limit } = this.validateArgs(args);
|
|
4115
|
+
const filePath = node_path.default.isAbsolute(file_path) ? file_path : node_path.default.resolve(this._cwd, file_path);
|
|
4116
|
+
if (this._allowedDirectory) {
|
|
4117
|
+
const normalizedAllowed = node_path.default.resolve(this._allowedDirectory);
|
|
4118
|
+
const normalizedFilePath = node_path.default.resolve(filePath);
|
|
4119
|
+
if (!normalizedFilePath.startsWith(normalizedAllowed + node_path.default.sep) && normalizedFilePath !== normalizedAllowed) return errorContent(`Access denied: ${file_path}\nFiles can only be read from within: ${this._allowedDirectory}\nPlease use a path like: ${this._allowedDirectory}/<filename>`);
|
|
4120
|
+
}
|
|
4121
|
+
let stats;
|
|
4122
|
+
try {
|
|
4123
|
+
stats = await (0, node_fs_promises.stat)(filePath);
|
|
4124
|
+
} catch (error) {
|
|
4125
|
+
if (error.code === "ENOENT") return errorContent(`File not found: ${filePath}`);
|
|
4126
|
+
throw error;
|
|
4127
|
+
}
|
|
4128
|
+
if (stats.isDirectory()) return errorContent(`Path is a directory, not a file: ${filePath}. Use ls command via Bash tool to read directories.`);
|
|
4129
|
+
const ext = node_path.default.extname(filePath).toLowerCase();
|
|
4130
|
+
const isBinary = BINARY_EXTENSIONS.has(ext);
|
|
4131
|
+
const mimeType = MIME_TYPES[ext];
|
|
4132
|
+
let result;
|
|
4133
|
+
if (isBinary) {
|
|
4134
|
+
result = await this.handleBinaryFile(filePath, stats.size, mimeType);
|
|
4135
|
+
if (mimeType?.startsWith("image/")) return {
|
|
4136
|
+
content: [{
|
|
4137
|
+
type: "image",
|
|
4138
|
+
data: (await (0, node_fs_promises.readFile)(filePath)).toString("base64"),
|
|
4139
|
+
mimeType
|
|
4140
|
+
}],
|
|
4141
|
+
structuredContent: result
|
|
4142
|
+
};
|
|
4143
|
+
} else if (ext === ".ipynb") result = await this.handleJupyterNotebook(filePath, stats.size, offset, limit);
|
|
4144
|
+
else result = await this.handleTextFile(filePath, stats.size, offset, limit);
|
|
4145
|
+
return {
|
|
4146
|
+
content: [{
|
|
4147
|
+
type: "text",
|
|
4148
|
+
text: result.content
|
|
4149
|
+
}],
|
|
4150
|
+
structuredContent: result
|
|
4151
|
+
};
|
|
4152
|
+
}
|
|
4153
|
+
/**
|
|
4154
|
+
* Validate and parse arguments
|
|
4155
|
+
*/
|
|
4156
|
+
validateArgs(args) {
|
|
4157
|
+
const filePath = args.file_path;
|
|
4158
|
+
if (typeof filePath !== "string" || !filePath.trim()) throw new Error("file_path is required and must be a non-empty string");
|
|
4159
|
+
const result = { file_path: filePath.trim() };
|
|
4160
|
+
if (args.offset !== void 0 && args.offset !== null) {
|
|
4161
|
+
if (typeof args.offset !== "number") throw new TypeError("offset must be a number");
|
|
4162
|
+
result.offset = Math.max(1, Math.floor(args.offset));
|
|
4163
|
+
}
|
|
4164
|
+
if (args.limit !== void 0 && args.limit !== null) {
|
|
4165
|
+
if (typeof args.limit !== "number") throw new TypeError("limit must be a number");
|
|
4166
|
+
result.limit = Math.max(1, Math.floor(args.limit));
|
|
4167
|
+
}
|
|
4168
|
+
return result;
|
|
4169
|
+
}
|
|
4170
|
+
/**
|
|
4171
|
+
* Handle binary file (images, PDFs, etc.)
|
|
4172
|
+
*/
|
|
4173
|
+
async handleBinaryFile(filePath, fileSize, mimeType) {
|
|
4174
|
+
const base64 = (await (0, node_fs_promises.readFile)(filePath)).toString("base64");
|
|
4175
|
+
return {
|
|
4176
|
+
content: `[Binary file: ${node_path.default.basename(filePath)}]\nSize: ${this.formatSize(fileSize)}\nMIME type: ${mimeType ?? "unknown"}\nBase64 encoded content:\n${base64}`,
|
|
4177
|
+
totalLines: 1,
|
|
4178
|
+
linesReturned: 1,
|
|
4179
|
+
startLine: 1,
|
|
4180
|
+
truncated: false,
|
|
4181
|
+
fileSize,
|
|
4182
|
+
isBinary: true,
|
|
4183
|
+
mimeType
|
|
4184
|
+
};
|
|
4185
|
+
}
|
|
4186
|
+
/**
|
|
4187
|
+
* Handle Jupyter notebook file
|
|
4188
|
+
*/
|
|
4189
|
+
async handleJupyterNotebook(filePath, fileSize, offset, limit) {
|
|
4190
|
+
const content = await (0, node_fs_promises.readFile)(filePath, "utf-8");
|
|
4191
|
+
let notebook;
|
|
4192
|
+
try {
|
|
4193
|
+
notebook = JSON.parse(content);
|
|
4194
|
+
} catch {
|
|
4195
|
+
throw new Error(`Invalid Jupyter notebook format: ${filePath}`);
|
|
4196
|
+
}
|
|
4197
|
+
const cells = notebook.cells || [];
|
|
4198
|
+
const outputLines = [];
|
|
4199
|
+
for (let i = 0; i < cells.length; i++) {
|
|
4200
|
+
const cell = cells[i];
|
|
4201
|
+
const cellNum = i + 1;
|
|
4202
|
+
const cellType = cell.cell_type || "unknown";
|
|
4203
|
+
outputLines.push(`--- Cell ${cellNum} (${cellType}) ---`);
|
|
4204
|
+
const source = Array.isArray(cell.source) ? cell.source.join("") : cell.source || "";
|
|
4205
|
+
outputLines.push(...source.split("\n"));
|
|
4206
|
+
if (cell.outputs && cell.outputs.length > 0) {
|
|
4207
|
+
outputLines.push("--- Output ---");
|
|
4208
|
+
for (const output of cell.outputs) if (output.text) {
|
|
4209
|
+
const text = Array.isArray(output.text) ? output.text.join("") : output.text;
|
|
4210
|
+
outputLines.push(...text.split("\n"));
|
|
4211
|
+
} else if (output.data) {
|
|
4212
|
+
if (output.data["text/plain"]) {
|
|
4213
|
+
const text = Array.isArray(output.data["text/plain"]) ? output.data["text/plain"].join("") : output.data["text/plain"];
|
|
4214
|
+
outputLines.push(...text.split("\n"));
|
|
4215
|
+
}
|
|
4216
|
+
}
|
|
4217
|
+
}
|
|
4218
|
+
outputLines.push("");
|
|
4219
|
+
}
|
|
4220
|
+
return this.formatOutput(outputLines, fileSize, offset, limit);
|
|
4221
|
+
}
|
|
4222
|
+
/**
|
|
4223
|
+
* Handle text file
|
|
4224
|
+
*/
|
|
4225
|
+
async handleTextFile(filePath, fileSize, offset, limit) {
|
|
4226
|
+
const content = await (0, node_fs_promises.readFile)(filePath, "utf-8");
|
|
4227
|
+
if (content.length === 0) return {
|
|
4228
|
+
content: "[File is empty]",
|
|
4229
|
+
totalLines: 0,
|
|
4230
|
+
linesReturned: 0,
|
|
4231
|
+
startLine: 1,
|
|
4232
|
+
truncated: false,
|
|
4233
|
+
fileSize,
|
|
4234
|
+
isBinary: false
|
|
4235
|
+
};
|
|
4236
|
+
const lines = content.split("\n");
|
|
4237
|
+
return this.formatOutput(lines, fileSize, offset, limit);
|
|
4238
|
+
}
|
|
4239
|
+
/**
|
|
4240
|
+
* Format output with line numbers and apply offset/limit
|
|
4241
|
+
*/
|
|
4242
|
+
formatOutput(lines, fileSize, offset, limit) {
|
|
4243
|
+
const totalLines = lines.length;
|
|
4244
|
+
const startLine = offset ?? 1;
|
|
4245
|
+
const lineLimit = limit ?? DEFAULT_LINE_LIMIT;
|
|
4246
|
+
const startIndex = startLine - 1;
|
|
4247
|
+
const endIndex = Math.min(startIndex + lineLimit, totalLines);
|
|
4248
|
+
const selectedLines = lines.slice(startIndex, endIndex);
|
|
4249
|
+
let truncated = false;
|
|
4250
|
+
return {
|
|
4251
|
+
content: selectedLines.map((line, idx) => {
|
|
4252
|
+
const lineNum = startLine + idx;
|
|
4253
|
+
const lineNumStr = String(lineNum).padStart(6, " ");
|
|
4254
|
+
let displayLine = line;
|
|
4255
|
+
if (line.length > MAX_LINE_LENGTH) {
|
|
4256
|
+
displayLine = `${line.slice(0, MAX_LINE_LENGTH)}... [truncated]`;
|
|
4257
|
+
truncated = true;
|
|
4258
|
+
}
|
|
4259
|
+
return `${lineNumStr}|${displayLine}`;
|
|
4260
|
+
}).join("\n"),
|
|
4261
|
+
totalLines,
|
|
4262
|
+
linesReturned: selectedLines.length,
|
|
4263
|
+
startLine,
|
|
4264
|
+
truncated,
|
|
4265
|
+
fileSize,
|
|
4266
|
+
isBinary: false
|
|
4267
|
+
};
|
|
4268
|
+
}
|
|
4269
|
+
/**
|
|
4270
|
+
* Format file size for display
|
|
4271
|
+
*/
|
|
4272
|
+
formatSize(bytes) {
|
|
4273
|
+
const units = [
|
|
4274
|
+
"B",
|
|
4275
|
+
"KB",
|
|
4276
|
+
"MB",
|
|
4277
|
+
"GB"
|
|
4278
|
+
];
|
|
4279
|
+
let size = bytes;
|
|
4280
|
+
let unitIndex = 0;
|
|
4281
|
+
while (size >= 1024 && unitIndex < units.length - 1) {
|
|
4282
|
+
size /= 1024;
|
|
4283
|
+
unitIndex++;
|
|
4284
|
+
}
|
|
4285
|
+
return `${size.toFixed(unitIndex === 0 ? 0 : 2)} ${units[unitIndex]}`;
|
|
4286
|
+
}
|
|
4287
|
+
};
|
|
4288
|
+
|
|
4289
|
+
//#endregion
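/*
 * Usage sketch for the Read tool above (illustration only): execute() resolves file_path against
 * cwd, enforces allowedDirectory when one is configured (the constructor reads options.allowedDirectory),
 * and returns an MCP-style result whose structuredContent carries totalLines, linesReturned, startLine,
 * truncated, fileSize and isBinary. The "goatchain" module specifier below is an assumption about the
 * package name; the field names come from the code above.
 *
 * ```typescript
 * import { ReadTool } from "goatchain";
 *
 * const readTool = new ReadTool({ cwd: "/app", allowedDirectory: "/app/output" });
 * const result = await readTool.execute({ file_path: "/app/output/report.md", limit: 200 });
 * const info = result.structuredContent as { totalLines: number; truncated: boolean };
 * if (info.truncated) console.log(`showing a window of ${info.totalLines} total lines`);
 * ```
 */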
|
|
4290
|
+
//#region src/tool/builtin/edit.ts
|
|
4291
|
+
/**
|
|
4292
|
+
* Tool for performing exact string replacements in files.
|
|
4293
|
+
*
|
|
4294
|
+
* This tool finds and replaces text in files with careful handling
|
|
4295
|
+
* of unique matches and the option to replace all occurrences.
|
|
4296
|
+
*
|
|
4297
|
+
* @example
|
|
4298
|
+
* ```typescript
|
|
4299
|
+
* const editTool = new EditTool()
|
|
4300
|
+
* const result = await editTool.execute({
|
|
4301
|
+
* file_path: '/path/to/file.ts',
|
|
4302
|
+
* old_string: 'const foo = 1',
|
|
4303
|
+
* new_string: 'const foo = 2'
|
|
4304
|
+
* })
|
|
4305
|
+
* ```
|
|
4306
|
+
*/
|
|
4307
|
+
var EditTool = class extends BaseTool {
|
|
4308
|
+
name = "Edit";
|
|
4309
|
+
riskLevel = "high";
|
|
4310
|
+
description = `Performs exact string replacements in files.
|
|
4311
|
+
|
|
4312
|
+
Usage notes:
|
|
4313
|
+
- When editing, preserve the exact indentation (tabs/spaces) from the original file
|
|
4314
|
+
- The edit will FAIL if old_string is not unique in the file unless replace_all is true
|
|
4315
|
+
- Use replace_all for replacing/renaming strings across the entire file
|
|
4316
|
+
- old_string and new_string must be different
|
|
4317
|
+
- ALWAYS prefer editing existing files over creating new ones`;
|
|
4318
|
+
parameters = {
|
|
4319
|
+
type: "object",
|
|
4320
|
+
properties: {
|
|
4321
|
+
file_path: {
|
|
4322
|
+
type: "string",
|
|
4323
|
+
description: "The absolute path to the file to modify"
|
|
4324
|
+
},
|
|
4325
|
+
old_string: {
|
|
4326
|
+
type: "string",
|
|
4327
|
+
description: "The text to replace"
|
|
4328
|
+
},
|
|
4329
|
+
new_string: {
|
|
4330
|
+
type: "string",
|
|
4331
|
+
description: "The text to replace it with (must be different from old_string)"
|
|
4332
|
+
},
|
|
4333
|
+
replace_all: {
|
|
4334
|
+
type: "boolean",
|
|
4335
|
+
description: "Replace all occurrences of old_string (default false)"
|
|
4336
|
+
}
|
|
4337
|
+
},
|
|
4338
|
+
required: [
|
|
4339
|
+
"file_path",
|
|
4340
|
+
"old_string",
|
|
4341
|
+
"new_string"
|
|
4342
|
+
]
|
|
4343
|
+
};
|
|
4344
|
+
/** Current working directory for resolving relative paths */
|
|
4345
|
+
cwd;
|
|
4346
|
+
constructor(options) {
|
|
4347
|
+
super();
|
|
4348
|
+
this.cwd = options?.cwd ?? node_process.default.cwd();
|
|
4349
|
+
}
|
|
4350
|
+
/**
|
|
4351
|
+
* Set the current working directory
|
|
4352
|
+
*/
|
|
4353
|
+
setCwd(cwd) {
|
|
4354
|
+
this.cwd = cwd;
|
|
4355
|
+
}
|
|
4356
|
+
/**
|
|
4357
|
+
* Get the current working directory
|
|
4358
|
+
*/
|
|
4359
|
+
getCwd() {
|
|
4360
|
+
return this.cwd;
|
|
4361
|
+
}
|
|
4362
|
+
/**
|
|
4363
|
+
* Execute file edit
|
|
4364
|
+
*
|
|
4365
|
+
* @param args - Edit arguments
|
|
4366
|
+
* @returns MCP-compliant CallToolResult with edit details
|
|
4367
|
+
*/
|
|
4368
|
+
async execute(args) {
|
|
4369
|
+
const { file_path, old_string, new_string, replace_all } = this.validateArgs(args);
|
|
4370
|
+
const filePath = node_path.default.isAbsolute(file_path) ? file_path : node_path.default.resolve(this.cwd, file_path);
|
|
4371
|
+
try {
|
|
4372
|
+
if ((await (0, node_fs_promises.stat)(filePath)).isDirectory()) return errorContent(`Path is a directory, not a file: ${filePath}`);
|
|
4373
|
+
} catch (error) {
|
|
4374
|
+
if (error.code === "ENOENT") return errorContent(`File not found: ${filePath}`);
|
|
4375
|
+
throw error;
|
|
4376
|
+
}
|
|
4377
|
+
const content = await (0, node_fs_promises.readFile)(filePath, "utf-8");
|
|
4378
|
+
const occurrences = this.countOccurrences(content, old_string);
|
|
4379
|
+
if (occurrences === 0) return errorContent(`old_string not found in file: ${filePath}\n\nSearched for:\n${this.truncateForError(old_string)}`);
|
|
4380
|
+
if (occurrences > 1 && !replace_all) return errorContent(`old_string is not unique in the file (found ${occurrences} occurrences). Either provide more context to make it unique, or set replace_all: true to replace all occurrences.`);
|
|
4381
|
+
let newContent;
|
|
4382
|
+
let replacements;
|
|
4383
|
+
if (replace_all) {
|
|
4384
|
+
newContent = content.split(old_string).join(new_string);
|
|
4385
|
+
replacements = occurrences;
|
|
4386
|
+
} else {
|
|
4387
|
+
const index = content.indexOf(old_string);
|
|
4388
|
+
newContent = content.slice(0, index) + new_string + content.slice(index + old_string.length);
|
|
4389
|
+
replacements = 1;
|
|
4390
|
+
}
|
|
4391
|
+
await (0, node_fs_promises.writeFile)(filePath, newContent, "utf-8");
|
|
4392
|
+
const result = {
|
|
4393
|
+
success: true,
|
|
4394
|
+
replacements,
|
|
4395
|
+
filePath,
|
|
4396
|
+
message: `Successfully replaced ${replacements} occurrence${replacements > 1 ? "s" : ""} in ${node_path.default.basename(filePath)}`
|
|
4397
|
+
};
|
|
4398
|
+
return {
|
|
4399
|
+
content: [{
|
|
4400
|
+
type: "text",
|
|
4401
|
+
text: result.message
|
|
4402
|
+
}],
|
|
4403
|
+
structuredContent: result
|
|
4404
|
+
};
|
|
4405
|
+
}
|
|
4406
|
+
/**
|
|
4407
|
+
* Validate and parse arguments
|
|
4408
|
+
*/
|
|
4409
|
+
validateArgs(args) {
|
|
4410
|
+
const filePath = args.file_path;
|
|
4411
|
+
const oldString = args.old_string;
|
|
4412
|
+
const newString = args.new_string;
|
|
4413
|
+
if (typeof filePath !== "string" || !filePath.trim()) throw new Error("file_path is required and must be a non-empty string");
|
|
4414
|
+
if (typeof oldString !== "string") throw new TypeError("old_string is required and must be a string");
|
|
4415
|
+
if (oldString === "") throw new Error("old_string cannot be empty");
|
|
4416
|
+
if (typeof newString !== "string") throw new TypeError("new_string is required and must be a string");
|
|
4417
|
+
if (oldString === newString) throw new Error("new_string must be different from old_string");
|
|
4418
|
+
return {
|
|
4419
|
+
file_path: filePath.trim(),
|
|
4420
|
+
old_string: oldString,
|
|
4421
|
+
new_string: newString,
|
|
4422
|
+
replace_all: args.replace_all === true
|
|
4423
|
+
};
|
|
4424
|
+
}
|
|
4425
|
+
/**
|
|
4426
|
+
* Count occurrences of a substring in a string
|
|
4427
|
+
*/
|
|
4428
|
+
countOccurrences(str, substr) {
|
|
4429
|
+
let count = 0;
|
|
4430
|
+
let pos = str.indexOf(substr);
|
|
4431
|
+
while (pos !== -1) {
|
|
4432
|
+
count++;
|
|
4433
|
+
pos = str.indexOf(substr, pos + substr.length);
|
|
4434
|
+
}
|
|
4435
|
+
return count;
|
|
4436
|
+
}
|
|
4437
|
+
/**
|
|
4438
|
+
* Truncate string for error messages
|
|
4439
|
+
*/
|
|
4440
|
+
truncateForError(str, maxLength = 200) {
|
|
4441
|
+
if (str.length <= maxLength) return str;
|
|
4442
|
+
return `${str.slice(0, maxLength)}... [truncated, ${str.length} chars total]`;
|
|
4443
|
+
}
|
|
4444
|
+
};
|
|
4445
|
+
|
|
4446
|
+
//#endregion
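/*
 * Usage sketch for the Edit tool above (illustration only): matching is exact-substring, never a
 * regex, and an ambiguous edit is refused - if old_string occurs more than once the tool returns
 * an error result unless replace_all is true. The "goatchain" module specifier is an assumption.
 *
 * ```typescript
 * import { EditTool } from "goatchain";
 *
 * const editTool = new EditTool({ cwd: "/app" });
 * // Returns a "not unique" error result if `foo` appears twice; add context or opt in to replace_all.
 * await editTool.execute({ file_path: "/app/src/index.ts", old_string: "foo", new_string: "bar" });
 * await editTool.execute({ file_path: "/app/src/index.ts", old_string: "foo", new_string: "bar", replace_all: true });
 * ```
 */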
|
|
4447
|
+
//#region src/tool/builtin/write.ts
|
|
4448
|
+
/**
|
|
4449
|
+
* Tool for writing files to the filesystem.
|
|
4450
|
+
*
|
|
4451
|
+
* This tool creates or overwrites files with the specified content,
|
|
4452
|
+
* automatically creating parent directories if needed.
|
|
4453
|
+
*
|
|
4454
|
+
* @example
|
|
4455
|
+
* ```typescript
|
|
4456
|
+
* const writeTool = new WriteTool()
|
|
4457
|
+
* const result = await writeTool.execute({
|
|
4458
|
+
* file_path: '/path/to/file.ts',
|
|
4459
|
+
* content: 'export const foo = 1'
|
|
4460
|
+
* })
|
|
4461
|
+
* ```
|
|
4462
|
+
*
|
|
4463
|
+
* @example Restrict writes to a specific directory
|
|
4464
|
+
* ```typescript
|
|
4465
|
+
* const writeTool = new WriteTool({
|
|
4466
|
+
* cwd: '/app/output',
|
|
4467
|
+
* restrictToDirectory: true
|
|
4468
|
+
* })
|
|
4469
|
+
* // All paths will be resolved relative to /app/output
|
|
4470
|
+
* // Absolute paths and path traversal (../) will be blocked
|
|
4471
|
+
* ```
|
|
4472
|
+
*/
|
|
4473
|
+
var WriteTool = class extends BaseTool {
|
|
4474
|
+
name = "Write";
|
|
4475
|
+
riskLevel = "high";
|
|
4476
|
+
/** Current working directory for resolving relative paths */
|
|
4477
|
+
_cwd;
|
|
4478
|
+
/** Allowed directory for file operations (if set, restricts writes to this directory) */
|
|
4479
|
+
_allowedDirectory;
|
|
4480
|
+
constructor(options) {
|
|
4481
|
+
super();
|
|
4482
|
+
this._cwd = options?.cwd ?? node_process.default.cwd();
|
|
4483
|
+
this._allowedDirectory = options?.allowedDirectory;
|
|
4484
|
+
}
|
|
4485
|
+
/**
|
|
4486
|
+
* Dynamic description that includes allowed directory info if configured
|
|
4487
|
+
*/
|
|
4488
|
+
get description() {
|
|
4489
|
+
const baseDescription = `Writes a file to the local filesystem.
|
|
4490
|
+
|
|
4491
|
+
Usage notes:
|
|
4492
|
+
- This tool will overwrite the existing file if there is one at the provided path
|
|
4493
|
+
- Parent directories will be created automatically if they don't exist
|
|
4494
|
+
- ALWAYS prefer editing existing files over writing new ones
|
|
4495
|
+
- NEVER proactively create documentation files (*.md) or README files unless explicitly requested`;
|
|
4496
|
+
if (this._allowedDirectory) return `${baseDescription}
|
|
4497
|
+
- IMPORTANT: Files can ONLY be written within: ${this._allowedDirectory}
|
|
4498
|
+
- Use absolute paths starting with ${this._allowedDirectory}/ (e.g., ${this._allowedDirectory}/filename.html)`;
|
|
4499
|
+
return baseDescription;
|
|
4500
|
+
}
|
|
4501
|
+
/**
|
|
4502
|
+
* Dynamic parameters that include allowed directory info if configured
|
|
4503
|
+
*/
|
|
4504
|
+
get parameters() {
|
|
4505
|
+
return {
|
|
4506
|
+
type: "object",
|
|
4507
|
+
properties: {
|
|
4508
|
+
file_path: {
|
|
4509
|
+
type: "string",
|
|
4510
|
+
description: this._allowedDirectory ? `The absolute path to the file to write (must be within ${this._allowedDirectory})` : "The absolute path to the file to write (must be absolute, not relative)"
|
|
4511
|
+
},
|
|
4512
|
+
content: {
|
|
4513
|
+
type: "string",
|
|
4514
|
+
description: "The content to write to the file"
|
|
4515
|
+
}
|
|
4516
|
+
},
|
|
4517
|
+
required: ["file_path", "content"]
|
|
4518
|
+
};
|
|
4519
|
+
}
|
|
4520
|
+
/**
|
|
4521
|
+
* Set the current working directory
|
|
4522
|
+
*/
|
|
4523
|
+
setCwd(cwd) {
|
|
4524
|
+
this._cwd = cwd;
|
|
4525
|
+
}
|
|
4526
|
+
/**
|
|
4527
|
+
* Get the current working directory
|
|
4528
|
+
*/
|
|
4529
|
+
getCwd() {
|
|
4530
|
+
return this._cwd;
|
|
4531
|
+
}
|
|
4532
|
+
/**
|
|
4533
|
+
* Set the allowed directory for file operations
|
|
4534
|
+
*/
|
|
4535
|
+
setAllowedDirectory(dir) {
|
|
4536
|
+
this._allowedDirectory = dir;
|
|
4537
|
+
}
|
|
4538
|
+
/**
|
|
4539
|
+
* Get the allowed directory for file operations
|
|
4540
|
+
*/
|
|
4541
|
+
getAllowedDirectory() {
|
|
4542
|
+
return this._allowedDirectory;
|
|
4543
|
+
}
|
|
4544
|
+
/**
|
|
4545
|
+
* Execute file write
|
|
4546
|
+
*
|
|
4547
|
+
* @param args - Write arguments
|
|
4548
|
+
* @returns MCP-compliant CallToolResult with write details
|
|
4549
|
+
*/
|
|
4550
|
+
async execute(args) {
|
|
4551
|
+
const { file_path, content } = this.validateArgs(args);
|
|
4552
|
+
const filePath = node_path.default.isAbsolute(file_path) ? file_path : node_path.default.resolve(this._cwd, file_path);
|
|
4553
|
+
if (this._allowedDirectory) {
|
|
4554
|
+
const normalizedAllowed = node_path.default.resolve(this._allowedDirectory);
|
|
4555
|
+
const normalizedFilePath = node_path.default.resolve(filePath);
|
|
4556
|
+
if (!normalizedFilePath.startsWith(normalizedAllowed + node_path.default.sep) && normalizedFilePath !== normalizedAllowed) return errorContent(`Access denied: ${file_path}\nFiles can only be written within: ${this._allowedDirectory}\nPlease use a path like: ${this._allowedDirectory}/<filename>`);
|
|
4557
|
+
}
|
|
4558
|
+
let overwritten = false;
|
|
4559
|
+
try {
|
|
4560
|
+
if ((await (0, node_fs_promises.stat)(filePath)).isDirectory()) return errorContent(`Path is a directory, not a file: ${filePath}`);
|
|
4561
|
+
overwritten = true;
|
|
4562
|
+
} catch (error) {
|
|
4563
|
+
if (error.code !== "ENOENT") throw error;
|
|
4564
|
+
}
|
|
4565
|
+
await (0, node_fs_promises.mkdir)(node_path.default.dirname(filePath), { recursive: true });
|
|
4566
|
+
await (0, node_fs_promises.writeFile)(filePath, content, "utf-8");
|
|
4567
|
+
const bytesWritten = Buffer.byteLength(content, "utf-8");
|
|
4568
|
+
const result = {
|
|
4569
|
+
success: true,
|
|
4570
|
+
filePath,
|
|
4571
|
+
bytesWritten,
|
|
4572
|
+
overwritten,
|
|
4573
|
+
message: overwritten ? `Successfully overwrote ${node_path.default.basename(filePath)} (${this.formatSize(bytesWritten)})` : `Successfully created ${node_path.default.basename(filePath)} (${this.formatSize(bytesWritten)})`
|
|
4574
|
+
};
|
|
4575
|
+
return {
|
|
4576
|
+
content: [{
|
|
4577
|
+
type: "text",
|
|
4578
|
+
text: result.message
|
|
4579
|
+
}],
|
|
4580
|
+
structuredContent: result
|
|
4581
|
+
};
|
|
4582
|
+
}
|
|
4583
|
+
/**
|
|
4584
|
+
* Validate and parse arguments
|
|
4585
|
+
*/
|
|
4586
|
+
validateArgs(args) {
|
|
4587
|
+
const filePath = args.file_path;
|
|
4588
|
+
const content = args.content;
|
|
4589
|
+
if (typeof filePath !== "string" || !filePath.trim()) throw new Error("file_path is required and must be a non-empty string");
|
|
4590
|
+
if (typeof content !== "string") throw new TypeError("content is required and must be a string");
|
|
4591
|
+
return {
|
|
4592
|
+
file_path: filePath.trim(),
|
|
4593
|
+
content
|
|
4594
|
+
};
|
|
4595
|
+
}
|
|
4596
|
+
/**
|
|
4597
|
+
* Format file size for display
|
|
4598
|
+
*/
|
|
4599
|
+
formatSize(bytes) {
|
|
4600
|
+
const units = [
|
|
4601
|
+
"B",
|
|
4602
|
+
"KB",
|
|
4603
|
+
"MB",
|
|
4604
|
+
"GB"
|
|
4605
|
+
];
|
|
4606
|
+
let size = bytes;
|
|
4607
|
+
let unitIndex = 0;
|
|
4608
|
+
while (size >= 1024 && unitIndex < units.length - 1) {
|
|
4609
|
+
size /= 1024;
|
|
4610
|
+
unitIndex++;
|
|
4611
|
+
}
|
|
4612
|
+
return `${size.toFixed(unitIndex === 0 ? 0 : 2)} ${units[unitIndex]}`;
|
|
4613
|
+
}
|
|
4614
|
+
};
|
|
4615
|
+
|
|
4616
|
+
//#endregion
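/*
 * Usage sketch for the Write tool above (illustration only): parent directories are created,
 * the result reports whether an existing file was overwritten, and when allowedDirectory is set
 * any resolved path outside it is rejected. The "goatchain" module specifier is an assumption.
 *
 * ```typescript
 * import { WriteTool } from "goatchain";
 *
 * const writeTool = new WriteTool({ cwd: "/app/output", allowedDirectory: "/app/output" });
 * await writeTool.execute({ file_path: "/app/output/report.html", content: "<h1>ok</h1>" });
 * // Resolves to /app/secrets.txt, outside /app/output, so this returns "Access denied" error content.
 * await writeTool.execute({ file_path: "../secrets.txt", content: "nope" });
 * ```
 */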
|
|
4617
|
+
//#region src/tool/builtin/webSearch.ts
|
|
4618
|
+
/**
|
|
4619
|
+
* Tool for searching the web using Serper API.
|
|
4620
|
+
*
|
|
4621
|
+
* This tool allows the agent to search the web and use the results
|
|
4622
|
+
* to inform responses with up-to-date information.
|
|
4623
|
+
*
|
|
4624
|
+
* @example
|
|
4625
|
+
* ```typescript
|
|
4626
|
+
* const webSearchTool = new WebSearchTool({ apiKey: 'your-serper-api-key' })
|
|
4627
|
+
* const result = await webSearchTool.execute({
|
|
4628
|
+
* query: 'latest TypeScript features 2025'
|
|
4629
|
+
* })
|
|
4630
|
+
* ```
|
|
4631
|
+
*/
|
|
4632
|
+
var WebSearchTool = class extends BaseTool {
|
|
4633
|
+
name = "WebSearch";
|
|
4634
|
+
description = `Allows the agent to search the web and use the results to inform responses.
|
|
4635
|
+
|
|
4636
|
+
Usage notes:
|
|
4637
|
+
- Provides up-to-date information for current events and recent data
|
|
4638
|
+
- Returns search results with titles, links, and snippets
|
|
4639
|
+
- Use this tool for accessing information beyond the knowledge cutoff
|
|
4640
|
+
- After answering, include a "Sources:" section with relevant URLs as markdown hyperlinks`;
|
|
4641
|
+
parameters = {
|
|
4642
|
+
type: "object",
|
|
4643
|
+
properties: { query: {
|
|
4644
|
+
type: "string",
|
|
4645
|
+
minLength: 2,
|
|
4646
|
+
description: "The search query to use"
|
|
4647
|
+
} },
|
|
4648
|
+
required: ["query"]
|
|
4649
|
+
};
|
|
4650
|
+
/** Serper API key */
|
|
4651
|
+
apiKey;
|
|
4652
|
+
/** Serper API endpoint */
|
|
4653
|
+
apiEndpoint;
|
|
4654
|
+
/** Number of results to return */
|
|
4655
|
+
numResults;
|
|
4656
|
+
constructor(options) {
|
|
4657
|
+
super();
|
|
4658
|
+
this.apiKey = options?.apiKey ?? node_process.default.env.SERPER_API_KEY ?? "";
|
|
4659
|
+
this.apiEndpoint = options?.apiEndpoint ?? "https://google.serper.dev/search?format=json";
|
|
4660
|
+
this.numResults = options?.numResults ?? 10;
|
|
4661
|
+
}
|
|
4662
|
+
/**
|
|
4663
|
+
* Set API key
|
|
4664
|
+
*/
|
|
4665
|
+
setApiKey(apiKey) {
|
|
4666
|
+
this.apiKey = apiKey;
|
|
4667
|
+
}
|
|
4668
|
+
/**
|
|
4669
|
+
* Execute web search
|
|
4670
|
+
*
|
|
4671
|
+
* @param args - WebSearch arguments
|
|
4672
|
+
* @returns MCP-compliant CallToolResult with search results
|
|
4673
|
+
*/
|
|
4674
|
+
async execute(args) {
|
|
4675
|
+
const { query } = this.validateArgs(args);
|
|
4676
|
+
if (!this.apiKey) return errorContent("Serper API key is not configured. Set SERPER_API_KEY environment variable or pass apiKey in constructor.");
|
|
4677
|
+
try {
|
|
4678
|
+
const response = await fetch(this.apiEndpoint, {
|
|
4679
|
+
method: "POST",
|
|
4680
|
+
headers: {
|
|
4681
|
+
"X-API-KEY": this.apiKey,
|
|
4682
|
+
"Content-Type": "application/json"
|
|
4683
|
+
},
|
|
4684
|
+
body: JSON.stringify({
|
|
4685
|
+
q: query,
|
|
4686
|
+
num: this.numResults
|
|
4687
|
+
})
|
|
4688
|
+
});
|
|
4689
|
+
if (!response.ok) {
|
|
4690
|
+
const errorText = await response.text();
|
|
4691
|
+
return errorContent(`Serper API error (${response.status}): ${errorText}`);
|
|
4692
|
+
}
|
|
4693
|
+
const results = ((await response.json()).organic ?? []).map((item, index) => ({
|
|
4694
|
+
title: item.title,
|
|
4695
|
+
link: item.link,
|
|
4696
|
+
snippet: item.snippet,
|
|
4697
|
+
position: item.position ?? index + 1
|
|
4698
|
+
}));
|
|
4699
|
+
const markdown = this.formatMarkdown(query, results);
|
|
4700
|
+
const result = {
|
|
4701
|
+
success: true,
|
|
4702
|
+
query,
|
|
4703
|
+
results,
|
|
4704
|
+
totalResults: results.length,
|
|
4705
|
+
markdown
|
|
4706
|
+
};
|
|
4707
|
+
return {
|
|
4708
|
+
content: [{
|
|
4709
|
+
type: "text",
|
|
4710
|
+
text: markdown
|
|
4711
|
+
}],
|
|
4712
|
+
structuredContent: result
|
|
4713
|
+
};
|
|
4714
|
+
} catch (error) {
|
|
4715
|
+
return errorContent(`Failed to execute search: ${error instanceof Error ? error.message : String(error)}`);
|
|
4716
|
+
}
|
|
4717
|
+
}
|
|
4718
|
+
/**
|
|
4719
|
+
* Validate and parse arguments
|
|
4720
|
+
*/
|
|
4721
|
+
validateArgs(args) {
|
|
4722
|
+
const query = args.query;
|
|
4723
|
+
if (typeof query !== "string" || query.trim().length < 2) throw new Error("query is required and must be at least 2 characters");
|
|
4724
|
+
return { query: query.trim() };
|
|
4725
|
+
}
|
|
4726
|
+
/**
|
|
4727
|
+
* Format search results as markdown
|
|
4728
|
+
*/
|
|
4729
|
+
formatMarkdown(query, results) {
|
|
4730
|
+
if (results.length === 0) return `No results found for: "${query}"`;
|
|
4731
|
+
const lines = [`## Search Results for: "${query}"`, ""];
|
|
4732
|
+
for (const result of results) {
|
|
4733
|
+
lines.push(`### ${result.position}. [${result.title}](${result.link})`);
|
|
4734
|
+
lines.push("");
|
|
4735
|
+
lines.push(result.snippet);
|
|
4736
|
+
lines.push("");
|
|
4737
|
+
}
|
|
4738
|
+
lines.push("---");
|
|
4739
|
+
lines.push("");
|
|
4740
|
+
lines.push("**Sources:**");
|
|
4741
|
+
for (const result of results) lines.push(`- [${result.title}](${result.link})`);
|
|
4742
|
+
return lines.join("\n");
|
|
4743
|
+
}
|
|
4744
|
+
};
|
|
4745
|
+
|
|
4746
|
+
//#endregion
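/*
 * Usage sketch for the WebSearch tool above (illustration only): the tool POSTs { q, num } to the
 * Serper endpoint with an X-API-KEY header; the key comes from the constructor or the SERPER_API_KEY
 * environment variable, and the text content is the formatted markdown ending in a "Sources:" list.
 * The "goatchain" module specifier is an assumption.
 *
 * ```typescript
 * import { WebSearchTool } from "goatchain";
 *
 * const webSearch = new WebSearchTool({ numResults: 5 }); // falls back to process.env.SERPER_API_KEY
 * const result = await webSearch.execute({ query: "node 22 lts release date" });
 * console.log(result.content[0]); // { type: "text", text: "## Search Results for: ..." }
 * ```
 */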
|
|
4747
|
+
//#region src/tool/registry.ts
|
|
4748
|
+
/**
|
|
4749
|
+
* Registry for managing tools.
|
|
4750
|
+
*
|
|
4751
|
+
* Provides methods to register, unregister, and look up tools,
|
|
4752
|
+
* as well as convert to OpenAI-compatible format.
|
|
4753
|
+
*/
|
|
4754
|
+
var ToolRegistry = class {
|
|
4755
|
+
tools = /* @__PURE__ */ new Map();
|
|
4756
|
+
/**
|
|
4757
|
+
* Register a tool
|
|
4758
|
+
*
|
|
4759
|
+
* @param tool - Tool to register
|
|
4760
|
+
* @throws Error if tool with same name already exists
|
|
4761
|
+
*/
|
|
4762
|
+
register(tool) {
|
|
4763
|
+
if (this.tools.has(tool.name)) throw new Error(`Tool "${tool.name}" already registered`);
|
|
4764
|
+
this.tools.set(tool.name, tool);
|
|
4765
|
+
}
|
|
4766
|
+
/**
|
|
4767
|
+
* Unregister a tool by name
|
|
4768
|
+
*
|
|
4769
|
+
* @param name - Name of tool to remove
|
|
4770
|
+
* @returns true if tool was found and removed
|
|
4771
|
+
*/
|
|
4772
|
+
unregister(name) {
|
|
4773
|
+
return this.tools.delete(name);
|
|
4774
|
+
}
|
|
4775
|
+
/**
|
|
4776
|
+
* Get a tool by name
|
|
4777
|
+
*
|
|
4778
|
+
* @param name - Tool name
|
|
4779
|
+
* @returns Tool instance or undefined if not found
|
|
4780
|
+
*/
|
|
4781
|
+
get(name) {
|
|
4782
|
+
return this.tools.get(name);
|
|
4783
|
+
}
|
|
4784
|
+
/**
|
|
4785
|
+
* List all registered tools
|
|
4786
|
+
*
|
|
4787
|
+
* @returns Array of all tools
|
|
4788
|
+
*/
|
|
4789
|
+
list() {
|
|
4790
|
+
return Array.from(this.tools.values());
|
|
4791
|
+
}
|
|
4792
|
+
/**
|
|
4793
|
+
* Check if a tool is registered
|
|
4794
|
+
*
|
|
4795
|
+
* @param name - Tool name
|
|
4796
|
+
* @returns true if tool exists
|
|
4797
|
+
*/
|
|
4798
|
+
has(name) {
|
|
4799
|
+
return this.tools.has(name);
|
|
4800
|
+
}
|
|
4801
|
+
/**
|
|
4802
|
+
* Get count of registered tools
|
|
4803
|
+
*/
|
|
4804
|
+
get size() {
|
|
4805
|
+
return this.tools.size;
|
|
4806
|
+
}
|
|
4807
|
+
/**
|
|
4808
|
+
* Convert all tools to OpenAI-compatible format
|
|
4809
|
+
*
|
|
4810
|
+
* @returns Array of tools in OpenAI function calling format
|
|
4811
|
+
*/
|
|
4812
|
+
toOpenAIFormat() {
|
|
4813
|
+
return this.list().map((tool) => ({
|
|
4814
|
+
type: "function",
|
|
4815
|
+
function: {
|
|
4816
|
+
name: tool.name,
|
|
4817
|
+
description: tool.description,
|
|
4818
|
+
parameters: tool.parameters
|
|
4819
|
+
}
|
|
4820
|
+
}));
|
|
4821
|
+
}
|
|
4822
|
+
};
|
|
4823
|
+
|
|
4824
|
+
//#endregion
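/*
 * Usage sketch for the registry above (illustration only): ToolRegistry enforces unique tool names
 * and toOpenAIFormat() flattens each registered tool into the OpenAI function-calling shape
 * ({ type: "function", function: { name, description, parameters } }). The "goatchain" module
 * specifier is an assumption.
 *
 * ```typescript
 * import { ReadTool, EditTool, ToolRegistry } from "goatchain";
 *
 * const registry = new ToolRegistry();
 * registry.register(new ReadTool());
 * registry.register(new EditTool());
 * // registry.register(new EditTool()); // would throw: Tool "Edit" already registered
 * const tools = registry.toOpenAIFormat(); // ready to pass as the `tools` array of a chat completion request
 * ```
 */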
|
|
4825
|
+
exports.Agent = Agent;
|
|
4826
|
+
exports.AgentAbortError = AgentAbortError;
|
|
4827
|
+
exports.AgentMaxIterationsError = AgentMaxIterationsError;
|
|
4828
|
+
exports.BaseModel = BaseModel;
|
|
4829
|
+
exports.BaseSession = BaseSession;
|
|
4830
|
+
exports.BaseSessionManager = BaseSessionManager;
|
|
4831
|
+
exports.BaseTool = BaseTool;
|
|
4832
|
+
exports.EditTool = EditTool;
|
|
4833
|
+
exports.FileCheckpointStore = FileStateStore;
|
|
4834
|
+
exports.FileStateStore = FileStateStore;
|
|
4835
|
+
exports.GlobTool = GlobTool;
|
|
4836
|
+
exports.GrepTool = GrepTool;
|
|
4837
|
+
exports.InMemoryCheckpointStore = InMemoryStateStore;
|
|
4838
|
+
exports.InMemoryModelHealth = InMemoryModelHealth;
|
|
4839
|
+
exports.InMemoryStateStore = InMemoryStateStore;
|
|
4840
|
+
exports.ModelError = ModelError;
|
|
4841
|
+
exports.ReadTool = ReadTool;
|
|
4842
|
+
exports.RetryPolicy = RetryPolicy;
|
|
4843
|
+
exports.StateKeys = StateKeys;
|
|
4844
|
+
exports.StateStore = StateStore;
|
|
4845
|
+
exports.ToolRegistry = ToolRegistry;
|
|
4846
|
+
exports.WebSearchTool = WebSearchTool;
|
|
4847
|
+
exports.WriteTool = WriteTool;
|
|
4848
|
+
exports.compose = compose;
|
|
4849
|
+
exports.compressSessionManually = compressSessionManually;
|
|
4850
|
+
exports.createContextCompressionMiddleware = createContextCompressionMiddleware;
|
|
4851
|
+
exports.createInitialLoopState = createInitialLoopState;
|
|
4852
|
+
exports.createInitialLoopStateFromMessages = createInitialLoopStateFromMessages;
|
|
4853
|
+
exports.createModel = createModel;
|
|
4854
|
+
exports.createOpenAIAdapter = createOpenAIAdapter;
|
|
4855
|
+
exports.ensureNotAborted = ensureNotAborted;
|
|
4856
|
+
exports.errorContent = errorContent;
|
|
4857
|
+
exports.fromLoopCheckpoint = fromLoopCheckpoint;
|
|
4858
|
+
exports.imageContent = imageContent;
|
|
4859
|
+
exports.textContent = textContent;
|
|
4860
|
+
exports.toLoopCheckpoint = toLoopCheckpoint;
|
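/*
 * Usage sketch (illustration only): this CommonJS bundle is consumed via require(), and the named
 * exports above are its public surface. A minimal consumer, assuming the package name "goatchain":
 *
 * ```typescript
 * const { ToolRegistry, ReadTool, WriteTool, WebSearchTool } = require("goatchain");
 *
 * const registry = new ToolRegistry();
 * for (const tool of [new ReadTool(), new WriteTool(), new WebSearchTool()]) registry.register(tool);
 * console.log(registry.size, registry.list().map((t) => t.name)); // 3 [ 'Read', 'Write', 'WebSearch' ]
 * ```
 */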