zidane 4.0.2 → 4.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +196 -614
- package/dist/agent-BoV5Twdl.d.ts +2347 -0
- package/dist/agent-BoV5Twdl.d.ts.map +1 -0
- package/dist/contexts-3Arvn7yR.js +321 -0
- package/dist/contexts-3Arvn7yR.js.map +1 -0
- package/dist/contexts.d.ts +2 -25
- package/dist/contexts.js +2 -10
- package/dist/errors-D1lhd6mX.js +118 -0
- package/dist/errors-D1lhd6mX.js.map +1 -0
- package/dist/index-28otmfLX.d.ts +400 -0
- package/dist/index-28otmfLX.d.ts.map +1 -0
- package/dist/index-BfSdALzk.d.ts +113 -0
- package/dist/index-BfSdALzk.d.ts.map +1 -0
- package/dist/index-DPsd0qwm.d.ts +254 -0
- package/dist/index-DPsd0qwm.d.ts.map +1 -0
- package/dist/index.d.ts +5 -95
- package/dist/index.js +141 -271
- package/dist/index.js.map +1 -0
- package/dist/interpolate-CukJwP2G.js +887 -0
- package/dist/interpolate-CukJwP2G.js.map +1 -0
- package/dist/mcp-8wClKY-3.js +771 -0
- package/dist/mcp-8wClKY-3.js.map +1 -0
- package/dist/mcp.d.ts +2 -4
- package/dist/mcp.js +2 -13
- package/dist/messages-z5Pq20p7.js +1020 -0
- package/dist/messages-z5Pq20p7.js.map +1 -0
- package/dist/presets-Cs7_CsMk.js +39 -0
- package/dist/presets-Cs7_CsMk.js.map +1 -0
- package/dist/presets.d.ts +2 -43
- package/dist/presets.js +2 -17
- package/dist/providers-CX-R-Oy-.js +969 -0
- package/dist/providers-CX-R-Oy-.js.map +1 -0
- package/dist/providers.d.ts +2 -4
- package/dist/providers.js +3 -23
- package/dist/session/sqlite.d.ts +7 -12
- package/dist/session/sqlite.d.ts.map +1 -0
- package/dist/session/sqlite.js +67 -79
- package/dist/session/sqlite.js.map +1 -0
- package/dist/session-Cn68UASv.js +440 -0
- package/dist/session-Cn68UASv.js.map +1 -0
- package/dist/session.d.ts +2 -4
- package/dist/session.js +3 -27
- package/dist/skills.d.ts +3 -322
- package/dist/skills.js +24 -47
- package/dist/skills.js.map +1 -0
- package/dist/stats-DoKUtF5T.js +58 -0
- package/dist/stats-DoKUtF5T.js.map +1 -0
- package/dist/tools-DpeWKzP1.js +3941 -0
- package/dist/tools-DpeWKzP1.js.map +1 -0
- package/dist/tools.d.ts +3 -95
- package/dist/tools.js +2 -40
- package/dist/tui.d.ts +533 -0
- package/dist/tui.d.ts.map +1 -0
- package/dist/tui.js +2004 -0
- package/dist/tui.js.map +1 -0
- package/dist/types-Bx_F8jet.js +39 -0
- package/dist/types-Bx_F8jet.js.map +1 -0
- package/dist/types.d.ts +4 -55
- package/dist/types.js +4 -28
- package/package.json +38 -4
- package/dist/agent-BAHrGtqu.d.ts +0 -2425
- package/dist/chunk-4ILGBQ23.js +0 -803
- package/dist/chunk-4LPBN547.js +0 -3540
- package/dist/chunk-64LLNY7F.js +0 -28
- package/dist/chunk-6STZTA4N.js +0 -830
- package/dist/chunk-7GQ7P6DM.js +0 -566
- package/dist/chunk-IC7FT4OD.js +0 -37
- package/dist/chunk-JCOB6IYO.js +0 -22
- package/dist/chunk-JH6IAAFA.js +0 -28
- package/dist/chunk-LNN5UTS2.js +0 -97
- package/dist/chunk-PMCQOMV4.js +0 -490
- package/dist/chunk-UD25QF3H.js +0 -304
- package/dist/chunk-W57VY6DJ.js +0 -834
- package/dist/sandbox-D7v6Wy62.d.ts +0 -28
- package/dist/skills-use-DwZrNmcw.d.ts +0 -80
- package/dist/types-Bai5rKpa.d.ts +0 -89
- package/dist/validation-Pm--dQEU.d.ts +0 -185
|
@@ -0,0 +1,3941 @@
|
|
|
1
|
+
import { n as createProcessContext } from "./contexts-3Arvn7yR.js";
|
|
2
|
+
import { r as AgentProviderError, s as toTypedError, t as AgentAbortedError } from "./errors-D1lhd6mX.js";
|
|
3
|
+
import { t as toolOutputByteLength } from "./types-Bx_F8jet.js";
|
|
4
|
+
import { t as connectMcpServers } from "./mcp-8wClKY-3.js";
|
|
5
|
+
import { _ as validateResourcePath, b as createSkillActivationState, d as escapeXml, n as resolveSkills, p as installAllowedToolsGate, t as interpolateShellCommands, u as buildCatalog } from "./interpolate-CukJwP2G.js";
|
|
6
|
+
import { t as flattenTurns } from "./stats-DoKUtF5T.js";
|
|
7
|
+
import { createHooks } from "hookable";
|
|
8
|
+
import { stat } from "node:fs/promises";
|
|
9
|
+
import { resolve } from "node:path";
|
|
10
|
+
import { Buffer } from "node:buffer";
|
|
11
|
+
//#region src/aliasing.ts
|
|
12
|
+
/**
 * Build alias lookup maps from a `toolAliases` record.
 *
 * Validates that no two canonical names share one alias, and that an alias
 * never shadows another canonical tool name (unless that name is itself
 * remapped away). Entries whose canonical name is absent from
 * `canonicalNames` are skipped silently — preset/agent authors may declare
 * aliases for tools that only appear later (e.g. via MCP).
 *
 * @param aliases - The `toolAliases` map from the agent options (may be nullish).
 * @param canonicalNames - All tool canonical names currently in scope (agent + MCP).
 * @returns `{ aliasByCanonical, canonicalByAlias }` lookup maps.
 * @throws Error on an empty alias, an alias/canonical shadow, or an alias collision.
 */
function buildAliasMaps(aliases, canonicalNames) {
	const aliasByCanonical = new Map();
	const canonicalByAlias = new Map();
	if (!aliases) return { aliasByCanonical, canonicalByAlias };
	const canonicalSet = new Set(canonicalNames);
	// True when `canonical` itself has a non-trivial alias, i.e. its original
	// name is freed up and may safely be reused as someone else's alias.
	const isRemappedAway = (canonical) => {
		const mapped = aliases[canonical];
		return typeof mapped === "string" && mapped.length > 0 && mapped !== canonical;
	};
	for (const [canonical, alias] of Object.entries(aliases)) {
		if (typeof alias !== "string" || alias.length === 0) {
			throw new Error(`Tool alias for "${canonical}" must be a non-empty string`);
		}
		if (alias === canonical) continue; // identity mapping — nothing to record
		if (!canonicalSet.has(canonical)) continue; // tool not in scope (yet)
		if (canonicalSet.has(alias) && !isRemappedAway(alias)) {
			throw new Error(`Tool alias "${canonical}" -> "${alias}" collides with an existing canonical tool name`);
		}
		const existingCanonical = canonicalByAlias.get(alias);
		if (existingCanonical && existingCanonical !== canonical) {
			throw new Error(`Tool alias collision: both "${existingCanonical}" and "${canonical}" map to alias "${alias}"`);
		}
		aliasByCanonical.set(canonical, alias);
		canonicalByAlias.set(alias, canonical);
	}
	return { aliasByCanonical, canonicalByAlias };
}
|
|
52
|
+
/** Return the alias for a canonical name, falling back to the canonical name itself. */
function toWireName(canonical, maps) {
	const alias = maps.aliasByCanonical.get(canonical);
	return alias === undefined || alias === null ? canonical : alias;
}
|
|
56
|
+
/** Return the canonical name for a wire name, falling back to the wire name itself. */
function toCanonicalName(wire, maps) {
	const canonical = maps.canonicalByAlias.get(wire);
	if (canonical !== undefined && canonical !== null) return canonical;
	return wire;
}
|
|
60
|
+
/**
 * Rewrite `tool_call` block names in a content array from canonical → wire for
 * outbound messages sent to the provider. Non-destructive: returns a new array
 * (or the input by reference when no aliases are configured); untouched blocks
 * are carried over by reference.
 */
function rewriteContentToWire(content, maps) {
	if (maps.aliasByCanonical.size === 0) return content;
	const out = [];
	for (const block of content) {
		if (block.type === "tool_call") {
			const wire = maps.aliasByCanonical.get(block.name);
			if (wire && wire !== block.name) {
				out.push({ ...block, name: wire });
				continue;
			}
		}
		out.push(block);
	}
	return out;
}
|
|
76
|
+
/**
 * Rewrite `tool_call` block names in a content array from wire → canonical for
 * inbound messages received from the provider. Non-destructive; untouched
 * blocks pass through by reference.
 */
function rewriteContentToCanonical(content, maps) {
	if (maps.canonicalByAlias.size === 0) return content;
	return content.map((block) => {
		if (block.type !== "tool_call") return block;
		const canonical = maps.canonicalByAlias.get(block.name);
		const needsRewrite = Boolean(canonical) && canonical !== block.name;
		return needsRewrite ? { ...block, name: canonical } : block;
	});
}
|
|
92
|
+
/**
 * Rewrite every `SessionMessage.content` in an array from canonical → wire.
 * Returns new message objects — input messages are never mutated. When the
 * alias map is empty, the input array is returned by reference (no allocation).
 */
function rewriteMessagesToWire(messages, maps) {
	if (maps.aliasByCanonical.size === 0) return messages;
	const rewritten = [];
	for (const msg of messages) {
		rewritten.push({ ...msg, content: rewriteContentToWire(msg.content, maps) });
	}
	return rewritten;
}
|
|
104
|
+
//#endregion
|
|
105
|
+
//#region src/tools/read-state.ts
|
|
106
|
+
// Per-session read-state, keyed weakly so the state dies with the session object.
const STATE = /* @__PURE__ */ new WeakMap();
/**
 * Get or lazily create the per-session read-state map. Returns `undefined`
 * when no session is provided — tools should treat that as "no dedup, no
 * guard": the state has nowhere to live, so every read is fresh.
 */
function getReadState(session) {
	if (!session) return undefined;
	const existing = STATE.get(session);
	if (existing) return existing;
	const fresh = new Map();
	STATE.set(session, fresh);
	return fresh;
}
|
|
121
|
+
// Per-session tool-dedup state; WeakMap keys let GC reclaim it with the session.
const TOOL_DEDUP_STATE = /* @__PURE__ */ new WeakMap();
/**
 * Get or lazily create the per-session tool-dedup map. Returns `undefined`
 * when no session is provided — middleware should treat that as "no dedup".
 */
function getToolDedupState(session) {
	if (!session) return undefined;
	let map = TOOL_DEDUP_STATE.get(session);
	if (map === undefined) {
		map = new Map();
		TOOL_DEDUP_STATE.set(session, map);
	}
	return map;
}
|
|
135
|
+
/**
 * FNV-1a 32-bit hash, hex-encoded (8 lowercase hex digits). Fast and
 * non-cryptographic — only needs to avoid accidental matches between
 * different file contents within one session (~1 in 4 billion). Cheaper
 * than allocating a Buffer or pulling in `crypto`.
 */
function hashContent(text) {
	let h = 2166136261; // FNV-1a 32-bit offset basis (0x811c9dc5)
	for (let i = 0; i < text.length; i += 1) {
		h ^= text.charCodeAt(i);
		// Multiply by the FNV prime 16777619 via shift-adds, kept in uint32.
		h = (h + ((h << 1) + (h << 4) + (h << 7) + (h << 8) + (h << 24))) >>> 0;
	}
	return h.toString(16).padStart(8, "0");
}
|
|
149
|
+
//#endregion
|
|
150
|
+
//#region src/dedup-tools.ts
|
|
151
|
+
/**
 * Install the per-tool argument-dedup middleware on a hook bus.
 *
 * `getDedupTools` returns the resolved per-tool hashers (run override merged
 * with agent defaults). `getSession` does the same for the session-bound
 * state. Both are called lazily so handlers attached after install (e.g. via
 * MCP bootstrap completion) can take effect.
 *
 * Returns an `uninstall` fn — the agent calls this in `finally` so handlers
 * never leak across runs.
 */
function installDedupToolsGate(hooks, getDedupTools, getSession) {
	// Hashes recorded at gate time, keyed `callId::toolName`, consumed in tool:after.
	const pending = /* @__PURE__ */ new Map();
	function pendingKey(callId, name) {
		return `${callId}::${name}`;
	}
	// tool:gate handler — short-circuits the call by assigning `ctx.result`
	// when the tool was last invoked with an identical argument hash.
	function gateHandler(ctx) {
		// Another gate already blocked or answered this call — defer to it.
		if (ctx.block || ctx.result !== void 0) return;
		const hasher = getDedupTools()?.[ctx.name];
		if (!hasher) return; // tool not configured for dedup
		const state = getToolDedupState(getSession());
		if (!state) return; // no session → nowhere to remember results
		let hash;
		try {
			hash = hasher(ctx.input);
		} catch {
			// A throwing hasher opts this call out of dedup rather than failing the tool.
			return;
		}
		// Only non-empty string hashes participate; anything else means "don't dedup".
		if (typeof hash !== "string" || hash.length === 0) return;
		const prior = state.get(ctx.name);
		if (prior && prior.hash === hash) {
			// Replay the cached result; the tool body is skipped entirely.
			ctx.result = prior.result;
			return;
		}
		// Fresh args: remember the hash so afterHandler can cache the result.
		pending.set(pendingKey(ctx.callId, ctx.name), hash);
	}
	// tool:after handler — persists the fresh result under the hash recorded
	// at gate time. Only one entry per tool name is kept (latest wins).
	function afterHandler(ctx) {
		const key = pendingKey(ctx.callId, ctx.name);
		const hash = pending.get(key);
		if (hash === void 0) return; // call wasn't marked for caching
		pending.delete(key);
		const state = getToolDedupState(getSession());
		if (!state) return;
		state.set(ctx.name, {
			hash,
			result: ctx.result
		});
	}
	const unregisterGate = hooks.hook("tool:gate", gateHandler);
	const unregisterAfter = hooks.hook("tool:after", afterHandler);
	return function uninstall() {
		unregisterGate();
		unregisterAfter();
		pending.clear();
	};
}
|
|
206
|
+
//#endregion
|
|
207
|
+
//#region src/tools/validation.ts
|
|
208
|
+
/** String spellings `tryCoerce` accepts as boolean `true`. */
const TRUE_STRINGS = new Set(["true", "True", "TRUE", "1", "yes", "Yes", "YES"]);
/** String spellings `tryCoerce` accepts as boolean `false`. */
const FALSE_STRINGS = new Set(["false", "False", "FALSE", "0", "no", "No", "NO"]);
|
|
226
|
+
/**
 * Validate (and best-effort coerce) tool-call arguments against a
 * JSON-Schema-style object schema. Returns `{ valid: false, error }` on the
 * first failure, otherwise `{ valid: true, coercedInput, coercions }` where
 * `coercions` lists the keys whose values were type-coerced. The input object
 * is never mutated; `coercedInput` is the input itself when nothing changed.
 */
function validateToolArgs(input, schema) {
	const required = schema.required ?? [];
	const properties = schema.properties ?? {};
	for (const field of required) {
		const present = field in input && input[field] !== undefined && input[field] !== null;
		if (!present) {
			return { valid: false, error: `Missing required field: ${field}` };
		}
	}
	let coerced;
	const coercions = [];
	for (const [key, value] of Object.entries(input)) {
		const propSchema = properties[key];
		if (!propSchema?.type) continue; // untyped property — nothing to check
		if (value === undefined || value === null) continue;
		const outcome = coerceValue(value, propSchema);
		if (outcome.error) {
			return { valid: false, error: `Field "${key}": ${outcome.error}` };
		}
		if (outcome.changed) {
			coerced ??= { ...input }; // copy-on-first-write
			coerced[key] = outcome.value;
			coercions.push(key);
		}
	}
	return { valid: true, coercedInput: coerced ?? input, coercions };
}
|
|
256
|
+
/**
 * Coerce `value` toward one of the schema's declared types.
 *
 * Pass 1: if the value already matches a declared type, accept it unchanged
 * (subject to the enum check). Pass 2: attempt a coercion to each declared
 * type in order, again enum-checked. Returns `{ value, changed, error? }`.
 */
function coerceValue(value, schema) {
	const declaredTypes = Array.isArray(schema.type) ? schema.type : [schema.type];
	const enumError = (got) => ({
		value,
		changed: false,
		error: `must be one of ${JSON.stringify(schema.enum)}, got ${formatValue(got)}`
	});
	// Pass 1: value already has one of the declared types — no coercion needed.
	for (const t of declaredTypes) {
		if (!matchesType(value, t)) continue;
		if (schema.enum && !schema.enum.includes(value)) return enumError(value);
		return { value, changed: false };
	}
	// Pass 2: try coercing to each declared type in turn; first success wins.
	for (const t of declaredTypes) {
		const attempt = tryCoerce(value, t);
		if (!attempt.ok) continue;
		if (schema.enum && !schema.enum.includes(attempt.value)) return enumError(attempt.value);
		return { value: attempt.value, changed: true };
	}
	return {
		value,
		changed: false,
		error: `expected ${declaredTypes.join(" | ")}, got ${jsonType(value)} ${formatValue(value)}`
	};
}
|
|
289
|
+
/** Does `value` already satisfy the given JSON-Schema primitive type? Unknown types are permissive. */
function matchesType(value, type) {
	if (type === "string") return typeof value === "string";
	if (type === "number") return typeof value === "number" && Number.isFinite(value);
	if (type === "integer") return typeof value === "number" && Number.isInteger(value);
	if (type === "boolean") return typeof value === "boolean";
	if (type === "array") return Array.isArray(value);
	if (type === "object") return value !== null && typeof value === "object" && !Array.isArray(value);
	if (type === "null") return value === null;
	return true; // unrecognized type keyword — accept by design
}
|
|
301
|
+
/**
 * Attempt a lossless coercion of `value` to the given JSON-Schema type.
 * Returns `{ ok: true, value }` on success, `{ ok: false }` otherwise.
 * String sources support boolean words, numerics, JSON arrays/objects and
 * null spellings; numbers and booleans can stringify; integers pass through.
 */
function tryCoerce(value, type) {
	const ok = (v) => ({ ok: true, value: v });
	const fail = () => ({ ok: false });
	if (typeof value === "string") {
		switch (type) {
			case "boolean": {
				const trimmed = value.trim();
				if (TRUE_STRINGS.has(trimmed)) return ok(true);
				if (FALSE_STRINGS.has(trimmed)) return ok(false);
				return fail();
			}
			case "number": {
				const n = Number(value.trim());
				return Number.isFinite(n) ? ok(n) : fail();
			}
			case "integer": {
				const n = Number(value.trim());
				return Number.isInteger(n) ? ok(n) : fail();
			}
			case "array":
			case "object": {
				let parsed;
				try {
					parsed = JSON.parse(value);
				} catch {
					return fail();
				}
				if (type === "array") return Array.isArray(parsed) ? ok(parsed) : fail();
				const isPlainObject = parsed !== null && typeof parsed === "object" && !Array.isArray(parsed);
				return isPlainObject ? ok(parsed) : fail();
			}
			case "null":
				// Note: intentionally compares the raw (untrimmed) string.
				return value === "" || value === "null" ? ok(null) : fail();
			default:
				return fail();
		}
	}
	if (typeof value === "number" && Number.isFinite(value)) {
		if (type === "string") return ok(String(value));
		if (type === "integer" && Number.isInteger(value)) return ok(value);
	}
	if (typeof value === "boolean" && type === "string") return ok(String(value));
	return fail();
}
|
|
364
|
+
/** JSON-ish type name for an arbitrary value, used in validation error text. */
function jsonType(value) {
	if (Array.isArray(value)) return "array";
	return value === null ? "null" : typeof value;
}
|
|
369
|
+
/**
 * Render a value for an error message: JSON where possible, `String(...)`
 * otherwise (cycles, BigInt, or values JSON.stringify renders as undefined),
 * truncated with a `...` suffix to at most 80 characters.
 */
function formatValue(value) {
	let rendered;
	try {
		rendered = JSON.stringify(value);
	} catch {
		rendered = String(value);
	}
	// JSON.stringify yields undefined for undefined/functions/symbols.
	rendered ??= String(value);
	return rendered.length > 80 ? `${rendered.slice(0, 77)}...` : rendered;
}
|
|
379
|
+
//#endregion
|
|
380
|
+
//#region src/loop.ts
|
|
381
|
+
const IMAGE_OMITTED_MARKER = "[image omitted — model does not support vision]";
|
|
382
|
+
/**
 * Compute the effective thinking budget for a run-relative turn under the
 * configured decay schedule. Pure helper — exported for tests and so
 * downstream tooling can preview decay curves without spinning up the loop.
 *
 * - No positive numeric `baseBudget` → returned unchanged (decay never
 *   invents a budget).
 * - No `decay` → identity (`baseBudget`).
 * - Function form → `decay(turn, baseBudget)`, clamped to `[0, baseBudget]`
 *   so a buggy curve can't request more budget than the caller opted into.
 * - Struct form `{ afterTurn, factor, floor }` → `baseBudget` while
 *   `turn <= afterTurn`; afterwards exponential decay
 *   `factor^(turn - afterTurn)` with a lower bound of `floor`, still capped
 *   at `baseBudget`.
 *
 * The result is rounded to the nearest integer (token counts are integers);
 * NaN or non-positive raw values collapse to 0.
 */
function applyThinkingDecay(baseBudget, decay, turn) {
	if (typeof baseBudget !== "number" || baseBudget <= 0) return baseBudget;
	if (!decay) return baseBudget;
	let raw;
	if (typeof decay === "function") {
		raw = decay(turn, baseBudget);
	} else {
		if (turn <= decay.afterTurn) return baseBudget;
		const stepsPastThreshold = turn - decay.afterTurn;
		raw = Math.max(decay.floor, baseBudget * decay.factor ** stepsPastThreshold);
	}
	if (Number.isNaN(raw) || raw <= 0) return 0;
	return Math.round(Math.min(baseBudget, raw));
}
|
|
412
|
+
/** Convert turns to the SessionMessage[] format expected by providers (system turns are dropped). */
function turnsToMessages(turns) {
	const messages = [];
	for (const turn of turns) {
		if (turn.role === "system") continue;
		messages.push({ role: turn.role, content: turn.content });
	}
	return messages;
}
|
|
419
|
+
/**
|
|
420
|
+
* Flatten image blocks inside previously-stored `tool_result` outputs to text
|
|
421
|
+
* markers when the provider reports no vision capability. This handles the
|
|
422
|
+
* session-resume / mid-session-provider-switch case:
|
|
423
|
+
*
|
|
424
|
+
* - A session was produced with a vision-capable provider (e.g. Claude) and
|
|
425
|
+
* contains structured tool_result content with images.
|
|
426
|
+
* - The agent resumes with a text-only provider (e.g. Cerebras).
|
|
427
|
+
* - Without this pass, the persisted image blocks would re-encode via the
|
|
428
|
+
* provider's wire format and leak base64 payloads into a non-vision model's
|
|
429
|
+
* context.
|
|
430
|
+
*
|
|
431
|
+
* Scope: stored tool_result outputs only. User-authored image blocks are left
|
|
432
|
+
* alone — those are consumer-controlled and the consumer is responsible for
|
|
433
|
+
* matching prompt parts to the provider's capabilities.
|
|
434
|
+
*/
|
|
435
|
+
const COMPACTION_STUB = "[…elided by client-side tail compaction; ask the user or re-run the tool to retrieve.]";
|
|
436
|
+
/**
 * Tail-compaction for non-Anthropic providers: when the cumulative byte size
 * of `tool_result` content across the wire-level message list exceeds
 * `threshold`, replace older `tool_result` outputs with a short stub
 * (`COMPACTION_STUB`). The newest `keepTurns` messages (user/assistant alike)
 * are left untouched so the model retains the freshest tool context.
 *
 * Only `tool_result` blocks are touched — text and image blocks pass through
 * unchanged. Operates on a shallow clone; the input `messages` array and its
 * message objects are never mutated. Returns the input by reference when
 * nothing needed compaction.
 *
 * For Anthropic users, prefer the server-side `context-management-2025-06-27`
 * beta (token-accurate, no client-side approximation). This function is the
 * client-side fallback for OpenAI-compatible / OpenRouter / Cerebras runs
 * against OSS models that lack a server-side equivalent.
 */
function applyTailCompaction(messages, threshold, keepTurns) {
	if (messages.length === 0) return messages;
	// Total byte size of all tool_result outputs across the wire-level list.
	let totalBytes = 0;
	for (const msg of messages) {
		for (const block of msg.content) {
			if (block.type === "tool_result") totalBytes += toolOutputByteLength(block.output);
		}
	}
	if (totalBytes <= threshold) return messages;
	const keep = Math.max(0, keepTurns);
	const cutoff = messages.length - keep;
	if (cutoff <= 0) return messages; // everything falls inside the protected tail
	let changed = false;
	const out = messages.slice();
	for (let i = 0; i < cutoff; i++) {
		const msg = out[i];
		let msgChanged = false;
		const newContent = msg.content.map((block) => {
			if (block.type !== "tool_result") return block;
			// Skip outputs already no larger than the stub — replacing them would
			// save nothing. (Was the magic number 86, which is COMPACTION_STUB.length;
			// tying it to the constant keeps the two in sync if the stub changes.)
			if (toolOutputByteLength(block.output) <= COMPACTION_STUB.length) return block;
			msgChanged = true;
			changed = true;
			return {
				...block,
				output: COMPACTION_STUB
			};
		});
		if (msgChanged) out[i] = {
			...msg,
			content: newContent
		};
	}
	return changed ? out : messages;
}
|
|
482
|
+
/**
 * Replace `read_file` `tool_result` blocks with a short stub when a later
 * successful `edit` / `multi_edit` / `write_file` modified the same path.
 *
 * Eliminates the common waste pattern where the model carries the pre-edit
 * file body forward across many turns. Operates on the wire-level message
 * list only — the persisted session keeps the original content.
 *
 * Detection is conservative: success is gated on the corresponding
 * tool_result starting with `Edited ` (edit / multi_edit) or `Created ` /
 * `Updated ` (write_file). Failed edits and `No change needed` writes do
 * NOT invalidate prior reads — the file content is still what the read
 * returned.
 *
 * Pure function, exported for tests and so downstream tooling can preview
 * elision without spinning up the loop. Non-mutating: returns the input by
 * reference when nothing is stale, otherwise a new array with new message
 * objects where content changed.
 */
const STALE_READ_STUB = "[…elided: file edited later in this run; re-read if still needed.]";
function applyStaleReadElision(messages) {
	if (messages.length === 0) return messages;
	// Map every string tool_result output back to its originating call id.
	const resultByCallId = /* @__PURE__ */ new Map();
	for (const msg of messages) {
		for (const block of msg.content) {
			if (block.type === "tool_result" && typeof block.output === "string") resultByCallId.set(block.callId, block.output);
		}
	}
	// Latest successful-mutation message index per path, plus every read call seen.
	const maxMutationIdxByPath = /* @__PURE__ */ new Map();
	const readCallInfo = /* @__PURE__ */ new Map();
	for (let i = 0; i < messages.length; i++) {
		for (const block of messages[i].content) {
			if (block.type !== "tool_call") continue;
			const path = block.input.path;
			if (typeof path !== "string") continue;
			if (block.name === "read_file") {
				readCallInfo.set(block.id, { path, msgIdx: i });
				continue;
			}
			const isEdit = block.name === "edit" || block.name === "multi_edit";
			const isWrite = block.name === "write_file";
			if (!isEdit && !isWrite) continue;
			const result = resultByCallId.get(block.id);
			if (typeof result !== "string") continue;
			// Only *successful* mutations invalidate earlier reads.
			const succeeded = isEdit ? result.startsWith("Edited ") : result.startsWith("Created ") || result.startsWith("Updated ");
			if (!succeeded) continue;
			const prior = maxMutationIdxByPath.get(path);
			if (prior === void 0 || i > prior) maxMutationIdxByPath.set(path, i);
		}
	}
	if (maxMutationIdxByPath.size === 0) return messages;
	// A read is stale when some successful mutation of its path happened later.
	const staleCallIds = /* @__PURE__ */ new Set();
	for (const [callId, info] of readCallInfo) {
		const lastMutationIdx = maxMutationIdxByPath.get(info.path);
		if (typeof lastMutationIdx === "number" && info.msgIdx < lastMutationIdx) staleCallIds.add(callId);
	}
	if (staleCallIds.size === 0) return messages;
	let changed = false;
	const out = messages.slice();
	for (let i = 0; i < out.length; i++) {
		const msg = out[i];
		let msgChanged = false;
		const newContent = msg.content.map((block) => {
			if (block.type !== "tool_result" || !staleCallIds.has(block.callId)) return block;
			// Consistency fix: compare against the shared STALE_READ_STUB constant
			// instead of a duplicated inline copy of the stub string literal, so the
			// idempotence check can never drift from the substituted value.
			if (block.output === STALE_READ_STUB) return block;
			msgChanged = true;
			changed = true;
			return {
				...block,
				output: STALE_READ_STUB
			};
		});
		if (msgChanged) out[i] = {
			...msg,
			content: newContent
		};
	}
	return changed ? out : messages;
}
|
|
555
|
+
/**
 * Flatten image blocks inside stored `tool_result` outputs to a text marker
 * when the provider reports `vision: false`, so session resume onto a
 * text-only provider never leaks base64 image payloads into its context.
 * Non-destructive: messages that need no change are returned by reference.
 */
function sanitizeStoredToolResults(provider, messages) {
	if (provider.meta.capabilities?.vision !== false) return messages;
	return messages.map((msg) => {
		let changed = false;
		const newContent = msg.content.map((block) => {
			if (block.type !== "tool_result" || typeof block.output === "string") return block;
			changed = true;
			const parts = block.output.map((b) => (b.type === "image" ? IMAGE_OMITTED_MARKER : b.text));
			return { ...block, output: parts.join("\n") };
		});
		return changed ? { ...msg, content: newContent } : msg;
	});
}
|
|
574
|
+
/**
 * Drive the agent's turn loop until the model ends the run, `maxTurns` is
 * reached, or the abort signal fires. Accumulates per-turn token usage and
 * time-to-first-token, and injects queued steering / follow-up messages
 * between turns. Returns aggregate run stats — including the model's final
 * `output` only when the model itself ended the run.
 */
async function runLoop(ctx) {
	let totalIn = 0;
	let totalOut = 0;
	let totalCacheRead = 0;
	let totalCacheCreation = 0;
	const turnUsages = [];
	const startTime = Date.now();
	// No maxTurns configured → loop until the model ends the run or abort fires.
	const maxTurns = ctx.maxTurns ?? Number.POSITIVE_INFINITY;
	let turnsCompleted = 0;
	// Time-to-first-token: first stream text/thinking chunk or tool start, whichever fires first.
	const ttft = { mark: void 0 };
	const markTtft = () => {
		if (ttft.mark === void 0) ttft.mark = Date.now() - ctx.runStartMs;
	};
	const unregisterTtftText = ctx.hooks.hook("stream:text", markTtft);
	const unregisterTtftThinking = ctx.hooks.hook("stream:thinking", markTtft);
	const unregisterTtftTool = ctx.hooks.hook("tool:before", markTtft);
	try {
		for (let turn = 0; turn < maxTurns; turn++) {
			// Abort check before spending a provider call on this turn.
			if (ctx.signal.aborted) {
				await ctx.hooks.callHook("agent:abort", {});
				break;
			}
			const result = await executeTurn(ctx, turn);
			turnsCompleted = turn + 1;
			totalIn += result.usage.input;
			totalOut += result.usage.output;
			totalCacheRead += result.usage.cacheRead ?? 0;
			totalCacheCreation += result.usage.cacheCreation ?? 0;
			turnUsages.push(result.usage);
			await ctx.hooks.callHook("usage", {
				turn,
				turnId: result.turnId,
				usage: result.usage,
				totalIn,
				totalOut
			});
			// Abort may have fired while the turn was executing.
			if (ctx.signal.aborted) {
				await ctx.hooks.callHook("agent:abort", {});
				break;
			}
			// Steering messages take priority over ending: inject one per iteration
			// as a user turn and let the model respond on the next loop pass.
			if (ctx.steeringQueue.length > 0) {
				const steerMsg = ctx.steeringQueue.shift();
				await ctx.hooks.callHook("steer:inject", { message: steerMsg });
				const steerUserMsg = ctx.provider.userMessage(steerMsg);
				ctx.turns.push({
					id: await ctx.generateTurnId(),
					runId: ctx.runId,
					role: steerUserMsg.role,
					content: steerUserMsg.content,
					createdAt: Date.now()
				});
				continue;
			}
			if (result.ended) {
				// Model finished — drain any queued follow-up (one per iteration)
				// before treating the run as complete.
				if (ctx.followUpQueue.length > 0) {
					const followUp = ctx.followUpQueue.shift();
					await ctx.hooks.callHook("steer:inject", { message: followUp });
					const followUpMsg = ctx.provider.userMessage(followUp);
					ctx.turns.push({
						id: await ctx.generateTurnId(),
						runId: ctx.runId,
						role: followUpMsg.role,
						content: followUpMsg.content,
						createdAt: Date.now()
					});
					continue;
				}
				// Clean completion: include the model's final output.
				return {
					totalIn,
					totalOut,
					totalCacheRead,
					totalCacheCreation,
					turns: turn + 1,
					elapsed: Date.now() - startTime,
					turnUsage: turnUsages,
					output: result.output,
					...ttft.mark !== void 0 ? { timeTillFirstTokenMs: ttft.mark } : {}
				};
			}
		}
		// Fell out of the loop: maxTurns exhausted or aborted — no model output field.
		return {
			totalIn,
			totalOut,
			totalCacheRead,
			totalCacheCreation,
			turns: turnsCompleted,
			elapsed: Date.now() - startTime,
			turnUsage: turnUsages,
			...ttft.mark !== void 0 ? { timeTillFirstTokenMs: ttft.mark } : {}
		};
	} finally {
		// Always detach the TTFT probes so handlers never leak across runs.
		unregisterTtftText();
		unregisterTtftThinking();
		unregisterTtftTool();
	}
}
|
|
670
|
+
/**
 * Convert a caught provider error into the matching typed error class.
 *
 * Abort always wins: when the run signal is aborted, or the error itself is a
 * DOM-style `AbortError`, the result is an `AgentAbortedError` regardless of
 * what the provider would classify it as. Otherwise the provider's optional
 * `classifyError` seam is consulted, and anything unclassified is wrapped in
 * a generic `AgentProviderError` carrying the provider name and the original
 * error as `cause`.
 */
function wrapProviderError(err, ctx) {
	const abortedByName = err instanceof Error && err.name === "AbortError";
	if (ctx.signal.aborted || abortedByName) {
		return new AgentAbortedError("Agent run aborted", { cause: err });
	}
	const classification = ctx.provider.classifyError?.(err);
	if (classification) return toTypedError(classification, ctx.provider.name, err);
	const message = err instanceof Error ? err.message : String(err);
	return new AgentProviderError(message, {
		provider: ctx.provider.name,
		cause: err
	});
}
|
|
686
|
+
/**
 * Execute one assistant turn: build the wire request, stream the model
 * response, record the assistant turn, then either finish (structured output /
 * plain done), auto-continue on a "pause" finish, or run the requested tools.
 *
 * Returns `{ ended, turnId, usage, output? }` — `ended: true` means the run
 * loop should stop (subject to follow-ups handled by the caller).
 */
async function executeTurn(ctx, turn) {
	const turnId = await ctx.generateTurnId();
	// Canonical view of the conversation so far; optional stale-read elision
	// runs on the canonical form, before any wire-name rewriting.
	let canonicalMessages = turnsToMessages(ctx.turns);
	if (ctx.elideStaleReads === true) canonicalMessages = applyStaleReadElision(canonicalMessages);
	const wireMessages = rewriteMessagesToWire(canonicalMessages, ctx.aliasMaps);
	let sanitizedMessages = sanitizeStoredToolResults(ctx.provider, wireMessages);
	// Tail compaction: defaults are a 131072-byte threshold and 4 kept turns
	// when the ctx values are absent or out of range.
	if (ctx.compactStrategy === "tail") {
		const threshold = typeof ctx.compactThreshold === "number" && ctx.compactThreshold > 0 ? ctx.compactThreshold : 131072;
		const keep = typeof ctx.compactKeepTurns === "number" && ctx.compactKeepTurns >= 0 ? ctx.compactKeepTurns : 4;
		sanitizedMessages = applyTailCompaction(sanitizedMessages, threshold, keep);
	}
	const effectiveThinkingBudget = applyThinkingDecay(ctx.thinkingBudget, ctx.thinkingDecay, turn);
	// Tools may change between turns (e.g. dynamic registration) — rebuild
	// when the ctx provides a rebuild seam, else reuse the cached formatting.
	const formattedTools = ctx.rebuildFormattedTools ? ctx.rebuildFormattedTools() : ctx.formattedTools;
	const streamOptions = {
		model: ctx.model,
		system: ctx.system,
		tools: formattedTools,
		messages: sanitizedMessages,
		maxTokens: ctx.maxTokens ?? 16384,
		thinking: ctx.thinking,
		thinkingBudget: effectiveThinkingBudget,
		cache: ctx.cache ?? true,
		signal: ctx.signal
	};
	// Hooks may mutate the message list and system prompt in place before the
	// provider call; their outputs are copied back into streamOptions.
	const transformCtx = { messages: streamOptions.messages };
	await ctx.hooks.callHook("context:transform", transformCtx);
	streamOptions.messages = transformCtx.messages;
	const systemCtx = {
		system: streamOptions.system,
		messages: streamOptions.messages,
		turn,
		turnId,
		...ctx.session ? { session: ctx.session } : {}
	};
	await ctx.hooks.callHook("system:transform", systemCtx);
	streamOptions.system = systemCtx.system;
	await ctx.hooks.callHook("turn:before", {
		turn,
		turnId,
		options: streamOptions
	});
	let currentText = "";
	let currentThinking = "";
	let result;
	try {
		result = await ctx.provider.stream(streamOptions, {
			// NOTE: stream:text / stream:thinking hook calls are intentionally
			// not awaited here — presumably to keep the stream callbacks
			// non-blocking; confirm against the hook bus semantics.
			onText(delta) {
				currentText += delta;
				ctx.hooks.callHook("stream:text", {
					delta,
					text: currentText,
					turnId
				});
			},
			onThinking(delta) {
				currentThinking += delta;
				ctx.hooks.callHook("stream:thinking", {
					delta,
					thinking: currentThinking,
					turnId
				});
			},
			onOAuthRefresh(refreshCtx) {
				return ctx.hooks.callHook("oauth:refresh", refreshCtx);
			}
		});
	} catch (err) {
		// Provider failure mid-turn: persist whatever text streamed (or a
		// placeholder) as a zero-usage assistant turn, fire turn:after so
		// observers see a complete turn, then rethrow as a typed error.
		const errorUsage = {
			input: 0,
			output: 0
		};
		const errorContent = currentText ? [{
			type: "text",
			text: currentText
		}] : [{
			type: "text",
			text: "[provider error before any output]"
		}];
		const errorTurn = {
			id: turnId,
			runId: ctx.runId,
			role: "assistant",
			content: errorContent,
			usage: errorUsage,
			createdAt: Date.now()
		};
		ctx.turns.push(errorTurn);
		await ctx.hooks.callHook("turn:after", {
			turn,
			turnId,
			usage: errorUsage,
			message: errorTurn,
			toolCounts: {
				turn: Object.freeze({}),
				run: Object.freeze({ ...ctx.runToolCounts })
			}
		});
		throw wrapProviderError(err, ctx);
	}
	if (currentText) await ctx.hooks.callHook("stream:end", {
		text: currentText,
		turnId
	});
	// Tool names come back in wire (possibly aliased) form; map them to
	// canonical names for lookup and counting.
	const canonicalToolCalls = result.toolCalls.map((tc) => ({
		...tc,
		name: toCanonicalName(tc.name, ctx.aliasMaps)
	}));
	const canonicalContent = rewriteContentToCanonical(result.assistantMessage?.content ?? [], ctx.aliasMaps);
	const assistantTurn = {
		id: turnId,
		runId: ctx.runId,
		role: "assistant",
		// On a done turn with empty content, fall back to the accumulated
		// streamed text so the stored turn is never content-free.
		content: result.done ? canonicalContent.length > 0 ? canonicalContent : [{
			type: "text",
			text: currentText
		}] : canonicalContent,
		usage: result.usage,
		createdAt: Date.now()
	};
	ctx.turns.push(assistantTurn);
	const turnCounts = {};
	for (const tc of canonicalToolCalls) turnCounts[tc.name] = (turnCounts[tc.name] ?? 0) + 1;
	await ctx.hooks.callHook("turn:after", {
		turn,
		turnId,
		usage: result.usage,
		message: assistantTurn,
		toolCounts: {
			turn: Object.freeze(turnCounts),
			run: Object.freeze({ ...ctx.runToolCounts })
		}
	});
	if (result.done) {
		// Structured-output pass: one extra forced-tool call against the full
		// transcript, skipped when already aborted.
		if (ctx.schema && !ctx.signal.aborted) {
			const outputSpec = {
				name: "__output__",
				description: "Return the final structured output matching the required schema.",
				inputSchema: ctx.schema
			};
			const schemaMessages = rewriteMessagesToWire(turnsToMessages(ctx.turns), ctx.aliasMaps);
			let schemaResult;
			try {
				schemaResult = await ctx.provider.stream({
					model: ctx.model,
					system: ctx.system,
					tools: ctx.provider.formatTools([outputSpec]),
					messages: schemaMessages,
					maxTokens: ctx.maxTokens ?? 16384,
					signal: ctx.signal,
					// Force the model to answer via the __output__ tool.
					toolChoice: {
						type: "tool",
						name: "__output__"
					}
				}, {
					onText: () => {},
					onOAuthRefresh(refreshCtx) {
						return ctx.hooks.callHook("oauth:refresh", refreshCtx);
					}
				});
			} catch (err) {
				throw wrapProviderError(err, ctx);
			}
			const output = schemaResult.toolCalls.find((tc) => tc.name === "__output__")?.input;
			if (output) await ctx.hooks.callHook("output", {
				output,
				schema: ctx.schema
			});
			const schemaTurn = {
				id: await ctx.generateTurnId(),
				runId: ctx.runId,
				role: "assistant",
				content: schemaResult.assistantMessage.content,
				usage: schemaResult.usage,
				createdAt: Date.now()
			};
			ctx.turns.push(schemaTurn);
			return {
				ended: true,
				turnId,
				// Reported usage for this turn includes the schema pass.
				usage: {
					input: result.usage.input + schemaResult.usage.input,
					output: result.usage.output + schemaResult.usage.output
				},
				output
			};
		}
		return {
			ended: true,
			turnId,
			usage: result.usage
		};
	}
	// finishReason "pause" with no tool calls: nudge the model to continue
	// with a synthetic user message instead of ending the run.
	if (canonicalToolCalls.length === 0 && result.usage.finishReason === "pause") {
		const continueMsg = ctx.provider.userMessage("Please continue.");
		ctx.turns.push({
			id: await ctx.generateTurnId(),
			runId: ctx.runId,
			role: continueMsg.role,
			content: continueMsg.content,
			createdAt: Date.now()
		});
		return {
			ended: false,
			turnId,
			usage: result.usage
		};
	}
	const toolResults = ctx.toolExecution === "parallel" ? await executeToolsParallel(ctx, canonicalToolCalls, turnId) : await executeToolsSequential(ctx, canonicalToolCalls, turnId);
	const toolResultMsg = ctx.provider.toolResultsMessage(toolResults);
	ctx.turns.push({
		id: await ctx.generateTurnId(),
		runId: ctx.runId,
		role: toolResultMsg.role,
		content: toolResultMsg.content,
		createdAt: Date.now()
	});
	// Per-turn tool-output byte budget: over-budget turns get a visible
	// warning message appended plus a budget:exceeded hook.
	if (typeof ctx.toolOutputBudget === "number" && ctx.toolOutputBudget > 0) {
		const totalBytes = toolResults.reduce((sum, r) => sum + toolOutputByteLength(r.content), 0);
		if (totalBytes > ctx.toolOutputBudget) {
			const warning = `[Tool output budget exceeded: ${totalBytes} bytes returned in this turn (cap: ${ctx.toolOutputBudget}). Summarize the salient findings before calling more tools.]`;
			const userMsg = ctx.provider.userMessage(warning);
			ctx.turns.push({
				id: await ctx.generateTurnId(),
				runId: ctx.runId,
				role: userMsg.role,
				content: userMsg.content,
				createdAt: Date.now()
			});
			await ctx.hooks.callHook("budget:exceeded", {
				turn,
				turnId,
				bytes: totalBytes,
				budget: ctx.toolOutputBudget
			});
		}
	}
	return {
		ended: false,
		turnId,
		usage: result.usage
	};
}
|
|
928
|
+
/**
 * Replace image blocks with a text marker when the provider cannot see images.
 *
 * Providers that report `capabilities.vision: true` — or omit `capabilities`
 * entirely (third-party providers without the field are treated as
 * vision-capable) — get the output back untouched, as do plain-string
 * outputs. Only an explicit `vision: false` triggers stripping, in which case
 * the block array collapses to a single newline-joined string: every image
 * becomes the marker, every text block contributes its text.
 */
function stripImagesForNonVision(provider, output) {
	if (typeof output === "string") return output;
	const visionCapable = provider.meta.capabilities?.vision !== false;
	if (visionCapable) return output;
	const pieces = [];
	for (const block of output) {
		pieces.push(block.type === "image" ? IMAGE_OMITTED_MARKER : block.text);
	}
	return pieces.join("\n");
}
|
|
948
|
+
/**
 * Execute one canonical tool call through the full hook pipeline:
 * `tool:gate` (block / substitute) → unknown-tool handling → schema
 * validation (with coercion) → `tool:before` → execute → `tool:error` on
 * throw → shared result emission via `emitToolResult`.
 *
 * Always resolves to `{ result: { id, content } }`; errors are folded into
 * the content string rather than thrown, so batch executors never reject.
 */
async function executeSingleTool(ctx, call, turnId) {
	const toolDef = ctx.tools[call.name];
	const callId = call.id;
	// Hooks see the wire-facing (possibly aliased) name alongside the
	// canonical one.
	const displayName = toWireName(call.name, ctx.aliasMaps);
	// Frozen snapshot of run-level counts taken BEFORE this call increments
	// them — handlers observe the state prior to this invocation.
	const runToolCounts = Object.freeze({ ...ctx.runToolCounts });
	const gateCtx = {
		turnId,
		callId,
		name: call.name,
		displayName,
		input: call.input,
		block: false,
		reason: "Tool execution was blocked",
		runToolCounts
	};
	await ctx.hooks.callHook("tool:gate", gateCtx);
	// Blocked calls do NOT count against runToolCounts (increment is below).
	if (gateCtx.block) return { result: {
		id: callId,
		content: `Blocked: ${gateCtx.reason}`
	} };
	ctx.runToolCounts[call.name] = (ctx.runToolCounts[call.name] ?? 0) + 1;
	// Gate-substituted result: skip execution but still run the shared
	// transform/after emission path so consumers can't tell the difference.
	if (gateCtx.result !== void 0) return { result: {
		id: callId,
		content: await emitToolResult(ctx, {
			turnId,
			callId,
			name: call.name,
			displayName,
			input: gateCtx.input,
			output: gateCtx.result,
			isError: false,
			runToolCounts
		})
	} };
	// The gate may have rewritten the input.
	let effectiveInput = gateCtx.input;
	if (!toolDef) {
		// Unknown tool: let tool:unknown supply a replacement result and/or
		// suppress the tool:error notification.
		const unknownCtx = {
			turnId,
			callId,
			name: call.name,
			displayName,
			input: effectiveInput,
			suppressError: false
		};
		await ctx.hooks.callHook("tool:unknown", unknownCtx);
		const content = unknownCtx.result ?? `Tool error: Unknown tool: ${call.name}`;
		if (!unknownCtx.suppressError) {
			const err = /* @__PURE__ */ new Error(`Unknown tool: ${call.name}`);
			await ctx.hooks.callHook("tool:error", {
				turnId,
				callId,
				name: call.name,
				displayName,
				input: effectiveInput,
				error: err
			});
		}
		return { result: {
			id: callId,
			content
		} };
	}
	const validation = validateToolArgs(effectiveInput, toolDef.spec.inputSchema);
	if (!validation.valid) {
		await ctx.hooks.callHook("validation:reject", {
			turnId,
			callId,
			name: call.name,
			displayName,
			input: effectiveInput,
			reason: validation.error ?? "invalid input",
			schema: toolDef.spec.inputSchema
		});
		return { result: {
			id: callId,
			content: `Validation error: ${validation.error}`
		} };
	}
	// Validation may coerce arguments; an empty coercion list is normalized
	// to undefined so spreads below omit the field entirely.
	effectiveInput = validation.coercedInput ?? effectiveInput;
	const coercions = validation.coercions && validation.coercions.length > 0 ? validation.coercions : void 0;
	if (coercions) await ctx.hooks.callHook("validation:coerce", {
		turnId,
		callId,
		name: call.name,
		displayName,
		input: effectiveInput,
		coercions,
		schema: toolDef.spec.inputSchema
	});
	await ctx.hooks.callHook("tool:before", {
		turnId,
		callId,
		name: call.name,
		displayName,
		input: effectiveInput,
		runToolCounts,
		...coercions ? { coercions } : {}
	});
	let output;
	let isError = false;
	try {
		// Per-call tool context; optional agent-level fields are only present
		// when defined so tools can distinguish "absent" from "undefined".
		const toolCtx = {
			provider: ctx.provider,
			signal: ctx.signal,
			execution: ctx.execution,
			handle: ctx.handle,
			hooks: ctx.hooks,
			tools: ctx.agentTools,
			...ctx.agentName !== void 0 ? { name: ctx.agentName } : {},
			...ctx.agentSystem !== void 0 ? { system: ctx.agentSystem } : {},
			...ctx.agentToolAliases !== void 0 ? { toolAliases: ctx.agentToolAliases } : {},
			...ctx.agentMcpServers !== void 0 ? { mcpServers: ctx.agentMcpServers } : {},
			...ctx.agentSkills !== void 0 ? { skills: ctx.agentSkills } : {},
			...ctx.agentBehavior !== void 0 ? { behavior: ctx.agentBehavior } : {},
			turnId,
			callId,
			runId: ctx.runId,
			...ctx.session ? { session: ctx.session } : {},
			...typeof ctx.depth === "number" ? { depth: ctx.depth } : {}
		};
		output = await toolDef.execute(effectiveInput, toolCtx);
	} catch (err) {
		const error = err instanceof Error ? err : new Error(String(err));
		const errorCtx = {
			turnId,
			callId,
			name: call.name,
			displayName,
			input: effectiveInput,
			error
		};
		await ctx.hooks.callHook("tool:error", errorCtx);
		// tool:error handlers may substitute a friendlier result.
		output = errorCtx.result ?? `Tool error: ${error.message}`;
		isError = true;
	}
	return { result: {
		id: callId,
		content: await emitToolResult(ctx, {
			turnId,
			callId,
			name: call.name,
			displayName,
			input: effectiveInput,
			output,
			isError,
			runToolCounts,
			...coercions ? { coercions } : {}
		})
	} };
}
|
|
1098
|
+
/**
 * Shared post-output emission for tool results.
 *
 * Pipeline: fire `tool:transform` (handlers may mutate `result`/`isError`),
 * strip image blocks for non-vision providers, then fire `tool:after` with
 * the final result and its byte length. Both the gate-substitute path and
 * the post-execute path funnel through here so the two are indistinguishable
 * to hook consumers. Returns the final output content.
 */
async function emitToolResult(ctx, params) {
	const { turnId, callId, name, displayName, input, runToolCounts, coercions } = params;
	const coercionField = coercions ? { coercions } : {};
	const transformCtx = {
		turnId,
		callId,
		name,
		displayName,
		input,
		result: params.output,
		isError: params.isError,
		outputBytes: toolOutputByteLength(params.output),
		...coercionField
	};
	await ctx.hooks.callHook("tool:transform", transformCtx);
	// Handlers mutate transformCtx.result in place; pick up whatever survived
	// and strip images if the provider cannot see them.
	const finalOutput = stripImagesForNonVision(ctx.provider, transformCtx.result);
	await ctx.hooks.callHook("tool:after", {
		turnId,
		callId,
		name,
		displayName,
		input,
		result: finalOutput,
		outputBytes: toolOutputByteLength(finalOutput),
		runToolCounts,
		...coercionField
	});
	return finalOutput;
}
|
|
1136
|
+
/**
 * Run tool calls one at a time, in order.
 *
 * - An aborted signal stops the loop early (remaining calls get no results).
 * - A pending steering message short-circuits: the current call and every
 *   remaining call get a "Skipped" placeholder result so the provider still
 *   sees one result per call, and the loop returns immediately.
 *
 * Fix: use the loop index directly instead of `toolCalls.indexOf(call)` —
 * indexOf is an O(n) scan per skip and resolves to the FIRST occurrence of
 * the element, which would mis-index the skip if the same call object ever
 * appeared twice in the batch.
 */
async function executeToolsSequential(ctx, toolCalls, turnId) {
	const results = [];
	for (let idx = 0; idx < toolCalls.length; idx++) {
		if (ctx.signal.aborted) break;
		if (ctx.steeringQueue.length > 0) {
			// Emit a placeholder for this call and all that follow it.
			for (let i = idx; i < toolCalls.length; i++) results.push({
				id: toolCalls[i].id,
				content: "Skipped: steering message received"
			});
			return results;
		}
		const { result } = await executeSingleTool(ctx, toolCalls[idx], turnId);
		results.push(result);
	}
	return results;
}
|
|
1153
|
+
/**
 * Run all tool calls concurrently and collect one result per call, in the
 * original call order. Rejected executions are converted into an
 * `Error: ...` content string rather than propagated, so the batch as a
 * whole never rejects.
 */
async function executeToolsParallel(ctx, toolCalls, turnId) {
	const pending = toolCalls.map((call) => executeSingleTool(ctx, call, turnId));
	const settled = await Promise.allSettled(pending);
	return settled.map((outcome, idx) => {
		if (outcome.status === "rejected") {
			const reason = outcome.reason;
			return {
				id: toolCalls[idx].id,
				content: `Error: ${reason instanceof Error ? reason.message : String(reason)}`
			};
		}
		return outcome.value.result;
	});
}
|
|
1163
|
+
//#endregion
|
|
1164
|
+
//#region src/prompt.ts
|
|
1165
|
+
/**
 * Normalize the run-level prompt into a `PromptPart[]`.
 *
 * - `undefined` stays `undefined` (promptless resume path).
 * - A string becomes a single text part; the empty string yields `undefined`
 *   so callers skip pushing an empty user turn.
 * - An array is validated (every part must be an object with a `type` of
 *   "text" | "image" | "document") and returned as-is — unless it is empty,
 *   or contains only empty text parts with no image/document parts, in
 *   which case it yields `undefined`.
 *
 * @throws {Error} on a malformed part or an unrecognized part type.
 */
function canonicalizePrompt(prompt) {
	if (prompt === void 0) return void 0;
	if (typeof prompt === "string") {
		if (prompt.length === 0) return void 0;
		return [{
			type: "text",
			text: prompt
		}];
	}
	if (prompt.length === 0) return void 0;
	for (const part of prompt) {
		const wellFormed = part && typeof part === "object" && typeof part.type === "string";
		if (!wellFormed) throw new Error("Invalid PromptPart: each part must be an object with a `type` field.");
		const { type } = part;
		const known = type === "text" || type === "image" || type === "document";
		if (!known) throw new Error(`Invalid PromptPart type "${type}". Expected "text" | "image" | "document".`);
	}
	const hasSubstance = prompt.some((part) => part.type === "text" && part.text.length > 0 || part.type === "image" || part.type === "document");
	return hasSubstance ? prompt : void 0;
}
|
|
1193
|
+
/**
 * Build a user `SessionMessage` from prompt parts with no provider-specific
 * handling.
 *
 * - `text` parts become `{ type: 'text', text }` blocks; empty text is
 *   dropped.
 * - `image` parts become `{ type: 'image', mediaType, data }` blocks.
 * - `document` parts with `encoding: 'text'` are inlined as an
 *   `<attachment>`-tagged text block (with an optional `name` attribute) so
 *   any provider can read them.
 * - `document` parts with any other encoding throw — callers should use a
 *   provider that implements `promptMessage` (e.g. Anthropic for PDFs).
 */
function defaultPromptMessage(parts) {
	const content = [];
	for (const part of parts) {
		switch (part.type) {
			case "text": {
				if (part.text.length > 0) content.push({
					type: "text",
					text: part.text
				});
				break;
			}
			case "image": {
				content.push({
					type: "image",
					mediaType: part.mediaType,
					data: part.data
				});
				break;
			}
			default: {
				// Document path: only text-encoded documents can be inlined.
				if (part.encoding !== "text") throw new Error(`Provider does not support base64 document parts (mediaType: ${part.mediaType}). Use a text-encoded document or a provider that implements promptMessage (e.g. Anthropic).`);
				const header = part.name ? `<attachment name="${part.name}" media_type="${part.mediaType}">` : `<attachment media_type="${part.mediaType}">`;
				content.push({
					type: "text",
					text: `${header}\n${part.data}\n</attachment>`
				});
			}
		}
	}
	return {
		role: "user",
		content
	};
}
|
|
1236
|
+
/**
 * Build the prompt `SessionMessage` for a given provider.
 *
 * Delegates to `provider.promptMessage` when the provider defines one
 * (preserving the provider as `this`), otherwise falls back to the
 * provider-agnostic `defaultPromptMessage`.
 */
function buildPromptMessage(provider, parts) {
	const custom = provider.promptMessage;
	return custom ? custom.call(provider, parts) : defaultPromptMessage(parts);
}
|
|
1245
|
+
//#endregion
|
|
1246
|
+
//#region src/tool-budgets.ts
|
|
1247
|
+
/**
 * Install the per-tool soft-budget middleware on a hook bus.
 *
 * `getToolBudgets` resolves the per-tool budgets on every gate (run override
 * merged with agent defaults); `enqueueSteer` pushes a synthetic user message
 * for the run loop to drain between turns.
 *
 * Approvals are tracked in this middleware's OWN counter, incremented at
 * gate-time — deliberately independent of the loop's `runToolCounts`, which
 * only updates after the gate completes. That gives atomic per-call
 * reservation in parallel batches: when N calls fire `tool:gate` before any
 * loop-side increment lands, each handler still sees the earlier approvals
 * and refuses past `max`.
 *
 * Over budget, the configured `onExceed` decides: `"block"` refuses the
 * call, `"steer"` (the default) enqueues a one-time steering message per
 * tool, and a function form returns `{ mode, message }` (falling back to
 * steer with the default message if it throws). Either outcome fires
 * `tool-budget:exceeded`.
 *
 * @returns an `uninstall` function that unregisters the gate handler.
 */
function installToolBudgetsGate(hooks, getToolBudgets, enqueueSteer) {
	const steerNotified = /* @__PURE__ */ new Set();
	const approvals = {};
	async function onGate(gateCtx) {
		// Respect earlier handlers that already blocked or substituted.
		if (gateCtx.block) return;
		if (gateCtx.result !== void 0) return;
		const budget = getToolBudgets()?.[gateCtx.name];
		if (!budget) return;
		const max = budget.max;
		if (typeof max !== "number" || max <= 0) return;
		const approvedSoFar = approvals[gateCtx.name] ?? 0;
		if (approvedSoFar < max) {
			// Reserve this call immediately (gate-time increment).
			approvals[gateCtx.name] = approvedSoFar + 1;
			return;
		}
		const onExceed = budget.onExceed ?? "steer";
		let mode;
		let message;
		if (typeof onExceed === "function") {
			try {
				const decision = onExceed({
					tool: gateCtx.name,
					count: approvedSoFar,
					max
				});
				mode = decision.mode;
				message = decision.message;
			} catch {
				// A throwing callback degrades to the default steer behavior.
				mode = "steer";
				message = defaultSteerMessage(gateCtx.name, approvedSoFar, max);
			}
		} else if (onExceed === "block") {
			mode = "block";
			message = defaultBlockMessage(gateCtx.name, max);
		} else {
			mode = "steer";
			message = defaultSteerMessage(gateCtx.name, approvedSoFar, max);
		}
		if (mode === "block") {
			gateCtx.block = true;
			gateCtx.reason = message;
			await hooks.callHook("tool-budget:exceeded", {
				tool: gateCtx.name,
				count: approvedSoFar,
				max,
				turnId: gateCtx.turnId,
				mode: "block"
			});
			return;
		}
		// Steer at most once per tool per install.
		if (steerNotified.has(gateCtx.name)) return;
		steerNotified.add(gateCtx.name);
		enqueueSteer(message);
		await hooks.callHook("tool-budget:exceeded", {
			tool: gateCtx.name,
			count: approvedSoFar,
			max,
			turnId: gateCtx.turnId,
			mode: "steer"
		});
	}
	const unregister = hooks.hook("tool:gate", onGate);
	return function uninstall() {
		unregister();
		steerNotified.clear();
	};
}
|
|
1328
|
+
/**
 * Default steering text injected when a tool exceeds its per-run budget in
 * "steer" mode.
 */
function defaultSteerMessage(tool, count, max) {
	const notice = `[Tool budget reached: '${tool}' has been called ${count} times this run (cap: ${max}). Avoid calling it again unless strictly necessary; commit to a result and move on.]`;
	return notice;
}
|
|
1331
|
+
/**
 * Default refusal text used when a tool exceeds its per-run budget in
 * "block" mode.
 */
function defaultBlockMessage(tool, max) {
	const notice = `Tool '${tool}' has reached its per-run budget of ${max} calls; further invocations are refused.`;
	return notice;
}
|
|
1334
|
+
//#endregion
|
|
1335
|
+
//#region src/tools/binary-detect.ts
|
|
1336
|
+
/**
|
|
1337
|
+
* Heuristics for detecting binary content in UTF-8-decoded strings.
|
|
1338
|
+
*
|
|
1339
|
+
* `ExecutionContext.readFile` always returns text. Invalid bytes survive
|
|
1340
|
+
* decoding as U+FFFD (replacement char), and genuinely binary files often
|
|
1341
|
+
* contain NUL bytes — UTF-8 text legitimately should not. These helpers
|
|
1342
|
+
* give tools a cheap way to bail before drowning the model in mojibake.
|
|
1343
|
+
*/
|
|
1344
|
+
// Number of leading UTF-16 units the sniffing helpers sample; keeps binary
// detection cheap regardless of file size.
const SNIFF_BYTES = 8192;
// A sample counts as binary only when U+FFFD replacement chars clear BOTH the
// ratio threshold and the absolute minimum below (see looksBinary) — this
// avoids flagging text files with one or two stray replacement chars.
const REPLACEMENT_RATIO_THRESHOLD = .01;
const REPLACEMENT_MIN_COUNT = 5;
|
|
1347
|
+
/**
 * True if a NUL (`\x00`) appears in the leading sample of `text`. Cheap and
 * free of false positives on real text — UTF-8 never embeds NUL, so any NUL
 * means the source bytes were binary.
 *
 * @param text - UTF-8-decoded string to sniff.
 * @param sniffBytes - size of the leading window to inspect (defaults to
 *   SNIFF_BYTES); content past the window is ignored.
 *
 * Idiom fix: `String.prototype.includes` replaces the manual charCodeAt
 * loop — identical result, and the scan stays inside the engine.
 */
function containsNullByte(text, sniffBytes = SNIFF_BYTES) {
	const sample = text.length > sniffBytes ? text.slice(0, sniffBytes) : text;
	return sample.includes("\0");
}
|
|
1357
|
+
/**
 * Heavier binary sniff used by `read_file`:
 * - any NUL byte in the sample ⇒ binary;
 * - otherwise, U+FFFD replacement chars at or above the minimum count AND
 *   above the ratio threshold ⇒ binary.
 *
 * The combined ratio + minimum guard keeps a text file with a couple of
 * stray replacement chars (config dumps, editor logs) from being flagged.
 *
 * @param text - UTF-8-decoded string to sniff.
 * @param sniffBytes - size of the leading window to inspect.
 */
function looksBinary(text, sniffBytes = SNIFF_BYTES) {
	const sample = text.length > sniffBytes ? text.slice(0, sniffBytes) : text;
	if (sample.length === 0) return false;
	let replacementChars = 0;
	let idx = 0;
	while (idx < sample.length) {
		const unit = sample.charCodeAt(idx);
		if (unit === 0) return true;
		if (unit === 65533) replacementChars += 1;
		idx += 1;
	}
	const ratio = replacementChars / sample.length;
	return replacementChars >= REPLACEMENT_MIN_COUNT && ratio > REPLACEMENT_RATIO_THRESHOLD;
}
|
|
1376
|
+
//#endregion
|
|
1377
|
+
//#region src/tools/skills-read.ts
|
|
1378
|
+
/**
 * Create the `skills_read` tool: reads a bundled resource file from an
 * active skill. Execution guards, in order: the skill must exist in the
 * catalog, must be active per `options.state.isActive`, must have a
 * `baseDir`, and the requested path must validate against that base
 * directory. Failures return error strings (never throw); binary-looking
 * content is reported as a JSON `binary-unsupported` payload instead of raw
 * bytes.
 */
function createSkillsReadTool(options) {
	const catalogIndex = new Map();
	for (const skill of options.catalog) catalogIndex.set(skill.name, skill);
	return {
		spec: {
			name: "skills_read",
			description: 'Read a bundled resource file from an active skill. The skill must have been activated via skills_use first. Path is relative to the skill\'s directory (e.g. "references/REFERENCE.md", "assets/template.txt").',
			inputSchema: {
				type: "object",
				properties: {
					name: {
						type: "string",
						// Enum is built from the raw catalog (not the Map) so
						// ordering and any duplicates match the input exactly.
						enum: options.catalog.map((s) => s.name),
						description: "The name of the active skill."
					},
					path: {
						type: "string",
						description: "Path to the resource, relative to the skill root. Cannot escape the skill directory."
					}
				},
				required: ["name", "path"],
				additionalProperties: false
			}
		},
		async execute(input, ctx) {
			const skillName = input.name;
			const relPath = input.path;
			const skill = catalogIndex.get(skillName);
			if (!skill) return `Error: unknown skill "${skillName}".`;
			if (!options.state.isActive(skillName)) return `Error: skill "${skillName}" is not active. Call skills_use with name: "${skillName}" first.`;
			if (!skill.baseDir) return `Error: skill "${skillName}" has no base directory (likely an inline skill without bundled resources); cannot read files.`;
			const validated = validateResourcePath(relPath, skill.baseDir);
			if (!validated.valid) return `Error: ${validated.error}`;
			let content;
			try {
				content = await ctx.execution.readFile(ctx.handle, validated.absolutePath);
			} catch (err) {
				const detail = err instanceof Error ? err.message : String(err);
				return `Error reading "${relPath}" in skill "${skillName}": ${detail}`;
			}
			if (!containsNullByte(content)) return content;
			return JSON.stringify({
				kind: "binary-unsupported",
				path: validated.absolutePath,
				note: "This file appears to be binary. The skills_read tool returns text only; binary files are not delivered through the execution context's text-based readFile API."
			});
		}
	};
}
|
|
1425
|
+
//#endregion
|
|
1426
|
+
//#region src/tools/shell-quote.ts
|
|
1427
|
+
/**
|
|
1428
|
+
* Shared shell-argument quoter for tool implementations.
|
|
1429
|
+
*
|
|
1430
|
+
* Single source of truth so `grep`, `binary-read`, and `skills-run-script`
|
|
1431
|
+
* don't drift on the POSIX `'\''` escape pattern.
|
|
1432
|
+
*/
|
|
1433
|
+
/**
 * Shared shell-argument quoting for tool implementations — the single
 * source of truth for the POSIX `'\''` escape so `grep`, `binary-read`,
 * and `skills-run-script` cannot drift apart.
 */
const SAFE_TOKEN_RE = /^[\w@%+=:,./-]+$/;
const SINGLE_QUOTE_RE = /'/g;
/**
 * Quote `arg` for a POSIX shell. Tokens made only of shell-safe
 * characters pass through unchanged (keeps logged command lines
 * readable); everything else is wrapped in single quotes with embedded
 * quotes escaped via the close-escape-reopen trick.
 *
 * NOT a sandbox — only safe when the caller controls the surrounding
 * shell context. Arguments only; never the verb / subcommand.
 */
function shellQuote(arg) {
	return SAFE_TOKEN_RE.test(arg)
		? arg
		: `'${arg.replace(SINGLE_QUOTE_RE, "'\\''")}'`;
}
/**
 * Unconditional variant of {@link shellQuote}: always wraps, for callers
 * that want a consistent log shape or feed inputs that contain
 * whitespace by construction.
 */
function alwaysQuote(arg) {
	const escaped = arg.replace(SINGLE_QUOTE_RE, "'\\''");
	return `'${escaped}'`;
}
|
|
1455
|
+
//#endregion
|
|
1456
|
+
//#region src/tools/skills-run-script.ts
|
|
1457
|
+
// Absolute Windows paths such as `C:\` or `c:/` (case-insensitive drive letter).
const ABS_WINDOWS_RE = /^[a-z]:[\\/]/i;
// Collapses runs of `/` so e.g. `scripts//x` normalises before validation.
const COLLAPSE_SLASHES_RE = /\/+/g;
/**
 * Factory for the `skills_run_script` tool: executes a script bundled in
 * an active skill's scripts/ directory via the execution context's shell,
 * returning stdout/stderr/exitCode as JSON. Errors are returned as tool
 * output strings, never thrown.
 */
function createSkillsRunScriptTool(options) {
	// Index the catalog once so execute() lookups are O(1).
	const byName = new Map(options.catalog.map((s) => [s.name, s]));
	// Default script timeout: 60 000 ms.
	const timeoutMs = options.scriptTimeoutMs ?? 6e4;
	return {
		spec: {
			name: "skills_run_script",
			description: "Execute a script bundled with an active skill (from its scripts/ directory). The skill must have been activated via skills_use first. Returns stdout, stderr, and the exit code. Honors the script's shebang.",
			inputSchema: {
				type: "object",
				properties: {
					name: {
						type: "string",
						// Enum-constrained to the resolved catalog so the model
						// cannot name a skill that does not exist.
						enum: options.catalog.map((s) => s.name),
						description: "The name of the active skill."
					},
					script: {
						type: "string",
						description: "Path to the script relative to the skill's scripts/ directory (e.g. \"extract.py\", \"merge.sh\")."
					},
					args: {
						type: "array",
						items: { type: "string" },
						description: "Optional argv array passed to the script."
					}
				},
				required: ["name", "script"],
				additionalProperties: false
			}
		},
		async execute(input, ctx) {
			const skillName = input.name;
			const scriptRel = input.script;
			const args = input.args ?? [];
			const skill = byName.get(skillName);
			// Guard chain: known skill → active → backed by a directory on disk.
			if (!skill) return `Error: unknown skill "${skillName}".`;
			if (!options.state.isActive(skillName)) return `Error: skill "${skillName}" is not active. Call skills_use with name: "${skillName}" first.`;
			if (!skill.baseDir) return `Error: skill "${skillName}" has no base directory (likely an inline skill); cannot run scripts.`;
			// Rejects POSIX and drive-letter absolute paths here.
			// NOTE(review): leading-backslash / UNC forms are presumably caught
			// by validateResourcePath below — confirm.
			if (scriptRel.startsWith("/") || ABS_WINDOWS_RE.test(scriptRel)) return `Error: Absolute paths are not allowed ("${scriptRel}").`;
			const validated = validateResourcePath(`scripts/${scriptRel}`.replace(COLLAPSE_SLASHES_RE, "/"), skill.baseDir);
			if (!validated.valid) return `Error: ${validated.error}`;
			// Quote every token (script path + args) — exec takes one shell string.
			const cmd = [validated.absolutePath, ...args].map(alwaysQuote).join(" ");
			try {
				// timeoutMs is converted to whole seconds with a 1 s floor —
				// exec's `timeout` option presumably takes seconds; confirm.
				const result = await ctx.execution.exec(ctx.handle, cmd, { timeout: Math.max(1, Math.round(timeoutMs / 1e3)) });
				return JSON.stringify({
					exitCode: result.exitCode,
					stdout: result.stdout,
					stderr: result.stderr
				});
			} catch (err) {
				return `Error running script "${scriptRel}" for skill "${skillName}": ${err instanceof Error ? err.message : String(err)}`;
			}
		}
	};
}
|
|
1513
|
+
//#endregion
|
|
1514
|
+
//#region src/tools/skills-use.ts
|
|
1515
|
+
// Cap on how many bundled resources are listed inline in <skill_resources>.
const MAX_RESOURCE_LIST = 50;
/**
 * Render a skill's instruction body inside the `<skill_content>`
 * envelope: optional base-directory note, a (possibly truncated)
 * `<skill_resources>` listing, compatibility line, and allowed tools.
 */
function buildSkillContentWrapper(skill, body) {
	const out = [`<skill_content name="${escapeXml(skill.name)}" spec_version="0.1">`, body];
	if (skill.baseDir) {
		out.push("");
		out.push(`Skill directory: ${skill.baseDir}`);
		out.push("Relative paths resolve against this directory.");
	}
	if (skill.resources?.length) {
		out.push("");
		out.push("<skill_resources>");
		for (const res of skill.resources.slice(0, MAX_RESOURCE_LIST)) {
			out.push(` <file type="${res.type}">${escapeXml(res.path)}</file>`);
		}
		const hidden = skill.resources.length - MAX_RESOURCE_LIST;
		if (hidden > 0) out.push(` <!-- …(${hidden} more) -->`);
		out.push("</skill_resources>");
	}
	if (skill.compatibility) {
		out.push("");
		out.push(`Compatibility: ${skill.compatibility}`);
	}
	if (skill.allowedTools?.length) out.push(`Allowed tools: ${skill.allowedTools.join(" ")}`);
	out.push("</skill_content>");
	return out.join("\n");
}
|
|
1541
|
+
/**
|
|
1542
|
+
* Factory for `skills_use`. Auto-injected into the agent's tool set by the
|
|
1543
|
+
* agent runtime when a non-empty skills catalog is available (unless
|
|
1544
|
+
* `SkillsConfig.tool === false`).
|
|
1545
|
+
*
|
|
1546
|
+
* The tool schema's `name` property is `enum`-constrained to the resolved
|
|
1547
|
+
* catalog so the LLM cannot hallucinate a skill that doesn't exist.
|
|
1548
|
+
*/
|
|
1549
|
+
function createSkillsUseTool(options) {
	// Index the catalog once so execute() lookups are O(1).
	const byName = new Map(options.catalog.map((s) => [s.name, s]));
	// Per-factory cache of interpolated instruction bodies: the shell
	// interpolation below runs at most once per skill for this tool instance.
	const interpolatedBodyCache = /* @__PURE__ */ new Map();
	return {
		spec: {
			name: "skills_use",
			description: "Activate a specialized skill and load its full instructions. Call this when a task matches a skill's description from the catalog. After calling, follow the returned instructions; use skills_read to load referenced files and skills_run_script to execute bundled scripts.",
			inputSchema: {
				type: "object",
				properties: { name: {
					type: "string",
					// Enum-constrained to the resolved catalog (see jsdoc above).
					enum: options.catalog.map((s) => s.name),
					description: "The name of the skill to activate (must be in the available skills catalog)."
				} },
				required: ["name"],
				additionalProperties: false
			}
		},
		async execute(input, ctx) {
			const skillName = input.name;
			const skill = byName.get(skillName);
			if (!skill) return `Error: unknown skill "${skillName}". Available skills: ${[...byName.keys()].join(", ") || "<none>"}.`;
			// Re-activating an already-active skill is a no-op at the state
			// level: it skips both the cap check and the activate hook, but
			// still re-returns the skill content below.
			if (!options.state.isActive(skillName)) {
				// The activation cap (maxActive) is enforced by the shared state.
				if (options.state.activate(skill, "model") === "cap-reached") return `Error: cannot activate "${skillName}" — the maxActive skill cap has been reached. Currently active: ${options.state.active().map((a) => a.skill.name).join(", ")}. Deactivate an existing skill first.`;
				await options.hooks.callHook("skills:activate", {
					skill,
					via: "model"
				});
			}
			let body = interpolatedBodyCache.get(skillName);
			if (body === void 0) {
				// "!`" marks embedded shell-command interpolation in the
				// instructions; plain instructions are used verbatim.
				body = skill.instructions.includes("!`") ? await interpolateShellCommands(skill.instructions, ctx.execution, ctx.handle) : skill.instructions;
				interpolatedBodyCache.set(skillName, body);
			}
			return buildSkillContentWrapper(skill, body);
		}
	};
}
|
|
1587
|
+
//#endregion
|
|
1588
|
+
//#region src/tools/tool-search.ts
|
|
1589
|
+
// Default cap on tool_search matches when the caller passes no limit.
const DEFAULT_LIMIT$1 = 20;
/**
 * Rank catalog entries against a case-insensitive substring query.
 * Name matches sort ahead of description-only matches; within each tier
 * the original catalog order is preserved. A blank (or whitespace-only)
 * query returns a shallow copy of the whole catalog.
 */
function rankByQuery(catalog, query) {
	const needle = query.trim().toLowerCase();
	if (needle === "") return [...catalog];
	const byName = [];
	const byDescription = [];
	for (const entry of catalog) {
		if (entry.name.toLowerCase().includes(needle)) {
			byName.push(entry);
		} else if (entry.description.toLowerCase().includes(needle)) {
			byDescription.push(entry);
		}
	}
	return byName.concat(byDescription);
}
|
|
1599
|
+
/**
|
|
1600
|
+
* Sanitise a JSON-stringified schema for embedding inside a pseudo-XML tag.
|
|
1601
|
+
*
|
|
1602
|
+
* The schema is inlined verbatim (NOT XML-escaped) so the model can reuse
|
|
1603
|
+
* field types directly when constructing arguments. The only transformation
|
|
1604
|
+
* we apply is replacing `<` with `\u003c` inside string literals — this
|
|
1605
|
+
* keeps the JSON byte-equivalent for the model (JSON parsers decode the
|
|
1606
|
+
* escape) while preventing a hostile or buggy `inputSchema` from injecting
|
|
1607
|
+
* apparent XML tags (`</input_schema>`, `<evil>…`) that would muddle the
|
|
1608
|
+
* model's read of the surrounding result envelope.
|
|
1609
|
+
*
|
|
1610
|
+
* The substitution is safe because `<` is only meaningful when it appears
|
|
1611
|
+
* literally; the `\u003c` form is not a tag character and is identical to
|
|
1612
|
+
* `<` after JSON parsing.
|
|
1613
|
+
*/
|
|
1614
|
+
/**
 * Replace every literal `<` in a JSON-stringified schema with the
 * byte-equivalent `\u003c` escape, so a hostile or buggy inputSchema
 * cannot inject apparent XML tags into the surrounding result envelope
 * (see the rationale comment above). JSON parsers decode the escape
 * back to `<`, so the payload is unchanged for the model.
 */
function sanitiseSchemaForXml(schemaJson) {
	return schemaJson.replaceAll("<", "\\u003c");
}
|
|
1617
|
+
/**
 * Render one surfaced catalog entry as a `<tool>` element: XML-escaped
 * name/description/server attribute, plus the sanitised (but otherwise
 * verbatim) JSON input schema.
 */
function formatMatch(entry) {
	const serverAttr = entry.server ? ` server="${escapeXml(entry.server)}"` : "";
	const schemaJson = sanitiseSchemaForXml(JSON.stringify(entry.inputSchema));
	const lines = [];
	lines.push(` <tool name="${escapeXml(entry.name)}"${serverAttr}>`);
	lines.push(` <description>${escapeXml(entry.description)}</description>`);
	lines.push(` <input_schema>${schemaJson}</input_schema>`);
	lines.push(` </tool>`);
	return lines.join("\n");
}
|
|
1627
|
+
/**
|
|
1628
|
+
* Factory for `tool_search`. Auto-injected by the agent when
|
|
1629
|
+
* `behavior.toolDisclosure === 'lazy'` and at least one MCP tool is in the
|
|
1630
|
+
* registry. Opt out via `behavior.toolSearch.tool === false`.
|
|
1631
|
+
*/
|
|
1632
|
+
function createToolSearchTool(options) {
	const defaultLimit = options.defaultLimit ?? DEFAULT_LIMIT$1;
	// Index by name and group by MCP server once, at factory time.
	const byName = new Map(options.catalog.map((e) => [e.name, e]));
	const byServer = /* @__PURE__ */ new Map();
	for (const entry of options.catalog) {
		if (!entry.server) continue;
		const list = byServer.get(entry.server) ?? [];
		list.push(entry);
		byServer.set(entry.server, list);
	}
	// A caller-supplied limit can never exceed the catalog size (floor 1).
	const maxLimit = Math.max(options.catalog.length, 1);
	return {
		spec: {
			name: "tool_search",
			description: "Discover and load schemas for additional tools listed in <searchable_tools>. Tools listed there are advertised by name + description only — their input schemas are not loaded into context until you surface them through this tool. Pass `query` for a substring search, `names` to load specific tools, or `server` to load every tool from one MCP server. Returned tools become callable for the rest of this run.",
			inputSchema: {
				type: "object",
				properties: {
					query: {
						type: "string",
						description: "Substring to match against tool name + description (case-insensitive)."
					},
					names: {
						type: "array",
						items: { type: "string" },
						description: "Explicit tool names to load (bypasses ranking). Use the names shown in <searchable_tools>."
					},
					server: {
						type: "string",
						description: "MCP server name — load every tool from this server."
					},
					limit: {
						type: "integer",
						minimum: 1,
						description: `Cap on returned matches. Default: ${defaultLimit}.`
					}
				},
				additionalProperties: false
			}
		},
		async execute(input, ctx) {
			if (ctx.signal?.aborted) return "<tool_search_results matches=\"0\" aborted=\"true\">Run aborted.</tool_search_results>";
			// Defensive input normalisation: empty/whitespace strings and
			// non-string array members are treated as absent.
			const query = (typeof input.query === "string" ? input.query.trim() : void 0) || void 0;
			const namesIn = Array.isArray(input.names) ? input.names.filter((n) => typeof n === "string" && n.length > 0) : void 0;
			const server = typeof input.server === "string" && input.server.length > 0 ? input.server : void 0;
			const limitIn = typeof input.limit === "number" && Number.isFinite(input.limit) && input.limit > 0 ? Math.floor(input.limit) : defaultLimit;
			const limit = Math.min(limitIn, maxLimit);
			if (options.catalog.length === 0) return "<tool_search_results matches=\"0\">No lazy tools registered for this run.</tool_search_results>";
			// Matches accumulate across the three selectors in priority order
			// (explicit names, then server, then ranked query); `seen` dedupes.
			const matches = [];
			const seen = /* @__PURE__ */ new Set();
			const misses = [];
			if (namesIn && namesIn.length > 0) for (const n of namesIn) {
				if (seen.has(n)) continue;
				const entry = byName.get(n);
				if (entry) {
					matches.push(entry);
					seen.add(n);
				} else misses.push(n);
			}
			if (server) {
				const list = byServer.get(server) ?? [];
				for (const entry of list) {
					if (seen.has(entry.name)) continue;
					matches.push(entry);
					seen.add(entry.name);
				}
			}
			if (query !== void 0) for (const entry of rankByQuery(options.catalog, query)) {
				if (seen.has(entry.name)) continue;
				matches.push(entry);
				seen.add(entry.name);
			}
			// No selectors at all ⇒ list the whole catalog (subject to limit).
			if (!namesIn?.length && !server && query === void 0) for (const entry of options.catalog) {
				matches.push(entry);
				seen.add(entry.name);
			}
			const truncated = matches.length > limit;
			const shown = truncated ? matches.slice(0, limit) : matches;
			// Unlock by canonical name — the dispatch path is alias-stable
			// (see partitionToolDisclosure).
			for (const entry of shown) options.unlocked.add(entry.canonicalName);
			const parts = [];
			const queryAttr = query ? ` query="${escapeXml(query)}"` : "";
			const serverAttr = server ? ` server="${escapeXml(server)}"` : "";
			parts.push(`<tool_search_results matches="${shown.length}" total="${matches.length}"${queryAttr}${serverAttr}>`);
			if (shown.length === 0) parts.push(" No matches. Try a broader query, or omit all parameters to list everything.");
			else {
				for (const entry of shown) parts.push(formatMatch(entry));
				parts.push("");
				parts.push(" These tools are now callable. Invoke them by name in subsequent turns.");
				if (truncated) parts.push(` ${matches.length - shown.length} additional matches were truncated — refine the query or raise \`limit\`.`);
			}
			// Names requested via `names` that do not exist in the catalog.
			if (misses.length > 0) parts.push(` <misses>${misses.map(escapeXml).join(", ")}</misses>`);
			parts.push("</tool_search_results>");
			return parts.join("\n");
		}
	};
}
|
|
1728
|
+
//#endregion
|
|
1729
|
+
//#region src/agent.ts
|
|
1730
|
+
// Canonical list of every hook event the agent can emit. run() validates
// caller-supplied per-run hooks against this set (see isKnownHookEvent) so
// a typo'd event name fails fast instead of silently never firing.
const HOOK_EVENT_SET = new Set([
	"system:before",
	"turn:before",
	"turn:after",
	"stream:text",
	"stream:end",
	"stream:thinking",
	"oauth:refresh",
	"tool:gate",
	"tool:before",
	"tool:after",
	"tool:error",
	"tool:transform",
	"tool:unknown",
	"validation:reject",
	"validation:coerce",
	"context:transform",
	"system:transform",
	"steer:inject",
	"spawn:before",
	"spawn:complete",
	"spawn:error",
	"child:stream:text",
	"child:stream:thinking",
	"child:stream:end",
	"child:tool:before",
	"child:tool:after",
	"child:tool:error",
	"child:turn:after",
	"mcp:connect",
	"mcp:error",
	"mcp:close",
	"mcp:bootstrap:start",
	"mcp:bootstrap:end",
	"mcp:tools:filter",
	"mcp:tool:gate",
	"mcp:tool:before",
	"mcp:tool:after",
	"mcp:tool:transform",
	"mcp:tool:error",
	"skills:resolve",
	"skills:catalog",
	"skills:activate",
	"skills:deactivate",
	"usage",
	"output",
	"budget:exceeded",
	"tool-budget:exceeded",
	"agent:abort",
	"agent:done",
	"session:start",
	"session:end",
	"session:turns",
	"session:meta",
	"session:save"
]);
// True when `event` is one of the hook events listed above.
function isKnownHookEvent(event) {
	return HOOK_EVENT_SET.has(event);
}
|
|
1789
|
+
/**
 * Merge per-run behavior overrides over agent-level defaults.
 *
 * Run-level values win over agent-level ones; hard fallbacks apply only
 * to toolExecution ("parallel"), cache (true), compactStrategy ("off")
 * and toolDisclosure ("eager"). Every other key stays undefined when
 * neither layer sets it. Nullish coalescing is used throughout, so
 * explicit `false`/`0` overrides are respected.
 */
function resolveBehavior(agentBehavior, runBehavior) {
	const pick = (key) => runBehavior?.[key] ?? agentBehavior?.[key];
	return {
		toolExecution: pick("toolExecution") ?? "parallel",
		maxTurns: pick("maxTurns"),
		maxTokens: pick("maxTokens"),
		thinkingBudget: pick("thinkingBudget"),
		schema: pick("schema"),
		cache: pick("cache") ?? true,
		toolOutputBudget: pick("toolOutputBudget"),
		compactStrategy: pick("compactStrategy") ?? "off",
		compactThreshold: pick("compactThreshold"),
		compactKeepTurns: pick("compactKeepTurns"),
		thinkingDecay: pick("thinkingDecay"),
		dedupReads: pick("dedupReads"),
		dedupTools: pick("dedupTools"),
		requireReadBeforeEdit: pick("requireReadBeforeEdit"),
		toolBudgets: pick("toolBudgets"),
		readLineNumbers: pick("readLineNumbers"),
		elideStaleReads: pick("elideStaleReads"),
		toolDisclosure: pick("toolDisclosure") ?? "eager",
		toolSearch: pick("toolSearch")
	};
}
|
|
1812
|
+
/**
 * Determine which configured MCP server owns a namespaced tool name.
 *
 * Tools registered through {@link connectMcpServers} are keyed
 * `mcp_<server>_<tool>`, and server names may themselves contain
 * underscores, so the owner is found by testing each configured server
 * name as a prefix and keeping the longest match — a naive
 * `split('_')[1]` would mis-attribute tools. The O(N×M) scan runs once
 * per tool at run start; it is not on a hot path.
 */
function resolveServerForTool(toolName, servers) {
	if (!servers?.length) return void 0;
	let winner;
	let winnerLen = -1;
	for (const candidate of servers) {
		if (!toolName.startsWith(`mcp_${candidate.name}_`)) continue;
		if (candidate.name.length > winnerLen) {
			winner = candidate;
			winnerLen = candidate.name.length;
		}
	}
	return winner;
}
|
|
1834
|
+
/**
|
|
1835
|
+
* Partition a tool registry into eager and lazy buckets.
|
|
1836
|
+
*
|
|
1837
|
+
* Native + skill tools always end up in the eager bucket — only MCP tools
|
|
1838
|
+
* are eligible for lazy disclosure. The `tool_search` tool itself, when
|
|
1839
|
+
* registered, must be eager (otherwise the model has no way to discover
|
|
1840
|
+
* anything).
|
|
1841
|
+
*
|
|
1842
|
+
* Lazy entries carry both the wire (`name`, alias-rewritten) and the
|
|
1843
|
+
* canonical (`canonicalName`) so:
|
|
1844
|
+
* - The catalog and `tool_search` results show wire names — the only names
|
|
1845
|
+
* the model ever sees on the provider side.
|
|
1846
|
+
* - The unlock set is keyed by canonical names — the loop's `ctx.tools` map
|
|
1847
|
+
* and the dispatch path are alias-stable.
|
|
1848
|
+
*/
|
|
1849
|
+
/**
 * Split the tool registry into eager (always advertised) and lazy
 * (discoverable via tool_search) buckets.
 *
 * Only MCP tools are eligible for lazy disclosure — anything not in
 * `mcpToolNames` (native tools, skill tools, tool_search itself) is
 * always eager. Per-server `disclosure` settings override `globalMode`.
 *
 * Each lazy entry records both the wire name (`name`, alias-rewritten —
 * the only name the model ever sees) and `canonicalName` (what the
 * unlock set and the dispatch path key on), keeping aliasing stable.
 */
function partitionToolDisclosure(toolsBySpecName, mcpToolNames, servers, globalMode, toolAliases) {
	const eagerCanonicalNames = new Set();
	const lazyCanonicalNames = new Set();
	const lazyEntries = [];
	const wireFor = (canonical) => {
		const alias = toolAliases?.[canonical];
		return typeof alias === "string" && alias.length > 0 ? alias : canonical;
	};
	for (const [canonicalName, def] of Object.entries(toolsBySpecName)) {
		if (!mcpToolNames.has(canonicalName)) {
			// Non-MCP tools are never lazily disclosed.
			eagerCanonicalNames.add(canonicalName);
			continue;
		}
		const server = resolveServerForTool(canonicalName, servers);
		if ((server?.disclosure ?? globalMode) !== "lazy") {
			eagerCanonicalNames.add(canonicalName);
			continue;
		}
		lazyCanonicalNames.add(canonicalName);
		const entry = {
			name: wireFor(canonicalName),
			canonicalName,
			description: def.spec.description || "",
			inputSchema: def.spec.inputSchema ?? {
				type: "object",
				properties: {}
			}
		};
		if (server) entry.server = server.name;
		lazyEntries.push(entry);
	}
	return {
		eagerCanonicalNames,
		lazyCanonicalNames,
		lazyEntries
	};
}
|
|
1883
|
+
/**
 * Render the `<searchable_tools>` system-prompt section for lazily
 * disclosed tools: entries grouped by MCP server (server names sorted),
 * serverless entries trailing in catalog order. When a discovery tool is
 * registered, a short usage preamble is prepended.
 */
function buildSearchableCatalog(entries, options) {
	const grouped = new Map();
	const serverless = [];
	for (const entry of entries) {
		if (entry.server) {
			const bucket = grouped.get(entry.server);
			if (bucket) bucket.push(entry);
			else grouped.set(entry.server, [entry]);
		} else {
			serverless.push(entry);
		}
	}
	const toolLine = (entry) => ` <tool name="${escapeXml(entry.name)}">${escapeXml(entry.description)}</tool>`;
	const out = [];
	if (options.discoveryToolName) {
		out.push("The following tools are available but their input schemas are NOT loaded in your context.");
		out.push(`Call the \`${options.discoveryToolName}\` tool to load schemas before invoking them. Surfaced tools persist for the rest of the run.`);
		out.push("");
	}
	out.push("<searchable_tools>");
	for (const serverName of [...grouped.keys()].sort()) {
		out.push(` <server name="${escapeXml(serverName)}">`);
		for (const entry of grouped.get(serverName)) out.push(toolLine(entry));
		out.push(" </server>");
	}
	for (const entry of serverless) out.push(toolLine(entry));
	out.push("</searchable_tools>");
	return out.join("\n");
}
|
|
1908
|
+
/**
 * Register a `tool:gate` handler that blocks dispatch of lazy tools the
 * model has not surfaced via `tool_search` yet.
 *
 * Production providers (Anthropic, OpenAI) enforce this server-side —
 * the model can only emit `tool_use` for tools in the request's `tools`
 * list — but custom / OSS / mock providers (or a future lenient
 * validator) could bypass that by quoting a name from the catalog. This
 * hook makes lazy disclosure a real boundary rather than just an
 * advertisement filter.
 *
 * Returns an uninstall function; run-end teardown calls it so handlers
 * never leak across runs.
 */
function installLazyDisclosureGate(hooks, lazyCanonicalNames, unlocked, discoveryToolName) {
	// No lazy tools ⇒ nothing to gate; hand back a no-op uninstaller.
	if (lazyCanonicalNames.size === 0) return () => {};
	return hooks.hook("tool:gate", (gate) => {
		if (gate.block) return; // another handler already blocked it
		if (!lazyCanonicalNames.has(gate.name)) return; // not a lazy tool
		if (unlocked.has(gate.name)) return; // already surfaced this run
		gate.block = true;
		gate.reason = discoveryToolName ? `Tool "${gate.name}" is listed in <searchable_tools> but its schema has not been loaded. Call the \`${discoveryToolName}\` tool with names: ["${gate.name}"] first, then re-issue the call.` : `Tool "${gate.name}" is listed in <searchable_tools> but its schema has not been loaded.`;
	});
}
|
|
1931
|
+
function createAgent({ provider, name: agentName, system: agentSystem, tools: agentTools, toolAliases, behavior: agentBehavior, execution, mcpServers, session, skills: agentSkills, mcpConnector, eager }) {
|
|
1932
|
+
const hooks = createHooks();
|
|
1933
|
+
const executionContext = execution ?? createProcessContext();
|
|
1934
|
+
const sourceTools = agentTools ?? {};
|
|
1935
|
+
let abortController;
|
|
1936
|
+
let running = false;
|
|
1937
|
+
let idleResolve;
|
|
1938
|
+
let idlePromise;
|
|
1939
|
+
let executionHandle = null;
|
|
1940
|
+
let mcpConnection = null;
|
|
1941
|
+
let mcpWarmupPromise = null;
|
|
1942
|
+
const allMcpServers = mcpServers ?? [];
|
|
1943
|
+
const steeringQueue = [];
|
|
1944
|
+
const followUpQueue = [];
|
|
1945
|
+
let conversationTurns = session?.turns.slice() ?? [];
|
|
1946
|
+
let runCounter = session?.runs.length ?? 0;
|
|
1947
|
+
const skillsConfig = agentSkills;
|
|
1948
|
+
const skillsEnabledValue = skillsConfig?.enabled;
|
|
1949
|
+
const skillsDisabled = skillsEnabledValue === false || Array.isArray(skillsEnabledValue) && skillsEnabledValue.length === 0;
|
|
1950
|
+
let resolvedSkills = null;
|
|
1951
|
+
let skillsCatalog = null;
|
|
1952
|
+
let skillsCleanup = () => {};
|
|
1953
|
+
const skillActivationState = createSkillActivationState({ maxActive: skillsConfig?.maxActive });
|
|
1954
|
+
async function run(options) {
|
|
1955
|
+
if (running) throw new Error("Agent is already running. Use steer() or followUp() to queue messages, or waitForIdle().");
|
|
1956
|
+
const hasSessionTurns = session && session.turns.length > 0;
|
|
1957
|
+
if (!options.prompt && !hasSessionTurns) throw new Error("prompt is required when no session with existing turns is provided");
|
|
1958
|
+
if (!options.prompt && hasSessionTurns) {
|
|
1959
|
+
const lastTurn = session.turns.at(-1);
|
|
1960
|
+
if (lastTurn && lastTurn.role !== "user") throw new Error("cannot resume without prompt: last session turn must be a user message");
|
|
1961
|
+
}
|
|
1962
|
+
running = true;
|
|
1963
|
+
abortController = new AbortController();
|
|
1964
|
+
const runId = `run_${++runCounter}`;
|
|
1965
|
+
const promptLabel = typeof options.prompt === "string" ? options.prompt : Array.isArray(options.prompt) ? options.prompt.filter((p) => p.type === "text").map((p) => p.text).join("\n") : "";
|
|
1966
|
+
session?.startRun(runId, promptLabel, {
|
|
1967
|
+
...options.parentRunId ? { parentRunId: options.parentRunId } : {},
|
|
1968
|
+
depth: typeof options.depth === "number" ? options.depth : 0
|
|
1969
|
+
});
|
|
1970
|
+
if (session) {
|
|
1971
|
+
await session.updateStatus("running");
|
|
1972
|
+
await hooks.callHook("session:start", {
|
|
1973
|
+
sessionId: session.id,
|
|
1974
|
+
runId,
|
|
1975
|
+
prompt: promptLabel
|
|
1976
|
+
});
|
|
1977
|
+
}
|
|
1978
|
+
if (options.signal) if (options.signal.aborted) abortController.abort();
|
|
1979
|
+
else {
|
|
1980
|
+
const onExternalAbort = () => abortController?.abort();
|
|
1981
|
+
options.signal.addEventListener("abort", onExternalAbort, { once: true });
|
|
1982
|
+
}
|
|
1983
|
+
idlePromise = new Promise((resolve) => {
|
|
1984
|
+
idleResolve = resolve;
|
|
1985
|
+
});
|
|
1986
|
+
const childrenStats = [];
|
|
1987
|
+
const unregisterSpawnHook = hooks.hook("spawn:complete", (ctx) => {
|
|
1988
|
+
childrenStats.push(ctx);
|
|
1989
|
+
});
|
|
1990
|
+
const perRunUnregisters = [];
|
|
1991
|
+
if (options.hooks) for (const [event, handler] of Object.entries(options.hooks)) {
|
|
1992
|
+
if (!isKnownHookEvent(event)) throw new Error(`Unknown hook event "${event}" passed to run(). See AgentHooks for valid events.`);
|
|
1993
|
+
const handlerList = Array.isArray(handler) ? handler : [handler];
|
|
1994
|
+
for (const fn of handlerList) {
|
|
1995
|
+
if (typeof fn !== "function") continue;
|
|
1996
|
+
perRunUnregisters.push(hooks.hook(event, fn));
|
|
1997
|
+
}
|
|
1998
|
+
}
|
|
1999
|
+
if (!executionHandle) executionHandle = await executionContext.spawn();
|
|
2000
|
+
if (allMcpServers.length > 0 && !mcpConnection) await warmup();
|
|
2001
|
+
if (!skillsDisabled && skillsConfig && !resolvedSkills) {
|
|
2002
|
+
const bundle = await resolveSkills(skillsConfig);
|
|
2003
|
+
resolvedSkills = bundle.skills;
|
|
2004
|
+
skillsCleanup = bundle.cleanup;
|
|
2005
|
+
await hooks.callHook("skills:resolve", { skills: resolvedSkills });
|
|
2006
|
+
const skillsToolRegistered = skillsConfig?.tool !== false && resolvedSkills.length > 0;
|
|
2007
|
+
const catalogCtx = {
|
|
2008
|
+
catalog: buildCatalog(resolvedSkills, { skillsToolRegistered }),
|
|
2009
|
+
skills: resolvedSkills
|
|
2010
|
+
};
|
|
2011
|
+
await hooks.callHook("skills:catalog", catalogCtx);
|
|
2012
|
+
skillsCatalog = catalogCtx.catalog;
|
|
2013
|
+
}
|
|
2014
|
+
if (resolvedSkills && session && session.turns.length > 0 && skillActivationState.active().length === 0) {
|
|
2015
|
+
const skillsByName = new Map(resolvedSkills.map((s) => [s.name, s]));
|
|
2016
|
+
for (const turn of session.turns) {
|
|
2017
|
+
if (turn.role !== "assistant") continue;
|
|
2018
|
+
for (const block of turn.content) {
|
|
2019
|
+
if (block.type !== "tool_call" || block.name !== "skills_use") continue;
|
|
2020
|
+
const skillName = block.input?.name;
|
|
2021
|
+
if (!skillName) continue;
|
|
2022
|
+
const skill = skillsByName.get(skillName);
|
|
2023
|
+
if (!skill) continue;
|
|
2024
|
+
if (skillActivationState.activate(skill, "resume") === "ok") await hooks.callHook("skills:activate", {
|
|
2025
|
+
skill,
|
|
2026
|
+
via: "resume"
|
|
2027
|
+
});
|
|
2028
|
+
}
|
|
2029
|
+
}
|
|
2030
|
+
}
|
|
2031
|
+
const thinking = options.thinking ?? "off";
|
|
2032
|
+
const model = options.model ?? provider.meta.defaultModel;
|
|
2033
|
+
const resolvedBehavior = resolveBehavior(agentBehavior, options.behavior);
|
|
2034
|
+
const { toolExecution, maxTurns, maxTokens, thinkingBudget, schema, cache, toolOutputBudget, compactStrategy, compactThreshold, compactKeepTurns, thinkingDecay, dedupTools, toolBudgets, elideStaleReads, toolDisclosure, toolSearch } = resolvedBehavior;
|
|
2035
|
+
let system = options.system || agentSystem || "You are a helpful assistant.";
|
|
2036
|
+
if (skillsCatalog) system = `${system}\n\n${skillsCatalog}`;
|
|
2037
|
+
const runBaseTools = options.tools !== void 0 ? options.tools : mcpConnection ? {
|
|
2038
|
+
...sourceTools,
|
|
2039
|
+
...mcpConnection.tools
|
|
2040
|
+
} : sourceTools;
|
|
2041
|
+
const mcpToolNames = options.tools === void 0 && mcpConnection ? new Set(Object.keys(mcpConnection.tools)) : /* @__PURE__ */ new Set();
|
|
2042
|
+
const mergedWithSkills = options.tools === void 0 && !!resolvedSkills && resolvedSkills.length > 0 && skillsConfig?.tool !== false ? {
|
|
2043
|
+
skills_use: createSkillsUseTool({
|
|
2044
|
+
catalog: resolvedSkills,
|
|
2045
|
+
state: skillActivationState,
|
|
2046
|
+
hooks
|
|
2047
|
+
}),
|
|
2048
|
+
skills_read: createSkillsReadTool({
|
|
2049
|
+
catalog: resolvedSkills,
|
|
2050
|
+
state: skillActivationState
|
|
2051
|
+
}),
|
|
2052
|
+
skills_run_script: createSkillsRunScriptTool({
|
|
2053
|
+
catalog: resolvedSkills,
|
|
2054
|
+
state: skillActivationState,
|
|
2055
|
+
scriptTimeoutMs: skillsConfig?.scriptTimeoutMs
|
|
2056
|
+
}),
|
|
2057
|
+
...runBaseTools
|
|
2058
|
+
} : runBaseTools;
|
|
2059
|
+
const toolsPreSearch = {};
|
|
2060
|
+
for (const tool of Object.values(mergedWithSkills)) toolsPreSearch[tool.spec.name] = tool;
|
|
2061
|
+
const disclosure = partitionToolDisclosure(toolsPreSearch, mcpToolNames, mcpServers, toolDisclosure, toolAliases);
|
|
2062
|
+
const unlocked = new Set(disclosure.eagerCanonicalNames);
|
|
2063
|
+
const hostDefinedToolSearch = !!toolsPreSearch.tool_search;
|
|
2064
|
+
const shouldInjectToolSearch = disclosure.lazyEntries.length > 0 && toolSearch?.tool !== false && !hostDefinedToolSearch;
|
|
2065
|
+
let tools = toolsPreSearch;
|
|
2066
|
+
if (shouldInjectToolSearch) {
|
|
2067
|
+
const toolSearchTool = createToolSearchTool({
|
|
2068
|
+
catalog: disclosure.lazyEntries,
|
|
2069
|
+
unlocked,
|
|
2070
|
+
...toolSearch?.limit !== void 0 ? { defaultLimit: toolSearch.limit } : {}
|
|
2071
|
+
});
|
|
2072
|
+
tools = {
|
|
2073
|
+
...toolsPreSearch,
|
|
2074
|
+
[toolSearchTool.spec.name]: toolSearchTool
|
|
2075
|
+
};
|
|
2076
|
+
unlocked.add(toolSearchTool.spec.name);
|
|
2077
|
+
}
|
|
2078
|
+
const discoveryToolName = shouldInjectToolSearch ? "tool_search" : hostDefinedToolSearch ? toolAliases?.tool_search ?? "tool_search" : null;
|
|
2079
|
+
if (disclosure.lazyEntries.length > 0) system = `${system}\n\n${buildSearchableCatalog(disclosure.lazyEntries, { discoveryToolName })}`;
|
|
2080
|
+
const aliasMaps = buildAliasMaps(toolAliases, Object.keys(tools));
|
|
2081
|
+
const uninstallLazyDisclosureGate = installLazyDisclosureGate(hooks, disclosure.lazyCanonicalNames, unlocked, discoveryToolName);
|
|
2082
|
+
function buildFormattedTools() {
|
|
2083
|
+
const specs = [];
|
|
2084
|
+
for (const t of Object.values(tools)) {
|
|
2085
|
+
if (!unlocked.has(t.spec.name)) continue;
|
|
2086
|
+
specs.push({
|
|
2087
|
+
name: aliasMaps.aliasByCanonical.get(t.spec.name) ?? t.spec.name,
|
|
2088
|
+
description: t.spec.description || "",
|
|
2089
|
+
inputSchema: t.spec.inputSchema
|
|
2090
|
+
});
|
|
2091
|
+
}
|
|
2092
|
+
return specs.length > 0 ? provider.formatTools(specs) : [];
|
|
2093
|
+
}
|
|
2094
|
+
const formattedTools = buildFormattedTools();
|
|
2095
|
+
const turns = [];
|
|
2096
|
+
const isResume = session && session.turns.length > 0 && (session.runs.length > 0 || !options.prompt);
|
|
2097
|
+
if (isResume) turns.push(...session.turns);
|
|
2098
|
+
const runTurnStart = turns.length;
|
|
2099
|
+
if (options.system) await hooks.callHook("system:before", { system: options.system });
|
|
2100
|
+
const promptParts = canonicalizePrompt(options.prompt);
|
|
2101
|
+
if (promptParts) {
|
|
2102
|
+
const promptMsg = buildPromptMessage(provider, promptParts);
|
|
2103
|
+
turns.push({
|
|
2104
|
+
id: crypto.randomUUID(),
|
|
2105
|
+
runId,
|
|
2106
|
+
role: promptMsg.role,
|
|
2107
|
+
content: promptMsg.content,
|
|
2108
|
+
createdAt: Date.now()
|
|
2109
|
+
});
|
|
2110
|
+
}
|
|
2111
|
+
conversationTurns = turns;
|
|
2112
|
+
let lastPersistedTurnCount = isResume ? session.turns.length : 0;
|
|
2113
|
+
if (session && turns.length > lastPersistedTurnCount) {
|
|
2114
|
+
const seededTurns = turns.slice(lastPersistedTurnCount);
|
|
2115
|
+
await session.appendTurns(seededTurns);
|
|
2116
|
+
lastPersistedTurnCount = turns.length;
|
|
2117
|
+
await hooks.callHook("session:turns", {
|
|
2118
|
+
sessionId: session.id,
|
|
2119
|
+
turns: seededTurns,
|
|
2120
|
+
count: turns.length
|
|
2121
|
+
});
|
|
2122
|
+
}
|
|
2123
|
+
const unregisterSessionSync = session ? hooks.hook("turn:after", async () => {
|
|
2124
|
+
const newTurns = turns.slice(lastPersistedTurnCount);
|
|
2125
|
+
if (newTurns.length > 0) {
|
|
2126
|
+
await session.appendTurns(newTurns);
|
|
2127
|
+
lastPersistedTurnCount = turns.length;
|
|
2128
|
+
await hooks.callHook("session:turns", {
|
|
2129
|
+
sessionId: session.id,
|
|
2130
|
+
turns: newTurns,
|
|
2131
|
+
count: turns.length
|
|
2132
|
+
});
|
|
2133
|
+
}
|
|
2134
|
+
}) : void 0;
|
|
2135
|
+
async function flushTurns() {
|
|
2136
|
+
if (!session) return;
|
|
2137
|
+
const remaining = turns.slice(lastPersistedTurnCount);
|
|
2138
|
+
if (remaining.length > 0) {
|
|
2139
|
+
await session.appendTurns(remaining);
|
|
2140
|
+
lastPersistedTurnCount = turns.length;
|
|
2141
|
+
await hooks.callHook("session:turns", {
|
|
2142
|
+
sessionId: session.id,
|
|
2143
|
+
turns: remaining,
|
|
2144
|
+
count: turns.length
|
|
2145
|
+
});
|
|
2146
|
+
}
|
|
2147
|
+
}
|
|
2148
|
+
async function deactivateAllSkills() {
|
|
2149
|
+
for (const record of skillActivationState.clear()) await hooks.callHook("skills:deactivate", {
|
|
2150
|
+
skill: record.skill,
|
|
2151
|
+
reason: "run-end"
|
|
2152
|
+
});
|
|
2153
|
+
}
|
|
2154
|
+
async function finalizeSession(status) {
|
|
2155
|
+
if (!session) return;
|
|
2156
|
+
const run = session.runs.find((r) => r.id === runId);
|
|
2157
|
+
if (run) await session.updateRun(run);
|
|
2158
|
+
await session.updateStatus(status === "aborted" ? "idle" : status);
|
|
2159
|
+
await hooks.callHook("session:end", {
|
|
2160
|
+
sessionId: session.id,
|
|
2161
|
+
runId,
|
|
2162
|
+
status,
|
|
2163
|
+
turnRange: [runTurnStart, turns.length - 1]
|
|
2164
|
+
});
|
|
2165
|
+
}
|
|
2166
|
+
const uninstallAllowedToolsGate = installAllowedToolsGate(hooks, skillActivationState);
|
|
2167
|
+
const uninstallToolBudgets = installToolBudgetsGate(hooks, () => toolBudgets, (msg) => steeringQueue.push(msg));
|
|
2168
|
+
const uninstallDedupTools = installDedupToolsGate(hooks, () => dedupTools, () => session ?? void 0);
|
|
2169
|
+
const runStartMs = Date.now();
|
|
2170
|
+
const runDepth = typeof options.depth === "number" ? options.depth : 0;
|
|
2171
|
+
try {
|
|
2172
|
+
const stats = await runLoop({
|
|
2173
|
+
provider,
|
|
2174
|
+
hooks,
|
|
2175
|
+
agentName,
|
|
2176
|
+
agentSystem,
|
|
2177
|
+
agentTools: sourceTools,
|
|
2178
|
+
agentToolAliases: toolAliases,
|
|
2179
|
+
agentMcpServers: mcpServers,
|
|
2180
|
+
agentSkills,
|
|
2181
|
+
agentBehavior: resolvedBehavior,
|
|
2182
|
+
tools,
|
|
2183
|
+
formattedTools,
|
|
2184
|
+
rebuildFormattedTools: disclosure.lazyEntries.length > 0 ? buildFormattedTools : void 0,
|
|
2185
|
+
aliasMaps,
|
|
2186
|
+
model,
|
|
2187
|
+
system,
|
|
2188
|
+
thinking,
|
|
2189
|
+
toolExecution,
|
|
2190
|
+
signal: abortController.signal,
|
|
2191
|
+
execution: executionContext,
|
|
2192
|
+
handle: executionHandle,
|
|
2193
|
+
steeringQueue,
|
|
2194
|
+
followUpQueue,
|
|
2195
|
+
turns,
|
|
2196
|
+
runId,
|
|
2197
|
+
generateTurnId: () => session?.generateTurnId() ?? crypto.randomUUID(),
|
|
2198
|
+
maxTurns,
|
|
2199
|
+
maxTokens,
|
|
2200
|
+
...session ? { session } : {},
|
|
2201
|
+
depth: runDepth,
|
|
2202
|
+
thinkingBudget,
|
|
2203
|
+
schema,
|
|
2204
|
+
cache,
|
|
2205
|
+
toolOutputBudget,
|
|
2206
|
+
compactStrategy,
|
|
2207
|
+
compactThreshold,
|
|
2208
|
+
compactKeepTurns,
|
|
2209
|
+
...elideStaleReads !== void 0 ? { elideStaleReads } : {},
|
|
2210
|
+
...thinkingDecay !== void 0 ? { thinkingDecay } : {},
|
|
2211
|
+
runStartMs,
|
|
2212
|
+
runToolCounts: {}
|
|
2213
|
+
});
|
|
2214
|
+
const parentTurnCost = stats.turnUsage?.reduce((sum, t) => sum + (t.cost ?? 0), 0) ?? 0;
|
|
2215
|
+
let childrenIn = 0;
|
|
2216
|
+
let childrenOut = 0;
|
|
2217
|
+
let childrenCost = 0;
|
|
2218
|
+
let childrenCacheRead = 0;
|
|
2219
|
+
let childrenCacheCreation = 0;
|
|
2220
|
+
for (const c of childrenStats) {
|
|
2221
|
+
childrenIn += c.stats.totalIn;
|
|
2222
|
+
childrenOut += c.stats.totalOut;
|
|
2223
|
+
childrenCost += c.stats.cost ?? 0;
|
|
2224
|
+
childrenCacheRead += c.stats.totalCacheRead;
|
|
2225
|
+
childrenCacheCreation += c.stats.totalCacheCreation;
|
|
2226
|
+
}
|
|
2227
|
+
const cumulativeCost = parentTurnCost + childrenCost;
|
|
2228
|
+
const finalStats = {
|
|
2229
|
+
...stats,
|
|
2230
|
+
totalIn: stats.totalIn + childrenIn,
|
|
2231
|
+
totalOut: stats.totalOut + childrenOut,
|
|
2232
|
+
totalCacheRead: stats.totalCacheRead + childrenCacheRead,
|
|
2233
|
+
totalCacheCreation: stats.totalCacheCreation + childrenCacheCreation,
|
|
2234
|
+
...cumulativeCost > 0 ? { cost: cumulativeCost } : {},
|
|
2235
|
+
children: childrenStats.length > 0 ? childrenStats : void 0
|
|
2236
|
+
};
|
|
2237
|
+
await flushTurns();
|
|
2238
|
+
if (abortController.signal.aborted) {
|
|
2239
|
+
session?.abortRun(runId);
|
|
2240
|
+
await finalizeSession("aborted");
|
|
2241
|
+
await hooks.callHook("agent:done", finalStats);
|
|
2242
|
+
return finalStats;
|
|
2243
|
+
}
|
|
2244
|
+
session?.completeRun(runId, {
|
|
2245
|
+
turns: stats.turns,
|
|
2246
|
+
tokensIn: stats.totalIn,
|
|
2247
|
+
tokensOut: stats.totalOut,
|
|
2248
|
+
turnUsage: stats.turnUsage,
|
|
2249
|
+
cost: parentTurnCost > 0 ? parentTurnCost : void 0
|
|
2250
|
+
});
|
|
2251
|
+
await finalizeSession("completed");
|
|
2252
|
+
await hooks.callHook("agent:done", finalStats);
|
|
2253
|
+
return finalStats;
|
|
2254
|
+
} catch (err) {
|
|
2255
|
+
await flushTurns();
|
|
2256
|
+
if (abortController.signal.aborted) {
|
|
2257
|
+
session?.abortRun(runId);
|
|
2258
|
+
await finalizeSession("aborted");
|
|
2259
|
+
const stats = {
|
|
2260
|
+
totalIn: 0,
|
|
2261
|
+
totalOut: 0,
|
|
2262
|
+
totalCacheRead: 0,
|
|
2263
|
+
totalCacheCreation: 0,
|
|
2264
|
+
turns: 0,
|
|
2265
|
+
elapsed: 0
|
|
2266
|
+
};
|
|
2267
|
+
await hooks.callHook("agent:done", stats);
|
|
2268
|
+
return stats;
|
|
2269
|
+
}
|
|
2270
|
+
const message = err instanceof Error ? err.message : String(err);
|
|
2271
|
+
session?.errorRun(runId, message);
|
|
2272
|
+
await finalizeSession("error");
|
|
2273
|
+
throw err;
|
|
2274
|
+
} finally {
|
|
2275
|
+
await deactivateAllSkills();
|
|
2276
|
+
uninstallAllowedToolsGate();
|
|
2277
|
+
uninstallDedupTools();
|
|
2278
|
+
uninstallToolBudgets();
|
|
2279
|
+
uninstallLazyDisclosureGate();
|
|
2280
|
+
unregisterSpawnHook();
|
|
2281
|
+
unregisterSessionSync?.();
|
|
2282
|
+
for (const unregister of perRunUnregisters) unregister();
|
|
2283
|
+
running = false;
|
|
2284
|
+
abortController = void 0;
|
|
2285
|
+
steeringQueue.length = 0;
|
|
2286
|
+
followUpQueue.length = 0;
|
|
2287
|
+
idleResolve?.();
|
|
2288
|
+
idlePromise = void 0;
|
|
2289
|
+
idleResolve = void 0;
|
|
2290
|
+
}
|
|
2291
|
+
}
|
|
2292
|
+
/** Abort the in-flight run, if any. No-op when the agent is idle (abortController is cleared in run()'s finally). */
function abort() {
  abortController?.abort();
}
|
|
2295
|
+
/**
 * Queue a steering message for the active run; the queue is drained by the
 * run loop and cleared in run()'s finally block.
 * @param {unknown} message - steering payload consumed by the run loop.
 */
function steer(message) {
  steeringQueue.push(message);
}
|
|
2298
|
+
/**
 * Queue a follow-up message; like the steering queue, followUpQueue is
 * cleared in run()'s finally block.
 * @param {unknown} message - follow-up payload consumed by the run loop.
 */
function followUpFn(message) {
  followUpQueue.push(message);
}
|
|
2301
|
+
/**
 * Returns a promise that settles when the current run ends — idlePromise is
 * created at run start and resolved in run()'s finally. Resolves immediately
 * when no run is active.
 * @returns {Promise<void>}
 */
function waitForIdle() {
  return idlePromise ?? Promise.resolve();
}
|
|
2304
|
+
/**
 * Reset conversational state: forget accumulated turns, drop any queued
 * steering / follow-up messages, and deactivate every active skill
 * (emitting a `skills:deactivate` hook per skill with reason "reset").
 * Throws when a run is still in flight — abort and wait for idle first.
 */
async function reset() {
  if (running) {
    throw new Error("Cannot reset() while the agent is running. Call `agent.abort()` and `await agent.waitForIdle()` first.");
  }
  conversationTurns = [];
  steeringQueue.length = 0;
  followUpQueue.length = 0;
  for (const record of skillActivationState.clear()) {
    await hooks.callHook("skills:deactivate", { skill: record.skill, reason: "reset" });
  }
}
|
|
2315
|
+
/**
 * Explicitly activate a resolved skill by name. Throws when skills have not
 * been resolved yet, when the name is unknown, or when the maxActive cap is
 * hit; fires the `skills:activate` hook on success.
 * @param {string} name - name of the skill to activate.
 */
async function activateSkill(name) {
  if (!resolvedSkills) {
    throw new Error(`Cannot activate skill "${name}" — skills have not been resolved yet. Call activateSkill after the first \`run()\`, or pass a skills config that resolves at agent-creation time.`);
  }
  const match = resolvedSkills.find((s) => s.name === name);
  if (!match) {
    const names = resolvedSkills.map((s) => s.name).join(", ") || "<none>";
    throw new Error(`Unknown skill "${name}". Available skills: ${names}.`);
  }
  switch (skillActivationState.activate(match, "explicit")) {
    case "cap-reached":
      throw new Error(`Cannot activate skill "${name}" — the maxActive cap of ${skillsConfig?.maxActive} has been reached.`);
    case "ok":
      await hooks.callHook("skills:activate", { skill: match, via: "explicit" });
      break;
  }
}
|
|
2329
|
+
/**
 * Explicitly deactivate a skill by name. Silently does nothing when the
 * skill is not active; fires `skills:deactivate` with reason "explicit"
 * when it was.
 * @param {string} name - name of the skill to deactivate.
 */
async function deactivateSkill(name) {
  const record = skillActivationState.deactivate(name);
  if (!record) return;
  await hooks.callHook("skills:deactivate", { skill: record.skill, reason: "explicit" });
}
|
|
2336
|
+
if (session) {
|
|
2337
|
+
const originalSave = session.save.bind(session);
|
|
2338
|
+
const originalSetMeta = session.setMeta.bind(session);
|
|
2339
|
+
session.save = async () => {
|
|
2340
|
+
await originalSave();
|
|
2341
|
+
await hooks.callHook("session:save", { sessionId: session.id });
|
|
2342
|
+
};
|
|
2343
|
+
session.setMeta = (key, value) => {
|
|
2344
|
+
originalSetMeta(key, value);
|
|
2345
|
+
Promise.resolve(hooks.callHook("session:meta", {
|
|
2346
|
+
sessionId: session.id,
|
|
2347
|
+
key,
|
|
2348
|
+
value
|
|
2349
|
+
})).catch((err) => {
|
|
2350
|
+
console.error("[zidane] session:meta listener rejected:", err);
|
|
2351
|
+
});
|
|
2352
|
+
};
|
|
2353
|
+
}
|
|
2354
|
+
let destroyed = false;
|
|
2355
|
+
/**
|
|
2356
|
+
* Pre-connect MCP servers. Returns the shared in-flight promise when a
|
|
2357
|
+
* bootstrap is already running, so concurrent callers converge on one
|
|
2358
|
+
* connection. Clears the cached promise on failure so the next caller can
|
|
2359
|
+
* retry — leaving a rejected promise cached would permanently poison future
|
|
2360
|
+
* runs on the same agent.
|
|
2361
|
+
*/
|
|
2362
|
+
async function warmup() {
  if (destroyed) return;
  // Already connected, or nothing configured to connect to.
  if (mcpConnection || allMcpServers.length === 0) return;
  // A bootstrap is already in flight — concurrent callers share it.
  if (mcpWarmupPromise) return mcpWarmupPromise;
  mcpWarmupPromise = (async () => {
    const connection = mcpConnector ? await mcpConnector(allMcpServers) : await connectMcpServers(allMcpServers, void 0, hooks);
    // destroy() may have raced the connect — close rather than leak the connection.
    if (destroyed) {
      await connection.close().catch(() => {});
      return;
    }
    mcpConnection = connection;
  })();
  try {
    await mcpWarmupPromise;
  } catch (err) {
    // Clear the cached promise so the next caller can retry; a cached
    // rejection would permanently poison future runs (see JSDoc above).
    mcpWarmupPromise = null;
    throw err;
  }
}
|
|
2381
|
+
/**
 * Tear the agent down: wait out any in-flight MCP warmup, close the MCP
 * connection, destroy the execution handle, then run the skills cleanup.
 * Idempotent — the `destroyed` flag makes subsequent calls return at once.
 */
async function destroy() {
  if (destroyed) return;
  destroyed = true;
  // Let a racing warmup settle first; its failure is irrelevant during teardown.
  if (mcpWarmupPromise) try {
    await mcpWarmupPromise;
  } catch {}
  if (mcpConnection) {
    await mcpConnection.close();
    mcpConnection = null;
  }
  if (executionHandle) {
    await executionContext.destroy(executionHandle);
    executionHandle = null;
  }
  // Run the skills cleanup once, then disarm it.
  skillsCleanup();
  skillsCleanup = () => {};
}
|
|
2398
|
+
if (eager && allMcpServers.length > 0) warmup().catch(() => {});
|
|
2399
|
+
return {
|
|
2400
|
+
hooks,
|
|
2401
|
+
run,
|
|
2402
|
+
abort,
|
|
2403
|
+
steer,
|
|
2404
|
+
followUp: followUpFn,
|
|
2405
|
+
waitForIdle,
|
|
2406
|
+
reset,
|
|
2407
|
+
destroy,
|
|
2408
|
+
warmup,
|
|
2409
|
+
activateSkill,
|
|
2410
|
+
deactivateSkill,
|
|
2411
|
+
get isRunning() {
|
|
2412
|
+
return running;
|
|
2413
|
+
},
|
|
2414
|
+
get turns() {
|
|
2415
|
+
return conversationTurns;
|
|
2416
|
+
},
|
|
2417
|
+
get execution() {
|
|
2418
|
+
return executionContext;
|
|
2419
|
+
},
|
|
2420
|
+
get handle() {
|
|
2421
|
+
return executionHandle;
|
|
2422
|
+
},
|
|
2423
|
+
get session() {
|
|
2424
|
+
return session ?? null;
|
|
2425
|
+
},
|
|
2426
|
+
get activeSkills() {
|
|
2427
|
+
return skillActivationState.active();
|
|
2428
|
+
},
|
|
2429
|
+
meta: Object.freeze({ ...provider.meta })
|
|
2430
|
+
};
|
|
2431
|
+
}
|
|
2432
|
+
//#endregion
|
|
2433
|
+
//#region src/tools/edit-utils.ts
|
|
2434
|
+
/**
|
|
2435
|
+
* Internal helpers shared between the `edit` and `multi_edit` tools.
|
|
2436
|
+
*
|
|
2437
|
+
* Not part of the public API — intentionally not re-exported from `tools/index.ts`
|
|
2438
|
+
* or the package barrel.
|
|
2439
|
+
*/
|
|
2440
|
+
/**
|
|
2441
|
+
* Count exact (non-overlapping) occurrences of `needle` in `haystack`.
|
|
2442
|
+
* Returns 0 for an empty needle — both edit tools reject empty `old_string`
|
|
2443
|
+
* up front, so this branch is defensive rather than semantic.
|
|
2444
|
+
*/
|
|
2445
|
+
/**
 * Count exact, non-overlapping occurrences of `needle` inside `haystack`.
 * An empty needle yields 0 — both edit tools reject an empty `old_string`
 * up front, so that branch is defensive rather than semantic.
 * @param {string} haystack - text to search.
 * @param {string} needle - exact substring to count.
 * @returns {number} non-overlapping occurrence count.
 */
function countExactMatches(haystack, needle) {
  if (needle.length === 0) return 0;
  // Splitting on the needle produces one more segment than there are
  // (non-overlapping) occurrences.
  return haystack.split(needle).length - 1;
}
|
|
2457
|
+
/** Map curly quotes (any of the four) to their straight ASCII equivalents. */
|
|
2458
|
+
/** Replace all four curly quote characters with their straight ASCII equivalents. */
function normalizeQuotes(str) {
  return str.replace(/[‘’“”]/g, (ch) => (ch === "“" || ch === "”" ? "\"" : "'"));
}
|
|
2461
|
+
/**
|
|
2462
|
+
* Substitutions Anthropic's API applies to assistant output before the model
|
|
2463
|
+
* sees it. The model emits the sanitized form; the file on disk contains the
|
|
2464
|
+
* unsanitized form. We undo the substitutions on `old_string` so the search
|
|
2465
|
+
* lands on the actual file contents.
|
|
2466
|
+
*
|
|
2467
|
+
* Verbatim from `claude-code/tools/FileEditTool/utils.ts`.
|
|
2468
|
+
*/
|
|
2469
|
+
/**
 * Ordered [sanitized, original] pairs: substitutions Anthropic's API applies
 * to assistant output before the model sees it. The model emits the
 * sanitized form; the file on disk holds the original. Verbatim from
 * `claude-code/tools/FileEditTool/utils.ts` — do not reorder or edit.
 */
const DESANITIZATIONS = [
  ["<fnr>", "<function_results>"],
  ["<n>", "<name>"],
  ["</n>", "</name>"],
  ["<o>", "<output>"],
  ["</o>", "</output>"],
  ["<e>", "<error>"],
  ["</e>", "</error>"],
  ["<s>", "<system>"],
  ["</s>", "</system>"],
  ["<r>", "<result>"],
  ["</r>", "</result>"],
  ["< META_START >", "<META_START>"],
  ["< META_END >", "<META_END>"],
  ["< EOT >", "<EOT>"],
  ["< META >", "<META>"],
  ["\n\nH:", "\n\nHuman:"],
  ["\n\nA:", "\n\nAssistant:"]
];
/**
 * Undo every SDK sanitization in `s`, in table order. Used on `old_string`
 * so the search lands on the file's actual (unsanitized) contents, and on
 * `new_string` when a desanitize-class fallback matched, so the file keeps
 * its unsanitized form on disk.
 * @param {string} s - text as emitted by the model.
 * @returns {string} text with all sanitizations reversed.
 */
function desanitize(s) {
  return DESANITIZATIONS.reduce((acc, [from, to]) => acc.replaceAll(from, to), s);
}
|
|
2500
|
+
/**
|
|
2501
|
+
* Strip line-number prefixes from each line of a needle, used as a recovery
|
|
2502
|
+
* fallback when the model pastes a `read_file` chunk verbatim into
|
|
2503
|
+
* `old_string` — the on-disk file doesn't carry the metadata prefix.
|
|
2504
|
+
*
|
|
2505
|
+
* Accepts three separator characters so a model that learned on a different
|
|
2506
|
+
* agent stack still works here: `\t` (Claude Code compact, our default),
|
|
2507
|
+
* `|`, and `→`. Pattern: optional leading whitespace, 1-9 digits, then one
|
|
2508
|
+
* of `\t | →`. The 9-digit ceiling covers files up to ~1B lines without
|
|
2509
|
+
* overshooting into legitimate `\d{N}<sep>` content like Markdown table
|
|
2510
|
+
* cells with long numeric IDs.
|
|
2511
|
+
*/
|
|
2512
|
+
// Per-line prefix: optional indent, 1-9 digits, then one of tab / `|` / `→`
// (U+2192). Hoisted so the hot path reuses one compiled regex; safe to share
// because String#replace does not rely on lastIndex.
const LINE_NUMBER_PREFIX_RE = /^[ \t]*\d{1,9}[\t|\u2192]/gm;
/**
 * Remove `read_file`-style line-number prefixes from every line of `s`,
 * recovering the on-disk text when the model pastes a numbered chunk
 * verbatim into `old_string` / `new_string`.
 */
function stripLineNumberPrefixes(s) {
  return s.replaceAll(LINE_NUMBER_PREFIX_RE, "");
}
|
|
2516
|
+
/**
|
|
2517
|
+
* Search `target` in `normFile` and slice the matching span out of the
|
|
2518
|
+
* original `haystack`, counting all non-overlapping occurrences. `normFile`
|
|
2519
|
+
* is the haystack with whatever transform (quotes / desanitize / combined)
|
|
2520
|
+
* was applied to make the indices align — slicing the original haystack
|
|
2521
|
+
* preserves the file's actual typography so `replace_all` writes back the
|
|
2522
|
+
* file's form, not the model's.
|
|
2523
|
+
*
|
|
2524
|
+
* Pre-condition: `normFile.length === haystack.length` (every transform
|
|
2525
|
+
* we use is one-to-one). Returns null on miss.
|
|
2526
|
+
*/
|
|
2527
|
+
function locateAndCount(haystack, normFile, target, via) {
  // Guard: an empty target matches at every position, and the counting loop
  // below would never advance (`cursor += 0`) — an infinite loop. Mirrors
  // the defensive empty-needle branch in countExactMatches.
  if (target.length === 0) return null;
  const idx = normFile.indexOf(target);
  if (idx === -1) return null;
  // Slice the ORIGINAL haystack so the caller gets the file's own
  // typography, not the normalized form that made the indices align.
  const actual = haystack.slice(idx, idx + target.length);
  // Count non-overlapping occurrences in the normalized haystack.
  let occ = 0;
  let cursor = 0;
  while (true) {
    const next = normFile.indexOf(target, cursor);
    if (next === -1) break;
    occ++;
    cursor = next + target.length;
  }
  return {
    actual,
    occurrences: occ,
    via
  };
}
|
|
2545
|
+
/**
 * Resolve the model's `old_string` (`needle`) against the file contents
 * (`haystack`), trying progressively looser recovery transforms in order:
 * exact match, curly-quote normalization, API desanitization, both combined,
 * then line-number-prefix stripping (alone, and with quote normalization).
 * Returns `{ actual, occurrences, via }` — the span as it appears in the
 * original file, its non-overlapping occurrence count, and which transform
 * produced the hit — or null when nothing matched. Order matters: earlier,
 * stricter matches win.
 */
function resolveOldString(haystack, needle) {
  // 1. Exact match — the common case, no transforms needed.
  const exact = countExactMatches(haystack, needle);
  if (exact > 0) return {
    actual: needle,
    occurrences: exact,
    via: "exact"
  };
  // 2. Curly-quote normalization on both sides; only attempted when the
  // normalization actually changed something.
  const normNeedle = normalizeQuotes(needle);
  const normFile = normalizeQuotes(haystack);
  if (normNeedle !== needle || normFile !== haystack) {
    const m = locateAndCount(haystack, normFile, normNeedle, "quotes");
    if (m) return m;
  }
  // 3. Undo SDK sanitizations on the needle only (the file already holds
  // the unsanitized form).
  const desan = desanitize(needle);
  if (desan !== needle) {
    const desanCount = countExactMatches(haystack, desan);
    if (desanCount > 0) return {
      actual: desan,
      occurrences: desanCount,
      via: "desanitize"
    };
  }
  // 4. Quotes + desanitize combined, searched in the quote-normalized file.
  const combo = desanitize(normNeedle);
  if (combo !== needle) {
    const m = locateAndCount(haystack, normFile, combo, "quotes+desanitize");
    if (m) return m;
  }
  // 5. Strip read_file line-number prefixes. The trim() check prevents a
  // needle that was ONLY prefixes from matching everywhere as whitespace.
  const stripped = stripLineNumberPrefixes(needle);
  if (stripped !== needle && stripped.trim().length > 0) {
    const count = countExactMatches(haystack, stripped);
    if (count > 0) return {
      actual: stripped,
      occurrences: count,
      via: "line-numbers"
    };
    // 6. Last resort: stripped prefixes + quote normalization.
    const strippedNorm = normalizeQuotes(stripped);
    if (strippedNorm !== stripped || normFile !== haystack) {
      const m = locateAndCount(haystack, normFile, strippedNorm, "quotes+line-numbers");
      if (m) return m;
    }
  }
  return null;
}
|
|
2588
|
+
/**
|
|
2589
|
+
* Apply the same recovery transforms used to find `old_string` to
|
|
2590
|
+
* `new_string`, so the file gets back its native form: desanitize when
|
|
2591
|
+
* the model emitted `<n>` for `<name>`, strip line-number prefixes when
|
|
2592
|
+
* the match required them, then re-curlify when the match required
|
|
2593
|
+
* quote normalization. Shared between `edit` and `multi_edit`.
|
|
2594
|
+
*/
|
|
2595
|
+
/**
 * Mirror onto `new_string` whichever recovery transforms were needed to
 * locate `old_string`, so the file keeps its native form: desanitize when a
 * desanitize-class fallback matched, strip line-number prefixes when that
 * fallback matched, and re-curlify quotes when quote normalization was
 * involved. Shared between `edit` and `multi_edit`.
 * @param {string} replacement - the model's new_string.
 * @param {string} via - which transform matched (see resolveOldString).
 * @param {string} actual - the matched span from the original file.
 */
function styleReplacementForVia(replacement, via, actual) {
  let styled = replacement;
  const desanitized = via === "desanitize" || via === "quotes+desanitize";
  const lineNumbered = via === "line-numbers" || via === "quotes+line-numbers";
  const quoteMatched = via === "quotes" || via === "quotes+desanitize" || via === "quotes+line-numbers";
  if (desanitized) styled = desanitize(styled);
  if (lineNumbered) styled = stripLineNumberPrefixes(styled);
  if (quoteMatched) styled = preserveQuoteStyle(actual, styled);
  return styled;
}
|
|
2602
|
+
/**
|
|
2603
|
+
* When `old_string` matched via curly-quote normalization, re-style
|
|
2604
|
+
* `new_string` so the file's typography is preserved across the edit.
|
|
2605
|
+
* Detects whether the matched file region had curly singles, doubles, or
|
|
2606
|
+
* both, and applies the matching curlification to the replacement.
|
|
2607
|
+
*
|
|
2608
|
+
* Apostrophes in contractions (`don't`, `it's`) get the right-single curly
|
|
2609
|
+
* quote regardless of opening context — that's the canonical typographer's
|
|
2610
|
+
* convention for English. Other quotes use a simple
|
|
2611
|
+
* preceded-by-whitespace-or-opening-punctuation heuristic.
|
|
2612
|
+
*/
|
|
2613
|
+
/**
 * When `old_string` matched via curly-quote normalization, restyle
 * `replacement` so the file's typography survives the edit. Detects whether
 * the matched region used curly doubles, curly singles, or both, and applies
 * the matching curlification; single quotes get contraction-aware handling.
 */
function preserveQuoteStyle(actual, replacement) {
  const wantsCurlyDoubles = /[“”]/.test(actual);
  const wantsCurlySingles = /[‘’]/.test(actual);
  let styled = replacement;
  if (wantsCurlyDoubles) styled = applyCurly(styled, "\"", "“", "”", false);
  if (wantsCurlySingles) styled = applyCurly(styled, "'", "‘", "’", true);
  return styled;
}
|
|
2622
|
+
/**
 * Replace every `straight` quote in `s` with the curly `left`/`right` pair.
 * When `contractionAware`, a quote flanked by letters on both sides (don't,
 * it's) always becomes `right` — the typographer's apostrophe. Otherwise
 * direction comes from isOpeningContext. Iterates code points (spread), not
 * UTF-16 units, so astral characters stay intact.
 */
function applyCurly(s, straight, left, right, contractionAware) {
  const chars = [...s];
  return chars
    .map((ch, i) => {
      if (ch !== straight) return ch;
      if (contractionAware) {
        const before = i > 0 ? chars[i - 1] : "";
        const after = i < chars.length - 1 ? chars[i + 1] : "";
        if (/\p{L}/u.test(before) && /\p{L}/u.test(after)) return right;
      }
      return isOpeningContext(chars, i) ? left : right;
    })
    .join("");
}
|
|
2642
|
+
/**
 * Heuristic: a quote at position `i` opens when it sits at the very start
 * of the string or immediately after whitespace, an opening bracket, or a
 * dash. (The second space-like literal below is copied verbatim from the
 * original — presumably a non-breaking space; confirm before editing.)
 */
function isOpeningContext(chars, i) {
  if (i === 0) return true;
  const before = chars[i - 1];
  return [" ", " ", "\n", "\r", "(", "[", "{", "—", "–"].includes(before);
}
|
|
2647
|
+
//#endregion
|
|
2648
|
+
//#region src/tools/path-suggest.ts
|
|
2649
|
+
/**
|
|
2650
|
+
* Find a sibling file in the same directory sharing `path`'s basename
|
|
2651
|
+
* (sans extension), excluding the missing path itself. Returns just the
|
|
2652
|
+
* filename (not the full path) when found, otherwise `null`.
|
|
2653
|
+
*
|
|
2654
|
+
* Silent on errors — a missing parent directory or a `listFiles` failure
|
|
2655
|
+
* means we have no suggestion, not that anything is wrong.
|
|
2656
|
+
*/
|
|
2657
|
+
/**
 * Look for a sibling file in `path`'s directory that shares its basename
 * without the extension (excluding the missing file itself). Returns just
 * the sibling's filename, or null. Silent on errors — a missing directory
 * or listFiles failure simply means no suggestion.
 */
async function findSimilarFile(execution, handle, path) {
  // Basename with its (last) extension removed.
  const stem = (name) => {
    const dot = name.lastIndexOf(".");
    return dot === -1 ? name : name.slice(0, dot);
  };
  const slash = path.lastIndexOf("/");
  const dir = slash === -1 ? "." : path.slice(0, slash) || "/";
  const missing = slash === -1 ? path : path.slice(slash + 1);
  const wanted = stem(missing);
  if (wanted.length === 0) return null;
  let entries;
  try {
    entries = await execution.listFiles(handle, dir);
  } catch {
    return null;
  }
  return entries.find((entry) => entry !== missing && stem(entry) === wanted) ?? null;
}
|
|
2677
|
+
/**
|
|
2678
|
+
* Format a `Did you mean X?` suffix for missing-file errors. Returns an empty
|
|
2679
|
+
* string when no suggestion is available so callers can string-concat
|
|
2680
|
+
* unconditionally.
|
|
2681
|
+
*/
|
|
2682
|
+
/**
 * Build a ` Did you mean X?` suffix for missing-file errors. Empty string
 * when no similar sibling exists, so callers can concatenate unconditionally.
 */
async function suggestionFor(execution, handle, path) {
  const match = await findSimilarFile(execution, handle, path);
  if (!match) return "";
  return ` Did you mean ${match}?`;
}
|
|
2686
|
+
//#endregion
|
|
2687
|
+
//#region src/tools/edit.ts
|
|
2688
|
+
/**
 * Surgical edit — replace `old_string` with `new_string` in a single file.
 *
 * Mirrors Claude Code's `Edit` semantics so models post-trained on Anthropic's
 * tool surface need no relearning. Fails clearly when `old_string` isn't unique
 * (unless `replace_all: true`) and when not found, with a nearest-match preview
 * so the model can recover without a separate `read_file` round-trip.
 */
const edit = {
	spec: {
		name: "edit",
		description: "Replace exact `old_string` with `new_string` in a file. Fails if `old_string` is not unique unless `replace_all: true`. Prefer over `write_file` for surgical changes — preserves the rest of the file. Tolerates `read_file` line-number prefixes (`<N>\\t…`, `<N>|…`, or `<N>→…`) in `old_string` / `new_string` — they are stripped before matching/writing, so you can paste a numbered chunk verbatim.",
		inputSchema: {
			type: "object",
			properties: {
				path: {
					type: "string",
					description: "Relative file path."
				},
				old_string: {
					type: "string",
					description: "Exact substring to find."
				},
				new_string: {
					type: "string",
					description: "Replacement substring."
				},
				replace_all: {
					type: "boolean",
					description: "Replace every occurrence. Default: false."
				}
			},
			required: [
				"path",
				"old_string",
				"new_string"
			]
		}
	},
	async execute({ path, old_string, new_string, replace_all }, ctx) {
		const target = path;
		const find = old_string;
		const replacement = new_string;
		// Anything other than literal `true` (including "true") means single-replace.
		const replaceAll = replace_all === true;
		// Cheap input validation before touching the execution context.
		if (find === replacement) return `Edit error: old_string and new_string are identical — nothing to change in ${target}.`;
		if (find.length === 0) return `Edit error: old_string is empty. Use write_file to create or fully overwrite a file.`;
		let original;
		try {
			original = await ctx.execution.readFile(ctx.handle, target);
		} catch {
			// Read failure is reported as missing; suggestionFor may add "Did you mean X?".
			return `Edit error: file not found: ${target}.${await suggestionFor(ctx.execution, ctx.handle, target)}`;
		}
		// Optional staleness guard: the file must have been read this session and
		// must still hash to what was read, otherwise the edit could clobber
		// concurrent changes.
		if (ctx.behavior?.requireReadBeforeEdit && ctx.session) {
			const readState = getReadState(ctx.session);
			const absKey = `${ctx.handle.cwd}::${target}`;
			const prior = readState?.get(absKey);
			if (!prior) return `Edit error: ${target} has not been read in this session. Call read_file first so the edit applies against the current contents.`;
			if (prior.contentHash !== hashContent(original)) return `Edit error: ${target} has changed on disk since the last read. Re-read the file before editing.`;
		}
		// resolveOldString tolerates line-number-prefixed needles; `via` records
		// which normalization strategy matched so the replacement can be styled
		// the same way.
		const match = resolveOldString(original, find);
		if (!match) {
			const preview = nearestMatchPreview(original, find);
			return preview ? `Edit error: old_string not found in ${target}. Closest match in the file: ${preview}` : `Edit error: old_string not found in ${target}.`;
		}
		const { actual, occurrences, via } = match;
		if (occurrences > 1 && !replaceAll) return `Edit error: old_string appears ${occurrences} times in ${target}. Pass replace_all=true or expand old_string for uniqueness.`;
		const styledReplacement = styleReplacementForVia(replacement, via, actual);
		// split/join replaces every occurrence; String.replace only the first.
		const updated = replaceAll ? original.split(actual).join(styledReplacement) : original.replace(actual, styledReplacement);
		if (updated === original) return `Edit error: replacement produced no change in ${target}.`;
		await ctx.execution.writeFile(ctx.handle, target, updated);
		// Refresh the session's read-state hash so a follow-up edit doesn't
		// trip the staleness guard on our own write.
		if (ctx.session) {
			const readState = getReadState(ctx.session);
			const absKey = `${ctx.handle.cwd}::${target}`;
			const prior = readState?.get(absKey);
			if (readState && prior) readState.set(absKey, {
				...prior,
				contentHash: hashContent(updated),
				mtimeMs: Date.now()
			});
		}
		return `Edited ${target}: replaced ${occurrences} occurrence${occurrences === 1 ? "" : "s"}.`;
	}
};
|
|
2771
|
+
/**
 * Find the haystack line sharing the longest common prefix with the needle's
 * first line — a cheap diagnostic for the common "model has a typo" case
 * (off-by-one indent, trailing whitespace, escape mismatch).
 *
 * Line-number prefixes are stripped from the needle first so a pasted
 * numbered `read_file` chunk still produces a useful preview. Returns a
 * `line N: "<snippet>"` string, or null when no line shares enough prefix
 * to be worth showing.
 */
function nearestMatchPreview(haystack, needle) {
	const firstLine = stripLineNumberPrefixes(needle).split("\n")[0];
	// Too-short needles would match almost anything.
	if (firstLine.length < 3) return null;
	const haystackLines = haystack.split("\n");
	let bestIdx = -1;
	let bestLen = 0;
	for (let idx = 0; idx < haystackLines.length; idx++) {
		const len = sharedPrefixLength(haystackLines[idx], firstLine);
		if (len > bestLen) {
			bestLen = len;
			bestIdx = idx;
		}
	}
	// Require at least half the needle's first line (capped at 8 chars) to
	// avoid reporting coincidental short overlaps.
	const threshold = Math.min(8, Math.floor(firstLine.length / 2));
	if (bestIdx < 0 || bestLen < threshold) return null;
	const snippet = haystackLines[bestIdx].slice(0, 80);
	return `line ${bestIdx + 1}: ${JSON.stringify(snippet)}`;
}
|
|
2797
|
+
/**
 * Length of the longest common leading substring of two strings, compared
 * by UTF-16 code unit.
 */
function sharedPrefixLength(a, b) {
	const limit = Math.min(a.length, b.length);
	for (let idx = 0; idx < limit; idx++) {
		if (a.charCodeAt(idx) !== b.charCodeAt(idx)) return idx;
	}
	return limit;
}
|
|
2803
|
+
//#endregion
|
|
2804
|
+
//#region src/tools/glob.ts
|
|
2805
|
+
/**
|
|
2806
|
+
* Glob-pattern file matching.
|
|
2807
|
+
*
|
|
2808
|
+
* Uses Bun's native `Bun.Glob` engine when running in the in-process execution
|
|
2809
|
+
* context. For non-process contexts (docker, sandbox), falls back to running
|
|
2810
|
+
* the pattern through a shell `find` invocation so the match is executed
|
|
2811
|
+
* wherever the context lives.
|
|
2812
|
+
*
|
|
2813
|
+
* Results are capped at 1000 entries to keep model input bounded.
|
|
2814
|
+
*
|
|
2815
|
+
* By default each row carries `<path>\t<size>\t<mtime>` metadata so the
|
|
2816
|
+
* model can rank "what changed recently" without a follow-up `read_file`.
|
|
2817
|
+
* Pass `metadata: false` to fall back to plain newline-separated paths.
|
|
2818
|
+
* Metadata is best-effort: in-process contexts use `node:fs/promises stat`
|
|
2819
|
+
* (parallelized); non-process contexts fall through to the path-only shape
|
|
2820
|
+
* since shelling out per file would dominate the call latency.
|
|
2821
|
+
*/
|
|
2822
|
+
// Hard ceiling on glob results so model input stays bounded.
const DEFAULT_LIMIT = 1e3;
// Allow-list for the shell fallback: word/path characters plus glob
// metacharacters only, so the pattern can be interpolated into a `find`
// command without shell-injection risk.
const SAFE_GLOB_PATTERN_RE = /^[\w./*?[\]{}!,^@+-]+$/;
|
|
2824
|
+
/**
 * Match `pattern` against the filesystem under `cwd` using Bun's native
 * glob engine, stopping once `limit` entries have been collected.
 * Returns the matches sorted lexicographically.
 */
async function globInProcess(pattern, cwd, limit) {
	const matcher = new Bun.Glob(pattern);
	const collected = [];
	for await (const entry of matcher.scan({ cwd })) {
		collected.push(entry);
		// Stop scanning as soon as the cap is hit — no point walking the rest.
		if (collected.length >= limit) break;
	}
	return collected.sort();
}
|
|
2833
|
+
/**
 * Glob fallback for non-process execution contexts: run the pattern through
 * a `find` pipeline inside the context so matching happens wherever the
 * files actually live. Patterns are validated against a strict allow-list
 * before being embedded in the command.
 */
async function globViaShell(pattern, ctx, limit) {
	if (!SAFE_GLOB_PATTERN_RE.test(pattern)) {
		throw new Error("Glob pattern contains unsupported characters (shell fallback only allows path/glob metacharacters)");
	}
	// Bare patterns match by file name; patterns with `/` match the full path.
	const findExpr = pattern.includes("/") ? `find . -type f -path './${pattern}'` : `find . -type f -name '${pattern}'`;
	const searchCmd = `${findExpr} 2>/dev/null | sed 's|^./||' | sort | head -n ${limit}`;
	const result = await ctx.execution.exec(ctx.handle, searchCmd);
	// `find` can exit non-zero on permission noise while still printing matches.
	if (result.exitCode !== 0 && !result.stdout) return [];
	return result.stdout.split("\n").filter((line) => line.length > 0);
}
|
|
2840
|
+
/**
 * `glob` tool: match files by pattern, optionally annotated with size/mtime.
 * Dispatches to Bun's native engine in-process, or a shell `find` fallback
 * for docker/sandbox contexts. Output is capped and always sorted.
 */
const glob = {
	spec: {
		name: "glob",
		description: "Match files by glob pattern (supports **, *, ?). Relative to the execution context cwd. By default each row is `<path>\\t<size-bytes>\\t<mtime-iso>`; set `metadata: false` for a plain newline-separated list of paths. Always sorted.",
		inputSchema: {
			type: "object",
			properties: {
				pattern: {
					type: "string",
					description: "Glob pattern (e.g. \"src/**/*.ts\", \"*.md\", \"test/**/fixtures/*\")."
				},
				limit: {
					type: "number",
					description: `Maximum number of matches to return. Default: ${DEFAULT_LIMIT}.`
				},
				metadata: {
					type: "boolean",
					description: "Append size (bytes) and mtime (ISO) per row, tab-separated. Default: true. In-process only — non-process execution contexts always return paths."
				}
			},
			required: ["pattern"]
		}
	},
	async execute({ pattern, limit, metadata }, ctx) {
		const pat = pattern;
		// Non-positive / non-numeric limits fall back to the default cap.
		const max = typeof limit === "number" && limit > 0 ? limit : DEFAULT_LIMIT;
		// Metadata is opt-out: anything but explicit `false` keeps it on.
		const wantMetadata = metadata !== false;
		try {
			const entries = ctx.execution.type === "process" ? await globInProcess(pat, ctx.handle.cwd, max) : await globViaShell(pat, ctx, max);
			if (entries.length === 0) return "(no matches)";
			// Shell-backed contexts skip metadata — a stat per file over exec
			// would dominate the call latency.
			if (!wantMetadata || ctx.execution.type !== "process") return entries.join("\n");
			return (await Promise.all(entries.map(async (rel) => {
				try {
					const s = await stat(resolve(ctx.handle.cwd, rel));
					return `${rel}\t${s.size}\t${new Date(s.mtimeMs).toISOString()}`;
				} catch {
					// Best-effort: a row whose stat failed keeps its columns empty.
					return `${rel}\t\t`;
				}
			}))).join("\n");
		} catch (err) {
			return `Glob error: ${err instanceof Error ? err.message : String(err)}`;
		}
	}
};
|
|
2884
|
+
//#endregion
|
|
2885
|
+
//#region src/tools/grep.ts
|
|
2886
|
+
/**
|
|
2887
|
+
* Search file contents by regex.
|
|
2888
|
+
*
|
|
2889
|
+
* Wraps ripgrep (`rg`) when available, falls back to an in-process Bun.Glob +
|
|
2890
|
+
* regex implementation when running in the in-process execution context. For
|
|
2891
|
+
* non-process contexts without `rg`, returns a clear hint rather than silently
|
|
2892
|
+
* doing nothing.
|
|
2893
|
+
*
|
|
2894
|
+
* The tool surface mirrors Claude Code's `Grep` so models authored against the
|
|
2895
|
+
* Anthropic tool surface need no relearning. Output modes:
|
|
2896
|
+
* - `files_with_matches` (default) — newline-separated paths.
|
|
2897
|
+
* - `content` — `path:line:match` (line numbers on by default).
|
|
2898
|
+
* - `count` — `path:N` per matching file.
|
|
2899
|
+
*
|
|
2900
|
+
* Results are capped via `head_limit` (default 250) to keep model input
|
|
2901
|
+
* bounded; `offset` lets the caller page through.
|
|
2902
|
+
*/
|
|
2903
|
+
// Default cap on grep output entries; callers page past it with `offset`.
const DEFAULT_HEAD_LIMIT = 250;
// Default output mode, matching Claude Code's `Grep` tool surface.
const DEFAULT_OUTPUT_MODE = "files_with_matches";
|
|
2905
|
+
/**
 * `grep` tool: search file contents by regex. Prefers ripgrep in the
 * execution context, falls back to an in-process JS implementation for
 * process-type contexts, and otherwise returns an actionable hint.
 */
const grep = {
	spec: {
		name: "grep",
		description: "Search file contents by regex. Returns matching paths (default), match content, or per-file counts. Backed by ripgrep when available with a Bun.Glob fallback for in-process runs.",
		inputSchema: {
			type: "object",
			properties: {
				"pattern": {
					type: "string",
					description: "Regex (PCRE-flavored via ripgrep, JS regex via fallback)."
				},
				"path": {
					type: "string",
					description: "File or directory to search. Default: \".\"."
				},
				"glob": {
					type: "string",
					description: "Restrict to files matching this glob, e.g. \"**/*.ts\"."
				},
				"type": {
					type: "string",
					description: "rg file type filter, e.g. \"ts\", \"py\", \"rust\". Ignored by the fallback."
				},
				"output_mode": {
					type: "string",
					enum: [
						"content",
						"files_with_matches",
						"count"
					],
					description: "Default: \"files_with_matches\"."
				},
				"-i": {
					type: "boolean",
					description: "Case-insensitive match."
				},
				"-n": {
					type: "boolean",
					description: "Show line numbers (content mode). Default: true."
				},
				"-A": {
					type: "integer",
					description: "Lines of trailing context (content mode)."
				},
				"-B": {
					type: "integer",
					description: "Lines of leading context (content mode)."
				},
				"-C": {
					type: "integer",
					description: "Lines of surrounding context (content mode). Overridden by -A/-B if set."
				},
				"multiline": {
					type: "boolean",
					description: "Allow patterns to match across line boundaries."
				},
				"head_limit": {
					type: "integer",
					description: "Cap output entries. Default: 250. Set 0 for unlimited."
				},
				"offset": {
					type: "integer",
					description: "Skip first N entries. Default: 0."
				}
			},
			required: ["pattern"]
		}
	},
	// Dispatch order: ripgrep wherever the context has it (probed per call),
	// then the in-process fallback, then a hint — never a silent no-op.
	async execute(rawInput, ctx) {
		const input = rawInput;
		if (await isRipgrepAvailable(ctx)) return runViaRipgrep(input, ctx);
		if (ctx.execution.type === "process") return runInProcess(input, ctx);
		return "grep error: ripgrep is not available in the execution context. Install `rg` or use the `shell` tool with grep/awk.";
	}
};
|
|
2980
|
+
/**
 * Probe whether `rg` is runnable inside the execution context.
 *
 * Deliberately probed per call rather than cached: a module-level cache
 * would leak across execution contexts (an in-process agent and a docker
 * agent in one Node process can legitimately differ), and per-handle
 * caching is bookkeeping for negligible savings — `rg --version` costs
 * ~5 ms and grep runs at most a few times per turn.
 */
async function isRipgrepAvailable(ctx) {
	const probe = await ctx.execution.exec(ctx.handle, "rg --version");
	return probe.exitCode === 0;
}
|
|
2991
|
+
/**
 * Execute the grep via a quoted `rg` command inside the execution context
 * and paginate its stdout. Flag assembly mirrors the tool's input surface.
 */
async function runViaRipgrep(input, ctx) {
	const args = ["rg"];
	const mode = input.output_mode ?? DEFAULT_OUTPUT_MODE;
	if (mode === "files_with_matches") args.push("--files-with-matches");
	else if (mode === "count") args.push("--count");
	// content mode: line numbers default on; `-n: false` disables them.
	else args.push(input["-n"] ?? true ? "--line-number" : "--no-line-number");
	if (input["-i"]) args.push("-i");
	if (mode === "content") {
		if (typeof input["-A"] === "number") args.push("-A", String(input["-A"]));
		if (typeof input["-B"] === "number") args.push("-B", String(input["-B"]));
		// -C only applies when neither -A nor -B was given explicitly.
		if (typeof input["-C"] === "number" && typeof input["-A"] !== "number" && typeof input["-B"] !== "number") args.push("-C", String(input["-C"]));
	}
	if (input.multiline) args.push("--multiline", "--multiline-dotall");
	if (input.glob) args.push("--glob", input.glob);
	if (input.type) args.push("--type", input.type);
	// `--` ends option parsing so patterns starting with `-` are safe.
	args.push("--", input.pattern);
	args.push(input.path ?? ".");
	const command = args.map(shellQuote).join(" ");
	const result = await ctx.execution.exec(ctx.handle, command);
	// rg exits 1 for "no matches" — that's a valid, empty result, not an error.
	if (result.exitCode !== 0 && result.exitCode !== 1) return `grep error: ${result.stderr.trim() || `rg exited with code ${result.exitCode}`}`;
	return formatPaginated(result.stdout, input);
}
|
|
3013
|
+
/**
 * In-process grep fallback: enumerate candidate files, apply a JS RegExp,
 * and emit rows in the selected output mode. Only used for process-type
 * execution contexts when ripgrep is unavailable.
 */
async function runInProcess(input, ctx) {
	const mode = input.output_mode ?? DEFAULT_OUTPUT_MODE;
	// Flags: i = case-insensitive, s = dot-matches-newline for multiline,
	// g only in content mode (matchAll requires it there).
	const flags = `${input["-i"] ? "i" : ""}${input.multiline ? "s" : ""}${mode !== "content" ? "" : "g"}`;
	let regex;
	try {
		regex = new RegExp(input.pattern, flags || void 0);
	} catch (err) {
		return `grep error: invalid regex: ${err.message}`;
	}
	const files = await enumerateFiles(input, ctx);
	const showLineNumbers = input["-n"] ?? true;
	// Context window sizes: explicit -B/-A win over -C, which wins over 0.
	const before = input["-B"] ?? input["-C"] ?? 0;
	const after = input["-A"] ?? input["-C"] ?? 0;
	const lines = [];
	for (const path of files) {
		let content;
		try {
			content = await ctx.execution.readFile(ctx.handle, path);
		} catch {
			// Unreadable file (binary, permissions, vanished) — skip silently.
			continue;
		}
		if (input.multiline) {
			// Multiline: match against the whole file with a fresh global regex.
			const allMatches = [...content.matchAll(new RegExp(regex.source, `${flags.replace(/g/, "")}g`))];
			if (allMatches.length === 0) continue;
			if (mode === "files_with_matches") {
				lines.push(path);
				continue;
			}
			if (mode === "count") {
				lines.push(`${path}:${allMatches.length}`);
				continue;
			}
			for (const m of allMatches) {
				// Recover the full line containing the match start for the snippet.
				const lineStart = content.lastIndexOf("\n", m.index - 1) + 1;
				const lineEnd = content.indexOf("\n", m.index);
				const snippet = content.slice(lineStart, lineEnd === -1 ? void 0 : lineEnd);
				// 1-indexed line number = newlines before the match + 1.
				const lineNo = content.slice(0, m.index).split("\n").length;
				lines.push(formatContentLine(path, lineNo, snippet, showLineNumbers));
			}
			continue;
		}
		const fileLines = content.split("\n");
		const matched = [];
		for (let i = 0; i < fileLines.length; i++) {
			// The regex may carry /g (content mode): reset its cursor so
			// `test` doesn't skip lines due to stale lastIndex state.
			regex.lastIndex = 0;
			if (regex.test(fileLines[i])) matched.push(i);
		}
		if (matched.length === 0) continue;
		if (mode === "files_with_matches") {
			lines.push(path);
			continue;
		}
		if (mode === "count") {
			lines.push(`${path}:${matched.length}`);
			continue;
		}
		// content mode: expand each hit by the requested context window,
		// dedupe overlapping windows via a Set of line indices.
		const includeLineNos = /* @__PURE__ */ new Set();
		for (const m of matched) for (let i = Math.max(0, m - before); i <= Math.min(fileLines.length - 1, m + after); i++) includeLineNos.add(i);
		const sorted = [...includeLineNos].sort((a, b) => a - b);
		let prev = -2;
		for (const lineNo of sorted) {
			// rg-style `--` separator between non-contiguous context groups.
			if (lineNo > prev + 1 && lines.length > 0) lines.push("--");
			const snippet = fileLines[lineNo];
			lines.push(formatContentLine(path, lineNo + 1, snippet, showLineNumbers));
			prev = lineNo;
		}
	}
	return formatPaginated(lines.join("\n"), input);
}
|
|
3082
|
+
/**
 * Render one content-mode grep row, rg-style: `path:line:text`, or
 * `path:text` when line numbers are disabled.
 */
function formatContentLine(path, lineNo, snippet, showLineNumbers) {
	if (showLineNumbers) return `${path}:${lineNo}:${snippet}`;
	return `${path}:${snippet}`;
}
|
|
3085
|
+
/**
 * Enumerate candidate files for the in-process grep: a single file when
 * `path` names one directly, otherwise a Bun.Glob scan of the search root
 * filtered by `glob` (default `**\/*`). Results are sorted for determinism.
 */
async function enumerateFiles(input, ctx) {
	const cwd = ctx.handle.cwd;
	const root = input.path ?? ".";
	// A concrete (non-wildcard) path that is a regular file short-circuits
	// the scan — probe via the shell seam so this works in any context.
	if (input.path && !input.path.includes("*") && !input.path.includes("?")) try {
		if ((await ctx.execution.exec(ctx.handle, `test -f ${shellQuote(input.path)} && echo file || echo dir`)).stdout.trim() === "file") return [input.path];
	} catch {}
	const pattern = input.glob ?? "**/*";
	const glob = new Bun.Glob(pattern);
	const out = [];
	// Scan from cwd/root, normalizing duplicate slashes and a leading `./`.
	const scanRoot = root === "." ? cwd : `${cwd.replace(/\/$/, "")}/${root.replace(/^\.\//, "")}`;
	for await (const file of glob.scan({
		cwd: scanRoot,
		onlyFiles: true
	})) out.push(root === "." ? file : `${root.replace(/\/$/, "")}/${file}`);
	return out.sort();
}
|
|
3101
|
+
/**
 * Apply head_limit/offset pagination to newline-separated grep output and
 * annotate the result with skip/remainder markers so the model knows how
 * to page for more.
 */
function formatPaginated(text, input) {
	// Sanitize paging params: head_limit must be a non-negative number
	// (0 = unlimited), offset a positive number (floored).
	const headLimit = typeof input.head_limit === "number" && input.head_limit >= 0 ? input.head_limit : DEFAULT_HEAD_LIMIT;
	const offset = typeof input.offset === "number" && input.offset > 0 ? Math.floor(input.offset) : 0;
	if (!text.trim()) return "(no matches)";
	const rows = text.split("\n").filter((row) => row.length > 0);
	const total = rows.length;
	let page;
	if (headLimit === 0) page = rows.slice(offset);
	else page = rows.slice(offset, offset + headLimit);
	if (page.length === 0) return "(no matches in this slice)";
	const pieces = [];
	if (offset > 0) pieces.push(`…(${offset} earlier matches skipped)…`);
	pieces.push(page.join("\n"));
	if (headLimit > 0 && offset + headLimit < total) pieces.push(`…(${total - offset - headLimit} more matches; re-run with offset=${offset + headLimit} or larger head_limit)`);
	return pieces.join("\n");
}
|
|
3116
|
+
//#endregion
|
|
3117
|
+
//#region src/tools/interaction.ts
|
|
3118
|
+
/**
 * Build an interaction tool that lets the agent request structured input.
 *
 * The model invokes the tool with a payload matching `options.schema`;
 * `options.onRequest(payload, ctx)` produces the reply, which is returned
 * to the model verbatim when it's a string and JSON-encoded otherwise.
 * `name` and `description` default to a generic interaction surface.
 */
function createInteractionTool(options) {
	const spec = {
		name: options.name ?? "interaction",
		description: options.description ?? "Request structured input from the user or external system.",
		inputSchema: options.schema
	};
	return {
		spec,
		async execute(input, ctx) {
			// Invoked as a method on options so an onRequest that reads `this` keeps working.
			const response = await options.onRequest(input, ctx);
			if (typeof response === "string") return response;
			return JSON.stringify(response);
		}
	};
}
|
|
3138
|
+
//#endregion
|
|
3139
|
+
//#region src/tools/list-files.ts
|
|
3140
|
+
/**
 * `list_files` tool: shallow directory listing via the execution context.
 * Missing/unreadable directories produce a friendly message, not a throw.
 */
const listFiles = {
	spec: {
		name: "list_files",
		description: "List files and directories at the given path (relative to project root).",
		inputSchema: {
			type: "object",
			properties: { path: {
				type: "string",
				description: "Relative directory path (default: \".\")"
			} },
			required: []
		}
	},
	async execute({ path }, ctx) {
		try {
			// Absent/empty path defaults to the context root.
			const entries = await ctx.execution.listFiles(ctx.handle, path || ".");
			const listing = entries.join("\n");
			if (listing.length > 0) return listing;
			return "(empty directory)";
		} catch {
			return `Directory not found: ${path}`;
		}
	}
};
|
|
3161
|
+
//#endregion
|
|
3162
|
+
//#region src/tools/multi-edit.ts
|
|
3163
|
+
/**
 * `multi_edit` tool: apply an ordered list of old/new string edits to one
 * file atomically — every step must succeed in memory before anything is
 * written back, so a failing step leaves the file untouched.
 */
const multiEdit = {
	spec: {
		name: "multi_edit",
		description: "Apply a sequential list of edits to a file atomically. Each edit operates on the result of the previous edit. All edits must succeed for any to be written. Prefer this over multiple `edit` calls when several non-overlapping changes are needed in the same file. Each step tolerates `read_file` line-number prefixes (`<N>\\t…`, `<N>|…`, or `<N>→…`) in `old_string` / `new_string`.",
		inputSchema: {
			type: "object",
			properties: {
				path: {
					type: "string",
					description: "Relative file path."
				},
				edits: {
					type: "array",
					description: "List of edits applied in order; each operates on the previous edit's output.",
					items: {
						type: "object",
						properties: {
							old_string: { type: "string" },
							new_string: { type: "string" },
							replace_all: { type: "boolean" }
						},
						required: ["old_string", "new_string"]
					}
				}
			},
			required: ["path", "edits"]
		}
	},
	async execute({ path, edits }, ctx) {
		const target = path;
		const steps = edits;
		if (!Array.isArray(steps) || steps.length === 0) return `multi_edit error: edits must be a non-empty array.`;
		let current;
		try {
			current = await ctx.execution.readFile(ctx.handle, target);
		} catch {
			// Missing file; suggestionFor may append a "Did you mean X?" hint.
			return `multi_edit error: file not found: ${target}.${await suggestionFor(ctx.execution, ctx.handle, target)}`;
		}
		// Optional staleness guard, same contract as the single `edit` tool.
		if (ctx.behavior?.requireReadBeforeEdit && ctx.session) {
			const readState = getReadState(ctx.session);
			const absKey = `${ctx.handle.cwd}::${target}`;
			const prior = readState?.get(absKey);
			if (!prior) return `multi_edit error: ${target} has not been read in this session. Call read_file first so the edits apply against the current contents.`;
			if (prior.contentHash !== hashContent(current)) return `multi_edit error: ${target} has changed on disk since the last read. Re-read the file before editing.`;
		}
		let applied = 0;
		for (let i = 0; i < steps.length; i++) {
			const step = steps[i];
			const find = step.old_string;
			const replacement = step.new_string;
			const replaceAll = step.replace_all === true;
			// Per-step validation; any failure aborts before writeFile, keeping the edit atomic.
			if (typeof find !== "string" || typeof replacement !== "string") return `multi_edit error: edit #${i + 1} is missing old_string or new_string.`;
			if (find.length === 0) return `multi_edit error: edit #${i + 1} has empty old_string. Use write_file to fully replace a file.`;
			if (find === replacement) return `multi_edit error: edit #${i + 1} old_string and new_string are identical.`;
			// Matches against `current`, i.e. the output of all previous steps.
			const match = resolveOldString(current, find);
			if (!match) return `multi_edit error: edit #${i + 1} old_string not found in ${target}.`;
			const { actual, occurrences, via } = match;
			if (occurrences > 1 && !replaceAll) return `multi_edit error: edit #${i + 1} old_string appears ${occurrences} times. Pass replace_all=true on this edit or expand old_string for uniqueness.`;
			const styledReplacement = styleReplacementForVia(replacement, via, actual);
			current = replaceAll ? current.split(actual).join(styledReplacement) : current.replace(actual, styledReplacement);
			// Past the uniqueness check, either occurrences === 1 or replaceAll
			// replaced them all — so this tally is exact either way.
			applied += occurrences;
		}
		await ctx.execution.writeFile(ctx.handle, target, current);
		// Refresh the session's read-state hash so follow-up edits don't trip
		// the staleness guard on our own write.
		if (ctx.session) {
			const readState = getReadState(ctx.session);
			const absKey = `${ctx.handle.cwd}::${target}`;
			const prior = readState?.get(absKey);
			if (readState && prior) readState.set(absKey, {
				...prior,
				contentHash: hashContent(current),
				mtimeMs: Date.now()
			});
		}
		return `Edited ${target}: applied ${steps.length} edit${steps.length === 1 ? "" : "s"} (${applied} replacement${applied === 1 ? "" : "s"}).`;
	}
};
|
|
3239
|
+
//#endregion
|
|
3240
|
+
//#region src/tools/binary-read.ts
|
|
3241
|
+
/**
 * Best-effort IANA media type from a file extension. Covers exactly the
 * extensions `read_file` dispatches to image blocks (png/jpg/jpeg/gif/webp);
 * anything else yields `undefined` and the caller skips the binary route.
 */
function imageMediaTypeFor(path) {
	const extStart = path.lastIndexOf(".");
	if (extStart === -1) return void 0;
	const ext = path.slice(extStart + 1).toLowerCase();
	// Map (not an object literal) so prototype keys like "constructor" can't leak through.
	const mediaTypes = new Map([
		["png", "image/png"],
		["jpg", "image/jpeg"],
		["jpeg", "image/jpeg"],
		["gif", "image/gif"],
		["webp", "image/webp"]
	]);
	return mediaTypes.get(ext);
}
|
|
3259
|
+
/**
 * Read a file as base64. Uses `ExecutionContext.readFileBinary` when the
 * context provides it (no subprocess overhead in-process) and otherwise
 * shells out to `base64 < path`, which works on docker/sandbox contexts
 * without any interface change.
 *
 * Returns `{ base64, byteLength }`, where `byteLength` counts the DECODED
 * bytes — callers budget against the original file size, not the ~4/3×
 * larger base64 text.
 */
async function readFileAsBase64(execution, handle, path) {
	if (execution.readFileBinary) {
		// Fast path: raw bytes straight from the context, encoded in-process.
		const raw = await execution.readFileBinary(handle, path);
		const base64 = Buffer.from(raw.buffer, raw.byteOffset, raw.byteLength).toString("base64");
		return {
			base64,
			byteLength: raw.byteLength
		};
	}
	// Shell fallback for contexts without a binary-read seam.
	const result = await execution.exec(handle, `base64 < ${alwaysQuote(path)}`);
	if (result.exitCode !== 0) throw new Error(`base64 read failed: ${result.stderr || `exit ${result.exitCode}`}`);
	// `base64` wraps output in lines; strip all whitespace before decoding math.
	const encoded = result.stdout.replace(/\s+/g, "");
	return {
		base64: encoded,
		byteLength: decodedBase64ByteLength(encoded)
	};
}
|
|
3285
|
+
/**
 * Decoded byte length of a whitespace-stripped base64 string, accounting
 * for `=` padding so the result matches the original file size exactly
 * (naive `len * 3 / 4` over-reports padded payloads by 1–2 bytes).
 */
function decodedBase64ByteLength(b64) {
	if (b64.length === 0) return 0;
	let padding;
	if (b64.endsWith("==")) padding = 2;
	else if (b64.endsWith("=")) padding = 1;
	else padding = 0;
	return Math.max(0, (b64.length * 3) / 4 - padding);
}
|
|
3297
|
+
//#endregion
|
|
3298
|
+
//#region src/tools/read-file.ts
|
|
3299
|
+
/**
|
|
3300
|
+
* Read a file with line-based offset/limit and a hard byte cap.
|
|
3301
|
+
*
|
|
3302
|
+
* Defaults are tuned for source code: 2000 lines / 256 KiB. A typical source
|
|
3303
|
+
* file, lockfile, or large config fits in one read; logs and very large
|
|
3304
|
+
* fixtures get truncated with a footer that documents how to fetch the
|
|
3305
|
+
* remainder.
|
|
3306
|
+
*
|
|
3307
|
+
* Binary files are detected on the leading bytes — if the buffer contains a
|
|
3308
|
+
* NUL or has an unreasonable proportion of non-printable bytes, we skip text
|
|
3309
|
+
* decoding and return a marker so the model doesn't drown in mojibake.
|
|
3310
|
+
*/
|
|
3311
|
+
// Default page size for read_file: 2000 lines per call.
const DEFAULT_LINE_LIMIT = 2e3;
// Hard cap on bytes read regardless of line count (256 KiB).
const DEFAULT_BYTE_CAP = 262144;
|
|
3313
|
+
/**
|
|
3314
|
+
* Hard upper bound on raw image bytes we'll inline as a base64 image block.
|
|
3315
|
+
* Above this, we return a marker instead — the model won't get useful
|
|
3316
|
+
* information from a 10 MB+ screenshot rendered as one tool result, and
|
|
3317
|
+
* the wire bill gets ugly. Override via the `maxBytes` parameter on the
|
|
3318
|
+
* tool call.
|
|
3319
|
+
*/
|
|
3320
|
+
const DEFAULT_IMAGE_BYTE_CAP = 5 * 1024 * 1024;
|
|
3321
|
+
const readFile$1 = {
|
|
3322
|
+
spec: {
|
|
3323
|
+
name: "read_file",
|
|
3324
|
+
description: "Read a file by path. Returns lines [offset..offset+limit). Default offset=1, limit=2000. Each line is prefixed with its 1-indexed line number followed by a tab (e.g. `42\\tconst foo = bar`); the prefix is metadata, not part of the file. Mirrors Claude Code's `cat -n`-style compact output for token efficiency. A trailing footer explains how to read the rest when truncated. Binary files return a short marker rather than mojibake.",
|
|
3325
|
+
inputSchema: {
|
|
3326
|
+
type: "object",
|
|
3327
|
+
properties: {
|
|
3328
|
+
path: {
|
|
3329
|
+
type: "string",
|
|
3330
|
+
description: "Relative file path."
|
|
3331
|
+
},
|
|
3332
|
+
offset: {
|
|
3333
|
+
type: "integer",
|
|
3334
|
+
description: "1-indexed line number to start from. Default: 1."
|
|
3335
|
+
},
|
|
3336
|
+
limit: {
|
|
3337
|
+
type: "integer",
|
|
3338
|
+
description: "Max lines to return. Default: 2000. Set 0 for unlimited."
|
|
3339
|
+
},
|
|
3340
|
+
maxBytes: {
|
|
3341
|
+
type: "integer",
|
|
3342
|
+
description: "Hard byte cap on file content read, regardless of line count. Default: 262144. Set 0 for unlimited. The rendered output may be slightly larger than this cap when `lineNumbers` is on (each line carries a `<N>\\t` prefix)."
|
|
3343
|
+
},
|
|
3344
|
+
lineNumbers: {
|
|
3345
|
+
type: "boolean",
|
|
3346
|
+
description: "Prefix each line with its 1-indexed line number. Default: true. Override the agent-wide `behavior.readLineNumbers` for this call."
|
|
3347
|
+
}
|
|
3348
|
+
},
|
|
3349
|
+
required: ["path"]
|
|
3350
|
+
}
|
|
3351
|
+
},
|
|
3352
|
+
async execute({ path, offset, limit, maxBytes, lineNumbers }, ctx) {
|
|
3353
|
+
const imgMedia = imageMediaTypeFor(path);
|
|
3354
|
+
if (imgMedia) {
|
|
3355
|
+
const sizeCap = maxBytes !== void 0 ? normalizeInteger(maxBytes, DEFAULT_IMAGE_BYTE_CAP) : DEFAULT_IMAGE_BYTE_CAP;
|
|
3356
|
+
try {
|
|
3357
|
+
const { base64, byteLength } = await readFileAsBase64(ctx.execution, ctx.handle, path);
|
|
3358
|
+
if (sizeCap > 0 && byteLength > sizeCap) return `[image too large to inline: ${path}, ${byteLength} bytes (cap ${sizeCap}). Raise maxBytes, or use shell to inspect.]`;
|
|
3359
|
+
return [{
|
|
3360
|
+
type: "text",
|
|
3361
|
+
text: `Image: ${path} (${byteLength} bytes, ${imgMedia})`
|
|
3362
|
+
}, {
|
|
3363
|
+
type: "image",
|
|
3364
|
+
mediaType: imgMedia,
|
|
3365
|
+
data: base64
|
|
3366
|
+
}];
|
|
3367
|
+
} catch (err) {
|
|
3368
|
+
return `Image read failed: ${path} — ${err instanceof Error ? err.message : String(err)}.${await suggestionFor(ctx.execution, ctx.handle, path)}`;
|
|
3369
|
+
}
|
|
3370
|
+
}
|
|
3371
|
+
let raw;
|
|
3372
|
+
try {
|
|
3373
|
+
raw = await ctx.execution.readFile(ctx.handle, path);
|
|
3374
|
+
} catch {
|
|
3375
|
+
return `File not found: ${path}.${await suggestionFor(ctx.execution, ctx.handle, path)}`;
|
|
3376
|
+
}
|
|
3377
|
+
const totalBytes = Buffer.byteLength(raw);
|
|
3378
|
+
const readState = ctx.behavior?.dedupReads !== false ? getReadState(ctx.session) : void 0;
|
|
3379
|
+
const absKey = `${ctx.handle.cwd}::${path}`;
|
|
3380
|
+
const offsetForKey = normalizeInteger(offset, 1);
|
|
3381
|
+
const limitForKey = normalizeInteger(limit, DEFAULT_LINE_LIMIT);
|
|
3382
|
+
const maxBytesForKey = normalizeInteger(maxBytes, DEFAULT_BYTE_CAP);
|
|
3383
|
+
const showLineNumbers = typeof lineNumbers === "boolean" ? lineNumbers : ctx.behavior?.readLineNumbers ?? true;
|
|
3384
|
+
const currentHash = readState ? hashContent(raw) : "";
|
|
3385
|
+
if (readState) {
|
|
3386
|
+
const prior = readState.get(absKey);
|
|
3387
|
+
if (prior && prior.contentHash === currentHash && prior.offset === offsetForKey && prior.limit === limitForKey && prior.maxBytes === maxBytesForKey && prior.lineNumbers === showLineNumbers) return `File ${path} unchanged since the previous read in this session — the prior result is still current.`;
|
|
3388
|
+
}
|
|
3389
|
+
if (looksBinary(raw)) return `[binary file: ${path}, ${totalBytes} bytes; use shell with hexdump | xxd | od to inspect]`;
|
|
3390
|
+
const offsetN = offsetForKey;
|
|
3391
|
+
const limitN = limitForKey;
|
|
3392
|
+
const maxBytesN = maxBytesForKey;
|
|
3393
|
+
const lines = raw.split("\n");
|
|
3394
|
+
const totalLines = lines.length;
|
|
3395
|
+
const startIdx = Math.max(0, offsetN - 1);
|
|
3396
|
+
const endIdx = limitN > 0 ? Math.min(totalLines, startIdx + limitN) : totalLines;
|
|
3397
|
+
let slice = lines.slice(startIdx, endIdx);
|
|
3398
|
+
let bytesCut = false;
|
|
3399
|
+
if (maxBytesN > 0) {
|
|
3400
|
+
const truncatedSlice = [];
|
|
3401
|
+
let bytesUsed = 0;
|
|
3402
|
+
for (const line of slice) {
|
|
3403
|
+
const lineBytes = Buffer.byteLength(line) + 1;
|
|
3404
|
+
if (bytesUsed + lineBytes > maxBytesN && truncatedSlice.length > 0) {
|
|
3405
|
+
bytesCut = true;
|
|
3406
|
+
break;
|
|
3407
|
+
}
|
|
3408
|
+
truncatedSlice.push(line);
|
|
3409
|
+
bytesUsed += lineBytes;
|
|
3410
|
+
if (bytesUsed >= maxBytesN) break;
|
|
3411
|
+
}
|
|
3412
|
+
if (truncatedSlice.length < slice.length) bytesCut = true;
|
|
3413
|
+
slice = truncatedSlice;
|
|
3414
|
+
}
|
|
3415
|
+
let midLineCut = false;
|
|
3416
|
+
if (maxBytesN > 0 && slice.length > 0) {
|
|
3417
|
+
if (Buffer.byteLength(slice.join("\n")) > maxBytesN) {
|
|
3418
|
+
const lastIdx = slice.length - 1;
|
|
3419
|
+
const lastLine = slice[lastIdx];
|
|
3420
|
+
const otherBytes = lastIdx > 0 ? Buffer.byteLength(slice.slice(0, lastIdx).join("\n")) + 1 : 0;
|
|
3421
|
+
const budgetForLast = Math.max(0, maxBytesN - otherBytes);
|
|
3422
|
+
let cut = Math.min(lastLine.length, budgetForLast);
|
|
3423
|
+
while (cut > 0 && Buffer.byteLength(lastLine.slice(0, cut)) > budgetForLast) cut--;
|
|
3424
|
+
slice[lastIdx] = lastLine.slice(0, cut);
|
|
3425
|
+
midLineCut = true;
|
|
3426
|
+
bytesCut = true;
|
|
3427
|
+
}
|
|
3428
|
+
}
|
|
3429
|
+
const lastLineRead = startIdx + slice.length;
|
|
3430
|
+
const body = showLineNumbers ? slice.map((line, i) => `${startIdx + i + 1}\t${line}`).join("\n") : slice.join("\n");
|
|
3431
|
+
if (readState) readState.set(absKey, {
|
|
3432
|
+
contentHash: currentHash,
|
|
3433
|
+
offset: offsetN,
|
|
3434
|
+
limit: limitN,
|
|
3435
|
+
maxBytes: maxBytesN,
|
|
3436
|
+
lineNumbers: showLineNumbers,
|
|
3437
|
+
mtimeMs: Date.now()
|
|
3438
|
+
});
|
|
3439
|
+
const linesTruncated = endIdx < totalLines || bytesCut;
|
|
3440
|
+
if (!linesTruncated && offsetN === 1) return body;
|
|
3441
|
+
if (!linesTruncated) return `${body}\n\n…read lines ${offsetN}-${lastLineRead} of ${totalLines}.`;
|
|
3442
|
+
if (midLineCut) return `${body}\n\n…truncated mid-line at line ${lastLineRead} (byte cap ${maxBytesN} reached). File has ${totalLines} lines, ${totalBytes} bytes total. Raise maxBytes to read the full line.`;
|
|
3443
|
+
return `${body}\n\n…truncated at line ${lastLineRead} (${bytesCut ? `byte cap (${maxBytesN}) reached` : `line limit (${limitN}) reached`}). File has ${totalLines} lines, ${totalBytes} bytes total — re-read with offset=${lastLineRead + 1} to continue.`;
|
|
3444
|
+
}
|
|
3445
|
+
};
|
|
3446
|
+
/**
 * Coerce a user-supplied numeric parameter to a non-negative integer,
 * substituting `fallback` for anything unusable (non-numbers, NaN,
 * ±Infinity, negatives). Fractional values are floored.
 */
function normalizeInteger(value, fallback) {
	const usable = typeof value === "number" && Number.isFinite(value) && value >= 0;
	return usable ? Math.floor(value) : fallback;
}
|
|
3451
|
+
//#endregion
|
|
3452
|
+
//#region src/tools/shell-semantics.ts
|
|
3453
|
+
/** Fallback semantic: any nonzero exit code is reported as a failure. */
const DEFAULT_SEMANTIC = (exitCode) => ({
	isError: exitCode !== 0,
	message: exitCode !== 0 ? `Command failed with exit code ${exitCode}` : void 0
});
/**
 * Builds a semantic for commands where exit code 1 is a meaningful "soft"
 * result rather than a failure: only exit >= 2 counts as an error, and
 * exit 1 carries the given explanatory message.
 */
const exitOneMeans = (message) => (exit) => ({
	isError: exit >= 2,
	message: exit === 1 ? message : void 0
});
const COMMAND_SEMANTICS = new Map([
	["grep", exitOneMeans("No matches found")],
	["rg", exitOneMeans("No matches found")],
	["diff", exitOneMeans("Files differ")],
	["find", exitOneMeans("Some directories were inaccessible")],
	["test", exitOneMeans("Condition is false")],
	["[", exitOneMeans("Condition is false")]
]);
/**
 * Pick the semantic for a command line. Best-effort: walks the command from
 * right to left, taking the last segment after `|` / `&&` / `||` / `;` —
 * that's the segment whose exit code propagates. Don't depend on this for
 * security; it's a heuristic, not a parser.
 */
function interpretShellResult(command, exitCode) {
	const semantic = COMMAND_SEMANTICS.get(extractTrailingCommand(command)) ?? DEFAULT_SEMANTIC;
	return semantic(exitCode);
}
/**
 * First word of the last pipeline/sequence segment, skipping any leading
 * `VAR=value` environment assignments. Returns "" for an empty command.
 */
function extractTrailingCommand(command) {
	const segments = command.split(/\|\||&&|[;|\n]/);
	const lastSegment = segments[segments.length - 1]?.trim() ?? command;
	const words = lastSegment.split(/\s+/).filter((token) => !/^[A-Z_]\w*=/i.test(token));
	return words[0] ?? "";
}
|
|
3497
|
+
//#endregion
|
|
3498
|
+
//#region src/tools/shell.ts
|
|
3499
|
+
/**
 * Execute a shell command in the agent's execution context.
 *
 * Truncation is **tail-priority**: when stdout+stderr combined exceeds
 * `maxOutputBytes`, the head is dropped and a marker `…(N bytes truncated
 * from head)…` is inserted before the tail. Errors and exit summaries
 * usually live at the end of output, so keeping the tail preserves the
 * model's most useful signal.
 *
 * Defaults are tuned for typical commands (build output, test runs): the
 * combined cap is 32 KiB and the per-call timeout follows the execution
 * context's own default (30 s for in-process).
 */
const DEFAULT_MAX_OUTPUT_BYTES = 32768;
const shell = {
	spec: {
		name: "shell",
		description: "Execute a shell command in the project root and return its combined stdout/stderr. Output is tail-priority truncated at 32 KiB by default; errors and exit-code summaries live in the tail. By default each call appends a `(exit N, Nms)` footer and surfaces non-empty stderr in a separate section even on success — set `metadata: false` to return only stdout. Set maxOutputBytes=0 to disable truncation.",
		inputSchema: {
			type: "object",
			properties: {
				command: {
					type: "string",
					description: "Shell command to run."
				},
				timeout: {
					type: "integer",
					description: "Per-call timeout in milliseconds."
				},
				maxOutputBytes: {
					type: "integer",
					description: "Truncate combined stdout+stderr beyond this many bytes. Default: 32768. Set 0 for unlimited."
				},
				metadata: {
					type: "boolean",
					description: "Append `(exit N, Nms)` footer and surface non-empty stderr on success. Default: true."
				}
			},
			required: ["command"]
		}
	},
	async execute({ command, timeout, maxOutputBytes, metadata }, ctx) {
		const execOpts = {};
		// The tool parameter is milliseconds; the execution context takes seconds —
		// convert, rounding up so a sub-second request still gets at least 1 s.
		if (typeof timeout === "number" && Number.isFinite(timeout) && timeout > 0) execOpts.timeout = Math.max(1, Math.ceil(timeout / 1e3));
		const cmd = command;
		const wantMetadata = metadata !== false;
		const startedAt = Date.now();
		const result = await ctx.execution.exec(ctx.handle, cmd, execOpts);
		const durationMs = Date.now() - startedAt;
		// cap of 0 disables truncation (truncateTail passes text through unchanged).
		const cap = normalizeCap(maxOutputBytes);
		// Command-aware exit-code interpretation (e.g. grep exit 1 = "no matches").
		const semantic = interpretShellResult(cmd, result.exitCode);
		if (result.exitCode === 0) {
			const stdoutTail = truncateTail(result.stdout || "(no output)", cap);
			if (!wantMetadata) return stdoutTail;
			// On success, non-empty stderr is surfaced in its own section, capped at
			// 2 KiB (or uncapped when cap is 0, since Math.min(0, 2048) is 0).
			const stderrTrimmed = result.stderr.trim();
			return `${stdoutTail}${stderrTrimmed ? `\n[stderr]\n${truncateTail(stderrTrimmed, Math.min(cap, 2048))}` : ""}\n(exit 0, ${durationMs}ms)`;
		}
		// Nonzero exit that the semantic says is NOT an error (grep 1, diff 1, …):
		// report output (or the semantic message when there is none) plus footers.
		if (!semantic.isError) {
			const tail = truncateTail((result.stdout || result.stderr || "").trim(), cap);
			const semanticFooter = semantic.message ? `\n(${semantic.message})` : "";
			const timingFooter = wantMetadata ? `\n(exit ${result.exitCode}, ${durationMs}ms)` : "";
			return `${tail.length > 0 ? tail : semantic.message ?? "(no output)"}${semanticFooter}${timingFooter}`;
		}
		// Genuine failure: lead with the exit code, then the combined streams.
		const combined = `${result.stdout}\n${result.stderr}`.trim();
		return `${wantMetadata ? `Exit code ${result.exitCode} (${durationMs}ms)` : `Exit code ${result.exitCode}`}\n${truncateTail(combined, cap)}`;
	}
};
|
|
3566
|
+
/**
 * Normalize the user-supplied output cap: non-negative finite numbers are
 * floored to an integer; anything else falls back to the 32 KiB default.
 * (0 is preserved — it means "unlimited" downstream.)
 */
function normalizeCap(value) {
	const usable = typeof value === "number" && Number.isFinite(value) && value >= 0;
	return usable ? Math.floor(value) : DEFAULT_MAX_OUTPUT_BYTES;
}
|
|
3571
|
+
/**
 * Tail-priority byte truncation. When `text` exceeds `cap` bytes, the head is
 * dropped and replaced with a marker. Always cuts on code-point boundaries
 * (no mid-codepoint splits): the backward walk steps over a full surrogate
 * pair at a time, so astral characters (emoji, rare CJK) are measured as one
 * 4-byte unit. Walking one UTF-16 unit at a time would both over-count them
 * (`Buffer.byteLength` encodes each lone surrogate as a 3-byte U+FFFD) and
 * risk slicing between the surrogate halves.
 *
 * `cap === 0` disables truncation. `cap` is interpreted as a UTF-8 byte budget
 * for the tail itself — the marker is added on top and may push the visible
 * length slightly past `cap`. That tradeoff is intentional: a marker that
 * always fits inside the budget would shrink the actual content displayed.
 *
 * @param {string} text - Text to truncate.
 * @param {number} cap - UTF-8 byte budget for the kept tail; 0 = unlimited.
 * @returns {string} `text` unchanged, or marker + newline + tail.
 */
function truncateTail(text, cap) {
	if (cap === 0) return text;
	const totalBytes = Buffer.byteLength(text);
	if (totalBytes <= cap) return text;
	let bytes = 0;
	let charIdx = text.length;
	while (charIdx > 0) {
		// Step back one code point: two UTF-16 units when the pair at the cursor
		// is a valid high+low surrogate pair, otherwise one.
		let step = 1;
		const trailing = text.charCodeAt(charIdx - 1);
		if (trailing >= 56320 && trailing <= 57343 && charIdx >= 2) {
			const leading = text.charCodeAt(charIdx - 2);
			if (leading >= 55296 && leading <= 56319) step = 2;
		}
		const chBytes = Buffer.byteLength(text.slice(charIdx - step, charIdx));
		if (bytes + chBytes > cap) break;
		bytes += chBytes;
		charIdx -= step;
	}
	const tail = text.slice(charIdx);
	return `…(${totalBytes - Buffer.byteLength(tail)} bytes truncated from head)…\n${tail}`;
}
|
|
3597
|
+
//#endregion
|
|
3598
|
+
//#region src/tools/spawn.ts
|
|
3599
|
+
/** Child-agent hook events that are re-emitted on the parent's hook bus. */
const BUBBLED_EVENTS = [
	"stream:text",
	"stream:thinking",
	"stream:end",
	"tool:before",
	"tool:after",
	"tool:error",
	"turn:after"
];
/** Each bubbled event mapped to its `child:`-prefixed name on the parent. */
const CHILD_EVENT_NAME = Object.fromEntries(BUBBLED_EVENTS.map((evt) => [evt, `child:${evt}`]));
|
|
3617
|
+
/**
 * Pull the plain text out of a message-like object: string `content` is
 * returned as-is; array `content` yields the `text` of every `text`-typed
 * block joined with newlines; anything else yields "".
 */
function extractText(message) {
	if (!message || typeof message !== "object") return "";
	const { content } = message;
	if (typeof content === "string") return content;
	if (!Array.isArray(content)) return "";
	const pieces = [];
	for (const block of content) {
		if (block && typeof block === "object" && block.type === "text") pieces.push(block.text);
	}
	return pieces.join("\n");
}
|
|
3624
|
+
/**
 * Race `task` (an already-running child `agent.run()` promise) against a
 * timer. Does NOT race against the parent abort signal — the child agent
 * already observes the same signal internally and handles its own aborted
 * bookkeeping, so racing here would detach the spawn from the child's
 * session-persisting finally block.
 *
 * On timeout: rejects with `SpawnTimeoutError`; caller is expected to call
 * `agent.abort()` and subsequently `await` the original `task` so the
 * child's session state (runs, turns, status) gets flushed before the
 * parent moves on.
 */
async function raceWithTimeout(task, timeoutMs) {
	// No (or non-positive) timeout: hand the task straight back.
	if (!timeoutMs || timeoutMs <= 0) return task;
	let timer;
	const expiry = new Promise((_resolve, reject) => {
		timer = setTimeout(() => reject(new SpawnTimeoutError(timeoutMs)), timeoutMs);
	});
	try {
		return await Promise.race([task, expiry]);
	} finally {
		// Always clear the timer, whichever side settles first.
		if (timer) clearTimeout(timer);
	}
}
var SpawnTimeoutError = class extends Error {
	/** The timeout budget (ms) that was exceeded. */
	timeoutMs;
	constructor(timeoutMs) {
		super(`Child agent timed out after ${timeoutMs}ms`);
		this.name = "SpawnTimeoutError";
		this.timeoutMs = timeoutMs;
	}
};
|
|
3656
|
+
/**
 * Wire child's hooks to bubble into `parentHooks` as `child:*` events.
 *
 * Two kinds of forwarding:
 *
 * 1. **Originating events** (`stream:text`, `tool:before`, …) → rewrite to
 *    the matching `child:*` event, inject `{ childId, depth }` from this
 *    spawn, then fire on the parent's hook bus.
 * 2. **Re-bubbled events** (`child:stream:text`, `child:tool:before`, …)
 *    from a grandchild already carry `childId` + `depth` pointing at the
 *    originating subagent. Forward them verbatim so the top-level listener
 *    sees the correct ancestry, not the immediate parent's.
 *
 * Returns a function that unregisters every listener registered here.
 * Called before `agent.run()` starts, torn down in a finally block — so
 * nothing leaks even if the child throws mid-run.
 */
function bubbleHooks(childHooks, parentHooks, childId, depth) {
	const unregisters = [];
	// Fire through the receiver instead of a detached method reference:
	// `const fire = parentHooks.callHook` would lose `this` when called.
	// NOTE(review): hookable pre-binds callHook in its constructor so the
	// detached form happened to work there, but a duck-typed hook bus may
	// not bind — calling through `parentHooks` is safe either way.
	const fire = (evt, ctx) => parentHooks.callHook(evt, ctx);
	// 1. Originating child events → `child:*`, tagged with this spawn's identity.
	for (const evt of BUBBLED_EVENTS) {
		const parentEvt = CHILD_EVENT_NAME[evt];
		const unregister = childHooks.hook(evt, (ctx) => {
			fire(parentEvt, {
				...ctx,
				childId,
				depth
			});
		});
		unregisters.push(unregister);
	}
	// 2. Already-bubbled grandchild events → forwarded verbatim (their ctx
	// already carries the originating childId/depth).
	for (const evt of BUBBLED_EVENTS) {
		const parentEvt = CHILD_EVENT_NAME[evt];
		const unregister = childHooks.hook(parentEvt, (ctx) => {
			fire(parentEvt, ctx);
		});
		unregisters.push(unregister);
	}
	return () => {
		for (const u of unregisters) u();
	};
}
|
|
3698
|
+
/**
 * Create a configured spawn tool.
 *
 * State (`children`, `totalChildStats`, counters, active count) is scoped to
 * the returned instance. Multiple parent agents using the same instance will
 * share counters + stats + concurrency slots — call `createSpawnTool()` per
 * agent (or use the stateless default `spawn`) to keep them isolated.
 */
function createSpawnTool(options = {}) {
	// Per-instance registry of in-flight children, keyed by generated id.
	const localChildren = /* @__PURE__ */ new Map();
	// Monotonic id counter; never reset, so ids stay unique per instance.
	let localCounter = 0;
	// Number of children currently running — gates the concurrency cap.
	let localActiveCount = 0;
	const maxConcurrent = options.maxConcurrent ?? 3;
	const maxDepth = options.maxDepth ?? 3;
	const forwardHooks = options.forwardHooks ?? true;
	// Lifetime aggregate across all completed children of this instance.
	const localStats = {
		totalIn: 0,
		totalOut: 0,
		totalCacheRead: 0,
		totalCacheCreation: 0,
		turns: 0,
		elapsed: 0
	};
	return {
		get children() {
			return localChildren;
		},
		// Returns a defensive copy so callers can't mutate the accumulator.
		get totalChildStats() {
			return { ...localStats };
		},
		spec: {
			name: "spawn",
			description: "Spawn a sub-agent for a self-contained task that benefits from isolation (separate context window, separate retries) — for example, a deep research dive or a long codegen pass on a specific file. The sub-agent runs independently with its own tool access and returns its final response. Do NOT spawn for sequential steps you could do yourself.",
			inputSchema: {
				type: "object",
				properties: {
					task: {
						type: "string",
						description: "The task prompt for the sub-agent. Be specific about what you want it to accomplish."
					},
					system: {
						type: "string",
						description: "Optional system prompt override for this specific sub-agent."
					}
				},
				required: ["task"]
			}
		},
		async execute(input, ctx) {
			const task = input.task;
			const systemOverride = input.system;
			// Depth accounting: this call site is at ctx.depth; the child runs one deeper.
			const parentDepth = ctx.depth ?? 0;
			const childDepth = parentDepth + 1;
			// Guard rails return strings (not throws) so the model sees why the spawn
			// was refused and can adapt.
			if (childDepth > maxDepth) return `Cannot spawn: maxDepth=${maxDepth} reached (parent depth=${parentDepth}). Deepen the cap with createSpawnTool({ maxDepth }).`;
			if (localActiveCount >= maxConcurrent) return `Cannot spawn: ${localActiveCount}/${maxConcurrent} sub-agents already running. Wait for one to complete.`;
			if (ctx.signal.aborted) return `[sub-agent pre-aborted] Parent signal was already aborted — skipped "${task.slice(0, 80)}"`;
			const id = `child-${++localCounter}`;
			// Claim a concurrency slot; released in the outer finally below.
			localActiveCount++;
			const child = {
				id,
				task,
				startedAt: Date.now(),
				depth: childDepth
			};
			localChildren.set(id, child);
			let destroyError;
			let childRunStatus = "completed";
			let finalStats;
			let result = "";
			let unbubble;
			try {
				// Child inherits the parent's configuration field-by-field (only the
				// fields that are actually set), then `options.preset` overrides, then
				// provider/execution are pinned to the parent's.
				const agent = createAgent({
					...ctx.name !== void 0 ? { name: ctx.name } : {},
					...ctx.system !== void 0 ? { system: ctx.system } : {},
					tools: ctx.tools,
					...ctx.toolAliases !== void 0 ? { toolAliases: ctx.toolAliases } : {},
					...ctx.mcpServers !== void 0 ? { mcpServers: ctx.mcpServers } : {},
					...ctx.skills !== void 0 ? { skills: ctx.skills } : {},
					...ctx.behavior !== void 0 ? { behavior: ctx.behavior } : {},
					...options.preset,
					provider: ctx.provider,
					execution: ctx.execution,
					// Child shares the parent's session only when persistence is opted in.
					...options.persist && ctx.session ? { session: ctx.session } : {}
				});
				// Bubble child hook events onto the parent bus as `child:*`; torn down
				// in the outer finally.
				if (forwardHooks) unbubble = bubbleHooks(agent.hooks, ctx.hooks, id, childDepth);
				options.onSpawn?.(child);
				await ctx.hooks.callHook("spawn:before", {
					id,
					task,
					depth: childDepth
				});
				// Kept as a named promise: the timeout path below must be able to
				// re-await it after abort so child session state is flushed.
				const runPromise = agent.run({
					prompt: task,
					model: options.model,
					system: systemOverride ?? options.system,
					thinking: options.thinking,
					signal: ctx.signal,
					depth: childDepth,
					...options.persist && ctx.runId ? { parentRunId: ctx.runId } : {}
				});
				try {
					finalStats = await raceWithTimeout(runPromise, options.timeoutMs);
					const treeTurns = flattenTurns(finalStats).length;
					if (ctx.signal.aborted) {
						// Run resolved but the parent was aborted meanwhile — report as aborted.
						childRunStatus = "aborted";
						result = [`[sub-agent ${id}] Aborted after ${treeTurns} turns (${finalStats.elapsed}ms)`, `Tokens: ${finalStats.totalIn} in / ${finalStats.totalOut} out`].join("\n");
					} else {
						const response = extractText(agent.turns.at(-1));
						result = [
							`[sub-agent ${id}] Completed in ${treeTurns} turns (${finalStats.elapsed}ms)`,
							`Tokens: ${finalStats.totalIn} in / ${finalStats.totalOut} out`,
							"",
							response || "(no text response)"
						].join("\n");
					}
				} catch (err) {
					if (err instanceof SpawnTimeoutError) {
						childRunStatus = "timeout";
						// Abort the child, then re-await the ORIGINAL run promise so its
						// finally-block bookkeeping (session persistence) completes.
						agent.abort();
						try {
							finalStats = await runPromise;
						} catch {
							// Child rejected after abort — substitute zeroed stats, charging
							// the elapsed budget that the timeout consumed.
							finalStats = {
								totalIn: 0,
								totalOut: 0,
								totalCacheRead: 0,
								totalCacheCreation: 0,
								turns: 0,
								elapsed: err.timeoutMs
							};
						}
						result = `[sub-agent ${id}] Timed out after ${err.timeoutMs}ms`;
					} else {
						const error = err instanceof Error ? err : new Error(String(err));
						childRunStatus = "error";
						finalStats = {
							totalIn: 0,
							totalOut: 0,
							totalCacheRead: 0,
							totalCacheCreation: 0,
							turns: 0,
							elapsed: 0
						};
						result = `[sub-agent ${id}] Error: ${error.message}`;
						await ctx.hooks.callHook("spawn:error", {
							id,
							task,
							depth: childDepth,
							error
						});
					}
				} finally {
					// Always destroy the child agent; a destroy failure is captured and
					// reported via spawn:error below rather than masking the run result.
					try {
						await agent.destroy();
					} catch (err) {
						destroyError = err instanceof Error ? err : new Error(String(err));
					}
				}
				// finalStats is assigned on every path through the inner try/catch,
				// so the aggregate always advances once a run was started.
				if (finalStats) {
					localStats.totalIn += finalStats.totalIn;
					localStats.totalOut += finalStats.totalOut;
					localStats.totalCacheRead += finalStats.totalCacheRead;
					localStats.totalCacheCreation += finalStats.totalCacheCreation;
					localStats.turns += finalStats.turns;
					localStats.elapsed += finalStats.elapsed;
				}
				const childRunStats = {
					id,
					task,
					stats: finalStats,
					depth: childDepth,
					status: childRunStatus,
					...finalStats.output ? { output: finalStats.output } : {}
				};
				options.onComplete?.(child, finalStats, childRunStatus);
				await ctx.hooks.callHook("spawn:complete", childRunStats);
				if (destroyError) await ctx.hooks.callHook("spawn:error", {
					id,
					task,
					depth: childDepth,
					error: destroyError
				});
				return result;
			} finally {
				// Teardown runs on every exit path: unhook bubbling, free the
				// concurrency slot, drop the child from the registry.
				unbubble?.();
				localActiveCount--;
				localChildren.delete(id);
			}
		}
	};
}
|
|
3889
|
+
//#endregion
|
|
3890
|
+
//#region src/tools/write-file.ts
|
|
3891
|
+
/**
|
|
3892
|
+
* Write a file, with an idempotency signal when the content is unchanged.
|
|
3893
|
+
*
|
|
3894
|
+
* Three return shapes — chosen so the model can recognize a no-op without a
|
|
3895
|
+
* separate read:
|
|
3896
|
+
* - `Created path (N bytes)` — file did not exist
|
|
3897
|
+
* - `Updated path (N bytes)` — content differed from on-disk
|
|
3898
|
+
* - `No change needed: path already at target state (N bytes)` — equal
|
|
3899
|
+
*
|
|
3900
|
+
* Race window: in non-process execution contexts (docker, sandbox) shared by
|
|
3901
|
+
* multiple agents, another writer can mutate the file between our read and
|
|
3902
|
+
* our write. Local process context is single-writer per agent so the race is
|
|
3903
|
+
* a non-issue there. Documented rather than locked because the cost of
|
|
3904
|
+
* cross-context locking outweighs the cost of a stale "No change" message.
|
|
3905
|
+
*/
|
|
3906
|
+
const writeFile$1 = {
	spec: {
		name: "write_file",
		description: "Write content to a file (creates parent directories). Returns Created / Updated / \"No change needed\" so the model can detect no-ops without a separate read.",
		inputSchema: {
			type: "object",
			properties: {
				path: {
					type: "string",
					description: "Relative file path."
				},
				content: {
					type: "string",
					description: "File content."
				}
			},
			required: ["path", "content"]
		}
	},
	async execute({ path, content }, ctx) {
		// Probe current content first so we can report Created vs Updated vs no-op.
		let previous;
		try {
			previous = await ctx.execution.readFile(ctx.handle, path);
		} catch {
			// Unreadable/absent file: `previous` stays undefined → "Created" path.
		}
		const byteCount = Buffer.byteLength(content);
		if (previous === content) {
			return `No change needed: ${path} already at target state (${byteCount} bytes).`;
		}
		await ctx.execution.writeFile(ctx.handle, path, content);
		if (previous === void 0) {
			return `Created ${path} (${byteCount} bytes).`;
		}
		return `Updated ${path} (${byteCount} bytes).`;
	}
};
|
|
3938
|
+
//#endregion
|
|
3939
|
+
export { multiEdit as a, grep as c, createAgent as d, createToolSearchTool as f, validateToolArgs as g, createSkillsReadTool as h, readFile$1 as i, glob as l, createSkillsRunScriptTool as m, createSpawnTool as n, listFiles as o, createSkillsUseTool as p, shell as r, createInteractionTool as s, writeFile$1 as t, edit as u };
|
|
3940
|
+
|
|
3941
|
+
//# sourceMappingURL=tools-DpeWKzP1.js.map
|