deepagentsdk 0.11.1 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/elements/index.cjs +274 -0
- package/dist/adapters/elements/index.cjs.map +1 -0
- package/dist/adapters/elements/index.d.cts +122 -0
- package/dist/adapters/elements/index.d.mts +122 -0
- package/dist/adapters/elements/index.mjs +268 -0
- package/dist/adapters/elements/index.mjs.map +1 -0
- package/dist/agent-BDM-PIu8.d.mts +1500 -0
- package/dist/agent-DToEVxs-.d.cts +1500 -0
- package/dist/chunk-C5azi7Hr.cjs +67 -0
- package/dist/cli/index.cjs +3162 -0
- package/dist/cli/index.cjs.map +1 -0
- package/dist/cli/index.d.cts +1 -0
- package/dist/cli/index.d.mts +1 -0
- package/dist/cli/index.mjs +3120 -0
- package/dist/cli/index.mjs.map +1 -0
- package/dist/file-saver-BYPKakT4.cjs +3990 -0
- package/dist/file-saver-BYPKakT4.cjs.map +1 -0
- package/dist/file-saver-Hj5so3dV.mjs +3568 -0
- package/dist/file-saver-Hj5so3dV.mjs.map +1 -0
- package/dist/index.cjs +1481 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +1233 -0
- package/dist/index.d.mts +1233 -0
- package/dist/index.mjs +1381 -0
- package/dist/index.mjs.map +1 -0
- package/dist/load-BBYEnMwz.mjs +142 -0
- package/dist/load-BBYEnMwz.mjs.map +1 -0
- package/dist/load-BDxe6Cet.mjs +3 -0
- package/dist/load-BrRAKlO6.cjs +163 -0
- package/dist/load-BrRAKlO6.cjs.map +1 -0
- package/dist/load-DqllBbDc.cjs +4 -0
- package/package.json +26 -12
- package/src/adapters/elements/index.ts +0 -27
- package/src/adapters/elements/messageAdapter.ts +0 -165
- package/src/adapters/elements/statusAdapter.ts +0 -39
- package/src/adapters/elements/types.ts +0 -97
- package/src/adapters/elements/useElementsAdapter.ts +0 -261
- package/src/agent.ts +0 -1258
- package/src/backends/composite.ts +0 -273
- package/src/backends/filesystem.ts +0 -692
- package/src/backends/index.ts +0 -22
- package/src/backends/local-sandbox.ts +0 -175
- package/src/backends/persistent.ts +0 -593
- package/src/backends/sandbox.ts +0 -510
- package/src/backends/state.ts +0 -244
- package/src/backends/utils.ts +0 -287
- package/src/checkpointer/file-saver.ts +0 -98
- package/src/checkpointer/index.ts +0 -5
- package/src/checkpointer/kv-saver.ts +0 -82
- package/src/checkpointer/memory-saver.ts +0 -82
- package/src/checkpointer/types.ts +0 -125
- package/src/cli/components/ApiKeyInput.tsx +0 -300
- package/src/cli/components/FilePreview.tsx +0 -237
- package/src/cli/components/Input.tsx +0 -277
- package/src/cli/components/Message.tsx +0 -93
- package/src/cli/components/ModelSelection.tsx +0 -338
- package/src/cli/components/SlashMenu.tsx +0 -101
- package/src/cli/components/StatusBar.tsx +0 -89
- package/src/cli/components/Subagent.tsx +0 -91
- package/src/cli/components/TodoList.tsx +0 -133
- package/src/cli/components/ToolApproval.tsx +0 -70
- package/src/cli/components/ToolCall.tsx +0 -144
- package/src/cli/components/ToolCallSummary.tsx +0 -175
- package/src/cli/components/Welcome.tsx +0 -75
- package/src/cli/components/index.ts +0 -24
- package/src/cli/hooks/index.ts +0 -12
- package/src/cli/hooks/useAgent.ts +0 -933
- package/src/cli/index.tsx +0 -1066
- package/src/cli/theme.ts +0 -205
- package/src/cli/utils/model-list.ts +0 -365
- package/src/constants/errors.ts +0 -29
- package/src/constants/limits.ts +0 -195
- package/src/index.ts +0 -176
- package/src/middleware/agent-memory.ts +0 -330
- package/src/prompts.ts +0 -196
- package/src/skills/index.ts +0 -2
- package/src/skills/load.ts +0 -191
- package/src/skills/types.ts +0 -53
- package/src/tools/execute.ts +0 -167
- package/src/tools/filesystem.ts +0 -418
- package/src/tools/index.ts +0 -39
- package/src/tools/subagent.ts +0 -443
- package/src/tools/todos.ts +0 -101
- package/src/tools/web.ts +0 -567
- package/src/types/backend.ts +0 -177
- package/src/types/core.ts +0 -220
- package/src/types/events.ts +0 -430
- package/src/types/index.ts +0 -94
- package/src/types/structured-output.ts +0 -43
- package/src/types/subagent.ts +0 -96
- package/src/types.ts +0 -22
- package/src/utils/approval.ts +0 -213
- package/src/utils/events.ts +0 -416
- package/src/utils/eviction.ts +0 -181
- package/src/utils/index.ts +0 -34
- package/src/utils/model-parser.ts +0 -38
- package/src/utils/patch-tool-calls.ts +0 -233
- package/src/utils/project-detection.ts +0 -32
- package/src/utils/summarization.ts +0 -254
|
@@ -0,0 +1,3568 @@
|
|
|
1
|
+
import { Output, ToolLoopAgent, generateText, stepCountIs, streamText, tool, wrapLanguageModel } from "ai";
|
|
2
|
+
import { z } from "zod";
|
|
3
|
+
import micromatch from "micromatch";
|
|
4
|
+
import { basename } from "path";
|
|
5
|
+
import { tavily } from "@tavily/core";
|
|
6
|
+
import TurndownService from "turndown";
|
|
7
|
+
import { Readability } from "@mozilla/readability";
|
|
8
|
+
import { JSDOM } from "jsdom";
|
|
9
|
+
import { spawn } from "child_process";
|
|
10
|
+
import { anthropic } from "@ai-sdk/anthropic";
|
|
11
|
+
import { openai } from "@ai-sdk/openai";
|
|
12
|
+
import { existsSync, mkdirSync, readFileSync, readdirSync, unlinkSync, writeFileSync } from "node:fs";
|
|
13
|
+
import { join } from "node:path";
|
|
14
|
+
|
|
15
|
+
//#region rolldown:runtime
|
|
16
|
+
var __defProp = Object.defineProperty;
|
|
17
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
18
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
19
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
20
|
+
var __esmMin = (fn, res) => () => (fn && (res = fn(fn = 0)), res);
|
|
21
|
+
var __exportAll = (all, symbols) => {
|
|
22
|
+
let target = {};
|
|
23
|
+
for (var name in all) {
|
|
24
|
+
__defProp(target, name, {
|
|
25
|
+
get: all[name],
|
|
26
|
+
enumerable: true
|
|
27
|
+
});
|
|
28
|
+
}
|
|
29
|
+
if (symbols) {
|
|
30
|
+
__defProp(target, Symbol.toStringTag, { value: "Module" });
|
|
31
|
+
}
|
|
32
|
+
return target;
|
|
33
|
+
};
|
|
34
|
+
var __copyProps = (to, from, except, desc) => {
|
|
35
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
36
|
+
for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
|
|
37
|
+
key = keys[i];
|
|
38
|
+
if (!__hasOwnProp.call(to, key) && key !== except) {
|
|
39
|
+
__defProp(to, key, {
|
|
40
|
+
get: ((k) => from[k]).bind(null, key),
|
|
41
|
+
enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
|
|
42
|
+
});
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
}
|
|
46
|
+
return to;
|
|
47
|
+
};
|
|
48
|
+
var __toCommonJS = (mod) => __hasOwnProp.call(mod, "module.exports") ? mod["module.exports"] : __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
49
|
+
|
|
50
|
+
//#endregion
|
|
51
|
+
//#region src/constants/limits.ts
|
|
52
|
+
var DEFAULT_EVICTION_TOKEN_LIMIT$1, DEFAULT_SUMMARIZATION_THRESHOLD$1, CONTEXT_WINDOW, DEFAULT_KEEP_MESSAGES$1, DEFAULT_MAX_STEPS, DEFAULT_SUBAGENT_MAX_STEPS, DEFAULT_READ_LIMIT, MAX_LINE_LENGTH, MAX_FILE_SIZE_MB, DEFAULT_TIMEOUT_SECONDS, DEFAULT_TIMEOUT_MS, LINE_NUMBER_WIDTH;
|
|
53
|
+
var init_limits = __esmMin((() => {
|
|
54
|
+
DEFAULT_EVICTION_TOKEN_LIMIT$1 = 2e4;
|
|
55
|
+
DEFAULT_SUMMARIZATION_THRESHOLD$1 = 17e4;
|
|
56
|
+
CONTEXT_WINDOW = 2e5;
|
|
57
|
+
DEFAULT_KEEP_MESSAGES$1 = 6;
|
|
58
|
+
DEFAULT_MAX_STEPS = 100;
|
|
59
|
+
DEFAULT_SUBAGENT_MAX_STEPS = 50;
|
|
60
|
+
DEFAULT_READ_LIMIT = 2e3;
|
|
61
|
+
MAX_LINE_LENGTH = 1e4;
|
|
62
|
+
MAX_FILE_SIZE_MB = 10;
|
|
63
|
+
DEFAULT_TIMEOUT_SECONDS = 30;
|
|
64
|
+
DEFAULT_TIMEOUT_MS = DEFAULT_TIMEOUT_SECONDS * 1e3;
|
|
65
|
+
LINE_NUMBER_WIDTH = 6;
|
|
66
|
+
}));
|
|
67
|
+
|
|
68
|
+
//#endregion
|
|
69
|
+
//#region src/utils/events.ts
|
|
70
|
+
/**
|
|
71
|
+
* Create a todos-changed event.
|
|
72
|
+
*/
|
|
73
|
+
function createTodosChangedEvent(todos) {
|
|
74
|
+
return {
|
|
75
|
+
type: "todos-changed",
|
|
76
|
+
todos
|
|
77
|
+
};
|
|
78
|
+
}
|
|
79
|
+
/**
|
|
80
|
+
* Create a file-write-start event (preview before write).
|
|
81
|
+
*/
|
|
82
|
+
function createFileWriteStartEvent(path, content) {
|
|
83
|
+
return {
|
|
84
|
+
type: "file-write-start",
|
|
85
|
+
path,
|
|
86
|
+
content
|
|
87
|
+
};
|
|
88
|
+
}
|
|
89
|
+
/**
|
|
90
|
+
* Create a file-written event (after successful write).
|
|
91
|
+
*/
|
|
92
|
+
function createFileWrittenEvent(path, content) {
|
|
93
|
+
return {
|
|
94
|
+
type: "file-written",
|
|
95
|
+
path,
|
|
96
|
+
content
|
|
97
|
+
};
|
|
98
|
+
}
|
|
99
|
+
/**
|
|
100
|
+
* Create a file-edited event.
|
|
101
|
+
*/
|
|
102
|
+
function createFileEditedEvent(path, occurrences) {
|
|
103
|
+
return {
|
|
104
|
+
type: "file-edited",
|
|
105
|
+
path,
|
|
106
|
+
occurrences
|
|
107
|
+
};
|
|
108
|
+
}
|
|
109
|
+
/**
|
|
110
|
+
* Create a file-read event.
|
|
111
|
+
*/
|
|
112
|
+
function createFileReadEvent(path, lines) {
|
|
113
|
+
return {
|
|
114
|
+
type: "file-read",
|
|
115
|
+
path,
|
|
116
|
+
lines
|
|
117
|
+
};
|
|
118
|
+
}
|
|
119
|
+
/**
|
|
120
|
+
* Create a web-search-start event.
|
|
121
|
+
*/
|
|
122
|
+
function createWebSearchStartEvent(query) {
|
|
123
|
+
return {
|
|
124
|
+
type: "web-search-start",
|
|
125
|
+
query
|
|
126
|
+
};
|
|
127
|
+
}
|
|
128
|
+
/**
|
|
129
|
+
* Create a web-search-finish event.
|
|
130
|
+
*/
|
|
131
|
+
function createWebSearchFinishEvent(query, resultCount) {
|
|
132
|
+
return {
|
|
133
|
+
type: "web-search-finish",
|
|
134
|
+
query,
|
|
135
|
+
resultCount
|
|
136
|
+
};
|
|
137
|
+
}
|
|
138
|
+
/**
|
|
139
|
+
* Create an http-request-start event.
|
|
140
|
+
*/
|
|
141
|
+
function createHttpRequestStartEvent(url, method) {
|
|
142
|
+
return {
|
|
143
|
+
type: "http-request-start",
|
|
144
|
+
url,
|
|
145
|
+
method
|
|
146
|
+
};
|
|
147
|
+
}
|
|
148
|
+
/**
|
|
149
|
+
* Create an http-request-finish event.
|
|
150
|
+
*/
|
|
151
|
+
function createHttpRequestFinishEvent(url, statusCode) {
|
|
152
|
+
return {
|
|
153
|
+
type: "http-request-finish",
|
|
154
|
+
url,
|
|
155
|
+
statusCode
|
|
156
|
+
};
|
|
157
|
+
}
|
|
158
|
+
/**
|
|
159
|
+
* Create a fetch-url-start event.
|
|
160
|
+
*/
|
|
161
|
+
function createFetchUrlStartEvent(url) {
|
|
162
|
+
return {
|
|
163
|
+
type: "fetch-url-start",
|
|
164
|
+
url
|
|
165
|
+
};
|
|
166
|
+
}
|
|
167
|
+
/**
|
|
168
|
+
* Create a fetch-url-finish event.
|
|
169
|
+
*/
|
|
170
|
+
function createFetchUrlFinishEvent(url, success) {
|
|
171
|
+
return {
|
|
172
|
+
type: "fetch-url-finish",
|
|
173
|
+
url,
|
|
174
|
+
success
|
|
175
|
+
};
|
|
176
|
+
}
|
|
177
|
+
/**
|
|
178
|
+
* Create a subagent-start event.
|
|
179
|
+
*/
|
|
180
|
+
function createSubagentStartEvent(name, task) {
|
|
181
|
+
return {
|
|
182
|
+
type: "subagent-start",
|
|
183
|
+
name,
|
|
184
|
+
task
|
|
185
|
+
};
|
|
186
|
+
}
|
|
187
|
+
/**
|
|
188
|
+
* Create a subagent-finish event.
|
|
189
|
+
*/
|
|
190
|
+
function createSubagentFinishEvent(name, result) {
|
|
191
|
+
return {
|
|
192
|
+
type: "subagent-finish",
|
|
193
|
+
name,
|
|
194
|
+
result
|
|
195
|
+
};
|
|
196
|
+
}
|
|
197
|
+
/**
|
|
198
|
+
* Create a subagent-step event.
|
|
199
|
+
*/
|
|
200
|
+
function createSubagentStepEvent(stepIndex, toolCalls) {
|
|
201
|
+
return {
|
|
202
|
+
type: "subagent-step",
|
|
203
|
+
stepIndex,
|
|
204
|
+
toolCalls
|
|
205
|
+
};
|
|
206
|
+
}
|
|
207
|
+
/**
|
|
208
|
+
* Create a checkpoint-saved event.
|
|
209
|
+
*/
|
|
210
|
+
function createCheckpointSavedEvent(threadId, step) {
|
|
211
|
+
return {
|
|
212
|
+
type: "checkpoint-saved",
|
|
213
|
+
threadId,
|
|
214
|
+
step
|
|
215
|
+
};
|
|
216
|
+
}
|
|
217
|
+
/**
|
|
218
|
+
* Create a checkpoint-loaded event.
|
|
219
|
+
*/
|
|
220
|
+
function createCheckpointLoadedEvent(threadId, step, messagesCount) {
|
|
221
|
+
return {
|
|
222
|
+
type: "checkpoint-loaded",
|
|
223
|
+
threadId,
|
|
224
|
+
step,
|
|
225
|
+
messagesCount
|
|
226
|
+
};
|
|
227
|
+
}
|
|
228
|
+
var init_events = __esmMin((() => {}));
|
|
229
|
+
|
|
230
|
+
//#endregion
|
|
231
|
+
//#region src/types/backend.ts
|
|
232
|
+
/**
|
|
233
|
+
* Type guard to check if a backend is a SandboxBackendProtocol.
|
|
234
|
+
*/
|
|
235
|
+
function isSandboxBackend(backend) {
|
|
236
|
+
return typeof backend.execute === "function" && typeof backend.id === "string";
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
//#endregion
|
|
240
|
+
//#region src/prompts.ts
|
|
241
|
+
/**
|
|
242
|
+
* System prompts for Deep Agent.
|
|
243
|
+
*/
|
|
244
|
+
const BASE_PROMPT = `In order to complete the objective that the user asks of you, you have access to a number of standard tools.`;
|
|
245
|
+
const TODO_SYSTEM_PROMPT = `## \`write_todos\` (task planning)
|
|
246
|
+
|
|
247
|
+
You have access to a \`write_todos\` tool to help you manage and plan tasks. Use this tool whenever you are working on a complex task.
|
|
248
|
+
|
|
249
|
+
### When to Use This Tool
|
|
250
|
+
|
|
251
|
+
Use proactively for:
|
|
252
|
+
1. Complex multi-step tasks (3+ distinct steps)
|
|
253
|
+
2. Non-trivial tasks requiring careful planning
|
|
254
|
+
3. After receiving new instructions - capture requirements as todos
|
|
255
|
+
4. After completing tasks - mark complete and add follow-ups
|
|
256
|
+
5. When starting new tasks - mark as in_progress (ideally only one at a time)
|
|
257
|
+
|
|
258
|
+
### When NOT to Use
|
|
259
|
+
|
|
260
|
+
Skip for:
|
|
261
|
+
1. Single, straightforward tasks
|
|
262
|
+
2. Trivial tasks with no organizational benefit
|
|
263
|
+
3. Tasks completable in < 3 trivial steps
|
|
264
|
+
4. Purely conversational/informational requests
|
|
265
|
+
|
|
266
|
+
### Task States and Management
|
|
267
|
+
|
|
268
|
+
1. **Task States:**
|
|
269
|
+
- pending: Not yet started
|
|
270
|
+
- in_progress: Currently working on
|
|
271
|
+
- completed: Finished successfully
|
|
272
|
+
- cancelled: No longer needed
|
|
273
|
+
|
|
274
|
+
2. **Task Management:**
|
|
275
|
+
- Update status in real-time
|
|
276
|
+
- Mark complete IMMEDIATELY after finishing
|
|
277
|
+
- Only ONE task in_progress at a time
|
|
278
|
+
- Complete current tasks before starting new ones`;
|
|
279
|
+
const FILESYSTEM_SYSTEM_PROMPT = `## Virtual Filesystem
|
|
280
|
+
|
|
281
|
+
You have access to a virtual filesystem. All file paths must start with a /.
|
|
282
|
+
|
|
283
|
+
- ls: list files in a directory (requires absolute path)
|
|
284
|
+
- read_file: read a file from the filesystem
|
|
285
|
+
- write_file: write to a file in the filesystem
|
|
286
|
+
- edit_file: edit a file in the filesystem
|
|
287
|
+
- glob: find files matching a pattern (e.g., "**/*.py")
|
|
288
|
+
- grep: search for text within files`;
|
|
289
|
+
const TASK_SYSTEM_PROMPT = `## \`task\` (subagent spawner)
|
|
290
|
+
|
|
291
|
+
You have access to a \`task\` tool to launch short-lived subagents that handle isolated tasks. These agents are ephemeral — they live only for the duration of the task and return a single result.
|
|
292
|
+
|
|
293
|
+
When to use the task tool:
|
|
294
|
+
- When a task is complex and multi-step, and can be fully delegated in isolation
|
|
295
|
+
- When a task is independent of other tasks and can run in parallel
|
|
296
|
+
- When a task requires focused reasoning or heavy token/context usage that would bloat the orchestrator thread
|
|
297
|
+
- When sandboxing improves reliability (e.g. code execution, structured searches, data formatting)
|
|
298
|
+
- When you only care about the output of the subagent, and not the intermediate steps
|
|
299
|
+
|
|
300
|
+
Subagent lifecycle:
|
|
301
|
+
1. **Spawn** → Provide clear role, instructions, and expected output
|
|
302
|
+
2. **Run** → The subagent completes the task autonomously
|
|
303
|
+
3. **Return** → The subagent provides a single structured result
|
|
304
|
+
4. **Reconcile** → Incorporate or synthesize the result into the main thread
|
|
305
|
+
|
|
306
|
+
When NOT to use the task tool:
|
|
307
|
+
- If you need to see the intermediate reasoning or steps after the subagent has completed (the task tool hides them)
|
|
308
|
+
- If the task is trivial (a few tool calls or simple lookup)
|
|
309
|
+
- If delegating does not reduce token usage, complexity, or context switching
|
|
310
|
+
- If splitting would add latency without benefit
|
|
311
|
+
|
|
312
|
+
## Important Task Tool Usage Notes
|
|
313
|
+
- Whenever possible, parallelize the work that you do. Whenever you have independent steps to complete - kick off tasks (subagents) in parallel to accomplish them faster.
|
|
314
|
+
- Remember to use the \`task\` tool to silo independent tasks within a multi-part objective.
|
|
315
|
+
- You should use the \`task\` tool whenever you have a complex task that will take multiple steps, and is independent from other tasks that the agent needs to complete.`;
|
|
316
|
+
/**
|
|
317
|
+
* Get the task tool description with available subagent types.
|
|
318
|
+
*/
|
|
319
|
+
function getTaskToolDescription(subagentDescriptions) {
|
|
320
|
+
return `
|
|
321
|
+
Launch an ephemeral subagent to handle complex, multi-step independent tasks with isolated context windows.
|
|
322
|
+
|
|
323
|
+
Available agent types and the tools they have access to:
|
|
324
|
+
${subagentDescriptions.join("\n")}
|
|
325
|
+
|
|
326
|
+
When using the Task tool, you must specify a subagent_type parameter to select which agent type to use.
|
|
327
|
+
|
|
328
|
+
## Usage notes:
|
|
329
|
+
1. Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses
|
|
330
|
+
2. When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.
|
|
331
|
+
3. Each agent invocation is stateless. You will not be able to send additional messages to the agent, nor will the agent be able to communicate with you outside of its final report. Therefore, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.
|
|
332
|
+
4. The agent's outputs should generally be trusted
|
|
333
|
+
5. Clearly tell the agent whether you expect it to create content, perform analysis, or just do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent
|
|
334
|
+
6. If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.
|
|
335
|
+
7. When only the general-purpose agent is provided, you should use it for all tasks. It is great for isolating context and token usage, and completing specific, complex tasks, as it has all the same capabilities as the main agent.
|
|
336
|
+
|
|
337
|
+
### Example usage of the general-purpose agent:
|
|
338
|
+
|
|
339
|
+
<example_agent_descriptions>
|
|
340
|
+
"general-purpose": use this agent for general purpose tasks, it has access to all tools as the main agent.
|
|
341
|
+
</example_agent_descriptions>
|
|
342
|
+
|
|
343
|
+
<example>
|
|
344
|
+
User: "I want to conduct research on the accomplishments of Lebron James, Michael Jordan, and Kobe Bryant, and then compare them."
|
|
345
|
+
Assistant: *Uses the task tool in parallel to conduct isolated research on each of the three players*
|
|
346
|
+
Assistant: *Synthesizes the results of the three isolated research tasks and responds to the User*
|
|
347
|
+
<commentary>
|
|
348
|
+
Research is a complex, multi-step task in it of itself.
|
|
349
|
+
The research of each individual player is not dependent on the research of the other players.
|
|
350
|
+
The assistant uses the task tool to break down the complex objective into three isolated tasks.
|
|
351
|
+
Each research task only needs to worry about context and tokens about one player, then returns synthesized information about each player as the Tool Result.
|
|
352
|
+
This means each research task can dive deep and spend tokens and context deeply researching each player, but the final result is synthesized information, and saves us tokens in the long run when comparing the players to each other.
|
|
353
|
+
</commentary>
|
|
354
|
+
</example>
|
|
355
|
+
|
|
356
|
+
<example>
|
|
357
|
+
User: "Analyze a single large code repository for security vulnerabilities and generate a report."
|
|
358
|
+
Assistant: *Launches a single \`task\` subagent for the repository analysis*
|
|
359
|
+
Assistant: *Receives report and integrates results into final summary*
|
|
360
|
+
<commentary>
|
|
361
|
+
Subagent is used to isolate a large, context-heavy task, even though there is only one. This prevents the main thread from being overloaded with details.
|
|
362
|
+
If the user then asks followup questions, we have a concise report to reference instead of the entire history of analysis and tool calls, which is good and saves us time and money.
|
|
363
|
+
</commentary>
|
|
364
|
+
</example>
|
|
365
|
+
`.trim();
|
|
366
|
+
}
|
|
367
|
+
const DEFAULT_GENERAL_PURPOSE_DESCRIPTION = "General-purpose agent for researching complex questions, searching for files and content, and executing multi-step tasks. When you are searching for a keyword or file and are not confident that you will find the right match in the first few tries use this agent to perform the search for you. This agent has access to all tools as the main agent.";
|
|
368
|
+
const DEFAULT_SUBAGENT_PROMPT = "In order to complete the objective that the user asks of you, you have access to a number of standard tools.";
|
|
369
|
+
const EXECUTE_SYSTEM_PROMPT = `## \`execute\` (shell command execution)
|
|
370
|
+
|
|
371
|
+
You have access to an \`execute\` tool to run shell commands in the sandbox environment.
|
|
372
|
+
|
|
373
|
+
### When to Use This Tool
|
|
374
|
+
|
|
375
|
+
Use for:
|
|
376
|
+
- Running build commands (npm install, npm run build, bun install)
|
|
377
|
+
- Running tests (npm test, bun test, pytest)
|
|
378
|
+
- Executing scripts (node script.js, python script.py)
|
|
379
|
+
- Installing dependencies
|
|
380
|
+
- Checking system state (ls, cat, pwd, which)
|
|
381
|
+
- Any shell command that helps accomplish the task
|
|
382
|
+
|
|
383
|
+
### Important Notes
|
|
384
|
+
|
|
385
|
+
1. **Exit Codes**: Always check the exit code to determine success
|
|
386
|
+
- 0 = success
|
|
387
|
+
- non-zero = failure
|
|
388
|
+
- null = possibly timed out
|
|
389
|
+
|
|
390
|
+
2. **Command Chaining**:
|
|
391
|
+
- Use \`&&\` to chain commands that depend on each other
|
|
392
|
+
- Use \`;\` to run commands sequentially regardless of success
|
|
393
|
+
|
|
394
|
+
3. **Timeouts**: Long-running commands may timeout
|
|
395
|
+
|
|
396
|
+
4. **Working Directory**: Commands run in the sandbox's working directory`;
|
|
397
|
+
/**
|
|
398
|
+
* Build skills section for system prompt with progressive disclosure.
|
|
399
|
+
*/
|
|
400
|
+
function buildSkillsPrompt(skills) {
|
|
401
|
+
if (skills.length === 0) return "";
|
|
402
|
+
return `## Skills System
|
|
403
|
+
|
|
404
|
+
You have access to a skills library providing specialized domain knowledge and workflows.
|
|
405
|
+
|
|
406
|
+
**Available Skills:**
|
|
407
|
+
|
|
408
|
+
${skills.map((skill) => `- **${skill.name}**: ${skill.description}\n → Read \`${skill.path}\` for full instructions`).join("\n")}
|
|
409
|
+
|
|
410
|
+
**How to Use Skills (Progressive Disclosure):**
|
|
411
|
+
|
|
412
|
+
1. **Recognize when a skill applies**: Check if the user's task matches any skill's domain
|
|
413
|
+
2. **Read the skill's full instructions**: Use read_file to load the SKILL.md content
|
|
414
|
+
3. **Follow the skill's workflow**: Skills contain step-by-step instructions and examples
|
|
415
|
+
4. **Access supporting files**: Skills may include helper scripts or configuration files in their directory
|
|
416
|
+
|
|
417
|
+
Skills provide expert knowledge for specialized tasks. Always read the full skill before using it.`;
|
|
418
|
+
}
|
|
419
|
+
|
|
420
|
+
//#endregion
|
|
421
|
+
//#region src/tools/todos.ts
|
|
422
|
+
/**
|
|
423
|
+
* Todo list tool for task planning and tracking.
|
|
424
|
+
*/
|
|
425
|
+
init_events();
|
|
426
|
+
const TodoItemSchema = z.object({
|
|
427
|
+
id: z.string().describe("Unique identifier for the todo item"),
|
|
428
|
+
content: z.string().max(100).describe("The description/content of the todo item (max 100 chars)"),
|
|
429
|
+
status: z.enum([
|
|
430
|
+
"pending",
|
|
431
|
+
"in_progress",
|
|
432
|
+
"completed",
|
|
433
|
+
"cancelled"
|
|
434
|
+
]).describe("The current status of the todo item")
|
|
435
|
+
});
|
|
436
|
+
/**
|
|
437
|
+
* Create the write_todos tool for task planning.
|
|
438
|
+
* @param state - The shared agent state
|
|
439
|
+
* @param onEvent - Optional callback for emitting events
|
|
440
|
+
*/
|
|
441
|
+
function createTodosTool(state, onEvent) {
|
|
442
|
+
return tool({
|
|
443
|
+
description: `Manage and plan tasks using a structured todo list. Use this tool for:
|
|
444
|
+
- Complex multi-step tasks (3+ steps)
|
|
445
|
+
- After receiving new instructions - capture requirements
|
|
446
|
+
- When starting tasks - mark as in_progress (only one at a time)
|
|
447
|
+
- After completing tasks - mark complete immediately
|
|
448
|
+
|
|
449
|
+
Task states: pending, in_progress, completed, cancelled
|
|
450
|
+
|
|
451
|
+
When merge=true, updates are merged with existing todos by id.
|
|
452
|
+
When merge=false, the new todos replace all existing todos.`,
|
|
453
|
+
inputSchema: z.object({
|
|
454
|
+
todos: z.array(TodoItemSchema).min(1).describe("Array of todo items to write"),
|
|
455
|
+
merge: z.boolean().default(true).describe("Whether to merge with existing todos (true) or replace all (false)")
|
|
456
|
+
}),
|
|
457
|
+
execute: async ({ todos, merge }) => {
|
|
458
|
+
if (merge) {
|
|
459
|
+
const existingMap = /* @__PURE__ */ new Map();
|
|
460
|
+
for (const todo of state.todos) existingMap.set(todo.id, todo);
|
|
461
|
+
for (const newTodo of todos) {
|
|
462
|
+
const existing = existingMap.get(newTodo.id);
|
|
463
|
+
if (existing) existingMap.set(newTodo.id, {
|
|
464
|
+
...existing,
|
|
465
|
+
...newTodo
|
|
466
|
+
});
|
|
467
|
+
else existingMap.set(newTodo.id, newTodo);
|
|
468
|
+
}
|
|
469
|
+
state.todos = Array.from(existingMap.values());
|
|
470
|
+
} else state.todos = todos;
|
|
471
|
+
if (onEvent) onEvent(createTodosChangedEvent([...state.todos]));
|
|
472
|
+
return `Todo list updated successfully.\n\nCurrent todos:\n${state.todos.map((t) => `- [${t.status}] ${t.id}: ${t.content}`).join("\n")}`;
|
|
473
|
+
}
|
|
474
|
+
});
|
|
475
|
+
}
|
|
476
|
+
/**
|
|
477
|
+
* Individual builtin tool reference for selective subagent configuration.
|
|
478
|
+
* This is a reference to the creator function, not an instance.
|
|
479
|
+
*/
|
|
480
|
+
const write_todos = createTodosTool;
|
|
481
|
+
|
|
482
|
+
//#endregion
|
|
483
|
+
//#region src/constants/errors.ts
|
|
484
|
+
var FILE_NOT_FOUND, FILE_ALREADY_EXISTS, STRING_NOT_FOUND, INVALID_REGEX, WEB_SEARCH_ERROR, REQUEST_TIMEOUT, SYSTEM_REMINDER_FILE_EMPTY;
|
|
485
|
+
var init_errors = __esmMin((() => {
|
|
486
|
+
FILE_NOT_FOUND = (path) => `Error: File '${path}' not found`;
|
|
487
|
+
FILE_ALREADY_EXISTS = (path) => `Cannot write to ${path} because it already exists. Read and then make an edit, or write to a new path.`;
|
|
488
|
+
STRING_NOT_FOUND = (path, string) => `Error: String not found in file: '${path}'\n\n${string}`;
|
|
489
|
+
INVALID_REGEX = (message) => `Invalid regex pattern: ${message}`;
|
|
490
|
+
WEB_SEARCH_ERROR = (message) => `Web search error: ${message}`;
|
|
491
|
+
REQUEST_TIMEOUT = (timeout) => `Request timed out after ${timeout} seconds`;
|
|
492
|
+
SYSTEM_REMINDER_FILE_EMPTY = "System reminder: File exists but has empty contents";
|
|
493
|
+
}));
|
|
494
|
+
|
|
495
|
+
//#endregion
|
|
496
|
+
//#region src/backends/utils.ts
|
|
497
|
+
/**
|
|
498
|
+
* Shared utility functions for memory backend implementations.
|
|
499
|
+
*/
|
|
500
|
+
init_errors();
|
|
501
|
+
init_limits();
|
|
502
|
+
const EMPTY_CONTENT_WARNING = SYSTEM_REMINDER_FILE_EMPTY;
|
|
503
|
+
/**
|
|
504
|
+
* Format file content with line numbers (cat -n style).
|
|
505
|
+
*/
|
|
506
|
+
function formatContentWithLineNumbers(content, startLine = 1) {
|
|
507
|
+
let lines;
|
|
508
|
+
if (typeof content === "string") {
|
|
509
|
+
lines = content.split("\n");
|
|
510
|
+
if (lines.length > 0 && lines[lines.length - 1] === "") lines = lines.slice(0, -1);
|
|
511
|
+
} else lines = content;
|
|
512
|
+
const resultLines = [];
|
|
513
|
+
for (let i = 0; i < lines.length; i++) {
|
|
514
|
+
const line = lines[i];
|
|
515
|
+
const lineNum = i + startLine;
|
|
516
|
+
if (line && line.length <= MAX_LINE_LENGTH) resultLines.push(`${lineNum.toString().padStart(LINE_NUMBER_WIDTH)}\t${line}`);
|
|
517
|
+
else if (line) {
|
|
518
|
+
const numChunks = Math.ceil(line.length / MAX_LINE_LENGTH);
|
|
519
|
+
for (let chunkIdx = 0; chunkIdx < numChunks; chunkIdx++) {
|
|
520
|
+
const start = chunkIdx * MAX_LINE_LENGTH;
|
|
521
|
+
const end = Math.min(start + MAX_LINE_LENGTH, line.length);
|
|
522
|
+
const chunk = line.substring(start, end);
|
|
523
|
+
if (chunkIdx === 0) resultLines.push(`${lineNum.toString().padStart(LINE_NUMBER_WIDTH)}\t${chunk}`);
|
|
524
|
+
else {
|
|
525
|
+
const continuationMarker = `${lineNum}.${chunkIdx}`;
|
|
526
|
+
resultLines.push(`${continuationMarker.padStart(LINE_NUMBER_WIDTH)}\t${chunk}`);
|
|
527
|
+
}
|
|
528
|
+
}
|
|
529
|
+
} else resultLines.push(`${lineNum.toString().padStart(LINE_NUMBER_WIDTH)}\t`);
|
|
530
|
+
}
|
|
531
|
+
return resultLines.join("\n");
|
|
532
|
+
}
|
|
533
|
+
/**
|
|
534
|
+
* Check if content is empty and return warning message.
|
|
535
|
+
*/
|
|
536
|
+
function checkEmptyContent(content) {
|
|
537
|
+
if (!content || content.trim() === "") return EMPTY_CONTENT_WARNING;
|
|
538
|
+
return null;
|
|
539
|
+
}
|
|
540
|
+
/**
|
|
541
|
+
* Convert FileData to plain string content.
|
|
542
|
+
*/
|
|
543
|
+
function fileDataToString(fileData) {
|
|
544
|
+
return fileData.content.join("\n");
|
|
545
|
+
}
|
|
546
|
+
/**
|
|
547
|
+
* Create a FileData object with timestamps.
|
|
548
|
+
*/
|
|
549
|
+
function createFileData(content, createdAt) {
|
|
550
|
+
const lines = typeof content === "string" ? content.split("\n") : content;
|
|
551
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
552
|
+
return {
|
|
553
|
+
content: lines,
|
|
554
|
+
created_at: createdAt || now,
|
|
555
|
+
modified_at: now
|
|
556
|
+
};
|
|
557
|
+
}
|
|
558
|
+
/**
|
|
559
|
+
* Update FileData with new content, preserving creation timestamp.
|
|
560
|
+
*/
|
|
561
|
+
function updateFileData(fileData, content) {
|
|
562
|
+
const lines = typeof content === "string" ? content.split("\n") : content;
|
|
563
|
+
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
564
|
+
return {
|
|
565
|
+
content: lines,
|
|
566
|
+
created_at: fileData.created_at,
|
|
567
|
+
modified_at: now
|
|
568
|
+
};
|
|
569
|
+
}
|
|
570
|
+
/**
 * Format file data for a read response with line numbers.
 *
 * Empty files produce the shared empty-content warning; an offset past the
 * end of the file produces an error string. Otherwise the requested window
 * of lines is rendered with 1-based line numbers.
 */
function formatReadResponse(fileData, offset, limit) {
	const text = fileDataToString(fileData);
	const warning = checkEmptyContent(text);
	if (warning) return warning;
	const allLines = text.split("\n");
	if (offset >= allLines.length) return `Error: Line offset ${offset} exceeds file length (${allLines.length} lines)`;
	const sliceEnd = Math.min(offset + limit, allLines.length);
	const window = allLines.slice(offset, sliceEnd);
	return formatContentWithLineNumbers(window, offset + 1);
}
|
|
583
|
+
/**
 * Perform string replacement with occurrence validation.
 *
 * @returns Either an error string, or `[newContent, occurrences]` on success.
 * Multiple occurrences are rejected unless `replaceAll` is set.
 */
function performStringReplacement(content, oldString, newString, replaceAll) {
	const segments = content.split(oldString);
	const occurrences = segments.length - 1;
	if (occurrences === 0) return `Error: String not found in file: '${oldString}'`;
	if (occurrences > 1 && !replaceAll) return `Error: String '${oldString}' appears ${occurrences} times in file. Use replace_all=true to replace all instances, or provide a more specific string with surrounding context.`;
	return [segments.join(newString), occurrences];
}
|
|
592
|
+
/**
 * Validate and normalize a directory path.
 *
 * Null/undefined become "/", a leading and trailing slash are enforced.
 * @throws Error when the path is whitespace-only.
 */
function validatePath(path) {
	const raw = path || "/";
	if (!raw || raw.trim() === "") throw new Error("Path cannot be empty");
	const withLeading = raw.startsWith("/") ? raw : `/${raw}`;
	return withLeading.endsWith("/") ? withLeading : `${withLeading}/`;
}
|
|
602
|
+
/**
 * Search a files mapping for paths matching a glob pattern.
 *
 * Matching is done against the path relative to `path` (micromatch, dot
 * files included, brace expansion on). Results are sorted newest-first by
 * `modified_at` and returned as a newline-joined string, or the literal
 * "No files found".
 */
function globSearchFiles(files, pattern, path = "/") {
	let normalizedPath;
	try {
		normalizedPath = validatePath(path);
	} catch {
		return "No files found";
	}
	const matchOptions = {
		dot: true,
		nobrace: false
	};
	const matches = [];
	for (const [filePath, fileData] of Object.entries(files)) {
		if (!filePath.startsWith(normalizedPath)) continue;
		let relative = filePath.slice(normalizedPath.length);
		if (relative.startsWith("/")) relative = relative.slice(1);
		if (!relative) {
			// The path itself names a file: match against its last segment.
			const segments = filePath.split("/");
			relative = segments[segments.length - 1] || "";
		}
		if (micromatch.isMatch(relative, pattern, matchOptions)) matches.push([filePath, fileData.modified_at]);
	}
	if (matches.length === 0) return "No files found";
	matches.sort((a, b) => b[1].localeCompare(a[1]));
	return matches.map(([fp]) => fp).join("\n");
}
|
|
630
|
+
/**
 * Return structured grep matches from an in-memory files mapping.
 *
 * @returns An array of `{ path, line, text }` matches (1-based lines), an
 * invalid-regex error string when `pattern` does not compile, or `[]` when
 * `path` fails validation. Empty lines are never reported as matches.
 */
function grepMatchesFromFiles(files, pattern, path = null, glob$1 = null) {
	let regex;
	try {
		regex = new RegExp(pattern);
	} catch (e) {
		return INVALID_REGEX(e.message);
	}
	let normalizedPath;
	try {
		normalizedPath = validatePath(path);
	} catch {
		return [];
	}
	const matches = [];
	for (const [filePath, fileData] of Object.entries(files)) {
		if (!filePath.startsWith(normalizedPath)) continue;
		// Optional glob filter is applied to the basename only.
		if (glob$1 && !micromatch.isMatch(basename(filePath), glob$1, {
			dot: true,
			nobrace: false
		})) continue;
		fileData.content.forEach((line, index) => {
			if (line && regex.test(line)) matches.push({
				path: filePath,
				line: index + 1,
				text: line
			});
		});
	}
	return matches;
}
|
|
663
|
+
|
|
664
|
+
//#endregion
|
|
665
|
+
//#region src/backends/state.ts
// Run the deferred module initializer that defines the shared filesystem
// error-message helpers (FILE_NOT_FOUND, FILE_ALREADY_EXISTS, ...) used below.
init_errors();
|
|
667
|
+
/**
 * Backend that stores files in shared state (ephemeral).
 *
 * Files persist within a single agent invocation but not across invocations.
 * This is the default backend for Deep Agent when no backend is specified.
 *
 * Files are stored in memory as part of the `DeepAgentState`, making this backend
 * fast but non-persistent. Use `FilesystemBackend` or `PersistentBackend` for
 * cross-session persistence.
 *
 * @example Default usage (no backend specified)
 * ```typescript
 * const agent = createDeepAgent({
 *   model: anthropic('claude-sonnet-4-20250514'),
 *   // StateBackend is used by default
 * });
 * ```
 *
 * @example Explicit usage
 * ```typescript
 * const state: DeepAgentState = { todos: [], files: {} };
 * const backend = new StateBackend(state);
 * const agent = createDeepAgent({
 *   model: anthropic('claude-sonnet-4-20250514'),
 *   backend,
 * });
 * ```
 */
var StateBackend = class {
	// The shared DeepAgentState whose `files` record backs this backend.
	state;
	/**
	 * Create a new StateBackend instance.
	 *
	 * @param state - The DeepAgentState object that will store the files.
	 * Files are stored in `state.files` as a Record<string, FileData>.
	 */
	constructor(state) {
		this.state = state;
	}
	/**
	 * Get files from current state. Tolerates a state created without a
	 * `files` record by returning an empty mapping.
	 */
	getFiles() {
		return this.state.files || {};
	}
	/**
	 * List files and directories in the specified directory (non-recursive).
	 * Immediate subdirectories are synthesized from deeper file paths and
	 * reported with `is_dir: true`; results are sorted by path.
	 */
	lsInfo(path) {
		const files = this.getFiles();
		const infos = [];
		const subdirs = /* @__PURE__ */ new Set();
		const normalizedPath = path.endsWith("/") ? path : path + "/";
		for (const [k, fd] of Object.entries(files)) {
			if (!k.startsWith(normalizedPath)) continue;
			const relative = k.substring(normalizedPath.length);
			if (relative.includes("/")) {
				// Deeper entry: record only its first path segment as a subdir.
				const subdirName = relative.split("/")[0];
				subdirs.add(normalizedPath + subdirName + "/");
				continue;
			}
			// Size is the byte-ish length of the joined content (UTF-16 units).
			const size = fd.content.join("\n").length;
			infos.push({
				path: k,
				is_dir: false,
				size,
				modified_at: fd.modified_at
			});
		}
		for (const subdir of Array.from(subdirs).sort()) infos.push({
			path: subdir,
			is_dir: true,
			size: 0,
			modified_at: ""
		});
		infos.sort((a, b) => a.path.localeCompare(b.path));
		return infos;
	}
	/**
	 * Read file content with line numbers.
	 *
	 * @param offset - 0-based line offset to start from.
	 * @param limit - Maximum number of lines to return.
	 * @returns Formatted content, or an error/warning string.
	 */
	read(filePath, offset = 0, limit = 2e3) {
		const fileData = this.getFiles()[filePath];
		if (!fileData) return FILE_NOT_FOUND(filePath);
		return formatReadResponse(fileData, offset, limit);
	}
	/**
	 * Read file content as raw FileData.
	 * @throws Error when the file does not exist.
	 */
	readRaw(filePath) {
		const fileData = this.getFiles()[filePath];
		if (!fileData) throw new Error(`File '${filePath}' not found`);
		return fileData;
	}
	/**
	 * Create a new file with content. Fails if the path is empty or the file
	 * already exists.
	 */
	write(filePath, content) {
		const files = this.getFiles();
		if (!filePath || filePath.trim() === "") return {
			success: false,
			error: "File path cannot be empty"
		};
		if (filePath in files) return {
			success: false,
			error: FILE_ALREADY_EXISTS(filePath)
		};
		const newFileData = createFileData(content);
		// Fix: `state.files` may be undefined on a fresh state; getFiles()
		// tolerates that, but the assignment below would throw a TypeError.
		// Initialize the record lazily before the first write.
		if (!this.state.files) this.state.files = {};
		this.state.files[filePath] = newFileData;
		return {
			success: true,
			path: filePath
		};
	}
	/**
	 * Edit a file by replacing string occurrences.
	 * Delegates occurrence validation to performStringReplacement.
	 */
	edit(filePath, oldString, newString, replaceAll = false) {
		const fileData = this.getFiles()[filePath];
		if (!fileData) return {
			success: false,
			error: FILE_NOT_FOUND(filePath)
		};
		const result = performStringReplacement(fileDataToString(fileData), oldString, newString, replaceAll);
		if (typeof result === "string") return {
			success: false,
			error: result
		};
		const [newContent, occurrences] = result;
		const newFileData = updateFileData(fileData, newContent);
		this.state.files[filePath] = newFileData;
		return {
			success: true,
			path: filePath,
			occurrences
		};
	}
	/**
	 * Structured search results or error string for invalid input.
	 */
	grepRaw(pattern, path = "/", glob$1 = null) {
		return grepMatchesFromFiles(this.getFiles(), pattern, path, glob$1);
	}
	/**
	 * Structured glob matching returning FileInfo objects.
	 */
	globInfo(pattern, path = "/") {
		const files = this.getFiles();
		const result = globSearchFiles(files, pattern, path);
		if (result === "No files found") return [];
		const paths = result.split("\n");
		const infos = [];
		for (const p of paths) {
			const fd = files[p];
			const size = fd ? fd.content.join("\n").length : 0;
			infos.push({
				path: p,
				is_dir: false,
				size,
				modified_at: fd?.modified_at || ""
			});
		}
		return infos;
	}
};
|
|
832
|
+
|
|
833
|
+
//#endregion
|
|
834
|
+
//#region src/utils/eviction.ts
|
|
835
|
+
/**
 * Sanitize a tool call ID for use as a filename.
 * Replaces characters that are invalid in file paths with underscores and
 * caps the result at 100 characters.
 */
function sanitizeToolCallId(toolCallId) {
	const safe = toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_");
	return safe.slice(0, 100);
}
|
|
842
|
+
/**
 * Estimate the number of tokens in a string.
 * Simple character-count approximation: length / CHARS_PER_TOKEN, rounded up.
 */
function estimateTokens(text) {
	const charCount = text.length;
	return Math.ceil(charCount / CHARS_PER_TOKEN);
}
|
|
849
|
+
/**
 * Check if a tool result should be evicted based on its estimated size.
 *
 * @param tokenLimit - Threshold in tokens; defaults to the module limit.
 */
function shouldEvict(result, tokenLimit = DEFAULT_EVICTION_TOKEN_LIMIT) {
	const estimated = estimateTokens(result);
	return estimated > tokenLimit;
}
|
|
855
|
+
/**
 * Evict a large tool result to the filesystem.
 *
 * When the result exceeds the token limit it is persisted via the backend
 * under `/large_tool_results/` and a short pointer message is returned in
 * its place; otherwise (or when the write fails) the result passes through
 * unchanged.
 *
 * @param options - { result, toolCallId, toolName, backend, tokenLimit? }
 * @returns `{ evicted, content, evictedPath? }`
 */
async function evictToolResult(options) {
	const { result, toolCallId, toolName, backend, tokenLimit = DEFAULT_EVICTION_TOKEN_LIMIT } = options;
	if (!shouldEvict(result, tokenLimit)) return {
		evicted: false,
		content: result
	};
	const targetPath = `/large_tool_results/${toolName}_${sanitizeToolCallId(toolCallId)}.txt`;
	const writeOutcome = await backend.write(targetPath, result);
	if (writeOutcome.error) {
		// Persisting failed — fall back to returning the full result inline.
		console.warn(`Failed to evict tool result: ${writeOutcome.error}`);
		return {
			evicted: false,
			content: result
		};
	}
	return {
		evicted: true,
		content: `Tool result too large (~${estimateTokens(result)} tokens). Content saved to ${targetPath}. Use read_file to access the full content.`,
		evictedPath: targetPath
	};
}
|
|
899
|
+
/**
 * Create a tool result wrapper that automatically evicts large results.
 *
 * @param backend - Backend instance or factory for filesystem operations
 * @param state - Current agent state (passed to factory backends)
 * @param tokenLimit - Token limit before eviction
 * @returns Async function `(result, toolCallId, toolName) => content`
 */
function createToolResultWrapper(backend, state, tokenLimit = DEFAULT_EVICTION_TOKEN_LIMIT) {
	// Resolve a factory backend once, up front.
	const resolvedBackend = typeof backend === "function" ? backend(state) : backend;
	return async (result, toolCallId, toolName) => {
		const outcome = await evictToolResult({
			result,
			toolCallId,
			toolName,
			backend: resolvedBackend,
			tokenLimit
		});
		return outcome.content;
	};
}
|
|
919
|
+
// Module-level constants for src/utils/eviction.ts; assigned lazily by the
// bundler's deferred initializer below.
var DEFAULT_EVICTION_TOKEN_LIMIT, CHARS_PER_TOKEN;
// Deferred one-shot initializer (rolldown `__esmMin` wrapper): pulls the
// configured eviction limit from the limits module and fixes the
// chars-per-token approximation at 4.
var init_eviction = __esmMin((() => {
	init_limits();
	DEFAULT_EVICTION_TOKEN_LIMIT = DEFAULT_EVICTION_TOKEN_LIMIT$1;
	CHARS_PER_TOKEN = 4;
}));
|
|
925
|
+
|
|
926
|
+
//#endregion
|
|
927
|
+
//#region src/tools/filesystem.ts
/**
 * Filesystem tools for virtual file operations.
 */
// Run the deferred initializers this region depends on (eviction limits and
// event-creator helpers).
init_eviction();
init_events();
// Model-facing descriptions for the builtin filesystem tools.
const LS_TOOL_DESCRIPTION = "List files and directories in a directory. Paths are relative to the working directory.";
const READ_FILE_TOOL_DESCRIPTION = "Read the contents of a file. Paths are relative to the working directory.";
const WRITE_FILE_TOOL_DESCRIPTION = "Write content to a new file. Returns an error if the file already exists. Paths are relative to the working directory.";
const EDIT_FILE_TOOL_DESCRIPTION = "Edit a file by replacing a specific string with a new string. Paths are relative to the working directory.";
const GLOB_TOOL_DESCRIPTION = "Find files matching a glob pattern (e.g., '**/*.py' for all Python files). Paths are relative to the working directory.";
const GREP_TOOL_DESCRIPTION = "Search for a regex pattern in files. Returns matching files and line numbers. Paths are relative to the working directory.";
|
|
939
|
+
/**
 * Resolve a backend from a factory function or a direct instance.
 */
function getBackend$1(backend, state) {
	return typeof backend === "function" ? backend(state) : backend;
}
|
|
946
|
+
/**
 * Create the ls tool: lists a directory via the backend's `lsInfo` and
 * renders one line per entry (directories annotated, non-zero sizes shown).
 */
function createLsTool(state, backend, onEvent) {
	return tool({
		description: LS_TOOL_DESCRIPTION,
		inputSchema: z.object({ path: z.string().default(".").describe("Directory path to list (default: current directory)") }),
		execute: async ({ path }) => {
			const target = path || ".";
			const infos = await getBackend$1(backend, state).lsInfo(target);
			if (onEvent) onEvent({
				type: "ls",
				path: target,
				count: infos.length
			});
			if (infos.length === 0) return `No files found in ${path}`;
			const rendered = infos.map((info) => {
				if (info.is_dir) return `${info.path} (directory)`;
				const sizeSuffix = info.size ? ` (${info.size} bytes)` : "";
				return `${info.path}${sizeSuffix}`;
			});
			return rendered.join("\n");
		}
	});
}
|
|
971
|
+
/**
 * Create the read_file tool: reads via the backend and, when an eviction
 * limit is configured, routes oversized results through evictToolResult.
 */
function createReadFileTool(state, backend, evictionLimit, onEvent) {
	return tool({
		description: READ_FILE_TOOL_DESCRIPTION,
		inputSchema: z.object({
			file_path: z.string().describe("Path to the file to read (e.g., 'src/main.ts' or './main.ts')"),
			offset: z.number().default(0).describe("Line offset to start reading from (0-indexed)"),
			limit: z.number().default(2e3).describe("Maximum number of lines to read")
		}),
		execute: async ({ file_path, offset, limit }, { toolCallId }) => {
			const resolvedBackend = getBackend$1(backend, state);
			const content = await resolvedBackend.read(file_path, offset ?? 0, limit ?? 2e3);
			if (onEvent) onEvent(createFileReadEvent(file_path, content.split("\n").length));
			if (evictionLimit && evictionLimit > 0) {
				const evicted = await evictToolResult({
					result: content,
					toolCallId: toolCallId || `read_${Date.now()}`,
					toolName: "read_file",
					backend: resolvedBackend,
					tokenLimit: evictionLimit
				});
				return evicted.content;
			}
			return content;
		}
	});
}
|
|
1000
|
+
/**
 * Create the write_file tool: emits a start event, delegates to the backend,
 * and emits a written event only on success.
 */
function createWriteFileTool(state, backend, onEvent) {
	return tool({
		description: WRITE_FILE_TOOL_DESCRIPTION,
		inputSchema: z.object({
			file_path: z.string().describe("Path to the file to write (e.g., 'src/main.ts' or './main.ts')"),
			content: z.string().describe("Content to write to the file")
		}),
		execute: async ({ file_path, content }) => {
			onEvent?.(createFileWriteStartEvent(file_path, content));
			const outcome = await getBackend$1(backend, state).write(file_path, content);
			if (outcome.error) return outcome.error;
			onEvent?.(createFileWrittenEvent(file_path, content));
			return `Successfully wrote to '${file_path}'`;
		}
	});
}
|
|
1019
|
+
/**
 * Create the edit_file tool: delegates string replacement to the backend and
 * emits an edited event only on success.
 */
function createEditFileTool(state, backend, onEvent) {
	return tool({
		description: EDIT_FILE_TOOL_DESCRIPTION,
		inputSchema: z.object({
			file_path: z.string().describe("Path to the file to edit (e.g., 'src/main.ts' or './main.ts')"),
			old_string: z.string().describe("String to be replaced (must match exactly)"),
			new_string: z.string().describe("String to replace with"),
			replace_all: z.boolean().default(false).describe("Whether to replace all occurrences")
		}),
		execute: async ({ file_path, old_string, new_string, replace_all }) => {
			const outcome = await getBackend$1(backend, state).edit(file_path, old_string, new_string, replace_all ?? false);
			if (outcome.error) return outcome.error;
			onEvent?.(createFileEditedEvent(file_path, outcome.occurrences ?? 0));
			return `Successfully replaced ${outcome.occurrences} occurrence(s) in '${file_path}'`;
		}
	});
}
|
|
1039
|
+
/**
 * Create the glob tool: finds matching files via the backend's `globInfo`
 * and returns one path per line.
 */
function createGlobTool(state, backend, onEvent) {
	return tool({
		description: GLOB_TOOL_DESCRIPTION,
		inputSchema: z.object({
			pattern: z.string().describe("Glob pattern (e.g., '*.py', '**/*.ts')"),
			path: z.string().default(".").describe("Base path to search from (default: current directory)")
		}),
		execute: async ({ pattern, path }) => {
			const matches = await getBackend$1(backend, state).globInfo(pattern, path || ".");
			if (onEvent) onEvent({
				type: "glob",
				pattern,
				count: matches.length
			});
			return matches.length === 0 ? `No files found matching pattern '${pattern}'` : matches.map((info) => info.path).join("\n");
		}
	});
}
|
|
1061
|
+
/**
 * Create the grep tool: searches via the backend's `grepRaw`, groups matches
 * under per-file headers, and optionally evicts oversized output.
 */
function createGrepTool(state, backend, evictionLimit, onEvent) {
	return tool({
		description: GREP_TOOL_DESCRIPTION,
		inputSchema: z.object({
			pattern: z.string().describe("Regex pattern to search for"),
			path: z.string().default(".").describe("Base path to search from (default: current directory)"),
			glob: z.string().optional().nullable().describe("Optional glob pattern to filter files (e.g., '*.py')")
		}),
		execute: async ({ pattern, path, glob: glob$1 }, { toolCallId }) => {
			const resolvedBackend = getBackend$1(backend, state);
			const result = await resolvedBackend.grepRaw(pattern, path || ".", glob$1 ?? null);
			const emit = (count) => {
				if (onEvent) onEvent({
					type: "grep",
					pattern,
					count
				});
			};
			// A string result is an error message from the backend (e.g. bad regex).
			if (typeof result === "string") {
				emit(0);
				return result;
			}
			emit(result.length);
			if (result.length === 0) return `No matches found for pattern '${pattern}'`;
			const lines = [];
			let currentFile = null;
			for (const match of result) {
				if (match.path !== currentFile) {
					currentFile = match.path;
					lines.push(`\n${currentFile}:`);
				}
				lines.push(` ${match.line}: ${match.text}`);
			}
			const content = lines.join("\n");
			if (evictionLimit && evictionLimit > 0) {
				const evicted = await evictToolResult({
					result: content,
					toolCallId: toolCallId || `grep_${Date.now()}`,
					toolName: "grep",
					backend: resolvedBackend,
					tokenLimit: evictionLimit
				});
				return evicted.content;
			}
			return content;
		}
	});
}
|
|
1110
|
+
/**
 * Create all filesystem tools.
 *
 * @param state - The shared agent state
 * @param backendOrOptions - Backend instance/factory, or an options object
 *   `{ backend, onEvent, toolResultEvictionLimit }`
 * @param onEvent - Optional event callback (legacy form; ignored when an
 *   options object is given)
 * @returns Toolset keyed by tool name (ls, read_file, write_file, ...)
 */
function createFilesystemTools(state, backendOrOptions, onEvent) {
	let backend;
	let eventCallback = onEvent;
	let evictionLimit;
	const isOptionsObject = backendOrOptions && typeof backendOrOptions === "object" && "backend" in backendOrOptions;
	if (isOptionsObject) {
		backend = backendOrOptions.backend;
		eventCallback = backendOrOptions.onEvent;
		evictionLimit = backendOrOptions.toolResultEvictionLimit;
	} else {
		backend = backendOrOptions;
	}
	// Default to an ephemeral state-backed backend when none is supplied.
	const resolvedBackend = backend || ((s) => new StateBackend(s));
	return {
		ls: createLsTool(state, resolvedBackend, eventCallback),
		read_file: createReadFileTool(state, resolvedBackend, evictionLimit, eventCallback),
		write_file: createWriteFileTool(state, resolvedBackend, eventCallback),
		edit_file: createEditFileTool(state, resolvedBackend, eventCallback),
		glob: createGlobTool(state, resolvedBackend, eventCallback),
		grep: createGrepTool(state, resolvedBackend, evictionLimit, eventCallback)
	};
}
|
|
1136
|
+
/**
 * Individual builtin tool references for selective subagent configuration.
 * These are references to the creator functions, not instances.
 */
// Each alias simply re-exports the corresponding create*Tool factory under
// the model-facing tool name.
const ls = createLsTool;
const read_file = createReadFileTool;
const write_file = createWriteFileTool;
const edit_file = createEditFileTool;
const glob = createGlobTool;
const grep = createGrepTool;
|
|
1146
|
+
|
|
1147
|
+
//#endregion
|
|
1148
|
+
//#region src/utils/approval.ts
|
|
1149
|
+
/**
 * Utilities for applying tool approval configuration.
 */
/**
 * Check if approval is needed based on config.
 *
 * Booleans are taken verbatim; otherwise a `shouldApprove(args)` predicate
 * is consulted, defaulting to "approval required".
 */
async function checkNeedsApproval(config, args) {
	if (typeof config === "boolean") return config;
	return config.shouldApprove ? config.shouldApprove(args) : true;
}
|
|
1160
|
+
/**
 * Convert an interruptOn config entry to a `needsApproval` value for a tool:
 * a boolean passes through, a `shouldApprove` predicate is used when present,
 * and anything else means "always require approval".
 */
function configToNeedsApproval(config) {
	if (typeof config === "boolean") return config;
	return config.shouldApprove || true;
}
|
|
1168
|
+
// Monotonic counter ensuring approval ids are unique within a process even
// when generated in the same millisecond.
let approvalCounter = 0;
/**
 * Generate a unique approval request id ("approval-<timestamp>-<counter>").
 */
function generateApprovalId() {
	approvalCounter += 1;
	return `approval-${Date.now()}-${approvalCounter}`;
}
|
|
1172
|
+
/**
 * Apply interruptOn configuration to a toolset.
 *
 * This adds the `needsApproval` property to tools based on the config;
 * tools without a config entry (or with `false`) pass through unchanged.
 *
 * @param tools - The original toolset
 * @param interruptOn - Configuration mapping tool names to approval settings
 * @returns New toolset with needsApproval applied
 *
 * @example
 * ```typescript
 * const approvedTools = applyInterruptConfig(tools, {
 *   write_file: true,
 *   execute: { shouldApprove: (args) => args.command.includes('rm') },
 * });
 * ```
 */
function applyInterruptConfig(tools, interruptOn) {
	if (!interruptOn) return tools;
	const result = {};
	for (const [name, tool$1] of Object.entries(tools)) {
		const config = interruptOn[name];
		const passThrough = config === void 0 || config === false;
		result[name] = passThrough ? tool$1 : {
			...tool$1,
			needsApproval: configToNeedsApproval(config)
		};
	}
	return result;
}
|
|
1202
|
+
/**
 * Wrap tools with approval checking that intercepts execution.
 *
 * Unlike applyInterruptConfig, which only sets `needsApproval` metadata,
 * this wraps each configured tool's execute function so that approval is
 * requested before the original execute runs. Without an approval callback,
 * tools requiring approval are auto-denied. Tools with no config entry,
 * a `false` entry, or no execute function pass through unchanged.
 *
 * @param tools - The original toolset
 * @param interruptOn - Configuration mapping tool names to approval settings
 * @param onApprovalRequest - Callback to request approval from user (optional)
 * @returns New toolset with wrapped execute functions
 */
function wrapToolsWithApproval(tools, interruptOn, onApprovalRequest) {
	if (!interruptOn) return tools;
	const wrapped = {};
	for (const [name, existingTool] of Object.entries(tools)) {
		const config = interruptOn[name];
		const originalExecute = existingTool.execute;
		const passThrough = config === void 0 || config === false || !originalExecute;
		if (passThrough) {
			wrapped[name] = existingTool;
			continue;
		}
		wrapped[name] = tool({
			description: existingTool.description,
			inputSchema: existingTool.inputSchema,
			execute: async (args, options) => {
				const needsApproval = await checkNeedsApproval(config, args);
				if (needsApproval) {
					if (!onApprovalRequest) return `Tool execution denied. No approval callback provided. The ${name} tool was not executed.`;
					const approvalId = generateApprovalId();
					const approved = await onApprovalRequest({
						approvalId,
						toolCallId: options?.toolCallId || approvalId,
						toolName: name,
						args
					});
					if (!approved) return `Tool execution denied by user. The ${name} tool was not executed.`;
				}
				return originalExecute(args, options);
			}
		});
	}
	return wrapped;
}
|
|
1248
|
+
|
|
1249
|
+
//#endregion
|
|
1250
|
+
//#region src/tools/web.ts
/**
 * Web tools for search and HTTP requests.
 * Based on LangChain DeepAgents implementation.
 */
// Lazy export registry (rolldown `__exportAll` wrapper) for this region's
// public names; each getter defers to the binding defined later in the file.
var web_exports = /* @__PURE__ */ __exportAll({
	createFetchUrlTool: () => createFetchUrlTool,
	createHttpRequestTool: () => createHttpRequestTool,
	createWebSearchTool: () => createWebSearchTool,
	createWebTools: () => createWebTools,
	fetch_url: () => fetch_url,
	htmlToMarkdown: () => htmlToMarkdown,
	http_request: () => http_request,
	web_search: () => web_search
});
|
|
1265
|
+
/**
 * Helper to resolve a backend from a factory or instance.
 * Returns null when no backend is configured.
 */
function getBackend(backend, state) {
	if (!backend) return null;
	return typeof backend === "function" ? backend(state) : backend;
}
|
|
1273
|
+
/**
 * Convert HTML to Markdown with article extraction.
 *
 * Uses Mozilla Readability to extract the main article, then Turndown to
 * convert it to Markdown (ATX headings, fenced code blocks). When no article
 * can be extracted, falls back to the trimmed body text. Any failure is
 * reported as an error string rather than thrown.
 */
function htmlToMarkdown(html, url) {
	try {
		const dom = new JSDOM(html, { url });
		const doc = dom.window.document;
		const article = new Readability(doc).parse();
		if (!article) {
			const fallback = doc.body?.textContent || "";
			return fallback.trim();
		}
		const converter = new TurndownService({
			headingStyle: "atx",
			codeBlockStyle: "fenced"
		});
		const markdown = converter.turndown(article.content || "");
		return article.title ? `# ${article.title}\n\n${markdown}` : markdown;
	} catch (error) {
		return `Error converting HTML to Markdown: ${error instanceof Error ? error.message : String(error)}`;
	}
}
|
|
1292
|
+
/**
 * Create the web_search tool.
 *
 * Searches via the Tavily API and formats results as Markdown sections.
 * Emits start/finish events around the search when `onEvent` is provided,
 * and routes oversized output through evictToolResult when both an eviction
 * limit and a backend are configured. Tavily failures are returned as an
 * error string (never thrown).
 */
function createWebSearchTool(state, options) {
	const { backend, onEvent, toolResultEvictionLimit, tavilyApiKey } = options;
	return tool({
		description: WEB_SEARCH_TOOL_DESCRIPTION,
		inputSchema: z.object({
			query: z.string().describe("The search query (be specific and detailed for best results)"),
			max_results: z.number().default(5).describe("Number of results to return (1-20)"),
			topic: z.enum([
				"general",
				"news",
				"finance"
			]).default("general").describe("Search topic category"),
			include_raw_content: z.boolean().default(false).describe("Include full page content (warning: uses more tokens)")
		}),
		execute: async ({ query, max_results, topic, include_raw_content }, { toolCallId }) => {
			if (onEvent) onEvent(createWebSearchStartEvent(query));
			try {
				// Tavily's includeRawContent takes "text" (not a boolean) to request raw page text.
				const results = (await tavily({ apiKey: tavilyApiKey }).search(query, {
					maxResults: max_results,
					topic,
					includeRawContent: include_raw_content ? "text" : false
				})).results || [];
				const formattedResults = results.map((r, i) => `## Result ${i + 1}: ${r.title}\nURL: ${r.url}\nScore: ${r.score?.toFixed(2) || "N/A"}\nContent: ${r.content}\n`).join("\n---\n\n");
				const output = `Found ${results.length} results for query: "${query}"\n\n${formattedResults}`;
				if (onEvent) onEvent(createWebSearchFinishEvent(query, results.length));
				// Evict oversized output only when a limit and a backend are both configured.
				if (toolResultEvictionLimit && toolResultEvictionLimit > 0 && backend) {
					const resolvedBackend = getBackend(backend, state);
					if (resolvedBackend) return (await evictToolResult({
						result: output,
						toolCallId: toolCallId || `web_search_${Date.now()}`,
						toolName: "web_search",
						backend: resolvedBackend,
						tokenLimit: toolResultEvictionLimit
					})).content;
				}
				return output;
			} catch (error) {
				// Report the failure inline; a finish event with count 0 is still emitted.
				const errorMessage = WEB_SEARCH_ERROR(error.message);
				if (onEvent) onEvent(createWebSearchFinishEvent(query, 0));
				return errorMessage;
			}
		}
	});
}
|
|
1339
|
+
/**
 * Create the http_request tool.
 *
 * Performs an HTTP request with optional query params, headers and body,
 * parses JSON responses when possible, and emits start/finish events.
 *
 * @param state - Shared agent state; used to resolve the eviction backend.
 * @param options - { backend, onEvent, toolResultEvictionLimit, defaultTimeout }
 * @returns An AI SDK tool that performs HTTP requests.
 */
function createHttpRequestTool(state, options) {
  const { backend, onEvent, toolResultEvictionLimit, defaultTimeout } = options;
  return tool({
    description: HTTP_REQUEST_TOOL_DESCRIPTION,
    inputSchema: z.object({
      url: z.string().url().describe("Target URL (must be valid HTTP/HTTPS URL)"),
      method: z.enum([
        "GET",
        "POST",
        "PUT",
        "DELETE",
        "PATCH"
      ]).default("GET").describe("HTTP method"),
      headers: z.record(z.string()).optional().describe("HTTP headers as key-value pairs"),
      body: z.union([z.string(), z.record(z.any())]).optional().describe("Request body (string or JSON object)"),
      params: z.record(z.string()).optional().describe("URL query parameters as key-value pairs"),
      timeout: z.number().default(defaultTimeout).describe("Request timeout in seconds")
    }),
    execute: async ({ url, method, headers, body, params, timeout }, { toolCallId }) => {
      if (onEvent) onEvent(createHttpRequestStartEvent(url, method));
      try {
        const urlObj = new URL(url);
        if (params) Object.entries(params).forEach(([key, value]) => {
          urlObj.searchParams.append(key, value);
        });
        const requestOptions = {
          method,
          // FIX: copy the headers object so the caller-supplied input is
          // never mutated when a JSON body forces Content-Type below.
          headers: { ...headers },
          signal: AbortSignal.timeout(timeout * 1e3)
        };
        if (body) {
          if (typeof body === "string") requestOptions.body = body;
          else {
            requestOptions.body = JSON.stringify(body);
            requestOptions.headers["Content-Type"] = "application/json";
          }
        }
        const response = await fetch(urlObj.toString(), requestOptions);
        const contentType = response.headers.get("content-type") || "";
        // FIX: a fetch Response body may only be consumed once. Previously a
        // failed response.json() was followed by response.text(), which throws
        // ("body used already"). Read the text once, then attempt JSON.parse.
        const rawText = await response.text();
        let content;
        if (contentType.includes("application/json")) {
          try {
            content = JSON.parse(rawText);
          } catch {
            // Declared JSON but unparseable: fall back to the raw text.
            content = rawText;
          }
        } else {
          content = rawText;
        }
        const formattedOutput = `HTTP ${method} ${url}\nStatus: ${response.status}\nSuccess: ${response.ok}\nContent:\n${typeof content === "string" ? content : JSON.stringify(content, null, 2)}`;
        if (onEvent) onEvent(createHttpRequestFinishEvent(response.url, response.status));
        // Optional eviction of oversized results to the backend store.
        if (toolResultEvictionLimit && toolResultEvictionLimit > 0 && backend) {
          const resolvedBackend = getBackend(backend, state);
          if (resolvedBackend) return (await evictToolResult({
            result: formattedOutput,
            toolCallId: toolCallId || `http_request_${Date.now()}`,
            toolName: "http_request",
            backend: resolvedBackend,
            tokenLimit: toolResultEvictionLimit
          })).content;
        }
        return formattedOutput;
      } catch (error) {
        const err = error;
        let errorMessage;
        if (err.name === "TimeoutError" || err.name === "AbortError") errorMessage = REQUEST_TIMEOUT(timeout);
        else errorMessage = `HTTP request error: ${err.message}`;
        // Status 0 signals "no response received" to event consumers.
        if (onEvent) onEvent(createHttpRequestFinishEvent(url, 0));
        return errorMessage;
      }
    }
  });
}
|
|
1410
|
+
/**
 * Create the fetch_url tool.
 *
 * Fetches a page, optionally extracts the main article via Readability,
 * and converts the result to Markdown with Turndown.
 *
 * @param state - Shared agent state; used to resolve the eviction backend.
 * @param options - { backend, onEvent, toolResultEvictionLimit, defaultTimeout }
 * @returns An AI SDK tool that fetches a URL and returns Markdown.
 */
function createFetchUrlTool(state, options) {
  const { backend, onEvent, toolResultEvictionLimit, defaultTimeout } = options;
  return tool({
    description: FETCH_URL_TOOL_DESCRIPTION,
    inputSchema: z.object({
      url: z.string().url().describe("The URL to fetch (must be valid HTTP/HTTPS URL)"),
      timeout: z.number().default(defaultTimeout).describe("Request timeout in seconds"),
      extract_article: z.boolean().default(true).describe("Extract main article content using Readability (disable for non-article pages)")
    }),
    execute: async ({ url, timeout, extract_article }, { toolCallId }) => {
      if (onEvent) onEvent(createFetchUrlStartEvent(url));
      try {
        const response = await fetch(url, {
          signal: AbortSignal.timeout(timeout * 1e3),
          headers: { "User-Agent": "Mozilla/5.0 (compatible; DeepAgents/1.0)" }
        });
        if (!response.ok) {
          const errorMsg = `HTTP error: ${response.status} ${response.statusText}`;
          if (onEvent) onEvent(createFetchUrlFinishEvent(response.url, false));
          return errorMsg;
        }
        const html = await response.text();
        let contentToConvert = html;
        // FIX: only build a JSDOM document when article extraction is
        // requested. Previously the (potentially large) HTML was parsed into
        // a DOM unconditionally and then discarded when extract_article was
        // false — pure wasted work.
        if (extract_article) {
          try {
            const dom = new JSDOM(html, { url });
            const article = new Readability(dom.window.document).parse();
            if (article && article.content) contentToConvert = article.content;
          } catch (readabilityError) {
            // FIX: the caught error was previously dropped; include it so
            // extraction failures are diagnosable.
            console.warn("Readability extraction failed, using full HTML", readabilityError);
          }
        }
        const markdown = new TurndownService({
          headingStyle: "atx",
          codeBlockStyle: "fenced"
        }).turndown(contentToConvert);
        if (onEvent) onEvent(createFetchUrlFinishEvent(response.url, true));
        // Optional eviction of oversized results to the backend store.
        if (toolResultEvictionLimit && toolResultEvictionLimit > 0 && backend) {
          const resolvedBackend = getBackend(backend, state);
          if (resolvedBackend) return (await evictToolResult({
            result: markdown,
            toolCallId: toolCallId || `fetch_url_${Date.now()}`,
            toolName: "fetch_url",
            backend: resolvedBackend,
            tokenLimit: toolResultEvictionLimit
          })).content;
        }
        return markdown;
      } catch (error) {
        const err = error;
        let errorMessage;
        if (err.name === "TimeoutError" || err.name === "AbortError") errorMessage = REQUEST_TIMEOUT(timeout);
        else errorMessage = `Error fetching URL: ${err.message}`;
        if (onEvent) onEvent(createFetchUrlFinishEvent(url, false));
        return errorMessage;
      }
    }
  });
}
|
|
1470
|
+
/**
 * Create all web tools (web_search, http_request, fetch_url).
 *
 * Tools are only created when a Tavily API key is available (either passed
 * in options or read from the TAVILY_API_KEY environment variable);
 * otherwise a warning is logged and an empty tool set is returned.
 *
 * @param state - Shared agent state handed to each tool factory.
 * @param options - Optional { backend, onEvent, toolResultEvictionLimit,
 *   tavilyApiKey, defaultTimeout }.
 * @returns A ToolSet with the three web tools, or {} when no API key exists.
 */
function createWebTools(state, options) {
  const {
    backend,
    onEvent,
    toolResultEvictionLimit,
    tavilyApiKey = process.env.TAVILY_API_KEY,
    defaultTimeout = DEFAULT_TIMEOUT_SECONDS
  } = options || {};
  if (!tavilyApiKey) {
    console.warn("Tavily API key not found. Web tools (web_search, fetch_url, http_request) will not be available. Set TAVILY_API_KEY environment variable to enable web tools.");
    return {};
  }
  const searchOptions = { backend, onEvent, toolResultEvictionLimit, tavilyApiKey };
  const requestOptions = { backend, onEvent, toolResultEvictionLimit, defaultTimeout };
  return {
    web_search: createWebSearchTool(state, searchOptions),
    http_request: createHttpRequestTool(state, requestOptions),
    fetch_url: createFetchUrlTool(state, requestOptions)
  };
}
|
|
1501
|
+
// Bundler-hoisted declarations: the tool descriptions and creator aliases are
// assigned lazily inside init_web (an __esmMin lazy-module initializer) so
// that dependent modules are only initialized on first use.
var WEB_SEARCH_TOOL_DESCRIPTION, HTTP_REQUEST_TOOL_DESCRIPTION, FETCH_URL_TOOL_DESCRIPTION, web_search, http_request, fetch_url;
var init_web = __esmMin((() => {
  // Initialize the modules whose exports the web tools reference.
  init_eviction();
  init_errors();
  init_limits();
  init_events();
  // Runtime string constants — these are sent to the model as tool
  // descriptions, so their exact wording is behavior.
  WEB_SEARCH_TOOL_DESCRIPTION = `Search the web using Tavily API for current information, news, and documentation.

Returns an array of search results with titles, URLs, relevant excerpts, and relevance scores.

IMPORTANT AGENT INSTRUCTIONS:
- You MUST synthesize information from search results into a coherent answer
- NEVER show raw JSON or result objects to the user
- Cite sources by including URLs in your response
- If search fails or returns no results, explain this clearly to the user`;
  HTTP_REQUEST_TOOL_DESCRIPTION = `Make HTTP requests to APIs and web services.

Supports GET, POST, PUT, DELETE, PATCH methods with custom headers, query parameters, and request bodies.

Returns structured response with status code, headers, and parsed content (JSON or text).`;
  FETCH_URL_TOOL_DESCRIPTION = `Fetch web page content and convert HTML to clean Markdown format.

Uses Mozilla Readability to extract main article content and Turndown to convert to Markdown.

Returns the page content as formatted Markdown, suitable for analysis and summarization.

IMPORTANT AGENT INSTRUCTIONS:
- Use this tool to read documentation, articles, and web pages
- The content is already cleaned and formatted as Markdown
- Cite the URL when referencing fetched content`;
  // Aliases exposing the creator functions under the tool names, for
  // selective subagent configuration (see isBuiltinToolCreator below).
  web_search = createWebSearchTool;
  http_request = createHttpRequestTool;
  fetch_url = createFetchUrlTool;
}));
|
|
1535
|
+
|
|
1536
|
+
//#endregion
|
|
1537
|
+
//#region src/tools/execute.ts
|
|
1538
|
+
/**
|
|
1539
|
+
* Execute tool for running shell commands in sandbox backends.
|
|
1540
|
+
*
|
|
1541
|
+
* This tool is only available when the backend implements SandboxBackendProtocol.
|
|
1542
|
+
*/
|
|
1543
|
+
/**
 * Tool description for the execute tool.
 *
 * Sent verbatim to the model as the tool's description — the exact wording
 * is runtime behavior, not documentation.
 */
const EXECUTE_TOOL_DESCRIPTION = `Execute a shell command in the sandbox environment.

Use this tool to:
- Run build commands (npm install, npm run build, bun install)
- Run tests (npm test, bun test, pytest)
- Execute scripts (node script.js, python script.py)
- Check system state (ls, cat, pwd, which)
- Install dependencies
- Run any shell command

The command runs in the sandbox's working directory. Commands have a timeout limit.

IMPORTANT:
- Always check the exit code to determine success (0 = success)
- Long-running commands may timeout
- Use && to chain commands that depend on each other
- Use ; to run commands sequentially regardless of success`;
|
|
1563
|
+
/**
 * Create an execute tool for running shell commands.
 *
 * Only meaningful when the backend implements SandboxBackendProtocol
 * (i.e. exposes `execute(command)` and an `id`).
 *
 * @param options - { backend, onEvent?, description? }
 * @returns An AI SDK tool that executes shell commands in the sandbox.
 *
 * @example Basic usage
 * ```typescript
 * import { LocalSandbox, createExecuteTool } from 'deepagentsdk';
 *
 * const sandbox = new LocalSandbox({ cwd: './workspace' });
 * const executeTool = createExecuteTool({ backend: sandbox });
 *
 * const agent = createDeepAgent({
 *   model: anthropic('claude-sonnet-4-20250514'),
 *   backend: sandbox,
 *   tools: { execute: executeTool },
 * });
 * ```
 *
 * @example With event streaming
 * ```typescript
 * const executeTool = createExecuteTool({
 *   backend: sandbox,
 *   onEvent: (event) => {
 *     if (event.type === 'execute-start') console.log(`Running: ${event.command}`);
 *     else if (event.type === 'execute-finish') console.log(`Exit code: ${event.exitCode}`);
 *   },
 * });
 * ```
 */
function createExecuteTool(options) {
  const { backend, onEvent, description } = options;
  return tool({
    description: description || EXECUTE_TOOL_DESCRIPTION,
    inputSchema: z.object({ command: z.string().describe("The shell command to execute (e.g., 'npm install', 'ls -la', 'cat file.txt')") }),
    execute: async ({ command }) => {
      if (onEvent) onEvent({
        type: "execute-start",
        command,
        sandboxId: backend.id
      });
      const result = await backend.execute(command);
      if (onEvent) onEvent({
        type: "execute-finish",
        command,
        exitCode: result.exitCode,
        truncated: result.truncated,
        sandboxId: backend.id
      });
      // Assemble the tool result: command output followed by an exit-code
      // summary line; a null exitCode is reported as a likely timeout.
      const parts = [];
      if (result.output) parts.push(result.output);
      if (result.exitCode === 0) parts.push(`\n[Exit code: 0 (success)]`);
      else if (result.exitCode !== null) parts.push(`\n[Exit code: ${result.exitCode} (failure)]`);
      else parts.push(`\n[Exit code: unknown (possibly timed out)]`);
      // FIX: the truncation note previously had no leading newline, so with
      // join("") it was glued onto the exit-code line:
      // "...(success)][Output truncated due to size limit]".
      if (result.truncated) parts.push(`\n[Output truncated due to size limit]`);
      return parts.join("");
    }
  });
}
|
|
1627
|
+
/**
 * Convenience wrapper: build an execute tool from just a sandbox backend,
 * with no event handling or custom description.
 *
 * @param backend - The sandbox backend to run commands on.
 * @returns An AI SDK tool that executes shell commands.
 *
 * @example
 * ```typescript
 * const sandbox = new LocalSandbox({ cwd: './workspace' });
 * const tools = {
 *   execute: createExecuteToolFromBackend(sandbox),
 * };
 * ```
 */
function createExecuteToolFromBackend(backend) {
  const executeOptions = { backend };
  return createExecuteTool(executeOptions);
}
|
|
1645
|
+
/**
 * Individual builtin tool reference for selective subagent configuration.
 * This is a reference to the creator function, not an instance.
 */
const execute = createExecuteTool;

//#endregion
//#region src/tools/subagent.ts
/**
 * Subagent tool for task delegation using AI SDK v6 ToolLoopAgent.
 */
// Bundler-generated lazy-module initializers: run the modules whose exports
// the subagent machinery below references.
init_limits();
init_events();
init_web();
|
|
1659
|
+
/**
 * Check whether a value is one of the builtin tool creator functions
 * (by reference identity).
 *
 * @param value - Candidate value from a subagent tool configuration array.
 * @returns True iff the value is a known builtin creator function.
 */
function isBuiltinToolCreator(value) {
  if (typeof value !== "function") return false;
  const builtinCreators = [
    createWebSearchTool,
    createHttpRequestTool,
    createFetchUrlTool,
    createLsTool,
    createReadFileTool,
    createWriteFileTool,
    createEditFileTool,
    createGlobTool,
    createGrepTool,
    createTodosTool,
    createExecuteTool
  ];
  return builtinCreators.includes(value);
}
|
|
1665
|
+
/**
 * Instantiate a builtin tool creator with the given context.
 *
 * Dispatches on the creator's identity and returns a one-entry ToolSet
 * keyed by the tool's canonical name (or {} when the tool cannot be
 * enabled). Throws for the execute tool and for unknown creators.
 *
 * @param creator - One of the builtin creator functions (see isBuiltinToolCreator).
 * @param state - Subagent state passed through to the tool factory.
 * @param options - { backend, onEvent, toolResultEvictionLimit }.
 * @returns A ToolSet containing the single instantiated tool.
 */
function instantiateBuiltinTool(creator, state, options) {
  const { backend, onEvent, toolResultEvictionLimit } = options;
  // Read the key at call time; an empty string disables web_search below.
  const tavilyApiKey = process.env.TAVILY_API_KEY || "";
  const defaultTimeout = DEFAULT_TIMEOUT_SECONDS;
  if (creator === createWebSearchTool) {
    if (!tavilyApiKey) {
      // Warn-and-skip rather than throw, so the subagent stays usable
      // without the optional web search capability.
      console.warn("web_search tool requested but TAVILY_API_KEY not set");
      return {};
    }
    return { web_search: createWebSearchTool(state, {
      backend,
      onEvent,
      toolResultEvictionLimit,
      tavilyApiKey
    }) };
  }
  if (creator === createHttpRequestTool) return { http_request: createHttpRequestTool(state, {
    backend,
    onEvent,
    toolResultEvictionLimit,
    defaultTimeout
  }) };
  if (creator === createFetchUrlTool) return { fetch_url: createFetchUrlTool(state, {
    backend,
    onEvent,
    toolResultEvictionLimit,
    defaultTimeout
  }) };
  // Filesystem tools use positional arguments rather than an options object.
  if (creator === createLsTool) return { ls: createLsTool(state, backend, onEvent) };
  if (creator === createReadFileTool) return { read_file: createReadFileTool(state, backend, toolResultEvictionLimit, onEvent) };
  if (creator === createWriteFileTool) return { write_file: createWriteFileTool(state, backend, onEvent) };
  if (creator === createEditFileTool) return { edit_file: createEditFileTool(state, backend, onEvent) };
  if (creator === createGlobTool) return { glob: createGlobTool(state, backend, onEvent) };
  if (creator === createGrepTool) return { grep: createGrepTool(state, backend, toolResultEvictionLimit, onEvent) };
  if (creator === createTodosTool) return { write_todos: createTodosTool(state, onEvent) };
  // execute needs a SandboxBackendProtocol, which this path cannot guarantee.
  if (creator === createExecuteTool) throw new Error("execute tool cannot be used via selective tools - it requires a SandboxBackendProtocol");
  throw new Error(`Unknown builtin tool creator: ${creator}`);
}
|
|
1706
|
+
/**
 * Process a subagent tool configuration (array or ToolSet) into a ToolSet.
 *
 * - Falsy config: empty ToolSet.
 * - Non-array config: returned unchanged (already a ToolSet).
 * - Array config: builtin creator functions are instantiated, plain objects
 *   are merged in as-is (later entries win on key collisions); anything
 *   else is ignored.
 *
 * @param toolConfig - Array of creators/ToolSets, a ToolSet, or falsy.
 * @param state - Subagent state handed to instantiated builtin tools.
 * @param options - Context ({ backend, onEvent, ... }) for builtin tools.
 * @returns The merged ToolSet.
 */
function processSubagentTools(toolConfig, state, options) {
  if (!toolConfig) return {};
  if (!Array.isArray(toolConfig)) return toolConfig;
  const merged = {};
  for (const entry of toolConfig) {
    if (isBuiltinToolCreator(entry)) {
      Object.assign(merged, instantiateBuiltinTool(entry, state, options));
    } else if (typeof entry === "object" && entry !== null) {
      Object.assign(merged, entry);
    }
  }
  return merged;
}
|
|
1725
|
+
/**
 * Build the system prompt for a subagent.
 *
 * The custom prompt is followed by the shared base, todo, and filesystem
 * prompt sections, separated by blank lines.
 *
 * @param {string} customPrompt - Subagent-specific instructions.
 * @returns {string} The assembled system prompt.
 */
function buildSubagentSystemPrompt(customPrompt) {
  const sections = [
    customPrompt,
    BASE_PROMPT,
    TODO_SYSTEM_PROMPT,
    FILESYSTEM_SYSTEM_PROMPT
  ];
  return sections.join("\n\n");
}
|
|
1737
|
+
/**
 * Create the task tool for spawning subagents using ToolLoopAgent.
 *
 * Builds a registry of named subagents (optionally including a
 * "general-purpose" default), then returns a tool whose execute() runs the
 * selected subagent to completion and returns its final text (plus any
 * structured output). Subagent option objects override parent ones via
 * spread merging.
 *
 * @param state - Parent agent state; subagent file changes are merged back
 *   into state.files on success.
 * @param options - See destructuring below; parentGenerationOptions /
 *   parentAdvancedOptions provide defaults that per-subagent specs override.
 * @returns An AI SDK tool ("task") that delegates work to a subagent.
 */
function createSubagentTool(state, options) {
  const { defaultModel, defaultTools = {}, subagents = [], includeGeneralPurposeAgent = true, backend, taskDescription = null, onEvent, interruptOn, parentGenerationOptions, parentAdvancedOptions } = options;
  const subagentRegistry = {};
  const subagentDescriptions = [];
  if (includeGeneralPurposeAgent) {
    // Built-in fallback agent with the default prompt/tools/model.
    subagentRegistry["general-purpose"] = {
      systemPrompt: buildSubagentSystemPrompt(DEFAULT_SUBAGENT_PROMPT),
      toolConfig: defaultTools,
      model: defaultModel
    };
    subagentDescriptions.push(`- general-purpose: ${DEFAULT_GENERAL_PURPOSE_DESCRIPTION}`);
  }
  // User-supplied subagents; same-named entries overwrite earlier ones.
  for (const subagent of subagents) {
    subagentRegistry[subagent.name] = {
      systemPrompt: buildSubagentSystemPrompt(subagent.systemPrompt),
      toolConfig: subagent.tools || defaultTools,
      model: subagent.model || defaultModel,
      output: subagent.output
    };
    subagentDescriptions.push(`- ${subagent.name}: ${subagent.description}`);
  }
  return tool({
    description: taskDescription || getTaskToolDescription(subagentDescriptions),
    inputSchema: z.object({
      description: z.string().describe("The task to execute with the selected agent"),
      subagent_type: z.string().describe(`Name of the agent to use. Available: ${Object.keys(subagentRegistry).join(", ")}`)
    }),
    execute: async ({ description, subagent_type }) => {
      // Unknown agent names return an error string (not a throw) so the
      // model can recover and pick a valid type.
      if (!(subagent_type in subagentRegistry)) return `Error: invoked agent of type ${subagent_type}, the only allowed types are ${Object.keys(subagentRegistry).map((k) => `\`${k}\``).join(", ")}`;
      const subagentConfig = subagentRegistry[subagent_type];
      const subagentSpec = subagents.find((sa) => sa.name === subagent_type);
      // Per-subagent settings take precedence over parent-level defaults.
      const subagentInterruptOn = subagentSpec?.interruptOn ?? interruptOn;
      const mergedGenerationOptions = {
        ...parentGenerationOptions,
        ...subagentSpec?.generationOptions
      };
      const mergedAdvancedOptions = {
        ...parentAdvancedOptions,
        ...subagentSpec?.advancedOptions
      };
      if (onEvent) onEvent(createSubagentStartEvent(subagent_type, description));
      // Fresh todo list, but files are shared by reference with the parent
      // so the subagent sees the current workspace.
      const subagentState = {
        todos: [],
        files: state.files
      };
      const customTools = processSubagentTools(subagentConfig.toolConfig, subagentState, {
        backend,
        onEvent
      });
      // customTools spread last: custom entries override the builtins.
      let allTools = {
        write_todos: createTodosTool(subagentState, onEvent),
        ...createFilesystemTools(subagentState, backend, onEvent),
        ...customTools
      };
      allTools = applyInterruptConfig(allTools, subagentInterruptOn);
      try {
        const subagentSettings = {
          model: subagentConfig.model,
          instructions: subagentConfig.systemPrompt,
          tools: allTools,
          stopWhen: stepCountIs(DEFAULT_SUBAGENT_MAX_STEPS),
          ...subagentConfig.output ? { output: Output.object(subagentConfig.output) } : {}
        };
        if (Object.keys(mergedGenerationOptions).length > 0) Object.assign(subagentSettings, mergedGenerationOptions);
        if (mergedAdvancedOptions) {
          // toolChoice/activeTools are stripped: they reference the parent's
          // tool set and would not be valid for the subagent's tools.
          const { toolChoice, activeTools, ...safeAdvancedOptions } = mergedAdvancedOptions;
          Object.assign(subagentSettings, safeAdvancedOptions);
        }
        let subagentStepCount = 0;
        // Relay each subagent step (tool calls paired positionally with
        // their results) to the parent's event stream.
        subagentSettings.onStepFinish = async ({ toolCalls, toolResults }) => {
          if (onEvent && toolCalls && toolCalls.length > 0) {
            const toolCallsWithResults = toolCalls.map((tc, index) => ({
              toolName: tc.toolName,
              args: tc.args,
              result: toolResults[index]
            }));
            onEvent(createSubagentStepEvent(subagentStepCount++, toolCallsWithResults));
          }
        };
        const result = await new ToolLoopAgent(subagentSettings).generate({ prompt: description });
        // Merge the subagent's file changes back into the parent state.
        state.files = {
          ...state.files,
          ...subagentState.files
        };
        const resultText = result.text || "Task completed successfully.";
        let formattedResult = resultText;
        if (subagentConfig.output && "output" in result && result.output) formattedResult = `${resultText}\n\n[Structured Output]\n${JSON.stringify(result.output, null, 2)}`;
        if (onEvent) onEvent(createSubagentFinishEvent(subagent_type, formattedResult));
        return formattedResult;
      } catch (error) {
        // Errors are returned as text so the parent loop can continue.
        const errorMessage = `Error executing subagent: ${error.message}`;
        if (onEvent) onEvent(createSubagentFinishEvent(subagent_type, errorMessage));
        return errorMessage;
      }
    }
  });
}
|
|
1837
|
+
|
|
1838
|
+
//#endregion
|
|
1839
|
+
//#region src/utils/patch-tool-calls.ts
|
|
1840
|
+
/**
 * Determine whether a message is an assistant message that carries at least
 * one "tool-call" content part. Non-assistant messages and string-content
 * assistant messages never qualify.
 *
 * @param message - A model message.
 * @returns {boolean} True iff a tool-call part is present.
 */
function hasToolCalls(message) {
  if (message.role !== "assistant") return false;
  if (!Array.isArray(message.content)) return false;
  for (const part of message.content) {
    const isToolCallPart = typeof part === "object" && part !== null && "type" in part && part.type === "tool-call";
    if (isToolCallPart) return true;
  }
  return false;
}
|
|
1849
|
+
/**
 * Extract all tool call IDs from an assistant message, in content order.
 *
 * @param message - A model message.
 * @returns {string[]} IDs of every tool-call part (empty for non-assistant
 *   messages or string content).
 */
function getToolCallIds(message) {
  if (message.role !== "assistant") return [];
  if (!Array.isArray(message.content)) return [];
  return message.content
    .filter((part) => typeof part === "object" && part !== null && "type" in part && part.type === "tool-call" && "toolCallId" in part)
    .map((part) => part.toolCallId);
}
|
|
1860
|
+
/**
 * Check whether a message is a tool result answering a specific tool call.
 * Matches either a top-level `toolCallId` on the message or a "tool-result"
 * content part carrying that ID.
 *
 * @param message - A model message.
 * @param toolCallId - The tool call ID to look for.
 * @returns {boolean} True iff the message answers that call.
 */
function isToolResultFor(message, toolCallId) {
  if (message.role !== "tool") return false;
  if ("toolCallId" in message && message.toolCallId === toolCallId) return true;
  const { content } = message;
  if (!Array.isArray(content)) return false;
  return content.some((part) =>
    typeof part === "object" && part !== null &&
    "type" in part && part.type === "tool-result" &&
    "toolCallId" in part && part.toolCallId === toolCallId
  );
}
|
|
1870
|
+
/**
 * Create a synthetic tool result message for a cancelled tool call, so the
 * conversation satisfies the model API's call/result pairing requirement.
 *
 * @param toolCallId - ID of the dangling tool call.
 * @param toolName - Name of the tool that was called.
 * @returns A tool-role message containing a single text tool-result part.
 */
function createCancelledToolResult(toolCallId, toolName) {
  const cancellationNotice = `Tool call ${toolName} with id ${toolCallId} was cancelled - another message came in before it could be completed.`;
  const resultPart = {
    type: "tool-result",
    toolCallId,
    toolName,
    output: {
      type: "text",
      value: cancellationNotice
    }
  };
  return {
    role: "tool",
    content: [resultPart]
  };
}
|
|
1887
|
+
/**
 * Look up the tool name recorded on the tool-call part with the given ID.
 *
 * @param message - A model message (expected to be an assistant message).
 * @param toolCallId - The tool call ID to find.
 * @returns {string} The tool name, or "unknown" when not found.
 */
function getToolName(message, toolCallId) {
  if (message.role !== "assistant") return "unknown";
  if (!Array.isArray(message.content)) return "unknown";
  const matchingPart = message.content.find((part) =>
    typeof part === "object" && part !== null &&
    "type" in part && part.type === "tool-call" &&
    "toolCallId" in part && part.toolCallId === toolCallId &&
    "toolName" in part
  );
  return matchingPart ? matchingPart.toolName : "unknown";
}
|
|
1897
|
+
/**
 * Patch dangling tool calls in a message array.
 *
 * Scans for assistant messages whose tool calls have no corresponding tool
 * result later in the array, and inserts synthetic "cancelled" results
 * immediately after the assistant message. The input array is not modified.
 *
 * @param messages - Array of messages to patch.
 * @returns A new array with synthetic results inserted (or the original
 *   array when it is empty/nullish).
 *
 * @example
 * ```typescript
 * const messages = [
 *   { role: "user", content: "Hello" },
 *   { role: "assistant", content: [{ type: "tool-call", toolCallId: "1", toolName: "search" }] },
 *   // Missing tool result for tool call "1"
 *   { role: "user", content: "Never mind" },
 * ];
 * const patched = patchToolCalls(messages);
 * // patched now includes a synthetic tool result for the dangling call
 * ```
 */
function patchToolCalls(messages) {
  if (!messages || messages.length === 0) return messages;
  const patched = [];
  messages.forEach((message, index) => {
    if (!message) return;
    patched.push(message);
    if (!hasToolCalls(message)) return;
    const laterMessages = messages.slice(index + 1);
    for (const toolCallId of getToolCallIds(message)) {
      const answered = laterMessages.some((candidate) => candidate && isToolResultFor(candidate, toolCallId));
      if (!answered) {
        patched.push(createCancelledToolResult(toolCallId, getToolName(message, toolCallId)));
      }
    }
  });
  return patched;
}
|
|
1946
|
+
/**
 * Check whether any assistant tool call in the array lacks a corresponding
 * tool result later in the array.
 *
 * @param messages - Array of messages to check.
 * @returns {boolean} True if at least one dangling tool call exists.
 */
function hasDanglingToolCalls(messages) {
  if (!messages || messages.length === 0) return false;
  return messages.some((message, index) => {
    if (!message || !hasToolCalls(message)) return false;
    const laterMessages = messages.slice(index + 1);
    return getToolCallIds(message).some((toolCallId) => {
      const answered = laterMessages.some((candidate) => candidate && isToolResultFor(candidate, toolCallId));
      return !answered;
    });
  });
}
|
|
1974
|
+
|
|
1975
|
+
//#endregion
|
|
1976
|
+
//#region src/utils/summarization.ts
|
|
1977
|
+
/**
|
|
1978
|
+
* Conversation summarization utility.
|
|
1979
|
+
*
|
|
1980
|
+
* Automatically summarizes older messages when approaching token limits
|
|
1981
|
+
* to prevent context overflow while preserving important context.
|
|
1982
|
+
*/
|
|
1983
|
+
init_eviction();
|
|
1984
|
+
init_limits();
|
|
1985
|
+
/**
|
|
1986
|
+
* Default token threshold before triggering summarization.
|
|
1987
|
+
* 170k tokens is a safe threshold for most models.
|
|
1988
|
+
*/
|
|
1989
|
+
const DEFAULT_SUMMARIZATION_THRESHOLD = DEFAULT_SUMMARIZATION_THRESHOLD$1;
|
|
1990
|
+
/**
|
|
1991
|
+
* Default number of recent messages to keep intact.
|
|
1992
|
+
*/
|
|
1993
|
+
const DEFAULT_KEEP_MESSAGES = DEFAULT_KEEP_MESSAGES$1;
|
|
1994
|
+
/**
 * Estimate total tokens in a messages array.
 *
 * String content is estimated directly; array content contributes only its
 * text-bearing parts. Other content shapes contribute nothing.
 */
function estimateMessagesTokens(messages) {
  return messages.reduce((runningTotal, message) => {
    const { content } = message;
    if (typeof content === "string") return runningTotal + estimateTokens(content);
    if (!Array.isArray(content)) return runningTotal;
    const partTokens = content
      .filter((part) => typeof part === "object" && part !== null && "text" in part)
      .reduce((sum, part) => sum + estimateTokens(String(part.text)), 0);
    return runningTotal + partTokens;
  }, 0);
}
|
|
2005
|
+
/**
 * Extract text content from a message.
 *
 * Plain-string content is returned as-is. Array content is rendered part by
 * part: text parts verbatim, tool calls/results as bracketed placeholders,
 * everything else dropped. Any other content shape yields "".
 */
function getMessageText(message) {
  const { content } = message;
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return "";
  const renderPart = (part) => {
    if (typeof part !== "object" || part === null) return "";
    if ("text" in part) return String(part.text);
    if ("type" in part && part.type === "tool-call") return `[Tool call: ${part.toolName || "unknown"}]`;
    if ("type" in part && part.type === "tool-result") return `[Tool result]`;
    return "";
  };
  return content.map(renderPart).filter(Boolean).join("\n");
}
|
|
2020
|
+
/**
 * Format messages for summarization prompt.
 *
 * Each message becomes "<Label>: <text>"; any role other than "user" or
 * "assistant" is labeled "System". Entries are joined by blank lines.
 */
function formatMessagesForSummary(messages) {
  const labelFor = (role) => {
    if (role === "user") return "User";
    if (role === "assistant") return "Assistant";
    return "System";
  };
  const lines = messages.map((msg) => `${labelFor(msg.role)}: ${getMessageText(msg)}`);
  return lines.join("\n\n");
}
|
|
2028
|
+
/**
 * Generate a summary of conversation messages.
 *
 * Builds a generateText call with a fixed summarizer system prompt and the
 * formatted conversation as the user prompt.
 *
 * @param messages - Messages to compress into a summary
 * @param model - Language model used to produce the summary
 * @param generationOptions - Optional settings merged over the defaults
 * @param advancedOptions - Optional settings merged last (highest precedence)
 * @returns The generated summary text
 */
async function generateSummary(messages, model, generationOptions, advancedOptions) {
	const generateTextOptions = {
		model,
		system: `You are a conversation summarizer. Your task is to create a concise but comprehensive summary of the conversation that preserves:
1. Key decisions and conclusions
2. Important context and background information
3. Any tasks or todos mentioned
4. Technical details that may be referenced later
5. The overall flow and progression of the conversation

Keep the summary focused and avoid redundancy. The summary should allow someone to understand the conversation context without reading the full history.`,
		prompt: `Please summarize the following conversation:\n\n${formatMessagesForSummary(messages)}`
	};
	// Caller-supplied options intentionally overwrite the defaults above;
	// advancedOptions is assigned last, so it wins over generationOptions.
	// NOTE(review): this also allows overriding model/system/prompt — confirm
	// whether that is intended.
	if (generationOptions) Object.assign(generateTextOptions, generationOptions);
	if (advancedOptions) Object.assign(generateTextOptions, advancedOptions);
	return (await generateText(generateTextOptions)).text;
}
|
|
2048
|
+
/**
 * Summarize older messages when approaching token limits.
 *
 * Checks whether the estimated total tokens exceed the threshold. If so, the
 * older portion of the history is replaced with a single system message
 * containing a generated summary, while the most recent `keepMessages`
 * messages are kept intact.
 *
 * @param messages - Array of conversation messages
 * @param options - Summarization options (model, tokenThreshold, keepMessages,
 *   generationOptions, advancedOptions)
 * @returns Processed messages with optional summary; `summarized` indicates
 *   whether any compression happened, and `tokensAfter` is present only then
 *
 * @example
 * ```typescript
 * import { anthropic } from '@ai-sdk/anthropic';
 *
 * const result = await summarizeIfNeeded(messages, {
 *   model: anthropic('claude-haiku-4-5-20251001'),
 *   tokenThreshold: 170000,
 *   keepMessages: 6,
 * });
 *
 * if (result.summarized) {
 *   console.log(`Reduced from ${result.tokensBefore} to ${result.tokensAfter} tokens`);
 * }
 * ```
 */
async function summarizeIfNeeded(messages, options) {
  const { model, tokenThreshold = DEFAULT_SUMMARIZATION_THRESHOLD, keepMessages = DEFAULT_KEEP_MESSAGES } = options;
  const tokensBefore = estimateMessagesTokens(messages);
  // Below the threshold: nothing to do.
  if (tokensBefore < tokenThreshold) return {
    summarized: false,
    messages,
    tokensBefore
  };
  // Not enough history to split into an "old" part and a "recent" part.
  if (messages.length <= keepMessages) return {
    summarized: false,
    messages,
    tokensBefore
  };
  // Use an explicit cut index instead of negative slice offsets. With
  // keepMessages === 0 the previous code did slice(0, -0) === slice(0, 0)
  // (summarize nothing) and slice(-0) === slice(0) (keep everything),
  // prepending a useless summary on top of the full history.
  const cutIndex = messages.length - keepMessages;
  const messagesToSummarize = messages.slice(0, cutIndex);
  const messagesToKeep = messages.slice(cutIndex);
  const newMessages = [{
    role: "system",
    content: `[Previous conversation summary]\n${await generateSummary(messagesToSummarize, model, options.generationOptions, options.advancedOptions)}\n\n[End of summary - recent messages follow]`
  }, ...messagesToKeep];
  return {
    summarized: true,
    messages: newMessages,
    tokensBefore,
    tokensAfter: estimateMessagesTokens(newMessages)
  };
}
|
|
2099
|
+
/**
 * Check if messages need summarization without performing it.
 *
 * @returns True when the estimated token count is at or above the threshold.
 */
function needsSummarization(messages, tokenThreshold = DEFAULT_SUMMARIZATION_THRESHOLD) {
  const estimated = estimateMessagesTokens(messages);
  return estimated >= tokenThreshold;
}
|
|
2105
|
+
|
|
2106
|
+
//#endregion
|
|
2107
|
+
//#region src/agent.ts
|
|
2108
|
+
/**
|
|
2109
|
+
* Deep Agent implementation using Vercel AI SDK v6 ToolLoopAgent.
|
|
2110
|
+
*/
|
|
2111
|
+
init_limits();
|
|
2112
|
+
init_events();
|
|
2113
|
+
/**
 * Build the full system prompt from components.
 *
 * Always includes the custom prompt (if any), base, todo, and filesystem
 * prompts; conditionally appends execute, subagent, and skills sections.
 * Empty sections are dropped before joining.
 */
function buildSystemPrompt(customPrompt, hasSubagents, hasSandbox, skills) {
  const sections = [
    customPrompt || "",
    BASE_PROMPT,
    TODO_SYSTEM_PROMPT,
    FILESYSTEM_SYSTEM_PROMPT,
    ...hasSandbox ? [EXECUTE_SYSTEM_PROMPT] : [],
    ...hasSubagents ? [TASK_SYSTEM_PROMPT] : [],
    ...skills && skills.length > 0 ? [buildSkillsPrompt(skills)] : []
  ];
  return sections.filter(Boolean).join("\n\n");
}
|
|
2128
|
+
/**
|
|
2129
|
+
* Deep Agent wrapper class that provides generate() and stream() methods.
|
|
2130
|
+
* Uses ToolLoopAgent from AI SDK v6 for the agent loop.
|
|
2131
|
+
*/
|
|
2132
|
+
var DeepAgent = class {
|
|
2133
|
+
model;
|
|
2134
|
+
systemPrompt;
|
|
2135
|
+
userTools;
|
|
2136
|
+
maxSteps;
|
|
2137
|
+
backend;
|
|
2138
|
+
subagentOptions;
|
|
2139
|
+
toolResultEvictionLimit;
|
|
2140
|
+
enablePromptCaching;
|
|
2141
|
+
summarizationConfig;
|
|
2142
|
+
hasSandboxBackend;
|
|
2143
|
+
interruptOn;
|
|
2144
|
+
checkpointer;
|
|
2145
|
+
skillsMetadata = [];
|
|
2146
|
+
outputConfig;
|
|
2147
|
+
loopControl;
|
|
2148
|
+
generationOptions;
|
|
2149
|
+
advancedOptions;
|
|
2150
|
+
/**
 * Construct a DeepAgent from configuration params.
 *
 * Wires up the model (optionally wrapped with middleware), backend, tool and
 * loop configuration, and kicks off asynchronous skill loading.
 */
constructor(params) {
  const { model, middleware, tools = {}, systemPrompt, subagents = [], backend, maxSteps = DEFAULT_MAX_STEPS, includeGeneralPurposeAgent = true, toolResultEvictionLimit, enablePromptCaching = false, summarization, interruptOn, checkpointer, skillsDir, agentId, output, loopControl, generationOptions, advancedOptions } = params;
  // Wrap the base model with middleware when provided; accepts a single
  // middleware object or an array of them.
  if (middleware) this.model = wrapLanguageModel({
    model,
    middleware: Array.isArray(middleware) ? middleware : [middleware]
  });
  else this.model = model;
  this.maxSteps = maxSteps;
  // Default backend is a factory producing an in-memory StateBackend.
  this.backend = backend || ((state) => new StateBackend(state));
  this.toolResultEvictionLimit = toolResultEvictionLimit;
  this.enablePromptCaching = enablePromptCaching;
  this.summarizationConfig = summarization;
  this.interruptOn = interruptOn;
  this.checkpointer = checkpointer;
  this.outputConfig = output;
  this.loopControl = loopControl;
  this.generationOptions = generationOptions;
  this.advancedOptions = advancedOptions;
  // Skills load asynchronously (fire-and-forget); failures are logged only.
  // NOTE(review): the system prompt below is built before loading completes,
  // so skillsMetadata is presumably still empty at that point — confirm
  // whether that is intended.
  if (agentId) {
    if (skillsDir) console.warn("[DeepAgent] agentId parameter takes precedence over skillsDir. skillsDir is deprecated and will be ignored.");
    this.loadSkills({ agentId }).catch((error) => {
      console.warn("[DeepAgent] Failed to load skills:", error);
    });
  } else if (skillsDir) this.loadSkills({ skillsDir }).catch((error) => {
    console.warn("[DeepAgent] Failed to load skills:", error);
  });
  // Sandbox detection only applies to concrete backend instances, never to
  // factory functions.
  this.hasSandboxBackend = typeof backend !== "function" && backend !== void 0 && isSandboxBackend(backend);
  this.systemPrompt = buildSystemPrompt(systemPrompt, includeGeneralPurposeAgent || subagents && subagents.length > 0, this.hasSandboxBackend, this.skillsMetadata);
  this.userTools = tools;
  this.subagentOptions = {
    defaultModel: model,
    defaultTools: tools,
    subagents,
    includeGeneralPurposeAgent
  };
}
|
|
2186
|
+
/**
|
|
2187
|
+
* Create core tools (todos and filesystem).
|
|
2188
|
+
* @private
|
|
2189
|
+
*/
|
|
2190
|
+
createCoreTools(state, onEvent) {
|
|
2191
|
+
return {
|
|
2192
|
+
write_todos: createTodosTool(state, onEvent),
|
|
2193
|
+
...createFilesystemTools(state, {
|
|
2194
|
+
backend: this.backend,
|
|
2195
|
+
onEvent,
|
|
2196
|
+
toolResultEvictionLimit: this.toolResultEvictionLimit
|
|
2197
|
+
}),
|
|
2198
|
+
...this.userTools
|
|
2199
|
+
};
|
|
2200
|
+
}
|
|
2201
|
+
/**
|
|
2202
|
+
* Create web tools if TAVILY_API_KEY is available.
|
|
2203
|
+
* Uses dynamic import to avoid bundling Node.js dependencies in client builds.
|
|
2204
|
+
* @private
|
|
2205
|
+
*/
|
|
2206
|
+
createWebToolSet(state, onEvent) {
|
|
2207
|
+
if (!process.env.TAVILY_API_KEY) return {};
|
|
2208
|
+
try {
|
|
2209
|
+
return (init_web(), __toCommonJS(web_exports)).createWebTools(state, {
|
|
2210
|
+
backend: this.backend,
|
|
2211
|
+
onEvent,
|
|
2212
|
+
toolResultEvictionLimit: this.toolResultEvictionLimit
|
|
2213
|
+
});
|
|
2214
|
+
} catch (error) {
|
|
2215
|
+
console.warn("Web tools not available in this environment:", error);
|
|
2216
|
+
return {};
|
|
2217
|
+
}
|
|
2218
|
+
}
|
|
2219
|
+
/**
|
|
2220
|
+
* Create execute tool if backend is a sandbox.
|
|
2221
|
+
* @private
|
|
2222
|
+
*/
|
|
2223
|
+
createExecuteToolSet(onEvent) {
|
|
2224
|
+
if (!this.hasSandboxBackend) return {};
|
|
2225
|
+
const sandboxBackend = this.backend;
|
|
2226
|
+
return { execute: createExecuteTool({
|
|
2227
|
+
backend: sandboxBackend,
|
|
2228
|
+
onEvent
|
|
2229
|
+
}) };
|
|
2230
|
+
}
|
|
2231
|
+
/**
|
|
2232
|
+
* Create subagent tool if configured.
|
|
2233
|
+
* @private
|
|
2234
|
+
*/
|
|
2235
|
+
createSubagentToolSet(state, onEvent) {
|
|
2236
|
+
if (!this.subagentOptions.includeGeneralPurposeAgent && (!this.subagentOptions.subagents || this.subagentOptions.subagents.length === 0)) return {};
|
|
2237
|
+
return { task: createSubagentTool(state, {
|
|
2238
|
+
defaultModel: this.subagentOptions.defaultModel,
|
|
2239
|
+
defaultTools: this.userTools,
|
|
2240
|
+
subagents: this.subagentOptions.subagents,
|
|
2241
|
+
includeGeneralPurposeAgent: this.subagentOptions.includeGeneralPurposeAgent,
|
|
2242
|
+
backend: this.backend,
|
|
2243
|
+
onEvent,
|
|
2244
|
+
interruptOn: this.interruptOn,
|
|
2245
|
+
parentGenerationOptions: this.generationOptions,
|
|
2246
|
+
parentAdvancedOptions: this.advancedOptions
|
|
2247
|
+
}) };
|
|
2248
|
+
}
|
|
2249
|
+
/**
|
|
2250
|
+
* Create all tools for the agent, combining core, web, execute, and subagent tools.
|
|
2251
|
+
* @private
|
|
2252
|
+
*/
|
|
2253
|
+
createTools(state, onEvent) {
|
|
2254
|
+
let allTools = this.createCoreTools(state, onEvent);
|
|
2255
|
+
const webTools = this.createWebToolSet(state, onEvent);
|
|
2256
|
+
if (Object.keys(webTools).length > 0) allTools = {
|
|
2257
|
+
...allTools,
|
|
2258
|
+
...webTools
|
|
2259
|
+
};
|
|
2260
|
+
const executeTools = this.createExecuteToolSet(onEvent);
|
|
2261
|
+
if (Object.keys(executeTools).length > 0) allTools = {
|
|
2262
|
+
...allTools,
|
|
2263
|
+
...executeTools
|
|
2264
|
+
};
|
|
2265
|
+
const subagentTools = this.createSubagentToolSet(state, onEvent);
|
|
2266
|
+
if (Object.keys(subagentTools).length > 0) allTools = {
|
|
2267
|
+
...allTools,
|
|
2268
|
+
...subagentTools
|
|
2269
|
+
};
|
|
2270
|
+
allTools = applyInterruptConfig(allTools, this.interruptOn);
|
|
2271
|
+
return allTools;
|
|
2272
|
+
}
|
|
2273
|
+
/**
|
|
2274
|
+
* Build stop conditions with maxSteps safety limit.
|
|
2275
|
+
* Combines user-provided stop conditions with the maxSteps limit.
|
|
2276
|
+
*/
|
|
2277
|
+
buildStopConditions(maxSteps) {
|
|
2278
|
+
const conditions = [];
|
|
2279
|
+
conditions.push(stepCountIs(maxSteps ?? this.maxSteps));
|
|
2280
|
+
if (this.loopControl?.stopWhen) if (Array.isArray(this.loopControl.stopWhen)) conditions.push(...this.loopControl.stopWhen);
|
|
2281
|
+
else conditions.push(this.loopControl.stopWhen);
|
|
2282
|
+
return conditions;
|
|
2283
|
+
}
|
|
2284
|
+
/**
 * Build agent settings by combining passthrough options with defaults.
 *
 * Assembly order matters: generationOptions then advancedOptions are merged
 * over the defaults (so advancedOptions wins), after which composed loop
 * callbacks and the structured-output config are applied on top.
 *
 * NOTE(review): the onEvent parameter is currently unused here — confirm
 * whether it was meant to be threaded into the callbacks.
 */
buildAgentSettings(onEvent) {
  const settings = {
    model: this.model,
    instructions: this.systemPrompt,
    tools: void 0
  };
  // Caller options deliberately override the defaults above; advancedOptions
  // is merged last and takes precedence.
  if (this.generationOptions) Object.assign(settings, this.generationOptions);
  if (this.advancedOptions) Object.assign(settings, this.advancedOptions);
  // Wrap user loop callbacks so their failures are caught and logged rather
  // than breaking the agent loop.
  if (this.loopControl) {
    if (this.loopControl.prepareStep) settings.prepareStep = this.composePrepareStep(this.loopControl.prepareStep);
    if (this.loopControl.onStepFinish) settings.onStepFinish = this.composeOnStepFinish(this.loopControl.onStepFinish);
    if (this.loopControl.onFinish) settings.onFinish = this.composeOnFinish(this.loopControl.onFinish);
  }
  if (this.outputConfig) settings.output = Output.object(this.outputConfig);
  return settings;
}
|
|
2303
|
+
/**
|
|
2304
|
+
* Create a ToolLoopAgent for a given state.
|
|
2305
|
+
* @param state - The shared agent state
|
|
2306
|
+
* @param maxSteps - Optional max steps override
|
|
2307
|
+
* @param onEvent - Optional callback for emitting events
|
|
2308
|
+
*/
|
|
2309
|
+
createAgent(state, maxSteps, onEvent) {
|
|
2310
|
+
const tools = this.createTools(state, onEvent);
|
|
2311
|
+
const settings = this.buildAgentSettings(onEvent);
|
|
2312
|
+
const stopConditions = this.buildStopConditions(maxSteps);
|
|
2313
|
+
return new ToolLoopAgent({
|
|
2314
|
+
...settings,
|
|
2315
|
+
tools,
|
|
2316
|
+
stopWhen: stopConditions
|
|
2317
|
+
});
|
|
2318
|
+
}
|
|
2319
|
+
/**
|
|
2320
|
+
* Load skills from directory asynchronously.
|
|
2321
|
+
* Supports both legacy skillsDir and new agentId modes.
|
|
2322
|
+
*/
|
|
2323
|
+
async loadSkills(options) {
|
|
2324
|
+
const { listSkills } = await import("./load-BDxe6Cet.mjs");
|
|
2325
|
+
this.skillsMetadata = (await listSkills(options.agentId ? { agentId: options.agentId } : { projectSkillsDir: options.skillsDir })).map((s) => ({
|
|
2326
|
+
name: s.name,
|
|
2327
|
+
description: s.description,
|
|
2328
|
+
path: s.path
|
|
2329
|
+
}));
|
|
2330
|
+
}
|
|
2331
|
+
/**
|
|
2332
|
+
* Generate a response (non-streaming).
|
|
2333
|
+
*/
|
|
2334
|
+
async generate(options) {
|
|
2335
|
+
const state = {
|
|
2336
|
+
todos: [],
|
|
2337
|
+
files: {}
|
|
2338
|
+
};
|
|
2339
|
+
const result = await this.createAgent(state, options.maxSteps).generate({ prompt: options.prompt });
|
|
2340
|
+
Object.defineProperty(result, "state", {
|
|
2341
|
+
value: state,
|
|
2342
|
+
enumerable: true,
|
|
2343
|
+
writable: false
|
|
2344
|
+
});
|
|
2345
|
+
return result;
|
|
2346
|
+
}
|
|
2347
|
+
/**
|
|
2348
|
+
* Stream a response.
|
|
2349
|
+
*/
|
|
2350
|
+
async stream(options) {
|
|
2351
|
+
const state = {
|
|
2352
|
+
todos: [],
|
|
2353
|
+
files: {}
|
|
2354
|
+
};
|
|
2355
|
+
const result = await this.createAgent(state, options.maxSteps).stream({ prompt: options.prompt });
|
|
2356
|
+
Object.defineProperty(result, "state", {
|
|
2357
|
+
value: state,
|
|
2358
|
+
enumerable: true,
|
|
2359
|
+
writable: false
|
|
2360
|
+
});
|
|
2361
|
+
return result;
|
|
2362
|
+
}
|
|
2363
|
+
/**
|
|
2364
|
+
* Generate with an existing state (for continuing conversations).
|
|
2365
|
+
*/
|
|
2366
|
+
async generateWithState(options) {
|
|
2367
|
+
const result = await this.createAgent(options.state, options.maxSteps).generate({ prompt: options.prompt });
|
|
2368
|
+
Object.defineProperty(result, "state", {
|
|
2369
|
+
value: options.state,
|
|
2370
|
+
enumerable: true,
|
|
2371
|
+
writable: false
|
|
2372
|
+
});
|
|
2373
|
+
return result;
|
|
2374
|
+
}
|
|
2375
|
+
/**
|
|
2376
|
+
* Get the underlying ToolLoopAgent for advanced usage.
|
|
2377
|
+
* This allows using AI SDK's createAgentUIStream and other utilities.
|
|
2378
|
+
*/
|
|
2379
|
+
getAgent(state) {
|
|
2380
|
+
const agentState = state || {
|
|
2381
|
+
todos: [],
|
|
2382
|
+
files: {}
|
|
2383
|
+
};
|
|
2384
|
+
return this.createAgent(agentState);
|
|
2385
|
+
}
|
|
2386
|
+
/**
|
|
2387
|
+
* Stream a response with real-time events.
|
|
2388
|
+
* This is an async generator that yields DeepAgentEvent objects.
|
|
2389
|
+
*
|
|
2390
|
+
* Supports conversation history via the `messages` option for multi-turn conversations.
|
|
2391
|
+
*
|
|
2392
|
+
* @example
|
|
2393
|
+
* ```typescript
|
|
2394
|
+
* // Single turn
|
|
2395
|
+
* for await (const event of agent.streamWithEvents({ prompt: "..." })) {
|
|
2396
|
+
* switch (event.type) {
|
|
2397
|
+
* case 'text':
|
|
2398
|
+
* process.stdout.write(event.text);
|
|
2399
|
+
* break;
|
|
2400
|
+
* case 'done':
|
|
2401
|
+
* // event.messages contains the updated conversation history
|
|
2402
|
+
* console.log('Messages:', event.messages);
|
|
2403
|
+
* break;
|
|
2404
|
+
* }
|
|
2405
|
+
* }
|
|
2406
|
+
*
|
|
2407
|
+
* // Multi-turn conversation
|
|
2408
|
+
* let messages = [];
|
|
2409
|
+
* for await (const event of agent.streamWithEvents({ prompt: "Hello", messages })) {
|
|
2410
|
+
* if (event.type === 'done') {
|
|
2411
|
+
* messages = event.messages; // Save for next turn
|
|
2412
|
+
* }
|
|
2413
|
+
* }
|
|
2414
|
+
* for await (const event of agent.streamWithEvents({ prompt: "Follow up", messages })) {
|
|
2415
|
+
* // Agent now has context from previous turn
|
|
2416
|
+
* }
|
|
2417
|
+
* ```
|
|
2418
|
+
*/
|
|
2419
|
+
/**
|
|
2420
|
+
* Compose user's onStepFinish callback with DeepAgent's internal checkpointing logic.
|
|
2421
|
+
* User callback executes first, errors are caught to prevent breaking checkpointing.
|
|
2422
|
+
*/
|
|
2423
|
+
composeOnStepFinish(userOnStepFinish) {
|
|
2424
|
+
return async (params) => {
|
|
2425
|
+
if (userOnStepFinish) try {
|
|
2426
|
+
await userOnStepFinish(params);
|
|
2427
|
+
} catch (error) {
|
|
2428
|
+
console.error("[DeepAgent] User onStepFinish callback failed:", error);
|
|
2429
|
+
}
|
|
2430
|
+
};
|
|
2431
|
+
}
|
|
2432
|
+
/**
|
|
2433
|
+
* Compose user's onFinish callback with DeepAgent's internal cleanup logic.
|
|
2434
|
+
*/
|
|
2435
|
+
composeOnFinish(userOnFinish) {
|
|
2436
|
+
return async (params) => {
|
|
2437
|
+
if (userOnFinish) try {
|
|
2438
|
+
await userOnFinish(params);
|
|
2439
|
+
} catch (error) {
|
|
2440
|
+
console.error("[DeepAgent] User onFinish callback failed:", error);
|
|
2441
|
+
}
|
|
2442
|
+
};
|
|
2443
|
+
}
|
|
2444
|
+
/**
|
|
2445
|
+
* Compose user's prepareStep callback with DeepAgent's internal step preparation.
|
|
2446
|
+
* Returns a function typed as `any` to avoid AI SDK's strict toolName inference.
|
|
2447
|
+
*/
|
|
2448
|
+
composePrepareStep(userPrepareStep) {
|
|
2449
|
+
return async (params) => {
|
|
2450
|
+
if (userPrepareStep) try {
|
|
2451
|
+
return { ...await userPrepareStep(params) };
|
|
2452
|
+
} catch (error) {
|
|
2453
|
+
console.error("[DeepAgent] User prepareStep callback failed:", error);
|
|
2454
|
+
return params;
|
|
2455
|
+
}
|
|
2456
|
+
return params;
|
|
2457
|
+
};
|
|
2458
|
+
}
|
|
2459
|
+
/**
 * Build streamText options with callbacks for step tracking and checkpointing.
 *
 * @param inputMessages - Full message array fed to the model
 * @param tools - Tool set (already interrupt/approval wrapped by the caller)
 * @param options - Caller options (threadId, maxSteps, abortSignal, ...)
 * @param state - Shared agent state, snapshotted into each checkpoint
 * @param baseStep - Step count restored from a checkpoint, added to local steps
 * @param pendingInterrupt - Interrupt carried over from a loaded checkpoint
 * @param eventQueue - Queue the caller drains to interleave events in order
 * @param stepNumberRef - Mutable step counter shared with the caller
 * @private
 */
buildStreamTextOptions(inputMessages, tools, options, state, baseStep, pendingInterrupt, eventQueue, stepNumberRef) {
  const { threadId } = options;
  const streamOptions = {
    model: this.model,
    messages: inputMessages,
    tools,
    stopWhen: this.buildStopConditions(options.maxSteps),
    abortSignal: options.abortSignal,
    onStepFinish: async ({ toolCalls, toolResults }) => {
      // Run the user's callback first (errors are swallowed inside the
      // composed wrapper) so checkpointing below always proceeds.
      if (this.loopControl?.onStepFinish) await this.composeOnStepFinish(this.loopControl.onStepFinish)({
        toolCalls,
        toolResults
      });
      stepNumberRef.value++;
      const cumulativeStep = baseStep + stepNumberRef.value;
      // Events are queued rather than emitted directly; the caller drains
      // the queue between stream chunks.
      const stepEvent = {
        type: "step-finish",
        stepNumber: stepNumberRef.value,
        // Pair each tool call with its result by index.
        toolCalls: toolCalls.map((tc, i) => ({
          toolName: tc.toolName,
          args: "input" in tc ? tc.input : void 0,
          result: toolResults[i] ? "output" in toolResults[i] ? toolResults[i].output : void 0 : void 0
        }))
      };
      eventQueue.push(stepEvent);
      // Persist a checkpoint after every step when a thread is configured.
      if (threadId && this.checkpointer) {
        const checkpoint = {
          threadId,
          step: cumulativeStep,
          messages: inputMessages,
          // Shallow snapshot so later state mutations don't alias the
          // saved checkpoint object. NOTE(review): nested objects (todos,
          // files) are still shared — confirm this is acceptable.
          state: { ...state },
          interrupt: pendingInterrupt,
          createdAt: (/* @__PURE__ */ new Date()).toISOString(),
          updatedAt: (/* @__PURE__ */ new Date()).toISOString()
        };
        await this.checkpointer.save(checkpoint);
        eventQueue.push(createCheckpointSavedEvent(threadId, cumulativeStep));
      }
    }
  };
  // Merge order is deliberate: generationOptions, then advancedOptions on
  // top, then output/loop-control overrides.
  if (this.generationOptions) Object.assign(streamOptions, this.generationOptions);
  if (this.advancedOptions) Object.assign(streamOptions, this.advancedOptions);
  if (this.outputConfig) streamOptions.output = Output.object(this.outputConfig);
  if (this.loopControl) {
    if (this.loopControl.prepareStep) streamOptions.prepareStep = this.composePrepareStep(this.loopControl.prepareStep);
    if (this.loopControl.onFinish) streamOptions.onFinish = this.composeOnFinish(this.loopControl.onFinish);
  }
  // With prompt caching enabled the system prompt travels as a cached system
  // message (Anthropic ephemeral cacheControl); otherwise it is passed via
  // the plain `system` option.
  if (this.enablePromptCaching) streamOptions.messages = [{
    role: "system",
    content: this.systemPrompt,
    providerOptions: { anthropic: { cacheControl: { type: "ephemeral" } } }
  }, ...inputMessages];
  else streamOptions.system = this.systemPrompt;
  return streamOptions;
}
|
|
2519
|
+
/**
 * Build the message array from options, handling validation and priority.
 * Priority: explicit messages > prompt > checkpoint history.
 *
 * @param options - Caller options (prompt, messages, resume, threadId, ...)
 * @param patchedHistory - History restored from a checkpoint (may be replaced)
 * @returns { messages, patchedHistory, error?, shouldReturnEmpty? } — exactly
 *   one of error/shouldReturnEmpty may be set; otherwise `messages` is the
 *   combined history + new user messages.
 * @private
 */
async buildMessageArray(options, patchedHistory) {
  const { resume } = options;
  // No input source at all: fail fast with a descriptive error event.
  if (!options.prompt && !options.messages && !resume && !options.threadId) return {
    messages: [],
    patchedHistory,
    error: {
      type: "error",
      error: /* @__PURE__ */ new Error("Either 'prompt', 'messages', 'resume', or 'threadId' is required")
    }
  };
  let userMessages = [];
  let shouldUseCheckpointHistory = true;
  if (options.messages && options.messages.length > 0) {
    // Non-empty explicit messages take precedence over both prompt and
    // checkpoint history.
    userMessages = options.messages;
    shouldUseCheckpointHistory = false;
    if (options.prompt && process.env.NODE_ENV !== "production") console.warn("prompt parameter is deprecated when messages are provided, using messages instead");
  } else if (options.messages) {
    // Explicitly-empty messages array: treat as "start fresh", discarding
    // checkpoint history and any prompt.
    shouldUseCheckpointHistory = false;
    patchedHistory = [];
    if (options.prompt && process.env.NODE_ENV !== "production") console.warn("prompt parameter is deprecated when empty messages are provided, prompt ignored");
  } else if (options.prompt) {
    // Legacy prompt path: wrap into a single user message.
    userMessages = [{
      role: "user",
      content: options.prompt
    }];
    if (process.env.NODE_ENV !== "production") console.warn("prompt parameter is deprecated, use messages instead");
  }
  if (shouldUseCheckpointHistory && patchedHistory.length > 0) {
    // Repair dangling tool calls in restored history, then optionally
    // compress it via summarization before reuse.
    patchedHistory = patchToolCalls(patchedHistory);
    if (this.summarizationConfig?.enabled && patchedHistory.length > 0) patchedHistory = (await summarizeIfNeeded(patchedHistory, {
      model: this.summarizationConfig.model || this.model,
      tokenThreshold: this.summarizationConfig.tokenThreshold,
      keepMessages: this.summarizationConfig.keepMessages,
      generationOptions: this.generationOptions,
      advancedOptions: this.advancedOptions
    })).messages;
  } else if (!shouldUseCheckpointHistory) patchedHistory = [];
  const hasEmptyMessages = options.messages && options.messages.length === 0;
  const hasValidInput = userMessages.length > 0 || patchedHistory.length > 0;
  // Empty messages with nothing else to run: signal a no-op "done" to the
  // caller instead of an error.
  if (hasEmptyMessages && !hasValidInput && !resume) return {
    messages: [],
    patchedHistory,
    shouldReturnEmpty: true
  };
  if (!hasValidInput && !resume) return {
    messages: [],
    patchedHistory,
    error: {
      type: "error",
      error: /* @__PURE__ */ new Error("No valid input: provide either non-empty messages, prompt, or threadId with existing checkpoint")
    }
  };
  return {
    messages: [...patchedHistory, ...userMessages],
    patchedHistory
  };
}
|
|
2582
|
+
/**
|
|
2583
|
+
* Load checkpoint context if threadId is provided.
|
|
2584
|
+
* Handles checkpoint restoration and resume from interrupt.
|
|
2585
|
+
*
|
|
2586
|
+
* @private
|
|
2587
|
+
*/
|
|
2588
|
+
async loadCheckpointContext(options) {
|
|
2589
|
+
const { threadId, resume } = options;
|
|
2590
|
+
let state = options.state || {
|
|
2591
|
+
todos: [],
|
|
2592
|
+
files: {}
|
|
2593
|
+
};
|
|
2594
|
+
let patchedHistory = [];
|
|
2595
|
+
let currentStep = 0;
|
|
2596
|
+
let pendingInterrupt;
|
|
2597
|
+
let checkpointEvent;
|
|
2598
|
+
if (threadId && this.checkpointer) {
|
|
2599
|
+
const checkpoint = await this.checkpointer.load(threadId);
|
|
2600
|
+
if (checkpoint) {
|
|
2601
|
+
state = checkpoint.state;
|
|
2602
|
+
patchedHistory = checkpoint.messages;
|
|
2603
|
+
currentStep = checkpoint.step;
|
|
2604
|
+
pendingInterrupt = checkpoint.interrupt;
|
|
2605
|
+
checkpointEvent = createCheckpointLoadedEvent(threadId, checkpoint.step, checkpoint.messages.length);
|
|
2606
|
+
}
|
|
2607
|
+
}
|
|
2608
|
+
if (resume && pendingInterrupt) if (resume.decisions[0]?.type === "approve") pendingInterrupt = void 0;
|
|
2609
|
+
else pendingInterrupt = void 0;
|
|
2610
|
+
return {
|
|
2611
|
+
state,
|
|
2612
|
+
patchedHistory,
|
|
2613
|
+
currentStep,
|
|
2614
|
+
pendingInterrupt,
|
|
2615
|
+
checkpointEvent
|
|
2616
|
+
};
|
|
2617
|
+
}
|
|
2618
|
+
/**
 * Stream a response as an async generator of DeepAgentEvent objects.
 *
 * Restores checkpoint context, builds the input message array, runs
 * streamText, and interleaves queued tool/step/checkpoint events with the
 * model's stream chunks. Ends with a "done" event (and a final checkpoint
 * save when a thread is configured) or an "error" event.
 */
async *streamWithEvents(options) {
  const { threadId, resume } = options;
  const context = await this.loadCheckpointContext(options);
  const { state, currentStep, pendingInterrupt, checkpointEvent } = context;
  let patchedHistory = context.patchedHistory;
  // Surface checkpoint restoration to the consumer before any other event.
  if (checkpointEvent) yield checkpointEvent;
  const messageResult = await this.buildMessageArray(options, patchedHistory);
  if (messageResult.error) {
    yield messageResult.error;
    return;
  }
  // Explicitly-empty input: emit a no-op "done" rather than running a turn.
  if (messageResult.shouldReturnEmpty) {
    yield {
      type: "done",
      text: "",
      messages: [],
      state
    };
    return;
  }
  const inputMessages = messageResult.messages;
  patchedHistory = messageResult.patchedHistory;
  // Tool/step events produced inside callbacks are queued here and drained
  // between stream chunks so ordering is preserved for the consumer.
  const eventQueue = [];
  const stepNumberRef = { value: 0 };
  const baseStep = currentStep;
  const onEvent = (event) => {
    eventQueue.push(event);
  };
  let tools = this.createTools(state, onEvent);
  const hasInterruptOn = !!this.interruptOn;
  const hasApprovalCallback = !!options.onApprovalRequest;
  // Approval wrapping only applies when both an interrupt config and an
  // approval callback are present.
  if (hasInterruptOn && hasApprovalCallback) tools = wrapToolsWithApproval(tools, this.interruptOn, options.onApprovalRequest);
  try {
    const result = streamText(this.buildStreamTextOptions(inputMessages, tools, options, state, baseStep, pendingInterrupt, eventQueue, stepNumberRef));
    yield {
      type: "step-start",
      stepNumber: 1
    };
    for await (const chunk of result.fullStream) {
      // Drain queued events first; each step-finish is immediately followed
      // by the next step-start.
      while (eventQueue.length > 0) {
        const event = eventQueue.shift();
        yield event;
        if (event.type === "step-finish") yield {
          type: "step-start",
          stepNumber: event.stepNumber + 1
        };
      }
      // Translate AI SDK stream chunks into DeepAgent events; tool errors
      // are reported as tool-result events with isError set.
      if (chunk.type === "text-delta") yield {
        type: "text",
        text: chunk.text
      };
      else if (chunk.type === "tool-call") yield {
        type: "tool-call",
        toolName: chunk.toolName,
        toolCallId: chunk.toolCallId,
        args: chunk.input
      };
      else if (chunk.type === "tool-result") yield {
        type: "tool-result",
        toolName: chunk.toolName,
        toolCallId: chunk.toolCallId,
        result: chunk.output,
        isError: false
      };
      else if (chunk.type === "tool-error") yield {
        type: "tool-result",
        toolName: chunk.toolName,
        toolCallId: chunk.toolCallId,
        result: chunk.error,
        isError: true
      };
    }
    // Flush any events queued after the last chunk.
    while (eventQueue.length > 0) yield eventQueue.shift();
    const finalText = await result.text;
    // Only append an assistant message when the model produced text.
    const updatedMessages = [...inputMessages, ...finalText ? [{
      role: "assistant",
      content: finalText
    }] : []];
    const output = "output" in result ? result.output : void 0;
    yield {
      type: "done",
      state,
      text: finalText,
      messages: updatedMessages,
      ...output !== void 0 ? { output } : {}
    };
    // Persist the completed turn as a final checkpoint.
    if (threadId && this.checkpointer) {
      const finalCheckpoint = {
        threadId,
        step: baseStep + stepNumberRef.value,
        messages: updatedMessages,
        state,
        createdAt: (/* @__PURE__ */ new Date()).toISOString(),
        updatedAt: (/* @__PURE__ */ new Date()).toISOString()
      };
      await this.checkpointer.save(finalCheckpoint);
      yield createCheckpointSavedEvent(threadId, baseStep + stepNumberRef.value);
    }
  } catch (error) {
    // Normalize thrown values into an Error-carrying event; the generator
    // never rethrows to the consumer.
    yield {
      type: "error",
      error: error instanceof Error ? error : new Error(String(error))
    };
  }
}
|
|
2723
|
+
/**
|
|
2724
|
+
* Stream with a simple callback interface.
|
|
2725
|
+
* This is a convenience wrapper around streamWithEvents.
|
|
2726
|
+
*/
|
|
2727
|
+
async streamWithCallback(options, onEvent) {
|
|
2728
|
+
let finalState = options.state || {
|
|
2729
|
+
todos: [],
|
|
2730
|
+
files: {}
|
|
2731
|
+
};
|
|
2732
|
+
let finalText;
|
|
2733
|
+
let finalMessages;
|
|
2734
|
+
for await (const event of this.streamWithEvents(options)) {
|
|
2735
|
+
onEvent(event);
|
|
2736
|
+
if (event.type === "done") {
|
|
2737
|
+
finalState = event.state;
|
|
2738
|
+
finalText = event.text;
|
|
2739
|
+
finalMessages = event.messages;
|
|
2740
|
+
}
|
|
2741
|
+
}
|
|
2742
|
+
return {
|
|
2743
|
+
state: finalState,
|
|
2744
|
+
text: finalText,
|
|
2745
|
+
messages: finalMessages
|
|
2746
|
+
};
|
|
2747
|
+
}
|
|
2748
|
+
};
|
|
2749
|
+
/**
|
|
2750
|
+
* Create a Deep Agent with planning, filesystem, and subagent capabilities.
|
|
2751
|
+
*
|
|
2752
|
+
* @param params - Configuration object for the Deep Agent
|
|
2753
|
+
* @param params.model - **Required.** AI SDK LanguageModel instance (e.g., `anthropic('claude-sonnet-4-20250514')`, `openai('gpt-4o')`)
|
|
2754
|
+
* @param params.systemPrompt - Optional custom system prompt for the agent
|
|
2755
|
+
* @param params.tools - Optional custom tools to add to the agent (AI SDK ToolSet)
|
|
2756
|
+
* @param params.subagents - Optional array of specialized subagent configurations for task delegation
|
|
2757
|
+
* @param params.backend - Optional backend for filesystem operations (default: StateBackend for in-memory storage)
|
|
2758
|
+
* @param params.maxSteps - Optional maximum number of steps for the agent loop (default: 100)
|
|
2759
|
+
* @param params.includeGeneralPurposeAgent - Optional flag to include general-purpose subagent (default: true)
|
|
2760
|
+
* @param params.toolResultEvictionLimit - Optional token limit before evicting large tool results to filesystem (default: disabled)
|
|
2761
|
+
* @param params.enablePromptCaching - Optional flag to enable prompt caching for improved performance (Anthropic only, default: false)
|
|
2762
|
+
* @param params.summarization - Optional summarization configuration for automatic conversation summarization
|
|
2763
|
+
* @returns A configured DeepAgent instance
|
|
2764
|
+
*
|
|
2765
|
+
* @see {@link CreateDeepAgentParams} for detailed parameter types
|
|
2766
|
+
*
|
|
2767
|
+
* @example Basic usage
|
|
2768
|
+
* ```typescript
|
|
2769
|
+
* import { createDeepAgent } from 'deepagentsdk';
|
|
2770
|
+
* import { anthropic } from '@ai-sdk/anthropic';
|
|
2771
|
+
*
|
|
2772
|
+
* const agent = createDeepAgent({
|
|
2773
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2774
|
+
* systemPrompt: 'You are a research assistant...',
|
|
2775
|
+
* });
|
|
2776
|
+
*
|
|
2777
|
+
* const result = await agent.generate({
|
|
2778
|
+
* prompt: 'Research the topic and write a report',
|
|
2779
|
+
* });
|
|
2780
|
+
* ```
|
|
2781
|
+
*
|
|
2782
|
+
* @example With custom tools
|
|
2783
|
+
* ```typescript
|
|
2784
|
+
* import { tool } from 'ai';
|
|
2785
|
+
* import { z } from 'zod';
|
|
2786
|
+
*
|
|
2787
|
+
* const customTool = tool({
|
|
2788
|
+
* description: 'Get current time',
|
|
2789
|
+
* inputSchema: z.object({}),
|
|
2790
|
+
* execute: async () => new Date().toISOString(),
|
|
2791
|
+
* });
|
|
2792
|
+
*
|
|
2793
|
+
* const agent = createDeepAgent({
|
|
2794
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2795
|
+
* tools: { get_time: customTool },
|
|
2796
|
+
* });
|
|
2797
|
+
* ```
|
|
2798
|
+
*
|
|
2799
|
+
* @example With subagents
|
|
2800
|
+
* ```typescript
|
|
2801
|
+
* const agent = createDeepAgent({
|
|
2802
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2803
|
+
* subagents: [{
|
|
2804
|
+
* name: 'research-agent',
|
|
2805
|
+
* description: 'Specialized for research tasks',
|
|
2806
|
+
* systemPrompt: 'You are a research specialist...',
|
|
2807
|
+
* }],
|
|
2808
|
+
* });
|
|
2809
|
+
* ```
|
|
2810
|
+
*
|
|
2811
|
+
* @example With StateBackend (default, explicit)
|
|
2812
|
+
* ```typescript
|
|
2813
|
+
* import { StateBackend } from 'deepagentsdk';
|
|
2814
|
+
*
|
|
2815
|
+
* const state = { todos: [], files: {} };
|
|
2816
|
+
* const agent = createDeepAgent({
|
|
2817
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2818
|
+
* backend: new StateBackend(state), // Ephemeral in-memory storage
|
|
2819
|
+
* });
|
|
2820
|
+
* ```
|
|
2821
|
+
*
|
|
2822
|
+
* @example With FilesystemBackend
|
|
2823
|
+
* ```typescript
|
|
2824
|
+
* import { FilesystemBackend } from 'deepagentsdk';
|
|
2825
|
+
*
|
|
2826
|
+
* const agent = createDeepAgent({
|
|
2827
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2828
|
+
* backend: new FilesystemBackend({ rootDir: './workspace' }), // Persist to disk
|
|
2829
|
+
* });
|
|
2830
|
+
* ```
|
|
2831
|
+
*
|
|
2832
|
+
* @example With PersistentBackend
|
|
2833
|
+
* ```typescript
|
|
2834
|
+
* import { PersistentBackend, InMemoryStore } from 'deepagentsdk';
|
|
2835
|
+
*
|
|
2836
|
+
* const store = new InMemoryStore();
|
|
2837
|
+
* const agent = createDeepAgent({
|
|
2838
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2839
|
+
* backend: new PersistentBackend({ store, namespace: 'project-1' }), // Cross-session persistence
|
|
2840
|
+
* });
|
|
2841
|
+
* ```
|
|
2842
|
+
*
|
|
2843
|
+
* @example With CompositeBackend
|
|
2844
|
+
* ```typescript
|
|
2845
|
+
* import { CompositeBackend, FilesystemBackend, StateBackend } from 'deepagentsdk';
|
|
2846
|
+
*
|
|
2847
|
+
* const state = { todos: [], files: {} };
|
|
2848
|
+
* const agent = createDeepAgent({
|
|
2849
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2850
|
+
* backend: new CompositeBackend(
|
|
2851
|
+
* new StateBackend(state),
|
|
2852
|
+
* { '/persistent/': new FilesystemBackend({ rootDir: './persistent' }) }
|
|
2853
|
+
* ), // Route files by path prefix
|
|
2854
|
+
* });
|
|
2855
|
+
* ```
|
|
2856
|
+
*
|
|
2857
|
+
* @example With middleware for logging and caching
|
|
2858
|
+
* ```typescript
|
|
2859
|
+
* import { createDeepAgent } from 'deepagentsdk';
|
|
2860
|
+
* import { anthropic } from '@ai-sdk/anthropic';
|
|
2861
|
+
*
|
|
2862
|
+
* const loggingMiddleware = {
|
|
2863
|
+
* wrapGenerate: async ({ doGenerate, params }) => {
|
|
2864
|
+
* console.log('Model called with:', params.prompt);
|
|
2865
|
+
* const result = await doGenerate();
|
|
2866
|
+
* console.log('Model returned:', result.text);
|
|
2867
|
+
* return result;
|
|
2868
|
+
* },
|
|
2869
|
+
* };
|
|
2870
|
+
*
|
|
2871
|
+
* const agent = createDeepAgent({
|
|
2872
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2873
|
+
* middleware: [loggingMiddleware],
|
|
2874
|
+
* });
|
|
2875
|
+
* ```
|
|
2876
|
+
*
|
|
2877
|
+
* @example With middleware factory for context access
|
|
2878
|
+
* ```typescript
|
|
2879
|
+
* import { FilesystemBackend } from 'deepagentsdk';
|
|
2880
|
+
*
|
|
2881
|
+
* function createContextMiddleware(backend: BackendProtocol) {
|
|
2882
|
+
* return {
|
|
2883
|
+
* wrapGenerate: async ({ doGenerate }) => {
|
|
2884
|
+
* const state = await backend.read('state');
|
|
2885
|
+
* const result = await doGenerate();
|
|
2886
|
+
* await backend.write('state', { ...state, lastCall: result });
|
|
2887
|
+
* return result;
|
|
2888
|
+
* },
|
|
2889
|
+
* };
|
|
2890
|
+
* }
|
|
2891
|
+
*
|
|
2892
|
+
* const backend = new FilesystemBackend({ rootDir: './workspace' });
|
|
2893
|
+
* const agent = createDeepAgent({
|
|
2894
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2895
|
+
* backend,
|
|
2896
|
+
* middleware: createContextMiddleware(backend),
|
|
2897
|
+
* });
|
|
2898
|
+
* ```
|
|
2899
|
+
*
|
|
2900
|
+
* @example With performance optimizations
|
|
2901
|
+
* ```typescript
|
|
2902
|
+
* const agent = createDeepAgent({
|
|
2903
|
+
* model: anthropic('claude-sonnet-4-20250514'),
|
|
2904
|
+
* enablePromptCaching: true,
|
|
2905
|
+
* toolResultEvictionLimit: 20000,
|
|
2906
|
+
* summarization: {
|
|
2907
|
+
* enabled: true,
|
|
2908
|
+
* tokenThreshold: 170000,
|
|
2909
|
+
* keepMessages: 6,
|
|
2910
|
+
* },
|
|
2911
|
+
* });
|
|
2912
|
+
* ```
|
|
2913
|
+
*/
|
|
2914
|
+
function createDeepAgent(params) {
	// Thin factory: all validation and defaulting happens inside the
	// DeepAgent constructor; this exists purely as the public entry point.
	const agent = new DeepAgent(params);
	return agent;
}
|
|
2917
|
+
|
|
2918
|
+
//#endregion
|
|
2919
|
+
//#region src/backends/sandbox.ts
|
|
2920
|
+
// Bundler-generated lazy initializers — presumably register the shared error
// helpers (FILE_NOT_FOUND, STRING_NOT_FOUND, ...) and limit constants
// (DEFAULT_READ_LIMIT, ...) used by the sandbox backends below; verify against
// the chunk modules that define them.
init_errors();
init_limits();
|
|
2922
|
+
/**
|
|
2923
|
+
* Encode string to base64 for safe shell transmission.
|
|
2924
|
+
*/
|
|
2925
|
+
/**
* Encode string to base64 for safe shell transmission.
*
* @param {string} str - UTF-8 text to encode
* @returns {string} Base64 representation of the UTF-8 bytes of `str`
*/
function toBase64(str) {
	const bytes = Buffer.from(str, "utf-8");
	return bytes.toString("base64");
}
|
|
2928
|
+
/**
|
|
2929
|
+
* Build a Node.js script command with embedded base64 arguments.
|
|
2930
|
+
* This avoids shell argument parsing issues by embedding values directly in the script.
|
|
2931
|
+
*/
|
|
2932
|
+
/**
* Build a Node.js script command with embedded base64 arguments.
* This avoids shell argument parsing issues by embedding values directly in the script.
*
* Each `__KEY__` placeholder in `script` is replaced with the corresponding
* value from `args`. Replacement is a literal global substitution: using
* split/join instead of `String.prototype.replace(new RegExp(...), value)`
* avoids two pitfalls — RegExp metacharacters in the key, and `$`-pattern
* expansion (`$&`, `$'`, `$1`, ...) that `replace` applies to the value.
*
* @param {string} script - Node script source containing `__KEY__` placeholders
* @param {Record<string, string>} args - Placeholder names mapped to literal values
* @returns {string} A `node -e '...'` shell command (values are expected to be
*   base64/numeric so they never contain a single quote that would break it)
*/
function buildNodeScript(script, args) {
	let result = script;
	for (const [key, value] of Object.entries(args)) result = result.split(`__${key}__`).join(value);
	return `node -e '${result}'`;
}
|
|
2937
|
+
/**
|
|
2938
|
+
* Abstract base class for sandbox backends.
|
|
2939
|
+
*
|
|
2940
|
+
* Implements all file operations using shell commands via execute().
|
|
2941
|
+
* Subclasses only need to implement execute() and id.
|
|
2942
|
+
*
|
|
2943
|
+
* @example Creating a custom sandbox backend
|
|
2944
|
+
* ```typescript
|
|
2945
|
+
* class MyCloudSandbox extends BaseSandbox {
|
|
2946
|
+
* readonly id = 'my-cloud-123';
|
|
2947
|
+
*
|
|
2948
|
+
* async execute(command: string): Promise<ExecuteResponse> {
|
|
2949
|
+
* // Call your cloud provider's API
|
|
2950
|
+
* const result = await myCloudApi.runCommand(command);
|
|
2951
|
+
* return {
|
|
2952
|
+
* output: result.stdout + result.stderr,
|
|
2953
|
+
* exitCode: result.exitCode,
|
|
2954
|
+
* truncated: false,
|
|
2955
|
+
* };
|
|
2956
|
+
* }
|
|
2957
|
+
* }
|
|
2958
|
+
* ```
|
|
2959
|
+
*/
|
|
2960
|
+
/**
* Abstract base class for sandbox backends.
*
* Implements all file operations using shell commands via execute().
* Subclasses only need to implement execute() and id.
*
* Transport convention used throughout: every string argument (path, content,
* pattern) is base64-encoded with toBase64() and decoded inside a small Node
* script built by buildNodeScript(), so shell metacharacters can never break
* or inject into the command line. Structured results come back as NDJSON
* (one JSON object per stdout line) and are parsed leniently — malformed
* lines are skipped.
*
* @example Creating a custom sandbox backend
* ```typescript
* class MyCloudSandbox extends BaseSandbox {
*   readonly id = 'my-cloud-123';
*
*   async execute(command: string): Promise<ExecuteResponse> {
*     // Call your cloud provider's API
*     const result = await myCloudApi.runCommand(command);
*     return {
*       output: result.stdout + result.stderr,
*       exitCode: result.exitCode,
*       truncated: false,
*     };
*   }
* }
* ```
*/
var BaseSandbox = class {
	/**
	* List files and directories in a path.
	*
	* Emits one JSON record per directory entry from inside the sandbox and
	* parses it back into { path, is_dir, size, modified_at } objects.
	* Entries (or the directory itself) that cannot be stat'ed are silently
	* skipped, so an unreadable/missing directory yields an empty array.
	*/
	async lsInfo(path) {
		const pathB64 = toBase64(path);
		const result = await this.execute(buildNodeScript(`
const fs = require("fs");
const path = require("path");

const dirPath = Buffer.from("__PATH__", "base64").toString("utf-8");

try {
	const entries = fs.readdirSync(dirPath, { withFileTypes: true });
	for (const entry of entries) {
		const fullPath = path.join(dirPath, entry.name);
		try {
			const stat = fs.statSync(fullPath);
			console.log(JSON.stringify({
				path: entry.name,
				is_dir: entry.isDirectory(),
				size: stat.size,
				modified_at: stat.mtime.toISOString()
			}));
		} catch (e) {}
	}
} catch (e) {}
`, { PATH: pathB64 }));
		// Parse the NDJSON output; ignore blank or malformed lines.
		const infos = [];
		for (const line of result.output.trim().split("\n")) {
			if (!line) continue;
			try {
				const data = JSON.parse(line);
				infos.push({
					path: data.path,
					is_dir: data.is_dir,
					size: data.size,
					modified_at: data.modified_at
				});
			} catch {}
		}
		return infos;
	}
	/**
	* Read file content with line numbers.
	*
	* Output format is `cat -n`-like: each line prefixed with a 6-column,
	* 1-based line number and a tab. Empty files return the
	* SYSTEM_REMINDER_FILE_EMPTY sentinel instead of numbered lines.
	*
	* @param filePath - File to read inside the sandbox
	* @param offset - Zero-based line index to start from
	* @param limit - Maximum number of lines to return
	* @returns Numbered content, the empty-file reminder, FILE_NOT_FOUND(...)
	*   for a missing file, or the raw error output for other failures
	*/
	async read(filePath, offset = 0, limit = DEFAULT_READ_LIMIT) {
		const pathB64 = toBase64(filePath);
		// Exit code 1 + "Error: File not found" on stderr is the script's
		// missing-file signal, matched on the combined output below.
		const script = `
const fs = require("fs");
const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
const offset = __OFFSET__;
const limit = __LIMIT__;

if (!fs.existsSync(filePath)) {
	console.error("Error: File not found");
	process.exit(1);
}

const stat = fs.statSync(filePath);
if (stat.size === 0) {
	console.log("${SYSTEM_REMINDER_FILE_EMPTY}");
	process.exit(0);
}

const content = fs.readFileSync(filePath, "utf-8");
const lines = content.split("\\n");
const selected = lines.slice(offset, offset + limit);

for (let i = 0; i < selected.length; i++) {
	const lineNum = (offset + i + 1).toString().padStart(6, " ");
	console.log(lineNum + "\\t" + selected[i]);
}
`;
		const result = await this.execute(buildNodeScript(script, {
			PATH: pathB64,
			OFFSET: String(offset),
			LIMIT: String(limit)
		}));
		if (result.exitCode !== 0) {
			if (result.output.includes("Error: File not found")) return FILE_NOT_FOUND(filePath);
			return result.output.trim();
		}
		return result.output.trimEnd();
	}
	/**
	* Read raw file data.
	*
	* Returns the file as an array of lines plus created/modified timestamps
	* (from stat birthtime/mtime).
	*
	* @throws Error if the file does not exist or the script output is not
	*   valid JSON (e.g. output was truncated by the transport)
	*/
	async readRaw(filePath) {
		const pathB64 = toBase64(filePath);
		const result = await this.execute(buildNodeScript(`
const fs = require("fs");
const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");

if (!fs.existsSync(filePath)) {
	console.error("Error: File not found");
	process.exit(1);
}

const stat = fs.statSync(filePath);
const content = fs.readFileSync(filePath, "utf-8");

console.log(JSON.stringify({
	content: content.split("\\n"),
	created_at: stat.birthtime.toISOString(),
	modified_at: stat.mtime.toISOString()
}));
`, { PATH: pathB64 }));
		if (result.exitCode !== 0) throw new Error(`File '${filePath}' not found`);
		try {
			const data = JSON.parse(result.output.trim());
			return {
				content: data.content,
				created_at: data.created_at,
				modified_at: data.modified_at
			};
		} catch {
			throw new Error(`Failed to parse file data for '${filePath}'`);
		}
	}
	/**
	* Write content to a new file.
	*
	* Refuses to overwrite: an existing file at filePath returns a
	* { success: false } result telling the caller to use edit instead.
	* Parent directories are created recursively as needed.
	*/
	async write(filePath, content) {
		const pathB64 = toBase64(filePath);
		const contentB64 = toBase64(content);
		const result = await this.execute(buildNodeScript(`
const fs = require("fs");
const path = require("path");

const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
const content = Buffer.from("__CONTENT__", "base64").toString("utf-8");

if (fs.existsSync(filePath)) {
	console.error("Error: File already exists");
	process.exit(1);
}

const dir = path.dirname(filePath);
if (dir && dir !== ".") {
	fs.mkdirSync(dir, { recursive: true });
}

fs.writeFileSync(filePath, content, "utf-8");
`, {
			PATH: pathB64,
			CONTENT: contentB64
		}));
		if (result.exitCode !== 0) {
			// Distinguish the overwrite refusal from other write failures.
			if (result.output.includes("already exists")) return {
				success: false,
				error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.`
			};
			return {
				success: false,
				error: result.output.trim() || `Failed to write '${filePath}'`
			};
		}
		return {
			success: true,
			path: filePath
		};
	}
	/**
	* Edit a file by replacing string occurrences.
	*
	* Replacement is literal (split/join or first-match String.replace), not
	* regex. The embedded script's exit-code protocol:
	*   1 = file not found, 2 = oldString absent,
	*   3 = oldString ambiguous (multiple matches without replaceAll).
	* On success the script prints the match count, surfaced as `occurrences`.
	*/
	async edit(filePath, oldString, newString, replaceAll = false) {
		const pathB64 = toBase64(filePath);
		const oldB64 = toBase64(oldString);
		const newB64 = toBase64(newString);
		const result = await this.execute(buildNodeScript(`
const fs = require("fs");

const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
const oldStr = Buffer.from("__OLD__", "base64").toString("utf-8");
const newStr = Buffer.from("__NEW__", "base64").toString("utf-8");
const replaceAll = __REPLACE_ALL__;

if (!fs.existsSync(filePath)) {
	console.error("Error: File not found");
	process.exit(1);
}

let content = fs.readFileSync(filePath, "utf-8");
const count = content.split(oldStr).length - 1;

if (count === 0) {
	process.exit(2);
}
if (count > 1 && !replaceAll) {
	process.exit(3);
}

if (replaceAll) {
	content = content.split(oldStr).join(newStr);
} else {
	content = content.replace(oldStr, newStr);
}

fs.writeFileSync(filePath, content, "utf-8");
console.log(count);
`, {
			PATH: pathB64,
			OLD: oldB64,
			NEW: newB64,
			REPLACE_ALL: String(replaceAll)
		}));
		if (result.exitCode === 1) return {
			success: false,
			error: FILE_NOT_FOUND(filePath)
		};
		if (result.exitCode === 2) return {
			success: false,
			error: STRING_NOT_FOUND(filePath, oldString)
		};
		if (result.exitCode === 3) return {
			success: false,
			error: `Error: String '${oldString}' appears multiple times. Use replaceAll=true to replace all occurrences.`
		};
		return {
			success: true,
			path: filePath,
			// Fallback to 1 if the printed count is missing/unparseable.
			occurrences: parseInt(result.output.trim(), 10) || 1
		};
	}
	/**
	* Search for pattern in files.
	*
	* `pattern` is matched as a literal substring (String.includes), not a
	* regex. Files under `path` are walked recursively, optionally filtered by
	* a simplified glob (`*`, `**`, `?`). Unreadable files are skipped.
	*
	* @returns Array of { path, line, text } matches (line is 1-based)
	*/
	async grepRaw(pattern, path = "/", glob$1 = null) {
		const patternB64 = toBase64(pattern);
		const pathB64 = toBase64(path);
		const globB64 = glob$1 ? toBase64(glob$1) : toBase64("**/*");
		const result = await this.execute(buildNodeScript(`
const fs = require("fs");
const path = require("path");

const pattern = Buffer.from("__PATTERN__", "base64").toString("utf-8");
const basePath = Buffer.from("__PATH__", "base64").toString("utf-8");
const fileGlob = Buffer.from("__GLOB__", "base64").toString("utf-8");

function walkDir(dir, baseDir) {
	const results = [];
	try {
		const entries = fs.readdirSync(dir, { withFileTypes: true });
		for (const entry of entries) {
			const fullPath = path.join(dir, entry.name);
			const relativePath = path.relative(baseDir, fullPath);

			if (entry.isDirectory()) {
				results.push(...walkDir(fullPath, baseDir));
			} else {
				results.push(relativePath);
			}
		}
	} catch (e) {}
	return results;
}

function matchGlob(filepath, pattern) {
	if (!pattern || pattern === "**/*") return true;
	const regex = pattern
		.replace(/\\./g, "\\\\.")
		.replace(/\\*\\*/g, "<<<GLOBSTAR>>>")
		.replace(/\\*/g, "[^/]*")
		.replace(/<<<GLOBSTAR>>>/g, ".*")
		.replace(/\\?/g, ".");
	return new RegExp("^" + regex + "$").test(filepath);
}

const allFiles = walkDir(basePath, basePath);
const files = allFiles.filter(f => matchGlob(f, fileGlob)).sort();

for (const file of files) {
	try {
		const fullPath = path.join(basePath, file);
		const content = fs.readFileSync(fullPath, "utf-8");
		const lines = content.split("\\n");

		for (let i = 0; i < lines.length; i++) {
			if (lines[i].includes(pattern)) {
				console.log(JSON.stringify({
					path: file,
					line: i + 1,
					text: lines[i]
				}));
			}
		}
	} catch (e) {}
}
`, {
			PATTERN: patternB64,
			PATH: pathB64,
			GLOB: globB64
		}));
		// Parse the NDJSON match records; ignore blank or malformed lines.
		const matches = [];
		for (const line of result.output.trim().split("\n")) {
			if (!line) continue;
			try {
				const data = JSON.parse(line);
				matches.push({
					path: data.path,
					line: data.line,
					text: data.text
				});
			} catch {}
		}
		return matches;
	}
	/**
	* Find files matching glob pattern.
	*
	* Uses the same recursive walk and simplified glob translation
	* (`*` → any non-slash run, `**` → any run, `?` → any char) as grepRaw,
	* then stats each match. Results are sorted by relative path.
	*
	* @returns Array of { path, is_dir, size, modified_at } records
	*/
	async globInfo(pattern, path = "/") {
		const pathB64 = toBase64(path);
		const patternB64 = toBase64(pattern);
		const result = await this.execute(buildNodeScript(`
const fs = require("fs");
const path = require("path");

const basePath = Buffer.from("__PATH__", "base64").toString("utf-8");
const pattern = Buffer.from("__PATTERN__", "base64").toString("utf-8");

function walkDir(dir, baseDir) {
	const results = [];
	try {
		const entries = fs.readdirSync(dir, { withFileTypes: true });
		for (const entry of entries) {
			const fullPath = path.join(dir, entry.name);
			const relativePath = path.relative(baseDir, fullPath);

			if (entry.isDirectory()) {
				results.push(...walkDir(fullPath, baseDir));
			} else {
				results.push(relativePath);
			}
		}
	} catch (e) {}
	return results;
}

function matchGlob(filepath, pattern) {
	const regex = pattern
		.replace(/\\./g, "\\\\.")
		.replace(/\\*\\*/g, "<<<GLOBSTAR>>>")
		.replace(/\\*/g, "[^/]*")
		.replace(/<<<GLOBSTAR>>>/g, ".*")
		.replace(/\\?/g, ".");
	return new RegExp("^" + regex + "$").test(filepath);
}

const allFiles = walkDir(basePath, basePath);
const matches = allFiles.filter(f => matchGlob(f, pattern)).sort();

for (const m of matches) {
	try {
		const fullPath = path.join(basePath, m);
		const stat = fs.statSync(fullPath);
		console.log(JSON.stringify({
			path: m,
			is_dir: stat.isDirectory(),
			size: stat.size,
			modified_at: stat.mtime.toISOString()
		}));
	} catch (e) {}
}
`, {
			PATH: pathB64,
			PATTERN: patternB64
		}));
		// Parse the NDJSON file records; ignore blank or malformed lines.
		const infos = [];
		for (const line of result.output.trim().split("\n")) {
			if (!line) continue;
			try {
				const data = JSON.parse(line);
				infos.push({
					path: data.path,
					is_dir: data.is_dir,
					size: data.size,
					modified_at: data.modified_at
				});
			} catch {}
		}
		return infos;
	}
};
|
|
3344
|
+
|
|
3345
|
+
//#endregion
|
|
3346
|
+
//#region src/backends/local-sandbox.ts
|
|
3347
|
+
/**
|
|
3348
|
+
* LocalSandbox: Execute commands locally using child_process.
|
|
3349
|
+
*
|
|
3350
|
+
* Useful for local development and testing without cloud sandboxes.
|
|
3351
|
+
* All file operations are inherited from BaseSandbox and executed
|
|
3352
|
+
* via shell commands in the local filesystem.
|
|
3353
|
+
*/
|
|
3354
|
+
/**
|
|
3355
|
+
* Local sandbox that executes commands using Node.js child_process.
|
|
3356
|
+
*
|
|
3357
|
+
* All commands are executed in a bash shell with the specified working directory.
|
|
3358
|
+
* Inherits all file operations (read, write, edit, ls, grep, glob) from BaseSandbox.
|
|
3359
|
+
*
|
|
3360
|
+
* @example Basic usage
|
|
3361
|
+
* ```typescript
|
|
3362
|
+
* import { LocalSandbox } from 'deepagentsdk';
|
|
3363
|
+
*
|
|
3364
|
+
* const sandbox = new LocalSandbox({ cwd: './workspace' });
|
|
3365
|
+
*
|
|
3366
|
+
* // Execute commands
|
|
3367
|
+
* const result = await sandbox.execute('ls -la');
|
|
3368
|
+
* console.log(result.output);
|
|
3369
|
+
*
|
|
3370
|
+
* // File operations
|
|
3371
|
+
* await sandbox.write('./src/index.ts', 'console.log("hello")');
|
|
3372
|
+
* const content = await sandbox.read('./src/index.ts');
|
|
3373
|
+
* ```
|
|
3374
|
+
*
|
|
3375
|
+
* @example With timeout and environment
|
|
3376
|
+
* ```typescript
|
|
3377
|
+
* const sandbox = new LocalSandbox({
|
|
3378
|
+
* cwd: './workspace',
|
|
3379
|
+
* timeout: 60000, // 60 seconds
|
|
3380
|
+
* env: {
|
|
3381
|
+
* NODE_ENV: 'development',
|
|
3382
|
+
* DEBUG: '*',
|
|
3383
|
+
* },
|
|
3384
|
+
* });
|
|
3385
|
+
* ```
|
|
3386
|
+
*
|
|
3387
|
+
* @example Error handling
|
|
3388
|
+
* ```typescript
|
|
3389
|
+
* const result = await sandbox.execute('npm test');
|
|
3390
|
+
* if (result.exitCode !== 0) {
|
|
3391
|
+
* console.error('Tests failed:', result.output);
|
|
3392
|
+
* }
|
|
3393
|
+
* ```
|
|
3394
|
+
*/
|
|
3395
|
+
var LocalSandbox = class extends BaseSandbox {
	/** Working directory commands run in. */
	cwd;
	/** Per-command timeout in milliseconds. */
	timeout;
	/** Extra environment variables merged over process.env. */
	env;
	/** Maximum number of captured output characters per command. */
	maxOutputSize;
	/** Lazily generated unique id, exposed via the `id` getter. */
	_id;
	/**
	* Create a new LocalSandbox instance.
	*
	* @param options - Configuration options for the sandbox
	* @param options.cwd - Working directory (default: process.cwd())
	* @param options.timeout - Command timeout in ms (default: 30000)
	* @param options.env - Extra environment variables (default: {})
	* @param options.maxOutputSize - Output cap in characters (default: 1 MiB)
	*/
	constructor(options = {}) {
		super();
		this.cwd = options.cwd || process.cwd();
		this.timeout = options.timeout || 3e4;
		this.env = options.env || {};
		this.maxOutputSize = options.maxOutputSize || 1024 * 1024;
		this._id = `local-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
	}
	/**
	* Unique identifier for this sandbox instance.
	* Format: `local-{timestamp}-{random}`
	*/
	get id() {
		return this._id;
	}
	/**
	* Execute a shell command in the local filesystem.
	*
	* Commands run under `bash -c` with the configured working directory and
	* environment. stdout and stderr are interleaved into a single `output`
	* string, capped at maxOutputSize (with `truncated` set when the cap cuts
	* anything off).
	*
	* Fixes over the naive implementation:
	* - `close` reports a null exit code when the child is killed by a signal
	*   (e.g. the spawn `timeout` sends SIGTERM); normalize that to 1 so
	*   callers' `exitCode !== 0` error paths still work.
	* - The chunk that crosses maxOutputSize is sliced instead of appended
	*   whole, so the cap is actually enforced and `truncated` is accurate.
	* - A settled flag guards against resolving twice when both `error` and
	*   `close` fire for the same child.
	*
	* @param command - Shell command to execute
	* @returns ExecuteResponse with output, exit code, and truncation status
	*/
	async execute(command) {
		return new Promise((resolve) => {
			const child = spawn("bash", ["-c", command], {
				cwd: this.cwd,
				env: {
					...process.env,
					...this.env
				},
				timeout: this.timeout
			});
			let output = "";
			let truncated = false;
			let settled = false;
			const finish = (response) => {
				// Both "error" and "close" can fire; only the first one wins.
				if (settled) return;
				settled = true;
				resolve(response);
			};
			const collect = (data) => {
				const remaining = this.maxOutputSize - output.length;
				if (remaining <= 0) {
					truncated = true;
					return;
				}
				const text = data.toString();
				if (text.length > remaining) {
					output += text.slice(0, remaining);
					truncated = true;
				} else output += text;
			};
			child.stdout.on("data", collect);
			child.stderr.on("data", collect);
			child.on("close", (code) => {
				finish({
					output,
					// code is null when the child was terminated by a signal
					// (including the spawn timeout); report a failure code.
					exitCode: code ?? 1,
					truncated
				});
			});
			child.on("error", (err) => {
				finish({
					output: `Error: ${err.message}`,
					exitCode: 1,
					truncated: false
				});
			});
		});
	}
};
|
|
3474
|
+
|
|
3475
|
+
//#endregion
|
|
3476
|
+
//#region src/utils/model-parser.ts
|
|
3477
|
+
/**
|
|
3478
|
+
* Utility to parse model strings into LanguageModel instances.
|
|
3479
|
+
* Provides backward compatibility for CLI and other string-based model specifications.
|
|
3480
|
+
*/
|
|
3481
|
+
/**
|
|
3482
|
+
* Parse a model string into a LanguageModel instance.
|
|
3483
|
+
*
|
|
3484
|
+
* Supports formats like:
|
|
3485
|
+
* - "anthropic/claude-sonnet-4-20250514"
|
|
3486
|
+
* - "openai/gpt-4o"
|
|
3487
|
+
* - "claude-sonnet-4-20250514" (defaults to Anthropic)
|
|
3488
|
+
*
|
|
3489
|
+
* @param modelString - The model string to parse
|
|
3490
|
+
* @returns A LanguageModel instance
|
|
3491
|
+
*
|
|
3492
|
+
* @example
|
|
3493
|
+
* ```typescript
|
|
3494
|
+
* const model = parseModelString("anthropic/claude-sonnet-4-20250514");
|
|
3495
|
+
* const agent = createDeepAgent({ model });
|
|
3496
|
+
* ```
|
|
3497
|
+
*/
|
|
3498
|
+
/**
* Parse a model string into a LanguageModel instance.
*
* Supports formats like:
* - "anthropic/claude-sonnet-4-20250514"
* - "openai/gpt-4o"
* - "claude-sonnet-4-20250514" (defaults to Anthropic)
*
* Splits only on the FIRST "/" so model identifiers that themselves contain
* slashes (e.g. fine-tune or router-style ids) keep their full name instead
* of being silently cut at the second slash.
*
* @param modelString - The model string to parse
* @returns A LanguageModel instance
*/
function parseModelString(modelString) {
	const slash = modelString.indexOf("/");
	const provider = slash === -1 ? modelString : modelString.slice(0, slash);
	const modelName = slash === -1 ? void 0 : modelString.slice(slash + 1);
	if (provider === "anthropic") return anthropic(modelName || "claude-sonnet-4-20250514");
	else if (provider === "openai") return openai(modelName || "gpt-5-mini");
	// Unknown or absent provider prefix: treat the whole string as an
	// Anthropic model id (historical fallback behavior).
	return anthropic(modelString);
}
|
|
3504
|
+
|
|
3505
|
+
//#endregion
|
|
3506
|
+
//#region src/checkpointer/file-saver.ts
|
|
3507
|
+
/**
 * File-based checkpoint saver.
 *
 * Stores checkpoints as JSON files in a directory. Each thread gets
 * its own file named `{threadId}.json`.
 *
 * @example
 * ```typescript
 * const saver = new FileSaver({ dir: './.checkpoints' });
 * const agent = createDeepAgent({
 *   model: anthropic('claude-sonnet-4-20250514'),
 *   checkpointer: saver,
 * });
 * ```
 */
var FileSaver = class {
	// Directory holding the `{threadId}.json` checkpoint files.
	dir;
	/**
	 * @param options - `options.dir` is the storage directory; created
	 *   recursively if it does not already exist.
	 */
	constructor(options) {
		this.dir = options.dir;
		if (!existsSync(this.dir)) mkdirSync(this.dir, { recursive: true });
	}
	/**
	 * Map a thread id to its on-disk path. Characters outside
	 * [a-zA-Z0-9_-] are replaced with "_" so ids cannot escape the
	 * directory or produce invalid filenames.
	 */
	getFilePath(threadId) {
		const safeId = threadId.replace(/[^a-zA-Z0-9_-]/g, "_");
		return join(this.dir, `${safeId}.json`);
	}
	/** Persist a checkpoint, stamping `updatedAt` with the current time. */
	async save(checkpoint) {
		const filePath = this.getFilePath(checkpoint.threadId);
		const data = {
			...checkpoint,
			updatedAt: new Date().toISOString()
		};
		writeFileSync(filePath, JSON.stringify(data, null, 2), "utf-8");
	}
	/**
	 * Load a checkpoint for a thread.
	 * @returns the parsed checkpoint, or `undefined` when the file is
	 *   missing or unreadable/corrupt (best-effort: never throws).
	 */
	async load(threadId) {
		const filePath = this.getFilePath(threadId);
		if (!existsSync(filePath)) return;
		try {
			const content = readFileSync(filePath, "utf-8");
			return JSON.parse(content);
		} catch {
			// Corrupt or unreadable JSON: report "no checkpoint" rather than crash.
			return;
		}
	}
	/** List the thread ids that have a saved checkpoint. */
	async list() {
		if (!existsSync(this.dir)) return [];
		// Strip only the TRAILING ".json" extension. `f.replace(".json", "")`
		// would remove the first occurrence and corrupt any filename whose
		// stem itself contains ".json" (e.g. "a.json.backup.json").
		return readdirSync(this.dir).filter((f) => f.endsWith(".json")).map((f) => f.slice(0, -".json".length));
	}
	/** Delete a thread's checkpoint file if it exists (no-op otherwise). */
	async delete(threadId) {
		const filePath = this.getFilePath(threadId);
		if (existsSync(filePath)) unlinkSync(filePath);
	}
	/** @returns whether a checkpoint file exists for the thread. */
	async exists(threadId) {
		return existsSync(this.getFilePath(threadId));
	}
};
|
|
3565
|
+
|
|
3566
|
+
//#endregion
|
|
3567
|
+
export { grepMatchesFromFiles as $, createGrepTool as A, DEFAULT_EVICTION_TOKEN_LIMIT as B, htmlToMarkdown as C, createEditFileTool as D, web_search as E, glob as F, shouldEvict as G, estimateTokens as H, grep as I, createFileData as J, StateBackend as K, ls as L, createReadFileTool as M, createWriteFileTool as N, createFilesystemTools as O, edit_file as P, globSearchFiles as Q, read_file as R, fetch_url as S, init_limits as St, init_web as T, evictToolResult as U, createToolResultWrapper as V, init_eviction as W, formatContentWithLineNumbers as X, fileDataToString as Y, formatReadResponse as Z, execute as _, DEFAULT_EVICTION_TOKEN_LIMIT$1 as _t, DeepAgent as a, createTodosTool as at, createWebSearchTool as b, DEFAULT_SUMMARIZATION_THRESHOLD$1 as bt, DEFAULT_SUMMARIZATION_THRESHOLD as c, DEFAULT_GENERAL_PURPOSE_DESCRIPTION as ct, summarizeIfNeeded as d, FILESYSTEM_SYSTEM_PROMPT as dt, performStringReplacement as et, hasDanglingToolCalls as f, TASK_SYSTEM_PROMPT as ft, createExecuteToolFromBackend as g, CONTEXT_WINDOW as gt, createExecuteTool as h, isSandboxBackend as ht, BaseSandbox as i, init_errors as it, createLsTool as j, createGlobTool as k, estimateMessagesTokens as l, DEFAULT_SUBAGENT_PROMPT as lt, createSubagentTool as m, getTaskToolDescription as mt, parseModelString as n, FILE_ALREADY_EXISTS as nt, createDeepAgent as o, write_todos as ot, patchToolCalls as p, TODO_SYSTEM_PROMPT as pt, checkEmptyContent as q, LocalSandbox as r, FILE_NOT_FOUND as rt, DEFAULT_KEEP_MESSAGES as s, BASE_PROMPT as st, FileSaver as t, updateFileData as tt, needsSummarization as u, EXECUTE_SYSTEM_PROMPT as ut, createFetchUrlTool as v, DEFAULT_KEEP_MESSAGES$1 as vt, http_request as w, createWebTools as x, MAX_FILE_SIZE_MB as xt, createHttpRequestTool as y, DEFAULT_READ_LIMIT as yt, write_file as z };
|
|
3568
|
+
//# sourceMappingURL=file-saver-Hj5so3dV.mjs.map
|