deepagentsdk 0.12.0 → 0.14.0
This diff shows the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/dist/adapters/elements/index.cjs +478 -288
- package/dist/adapters/elements/index.cjs.map +1 -1
- package/dist/adapters/elements/index.d.cts +107 -172
- package/dist/adapters/elements/index.d.mts +107 -172
- package/dist/adapters/elements/index.mjs +471 -284
- package/dist/adapters/elements/index.mjs.map +1 -1
- package/dist/{types-4g9UvXal.d.mts → agent-D0bKkNI-.d.mts} +352 -3
- package/dist/{types-IulnvhFg.d.cts → agent-DwAj5emJ.d.cts} +352 -3
- package/dist/{chunk-CbDLau6x.cjs → chunk-C5azi7Hr.cjs} +33 -0
- package/dist/cli/index.cjs +12 -12
- package/dist/cli/index.cjs.map +1 -1
- package/dist/cli/index.mjs +2 -2
- package/dist/cli/index.mjs.map +1 -1
- package/dist/{agent-Cuks-Idh.cjs → file-saver-BYPKakT4.cjs} +799 -205
- package/dist/file-saver-BYPKakT4.cjs.map +1 -0
- package/dist/{agent-CrH-He58.mjs → file-saver-Hj5so3dV.mjs} +793 -199
- package/dist/file-saver-Hj5so3dV.mjs.map +1 -0
- package/dist/index.cjs +83 -73
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +5 -353
- package/dist/index.d.mts +5 -353
- package/dist/index.mjs +13 -3
- package/dist/index.mjs.map +1 -1
- package/dist/{load-B6CA5js_.mjs → load-BBYEnMwz.mjs} +1 -1
- package/dist/{load-B6CA5js_.mjs.map → load-BBYEnMwz.mjs.map} +1 -1
- package/dist/{load-94gjHorc.mjs → load-BDxe6Cet.mjs} +1 -1
- package/dist/{load-79a2H4m0.cjs → load-BrRAKlO6.cjs} +2 -2
- package/dist/{load-79a2H4m0.cjs.map → load-BrRAKlO6.cjs.map} +1 -1
- package/dist/load-DqllBbDc.cjs +4 -0
- package/package.json +1 -1
- package/dist/agent-CrH-He58.mjs.map +0 -1
- package/dist/agent-Cuks-Idh.cjs.map +0 -1
- package/dist/file-saver-BJCqMIb5.mjs +0 -655
- package/dist/file-saver-BJCqMIb5.mjs.map +0 -1
- package/dist/file-saver-C6O2LAvg.cjs +0 -679
- package/dist/file-saver-C6O2LAvg.cjs.map +0 -1
- package/dist/load-C2qVmZMp.cjs +0 -3
@@ -6,139 +6,64 @@ import { tavily } from "@tavily/core";
 import TurndownService from "turndown";
 import { Readability } from "@mozilla/readability";
 import { JSDOM } from "jsdom";
+import { spawn } from "child_process";
+import { anthropic } from "@ai-sdk/anthropic";
+import { openai } from "@ai-sdk/openai";
+import { existsSync, mkdirSync, readFileSync, readdirSync, unlinkSync, writeFileSync } from "node:fs";
+import { join } from "node:path";
+
+//#region rolldown:runtime
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __esmMin = (fn, res) => () => (fn && (res = fn(fn = 0)), res);
+var __exportAll = (all, symbols) => {
+  let target = {};
+  for (var name in all) {
+    __defProp(target, name, {
+      get: all[name],
+      enumerable: true
+    });
+  }
+  if (symbols) {
+    __defProp(target, Symbol.toStringTag, { value: "Module" });
+  }
+  return target;
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
+      key = keys[i];
+      if (!__hasOwnProp.call(to, key) && key !== except) {
+        __defProp(to, key, {
+          get: ((k) => from[k]).bind(null, key),
+          enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
+        });
+      }
+    }
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __hasOwnProp.call(mod, "module.exports") ? mod["module.exports"] : __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
+//#endregion
 //#region src/constants/limits.ts
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- *
- * @default 20000
- * @see {@link ../utils/eviction | evictToolResult}
- */
-const DEFAULT_EVICTION_TOKEN_LIMIT$1 = 2e4;
-/**
- * Default threshold for message summarization.
- *
- * When the estimated token count of messages exceeds this threshold, the system
- * automatically summarizes older messages to stay within context limits. This
- * helps maintain conversation continuity while reducing token usage.
- *
- * @default 170000
- * @see {@link ../utils/summarization | summarizeIfNeeded}
- */
-const DEFAULT_SUMMARIZATION_THRESHOLD$1 = 17e4;
-/**
- * Maximum context window size for Claude models.
- *
- * This represents the maximum number of tokens that can be processed in a single
- * conversation. Used for calculating token usage percentages and determining when
- * summarization is needed.
- *
- * @default 200000
- * @see {@link ../utils/summarization | estimateMessagesTokens}
- */
-const CONTEXT_WINDOW = 2e5;
-/**
- * Default number of recent messages to keep during summarization.
- *
- * When summarization is triggered, this many of the most recent messages are
- * preserved verbatim while older messages are summarized. This ensures recent
- * context is immediately available to the agent.
- *
- * @default 6
- */
-const DEFAULT_KEEP_MESSAGES$1 = 6;
-/**
- * Default maximum number of reasoning steps for the main agent.
- *
- * The agent will stop after reaching this many steps to prevent infinite loops
- * or excessive token usage. Each step represents one tool invocation cycle.
- *
- * @default 100
- */
-const DEFAULT_MAX_STEPS = 100;
-/**
- * Default maximum number of reasoning steps for subagents.
- *
- * Subagents are given a lower step limit than the main agent to prevent them
- * from consuming too many resources. This ensures the parent agent maintains
- * control over the overall task.
- *
- * @default 50
- * @see {@link ../tools/subagent | createTaskTool}
- */
-const DEFAULT_SUBAGENT_MAX_STEPS = 50;
-/**
- * Default maximum number of lines to read from a file.
- *
- * The read_file tool defaults to reading this many lines to prevent loading
- * extremely large files into context. Can be overridden per-read operation.
- *
- * @default 2000
- * @see {@link ../tools/filesystem | createReadFileTool}
- */
-const DEFAULT_READ_LIMIT = 2e3;
-/**
- * Maximum line length before content is considered invalid.
- *
- * Lines exceeding this length may indicate minified code, binary content, or
- * other data that should not be processed as text. Used for validation.
- *
- * @default 10000
- */
-const MAX_LINE_LENGTH = 1e4;
-/**
- * Maximum file size in megabytes for file operations.
- *
- * Files larger than this size will be rejected to prevent memory issues and
- * excessive token usage. This is a soft limit that can be adjusted for specific
- * use cases.
- *
- * @default 10
- */
-const MAX_FILE_SIZE_MB = 10;
-/**
- * Default timeout for network requests in seconds.
- *
- * Used by web tools (http_request, fetch_url) to prevent hanging indefinitely
- * on slow or unresponsive servers. Can be overridden per-request.
- *
- * @default 30
- * @see {@link ../tools/web | createHttpRequestTool}
- */
-const DEFAULT_TIMEOUT_SECONDS = 30;
-/**
- * Default timeout in milliseconds (derived from DEFAULT_TIMEOUT_SECONDS).
- *
- * Provided for convenience when working with APIs that expect milliseconds
- * instead of seconds.
- *
- * @default 30000 (30 seconds)
- */
-const DEFAULT_TIMEOUT_MS = DEFAULT_TIMEOUT_SECONDS * 1e3;
-/**
- * Width for line number formatting in file read operations.
- *
- * When displaying file content with line numbers, this specifies the minimum
- * width for the line number column. Ensures consistent alignment across
- * different file sizes.
- *
- * @default 6
- * @see {@link ../backends/utils | formatFileContent}
- */
-const LINE_NUMBER_WIDTH = 6;
+var DEFAULT_EVICTION_TOKEN_LIMIT$1, DEFAULT_SUMMARIZATION_THRESHOLD$1, CONTEXT_WINDOW, DEFAULT_KEEP_MESSAGES$1, DEFAULT_MAX_STEPS, DEFAULT_SUBAGENT_MAX_STEPS, DEFAULT_READ_LIMIT, MAX_LINE_LENGTH, MAX_FILE_SIZE_MB, DEFAULT_TIMEOUT_SECONDS, DEFAULT_TIMEOUT_MS, LINE_NUMBER_WIDTH;
+var init_limits = __esmMin((() => {
+  DEFAULT_EVICTION_TOKEN_LIMIT$1 = 2e4;
+  DEFAULT_SUMMARIZATION_THRESHOLD$1 = 17e4;
+  CONTEXT_WINDOW = 2e5;
+  DEFAULT_KEEP_MESSAGES$1 = 6;
+  DEFAULT_MAX_STEPS = 100;
+  DEFAULT_SUBAGENT_MAX_STEPS = 50;
+  DEFAULT_READ_LIMIT = 2e3;
+  MAX_LINE_LENGTH = 1e4;
+  MAX_FILE_SIZE_MB = 10;
+  DEFAULT_TIMEOUT_SECONDS = 30;
+  DEFAULT_TIMEOUT_MS = DEFAULT_TIMEOUT_SECONDS * 1e3;
+  LINE_NUMBER_WIDTH = 6;
+}));
 
 //#endregion
 //#region src/utils/events.ts
@@ -300,6 +225,7 @@ function createCheckpointLoadedEvent(threadId, step, messagesCount) {
     messagesCount
   };
 }
+var init_events = __esmMin((() => {}));
 
 //#endregion
 //#region src/types/backend.ts
@@ -496,6 +422,7 @@ Skills provide expert knowledge for specialized tasks. Always read the full skil
 /**
  * Todo list tool for task planning and tracking.
  */
+init_events();
 const TodoItemSchema = z.object({
   id: z.string().describe("Unique identifier for the todo item"),
   content: z.string().max(100).describe("The description/content of the todo item (max 100 chars)"),
@@ -554,23 +481,24 @@ const write_todos = createTodosTool;
 
 //#endregion
 //#region src/constants/errors.ts
-
-
-
-
-
-
-
-
-
-
-const SYSTEM_REMINDER_FILE_EMPTY = "System reminder: File exists but has empty contents";
+var FILE_NOT_FOUND, FILE_ALREADY_EXISTS, STRING_NOT_FOUND, INVALID_REGEX, WEB_SEARCH_ERROR, REQUEST_TIMEOUT, SYSTEM_REMINDER_FILE_EMPTY;
+var init_errors = __esmMin((() => {
+  FILE_NOT_FOUND = (path) => `Error: File '${path}' not found`;
+  FILE_ALREADY_EXISTS = (path) => `Cannot write to ${path} because it already exists. Read and then make an edit, or write to a new path.`;
+  STRING_NOT_FOUND = (path, string) => `Error: String not found in file: '${path}'\n\n${string}`;
+  INVALID_REGEX = (message) => `Invalid regex pattern: ${message}`;
+  WEB_SEARCH_ERROR = (message) => `Web search error: ${message}`;
+  REQUEST_TIMEOUT = (timeout) => `Request timed out after ${timeout} seconds`;
+  SYSTEM_REMINDER_FILE_EMPTY = "System reminder: File exists but has empty contents";
+}));
 
 //#endregion
 //#region src/backends/utils.ts
 /**
  * Shared utility functions for memory backend implementations.
  */
+init_errors();
+init_limits();
 const EMPTY_CONTENT_WARNING = SYSTEM_REMINDER_FILE_EMPTY;
 /**
  * Format file content with line numbers (cat -n style).
@@ -735,6 +663,7 @@ function grepMatchesFromFiles(files, pattern, path = null, glob$1 = null) {
 
 //#endregion
 //#region src/backends/state.ts
+init_errors();
 /**
  * Backend that stores files in shared state (ephemeral).
  *
@@ -904,15 +833,6 @@ var StateBackend = class {
 //#endregion
 //#region src/utils/eviction.ts
 /**
- * Default token limit before evicting a tool result.
- * Approximately 20,000 tokens (~80KB of text).
- */
-const DEFAULT_EVICTION_TOKEN_LIMIT = DEFAULT_EVICTION_TOKEN_LIMIT$1;
-/**
- * Approximate characters per token (rough estimate).
- */
-const CHARS_PER_TOKEN = 4;
-/**
  * Sanitize a tool call ID for use as a filename.
  * Removes or replaces characters that are invalid in file paths.
  */
@@ -996,12 +916,20 @@ function createToolResultWrapper(backend, state, tokenLimit = DEFAULT_EVICTION_T
     })).content;
   };
 }
+var DEFAULT_EVICTION_TOKEN_LIMIT, CHARS_PER_TOKEN;
+var init_eviction = __esmMin((() => {
+  init_limits();
+  DEFAULT_EVICTION_TOKEN_LIMIT = DEFAULT_EVICTION_TOKEN_LIMIT$1;
+  CHARS_PER_TOKEN = 4;
+}));
 
 //#endregion
 //#region src/tools/filesystem.ts
 /**
  * Filesystem tools for virtual file operations.
  */
+init_eviction();
+init_events();
 const LS_TOOL_DESCRIPTION = "List files and directories in a directory. Paths are relative to the working directory.";
 const READ_FILE_TOOL_DESCRIPTION = "Read the contents of a file. Paths are relative to the working directory.";
 const WRITE_FILE_TOOL_DESCRIPTION = "Write content to a new file. Returns an error if the file already exists. Paths are relative to the working directory.";
@@ -1324,6 +1252,16 @@ function wrapToolsWithApproval(tools, interruptOn, onApprovalRequest) {
  * Web tools for search and HTTP requests.
  * Based on LangChain DeepAgents implementation.
  */
+var web_exports = /* @__PURE__ */ __exportAll({
+  createFetchUrlTool: () => createFetchUrlTool,
+  createHttpRequestTool: () => createHttpRequestTool,
+  createWebSearchTool: () => createWebSearchTool,
+  createWebTools: () => createWebTools,
+  fetch_url: () => fetch_url,
+  htmlToMarkdown: () => htmlToMarkdown,
+  http_request: () => http_request,
+  web_search: () => web_search
+});
 /**
  * Helper to resolve backend from factory or instance.
  */
@@ -1352,18 +1290,6 @@ function htmlToMarkdown(html, url) {
   }
 }
 /**
- * Tool description for web_search.
- */
-const WEB_SEARCH_TOOL_DESCRIPTION = `Search the web using Tavily API for current information, news, and documentation.
-
-Returns an array of search results with titles, URLs, relevant excerpts, and relevance scores.
-
-IMPORTANT AGENT INSTRUCTIONS:
-- You MUST synthesize information from search results into a coherent answer
-- NEVER show raw JSON or result objects to the user
-- Cite sources by including URLs in your response
-- If search fails or returns no results, explain this clearly to the user`;
-/**
  * Create the web_search tool.
  */
 function createWebSearchTool(state, options) {
@@ -1411,14 +1337,6 @@ function createWebSearchTool(state, options) {
   });
 }
 /**
- * Tool description for http_request.
- */
-const HTTP_REQUEST_TOOL_DESCRIPTION = `Make HTTP requests to APIs and web services.
-
-Supports GET, POST, PUT, DELETE, PATCH methods with custom headers, query parameters, and request bodies.
-
-Returns structured response with status code, headers, and parsed content (JSON or text).`;
-/**
  * Create the http_request tool.
  */
 function createHttpRequestTool(state, options) {
@@ -1490,19 +1408,6 @@ function createHttpRequestTool(state, options) {
   });
 }
 /**
- * Tool description for fetch_url.
- */
-const FETCH_URL_TOOL_DESCRIPTION = `Fetch web page content and convert HTML to clean Markdown format.
-
-Uses Mozilla Readability to extract main article content and Turndown to convert to Markdown.
-
-Returns the page content as formatted Markdown, suitable for analysis and summarization.
-
-IMPORTANT AGENT INSTRUCTIONS:
-- Use this tool to read documentation, articles, and web pages
-- The content is already cleaned and formatted as Markdown
-- Cite the URL when referencing fetched content`;
-/**
  * Create the fetch_url tool.
  */
 function createFetchUrlTool(state, options) {
@@ -1593,13 +1498,40 @@ function createWebTools(state, options) {
     })
   };
 }
-
-
-
-
-
-
-
+var WEB_SEARCH_TOOL_DESCRIPTION, HTTP_REQUEST_TOOL_DESCRIPTION, FETCH_URL_TOOL_DESCRIPTION, web_search, http_request, fetch_url;
+var init_web = __esmMin((() => {
+  init_eviction();
+  init_errors();
+  init_limits();
+  init_events();
+  WEB_SEARCH_TOOL_DESCRIPTION = `Search the web using Tavily API for current information, news, and documentation.
+
+Returns an array of search results with titles, URLs, relevant excerpts, and relevance scores.
+
+IMPORTANT AGENT INSTRUCTIONS:
+- You MUST synthesize information from search results into a coherent answer
+- NEVER show raw JSON or result objects to the user
+- Cite sources by including URLs in your response
+- If search fails or returns no results, explain this clearly to the user`;
+  HTTP_REQUEST_TOOL_DESCRIPTION = `Make HTTP requests to APIs and web services.
+
+Supports GET, POST, PUT, DELETE, PATCH methods with custom headers, query parameters, and request bodies.
+
+Returns structured response with status code, headers, and parsed content (JSON or text).`;
+  FETCH_URL_TOOL_DESCRIPTION = `Fetch web page content and convert HTML to clean Markdown format.
+
+Uses Mozilla Readability to extract main article content and Turndown to convert to Markdown.
+
+Returns the page content as formatted Markdown, suitable for analysis and summarization.
+
+IMPORTANT AGENT INSTRUCTIONS:
+- Use this tool to read documentation, articles, and web pages
+- The content is already cleaned and formatted as Markdown
+- Cite the URL when referencing fetched content`;
+  web_search = createWebSearchTool;
+  http_request = createHttpRequestTool;
+  fetch_url = createFetchUrlTool;
+}));
 
 //#endregion
 //#region src/tools/execute.ts
@@ -1721,6 +1653,9 @@ const execute = createExecuteTool;
 /**
  * Subagent tool for task delegation using AI SDK v6 ToolLoopAgent.
  */
+init_limits();
+init_events();
+init_web();
 /**
  * Check if a value is a builtin tool creator function.
  */
@@ -2045,6 +1980,8 @@ function hasDanglingToolCalls(messages) {
  * Automatically summarizes older messages when approaching token limits
  * to prevent context overflow while preserving important context.
  */
+init_eviction();
+init_limits();
 /**
  * Default token threshold before triggering summarization.
  * 170k tokens is a safe threshold for most models.
@@ -2171,6 +2108,8 @@ function needsSummarization(messages, tokenThreshold = DEFAULT_SUMMARIZATION_THR
 /**
  * Deep Agent implementation using Vercel AI SDK v6 ToolLoopAgent.
  */
+init_limits();
+init_events();
 /**
  * Build the full system prompt from components.
  */
@@ -2261,14 +2200,21 @@ var DeepAgent = class {
   }
   /**
    * Create web tools if TAVILY_API_KEY is available.
+   * Uses dynamic import to avoid bundling Node.js dependencies in client builds.
    * @private
    */
   createWebToolSet(state, onEvent) {
-
-
-
-
-
+    if (!process.env.TAVILY_API_KEY) return {};
+    try {
+      return (init_web(), __toCommonJS(web_exports)).createWebTools(state, {
+        backend: this.backend,
+        onEvent,
+        toolResultEvictionLimit: this.toolResultEvictionLimit
+      });
+    } catch (error) {
+      console.warn("Web tools not available in this environment:", error);
+      return {};
+    }
   }
   /**
    * Create execute tool if backend is a sandbox.
@@ -2375,7 +2321,7 @@ var DeepAgent = class {
    * Supports both legacy skillsDir and new agentId modes.
    */
   async loadSkills(options) {
-    const { listSkills } = await import("./load-
+    const { listSkills } = await import("./load-BDxe6Cet.mjs");
     this.skillsMetadata = (await listSkills(options.agentId ? { agentId: options.agentId } : { projectSkillsDir: options.skillsDir })).map((s) => ({
       name: s.name,
       description: s.description,
@@ -2970,5 +2916,653 @@ function createDeepAgent(params) {
 }
 
 //#endregion
-
-
+//#region src/backends/sandbox.ts
+init_errors();
+init_limits();
+/**
+ * Encode string to base64 for safe shell transmission.
+ */
+function toBase64(str) {
+  return Buffer.from(str, "utf-8").toString("base64");
+}
+/**
+ * Build a Node.js script command with embedded base64 arguments.
+ * This avoids shell argument parsing issues by embedding values directly in the script.
+ */
+function buildNodeScript(script, args) {
+  let result = script;
+  for (const [key, value] of Object.entries(args)) result = result.replace(new RegExp(`__${key}__`, "g"), value);
+  return `node -e '${result}'`;
+}
+/**
+ * Abstract base class for sandbox backends.
+ *
+ * Implements all file operations using shell commands via execute().
+ * Subclasses only need to implement execute() and id.
+ *
+ * @example Creating a custom sandbox backend
+ * ```typescript
+ * class MyCloudSandbox extends BaseSandbox {
+ *   readonly id = 'my-cloud-123';
+ *
+ *   async execute(command: string): Promise<ExecuteResponse> {
+ *     // Call your cloud provider's API
+ *     const result = await myCloudApi.runCommand(command);
+ *     return {
+ *       output: result.stdout + result.stderr,
+ *       exitCode: result.exitCode,
+ *       truncated: false,
+ *     };
+ *   }
+ * }
+ * ```
+ */
+var BaseSandbox = class {
+  /**
+   * List files and directories in a path.
+   */
+  async lsInfo(path) {
+    const pathB64 = toBase64(path);
+    const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const path = require("path");
+
+const dirPath = Buffer.from("__PATH__", "base64").toString("utf-8");
+
+try {
+  const entries = fs.readdirSync(dirPath, { withFileTypes: true });
+  for (const entry of entries) {
+    const fullPath = path.join(dirPath, entry.name);
+    try {
+      const stat = fs.statSync(fullPath);
+      console.log(JSON.stringify({
+        path: entry.name,
+        is_dir: entry.isDirectory(),
+        size: stat.size,
+        modified_at: stat.mtime.toISOString()
+      }));
+    } catch (e) {}
+  }
+} catch (e) {}
+`, { PATH: pathB64 }));
+    const infos = [];
+    for (const line of result.output.trim().split("\n")) {
+      if (!line) continue;
+      try {
+        const data = JSON.parse(line);
+        infos.push({
+          path: data.path,
+          is_dir: data.is_dir,
+          size: data.size,
+          modified_at: data.modified_at
+        });
+      } catch {}
+    }
+    return infos;
+  }
+  /**
+   * Read file content with line numbers.
+   */
+  async read(filePath, offset = 0, limit = DEFAULT_READ_LIMIT) {
+    const pathB64 = toBase64(filePath);
+    const script = `
+const fs = require("fs");
+const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const offset = __OFFSET__;
+const limit = __LIMIT__;
+
+if (!fs.existsSync(filePath)) {
+  console.error("Error: File not found");
+  process.exit(1);
+}
+
+const stat = fs.statSync(filePath);
+if (stat.size === 0) {
+  console.log("${SYSTEM_REMINDER_FILE_EMPTY}");
+  process.exit(0);
+}
+
+const content = fs.readFileSync(filePath, "utf-8");
+const lines = content.split("\\n");
+const selected = lines.slice(offset, offset + limit);
+
+for (let i = 0; i < selected.length; i++) {
+  const lineNum = (offset + i + 1).toString().padStart(6, " ");
+  console.log(lineNum + "\\t" + selected[i]);
+}
+`;
+    const result = await this.execute(buildNodeScript(script, {
+      PATH: pathB64,
+      OFFSET: String(offset),
+      LIMIT: String(limit)
+    }));
+    if (result.exitCode !== 0) {
+      if (result.output.includes("Error: File not found")) return FILE_NOT_FOUND(filePath);
+      return result.output.trim();
+    }
+    return result.output.trimEnd();
+  }
+  /**
+   * Read raw file data.
+   */
+  async readRaw(filePath) {
+    const pathB64 = toBase64(filePath);
+    const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+
+if (!fs.existsSync(filePath)) {
+  console.error("Error: File not found");
+  process.exit(1);
+}
+
+const stat = fs.statSync(filePath);
+const content = fs.readFileSync(filePath, "utf-8");
+
+console.log(JSON.stringify({
+  content: content.split("\\n"),
+  created_at: stat.birthtime.toISOString(),
+  modified_at: stat.mtime.toISOString()
+}));
+`, { PATH: pathB64 }));
+    if (result.exitCode !== 0) throw new Error(`File '${filePath}' not found`);
+    try {
+      const data = JSON.parse(result.output.trim());
+      return {
+        content: data.content,
+        created_at: data.created_at,
+        modified_at: data.modified_at
+      };
+    } catch {
+      throw new Error(`Failed to parse file data for '${filePath}'`);
+    }
+  }
+  /**
+   * Write content to a new file.
+   */
+  async write(filePath, content) {
+    const pathB64 = toBase64(filePath);
+    const contentB64 = toBase64(content);
+    const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const path = require("path");
+
+const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const content = Buffer.from("__CONTENT__", "base64").toString("utf-8");
+
+if (fs.existsSync(filePath)) {
+  console.error("Error: File already exists");
+  process.exit(1);
+}
+
+const dir = path.dirname(filePath);
+if (dir && dir !== ".") {
+  fs.mkdirSync(dir, { recursive: true });
+}
+
+fs.writeFileSync(filePath, content, "utf-8");
+`, {
+      PATH: pathB64,
+      CONTENT: contentB64
+    }));
+    if (result.exitCode !== 0) {
+      if (result.output.includes("already exists")) return {
+        success: false,
+        error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.`
+      };
+      return {
+        success: false,
+        error: result.output.trim() || `Failed to write '${filePath}'`
+      };
+    }
+    return {
+      success: true,
+      path: filePath
+    };
+  }
+  /**
+   * Edit a file by replacing string occurrences.
+   */
+  async edit(filePath, oldString, newString, replaceAll = false) {
+    const pathB64 = toBase64(filePath);
+    const oldB64 = toBase64(oldString);
+    const newB64 = toBase64(newString);
+    const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+
+const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const oldStr = Buffer.from("__OLD__", "base64").toString("utf-8");
+const newStr = Buffer.from("__NEW__", "base64").toString("utf-8");
+const replaceAll = __REPLACE_ALL__;
+
+if (!fs.existsSync(filePath)) {
+  console.error("Error: File not found");
+  process.exit(1);
+}
+
+let content = fs.readFileSync(filePath, "utf-8");
+const count = content.split(oldStr).length - 1;
+
+if (count === 0) {
+  process.exit(2);
+}
+if (count > 1 && !replaceAll) {
+  process.exit(3);
+}
+
+if (replaceAll) {
+  content = content.split(oldStr).join(newStr);
+} else {
+  content = content.replace(oldStr, newStr);
+}
+
+fs.writeFileSync(filePath, content, "utf-8");
+console.log(count);
+`, {
+      PATH: pathB64,
+      OLD: oldB64,
+      NEW: newB64,
+      REPLACE_ALL: String(replaceAll)
+    }));
+    if (result.exitCode === 1) return {
+      success: false,
+      error: FILE_NOT_FOUND(filePath)
+    };
+    if (result.exitCode === 2) return {
+      success: false,
+      error: STRING_NOT_FOUND(filePath, oldString)
+    };
+    if (result.exitCode === 3) return {
+      success: false,
+      error: `Error: String '${oldString}' appears multiple times. Use replaceAll=true to replace all occurrences.`
+    };
+    return {
+      success: true,
+      path: filePath,
+      occurrences: parseInt(result.output.trim(), 10) || 1
+    };
+  }
+  /**
+   * Search for pattern in files.
+   */
+  async grepRaw(pattern, path = "/", glob$1 = null) {
+    const patternB64 = toBase64(pattern);
+    const pathB64 = toBase64(path);
+    const globB64 = glob$1 ? toBase64(glob$1) : toBase64("**/*");
+    const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const path = require("path");
+
+const pattern = Buffer.from("__PATTERN__", "base64").toString("utf-8");
+const basePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const fileGlob = Buffer.from("__GLOB__", "base64").toString("utf-8");
+
+function walkDir(dir, baseDir) {
+  const results = [];
+  try {
+    const entries = fs.readdirSync(dir, { withFileTypes: true });
+    for (const entry of entries) {
+      const fullPath = path.join(dir, entry.name);
+      const relativePath = path.relative(baseDir, fullPath);
+
+      if (entry.isDirectory()) {
+        results.push(...walkDir(fullPath, baseDir));
+      } else {
+        results.push(relativePath);
+      }
+    }
+  } catch (e) {}
+  return results;
+}
+
+function matchGlob(filepath, pattern) {
+  if (!pattern || pattern === "**/*") return true;
+  const regex = pattern
+    .replace(/\\./g, "\\\\.")
+    .replace(/\\*\\*/g, "<<<GLOBSTAR>>>")
+    .replace(/\\*/g, "[^/]*")
+    .replace(/<<<GLOBSTAR>>>/g, ".*")
+    .replace(/\\?/g, ".");
+  return new RegExp("^" + regex + "$").test(filepath);
+}
+
+const allFiles = walkDir(basePath, basePath);
+const files = allFiles.filter(f => matchGlob(f, fileGlob)).sort();
+
+for (const file of files) {
+  try {
+    const fullPath = path.join(basePath, file);
+    const content = fs.readFileSync(fullPath, "utf-8");
+    const lines = content.split("\\n");
+
+    for (let i = 0; i < lines.length; i++) {
+      if (lines[i].includes(pattern)) {
+        console.log(JSON.stringify({
+          path: file,
+          line: i + 1,
+          text: lines[i]
+        }));
+      }
+    }
+  } catch (e) {}
+}
+`, {
+      PATTERN: patternB64,
+      PATH: pathB64,
+      GLOB: globB64
+    }));
+    const matches = [];
+    for (const line of result.output.trim().split("\n")) {
+      if (!line) continue;
+      try {
+        const data = JSON.parse(line);
+        matches.push({
+          path: data.path,
+          line: data.line,
+          text: data.text
+        });
+      } catch {}
+    }
+    return matches;
+  }
+  /**
+   * Find files matching glob pattern.
+   */
+  async globInfo(pattern, path = "/") {
+    const pathB64 = toBase64(path);
+    const patternB64 = toBase64(pattern);
+    const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const path = require("path");
+
+const basePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const pattern = Buffer.from("__PATTERN__", "base64").toString("utf-8");
+
+function walkDir(dir, baseDir) {
+  const results = [];
+  try {
+    const entries = fs.readdirSync(dir, { withFileTypes: true });
+    for (const entry of entries) {
+      const fullPath = path.join(dir, entry.name);
+      const relativePath = path.relative(baseDir, fullPath);
+
+      if (entry.isDirectory()) {
+        results.push(...walkDir(fullPath, baseDir));
+      } else {
+        results.push(relativePath);
+      }
+    }
+  } catch (e) {}
+  return results;
+}
+
+function matchGlob(filepath, pattern) {
+  const regex = pattern
+    .replace(/\\./g, "\\\\.")
+    .replace(/\\*\\*/g, "<<<GLOBSTAR>>>")
+    .replace(/\\*/g, "[^/]*")
+    .replace(/<<<GLOBSTAR>>>/g, ".*")
+    .replace(/\\?/g, ".");
+  return new RegExp("^" + regex + "$").test(filepath);
+}
+
+const allFiles = walkDir(basePath, basePath);
+const matches = allFiles.filter(f => matchGlob(f, pattern)).sort();
+
+for (const m of matches) {
+  try {
+    const fullPath = path.join(basePath, m);
+    const stat = fs.statSync(fullPath);
+    console.log(JSON.stringify({
+      path: m,
+      is_dir: stat.isDirectory(),
+      size: stat.size,
+      modified_at: stat.mtime.toISOString()
+    }));
+  } catch (e) {}
+}
+`, {
+      PATH: pathB64,
+      PATTERN: patternB64
+    }));
+    const infos = [];
+    for (const line of result.output.trim().split("\n")) {
+      if (!line) continue;
+      try {
+        const data = JSON.parse(line);
+        infos.push({
+          path: data.path,
+          is_dir: data.is_dir,
+          size: data.size,
+          modified_at: data.modified_at
+        });
+      } catch {}
+    }
+    return infos;
+  }
+};
+
+//#endregion
+//#region src/backends/local-sandbox.ts
+/**
+ * LocalSandbox: Execute commands locally using child_process.
+ *
+ * Useful for local development and testing without cloud sandboxes.
+ * All file operations are inherited from BaseSandbox and executed
+ * via shell commands in the local filesystem.
+ */
+/**
+ * Local sandbox that executes commands using Node.js child_process.
+ *
+ * All commands are executed in a bash shell with the specified working directory.
+ * Inherits all file operations (read, write, edit, ls, grep, glob) from BaseSandbox.
+ *
+ * @example Basic usage
+ * ```typescript
+ * import { LocalSandbox } from 'deepagentsdk';
+ *
+ * const sandbox = new LocalSandbox({ cwd: './workspace' });
+ *
+ * // Execute commands
+ * const result = await sandbox.execute('ls -la');
+ * console.log(result.output);
+ *
+ * // File operations
+ * await sandbox.write('./src/index.ts', 'console.log("hello")');
+ * const content = await sandbox.read('./src/index.ts');
+ * ```
+ *
+ * @example With timeout and environment
+ * ```typescript
+ * const sandbox = new LocalSandbox({
+ *   cwd: './workspace',
+ *   timeout: 60000, // 60 seconds
+ *   env: {
+ *     NODE_ENV: 'development',
+ *     DEBUG: '*',
+ *   },
+ * });
+ * ```
+ *
+ * @example Error handling
+ * ```typescript
+ * const result = await sandbox.execute('npm test');
+ * if (result.exitCode !== 0) {
+ *   console.error('Tests failed:', result.output);
+ * }
+ * ```
+ */
+var LocalSandbox = class extends BaseSandbox {
+  cwd;
+  timeout;
+  env;
+  maxOutputSize;
+  _id;
+  /**
+   * Create a new LocalSandbox instance.
+   *
+   * @param options - Configuration options for the sandbox
+   */
+  constructor(options = {}) {
+    super();
+    this.cwd = options.cwd || process.cwd();
+    this.timeout = options.timeout || 3e4;
+    this.env = options.env || {};
+    this.maxOutputSize = options.maxOutputSize || 1024 * 1024;
+    this._id = `local-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
+  }
+  /**
+   * Unique identifier for this sandbox instance.
+   * Format: `local-{timestamp}-{random}`
+   */
+  get id() {
+    return this._id;
+  }
+  /**
+   * Execute a shell command in the local filesystem.
+   *
+   * Commands are executed using bash with the configured working directory
+   * and environment variables. Output is captured from both stdout and stderr.
+   *
+   * @param command - Shell command to execute
+   * @returns ExecuteResponse with output, exit code, and truncation status
+   *
+   * @example
+   * ```typescript
+   * const result = await sandbox.execute('echo "Hello" && ls -la');
+   * console.log(result.output);
+   * console.log('Exit code:', result.exitCode);
+   * ```
+   */
+  async execute(command) {
+    return new Promise((resolve) => {
+      const child = spawn("bash", ["-c", command], {
+        cwd: this.cwd,
+        env: {
+          ...process.env,
+          ...this.env
+        },
+        timeout: this.timeout
+      });
+      let output = "";
+      let truncated = false;
+      child.stdout.on("data", (data) => {
+        if (output.length < this.maxOutputSize) output += data.toString();
+        else truncated = true;
+      });
+      child.stderr.on("data", (data) => {
+        if (output.length < this.maxOutputSize) output += data.toString();
+        else truncated = true;
+      });
+      child.on("close", (code) => {
+        resolve({
+          output,
+          exitCode: code,
+          truncated
+        });
+      });
+      child.on("error", (err) => {
+        resolve({
+          output: `Error: ${err.message}`,
+          exitCode: 1,
+          truncated: false
+        });
+      });
+    });
+  }
+};
+
+//#endregion
+//#region src/utils/model-parser.ts
+/**
+ * Utility to parse model strings into LanguageModel instances.
+ * Provides backward compatibility for CLI and other string-based model specifications.
+ */
+/**
+ * Parse a model string into a LanguageModel instance.
+ *
+ * Supports formats like:
+ * - "anthropic/claude-sonnet-4-20250514"
+ * - "openai/gpt-4o"
+ * - "claude-sonnet-4-20250514" (defaults to Anthropic)
+ *
+ * @param modelString - The model string to parse
+ * @returns A LanguageModel instance
+ *
+ * @example
+ * ```typescript
+ * const model = parseModelString("anthropic/claude-sonnet-4-20250514");
+ * const agent = createDeepAgent({ model });
+ * ```
+ */
+function parseModelString(modelString) {
+  const [provider, modelName] = modelString.split("/");
+  if (provider === "anthropic") return anthropic(modelName || "claude-sonnet-4-20250514");
+  else if (provider === "openai") return openai(modelName || "gpt-5-mini");
+  return anthropic(modelString);
+}
+
+//#endregion
+//#region src/checkpointer/file-saver.ts
+/**
+ * File-based checkpoint saver for local development.
+ */
+/**
+ * File-based checkpoint saver.
+ *
+ * Stores checkpoints as JSON files in a directory. Each thread gets
+ * its own file named `{threadId}.json`.
+ *
+ * @example
+ * ```typescript
+ * const saver = new FileSaver({ dir: './.checkpoints' });
+ * const agent = createDeepAgent({
+ *   model: anthropic('claude-sonnet-4-20250514'),
+ *   checkpointer: saver,
+ * });
+ * ```
+ */
+var FileSaver = class {
+  dir;
+  constructor(options) {
+    this.dir = options.dir;
+    if (!existsSync(this.dir)) mkdirSync(this.dir, { recursive: true });
+  }
+  getFilePath(threadId) {
+    const safeId = threadId.replace(/[^a-zA-Z0-9_-]/g, "_");
+    return join(this.dir, `${safeId}.json`);
+  }
+  async save(checkpoint) {
+    const filePath = this.getFilePath(checkpoint.threadId);
+    const data = {
+      ...checkpoint,
+      updatedAt: (/* @__PURE__ */ new Date()).toISOString()
+    };
+    writeFileSync(filePath, JSON.stringify(data, null, 2), "utf-8");
+  }
+  async load(threadId) {
+    const filePath = this.getFilePath(threadId);
+    if (!existsSync(filePath)) return;
+    try {
+      const content = readFileSync(filePath, "utf-8");
+      return JSON.parse(content);
+    } catch {
+      return;
+    }
+  }
+  async list() {
+    if (!existsSync(this.dir)) return [];
+    return readdirSync(this.dir).filter((f) => f.endsWith(".json")).map((f) => f.replace(".json", ""));
+  }
+  async delete(threadId) {
+    const filePath = this.getFilePath(threadId);
+    if (existsSync(filePath)) unlinkSync(filePath);
+  }
+  async exists(threadId) {
+    return existsSync(this.getFilePath(threadId));
+  }
+};
+
+//#endregion
+export { grepMatchesFromFiles as $, createGrepTool as A, DEFAULT_EVICTION_TOKEN_LIMIT as B, htmlToMarkdown as C, createEditFileTool as D, web_search as E, glob as F, shouldEvict as G, estimateTokens as H, grep as I, createFileData as J, StateBackend as K, ls as L, createReadFileTool as M, createWriteFileTool as N, createFilesystemTools as O, edit_file as P, globSearchFiles as Q, read_file as R, fetch_url as S, init_limits as St, init_web as T, evictToolResult as U, createToolResultWrapper as V, init_eviction as W, formatContentWithLineNumbers as X, fileDataToString as Y, formatReadResponse as Z, execute as _, DEFAULT_EVICTION_TOKEN_LIMIT$1 as _t, DeepAgent as a, createTodosTool as at, createWebSearchTool as b, DEFAULT_SUMMARIZATION_THRESHOLD$1 as bt, DEFAULT_SUMMARIZATION_THRESHOLD as c, DEFAULT_GENERAL_PURPOSE_DESCRIPTION as ct, summarizeIfNeeded as d, FILESYSTEM_SYSTEM_PROMPT as dt, performStringReplacement as et, hasDanglingToolCalls as f, TASK_SYSTEM_PROMPT as ft, createExecuteToolFromBackend as g, CONTEXT_WINDOW as gt, createExecuteTool as h, isSandboxBackend as ht, BaseSandbox as i, init_errors as it, createLsTool as j, createGlobTool as k, estimateMessagesTokens as l, DEFAULT_SUBAGENT_PROMPT as lt, createSubagentTool as m, getTaskToolDescription as mt, parseModelString as n, FILE_ALREADY_EXISTS as nt, createDeepAgent as o, write_todos as ot, patchToolCalls as p, TODO_SYSTEM_PROMPT as pt, checkEmptyContent as q, LocalSandbox as r, FILE_NOT_FOUND as rt, DEFAULT_KEEP_MESSAGES as s, BASE_PROMPT as st, FileSaver as t, updateFileData as tt, needsSummarization as u, EXECUTE_SYSTEM_PROMPT as ut, createFetchUrlTool as v, DEFAULT_KEEP_MESSAGES$1 as vt, http_request as w, createWebTools as x, MAX_FILE_SIZE_MB as xt, createHttpRequestTool as y, DEFAULT_READ_LIMIT as yt, write_file as z };
+//# sourceMappingURL=file-saver-Hj5so3dV.mjs.map