deepagentsdk 0.12.0 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/elements/index.cjs +478 -288
- package/dist/adapters/elements/index.cjs.map +1 -1
- package/dist/adapters/elements/index.d.cts +107 -172
- package/dist/adapters/elements/index.d.mts +107 -172
- package/dist/adapters/elements/index.mjs +471 -284
- package/dist/adapters/elements/index.mjs.map +1 -1
- package/dist/{types-4g9UvXal.d.mts → agent-D0bKkNI-.d.mts} +352 -3
- package/dist/{types-IulnvhFg.d.cts → agent-DwAj5emJ.d.cts} +352 -3
- package/dist/{chunk-CbDLau6x.cjs → chunk-C5azi7Hr.cjs} +33 -0
- package/dist/cli/index.cjs +12 -12
- package/dist/cli/index.cjs.map +1 -1
- package/dist/cli/index.mjs +2 -2
- package/dist/cli/index.mjs.map +1 -1
- package/dist/{agent-Cuks-Idh.cjs → file-saver-BYPKakT4.cjs} +799 -205
- package/dist/file-saver-BYPKakT4.cjs.map +1 -0
- package/dist/{agent-CrH-He58.mjs → file-saver-Hj5so3dV.mjs} +793 -199
- package/dist/file-saver-Hj5so3dV.mjs.map +1 -0
- package/dist/index.cjs +83 -73
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +5 -353
- package/dist/index.d.mts +5 -353
- package/dist/index.mjs +13 -3
- package/dist/index.mjs.map +1 -1
- package/dist/{load-B6CA5js_.mjs → load-BBYEnMwz.mjs} +1 -1
- package/dist/{load-B6CA5js_.mjs.map → load-BBYEnMwz.mjs.map} +1 -1
- package/dist/{load-94gjHorc.mjs → load-BDxe6Cet.mjs} +1 -1
- package/dist/{load-79a2H4m0.cjs → load-BrRAKlO6.cjs} +2 -2
- package/dist/{load-79a2H4m0.cjs.map → load-BrRAKlO6.cjs.map} +1 -1
- package/dist/load-DqllBbDc.cjs +4 -0
- package/package.json +1 -1
- package/dist/agent-CrH-He58.mjs.map +0 -1
- package/dist/agent-Cuks-Idh.cjs.map +0 -1
- package/dist/file-saver-BJCqMIb5.mjs +0 -655
- package/dist/file-saver-BJCqMIb5.mjs.map +0 -1
- package/dist/file-saver-C6O2LAvg.cjs +0 -679
- package/dist/file-saver-C6O2LAvg.cjs.map +0 -1
- package/dist/load-C2qVmZMp.cjs +0 -3
@@ -1,4 +1,4 @@
-const require_chunk = require('./chunk-CbDLau6x.cjs');
+const require_chunk = require('./chunk-C5azi7Hr.cjs');
 let ai = require("ai");
 let zod = require("zod");
 let micromatch = require("micromatch");
@@ -9,139 +9,28 @@ let turndown = require("turndown");
 turndown = require_chunk.__toESM(turndown);
 let _mozilla_readability = require("@mozilla/readability");
 let jsdom = require("jsdom");
+let child_process = require("child_process");
+let _ai_sdk_anthropic = require("@ai-sdk/anthropic");
+let _ai_sdk_openai = require("@ai-sdk/openai");
+let node_fs = require("node:fs");
+let node_path = require("node:path");

 //#region src/constants/limits.ts
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-*
-* @default 20000
-* @see {@link ../utils/eviction | evictToolResult}
-*/
-const DEFAULT_EVICTION_TOKEN_LIMIT$1 = 2e4;
-/**
-* Default threshold for message summarization.
-*
-* When the estimated token count of messages exceeds this threshold, the system
-* automatically summarizes older messages to stay within context limits. This
-* helps maintain conversation continuity while reducing token usage.
-*
-* @default 170000
-* @see {@link ../utils/summarization | summarizeIfNeeded}
-*/
-const DEFAULT_SUMMARIZATION_THRESHOLD$1 = 17e4;
-/**
-* Maximum context window size for Claude models.
-*
-* This represents the maximum number of tokens that can be processed in a single
-* conversation. Used for calculating token usage percentages and determining when
-* summarization is needed.
-*
-* @default 200000
-* @see {@link ../utils/summarization | estimateMessagesTokens}
-*/
-const CONTEXT_WINDOW = 2e5;
-/**
-* Default number of recent messages to keep during summarization.
-*
-* When summarization is triggered, this many of the most recent messages are
-* preserved verbatim while older messages are summarized. This ensures recent
-* context is immediately available to the agent.
-*
-* @default 6
-*/
-const DEFAULT_KEEP_MESSAGES$1 = 6;
-/**
-* Default maximum number of reasoning steps for the main agent.
-*
-* The agent will stop after reaching this many steps to prevent infinite loops
-* or excessive token usage. Each step represents one tool invocation cycle.
-*
-* @default 100
-*/
-const DEFAULT_MAX_STEPS = 100;
-/**
-* Default maximum number of reasoning steps for subagents.
-*
-* Subagents are given a lower step limit than the main agent to prevent them
-* from consuming too many resources. This ensures the parent agent maintains
-* control over the overall task.
-*
-* @default 50
-* @see {@link ../tools/subagent | createTaskTool}
-*/
-const DEFAULT_SUBAGENT_MAX_STEPS = 50;
-/**
-* Default maximum number of lines to read from a file.
-*
-* The read_file tool defaults to reading this many lines to prevent loading
-* extremely large files into context. Can be overridden per-read operation.
-*
-* @default 2000
-* @see {@link ../tools/filesystem | createReadFileTool}
-*/
-const DEFAULT_READ_LIMIT = 2e3;
-/**
-* Maximum line length before content is considered invalid.
-*
-* Lines exceeding this length may indicate minified code, binary content, or
-* other data that should not be processed as text. Used for validation.
-*
-* @default 10000
-*/
-const MAX_LINE_LENGTH = 1e4;
-/**
-* Maximum file size in megabytes for file operations.
-*
-* Files larger than this size will be rejected to prevent memory issues and
-* excessive token usage. This is a soft limit that can be adjusted for specific
-* use cases.
-*
-* @default 10
-*/
-const MAX_FILE_SIZE_MB = 10;
-/**
-* Default timeout for network requests in seconds.
-*
-* Used by web tools (http_request, fetch_url) to prevent hanging indefinitely
-* on slow or unresponsive servers. Can be overridden per-request.
-*
-* @default 30
-* @see {@link ../tools/web | createHttpRequestTool}
-*/
-const DEFAULT_TIMEOUT_SECONDS = 30;
-/**
-* Default timeout in milliseconds (derived from DEFAULT_TIMEOUT_SECONDS).
-*
-* Provided for convenience when working with APIs that expect milliseconds
-* instead of seconds.
-*
-* @default 30000 (30 seconds)
-*/
-const DEFAULT_TIMEOUT_MS = DEFAULT_TIMEOUT_SECONDS * 1e3;
-/**
-* Width for line number formatting in file read operations.
-*
-* When displaying file content with line numbers, this specifies the minimum
-* width for the line number column. Ensures consistent alignment across
-* different file sizes.
-*
-* @default 6
-* @see {@link ../backends/utils | formatFileContent}
-*/
-const LINE_NUMBER_WIDTH = 6;
+var DEFAULT_EVICTION_TOKEN_LIMIT$1, DEFAULT_SUMMARIZATION_THRESHOLD$1, CONTEXT_WINDOW, DEFAULT_KEEP_MESSAGES$1, DEFAULT_MAX_STEPS, DEFAULT_SUBAGENT_MAX_STEPS, DEFAULT_READ_LIMIT, MAX_LINE_LENGTH, MAX_FILE_SIZE_MB, DEFAULT_TIMEOUT_SECONDS, DEFAULT_TIMEOUT_MS, LINE_NUMBER_WIDTH;
+var init_limits = require_chunk.__esmMin((() => {
+DEFAULT_EVICTION_TOKEN_LIMIT$1 = 2e4;
+DEFAULT_SUMMARIZATION_THRESHOLD$1 = 17e4;
+CONTEXT_WINDOW = 2e5;
+DEFAULT_KEEP_MESSAGES$1 = 6;
+DEFAULT_MAX_STEPS = 100;
+DEFAULT_SUBAGENT_MAX_STEPS = 50;
+DEFAULT_READ_LIMIT = 2e3;
+MAX_LINE_LENGTH = 1e4;
+MAX_FILE_SIZE_MB = 10;
+DEFAULT_TIMEOUT_SECONDS = 30;
+DEFAULT_TIMEOUT_MS = DEFAULT_TIMEOUT_SECONDS * 1e3;
+LINE_NUMBER_WIDTH = 6;
+}));

 //#endregion
 //#region src/utils/events.ts
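This hunk sets the pattern for the rest of the diff: eagerly-evaluated module-scope `const` declarations become hoisted `var` bindings plus an `init_*` function built with `require_chunk.__esmMin`, so each region's body runs once, on first use, instead of at load time. A minimal sketch of what such a helper typically does; `esmMin` below is a hypothetical stand-in, since the real implementation lives in chunk-C5azi7Hr.cjs and may differ:

```typescript
// Hypothetical stand-in for require_chunk.__esmMin: wrap an init callback
// so its body executes at most once, on the first call.
function esmMin(fn: () => void): () => void {
  let ran = false;
  return () => {
    if (!ran) {
      ran = true;
      fn();
    }
  };
}

// Mirrors the emitted shape: hoisted binding + lazy initializer.
let DEFAULT_MAX_STEPS: number;
const init_limits = esmMin(() => {
  DEFAULT_MAX_STEPS = 100;
});

init_limits();                  // consumers call init before reading
console.log(DEFAULT_MAX_STEPS); // 100
init_limits();                  // safe: the body does not run again
```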
@@ -303,6 +192,7 @@ function createCheckpointLoadedEvent(threadId, step, messagesCount) {
 messagesCount
 };
 }
+var init_events = require_chunk.__esmMin((() => {}));

 //#endregion
 //#region src/types/backend.ts
@@ -499,6 +389,7 @@ Skills provide expert knowledge for specialized tasks. Always read the full skil
 /**
 * Todo list tool for task planning and tracking.
 */
+init_events();
 const TodoItemSchema = zod.z.object({
 id: zod.z.string().describe("Unique identifier for the todo item"),
 content: zod.z.string().max(100).describe("The description/content of the todo item (max 100 chars)"),
@@ -557,23 +448,24 @@ const write_todos = createTodosTool;

 //#endregion
 //#region src/constants/errors.ts
-
-
-
-
-
-
-
-
-
-
-const SYSTEM_REMINDER_FILE_EMPTY = "System reminder: File exists but has empty contents";
+var FILE_NOT_FOUND, FILE_ALREADY_EXISTS, STRING_NOT_FOUND, INVALID_REGEX, WEB_SEARCH_ERROR, REQUEST_TIMEOUT, SYSTEM_REMINDER_FILE_EMPTY;
+var init_errors = require_chunk.__esmMin((() => {
+FILE_NOT_FOUND = (path$1) => `Error: File '${path$1}' not found`;
+FILE_ALREADY_EXISTS = (path$1) => `Cannot write to ${path$1} because it already exists. Read and then make an edit, or write to a new path.`;
+STRING_NOT_FOUND = (path$1, string) => `Error: String not found in file: '${path$1}'\n\n${string}`;
+INVALID_REGEX = (message) => `Invalid regex pattern: ${message}`;
+WEB_SEARCH_ERROR = (message) => `Web search error: ${message}`;
+REQUEST_TIMEOUT = (timeout) => `Request timed out after ${timeout} seconds`;
+SYSTEM_REMINDER_FILE_EMPTY = "System reminder: File exists but has empty contents";
+}));

 //#endregion
 //#region src/backends/utils.ts
 /**
 * Shared utility functions for memory backend implementations.
 */
+init_errors();
+init_limits();
 const EMPTY_CONTENT_WARNING = SYSTEM_REMINDER_FILE_EMPTY;
 /**
 * Format file content with line numbers (cat -n style).
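Note that most of the error "constants" initialized inside `init_errors()` are factory functions rather than plain strings; callers invoke them to get a formatted message. Shapes copied from the diff, with the outputs they produce:

```typescript
// Template shapes taken verbatim from the hunk above.
const FILE_NOT_FOUND = (path: string) => `Error: File '${path}' not found`;
const REQUEST_TIMEOUT = (timeout: number) =>
  `Request timed out after ${timeout} seconds`;

console.log(FILE_NOT_FOUND("src/index.ts"));
// → Error: File 'src/index.ts' not found
console.log(REQUEST_TIMEOUT(30));
// → Request timed out after 30 seconds
```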
@@ -738,6 +630,7 @@ function grepMatchesFromFiles(files, pattern, path$1 = null, glob$1 = null) {

 //#endregion
 //#region src/backends/state.ts
+init_errors();
 /**
 * Backend that stores files in shared state (ephemeral).
 *
@@ -907,15 +800,6 @@ var StateBackend = class {
 //#endregion
 //#region src/utils/eviction.ts
 /**
-* Default token limit before evicting a tool result.
-* Approximately 20,000 tokens (~80KB of text).
-*/
-const DEFAULT_EVICTION_TOKEN_LIMIT = DEFAULT_EVICTION_TOKEN_LIMIT$1;
-/**
-* Approximate characters per token (rough estimate).
-*/
-const CHARS_PER_TOKEN = 4;
-/**
 * Sanitize a tool call ID for use as a filename.
 * Removes or replaces characters that are invalid in file paths.
 */
@@ -999,12 +883,20 @@ function createToolResultWrapper(backend, state, tokenLimit = DEFAULT_EVICTION_T
 })).content;
 };
 }
+var DEFAULT_EVICTION_TOKEN_LIMIT, CHARS_PER_TOKEN;
+var init_eviction = require_chunk.__esmMin((() => {
+init_limits();
+DEFAULT_EVICTION_TOKEN_LIMIT = DEFAULT_EVICTION_TOKEN_LIMIT$1;
+CHARS_PER_TOKEN = 4;
+}));

 //#endregion
 //#region src/tools/filesystem.ts
 /**
 * Filesystem tools for virtual file operations.
 */
+init_eviction();
+init_events();
 const LS_TOOL_DESCRIPTION = "List files and directories in a directory. Paths are relative to the working directory.";
 const READ_FILE_TOOL_DESCRIPTION = "Read the contents of a file. Paths are relative to the working directory.";
 const WRITE_FILE_TOOL_DESCRIPTION = "Write content to a new file. Returns an error if the file already exists. Paths are relative to the working directory.";
@@ -1327,6 +1219,16 @@ function wrapToolsWithApproval(tools, interruptOn, onApprovalRequest) {
 * Web tools for search and HTTP requests.
 * Based on LangChain DeepAgents implementation.
 */
+var web_exports = /* @__PURE__ */ require_chunk.__exportAll({
+createFetchUrlTool: () => createFetchUrlTool,
+createHttpRequestTool: () => createHttpRequestTool,
+createWebSearchTool: () => createWebSearchTool,
+createWebTools: () => createWebTools,
+fetch_url: () => fetch_url,
+htmlToMarkdown: () => htmlToMarkdown,
+http_request: () => http_request,
+web_search: () => web_search
+});
 /**
 * Helper to resolve backend from factory or instance.
 */
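`web_exports` registers each web tool as a getter thunk via `require_chunk.__exportAll`; paired with the `init_web()` initializer added later in this diff, that gives the bundle a namespace object whose reads always observe current binding values, even when initialization runs lazily. A hypothetical sketch of an `__exportAll`-style helper (the real one lives in chunk-C5azi7Hr.cjs and may differ):

```typescript
// Hypothetical: define enumerable getters so property reads always see the
// current value of the underlying binding, even if init runs later.
function exportAll(
  thunks: Record<string, () => unknown>,
): Record<string, unknown> {
  const ns: Record<string, unknown> = {};
  for (const [name, get] of Object.entries(thunks)) {
    Object.defineProperty(ns, name, { enumerable: true, get });
  }
  return ns;
}

let web_search: unknown; // assigned later, e.g. inside init_web()
const web_exports = exportAll({ web_search: () => web_search });

console.log(web_exports.web_search); // undefined — init has not run yet
web_search = "tool";                 // stand-in for the real assignment
console.log(web_exports.web_search); // "tool" — the getter sees the update
```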
@@ -1355,18 +1257,6 @@ function htmlToMarkdown(html, url) {
 }
 }
 /**
-* Tool description for web_search.
-*/
-const WEB_SEARCH_TOOL_DESCRIPTION = `Search the web using Tavily API for current information, news, and documentation.
-
-Returns an array of search results with titles, URLs, relevant excerpts, and relevance scores.
-
-IMPORTANT AGENT INSTRUCTIONS:
-- You MUST synthesize information from search results into a coherent answer
-- NEVER show raw JSON or result objects to the user
-- Cite sources by including URLs in your response
-- If search fails or returns no results, explain this clearly to the user`;
-/**
 * Create the web_search tool.
 */
 function createWebSearchTool(state, options) {
@@ -1414,14 +1304,6 @@ function createWebSearchTool(state, options) {
 });
 }
 /**
-* Tool description for http_request.
-*/
-const HTTP_REQUEST_TOOL_DESCRIPTION = `Make HTTP requests to APIs and web services.
-
-Supports GET, POST, PUT, DELETE, PATCH methods with custom headers, query parameters, and request bodies.
-
-Returns structured response with status code, headers, and parsed content (JSON or text).`;
-/**
 * Create the http_request tool.
 */
 function createHttpRequestTool(state, options) {
@@ -1493,19 +1375,6 @@ function createHttpRequestTool(state, options) {
 });
 }
 /**
-* Tool description for fetch_url.
-*/
-const FETCH_URL_TOOL_DESCRIPTION = `Fetch web page content and convert HTML to clean Markdown format.
-
-Uses Mozilla Readability to extract main article content and Turndown to convert to Markdown.
-
-Returns the page content as formatted Markdown, suitable for analysis and summarization.
-
-IMPORTANT AGENT INSTRUCTIONS:
-- Use this tool to read documentation, articles, and web pages
-- The content is already cleaned and formatted as Markdown
-- Cite the URL when referencing fetched content`;
-/**
 * Create the fetch_url tool.
 */
 function createFetchUrlTool(state, options) {
@@ -1596,13 +1465,40 @@ function createWebTools(state, options) {
 })
 };
 }
-
-
-
-
-
-
-
+var WEB_SEARCH_TOOL_DESCRIPTION, HTTP_REQUEST_TOOL_DESCRIPTION, FETCH_URL_TOOL_DESCRIPTION, web_search, http_request, fetch_url;
+var init_web = require_chunk.__esmMin((() => {
+init_eviction();
+init_errors();
+init_limits();
+init_events();
+WEB_SEARCH_TOOL_DESCRIPTION = `Search the web using Tavily API for current information, news, and documentation.
+
+Returns an array of search results with titles, URLs, relevant excerpts, and relevance scores.
+
+IMPORTANT AGENT INSTRUCTIONS:
+- You MUST synthesize information from search results into a coherent answer
+- NEVER show raw JSON or result objects to the user
+- Cite sources by including URLs in your response
+- If search fails or returns no results, explain this clearly to the user`;
+HTTP_REQUEST_TOOL_DESCRIPTION = `Make HTTP requests to APIs and web services.
+
+Supports GET, POST, PUT, DELETE, PATCH methods with custom headers, query parameters, and request bodies.
+
+Returns structured response with status code, headers, and parsed content (JSON or text).`;
+FETCH_URL_TOOL_DESCRIPTION = `Fetch web page content and convert HTML to clean Markdown format.
+
+Uses Mozilla Readability to extract main article content and Turndown to convert to Markdown.
+
+Returns the page content as formatted Markdown, suitable for analysis and summarization.
+
+IMPORTANT AGENT INSTRUCTIONS:
+- Use this tool to read documentation, articles, and web pages
+- The content is already cleaned and formatted as Markdown
+- Cite the URL when referencing fetched content`;
+web_search = createWebSearchTool;
+http_request = createHttpRequestTool;
+fetch_url = createFetchUrlTool;
+}));

 //#endregion
 //#region src/tools/execute.ts
@@ -1724,6 +1620,9 @@ const execute = createExecuteTool;
 /**
 * Subagent tool for task delegation using AI SDK v6 ToolLoopAgent.
 */
+init_limits();
+init_events();
+init_web();
 /**
 * Check if a value is a builtin tool creator function.
 */
@@ -2048,6 +1947,8 @@ function hasDanglingToolCalls(messages) {
 * Automatically summarizes older messages when approaching token limits
 * to prevent context overflow while preserving important context.
 */
+init_eviction();
+init_limits();
 /**
 * Default token threshold before triggering summarization.
 * 170k tokens is a safe threshold for most models.
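The numbers being wired together here come from the `init_limits()`/`init_eviction()` regions above: a roughly four-characters-per-token estimate checked against a 170k-token summarization threshold inside a 200k-token context window. A worked example of the arithmetic these constants imply; the estimation formula itself is an assumption based on `CHARS_PER_TOKEN` and the surrounding comments, not the exact SDK code:

```typescript
// Constants copied from the diff; the chars/4 estimate is assumed.
const CHARS_PER_TOKEN = 4;
const DEFAULT_SUMMARIZATION_THRESHOLD = 170_000;
const CONTEXT_WINDOW = 200_000;

const messageChars = 700_000;                     // ~700 KB of history
const estTokens = messageChars / CHARS_PER_TOKEN; // 175,000 tokens
console.log(estTokens > DEFAULT_SUMMARIZATION_THRESHOLD); // true → summarize
console.log(`${((estTokens / CONTEXT_WINDOW) * 100).toFixed(1)}% of window`);
// → "87.5% of window"
```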
@@ -2174,6 +2075,8 @@ function needsSummarization(messages, tokenThreshold = DEFAULT_SUMMARIZATION_THR
 /**
 * Deep Agent implementation using Vercel AI SDK v6 ToolLoopAgent.
 */
+init_limits();
+init_events();
 /**
 * Build the full system prompt from components.
 */
@@ -2264,14 +2167,21 @@ var DeepAgent = class {
 }
 /**
 * Create web tools if TAVILY_API_KEY is available.
+* Uses dynamic import to avoid bundling Node.js dependencies in client builds.
 * @private
 */
 createWebToolSet(state, onEvent) {
-
-
-
-
-
+if (!process.env.TAVILY_API_KEY) return {};
+try {
+return (init_web(), require_chunk.__toCommonJS(web_exports)).createWebTools(state, {
+backend: this.backend,
+onEvent,
+toolResultEvictionLimit: this.toolResultEvictionLimit
+});
+} catch (error) {
+console.warn("Web tools not available in this environment:", error);
+return {};
+}
 }
 /**
 * Create execute tool if backend is a sandbox.
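The new `createWebToolSet` gates web tools twice: it returns an empty set when `TAVILY_API_KEY` is absent, and it wraps the lazy `(init_web(), __toCommonJS(web_exports))` load in a try/catch so environments that cannot evaluate the Node-only chunk degrade to no web tools instead of crashing. The observable behavior, sketched in isolation with a stand-in loader:

```typescript
// Simplified model of the gating logic; loadWebTools is a stand-in for the
// lazy init_web()/__toCommonJS(web_exports) load in the diff.
type ToolSet = Record<string, unknown>;

function createWebToolSet(loadWebTools: () => ToolSet): ToolSet {
  if (!process.env.TAVILY_API_KEY) return {}; // no key → no web tools
  try {
    return loadWebTools();                    // lazy init, Node-only deps
  } catch (error) {
    console.warn("Web tools not available in this environment:", error);
    return {};                                // degrade instead of throwing
  }
}
```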
@@ -2378,7 +2288,7 @@ var DeepAgent = class {
 * Supports both legacy skillsDir and new agentId modes.
 */
 async loadSkills(options) {
-const { listSkills } = await Promise.resolve().then(() => require("./load-C2qVmZMp.cjs"));
+const { listSkills } = await Promise.resolve().then(() => require("./load-DqllBbDc.cjs"));
 this.skillsMetadata = (await listSkills(options.agentId ? { agentId: options.agentId } : { projectSkillsDir: options.skillsDir })).map((s) => ({
 name: s.name,
 description: s.description,
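`Promise.resolve().then(() => require(...))` is the standard CommonJS lowering of a dynamic `import()`: the skills loader chunk stays out of the initial require graph and resolves asynchronously on first use. The only change here is the content-hashed chunk filename. The same shape, stripped down:

```typescript
// CJS-flavored dynamic import, as emitted by the bundler (shape from the diff).
async function loadSkillsModule() {
  // Equivalent in effect to: const mod = await import("./load-DqllBbDc.cjs")
  const { listSkills } = await Promise.resolve().then(
    () => require("./load-DqllBbDc.cjs"),
  );
  return listSkills;
}
```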
@@ -2972,6 +2882,654 @@ function createDeepAgent(params) {
 return new DeepAgent(params);
 }

+//#endregion
+//#region src/backends/sandbox.ts
+init_errors();
+init_limits();
+/**
+* Encode string to base64 for safe shell transmission.
+*/
+function toBase64(str) {
+return Buffer.from(str, "utf-8").toString("base64");
+}
+/**
+* Build a Node.js script command with embedded base64 arguments.
+* This avoids shell argument parsing issues by embedding values directly in the script.
+*/
+function buildNodeScript(script, args) {
+let result = script;
+for (const [key, value] of Object.entries(args)) result = result.replace(new RegExp(`__${key}__`, "g"), value);
+return `node -e '${result}'`;
+}
+/**
+* Abstract base class for sandbox backends.
+*
+* Implements all file operations using shell commands via execute().
+* Subclasses only need to implement execute() and id.
+*
+* @example Creating a custom sandbox backend
+* ```typescript
+* class MyCloudSandbox extends BaseSandbox {
+* readonly id = 'my-cloud-123';
+*
+* async execute(command: string): Promise<ExecuteResponse> {
+* // Call your cloud provider's API
+* const result = await myCloudApi.runCommand(command);
+* return {
+* output: result.stdout + result.stderr,
+* exitCode: result.exitCode,
+* truncated: false,
+* };
+* }
+* }
+* ```
+*/
+var BaseSandbox = class {
+/**
+* List files and directories in a path.
+*/
+async lsInfo(path$1) {
+const pathB64 = toBase64(path$1);
+const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const path = require("path");
+
+const dirPath = Buffer.from("__PATH__", "base64").toString("utf-8");
+
+try {
+const entries = fs.readdirSync(dirPath, { withFileTypes: true });
+for (const entry of entries) {
+const fullPath = path.join(dirPath, entry.name);
+try {
+const stat = fs.statSync(fullPath);
+console.log(JSON.stringify({
+path: entry.name,
+is_dir: entry.isDirectory(),
+size: stat.size,
+modified_at: stat.mtime.toISOString()
+}));
+} catch (e) {}
+}
+} catch (e) {}
+`, { PATH: pathB64 }));
+const infos = [];
+for (const line of result.output.trim().split("\n")) {
+if (!line) continue;
+try {
+const data = JSON.parse(line);
+infos.push({
+path: data.path,
+is_dir: data.is_dir,
+size: data.size,
+modified_at: data.modified_at
+});
+} catch {}
+}
+return infos;
+}
+/**
+* Read file content with line numbers.
+*/
+async read(filePath, offset = 0, limit = DEFAULT_READ_LIMIT) {
+const pathB64 = toBase64(filePath);
+const script = `
+const fs = require("fs");
+const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const offset = __OFFSET__;
+const limit = __LIMIT__;
+
+if (!fs.existsSync(filePath)) {
+console.error("Error: File not found");
+process.exit(1);
+}
+
+const stat = fs.statSync(filePath);
+if (stat.size === 0) {
+console.log("${SYSTEM_REMINDER_FILE_EMPTY}");
+process.exit(0);
+}
+
+const content = fs.readFileSync(filePath, "utf-8");
+const lines = content.split("\\n");
+const selected = lines.slice(offset, offset + limit);
+
+for (let i = 0; i < selected.length; i++) {
+const lineNum = (offset + i + 1).toString().padStart(6, " ");
+console.log(lineNum + "\\t" + selected[i]);
+}
+`;
+const result = await this.execute(buildNodeScript(script, {
+PATH: pathB64,
+OFFSET: String(offset),
+LIMIT: String(limit)
+}));
+if (result.exitCode !== 0) {
+if (result.output.includes("Error: File not found")) return FILE_NOT_FOUND(filePath);
+return result.output.trim();
+}
+return result.output.trimEnd();
+}
+/**
+* Read raw file data.
+*/
+async readRaw(filePath) {
+const pathB64 = toBase64(filePath);
+const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+
+if (!fs.existsSync(filePath)) {
+console.error("Error: File not found");
+process.exit(1);
+}
+
+const stat = fs.statSync(filePath);
+const content = fs.readFileSync(filePath, "utf-8");
+
+console.log(JSON.stringify({
+content: content.split("\\n"),
+created_at: stat.birthtime.toISOString(),
+modified_at: stat.mtime.toISOString()
+}));
+`, { PATH: pathB64 }));
+if (result.exitCode !== 0) throw new Error(`File '${filePath}' not found`);
+try {
+const data = JSON.parse(result.output.trim());
+return {
+content: data.content,
+created_at: data.created_at,
+modified_at: data.modified_at
+};
+} catch {
+throw new Error(`Failed to parse file data for '${filePath}'`);
+}
+}
+/**
+* Write content to a new file.
+*/
+async write(filePath, content) {
+const pathB64 = toBase64(filePath);
+const contentB64 = toBase64(content);
+const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const path = require("path");
+
+const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const content = Buffer.from("__CONTENT__", "base64").toString("utf-8");
+
+if (fs.existsSync(filePath)) {
+console.error("Error: File already exists");
+process.exit(1);
+}
+
+const dir = path.dirname(filePath);
+if (dir && dir !== ".") {
+fs.mkdirSync(dir, { recursive: true });
+}
+
+fs.writeFileSync(filePath, content, "utf-8");
+`, {
+PATH: pathB64,
+CONTENT: contentB64
+}));
+if (result.exitCode !== 0) {
+if (result.output.includes("already exists")) return {
+success: false,
+error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.`
+};
+return {
+success: false,
+error: result.output.trim() || `Failed to write '${filePath}'`
+};
+}
+return {
+success: true,
+path: filePath
+};
+}
+/**
+* Edit a file by replacing string occurrences.
+*/
+async edit(filePath, oldString, newString, replaceAll = false) {
+const pathB64 = toBase64(filePath);
+const oldB64 = toBase64(oldString);
+const newB64 = toBase64(newString);
+const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+
+const filePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const oldStr = Buffer.from("__OLD__", "base64").toString("utf-8");
+const newStr = Buffer.from("__NEW__", "base64").toString("utf-8");
+const replaceAll = __REPLACE_ALL__;
+
+if (!fs.existsSync(filePath)) {
+console.error("Error: File not found");
+process.exit(1);
+}
+
+let content = fs.readFileSync(filePath, "utf-8");
+const count = content.split(oldStr).length - 1;
+
+if (count === 0) {
+process.exit(2);
+}
+if (count > 1 && !replaceAll) {
+process.exit(3);
+}
+
+if (replaceAll) {
+content = content.split(oldStr).join(newStr);
+} else {
+content = content.replace(oldStr, newStr);
+}
+
+fs.writeFileSync(filePath, content, "utf-8");
+console.log(count);
+`, {
+PATH: pathB64,
+OLD: oldB64,
+NEW: newB64,
+REPLACE_ALL: String(replaceAll)
+}));
+if (result.exitCode === 1) return {
+success: false,
+error: FILE_NOT_FOUND(filePath)
+};
+if (result.exitCode === 2) return {
+success: false,
+error: STRING_NOT_FOUND(filePath, oldString)
+};
+if (result.exitCode === 3) return {
+success: false,
+error: `Error: String '${oldString}' appears multiple times. Use replaceAll=true to replace all occurrences.`
+};
+return {
+success: true,
+path: filePath,
+occurrences: parseInt(result.output.trim(), 10) || 1
+};
+}
+/**
+* Search for pattern in files.
+*/
+async grepRaw(pattern, path$1 = "/", glob$1 = null) {
+const patternB64 = toBase64(pattern);
+const pathB64 = toBase64(path$1);
+const globB64 = glob$1 ? toBase64(glob$1) : toBase64("**/*");
+const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const path = require("path");
+
+const pattern = Buffer.from("__PATTERN__", "base64").toString("utf-8");
+const basePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const fileGlob = Buffer.from("__GLOB__", "base64").toString("utf-8");
+
+function walkDir(dir, baseDir) {
+const results = [];
+try {
+const entries = fs.readdirSync(dir, { withFileTypes: true });
+for (const entry of entries) {
+const fullPath = path.join(dir, entry.name);
+const relativePath = path.relative(baseDir, fullPath);
+
+if (entry.isDirectory()) {
+results.push(...walkDir(fullPath, baseDir));
+} else {
+results.push(relativePath);
+}
+}
+} catch (e) {}
+return results;
+}
+
+function matchGlob(filepath, pattern) {
+if (!pattern || pattern === "**/*") return true;
+const regex = pattern
+.replace(/\\./g, "\\\\.")
+.replace(/\\*\\*/g, "<<<GLOBSTAR>>>")
+.replace(/\\*/g, "[^/]*")
+.replace(/<<<GLOBSTAR>>>/g, ".*")
+.replace(/\\?/g, ".");
+return new RegExp("^" + regex + "$").test(filepath);
+}
+
+const allFiles = walkDir(basePath, basePath);
+const files = allFiles.filter(f => matchGlob(f, fileGlob)).sort();
+
+for (const file of files) {
+try {
+const fullPath = path.join(basePath, file);
+const content = fs.readFileSync(fullPath, "utf-8");
+const lines = content.split("\\n");
+
+for (let i = 0; i < lines.length; i++) {
+if (lines[i].includes(pattern)) {
+console.log(JSON.stringify({
+path: file,
+line: i + 1,
+text: lines[i]
+}));
+}
+}
+} catch (e) {}
+}
+`, {
+PATTERN: patternB64,
+PATH: pathB64,
+GLOB: globB64
+}));
+const matches = [];
+for (const line of result.output.trim().split("\n")) {
+if (!line) continue;
+try {
+const data = JSON.parse(line);
+matches.push({
+path: data.path,
+line: data.line,
+text: data.text
+});
+} catch {}
+}
+return matches;
+}
+/**
+* Find files matching glob pattern.
+*/
+async globInfo(pattern, path$1 = "/") {
+const pathB64 = toBase64(path$1);
+const patternB64 = toBase64(pattern);
+const result = await this.execute(buildNodeScript(`
+const fs = require("fs");
+const path = require("path");
+
+const basePath = Buffer.from("__PATH__", "base64").toString("utf-8");
+const pattern = Buffer.from("__PATTERN__", "base64").toString("utf-8");
+
+function walkDir(dir, baseDir) {
+const results = [];
+try {
+const entries = fs.readdirSync(dir, { withFileTypes: true });
+for (const entry of entries) {
+const fullPath = path.join(dir, entry.name);
+const relativePath = path.relative(baseDir, fullPath);
+
+if (entry.isDirectory()) {
+results.push(...walkDir(fullPath, baseDir));
+} else {
+results.push(relativePath);
+}
+}
+} catch (e) {}
+return results;
+}
+
+function matchGlob(filepath, pattern) {
+const regex = pattern
+.replace(/\\./g, "\\\\.")
+.replace(/\\*\\*/g, "<<<GLOBSTAR>>>")
+.replace(/\\*/g, "[^/]*")
+.replace(/<<<GLOBSTAR>>>/g, ".*")
+.replace(/\\?/g, ".");
+return new RegExp("^" + regex + "$").test(filepath);
+}
+
+const allFiles = walkDir(basePath, basePath);
+const matches = allFiles.filter(f => matchGlob(f, pattern)).sort();
+
+for (const m of matches) {
+try {
+const fullPath = path.join(basePath, m);
+const stat = fs.statSync(fullPath);
+console.log(JSON.stringify({
+path: m,
+is_dir: stat.isDirectory(),
+size: stat.size,
+modified_at: stat.mtime.toISOString()
+}));
+} catch (e) {}
+}
+`, {
+PATH: pathB64,
+PATTERN: patternB64
+}));
+const infos = [];
+for (const line of result.output.trim().split("\n")) {
+if (!line) continue;
+try {
+const data = JSON.parse(line);
+infos.push({
+path: data.path,
+is_dir: data.is_dir,
+size: data.size,
+modified_at: data.modified_at
+});
+} catch {}
+}
+return infos;
+}
+};
+
+//#endregion
+//#region src/backends/local-sandbox.ts
+/**
+* LocalSandbox: Execute commands locally using child_process.
+*
+* Useful for local development and testing without cloud sandboxes.
+* All file operations are inherited from BaseSandbox and executed
+* via shell commands in the local filesystem.
+*/
+/**
+* Local sandbox that executes commands using Node.js child_process.
+*
+* All commands are executed in a bash shell with the specified working directory.
+* Inherits all file operations (read, write, edit, ls, grep, glob) from BaseSandbox.
+*
+* @example Basic usage
+* ```typescript
+* import { LocalSandbox } from 'deepagentsdk';
+*
+* const sandbox = new LocalSandbox({ cwd: './workspace' });
+*
+* // Execute commands
+* const result = await sandbox.execute('ls -la');
+* console.log(result.output);
+*
+* // File operations
+* await sandbox.write('./src/index.ts', 'console.log("hello")');
+* const content = await sandbox.read('./src/index.ts');
+* ```
+*
+* @example With timeout and environment
+* ```typescript
+* const sandbox = new LocalSandbox({
+* cwd: './workspace',
+* timeout: 60000, // 60 seconds
+* env: {
+* NODE_ENV: 'development',
+* DEBUG: '*',
+* },
+* });
+* ```
+*
+* @example Error handling
+* ```typescript
+* const result = await sandbox.execute('npm test');
+* if (result.exitCode !== 0) {
+* console.error('Tests failed:', result.output);
+* }
+* ```
+*/
+var LocalSandbox = class extends BaseSandbox {
+cwd;
+timeout;
+env;
+maxOutputSize;
+_id;
+/**
+* Create a new LocalSandbox instance.
+*
+* @param options - Configuration options for the sandbox
+*/
+constructor(options = {}) {
+super();
+this.cwd = options.cwd || process.cwd();
+this.timeout = options.timeout || 3e4;
+this.env = options.env || {};
+this.maxOutputSize = options.maxOutputSize || 1024 * 1024;
+this._id = `local-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
+}
+/**
+* Unique identifier for this sandbox instance.
+* Format: `local-{timestamp}-{random}`
+*/
+get id() {
+return this._id;
+}
+/**
+* Execute a shell command in the local filesystem.
+*
+* Commands are executed using bash with the configured working directory
+* and environment variables. Output is captured from both stdout and stderr.
+*
+* @param command - Shell command to execute
+* @returns ExecuteResponse with output, exit code, and truncation status
+*
+* @example
+* ```typescript
+* const result = await sandbox.execute('echo "Hello" && ls -la');
+* console.log(result.output);
+* console.log('Exit code:', result.exitCode);
+* ```
+*/
+async execute(command) {
+return new Promise((resolve) => {
+const child = (0, child_process.spawn)("bash", ["-c", command], {
+cwd: this.cwd,
+env: {
+...process.env,
+...this.env
+},
+timeout: this.timeout
+});
+let output = "";
+let truncated = false;
+child.stdout.on("data", (data) => {
+if (output.length < this.maxOutputSize) output += data.toString();
+else truncated = true;
+});
+child.stderr.on("data", (data) => {
+if (output.length < this.maxOutputSize) output += data.toString();
+else truncated = true;
+});
+child.on("close", (code) => {
+resolve({
+output,
+exitCode: code,
+truncated
+});
+});
+child.on("error", (err) => {
+resolve({
+output: `Error: ${err.message}`,
+exitCode: 1,
+truncated: false
+});
+});
+});
+}
+};
+
+//#endregion
+//#region src/utils/model-parser.ts
+/**
+* Utility to parse model strings into LanguageModel instances.
+* Provides backward compatibility for CLI and other string-based model specifications.
+*/
+/**
+* Parse a model string into a LanguageModel instance.
+*
+* Supports formats like:
+* - "anthropic/claude-sonnet-4-20250514"
+* - "openai/gpt-4o"
+* - "claude-sonnet-4-20250514" (defaults to Anthropic)
+*
+* @param modelString - The model string to parse
+* @returns A LanguageModel instance
+*
+* @example
+* ```typescript
+* const model = parseModelString("anthropic/claude-sonnet-4-20250514");
+* const agent = createDeepAgent({ model });
+* ```
+*/
+function parseModelString(modelString) {
+const [provider, modelName] = modelString.split("/");
+if (provider === "anthropic") return (0, _ai_sdk_anthropic.anthropic)(modelName || "claude-sonnet-4-20250514");
+else if (provider === "openai") return (0, _ai_sdk_openai.openai)(modelName || "gpt-5-mini");
+return (0, _ai_sdk_anthropic.anthropic)(modelString);
+}
+
+//#endregion
+//#region src/checkpointer/file-saver.ts
+/**
+* File-based checkpoint saver for local development.
+*/
+/**
+* File-based checkpoint saver.
+*
+* Stores checkpoints as JSON files in a directory. Each thread gets
+* its own file named `{threadId}.json`.
+*
+* @example
+* ```typescript
+* const saver = new FileSaver({ dir: './.checkpoints' });
+* const agent = createDeepAgent({
+* model: anthropic('claude-sonnet-4-20250514'),
+* checkpointer: saver,
+* });
+* ```
+*/
+var FileSaver = class {
+dir;
+constructor(options) {
+this.dir = options.dir;
+if (!(0, node_fs.existsSync)(this.dir)) (0, node_fs.mkdirSync)(this.dir, { recursive: true });
+}
+getFilePath(threadId) {
+const safeId = threadId.replace(/[^a-zA-Z0-9_-]/g, "_");
+return (0, node_path.join)(this.dir, `${safeId}.json`);
+}
+async save(checkpoint) {
+const filePath = this.getFilePath(checkpoint.threadId);
+const data = {
+...checkpoint,
+updatedAt: (/* @__PURE__ */ new Date()).toISOString()
+};
+(0, node_fs.writeFileSync)(filePath, JSON.stringify(data, null, 2), "utf-8");
+}
+async load(threadId) {
+const filePath = this.getFilePath(threadId);
+if (!(0, node_fs.existsSync)(filePath)) return;
+try {
+const content = (0, node_fs.readFileSync)(filePath, "utf-8");
+return JSON.parse(content);
+} catch {
+return;
+}
+}
+async list() {
+if (!(0, node_fs.existsSync)(this.dir)) return [];
+return (0, node_fs.readdirSync)(this.dir).filter((f) => f.endsWith(".json")).map((f) => f.replace(".json", ""));
+}
+async delete(threadId) {
+const filePath = this.getFilePath(threadId);
+if ((0, node_fs.existsSync)(filePath)) (0, node_fs.unlinkSync)(filePath);
+}
+async exists(threadId) {
+return (0, node_fs.existsSync)(this.getFilePath(threadId));
+}
+};
+
 //#endregion
 Object.defineProperty(exports, 'BASE_PROMPT', {
 enumerable: true,
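The most distinctive addition above is `BaseSandbox`'s transport trick: every file operation is compiled into a `node -e` one-liner, with user-supplied paths and content smuggled in as base64 so shell quoting and escaping can neither corrupt nor inject into the command. The round trip in isolation, using the same `toBase64`/placeholder-replacement shape as the diff:

```typescript
// Demonstrates the toBase64/buildNodeScript round trip from the diff.
function toBase64(str: string): string {
  return Buffer.from(str, "utf-8").toString("base64");
}

const hostile = `"; rm -rf / #`; // would break naive shell interpolation
const script = `
const data = Buffer.from("__DATA__", "base64").toString("utf-8");
console.log(data);
`.replace(/__DATA__/g, toBase64(hostile));

// Base64 output only contains [A-Za-z0-9+/=], so the embedded user data
// cannot terminate the quoting or introduce shell metacharacters.
const command = `node -e '${script}'`;
console.log(command.includes("rm -rf")); // false — payload stays encoded
```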
@@ -2979,6 +3537,12 @@ Object.defineProperty(exports, 'BASE_PROMPT', {
 return BASE_PROMPT;
 }
 });
+Object.defineProperty(exports, 'BaseSandbox', {
+enumerable: true,
+get: function () {
+return BaseSandbox;
+}
+});
 Object.defineProperty(exports, 'CONTEXT_WINDOW', {
 enumerable: true,
 get: function () {
@@ -3069,22 +3633,22 @@ Object.defineProperty(exports, 'FILE_NOT_FOUND', {
 return FILE_NOT_FOUND;
 }
 });
-Object.defineProperty(exports, '
+Object.defineProperty(exports, 'FileSaver', {
 enumerable: true,
 get: function () {
-return
+return FileSaver;
 }
 });
-Object.defineProperty(exports, '
+Object.defineProperty(exports, 'LocalSandbox', {
 enumerable: true,
 get: function () {
-return
+return LocalSandbox;
 }
 });
-Object.defineProperty(exports, '
+Object.defineProperty(exports, 'MAX_FILE_SIZE_MB', {
 enumerable: true,
 get: function () {
-return
+return MAX_FILE_SIZE_MB;
 }
 });
 Object.defineProperty(exports, 'StateBackend', {
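Taken together, the export hunks make the headline of 0.14.0 clear: `BaseSandbox`, `LocalSandbox`, `FileSaver`, and `parseModelString` (below) become part of the public surface, enabling local sandboxed execution with file-based checkpointing. A combined usage sketch assembled from the JSDoc examples in this diff; whether `createDeepAgent` accepts a `backend` option is inferred from the class's `this.backend` and `isSandboxBackend` references rather than shown directly, so treat that parameter as an assumption:

```typescript
import {
  createDeepAgent,
  FileSaver,
  LocalSandbox,
  parseModelString,
} from "deepagentsdk";

const agent = createDeepAgent({
  model: parseModelString("anthropic/claude-sonnet-4-20250514"),
  // Assumed option name: the diff shows DeepAgent using this.backend and
  // gating an execute tool on isSandboxBackend, but not the constructor key.
  backend: new LocalSandbox({ cwd: "./workspace", timeout: 60_000 }),
  checkpointer: new FileSaver({ dir: "./.checkpoints" }),
});
```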
@@ -3321,6 +3885,30 @@ Object.defineProperty(exports, 'http_request', {
 return http_request;
 }
 });
+Object.defineProperty(exports, 'init_errors', {
+enumerable: true,
+get: function () {
+return init_errors;
+}
+});
+Object.defineProperty(exports, 'init_eviction', {
+enumerable: true,
+get: function () {
+return init_eviction;
+}
+});
+Object.defineProperty(exports, 'init_limits', {
+enumerable: true,
+get: function () {
+return init_limits;
+}
+});
+Object.defineProperty(exports, 'init_web', {
+enumerable: true,
+get: function () {
+return init_web;
+}
+});
 Object.defineProperty(exports, 'isSandboxBackend', {
 enumerable: true,
 get: function () {
@@ -3339,6 +3927,12 @@ Object.defineProperty(exports, 'needsSummarization', {
 return needsSummarization;
 }
 });
+Object.defineProperty(exports, 'parseModelString', {
+enumerable: true,
+get: function () {
+return parseModelString;
+}
+});
 Object.defineProperty(exports, 'patchToolCalls', {
 enumerable: true,
 get: function () {
@@ -3393,4 +3987,4 @@ Object.defineProperty(exports, 'write_todos', {
 return write_todos;
 }
 });
-//# sourceMappingURL=agent-Cuks-Idh.cjs.map
+//# sourceMappingURL=file-saver-BYPKakT4.cjs.map