deepagents 1.8.4 → 1.9.0-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +1694 -376
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1699 -1101
- package/dist/index.d.ts +1700 -1100
- package/dist/index.js +1724 -409
- package/dist/index.js.map +1 -1
- package/package.json +9 -6
package/dist/index.cjs
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
Object.defineProperty(exports, Symbol.toStringTag, { value:
|
|
1
|
+
Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
|
|
2
2
|
//#region \0rolldown/runtime.js
|
|
3
3
|
var __create = Object.create;
|
|
4
4
|
var __defProp = Object.defineProperty;
|
|
@@ -7,16 +7,12 @@ var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
|
7
7
|
var __getProtoOf = Object.getPrototypeOf;
|
|
8
8
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
9
9
|
var __copyProps = (to, from, except, desc) => {
|
|
10
|
-
if (from && typeof from === "object" || typeof from === "function") {
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
|
|
17
|
-
});
|
|
18
|
-
}
|
|
19
|
-
}
|
|
10
|
+
if (from && typeof from === "object" || typeof from === "function") for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
|
|
11
|
+
key = keys[i];
|
|
12
|
+
if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, {
|
|
13
|
+
get: ((k) => from[k]).bind(null, key),
|
|
14
|
+
enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
|
|
15
|
+
});
|
|
20
16
|
}
|
|
21
17
|
return to;
|
|
22
18
|
};
|
|
@@ -24,7 +20,6 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
24
20
|
value: mod,
|
|
25
21
|
enumerable: true
|
|
26
22
|
}) : target, mod));
|
|
27
|
-
|
|
28
23
|
//#endregion
|
|
29
24
|
let langchain = require("langchain");
|
|
30
25
|
let _langchain_core_runnables = require("@langchain/core/runnables");
|
|
@@ -33,11 +28,12 @@ let zod_v4 = require("zod/v4");
|
|
|
33
28
|
let micromatch = require("micromatch");
|
|
34
29
|
micromatch = __toESM(micromatch);
|
|
35
30
|
let path = require("path");
|
|
31
|
+
path = __toESM(path);
|
|
36
32
|
let _langchain_core_messages = require("@langchain/core/messages");
|
|
37
33
|
let zod = require("zod");
|
|
38
34
|
let yaml = require("yaml");
|
|
39
35
|
yaml = __toESM(yaml);
|
|
40
|
-
let
|
|
36
|
+
let _langchain_langgraph_sdk = require("@langchain/langgraph-sdk");
|
|
41
37
|
let _langchain_core_errors = require("@langchain/core/errors");
|
|
42
38
|
let langchain_chat_models_universal = require("langchain/chat_models/universal");
|
|
43
39
|
let node_fs_promises = require("node:fs/promises");
|
|
@@ -50,18 +46,30 @@ let node_child_process = require("node:child_process");
|
|
|
50
46
|
node_child_process = __toESM(node_child_process);
|
|
51
47
|
let fast_glob = require("fast-glob");
|
|
52
48
|
fast_glob = __toESM(fast_glob);
|
|
49
|
+
let langsmith_experimental_sandbox = require("langsmith/experimental/sandbox");
|
|
53
50
|
let node_os = require("node:os");
|
|
54
51
|
node_os = __toESM(node_os);
|
|
55
|
-
|
|
56
52
|
//#region src/backends/protocol.ts
|
|
57
53
|
/**
|
|
58
54
|
* Type guard to check if a backend supports execution.
|
|
59
55
|
*
|
|
60
56
|
* @param backend - Backend instance to check
|
|
61
|
-
* @returns True if the backend implements
|
|
57
|
+
* @returns True if the backend implements SandboxBackendProtocolV2
|
|
62
58
|
*/
|
|
63
59
|
function isSandboxBackend(backend) {
|
|
64
|
-
return typeof backend.execute === "function" && typeof backend.id === "string";
|
|
60
|
+
return backend != null && typeof backend === "object" && typeof backend.execute === "function" && typeof backend.id === "string" && backend.id !== "";
|
|
61
|
+
}
|
|
62
|
+
/**
|
|
63
|
+
* Type guard to check if a backend is a sandbox protocol (v1 or v2).
|
|
64
|
+
*
|
|
65
|
+
* Checks for the presence of `execute` function and `id` string,
|
|
66
|
+
* which are the defining features of sandbox protocols.
|
|
67
|
+
*
|
|
68
|
+
* @param backend - Backend instance to check
|
|
69
|
+
* @returns True if the backend implements sandbox protocol (v1 or v2)
|
|
70
|
+
*/
|
|
71
|
+
function isSandboxProtocol(backend) {
|
|
72
|
+
return backend != null && typeof backend === "object" && typeof backend.execute === "function" && typeof backend.id === "string" && backend.id !== "";
|
|
65
73
|
}
|
|
66
74
|
const SANDBOX_ERROR_SYMBOL = Symbol.for("sandbox.error");
|
|
67
75
|
/**
|
|
@@ -112,7 +120,6 @@ var SandboxError = class SandboxError extends Error {
|
|
|
112
120
|
return typeof error === "object" && error !== null && error[SANDBOX_ERROR_SYMBOL] === true;
|
|
113
121
|
}
|
|
114
122
|
};
|
|
115
|
-
|
|
116
123
|
//#endregion
|
|
117
124
|
//#region src/backends/utils.ts
|
|
118
125
|
/**
|
|
@@ -124,9 +131,21 @@ var SandboxError = class SandboxError extends Error {
|
|
|
124
131
|
*/
|
|
125
132
|
const EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents";
|
|
126
133
|
const MAX_LINE_LENGTH = 1e4;
|
|
127
|
-
const LINE_NUMBER_WIDTH = 6;
|
|
128
134
|
const TOOL_RESULT_TOKEN_LIMIT = 2e4;
|
|
129
135
|
const TRUNCATION_GUIDANCE = "... [results truncated, try being more specific with your parameters]";
|
|
136
|
+
const MIME_TYPES = {
|
|
137
|
+
".png": "image/png",
|
|
138
|
+
".jpg": "image/jpeg",
|
|
139
|
+
".jpeg": "image/jpeg",
|
|
140
|
+
".gif": "image/gif",
|
|
141
|
+
".webp": "image/webp",
|
|
142
|
+
".svg": "image/svg+xml",
|
|
143
|
+
".mp3": "audio/mpeg",
|
|
144
|
+
".wav": "audio/wav",
|
|
145
|
+
".mp4": "video/mp4",
|
|
146
|
+
".webm": "video/webm",
|
|
147
|
+
".pdf": "application/pdf"
|
|
148
|
+
};
|
|
130
149
|
/**
|
|
131
150
|
* Sanitize tool_call_id to prevent path traversal and separator issues.
|
|
132
151
|
*
|
|
@@ -154,17 +173,17 @@ function formatContentWithLineNumbers(content, startLine = 1) {
|
|
|
154
173
|
for (let i = 0; i < lines.length; i++) {
|
|
155
174
|
const line = lines[i];
|
|
156
175
|
const lineNum = i + startLine;
|
|
157
|
-
if (line.length <=
|
|
176
|
+
if (line.length <= 1e4) resultLines.push(`${lineNum.toString().padStart(6)}\t${line}`);
|
|
158
177
|
else {
|
|
159
178
|
const numChunks = Math.ceil(line.length / MAX_LINE_LENGTH);
|
|
160
179
|
for (let chunkIdx = 0; chunkIdx < numChunks; chunkIdx++) {
|
|
161
180
|
const start = chunkIdx * MAX_LINE_LENGTH;
|
|
162
181
|
const end = Math.min(start + MAX_LINE_LENGTH, line.length);
|
|
163
182
|
const chunk = line.substring(start, end);
|
|
164
|
-
if (chunkIdx === 0) resultLines.push(`${lineNum.toString().padStart(
|
|
183
|
+
if (chunkIdx === 0) resultLines.push(`${lineNum.toString().padStart(6)}\t${chunk}`);
|
|
165
184
|
else {
|
|
166
185
|
const continuationMarker = `${lineNum}.${chunkIdx}`;
|
|
167
|
-
resultLines.push(`${continuationMarker.padStart(
|
|
186
|
+
resultLines.push(`${continuationMarker.padStart(6)}\t${chunk}`);
|
|
168
187
|
}
|
|
169
188
|
}
|
|
170
189
|
}
|
|
@@ -188,20 +207,50 @@ function checkEmptyContent(content) {
|
|
|
188
207
|
* @returns Content as string with lines joined by newlines
|
|
189
208
|
*/
|
|
190
209
|
function fileDataToString(fileData) {
|
|
191
|
-
return fileData.content.join("\n");
|
|
210
|
+
if (Array.isArray(fileData.content)) return fileData.content.join("\n");
|
|
211
|
+
if (typeof fileData.content === "string") return fileData.content;
|
|
212
|
+
throw new Error("Cannot convert binary FileData to string");
|
|
213
|
+
}
|
|
214
|
+
/**
|
|
215
|
+
* Type guard to check if FileData contains binary content (Uint8Array).
|
|
216
|
+
*
|
|
217
|
+
* @param data - FileData to check
|
|
218
|
+
* @returns True if the content is a Uint8Array (binary)
|
|
219
|
+
*/
|
|
220
|
+
function isFileDataBinary(data) {
|
|
221
|
+
return ArrayBuffer.isView(data.content);
|
|
192
222
|
}
|
|
193
223
|
/**
|
|
194
|
-
* Create a FileData object
|
|
224
|
+
* Create a FileData object.
|
|
195
225
|
*
|
|
196
|
-
*
|
|
197
|
-
*
|
|
198
|
-
*
|
|
226
|
+
* Defaults to v2 format (content as single string). Pass `fileFormat: "v1"` for
|
|
227
|
+
* backward compatibility with older readers during a rolling deployment.
|
|
228
|
+
* Binary content (Uint8Array) is only supported with v2.
|
|
229
|
+
*
|
|
230
|
+
* @param content - File content as a string or binary Uint8Array (v2 only)
|
|
231
|
+
* @param createdAt - Optional creation timestamp (ISO format), defaults to now
|
|
232
|
+
* @param fileFormat - Storage format: "v2" (default) or "v1" (legacy line array)
|
|
233
|
+
* @returns FileData in the requested format
|
|
199
234
|
*/
|
|
200
|
-
function createFileData(content, createdAt) {
|
|
201
|
-
const lines = typeof content === "string" ? content.split("\n") : content;
|
|
235
|
+
function createFileData(content, createdAt, fileFormat = "v2", mimeType) {
|
|
202
236
|
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
237
|
+
if (fileFormat === "v1" && ArrayBuffer.isView(content)) throw new Error("Binary data is not supported with v1 file formats. Please use v2 file format");
|
|
238
|
+
if (fileFormat === "v2") {
|
|
239
|
+
if (ArrayBuffer.isView(content)) return {
|
|
240
|
+
content: new Uint8Array(content.buffer, content.byteOffset, content.byteLength),
|
|
241
|
+
mimeType: mimeType ?? "application/octet-stream",
|
|
242
|
+
created_at: createdAt || now,
|
|
243
|
+
modified_at: now
|
|
244
|
+
};
|
|
245
|
+
return {
|
|
246
|
+
content,
|
|
247
|
+
mimeType: mimeType ?? "text/plain",
|
|
248
|
+
created_at: createdAt || now,
|
|
249
|
+
modified_at: now
|
|
250
|
+
};
|
|
251
|
+
}
|
|
203
252
|
return {
|
|
204
|
-
content:
|
|
253
|
+
content: typeof content === "string" ? content.split("\n") : content,
|
|
205
254
|
created_at: createdAt || now,
|
|
206
255
|
modified_at: now
|
|
207
256
|
};
|
|
@@ -214,33 +263,20 @@ function createFileData(content, createdAt) {
|
|
|
214
263
|
* @returns Updated FileData object
|
|
215
264
|
*/
|
|
216
265
|
function updateFileData(fileData, content) {
|
|
217
|
-
const lines = typeof content === "string" ? content.split("\n") : content;
|
|
218
266
|
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
267
|
+
if (isFileDataV1(fileData)) return {
|
|
268
|
+
content: typeof content === "string" ? content.split("\n") : content,
|
|
269
|
+
created_at: fileData.created_at,
|
|
270
|
+
modified_at: now
|
|
271
|
+
};
|
|
219
272
|
return {
|
|
220
|
-
content
|
|
273
|
+
content,
|
|
274
|
+
mimeType: fileData.mimeType,
|
|
221
275
|
created_at: fileData.created_at,
|
|
222
276
|
modified_at: now
|
|
223
277
|
};
|
|
224
278
|
}
|
|
225
279
|
/**
|
|
226
|
-
* Format file data for read response with line numbers.
|
|
227
|
-
*
|
|
228
|
-
* @param fileData - FileData object
|
|
229
|
-
* @param offset - Line offset (0-indexed)
|
|
230
|
-
* @param limit - Maximum number of lines
|
|
231
|
-
* @returns Formatted content or error message
|
|
232
|
-
*/
|
|
233
|
-
function formatReadResponse(fileData, offset, limit) {
|
|
234
|
-
const content = fileDataToString(fileData);
|
|
235
|
-
const emptyMsg = checkEmptyContent(content);
|
|
236
|
-
if (emptyMsg) return emptyMsg;
|
|
237
|
-
const lines = content.split("\n");
|
|
238
|
-
const startIdx = offset;
|
|
239
|
-
const endIdx = Math.min(startIdx + limit, lines.length);
|
|
240
|
-
if (startIdx >= lines.length) return `Error: Line offset ${offset} exceeds file length (${lines.length} lines)`;
|
|
241
|
-
return formatContentWithLineNumbers(lines.slice(startIdx, endIdx), startIdx + 1);
|
|
242
|
-
}
|
|
243
|
-
/**
|
|
244
280
|
* Perform string replacement with occurrence validation.
|
|
245
281
|
*
|
|
246
282
|
* @param content - Original content
|
|
@@ -267,13 +303,13 @@ function performStringReplacement(content, oldString, newString, replaceAll) {
|
|
|
267
303
|
function truncateIfTooLong(result) {
|
|
268
304
|
if (Array.isArray(result)) {
|
|
269
305
|
const totalChars = result.reduce((sum, item) => sum + item.length, 0);
|
|
270
|
-
if (totalChars >
|
|
306
|
+
if (totalChars > 2e4 * 4) {
|
|
271
307
|
const truncateAt = Math.floor(result.length * TOOL_RESULT_TOKEN_LIMIT * 4 / totalChars);
|
|
272
308
|
return [...result.slice(0, truncateAt), TRUNCATION_GUIDANCE];
|
|
273
309
|
}
|
|
274
310
|
return result;
|
|
275
311
|
}
|
|
276
|
-
if (result.length >
|
|
312
|
+
if (result.length > 2e4 * 4) return result.substring(0, TOOL_RESULT_TOKEN_LIMIT * 4) + "\n... [results truncated, try being more specific with your parameters]";
|
|
277
313
|
return result;
|
|
278
314
|
}
|
|
279
315
|
/**
|
|
@@ -299,8 +335,8 @@ function truncateIfTooLong(result) {
|
|
|
299
335
|
* validatePath("C:\\Users\\file") // Throws: Windows absolute paths not supported
|
|
300
336
|
* ```
|
|
301
337
|
*/
|
|
302
|
-
function validatePath(path$
|
|
303
|
-
const pathStr = path$
|
|
338
|
+
function validatePath(path$6) {
|
|
339
|
+
const pathStr = path$6 || "/";
|
|
304
340
|
if (!pathStr || pathStr.trim() === "") throw new Error("Path cannot be empty");
|
|
305
341
|
let normalized = pathStr.startsWith("/") ? pathStr : "/" + pathStr;
|
|
306
342
|
if (!normalized.endsWith("/")) normalized += "/";
|
|
@@ -322,10 +358,10 @@ function validatePath(path$5) {
|
|
|
322
358
|
* // Returns: "/test.py\n/src/main.py" (sorted by modified_at)
|
|
323
359
|
* ```
|
|
324
360
|
*/
|
|
325
|
-
function globSearchFiles(files, pattern, path$
|
|
361
|
+
function globSearchFiles(files, pattern, path$8 = "/") {
|
|
326
362
|
let normalizedPath;
|
|
327
363
|
try {
|
|
328
|
-
normalizedPath = validatePath(path$
|
|
364
|
+
normalizedPath = validatePath(path$8);
|
|
329
365
|
} catch {
|
|
330
366
|
return "No files found";
|
|
331
367
|
}
|
|
@@ -351,16 +387,13 @@ function globSearchFiles(files, pattern, path$7 = "/") {
|
|
|
351
387
|
/**
|
|
352
388
|
* Return structured grep matches from an in-memory files mapping.
|
|
353
389
|
*
|
|
354
|
-
* Performs literal text search (not regex).
|
|
355
|
-
*
|
|
356
|
-
* Returns a list of GrepMatch on success, or a string for invalid inputs.
|
|
357
|
-
* We deliberately do not raise here to keep backends non-throwing in tool
|
|
358
|
-
* contexts and preserve user-facing error messages.
|
|
390
|
+
* Performs literal text search (not regex). Binary files are skipped.
|
|
391
|
+
* Returns an empty array when no matches are found or on invalid input.
|
|
359
392
|
*/
|
|
360
|
-
function grepMatchesFromFiles(files, pattern, path$
|
|
393
|
+
function grepMatchesFromFiles(files, pattern, path$10 = null, glob = null) {
|
|
361
394
|
let normalizedPath;
|
|
362
395
|
try {
|
|
363
|
-
normalizedPath = validatePath(path$
|
|
396
|
+
normalizedPath = validatePath(path$10);
|
|
364
397
|
} catch {
|
|
365
398
|
return [];
|
|
366
399
|
}
|
|
@@ -370,18 +403,140 @@ function grepMatchesFromFiles(files, pattern, path$9 = null, glob = null) {
|
|
|
370
403
|
nobrace: false
|
|
371
404
|
})));
|
|
372
405
|
const matches = [];
|
|
373
|
-
for (const [filePath, fileData] of Object.entries(filtered))
|
|
374
|
-
|
|
375
|
-
const
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
406
|
+
for (const [filePath, fileData] of Object.entries(filtered)) {
|
|
407
|
+
if (!isTextMimeType(migrateToFileDataV2(fileData, filePath).mimeType)) continue;
|
|
408
|
+
const lines = fileDataToString(fileData).split("\n");
|
|
409
|
+
for (let i = 0; i < lines.length; i++) {
|
|
410
|
+
const line = lines[i];
|
|
411
|
+
const lineNum = i + 1;
|
|
412
|
+
if (line.includes(pattern)) matches.push({
|
|
413
|
+
path: filePath,
|
|
414
|
+
line: lineNum,
|
|
415
|
+
text: line
|
|
416
|
+
});
|
|
417
|
+
}
|
|
381
418
|
}
|
|
382
419
|
return matches;
|
|
383
420
|
}
|
|
384
|
-
|
|
421
|
+
/**
|
|
422
|
+
* Determine MIME type from a file path's extension.
|
|
423
|
+
*
|
|
424
|
+
* Returns "text/plain" for unknown extensions.
|
|
425
|
+
*
|
|
426
|
+
* @param filePath - File path to inspect
|
|
427
|
+
* @returns MIME type string (e.g., "image/png", "text/plain")
|
|
428
|
+
*/
|
|
429
|
+
function getMimeType(filePath) {
|
|
430
|
+
return MIME_TYPES[path.default.extname(filePath).toLocaleLowerCase()] || "text/plain";
|
|
431
|
+
}
|
|
432
|
+
/**
|
|
433
|
+
* Check whether a MIME type represents text content.
|
|
434
|
+
*
|
|
435
|
+
* @param mimeType - MIME type string to check
|
|
436
|
+
* @returns True if the MIME type is text-based
|
|
437
|
+
*/
|
|
438
|
+
function isTextMimeType(mimeType) {
|
|
439
|
+
return mimeType.startsWith("text/") || mimeType === "application/json" || mimeType === "application/javascript" || mimeType === "image/svg+xml";
|
|
440
|
+
}
|
|
441
|
+
/**
|
|
442
|
+
* Type guard to check if FileData is v1 format (content as line array).
|
|
443
|
+
*
|
|
444
|
+
* @param data - FileData to check
|
|
445
|
+
* @returns True if data is FileDataV1
|
|
446
|
+
*/
|
|
447
|
+
function isFileDataV1(data) {
|
|
448
|
+
return Array.isArray(data.content);
|
|
449
|
+
}
|
|
450
|
+
/**
|
|
451
|
+
* Convert FileData to v2 format, joining v1 line arrays into a single string.
|
|
452
|
+
*
|
|
453
|
+
* If the data is already v2, returns it unchanged.
|
|
454
|
+
*
|
|
455
|
+
* @param data - FileData in either format
|
|
456
|
+
* @returns FileDataV2 with content as string (text) or Uint8Array (binary)
|
|
457
|
+
*/
|
|
458
|
+
function migrateToFileDataV2(data, filePath) {
|
|
459
|
+
if (isFileDataV1(data)) return {
|
|
460
|
+
content: data.content.join("\n"),
|
|
461
|
+
mimeType: getMimeType(filePath),
|
|
462
|
+
created_at: data.created_at,
|
|
463
|
+
modified_at: data.modified_at
|
|
464
|
+
};
|
|
465
|
+
if (!("mimeType" in data) || !data.mimeType) return {
|
|
466
|
+
...data,
|
|
467
|
+
mimeType: getMimeType(filePath)
|
|
468
|
+
};
|
|
469
|
+
return data;
|
|
470
|
+
}
|
|
471
|
+
/**
|
|
472
|
+
* Adapt a v1 {@link BackendProtocol} to {@link BackendProtocolV2}.
|
|
473
|
+
*
|
|
474
|
+
* If the backend already implements v2, it is returned as-is.
|
|
475
|
+
* For v1 backends, wraps returns in Result types:
|
|
476
|
+
* - `read()` string returns wrapped in {@link ReadResult}
|
|
477
|
+
* - `readRaw()` FileData returns wrapped in {@link ReadRawResult}
|
|
478
|
+
* - `grep()` returns wrapped in {@link GrepResult}
|
|
479
|
+
* - `ls()` FileInfo[] returns wrapped in {@link LsResult}
|
|
480
|
+
* - `glob()` FileInfo[] returns wrapped in {@link GlobResult}
|
|
481
|
+
*
|
|
482
|
+
* Note: For sandbox instances, use {@link adaptSandboxProtocol} instead.
|
|
483
|
+
*
|
|
484
|
+
* @param backend - Backend instance (v1 or v2)
|
|
485
|
+
* @returns BackendProtocolV2-compatible backend
|
|
486
|
+
*/
|
|
487
|
+
function adaptBackendProtocol(backend) {
|
|
488
|
+
return {
|
|
489
|
+
async ls(path$11) {
|
|
490
|
+
const result = await ("ls" in backend ? backend.ls(path$11) : backend.lsInfo(path$11));
|
|
491
|
+
if (Array.isArray(result)) return { files: result };
|
|
492
|
+
return result;
|
|
493
|
+
},
|
|
494
|
+
async readRaw(filePath) {
|
|
495
|
+
const result = await backend.readRaw(filePath);
|
|
496
|
+
if ("data" in result || "error" in result) return result;
|
|
497
|
+
return { data: migrateToFileDataV2(result, filePath) };
|
|
498
|
+
},
|
|
499
|
+
async glob(pattern, path$12) {
|
|
500
|
+
const result = await ("glob" in backend ? backend.glob(pattern, path$12) : backend.globInfo(pattern, path$12));
|
|
501
|
+
if (Array.isArray(result)) return { files: result };
|
|
502
|
+
return result;
|
|
503
|
+
},
|
|
504
|
+
write: (filePath, content) => backend.write(filePath, content),
|
|
505
|
+
edit: (filePath, oldString, newString, replaceAll) => backend.edit(filePath, oldString, newString, replaceAll),
|
|
506
|
+
uploadFiles: backend.uploadFiles ? (files) => backend.uploadFiles(files) : void 0,
|
|
507
|
+
downloadFiles: backend.downloadFiles ? (paths) => backend.downloadFiles(paths) : void 0,
|
|
508
|
+
async read(filePath, offset, limit) {
|
|
509
|
+
const result = await backend.read(filePath, offset, limit);
|
|
510
|
+
if (typeof result === "string") return { content: result };
|
|
511
|
+
return result;
|
|
512
|
+
},
|
|
513
|
+
async grep(pattern, path$13, glob) {
|
|
514
|
+
const result = await ("grep" in backend ? backend.grep(pattern, path$13, glob) : backend.grepRaw(pattern, path$13, glob));
|
|
515
|
+
if (Array.isArray(result)) return { matches: result };
|
|
516
|
+
if (typeof result === "string") return { error: result };
|
|
517
|
+
return result;
|
|
518
|
+
}
|
|
519
|
+
};
|
|
520
|
+
}
|
|
521
|
+
/**
|
|
522
|
+
* Adapt a sandbox backend from v1 to v2 interface.
|
|
523
|
+
*
|
|
524
|
+
* This extends {@link adaptBackendProtocol} to also preserve sandbox-specific
|
|
525
|
+
* properties from {@link SandboxBackendProtocol}: `execute` and `id`.
|
|
526
|
+
*
|
|
527
|
+
* @param sandbox - Sandbox backend (v1 or v2)
|
|
528
|
+
* @returns SandboxBackendProtocolV2-compatible sandbox
|
|
529
|
+
*/
|
|
530
|
+
function adaptSandboxProtocol(sandbox) {
|
|
531
|
+
const adapted = adaptBackendProtocol(sandbox);
|
|
532
|
+
adapted.execute = (cmd) => sandbox.execute(cmd);
|
|
533
|
+
Object.defineProperty(adapted, "id", {
|
|
534
|
+
value: sandbox.id,
|
|
535
|
+
enumerable: true,
|
|
536
|
+
configurable: true
|
|
537
|
+
});
|
|
538
|
+
return adapted;
|
|
539
|
+
}
|
|
385
540
|
//#endregion
|
|
386
541
|
//#region src/backends/state.ts
|
|
387
542
|
/**
|
|
@@ -397,8 +552,10 @@ function grepMatchesFromFiles(files, pattern, path$9 = null, glob = null) {
|
|
|
397
552
|
*/
|
|
398
553
|
var StateBackend = class {
|
|
399
554
|
stateAndStore;
|
|
400
|
-
|
|
555
|
+
fileFormat;
|
|
556
|
+
constructor(stateAndStore, options) {
|
|
401
557
|
this.stateAndStore = stateAndStore;
|
|
558
|
+
this.fileFormat = options?.fileFormat ?? "v2";
|
|
402
559
|
}
|
|
403
560
|
/**
|
|
404
561
|
* Get files from current state.
|
|
@@ -410,10 +567,10 @@ var StateBackend = class {
|
|
|
410
567
|
* List files and directories in the specified directory (non-recursive).
|
|
411
568
|
*
|
|
412
569
|
* @param path - Absolute path to directory
|
|
413
|
-
* @returns
|
|
570
|
+
* @returns LsResult with list of FileInfo objects on success or error on failure.
|
|
414
571
|
* Directories have a trailing / in their path and is_dir=true.
|
|
415
572
|
*/
|
|
416
|
-
|
|
573
|
+
ls(path) {
|
|
417
574
|
const files = this.getFiles();
|
|
418
575
|
const infos = [];
|
|
419
576
|
const subdirs = /* @__PURE__ */ new Set();
|
|
@@ -426,7 +583,7 @@ var StateBackend = class {
|
|
|
426
583
|
subdirs.add(normalizedPath + subdirName + "/");
|
|
427
584
|
continue;
|
|
428
585
|
}
|
|
429
|
-
const size = fd.content.join("\n").length;
|
|
586
|
+
const size = isFileDataV1(fd) ? fd.content.join("\n").length : isFileDataBinary(fd) ? fd.content.byteLength : fd.content.length;
|
|
430
587
|
infos.push({
|
|
431
588
|
path: k,
|
|
432
589
|
is_dir: false,
|
|
@@ -441,31 +598,43 @@ var StateBackend = class {
|
|
|
441
598
|
modified_at: ""
|
|
442
599
|
});
|
|
443
600
|
infos.sort((a, b) => a.path.localeCompare(b.path));
|
|
444
|
-
return infos;
|
|
601
|
+
return { files: infos };
|
|
445
602
|
}
|
|
446
603
|
/**
|
|
447
|
-
* Read file content
|
|
604
|
+
* Read file content.
|
|
605
|
+
*
|
|
606
|
+
* Text files are paginated by line offset/limit.
|
|
607
|
+
* Binary files return full Uint8Array content (offset/limit ignored).
|
|
448
608
|
*
|
|
449
609
|
* @param filePath - Absolute file path
|
|
450
610
|
* @param offset - Line offset to start reading from (0-indexed)
|
|
451
611
|
* @param limit - Maximum number of lines to read
|
|
452
|
-
* @returns
|
|
612
|
+
* @returns ReadResult with content on success or error on failure
|
|
453
613
|
*/
|
|
454
614
|
read(filePath, offset = 0, limit = 500) {
|
|
455
615
|
const fileData = this.getFiles()[filePath];
|
|
456
|
-
if (!fileData) return
|
|
457
|
-
|
|
616
|
+
if (!fileData) return { error: `File '${filePath}' not found` };
|
|
617
|
+
const fileDataV2 = migrateToFileDataV2(fileData, filePath);
|
|
618
|
+
if (!isTextMimeType(fileDataV2.mimeType)) return {
|
|
619
|
+
content: fileDataV2.content,
|
|
620
|
+
mimeType: fileDataV2.mimeType
|
|
621
|
+
};
|
|
622
|
+
if (typeof fileDataV2.content !== "string") return { error: `File '${filePath}' has binary content but text MIME type` };
|
|
623
|
+
return {
|
|
624
|
+
content: fileDataV2.content.split("\n").slice(offset, offset + limit).join("\n"),
|
|
625
|
+
mimeType: fileDataV2.mimeType
|
|
626
|
+
};
|
|
458
627
|
}
|
|
459
628
|
/**
|
|
460
629
|
* Read file content as raw FileData.
|
|
461
630
|
*
|
|
462
631
|
* @param filePath - Absolute file path
|
|
463
|
-
* @returns
|
|
632
|
+
* @returns ReadRawResult with raw file data on success or error on failure
|
|
464
633
|
*/
|
|
465
634
|
readRaw(filePath) {
|
|
466
635
|
const fileData = this.getFiles()[filePath];
|
|
467
|
-
if (!fileData)
|
|
468
|
-
return fileData;
|
|
636
|
+
if (!fileData) return { error: `File '${filePath}' not found` };
|
|
637
|
+
return { data: fileData };
|
|
469
638
|
}
|
|
470
639
|
/**
|
|
471
640
|
* Create a new file with content.
|
|
@@ -473,7 +642,8 @@ var StateBackend = class {
|
|
|
473
642
|
*/
|
|
474
643
|
write(filePath, content) {
|
|
475
644
|
if (filePath in this.getFiles()) return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
|
|
476
|
-
const
|
|
645
|
+
const mimeType = getMimeType(filePath);
|
|
646
|
+
const newFileData = createFileData(content, void 0, this.fileFormat, mimeType);
|
|
477
647
|
return {
|
|
478
648
|
path: filePath,
|
|
479
649
|
filesUpdate: { [filePath]: newFileData }
|
|
@@ -497,23 +667,24 @@ var StateBackend = class {
|
|
|
497
667
|
};
|
|
498
668
|
}
|
|
499
669
|
/**
|
|
500
|
-
*
|
|
670
|
+
* Search file contents for a literal text pattern.
|
|
671
|
+
* Binary files are skipped.
|
|
501
672
|
*/
|
|
502
|
-
|
|
503
|
-
return grepMatchesFromFiles(this.getFiles(), pattern, path, glob);
|
|
673
|
+
grep(pattern, path = "/", glob = null) {
|
|
674
|
+
return { matches: grepMatchesFromFiles(this.getFiles(), pattern, path, glob) };
|
|
504
675
|
}
|
|
505
676
|
/**
|
|
506
677
|
* Structured glob matching returning FileInfo objects.
|
|
507
678
|
*/
|
|
508
|
-
|
|
679
|
+
glob(pattern, path = "/") {
|
|
509
680
|
const files = this.getFiles();
|
|
510
681
|
const result = globSearchFiles(files, pattern, path);
|
|
511
|
-
if (result === "No files found") return [];
|
|
682
|
+
if (result === "No files found") return { files: [] };
|
|
512
683
|
const paths = result.split("\n");
|
|
513
684
|
const infos = [];
|
|
514
685
|
for (const p of paths) {
|
|
515
686
|
const fd = files[p];
|
|
516
|
-
const size = fd ? fd.content.join("\n").length : 0;
|
|
687
|
+
const size = fd ? isFileDataV1(fd) ? fd.content.join("\n").length : isFileDataBinary(fd) ? fd.content.byteLength : fd.content.length : 0;
|
|
517
688
|
infos.push({
|
|
518
689
|
path: p,
|
|
519
690
|
is_dir: false,
|
|
@@ -521,7 +692,7 @@ var StateBackend = class {
|
|
|
521
692
|
modified_at: fd?.modified_at || ""
|
|
522
693
|
});
|
|
523
694
|
}
|
|
524
|
-
return infos;
|
|
695
|
+
return { files: infos };
|
|
525
696
|
}
|
|
526
697
|
/**
|
|
527
698
|
* Upload multiple files.
|
|
@@ -536,7 +707,9 @@ var StateBackend = class {
|
|
|
536
707
|
const responses = [];
|
|
537
708
|
const updates = {};
|
|
538
709
|
for (const [path, content] of files) try {
|
|
539
|
-
|
|
710
|
+
const mimeType = getMimeType(path);
|
|
711
|
+
if (this.fileFormat === "v2" && !isTextMimeType(mimeType)) updates[path] = createFileData(content, void 0, "v2", mimeType);
|
|
712
|
+
else updates[path] = createFileData(new TextDecoder().decode(content), void 0, this.fileFormat, mimeType);
|
|
540
713
|
responses.push({
|
|
541
714
|
path,
|
|
542
715
|
error: null
|
|
@@ -570,18 +743,23 @@ var StateBackend = class {
|
|
|
570
743
|
});
|
|
571
744
|
continue;
|
|
572
745
|
}
|
|
573
|
-
const
|
|
574
|
-
|
|
575
|
-
|
|
746
|
+
const fileDataV2 = migrateToFileDataV2(fileData, path);
|
|
747
|
+
if (typeof fileDataV2.content === "string") {
|
|
748
|
+
const content = new TextEncoder().encode(fileDataV2.content);
|
|
749
|
+
responses.push({
|
|
750
|
+
path,
|
|
751
|
+
content,
|
|
752
|
+
error: null
|
|
753
|
+
});
|
|
754
|
+
} else responses.push({
|
|
576
755
|
path,
|
|
577
|
-
content,
|
|
756
|
+
content: fileDataV2.content,
|
|
578
757
|
error: null
|
|
579
758
|
});
|
|
580
759
|
}
|
|
581
760
|
return responses;
|
|
582
761
|
}
|
|
583
762
|
};
|
|
584
|
-
|
|
585
763
|
//#endregion
|
|
586
764
|
//#region src/middleware/fs.ts
|
|
587
765
|
/**
|
|
@@ -614,6 +792,20 @@ var StateBackend = class {
|
|
|
614
792
|
* These tools return minimal confirmation messages and are never expected to produce
|
|
615
793
|
* output large enough to exceed token limits, so checking them would be unnecessary.
|
|
616
794
|
*/
|
|
795
|
+
/**
|
|
796
|
+
* All tool names registered by FilesystemMiddleware.
|
|
797
|
+
* This is the single source of truth — used by createDeepAgent to detect
|
|
798
|
+
* collisions with user-supplied tools at construction time.
|
|
799
|
+
*/
|
|
800
|
+
const FILESYSTEM_TOOL_NAMES = [
|
|
801
|
+
"ls",
|
|
802
|
+
"read_file",
|
|
803
|
+
"write_file",
|
|
804
|
+
"edit_file",
|
|
805
|
+
"glob",
|
|
806
|
+
"grep",
|
|
807
|
+
"execute"
|
|
808
|
+
];
|
|
617
809
|
const TOOLS_EXCLUDED_FROM_EVICTION = [
|
|
618
810
|
"ls",
|
|
619
811
|
"glob",
|
|
@@ -623,16 +815,11 @@ const TOOLS_EXCLUDED_FROM_EVICTION = [
|
|
|
623
815
|
"write_file"
|
|
624
816
|
];
|
|
625
817
|
/**
|
|
626
|
-
*
|
|
627
|
-
*
|
|
628
|
-
* This
|
|
629
|
-
*/
|
|
630
|
-
const NUM_CHARS_PER_TOKEN = 4;
|
|
631
|
-
/**
|
|
632
|
-
* Default values for read_file tool pagination (in lines).
|
|
818
|
+
* Maximum size for binary (non-text) files read via read_file, in bytes.
|
|
819
|
+
* Base64-encoded content is ~33% larger, so 10MB raw ≈ 13.3MB in context.
|
|
820
|
+
* This keeps inline multimodal payloads within all major provider limits.
|
|
633
821
|
*/
|
|
634
|
-
const
|
|
635
|
-
const DEFAULT_READ_LINE_LIMIT = 100;
|
|
822
|
+
const MAX_BINARY_READ_SIZE_BYTES = 10 * 1024 * 1024;
|
|
636
823
|
/**
|
|
637
824
|
* Template for truncation message in read_file.
|
|
638
825
|
* {file_path} will be filled in at runtime.
|
|
@@ -672,14 +859,27 @@ function createContentPreview(contentStr, headLines = 5, tailLines = 5) {
|
|
|
672
859
|
return headSample + truncationNotice + tailSample;
|
|
673
860
|
}
|
|
674
861
|
/**
|
|
675
|
-
* Zod
|
|
862
|
+
* Zod schema for legacy FileDataV1 (content as line array).
|
|
676
863
|
*/
|
|
677
|
-
const
|
|
864
|
+
const FileDataV1Schema = zod_v4.z.object({
|
|
678
865
|
content: zod_v4.z.array(zod_v4.z.string()),
|
|
679
866
|
created_at: zod_v4.z.string(),
|
|
680
867
|
modified_at: zod_v4.z.string()
|
|
681
868
|
});
|
|
682
869
|
/**
|
|
870
|
+
* Zod schema for FileDataV2 (content as string for text or Uint8Array for binary).
|
|
871
|
+
*/
|
|
872
|
+
const FileDataV2Schema = zod_v4.z.object({
|
|
873
|
+
content: zod_v4.z.union([zod_v4.z.string(), zod_v4.z.instanceof(Uint8Array)]),
|
|
874
|
+
mimeType: zod_v4.z.string(),
|
|
875
|
+
created_at: zod_v4.z.string(),
|
|
876
|
+
modified_at: zod_v4.z.string()
|
|
877
|
+
});
|
|
878
|
+
/**
|
|
879
|
+
* Zod v3 schema for FileData (re-export from backends)
|
|
880
|
+
*/
|
|
881
|
+
const FileDataSchema = zod_v4.z.union([FileDataV1Schema, FileDataV2Schema]);
|
|
882
|
+
/**
|
|
683
883
|
* Reducer for files state that merges file updates with support for deletions.
|
|
684
884
|
* When a file value is null, the file is deleted from state.
|
|
685
885
|
* When a file value is non-null, it is added or updated in state.
|
|
@@ -722,8 +922,8 @@ const FilesystemStateSchema = new _langchain_langgraph.StateSchema({ files: new
|
|
|
722
922
|
* @param stateAndStore - State and store container for backend initialization
|
|
723
923
|
*/
|
|
724
924
|
function getBackend(backend, stateAndStore) {
|
|
725
|
-
|
|
726
|
-
return
|
|
925
|
+
const actualBackend = typeof backend === "function" ? backend(stateAndStore) : backend;
|
|
926
|
+
return isSandboxProtocol(actualBackend) ? adaptSandboxProtocol(actualBackend) : adaptBackendProtocol(actualBackend);
|
|
727
927
|
}
|
|
728
928
|
const FILESYSTEM_SYSTEM_PROMPT = `## Filesystem Tools \`ls\`, \`read_file\`, \`write_file\`, \`edit_file\`, \`glob\`, \`grep\`
|
|
729
929
|
|
|
@@ -848,7 +1048,9 @@ function createLsTool(backend, options) {
|
|
|
848
1048
|
store: config.store
|
|
849
1049
|
});
|
|
850
1050
|
const path = input.path || "/";
|
|
851
|
-
const
|
|
1051
|
+
const lsResult = await resolvedBackend.ls(path);
|
|
1052
|
+
if (lsResult.error) return `Error listing files: ${lsResult.error}`;
|
|
1053
|
+
const infos = lsResult.files || [];
|
|
852
1054
|
if (infos.length === 0) return `No files found in ${path}`;
|
|
853
1055
|
const lines = [];
|
|
854
1056
|
for (const info of infos) if (info.is_dir) lines.push(`${info.path} (directory)`);
|
|
@@ -875,23 +1077,72 @@ function createReadFileTool(backend, options) {
|
|
|
875
1077
|
state: (0, _langchain_langgraph.getCurrentTaskInput)(config),
|
|
876
1078
|
store: config.store
|
|
877
1079
|
});
|
|
878
|
-
const { file_path, offset =
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
1080
|
+
const { file_path, offset = 0, limit = 100 } = input;
|
|
1081
|
+
const readResult = await resolvedBackend.read(file_path, offset, limit);
|
|
1082
|
+
if (readResult.error) return [{
|
|
1083
|
+
type: "text",
|
|
1084
|
+
text: `Error: ${readResult.error}`
|
|
1085
|
+
}];
|
|
1086
|
+
const mimeType = readResult.mimeType ?? getMimeType(file_path);
|
|
1087
|
+
if (!isTextMimeType(mimeType)) {
|
|
1088
|
+
const binaryContent = readResult.content;
|
|
1089
|
+
if (!binaryContent) return [{
|
|
1090
|
+
type: "text",
|
|
1091
|
+
text: `Error: expected binary content for '${file_path}'`
|
|
1092
|
+
}];
|
|
1093
|
+
let base64Data;
|
|
1094
|
+
if (typeof binaryContent === "string") base64Data = binaryContent;
|
|
1095
|
+
else if (ArrayBuffer.isView(binaryContent)) base64Data = Buffer.from(binaryContent).toString("base64");
|
|
1096
|
+
else {
|
|
1097
|
+
const values = Object.values(binaryContent);
|
|
1098
|
+
base64Data = Buffer.from(new Uint8Array(values)).toString("base64");
|
|
1099
|
+
}
|
|
1100
|
+
const sizeBytes = Math.ceil(base64Data.length * 3 / 4);
|
|
1101
|
+
if (sizeBytes > 10485760) return [{
|
|
1102
|
+
type: "text",
|
|
1103
|
+
text: `Error: file too large to read (${Math.round(sizeBytes / (1024 * 1024))}MB exceeds ${MAX_BINARY_READ_SIZE_BYTES / (1024 * 1024)}MB limit for binary files)`
|
|
1104
|
+
}];
|
|
1105
|
+
if (mimeType.startsWith("image/")) return [{
|
|
1106
|
+
type: "image",
|
|
1107
|
+
mimeType,
|
|
1108
|
+
data: base64Data
|
|
1109
|
+
}];
|
|
1110
|
+
if (mimeType.startsWith("audio/")) return [{
|
|
1111
|
+
type: "audio",
|
|
1112
|
+
mimeType,
|
|
1113
|
+
data: base64Data
|
|
1114
|
+
}];
|
|
1115
|
+
if (mimeType.startsWith("video/")) return [{
|
|
1116
|
+
type: "video",
|
|
1117
|
+
mimeType,
|
|
1118
|
+
data: base64Data
|
|
1119
|
+
}];
|
|
1120
|
+
return [{
|
|
1121
|
+
type: "file",
|
|
1122
|
+
mimeType,
|
|
1123
|
+
data: base64Data
|
|
1124
|
+
}];
|
|
1125
|
+
}
|
|
1126
|
+
let content = typeof readResult.content === "string" ? readResult.content : "";
|
|
1127
|
+
const lines = content.split("\n");
|
|
1128
|
+
if (lines.length > limit) content = lines.slice(0, limit).join("\n");
|
|
1129
|
+
let formatted = formatContentWithLineNumbers(content, offset + 1);
|
|
1130
|
+
if (toolTokenLimitBeforeEvict && formatted.length >= 4 * toolTokenLimitBeforeEvict) {
|
|
883
1131
|
const truncationMsg = READ_FILE_TRUNCATION_MSG.replace("{file_path}", file_path);
|
|
884
|
-
const maxContentLength =
|
|
885
|
-
|
|
1132
|
+
const maxContentLength = 4 * toolTokenLimitBeforeEvict - truncationMsg.length;
|
|
1133
|
+
formatted = formatted.substring(0, maxContentLength) + truncationMsg;
|
|
886
1134
|
}
|
|
887
|
-
return
|
|
1135
|
+
return [{
|
|
1136
|
+
type: "text",
|
|
1137
|
+
text: formatted
|
|
1138
|
+
}];
|
|
888
1139
|
}, {
|
|
889
1140
|
name: "read_file",
|
|
890
1141
|
description: customDescription || READ_FILE_TOOL_DESCRIPTION,
|
|
891
1142
|
schema: zod_v4.z.object({
|
|
892
1143
|
file_path: zod_v4.z.string().describe("Absolute path to the file to read"),
|
|
893
|
-
offset: zod_v4.z.coerce.number().optional().default(
|
|
894
|
-
limit: zod_v4.z.coerce.number().optional().default(
|
|
1144
|
+
offset: zod_v4.z.coerce.number().optional().default(0).describe("Line offset to start reading from (0-indexed)"),
|
|
1145
|
+
limit: zod_v4.z.coerce.number().optional().default(100).describe("Maximum number of lines to read")
|
|
895
1146
|
})
|
|
896
1147
|
});
|
|
897
1148
|
}
|
|
@@ -974,7 +1225,9 @@ function createGlobTool(backend, options) {
|
|
|
974
1225
|
store: config.store
|
|
975
1226
|
});
|
|
976
1227
|
const { pattern, path = "/" } = input;
|
|
977
|
-
const
|
|
1228
|
+
const globResult = await resolvedBackend.glob(pattern, path);
|
|
1229
|
+
if (globResult.error) return `Error finding files: ${globResult.error}`;
|
|
1230
|
+
const infos = globResult.files || [];
|
|
978
1231
|
if (infos.length === 0) return `No files found matching pattern '${pattern}'`;
|
|
979
1232
|
const result = truncateIfTooLong(infos.map((info) => info.path));
|
|
980
1233
|
if (Array.isArray(result)) return result.join("\n");
|
|
@@ -999,12 +1252,13 @@ function createGrepTool(backend, options) {
|
|
|
999
1252
|
store: config.store
|
|
1000
1253
|
});
|
|
1001
1254
|
const { pattern, path = "/", glob = null } = input;
|
|
1002
|
-
const result = await resolvedBackend.
|
|
1003
|
-
if (
|
|
1004
|
-
|
|
1255
|
+
const result = await resolvedBackend.grep(pattern, path, glob);
|
|
1256
|
+
if (result.error) return result.error;
|
|
1257
|
+
const matches = result.matches ?? [];
|
|
1258
|
+
if (matches.length === 0) return `No matches found for pattern '${pattern}'`;
|
|
1005
1259
|
const lines = [];
|
|
1006
1260
|
let currentFile = null;
|
|
1007
|
-
for (const match of
|
|
1261
|
+
for (const match of matches) {
|
|
1008
1262
|
if (match.path !== currentFile) {
|
|
1009
1263
|
currentFile = match.path;
|
|
1010
1264
|
lines.push(`\n${currentFile}:`);
|
|
@@ -1055,21 +1309,22 @@ function createExecuteTool(backend, options) {
|
|
|
1055
1309
|
function createFilesystemMiddleware(options = {}) {
|
|
1056
1310
|
const { backend = (stateAndStore) => new StateBackend(stateAndStore), systemPrompt: customSystemPrompt = null, customToolDescriptions = null, toolTokenLimitBeforeEvict = 2e4 } = options;
|
|
1057
1311
|
const baseSystemPrompt = customSystemPrompt || FILESYSTEM_SYSTEM_PROMPT;
|
|
1312
|
+
const allToolsByName = {
|
|
1313
|
+
ls: createLsTool(backend, { customDescription: customToolDescriptions?.ls }),
|
|
1314
|
+
read_file: createReadFileTool(backend, {
|
|
1315
|
+
customDescription: customToolDescriptions?.read_file,
|
|
1316
|
+
toolTokenLimitBeforeEvict
|
|
1317
|
+
}),
|
|
1318
|
+
write_file: createWriteFileTool(backend, { customDescription: customToolDescriptions?.write_file }),
|
|
1319
|
+
edit_file: createEditFileTool(backend, { customDescription: customToolDescriptions?.edit_file }),
|
|
1320
|
+
glob: createGlobTool(backend, { customDescription: customToolDescriptions?.glob }),
|
|
1321
|
+
grep: createGrepTool(backend, { customDescription: customToolDescriptions?.grep }),
|
|
1322
|
+
execute: createExecuteTool(backend, { customDescription: customToolDescriptions?.execute })
|
|
1323
|
+
};
|
|
1058
1324
|
return (0, langchain.createMiddleware)({
|
|
1059
1325
|
name: "FilesystemMiddleware",
|
|
1060
1326
|
stateSchema: FilesystemStateSchema,
|
|
1061
|
-
tools:
|
|
1062
|
-
createLsTool(backend, { customDescription: customToolDescriptions?.ls }),
|
|
1063
|
-
createReadFileTool(backend, {
|
|
1064
|
-
customDescription: customToolDescriptions?.read_file,
|
|
1065
|
-
toolTokenLimitBeforeEvict
|
|
1066
|
-
}),
|
|
1067
|
-
createWriteFileTool(backend, { customDescription: customToolDescriptions?.write_file }),
|
|
1068
|
-
createEditFileTool(backend, { customDescription: customToolDescriptions?.edit_file }),
|
|
1069
|
-
createGlobTool(backend, { customDescription: customToolDescriptions?.glob }),
|
|
1070
|
-
createGrepTool(backend, { customDescription: customToolDescriptions?.grep }),
|
|
1071
|
-
createExecuteTool(backend, { customDescription: customToolDescriptions?.execute })
|
|
1072
|
-
],
|
|
1327
|
+
tools: Object.values(allToolsByName),
|
|
1073
1328
|
wrapModelCall: async (request, handler) => {
|
|
1074
1329
|
const supportsExecution = isSandboxBackend(getBackend(backend, {
|
|
1075
1330
|
state: request.state || {},
|
|
@@ -1092,7 +1347,7 @@ function createFilesystemMiddleware(options = {}) {
|
|
|
1092
1347
|
if (toolName && TOOLS_EXCLUDED_FROM_EVICTION.includes(toolName)) return handler(request);
|
|
1093
1348
|
const result = await handler(request);
|
|
1094
1349
|
async function processToolMessage(msg, toolTokenLimitBeforeEvict) {
|
|
1095
|
-
if (typeof msg.content === "string" && msg.content.length > toolTokenLimitBeforeEvict *
|
|
1350
|
+
if (typeof msg.content === "string" && msg.content.length > toolTokenLimitBeforeEvict * 4) {
|
|
1096
1351
|
const resolvedBackend = getBackend(backend, {
|
|
1097
1352
|
state: request.state || {},
|
|
1098
1353
|
store: request.runtime?.store
|
|
@@ -1156,7 +1411,6 @@ function createFilesystemMiddleware(options = {}) {
|
|
|
1156
1411
|
}
|
|
1157
1412
|
});
|
|
1158
1413
|
}
|
|
1159
|
-
|
|
1160
1414
|
//#endregion
|
|
1161
1415
|
//#region src/middleware/subagents.ts
|
|
1162
1416
|
/**
|
|
@@ -1537,12 +1791,23 @@ function createSubAgentMiddleware(options) {
|
|
|
1537
1791
|
}
|
|
1538
1792
|
});
|
|
1539
1793
|
}
|
|
1540
|
-
|
|
1541
1794
|
//#endregion
|
|
1542
1795
|
//#region src/middleware/patch_tool_calls.ts
|
|
1543
1796
|
/**
|
|
1544
|
-
* Patch
|
|
1545
|
-
*
|
|
1797
|
+
* Patch tool call / tool response parity in a messages array.
|
|
1798
|
+
*
|
|
1799
|
+
* Ensures strict 1:1 correspondence between AIMessage tool_calls and
|
|
1800
|
+
* ToolMessage responses:
|
|
1801
|
+
*
|
|
1802
|
+
* 1. **Dangling tool_calls** — an AIMessage contains a tool_call with no
|
|
1803
|
+
* matching ToolMessage anywhere after it. A synthetic cancellation
|
|
1804
|
+
* ToolMessage is inserted immediately after the AIMessage.
|
|
1805
|
+
*
|
|
1806
|
+
* 2. **Orphaned ToolMessages** — a ToolMessage whose `tool_call_id` does not
|
|
1807
|
+
* match any tool_call in a preceding AIMessage. The ToolMessage is removed.
|
|
1808
|
+
*
|
|
1809
|
+
* Both directions are required for providers that enforce strict parity
|
|
1810
|
+
* (e.g. Google Gemini returns 400 INVALID_ARGUMENT otherwise).
|
|
1546
1811
|
*
|
|
1547
1812
|
* @param messages - The messages array to patch
|
|
1548
1813
|
* @returns Object with patched messages and needsPatch flag
|
|
@@ -1552,13 +1817,23 @@ function patchDanglingToolCalls(messages) {
|
|
|
1552
1817
|
patchedMessages: [],
|
|
1553
1818
|
needsPatch: false
|
|
1554
1819
|
};
|
|
1820
|
+
const allToolCallIds = /* @__PURE__ */ new Set();
|
|
1821
|
+
for (const msg of messages) if (langchain.AIMessage.isInstance(msg) && msg.tool_calls != null) {
|
|
1822
|
+
for (const tc of msg.tool_calls) if (tc.id) allToolCallIds.add(tc.id);
|
|
1823
|
+
}
|
|
1555
1824
|
const patchedMessages = [];
|
|
1556
1825
|
let needsPatch = false;
|
|
1557
1826
|
for (let i = 0; i < messages.length; i++) {
|
|
1558
1827
|
const msg = messages[i];
|
|
1828
|
+
if (langchain.ToolMessage.isInstance(msg)) {
|
|
1829
|
+
if (!allToolCallIds.has(msg.tool_call_id)) {
|
|
1830
|
+
needsPatch = true;
|
|
1831
|
+
continue;
|
|
1832
|
+
}
|
|
1833
|
+
}
|
|
1559
1834
|
patchedMessages.push(msg);
|
|
1560
1835
|
if (langchain.AIMessage.isInstance(msg) && msg.tool_calls != null) {
|
|
1561
|
-
for (const toolCall of msg.tool_calls) if (!messages.slice(i).find((m) => langchain.ToolMessage.isInstance(m) && m.tool_call_id === toolCall.id)) {
|
|
1836
|
+
for (const toolCall of msg.tool_calls) if (!messages.slice(i + 1).find((m) => langchain.ToolMessage.isInstance(m) && m.tool_call_id === toolCall.id)) {
|
|
1562
1837
|
needsPatch = true;
|
|
1563
1838
|
const toolMsg = `Tool call ${toolCall.name} with id ${toolCall.id} was cancelled - another message came in before it could be completed.`;
|
|
1564
1839
|
patchedMessages.push(new langchain.ToolMessage({
|
|
@@ -1575,11 +1850,18 @@ function patchDanglingToolCalls(messages) {
|
|
|
1575
1850
|
};
|
|
1576
1851
|
}
|
|
1577
1852
|
/**
|
|
1578
|
-
* Create middleware that
|
|
1853
|
+
* Create middleware that enforces strict tool call / tool response parity in
|
|
1854
|
+
* the messages history.
|
|
1855
|
+
*
|
|
1856
|
+
* Two kinds of violations are repaired:
|
|
1857
|
+
* 1. **Dangling tool_calls** — an AIMessage contains tool_calls with no
|
|
1858
|
+
* matching ToolMessage responses. Synthetic cancellation ToolMessages are
|
|
1859
|
+
* injected so every tool_call has a response.
|
|
1860
|
+
* 2. **Orphaned ToolMessages** — a ToolMessage exists whose `tool_call_id`
|
|
1861
|
+
* does not match any tool_call in a preceding AIMessage. These are removed.
|
|
1579
1862
|
*
|
|
1580
|
-
*
|
|
1581
|
-
*
|
|
1582
|
-
* ToolMessages saying the tool call was cancelled.
|
|
1863
|
+
* This is critical for providers like Google Gemini that reject requests with
|
|
1864
|
+
* mismatched function call / function response counts (400 INVALID_ARGUMENT).
|
|
1583
1865
|
*
|
|
1584
1866
|
* This middleware patches in two places:
|
|
1585
1867
|
* 1. `beforeAgent`: Patches state at the start of the agent loop (handles most cases)
|
|
@@ -1587,7 +1869,7 @@ function patchDanglingToolCalls(messages) {
|
|
|
1587
1869
|
* edge cases like HITL rejection during graph resume where state updates from
|
|
1588
1870
|
* beforeAgent may not be applied in time)
|
|
1589
1871
|
*
|
|
1590
|
-
* @returns AgentMiddleware that
|
|
1872
|
+
* @returns AgentMiddleware that enforces tool call / response parity
|
|
1591
1873
|
*
|
|
1592
1874
|
* @example
|
|
1593
1875
|
* ```typescript
|
|
@@ -1625,7 +1907,6 @@ function createPatchToolCallsMiddleware() {
|
|
|
1625
1907
|
}
|
|
1626
1908
|
});
|
|
1627
1909
|
}
|
|
1628
|
-
|
|
1629
1910
|
//#endregion
|
|
1630
1911
|
//#region src/values.ts
|
|
1631
1912
|
/**
|
|
@@ -1660,7 +1941,6 @@ const filesValue = new _langchain_langgraph.ReducedValue(zod.z.record(zod.z.stri
|
|
|
1660
1941
|
inputSchema: zod.z.record(zod.z.string(), FileDataSchema.nullable()).optional(),
|
|
1661
1942
|
reducer: fileDataReducer
|
|
1662
1943
|
});
|
|
1663
|
-
|
|
1664
1944
|
//#endregion
|
|
1665
1945
|
//#region src/middleware/memory.ts
|
|
1666
1946
|
/**
|
|
@@ -1801,12 +2081,14 @@ function formatMemoryContents(contents, sources) {
|
|
|
1801
2081
|
* @returns File content if found, null otherwise.
|
|
1802
2082
|
*/
|
|
1803
2083
|
async function loadMemoryFromBackend(backend, path) {
|
|
1804
|
-
|
|
1805
|
-
|
|
1806
|
-
|
|
1807
|
-
return
|
|
1808
|
-
|
|
1809
|
-
|
|
2084
|
+
const adaptedBackend = adaptBackendProtocol(backend);
|
|
2085
|
+
if (!adaptedBackend.downloadFiles) {
|
|
2086
|
+
const content = await adaptedBackend.read(path);
|
|
2087
|
+
if (content.error) return null;
|
|
2088
|
+
if (typeof content.content !== "string") return null;
|
|
2089
|
+
return content.content;
|
|
2090
|
+
}
|
|
2091
|
+
const results = await adaptedBackend.downloadFiles([path]);
|
|
1810
2092
|
if (results.length !== 1) throw new Error(`Expected 1 response for path ${path}, got ${results.length}`);
|
|
1811
2093
|
const response = results[0];
|
|
1812
2094
|
if (response.error != null) {
|
|
@@ -1842,8 +2124,8 @@ function createMemoryMiddleware(options) {
|
|
|
1842
2124
|
* Resolve backend from instance or factory.
|
|
1843
2125
|
*/
|
|
1844
2126
|
function getBackend(state) {
|
|
1845
|
-
if (typeof backend === "function") return backend({ state });
|
|
1846
|
-
return backend;
|
|
2127
|
+
if (typeof backend === "function") return adaptBackendProtocol(backend({ state }));
|
|
2128
|
+
return adaptBackendProtocol(backend);
|
|
1847
2129
|
}
|
|
1848
2130
|
return (0, langchain.createMiddleware)({
|
|
1849
2131
|
name: "MemoryMiddleware",
|
|
@@ -1879,7 +2161,6 @@ function createMemoryMiddleware(options) {
|
|
|
1879
2161
|
}
|
|
1880
2162
|
});
|
|
1881
2163
|
}
|
|
1882
|
-
|
|
1883
2164
|
//#endregion
|
|
1884
2165
|
//#region src/middleware/skills.ts
|
|
1885
2166
|
/**
|
|
@@ -1925,7 +2206,6 @@ function createMemoryMiddleware(options) {
|
|
|
1925
2206
|
const MAX_SKILL_FILE_SIZE = 10 * 1024 * 1024;
|
|
1926
2207
|
const MAX_SKILL_NAME_LENGTH = 64;
|
|
1927
2208
|
const MAX_SKILL_DESCRIPTION_LENGTH = 1024;
|
|
1928
|
-
const MAX_SKILL_COMPATIBILITY_LENGTH = 500;
|
|
1929
2209
|
/**
|
|
1930
2210
|
* Zod schema for a single skill metadata entry.
|
|
1931
2211
|
*/
|
|
@@ -2035,7 +2315,7 @@ function validateSkillName$1(name, directoryName) {
|
|
|
2035
2315
|
valid: false,
|
|
2036
2316
|
error: "name is required"
|
|
2037
2317
|
};
|
|
2038
|
-
if (name.length >
|
|
2318
|
+
if (name.length > 64) return {
|
|
2039
2319
|
valid: false,
|
|
2040
2320
|
error: "name exceeds 64 characters"
|
|
2041
2321
|
};
|
|
@@ -2109,7 +2389,7 @@ function formatSkillAnnotations(skill) {
|
|
|
2109
2389
|
* validation errors occur
|
|
2110
2390
|
*/
|
|
2111
2391
|
function parseSkillMetadataFromContent(content, skillPath, directoryName) {
|
|
2112
|
-
if (content.length >
|
|
2392
|
+
if (content.length > 10485760) {
|
|
2113
2393
|
console.warn(`Skipping ${skillPath}: content too large (${content.length} bytes)`);
|
|
2114
2394
|
return null;
|
|
2115
2395
|
}
|
|
@@ -2139,7 +2419,7 @@ function parseSkillMetadataFromContent(content, skillPath, directoryName) {
|
|
|
2139
2419
|
const validation = validateSkillName$1(name, directoryName);
|
|
2140
2420
|
if (!validation.valid) console.warn(`Skill '${name}' in ${skillPath} does not follow Agent Skills specification: ${validation.error}. Consider renaming for spec compliance.`);
|
|
2141
2421
|
let descriptionStr = description;
|
|
2142
|
-
if (descriptionStr.length >
|
|
2422
|
+
if (descriptionStr.length > 1024) {
|
|
2143
2423
|
console.warn(`Description exceeds ${MAX_SKILL_DESCRIPTION_LENGTH} characters in ${skillPath}, truncating`);
|
|
2144
2424
|
descriptionStr = descriptionStr.slice(0, MAX_SKILL_DESCRIPTION_LENGTH);
|
|
2145
2425
|
}
|
|
@@ -2149,9 +2429,9 @@ function parseSkillMetadataFromContent(content, skillPath, directoryName) {
|
|
|
2149
2429
|
else allowedTools = String(rawTools).split(/\s+/).filter(Boolean);
|
|
2150
2430
|
else allowedTools = [];
|
|
2151
2431
|
let compatibilityStr = String(frontmatterData.compatibility ?? "").trim() || null;
|
|
2152
|
-
if (compatibilityStr && compatibilityStr.length >
|
|
2153
|
-
console.warn(`Compatibility exceeds
|
|
2154
|
-
compatibilityStr = compatibilityStr.slice(0,
|
|
2432
|
+
if (compatibilityStr && compatibilityStr.length > 500) {
|
|
2433
|
+
console.warn(`Compatibility exceeds 500 characters in ${skillPath}, truncating`);
|
|
2434
|
+
compatibilityStr = compatibilityStr.slice(0, 500);
|
|
2155
2435
|
}
|
|
2156
2436
|
return {
|
|
2157
2437
|
name,
|
|
@@ -2167,12 +2447,15 @@ function parseSkillMetadataFromContent(content, skillPath, directoryName) {
|
|
|
2167
2447
|
* List all skills from a backend source.
|
|
2168
2448
|
*/
|
|
2169
2449
|
async function listSkillsFromBackend(backend, sourcePath) {
|
|
2450
|
+
const adaptedBackend = adaptBackendProtocol(backend);
|
|
2170
2451
|
const skills = [];
|
|
2171
2452
|
const pathSep = sourcePath.includes("\\") ? "\\" : "/";
|
|
2172
2453
|
const normalizedPath = sourcePath.endsWith("/") || sourcePath.endsWith("\\") ? sourcePath : `${sourcePath}${pathSep}`;
|
|
2173
2454
|
let fileInfos;
|
|
2174
2455
|
try {
|
|
2175
|
-
|
|
2456
|
+
const lsResult = await adaptedBackend.ls(normalizedPath);
|
|
2457
|
+
if (lsResult.error || !lsResult.files) return [];
|
|
2458
|
+
fileInfos = lsResult.files;
|
|
2176
2459
|
} catch {
|
|
2177
2460
|
return [];
|
|
2178
2461
|
}
|
|
@@ -2184,16 +2467,17 @@ async function listSkillsFromBackend(backend, sourcePath) {
|
|
|
2184
2467
|
if (entry.type !== "directory") continue;
|
|
2185
2468
|
const skillMdPath = `${normalizedPath}${entry.name}${pathSep}SKILL.md`;
|
|
2186
2469
|
let content;
|
|
2187
|
-
if (
|
|
2188
|
-
const results = await
|
|
2470
|
+
if (adaptedBackend.downloadFiles) {
|
|
2471
|
+
const results = await adaptedBackend.downloadFiles([skillMdPath]);
|
|
2189
2472
|
if (results.length !== 1) continue;
|
|
2190
2473
|
const response = results[0];
|
|
2191
2474
|
if (response.error != null || response.content == null) continue;
|
|
2192
2475
|
content = new TextDecoder().decode(response.content);
|
|
2193
2476
|
} else {
|
|
2194
|
-
const readResult = await
|
|
2195
|
-
if (readResult.
|
|
2196
|
-
content
|
|
2477
|
+
const readResult = await adaptedBackend.read(skillMdPath);
|
|
2478
|
+
if (readResult.error) continue;
|
|
2479
|
+
if (typeof readResult.content !== "string") continue;
|
|
2480
|
+
content = readResult.content;
|
|
2197
2481
|
}
|
|
2198
2482
|
const metadata = parseSkillMetadataFromContent(content, skillMdPath, entry.name);
|
|
2199
2483
|
if (metadata) skills.push(metadata);
|
|
@@ -2258,8 +2542,8 @@ function createSkillsMiddleware(options) {
|
|
|
2258
2542
|
* Resolve backend from instance or factory.
|
|
2259
2543
|
*/
|
|
2260
2544
|
function getBackend(state) {
|
|
2261
|
-
if (typeof backend === "function") return backend({ state });
|
|
2262
|
-
return backend;
|
|
2545
|
+
if (typeof backend === "function") return adaptBackendProtocol(backend({ state }));
|
|
2546
|
+
return adaptBackendProtocol(backend);
|
|
2263
2547
|
}
|
|
2264
2548
|
return (0, langchain.createMiddleware)({
|
|
2265
2549
|
name: "SkillsMiddleware",
|
|
@@ -2294,7 +2578,6 @@ function createSkillsMiddleware(options) {
|
|
|
2294
2578
|
}
|
|
2295
2579
|
});
|
|
2296
2580
|
}
|
|
2297
|
-
|
|
2298
2581
|
//#endregion
|
|
2299
2582
|
//#region src/middleware/utils.ts
|
|
2300
2583
|
/**
|
|
@@ -2302,7 +2585,226 @@ function createSkillsMiddleware(options) {
|
|
|
2302
2585
|
*
|
|
2303
2586
|
* This module provides shared helpers used across middleware implementations.
|
|
2304
2587
|
*/
|
|
2305
|
-
|
|
2588
|
+
//#endregion
|
|
2589
|
+
//#region src/middleware/completion_notifier.ts
|
|
2590
|
+
/**
|
|
2591
|
+
* Completion notifier middleware for async subagents.
|
|
2592
|
+
*
|
|
2593
|
+
* **Experimental** — this middleware is experimental and may change in future releases.
|
|
2594
|
+
*
|
|
2595
|
+
* When an async subagent finishes (success or error), this middleware sends a
|
|
2596
|
+
* message back to the **supervisor's** thread so the supervisor wakes up and can
|
|
2597
|
+
* proactively relay results to the user — without the user having to poll via
|
|
2598
|
+
* `check_async_task`.
|
|
2599
|
+
*
|
|
2600
|
+
* ## Architecture
|
|
2601
|
+
*
|
|
2602
|
+
* The async subagent protocol is inherently fire-and-forget: the supervisor
|
|
2603
|
+
* launches a job via `start_async_task` and only learns about completion
|
|
2604
|
+
* when someone calls `check_async_task`. This middleware closes that gap.
|
|
2605
|
+
*
|
|
2606
|
+
* ```
|
|
2607
|
+
* Supervisor Subagent
|
|
2608
|
+
* | |
|
|
2609
|
+
* |--- start_async_task -----> |
|
|
2610
|
+
* |<-- task_id (immediately) - |
|
|
2611
|
+
* | | (working...)
|
|
2612
|
+
* | | (done!)
|
|
2613
|
+
* | |
|
|
2614
|
+
* |<-- runs.create( |
|
|
2615
|
+
* | supervisor_thread, |
|
|
2616
|
+
* | "completed: ...") |
|
|
2617
|
+
* | |
|
|
2618
|
+
* | (wakes up, sees result) |
|
|
2619
|
+
* ```
|
|
2620
|
+
*
|
|
2621
|
+
* The notifier calls `runs.create()` on the supervisor's thread, which
|
|
2622
|
+
* queues a new run. From the supervisor's perspective, it looks like a new
|
|
2623
|
+
* user message arrived — except the content is a structured notification
|
|
2624
|
+
* from the subagent.
|
|
2625
|
+
*
|
|
2626
|
+
* ## How parent context is propagated
|
|
2627
|
+
*
|
|
2628
|
+
* - `parentGraphId` is passed as a **constructor argument** to the middleware.
|
|
2629
|
+
* This is the supervisor's graph ID (or assistant ID), which the subagent
|
|
2630
|
+
* developer knows at configuration time.
|
|
2631
|
+
* - `url` is the URL of the LangGraph server where the supervisor is deployed.
|
|
2632
|
+
* This is required since JS does not support in-process ASGI transport.
|
|
2633
|
+
* - `headers` are optional additional headers for authenticating with the
|
|
2634
|
+
* supervisor's server.
|
|
2635
|
+
* - `parent_thread_id` is injected into the subagent's input state by the
|
|
2636
|
+
* supervisor's `start_async_task` tool. It survives thread interrupts and
|
|
2637
|
+
* updates because it lives in state, not config.
|
|
2638
|
+
* - If `parent_thread_id` is not present in state, the notifier silently no-ops.
|
|
2639
|
+
*
|
|
2640
|
+
* ## Usage
|
|
2641
|
+
*
|
|
2642
|
+
* ```typescript
|
|
2643
|
+
* import { createCompletionNotifierMiddleware } from "deepagents";
|
|
2644
|
+
*
|
|
2645
|
+
* const notifier = createCompletionNotifierMiddleware({
|
|
2646
|
+
* parentGraphId: "supervisor",
|
|
2647
|
+
* url: "https://my-deployment.langsmith.dev",
|
|
2648
|
+
* });
|
|
2649
|
+
*
|
|
2650
|
+
* const agent = createDeepAgent({
|
|
2651
|
+
* model: "claude-sonnet-4-5-20250929",
|
|
2652
|
+
* middleware: [notifier],
|
|
2653
|
+
* });
|
|
2654
|
+
* ```
|
|
2655
|
+
*
|
|
2656
|
+
* The middleware will read `parent_thread_id` from the agent's state at the
|
|
2657
|
+
* end of execution. This is injected automatically by the supervisor's
|
|
2658
|
+
* `start_async_task` tool when it creates the run.
|
|
2659
|
+
*
|
|
2660
|
+
* @module
|
|
2661
|
+
*/
|
|
2662
|
+
/** State key where the supervisor's launch tool stores the parent thread ID. */
|
|
2663
|
+
const PARENT_THREAD_ID_KEY = "parent_thread_id";
|
|
2664
|
+
/** Maximum characters to include from the last message in notifications. */
|
|
2665
|
+
const MAX_SUMMARY_LENGTH = 500;
|
|
2666
|
+
/**
|
|
2667
|
+
* State extension for subagents that use the completion notifier.
|
|
2668
|
+
*
|
|
2669
|
+
* These fields are injected by the supervisor's `start_async_task`
|
|
2670
|
+
* tool and read by the completion notifier middleware to send notifications
|
|
2671
|
+
* back to the supervisor's thread.
|
|
2672
|
+
*/
|
|
2673
|
+
const CompletionNotifierStateSchema = zod_v4.z.object({ parent_thread_id: zod_v4.z.string().nullish() });
|
|
2674
|
+
/**
|
|
2675
|
+
* Build headers for the supervisor's LangGraph server.
|
|
2676
|
+
*
|
|
2677
|
+
* Ensures `x-auth-scheme: langsmith` is present unless explicitly overridden.
|
|
2678
|
+
*/
|
|
2679
|
+
function resolveHeaders(headers) {
|
|
2680
|
+
const resolved = { ...headers };
|
|
2681
|
+
if (!("x-auth-scheme" in resolved)) resolved["x-auth-scheme"] = "langsmith";
|
|
2682
|
+
return resolved;
|
|
2683
|
+
}
|
|
2684
|
+
/**
|
|
2685
|
+
* Send a notification run to the parent supervisor's thread.
|
|
2686
|
+
*/
|
|
2687
|
+
async function notifyParent(parentThreadId, parentGraphId, notification, options) {
|
|
2688
|
+
try {
|
|
2689
|
+
await new _langchain_langgraph_sdk.Client({
|
|
2690
|
+
apiUrl: options.url,
|
|
2691
|
+
apiKey: null,
|
|
2692
|
+
defaultHeaders: resolveHeaders(options.headers)
|
|
2693
|
+
}).runs.create(parentThreadId, parentGraphId, { input: { messages: [{
|
|
2694
|
+
role: "user",
|
|
2695
|
+
content: notification
|
|
2696
|
+
}] } });
|
|
2697
|
+
} catch (e) {
|
|
2698
|
+
console.warn(`[CompletionNotifierMiddleware] Failed to notify parent thread ${parentThreadId}:`, e);
|
|
2699
|
+
}
|
|
2700
|
+
}
|
|
2701
|
+
/**
|
|
2702
|
+
* Extract a summary from the subagent's final message.
|
|
2703
|
+
*
|
|
2704
|
+
* Returns at most 500 characters from the last message's content.
|
|
2705
|
+
*/
|
|
2706
|
+
function extractLastMessage(state) {
|
|
2707
|
+
const messages = state.messages;
|
|
2708
|
+
if (!messages || messages.length === 0) return "(no output)";
|
|
2709
|
+
const last = messages[messages.length - 1];
|
|
2710
|
+
if (last && typeof last === "object" && "content" in last) {
|
|
2711
|
+
const content = last.content;
|
|
2712
|
+
if (typeof content === "string") return content.slice(0, MAX_SUMMARY_LENGTH);
|
|
2713
|
+
return JSON.stringify(content).slice(0, MAX_SUMMARY_LENGTH);
|
|
2714
|
+
}
|
|
2715
|
+
return String(last).slice(0, MAX_SUMMARY_LENGTH);
|
|
2716
|
+
}
|
|
2717
|
+
/**
|
|
2718
|
+
* Create a completion notifier middleware for async subagents.
|
|
2719
|
+
*
|
|
2720
|
+
* **Experimental** — this middleware is experimental and may change.
|
|
2721
|
+
*
|
|
2722
|
+
* This middleware is added to the **subagent's** middleware stack (not the
|
|
2723
|
+
* supervisor's). When the subagent finishes, it sends a message to the
|
|
2724
|
+
* supervisor's thread via `runs.create()`, waking the supervisor so it can
|
|
2725
|
+
* proactively relay results.
|
|
2726
|
+
*
|
|
2727
|
+
* The supervisor's `parent_thread_id` is read from the subagent's own state
|
|
2728
|
+
* (injected by the supervisor's `start_async_task` tool at launch time).
|
|
2729
|
+
* The `parentGraphId` is provided as a constructor argument since it's static
|
|
2730
|
+
* configuration known at deployment time.
|
|
2731
|
+
*
|
|
2732
|
+
* If `parent_thread_id` is not present in state (e.g., the subagent was
|
|
2733
|
+
* launched manually without a supervisor), the middleware silently does
|
|
2734
|
+
* nothing.
|
|
2735
|
+
*
|
|
2736
|
+
* @param options - Configuration options.
|
|
2737
|
+
* @returns An `AgentMiddleware` instance.
|
|
2738
|
+
*
|
|
2739
|
+
* @example
|
|
2740
|
+
* ```typescript
|
|
2741
|
+
* import { createCompletionNotifierMiddleware } from "deepagents";
|
|
2742
|
+
*
|
|
2743
|
+
* const notifier = createCompletionNotifierMiddleware({
|
|
2744
|
+
* parentGraphId: "supervisor",
|
|
2745
|
+
* url: "https://my-deployment.langsmith.dev",
|
|
2746
|
+
* });
|
|
2747
|
+
*
|
|
2748
|
+
* const agent = createDeepAgent({
|
|
2749
|
+
* model: "claude-sonnet-4-5-20250929",
|
|
2750
|
+
* middleware: [notifier],
|
|
2751
|
+
* });
|
|
2752
|
+
* ```
|
|
2753
|
+
*/
|
|
2754
|
+
function createCompletionNotifierMiddleware(options) {
	const { parentGraphId, url, headers } = options;
	// Fire-at-most-once latch: after the first notification (completion or
	// error) no further notifications are sent by this middleware instance.
	let notified = false;
	/**
	 * Check whether we should send a notification: only if we have not
	 * notified yet AND the state carries a parent thread id to notify.
	 */
	function shouldNotify(state) {
		if (notified) return false;
		return Boolean(state[PARENT_THREAD_ID_KEY]);
	}
	/**
	 * Send a notification to the parent if conditions are met.
	 * The latch is set before awaiting so a concurrent call cannot double-send.
	 */
	async function sendNotification(state, message) {
		if (!shouldNotify(state)) return;
		notified = true;
		await notifyParent(state[PARENT_THREAD_ID_KEY], parentGraphId, message, {
			url,
			headers
		});
	}
	/**
	 * Read the subagent's own thread_id from runtime config.
	 *
	 * The subagent's `thread_id` is the same as the `task_id` from the
	 * supervisor's perspective. May be undefined if config is missing.
	 */
	function getTaskId(runtime) {
		return runtime?.configurable?.thread_id;
	}
	/**
	 * Build a notification string, prefixed with `[task_id=...]` when the
	 * runtime exposes a thread id (no prefix otherwise).
	 */
	function formatNotification(body, runtime) {
		const taskId = getTaskId(runtime);
		return `${taskId ? `[task_id=${taskId}]` : ""}${body}`;
	}
	return (0, langchain.createMiddleware)({
		name: "CompletionNotifierMiddleware",
		stateSchema: CompletionNotifierStateSchema,
		// On normal completion, notify the parent with the last message content.
		async afterAgent(state, runtime) {
			await sendNotification(state, formatNotification(`Completed. Result: ${extractLastMessage(state)}`, runtime));
		},
		// On a model-call failure, notify the parent with the error, then rethrow
		// so the failure still propagates to the caller.
		async wrapModelCall(request, handler) {
			try {
				return await handler(request);
			} catch (e) {
				const notification = formatNotification(`Error: ${e instanceof Error ? e.message : String(e)}`, request.runtime);
				await sendNotification(request.state, notification);
				throw e;
			}
		}
	});
}
|
|
2306
2808
|
//#endregion
|
|
2307
2809
|
//#region src/middleware/summarization.ts
|
|
2308
2810
|
/**
|
|
@@ -2499,15 +3001,15 @@ function createSummarizationMiddleware(options) {
|
|
|
2499
3001
|
* Resolve backend from instance or factory.
|
|
2500
3002
|
*/
|
|
2501
3003
|
function getBackend(state) {
|
|
2502
|
-
if (typeof backend === "function") return backend({ state });
|
|
2503
|
-
return backend;
|
|
3004
|
+
if (typeof backend === "function") return adaptBackendProtocol(backend({ state }));
|
|
3005
|
+
return adaptBackendProtocol(backend);
|
|
2504
3006
|
}
|
|
2505
3007
|
/**
|
|
2506
3008
|
* Get or create session ID for history file naming.
|
|
2507
3009
|
*/
|
|
2508
3010
|
function getSessionId(state) {
|
|
2509
3011
|
if (state._summarizationSessionId) return state._summarizationSessionId;
|
|
2510
|
-
if (!sessionId) sessionId = `session_${
|
|
3012
|
+
if (!sessionId) sessionId = `session_${crypto.randomUUID().substring(0, 8)}`;
|
|
2511
3013
|
return sessionId;
|
|
2512
3014
|
}
|
|
2513
3015
|
/**
|
|
@@ -2992,32 +3494,557 @@ ${summary}
|
|
|
2992
3494
|
}
|
|
2993
3495
|
});
|
|
2994
3496
|
}
|
|
2995
|
-
|
|
2996
3497
|
//#endregion
|
|
2997
|
-
//#region src/
|
|
2998
|
-
|
|
3498
|
+
//#region src/middleware/async_subagents.ts
|
|
3499
|
+
/**
 * Resolve the current tool call id from the tool runtime.
 *
 * Prefers `runtime.toolCall.id`, falls back to `runtime.toolCallId`,
 * and finally an empty string when neither is present.
 */
function toolCallIdFromRuntime(runtime) {
	const fromToolCall = runtime.toolCall?.id;
	if (fromToolCall != null) return fromToolCall;
	return runtime.toolCallId ?? "";
}
|
|
2999
3502
|
/**
|
|
3000
|
-
*
|
|
3503
|
+
* Zod schema for {@link AsyncTask}.
|
|
3001
3504
|
*
|
|
3002
|
-
*
|
|
3003
|
-
*
|
|
3004
|
-
|
|
3505
|
+
* Used by the {@link ReducedValue} in the state schema so that LangGraph
|
|
3506
|
+
* can validate and serialize task records stored in `asyncTasks`.
|
|
3507
|
+
*/
|
|
3508
|
+
const AsyncTaskSchema = zod_v4.z.object({
|
|
3509
|
+
taskId: zod_v4.z.string(),
|
|
3510
|
+
agentName: zod_v4.z.string(),
|
|
3511
|
+
threadId: zod_v4.z.string(),
|
|
3512
|
+
runId: zod_v4.z.string(),
|
|
3513
|
+
status: zod_v4.z.string(),
|
|
3514
|
+
createdAt: zod_v4.z.string(),
|
|
3515
|
+
updatedAt: zod_v4.z.string().optional(),
|
|
3516
|
+
checkedAt: zod_v4.z.string().optional()
|
|
3517
|
+
});
|
|
3518
|
+
/**
|
|
3519
|
+
* State schema for the async subagent middleware.
|
|
3005
3520
|
*
|
|
3006
|
-
*
|
|
3007
|
-
*
|
|
3521
|
+
* Declares `asyncTasks` as a reduced state channel so that individual
|
|
3522
|
+
* tool updates (launch, check, update, cancel, list) merge into the existing
|
|
3523
|
+
* tasks dict rather than replacing it wholesale.
|
|
3008
3524
|
*/
|
|
3009
|
-
|
|
3010
|
-
|
|
3011
|
-
|
|
3012
|
-
|
|
3013
|
-
if (typeof component !== "string") throw new TypeError(`Namespace component at index ${i} must be a string, got ${typeof component}.`);
|
|
3014
|
-
if (!component) throw new Error(`Namespace component at index ${i} must not be empty.`);
|
|
3015
|
-
if (!NAMESPACE_COMPONENT_RE.test(component)) throw new Error(`Namespace component at index ${i} contains disallowed characters: "${component}". Only alphanumeric characters, hyphens, underscores, dots, @, +, colons, and tildes are allowed.`);
|
|
3016
|
-
}
|
|
3017
|
-
return namespace;
|
|
3018
|
-
}
|
|
3525
|
+
const AsyncTaskStateSchema = new _langchain_langgraph.StateSchema({ asyncTasks: new _langchain_langgraph.ReducedValue(zod_v4.z.record(zod_v4.z.string(), AsyncTaskSchema).default(() => ({})), {
|
|
3526
|
+
inputSchema: zod_v4.z.record(zod_v4.z.string(), AsyncTaskSchema).optional(),
|
|
3527
|
+
reducer: asyncTasksReducer
|
|
3528
|
+
}) });
|
|
3019
3529
|
/**
|
|
3020
|
-
*
|
|
3530
|
+
* Reducer for the `asyncTasks` state channel.
|
|
3531
|
+
*
|
|
3532
|
+
* Merges task updates into the existing tasks dict using shallow spread.
|
|
3533
|
+
* This allows individual tools to update a single task without overwriting
|
|
3534
|
+
* the full map — only the keys present in `update` are replaced.
|
|
3535
|
+
*
|
|
3536
|
+
* @param existing - The current tasks dict from state (may be undefined on first write).
|
|
3537
|
+
* @param update - New or updated task entries to merge in.
|
|
3538
|
+
* @returns Merged tasks dict.
|
|
3539
|
+
*/
|
|
3540
|
+
/**
 * Reducer for the `asyncTasks` state channel.
 *
 * Produces a fresh dict with every entry from `existing`, then the
 * entries from `update` layered on top — so a tool can write a single
 * task record without clobbering the rest of the map.
 *
 * @param existing - Current tasks dict (may be undefined on first write).
 * @param update - New or changed task entries to merge in.
 * @returns The merged tasks dict.
 */
function asyncTasksReducer(existing, update) {
	const merged = {};
	Object.assign(merged, existing || {});
	Object.assign(merged, update || {});
	return merged;
}
|
|
3546
|
+
/**
|
|
3547
|
+
* Description template for the `start_async_task` tool.
|
|
3548
|
+
*
|
|
3549
|
+
* The `{available_agents}` placeholder is replaced at middleware creation
|
|
3550
|
+
* time with a formatted list of configured async subagent names and descriptions.
|
|
3551
|
+
*/
|
|
3552
|
+
const ASYNC_TASK_TOOL_DESCRIPTION = `Launch an async subagent on a remote LangGraph server. The subagent runs in the background and returns a task ID immediately.
|
|
3553
|
+
|
|
3554
|
+
Available async agent types:
|
|
3555
|
+
{available_agents}
|
|
3556
|
+
|
|
3557
|
+
## Usage notes:
|
|
3558
|
+
1. This tool launches a background task and returns immediately with a task ID. Report the task ID to the user and stop — do NOT immediately check status.
|
|
3559
|
+
2. Use \`check_async_task\` only when the user asks for a status update or result.
|
|
3560
|
+
3. Use \`update_async_task\` to send new instructions to a running task.
|
|
3561
|
+
4. Multiple async subagents can run concurrently — launch several and let them run in the background.
|
|
3562
|
+
5. The subagent runs on a remote LangGraph server, so it has its own tools and capabilities.`;
|
|
3563
|
+
/**
|
|
3564
|
+
* Default system prompt appended to the main agent's system message when
|
|
3565
|
+
* async subagent middleware is active.
|
|
3566
|
+
*
|
|
3567
|
+
* Provides the agent with instructions on how to use the five async subagent
|
|
3568
|
+
* tools (launch, check, update, cancel, list) including workflow ordering,
|
|
3569
|
+
* critical rules about polling behavior, and guidance on when to use async
|
|
3570
|
+
* subagents vs. synchronous delegation.
|
|
3571
|
+
*/
|
|
3572
|
+
const ASYNC_TASK_SYSTEM_PROMPT = `## Async subagents (remote LangGraph servers)
|
|
3573
|
+
|
|
3574
|
+
You have access to async subagent tools that launch background tasks on remote LangGraph servers.
|
|
3575
|
+
|
|
3576
|
+
### Tools:
|
|
3577
|
+
- \`start_async_task\`: Start a new background task. Returns a task ID immediately.
|
|
3578
|
+
- \`check_async_task\`: Check the status of a running task. Returns status and result if complete.
|
|
3579
|
+
- \`update_async_task\`: Send an update or new instructions to a running task.
|
|
3580
|
+
- \`cancel_async_task\`: Cancel a running task that is no longer needed.
|
|
3581
|
+
- \`list_async_tasks\`: List all tracked tasks with live statuses. Use this to check all tasks at once.
|
|
3582
|
+
|
|
3583
|
+
### Workflow:
|
|
3584
|
+
1. **Launch** — Use \`start_async_task\` to start a task. Report the task ID to the user and stop.
|
|
3585
|
+
Do NOT immediately check the status — the task runs in the background while you and the user continue other work.
|
|
3586
|
+
2. **Check (on request)** — Only use \`check_async_task\` when the user explicitly asks for a status update or
|
|
3587
|
+
result. If the status is "running", report that and stop — do not poll in a loop.
|
|
3588
|
+
3. **Update** (optional) — Use \`update_async_task\` to send new instructions to a running task. This interrupts
|
|
3589
|
+
the current run and starts a fresh one on the same thread. The task_id stays the same.
|
|
3590
|
+
4. **Cancel** (optional) — Use \`cancel_async_task\` to stop a task that is no longer needed.
|
|
3591
|
+
5. **Collect** — When \`check_async_task\` returns status "success", the result is included in the response.
|
|
3592
|
+
6. **List** — Use \`list_async_tasks\` to see live statuses for all tasks at once, or to recall task IDs after context compaction.
|
|
3593
|
+
|
|
3594
|
+
### Critical rules:
|
|
3595
|
+
- After launching, ALWAYS return control to the user immediately. Never auto-check after launching.
|
|
3596
|
+
- Never poll \`check_async_task\` in a loop. Check once per user request, then stop.
|
|
3597
|
+
- If a check returns "running", tell the user and wait for them to ask again.
|
|
3598
|
+
- Task statuses in conversation history are ALWAYS stale — a task that was "running" may now be done.
|
|
3599
|
+
NEVER report a status from a previous tool result. ALWAYS call a tool to get the current status:
|
|
3600
|
+
use \`list_async_tasks\` when the user asks about multiple tasks or "all tasks",
|
|
3601
|
+
use \`check_async_task\` when the user asks about a specific task.
|
|
3602
|
+
- Always show the full task_id — never truncate or abbreviate it.
|
|
3603
|
+
|
|
3604
|
+
### When to use async subagents:
|
|
3605
|
+
- Long-running tasks that would block the main agent
|
|
3606
|
+
- Tasks that benefit from running on specialized remote deployments
|
|
3607
|
+
- When you want to run multiple tasks concurrently and collect results later`;
|
|
3608
|
+
/**
|
|
3609
|
+
* Task statuses that will never change.
|
|
3610
|
+
*
|
|
3611
|
+
* When listing tasks, live-status fetches are skipped for tasks whose
|
|
3612
|
+
* cached status is in this set, since they are guaranteed to be final.
|
|
3613
|
+
*/
|
|
3614
|
+
// Final run statuses: once a run reaches one of these the server will
// never change it, so cached values can be trusted without re-querying.
const TERMINAL_STATUSES = new Set([
	"cancelled",
	"success",
	"error",
	"timeout",
	"interrupted"
]);
|
|
3621
|
+
/**
|
|
3622
|
+
* Look up a tracked task from state by its `taskId`.
|
|
3623
|
+
*
|
|
3624
|
+
* @param taskId - The task ID to look up (will be trimmed).
|
|
3625
|
+
* @param state - The current agent state containing `asyncTasks`.
|
|
3626
|
+
* @returns The tracked task on success, or an error string.
|
|
3627
|
+
*/
|
|
3628
|
+
/**
 * Look up a tracked async task in agent state by its `taskId`.
 *
 * The id is trimmed before lookup so copy-pasted ids with stray
 * whitespace still resolve.
 *
 * @param taskId - The task ID to look up.
 * @param state - Agent state containing the `asyncTasks` dict.
 * @returns The tracked task, or a human-readable error string when absent.
 */
function resolveTrackedTask(taskId, state) {
	const tasks = state.asyncTasks ?? {};
	const found = tasks[taskId.trim()];
	if (!found) return `No tracked task found for taskId: '${taskId}'`;
	return found;
}
|
|
3633
|
+
/**
|
|
3634
|
+
* Build a check result from a run's current status and thread state values.
|
|
3635
|
+
*
|
|
3636
|
+
* For successful runs, extracts the last message's content from the remote
|
|
3637
|
+
* thread's state values. For errored runs, includes a generic error message.
|
|
3638
|
+
*
|
|
3639
|
+
* @param run - The run object from the SDK.
|
|
3640
|
+
* @param threadId - The thread ID for the run.
|
|
3641
|
+
* @param threadValues - The `values` from `ThreadState` (the remote subagent's state).
|
|
3642
|
+
*/
|
|
3643
|
+
/**
 * Build a check result object from a run's status and thread state values.
 *
 * For successful runs, extracts the last message's content from the remote
 * thread's state values (stringifying non-string content as JSON). For
 * errored runs, attaches a generic error message. Other statuses return
 * only the status and thread id.
 *
 * @param run - The run object from the SDK (only `status` is read).
 * @param threadId - The thread ID for the run.
 * @param threadValues - The `values` from `ThreadState` (the remote subagent's state).
 */
function buildCheckResult(run, threadId, threadValues) {
	const out = {
		status: run.status,
		threadId
	};
	if (run.status === "error") {
		out.error = "The async subagent encountered an error.";
		return out;
	}
	if (run.status !== "success") return out;
	// An array-shaped state has no `messages` key; treat it as empty.
	const values = Array.isArray(threadValues) ? {} : threadValues;
	const messages = values?.messages ?? [];
	if (messages.length === 0) {
		out.result = "Completed with no output messages.";
		return out;
	}
	const last = messages[messages.length - 1];
	const hasContent = typeof last === "object" && last !== null && "content" in last;
	const rawContent = hasContent ? last.content : last;
	out.result = typeof rawContent === "string" ? rawContent : JSON.stringify(rawContent);
	return out;
}
|
|
3658
|
+
/**
|
|
3659
|
+
* Filter tasks by cached status from agent state.
|
|
3660
|
+
*
|
|
3661
|
+
* Filtering uses the cached status, not live server status. Live statuses
|
|
3662
|
+
* are fetched after filtering by the calling tool.
|
|
3663
|
+
*
|
|
3664
|
+
* @param tasks - All tracked tasks from state.
|
|
3665
|
+
* @param statusFilter - If nullish or `'all'`, return all tasks.
|
|
3666
|
+
* Otherwise return only tasks whose cached status matches.
|
|
3667
|
+
*/
|
|
3668
|
+
/**
 * Filter tracked tasks by their cached status from agent state.
 *
 * Filtering uses the cached status, not live server status; live statuses
 * are fetched after filtering by the calling tool.
 *
 * @param tasks - All tracked tasks from state, keyed by taskId.
 * @param statusFilter - If falsy or `'all'`, every task is returned;
 *   otherwise only tasks whose cached status matches.
 * @returns The matching tasks as an array.
 */
function filterTasks(tasks, statusFilter) {
	const allTasks = Object.values(tasks);
	if (!statusFilter || statusFilter === "all") return allTasks;
	return allTasks.filter(({ status }) => status === statusFilter);
}
|
|
3672
|
+
/**
|
|
3673
|
+
* Fetch the current run status from the server.
|
|
3674
|
+
*
|
|
3675
|
+
* Returns the cached status immediately for terminal tasks (avoiding
|
|
3676
|
+
* unnecessary API calls). Falls back to the cached status on SDK errors.
|
|
3677
|
+
*/
|
|
3678
|
+
/**
 * Fetch the current run status for a task from the remote server.
 *
 * Tasks whose cached status is terminal short-circuit to that status
 * (no API call), and any SDK failure also falls back to the cached status.
 */
async function fetchLiveTaskStatus(clients, task) {
	const cached = task.status;
	if (TERMINAL_STATUSES.has(cached)) return cached;
	try {
		const client = clients.getClient(task.agentName);
		const run = await client.runs.get(task.threadId, task.runId);
		return run.status;
	} catch {
		return cached;
	}
}
|
|
3686
|
+
/**
|
|
3687
|
+
* Format a single task as a display string for list output.
|
|
3688
|
+
*/
|
|
3689
|
+
/**
 * Format one tracked task as a single display line for list output.
 */
function formatTaskEntry(task, status) {
	const fields = [`taskId: ${task.taskId}`, `agent: ${task.agentName}`, `status: ${status}`];
	return `- ${fields.join(" ")}`;
}
|
|
3692
|
+
/**
|
|
3693
|
+
* Lazily-created, cached LangGraph SDK clients keyed by (url, headers).
|
|
3694
|
+
*
|
|
3695
|
+
* Agents that share the same URL and headers will reuse a single `Client`
|
|
3696
|
+
* instance, avoiding unnecessary connections.
|
|
3697
|
+
*/
|
|
3698
|
+
/**
 * Lazily-created, cached LangGraph SDK clients keyed by (url, headers).
 *
 * Agent specs that share the same URL and resolved headers reuse a single
 * `Client` instance instead of opening redundant connections.
 */
var ClientCache = class {
	agents;
	clients = /* @__PURE__ */ new Map();
	constructor(agents) {
		this.agents = agents;
	}
	/**
	 * Resolve the headers for a remote LangGraph server, injecting the
	 * default `x-auth-scheme: langsmith` header when the spec omits it.
	 */
	resolveHeaders(spec) {
		const resolved = Object.assign({}, spec.headers ?? {});
		if (!("x-auth-scheme" in resolved)) resolved["x-auth-scheme"] = "langsmith";
		return resolved;
	}
	/**
	 * Build a stable cache key from a spec's url plus its resolved headers
	 * (entries sorted so key order in the spec does not matter).
	 */
	cacheKey(spec) {
		const pairs = Object.entries(this.resolveHeaders(spec)).sort();
		const headerStr = pairs.flat().join(":");
		return `${spec.url ?? ""}|${headerStr}`;
	}
	/**
	 * Get or lazily construct the `Client` for the named agent.
	 */
	getClient(name) {
		const spec = this.agents[name];
		const key = this.cacheKey(spec);
		let client = this.clients.get(key);
		if (client === undefined) {
			client = new _langchain_langgraph_sdk.Client({
				apiUrl: spec.url,
				defaultHeaders: this.resolveHeaders(spec)
			});
			this.clients.set(key, client);
		}
		return client;
	}
};
|
|
3738
|
+
/**
|
|
3739
|
+
* Build the `start_async_task` tool.
|
|
3740
|
+
*
|
|
3741
|
+
* Creates a thread on the remote server, starts a run, and returns a
|
|
3742
|
+
* `Command` that persists the new task in state.
|
|
3743
|
+
*/
|
|
3744
|
+
function buildStartTool(agentMap, clients, toolDescription) {
	return (0, langchain.tool)(async (input, runtime) => {
		// Unknown agent names get an actionable error listing valid choices.
		if (!(input.agentName in agentMap)) {
			const allowed = Object.keys(agentMap).map((k) => `\`${k}\``).join(", ");
			return `Unknown async subagent type \`${input.agentName}\`. Available types: ${allowed}`;
		}
		const spec = agentMap[input.agentName];
		try {
			const client = clients.getClient(input.agentName);
			// One remote thread per task; the thread id doubles as the task id.
			const thread = await client.threads.create();
			const run = await client.runs.create(thread.thread_id, spec.graphId, { input: { messages: [{
				role: "user",
				content: input.description
			}] } });
			const taskId = thread.thread_id;
			const task = {
				taskId,
				agentName: input.agentName,
				threadId: taskId,
				runId: run.run_id,
				status: "running",
				createdAt: (/* @__PURE__ */ new Date()).toISOString()
			};
			// A Command lets the ToolMessage and the asyncTasks record land
			// in the same state update.
			return new _langchain_langgraph.Command({ update: {
				messages: [new langchain.ToolMessage({
					content: `Launched async subagent. taskId: ${taskId}`,
					tool_call_id: toolCallIdFromRuntime(runtime)
				})],
				asyncTasks: { [taskId]: task }
			} });
		} catch (e) {
			// Launch failures are returned as tool output, not thrown, so the
			// agent can report them and continue.
			return `Failed to launch async subagent '${input.agentName}': ${e}`;
		}
	}, {
		name: "start_async_task",
		description: toolDescription,
		schema: zod_v4.z.object({
			description: zod_v4.z.string().describe("A detailed description of the task for the async subagent to perform."),
			agentName: zod_v4.z.string().describe("The type of async subagent to use. Must be one of the available types listed in the tool description.")
		})
	});
}
|
|
3786
|
+
/**
|
|
3787
|
+
* Build the `check_async_task` tool.
|
|
3788
|
+
*
|
|
3789
|
+
* Fetches the current run status from the remote server and, if the run
|
|
3790
|
+
* succeeded, retrieves the thread state to extract the result.
|
|
3791
|
+
*/
|
|
3792
|
+
function buildCheckTool(clients) {
	return (0, langchain.tool)(async (input, runtime) => {
		const task = resolveTrackedTask(input.taskId, runtime.state);
		// resolveTrackedTask returns an error string when the id is unknown.
		if (typeof task === "string") return task;
		const client = clients.getClient(task.agentName);
		let run;
		try {
			run = await client.runs.get(task.threadId, task.runId);
		} catch (e) {
			return `Failed to get run status: ${e}`;
		}
		// Only fetch remote thread state for successful runs — that is where
		// the result is extracted from. Fetch failures are tolerated; the
		// check still reports the run status.
		let threadValues = {};
		if (run.status === "success") try {
			threadValues = (await client.threads.getState(task.threadId)).values || {};
		} catch {}
		const result = buildCheckResult(run, task.threadId, threadValues);
		// Persist the freshly observed status and stamp checkedAt.
		const updatedTask = {
			taskId: task.taskId,
			agentName: task.agentName,
			threadId: task.threadId,
			runId: task.runId,
			status: result.status,
			createdAt: task.createdAt,
			updatedAt: task.updatedAt,
			checkedAt: (/* @__PURE__ */ new Date()).toISOString()
		};
		return new _langchain_langgraph.Command({ update: {
			messages: [new langchain.ToolMessage({
				content: JSON.stringify(result),
				tool_call_id: toolCallIdFromRuntime(runtime)
			})],
			asyncTasks: { [task.taskId]: updatedTask }
		} });
	}, {
		name: "check_async_task",
		description: "Check the status of an async subagent task. Returns the current status and, if complete, the result.",
		schema: zod_v4.z.object({ taskId: zod_v4.z.string().describe("The exact taskId string returned by start_async_task. Pass it verbatim.") })
	});
}
|
|
3831
|
+
/**
|
|
3832
|
+
* Build the `update_async_task` tool.
|
|
3833
|
+
*
|
|
3834
|
+
* Sends a follow-up message to a running async subagent by creating a new
|
|
3835
|
+
* run on the same thread with `multitaskStrategy: "interrupt"`. The subagent
|
|
3836
|
+
* sees the full conversation history plus the new message. The `taskId`
|
|
3837
|
+
* remains the same; only the internal `runId` is updated.
|
|
3838
|
+
*/
|
|
3839
|
+
/**
 * Build the `update_async_task` tool.
 *
 * Sends a follow-up message to a running async subagent by creating a new
 * run on the same thread with `multitaskStrategy: "interrupt"`. The subagent
 * sees the full conversation history plus the new message. The `taskId`
 * remains the same; only the internal `runId` is updated.
 *
 * Fixes: the user-facing tool description now starts with a capital letter,
 * consistent with the sibling tool descriptions; the updated task record is
 * built with a spread instead of copying every field by hand.
 */
function buildUpdateTool(agentMap, clients) {
	return (0, langchain.tool)(async (input, runtime) => {
		const tracked = resolveTrackedTask(input.taskId, runtime.state);
		// resolveTrackedTask returns an error string when the id is unknown.
		if (typeof tracked === "string") return tracked;
		const spec = agentMap[tracked.agentName];
		try {
			// "interrupt" aborts any in-flight run on the thread before the
			// new run starts, so the subagent sees history + the new message.
			const run = await clients.getClient(tracked.agentName).runs.create(tracked.threadId, spec.graphId, {
				input: { messages: [{
					role: "user",
					content: input.message
				}] },
				multitaskStrategy: "interrupt"
			});
			// Same taskId/threadId/createdAt/checkedAt; only runId, status,
			// and updatedAt change.
			const task = {
				...tracked,
				runId: run.run_id,
				status: "running",
				updatedAt: (/* @__PURE__ */ new Date()).toISOString()
			};
			return new _langchain_langgraph.Command({ update: {
				messages: [new langchain.ToolMessage({
					content: `Updated async subagent. taskId: ${tracked.taskId}`,
					tool_call_id: toolCallIdFromRuntime(runtime)
				})],
				asyncTasks: { [tracked.taskId]: task }
			} });
		} catch (e) {
			// Update failures are returned as tool output, not thrown.
			return `Failed to update async subagent: ${e}`;
		}
	}, {
		name: "update_async_task",
		description: "Send updated instructions to an async subagent. Interrupts the current run and starts a new one on the same thread so the subagent sees the full conversation history plus your new message. The taskId remains the same.",
		schema: zod_v4.z.object({
			taskId: zod_v4.z.string().describe("The exact taskId string returned by start_async_task. Pass it verbatim."),
			message: zod_v4.z.string().describe("Follow-up instructions or context to send to the subagent")
		})
	});
}
|
|
3881
|
+
/**
|
|
3882
|
+
* Build the `cancel_async_task` tool.
|
|
3883
|
+
*
|
|
3884
|
+
* Cancels the current run on the remote server and updates the task's
|
|
3885
|
+
* cached status to `"cancelled"`.
|
|
3886
|
+
*/
|
|
3887
|
+
function buildCancelTool(clients) {
	return (0, langchain.tool)(async (input, runtime) => {
		const tracked = resolveTrackedTask(input.taskId, runtime.state);
		// resolveTrackedTask returns an error string when the id is unknown.
		if (typeof tracked === "string") return tracked;
		const client = clients.getClient(tracked.agentName);
		try {
			await client.runs.cancel(tracked.threadId, tracked.runId);
		} catch (e) {
			return `Failed to cancel run: ${e}`;
		}
		// Record the terminal "cancelled" status so later list/check calls
		// can skip live-status fetches for this task.
		const updated = {
			taskId: tracked.taskId,
			agentName: tracked.agentName,
			threadId: tracked.threadId,
			runId: tracked.runId,
			status: "cancelled",
			createdAt: tracked.createdAt,
			updatedAt: tracked.updatedAt,
			checkedAt: tracked.checkedAt
		};
		return new _langchain_langgraph.Command({ update: {
			messages: [new langchain.ToolMessage({
				content: `Cancelled async subagent task: ${tracked.taskId}`,
				tool_call_id: toolCallIdFromRuntime(runtime)
			})],
			asyncTasks: { [tracked.taskId]: updated }
		} });
	}, {
		name: "cancel_async_task",
		description: "Cancel a running async subagent task. Use this to stop a task that is no longer needed.",
		schema: zod_v4.z.object({ taskId: zod_v4.z.string().describe("The exact taskId string returned by start_async_task. Pass it verbatim.") })
	});
}
|
|
3920
|
+
/**
|
|
3921
|
+
* Build the `list_async_tasks` tool.
|
|
3922
|
+
*
|
|
3923
|
+
* Lists all tracked tasks with their live statuses fetched in parallel.
|
|
3924
|
+
* Supports optional filtering by cached status.
|
|
3925
|
+
*/
|
|
3926
|
+
/**
 * Build the `list_async_tasks` tool.
 *
 * Lists all tracked tasks with their live statuses fetched in parallel.
 * Supports optional filtering by cached status.
 *
 * Fixes: typo in the user-facing tool description ("Be default" ->
 * "By default"); the refreshed task record is built with a spread
 * instead of copying every field by hand.
 */
function buildListTool(clients) {
	return (0, langchain.tool)(async (input, runtime) => {
		const filtered = filterTasks(runtime.state.asyncTasks ?? {}, input.statusFilter ?? void 0);
		if (filtered.length === 0) return "No async subagent tasks tracked";
		// Fetch live statuses in parallel; terminal tasks short-circuit to
		// their cached status inside fetchLiveTaskStatus.
		const statuses = await Promise.all(filtered.map((task) => fetchLiveTaskStatus(clients, task)));
		const updatedTasks = {};
		const entries = [];
		for (let idx = 0; idx < filtered.length; idx++) {
			const task = filtered[idx];
			const status = statuses[idx];
			entries.push(formatTaskEntry(task, status));
			// Persist the freshly observed status back into state.
			updatedTasks[task.taskId] = {
				...task,
				status
			};
		}
		return new _langchain_langgraph.Command({ update: {
			messages: [new langchain.ToolMessage({
				content: `${entries.length} tracked task(s):\n${entries.join("\n")}`,
				tool_call_id: toolCallIdFromRuntime(runtime)
			})],
			asyncTasks: updatedTasks
		} });
	}, {
		name: "list_async_tasks",
		description: "List tracked async subagent tasks with their current live statuses. By default shows all tasks. Use `statusFilter` to narrow by status (e.g., 'running', 'success', 'error', 'cancelled'). Use `check_async_task` to get the full result of a specific completed task.",
		schema: zod_v4.z.object({ statusFilter: zod_v4.z.string().nullish().describe("Filter tasks by status. One of: 'running', 'success', 'error', 'cancelled', 'all'. Defaults to 'all'.") })
	});
}
|
|
3962
|
+
/**
|
|
3963
|
+
* Create middleware that adds async subagent tools to an agent.
|
|
3964
|
+
*
|
|
3965
|
+
* Provides five tools for launching, checking, updating, cancelling, and
|
|
3966
|
+
* listing background tasks on remote LangGraph deployments. Task state is
|
|
3967
|
+
* persisted in the `asyncTasks` state channel so it survives
|
|
3968
|
+
* context compaction.
|
|
3969
|
+
*
|
|
3970
|
+
* @throws {Error} If no async subagents are provided or names are duplicated.
|
|
3971
|
+
*
|
|
3972
|
+
* @example
|
|
3973
|
+
* ```ts
|
|
3974
|
+
* const middleware = createAsyncSubAgentMiddleware({
|
|
3975
|
+
* asyncSubAgents: [{
|
|
3976
|
+
* name: "researcher",
|
|
3977
|
+
* description: "Research agent for deep analysis",
|
|
3978
|
+
* url: "https://my-deployment.langsmith.dev",
|
|
3979
|
+
* graphId: "research_agent",
|
|
3980
|
+
* }],
|
|
3981
|
+
* });
|
|
3982
|
+
* ```
|
|
3983
|
+
*/
|
|
3984
|
+
/**
|
|
3985
|
+
* Type guard to distinguish async SubAgents from sync SubAgents/CompiledSubAgents.
|
|
3986
|
+
*
|
|
3987
|
+
* Uses the presence of the `graphId` field as the runtime discriminant —
|
|
3988
|
+
* `AsyncSubAgent` requires it, while `SubAgent` and `CompiledSubAgent` do not have it.
|
|
3989
|
+
*/
|
|
3990
|
+
/**
 * Type guard distinguishing async SubAgents from sync SubAgents /
 * CompiledSubAgents.
 *
 * `graphId` is the runtime discriminant: `AsyncSubAgent` requires it,
 * while `SubAgent` and `CompiledSubAgent` do not have it.
 */
function isAsyncSubAgent(subAgent) {
	return Reflect.has(subAgent, "graphId");
}
|
|
3993
|
+
function createAsyncSubAgentMiddleware(options) {
	const { asyncSubAgents, systemPrompt = ASYNC_TASK_SYSTEM_PROMPT } = options;
	if (!asyncSubAgents || asyncSubAgents.length === 0) throw new Error("At least one async subagent must be specified");
	// Reject duplicate names up front — the agent map below is keyed by name.
	const names = asyncSubAgents.map((a) => a.name);
	const duplicates = names.filter((n, i) => names.indexOf(n) !== i);
	if (duplicates.length > 0) throw new Error(`Duplicate async subagent names: ${[...new Set(duplicates)].join(", ")}`);
	const agentMap = Object.fromEntries(asyncSubAgents.map((a) => [a.name, a]));
	// One client cache shared by all five tools so specs with identical
	// (url, headers) reuse a single SDK client.
	const clients = new ClientCache(agentMap);
	const agentsDescription = asyncSubAgents.map((a) => `- ${a.name}: ${a.description}`).join("\n");
	const tools = [
		buildStartTool(agentMap, clients, ASYNC_TASK_TOOL_DESCRIPTION.replace("{available_agents}", agentsDescription)),
		buildCheckTool(clients),
		buildUpdateTool(agentMap, clients),
		buildCancelTool(clients),
		buildListTool(clients)
	];
	// A falsy systemPrompt option disables the system-message injection below.
	const fullSystemPrompt = systemPrompt ? `${systemPrompt}\n\nAvailable async subagent types:\n${agentsDescription}` : null;
	return (0, langchain.createMiddleware)({
		name: "asyncSubAgentMiddleware",
		stateSchema: AsyncTaskStateSchema,
		tools,
		// Append the async-subagent usage instructions to the system message
		// on every model call (unless disabled above).
		wrapModelCall: async (request, handler) => {
			if (fullSystemPrompt !== null) return handler({
				...request,
				systemMessage: request.systemMessage.concat(new langchain.SystemMessage({ content: fullSystemPrompt }))
			});
			return handler(request);
		}
	});
}
|
|
4023
|
+
//#endregion
|
|
4024
|
+
//#region src/backends/store.ts
|
|
4025
|
+
const NAMESPACE_COMPONENT_RE = /^[A-Za-z0-9\-_.@+:~]+$/;
/**
 * Validate a namespace array for store lookups.
 *
 * Every component must be a non-empty string made only of safe characters:
 * alphanumerics (a-z, A-Z, 0-9), hyphen (-), underscore (_), dot (.),
 * at sign (@), plus (+), colon (:), and tilde (~). Characters like
 * *, ?, [, ], {, } are rejected to prevent wildcard or glob injection
 * in store lookups.
 *
 * @returns The validated namespace, unchanged.
 * @throws {TypeError} When a component is not a string.
 * @throws {Error} When the array is empty, or a component is empty or
 *   contains disallowed characters.
 */
function validateNamespace(namespace) {
	if (namespace.length === 0) throw new Error("Namespace array must not be empty.");
	namespace.forEach((component, i) => {
		if (typeof component !== "string") throw new TypeError(`Namespace component at index ${i} must be a string, got ${typeof component}.`);
		if (!component) throw new Error(`Namespace component at index ${i} must not be empty.`);
		if (!NAMESPACE_COMPONENT_RE.test(component)) throw new Error(`Namespace component at index ${i} contains disallowed characters: "${component}". Only alphanumeric characters, hyphens, underscores, dots, @, +, colons, and tildes are allowed.`);
	});
	return namespace;
}
|
|
4046
|
+
/**
|
|
4047
|
+
* Backend that stores files in LangGraph's BaseStore (persistent).
|
|
3021
4048
|
*
|
|
3022
4049
|
* Uses LangGraph's Store for persistent, cross-conversation storage.
|
|
3023
4050
|
* Files are organized via namespaces and persist across all threads.
|
|
@@ -3029,9 +4056,11 @@ function validateNamespace(namespace) {
|
|
|
3029
4056
|
var StoreBackend = class {
|
|
3030
4057
|
stateAndStore;
|
|
3031
4058
|
_namespace;
|
|
4059
|
+
fileFormat;
|
|
3032
4060
|
constructor(stateAndStore, options) {
|
|
3033
4061
|
this.stateAndStore = stateAndStore;
|
|
3034
4062
|
if (options?.namespace) this._namespace = validateNamespace(options.namespace);
|
|
4063
|
+
this.fileFormat = options?.fileFormat ?? "v2";
|
|
3035
4064
|
}
|
|
3036
4065
|
/**
|
|
3037
4066
|
* Get the store instance.
|
|
@@ -3068,9 +4097,10 @@ var StoreBackend = class {
|
|
|
3068
4097
|
*/
|
|
3069
4098
|
convertStoreItemToFileData(storeItem) {
|
|
3070
4099
|
const value = storeItem.value;
|
|
3071
|
-
if (!value.content
|
|
4100
|
+
if (!(value.content !== void 0 && (Array.isArray(value.content) || typeof value.content === "string" || ArrayBuffer.isView(value.content))) || typeof value.created_at !== "string" || typeof value.modified_at !== "string") throw new Error(`Store item does not contain valid FileData fields. Got keys: ${Object.keys(value).join(", ")}`);
|
|
3072
4101
|
return {
|
|
3073
4102
|
content: value.content,
|
|
4103
|
+
...value.mimeType ? { mimeType: value.mimeType } : {},
|
|
3074
4104
|
created_at: value.created_at,
|
|
3075
4105
|
modified_at: value.modified_at
|
|
3076
4106
|
};
|
|
@@ -3079,11 +4109,12 @@ var StoreBackend = class {
|
|
|
3079
4109
|
* Convert FileData to a value suitable for store.put().
|
|
3080
4110
|
*
|
|
3081
4111
|
* @param fileData - The FileData to convert
|
|
3082
|
-
* @returns Object with content, created_at, and modified_at fields
|
|
4112
|
+
* @returns Object with content, mimeType, created_at, and modified_at fields
|
|
3083
4113
|
*/
|
|
3084
4114
|
convertFileDataToStoreValue(fileData) {
|
|
3085
4115
|
return {
|
|
3086
4116
|
content: fileData.content,
|
|
4117
|
+
..."mimeType" in fileData ? { mimeType: fileData.mimeType } : {},
|
|
3087
4118
|
created_at: fileData.created_at,
|
|
3088
4119
|
modified_at: fileData.modified_at
|
|
3089
4120
|
};
|
|
@@ -3118,10 +4149,10 @@ var StoreBackend = class {
|
|
|
3118
4149
|
* List files and directories in the specified directory (non-recursive).
|
|
3119
4150
|
*
|
|
3120
4151
|
* @param path - Absolute path to directory
|
|
3121
|
-
* @returns
|
|
4152
|
+
* @returns LsResult with list of FileInfo objects on success or error on failure.
|
|
3122
4153
|
* Directories have a trailing / in their path and is_dir=true.
|
|
3123
4154
|
*/
|
|
3124
|
-
async
|
|
4155
|
+
async ls(path) {
|
|
3125
4156
|
const store = this.getStore();
|
|
3126
4157
|
const namespace = this.getNamespace();
|
|
3127
4158
|
const items = await this.searchStorePaginated(store, namespace);
|
|
@@ -3139,7 +4170,7 @@ var StoreBackend = class {
|
|
|
3139
4170
|
}
|
|
3140
4171
|
try {
|
|
3141
4172
|
const fd = this.convertStoreItemToFileData(item);
|
|
3142
|
-
const size = fd.content.join("\n").length;
|
|
4173
|
+
const size = isFileDataV1(fd) ? fd.content.join("\n").length : isFileDataBinary(fd) ? fd.content.byteLength : fd.content.length;
|
|
3143
4174
|
infos.push({
|
|
3144
4175
|
path: itemKey,
|
|
3145
4176
|
is_dir: false,
|
|
@@ -3157,35 +4188,49 @@ var StoreBackend = class {
|
|
|
3157
4188
|
modified_at: ""
|
|
3158
4189
|
});
|
|
3159
4190
|
infos.sort((a, b) => a.path.localeCompare(b.path));
|
|
3160
|
-
return infos;
|
|
4191
|
+
return { files: infos };
|
|
3161
4192
|
}
|
|
3162
4193
|
/**
|
|
3163
|
-
* Read file content
|
|
4194
|
+
* Read file content.
|
|
4195
|
+
*
|
|
4196
|
+
* Text files are paginated by line offset/limit.
|
|
4197
|
+
* Binary files return full Uint8Array content (offset/limit ignored).
|
|
3164
4198
|
*
|
|
3165
4199
|
* @param filePath - Absolute file path
|
|
3166
4200
|
* @param offset - Line offset to start reading from (0-indexed)
|
|
3167
4201
|
* @param limit - Maximum number of lines to read
|
|
3168
|
-
* @returns
|
|
4202
|
+
* @returns ReadResult with content on success or error on failure
|
|
3169
4203
|
*/
|
|
3170
4204
|
async read(filePath, offset = 0, limit = 500) {
|
|
3171
4205
|
try {
|
|
3172
|
-
|
|
4206
|
+
const readRawResult = await this.readRaw(filePath);
|
|
4207
|
+
if (readRawResult.error || !readRawResult.data) return { error: readRawResult.error || "File data not found" };
|
|
4208
|
+
const fileDataV2 = migrateToFileDataV2(readRawResult.data, filePath);
|
|
4209
|
+
if (!isTextMimeType(fileDataV2.mimeType)) return {
|
|
4210
|
+
content: fileDataV2.content,
|
|
4211
|
+
mimeType: fileDataV2.mimeType
|
|
4212
|
+
};
|
|
4213
|
+
if (typeof fileDataV2.content !== "string") return { error: `File '${filePath}' has binary content but text MIME type` };
|
|
4214
|
+
return {
|
|
4215
|
+
content: fileDataV2.content.split("\n").slice(offset, offset + limit).join("\n"),
|
|
4216
|
+
mimeType: fileDataV2.mimeType
|
|
4217
|
+
};
|
|
3173
4218
|
} catch (e) {
|
|
3174
|
-
return
|
|
4219
|
+
return { error: e.message };
|
|
3175
4220
|
}
|
|
3176
4221
|
}
|
|
3177
4222
|
/**
|
|
3178
4223
|
* Read file content as raw FileData.
|
|
3179
4224
|
*
|
|
3180
4225
|
* @param filePath - Absolute file path
|
|
3181
|
-
* @returns
|
|
4226
|
+
* @returns ReadRawResult with raw file data on success or error on failure
|
|
3182
4227
|
*/
|
|
3183
4228
|
async readRaw(filePath) {
|
|
3184
4229
|
const store = this.getStore();
|
|
3185
4230
|
const namespace = this.getNamespace();
|
|
3186
4231
|
const item = await store.get(namespace, filePath);
|
|
3187
|
-
if (!item)
|
|
3188
|
-
return this.convertStoreItemToFileData(item);
|
|
4232
|
+
if (!item) return { error: `File '${filePath}' not found` };
|
|
4233
|
+
return { data: this.convertStoreItemToFileData(item) };
|
|
3189
4234
|
}
|
|
3190
4235
|
/**
|
|
3191
4236
|
* Create a new file with content.
|
|
@@ -3195,7 +4240,8 @@ var StoreBackend = class {
|
|
|
3195
4240
|
const store = this.getStore();
|
|
3196
4241
|
const namespace = this.getNamespace();
|
|
3197
4242
|
if (await store.get(namespace, filePath)) return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
|
|
3198
|
-
const
|
|
4243
|
+
const mimeType = getMimeType(filePath);
|
|
4244
|
+
const fileData = createFileData(content, void 0, this.fileFormat, mimeType);
|
|
3199
4245
|
const storeValue = this.convertFileDataToStoreValue(fileData);
|
|
3200
4246
|
await store.put(namespace, filePath, storeValue);
|
|
3201
4247
|
return {
|
|
@@ -3230,9 +4276,10 @@ var StoreBackend = class {
|
|
|
3230
4276
|
}
|
|
3231
4277
|
}
|
|
3232
4278
|
/**
|
|
3233
|
-
*
|
|
4279
|
+
* Search file contents for a literal text pattern.
|
|
4280
|
+
* Binary files are skipped.
|
|
3234
4281
|
*/
|
|
3235
|
-
async
|
|
4282
|
+
async grep(pattern, path = "/", glob = null) {
|
|
3236
4283
|
const store = this.getStore();
|
|
3237
4284
|
const namespace = this.getNamespace();
|
|
3238
4285
|
const items = await this.searchStorePaginated(store, namespace);
|
|
@@ -3242,12 +4289,12 @@ var StoreBackend = class {
|
|
|
3242
4289
|
} catch {
|
|
3243
4290
|
continue;
|
|
3244
4291
|
}
|
|
3245
|
-
return grepMatchesFromFiles(files, pattern, path, glob);
|
|
4292
|
+
return { matches: grepMatchesFromFiles(files, pattern, path, glob) };
|
|
3246
4293
|
}
|
|
3247
4294
|
/**
|
|
3248
4295
|
* Structured glob matching returning FileInfo objects.
|
|
3249
4296
|
*/
|
|
3250
|
-
async
|
|
4297
|
+
async glob(pattern, path = "/") {
|
|
3251
4298
|
const store = this.getStore();
|
|
3252
4299
|
const namespace = this.getNamespace();
|
|
3253
4300
|
const items = await this.searchStorePaginated(store, namespace);
|
|
@@ -3258,12 +4305,12 @@ var StoreBackend = class {
|
|
|
3258
4305
|
continue;
|
|
3259
4306
|
}
|
|
3260
4307
|
const result = globSearchFiles(files, pattern, path);
|
|
3261
|
-
if (result === "No files found") return [];
|
|
4308
|
+
if (result === "No files found") return { files: [] };
|
|
3262
4309
|
const paths = result.split("\n");
|
|
3263
4310
|
const infos = [];
|
|
3264
4311
|
for (const p of paths) {
|
|
3265
4312
|
const fd = files[p];
|
|
3266
|
-
const size = fd ? fd.content.join("\n").length : 0;
|
|
4313
|
+
const size = fd ? isFileDataV1(fd) ? fd.content.join("\n").length : isFileDataBinary(fd) ? fd.content.byteLength : fd.content.length : 0;
|
|
3267
4314
|
infos.push({
|
|
3268
4315
|
path: p,
|
|
3269
4316
|
is_dir: false,
|
|
@@ -3271,7 +4318,7 @@ var StoreBackend = class {
|
|
|
3271
4318
|
modified_at: fd?.modified_at || ""
|
|
3272
4319
|
});
|
|
3273
4320
|
}
|
|
3274
|
-
return infos;
|
|
4321
|
+
return { files: infos };
|
|
3275
4322
|
}
|
|
3276
4323
|
/**
|
|
3277
4324
|
* Upload multiple files.
|
|
@@ -3284,7 +4331,11 @@ var StoreBackend = class {
|
|
|
3284
4331
|
const namespace = this.getNamespace();
|
|
3285
4332
|
const responses = [];
|
|
3286
4333
|
for (const [path, content] of files) try {
|
|
3287
|
-
const
|
|
4334
|
+
const mimeType = getMimeType(path);
|
|
4335
|
+
const isBinary = this.fileFormat === "v2" && !isTextMimeType(mimeType);
|
|
4336
|
+
let fileData;
|
|
4337
|
+
if (isBinary) fileData = createFileData(content, void 0, "v2", mimeType);
|
|
4338
|
+
else fileData = createFileData(new TextDecoder().decode(content), void 0, this.fileFormat, mimeType);
|
|
3288
4339
|
const storeValue = this.convertFileDataToStoreValue(fileData);
|
|
3289
4340
|
await store.put(namespace, path, storeValue);
|
|
3290
4341
|
responses.push({
|
|
@@ -3319,11 +4370,17 @@ var StoreBackend = class {
|
|
|
3319
4370
|
});
|
|
3320
4371
|
continue;
|
|
3321
4372
|
}
|
|
3322
|
-
const
|
|
3323
|
-
|
|
3324
|
-
|
|
4373
|
+
const fileDataV2 = migrateToFileDataV2(this.convertStoreItemToFileData(item), path);
|
|
4374
|
+
if (typeof fileDataV2.content === "string") {
|
|
4375
|
+
const content = new TextEncoder().encode(fileDataV2.content);
|
|
4376
|
+
responses.push({
|
|
4377
|
+
path,
|
|
4378
|
+
content,
|
|
4379
|
+
error: null
|
|
4380
|
+
});
|
|
4381
|
+
} else responses.push({
|
|
3325
4382
|
path,
|
|
3326
|
-
content,
|
|
4383
|
+
content: fileDataV2.content,
|
|
3327
4384
|
error: null
|
|
3328
4385
|
});
|
|
3329
4386
|
} catch {
|
|
@@ -3336,7 +4393,6 @@ var StoreBackend = class {
|
|
|
3336
4393
|
return responses;
|
|
3337
4394
|
}
|
|
3338
4395
|
};
|
|
3339
|
-
|
|
3340
4396
|
//#endregion
|
|
3341
4397
|
//#region src/backends/filesystem.ts
|
|
3342
4398
|
/**
|
|
@@ -3397,10 +4453,10 @@ var FilesystemBackend = class {
|
|
|
3397
4453
|
* @returns List of FileInfo objects for files and directories directly in the directory.
|
|
3398
4454
|
* Directories have a trailing / in their path and is_dir=true.
|
|
3399
4455
|
*/
|
|
3400
|
-
async
|
|
4456
|
+
async ls(dirPath) {
|
|
3401
4457
|
try {
|
|
3402
4458
|
const resolvedPath = this.resolvePath(dirPath);
|
|
3403
|
-
if (!(await node_fs_promises.default.stat(resolvedPath)).isDirectory()) return [];
|
|
4459
|
+
if (!(await node_fs_promises.default.stat(resolvedPath)).isDirectory()) return { files: [] };
|
|
3404
4460
|
const entries = await node_fs_promises.default.readdir(resolvedPath, { withFileTypes: true });
|
|
3405
4461
|
const results = [];
|
|
3406
4462
|
const cwdStr = this.cwd.endsWith(node_path.default.sep) ? this.cwd : this.cwd + node_path.default.sep;
|
|
@@ -3448,9 +4504,9 @@ var FilesystemBackend = class {
|
|
|
3448
4504
|
}
|
|
3449
4505
|
}
|
|
3450
4506
|
results.sort((a, b) => a.path.localeCompare(b.path));
|
|
3451
|
-
return results;
|
|
4507
|
+
return { files: results };
|
|
3452
4508
|
} catch {
|
|
3453
|
-
return [];
|
|
4509
|
+
return { files: [] };
|
|
3454
4510
|
}
|
|
3455
4511
|
}
|
|
3456
4512
|
/**
|
|
@@ -3464,62 +4520,105 @@ var FilesystemBackend = class {
|
|
|
3464
4520
|
async read(filePath, offset = 0, limit = 500) {
|
|
3465
4521
|
try {
|
|
3466
4522
|
const resolvedPath = this.resolvePath(filePath);
|
|
4523
|
+
const mimeType = getMimeType(filePath);
|
|
4524
|
+
const isBinary = !isTextMimeType(mimeType);
|
|
3467
4525
|
let content;
|
|
3468
4526
|
if (SUPPORTS_NOFOLLOW) {
|
|
3469
|
-
if (!(await node_fs_promises.default.stat(resolvedPath)).isFile()) return
|
|
4527
|
+
if (!(await node_fs_promises.default.stat(resolvedPath)).isFile()) return { error: `File '${filePath}' not found` };
|
|
3470
4528
|
const fd = await node_fs_promises.default.open(resolvedPath, node_fs.default.constants.O_RDONLY | node_fs.default.constants.O_NOFOLLOW);
|
|
3471
4529
|
try {
|
|
4530
|
+
if (isBinary) {
|
|
4531
|
+
const buffer = await fd.readFile();
|
|
4532
|
+
return {
|
|
4533
|
+
content: new Uint8Array(buffer),
|
|
4534
|
+
mimeType
|
|
4535
|
+
};
|
|
4536
|
+
}
|
|
3472
4537
|
content = await fd.readFile({ encoding: "utf-8" });
|
|
3473
4538
|
} finally {
|
|
3474
4539
|
await fd.close();
|
|
3475
4540
|
}
|
|
3476
4541
|
} else {
|
|
3477
4542
|
const stat = await node_fs_promises.default.lstat(resolvedPath);
|
|
3478
|
-
if (stat.isSymbolicLink()) return
|
|
3479
|
-
if (!stat.isFile()) return
|
|
4543
|
+
if (stat.isSymbolicLink()) return { error: `Symlinks are not allowed: ${filePath}` };
|
|
4544
|
+
if (!stat.isFile()) return { error: `File '${filePath}' not found` };
|
|
4545
|
+
if (isBinary) {
|
|
4546
|
+
const buffer = await node_fs_promises.default.readFile(resolvedPath);
|
|
4547
|
+
return {
|
|
4548
|
+
content: new Uint8Array(buffer),
|
|
4549
|
+
mimeType
|
|
4550
|
+
};
|
|
4551
|
+
}
|
|
3480
4552
|
content = await node_fs_promises.default.readFile(resolvedPath, "utf-8");
|
|
3481
4553
|
}
|
|
3482
4554
|
const emptyMsg = checkEmptyContent(content);
|
|
3483
|
-
if (emptyMsg) return
|
|
4555
|
+
if (emptyMsg) return {
|
|
4556
|
+
content: emptyMsg,
|
|
4557
|
+
mimeType
|
|
4558
|
+
};
|
|
3484
4559
|
const lines = content.split("\n");
|
|
3485
4560
|
const startIdx = offset;
|
|
3486
4561
|
const endIdx = Math.min(startIdx + limit, lines.length);
|
|
3487
|
-
if (startIdx >= lines.length) return
|
|
3488
|
-
return
|
|
4562
|
+
if (startIdx >= lines.length) return { error: `Line offset ${offset} exceeds file length (${lines.length} lines)` };
|
|
4563
|
+
return {
|
|
4564
|
+
content: lines.slice(startIdx, endIdx).join("\n"),
|
|
4565
|
+
mimeType
|
|
4566
|
+
};
|
|
3489
4567
|
} catch (e) {
|
|
3490
|
-
return `Error reading file '${filePath}': ${e.message}
|
|
4568
|
+
return { error: `Error reading file '${filePath}': ${e.message}` };
|
|
3491
4569
|
}
|
|
3492
4570
|
}
|
|
3493
4571
|
/**
|
|
3494
4572
|
* Read file content as raw FileData.
|
|
3495
4573
|
*
|
|
3496
4574
|
* @param filePath - Absolute file path
|
|
3497
|
-
* @returns
|
|
4575
|
+
* @returns ReadRawResult with raw file data on success or error on failure
|
|
3498
4576
|
*/
|
|
3499
4577
|
async readRaw(filePath) {
|
|
3500
4578
|
const resolvedPath = this.resolvePath(filePath);
|
|
4579
|
+
const mimeType = getMimeType(filePath);
|
|
4580
|
+
const isBinary = !isTextMimeType(mimeType);
|
|
3501
4581
|
let content;
|
|
3502
4582
|
let stat;
|
|
3503
4583
|
if (SUPPORTS_NOFOLLOW) {
|
|
3504
4584
|
stat = await node_fs_promises.default.stat(resolvedPath);
|
|
3505
|
-
if (!stat.isFile())
|
|
4585
|
+
if (!stat.isFile()) return { error: `File '${filePath}' not found` };
|
|
3506
4586
|
const fd = await node_fs_promises.default.open(resolvedPath, node_fs.default.constants.O_RDONLY | node_fs.default.constants.O_NOFOLLOW);
|
|
3507
4587
|
try {
|
|
4588
|
+
if (isBinary) {
|
|
4589
|
+
const buffer = await fd.readFile();
|
|
4590
|
+
return { data: {
|
|
4591
|
+
content: new Uint8Array(buffer),
|
|
4592
|
+
mimeType,
|
|
4593
|
+
created_at: stat.ctime.toISOString(),
|
|
4594
|
+
modified_at: stat.mtime.toISOString()
|
|
4595
|
+
} };
|
|
4596
|
+
}
|
|
3508
4597
|
content = await fd.readFile({ encoding: "utf-8" });
|
|
3509
4598
|
} finally {
|
|
3510
4599
|
await fd.close();
|
|
3511
4600
|
}
|
|
3512
4601
|
} else {
|
|
3513
4602
|
stat = await node_fs_promises.default.lstat(resolvedPath);
|
|
3514
|
-
if (stat.isSymbolicLink())
|
|
3515
|
-
if (!stat.isFile())
|
|
4603
|
+
if (stat.isSymbolicLink()) return { error: `Symlinks are not allowed: ${filePath}` };
|
|
4604
|
+
if (!stat.isFile()) return { error: `File '${filePath}' not found` };
|
|
4605
|
+
if (isBinary) {
|
|
4606
|
+
const buffer = await node_fs_promises.default.readFile(resolvedPath);
|
|
4607
|
+
return { data: {
|
|
4608
|
+
content: new Uint8Array(buffer),
|
|
4609
|
+
mimeType,
|
|
4610
|
+
created_at: stat.ctime.toISOString(),
|
|
4611
|
+
modified_at: stat.mtime.toISOString()
|
|
4612
|
+
} };
|
|
4613
|
+
}
|
|
3516
4614
|
content = await node_fs_promises.default.readFile(resolvedPath, "utf-8");
|
|
3517
4615
|
}
|
|
3518
|
-
return {
|
|
3519
|
-
content
|
|
4616
|
+
return { data: {
|
|
4617
|
+
content,
|
|
4618
|
+
mimeType,
|
|
3520
4619
|
created_at: stat.ctime.toISOString(),
|
|
3521
4620
|
modified_at: stat.mtime.toISOString()
|
|
3522
|
-
};
|
|
4621
|
+
} };
|
|
3523
4622
|
}
|
|
3524
4623
|
/**
|
|
3525
4624
|
* Create a new file with content.
|
|
@@ -3528,6 +4627,7 @@ var FilesystemBackend = class {
|
|
|
3528
4627
|
async write(filePath, content) {
|
|
3529
4628
|
try {
|
|
3530
4629
|
const resolvedPath = this.resolvePath(filePath);
|
|
4630
|
+
const isBinary = !isTextMimeType(getMimeType(filePath));
|
|
3531
4631
|
try {
|
|
3532
4632
|
if ((await node_fs_promises.default.lstat(resolvedPath)).isSymbolicLink()) return { error: `Cannot write to ${filePath} because it is a symlink. Symlinks are not allowed.` };
|
|
3533
4633
|
return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
|
|
@@ -3537,10 +4637,16 @@ var FilesystemBackend = class {
|
|
|
3537
4637
|
const flags = node_fs.default.constants.O_WRONLY | node_fs.default.constants.O_CREAT | node_fs.default.constants.O_TRUNC | node_fs.default.constants.O_NOFOLLOW;
|
|
3538
4638
|
const fd = await node_fs_promises.default.open(resolvedPath, flags, 420);
|
|
3539
4639
|
try {
|
|
3540
|
-
|
|
4640
|
+
if (isBinary) {
|
|
4641
|
+
const buffer = Buffer.from(content, "base64");
|
|
4642
|
+
await fd.writeFile(buffer);
|
|
4643
|
+
} else await fd.writeFile(content, "utf-8");
|
|
3541
4644
|
} finally {
|
|
3542
4645
|
await fd.close();
|
|
3543
4646
|
}
|
|
4647
|
+
} else if (isBinary) {
|
|
4648
|
+
const buffer = Buffer.from(content, "base64");
|
|
4649
|
+
await node_fs_promises.default.writeFile(resolvedPath, buffer);
|
|
3544
4650
|
} else await node_fs_promises.default.writeFile(resolvedPath, content, "utf-8");
|
|
3545
4651
|
return {
|
|
3546
4652
|
path: filePath,
|
|
@@ -3603,17 +4709,17 @@ var FilesystemBackend = class {
|
|
|
3603
4709
|
* @param glob - Optional glob pattern to filter which files to search.
|
|
3604
4710
|
* @returns List of GrepMatch dicts containing path, line number, and matched text.
|
|
3605
4711
|
*/
|
|
3606
|
-
async
|
|
4712
|
+
async grep(pattern, dirPath = "/", glob = null) {
|
|
3607
4713
|
let baseFull;
|
|
3608
4714
|
try {
|
|
3609
4715
|
baseFull = this.resolvePath(dirPath || ".");
|
|
3610
4716
|
} catch {
|
|
3611
|
-
return [];
|
|
4717
|
+
return { matches: [] };
|
|
3612
4718
|
}
|
|
3613
4719
|
try {
|
|
3614
4720
|
await node_fs_promises.default.stat(baseFull);
|
|
3615
4721
|
} catch {
|
|
3616
|
-
return [];
|
|
4722
|
+
return { matches: [] };
|
|
3617
4723
|
}
|
|
3618
4724
|
let results = await this.ripgrepSearch(pattern, baseFull, glob);
|
|
3619
4725
|
if (results === null) results = await this.literalSearch(pattern, baseFull, glob);
|
|
@@ -3623,7 +4729,7 @@ var FilesystemBackend = class {
|
|
|
3623
4729
|
line: lineNum,
|
|
3624
4730
|
text: lineText
|
|
3625
4731
|
});
|
|
3626
|
-
return matches;
|
|
4732
|
+
return { matches };
|
|
3627
4733
|
}
|
|
3628
4734
|
/**
|
|
3629
4735
|
* Search using ripgrep with fixed-string (literal) mode.
|
|
@@ -3703,6 +4809,7 @@ var FilesystemBackend = class {
|
|
|
3703
4809
|
dot: true
|
|
3704
4810
|
});
|
|
3705
4811
|
for (const fp of files) try {
|
|
4812
|
+
if (!isTextMimeType(getMimeType(fp))) continue;
|
|
3706
4813
|
if (includeGlob && !micromatch.default.isMatch(node_path.default.basename(fp), includeGlob)) continue;
|
|
3707
4814
|
if ((await node_fs_promises.default.stat(fp)).size > this.maxFileSizeBytes) continue;
|
|
3708
4815
|
const lines = (await node_fs_promises.default.readFile(fp, "utf-8")).split("\n");
|
|
@@ -3730,13 +4837,13 @@ var FilesystemBackend = class {
|
|
|
3730
4837
|
/**
|
|
3731
4838
|
* Structured glob matching returning FileInfo objects.
|
|
3732
4839
|
*/
|
|
3733
|
-
async
|
|
4840
|
+
async glob(pattern, searchPath = "/") {
|
|
3734
4841
|
if (pattern.startsWith("/")) pattern = pattern.substring(1);
|
|
3735
4842
|
const resolvedSearchPath = searchPath === "/" ? this.cwd : this.resolvePath(searchPath);
|
|
3736
4843
|
try {
|
|
3737
|
-
if (!(await node_fs_promises.default.stat(resolvedSearchPath)).isDirectory()) return [];
|
|
4844
|
+
if (!(await node_fs_promises.default.stat(resolvedSearchPath)).isDirectory()) return { files: [] };
|
|
3738
4845
|
} catch {
|
|
3739
|
-
return [];
|
|
4846
|
+
return { files: [] };
|
|
3740
4847
|
}
|
|
3741
4848
|
const results = [];
|
|
3742
4849
|
try {
|
|
@@ -3776,7 +4883,7 @@ var FilesystemBackend = class {
|
|
|
3776
4883
|
}
|
|
3777
4884
|
} catch {}
|
|
3778
4885
|
results.sort((a, b) => a.path.localeCompare(b.path));
|
|
3779
|
-
return results;
|
|
4886
|
+
return { files: results };
|
|
3780
4887
|
}
|
|
3781
4888
|
/**
|
|
3782
4889
|
* Upload multiple files to the filesystem.
|
|
@@ -3855,7 +4962,6 @@ var FilesystemBackend = class {
|
|
|
3855
4962
|
return responses;
|
|
3856
4963
|
}
|
|
3857
4964
|
};
|
|
3858
|
-
|
|
3859
4965
|
//#endregion
|
|
3860
4966
|
//#region src/backends/composite.ts
|
|
3861
4967
|
/**
|
|
@@ -3872,9 +4978,9 @@ var CompositeBackend = class {
|
|
|
3872
4978
|
routes;
|
|
3873
4979
|
sortedRoutes;
|
|
3874
4980
|
constructor(defaultBackend, routes) {
|
|
3875
|
-
this.default = defaultBackend;
|
|
3876
|
-
this.routes = routes;
|
|
3877
|
-
this.sortedRoutes = Object.entries(routes).sort((a, b) => b[0].length - a[0].length);
|
|
4981
|
+
this.default = isSandboxProtocol(defaultBackend) ? adaptSandboxProtocol(defaultBackend) : adaptBackendProtocol(defaultBackend);
|
|
4982
|
+
this.routes = Object.fromEntries(Object.entries(routes).map(([k, v]) => [k, isSandboxProtocol(v) ? adaptSandboxProtocol(v) : adaptBackendProtocol(v)]));
|
|
4983
|
+
this.sortedRoutes = Object.entries(this.routes).sort((a, b) => b[0].length - a[0].length);
|
|
3878
4984
|
}
|
|
3879
4985
|
/** Delegates to default backend's id if it is a sandbox, otherwise empty string. */
|
|
3880
4986
|
get id() {
|
|
@@ -3898,25 +5004,27 @@ var CompositeBackend = class {
|
|
|
3898
5004
|
* List files and directories in the specified directory (non-recursive).
|
|
3899
5005
|
*
|
|
3900
5006
|
* @param path - Absolute path to directory
|
|
3901
|
-
* @returns
|
|
3902
|
-
*
|
|
5007
|
+
* @returns LsResult with list of FileInfo objects (with route prefixes added) on success or error on failure.
|
|
5008
|
+
* Directories have a trailing / in their path and is_dir=true.
|
|
3903
5009
|
*/
|
|
3904
|
-
async
|
|
5010
|
+
async ls(path) {
|
|
3905
5011
|
for (const [routePrefix, backend] of this.sortedRoutes) if (path.startsWith(routePrefix.replace(/\/$/, ""))) {
|
|
3906
5012
|
const suffix = path.substring(routePrefix.length);
|
|
3907
5013
|
const searchPath = suffix ? "/" + suffix : "/";
|
|
3908
|
-
const
|
|
5014
|
+
const result = await backend.ls(searchPath);
|
|
5015
|
+
if (result.error) return result;
|
|
3909
5016
|
const prefixed = [];
|
|
3910
|
-
for (const fi of
|
|
5017
|
+
for (const fi of result.files || []) prefixed.push({
|
|
3911
5018
|
...fi,
|
|
3912
5019
|
path: routePrefix.slice(0, -1) + fi.path
|
|
3913
5020
|
});
|
|
3914
|
-
return prefixed;
|
|
5021
|
+
return { files: prefixed };
|
|
3915
5022
|
}
|
|
3916
5023
|
if (path === "/") {
|
|
3917
5024
|
const results = [];
|
|
3918
|
-
const
|
|
3919
|
-
|
|
5025
|
+
const defaultResult = await this.default.ls(path);
|
|
5026
|
+
if (defaultResult.error) return defaultResult;
|
|
5027
|
+
results.push(...defaultResult.files || []);
|
|
3920
5028
|
for (const [routePrefix] of this.sortedRoutes) results.push({
|
|
3921
5029
|
path: routePrefix,
|
|
3922
5030
|
is_dir: true,
|
|
@@ -3924,9 +5032,9 @@ var CompositeBackend = class {
|
|
|
3924
5032
|
modified_at: ""
|
|
3925
5033
|
});
|
|
3926
5034
|
results.sort((a, b) => a.path.localeCompare(b.path));
|
|
3927
|
-
return results;
|
|
5035
|
+
return { files: results };
|
|
3928
5036
|
}
|
|
3929
|
-
return await this.default.
|
|
5037
|
+
return await this.default.ls(path);
|
|
3930
5038
|
}
|
|
3931
5039
|
/**
|
|
3932
5040
|
* Read file content, routing to appropriate backend.
|
|
@@ -3944,7 +5052,7 @@ var CompositeBackend = class {
|
|
|
3944
5052
|
* Read file content as raw FileData.
|
|
3945
5053
|
*
|
|
3946
5054
|
* @param filePath - Absolute file path
|
|
3947
|
-
* @returns
|
|
5055
|
+
* @returns ReadRawResult with raw file data on success or error on failure
|
|
3948
5056
|
*/
|
|
3949
5057
|
async readRaw(filePath) {
|
|
3950
5058
|
const [backend, strippedKey] = this.getBackendAndKey(filePath);
|
|
@@ -3953,53 +5061,59 @@ var CompositeBackend = class {
|
|
|
3953
5061
|
/**
|
|
3954
5062
|
* Structured search results or error string for invalid input.
|
|
3955
5063
|
*/
|
|
3956
|
-
async
|
|
5064
|
+
async grep(pattern, path = "/", glob = null) {
|
|
3957
5065
|
for (const [routePrefix, backend] of this.sortedRoutes) if (path.startsWith(routePrefix.replace(/\/$/, ""))) {
|
|
3958
5066
|
const searchPath = path.substring(routePrefix.length - 1);
|
|
3959
|
-
const raw = await backend.
|
|
3960
|
-
if (
|
|
3961
|
-
return raw.map((m) => ({
|
|
5067
|
+
const raw = await backend.grep(pattern, searchPath || "/", glob);
|
|
5068
|
+
if (raw.error) return raw;
|
|
5069
|
+
return { matches: (raw.matches || []).map((m) => ({
|
|
3962
5070
|
...m,
|
|
3963
5071
|
path: routePrefix.slice(0, -1) + m.path
|
|
3964
|
-
}));
|
|
5072
|
+
})) };
|
|
3965
5073
|
}
|
|
3966
5074
|
const allMatches = [];
|
|
3967
|
-
const rawDefault = await this.default.
|
|
3968
|
-
if (
|
|
3969
|
-
allMatches.push(...rawDefault);
|
|
5075
|
+
const rawDefault = await this.default.grep(pattern, path, glob);
|
|
5076
|
+
if (rawDefault.error) return rawDefault;
|
|
5077
|
+
allMatches.push(...rawDefault.matches || []);
|
|
3970
5078
|
for (const [routePrefix, backend] of Object.entries(this.routes)) {
|
|
3971
|
-
const raw = await backend.
|
|
3972
|
-
if (
|
|
3973
|
-
|
|
5079
|
+
const raw = await backend.grep(pattern, "/", glob);
|
|
5080
|
+
if (raw.error) return raw;
|
|
5081
|
+
const matches = (raw.matches || []).map((m) => ({
|
|
3974
5082
|
...m,
|
|
3975
5083
|
path: routePrefix.slice(0, -1) + m.path
|
|
3976
|
-
}))
|
|
5084
|
+
}));
|
|
5085
|
+
allMatches.push(...matches);
|
|
3977
5086
|
}
|
|
3978
|
-
return allMatches;
|
|
5087
|
+
return { matches: allMatches };
|
|
3979
5088
|
}
|
|
3980
5089
|
/**
|
|
3981
5090
|
* Structured glob matching returning FileInfo objects.
|
|
3982
5091
|
*/
|
|
3983
|
-
async
|
|
5092
|
+
async glob(pattern, path = "/") {
|
|
3984
5093
|
const results = [];
|
|
3985
5094
|
for (const [routePrefix, backend] of this.sortedRoutes) if (path.startsWith(routePrefix.replace(/\/$/, ""))) {
|
|
3986
5095
|
const searchPath = path.substring(routePrefix.length - 1);
|
|
3987
|
-
|
|
5096
|
+
const result = await backend.glob(pattern, searchPath || "/");
|
|
5097
|
+
if (result.error) return result;
|
|
5098
|
+
return { files: (result.files || []).map((fi) => ({
|
|
3988
5099
|
...fi,
|
|
3989
5100
|
path: routePrefix.slice(0, -1) + fi.path
|
|
3990
|
-
}));
|
|
5101
|
+
})) };
|
|
3991
5102
|
}
|
|
3992
|
-
const
|
|
3993
|
-
|
|
5103
|
+
const defaultResult = await this.default.glob(pattern, path);
|
|
5104
|
+
if (defaultResult.error) return defaultResult;
|
|
5105
|
+
results.push(...defaultResult.files || []);
|
|
3994
5106
|
for (const [routePrefix, backend] of Object.entries(this.routes)) {
|
|
3995
|
-
const
|
|
3996
|
-
|
|
5107
|
+
const result = await backend.glob(pattern, "/");
|
|
5108
|
+
if (result.error) continue;
|
|
5109
|
+
const files = (result.files || []).map((fi) => ({
|
|
3997
5110
|
...fi,
|
|
3998
5111
|
path: routePrefix.slice(0, -1) + fi.path
|
|
3999
|
-
}))
|
|
5112
|
+
}));
|
|
5113
|
+
results.push(...files);
|
|
4000
5114
|
}
|
|
4001
5115
|
results.sort((a, b) => a.path.localeCompare(b.path));
|
|
4002
|
-
return results;
|
|
5116
|
+
return { files: results };
|
|
4003
5117
|
}
|
|
4004
5118
|
/**
|
|
4005
5119
|
* Create a new file, routing to appropriate backend.
|
|
@@ -4104,7 +5218,6 @@ var CompositeBackend = class {
|
|
|
4104
5218
|
return results;
|
|
4105
5219
|
}
|
|
4106
5220
|
};
|
|
4107
|
-
|
|
4108
5221
|
//#endregion
|
|
4109
5222
|
//#region src/backends/local-shell.ts
|
|
4110
5223
|
/**
|
|
@@ -4228,7 +5341,7 @@ var LocalShellBackend = class LocalShellBackend extends FilesystemBackend {
|
|
|
4228
5341
|
*/
|
|
4229
5342
|
async read(filePath, offset = 0, limit = 500) {
|
|
4230
5343
|
const result = await super.read(filePath, offset, limit);
|
|
4231
|
-
if (
|
|
5344
|
+
if (result.error?.includes("ENOENT")) return { error: `File '${filePath}' not found` };
|
|
4232
5345
|
return result;
|
|
4233
5346
|
}
|
|
4234
5347
|
/**
|
|
@@ -4245,25 +5358,26 @@ var LocalShellBackend = class LocalShellBackend extends FilesystemBackend {
|
|
|
4245
5358
|
/**
|
|
4246
5359
|
* List directory contents, returning paths relative to rootDir.
|
|
4247
5360
|
*/
|
|
4248
|
-
async
|
|
4249
|
-
const
|
|
4250
|
-
if (
|
|
5361
|
+
async ls(dirPath) {
|
|
5362
|
+
const result = await super.ls(dirPath);
|
|
5363
|
+
if (result.error) return result;
|
|
5364
|
+
if (this.virtualMode) return result;
|
|
4251
5365
|
const cwdPrefix = this.cwd.endsWith(node_path.default.sep) ? this.cwd : this.cwd + node_path.default.sep;
|
|
4252
|
-
return
|
|
5366
|
+
return { files: (result.files || []).map((info) => ({
|
|
4253
5367
|
...info,
|
|
4254
5368
|
path: info.path.startsWith(cwdPrefix) ? info.path.slice(cwdPrefix.length) : info.path
|
|
4255
|
-
}));
|
|
5369
|
+
})) };
|
|
4256
5370
|
}
|
|
4257
5371
|
/**
|
|
4258
5372
|
* Glob matching that returns relative paths and includes directories.
|
|
4259
5373
|
*/
|
|
4260
|
-
async
|
|
5374
|
+
async glob(pattern, searchPath = "/") {
|
|
4261
5375
|
if (pattern.startsWith("/")) pattern = pattern.substring(1);
|
|
4262
5376
|
const resolvedSearchPath = searchPath === "/" || searchPath === "" ? this.cwd : this.virtualMode ? node_path.default.resolve(this.cwd, searchPath.replace(/^\//, "")) : node_path.default.resolve(this.cwd, searchPath);
|
|
4263
5377
|
try {
|
|
4264
|
-
if (!(await node_fs_promises.default.stat(resolvedSearchPath)).isDirectory()) return [];
|
|
5378
|
+
if (!(await node_fs_promises.default.stat(resolvedSearchPath)).isDirectory()) return { files: [] };
|
|
4265
5379
|
} catch {
|
|
4266
|
-
return [];
|
|
5380
|
+
return { files: [] };
|
|
4267
5381
|
}
|
|
4268
5382
|
const formatPath = (rel) => this.virtualMode ? `/${rel}` : rel;
|
|
4269
5383
|
const globOpts = {
|
|
@@ -4305,7 +5419,7 @@ var LocalShellBackend = class LocalShellBackend extends FilesystemBackend {
|
|
|
4305
5419
|
const [fileInfos, dirInfos] = await Promise.all([Promise.all(fileMatches.map(statFile)), Promise.all(dirMatches.map(statDir))]);
|
|
4306
5420
|
const results = [...fileInfos, ...dirInfos].filter((info) => info !== null);
|
|
4307
5421
|
results.sort((a, b) => a.path.localeCompare(b.path));
|
|
4308
|
-
return results;
|
|
5422
|
+
return { files: results };
|
|
4309
5423
|
}
|
|
4310
5424
|
/**
|
|
4311
5425
|
* Execute a shell command directly on the host system.
|
|
@@ -4409,7 +5523,6 @@ var LocalShellBackend = class LocalShellBackend extends FilesystemBackend {
|
|
|
4409
5523
|
return backend;
|
|
4410
5524
|
}
|
|
4411
5525
|
};
|
|
4412
|
-
|
|
4413
5526
|
//#endregion
|
|
4414
5527
|
//#region src/backends/sandbox.ts
|
|
4415
5528
|
/**
|
|
@@ -4581,9 +5694,9 @@ var BaseSandbox = class {
|
|
|
4581
5694
|
* including Alpine. No Python or Node.js needed.
|
|
4582
5695
|
*
|
|
4583
5696
|
* @param path - Absolute path to directory
|
|
4584
|
-
* @returns
|
|
5697
|
+
* @returns LsResult with list of FileInfo objects on success or error on failure.
|
|
4585
5698
|
*/
|
|
4586
|
-
async
|
|
5699
|
+
async ls(path) {
|
|
4587
5700
|
const command = buildLsCommand(path);
|
|
4588
5701
|
const result = await this.execute(command);
|
|
4589
5702
|
const infos = [];
|
|
@@ -4598,7 +5711,7 @@ var BaseSandbox = class {
|
|
|
4598
5711
|
modified_at: (/* @__PURE__ */ new Date(parsed.mtime * 1e3)).toISOString()
|
|
4599
5712
|
});
|
|
4600
5713
|
}
|
|
4601
|
-
return infos;
|
|
5714
|
+
return { files: infos };
|
|
4602
5715
|
}
|
|
4603
5716
|
/**
|
|
4604
5717
|
* Read file content with line numbers.
|
|
@@ -4613,11 +5726,26 @@ var BaseSandbox = class {
|
|
|
4613
5726
|
* @returns Formatted file content with line numbers, or error message
|
|
4614
5727
|
*/
|
|
4615
5728
|
async read(filePath, offset = 0, limit = 500) {
|
|
4616
|
-
|
|
5729
|
+
const mimeType = getMimeType(filePath);
|
|
5730
|
+
if (!isTextMimeType(mimeType)) {
|
|
5731
|
+
const results = await this.downloadFiles([filePath]);
|
|
5732
|
+
if (results[0].error || !results[0].content) return { error: `File '${filePath}' not found` };
|
|
5733
|
+
return {
|
|
5734
|
+
content: results[0].content,
|
|
5735
|
+
mimeType
|
|
5736
|
+
};
|
|
5737
|
+
}
|
|
5738
|
+
if (limit === 0) return {
|
|
5739
|
+
content: "",
|
|
5740
|
+
mimeType
|
|
5741
|
+
};
|
|
4617
5742
|
const command = buildReadCommand(filePath, offset, limit);
|
|
4618
5743
|
const result = await this.execute(command);
|
|
4619
|
-
if (result.exitCode !== 0) return
|
|
4620
|
-
return
|
|
5744
|
+
if (result.exitCode !== 0) return { error: `File '${filePath}' not found` };
|
|
5745
|
+
return {
|
|
5746
|
+
content: result.output,
|
|
5747
|
+
mimeType
|
|
5748
|
+
};
|
|
4621
5749
|
}
|
|
4622
5750
|
/**
|
|
4623
5751
|
* Read file content as raw FileData.
|
|
@@ -4625,18 +5753,25 @@ var BaseSandbox = class {
|
|
|
4625
5753
|
* Uses downloadFiles() directly — no runtime needed on the sandbox host.
|
|
4626
5754
|
*
|
|
4627
5755
|
* @param filePath - Absolute file path
|
|
4628
|
-
* @returns
|
|
5756
|
+
* @returns ReadRawResult with raw file data on success or error on failure
|
|
4629
5757
|
*/
|
|
4630
5758
|
async readRaw(filePath) {
|
|
4631
5759
|
const results = await this.downloadFiles([filePath]);
|
|
4632
|
-
if (results[0].error || !results[0].content)
|
|
4633
|
-
const lines = new TextDecoder().decode(results[0].content).split("\n");
|
|
5760
|
+
if (results[0].error || !results[0].content) return { error: `File '${filePath}' not found` };
|
|
4634
5761
|
const now = (/* @__PURE__ */ new Date()).toISOString();
|
|
4635
|
-
|
|
4636
|
-
|
|
5762
|
+
const mimeType = getMimeType(filePath);
|
|
5763
|
+
if (!isTextMimeType(mimeType)) return { data: {
|
|
5764
|
+
content: results[0].content,
|
|
5765
|
+
mimeType,
|
|
4637
5766
|
created_at: now,
|
|
4638
5767
|
modified_at: now
|
|
4639
|
-
};
|
|
5768
|
+
} };
|
|
5769
|
+
return { data: {
|
|
5770
|
+
content: new TextDecoder().decode(results[0].content),
|
|
5771
|
+
mimeType,
|
|
5772
|
+
created_at: now,
|
|
5773
|
+
modified_at: now
|
|
5774
|
+
} };
|
|
4640
5775
|
}
|
|
4641
5776
|
/**
|
|
4642
5777
|
* Search for a literal text pattern in files using grep.
|
|
@@ -4646,23 +5781,25 @@ var BaseSandbox = class {
|
|
|
4646
5781
|
* @param glob - Optional glob pattern to filter which files to search.
|
|
4647
5782
|
* @returns List of GrepMatch dicts containing path, line number, and matched text.
|
|
4648
5783
|
*/
|
|
4649
|
-
async
|
|
5784
|
+
async grep(pattern, path = "/", glob = null) {
|
|
4650
5785
|
const command = buildGrepCommand(pattern, path, glob);
|
|
4651
5786
|
const output = (await this.execute(command)).output.trim();
|
|
4652
|
-
if (!output) return [];
|
|
5787
|
+
if (!output) return { matches: [] };
|
|
4653
5788
|
const matches = [];
|
|
4654
5789
|
for (const line of output.split("\n")) {
|
|
4655
5790
|
const parts = line.split(":");
|
|
4656
5791
|
if (parts.length >= 3) {
|
|
5792
|
+
const filePath = parts[0];
|
|
5793
|
+
if (!isTextMimeType(getMimeType(filePath))) continue;
|
|
4657
5794
|
const lineNum = parseInt(parts[1], 10);
|
|
4658
5795
|
if (!isNaN(lineNum)) matches.push({
|
|
4659
|
-
path:
|
|
5796
|
+
path: filePath,
|
|
4660
5797
|
line: lineNum,
|
|
4661
5798
|
text: parts.slice(2).join(":")
|
|
4662
5799
|
});
|
|
4663
5800
|
}
|
|
4664
5801
|
}
|
|
4665
|
-
return matches;
|
|
5802
|
+
return { matches };
|
|
4666
5803
|
}
|
|
4667
5804
|
/**
|
|
4668
5805
|
* Structured glob matching returning FileInfo objects.
|
|
@@ -4677,7 +5814,7 @@ var BaseSandbox = class {
|
|
|
4677
5814
|
* - `?` matches a single character except `/`
|
|
4678
5815
|
* - `[...]` character classes
|
|
4679
5816
|
*/
|
|
4680
|
-
async
|
|
5817
|
+
async glob(pattern, path = "/") {
|
|
4681
5818
|
const command = buildFindCommand(path);
|
|
4682
5819
|
const result = await this.execute(command);
|
|
4683
5820
|
const regex = globToPathRegex(pattern);
|
|
@@ -4695,7 +5832,7 @@ var BaseSandbox = class {
|
|
|
4695
5832
|
modified_at: (/* @__PURE__ */ new Date(parsed.mtime * 1e3)).toISOString()
|
|
4696
5833
|
});
|
|
4697
5834
|
}
|
|
4698
|
-
return infos;
|
|
5835
|
+
return { files: infos };
|
|
4699
5836
|
}
|
|
4700
5837
|
/**
|
|
4701
5838
|
* Create a new file with content.
|
|
@@ -4708,8 +5845,11 @@ var BaseSandbox = class {
|
|
|
4708
5845
|
const existCheck = await this.downloadFiles([filePath]);
|
|
4709
5846
|
if (existCheck[0].content !== null && existCheck[0].error === null) return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
|
|
4710
5847
|
} catch {}
|
|
4711
|
-
const
|
|
4712
|
-
|
|
5848
|
+
const mimeType = getMimeType(filePath);
|
|
5849
|
+
let fileContent;
|
|
5850
|
+
if (isTextMimeType(mimeType)) fileContent = new TextEncoder().encode(content);
|
|
5851
|
+
else fileContent = Buffer.from(content, "base64");
|
|
5852
|
+
const results = await this.uploadFiles([[filePath, fileContent]]);
|
|
4713
5853
|
if (results[0].error) return { error: `Failed to write to ${filePath}: ${results[0].error}` };
|
|
4714
5854
|
return {
|
|
4715
5855
|
path: filePath,
|
|
@@ -4808,7 +5948,197 @@ var BaseSandbox = class {
|
|
|
4808
5948
|
};
|
|
4809
5949
|
}
|
|
4810
5950
|
};
|
|
4811
|
-
|
|
5951
|
+
//#endregion
|
|
5952
|
+
//#region src/backends/langsmith.ts
|
|
5953
|
+
/**
|
|
5954
|
+
* LangSmith Sandbox backend for deepagents.
|
|
5955
|
+
*
|
|
5956
|
+
* @example
|
|
5957
|
+
* ```typescript
|
|
5958
|
+
* import { LangSmithSandbox, createDeepAgent } from "deepagents";
|
|
5959
|
+
*
|
|
5960
|
+
* const sandbox = await LangSmithSandbox.create({ templateName: "deepagents-cli" });
|
|
5961
|
+
*
|
|
5962
|
+
* const agent = createDeepAgent({ model, backend: sandbox });
|
|
5963
|
+
*
|
|
5964
|
+
* try {
|
|
5965
|
+
* await agent.invoke({ messages: [...] });
|
|
5966
|
+
* } finally {
|
|
5967
|
+
* await sandbox.close();
|
|
5968
|
+
* }
|
|
5969
|
+
* ```
|
|
5970
|
+
*/
|
|
5971
|
+
/**
|
|
5972
|
+
* LangSmith Sandbox backend for deepagents.
|
|
5973
|
+
*
|
|
5974
|
+
* Extends `BaseSandbox` to provide command execution and file operations
|
|
5975
|
+
* via the LangSmith Sandbox API.
|
|
5976
|
+
*
|
|
5977
|
+
* Use the static `LangSmithSandbox.create()` factory for the simplest setup,
|
|
5978
|
+
* or construct directly with an existing `Sandbox` instance.
|
|
5979
|
+
*/
|
|
5980
|
+
var LangSmithSandbox = class LangSmithSandbox extends BaseSandbox {
|
|
5981
|
+
#sandbox;
|
|
5982
|
+
#defaultTimeout;
|
|
5983
|
+
#isRunning = true;
|
|
5984
|
+
constructor(options) {
|
|
5985
|
+
super();
|
|
5986
|
+
this.#sandbox = options.sandbox;
|
|
5987
|
+
this.#defaultTimeout = options.defaultTimeout ?? 1800;
|
|
5988
|
+
}
|
|
5989
|
+
/** Whether the sandbox is currently active. */
|
|
5990
|
+
get isRunning() {
|
|
5991
|
+
return this.#isRunning;
|
|
5992
|
+
}
|
|
5993
|
+
/** Return the LangSmith sandbox name as the unique identifier. */
|
|
5994
|
+
get id() {
|
|
5995
|
+
return this.#sandbox.name;
|
|
5996
|
+
}
|
|
5997
|
+
/**
|
|
5998
|
+
* Execute a shell command in the LangSmith sandbox.
|
|
5999
|
+
*
|
|
6000
|
+
* @param command - Shell command string to execute
|
|
6001
|
+
* @param options.timeout - Override timeout in seconds; 0 disables timeout
|
|
6002
|
+
*/
|
|
6003
|
+
async execute(command, options) {
|
|
6004
|
+
const effectiveTimeout = options?.timeout !== void 0 ? options.timeout : this.#defaultTimeout;
|
|
6005
|
+
const result = await this.#sandbox.run(command, { timeout: effectiveTimeout });
|
|
6006
|
+
const out = result.stdout ?? "";
|
|
6007
|
+
return {
|
|
6008
|
+
output: result.stderr ? out ? `${out}\n${result.stderr}` : result.stderr : out,
|
|
6009
|
+
exitCode: result.exit_code,
|
|
6010
|
+
truncated: false
|
|
6011
|
+
};
|
|
6012
|
+
}
|
|
6013
|
+
/**
|
|
6014
|
+
* Download files from the sandbox using LangSmith's native file read API.
|
|
6015
|
+
* @param paths - List of file paths to download
|
|
6016
|
+
* @returns List of FileDownloadResponse objects, one per input path
|
|
6017
|
+
*/
|
|
6018
|
+
async downloadFiles(paths) {
|
|
6019
|
+
const responses = [];
|
|
6020
|
+
for (const path of paths) try {
|
|
6021
|
+
const content = await this.#sandbox.read(path);
|
|
6022
|
+
responses.push({
|
|
6023
|
+
path,
|
|
6024
|
+
content,
|
|
6025
|
+
error: null
|
|
6026
|
+
});
|
|
6027
|
+
} catch (err) {
|
|
6028
|
+
if (err instanceof langsmith_experimental_sandbox.LangSmithResourceNotFoundError) responses.push({
|
|
6029
|
+
path,
|
|
6030
|
+
content: null,
|
|
6031
|
+
error: "file_not_found"
|
|
6032
|
+
});
|
|
6033
|
+
else if (err instanceof langsmith_experimental_sandbox.LangSmithSandboxError) {
|
|
6034
|
+
const error = String(err.message).toLowerCase().includes("is a directory") ? "is_directory" : "file_not_found";
|
|
6035
|
+
responses.push({
|
|
6036
|
+
path,
|
|
6037
|
+
content: null,
|
|
6038
|
+
error
|
|
6039
|
+
});
|
|
6040
|
+
} else responses.push({
|
|
6041
|
+
path,
|
|
6042
|
+
content: null,
|
|
6043
|
+
error: "invalid_path"
|
|
6044
|
+
});
|
|
6045
|
+
}
|
|
6046
|
+
return responses;
|
|
6047
|
+
}
|
|
6048
|
+
/**
|
|
6049
|
+
* Upload files to the sandbox using LangSmith's native file write API.
|
|
6050
|
+
* @param files - List of [path, content] tuples to upload
|
|
6051
|
+
* @returns List of FileUploadResponse objects, one per input file
|
|
6052
|
+
*/
|
|
6053
|
+
async uploadFiles(files) {
|
|
6054
|
+
const responses = [];
|
|
6055
|
+
for (const [path, content] of files) try {
|
|
6056
|
+
await this.#sandbox.write(path, content);
|
|
6057
|
+
responses.push({
|
|
6058
|
+
path,
|
|
6059
|
+
error: null
|
|
6060
|
+
});
|
|
6061
|
+
} catch {
|
|
6062
|
+
responses.push({
|
|
6063
|
+
path,
|
|
6064
|
+
error: "permission_denied"
|
|
6065
|
+
});
|
|
6066
|
+
}
|
|
6067
|
+
return responses;
|
|
6068
|
+
}
|
|
6069
|
+
/**
|
|
6070
|
+
* Delete this sandbox and mark it as no longer running.
|
|
6071
|
+
*
|
|
6072
|
+
* After calling this, `isRunning` will be `false` and the sandbox
|
|
6073
|
+
* cannot be used again.
|
|
6074
|
+
*/
|
|
6075
|
+
async close() {
|
|
6076
|
+
await this.#sandbox.delete();
|
|
6077
|
+
this.#isRunning = false;
|
|
6078
|
+
}
|
|
6079
|
+
/**
|
|
6080
|
+
* Create and return a new LangSmithSandbox in one step.
|
|
6081
|
+
*
|
|
6082
|
+
* This is the recommended way to create a sandbox — no need to import
|
|
6083
|
+
* anything from `langsmith/experimental/sandbox` directly.
|
|
6084
|
+
*
|
|
6085
|
+
* @example
|
|
6086
|
+
* ```typescript
|
|
6087
|
+
* const sandbox = await LangSmithSandbox.create({ templateName: "deepagents" });
|
|
6088
|
+
* try {
|
|
6089
|
+
* const agent = createDeepAgent({ model, backend: sandbox });
|
|
6090
|
+
* await agent.invoke({ messages: [...] });
|
|
6091
|
+
* } finally {
|
|
6092
|
+
* await sandbox.close();
|
|
6093
|
+
* }
|
|
6094
|
+
* ```
|
|
6095
|
+
*/
|
|
6096
|
+
static async create(options = {}) {
|
|
6097
|
+
const { templateName = "deepagents", apiKey = process.env.LANGSMITH_API_KEY, defaultTimeout } = options;
|
|
6098
|
+
return new LangSmithSandbox({
|
|
6099
|
+
sandbox: await new langsmith_experimental_sandbox.SandboxClient({ apiKey }).createSandbox(templateName),
|
|
6100
|
+
defaultTimeout
|
|
6101
|
+
});
|
|
6102
|
+
}
|
|
6103
|
+
};
|
|
6104
|
+
//#endregion
|
|
6105
|
+
//#region src/errors.ts
|
|
6106
|
+
const CONFIGURATION_ERROR_SYMBOL = Symbol.for("deepagents.configuration_error");
|
|
6107
|
+
/**
|
|
6108
|
+
* Thrown when `createDeepAgent` receives invalid configuration.
|
|
6109
|
+
*
|
|
6110
|
+
* Follows the same pattern as {@link SandboxError}: a human-readable
|
|
6111
|
+
* `message`, a structured `code` for programmatic handling, and a
|
|
6112
|
+
* static `isInstance` guard that works across realms.
|
|
6113
|
+
*
|
|
6114
|
+
* @example
|
|
6115
|
+
* ```typescript
|
|
6116
|
+
* try {
|
|
6117
|
+
* createDeepAgent({ tools: [myTool] });
|
|
6118
|
+
* } catch (error) {
|
|
6119
|
+
* if (ConfigurationError.isInstance(error)) {
|
|
6120
|
+
* switch (error.code) {
|
|
6121
|
+
* case "TOOL_NAME_COLLISION":
|
|
6122
|
+
* console.error("Rename your tool:", error.message);
|
|
6123
|
+
* break;
|
|
6124
|
+
* }
|
|
6125
|
+
* }
|
|
6126
|
+
* }
|
|
6127
|
+
* ```
|
|
6128
|
+
*/
|
|
6129
|
+
var ConfigurationError = class ConfigurationError extends Error {
|
|
6130
|
+
[CONFIGURATION_ERROR_SYMBOL] = true;
|
|
6131
|
+
name = "ConfigurationError";
|
|
6132
|
+
constructor(message, code, cause) {
|
|
6133
|
+
super(message);
|
|
6134
|
+
this.code = code;
|
|
6135
|
+
this.cause = cause;
|
|
6136
|
+
Object.setPrototypeOf(this, ConfigurationError.prototype);
|
|
6137
|
+
}
|
|
6138
|
+
static isInstance(error) {
|
|
6139
|
+
return typeof error === "object" && error !== null && error[CONFIGURATION_ERROR_SYMBOL] === true;
|
|
6140
|
+
}
|
|
6141
|
+
};
|
|
4812
6142
|
//#endregion
|
|
4813
6143
|
//#region src/middleware/cache.ts
|
|
4814
6144
|
/**
|
|
@@ -4852,10 +6182,14 @@ function createCacheBreakpointMiddleware() {
|
|
|
4852
6182
|
}
|
|
4853
6183
|
});
|
|
4854
6184
|
}
|
|
4855
|
-
|
|
4856
6185
|
//#endregion
|
|
4857
6186
|
//#region src/agent.ts
|
|
4858
6187
|
const BASE_PROMPT = `In order to complete the objective that the user asks of you, you have access to a number of standard tools.`;
|
|
6188
|
+
const BUILTIN_TOOL_NAMES = new Set([
|
|
6189
|
+
...FILESYSTEM_TOOL_NAMES,
|
|
6190
|
+
"task",
|
|
6191
|
+
"write_todos"
|
|
6192
|
+
]);
|
|
4859
6193
|
/**
|
|
4860
6194
|
* Detect whether a model is an Anthropic model.
|
|
4861
6195
|
* Used to gate Anthropic-specific prompt caching optimizations (cache_control breakpoints).
|
|
@@ -4901,6 +6235,8 @@ function isAnthropicModel(model) {
|
|
|
4901
6235
|
*/
|
|
4902
6236
|
function createDeepAgent(params = {}) {
|
|
4903
6237
|
const { model = "claude-sonnet-4-5-20250929", tools = [], systemPrompt, middleware: customMiddleware = [], subagents = [], responseFormat, contextSchema, checkpointer, store, backend, interruptOn, name, memory, skills } = params;
|
|
6238
|
+
const collidingTools = tools.map((t) => t.name).filter((n) => typeof n === "string" && BUILTIN_TOOL_NAMES.has(n));
|
|
6239
|
+
if (collidingTools.length > 0) throw new ConfigurationError(`Tool name(s) [${collidingTools.join(", ")}] conflict with built-in tools. Rename your custom tools to avoid this.`, "TOOL_NAME_COLLISION");
|
|
4904
6240
|
const anthropicModel = isAnthropicModel(model);
|
|
4905
6241
|
const finalSystemPrompt = new langchain.SystemMessage({ content: systemPrompt ? typeof systemPrompt === "string" ? [{
|
|
4906
6242
|
type: "text",
|
|
@@ -4936,13 +6272,19 @@ function createDeepAgent(params = {}) {
|
|
|
4936
6272
|
addCacheControl: anthropicModel
|
|
4937
6273
|
})] : [];
|
|
4938
6274
|
/**
|
|
6275
|
+
* Split the unified subagents array into sync and async subagents.
|
|
6276
|
+
* AsyncSubAgents are identified by the presence of a `graphId` field.
|
|
6277
|
+
*/
|
|
6278
|
+
const syncSubAgents = subagents.filter((a) => !isAsyncSubAgent(a));
|
|
6279
|
+
const asyncSubAgents = subagents.filter((a) => isAsyncSubAgent(a));
|
|
6280
|
+
/**
|
|
4939
6281
|
* Process subagents to add SkillsMiddleware for those with their own skills.
|
|
4940
6282
|
*
|
|
4941
6283
|
* Custom subagents do NOT inherit skills from the main agent by default.
|
|
4942
6284
|
* Only the general-purpose subagent inherits the main agent's skills (via defaultMiddleware).
|
|
4943
6285
|
* If a custom subagent needs skills, it must specify its own `skills` array.
|
|
4944
6286
|
*/
|
|
4945
|
-
const processedSubagents =
|
|
6287
|
+
const processedSubagents = syncSubAgents.map((subagent) => {
|
|
4946
6288
|
/**
|
|
4947
6289
|
* CompiledSubAgent - use as-is (already has its own middleware baked in)
|
|
4948
6290
|
*/
|
|
@@ -4982,10 +6324,6 @@ function createDeepAgent(params = {}) {
|
|
|
4982
6324
|
model,
|
|
4983
6325
|
backend: filesystemBackend
|
|
4984
6326
|
}),
|
|
4985
|
-
(0, langchain.anthropicPromptCachingMiddleware)({
|
|
4986
|
-
unsupportedModelBehavior: "ignore",
|
|
4987
|
-
minMessagesToCache: 1
|
|
4988
|
-
}),
|
|
4989
6327
|
createPatchToolCallsMiddleware()
|
|
4990
6328
|
];
|
|
4991
6329
|
/**
|
|
@@ -5008,11 +6346,17 @@ function createDeepAgent(params = {}) {
|
|
|
5008
6346
|
createSubAgentMiddleware({
|
|
5009
6347
|
defaultModel: model,
|
|
5010
6348
|
defaultTools: tools,
|
|
5011
|
-
defaultMiddleware: [...subagentMiddleware, ...anthropicModel ? [
|
|
6349
|
+
defaultMiddleware: [...subagentMiddleware, ...anthropicModel ? [(0, langchain.anthropicPromptCachingMiddleware)({
|
|
6350
|
+
unsupportedModelBehavior: "ignore",
|
|
6351
|
+
minMessagesToCache: 1
|
|
6352
|
+
}), createCacheBreakpointMiddleware()] : []],
|
|
5012
6353
|
generalPurposeMiddleware: [
|
|
5013
6354
|
...subagentMiddleware,
|
|
5014
6355
|
...skillsMiddlewareArray,
|
|
5015
|
-
...anthropicModel ? [
|
|
6356
|
+
...anthropicModel ? [(0, langchain.anthropicPromptCachingMiddleware)({
|
|
6357
|
+
unsupportedModelBehavior: "ignore",
|
|
6358
|
+
minMessagesToCache: 1
|
|
6359
|
+
}), createCacheBreakpointMiddleware()] : []
|
|
5016
6360
|
],
|
|
5017
6361
|
defaultInterruptOn: interruptOn,
|
|
5018
6362
|
subagents: processedSubagents,
|
|
@@ -5022,17 +6366,17 @@ function createDeepAgent(params = {}) {
|
|
|
5022
6366
|
model,
|
|
5023
6367
|
backend: filesystemBackend
|
|
5024
6368
|
}),
|
|
5025
|
-
(0, langchain.anthropicPromptCachingMiddleware)({
|
|
5026
|
-
unsupportedModelBehavior: "ignore",
|
|
5027
|
-
minMessagesToCache: 1
|
|
5028
|
-
}),
|
|
5029
6369
|
createPatchToolCallsMiddleware()
|
|
5030
6370
|
],
|
|
5031
6371
|
...skillsMiddlewareArray,
|
|
5032
|
-
...
|
|
6372
|
+
...customMiddleware,
|
|
6373
|
+
...anthropicModel ? [(0, langchain.anthropicPromptCachingMiddleware)({
|
|
6374
|
+
unsupportedModelBehavior: "ignore",
|
|
6375
|
+
minMessagesToCache: 1
|
|
6376
|
+
}), createCacheBreakpointMiddleware()] : [],
|
|
5033
6377
|
...memoryMiddlewareArray,
|
|
5034
6378
|
...interruptOn ? [(0, langchain.humanInTheLoopMiddleware)({ interruptOn })] : [],
|
|
5035
|
-
...
|
|
6379
|
+
...asyncSubAgents && asyncSubAgents.length > 0 ? [createAsyncSubAgentMiddleware({ asyncSubAgents })] : []
|
|
5036
6380
|
],
|
|
5037
6381
|
...responseFormat != null && { responseFormat },
|
|
5038
6382
|
contextSchema,
|
|
@@ -5044,7 +6388,6 @@ function createDeepAgent(params = {}) {
|
|
|
5044
6388
|
metadata: { ls_integration: "deepagents" }
|
|
5045
6389
|
});
|
|
5046
6390
|
}
|
|
5047
|
-
|
|
5048
6391
|
//#endregion
|
|
5049
6392
|
//#region src/config.ts
|
|
5050
6393
|
/**
|
|
@@ -5138,7 +6481,6 @@ function createSettings(options = {}) {
|
|
|
5138
6481
|
}
|
|
5139
6482
|
};
|
|
5140
6483
|
}
|
|
5141
|
-
|
|
5142
6484
|
//#endregion
|
|
5143
6485
|
//#region src/middleware/agent-memory.ts
|
|
5144
6486
|
/**
|
|
@@ -5365,38 +6707,6 @@ function createAgentMemoryMiddleware(options) {
|
|
|
5365
6707
|
}
|
|
5366
6708
|
});
|
|
5367
6709
|
}
|
|
5368
|
-
|
|
5369
|
-
//#endregion
|
|
5370
|
-
//#region src/skills/loader.ts
|
|
5371
|
-
/**
|
|
5372
|
-
* Skill loader for parsing and loading agent skills from SKILL.md files.
|
|
5373
|
-
*
|
|
5374
|
-
* This module implements Anthropic's agent skills pattern with YAML frontmatter parsing.
|
|
5375
|
-
* Each skill is a directory containing a SKILL.md file with:
|
|
5376
|
-
* - YAML frontmatter (name, description required)
|
|
5377
|
-
* - Markdown instructions for the agent
|
|
5378
|
-
* - Optional supporting files (scripts, configs, etc.)
|
|
5379
|
-
*
|
|
5380
|
-
* @example
|
|
5381
|
-
* ```markdown
|
|
5382
|
-
* ---
|
|
5383
|
-
* name: web-research
|
|
5384
|
-
* description: Structured approach to conducting thorough web research
|
|
5385
|
-
* ---
|
|
5386
|
-
*
|
|
5387
|
-
* # Web Research Skill
|
|
5388
|
-
*
|
|
5389
|
-
* ## When to Use
|
|
5390
|
-
* - User asks you to research a topic
|
|
5391
|
-
* ...
|
|
5392
|
-
* ```
|
|
5393
|
-
*
|
|
5394
|
-
* @see https://agentskills.io/specification
|
|
5395
|
-
*/
|
|
5396
|
-
/** Maximum size for SKILL.md files (10MB) */
|
|
5397
|
-
const MAX_SKILL_FILE_SIZE$1 = 10 * 1024 * 1024;
|
|
5398
|
-
/** Agent Skills spec constraints */
|
|
5399
|
-
const MAX_SKILL_NAME_LENGTH$1 = 64;
|
|
5400
6710
|
const MAX_SKILL_DESCRIPTION_LENGTH$1 = 1024;
|
|
5401
6711
|
/** Pattern for validating skill names per Agent Skills spec */
|
|
5402
6712
|
const SKILL_NAME_PATTERN = /^[a-z0-9]+(-[a-z0-9]+)*$/;
|
|
@@ -5441,7 +6751,7 @@ function validateSkillName(name, directoryName) {
|
|
|
5441
6751
|
valid: false,
|
|
5442
6752
|
error: "name is required"
|
|
5443
6753
|
};
|
|
5444
|
-
if (name.length >
|
|
6754
|
+
if (name.length > 64) return {
|
|
5445
6755
|
valid: false,
|
|
5446
6756
|
error: "name exceeds 64 characters"
|
|
5447
6757
|
};
|
|
@@ -5481,7 +6791,7 @@ function parseFrontmatter(content) {
|
|
|
5481
6791
|
function parseSkillMetadata(skillMdPath, source) {
|
|
5482
6792
|
try {
|
|
5483
6793
|
const stats = node_fs.default.statSync(skillMdPath);
|
|
5484
|
-
if (stats.size >
|
|
6794
|
+
if (stats.size > 10485760) {
|
|
5485
6795
|
console.warn(`Skipping ${skillMdPath}: file too large (${stats.size} bytes)`);
|
|
5486
6796
|
return null;
|
|
5487
6797
|
}
|
|
@@ -5500,7 +6810,7 @@ function parseSkillMetadata(skillMdPath, source) {
|
|
|
5500
6810
|
const validation = validateSkillName(String(name), directoryName);
|
|
5501
6811
|
if (!validation.valid) console.warn(`Skill '${name}' in ${skillMdPath} does not follow Agent Skills spec: ${validation.error}. Consider renaming to be spec-compliant.`);
|
|
5502
6812
|
let descriptionStr = String(description);
|
|
5503
|
-
if (descriptionStr.length >
|
|
6813
|
+
if (descriptionStr.length > 1024) {
|
|
5504
6814
|
console.warn(`Description exceeds ${MAX_SKILL_DESCRIPTION_LENGTH$1} chars in ${skillMdPath}, truncating`);
|
|
5505
6815
|
descriptionStr = descriptionStr.slice(0, MAX_SKILL_DESCRIPTION_LENGTH$1);
|
|
5506
6816
|
}
|
|
@@ -5588,14 +6898,15 @@ function listSkills(options) {
|
|
|
5588
6898
|
}
|
|
5589
6899
|
return Array.from(allSkills.values());
|
|
5590
6900
|
}
|
|
5591
|
-
|
|
5592
6901
|
//#endregion
|
|
5593
6902
|
exports.BaseSandbox = BaseSandbox;
|
|
5594
6903
|
exports.CompositeBackend = CompositeBackend;
|
|
6904
|
+
exports.ConfigurationError = ConfigurationError;
|
|
5595
6905
|
exports.DEFAULT_GENERAL_PURPOSE_DESCRIPTION = DEFAULT_GENERAL_PURPOSE_DESCRIPTION;
|
|
5596
6906
|
exports.DEFAULT_SUBAGENT_PROMPT = DEFAULT_SUBAGENT_PROMPT;
|
|
5597
6907
|
exports.FilesystemBackend = FilesystemBackend;
|
|
5598
6908
|
exports.GENERAL_PURPOSE_SUBAGENT = GENERAL_PURPOSE_SUBAGENT;
|
|
6909
|
+
exports.LangSmithSandbox = LangSmithSandbox;
|
|
5599
6910
|
exports.LocalShellBackend = LocalShellBackend;
|
|
5600
6911
|
exports.MAX_SKILL_DESCRIPTION_LENGTH = MAX_SKILL_DESCRIPTION_LENGTH;
|
|
5601
6912
|
exports.MAX_SKILL_FILE_SIZE = MAX_SKILL_FILE_SIZE;
|
|
@@ -5604,8 +6915,12 @@ exports.SandboxError = SandboxError;
|
|
|
5604
6915
|
exports.StateBackend = StateBackend;
|
|
5605
6916
|
exports.StoreBackend = StoreBackend;
|
|
5606
6917
|
exports.TASK_SYSTEM_PROMPT = TASK_SYSTEM_PROMPT;
|
|
6918
|
+
exports.adaptBackendProtocol = adaptBackendProtocol;
|
|
6919
|
+
exports.adaptSandboxProtocol = adaptSandboxProtocol;
|
|
5607
6920
|
exports.computeSummarizationDefaults = computeSummarizationDefaults;
|
|
5608
6921
|
exports.createAgentMemoryMiddleware = createAgentMemoryMiddleware;
|
|
6922
|
+
exports.createAsyncSubAgentMiddleware = createAsyncSubAgentMiddleware;
|
|
6923
|
+
exports.createCompletionNotifierMiddleware = createCompletionNotifierMiddleware;
|
|
5609
6924
|
exports.createDeepAgent = createDeepAgent;
|
|
5610
6925
|
exports.createFilesystemMiddleware = createFilesystemMiddleware;
|
|
5611
6926
|
exports.createMemoryMiddleware = createMemoryMiddleware;
|
|
@@ -5616,7 +6931,10 @@ exports.createSubAgentMiddleware = createSubAgentMiddleware;
|
|
|
5616
6931
|
exports.createSummarizationMiddleware = createSummarizationMiddleware;
|
|
5617
6932
|
exports.filesValue = filesValue;
|
|
5618
6933
|
exports.findProjectRoot = findProjectRoot;
|
|
6934
|
+
exports.isAsyncSubAgent = isAsyncSubAgent;
|
|
5619
6935
|
exports.isSandboxBackend = isSandboxBackend;
|
|
6936
|
+
exports.isSandboxProtocol = isSandboxProtocol;
|
|
5620
6937
|
exports.listSkills = listSkills;
|
|
5621
6938
|
exports.parseSkillMetadata = parseSkillMetadata;
|
|
6939
|
+
|
|
5622
6940
|
//# sourceMappingURL=index.cjs.map
|