deepagents 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs ADDED
@@ -0,0 +1,3431 @@
1
+ //#region rolldown:runtime
2
+ var __create = Object.create;
3
+ var __defProp = Object.defineProperty;
4
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
+ var __getOwnPropNames = Object.getOwnPropertyNames;
6
+ var __getProtoOf = Object.getPrototypeOf;
7
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
8
+ var __copyProps = (to, from, except, desc) => {
9
+ if (from && typeof from === "object" || typeof from === "function") {
10
+ for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
11
+ key = keys[i];
12
+ if (!__hasOwnProp.call(to, key) && key !== except) {
13
+ __defProp(to, key, {
14
+ get: ((k) => from[k]).bind(null, key),
15
+ enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
16
+ });
17
+ }
18
+ }
19
+ }
20
+ return to;
21
+ };
22
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
23
+ value: mod,
24
+ enumerable: true
25
+ }) : target, mod));
26
+
27
+ //#endregion
28
+ let langchain = require("langchain");
29
+ let _langchain_langgraph = require("@langchain/langgraph");
30
+ let zod_v4 = require("zod/v4");
31
+ let micromatch = require("micromatch");
32
+ micromatch = __toESM(micromatch);
33
+ let path = require("path");
34
+ let zod_v3 = require("zod/v3");
35
+ let _langchain_core_messages = require("@langchain/core/messages");
36
+ let node_fs_promises = require("node:fs/promises");
37
+ node_fs_promises = __toESM(node_fs_promises);
38
+ let node_fs = require("node:fs");
39
+ node_fs = __toESM(node_fs);
40
+ let node_path = require("node:path");
41
+ node_path = __toESM(node_path);
42
+ let node_child_process = require("node:child_process");
43
+ let fast_glob = require("fast-glob");
44
+ fast_glob = __toESM(fast_glob);
45
+ let node_os = require("node:os");
46
+ node_os = __toESM(node_os);
47
+ let zod = require("zod");
48
+ let yaml = require("yaml");
49
+ yaml = __toESM(yaml);
50
+
51
+ //#region src/backends/protocol.ts
52
+ /**
53
+ * Type guard to check if a backend supports execution.
54
+ *
55
+ * @param backend - Backend instance to check
56
+ * @returns True if the backend implements SandboxBackendProtocol
57
+ */
58
+ function isSandboxBackend(backend) {
59
+ return typeof backend.execute === "function" && typeof backend.id === "string";
60
+ }
61
+
62
+ //#endregion
63
+ //#region src/backends/utils.ts
64
+ /**
65
+ * Shared utility functions for memory backend implementations.
66
+ *
67
+ * This module contains both user-facing string formatters and structured
68
+ * helpers used by backends and the composite router. Structured helpers
69
+ * enable composition without fragile string parsing.
70
+ */
71
+ const EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents";
72
+ const MAX_LINE_LENGTH = 1e4;
73
+ const LINE_NUMBER_WIDTH = 6;
74
+ /**
75
+ * Sanitize tool_call_id to prevent path traversal and separator issues.
76
+ *
77
+ * Replaces dangerous characters (., /, \) with underscores.
78
+ */
79
+ function sanitizeToolCallId(toolCallId) {
80
+ return toolCallId.replace(/\./g, "_").replace(/\//g, "_").replace(/\\/g, "_");
81
+ }
82
+ /**
83
+ * Format file content with line numbers (cat -n style).
84
+ *
85
+ * Chunks lines longer than MAX_LINE_LENGTH with continuation markers (e.g., 5.1, 5.2).
86
+ *
87
+ * @param content - File content as string or list of lines
88
+ * @param startLine - Starting line number (default: 1)
89
+ * @returns Formatted content with line numbers and continuation markers
90
+ */
91
+ function formatContentWithLineNumbers(content, startLine = 1) {
92
+ let lines;
93
+ if (typeof content === "string") {
94
+ lines = content.split("\n");
95
+ if (lines.length > 0 && lines[lines.length - 1] === "") lines = lines.slice(0, -1);
96
+ } else lines = content;
97
+ const resultLines = [];
98
+ for (let i = 0; i < lines.length; i++) {
99
+ const line = lines[i];
100
+ const lineNum = i + startLine;
101
+ if (line.length <= MAX_LINE_LENGTH) resultLines.push(`${lineNum.toString().padStart(LINE_NUMBER_WIDTH)}\t${line}`);
102
+ else {
103
+ const numChunks = Math.ceil(line.length / MAX_LINE_LENGTH);
104
+ for (let chunkIdx = 0; chunkIdx < numChunks; chunkIdx++) {
105
+ const start = chunkIdx * MAX_LINE_LENGTH;
106
+ const end = Math.min(start + MAX_LINE_LENGTH, line.length);
107
+ const chunk = line.substring(start, end);
108
+ if (chunkIdx === 0) resultLines.push(`${lineNum.toString().padStart(LINE_NUMBER_WIDTH)}\t${chunk}`);
109
+ else {
110
+ const continuationMarker = `${lineNum}.${chunkIdx}`;
111
+ resultLines.push(`${continuationMarker.padStart(LINE_NUMBER_WIDTH)}\t${chunk}`);
112
+ }
113
+ }
114
+ }
115
+ }
116
+ return resultLines.join("\n");
117
+ }
118
+ /**
119
+ * Check if content is empty and return warning message.
120
+ *
121
+ * @param content - Content to check
122
+ * @returns Warning message if empty, null otherwise
123
+ */
124
+ function checkEmptyContent(content) {
125
+ if (!content || content.trim() === "") return EMPTY_CONTENT_WARNING;
126
+ return null;
127
+ }
128
+ /**
129
+ * Convert FileData to plain string content.
130
+ *
131
+ * @param fileData - FileData object with 'content' key
132
+ * @returns Content as string with lines joined by newlines
133
+ */
134
+ function fileDataToString(fileData) {
135
+ return fileData.content.join("\n");
136
+ }
137
+ /**
138
+ * Create a FileData object with timestamps.
139
+ *
140
+ * @param content - File content as string
141
+ * @param createdAt - Optional creation timestamp (ISO format)
142
+ * @returns FileData object with content and timestamps
143
+ */
144
+ function createFileData(content, createdAt) {
145
+ const lines = typeof content === "string" ? content.split("\n") : content;
146
+ const now = (/* @__PURE__ */ new Date()).toISOString();
147
+ return {
148
+ content: lines,
149
+ created_at: createdAt || now,
150
+ modified_at: now
151
+ };
152
+ }
153
+ /**
154
+ * Update FileData with new content, preserving creation timestamp.
155
+ *
156
+ * @param fileData - Existing FileData object
157
+ * @param content - New content as string
158
+ * @returns Updated FileData object
159
+ */
160
+ function updateFileData(fileData, content) {
161
+ const lines = typeof content === "string" ? content.split("\n") : content;
162
+ const now = (/* @__PURE__ */ new Date()).toISOString();
163
+ return {
164
+ content: lines,
165
+ created_at: fileData.created_at,
166
+ modified_at: now
167
+ };
168
+ }
169
+ /**
170
+ * Format file data for read response with line numbers.
171
+ *
172
+ * @param fileData - FileData object
173
+ * @param offset - Line offset (0-indexed)
174
+ * @param limit - Maximum number of lines
175
+ * @returns Formatted content or error message
176
+ */
177
+ function formatReadResponse(fileData, offset, limit) {
178
+ const content = fileDataToString(fileData);
179
+ const emptyMsg = checkEmptyContent(content);
180
+ if (emptyMsg) return emptyMsg;
181
+ const lines = content.split("\n");
182
+ const startIdx = offset;
183
+ const endIdx = Math.min(startIdx + limit, lines.length);
184
+ if (startIdx >= lines.length) return `Error: Line offset ${offset} exceeds file length (${lines.length} lines)`;
185
+ return formatContentWithLineNumbers(lines.slice(startIdx, endIdx), startIdx + 1);
186
+ }
187
+ /**
188
+ * Perform string replacement with occurrence validation.
189
+ *
190
+ * @param content - Original content
191
+ * @param oldString - String to replace
192
+ * @param newString - Replacement string
193
+ * @param replaceAll - Whether to replace all occurrences
194
+ * @returns Tuple of [new_content, occurrences] on success, or error message string
195
+ */
196
+ function performStringReplacement(content, oldString, newString, replaceAll) {
197
+ const occurrences = content.split(oldString).length - 1;
198
+ if (occurrences === 0) return `Error: String not found in file: '${oldString}'`;
199
+ if (occurrences > 1 && !replaceAll) return `Error: String '${oldString}' appears ${occurrences} times in file. Use replace_all=True to replace all instances, or provide a more specific string with surrounding context.`;
200
+ return [content.split(oldString).join(newString), occurrences];
201
+ }
202
+ /**
203
+ * Validate and normalize a path.
204
+ *
205
+ * @param path - Path to validate
206
+ * @returns Normalized path starting with / and ending with /
207
+ * @throws Error if path is invalid
208
+ */
209
+ function validatePath(path$4) {
210
+ const pathStr = path$4 || "/";
211
+ if (!pathStr || pathStr.trim() === "") throw new Error("Path cannot be empty");
212
+ let normalized = pathStr.startsWith("/") ? pathStr : "/" + pathStr;
213
+ if (!normalized.endsWith("/")) normalized += "/";
214
+ return normalized;
215
+ }
216
+ /**
217
+ * Search files dict for paths matching glob pattern.
218
+ *
219
+ * @param files - Dictionary of file paths to FileData
220
+ * @param pattern - Glob pattern (e.g., `*.py`, `**\/*.ts`)
221
+ * @param path - Base path to search from
222
+ * @returns Newline-separated file paths, sorted by modification time (most recent first).
223
+ * Returns "No files found" if no matches.
224
+ *
225
+ * @example
226
+ * ```typescript
227
+ * const files = {"/src/main.py": FileData(...), "/test.py": FileData(...)};
228
+ * globSearchFiles(files, "*.py", "/");
229
+ * // Returns: "/test.py\n/src/main.py" (sorted by modified_at)
230
+ * ```
231
+ */
232
+ function globSearchFiles(files, pattern, path$4 = "/") {
233
+ let normalizedPath;
234
+ try {
235
+ normalizedPath = validatePath(path$4);
236
+ } catch {
237
+ return "No files found";
238
+ }
239
+ const filtered = Object.fromEntries(Object.entries(files).filter(([fp]) => fp.startsWith(normalizedPath)));
240
+ const effectivePattern = pattern;
241
+ const matches = [];
242
+ for (const [filePath, fileData] of Object.entries(filtered)) {
243
+ let relative = filePath.substring(normalizedPath.length);
244
+ if (relative.startsWith("/")) relative = relative.substring(1);
245
+ if (!relative) {
246
+ const parts = filePath.split("/");
247
+ relative = parts[parts.length - 1] || "";
248
+ }
249
+ if (micromatch.default.isMatch(relative, effectivePattern, {
250
+ dot: true,
251
+ nobrace: false
252
+ })) matches.push([filePath, fileData.modified_at]);
253
+ }
254
+ matches.sort((a, b) => b[1].localeCompare(a[1]));
255
+ if (matches.length === 0) return "No files found";
256
+ return matches.map(([fp]) => fp).join("\n");
257
+ }
258
+ /**
259
+ * Return structured grep matches from an in-memory files mapping.
260
+ *
261
+ * Returns a list of GrepMatch on success, or a string for invalid inputs
262
+ * (e.g., invalid regex). We deliberately do not raise here to keep backends
263
+ * non-throwing in tool contexts and preserve user-facing error messages.
264
+ */
265
+ function grepMatchesFromFiles(files, pattern, path$4 = null, glob = null) {
266
+ let regex;
267
+ try {
268
+ regex = new RegExp(pattern);
269
+ } catch (e) {
270
+ return `Invalid regex pattern: ${e.message}`;
271
+ }
272
+ let normalizedPath;
273
+ try {
274
+ normalizedPath = validatePath(path$4);
275
+ } catch {
276
+ return [];
277
+ }
278
+ let filtered = Object.fromEntries(Object.entries(files).filter(([fp]) => fp.startsWith(normalizedPath)));
279
+ if (glob) filtered = Object.fromEntries(Object.entries(filtered).filter(([fp]) => micromatch.default.isMatch((0, path.basename)(fp), glob, {
280
+ dot: true,
281
+ nobrace: false
282
+ })));
283
+ const matches = [];
284
+ for (const [filePath, fileData] of Object.entries(filtered)) for (let i = 0; i < fileData.content.length; i++) {
285
+ const line = fileData.content[i];
286
+ const lineNum = i + 1;
287
+ if (regex.test(line)) matches.push({
288
+ path: filePath,
289
+ line: lineNum,
290
+ text: line
291
+ });
292
+ }
293
+ return matches;
294
+ }
295
+
296
+ //#endregion
297
+ //#region src/backends/state.ts
298
+ /**
299
+ * Backend that stores files in agent state (ephemeral).
300
+ *
301
+ * Uses LangGraph's state management and checkpointing. Files persist within
302
+ * a conversation thread but not across threads. State is automatically
303
+ * checkpointed after each agent step.
304
+ *
305
+ * Special handling: Since LangGraph state must be updated via Command objects
306
+ * (not direct mutation), operations return filesUpdate in WriteResult/EditResult
307
+ * for the middleware to apply via Command.
308
+ */
309
+ var StateBackend = class {
310
+ stateAndStore;
311
+ constructor(stateAndStore) {
312
+ this.stateAndStore = stateAndStore;
313
+ }
314
+ /**
315
+ * Get files from current state.
316
+ */
317
+ getFiles() {
318
+ return this.stateAndStore.state.files || {};
319
+ }
320
+ /**
321
+ * List files and directories in the specified directory (non-recursive).
322
+ *
323
+ * @param path - Absolute path to directory
324
+ * @returns List of FileInfo objects for files and directories directly in the directory.
325
+ * Directories have a trailing / in their path and is_dir=true.
326
+ */
327
+ lsInfo(path$4) {
328
+ const files = this.getFiles();
329
+ const infos = [];
330
+ const subdirs = /* @__PURE__ */ new Set();
331
+ const normalizedPath = path$4.endsWith("/") ? path$4 : path$4 + "/";
332
+ for (const [k, fd] of Object.entries(files)) {
333
+ if (!k.startsWith(normalizedPath)) continue;
334
+ const relative = k.substring(normalizedPath.length);
335
+ if (relative.includes("/")) {
336
+ const subdirName = relative.split("/")[0];
337
+ subdirs.add(normalizedPath + subdirName + "/");
338
+ continue;
339
+ }
340
+ const size = fd.content.join("\n").length;
341
+ infos.push({
342
+ path: k,
343
+ is_dir: false,
344
+ size,
345
+ modified_at: fd.modified_at
346
+ });
347
+ }
348
+ for (const subdir of Array.from(subdirs).sort()) infos.push({
349
+ path: subdir,
350
+ is_dir: true,
351
+ size: 0,
352
+ modified_at: ""
353
+ });
354
+ infos.sort((a, b) => a.path.localeCompare(b.path));
355
+ return infos;
356
+ }
357
+ /**
358
+ * Read file content with line numbers.
359
+ *
360
+ * @param filePath - Absolute file path
361
+ * @param offset - Line offset to start reading from (0-indexed)
362
+ * @param limit - Maximum number of lines to read
363
+ * @returns Formatted file content with line numbers, or error message
364
+ */
365
+ read(filePath, offset = 0, limit = 2e3) {
366
+ const fileData = this.getFiles()[filePath];
367
+ if (!fileData) return `Error: File '${filePath}' not found`;
368
+ return formatReadResponse(fileData, offset, limit);
369
+ }
370
+ /**
371
+ * Read file content as raw FileData.
372
+ *
373
+ * @param filePath - Absolute file path
374
+ * @returns Raw file content as FileData
375
+ */
376
+ readRaw(filePath) {
377
+ const fileData = this.getFiles()[filePath];
378
+ if (!fileData) throw new Error(`File '${filePath}' not found`);
379
+ return fileData;
380
+ }
381
+ /**
382
+ * Create a new file with content.
383
+ * Returns WriteResult with filesUpdate to update LangGraph state.
384
+ */
385
+ write(filePath, content) {
386
+ if (filePath in this.getFiles()) return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
387
+ const newFileData = createFileData(content);
388
+ return {
389
+ path: filePath,
390
+ filesUpdate: { [filePath]: newFileData }
391
+ };
392
+ }
393
+ /**
394
+ * Edit a file by replacing string occurrences.
395
+ * Returns EditResult with filesUpdate and occurrences.
396
+ */
397
+ edit(filePath, oldString, newString, replaceAll = false) {
398
+ const fileData = this.getFiles()[filePath];
399
+ if (!fileData) return { error: `Error: File '${filePath}' not found` };
400
+ const result = performStringReplacement(fileDataToString(fileData), oldString, newString, replaceAll);
401
+ if (typeof result === "string") return { error: result };
402
+ const [newContent, occurrences] = result;
403
+ const newFileData = updateFileData(fileData, newContent);
404
+ return {
405
+ path: filePath,
406
+ filesUpdate: { [filePath]: newFileData },
407
+ occurrences
408
+ };
409
+ }
410
+ /**
411
+ * Structured search results or error string for invalid input.
412
+ */
413
+ grepRaw(pattern, path$4 = "/", glob = null) {
414
+ return grepMatchesFromFiles(this.getFiles(), pattern, path$4, glob);
415
+ }
416
+ /**
417
+ * Structured glob matching returning FileInfo objects.
418
+ */
419
+ globInfo(pattern, path$4 = "/") {
420
+ const files = this.getFiles();
421
+ const result = globSearchFiles(files, pattern, path$4);
422
+ if (result === "No files found") return [];
423
+ const paths = result.split("\n");
424
+ const infos = [];
425
+ for (const p of paths) {
426
+ const fd = files[p];
427
+ const size = fd ? fd.content.join("\n").length : 0;
428
+ infos.push({
429
+ path: p,
430
+ is_dir: false,
431
+ size,
432
+ modified_at: fd?.modified_at || ""
433
+ });
434
+ }
435
+ return infos;
436
+ }
437
+ /**
438
+ * Upload multiple files.
439
+ *
440
+ * Note: Since LangGraph state must be updated via Command objects,
441
+ * the caller must apply filesUpdate via Command after calling this method.
442
+ *
443
+ * @param files - List of [path, content] tuples to upload
444
+ * @returns List of FileUploadResponse objects, one per input file
445
+ */
446
+ uploadFiles(files) {
447
+ const responses = [];
448
+ const updates = {};
449
+ for (const [path$4, content] of files) try {
450
+ updates[path$4] = createFileData(new TextDecoder().decode(content));
451
+ responses.push({
452
+ path: path$4,
453
+ error: null
454
+ });
455
+ } catch {
456
+ responses.push({
457
+ path: path$4,
458
+ error: "invalid_path"
459
+ });
460
+ }
461
+ const result = responses;
462
+ result.filesUpdate = updates;
463
+ return result;
464
+ }
465
+ /**
466
+ * Download multiple files.
467
+ *
468
+ * @param paths - List of file paths to download
469
+ * @returns List of FileDownloadResponse objects, one per input path
470
+ */
471
+ downloadFiles(paths) {
472
+ const files = this.getFiles();
473
+ const responses = [];
474
+ for (const path$4 of paths) {
475
+ const fileData = files[path$4];
476
+ if (!fileData) {
477
+ responses.push({
478
+ path: path$4,
479
+ content: null,
480
+ error: "file_not_found"
481
+ });
482
+ continue;
483
+ }
484
+ const contentStr = fileDataToString(fileData);
485
+ const content = new TextEncoder().encode(contentStr);
486
+ responses.push({
487
+ path: path$4,
488
+ content,
489
+ error: null
490
+ });
491
+ }
492
+ return responses;
493
+ }
494
+ };
495
+
496
+ //#endregion
497
+ //#region src/middleware/fs.ts
498
+ /**
499
+ * Middleware for providing filesystem tools to an agent.
500
+ *
501
+ * Provides ls, read_file, write_file, edit_file, glob, and grep tools with support for:
502
+ * - Pluggable backends (StateBackend, StoreBackend, FilesystemBackend, CompositeBackend)
503
+ * - Tool result eviction for large outputs
504
+ */
505
+ /**
506
+ * Zod v3 schema for FileData (re-export from backends)
507
+ */
508
+ const FileDataSchema = zod_v4.z.object({
509
+ content: zod_v4.z.array(zod_v4.z.string()),
510
+ created_at: zod_v4.z.string(),
511
+ modified_at: zod_v4.z.string()
512
+ });
513
+ /**
514
+ * Merge file updates with support for deletions.
515
+ */
516
+ function fileDataReducer(left, right) {
517
+ if (left === void 0) {
518
+ const result$1 = {};
519
+ for (const [key, value] of Object.entries(right)) if (value !== null) result$1[key] = value;
520
+ return result$1;
521
+ }
522
+ const result = { ...left };
523
+ for (const [key, value] of Object.entries(right)) if (value === null) delete result[key];
524
+ else result[key] = value;
525
+ return result;
526
+ }
527
+ /**
528
+ * Shared filesystem state schema.
529
+ * Defined at module level to ensure the same object identity is used across all agents,
530
+ * preventing "Channel already exists with different type" errors when multiple agents
531
+ * use createFilesystemMiddleware.
532
+ */
533
+ const FilesystemStateSchema = zod_v4.z.object({ files: zod_v4.z.record(zod_v4.z.string(), FileDataSchema).default({}).meta({ reducer: {
534
+ fn: fileDataReducer,
535
+ schema: zod_v4.z.record(zod_v4.z.string(), FileDataSchema.nullable())
536
+ } }) });
537
+ /**
538
+ * Resolve backend from factory or instance.
539
+ *
540
+ * @param backend - Backend instance or factory function
541
+ * @param stateAndStore - State and store container for backend initialization
542
+ */
543
+ function getBackend(backend, stateAndStore) {
544
+ if (typeof backend === "function") return backend(stateAndStore);
545
+ return backend;
546
+ }
547
+ const FILESYSTEM_SYSTEM_PROMPT = `You have access to a virtual filesystem. All file paths must start with a /.
548
+
549
+ - ls: list files in a directory (requires absolute path)
550
+ - read_file: read a file from the filesystem
551
+ - write_file: write to a file in the filesystem
552
+ - edit_file: edit a file in the filesystem
553
+ - glob: find files matching a pattern (e.g., "**/*.py")
554
+ - grep: search for text within files`;
555
+ const LS_TOOL_DESCRIPTION = "List files and directories in a directory";
556
+ const READ_FILE_TOOL_DESCRIPTION = "Read the contents of a file";
557
+ const WRITE_FILE_TOOL_DESCRIPTION = "Write content to a new file. Returns an error if the file already exists";
558
+ const EDIT_FILE_TOOL_DESCRIPTION = "Edit a file by replacing a specific string with a new string";
559
+ const GLOB_TOOL_DESCRIPTION = "Find files matching a glob pattern (e.g., '**/*.py' for all Python files)";
560
+ const GREP_TOOL_DESCRIPTION = "Search for a regex pattern in files. Returns matching files and line numbers";
561
+ const EXECUTE_TOOL_DESCRIPTION = `Executes a given command in the sandbox environment with proper handling and security measures.
562
+
563
+ Before executing the command, please follow these steps:
564
+
565
+ 1. Directory Verification:
566
+ - If the command will create new directories or files, first use the ls tool to verify the parent directory exists
567
+
568
+ 2. Command Execution:
569
+ - Always quote file paths that contain spaces with double quotes
570
+ - Commands run in an isolated sandbox environment
571
+ - Returns combined stdout/stderr output with exit code
572
+
573
+ Usage notes:
574
+ - The command parameter is required
575
+ - If the output is very large, it may be truncated
576
+ - IMPORTANT: Avoid using search commands like find and grep. Use the grep, glob tools instead.
577
+ - Avoid read tools like cat, head, tail - use read_file instead.
578
+ - Use '&&' to chain dependent commands, ';' for independent commands
579
+ - Try to use absolute paths to avoid cd`;
580
+ const EXECUTION_SYSTEM_PROMPT = `## Execute Tool \`execute\`
581
+
582
+ You have access to an \`execute\` tool for running shell commands in a sandboxed environment.
583
+ Use this tool to run commands, scripts, tests, builds, and other shell operations.
584
+
585
+ - execute: run a shell command in the sandbox (returns output and exit code)`;
586
+ /**
587
+ * Create ls tool using backend.
588
+ */
589
+ function createLsTool(backend, options) {
590
+ const { customDescription } = options;
591
+ return (0, langchain.tool)(async (input, config) => {
592
+ const resolvedBackend = getBackend(backend, {
593
+ state: (0, _langchain_langgraph.getCurrentTaskInput)(config),
594
+ store: config.store
595
+ });
596
+ const path$4 = input.path || "/";
597
+ const infos = await resolvedBackend.lsInfo(path$4);
598
+ if (infos.length === 0) return `No files found in ${path$4}`;
599
+ const lines = [];
600
+ for (const info of infos) if (info.is_dir) lines.push(`${info.path} (directory)`);
601
+ else {
602
+ const size = info.size ? ` (${info.size} bytes)` : "";
603
+ lines.push(`${info.path}${size}`);
604
+ }
605
+ return lines.join("\n");
606
+ }, {
607
+ name: "ls",
608
+ description: customDescription || LS_TOOL_DESCRIPTION,
609
+ schema: zod_v4.z.object({ path: zod_v4.z.string().optional().default("/").describe("Directory path to list (default: /)") })
610
+ });
611
+ }
612
+ /**
613
+ * Create read_file tool using backend.
614
+ */
615
+ function createReadFileTool(backend, options) {
616
+ const { customDescription } = options;
617
+ return (0, langchain.tool)(async (input, config) => {
618
+ const resolvedBackend = getBackend(backend, {
619
+ state: (0, _langchain_langgraph.getCurrentTaskInput)(config),
620
+ store: config.store
621
+ });
622
+ const { file_path, offset = 0, limit = 2e3 } = input;
623
+ return await resolvedBackend.read(file_path, offset, limit);
624
+ }, {
625
+ name: "read_file",
626
+ description: customDescription || READ_FILE_TOOL_DESCRIPTION,
627
+ schema: zod_v4.z.object({
628
+ file_path: zod_v4.z.string().describe("Absolute path to the file to read"),
629
+ offset: zod_v4.z.coerce.number().optional().default(0).describe("Line offset to start reading from (0-indexed)"),
630
+ limit: zod_v4.z.coerce.number().optional().default(2e3).describe("Maximum number of lines to read")
631
+ })
632
+ });
633
+ }
634
+ /**
635
+ * Create write_file tool using backend.
636
+ */
637
+ function createWriteFileTool(backend, options) {
638
+ const { customDescription } = options;
639
+ return (0, langchain.tool)(async (input, config) => {
640
+ const resolvedBackend = getBackend(backend, {
641
+ state: (0, _langchain_langgraph.getCurrentTaskInput)(config),
642
+ store: config.store
643
+ });
644
+ const { file_path, content } = input;
645
+ const result = await resolvedBackend.write(file_path, content);
646
+ if (result.error) return result.error;
647
+ const message = new langchain.ToolMessage({
648
+ content: `Successfully wrote to '${file_path}'`,
649
+ tool_call_id: config.toolCall?.id,
650
+ name: "write_file",
651
+ metadata: result.metadata
652
+ });
653
+ if (result.filesUpdate) return new _langchain_langgraph.Command({ update: {
654
+ files: result.filesUpdate,
655
+ messages: [message]
656
+ } });
657
+ return message;
658
+ }, {
659
+ name: "write_file",
660
+ description: customDescription || WRITE_FILE_TOOL_DESCRIPTION,
661
+ schema: zod_v4.z.object({
662
+ file_path: zod_v4.z.string().describe("Absolute path to the file to write"),
663
+ content: zod_v4.z.string().describe("Content to write to the file")
664
+ })
665
+ });
666
+ }
667
+ /**
668
+ * Create edit_file tool using backend.
669
+ */
670
+ function createEditFileTool(backend, options) {
671
+ const { customDescription } = options;
672
+ return (0, langchain.tool)(async (input, config) => {
673
+ const resolvedBackend = getBackend(backend, {
674
+ state: (0, _langchain_langgraph.getCurrentTaskInput)(config),
675
+ store: config.store
676
+ });
677
+ const { file_path, old_string, new_string, replace_all = false } = input;
678
+ const result = await resolvedBackend.edit(file_path, old_string, new_string, replace_all);
679
+ if (result.error) return result.error;
680
+ const message = new langchain.ToolMessage({
681
+ content: `Successfully replaced ${result.occurrences} occurrence(s) in '${file_path}'`,
682
+ tool_call_id: config.toolCall?.id,
683
+ name: "edit_file",
684
+ metadata: result.metadata
685
+ });
686
+ if (result.filesUpdate) return new _langchain_langgraph.Command({ update: {
687
+ files: result.filesUpdate,
688
+ messages: [message]
689
+ } });
690
+ return message;
691
+ }, {
692
+ name: "edit_file",
693
+ description: customDescription || EDIT_FILE_TOOL_DESCRIPTION,
694
+ schema: zod_v4.z.object({
695
+ file_path: zod_v4.z.string().describe("Absolute path to the file to edit"),
696
+ old_string: zod_v4.z.string().describe("String to be replaced (must match exactly)"),
697
+ new_string: zod_v4.z.string().describe("String to replace with"),
698
+ replace_all: zod_v4.z.boolean().optional().default(false).describe("Whether to replace all occurrences")
699
+ })
700
+ });
701
+ }
702
+ /**
703
+ * Create glob tool using backend.
704
+ */
705
+ function createGlobTool(backend, options) {
706
+ const { customDescription } = options;
707
+ return (0, langchain.tool)(async (input, config) => {
708
+ const resolvedBackend = getBackend(backend, {
709
+ state: (0, _langchain_langgraph.getCurrentTaskInput)(config),
710
+ store: config.store
711
+ });
712
+ const { pattern, path: path$4 = "/" } = input;
713
+ const infos = await resolvedBackend.globInfo(pattern, path$4);
714
+ if (infos.length === 0) return `No files found matching pattern '${pattern}'`;
715
+ return infos.map((info) => info.path).join("\n");
716
+ }, {
717
+ name: "glob",
718
+ description: customDescription || GLOB_TOOL_DESCRIPTION,
719
+ schema: zod_v4.z.object({
720
+ pattern: zod_v4.z.string().describe("Glob pattern (e.g., '*.py', '**/*.ts')"),
721
+ path: zod_v4.z.string().optional().default("/").describe("Base path to search from (default: /)")
722
+ })
723
+ });
724
+ }
725
+ /**
726
+ * Create grep tool using backend.
727
+ */
728
+ function createGrepTool(backend, options) {
729
+ const { customDescription } = options;
730
+ return (0, langchain.tool)(async (input, config) => {
731
+ const resolvedBackend = getBackend(backend, {
732
+ state: (0, _langchain_langgraph.getCurrentTaskInput)(config),
733
+ store: config.store
734
+ });
735
+ const { pattern, path: path$4 = "/", glob = null } = input;
736
+ const result = await resolvedBackend.grepRaw(pattern, path$4, glob);
737
+ if (typeof result === "string") return result;
738
+ if (result.length === 0) return `No matches found for pattern '${pattern}'`;
739
+ const lines = [];
740
+ let currentFile = null;
741
+ for (const match of result) {
742
+ if (match.path !== currentFile) {
743
+ currentFile = match.path;
744
+ lines.push(`\n${currentFile}:`);
745
+ }
746
+ lines.push(` ${match.line}: ${match.text}`);
747
+ }
748
+ return lines.join("\n");
749
+ }, {
750
+ name: "grep",
751
+ description: customDescription || GREP_TOOL_DESCRIPTION,
752
+ schema: zod_v4.z.object({
753
+ pattern: zod_v4.z.string().describe("Regex pattern to search for"),
754
+ path: zod_v4.z.string().optional().default("/").describe("Base path to search from (default: /)"),
755
+ glob: zod_v4.z.string().optional().nullable().describe("Optional glob pattern to filter files (e.g., '*.py')")
756
+ })
757
+ });
758
+ }
759
+ /**
760
+ * Create execute tool using backend.
761
+ */
762
+ function createExecuteTool(backend, options) {
763
+ const { customDescription } = options;
764
+ return (0, langchain.tool)(async (input, config) => {
765
+ const resolvedBackend = getBackend(backend, {
766
+ state: (0, _langchain_langgraph.getCurrentTaskInput)(config),
767
+ store: config.store
768
+ });
769
+ if (!isSandboxBackend(resolvedBackend)) return "Error: Execution not available. This agent's backend does not support command execution (SandboxBackendProtocol). To use the execute tool, provide a backend that implements SandboxBackendProtocol.";
770
+ const result = await resolvedBackend.execute(input.command);
771
+ const parts = [result.output];
772
+ if (result.exitCode !== null) {
773
+ const status = result.exitCode === 0 ? "succeeded" : "failed";
774
+ parts.push(`\n[Command ${status} with exit code ${result.exitCode}]`);
775
+ }
776
+ if (result.truncated) parts.push("\n[Output was truncated due to size limits]");
777
+ return parts.join("");
778
+ }, {
779
+ name: "execute",
780
+ description: customDescription || EXECUTE_TOOL_DESCRIPTION,
781
+ schema: zod_v4.z.object({ command: zod_v4.z.string().describe("The shell command to execute") })
782
+ });
783
+ }
784
/**
 * Create filesystem middleware with all tools and features.
 *
 * Registers the ls/read/write/edit/glob/grep/execute tools, appends the
 * filesystem system prompt on each model call, and — when
 * `toolTokenLimitBeforeEvict` is non-zero — evicts oversized tool results
 * to files so they do not flood the model context.
 *
 * @param options.backend - Backend instance or factory (defaults to a StateBackend factory)
 * @param options.systemPrompt - Override for the default filesystem prompt
 * @param options.customToolDescriptions - Per-tool description overrides, keyed by tool name
 * @param options.toolTokenLimitBeforeEvict - Approximate token threshold above which
 *   tool results are written to a file instead of kept inline (falsy disables eviction)
 */
function createFilesystemMiddleware(options = {}) {
	const { backend = (stateAndStore) => new StateBackend(stateAndStore), systemPrompt: customSystemPrompt = null, customToolDescriptions = null, toolTokenLimitBeforeEvict = 2e4 } = options;
	const baseSystemPrompt = customSystemPrompt || FILESYSTEM_SYSTEM_PROMPT;
	return (0, langchain.createMiddleware)({
		name: "FilesystemMiddleware",
		stateSchema: FilesystemStateSchema,
		tools: [
			createLsTool(backend, { customDescription: customToolDescriptions?.ls }),
			createReadFileTool(backend, { customDescription: customToolDescriptions?.read_file }),
			createWriteFileTool(backend, { customDescription: customToolDescriptions?.write_file }),
			createEditFileTool(backend, { customDescription: customToolDescriptions?.edit_file }),
			createGlobTool(backend, { customDescription: customToolDescriptions?.glob }),
			createGrepTool(backend, { customDescription: customToolDescriptions?.grep }),
			createExecuteTool(backend, { customDescription: customToolDescriptions?.execute })
		],
		wrapModelCall: async (request, handler) => {
			// Only expose the execute tool when the backend can actually run commands.
			const supportsExecution = isSandboxBackend(getBackend(backend, {
				state: request.state || {},
				store: request.config?.store
			}));
			let tools = request.tools;
			if (!supportsExecution) tools = tools.filter((t) => t.name !== "execute");
			let systemPrompt = baseSystemPrompt;
			// Execution guidance is only added when the execute tool is available.
			if (supportsExecution) systemPrompt = `${systemPrompt}\n\n${EXECUTION_SYSTEM_PROMPT}`;
			const currentSystemPrompt = request.systemPrompt || "";
			const newSystemPrompt = currentSystemPrompt ? `${currentSystemPrompt}\n\n${systemPrompt}` : systemPrompt;
			return handler({
				...request,
				tools,
				systemPrompt: newSystemPrompt
			});
		},
		wrapToolCall: toolTokenLimitBeforeEvict ? async (request, handler) => {
			const result = await handler(request);
			// Replace an oversized ToolMessage with a short pointer message after
			// saving the full content to a file; ~4 chars/token is the heuristic.
			async function processToolMessage(msg) {
				if (typeof msg.content === "string" && msg.content.length > toolTokenLimitBeforeEvict * 4) {
					const resolvedBackend = getBackend(backend, {
						state: request.state || {},
						store: request.config?.store
					});
					const evictPath = `/large_tool_results/${sanitizeToolCallId(request.toolCall?.id || msg.tool_call_id)}`;
					const writeResult = await resolvedBackend.write(evictPath, msg.content);
					// If the write failed (e.g. path already exists), keep the
					// original message untouched rather than lose the content.
					if (writeResult.error) return {
						message: msg,
						filesUpdate: null
					};
					return {
						message: new langchain.ToolMessage({
							content: `Tool result too large (${Math.round(msg.content.length / 4)} tokens). Content saved to ${evictPath}`,
							tool_call_id: msg.tool_call_id,
							name: msg.name
						}),
						filesUpdate: writeResult.filesUpdate
					};
				}
				return {
					message: msg,
					filesUpdate: null
				};
			}
			if (result instanceof langchain.ToolMessage) {
				const processed = await processToolMessage(result);
				// State-backed storage reports a files update that must ride along
				// with the replacement message in a single Command.
				if (processed.filesUpdate) return new _langchain_langgraph.Command({ update: {
					files: processed.filesUpdate,
					messages: [processed.message]
				} });
				return processed.message;
			}
			if ((0, _langchain_langgraph.isCommand)(result)) {
				const update = result.update;
				if (!update?.messages) return result;
				let hasLargeResults = false;
				// Merge any files the tool already set with eviction writes.
				const accumulatedFiles = { ...update.files || {} };
				const processedMessages = [];
				for (const msg of update.messages) if (msg instanceof langchain.ToolMessage) {
					const processed = await processToolMessage(msg);
					processedMessages.push(processed.message);
					if (processed.filesUpdate) {
						hasLargeResults = true;
						Object.assign(accumulatedFiles, processed.filesUpdate);
					}
				} else processedMessages.push(msg);
				// Only rebuild the Command when at least one message was evicted.
				if (hasLargeResults) return new _langchain_langgraph.Command({ update: {
					...update,
					messages: processedMessages,
					files: accumulatedFiles
				} });
			}
			return result;
		} : void 0
	});
}
879
+
880
+ //#endregion
881
+ //#region src/middleware/subagents.ts
882
// Default system prompt for subagents that do not define their own.
const DEFAULT_SUBAGENT_PROMPT = "In order to complete the objective that the user asks of you, you have access to a number of standard tools.";
// State keys that are never forwarded to subagents (nor merged back from
// their results) — each side keeps its own copy of these channels.
const EXCLUDED_STATE_KEYS = [
	"messages",
	"todos",
	"jumpTo",
	"files"
];
// Description advertised for the built-in general-purpose subagent.
const DEFAULT_GENERAL_PURPOSE_DESCRIPTION = "General-purpose agent for researching complex questions, searching for files and content, and executing multi-step tasks. When you are searching for a keyword or file and are not confident that you will find the right match in the first few tries use this agent to perform the search for you. This agent has access to all tools as the main agent.";
890
/**
 * Build the description text for the `task` tool shown to the model.
 *
 * @param subagentDescriptions - Pre-formatted "- name: description" lines,
 *   one per registered subagent; interpolated verbatim into the prompt.
 * @returns The full tool description (leading/trailing whitespace trimmed).
 */
function getTaskToolDescription(subagentDescriptions) {
	// NOTE: the template below is a runtime string sent to the model — its
	// exact wording (including the XML-ish example tags) is intentional.
	return `
Launch an ephemeral subagent to handle complex, multi-step independent tasks with isolated context windows.

Available agent types and the tools they have access to:
${subagentDescriptions.join("\n")}

When using the Task tool, you must specify a subagent_type parameter to select which agent type to use.

## Usage notes:
1. Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses
2. When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.
3. Each agent invocation is stateless. You will not be able to send additional messages to the agent, nor will the agent be able to communicate with you outside of its final report. Therefore, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.
4. The agent's outputs should generally be trusted
5. Clearly tell the agent whether you expect it to create content, perform analysis, or just do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent
6. If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.
7. When only the general-purpose agent is provided, you should use it for all tasks. It is great for isolating context and token usage, and completing specific, complex tasks, as it has all the same capabilities as the main agent.

### Example usage of the general-purpose agent:

<example_agent_descriptions>
"general-purpose": use this agent for general purpose tasks, it has access to all tools as the main agent.
</example_agent_descriptions>

<example>
User: "I want to conduct research on the accomplishments of Lebron James, Michael Jordan, and Kobe Bryant, and then compare them."
Assistant: *Uses the task tool in parallel to conduct isolated research on each of the three players*
Assistant: *Synthesizes the results of the three isolated research tasks and responds to the User*
<commentary>
Research is a complex, multi-step task in it of itself.
The research of each individual player is not dependent on the research of the other players.
The assistant uses the task tool to break down the complex objective into three isolated tasks.
Each research task only needs to worry about context and tokens about one player, then returns synthesized information about each player as the Tool Result.
This means each research task can dive deep and spend tokens and context deeply researching each player, but the final result is synthesized information, and saves us tokens in the long run when comparing the players to each other.
</commentary>
</example>

<example>
User: "Analyze a single large code repository for security vulnerabilities and generate a report."
Assistant: *Launches a single \`task\` subagent for the repository analysis*
Assistant: *Receives report and integrates results into final summary*
<commentary>
Subagent is used to isolate a large, context-heavy task, even though there is only one. This prevents the main thread from being overloaded with details.
If the user then asks followup questions, we have a concise report to reference instead of the entire history of analysis and tool calls, which is good and saves us time and money.
</commentary>
</example>

<example>
User: "Schedule two meetings for me and prepare agendas for each."
Assistant: *Calls the task tool in parallel to launch two \`task\` subagents (one per meeting) to prepare agendas*
Assistant: *Returns final schedules and agendas*
<commentary>
Tasks are simple individually, but subagents help silo agenda preparation.
Each subagent only needs to worry about the agenda for one meeting.
</commentary>
</example>

<example>
User: "I want to order a pizza from Dominos, order a burger from McDonald's, and order a salad from Subway."
Assistant: *Calls tools directly in parallel to order a pizza from Dominos, a burger from McDonald's, and a salad from Subway*
<commentary>
The assistant did not use the task tool because the objective is super simple and clear and only requires a few trivial tool calls.
It is better to just complete the task directly and NOT use the \`task\`tool.
</commentary>
</example>

### Example usage with custom agents:

<example_agent_descriptions>
"content-reviewer": use this agent after you are done creating significant content or documents
"greeting-responder": use this agent when to respond to user greetings with a friendly joke
"research-analyst": use this agent to conduct thorough research on complex topics
</example_agent_description>

<example>
user: "Please write a function that checks if a number is prime"
assistant: Sure let me write a function that checks if a number is prime
assistant: First let me use the Write tool to write a function that checks if a number is prime
assistant: I'm going to use the Write tool to write the following code:
<code>
function isPrime(n) {
if (n <= 1) return false
for (let i = 2; i * i <= n; i++) {
if (n % i === 0) return false
}
return true
}
</code>
<commentary>
Since significant content was created and the task was completed, now use the content-reviewer agent to review the work
</commentary>
assistant: Now let me use the content-reviewer agent to review the code
assistant: Uses the Task tool to launch with the content-reviewer agent
</example>

<example>
user: "Can you help me research the environmental impact of different renewable energy sources and create a comprehensive report?"
<commentary>
This is a complex research task that would benefit from using the research-analyst agent to conduct thorough analysis
</commentary>
assistant: I'll help you research the environmental impact of renewable energy sources. Let me use the research-analyst agent to conduct comprehensive research on this topic.
assistant: Uses the Task tool to launch with the research-analyst agent, providing detailed instructions about what research to conduct and what format the report should take
</example>

<example>
user: "Hello"
<commentary>
Since the user is greeting, use the greeting-responder agent to respond with a friendly joke
</commentary>
assistant: "I'm going to use the Task tool to launch with the greeting-responder agent"
</example>
`.trim();
}
1003
// System-prompt section describing when (and when not) to use the `task`
// tool; appended to the agent's system prompt by createSubAgentMiddleware.
// Runtime string — wording is intentional and must not be reformatted.
const TASK_SYSTEM_PROMPT = `## \`task\` (subagent spawner)

You have access to a \`task\` tool to launch short-lived subagents that handle isolated tasks. These agents are ephemeral — they live only for the duration of the task and return a single result.

When to use the task tool:
- When a task is complex and multi-step, and can be fully delegated in isolation
- When a task is independent of other tasks and can run in parallel
- When a task requires focused reasoning or heavy token/context usage that would bloat the orchestrator thread
- When sandboxing improves reliability (e.g. code execution, structured searches, data formatting)
- When you only care about the output of the subagent, and not the intermediate steps (ex. performing a lot of research and then returned a synthesized report, performing a series of computations or lookups to achieve a concise, relevant answer.)

Subagent lifecycle:
1. **Spawn** → Provide clear role, instructions, and expected output
2. **Run** → The subagent completes the task autonomously
3. **Return** → The subagent provides a single structured result
4. **Reconcile** → Incorporate or synthesize the result into the main thread

When NOT to use the task tool:
- If you need to see the intermediate reasoning or steps after the subagent has completed (the task tool hides them)
- If the task is trivial (a few tool calls or simple lookup)
- If delegating does not reduce token usage, complexity, or context switching
- If splitting would add latency without benefit

## Important Task Tool Usage Notes to Remember
- Whenever possible, parallelize the work that you do. This is true for both tool_calls, and for tasks. Whenever you have independent steps to complete - make tool_calls, or kick off tasks (subagents) in parallel to accomplish them faster. This saves time for the user, which is incredibly important.
- Remember to use the \`task\` tool to silo independent tasks within a multi-part objective.
- You should use the \`task\` tool whenever you have a complex task that will take multiple steps, and is independent from other tasks that the agent needs to complete. These agents are highly competent and efficient.`;
1030
/**
 * Filter state to exclude certain keys when passing to subagents.
 *
 * @param state - Arbitrary agent state object
 * @returns A shallow copy of `state` without the keys in EXCLUDED_STATE_KEYS
 */
function filterStateForSubagent(state) {
	// Keep every entry whose key is not in the exclusion list.
	const kept = Object.entries(state).filter(([name]) => !EXCLUDED_STATE_KEYS.includes(name));
	return Object.fromEntries(kept);
}
1038
/**
 * Create Command with filtered state update from subagent result.
 *
 * The subagent's last message becomes a ToolMessage reply to the original
 * `task` tool call; the rest of its state (minus excluded keys) is merged
 * into the parent state.
 *
 * @param result - Final state returned by the subagent's invoke()
 * @param toolCallId - Id of the `task` tool call being answered
 */
function returnCommandWithStateUpdate(result, toolCallId) {
	const tail = result.messages?.[(result.messages?.length ?? 0) - 1];
	const reply = new langchain.ToolMessage({
		content: tail?.content || "Task completed",
		tool_call_id: toolCallId,
		name: "task"
	});
	return new _langchain_langgraph.Command({
		update: {
			...filterStateForSubagent(result),
			messages: [reply]
		}
	});
}
1054
/**
 * Create subagent instances from specifications.
 *
 * Builds a name → agent registry plus the matching "- name: description"
 * lines used in the task tool's description. Specs with a `runnable` are
 * registered as-is; all others are compiled with createAgent, layering the
 * default middleware, optional per-agent middleware, and (if configured)
 * a human-in-the-loop interrupt middleware.
 */
function getSubagents(options) {
	const { defaultModel, defaultTools, defaultMiddleware, defaultInterruptOn, subagents, generalPurposeAgent } = options;
	const baseMiddleware = defaultMiddleware || [];
	const registry = {};
	const blurbs = [];
	if (generalPurposeAgent) {
		// The built-in agent mirrors the main agent's tools and middleware.
		const gpMiddleware = [...baseMiddleware];
		if (defaultInterruptOn) gpMiddleware.push((0, langchain.humanInTheLoopMiddleware)({ interruptOn: defaultInterruptOn }));
		registry["general-purpose"] = (0, langchain.createAgent)({
			model: defaultModel,
			systemPrompt: DEFAULT_SUBAGENT_PROMPT,
			tools: defaultTools,
			middleware: gpMiddleware
		});
		blurbs.push(`- general-purpose: ${DEFAULT_GENERAL_PURPOSE_DESCRIPTION}`);
	}
	for (const spec of subagents) {
		blurbs.push(`- ${spec.name}: ${spec.description}`);
		if ("runnable" in spec) {
			// Pre-built runnables are registered without modification.
			registry[spec.name] = spec.runnable;
			continue;
		}
		const layered = spec.middleware ? [...baseMiddleware, ...spec.middleware] : [...baseMiddleware];
		const interruptOn = spec.interruptOn || defaultInterruptOn;
		if (interruptOn) layered.push((0, langchain.humanInTheLoopMiddleware)({ interruptOn }));
		registry[spec.name] = (0, langchain.createAgent)({
			model: spec.model ?? defaultModel,
			systemPrompt: spec.systemPrompt,
			tools: spec.tools ?? defaultTools,
			middleware: layered
		});
	}
	return {
		agents: registry,
		descriptions: blurbs
	};
}
1093
/**
 * Create the task tool for invoking subagents.
 *
 * The tool validates the requested subagent type, forwards the (filtered)
 * parent state plus a HumanMessage carrying the task description, and wraps
 * the subagent's result in a Command that answers the tool call.
 */
function createTaskTool(options) {
	const { defaultModel, defaultTools, defaultMiddleware, defaultInterruptOn, subagents, generalPurposeAgent, taskDescription } = options;
	const { agents: graphs, descriptions: blurbs } = getSubagents({
		defaultModel,
		defaultTools,
		defaultMiddleware,
		defaultInterruptOn,
		subagents,
		generalPurposeAgent
	});
	const run = async (input, config) => {
		const { description, subagent_type } = input;
		if (!(subagent_type in graphs)) {
			const allowedTypes = Object.keys(graphs).map((k) => `\`${k}\``).join(", ");
			throw new Error(`Error: invoked agent of type ${subagent_type}, the only allowed types are ${allowedTypes}`);
		}
		const target = graphs[subagent_type];
		// The subagent starts from the parent state minus excluded channels,
		// with the task description as its only message.
		const childState = filterStateForSubagent((0, _langchain_langgraph.getCurrentTaskInput)());
		childState.messages = [new _langchain_core_messages.HumanMessage({ content: description })];
		const result = await target.invoke(childState, config);
		if (!config.toolCall?.id) throw new Error("Tool call ID is required for subagent invocation");
		return returnCommandWithStateUpdate(result, config.toolCall.id);
	};
	return (0, langchain.tool)(run, {
		name: "task",
		description: taskDescription ? taskDescription : getTaskToolDescription(blurbs),
		schema: zod_v3.z.object({
			description: zod_v3.z.string().describe("The task to execute with the selected agent"),
			subagent_type: zod_v3.z.string().describe(`Name of the agent to use. Available: ${Object.keys(graphs).join(", ")}`)
		})
	});
}
1127
/**
 * Create subagent middleware with task tool.
 *
 * Registers the `task` tool (built from the given subagent specs) and,
 * unless `systemPrompt` is null, appends the task-usage instructions to the
 * agent's system prompt on every model call.
 */
function createSubAgentMiddleware(options) {
	const { defaultModel, defaultTools = [], defaultMiddleware = null, defaultInterruptOn = null, subagents = [], systemPrompt = TASK_SYSTEM_PROMPT, generalPurposeAgent = true, taskDescription = null } = options;
	const taskTool = createTaskTool({
		defaultModel,
		defaultTools,
		defaultMiddleware,
		defaultInterruptOn,
		subagents,
		generalPurposeAgent,
		taskDescription
	});
	return (0, langchain.createMiddleware)({
		name: "subAgentMiddleware",
		tools: [taskTool],
		wrapModelCall: async (request, handler) => {
			// A null prompt disables prompt augmentation entirely.
			if (systemPrompt === null) return handler(request);
			const existing = request.systemPrompt || "";
			const combined = existing ? `${existing}\n\n${systemPrompt}` : systemPrompt;
			return handler({
				...request,
				systemPrompt: combined
			});
		}
	});
}
1156
+
1157
+ //#endregion
1158
+ //#region src/middleware/patch_tool_calls.ts
1159
/**
 * Create middleware that patches dangling tool calls in the messages history.
 *
 * When an AI message contains tool_calls but no ToolMessage in the history
 * answers a given tool_call_id, this middleware inserts a synthetic
 * ToolMessage (directly after the AI message) saying the call was cancelled.
 *
 * Improvements over the previous revision:
 * - Answered tool_call_ids are collected once into a Set, making the
 *   dangling check O(1) per call instead of rescanning the message list
 *   for every tool call (previously O(n^2) via `messages.slice(i).find`).
 * - When nothing is dangling, no state update is returned at all, instead
 *   of replacing the history with an identical copy via REMOVE_ALL_MESSAGES.
 *
 * @returns AgentMiddleware that patches dangling tool calls
 *
 * @example
 * ```typescript
 * import { createAgent } from "langchain";
 * import { createPatchToolCallsMiddleware } from "./middleware/patch_tool_calls";
 *
 * const agent = createAgent({
 *   model: "claude-sonnet-4-5-20250929",
 *   middleware: [createPatchToolCallsMiddleware()],
 * });
 * ```
 */
function createPatchToolCallsMiddleware() {
	return (0, langchain.createMiddleware)({
		name: "patchToolCallsMiddleware",
		beforeAgent: async (state) => {
			const messages = state.messages;
			if (!messages || messages.length === 0) return;
			// One pass: record every tool_call_id that already has a response.
			const answeredIds = new Set();
			for (const m of messages) if (langchain.ToolMessage.isInstance(m)) answeredIds.add(m.tool_call_id);
			const patchedMessages = [];
			let patched = false;
			for (const msg of messages) {
				patchedMessages.push(msg);
				if (!langchain.AIMessage.isInstance(msg) || msg.tool_calls == null) continue;
				for (const toolCall of msg.tool_calls) {
					if (answeredIds.has(toolCall.id)) continue;
					// Dangling call: synthesize a cancellation response so the
					// model provider sees a well-formed call/response pairing.
					patched = true;
					patchedMessages.push(new langchain.ToolMessage({
						content: `Tool call ${toolCall.name} with id ${toolCall.id} was cancelled - another message came in before it could be completed.`,
						name: toolCall.name,
						tool_call_id: toolCall.id
					}));
				}
			}
			// Nothing dangling: skip the rewrite rather than churn the state
			// with a remove-all + re-add of an identical message list.
			if (!patched) return;
			return { messages: [new _langchain_core_messages.RemoveMessage({ id: _langchain_langgraph.REMOVE_ALL_MESSAGES }), ...patchedMessages] };
		}
	});
}
1204
+
1205
+ //#endregion
1206
+ //#region src/backends/store.ts
1207
/**
 * Backend that stores files in LangGraph's BaseStore (persistent).
 *
 * Uses LangGraph's Store for persistent, cross-conversation storage.
 * Files are organized via namespaces and persist across all threads.
 *
 * The namespace can include an optional assistant_id for multi-agent isolation.
 */
var StoreBackend = class {
	// Bundle of { state, store, assistantId? } captured at construction time.
	stateAndStore;
	constructor(stateAndStore) {
		this.stateAndStore = stateAndStore;
	}
	/**
	 * Get the store instance.
	 *
	 * @returns BaseStore instance
	 * @throws Error if no store is available
	 */
	getStore() {
		const store = this.stateAndStore.store;
		if (!store) throw new Error("Store is required but not available in StateAndStore");
		return store;
	}
	/**
	 * Get the namespace for store operations.
	 *
	 * If an assistant_id is available in stateAndStore, return
	 * [assistant_id, "filesystem"] to provide per-assistant isolation.
	 * Otherwise return ["filesystem"].
	 */
	getNamespace() {
		const namespace = "filesystem";
		const assistantId = this.stateAndStore.assistantId;
		if (assistantId) return [assistantId, namespace];
		return [namespace];
	}
	/**
	 * Convert a store Item to FileData format.
	 *
	 * @param storeItem - The store Item containing file data
	 * @returns FileData object
	 * @throws Error if required fields are missing or have incorrect types
	 */
	convertStoreItemToFileData(storeItem) {
		const value = storeItem.value;
		// content must be an array of lines; both timestamps must be strings.
		if (!value.content || !Array.isArray(value.content) || typeof value.created_at !== "string" || typeof value.modified_at !== "string") throw new Error(`Store item does not contain valid FileData fields. Got keys: ${Object.keys(value).join(", ")}`);
		return {
			content: value.content,
			created_at: value.created_at,
			modified_at: value.modified_at
		};
	}
	/**
	 * Convert FileData to a value suitable for store.put().
	 *
	 * @param fileData - The FileData to convert
	 * @returns Object with content, created_at, and modified_at fields
	 */
	convertFileDataToStoreValue(fileData) {
		return {
			content: fileData.content,
			created_at: fileData.created_at,
			modified_at: fileData.modified_at
		};
	}
	/**
	 * Search store with automatic pagination to retrieve all results.
	 *
	 * @param store - The store to search
	 * @param namespace - Hierarchical path prefix to search within
	 * @param options - Optional query, filter, and page_size
	 * @returns List of all items matching the search criteria
	 */
	async searchStorePaginated(store, namespace, options = {}) {
		const { query, filter, pageSize = 100 } = options;
		const allItems = [];
		let offset = 0;
		while (true) {
			const pageItems = await store.search(namespace, {
				query,
				filter,
				limit: pageSize,
				offset
			});
			if (!pageItems || pageItems.length === 0) break;
			allItems.push(...pageItems);
			// A short page means the store has no further results.
			if (pageItems.length < pageSize) break;
			offset += pageSize;
		}
		return allItems;
	}
	/**
	 * List files and directories in the specified directory (non-recursive).
	 *
	 * @param path - Absolute path to directory
	 * @returns List of FileInfo objects for files and directories directly in the directory.
	 *          Directories have a trailing / in their path and is_dir=true.
	 */
	async lsInfo(path$4) {
		const store = this.getStore();
		const namespace = this.getNamespace();
		const items = await this.searchStorePaginated(store, namespace);
		const infos = [];
		const subdirs = /* @__PURE__ */ new Set();
		// Normalize to a trailing slash so the prefix match is directory-exact.
		const normalizedPath = path$4.endsWith("/") ? path$4 : path$4 + "/";
		for (const item of items) {
			const itemKey = String(item.key);
			if (!itemKey.startsWith(normalizedPath)) continue;
			const relative = itemKey.substring(normalizedPath.length);
			// A deeper slash means the key lives in a subdirectory; the Set
			// deduplicates so each subdirectory is listed once.
			if (relative.includes("/")) {
				const subdirName = relative.split("/")[0];
				subdirs.add(normalizedPath + subdirName + "/");
				continue;
			}
			try {
				const fd = this.convertStoreItemToFileData(item);
				const size = fd.content.join("\n").length;
				infos.push({
					path: itemKey,
					is_dir: false,
					size,
					modified_at: fd.modified_at
				});
			} catch {
				// Skip store items that are not valid FileData.
				continue;
			}
		}
		// Synthesize directory entries (no timestamp information available).
		for (const subdir of Array.from(subdirs).sort()) infos.push({
			path: subdir,
			is_dir: true,
			size: 0,
			modified_at: ""
		});
		infos.sort((a, b) => a.path.localeCompare(b.path));
		return infos;
	}
	/**
	 * Read file content with line numbers.
	 *
	 * @param filePath - Absolute file path
	 * @param offset - Line offset to start reading from (0-indexed)
	 * @param limit - Maximum number of lines to read
	 * @returns Formatted file content with line numbers, or error message
	 */
	async read(filePath, offset = 0, limit = 2e3) {
		try {
			return formatReadResponse(await this.readRaw(filePath), offset, limit);
		} catch (e) {
			// Tool-facing API: errors are reported as strings, not thrown.
			return `Error: ${e.message}`;
		}
	}
	/**
	 * Read file content as raw FileData.
	 *
	 * @param filePath - Absolute file path
	 * @returns Raw file content as FileData
	 */
	async readRaw(filePath) {
		const store = this.getStore();
		const namespace = this.getNamespace();
		const item = await store.get(namespace, filePath);
		if (!item) throw new Error(`File '${filePath}' not found`);
		return this.convertStoreItemToFileData(item);
	}
	/**
	 * Create a new file with content.
	 * Returns WriteResult. External storage sets filesUpdate=null.
	 */
	async write(filePath, content) {
		const store = this.getStore();
		const namespace = this.getNamespace();
		// Write is create-only: existing files must be edited, not overwritten.
		if (await store.get(namespace, filePath)) return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
		const fileData = createFileData(content);
		const storeValue = this.convertFileDataToStoreValue(fileData);
		await store.put(namespace, filePath, storeValue);
		return {
			path: filePath,
			filesUpdate: null
		};
	}
	/**
	 * Edit a file by replacing string occurrences.
	 * Returns EditResult. External storage sets filesUpdate=null.
	 */
	async edit(filePath, oldString, newString, replaceAll = false) {
		const store = this.getStore();
		const namespace = this.getNamespace();
		const item = await store.get(namespace, filePath);
		if (!item) return { error: `Error: File '${filePath}' not found` };
		try {
			const fileData = this.convertStoreItemToFileData(item);
			// performStringReplacement returns a string on failure and a
			// [newContent, occurrences] tuple on success.
			const result = performStringReplacement(fileDataToString(fileData), oldString, newString, replaceAll);
			if (typeof result === "string") return { error: result };
			const [newContent, occurrences] = result;
			const newFileData = updateFileData(fileData, newContent);
			const storeValue = this.convertFileDataToStoreValue(newFileData);
			await store.put(namespace, filePath, storeValue);
			return {
				path: filePath,
				filesUpdate: null,
				occurrences
			};
		} catch (e) {
			return { error: `Error: ${e.message}` };
		}
	}
	/**
	 * Structured search results or error string for invalid input.
	 */
	async grepRaw(pattern, path$4 = "/", glob = null) {
		const store = this.getStore();
		const namespace = this.getNamespace();
		// Load every file into memory; items that are not valid FileData are
		// skipped rather than failing the whole search.
		const items = await this.searchStorePaginated(store, namespace);
		const files = {};
		for (const item of items) try {
			files[item.key] = this.convertStoreItemToFileData(item);
		} catch {
			continue;
		}
		return grepMatchesFromFiles(files, pattern, path$4, glob);
	}
	/**
	 * Structured glob matching returning FileInfo objects.
	 */
	async globInfo(pattern, path$4 = "/") {
		const store = this.getStore();
		const namespace = this.getNamespace();
		const items = await this.searchStorePaginated(store, namespace);
		const files = {};
		for (const item of items) try {
			files[item.key] = this.convertStoreItemToFileData(item);
		} catch {
			continue;
		}
		// globSearchFiles returns a sentinel string when nothing matches,
		// otherwise a newline-separated list of matching paths.
		const result = globSearchFiles(files, pattern, path$4);
		if (result === "No files found") return [];
		const paths = result.split("\n");
		const infos = [];
		for (const p of paths) {
			const fd = files[p];
			const size = fd ? fd.content.join("\n").length : 0;
			infos.push({
				path: p,
				is_dir: false,
				size,
				modified_at: fd?.modified_at || ""
			});
		}
		return infos;
	}
	/**
	 * Upload multiple files.
	 *
	 * @param files - List of [path, content] tuples to upload
	 * @returns List of FileUploadResponse objects, one per input file
	 */
	async uploadFiles(files) {
		const store = this.getStore();
		const namespace = this.getNamespace();
		const responses = [];
		for (const [path$4, content] of files) try {
			// Bytes are decoded as UTF-8 text before storage.
			const fileData = createFileData(new TextDecoder().decode(content));
			const storeValue = this.convertFileDataToStoreValue(fileData);
			await store.put(namespace, path$4, storeValue);
			responses.push({
				path: path$4,
				error: null
			});
		} catch {
			responses.push({
				path: path$4,
				error: "invalid_path"
			});
		}
		return responses;
	}
	/**
	 * Download multiple files.
	 *
	 * @param paths - List of file paths to download
	 * @returns List of FileDownloadResponse objects, one per input path
	 */
	async downloadFiles(paths) {
		const store = this.getStore();
		const namespace = this.getNamespace();
		const responses = [];
		for (const path$4 of paths) try {
			const item = await store.get(namespace, path$4);
			if (!item) {
				responses.push({
					path: path$4,
					content: null,
					error: "file_not_found"
				});
				continue;
			}
			const contentStr = fileDataToString(this.convertStoreItemToFileData(item));
			// Content is returned as UTF-8 encoded bytes.
			const content = new TextEncoder().encode(contentStr);
			responses.push({
				path: path$4,
				content,
				error: null
			});
		} catch {
			// Conversion/store failures are reported as file_not_found as well.
			responses.push({
				path: path$4,
				content: null,
				error: "file_not_found"
			});
		}
		return responses;
	}
};
1521
+
1522
+ //#endregion
1523
+ //#region src/backends/filesystem.ts
1524
+ /**
1525
+ * FilesystemBackend: Read and write files directly from the filesystem.
1526
+ *
1527
+ * Security and search upgrades:
1528
+ * - Secure path resolution with root containment when in virtual_mode (sandboxed to cwd)
1529
+ * - Prevent symlink-following on file I/O using O_NOFOLLOW when available
1530
+ * - Ripgrep-powered grep with JSON parsing, plus regex fallback
1531
+ * and optional glob include filtering, while preserving virtual path behavior
1532
+ */
1533
+ const SUPPORTS_NOFOLLOW = node_fs.default.constants.O_NOFOLLOW !== void 0;
1534
+ /**
1535
+ * Backend that reads and writes files directly from the filesystem.
1536
+ *
1537
+ * Files are accessed using their actual filesystem paths. Relative paths are
1538
+ * resolved relative to the current working directory. Content is read/written
1539
+ * as plain text, and metadata (timestamps) are derived from filesystem stats.
1540
+ */
1541
+ var FilesystemBackend = class {
1542
+ cwd;
1543
+ virtualMode;
1544
+ maxFileSizeBytes;
1545
+ constructor(options = {}) {
1546
+ const { rootDir, virtualMode = false, maxFileSizeMb = 10 } = options;
1547
+ this.cwd = rootDir ? node_path.default.resolve(rootDir) : process.cwd();
1548
+ this.virtualMode = virtualMode;
1549
+ this.maxFileSizeBytes = maxFileSizeMb * 1024 * 1024;
1550
+ }
1551
+ /**
1552
+ * Resolve a file path with security checks.
1553
+ *
1554
+ * When virtualMode=true, treat incoming paths as virtual absolute paths under
1555
+ * this.cwd, disallow traversal (.., ~) and ensure resolved path stays within root.
1556
+ * When virtualMode=false, preserve legacy behavior: absolute paths are allowed
1557
+ * as-is; relative paths resolve under cwd.
1558
+ *
1559
+ * @param key - File path (absolute, relative, or virtual when virtualMode=true)
1560
+ * @returns Resolved absolute path string
1561
+ * @throws Error if path traversal detected or path outside root
1562
+ */
1563
+ resolvePath(key) {
1564
+ if (this.virtualMode) {
1565
+ const vpath = key.startsWith("/") ? key : "/" + key;
1566
+ if (vpath.includes("..") || vpath.startsWith("~")) throw new Error("Path traversal not allowed");
1567
+ const full = node_path.default.resolve(this.cwd, vpath.substring(1));
1568
+ const relative = node_path.default.relative(this.cwd, full);
1569
+ if (relative.startsWith("..") || node_path.default.isAbsolute(relative)) throw new Error(`Path: ${full} outside root directory: ${this.cwd}`);
1570
+ return full;
1571
+ }
1572
+ if (node_path.default.isAbsolute(key)) return key;
1573
+ return node_path.default.resolve(this.cwd, key);
1574
+ }
1575
+ /**
1576
+ * List files and directories in the specified directory (non-recursive).
1577
+ *
1578
+ * @param dirPath - Absolute directory path to list files from
1579
+ * @returns List of FileInfo objects for files and directories directly in the directory.
1580
+ * Directories have a trailing / in their path and is_dir=true.
1581
+ */
1582
+ async lsInfo(dirPath) {
1583
+ try {
1584
+ const resolvedPath = this.resolvePath(dirPath);
1585
+ if (!(await node_fs_promises.default.stat(resolvedPath)).isDirectory()) return [];
1586
+ const entries = await node_fs_promises.default.readdir(resolvedPath, { withFileTypes: true });
1587
+ const results = [];
1588
+ const cwdStr = this.cwd.endsWith(node_path.default.sep) ? this.cwd : this.cwd + node_path.default.sep;
1589
+ for (const entry of entries) {
1590
+ const fullPath = node_path.default.join(resolvedPath, entry.name);
1591
+ try {
1592
+ const entryStat = await node_fs_promises.default.stat(fullPath);
1593
+ const isFile = entryStat.isFile();
1594
+ const isDir = entryStat.isDirectory();
1595
+ if (!this.virtualMode) {
1596
+ if (isFile) results.push({
1597
+ path: fullPath,
1598
+ is_dir: false,
1599
+ size: entryStat.size,
1600
+ modified_at: entryStat.mtime.toISOString()
1601
+ });
1602
+ else if (isDir) results.push({
1603
+ path: fullPath + node_path.default.sep,
1604
+ is_dir: true,
1605
+ size: 0,
1606
+ modified_at: entryStat.mtime.toISOString()
1607
+ });
1608
+ } else {
1609
+ let relativePath;
1610
+ if (fullPath.startsWith(cwdStr)) relativePath = fullPath.substring(cwdStr.length);
1611
+ else if (fullPath.startsWith(this.cwd)) relativePath = fullPath.substring(this.cwd.length).replace(/^[/\\]/, "");
1612
+ else relativePath = fullPath;
1613
+ relativePath = relativePath.split(node_path.default.sep).join("/");
1614
+ const virtPath = "/" + relativePath;
1615
+ if (isFile) results.push({
1616
+ path: virtPath,
1617
+ is_dir: false,
1618
+ size: entryStat.size,
1619
+ modified_at: entryStat.mtime.toISOString()
1620
+ });
1621
+ else if (isDir) results.push({
1622
+ path: virtPath + "/",
1623
+ is_dir: true,
1624
+ size: 0,
1625
+ modified_at: entryStat.mtime.toISOString()
1626
+ });
1627
+ }
1628
+ } catch {
1629
+ continue;
1630
+ }
1631
+ }
1632
+ results.sort((a, b) => a.path.localeCompare(b.path));
1633
+ return results;
1634
+ } catch {
1635
+ return [];
1636
+ }
1637
+ }
1638
+ /**
1639
+ * Read file content with line numbers.
1640
+ *
1641
+ * @param filePath - Absolute or relative file path
1642
+ * @param offset - Line offset to start reading from (0-indexed)
1643
+ * @param limit - Maximum number of lines to read
1644
+ * @returns Formatted file content with line numbers, or error message
1645
+ */
1646
+ async read(filePath, offset = 0, limit = 2e3) {
1647
+ try {
1648
+ const resolvedPath = this.resolvePath(filePath);
1649
+ let content;
1650
+ if (SUPPORTS_NOFOLLOW) {
1651
+ if (!(await node_fs_promises.default.stat(resolvedPath)).isFile()) return `Error: File '${filePath}' not found`;
1652
+ const fd = await node_fs_promises.default.open(resolvedPath, node_fs.default.constants.O_RDONLY | node_fs.default.constants.O_NOFOLLOW);
1653
+ try {
1654
+ content = await fd.readFile({ encoding: "utf-8" });
1655
+ } finally {
1656
+ await fd.close();
1657
+ }
1658
+ } else {
1659
+ const stat = await node_fs_promises.default.lstat(resolvedPath);
1660
+ if (stat.isSymbolicLink()) return `Error: Symlinks are not allowed: ${filePath}`;
1661
+ if (!stat.isFile()) return `Error: File '${filePath}' not found`;
1662
+ content = await node_fs_promises.default.readFile(resolvedPath, "utf-8");
1663
+ }
1664
+ const emptyMsg = checkEmptyContent(content);
1665
+ if (emptyMsg) return emptyMsg;
1666
+ const lines = content.split("\n");
1667
+ const startIdx = offset;
1668
+ const endIdx = Math.min(startIdx + limit, lines.length);
1669
+ if (startIdx >= lines.length) return `Error: Line offset ${offset} exceeds file length (${lines.length} lines)`;
1670
+ return formatContentWithLineNumbers(lines.slice(startIdx, endIdx), startIdx + 1);
1671
+ } catch (e) {
1672
+ return `Error reading file '${filePath}': ${e.message}`;
1673
+ }
1674
+ }
1675
+ /**
1676
+ * Read file content as raw FileData.
1677
+ *
1678
+ * @param filePath - Absolute file path
1679
+ * @returns Raw file content as FileData
1680
+ */
1681
+ async readRaw(filePath) {
1682
+ const resolvedPath = this.resolvePath(filePath);
1683
+ let content;
1684
+ let stat;
1685
+ if (SUPPORTS_NOFOLLOW) {
1686
+ stat = await node_fs_promises.default.stat(resolvedPath);
1687
+ if (!stat.isFile()) throw new Error(`File '${filePath}' not found`);
1688
+ const fd = await node_fs_promises.default.open(resolvedPath, node_fs.default.constants.O_RDONLY | node_fs.default.constants.O_NOFOLLOW);
1689
+ try {
1690
+ content = await fd.readFile({ encoding: "utf-8" });
1691
+ } finally {
1692
+ await fd.close();
1693
+ }
1694
+ } else {
1695
+ stat = await node_fs_promises.default.lstat(resolvedPath);
1696
+ if (stat.isSymbolicLink()) throw new Error(`Symlinks are not allowed: ${filePath}`);
1697
+ if (!stat.isFile()) throw new Error(`File '${filePath}' not found`);
1698
+ content = await node_fs_promises.default.readFile(resolvedPath, "utf-8");
1699
+ }
1700
+ return {
1701
+ content: content.split("\n"),
1702
+ created_at: stat.ctime.toISOString(),
1703
+ modified_at: stat.mtime.toISOString()
1704
+ };
1705
+ }
1706
+ /**
1707
+ * Create a new file with content.
1708
+ * Returns WriteResult. External storage sets filesUpdate=null.
1709
+ */
1710
+ async write(filePath, content) {
1711
+ try {
1712
+ const resolvedPath = this.resolvePath(filePath);
1713
+ try {
1714
+ if ((await node_fs_promises.default.lstat(resolvedPath)).isSymbolicLink()) return { error: `Cannot write to ${filePath} because it is a symlink. Symlinks are not allowed.` };
1715
+ return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
1716
+ } catch {}
1717
+ await node_fs_promises.default.mkdir(node_path.default.dirname(resolvedPath), { recursive: true });
1718
+ if (SUPPORTS_NOFOLLOW) {
1719
+ const flags = node_fs.default.constants.O_WRONLY | node_fs.default.constants.O_CREAT | node_fs.default.constants.O_TRUNC | node_fs.default.constants.O_NOFOLLOW;
1720
+ const fd = await node_fs_promises.default.open(resolvedPath, flags, 420);
1721
+ try {
1722
+ await fd.writeFile(content, "utf-8");
1723
+ } finally {
1724
+ await fd.close();
1725
+ }
1726
+ } else await node_fs_promises.default.writeFile(resolvedPath, content, "utf-8");
1727
+ return {
1728
+ path: filePath,
1729
+ filesUpdate: null
1730
+ };
1731
+ } catch (e) {
1732
+ return { error: `Error writing file '${filePath}': ${e.message}` };
1733
+ }
1734
+ }
1735
+ /**
1736
+ * Edit a file by replacing string occurrences.
1737
+ * Returns EditResult. External storage sets filesUpdate=null.
1738
+ */
1739
+ async edit(filePath, oldString, newString, replaceAll = false) {
1740
+ try {
1741
+ const resolvedPath = this.resolvePath(filePath);
1742
+ let content;
1743
+ if (SUPPORTS_NOFOLLOW) {
1744
+ if (!(await node_fs_promises.default.stat(resolvedPath)).isFile()) return { error: `Error: File '${filePath}' not found` };
1745
+ const fd = await node_fs_promises.default.open(resolvedPath, node_fs.default.constants.O_RDONLY | node_fs.default.constants.O_NOFOLLOW);
1746
+ try {
1747
+ content = await fd.readFile({ encoding: "utf-8" });
1748
+ } finally {
1749
+ await fd.close();
1750
+ }
1751
+ } else {
1752
+ const stat = await node_fs_promises.default.lstat(resolvedPath);
1753
+ if (stat.isSymbolicLink()) return { error: `Error: Symlinks are not allowed: ${filePath}` };
1754
+ if (!stat.isFile()) return { error: `Error: File '${filePath}' not found` };
1755
+ content = await node_fs_promises.default.readFile(resolvedPath, "utf-8");
1756
+ }
1757
+ const result = performStringReplacement(content, oldString, newString, replaceAll);
1758
+ if (typeof result === "string") return { error: result };
1759
+ const [newContent, occurrences] = result;
1760
+ if (SUPPORTS_NOFOLLOW) {
1761
+ const flags = node_fs.default.constants.O_WRONLY | node_fs.default.constants.O_TRUNC | node_fs.default.constants.O_NOFOLLOW;
1762
+ const fd = await node_fs_promises.default.open(resolvedPath, flags);
1763
+ try {
1764
+ await fd.writeFile(newContent, "utf-8");
1765
+ } finally {
1766
+ await fd.close();
1767
+ }
1768
+ } else await node_fs_promises.default.writeFile(resolvedPath, newContent, "utf-8");
1769
+ return {
1770
+ path: filePath,
1771
+ filesUpdate: null,
1772
+ occurrences
1773
+ };
1774
+ } catch (e) {
1775
+ return { error: `Error editing file '${filePath}': ${e.message}` };
1776
+ }
1777
+ }
1778
+ /**
1779
+ * Structured search results or error string for invalid input.
1780
+ */
1781
+ async grepRaw(pattern, dirPath = "/", glob = null) {
1782
+ try {
1783
+ new RegExp(pattern);
1784
+ } catch (e) {
1785
+ return `Invalid regex pattern: ${e.message}`;
1786
+ }
1787
+ let baseFull;
1788
+ try {
1789
+ baseFull = this.resolvePath(dirPath || ".");
1790
+ } catch {
1791
+ return [];
1792
+ }
1793
+ try {
1794
+ await node_fs_promises.default.stat(baseFull);
1795
+ } catch {
1796
+ return [];
1797
+ }
1798
+ let results = await this.ripgrepSearch(pattern, baseFull, glob);
1799
+ if (results === null) results = await this.pythonSearch(pattern, baseFull, glob);
1800
+ const matches = [];
1801
+ for (const [fpath, items] of Object.entries(results)) for (const [lineNum, lineText] of items) matches.push({
1802
+ path: fpath,
1803
+ line: lineNum,
1804
+ text: lineText
1805
+ });
1806
+ return matches;
1807
+ }
1808
+ /**
1809
+ * Try to use ripgrep for fast searching.
1810
+ * Returns null if ripgrep is not available or fails.
1811
+ */
1812
+ async ripgrepSearch(pattern, baseFull, includeGlob) {
1813
+ return new Promise((resolve) => {
1814
+ const args = ["--json"];
1815
+ if (includeGlob) args.push("--glob", includeGlob);
1816
+ args.push("--", pattern, baseFull);
1817
+ const proc = (0, node_child_process.spawn)("rg", args, { timeout: 3e4 });
1818
+ const results = {};
1819
+ let output = "";
1820
+ proc.stdout.on("data", (data) => {
1821
+ output += data.toString();
1822
+ });
1823
+ proc.on("close", (code) => {
1824
+ if (code !== 0 && code !== 1) {
1825
+ resolve(null);
1826
+ return;
1827
+ }
1828
+ for (const line of output.split("\n")) {
1829
+ if (!line.trim()) continue;
1830
+ try {
1831
+ const data = JSON.parse(line);
1832
+ if (data.type !== "match") continue;
1833
+ const pdata = data.data || {};
1834
+ const ftext = pdata.path?.text;
1835
+ if (!ftext) continue;
1836
+ let virtPath;
1837
+ if (this.virtualMode) try {
1838
+ const resolved = node_path.default.resolve(ftext);
1839
+ const relative = node_path.default.relative(this.cwd, resolved);
1840
+ if (relative.startsWith("..")) continue;
1841
+ virtPath = "/" + relative.split(node_path.default.sep).join("/");
1842
+ } catch {
1843
+ continue;
1844
+ }
1845
+ else virtPath = ftext;
1846
+ const ln = pdata.line_number;
1847
+ const lt = pdata.lines?.text?.replace(/\n$/, "") || "";
1848
+ if (ln === void 0) continue;
1849
+ if (!results[virtPath]) results[virtPath] = [];
1850
+ results[virtPath].push([ln, lt]);
1851
+ } catch {
1852
+ continue;
1853
+ }
1854
+ }
1855
+ resolve(results);
1856
+ });
1857
+ proc.on("error", () => {
1858
+ resolve(null);
1859
+ });
1860
+ });
1861
+ }
1862
+ /**
1863
+ * Fallback regex search implementation.
1864
+ */
1865
+ async pythonSearch(pattern, baseFull, includeGlob) {
1866
+ let regex;
1867
+ try {
1868
+ regex = new RegExp(pattern);
1869
+ } catch {
1870
+ return {};
1871
+ }
1872
+ const results = {};
1873
+ const files = await (0, fast_glob.default)("**/*", {
1874
+ cwd: (await node_fs_promises.default.stat(baseFull)).isDirectory() ? baseFull : node_path.default.dirname(baseFull),
1875
+ absolute: true,
1876
+ onlyFiles: true,
1877
+ dot: true
1878
+ });
1879
+ for (const fp of files) try {
1880
+ if (includeGlob && !micromatch.default.isMatch(node_path.default.basename(fp), includeGlob)) continue;
1881
+ if ((await node_fs_promises.default.stat(fp)).size > this.maxFileSizeBytes) continue;
1882
+ const lines = (await node_fs_promises.default.readFile(fp, "utf-8")).split("\n");
1883
+ for (let i = 0; i < lines.length; i++) {
1884
+ const line = lines[i];
1885
+ if (regex.test(line)) {
1886
+ let virtPath;
1887
+ if (this.virtualMode) try {
1888
+ const relative = node_path.default.relative(this.cwd, fp);
1889
+ if (relative.startsWith("..")) continue;
1890
+ virtPath = "/" + relative.split(node_path.default.sep).join("/");
1891
+ } catch {
1892
+ continue;
1893
+ }
1894
+ else virtPath = fp;
1895
+ if (!results[virtPath]) results[virtPath] = [];
1896
+ results[virtPath].push([i + 1, line]);
1897
+ }
1898
+ }
1899
+ } catch {
1900
+ continue;
1901
+ }
1902
+ return results;
1903
+ }
1904
+ /**
1905
+ * Structured glob matching returning FileInfo objects.
1906
+ */
1907
+ async globInfo(pattern, searchPath = "/") {
1908
+ if (pattern.startsWith("/")) pattern = pattern.substring(1);
1909
+ const resolvedSearchPath = searchPath === "/" ? this.cwd : this.resolvePath(searchPath);
1910
+ try {
1911
+ if (!(await node_fs_promises.default.stat(resolvedSearchPath)).isDirectory()) return [];
1912
+ } catch {
1913
+ return [];
1914
+ }
1915
+ const results = [];
1916
+ try {
1917
+ const matches = await (0, fast_glob.default)(pattern, {
1918
+ cwd: resolvedSearchPath,
1919
+ absolute: true,
1920
+ onlyFiles: true,
1921
+ dot: true
1922
+ });
1923
+ for (const matchedPath of matches) try {
1924
+ const stat = await node_fs_promises.default.stat(matchedPath);
1925
+ if (!stat.isFile()) continue;
1926
+ const normalizedPath = matchedPath.split("/").join(node_path.default.sep);
1927
+ if (!this.virtualMode) results.push({
1928
+ path: normalizedPath,
1929
+ is_dir: false,
1930
+ size: stat.size,
1931
+ modified_at: stat.mtime.toISOString()
1932
+ });
1933
+ else {
1934
+ const cwdStr = this.cwd.endsWith(node_path.default.sep) ? this.cwd : this.cwd + node_path.default.sep;
1935
+ let relativePath;
1936
+ if (normalizedPath.startsWith(cwdStr)) relativePath = normalizedPath.substring(cwdStr.length);
1937
+ else if (normalizedPath.startsWith(this.cwd)) relativePath = normalizedPath.substring(this.cwd.length).replace(/^[/\\]/, "");
1938
+ else relativePath = normalizedPath;
1939
+ relativePath = relativePath.split(node_path.default.sep).join("/");
1940
+ const virt = "/" + relativePath;
1941
+ results.push({
1942
+ path: virt,
1943
+ is_dir: false,
1944
+ size: stat.size,
1945
+ modified_at: stat.mtime.toISOString()
1946
+ });
1947
+ }
1948
+ } catch {
1949
+ continue;
1950
+ }
1951
+ } catch {}
1952
+ results.sort((a, b) => a.path.localeCompare(b.path));
1953
+ return results;
1954
+ }
1955
+ /**
1956
+ * Upload multiple files to the filesystem.
1957
+ *
1958
+ * @param files - List of [path, content] tuples to upload
1959
+ * @returns List of FileUploadResponse objects, one per input file
1960
+ */
1961
+ async uploadFiles(files) {
1962
+ const responses = [];
1963
+ for (const [filePath, content] of files) try {
1964
+ const resolvedPath = this.resolvePath(filePath);
1965
+ await node_fs_promises.default.mkdir(node_path.default.dirname(resolvedPath), { recursive: true });
1966
+ await node_fs_promises.default.writeFile(resolvedPath, content);
1967
+ responses.push({
1968
+ path: filePath,
1969
+ error: null
1970
+ });
1971
+ } catch (e) {
1972
+ if (e.code === "ENOENT") responses.push({
1973
+ path: filePath,
1974
+ error: "file_not_found"
1975
+ });
1976
+ else if (e.code === "EACCES") responses.push({
1977
+ path: filePath,
1978
+ error: "permission_denied"
1979
+ });
1980
+ else if (e.code === "EISDIR") responses.push({
1981
+ path: filePath,
1982
+ error: "is_directory"
1983
+ });
1984
+ else responses.push({
1985
+ path: filePath,
1986
+ error: "invalid_path"
1987
+ });
1988
+ }
1989
+ return responses;
1990
+ }
1991
+ /**
1992
+ * Download multiple files from the filesystem.
1993
+ *
1994
+ * @param paths - List of file paths to download
1995
+ * @returns List of FileDownloadResponse objects, one per input path
1996
+ */
1997
+ async downloadFiles(paths) {
1998
+ const responses = [];
1999
+ for (const filePath of paths) try {
2000
+ const resolvedPath = this.resolvePath(filePath);
2001
+ const content = await node_fs_promises.default.readFile(resolvedPath);
2002
+ responses.push({
2003
+ path: filePath,
2004
+ content,
2005
+ error: null
2006
+ });
2007
+ } catch (e) {
2008
+ if (e.code === "ENOENT") responses.push({
2009
+ path: filePath,
2010
+ content: null,
2011
+ error: "file_not_found"
2012
+ });
2013
+ else if (e.code === "EACCES") responses.push({
2014
+ path: filePath,
2015
+ content: null,
2016
+ error: "permission_denied"
2017
+ });
2018
+ else if (e.code === "EISDIR") responses.push({
2019
+ path: filePath,
2020
+ content: null,
2021
+ error: "is_directory"
2022
+ });
2023
+ else responses.push({
2024
+ path: filePath,
2025
+ content: null,
2026
+ error: "invalid_path"
2027
+ });
2028
+ }
2029
+ return responses;
2030
+ }
2031
+ };
2032
+
2033
+ //#endregion
2034
+ //#region src/backends/composite.ts
2035
+ /**
2036
+ * Backend that routes file operations to different backends based on path prefix.
2037
+ *
2038
+ * This enables hybrid storage strategies like:
2039
+ * - `/memories/` → StoreBackend (persistent, cross-thread)
2040
+ * - Everything else → StateBackend (ephemeral, per-thread)
2041
+ *
2042
+ * The CompositeBackend handles path prefix stripping/re-adding transparently.
2043
+ */
2044
+ var CompositeBackend = class {
2045
+ default;
2046
+ routes;
2047
+ sortedRoutes;
2048
+ constructor(defaultBackend, routes) {
2049
+ this.default = defaultBackend;
2050
+ this.routes = routes;
2051
+ this.sortedRoutes = Object.entries(routes).sort((a, b) => b[0].length - a[0].length);
2052
+ }
2053
+ /**
2054
+ * Determine which backend handles this key and strip prefix.
2055
+ *
2056
+ * @param key - Original file path
2057
+ * @returns Tuple of [backend, stripped_key] where stripped_key has the route
2058
+ * prefix removed (but keeps leading slash).
2059
+ */
2060
+ getBackendAndKey(key) {
2061
+ for (const [prefix, backend] of this.sortedRoutes) if (key.startsWith(prefix)) {
2062
+ const suffix = key.substring(prefix.length);
2063
+ return [backend, suffix ? "/" + suffix : "/"];
2064
+ }
2065
+ return [this.default, key];
2066
+ }
2067
+ /**
2068
+ * List files and directories in the specified directory (non-recursive).
2069
+ *
2070
+ * @param path - Absolute path to directory
2071
+ * @returns List of FileInfo objects with route prefixes added, for files and directories
2072
+ * directly in the directory. Directories have a trailing / in their path and is_dir=true.
2073
+ */
2074
+ async lsInfo(path$4) {
2075
+ for (const [routePrefix, backend] of this.sortedRoutes) if (path$4.startsWith(routePrefix.replace(/\/$/, ""))) {
2076
+ const suffix = path$4.substring(routePrefix.length);
2077
+ const searchPath = suffix ? "/" + suffix : "/";
2078
+ const infos = await backend.lsInfo(searchPath);
2079
+ const prefixed = [];
2080
+ for (const fi of infos) prefixed.push({
2081
+ ...fi,
2082
+ path: routePrefix.slice(0, -1) + fi.path
2083
+ });
2084
+ return prefixed;
2085
+ }
2086
+ if (path$4 === "/") {
2087
+ const results = [];
2088
+ const defaultInfos = await this.default.lsInfo(path$4);
2089
+ results.push(...defaultInfos);
2090
+ for (const [routePrefix] of this.sortedRoutes) results.push({
2091
+ path: routePrefix,
2092
+ is_dir: true,
2093
+ size: 0,
2094
+ modified_at: ""
2095
+ });
2096
+ results.sort((a, b) => a.path.localeCompare(b.path));
2097
+ return results;
2098
+ }
2099
+ return await this.default.lsInfo(path$4);
2100
+ }
2101
+ /**
2102
+ * Read file content, routing to appropriate backend.
2103
+ *
2104
+ * @param filePath - Absolute file path
2105
+ * @param offset - Line offset to start reading from (0-indexed)
2106
+ * @param limit - Maximum number of lines to read
2107
+ * @returns Formatted file content with line numbers, or error message
2108
+ */
2109
+ async read(filePath, offset = 0, limit = 2e3) {
2110
+ const [backend, strippedKey] = this.getBackendAndKey(filePath);
2111
+ return await backend.read(strippedKey, offset, limit);
2112
+ }
2113
+ /**
2114
+ * Read file content as raw FileData.
2115
+ *
2116
+ * @param filePath - Absolute file path
2117
+ * @returns Raw file content as FileData
2118
+ */
2119
+ async readRaw(filePath) {
2120
+ const [backend, strippedKey] = this.getBackendAndKey(filePath);
2121
+ return await backend.readRaw(strippedKey);
2122
+ }
2123
+ /**
2124
+ * Structured search results or error string for invalid input.
2125
+ */
2126
+ async grepRaw(pattern, path$4 = "/", glob = null) {
2127
+ for (const [routePrefix, backend] of this.sortedRoutes) if (path$4.startsWith(routePrefix.replace(/\/$/, ""))) {
2128
+ const searchPath = path$4.substring(routePrefix.length - 1);
2129
+ const raw = await backend.grepRaw(pattern, searchPath || "/", glob);
2130
+ if (typeof raw === "string") return raw;
2131
+ return raw.map((m) => ({
2132
+ ...m,
2133
+ path: routePrefix.slice(0, -1) + m.path
2134
+ }));
2135
+ }
2136
+ const allMatches = [];
2137
+ const rawDefault = await this.default.grepRaw(pattern, path$4, glob);
2138
+ if (typeof rawDefault === "string") return rawDefault;
2139
+ allMatches.push(...rawDefault);
2140
+ for (const [routePrefix, backend] of Object.entries(this.routes)) {
2141
+ const raw = await backend.grepRaw(pattern, "/", glob);
2142
+ if (typeof raw === "string") return raw;
2143
+ allMatches.push(...raw.map((m) => ({
2144
+ ...m,
2145
+ path: routePrefix.slice(0, -1) + m.path
2146
+ })));
2147
+ }
2148
+ return allMatches;
2149
+ }
2150
+ /**
2151
+ * Structured glob matching returning FileInfo objects.
2152
+ */
2153
+ async globInfo(pattern, path$4 = "/") {
2154
+ const results = [];
2155
+ for (const [routePrefix, backend] of this.sortedRoutes) if (path$4.startsWith(routePrefix.replace(/\/$/, ""))) {
2156
+ const searchPath = path$4.substring(routePrefix.length - 1);
2157
+ return (await backend.globInfo(pattern, searchPath || "/")).map((fi) => ({
2158
+ ...fi,
2159
+ path: routePrefix.slice(0, -1) + fi.path
2160
+ }));
2161
+ }
2162
+ const defaultInfos = await this.default.globInfo(pattern, path$4);
2163
+ results.push(...defaultInfos);
2164
+ for (const [routePrefix, backend] of Object.entries(this.routes)) {
2165
+ const infos = await backend.globInfo(pattern, "/");
2166
+ results.push(...infos.map((fi) => ({
2167
+ ...fi,
2168
+ path: routePrefix.slice(0, -1) + fi.path
2169
+ })));
2170
+ }
2171
+ results.sort((a, b) => a.path.localeCompare(b.path));
2172
+ return results;
2173
+ }
2174
+ /**
2175
+ * Create a new file, routing to appropriate backend.
2176
+ *
2177
+ * @param filePath - Absolute file path
2178
+ * @param content - File content as string
2179
+ * @returns WriteResult with path or error
2180
+ */
2181
+ async write(filePath, content) {
2182
+ const [backend, strippedKey] = this.getBackendAndKey(filePath);
2183
+ return await backend.write(strippedKey, content);
2184
+ }
2185
+ /**
2186
+ * Edit a file, routing to appropriate backend.
2187
+ *
2188
+ * @param filePath - Absolute file path
2189
+ * @param oldString - String to find and replace
2190
+ * @param newString - Replacement string
2191
+ * @param replaceAll - If true, replace all occurrences
2192
+ * @returns EditResult with path, occurrences, or error
2193
+ */
2194
+ async edit(filePath, oldString, newString, replaceAll = false) {
2195
+ const [backend, strippedKey] = this.getBackendAndKey(filePath);
2196
+ return await backend.edit(strippedKey, oldString, newString, replaceAll);
2197
+ }
2198
+ /**
2199
+ * Execute a command via the default backend.
2200
+ * Execution is not path-specific, so it always delegates to the default backend.
2201
+ *
2202
+ * @param command - Full shell command string to execute
2203
+ * @returns ExecuteResponse with combined output, exit code, and truncation flag
2204
+ * @throws Error if the default backend doesn't support command execution
2205
+ */
2206
+ execute(command) {
2207
+ if (!isSandboxBackend(this.default)) throw new Error("Default backend doesn't support command execution (SandboxBackendProtocol). To enable execution, provide a default backend that implements SandboxBackendProtocol.");
2208
+ return Promise.resolve(this.default.execute(command));
2209
+ }
2210
+ /**
2211
+ * Upload multiple files, batching by backend for efficiency.
2212
+ *
2213
+ * @param files - List of [path, content] tuples to upload
2214
+ * @returns List of FileUploadResponse objects, one per input file
2215
+ */
2216
+ async uploadFiles(files) {
2217
+ const results = new Array(files.length).fill(null);
2218
+ const batchesByBackend = /* @__PURE__ */ new Map();
2219
+ for (let idx = 0; idx < files.length; idx++) {
2220
+ const [path$4, content] = files[idx];
2221
+ const [backend, strippedPath] = this.getBackendAndKey(path$4);
2222
+ if (!batchesByBackend.has(backend)) batchesByBackend.set(backend, []);
2223
+ batchesByBackend.get(backend).push({
2224
+ idx,
2225
+ path: strippedPath,
2226
+ content
2227
+ });
2228
+ }
2229
+ for (const [backend, batch] of batchesByBackend) {
2230
+ const batchFiles = batch.map((b) => [b.path, b.content]);
2231
+ const batchResponses = await backend.uploadFiles(batchFiles);
2232
+ for (let i = 0; i < batch.length; i++) {
2233
+ const originalIdx = batch[i].idx;
2234
+ results[originalIdx] = {
2235
+ path: files[originalIdx][0],
2236
+ error: batchResponses[i]?.error ?? null
2237
+ };
2238
+ }
2239
+ }
2240
+ return results;
2241
+ }
2242
+ /**
2243
+ * Download multiple files, batching by backend for efficiency.
2244
+ *
2245
+ * @param paths - List of file paths to download
2246
+ * @returns List of FileDownloadResponse objects, one per input path
2247
+ */
2248
+ async downloadFiles(paths) {
2249
+ const results = new Array(paths.length).fill(null);
2250
+ const batchesByBackend = /* @__PURE__ */ new Map();
2251
+ for (let idx = 0; idx < paths.length; idx++) {
2252
+ const path$4 = paths[idx];
2253
+ const [backend, strippedPath] = this.getBackendAndKey(path$4);
2254
+ if (!batchesByBackend.has(backend)) batchesByBackend.set(backend, []);
2255
+ batchesByBackend.get(backend).push({
2256
+ idx,
2257
+ path: strippedPath
2258
+ });
2259
+ }
2260
+ for (const [backend, batch] of batchesByBackend) {
2261
+ const batchPaths = batch.map((b) => b.path);
2262
+ const batchResponses = await backend.downloadFiles(batchPaths);
2263
+ for (let i = 0; i < batch.length; i++) {
2264
+ const originalIdx = batch[i].idx;
2265
+ results[originalIdx] = {
2266
+ path: paths[originalIdx],
2267
+ content: batchResponses[i]?.content ?? null,
2268
+ error: batchResponses[i]?.error ?? null
2269
+ };
2270
+ }
2271
+ }
2272
+ return results;
2273
+ }
2274
+ };
2275
+
2276
+ //#endregion
2277
+ //#region src/backends/sandbox.ts
2278
/**
 * Node.js command template for glob operations.
 * Uses web-standard atob() for base64 decoding.
 *
 * Builds a `node -e` one-liner that chdir()s into `searchPath`, walks it
 * recursively, and prints one JSON object per matching FILE
 * ({path, size, mtime, isDir: false}) — directories are descended into but
 * never reported. Output is parsed line-by-line by BaseSandbox.globInfo().
 * Failures (missing path, unreadable dirs) are silent: the command just
 * prints nothing.
 *
 * Both arguments travel as base64 so they never need shell escaping inside
 * the double-quoted command string.
 *
 * NOTE(review): btoa() only accepts code points <= U+00FF, so a non-Latin-1
 * path or pattern throws here before the command is even built — confirm
 * that is acceptable for sandbox use.
 * NOTE(review): the embedded globMatch() only translates `**`, `*` and `?`;
 * other regex metacharacters pass through unescaped, e.g. `*.js` becomes
 * /^[^/]*.js$/ where the `.` matches any character.
 * NOTE(review): walkDir's `results` parameter is never populated — matches
 * are printed directly; the parameter is vestigial.
 *
 * @param searchPath - Absolute directory to search (becomes the script cwd)
 * @param pattern - Glob pattern matched against paths relative to searchPath
 * @returns Shell command string for BaseSandbox.execute()
 */
function buildGlobCommand(searchPath, pattern) {
  return `node -e "
const fs = require('fs');
const path = require('path');

const searchPath = atob('${btoa(searchPath)}');
const pattern = atob('${btoa(pattern)}');

function globMatch(relativePath, pattern) {
  const regexPattern = pattern
    .replace(/\\*\\*/g, '<<<GLOBSTAR>>>')
    .replace(/\\*/g, '[^/]*')
    .replace(/\\?/g, '.')
    .replace(/<<<GLOBSTAR>>>/g, '.*');
  return new RegExp('^' + regexPattern + '$').test(relativePath);
}

function walkDir(dir, baseDir, results) {
  try {
    const entries = fs.readdirSync(dir, { withFileTypes: true });
    for (const entry of entries) {
      const fullPath = path.join(dir, entry.name);
      const relativePath = path.relative(baseDir, fullPath);
      if (entry.isDirectory()) {
        walkDir(fullPath, baseDir, results);
      } else if (globMatch(relativePath, pattern)) {
        const stat = fs.statSync(fullPath);
        console.log(JSON.stringify({
          path: relativePath,
          size: stat.size,
          mtime: stat.mtimeMs,
          isDir: false
        }));
      }
    }
  } catch (e) {
    // Silent failure for non-existent paths
  }
}

try {
  process.chdir(searchPath);
  walkDir('.', '.', []);
} catch (e) {
  // Silent failure for non-existent paths
}
"`;
}
2330
/**
 * Node.js command template for listing directory contents.
 *
 * Builds a `node -e` one-liner that lists the IMMEDIATE entries of
 * `dirPath` (non-recursive) and prints one JSON object per entry
 * ({path, size, mtime, isDir}); directory paths get a trailing '/'.
 * Consumed by BaseSandbox.lsInfo(). On any fs error the command writes
 * the message to stderr and exits 1.
 *
 * NOTE(review): btoa() only accepts code points <= U+00FF, so a
 * non-Latin-1 dirPath throws here before the command is built.
 *
 * @param dirPath - Absolute directory path to list
 * @returns Shell command string for BaseSandbox.execute()
 */
function buildLsCommand(dirPath) {
  return `node -e "
const fs = require('fs');
const path = require('path');

const dirPath = atob('${btoa(dirPath)}');

try {
  const entries = fs.readdirSync(dirPath, { withFileTypes: true });
  for (const entry of entries) {
    const fullPath = path.join(dirPath, entry.name);
    const stat = fs.statSync(fullPath);
    console.log(JSON.stringify({
      path: entry.isDirectory() ? fullPath + '/' : fullPath,
      size: stat.size,
      mtime: stat.mtimeMs,
      isDir: entry.isDirectory()
    }));
  }
} catch (e) {
  console.error('Error: ' + e.message);
  process.exit(1);
}
"`;
}
2358
/**
 * Node.js command template for reading files.
 *
 * Builds a `node -e` one-liner that prints a slice of a file with
 * `cat -n`-style 1-based line numbers ("NNNNNN\t<line>"). The path travels
 * as base64 (web-standard btoa/atob) so it needs no shell escaping;
 * `offset`/`limit` are sanitized host-side and interpolated as plain
 * integer literals so no user-controlled code reaches the template.
 *
 * Fix: previously an out-of-range limit (non-finite, <= 0, or
 * >= Number.MAX_SAFE_INTEGER) collapsed to 0, making
 * `lines.slice(offset, offset + 0)` select nothing — readRaw(), which
 * passes Number.MAX_SAFE_INTEGER to mean "everything", always got empty
 * output. A non-positive sanitized limit now means "read to end of file".
 *
 * NOTE(review): btoa() only accepts code points <= U+00FF, so non-Latin-1
 * file paths are not representable here — confirm acceptable.
 *
 * @param filePath - Absolute path of the file to read
 * @param offset - 0-indexed line offset to start from (invalid values -> 0)
 * @param limit - Maximum number of lines (invalid values -> read to end)
 * @returns Shell command string for BaseSandbox.execute()
 */
function buildReadCommand(filePath, offset, limit) {
  return `node -e "
const fs = require('fs');

const filePath = atob('${btoa(filePath)}');
const offset = ${Number.isFinite(offset) && offset > 0 ? Math.floor(offset) : 0};
const limit = ${Number.isFinite(limit) && limit > 0 && limit < Number.MAX_SAFE_INTEGER ? Math.floor(limit) : 0};

if (!fs.existsSync(filePath)) {
console.log('Error: File not found');
process.exit(1);
}

const stat = fs.statSync(filePath);
if (stat.size === 0) {
console.log('System reminder: File exists but has empty contents');
process.exit(0);
}

const content = fs.readFileSync(filePath, 'utf-8');
const lines = content.split('\\n');
const selected = limit > 0 ? lines.slice(offset, offset + limit) : lines.slice(offset);

for (let i = 0; i < selected.length; i++) {
const lineNum = offset + i + 1;
console.log(String(lineNum).padStart(6) + '\\t' + selected[i]);
}
"`;
}
2390
/**
 * Node.js command template for writing files.
 *
 * Builds a `node -e` one-liner that creates a NEW file (create-only: the
 * command exits 1 without touching the file if it already exists) after
 * recursively creating the parent directory, then prints 'OK'.
 *
 * Fix: the previous implementation shipped path and content through
 * btoa()/atob(), which only handle Latin-1 — any content containing a code
 * point above U+00FF (emoji, CJK, curly quotes, …) made btoa() throw
 * before the command was even built. Buffer-based base64 with an explicit
 * utf-8 round-trip supports full Unicode and produces byte-identical
 * base64 for ASCII input, so existing callers are unaffected.
 *
 * @param filePath - Absolute destination path (parent dirs are created)
 * @param content - Full file content to write (utf-8)
 * @returns Shell command string for BaseSandbox.execute()
 */
function buildWriteCommand(filePath, content) {
  const filePathB64 = Buffer.from(filePath, "utf-8").toString("base64");
  const contentB64 = Buffer.from(content, "utf-8").toString("base64");
  return `node -e "
const fs = require('fs');
const path = require('path');

const filePath = Buffer.from('${filePathB64}', 'base64').toString('utf-8');
const content = Buffer.from('${contentB64}', 'base64').toString('utf-8');

if (fs.existsSync(filePath)) {
console.error('Error: File already exists');
process.exit(1);
}

const parentDir = path.dirname(filePath) || '.';
fs.mkdirSync(parentDir, { recursive: true });

fs.writeFileSync(filePath, content, 'utf-8');
console.log('OK');
"`;
}
2413
/**
 * Node.js command template for editing files.
 *
 * Builds a `node -e` one-liner that replaces occurrences of `oldStr` with
 * `newStr` in `filePath` and prints the occurrence count on success.
 *
 * Exit-code contract (consumed by BaseSandbox.edit()):
 *   0 - success (stdout carries the replacement count)
 *   1 - oldStr not found in the file
 *   2 - multiple occurrences found but replaceAll is false
 *   3 - file could not be read
 *
 * Fix: the previous implementation shipped path/oldStr/newStr through
 * btoa()/atob(), which only handle Latin-1 — editing with any string
 * containing a code point above U+00FF made btoa() throw. Buffer-based
 * base64 with an explicit utf-8 round-trip supports full Unicode and
 * produces byte-identical base64 for ASCII input (backward compatible).
 *
 * @param filePath - Absolute path of the file to edit
 * @param oldStr - Exact substring to replace
 * @param newStr - Replacement text
 * @param replaceAll - When true, replace every occurrence
 * @returns Shell command string for BaseSandbox.execute()
 */
function buildEditCommand(filePath, oldStr, newStr, replaceAll) {
  const filePathB64 = Buffer.from(filePath, "utf-8").toString("base64");
  const oldStrB64 = Buffer.from(oldStr, "utf-8").toString("base64");
  const newStrB64 = Buffer.from(newStr, "utf-8").toString("base64");
  return `node -e "
const fs = require('fs');

const filePath = Buffer.from('${filePathB64}', 'base64').toString('utf-8');
const oldStr = Buffer.from('${oldStrB64}', 'base64').toString('utf-8');
const newStr = Buffer.from('${newStrB64}', 'base64').toString('utf-8');
const replaceAll = ${Boolean(replaceAll)};

let text;
try {
text = fs.readFileSync(filePath, 'utf-8');
} catch (e) {
process.exit(3);
}

const count = text.split(oldStr).length - 1;

if (count === 0) {
process.exit(1);
}
if (count > 1 && !replaceAll) {
process.exit(2);
}

const result = text.split(oldStr).join(newStr);
fs.writeFileSync(filePath, result, 'utf-8');
console.log(count);
"`;
}
2446
/**
 * Node.js command template for grep operations.
 *
 * Builds a `node -e` one-liner that walks `searchPath` recursively,
 * optionally filters files by a glob applied to their path RELATIVE to
 * `searchPath`, tests `new RegExp(pattern)` against every line, and prints
 * one JSON object per hit ({path, line, text}, line is 1-based, path is
 * absolute). Output is parsed line-by-line by BaseSandbox.grepRaw().
 * Unreadable files and directories are skipped silently; an invalid regex
 * exits 1 after writing "Invalid regex: ..." to stderr.
 *
 * NOTE(review): grepRaw() searches result.output for "Invalid regex:",
 * but the script writes that message via console.error — this only lines
 * up if execute() merges stderr into output; verify implementations.
 * NOTE(review): btoa() only accepts code points <= U+00FF, so non-Latin-1
 * patterns/paths/globs throw here before the command is built.
 * NOTE(review): like buildGlobCommand, the embedded globMatch() leaves
 * regex metacharacters other than `**`/`*`/`?` unescaped.
 *
 * @param pattern - JavaScript regular-expression source (no flags)
 * @param searchPath - Absolute directory to search
 * @param globPattern - Optional glob filter; null/"" means all files
 * @returns Shell command string for BaseSandbox.execute()
 */
function buildGrepCommand(pattern, searchPath, globPattern) {
  const patternB64 = btoa(pattern);
  const pathB64 = btoa(searchPath);
  const globB64 = globPattern ? btoa(globPattern) : "";
  return `node -e "
const fs = require('fs');
const path = require('path');

const pattern = atob('${patternB64}');
const searchPath = atob('${pathB64}');
const globPattern = ${globPattern ? `atob('${globB64}')` : "null"};

let regex;
try {
regex = new RegExp(pattern);
} catch (e) {
console.error('Invalid regex: ' + e.message);
process.exit(1);
}

function globMatch(filePath, pattern) {
if (!pattern) return true;
const regexPattern = pattern
.replace(/\\*\\*/g, '<<<GLOBSTAR>>>')
.replace(/\\*/g, '[^/]*')
.replace(/\\?/g, '.')
.replace(/<<<GLOBSTAR>>>/g, '.*');
return new RegExp('^' + regexPattern + '$').test(filePath);
}

function walkDir(dir, results) {
try {
const entries = fs.readdirSync(dir, { withFileTypes: true });
for (const entry of entries) {
const fullPath = path.join(dir, entry.name);
if (entry.isDirectory()) {
walkDir(fullPath, results);
} else {
const relativePath = path.relative(searchPath, fullPath);
if (globMatch(relativePath, globPattern)) {
try {
const content = fs.readFileSync(fullPath, 'utf-8');
const lines = content.split('\\n');
for (let i = 0; i < lines.length; i++) {
if (regex.test(lines[i])) {
console.log(JSON.stringify({
path: fullPath,
line: i + 1,
text: lines[i]
}));
}
}
} catch (e) {
// Skip unreadable files
}
}
}
}
} catch (e) {
// Skip unreadable directories
}
}

try {
walkDir(searchPath, []);
} catch (e) {
// Silent failure
}
"`;
}
2519
/**
 * Base sandbox implementation with execute() as the only abstract method.
 *
 * This class provides default implementations for all SandboxBackendProtocol
 * methods using shell commands executed via execute(). Concrete implementations
 * only need to implement the execute() method.
 *
 * Requires Node.js 20+ on the sandbox host.
 *
 * Every method follows the same shape: build a `node -e` command string via
 * the build*Command helpers above, run it through execute(), then parse the
 * command's line-oriented JSON (or numbered-line) output. Malformed output
 * lines are skipped silently.
 */
var BaseSandbox = class {
  /**
   * List files and directories in the specified directory (non-recursive).
   *
   * @param path - Absolute path to directory
   * @returns List of FileInfo objects for files and directories directly in
   *          the directory; [] when the listing command exits non-zero.
   */
  async lsInfo(path$4) {
    const command = buildLsCommand(path$4);
    const result = await this.execute(command);
    if (result.exitCode !== 0) return [];
    const infos = [];
    // One JSON object per output line; blank lines dropped.
    const lines = result.output.trim().split("\n").filter(Boolean);
    for (const line of lines) try {
      const parsed = JSON.parse(line);
      infos.push({
        path: parsed.path,
        is_dir: parsed.isDir,
        size: parsed.size,
        // mtime is epoch millis from the remote stat; falsy -> omitted
        modified_at: parsed.mtime ? new Date(parsed.mtime).toISOString() : void 0
      });
    } catch {}
    return infos;
  }
  /**
   * Read file content with line numbers.
   *
   * @param filePath - Absolute file path
   * @param offset - Line offset to start reading from (0-indexed)
   * @param limit - Maximum number of lines to read
   * @returns Formatted file content with line numbers, or error message.
   *          Any non-zero exit is reported as "not found".
   */
  async read(filePath, offset = 0, limit = 2e3) {
    const command = buildReadCommand(filePath, offset, limit);
    const result = await this.execute(command);
    if (result.exitCode !== 0) return `Error: File '${filePath}' not found`;
    return result.output;
  }
  /**
   * Read file content as raw FileData.
   *
   * Runs the numbered-line read command over the whole file, then strips
   * the "NNNNNN\t" prefix from each output line.
   *
   * NOTE(review): passes Number.MAX_SAFE_INTEGER to mean "whole file" —
   * confirm buildReadCommand treats that as unlimited rather than as an
   * invalid limit of 0 (which would yield empty content here).
   *
   * @param filePath - Absolute file path
   * @returns Raw file content as FileData (timestamps are read-time, not
   *          the file's real ctime/mtime)
   * @throws Error when the read command exits non-zero
   */
  async readRaw(filePath) {
    const command = buildReadCommand(filePath, 0, Number.MAX_SAFE_INTEGER);
    const result = await this.execute(command);
    if (result.exitCode !== 0) throw new Error(`File '${filePath}' not found`);
    const lines = [];
    for (const line of result.output.split("\n")) {
      // Everything after the first tab is the original line text.
      const tabIndex = line.indexOf("\t");
      if (tabIndex !== -1) lines.push(line.substring(tabIndex + 1));
    }
    const now = (/* @__PURE__ */ new Date()).toISOString();
    return {
      content: lines,
      created_at: now,
      modified_at: now
    };
  }
  /**
   * Structured search results or error string for invalid input.
   *
   * Returns the raw "Invalid regex: ..." message as a string when the grep
   * command exits 1 with that marker in its output; otherwise parses one
   * {path, line, text} match per output line.
   *
   * NOTE(review): the marker is printed to stderr by the command — this
   * check only works if execute() merges stderr into output; verify.
   */
  async grepRaw(pattern, path$4 = "/", glob = null) {
    const command = buildGrepCommand(pattern, path$4, glob);
    const result = await this.execute(command);
    if (result.exitCode === 1) {
      if (result.output.includes("Invalid regex:")) return result.output.trim();
    }
    const matches = [];
    const lines = result.output.trim().split("\n").filter(Boolean);
    for (const line of lines) try {
      const parsed = JSON.parse(line);
      matches.push({
        path: parsed.path,
        line: parsed.line,
        text: parsed.text
      });
    } catch {}
    return matches;
  }
  /**
   * Structured glob matching returning FileInfo objects.
   *
   * Note: unlike lsInfo(), the exit code is not checked — a failed command
   * simply produces no parseable lines and yields [].
   */
  async globInfo(pattern, path$4 = "/") {
    const command = buildGlobCommand(path$4, pattern);
    const result = await this.execute(command);
    const infos = [];
    const lines = result.output.trim().split("\n").filter(Boolean);
    for (const line of lines) try {
      const parsed = JSON.parse(line);
      infos.push({
        path: parsed.path,
        is_dir: parsed.isDir,
        size: parsed.size,
        modified_at: parsed.mtime ? new Date(parsed.mtime).toISOString() : void 0
      });
    } catch {}
    return infos;
  }
  /**
   * Create a new file with content.
   *
   * Create-only semantics: the generated command refuses to overwrite.
   * NOTE(review): any non-zero exit is reported as "already exists", though
   * the command could also fail for other reasons (e.g. permissions).
   */
  async write(filePath, content) {
    const command = buildWriteCommand(filePath, content);
    if ((await this.execute(command)).exitCode !== 0) return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
    return {
      path: filePath,
      filesUpdate: null
    };
  }
  /**
   * Edit a file by replacing string occurrences.
   *
   * Maps the edit command's exit-code contract to result objects:
   * 0 = success (stdout carries the occurrence count), 1 = string not
   * found, 2 = multiple occurrences without replaceAll, 3 = file missing.
   */
  async edit(filePath, oldString, newString, replaceAll = false) {
    const command = buildEditCommand(filePath, oldString, newString, replaceAll);
    const result = await this.execute(command);
    switch (result.exitCode) {
      case 0: return {
        path: filePath,
        filesUpdate: null,
        // Fallback to 1 when the count line is missing/unparseable.
        occurrences: parseInt(result.output.trim(), 10) || 1
      };
      case 1: return { error: `String not found in file '${filePath}'` };
      case 2: return { error: `Multiple occurrences found in '${filePath}'. Use replaceAll=true to replace all.` };
      case 3: return { error: `Error: File '${filePath}' not found` };
      default: return { error: `Unknown error editing file '${filePath}'` };
    }
  }
};
2658
+
2659
+ //#endregion
2660
+ //#region src/agent.ts
2661
const BASE_PROMPT = `In order to complete the objective that the user asks of you, you have access to a number of standard tools.`;
/**
 * Create a Deep Agent with middleware-based architecture.
 *
 * Mirrors Python's create_deep_agent: every capability is a middleware —
 * todo management, filesystem tools, subagent delegation, conversation
 * summarization, Anthropic prompt caching, tool-call patching, and
 * (optionally) human-in-the-loop interrupts. Subagents receive the same
 * todo/filesystem/summarization/caching/patching stack as the main agent.
 *
 * @param params Configuration parameters for the agent
 * @returns ReactAgent instance ready for invocation
 */
function createDeepAgent(params = {}) {
  const {
    model = "claude-sonnet-4-5-20250929",
    tools = [],
    systemPrompt,
    middleware: customMiddleware = [],
    subagents = [],
    responseFormat,
    contextSchema,
    checkpointer,
    store,
    backend,
    interruptOn,
    name
  } = params;
  // User prompt (when given) is prepended to the standard base prompt.
  const finalSystemPrompt = systemPrompt ? `${systemPrompt}\n\n${BASE_PROMPT}` : BASE_PROMPT;
  // Default to a per-invocation state-backed filesystem when no backend given.
  const filesystemBackend = backend || ((config) => new StateBackend(config));
  // Tail middleware shared by the main agent and every spawned subagent;
  // a factory so each consumer gets fresh middleware instances.
  const makeTailMiddleware = () => [
    (0, langchain.summarizationMiddleware)({
      model,
      trigger: { tokens: 17e4 },
      keep: { messages: 6 }
    }),
    (0, langchain.anthropicPromptCachingMiddleware)({ unsupportedModelBehavior: "ignore" }),
    createPatchToolCallsMiddleware()
  ];
  const middleware = [
    (0, langchain.todoListMiddleware)(),
    createFilesystemMiddleware({ backend: filesystemBackend }),
    createSubAgentMiddleware({
      defaultModel: model,
      defaultTools: tools,
      defaultMiddleware: [
        (0, langchain.todoListMiddleware)(),
        createFilesystemMiddleware({ backend: filesystemBackend }),
        ...makeTailMiddleware()
      ],
      defaultInterruptOn: interruptOn,
      subagents,
      generalPurposeAgent: true
    }),
    ...makeTailMiddleware()
  ];
  // Human-in-the-loop goes after the standard stack, before user middleware.
  if (interruptOn) middleware.push((0, langchain.humanInTheLoopMiddleware)({ interruptOn }));
  middleware.push(...customMiddleware);
  return (0, langchain.createAgent)({
    model,
    systemPrompt: finalSystemPrompt,
    tools,
    middleware,
    responseFormat,
    contextSchema,
    checkpointer,
    store,
    name
  });
}
2724
+
2725
+ //#endregion
2726
+ //#region src/config.ts
2727
+ /**
2728
+ * Configuration and settings for deepagents.
2729
+ *
2730
+ * Provides project detection, path management, and environment configuration
2731
+ * for skills and agent memory middleware.
2732
+ */
2733
/**
 * Find the project root by looking for a .git directory.
 *
 * Starting at `startPath` (or the current working directory), walk up the
 * directory tree until a directory containing `.git` is found; the
 * filesystem root itself is checked as well before giving up.
 *
 * @param startPath - Directory to start searching from. Defaults to process.cwd().
 * @returns Absolute path of the project root if found, null otherwise.
 */
function findProjectRoot(startPath) {
  let dir = node_path.default.resolve(startPath || process.cwd());
  for (;;) {
    // A `.git` entry (directory or file) marks the project root.
    if (node_fs.default.existsSync(node_path.default.join(dir, ".git"))) return dir;
    const parent = node_path.default.dirname(dir);
    // dirname() is a fixed point at the filesystem root — nothing above us.
    if (parent === dir) return null;
    dir = parent;
  }
}
2753
/**
 * Validate agent name to prevent invalid filesystem paths and security issues.
 *
 * Accepts letters, digits, underscores, hyphens and whitespace; rejects
 * empty or whitespace-only names and anything containing path
 * metacharacters such as '/', '\', or '.'.
 *
 * @param agentName - The agent name to validate
 * @returns True if valid, false otherwise
 */
function isValidAgentName(agentName) {
  if (!agentName || !agentName.trim()) return false;
  const ALLOWED_NAME = /^[a-zA-Z0-9_\-\s]+$/;
  return ALLOWED_NAME.test(agentName);
}
2763
/**
 * Create a Settings instance with detected environment.
 *
 * Bundles project detection (via findProjectRoot) with path helpers for the
 * per-user (~/.deepagents/<agent>) and per-project (<root>/.deepagents)
 * layouts. `ensure*` helpers create the directory recursively before
 * returning it; `get*` helpers only build the path. Project-scoped helpers
 * return null when no project root was detected.
 *
 * @param options - Configuration options
 * @param options.startPath - Where findProjectRoot starts walking (defaults to cwd)
 * @returns Settings instance with project detection and path management
 */
function createSettings(options = {}) {
  const projectRoot = findProjectRoot(options.startPath);
  const userDeepagentsDir = node_path.default.join(node_os.default.homedir(), ".deepagents");
  return {
    // null when no enclosing .git was found
    projectRoot,
    userDeepagentsDir,
    hasProject: projectRoot !== null,
    // Per-agent base dir; validated so a crafted name cannot escape
    // userDeepagentsDir via path separators or '..'.
    getAgentDir(agentName) {
      if (!isValidAgentName(agentName)) throw new Error(`Invalid agent name: ${JSON.stringify(agentName)}. Agent names can only contain letters, numbers, hyphens, underscores, and spaces.`);
      return node_path.default.join(userDeepagentsDir, agentName);
    },
    // Like getAgentDir, but creates the directory first.
    ensureAgentDir(agentName) {
      const agentDir = this.getAgentDir(agentName);
      node_fs.default.mkdirSync(agentDir, { recursive: true });
      return agentDir;
    },
    // ~/.deepagents/<agent>/agent.md (user-level agent memory file)
    getUserAgentMdPath(agentName) {
      return node_path.default.join(this.getAgentDir(agentName), "agent.md");
    },
    // <root>/.deepagents/agent.md, or null without a project
    getProjectAgentMdPath() {
      if (!projectRoot) return null;
      return node_path.default.join(projectRoot, ".deepagents", "agent.md");
    },
    // ~/.deepagents/<agent>/skills
    getUserSkillsDir(agentName) {
      return node_path.default.join(this.getAgentDir(agentName), "skills");
    },
    ensureUserSkillsDir(agentName) {
      const skillsDir = this.getUserSkillsDir(agentName);
      node_fs.default.mkdirSync(skillsDir, { recursive: true });
      return skillsDir;
    },
    // <root>/.deepagents/skills, or null without a project
    getProjectSkillsDir() {
      if (!projectRoot) return null;
      return node_path.default.join(projectRoot, ".deepagents", "skills");
    },
    ensureProjectSkillsDir() {
      const skillsDir = this.getProjectSkillsDir();
      if (!skillsDir) return null;
      node_fs.default.mkdirSync(skillsDir, { recursive: true });
      return skillsDir;
    },
    // Create (if needed) and return <root>/.deepagents, or null without a project.
    ensureProjectDeepagentsDir() {
      if (!projectRoot) return null;
      const deepagentsDir = node_path.default.join(projectRoot, ".deepagents");
      node_fs.default.mkdirSync(deepagentsDir, { recursive: true });
      return deepagentsDir;
    }
  };
}
2818
+
2819
+ //#endregion
2820
+ //#region src/skills/loader.ts
2821
+ /**
2822
+ * Skill loader for parsing and loading agent skills from SKILL.md files.
2823
+ *
2824
+ * This module implements Anthropic's agent skills pattern with YAML frontmatter parsing.
2825
+ * Each skill is a directory containing a SKILL.md file with:
2826
+ * - YAML frontmatter (name, description required)
2827
+ * - Markdown instructions for the agent
2828
+ * - Optional supporting files (scripts, configs, etc.)
2829
+ *
2830
+ * @example
2831
+ * ```markdown
2832
+ * ---
2833
+ * name: web-research
2834
+ * description: Structured approach to conducting thorough web research
2835
+ * ---
2836
+ *
2837
+ * # Web Research Skill
2838
+ *
2839
+ * ## When to Use
2840
+ * - User asks you to research a topic
2841
+ * ...
2842
+ * ```
2843
+ *
2844
+ * @see https://agentskills.io/specification
2845
+ */
2846
/** Maximum size for SKILL.md files (10MB); larger files are skipped with a warning. */
const MAX_SKILL_FILE_SIZE = 10 * 1024 * 1024;
/** Agent Skills spec constraints */
// Maximum length of the frontmatter `name` field.
const MAX_SKILL_NAME_LENGTH = 64;
// Maximum length of the frontmatter `description` field; longer values are truncated.
const MAX_SKILL_DESCRIPTION_LENGTH = 1024;
/** Pattern for validating skill names per Agent Skills spec:
 * lowercase alphanumeric groups joined by single hyphens (no leading/trailing
 * hyphen, no consecutive hyphens). */
const SKILL_NAME_PATTERN = /^[a-z0-9]+(-[a-z0-9]+)*$/;
/** Pattern for extracting YAML frontmatter: a leading `---` ... `---` block;
 * capture group 1 is the YAML body. */
const FRONTMATTER_PATTERN = /^---\s*\n([\s\S]*?)\n---\s*\n/;
2855
/**
 * Check if a path is safely contained within base_dir.
 *
 * Prevents directory traversal via symlinks or path manipulation: both
 * paths are canonicalized with realpathSync (following symlinks) and the
 * target must equal the base or live strictly underneath it. Any
 * resolution failure (e.g. a nonexistent path) counts as unsafe.
 *
 * @param targetPath - The path to validate
 * @param baseDir - The base directory that should contain the path
 * @returns True if the path is safely within baseDir, false otherwise
 */
function isSafePath(targetPath, baseDir) {
  let realTarget;
  let realBase;
  try {
    realTarget = node_fs.default.realpathSync(targetPath);
    realBase = node_fs.default.realpathSync(baseDir);
  } catch {
    return false;
  }
  if (realTarget === realBase) return true;
  // Separator suffix guards against sibling-prefix matches like /a/bc vs /a/b.
  return realTarget.startsWith(realBase + node_path.default.sep);
}
2875
/**
 * Validate skill name per Agent Skills spec.
 *
 * Enforced, in order:
 * - name is present
 * - at most MAX_SKILL_NAME_LENGTH (64) characters
 * - lowercase alphanumeric groups joined by single hyphens (SKILL_NAME_PATTERN)
 * - name equals the skill's parent directory name
 *
 * @param name - The skill name from YAML frontmatter
 * @param directoryName - The parent directory name
 * @returns `{ valid: true }` on success, otherwise `{ valid: false, error }`
 */
function validateSkillName(name, directoryName) {
  const fail = (error) => ({
    valid: false,
    error
  });
  if (!name) return fail("name is required");
  if (name.length > MAX_SKILL_NAME_LENGTH) return fail("name exceeds 64 characters");
  if (!SKILL_NAME_PATTERN.test(name)) return fail("name must be lowercase alphanumeric with single hyphens only");
  if (name !== directoryName) return fail(`name '${name}' must match directory name '${directoryName}'`);
  return { valid: true };
}
2908
/**
 * Parse YAML frontmatter from content.
 *
 * Extracts the leading `---` ... `---` block (FRONTMATTER_PATTERN) and
 * parses it as YAML. Anything that is not a non-null object — no
 * frontmatter, invalid YAML, or a scalar/array document — yields null.
 *
 * @param content - The file content
 * @returns Parsed frontmatter object, or null if parsing fails
 */
function parseFrontmatter(content) {
  const match = FRONTMATTER_PATTERN.exec(content);
  if (!match) return null;
  let doc;
  try {
    doc = yaml.default.parse(match[1]);
  } catch {
    return null;
  }
  return doc !== null && typeof doc === "object" ? doc : null;
}
2924
/**
 * Parse YAML frontmatter from a SKILL.md file per Agent Skills spec.
 *
 * Enforces MAX_SKILL_FILE_SIZE, requires `name` and `description`, warns
 * (without rejecting) when the name is not spec-compliant or does not match
 * the parent directory, and truncates over-long descriptions. Every failure
 * path logs a warning and returns null so one bad skill cannot break the scan.
 *
 * @param skillMdPath - Path to the SKILL.md file
 * @param source - Source of the skill ('user' or 'project')
 * @returns SkillMetadata with all fields, or null if parsing fails
 */
function parseSkillMetadata(skillMdPath, source) {
  try {
    const stats = node_fs.default.statSync(skillMdPath);
    // Guard against pathological files before reading them into memory.
    if (stats.size > MAX_SKILL_FILE_SIZE) {
      console.warn(`Skipping ${skillMdPath}: file too large (${stats.size} bytes)`);
      return null;
    }
    const frontmatter = parseFrontmatter(node_fs.default.readFileSync(skillMdPath, "utf-8"));
    if (!frontmatter) {
      console.warn(`Skipping ${skillMdPath}: no valid YAML frontmatter found`);
      return null;
    }
    const name = frontmatter.name;
    const description = frontmatter.description;
    if (!name || !description) {
      console.warn(`Skipping ${skillMdPath}: missing required 'name' or 'description'`);
      return null;
    }
    // Spec says the name must equal the skill's directory name; this is a
    // warning only — non-compliant skills are still loaded.
    const directoryName = node_path.default.basename(node_path.default.dirname(skillMdPath));
    const validation = validateSkillName(String(name), directoryName);
    if (!validation.valid) console.warn(`Skill '${name}' in ${skillMdPath} does not follow Agent Skills spec: ${validation.error}. Consider renaming to be spec-compliant.`);
    let descriptionStr = String(description);
    if (descriptionStr.length > MAX_SKILL_DESCRIPTION_LENGTH) {
      console.warn(`Description exceeds ${MAX_SKILL_DESCRIPTION_LENGTH} chars in ${skillMdPath}, truncating`);
      descriptionStr = descriptionStr.slice(0, MAX_SKILL_DESCRIPTION_LENGTH);
    }
    return {
      name: String(name),
      description: descriptionStr,
      path: skillMdPath,
      source,
      // Optional spec fields; coerced to strings, omitted when absent.
      license: frontmatter.license ? String(frontmatter.license) : void 0,
      compatibility: frontmatter.compatibility ? String(frontmatter.compatibility) : void 0,
      metadata: frontmatter.metadata && typeof frontmatter.metadata === "object" ? frontmatter.metadata : void 0,
      allowedTools: frontmatter["allowed-tools"] ? String(frontmatter["allowed-tools"]) : void 0
    };
  } catch (error) {
    // stat/read failures (missing file, permissions, …) degrade to a warning.
    console.warn(`Error reading ${skillMdPath}: ${error}`);
    return null;
  }
}
2972
/**
 * List all skills from a single skills directory (internal helper).
 *
 * Expands a leading '~' using HOME/USERPROFILE, canonicalizes the base
 * directory, then scans its immediate subdirectories for SKILL.md files,
 * validating every derived path with isSafePath() to block symlink
 * escapes. Unreadable or missing directories yield an empty list.
 *
 * Layout expected:
 * ```
 * skills/
 *   ├── skill-name/
 *   │     ├── SKILL.md   # Required: instructions with YAML frontmatter
 *   │     └── ...        # Optional supporting files
 * ```
 *
 * @param skillsDir - Path to the skills directory (may start with '~')
 * @param source - Source of the skills ('user' or 'project')
 * @returns List of skill metadata
 */
function listSkillsFromDir(skillsDir, source) {
  let baseDir = skillsDir;
  if (skillsDir.startsWith("~")) {
    const home = process.env.HOME || process.env.USERPROFILE || "";
    baseDir = node_path.default.join(home, skillsDir.slice(1));
  }
  if (!node_fs.default.existsSync(baseDir)) return [];
  let realBase;
  try {
    realBase = node_fs.default.realpathSync(baseDir);
  } catch {
    return [];
  }
  let dirEntries;
  try {
    dirEntries = node_fs.default.readdirSync(realBase, { withFileTypes: true });
  } catch {
    return [];
  }
  const found = [];
  for (const dirent of dirEntries) {
    const candidateDir = node_path.default.join(realBase, dirent.name);
    if (!isSafePath(candidateDir, realBase)) continue;
    if (!dirent.isDirectory()) continue;
    const manifestPath = node_path.default.join(candidateDir, "SKILL.md");
    if (!node_fs.default.existsSync(manifestPath)) continue;
    if (!isSafePath(manifestPath, realBase)) continue;
    const parsed = parseSkillMetadata(manifestPath, source);
    if (parsed) found.push(parsed);
  }
  return found;
}
3019
/**
 * List skills from user and/or project directories.
 *
 * Scans the user directory first, then the project directory, merging by
 * skill name — so a project skill silently replaces a user skill of the
 * same name.
 *
 * @param options - Options specifying which directories to search
 * @returns Merged list of skill metadata from both sources, with project
 *          skills taking precedence over user skills when names conflict
 */
function listSkills(options) {
  const byName = new Map();
  const scanOrder = [
    [options.userSkillsDir, "user"],
    [options.projectSkillsDir, "project"]
  ];
  for (const [dir, source] of scanOrder) {
    if (!dir) continue;
    for (const skill of listSkillsFromDir(dir, source)) {
      byName.set(skill.name, skill);
    }
  }
  return [...byName.values()];
}
3041
+
3042
+ //#endregion
3043
+ //#region src/middleware/skills.ts
3044
+ /**
3045
+ * Middleware for loading and exposing agent skills to the system prompt.
3046
+ *
3047
+ * This middleware implements Anthropic's "Agent Skills" pattern with progressive disclosure:
3048
+ * 1. Parse YAML frontmatter from SKILL.md files at session start
3049
+ * 2. Inject skills metadata (name + description) into system prompt
3050
+ * 3. Agent reads full SKILL.md content when relevant to a task
3051
+ *
3052
+ * Skills directory structure (per-agent + project):
3053
+ * User-level: ~/.deepagents/{AGENT_NAME}/skills/
3054
+ * Project-level: {PROJECT_ROOT}/.deepagents/skills/
3055
+ *
3056
+ * @example
3057
+ * ```
3058
+ * ~/.deepagents/{AGENT_NAME}/skills/
3059
+ * ├── web-research/
3060
+ * │ ├── SKILL.md # Required: YAML frontmatter + instructions
3061
+ * │ └── helper.py # Optional: supporting files
3062
+ * ├── code-review/
3063
+ * │ ├── SKILL.md
3064
+ * │ └── checklist.md
3065
+ *
3066
+ * .deepagents/skills/
3067
+ * ├── project-specific/
3068
+ * │ └── SKILL.md # Project-specific skills
3069
+ * ```
3070
+ */
3071
/**
 * State schema for skills middleware.
 *
 * Declares the optional `skillsMetadata` channel carried on agent state:
 * the list of skills discovered at session start (populated by
 * createSkillsMiddleware's beforeAgent hook). Field shape mirrors the
 * objects produced by parseSkillMetadata().
 */
const SkillsStateSchema = zod.z.object({ skillsMetadata: zod.z.array(zod.z.object({
  name: zod.z.string(),
  description: zod.z.string(),
  // Absolute path to the skill's SKILL.md
  path: zod.z.string(),
  // 'user' = per-agent skills dir, 'project' = <root>/.deepagents/skills
  source: zod.z.enum(["user", "project"]),
  license: zod.z.string().optional(),
  compatibility: zod.z.string().optional(),
  metadata: zod.z.record(zod.z.string(), zod.z.string()).optional(),
  // Raw 'allowed-tools' frontmatter value, when present
  allowedTools: zod.z.string().optional()
})).optional() });
3084
/**
 * Skills System Documentation prompt template.
 *
 * Appended to the agent's system prompt by the skills middleware. The
 * `{skills_locations}` and `{skills_list}` placeholders are intended to be
 * filled from formatSkillsLocations()/formatSkillsList() — presumably in
 * createSkillsMiddleware's wrapModelCall; verify against that hook.
 */
const SKILLS_SYSTEM_PROMPT = `

## Skills System

You have access to a skills library that provides specialized capabilities and domain knowledge.

{skills_locations}

**Available Skills:**

{skills_list}

**How to Use Skills (Progressive Disclosure):**

Skills follow a **progressive disclosure** pattern - you know they exist (name + description above), but you only read the full instructions when needed:

1. **Recognize when a skill applies**: Check if the user's task matches any skill's description
2. **Read the skill's full instructions**: The skill list above shows the exact path to use with read_file
3. **Follow the skill's instructions**: SKILL.md contains step-by-step workflows, best practices, and examples
4. **Access supporting files**: Skills may include Python scripts, configs, or reference docs - use absolute paths

**When to Use Skills:**
- When the user's request matches a skill's domain (e.g., "research X" → web-research skill)
- When you need specialized knowledge or structured workflows
- When a skill provides proven patterns for complex tasks

**Skills are Self-Documenting:**
- Each SKILL.md tells you exactly what the skill does and how to use it
- The skill list above shows the full path for each skill's SKILL.md file

**Executing Skill Scripts:**
Skills may contain Python scripts or other executable files. Always use absolute paths from the skill list.

**Example Workflow:**

User: "Can you research the latest developments in quantum computing?"

1. Check available skills above → See "web-research" skill with its full path
2. Read the skill using the path shown in the list
3. Follow the skill's research workflow (search → organize → synthesize)
4. Use any helper scripts with absolute paths

Remember: Skills are tools to make you more capable and consistent. When in doubt, check if a skill exists for the task!
`;
3131
/**
 * Format skills locations for display in system prompt.
 *
 * Always lists the user skills directory; appends the project skills
 * directory (marked as overriding) when one is configured.
 *
 * @param userSkillsDisplay - Display path of the user skills directory
 * @param projectSkillsDir - Project skills directory, or falsy when absent
 * @returns Newline-joined markdown lines describing the locations
 */
function formatSkillsLocations(userSkillsDisplay, projectSkillsDir) {
  let rendered = `**User Skills**: \`${userSkillsDisplay}\``;
  if (projectSkillsDir) {
    rendered += `\n**Project Skills**: \`${projectSkillsDir}\` (overrides user skills)`;
  }
  return rendered;
}
3139
/**
 * Format skills metadata for display in system prompt.
 *
 * With no skills, returns a one-line hint naming the directories where
 * skills can be created. Otherwise renders a "User Skills" section (when
 * any exist, followed by a blank separator line) and then a "Project
 * Skills" section, each entry carrying its description and the SKILL.md
 * path to read for full instructions.
 *
 * @param skills - Skill metadata objects (source: 'user' | 'project')
 * @param userSkillsDisplay - Display path of the user skills directory
 * @param projectSkillsDir - Project skills directory, or falsy when absent
 * @returns Newline-joined markdown listing
 */
function formatSkillsList(skills, userSkillsDisplay, projectSkillsDir) {
  if (skills.length === 0) {
    const candidates = [userSkillsDisplay];
    if (projectSkillsDir) candidates.push(projectSkillsDir);
    return `(No skills available yet. You can create skills in ${candidates.join(" or ")})`;
  }
  const renderEntries = (group) => group.flatMap((skill) => [
    `- **${skill.name}**: ${skill.description}`,
    ` → Read \`${skill.path}\` for full instructions`
  ]);
  const fromUser = skills.filter((skill) => skill.source === "user");
  const fromProject = skills.filter((skill) => skill.source === "project");
  const rendered = [];
  if (fromUser.length > 0) {
    rendered.push("**User Skills:**", ...renderEntries(fromUser), "");
  }
  if (fromProject.length > 0) {
    rendered.push("**Project Skills:**", ...renderEntries(fromProject));
  }
  return rendered.join("\n");
}
3168
/**
 * Create middleware for loading and exposing agent skills.
 *
 * This middleware implements Anthropic's agent skills pattern:
 * - Loads skills metadata (name, description) from YAML frontmatter at session start
 * - Injects skills list into system prompt for discoverability
 * - Agent reads full SKILL.md content when a skill is relevant (progressive disclosure)
 *
 * Supports both user-level and project-level skills:
 * - User skills: ~/.deepagents/{AGENT_NAME}/skills/
 * - Project skills: {PROJECT_ROOT}/.deepagents/skills/
 * - Project skills override user skills with the same name
 *
 * @param options - Configuration options: `skillsDir` (user skills directory),
 *   `assistantId` (used for the display path), optional `projectSkillsDir`.
 * @returns AgentMiddleware for skills loading and injection
 */
function createSkillsMiddleware(options) {
	const { skillsDir, assistantId, projectSkillsDir } = options;
	const userSkillsDisplay = `~/.deepagents/${assistantId}/skills`;
	return (0, langchain.createMiddleware)({
		name: "SkillsMiddleware",
		stateSchema: SkillsStateSchema,
		// Scan the skills directories once at session start; the metadata is
		// kept in agent state so later model calls don't re-read the disk.
		beforeAgent() {
			return { skillsMetadata: listSkills({
				userSkillsDir: skillsDir,
				projectSkillsDir
			}) };
		},
		// Append the rendered skills section to the system prompt on every model call.
		wrapModelCall(request, handler) {
			const skillsMetadata = request.state?.skillsMetadata || [];
			const skillsLocations = formatSkillsLocations(userSkillsDisplay, projectSkillsDir);
			const skillsList = formatSkillsList(skillsMetadata, userSkillsDisplay, projectSkillsDir);
			// Fix: use function replacers so "$" sequences in skill names,
			// descriptions, or paths (e.g. "$&", "$'") are inserted literally
			// rather than expanded as String.prototype.replace special
			// replacement patterns, which would corrupt the prompt.
			const skillsSection = SKILLS_SYSTEM_PROMPT
				.replace("{skills_locations}", () => skillsLocations)
				.replace("{skills_list}", () => skillsList);
			const currentSystemPrompt = request.systemPrompt || "";
			const newSystemPrompt = currentSystemPrompt ? `${currentSystemPrompt}\n\n${skillsSection}` : skillsSection;
			return handler({
				...request,
				systemPrompt: newSystemPrompt
			});
		}
	});
}
3210
+
3211
+ //#endregion
3212
+ //#region src/middleware/agent-memory.ts
3213
/**
 * Middleware for loading agent-specific long-term memory into the system prompt.
 *
 * This middleware loads the agent's long-term memory from agent.md files
 * and injects it into the system prompt. Memory is loaded from:
 * - User memory: ~/.deepagents/{agent_name}/agent.md
 * - Project memory: {project_root}/.deepagents/agent.md
 */
/**
 * State schema for agent memory middleware.
 *
 * Both keys are optional so `beforeAgent` can distinguish "not yet loaded"
 * (key absent from state) from "loaded" — it only reads from disk when the
 * key is missing.
 * NOTE(review): this references a `zod` binding, while the bundle header
 * requires `zod_v4`/`zod_v3` — presumably `zod` is aliased earlier in the
 * bundle; confirm the binding exists.
 */
const AgentMemoryStateSchema = zod.z.object({
	userMemory: zod.z.string().optional(),
	projectMemory: zod.z.string().optional()
});
3228
/**
 * Default template for memory injection.
 *
 * The `{user_memory}` and `{project_memory}` placeholders are substituted
 * with the loaded agent.md contents (or a "(No ... agent.md)" fallback) in
 * createAgentMemoryMiddleware's wrapModelCall hook.
 */
const DEFAULT_MEMORY_TEMPLATE = `<user_memory>
{user_memory}
</user_memory>

<project_memory>
{project_memory}
</project_memory>`;
3238
/**
 * Long-term Memory Documentation system prompt.
 *
 * Appended to the system prompt by createAgentMemoryMiddleware, which
 * substitutes the `{agent_dir_absolute}`, `{agent_dir_display}`,
 * `{project_memory_info}`, and `{project_deepagents_dir}` placeholders
 * (each may occur multiple times) with the resolved paths.
 */
const LONGTERM_MEMORY_SYSTEM_PROMPT = `

## Long-term Memory

Your long-term memory is stored in files on the filesystem and persists across sessions.

**User Memory Location**: \`{agent_dir_absolute}\` (displays as \`{agent_dir_display}\`)
**Project Memory Location**: {project_memory_info}

Your system prompt is loaded from TWO sources at startup:
1. **User agent.md**: \`{agent_dir_absolute}/agent.md\` - Your personal preferences across all projects
2. **Project agent.md**: Loaded from project root if available - Project-specific instructions

Project-specific agent.md is loaded from these locations (both combined if both exist):
- \`[project-root]/.deepagents/agent.md\` (preferred)
- \`[project-root]/agent.md\` (fallback, but also included if both exist)

**When to CHECK/READ memories (CRITICAL - do this FIRST):**
- **At the start of ANY new session**: Check both user and project memories
- User: \`ls {agent_dir_absolute}\`
- Project: \`ls {project_deepagents_dir}\` (if in a project)
- **BEFORE answering questions**: If asked "what do you know about X?" or "how do I do Y?", check project memories FIRST, then user
- **When user asks you to do something**: Check if you have project-specific guides or examples
- **When user references past work**: Search project memory files for related context

**Memory-first response pattern:**
1. User asks a question → Check project directory first: \`ls {project_deepagents_dir}\`
2. If relevant files exist → Read them with \`read_file '{project_deepagents_dir}/[filename]'\`
3. Check user memory if needed → \`ls {agent_dir_absolute}\`
4. Base your answer on saved knowledge supplemented by general knowledge

**When to update memories:**
- **IMMEDIATELY when the user describes your role or how you should behave**
- **IMMEDIATELY when the user gives feedback on your work** - Update memories to capture what was wrong and how to do it better
- When the user explicitly asks you to remember something
- When patterns or preferences emerge (coding styles, conventions, workflows)
- After significant work where context would help in future sessions

**Learning from feedback:**
- When user says something is better/worse, capture WHY and encode it as a pattern
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions
- When user says "you should remember X" or "be careful about Y", treat this as HIGH PRIORITY - update memories IMMEDIATELY
- Look for the underlying principle behind corrections, not just the specific mistake

## Deciding Where to Store Memory

When writing or updating agent memory, decide whether each fact, configuration, or behavior belongs in:

### User Agent File: \`{agent_dir_absolute}/agent.md\`
→ Describes the agent's **personality, style, and universal behavior** across all projects.

**Store here:**
- Your general tone and communication style
- Universal coding preferences (formatting, comment style, etc.)
- General workflows and methodologies you follow
- Tool usage patterns that apply everywhere
- Personal preferences that don't change per-project

**Examples:**
- "Be concise and direct in responses"
- "Always use type hints in Python"
- "Prefer functional programming patterns"

### Project Agent File: \`{project_deepagents_dir}/agent.md\`
→ Describes **how this specific project works** and **how the agent should behave here only.**

**Store here:**
- Project-specific architecture and design patterns
- Coding conventions specific to this codebase
- Project structure and organization
- Testing strategies for this project
- Deployment processes and workflows
- Team conventions and guidelines

**Examples:**
- "This project uses FastAPI with SQLAlchemy"
- "Tests go in tests/ directory mirroring src/ structure"
- "All API changes require updating OpenAPI spec"

### Project Memory Files: \`{project_deepagents_dir}/*.md\`
→ Use for **project-specific reference information** and structured notes.

**Store here:**
- API design documentation
- Architecture decisions and rationale
- Deployment procedures
- Common debugging patterns
- Onboarding information

**Examples:**
- \`{project_deepagents_dir}/api-design.md\` - REST API patterns used
- \`{project_deepagents_dir}/architecture.md\` - System architecture overview
- \`{project_deepagents_dir}/deployment.md\` - How to deploy this project

### File Operations:

**User memory:**
\`\`\`
ls {agent_dir_absolute} # List user memory files
read_file '{agent_dir_absolute}/agent.md' # Read user preferences
edit_file '{agent_dir_absolute}/agent.md' ... # Update user preferences
\`\`\`

**Project memory (preferred for project-specific information):**
\`\`\`
ls {project_deepagents_dir} # List project memory files
read_file '{project_deepagents_dir}/agent.md' # Read project instructions
edit_file '{project_deepagents_dir}/agent.md' ... # Update project instructions
write_file '{project_deepagents_dir}/agent.md' ... # Create project memory file
\`\`\`

**Important**:
- Project memory files are stored in \`.deepagents/\` inside the project root
- Always use absolute paths for file operations
- Check project memories BEFORE user when answering project-specific questions`;
3356
/**
 * Create middleware for loading agent-specific long-term memory.
 *
 * This middleware loads the agent's long-term memory from a file (agent.md)
 * and injects it into the system prompt. The memory is loaded once at the
 * start of the conversation (beforeAgent) and stored in state; every model
 * call then gets the memory section, the base prompt, and the long-term
 * memory documentation concatenated into its system prompt.
 *
 * @param options - Configuration options: `settings` (path resolution and
 *   project root), `assistantId` (agent name for the user memory dir), and
 *   optional `systemPromptTemplate` overriding DEFAULT_MEMORY_TEMPLATE.
 * @returns AgentMiddleware for memory loading and injection
 */
function createAgentMemoryMiddleware(options) {
	const { settings, assistantId, systemPromptTemplate } = options;
	const agentDir = settings.getAgentDir(assistantId);
	const agentDirDisplay = `~/.deepagents/${assistantId}`;
	const agentDirAbsolute = agentDir;
	const projectRoot = settings.projectRoot;
	const projectMemoryInfo = projectRoot ? `\`${projectRoot}\` (detected)` : "None (not in a git project)";
	const projectDeepagentsDir = projectRoot ? `${projectRoot}/.deepagents` : "[project-root]/.deepagents (not in a project)";
	const template = systemPromptTemplate || DEFAULT_MEMORY_TEMPLATE;
	return (0, langchain.createMiddleware)({
		name: "AgentMemoryMiddleware",
		stateSchema: AgentMemoryStateSchema,
		beforeAgent(state) {
			const result = {};
			// Only read from disk when state does not already carry the memory
			// (e.g. a resumed session); reads are deliberately best-effort and
			// failures leave the key unset.
			if (!("userMemory" in state)) {
				const userPath = settings.getUserAgentMdPath(assistantId);
				if (node_fs.default.existsSync(userPath)) try {
					result.userMemory = node_fs.default.readFileSync(userPath, "utf-8");
				} catch {}
			}
			if (!("projectMemory" in state)) {
				const projectPath = settings.getProjectAgentMdPath();
				if (projectPath && node_fs.default.existsSync(projectPath)) try {
					result.projectMemory = node_fs.default.readFileSync(projectPath, "utf-8");
				} catch {}
			}
			return Object.keys(result).length > 0 ? result : void 0;
		},
		wrapModelCall(request, handler) {
			const userMemory = request.state?.userMemory;
			const projectMemory = request.state?.projectMemory;
			const baseSystemPrompt = request.systemPrompt || "";
			// Fix: use function replacers throughout. Memory file content is
			// arbitrary user text and the paths may contain "$"; with a string
			// replacement, sequences like "$&" or "$'" are expanded as special
			// replacement patterns by replace/replaceAll and would corrupt the
			// injected prompt.
			const memorySection = template
				.replace("{user_memory}", () => userMemory || "(No user agent.md)")
				.replace("{project_memory}", () => projectMemory || "(No project agent.md)");
			const memoryDocs = LONGTERM_MEMORY_SYSTEM_PROMPT
				.replaceAll("{agent_dir_absolute}", () => agentDirAbsolute)
				.replaceAll("{agent_dir_display}", () => agentDirDisplay)
				.replaceAll("{project_memory_info}", () => projectMemoryInfo)
				.replaceAll("{project_deepagents_dir}", () => projectDeepagentsDir);
			// Order: memory section first, then the base prompt, then the docs.
			let systemPrompt = memorySection;
			if (baseSystemPrompt) systemPrompt += "\n\n" + baseSystemPrompt;
			systemPrompt += "\n\n" + memoryDocs;
			return handler({
				...request,
				systemPrompt
			});
		}
	});
}
3410
+
3411
+ //#endregion
3412
// Public API of the deepagents bundle. Kept as individual `exports.<name> =`
// assignments (generated by the bundler) so the named exports remain
// statically detectable.
exports.BaseSandbox = BaseSandbox;
exports.CompositeBackend = CompositeBackend;
exports.FilesystemBackend = FilesystemBackend;
exports.MAX_SKILL_DESCRIPTION_LENGTH = MAX_SKILL_DESCRIPTION_LENGTH;
exports.MAX_SKILL_FILE_SIZE = MAX_SKILL_FILE_SIZE;
exports.MAX_SKILL_NAME_LENGTH = MAX_SKILL_NAME_LENGTH;
exports.StateBackend = StateBackend;
exports.StoreBackend = StoreBackend;
exports.createAgentMemoryMiddleware = createAgentMemoryMiddleware;
exports.createDeepAgent = createDeepAgent;
exports.createFilesystemMiddleware = createFilesystemMiddleware;
exports.createPatchToolCallsMiddleware = createPatchToolCallsMiddleware;
exports.createSettings = createSettings;
exports.createSkillsMiddleware = createSkillsMiddleware;
exports.createSubAgentMiddleware = createSubAgentMiddleware;
exports.findProjectRoot = findProjectRoot;
exports.isSandboxBackend = isSandboxBackend;
exports.listSkills = listSkills;
exports.parseSkillMetadata = parseSkillMetadata;
3431
+ //# sourceMappingURL=index.cjs.map