deepagents 1.3.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +3431 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +1510 -0
- package/dist/index.d.ts +844 -9
- package/dist/index.js +1483 -37
- package/dist/index.js.map +1 -0
- package/package.json +40 -38
- package/README.md +0 -555
package/dist/index.js
CHANGED
|
@@ -1,17 +1,39 @@
|
|
|
1
1
|
import { AIMessage, ToolMessage, anthropicPromptCachingMiddleware, createAgent, createMiddleware, humanInTheLoopMiddleware, summarizationMiddleware, todoListMiddleware, tool } from "langchain";
|
|
2
2
|
import { Command, REMOVE_ALL_MESSAGES, getCurrentTaskInput, isCommand } from "@langchain/langgraph";
|
|
3
|
-
import { z } from "zod/
|
|
4
|
-
import { withLangGraph } from "@langchain/langgraph/zod";
|
|
3
|
+
import { z } from "zod/v4";
|
|
5
4
|
import micromatch from "micromatch";
|
|
6
|
-
import * as path from "path";
|
|
7
5
|
import { basename } from "path";
|
|
6
|
+
import { z as z$1 } from "zod/v3";
|
|
8
7
|
import { HumanMessage, RemoveMessage } from "@langchain/core/messages";
|
|
9
|
-
import
|
|
10
|
-
import
|
|
11
|
-
import
|
|
8
|
+
import fs from "node:fs/promises";
|
|
9
|
+
import fs$1 from "node:fs";
|
|
10
|
+
import path from "node:path";
|
|
11
|
+
import { spawn } from "node:child_process";
|
|
12
12
|
import fg from "fast-glob";
|
|
13
|
+
import os from "node:os";
|
|
14
|
+
import { z as z$2 } from "zod";
|
|
15
|
+
import yaml from "yaml";
|
|
13
16
|
|
|
17
|
+
//#region src/backends/protocol.ts
|
|
18
|
+
/**
|
|
19
|
+
* Type guard to check if a backend supports execution.
|
|
20
|
+
*
|
|
21
|
+
* @param backend - Backend instance to check
|
|
22
|
+
* @returns True if the backend implements SandboxBackendProtocol
|
|
23
|
+
*/
|
|
24
|
+
function isSandboxBackend(backend) {
|
|
25
|
+
return typeof backend.execute === "function" && typeof backend.id === "string";
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
//#endregion
|
|
14
29
|
//#region src/backends/utils.ts
|
|
30
|
+
/**
|
|
31
|
+
* Shared utility functions for memory backend implementations.
|
|
32
|
+
*
|
|
33
|
+
* This module contains both user-facing string formatters and structured
|
|
34
|
+
* helpers used by backends and the composite router. Structured helpers
|
|
35
|
+
* enable composition without fragile string parsing.
|
|
36
|
+
*/
|
|
15
37
|
const EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents";
|
|
16
38
|
const MAX_LINE_LENGTH = 1e4;
|
|
17
39
|
const LINE_NUMBER_WIDTH = 6;
|
|
@@ -378,11 +400,75 @@ var StateBackend = class {
|
|
|
378
400
|
}
|
|
379
401
|
return infos;
|
|
380
402
|
}
|
|
403
|
+
/**
|
|
404
|
+
* Upload multiple files.
|
|
405
|
+
*
|
|
406
|
+
* Note: Since LangGraph state must be updated via Command objects,
|
|
407
|
+
* the caller must apply filesUpdate via Command after calling this method.
|
|
408
|
+
*
|
|
409
|
+
* @param files - List of [path, content] tuples to upload
|
|
410
|
+
* @returns List of FileUploadResponse objects, one per input file
|
|
411
|
+
*/
|
|
412
|
+
uploadFiles(files) {
|
|
413
|
+
const responses = [];
|
|
414
|
+
const updates = {};
|
|
415
|
+
for (const [path$1, content] of files) try {
|
|
416
|
+
updates[path$1] = createFileData(new TextDecoder().decode(content));
|
|
417
|
+
responses.push({
|
|
418
|
+
path: path$1,
|
|
419
|
+
error: null
|
|
420
|
+
});
|
|
421
|
+
} catch {
|
|
422
|
+
responses.push({
|
|
423
|
+
path: path$1,
|
|
424
|
+
error: "invalid_path"
|
|
425
|
+
});
|
|
426
|
+
}
|
|
427
|
+
const result = responses;
|
|
428
|
+
result.filesUpdate = updates;
|
|
429
|
+
return result;
|
|
430
|
+
}
|
|
431
|
+
/**
|
|
432
|
+
* Download multiple files.
|
|
433
|
+
*
|
|
434
|
+
* @param paths - List of file paths to download
|
|
435
|
+
* @returns List of FileDownloadResponse objects, one per input path
|
|
436
|
+
*/
|
|
437
|
+
downloadFiles(paths) {
|
|
438
|
+
const files = this.getFiles();
|
|
439
|
+
const responses = [];
|
|
440
|
+
for (const path$1 of paths) {
|
|
441
|
+
const fileData = files[path$1];
|
|
442
|
+
if (!fileData) {
|
|
443
|
+
responses.push({
|
|
444
|
+
path: path$1,
|
|
445
|
+
content: null,
|
|
446
|
+
error: "file_not_found"
|
|
447
|
+
});
|
|
448
|
+
continue;
|
|
449
|
+
}
|
|
450
|
+
const contentStr = fileDataToString(fileData);
|
|
451
|
+
const content = new TextEncoder().encode(contentStr);
|
|
452
|
+
responses.push({
|
|
453
|
+
path: path$1,
|
|
454
|
+
content,
|
|
455
|
+
error: null
|
|
456
|
+
});
|
|
457
|
+
}
|
|
458
|
+
return responses;
|
|
459
|
+
}
|
|
381
460
|
};
|
|
382
461
|
|
|
383
462
|
//#endregion
|
|
384
463
|
//#region src/middleware/fs.ts
|
|
385
464
|
/**
|
|
465
|
+
* Middleware for providing filesystem tools to an agent.
|
|
466
|
+
*
|
|
467
|
+
* Provides ls, read_file, write_file, edit_file, glob, and grep tools with support for:
|
|
468
|
+
* - Pluggable backends (StateBackend, StoreBackend, FilesystemBackend, CompositeBackend)
|
|
469
|
+
* - Tool result eviction for large outputs
|
|
470
|
+
*/
|
|
471
|
+
/**
|
|
386
472
|
* Zod v3 schema for FileData (re-export from backends)
|
|
387
473
|
*/
|
|
388
474
|
const FileDataSchema = z.object({
|
|
@@ -405,6 +491,16 @@ function fileDataReducer(left, right) {
|
|
|
405
491
|
return result;
|
|
406
492
|
}
|
|
407
493
|
/**
|
|
494
|
+
* Shared filesystem state schema.
|
|
495
|
+
* Defined at module level to ensure the same object identity is used across all agents,
|
|
496
|
+
* preventing "Channel already exists with different type" errors when multiple agents
|
|
497
|
+
* use createFilesystemMiddleware.
|
|
498
|
+
*/
|
|
499
|
+
const FilesystemStateSchema = z.object({ files: z.record(z.string(), FileDataSchema).default({}).meta({ reducer: {
|
|
500
|
+
fn: fileDataReducer,
|
|
501
|
+
schema: z.record(z.string(), FileDataSchema.nullable())
|
|
502
|
+
} }) });
|
|
503
|
+
/**
|
|
408
504
|
* Resolve backend from factory or instance.
|
|
409
505
|
*
|
|
410
506
|
* @param backend - Backend instance or factory function
|
|
@@ -428,6 +524,31 @@ const WRITE_FILE_TOOL_DESCRIPTION = "Write content to a new file. Returns an err
|
|
|
428
524
|
const EDIT_FILE_TOOL_DESCRIPTION = "Edit a file by replacing a specific string with a new string";
|
|
429
525
|
const GLOB_TOOL_DESCRIPTION = "Find files matching a glob pattern (e.g., '**/*.py' for all Python files)";
|
|
430
526
|
const GREP_TOOL_DESCRIPTION = "Search for a regex pattern in files. Returns matching files and line numbers";
|
|
527
|
+
const EXECUTE_TOOL_DESCRIPTION = `Executes a given command in the sandbox environment with proper handling and security measures.
|
|
528
|
+
|
|
529
|
+
Before executing the command, please follow these steps:
|
|
530
|
+
|
|
531
|
+
1. Directory Verification:
|
|
532
|
+
- If the command will create new directories or files, first use the ls tool to verify the parent directory exists
|
|
533
|
+
|
|
534
|
+
2. Command Execution:
|
|
535
|
+
- Always quote file paths that contain spaces with double quotes
|
|
536
|
+
- Commands run in an isolated sandbox environment
|
|
537
|
+
- Returns combined stdout/stderr output with exit code
|
|
538
|
+
|
|
539
|
+
Usage notes:
|
|
540
|
+
- The command parameter is required
|
|
541
|
+
- If the output is very large, it may be truncated
|
|
542
|
+
- IMPORTANT: Avoid using search commands like find and grep. Use the grep, glob tools instead.
|
|
543
|
+
- Avoid read tools like cat, head, tail - use read_file instead.
|
|
544
|
+
- Use '&&' to chain dependent commands, ';' for independent commands
|
|
545
|
+
- Try to use absolute paths to avoid cd`;
|
|
546
|
+
const EXECUTION_SYSTEM_PROMPT = `## Execute Tool \`execute\`
|
|
547
|
+
|
|
548
|
+
You have access to an \`execute\` tool for running shell commands in a sandboxed environment.
|
|
549
|
+
Use this tool to run commands, scripts, tests, builds, and other shell operations.
|
|
550
|
+
|
|
551
|
+
- execute: run a shell command in the sandbox (returns output and exit code)`;
|
|
431
552
|
/**
|
|
432
553
|
* Create ls tool using backend.
|
|
433
554
|
*/
|
|
@@ -471,8 +592,8 @@ function createReadFileTool(backend, options) {
|
|
|
471
592
|
description: customDescription || READ_FILE_TOOL_DESCRIPTION,
|
|
472
593
|
schema: z.object({
|
|
473
594
|
file_path: z.string().describe("Absolute path to the file to read"),
|
|
474
|
-
offset: z.number(
|
|
475
|
-
limit: z.number(
|
|
595
|
+
offset: z.coerce.number().optional().default(0).describe("Line offset to start reading from (0-indexed)"),
|
|
596
|
+
limit: z.coerce.number().optional().default(2e3).describe("Maximum number of lines to read")
|
|
476
597
|
})
|
|
477
598
|
});
|
|
478
599
|
}
|
|
@@ -602,35 +723,66 @@ function createGrepTool(backend, options) {
|
|
|
602
723
|
});
|
|
603
724
|
}
|
|
604
725
|
/**
|
|
726
|
+
* Create execute tool using backend.
|
|
727
|
+
*/
|
|
728
|
+
function createExecuteTool(backend, options) {
|
|
729
|
+
const { customDescription } = options;
|
|
730
|
+
return tool(async (input, config) => {
|
|
731
|
+
const resolvedBackend = getBackend(backend, {
|
|
732
|
+
state: getCurrentTaskInput(config),
|
|
733
|
+
store: config.store
|
|
734
|
+
});
|
|
735
|
+
if (!isSandboxBackend(resolvedBackend)) return "Error: Execution not available. This agent's backend does not support command execution (SandboxBackendProtocol). To use the execute tool, provide a backend that implements SandboxBackendProtocol.";
|
|
736
|
+
const result = await resolvedBackend.execute(input.command);
|
|
737
|
+
const parts = [result.output];
|
|
738
|
+
if (result.exitCode !== null) {
|
|
739
|
+
const status = result.exitCode === 0 ? "succeeded" : "failed";
|
|
740
|
+
parts.push(`\n[Command ${status} with exit code ${result.exitCode}]`);
|
|
741
|
+
}
|
|
742
|
+
if (result.truncated) parts.push("\n[Output was truncated due to size limits]");
|
|
743
|
+
return parts.join("");
|
|
744
|
+
}, {
|
|
745
|
+
name: "execute",
|
|
746
|
+
description: customDescription || EXECUTE_TOOL_DESCRIPTION,
|
|
747
|
+
schema: z.object({ command: z.string().describe("The shell command to execute") })
|
|
748
|
+
});
|
|
749
|
+
}
|
|
750
|
+
/**
|
|
605
751
|
* Create filesystem middleware with all tools and features.
|
|
606
752
|
*/
|
|
607
753
|
function createFilesystemMiddleware(options = {}) {
|
|
608
754
|
const { backend = (stateAndStore) => new StateBackend(stateAndStore), systemPrompt: customSystemPrompt = null, customToolDescriptions = null, toolTokenLimitBeforeEvict = 2e4 } = options;
|
|
609
|
-
const
|
|
610
|
-
const tools = [
|
|
611
|
-
createLsTool(backend, { customDescription: customToolDescriptions?.ls }),
|
|
612
|
-
createReadFileTool(backend, { customDescription: customToolDescriptions?.read_file }),
|
|
613
|
-
createWriteFileTool(backend, { customDescription: customToolDescriptions?.write_file }),
|
|
614
|
-
createEditFileTool(backend, { customDescription: customToolDescriptions?.edit_file }),
|
|
615
|
-
createGlobTool(backend, { customDescription: customToolDescriptions?.glob }),
|
|
616
|
-
createGrepTool(backend, { customDescription: customToolDescriptions?.grep })
|
|
617
|
-
];
|
|
755
|
+
const baseSystemPrompt = customSystemPrompt || FILESYSTEM_SYSTEM_PROMPT;
|
|
618
756
|
return createMiddleware({
|
|
619
757
|
name: "FilesystemMiddleware",
|
|
620
|
-
stateSchema:
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
758
|
+
stateSchema: FilesystemStateSchema,
|
|
759
|
+
tools: [
|
|
760
|
+
createLsTool(backend, { customDescription: customToolDescriptions?.ls }),
|
|
761
|
+
createReadFileTool(backend, { customDescription: customToolDescriptions?.read_file }),
|
|
762
|
+
createWriteFileTool(backend, { customDescription: customToolDescriptions?.write_file }),
|
|
763
|
+
createEditFileTool(backend, { customDescription: customToolDescriptions?.edit_file }),
|
|
764
|
+
createGlobTool(backend, { customDescription: customToolDescriptions?.glob }),
|
|
765
|
+
createGrepTool(backend, { customDescription: customToolDescriptions?.grep }),
|
|
766
|
+
createExecuteTool(backend, { customDescription: customToolDescriptions?.execute })
|
|
767
|
+
],
|
|
768
|
+
wrapModelCall: async (request, handler) => {
|
|
769
|
+
const supportsExecution = isSandboxBackend(getBackend(backend, {
|
|
770
|
+
state: request.state || {},
|
|
771
|
+
store: request.config?.store
|
|
772
|
+
}));
|
|
773
|
+
let tools = request.tools;
|
|
774
|
+
if (!supportsExecution) tools = tools.filter((t) => t.name !== "execute");
|
|
775
|
+
let systemPrompt = baseSystemPrompt;
|
|
776
|
+
if (supportsExecution) systemPrompt = `${systemPrompt}\n\n${EXECUTION_SYSTEM_PROMPT}`;
|
|
626
777
|
const currentSystemPrompt = request.systemPrompt || "";
|
|
627
778
|
const newSystemPrompt = currentSystemPrompt ? `${currentSystemPrompt}\n\n${systemPrompt}` : systemPrompt;
|
|
628
779
|
return handler({
|
|
629
780
|
...request,
|
|
781
|
+
tools,
|
|
630
782
|
systemPrompt: newSystemPrompt
|
|
631
783
|
});
|
|
632
|
-
}
|
|
633
|
-
wrapToolCall: toolTokenLimitBeforeEvict ?
|
|
784
|
+
},
|
|
785
|
+
wrapToolCall: toolTokenLimitBeforeEvict ? async (request, handler) => {
|
|
634
786
|
const result = await handler(request);
|
|
635
787
|
async function processToolMessage(msg) {
|
|
636
788
|
if (typeof msg.content === "string" && msg.content.length > toolTokenLimitBeforeEvict * 4) {
|
|
@@ -687,7 +839,7 @@ function createFilesystemMiddleware(options = {}) {
|
|
|
687
839
|
} });
|
|
688
840
|
}
|
|
689
841
|
return result;
|
|
690
|
-
}
|
|
842
|
+
} : void 0
|
|
691
843
|
});
|
|
692
844
|
}
|
|
693
845
|
|
|
@@ -697,7 +849,8 @@ const DEFAULT_SUBAGENT_PROMPT = "In order to complete the objective that the use
|
|
|
697
849
|
const EXCLUDED_STATE_KEYS = [
|
|
698
850
|
"messages",
|
|
699
851
|
"todos",
|
|
700
|
-
"jumpTo"
|
|
852
|
+
"jumpTo",
|
|
853
|
+
"files"
|
|
701
854
|
];
|
|
702
855
|
const DEFAULT_GENERAL_PURPOSE_DESCRIPTION = "General-purpose agent for researching complex questions, searching for files and content, and executing multi-step tasks. When you are searching for a keyword or file and are not confident that you will find the right match in the first few tries use this agent to perform the search for you. This agent has access to all tools as the main agent.";
|
|
703
856
|
function getTaskToolDescription(subagentDescriptions) {
|
|
@@ -931,9 +1084,9 @@ function createTaskTool(options) {
|
|
|
931
1084
|
}, {
|
|
932
1085
|
name: "task",
|
|
933
1086
|
description: taskDescription ? taskDescription : getTaskToolDescription(subagentDescriptions),
|
|
934
|
-
schema: z.object({
|
|
935
|
-
description: z.string().describe("The task to execute with the selected agent"),
|
|
936
|
-
subagent_type: z.string().describe(`Name of the agent to use. Available: ${Object.keys(subagentGraphs).join(", ")}`)
|
|
1087
|
+
schema: z$1.object({
|
|
1088
|
+
description: z$1.string().describe("The task to execute with the selected agent"),
|
|
1089
|
+
subagent_type: z$1.string().describe(`Name of the agent to use. Available: ${Object.keys(subagentGraphs).join(", ")}`)
|
|
937
1090
|
})
|
|
938
1091
|
});
|
|
939
1092
|
}
|
|
@@ -1268,11 +1421,82 @@ var StoreBackend = class {
|
|
|
1268
1421
|
}
|
|
1269
1422
|
return infos;
|
|
1270
1423
|
}
|
|
1424
|
+
/**
|
|
1425
|
+
* Upload multiple files.
|
|
1426
|
+
*
|
|
1427
|
+
* @param files - List of [path, content] tuples to upload
|
|
1428
|
+
* @returns List of FileUploadResponse objects, one per input file
|
|
1429
|
+
*/
|
|
1430
|
+
async uploadFiles(files) {
|
|
1431
|
+
const store = this.getStore();
|
|
1432
|
+
const namespace = this.getNamespace();
|
|
1433
|
+
const responses = [];
|
|
1434
|
+
for (const [path$1, content] of files) try {
|
|
1435
|
+
const fileData = createFileData(new TextDecoder().decode(content));
|
|
1436
|
+
const storeValue = this.convertFileDataToStoreValue(fileData);
|
|
1437
|
+
await store.put(namespace, path$1, storeValue);
|
|
1438
|
+
responses.push({
|
|
1439
|
+
path: path$1,
|
|
1440
|
+
error: null
|
|
1441
|
+
});
|
|
1442
|
+
} catch {
|
|
1443
|
+
responses.push({
|
|
1444
|
+
path: path$1,
|
|
1445
|
+
error: "invalid_path"
|
|
1446
|
+
});
|
|
1447
|
+
}
|
|
1448
|
+
return responses;
|
|
1449
|
+
}
|
|
1450
|
+
/**
|
|
1451
|
+
* Download multiple files.
|
|
1452
|
+
*
|
|
1453
|
+
* @param paths - List of file paths to download
|
|
1454
|
+
* @returns List of FileDownloadResponse objects, one per input path
|
|
1455
|
+
*/
|
|
1456
|
+
async downloadFiles(paths) {
|
|
1457
|
+
const store = this.getStore();
|
|
1458
|
+
const namespace = this.getNamespace();
|
|
1459
|
+
const responses = [];
|
|
1460
|
+
for (const path$1 of paths) try {
|
|
1461
|
+
const item = await store.get(namespace, path$1);
|
|
1462
|
+
if (!item) {
|
|
1463
|
+
responses.push({
|
|
1464
|
+
path: path$1,
|
|
1465
|
+
content: null,
|
|
1466
|
+
error: "file_not_found"
|
|
1467
|
+
});
|
|
1468
|
+
continue;
|
|
1469
|
+
}
|
|
1470
|
+
const contentStr = fileDataToString(this.convertStoreItemToFileData(item));
|
|
1471
|
+
const content = new TextEncoder().encode(contentStr);
|
|
1472
|
+
responses.push({
|
|
1473
|
+
path: path$1,
|
|
1474
|
+
content,
|
|
1475
|
+
error: null
|
|
1476
|
+
});
|
|
1477
|
+
} catch {
|
|
1478
|
+
responses.push({
|
|
1479
|
+
path: path$1,
|
|
1480
|
+
content: null,
|
|
1481
|
+
error: "file_not_found"
|
|
1482
|
+
});
|
|
1483
|
+
}
|
|
1484
|
+
return responses;
|
|
1485
|
+
}
|
|
1271
1486
|
};
|
|
1272
1487
|
|
|
1273
1488
|
//#endregion
|
|
1274
1489
|
//#region src/backends/filesystem.ts
|
|
1275
|
-
|
|
1490
|
+
/**
|
|
1491
|
+
* FilesystemBackend: Read and write files directly from the filesystem.
|
|
1492
|
+
*
|
|
1493
|
+
* Security and search upgrades:
|
|
1494
|
+
* - Secure path resolution with root containment when in virtual_mode (sandboxed to cwd)
|
|
1495
|
+
* - Prevent symlink-following on file I/O using O_NOFOLLOW when available
|
|
1496
|
+
* - Ripgrep-powered grep with JSON parsing, plus regex fallback
|
|
1497
|
+
* and optional glob include filtering, while preserving virtual path behavior
|
|
1498
|
+
*/
|
|
1499
|
+
const SUPPORTS_NOFOLLOW = fs$1.constants.O_NOFOLLOW !== void 0;
|
|
1276
1500
|
/**
|
|
1277
1501
|
* Backend that reads and writes files directly from the filesystem.
|
|
1278
1502
|
*
|
|
@@ -1391,7 +1615,7 @@ var FilesystemBackend = class {
|
|
|
1391
1615
|
let content;
|
|
1392
1616
|
if (SUPPORTS_NOFOLLOW) {
|
|
1393
1617
|
if (!(await fs.stat(resolvedPath)).isFile()) return `Error: File '${filePath}' not found`;
|
|
1394
|
-
const fd = await fs.open(resolvedPath,
|
|
1618
|
+
const fd = await fs.open(resolvedPath, fs$1.constants.O_RDONLY | fs$1.constants.O_NOFOLLOW);
|
|
1395
1619
|
try {
|
|
1396
1620
|
content = await fd.readFile({ encoding: "utf-8" });
|
|
1397
1621
|
} finally {
|
|
@@ -1427,7 +1651,7 @@ var FilesystemBackend = class {
|
|
|
1427
1651
|
if (SUPPORTS_NOFOLLOW) {
|
|
1428
1652
|
stat = await fs.stat(resolvedPath);
|
|
1429
1653
|
if (!stat.isFile()) throw new Error(`File '${filePath}' not found`);
|
|
1430
|
-
const fd = await fs.open(resolvedPath,
|
|
1654
|
+
const fd = await fs.open(resolvedPath, fs$1.constants.O_RDONLY | fs$1.constants.O_NOFOLLOW);
|
|
1431
1655
|
try {
|
|
1432
1656
|
content = await fd.readFile({ encoding: "utf-8" });
|
|
1433
1657
|
} finally {
|
|
@@ -1458,7 +1682,7 @@ var FilesystemBackend = class {
|
|
|
1458
1682
|
} catch {}
|
|
1459
1683
|
await fs.mkdir(path.dirname(resolvedPath), { recursive: true });
|
|
1460
1684
|
if (SUPPORTS_NOFOLLOW) {
|
|
1461
|
-
const flags =
|
|
1685
|
+
const flags = fs$1.constants.O_WRONLY | fs$1.constants.O_CREAT | fs$1.constants.O_TRUNC | fs$1.constants.O_NOFOLLOW;
|
|
1462
1686
|
const fd = await fs.open(resolvedPath, flags, 420);
|
|
1463
1687
|
try {
|
|
1464
1688
|
await fd.writeFile(content, "utf-8");
|
|
@@ -1484,7 +1708,7 @@ var FilesystemBackend = class {
|
|
|
1484
1708
|
let content;
|
|
1485
1709
|
if (SUPPORTS_NOFOLLOW) {
|
|
1486
1710
|
if (!(await fs.stat(resolvedPath)).isFile()) return { error: `Error: File '${filePath}' not found` };
|
|
1487
|
-
const fd = await fs.open(resolvedPath,
|
|
1711
|
+
const fd = await fs.open(resolvedPath, fs$1.constants.O_RDONLY | fs$1.constants.O_NOFOLLOW);
|
|
1488
1712
|
try {
|
|
1489
1713
|
content = await fd.readFile({ encoding: "utf-8" });
|
|
1490
1714
|
} finally {
|
|
@@ -1500,7 +1724,7 @@ var FilesystemBackend = class {
|
|
|
1500
1724
|
if (typeof result === "string") return { error: result };
|
|
1501
1725
|
const [newContent, occurrences] = result;
|
|
1502
1726
|
if (SUPPORTS_NOFOLLOW) {
|
|
1503
|
-
const flags =
|
|
1727
|
+
const flags = fs$1.constants.O_WRONLY | fs$1.constants.O_TRUNC | fs$1.constants.O_NOFOLLOW;
|
|
1504
1728
|
const fd = await fs.open(resolvedPath, flags);
|
|
1505
1729
|
try {
|
|
1506
1730
|
await fd.writeFile(newContent, "utf-8");
|
|
@@ -1694,6 +1918,82 @@ var FilesystemBackend = class {
|
|
|
1694
1918
|
results.sort((a, b) => a.path.localeCompare(b.path));
|
|
1695
1919
|
return results;
|
|
1696
1920
|
}
|
|
1921
|
+
/**
|
|
1922
|
+
* Upload multiple files to the filesystem.
|
|
1923
|
+
*
|
|
1924
|
+
* @param files - List of [path, content] tuples to upload
|
|
1925
|
+
* @returns List of FileUploadResponse objects, one per input file
|
|
1926
|
+
*/
|
|
1927
|
+
async uploadFiles(files) {
|
|
1928
|
+
const responses = [];
|
|
1929
|
+
for (const [filePath, content] of files) try {
|
|
1930
|
+
const resolvedPath = this.resolvePath(filePath);
|
|
1931
|
+
await fs.mkdir(path.dirname(resolvedPath), { recursive: true });
|
|
1932
|
+
await fs.writeFile(resolvedPath, content);
|
|
1933
|
+
responses.push({
|
|
1934
|
+
path: filePath,
|
|
1935
|
+
error: null
|
|
1936
|
+
});
|
|
1937
|
+
} catch (e) {
|
|
1938
|
+
if (e.code === "ENOENT") responses.push({
|
|
1939
|
+
path: filePath,
|
|
1940
|
+
error: "file_not_found"
|
|
1941
|
+
});
|
|
1942
|
+
else if (e.code === "EACCES") responses.push({
|
|
1943
|
+
path: filePath,
|
|
1944
|
+
error: "permission_denied"
|
|
1945
|
+
});
|
|
1946
|
+
else if (e.code === "EISDIR") responses.push({
|
|
1947
|
+
path: filePath,
|
|
1948
|
+
error: "is_directory"
|
|
1949
|
+
});
|
|
1950
|
+
else responses.push({
|
|
1951
|
+
path: filePath,
|
|
1952
|
+
error: "invalid_path"
|
|
1953
|
+
});
|
|
1954
|
+
}
|
|
1955
|
+
return responses;
|
|
1956
|
+
}
|
|
1957
|
+
/**
|
|
1958
|
+
* Download multiple files from the filesystem.
|
|
1959
|
+
*
|
|
1960
|
+
* @param paths - List of file paths to download
|
|
1961
|
+
* @returns List of FileDownloadResponse objects, one per input path
|
|
1962
|
+
*/
|
|
1963
|
+
async downloadFiles(paths) {
|
|
1964
|
+
const responses = [];
|
|
1965
|
+
for (const filePath of paths) try {
|
|
1966
|
+
const resolvedPath = this.resolvePath(filePath);
|
|
1967
|
+
const content = await fs.readFile(resolvedPath);
|
|
1968
|
+
responses.push({
|
|
1969
|
+
path: filePath,
|
|
1970
|
+
content,
|
|
1971
|
+
error: null
|
|
1972
|
+
});
|
|
1973
|
+
} catch (e) {
|
|
1974
|
+
if (e.code === "ENOENT") responses.push({
|
|
1975
|
+
path: filePath,
|
|
1976
|
+
content: null,
|
|
1977
|
+
error: "file_not_found"
|
|
1978
|
+
});
|
|
1979
|
+
else if (e.code === "EACCES") responses.push({
|
|
1980
|
+
path: filePath,
|
|
1981
|
+
content: null,
|
|
1982
|
+
error: "permission_denied"
|
|
1983
|
+
});
|
|
1984
|
+
else if (e.code === "EISDIR") responses.push({
|
|
1985
|
+
path: filePath,
|
|
1986
|
+
content: null,
|
|
1987
|
+
error: "is_directory"
|
|
1988
|
+
});
|
|
1989
|
+
else responses.push({
|
|
1990
|
+
path: filePath,
|
|
1991
|
+
content: null,
|
|
1992
|
+
error: "invalid_path"
|
|
1993
|
+
});
|
|
1994
|
+
}
|
|
1995
|
+
return responses;
|
|
1996
|
+
}
|
|
1697
1997
|
};
|
|
1698
1998
|
|
|
1699
1999
|
//#endregion
|
|
@@ -1861,6 +2161,465 @@ var CompositeBackend = class {
|
|
|
1861
2161
|
const [backend, strippedKey] = this.getBackendAndKey(filePath);
|
|
1862
2162
|
return await backend.edit(strippedKey, oldString, newString, replaceAll);
|
|
1863
2163
|
}
|
|
2164
|
+
/**
|
|
2165
|
+
* Execute a command via the default backend.
|
|
2166
|
+
* Execution is not path-specific, so it always delegates to the default backend.
|
|
2167
|
+
*
|
|
2168
|
+
* @param command - Full shell command string to execute
|
|
2169
|
+
* @returns ExecuteResponse with combined output, exit code, and truncation flag
|
|
2170
|
+
* @throws Error if the default backend doesn't support command execution
|
|
2171
|
+
*/
|
|
2172
|
+
execute(command) {
|
|
2173
|
+
if (!isSandboxBackend(this.default)) throw new Error("Default backend doesn't support command execution (SandboxBackendProtocol). To enable execution, provide a default backend that implements SandboxBackendProtocol.");
|
|
2174
|
+
return Promise.resolve(this.default.execute(command));
|
|
2175
|
+
}
|
|
2176
|
+
/**
|
|
2177
|
+
* Upload multiple files, batching by backend for efficiency.
|
|
2178
|
+
*
|
|
2179
|
+
* @param files - List of [path, content] tuples to upload
|
|
2180
|
+
* @returns List of FileUploadResponse objects, one per input file
|
|
2181
|
+
*/
|
|
2182
|
+
async uploadFiles(files) {
|
|
2183
|
+
const results = new Array(files.length).fill(null);
|
|
2184
|
+
const batchesByBackend = /* @__PURE__ */ new Map();
|
|
2185
|
+
for (let idx = 0; idx < files.length; idx++) {
|
|
2186
|
+
const [path$1, content] = files[idx];
|
|
2187
|
+
const [backend, strippedPath] = this.getBackendAndKey(path$1);
|
|
2188
|
+
if (!batchesByBackend.has(backend)) batchesByBackend.set(backend, []);
|
|
2189
|
+
batchesByBackend.get(backend).push({
|
|
2190
|
+
idx,
|
|
2191
|
+
path: strippedPath,
|
|
2192
|
+
content
|
|
2193
|
+
});
|
|
2194
|
+
}
|
|
2195
|
+
for (const [backend, batch] of batchesByBackend) {
|
|
2196
|
+
const batchFiles = batch.map((b) => [b.path, b.content]);
|
|
2197
|
+
const batchResponses = await backend.uploadFiles(batchFiles);
|
|
2198
|
+
for (let i = 0; i < batch.length; i++) {
|
|
2199
|
+
const originalIdx = batch[i].idx;
|
|
2200
|
+
results[originalIdx] = {
|
|
2201
|
+
path: files[originalIdx][0],
|
|
2202
|
+
error: batchResponses[i]?.error ?? null
|
|
2203
|
+
};
|
|
2204
|
+
}
|
|
2205
|
+
}
|
|
2206
|
+
return results;
|
|
2207
|
+
}
|
|
2208
|
+
/**
|
|
2209
|
+
* Download multiple files, batching by backend for efficiency.
|
|
2210
|
+
*
|
|
2211
|
+
* @param paths - List of file paths to download
|
|
2212
|
+
* @returns List of FileDownloadResponse objects, one per input path
|
|
2213
|
+
*/
|
|
2214
|
+
async downloadFiles(paths) {
|
|
2215
|
+
const results = new Array(paths.length).fill(null);
|
|
2216
|
+
const batchesByBackend = /* @__PURE__ */ new Map();
|
|
2217
|
+
for (let idx = 0; idx < paths.length; idx++) {
|
|
2218
|
+
const path$1 = paths[idx];
|
|
2219
|
+
const [backend, strippedPath] = this.getBackendAndKey(path$1);
|
|
2220
|
+
if (!batchesByBackend.has(backend)) batchesByBackend.set(backend, []);
|
|
2221
|
+
batchesByBackend.get(backend).push({
|
|
2222
|
+
idx,
|
|
2223
|
+
path: strippedPath
|
|
2224
|
+
});
|
|
2225
|
+
}
|
|
2226
|
+
for (const [backend, batch] of batchesByBackend) {
|
|
2227
|
+
const batchPaths = batch.map((b) => b.path);
|
|
2228
|
+
const batchResponses = await backend.downloadFiles(batchPaths);
|
|
2229
|
+
for (let i = 0; i < batch.length; i++) {
|
|
2230
|
+
const originalIdx = batch[i].idx;
|
|
2231
|
+
results[originalIdx] = {
|
|
2232
|
+
path: paths[originalIdx],
|
|
2233
|
+
content: batchResponses[i]?.content ?? null,
|
|
2234
|
+
error: batchResponses[i]?.error ?? null
|
|
2235
|
+
};
|
|
2236
|
+
}
|
|
2237
|
+
}
|
|
2238
|
+
return results;
|
|
2239
|
+
}
|
|
2240
|
+
};
|
|
2241
|
+
|
|
2242
|
+
//#endregion
|
|
2243
|
+
//#region src/backends/sandbox.ts
|
|
2244
|
+
/**
|
|
2245
|
+
* Node.js command template for glob operations.
|
|
2246
|
+
* Uses web-standard atob() for base64 decoding.
|
|
2247
|
+
*/
|
|
2248
|
+
function buildGlobCommand(searchPath, pattern) {
|
|
2249
|
+
return `node -e "
|
|
2250
|
+
const fs = require('fs');
|
|
2251
|
+
const path = require('path');
|
|
2252
|
+
|
|
2253
|
+
const searchPath = atob('${btoa(searchPath)}');
|
|
2254
|
+
const pattern = atob('${btoa(pattern)}');
|
|
2255
|
+
|
|
2256
|
+
function globMatch(relativePath, pattern) {
|
|
2257
|
+
const regexPattern = pattern
|
|
2258
|
+
.replace(/\\*\\*/g, '<<<GLOBSTAR>>>')
|
|
2259
|
+
.replace(/\\*/g, '[^/]*')
|
|
2260
|
+
.replace(/\\?/g, '.')
|
|
2261
|
+
.replace(/<<<GLOBSTAR>>>/g, '.*');
|
|
2262
|
+
return new RegExp('^' + regexPattern + '$').test(relativePath);
|
|
2263
|
+
}
|
|
2264
|
+
|
|
2265
|
+
function walkDir(dir, baseDir, results) {
|
|
2266
|
+
try {
|
|
2267
|
+
const entries = fs.readdirSync(dir, { withFileTypes: true });
|
|
2268
|
+
for (const entry of entries) {
|
|
2269
|
+
const fullPath = path.join(dir, entry.name);
|
|
2270
|
+
const relativePath = path.relative(baseDir, fullPath);
|
|
2271
|
+
if (entry.isDirectory()) {
|
|
2272
|
+
walkDir(fullPath, baseDir, results);
|
|
2273
|
+
} else if (globMatch(relativePath, pattern)) {
|
|
2274
|
+
const stat = fs.statSync(fullPath);
|
|
2275
|
+
console.log(JSON.stringify({
|
|
2276
|
+
path: relativePath,
|
|
2277
|
+
size: stat.size,
|
|
2278
|
+
mtime: stat.mtimeMs,
|
|
2279
|
+
isDir: false
|
|
2280
|
+
}));
|
|
2281
|
+
}
|
|
2282
|
+
}
|
|
2283
|
+
} catch (e) {
|
|
2284
|
+
// Silent failure for non-existent paths
|
|
2285
|
+
}
|
|
2286
|
+
}
|
|
2287
|
+
|
|
2288
|
+
try {
|
|
2289
|
+
process.chdir(searchPath);
|
|
2290
|
+
walkDir('.', '.', []);
|
|
2291
|
+
} catch (e) {
|
|
2292
|
+
// Silent failure for non-existent paths
|
|
2293
|
+
}
|
|
2294
|
+
"`;
|
|
2295
|
+
}
|
|
2296
|
+
/**
|
|
2297
|
+
* Node.js command template for listing directory contents.
|
|
2298
|
+
*/
|
|
2299
|
+
function buildLsCommand(dirPath) {
|
|
2300
|
+
return `node -e "
|
|
2301
|
+
const fs = require('fs');
|
|
2302
|
+
const path = require('path');
|
|
2303
|
+
|
|
2304
|
+
const dirPath = atob('${btoa(dirPath)}');
|
|
2305
|
+
|
|
2306
|
+
try {
|
|
2307
|
+
const entries = fs.readdirSync(dirPath, { withFileTypes: true });
|
|
2308
|
+
for (const entry of entries) {
|
|
2309
|
+
const fullPath = path.join(dirPath, entry.name);
|
|
2310
|
+
const stat = fs.statSync(fullPath);
|
|
2311
|
+
console.log(JSON.stringify({
|
|
2312
|
+
path: entry.isDirectory() ? fullPath + '/' : fullPath,
|
|
2313
|
+
size: stat.size,
|
|
2314
|
+
mtime: stat.mtimeMs,
|
|
2315
|
+
isDir: entry.isDirectory()
|
|
2316
|
+
}));
|
|
2317
|
+
}
|
|
2318
|
+
} catch (e) {
|
|
2319
|
+
console.error('Error: ' + e.message);
|
|
2320
|
+
process.exit(1);
|
|
2321
|
+
}
|
|
2322
|
+
"`;
|
|
2323
|
+
}
|
|
2324
|
+
/**
 * Build a shell command that runs an inline Node.js script to read a text
 * file and print it with 1-based line numbers (number right-padded to 6
 * columns, then a tab, then the line — cat -n style).
 *
 * `offset` (0-indexed first line) and `limit` (max line count) are
 * sanitized on the host side so only plain non-negative integers are ever
 * interpolated into the script. An invalid or oversized limit is encoded
 * as 0, which the script treats as "read to end of file".
 * (Previously a limit of Number.MAX_SAFE_INTEGER — exactly what
 * BaseSandbox.readRaw passes — collapsed to 0 and the script's
 * `lines.slice(offset, offset + 0)` produced an empty read.)
 *
 * The file path is passed base64-encoded (btoa/atob) to survive the
 * double-quoted `node -e` argument.
 * NOTE(review): btoa throws on characters outside Latin-1 — confirm
 * callers only use Latin-1 paths.
 *
 * @param filePath - Absolute path of the file to read
 * @param offset - 0-indexed line offset to start reading from
 * @param limit - Maximum number of lines (invalid / <= 0 / MAX_SAFE_INTEGER means "all remaining")
 * @returns Shell command string suitable for BaseSandbox.execute()
 */
function buildReadCommand(filePath, offset, limit) {
	// Sanitize on the host so only literal integers reach the script text.
	const safeOffset = Number.isFinite(offset) && offset > 0 ? Math.floor(offset) : 0;
	const safeLimit = Number.isFinite(limit) && limit > 0 && limit < Number.MAX_SAFE_INTEGER ? Math.floor(limit) : 0;
	return `node -e "
const fs = require('fs');

const filePath = atob('${btoa(filePath)}');
const offset = ${safeOffset};
const limit = ${safeLimit};

if (!fs.existsSync(filePath)) {
  console.log('Error: File not found');
  process.exit(1);
}

const stat = fs.statSync(filePath);
if (stat.size === 0) {
  console.log('System reminder: File exists but has empty contents');
  process.exit(0);
}

const content = fs.readFileSync(filePath, 'utf-8');
const lines = content.split('\\n');
const selected = limit > 0 ? lines.slice(offset, offset + limit) : lines.slice(offset);

for (let i = 0; i < selected.length; i++) {
  const lineNum = offset + i + 1;
  console.log(String(lineNum).padStart(6) + '\\t' + selected[i]);
}
"`;
}
|
|
2356
|
+
/**
 * Build a shell command that runs an inline Node.js script to create a new
 * file with the given content.
 *
 * Both path and content are passed base64-encoded (btoa/atob) so arbitrary
 * text survives the double-quoted `node -e` argument.
 * NOTE(review): btoa throws on characters outside Latin-1, so file content
 * with non-Latin-1 characters would fail on the host side — confirm callers
 * only pass Latin-1 text, or that this is handled upstream.
 *
 * Behavior of the generated script:
 * - refuses to overwrite: if the file already exists it prints an error to
 *   stderr and exits 1 (BaseSandbox.write turns this into an error result)
 * - creates parent directories as needed, writes the file, prints 'OK'
 *
 * @param filePath - Absolute path of the file to create
 * @param content - File content to write (UTF-8)
 * @returns Shell command string suitable for BaseSandbox.execute()
 */
function buildWriteCommand(filePath, content) {
	return `node -e "
const fs = require('fs');
const path = require('path');

const filePath = atob('${btoa(filePath)}');
const content = atob('${btoa(content)}');

if (fs.existsSync(filePath)) {
  console.error('Error: File already exists');
  process.exit(1);
}

const parentDir = path.dirname(filePath) || '.';
fs.mkdirSync(parentDir, { recursive: true });

fs.writeFileSync(filePath, content, 'utf-8');
console.log('OK');
"`;
}
|
|
2379
|
+
/**
 * Build a shell command that runs an inline Node.js script to replace a
 * string in an existing file.
 *
 * All three strings are passed base64-encoded (btoa/atob) so arbitrary
 * text survives the double-quoted `node -e` argument. `replaceAll` is
 * coerced to a literal true/false before interpolation.
 * NOTE(review): btoa throws on characters outside Latin-1 — confirm
 * callers only pass Latin-1 text.
 *
 * Exit-code protocol (consumed by BaseSandbox.edit):
 * - 0: success; the number of occurrences replaced is printed on stdout
 * - 1: oldStr not found in the file
 * - 2: multiple occurrences found and replaceAll was false
 * - 3: file could not be read (missing/unreadable)
 *
 * Occurrences are counted and replaced via split/join, so oldStr is
 * treated literally (no regex semantics).
 *
 * @param filePath - Absolute path of the file to edit
 * @param oldStr - Literal string to search for
 * @param newStr - Replacement string
 * @param replaceAll - Replace every occurrence instead of requiring uniqueness
 * @returns Shell command string suitable for BaseSandbox.execute()
 */
function buildEditCommand(filePath, oldStr, newStr, replaceAll) {
	return `node -e "
const fs = require('fs');

const filePath = atob('${btoa(filePath)}');
const oldStr = atob('${btoa(oldStr)}');
const newStr = atob('${btoa(newStr)}');
const replaceAll = ${Boolean(replaceAll)};

let text;
try {
  text = fs.readFileSync(filePath, 'utf-8');
} catch (e) {
  process.exit(3);
}

const count = text.split(oldStr).length - 1;

if (count === 0) {
  process.exit(1);
}
if (count > 1 && !replaceAll) {
  process.exit(2);
}

const result = text.split(oldStr).join(newStr);
fs.writeFileSync(filePath, result, 'utf-8');
console.log(count);
"`;
}
|
|
2412
|
+
/**
 * Build a shell command that runs an inline Node.js script performing a
 * recursive regex search (grep) under a directory.
 *
 * Pattern, search path, and the optional glob filter are passed
 * base64-encoded (btoa/atob) so they survive the double-quoted `node -e`
 * argument. NOTE(review): btoa throws on characters outside Latin-1 —
 * confirm callers only pass Latin-1 text.
 *
 * Behavior of the generated script:
 * - compiles `pattern` as a (non-global) RegExp; on failure prints
 *   'Invalid regex: <message>' to stderr and exits 1
 * - walks the tree below searchPath, skipping unreadable files/dirs silently
 * - filters files by `globPattern` matched against the path relative to
 *   searchPath, using a simple glob→regex translation:
 *   ** → .*, * → [^/]*, ? → . (other regex metacharacters such as '.' are
 *   NOT escaped — presumably acceptable for the glob inputs used here;
 *   TODO confirm)
 * - prints one JSON object per matching line: { path, line, text }
 *   (line numbers are 1-based)
 *
 * @param pattern - Regular expression source to search for
 * @param searchPath - Directory to search recursively
 * @param globPattern - Optional glob filter on relative file paths (null/empty = all files)
 * @returns Shell command string suitable for BaseSandbox.execute()
 */
function buildGrepCommand(pattern, searchPath, globPattern) {
	const patternB64 = btoa(pattern);
	const pathB64 = btoa(searchPath);
	const globB64 = globPattern ? btoa(globPattern) : "";
	return `node -e "
const fs = require('fs');
const path = require('path');

const pattern = atob('${patternB64}');
const searchPath = atob('${pathB64}');
const globPattern = ${globPattern ? `atob('${globB64}')` : "null"};

let regex;
try {
  regex = new RegExp(pattern);
} catch (e) {
  console.error('Invalid regex: ' + e.message);
  process.exit(1);
}

function globMatch(filePath, pattern) {
  if (!pattern) return true;
  const regexPattern = pattern
    .replace(/\\*\\*/g, '<<<GLOBSTAR>>>')
    .replace(/\\*/g, '[^/]*')
    .replace(/\\?/g, '.')
    .replace(/<<<GLOBSTAR>>>/g, '.*');
  return new RegExp('^' + regexPattern + '$').test(filePath);
}

function walkDir(dir, results) {
  try {
    const entries = fs.readdirSync(dir, { withFileTypes: true });
    for (const entry of entries) {
      const fullPath = path.join(dir, entry.name);
      if (entry.isDirectory()) {
        walkDir(fullPath, results);
      } else {
        const relativePath = path.relative(searchPath, fullPath);
        if (globMatch(relativePath, globPattern)) {
          try {
            const content = fs.readFileSync(fullPath, 'utf-8');
            const lines = content.split('\\n');
            for (let i = 0; i < lines.length; i++) {
              if (regex.test(lines[i])) {
                console.log(JSON.stringify({
                  path: fullPath,
                  line: i + 1,
                  text: lines[i]
                }));
              }
            }
          } catch (e) {
            // Skip unreadable files
          }
        }
      }
    }
  } catch (e) {
    // Skip unreadable directories
  }
}

try {
  walkDir(searchPath, []);
} catch (e) {
  // Silent failure
}
"`;
}
|
|
2485
|
+
/**
 * Base sandbox implementation with execute() as the only abstract method.
 *
 * This class provides default implementations for all SandboxBackendProtocol
 * methods using shell commands executed via execute(). Concrete implementations
 * only need to implement the execute() method. Each method builds an inline
 * `node -e` script (see the build*Command helpers) and parses the script's
 * stdout — usually one JSON object per line.
 *
 * Requires Node.js 20+ on the sandbox host.
 */
var BaseSandbox = class {
	/**
	 * List files and directories in the specified directory (non-recursive).
	 *
	 * Lines of output that fail to parse as JSON are silently dropped,
	 * and any non-zero exit code yields an empty list.
	 *
	 * @param path - Absolute path to directory
	 * @returns List of FileInfo objects for files and directories directly in the directory.
	 */
	async lsInfo(path$1) {
		const command = buildLsCommand(path$1);
		const result = await this.execute(command);
		if (result.exitCode !== 0) return [];
		const infos = [];
		const lines = result.output.trim().split("\n").filter(Boolean);
		for (const line of lines) try {
			const parsed = JSON.parse(line);
			infos.push({
				path: parsed.path,
				is_dir: parsed.isDir,
				size: parsed.size,
				// mtime of 0 is also mapped to undefined here (falsy check).
				modified_at: parsed.mtime ? new Date(parsed.mtime).toISOString() : void 0
			});
		} catch {}
		return infos;
	}
	/**
	 * Read file content with line numbers.
	 *
	 * Any non-zero exit code from the read script is reported as
	 * "file not found" (the script exits 1 only for a missing file).
	 *
	 * @param filePath - Absolute file path
	 * @param offset - Line offset to start reading from (0-indexed)
	 * @param limit - Maximum number of lines to read
	 * @returns Formatted file content with line numbers, or error message
	 */
	async read(filePath, offset = 0, limit = 2e3) {
		const command = buildReadCommand(filePath, offset, limit);
		const result = await this.execute(command);
		if (result.exitCode !== 0) return `Error: File '${filePath}' not found`;
		return result.output;
	}
	/**
	 * Read file content as raw FileData.
	 *
	 * Runs the same numbered-read script as read() with no line limit, then
	 * strips the "<padded number>\t" prefix from each line. Output lines
	 * without a tab (e.g. the empty-file system reminder) are dropped.
	 *
	 * @param filePath - Absolute file path
	 * @returns Raw file content as FileData (created_at/modified_at are set
	 *   to the read time, not the file's real timestamps)
	 * @throws Error if the file does not exist / the script exits non-zero
	 */
	async readRaw(filePath) {
		const command = buildReadCommand(filePath, 0, Number.MAX_SAFE_INTEGER);
		const result = await this.execute(command);
		if (result.exitCode !== 0) throw new Error(`File '${filePath}' not found`);
		const lines = [];
		for (const line of result.output.split("\n")) {
			// The read script emits "<padded lineNum>\t<content>"; keep content only.
			const tabIndex = line.indexOf("\t");
			if (tabIndex !== -1) lines.push(line.substring(tabIndex + 1));
		}
		const now = (/* @__PURE__ */ new Date()).toISOString();
		return {
			content: lines,
			created_at: now,
			modified_at: now
		};
	}
	/**
	 * Structured search results or error string for invalid input.
	 *
	 * Returns the "Invalid regex: ..." message as a plain string when the
	 * grep script rejects the pattern; otherwise parses one JSON match
	 * ({ path, line, text }) per output line, dropping unparsable lines.
	 */
	async grepRaw(pattern, path$1 = "/", glob = null) {
		const command = buildGrepCommand(pattern, path$1, glob);
		const result = await this.execute(command);
		if (result.exitCode === 1) {
			if (result.output.includes("Invalid regex:")) return result.output.trim();
		}
		const matches = [];
		const lines = result.output.trim().split("\n").filter(Boolean);
		for (const line of lines) try {
			const parsed = JSON.parse(line);
			matches.push({
				path: parsed.path,
				line: parsed.line,
				text: parsed.text
			});
		} catch {}
		return matches;
	}
	/**
	 * Structured glob matching returning FileInfo objects.
	 *
	 * NOTE(review): relies on buildGlobCommand (defined elsewhere in this
	 * module); exit code is not checked — a failed command simply yields [].
	 */
	async globInfo(pattern, path$1 = "/") {
		const command = buildGlobCommand(path$1, pattern);
		const result = await this.execute(command);
		const infos = [];
		const lines = result.output.trim().split("\n").filter(Boolean);
		for (const line of lines) try {
			const parsed = JSON.parse(line);
			infos.push({
				path: parsed.path,
				is_dir: parsed.isDir,
				size: parsed.size,
				modified_at: parsed.mtime ? new Date(parsed.mtime).toISOString() : void 0
			});
		} catch {}
		return infos;
	}
	/**
	 * Create a new file with content.
	 *
	 * The write script refuses to overwrite, so any non-zero exit is
	 * reported as "already exists".
	 */
	async write(filePath, content) {
		const command = buildWriteCommand(filePath, content);
		if ((await this.execute(command)).exitCode !== 0) return { error: `Cannot write to ${filePath} because it already exists. Read and then make an edit, or write to a new path.` };
		return {
			path: filePath,
			filesUpdate: null
		};
	}
	/**
	 * Edit a file by replacing string occurrences.
	 *
	 * Maps the edit script's exit-code protocol to result objects:
	 * 0 = success (stdout carries the occurrence count), 1 = string not
	 * found, 2 = ambiguous without replaceAll, 3 = file missing.
	 */
	async edit(filePath, oldString, newString, replaceAll = false) {
		const command = buildEditCommand(filePath, oldString, newString, replaceAll);
		const result = await this.execute(command);
		switch (result.exitCode) {
			case 0: return {
				path: filePath,
				filesUpdate: null,
				// Fall back to 1 if the count line is missing/unparsable.
				occurrences: parseInt(result.output.trim(), 10) || 1
			};
			case 1: return { error: `String not found in file '${filePath}'` };
			case 2: return { error: `Multiple occurrences found in '${filePath}'. Use replaceAll=true to replace all.` };
			case 3: return { error: `Error: File '${filePath}' not found` };
			default: return { error: `Unknown error editing file '${filePath}'` };
		}
	}
};
|
|
1865
2624
|
|
|
1866
2625
|
//#endregion
|
|
@@ -1930,4 +2689,691 @@ function createDeepAgent(params = {}) {
|
|
|
1930
2689
|
}
|
|
1931
2690
|
|
|
1932
2691
|
//#endregion
|
|
1933
|
-
|
|
2692
|
+
//#region src/config.ts
|
|
2693
|
+
/**
|
|
2694
|
+
* Configuration and settings for deepagents.
|
|
2695
|
+
*
|
|
2696
|
+
* Provides project detection, path management, and environment configuration
|
|
2697
|
+
* for skills and agent memory middleware.
|
|
2698
|
+
*/
|
|
2699
|
+
/**
 * Find the project root by looking for a .git directory.
 *
 * Walks up the directory tree from startPath (or the current working
 * directory) and returns the first ancestor — including the filesystem
 * root itself — that contains a `.git` entry.
 *
 * @param startPath - Directory to start searching from. Defaults to current working directory.
 * @returns Path to the project root if found, null otherwise.
 */
function findProjectRoot(startPath) {
	let dir = path.resolve(startPath || process.cwd());
	for (;;) {
		if (fs$1.existsSync(path.join(dir, ".git"))) return dir;
		const parent = path.dirname(dir);
		// dirname(root) === root, so this terminates at the filesystem root.
		if (parent === dir) return null;
		dir = parent;
	}
}
|
|
2719
|
+
/**
 * Validate agent name to prevent invalid filesystem paths and security issues.
 *
 * Valid names are non-empty (after trimming) and contain only letters,
 * digits, underscores, hyphens, and spaces — matching the error message
 * reported by Settings.getAgentDir. This rejects path separators and
 * control characters such as tabs/newlines; the previous `\s` class
 * accidentally admitted any whitespace character.
 *
 * @param agentName - The agent name to validate
 * @returns True if valid, false otherwise
 */
function isValidAgentName(agentName) {
	if (!agentName || !agentName.trim()) return false;
	return /^[a-zA-Z0-9_\- ]+$/.test(agentName);
}
|
|
2729
|
+
/**
 * Create a Settings instance with detected environment.
 *
 * Detects the enclosing git project (if any) and the per-user
 * ~/.deepagents directory, and exposes helpers for resolving/creating the
 * per-agent memory (agent.md) and skills paths at both user and project
 * scope. ensure* helpers create the directory tree on demand and return
 * its path; project-scoped helpers return null when no project root was
 * detected.
 *
 * @param options - Configuration options (`startPath` seeds project-root detection)
 * @returns Settings instance with project detection and path management
 */
function createSettings(options = {}) {
	const projectRoot = findProjectRoot(options.startPath);
	const userDeepagentsDir = path.join(os.homedir(), ".deepagents");
	// Create a directory (and any missing parents), then hand back its path.
	const ensureDir = (dir) => {
		fs$1.mkdirSync(dir, { recursive: true });
		return dir;
	};
	return {
		projectRoot,
		userDeepagentsDir,
		hasProject: projectRoot !== null,
		getAgentDir(agentName) {
			if (!isValidAgentName(agentName)) throw new Error(`Invalid agent name: ${JSON.stringify(agentName)}. Agent names can only contain letters, numbers, hyphens, underscores, and spaces.`);
			return path.join(userDeepagentsDir, agentName);
		},
		ensureAgentDir(agentName) {
			return ensureDir(this.getAgentDir(agentName));
		},
		getUserAgentMdPath(agentName) {
			return path.join(this.getAgentDir(agentName), "agent.md");
		},
		getProjectAgentMdPath() {
			return projectRoot ? path.join(projectRoot, ".deepagents", "agent.md") : null;
		},
		getUserSkillsDir(agentName) {
			return path.join(this.getAgentDir(agentName), "skills");
		},
		ensureUserSkillsDir(agentName) {
			return ensureDir(this.getUserSkillsDir(agentName));
		},
		getProjectSkillsDir() {
			return projectRoot ? path.join(projectRoot, ".deepagents", "skills") : null;
		},
		ensureProjectSkillsDir() {
			const skillsDir = this.getProjectSkillsDir();
			return skillsDir ? ensureDir(skillsDir) : null;
		},
		ensureProjectDeepagentsDir() {
			return projectRoot ? ensureDir(path.join(projectRoot, ".deepagents")) : null;
		}
	};
}
|
|
2784
|
+
|
|
2785
|
+
//#endregion
|
|
2786
|
+
//#region src/skills/loader.ts
|
|
2787
|
+
/**
|
|
2788
|
+
* Skill loader for parsing and loading agent skills from SKILL.md files.
|
|
2789
|
+
*
|
|
2790
|
+
* This module implements Anthropic's agent skills pattern with YAML frontmatter parsing.
|
|
2791
|
+
* Each skill is a directory containing a SKILL.md file with:
|
|
2792
|
+
* - YAML frontmatter (name, description required)
|
|
2793
|
+
* - Markdown instructions for the agent
|
|
2794
|
+
* - Optional supporting files (scripts, configs, etc.)
|
|
2795
|
+
*
|
|
2796
|
+
* @example
|
|
2797
|
+
* ```markdown
|
|
2798
|
+
* ---
|
|
2799
|
+
* name: web-research
|
|
2800
|
+
* description: Structured approach to conducting thorough web research
|
|
2801
|
+
* ---
|
|
2802
|
+
*
|
|
2803
|
+
* # Web Research Skill
|
|
2804
|
+
*
|
|
2805
|
+
* ## When to Use
|
|
2806
|
+
* - User asks you to research a topic
|
|
2807
|
+
* ...
|
|
2808
|
+
* ```
|
|
2809
|
+
*
|
|
2810
|
+
* @see https://agentskills.io/specification
|
|
2811
|
+
*/
|
|
2812
|
+
/** Maximum size for SKILL.md files (10MB); larger files are skipped with a warning. */
const MAX_SKILL_FILE_SIZE = 10 * 1024 * 1024;
/** Agent Skills spec constraints */
const MAX_SKILL_NAME_LENGTH = 64;
/** Descriptions longer than this are truncated (with a warning) when parsed. */
const MAX_SKILL_DESCRIPTION_LENGTH = 1024;
/**
 * Pattern for validating skill names per Agent Skills spec:
 * lowercase alphanumeric segments separated by single hyphens
 * (no leading/trailing hyphen, no consecutive hyphens).
 */
const SKILL_NAME_PATTERN = /^[a-z0-9]+(-[a-z0-9]+)*$/;
/**
 * Pattern for extracting YAML frontmatter: a leading `---` fence followed
 * by a closing `---` fence; capture group 1 is the YAML body between them.
 */
const FRONTMATTER_PATTERN = /^---\s*\n([\s\S]*?)\n---\s*\n/;
|
|
2821
|
+
/**
 * Check if a path is safely contained within base_dir.
 *
 * Prevents directory traversal via symlinks or path manipulation: both
 * paths are resolved to their canonical form (realpath, following
 * symlinks) and the target must equal the base or live strictly below it.
 * Any resolution failure (e.g. a non-existent path) is treated as unsafe.
 *
 * @param targetPath - The path to validate
 * @param baseDir - The base directory that should contain the path
 * @returns True if the path is safely within baseDir, false otherwise
 */
function isSafePath(targetPath, baseDir) {
	try {
		const realTarget = fs$1.realpathSync(targetPath);
		const realBase = fs$1.realpathSync(baseDir);
		if (realTarget === realBase) return true;
		return realTarget.startsWith(realBase + path.sep);
	} catch {
		// realpathSync throws for missing/unresolvable paths — treat as unsafe.
		return false;
	}
}
|
|
2841
|
+
/**
 * Validate skill name per Agent Skills spec.
 *
 * Requirements:
 * - Max 64 characters
 * - Lowercase alphanumeric and hyphens only (a-z, 0-9, -)
 * - Cannot start or end with hyphen
 * - No consecutive hyphens
 * - Must match parent directory name
 *
 * @param name - The skill name from YAML frontmatter
 * @param directoryName - The parent directory name
 * @returns Validation result with error message if invalid
 */
function validateSkillName(name, directoryName) {
	// Small factory for failure results keeps each rule on one line.
	const invalid = (error) => ({ valid: false, error });
	if (!name) return invalid("name is required");
	if (name.length > MAX_SKILL_NAME_LENGTH) return invalid("name exceeds 64 characters");
	if (!SKILL_NAME_PATTERN.test(name)) return invalid("name must be lowercase alphanumeric with single hyphens only");
	if (name !== directoryName) return invalid(`name '${name}' must match directory name '${directoryName}'`);
	return { valid: true };
}
|
|
2874
|
+
/**
 * Parse YAML frontmatter from content.
 *
 * Extracts the leading `--- ... ---` fence via FRONTMATTER_PATTERN and
 * parses it as YAML. Anything other than a successfully-parsed YAML
 * mapping/object (missing fence, YAML syntax error, scalar result, null)
 * yields null.
 *
 * @param content - The file content
 * @returns Parsed frontmatter object, or null if parsing fails
 */
function parseFrontmatter(content) {
	const match = FRONTMATTER_PATTERN.exec(content);
	if (match === null) return null;
	try {
		const data = yaml.parse(match[1]);
		if (data !== null && typeof data === "object") return data;
		return null;
	} catch {
		return null;
	}
}
|
|
2890
|
+
/**
 * Parse YAML frontmatter from a SKILL.md file per Agent Skills spec.
 *
 * Rejects (returning null, with a console.warn) files that are oversized,
 * lack valid frontmatter, or are missing the required 'name'/'description'
 * fields. Spec violations in the name (see validateSkillName) and overlong
 * descriptions are tolerated with a warning — the name is kept as-is and
 * the description is truncated. Any I/O error also yields null.
 *
 * @param skillMdPath - Path to the SKILL.md file
 * @param source - Source of the skill ('user' or 'project')
 * @returns SkillMetadata with all fields, or null if parsing fails
 */
function parseSkillMetadata(skillMdPath, source) {
	try {
		const stats = fs$1.statSync(skillMdPath);
		// Guard against pathological files before reading them into memory.
		if (stats.size > MAX_SKILL_FILE_SIZE) {
			console.warn(`Skipping ${skillMdPath}: file too large (${stats.size} bytes)`);
			return null;
		}
		const frontmatter = parseFrontmatter(fs$1.readFileSync(skillMdPath, "utf-8"));
		if (!frontmatter) {
			console.warn(`Skipping ${skillMdPath}: no valid YAML frontmatter found`);
			return null;
		}
		const name = frontmatter.name;
		const description = frontmatter.description;
		if (!name || !description) {
			console.warn(`Skipping ${skillMdPath}: missing required 'name' or 'description'`);
			return null;
		}
		// Spec requires the frontmatter name to equal the skill directory name;
		// a mismatch is only warned about, not rejected.
		const directoryName = path.basename(path.dirname(skillMdPath));
		const validation = validateSkillName(String(name), directoryName);
		if (!validation.valid) console.warn(`Skill '${name}' in ${skillMdPath} does not follow Agent Skills spec: ${validation.error}. Consider renaming to be spec-compliant.`);
		let descriptionStr = String(description);
		if (descriptionStr.length > MAX_SKILL_DESCRIPTION_LENGTH) {
			console.warn(`Description exceeds ${MAX_SKILL_DESCRIPTION_LENGTH} chars in ${skillMdPath}, truncating`);
			descriptionStr = descriptionStr.slice(0, MAX_SKILL_DESCRIPTION_LENGTH);
		}
		return {
			name: String(name),
			description: descriptionStr,
			path: skillMdPath,
			source,
			// Optional spec fields: coerce scalars to strings, pass metadata
			// through only when it is an object; falsy values become undefined.
			license: frontmatter.license ? String(frontmatter.license) : void 0,
			compatibility: frontmatter.compatibility ? String(frontmatter.compatibility) : void 0,
			metadata: frontmatter.metadata && typeof frontmatter.metadata === "object" ? frontmatter.metadata : void 0,
			allowedTools: frontmatter["allowed-tools"] ? String(frontmatter["allowed-tools"]) : void 0
		};
	} catch (error) {
		console.warn(`Error reading ${skillMdPath}: ${error}`);
		return null;
	}
}
|
|
2938
|
+
/**
 * List all skills from a single skills directory (internal helper).
 *
 * Scans the skills directory for subdirectories containing SKILL.md files,
 * parses YAML frontmatter, and returns skill metadata. Entries whose
 * resolved (symlink-followed) location escapes the skills directory are
 * skipped via isSafePath, and any missing/unreadable directory yields an
 * empty list rather than throwing.
 *
 * Skills are organized as:
 * ```
 * skills/
 * ├── skill-name/
 * │   ├── SKILL.md     # Required: instructions with YAML frontmatter
 * │   ├── script.py    # Optional: supporting files
 * │   └── config.json  # Optional: supporting files
 * ```
 *
 * @param skillsDir - Path to the skills directory. A leading "~" is
 *   expanded against $HOME (or %USERPROFILE%). NOTE(review): expansion
 *   simply joins everything after the "~", so "~user/..." forms are not
 *   supported — confirm callers only pass "~/..." style paths.
 * @param source - Source of the skills ('user' or 'project')
 * @returns List of skill metadata
 */
function listSkillsFromDir(skillsDir, source) {
	const expandedDir = skillsDir.startsWith("~") ? path.join(process.env.HOME || process.env.USERPROFILE || "", skillsDir.slice(1)) : skillsDir;
	if (!fs$1.existsSync(expandedDir)) return [];
	let resolvedBase;
	try {
		resolvedBase = fs$1.realpathSync(expandedDir);
	} catch {
		return [];
	}
	const skills = [];
	let entries;
	try {
		entries = fs$1.readdirSync(resolvedBase, { withFileTypes: true });
	} catch {
		return [];
	}
	for (const entry of entries) {
		const skillDir = path.join(resolvedBase, entry.name);
		// Reject entries that resolve outside the skills dir (symlink traversal).
		if (!isSafePath(skillDir, resolvedBase)) continue;
		if (!entry.isDirectory()) continue;
		const skillMdPath = path.join(skillDir, "SKILL.md");
		if (!fs$1.existsSync(skillMdPath)) continue;
		// Re-check containment for the SKILL.md itself (it may be a symlink).
		if (!isSafePath(skillMdPath, resolvedBase)) continue;
		const metadata = parseSkillMetadata(skillMdPath, source);
		if (metadata) skills.push(metadata);
	}
	return skills;
}
|
|
2985
|
+
/**
 * List skills from user and/or project directories.
 *
 * When both directories are provided, project skills with the same name as
 * user skills override them (project scan runs second and wins on name
 * collisions).
 *
 * @param options - Options specifying which directories to search
 * @returns Merged list of skill metadata from both sources, with project skills
 *   taking precedence over user skills when names conflict
 */
function listSkills(options) {
	const merged = /* @__PURE__ */ new Map();
	// Later calls overwrite earlier entries keyed by skill name.
	const collect = (dir, source) => {
		if (!dir) return;
		for (const skill of listSkillsFromDir(dir, source)) merged.set(skill.name, skill);
	};
	collect(options.userSkillsDir, "user");
	collect(options.projectSkillsDir, "project");
	return [...merged.values()];
}
|
|
3007
|
+
|
|
3008
|
+
//#endregion
|
|
3009
|
+
//#region src/middleware/skills.ts
|
|
3010
|
+
/**
|
|
3011
|
+
* Middleware for loading and exposing agent skills to the system prompt.
|
|
3012
|
+
*
|
|
3013
|
+
* This middleware implements Anthropic's "Agent Skills" pattern with progressive disclosure:
|
|
3014
|
+
* 1. Parse YAML frontmatter from SKILL.md files at session start
|
|
3015
|
+
* 2. Inject skills metadata (name + description) into system prompt
|
|
3016
|
+
* 3. Agent reads full SKILL.md content when relevant to a task
|
|
3017
|
+
*
|
|
3018
|
+
* Skills directory structure (per-agent + project):
|
|
3019
|
+
* User-level: ~/.deepagents/{AGENT_NAME}/skills/
|
|
3020
|
+
* Project-level: {PROJECT_ROOT}/.deepagents/skills/
|
|
3021
|
+
*
|
|
3022
|
+
* @example
|
|
3023
|
+
* ```
|
|
3024
|
+
* ~/.deepagents/{AGENT_NAME}/skills/
|
|
3025
|
+
* ├── web-research/
|
|
3026
|
+
* │ ├── SKILL.md # Required: YAML frontmatter + instructions
|
|
3027
|
+
* │ └── helper.py # Optional: supporting files
|
|
3028
|
+
* ├── code-review/
|
|
3029
|
+
* │ ├── SKILL.md
|
|
3030
|
+
* │ └── checklist.md
|
|
3031
|
+
*
|
|
3032
|
+
* .deepagents/skills/
|
|
3033
|
+
* ├── project-specific/
|
|
3034
|
+
* │ └── SKILL.md # Project-specific skills
|
|
3035
|
+
* ```
|
|
3036
|
+
*/
|
|
3037
|
+
/**
 * State schema for skills middleware.
 *
 * Holds the metadata parsed from each discovered SKILL.md: required name,
 * description, absolute path, and source scope ('user' or 'project'),
 * plus the optional Agent Skills spec fields. Populated by beforeAgent()
 * in createSkillsMiddleware and read back in wrapModelCall() to build the
 * skills section of the system prompt.
 */
const SkillsStateSchema = z$2.object({ skillsMetadata: z$2.array(z$2.object({
	name: z$2.string(),
	description: z$2.string(),
	path: z$2.string(),
	source: z$2.enum(["user", "project"]),
	license: z$2.string().optional(),
	compatibility: z$2.string().optional(),
	metadata: z$2.record(z$2.string(), z$2.string()).optional(),
	allowedTools: z$2.string().optional()
})).optional() });
|
|
3050
|
+
/**
 * Skills System Documentation prompt template.
 *
 * Appended to the agent's system prompt by createSkillsMiddleware.
 * Contains two placeholders substituted at model-call time:
 * - {skills_locations}: where user/project skills live on disk
 *   (see formatSkillsLocations)
 * - {skills_list}: the formatted list of available skills with name,
 *   description, and SKILL.md path (see formatSkillsList)
 */
const SKILLS_SYSTEM_PROMPT = `

## Skills System

You have access to a skills library that provides specialized capabilities and domain knowledge.

{skills_locations}

**Available Skills:**

{skills_list}

**How to Use Skills (Progressive Disclosure):**

Skills follow a **progressive disclosure** pattern - you know they exist (name + description above), but you only read the full instructions when needed:

1. **Recognize when a skill applies**: Check if the user's task matches any skill's description
2. **Read the skill's full instructions**: The skill list above shows the exact path to use with read_file
3. **Follow the skill's instructions**: SKILL.md contains step-by-step workflows, best practices, and examples
4. **Access supporting files**: Skills may include Python scripts, configs, or reference docs - use absolute paths

**When to Use Skills:**
- When the user's request matches a skill's domain (e.g., "research X" → web-research skill)
- When you need specialized knowledge or structured workflows
- When a skill provides proven patterns for complex tasks

**Skills are Self-Documenting:**
- Each SKILL.md tells you exactly what the skill does and how to use it
- The skill list above shows the full path for each skill's SKILL.md file

**Executing Skill Scripts:**
Skills may contain Python scripts or other executable files. Always use absolute paths from the skill list.

**Example Workflow:**

User: "Can you research the latest developments in quantum computing?"

1. Check available skills above → See "web-research" skill with its full path
2. Read the skill using the path shown in the list
3. Follow the skill's research workflow (search → organize → synthesize)
4. Use any helper scripts with absolute paths

Remember: Skills are tools to make you more capable and consistent. When in doubt, check if a skill exists for the task!
`;
|
|
3097
|
+
/**
 * Format skills locations for display in the system prompt.
 *
 * Always includes the user skills directory; appends the project skills
 * directory (marked as overriding) on a second line when one is configured.
 *
 * @param userSkillsDisplay - Display path of the user skills directory
 * @param projectSkillsDir - Project skills directory, or null/undefined when absent
 * @returns Newline-joined markdown lines describing the skill locations
 */
function formatSkillsLocations(userSkillsDisplay, projectSkillsDir) {
	let result = `**User Skills**: \`${userSkillsDisplay}\``;
	if (projectSkillsDir) {
		result += `\n**Project Skills**: \`${projectSkillsDir}\` (overrides user skills)`;
	}
	return result;
}
|
|
3105
|
+
/**
 * Format skills metadata for display in the system prompt.
 *
 * With no skills, returns a placeholder naming the directories where
 * skills can be created. Otherwise renders a "**User Skills:**" section
 * (followed by a blank line) and a "**Project Skills:**" section, each
 * listing name, description, and the SKILL.md path to read.
 *
 * @param skills - Skill metadata entries (source 'user' or 'project')
 * @param userSkillsDisplay - Display path of the user skills directory
 * @param projectSkillsDir - Project skills directory, or null when absent
 * @returns Newline-joined markdown describing the available skills
 */
function formatSkillsList(skills, userSkillsDisplay, projectSkillsDir) {
	if (skills.length === 0) {
		const places = projectSkillsDir ? `${userSkillsDisplay} or ${projectSkillsDir}` : userSkillsDisplay;
		return `(No skills available yet. You can create skills in ${places})`;
	}
	// Two markdown lines per skill: summary plus the path to its SKILL.md.
	const describe = (skill) => [
		`- **${skill.name}**: ${skill.description}`,
		`  → Read \`${skill.path}\` for full instructions`
	];
	const userSkills = skills.filter((s) => s.source === "user");
	const projectSkills = skills.filter((s) => s.source === "project");
	const out = [];
	if (userSkills.length > 0) {
		out.push("**User Skills:**", ...userSkills.flatMap(describe), "");
	}
	if (projectSkills.length > 0) {
		out.push("**Project Skills:**", ...projectSkills.flatMap(describe));
	}
	return out.join("\n");
}
|
|
3134
|
+
/**
 * Create middleware for loading and exposing agent skills.
 *
 * This middleware implements Anthropic's agent skills pattern:
 * - Loads skills metadata (name, description) from YAML frontmatter at session start
 * - Injects the skills list into the system prompt for discoverability
 * - The agent reads full SKILL.md content when a skill is relevant (progressive disclosure)
 *
 * Supports both user-level and project-level skills:
 * - User skills: ~/.deepagents/{AGENT_NAME}/skills/
 * - Project skills: {PROJECT_ROOT}/.deepagents/skills/
 * - Project skills override user skills with the same name
 *
 * @param options - Configuration options (skillsDir, assistantId, projectSkillsDir)
 * @returns AgentMiddleware for skills loading and injection
 */
function createSkillsMiddleware(options) {
	const { skillsDir, assistantId, projectSkillsDir } = options;
	const userSkillsDisplay = `~/.deepagents/${assistantId}/skills`;
	// Render the skills section by filling both template placeholders.
	const buildSkillsSection = (skillsMetadata) => SKILLS_SYSTEM_PROMPT
		.replace("{skills_locations}", formatSkillsLocations(userSkillsDisplay, projectSkillsDir))
		.replace("{skills_list}", formatSkillsList(skillsMetadata, userSkillsDisplay, projectSkillsDir));
	return createMiddleware({
		name: "SkillsMiddleware",
		stateSchema: SkillsStateSchema,
		beforeAgent() {
			// Scan skill directories once per session and stash results in state.
			const skillsMetadata = listSkills({
				userSkillsDir: skillsDir,
				projectSkillsDir
			});
			return { skillsMetadata };
		},
		wrapModelCall(request, handler) {
			const section = buildSkillsSection(request.state?.skillsMetadata || []);
			const existing = request.systemPrompt || "";
			const systemPrompt = existing ? `${existing}\n\n${section}` : section;
			return handler({
				...request,
				systemPrompt
			});
		}
	});
}
|
|
3176
|
+
|
|
3177
|
+
//#endregion
|
|
3178
|
+
//#region src/middleware/agent-memory.ts
|
|
3179
|
+
/**
|
|
3180
|
+
* Middleware for loading agent-specific long-term memory into the system prompt.
|
|
3181
|
+
*
|
|
3182
|
+
* This middleware loads the agent's long-term memory from agent.md files
|
|
3183
|
+
* and injects it into the system prompt. Memory is loaded from:
|
|
3184
|
+
* - User memory: ~/.deepagents/{agent_name}/agent.md
|
|
3185
|
+
* - Project memory: {project_root}/.deepagents/agent.md
|
|
3186
|
+
*/
|
|
3187
|
+
/**
 * State schema for agent memory middleware.
 *
 * Both fields are optional: beforeAgent only populates them when the
 * corresponding agent.md file exists and is readable.
 */
const AgentMemoryStateSchema = z$2.object({
	userMemory: z$2.optional(z$2.string()),
	projectMemory: z$2.optional(z$2.string())
});
|
|
3194
|
+
/**
 * Default template for memory injection.
 *
 * The `{user_memory}` and `{project_memory}` placeholders are substituted by
 * createAgentMemoryMiddleware at model-call time with the loaded agent.md
 * contents (or a "(No ... agent.md)" fallback when a file is absent).
 */
const DEFAULT_MEMORY_TEMPLATE = `<user_memory>
{user_memory}
</user_memory>

<project_memory>
{project_memory}
</project_memory>`;
|
|
3204
|
+
/**
 * Long-term Memory Documentation system prompt.
 *
 * Appended to the system prompt by createAgentMemoryMiddleware; the
 * {agent_dir_absolute}, {agent_dir_display}, {project_memory_info}, and
 * {project_deepagents_dir} placeholders are filled in via replaceAll at
 * model-call time.
 */
const LONGTERM_MEMORY_SYSTEM_PROMPT = `

## Long-term Memory

Your long-term memory is stored in files on the filesystem and persists across sessions.

**User Memory Location**: \`{agent_dir_absolute}\` (displays as \`{agent_dir_display}\`)
**Project Memory Location**: {project_memory_info}

Your system prompt is loaded from TWO sources at startup:
1. **User agent.md**: \`{agent_dir_absolute}/agent.md\` - Your personal preferences across all projects
2. **Project agent.md**: Loaded from project root if available - Project-specific instructions

Project-specific agent.md is loaded from these locations (both combined if both exist):
- \`[project-root]/.deepagents/agent.md\` (preferred)
- \`[project-root]/agent.md\` (fallback, but also included if both exist)

**When to CHECK/READ memories (CRITICAL - do this FIRST):**
- **At the start of ANY new session**: Check both user and project memories
- User: \`ls {agent_dir_absolute}\`
- Project: \`ls {project_deepagents_dir}\` (if in a project)
- **BEFORE answering questions**: If asked "what do you know about X?" or "how do I do Y?", check project memories FIRST, then user
- **When user asks you to do something**: Check if you have project-specific guides or examples
- **When user references past work**: Search project memory files for related context

**Memory-first response pattern:**
1. User asks a question → Check project directory first: \`ls {project_deepagents_dir}\`
2. If relevant files exist → Read them with \`read_file '{project_deepagents_dir}/[filename]'\`
3. Check user memory if needed → \`ls {agent_dir_absolute}\`
4. Base your answer on saved knowledge supplemented by general knowledge

**When to update memories:**
- **IMMEDIATELY when the user describes your role or how you should behave**
- **IMMEDIATELY when the user gives feedback on your work** - Update memories to capture what was wrong and how to do it better
- When the user explicitly asks you to remember something
- When patterns or preferences emerge (coding styles, conventions, workflows)
- After significant work where context would help in future sessions

**Learning from feedback:**
- When user says something is better/worse, capture WHY and encode it as a pattern
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions
- When user says "you should remember X" or "be careful about Y", treat this as HIGH PRIORITY - update memories IMMEDIATELY
- Look for the underlying principle behind corrections, not just the specific mistake

## Deciding Where to Store Memory

When writing or updating agent memory, decide whether each fact, configuration, or behavior belongs in:

### User Agent File: \`{agent_dir_absolute}/agent.md\`
→ Describes the agent's **personality, style, and universal behavior** across all projects.

**Store here:**
- Your general tone and communication style
- Universal coding preferences (formatting, comment style, etc.)
- General workflows and methodologies you follow
- Tool usage patterns that apply everywhere
- Personal preferences that don't change per-project

**Examples:**
- "Be concise and direct in responses"
- "Always use type hints in Python"
- "Prefer functional programming patterns"

### Project Agent File: \`{project_deepagents_dir}/agent.md\`
→ Describes **how this specific project works** and **how the agent should behave here only.**

**Store here:**
- Project-specific architecture and design patterns
- Coding conventions specific to this codebase
- Project structure and organization
- Testing strategies for this project
- Deployment processes and workflows
- Team conventions and guidelines

**Examples:**
- "This project uses FastAPI with SQLAlchemy"
- "Tests go in tests/ directory mirroring src/ structure"
- "All API changes require updating OpenAPI spec"

### Project Memory Files: \`{project_deepagents_dir}/*.md\`
→ Use for **project-specific reference information** and structured notes.

**Store here:**
- API design documentation
- Architecture decisions and rationale
- Deployment procedures
- Common debugging patterns
- Onboarding information

**Examples:**
- \`{project_deepagents_dir}/api-design.md\` - REST API patterns used
- \`{project_deepagents_dir}/architecture.md\` - System architecture overview
- \`{project_deepagents_dir}/deployment.md\` - How to deploy this project

### File Operations:

**User memory:**
\`\`\`
ls {agent_dir_absolute} # List user memory files
read_file '{agent_dir_absolute}/agent.md' # Read user preferences
edit_file '{agent_dir_absolute}/agent.md' ... # Update user preferences
\`\`\`

**Project memory (preferred for project-specific information):**
\`\`\`
ls {project_deepagents_dir} # List project memory files
read_file '{project_deepagents_dir}/agent.md' # Read project instructions
edit_file '{project_deepagents_dir}/agent.md' ... # Update project instructions
write_file '{project_deepagents_dir}/agent.md' ... # Create project memory file
\`\`\`

**Important**:
- Project memory files are stored in \`.deepagents/\` inside the project root
- Always use absolute paths for file operations
- Check project memories BEFORE user when answering project-specific questions`;
|
|
3322
|
+
/**
 * Create middleware for loading agent-specific long-term memory.
 *
 * This middleware loads the agent's long-term memory from agent.md files
 * (user-level and, when available, project-level), stores it in state at the
 * start of the conversation, and injects it into the system prompt on every
 * model call together with the long-term-memory usage documentation.
 *
 * @param options - Configuration options: { settings, assistantId, systemPromptTemplate }
 * @returns AgentMiddleware for memory loading and injection
 */
function createAgentMemoryMiddleware(options) {
	const { settings, assistantId, systemPromptTemplate } = options;
	const agentDir = settings.getAgentDir(assistantId);
	const agentDirDisplay = `~/.deepagents/${assistantId}`;
	const agentDirAbsolute = agentDir;
	const projectRoot = settings.projectRoot;
	const projectMemoryInfo = projectRoot ? `\`${projectRoot}\` (detected)` : "None (not in a git project)";
	const projectDeepagentsDir = projectRoot ? `${projectRoot}/.deepagents` : "[project-root]/.deepagents (not in a project)";
	const template = systemPromptTemplate || DEFAULT_MEMORY_TEMPLATE;
	return createMiddleware({
		name: "AgentMemoryMiddleware",
		stateSchema: AgentMemoryStateSchema,
		beforeAgent(state) {
			const result = {};
			// Load each memory file from disk only if not already in state;
			// unreadable files are ignored (best-effort, memory is optional).
			if (!("userMemory" in state)) {
				const userPath = settings.getUserAgentMdPath(assistantId);
				if (fs$1.existsSync(userPath)) try {
					result.userMemory = fs$1.readFileSync(userPath, "utf-8");
				} catch {}
			}
			if (!("projectMemory" in state)) {
				const projectPath = settings.getProjectAgentMdPath();
				if (projectPath && fs$1.existsSync(projectPath)) try {
					result.projectMemory = fs$1.readFileSync(projectPath, "utf-8");
				} catch {}
			}
			return Object.keys(result).length > 0 ? result : void 0;
		},
		wrapModelCall(request, handler) {
			const userMemory = request.state?.userMemory;
			const projectMemory = request.state?.projectMemory;
			const baseSystemPrompt = request.systemPrompt || "";
			// Use replacer functions so `$`-sequences ($$, $&, $', …) inside
			// agent.md contents or filesystem paths are inserted literally
			// instead of being interpreted as special replacement patterns by
			// String.prototype.replace / replaceAll.
			const memorySection = template.replace("{user_memory}", () => userMemory || "(No user agent.md)").replace("{project_memory}", () => projectMemory || "(No project agent.md)");
			const memoryDocs = LONGTERM_MEMORY_SYSTEM_PROMPT.replaceAll("{agent_dir_absolute}", () => agentDirAbsolute).replaceAll("{agent_dir_display}", () => agentDirDisplay).replaceAll("{project_memory_info}", () => projectMemoryInfo).replaceAll("{project_deepagents_dir}", () => projectDeepagentsDir);
			// Prompt order: memory contents, then the caller's prompt, then the
			// memory-usage documentation.
			let systemPrompt = memorySection;
			if (baseSystemPrompt) systemPrompt += "\n\n" + baseSystemPrompt;
			systemPrompt += "\n\n" + memoryDocs;
			return handler({
				...request,
				systemPrompt
			});
		}
	});
}
|
|
3376
|
+
|
|
3377
|
+
//#endregion
|
|
3378
|
+
export { BaseSandbox, CompositeBackend, FilesystemBackend, MAX_SKILL_DESCRIPTION_LENGTH, MAX_SKILL_FILE_SIZE, MAX_SKILL_NAME_LENGTH, StateBackend, StoreBackend, createAgentMemoryMiddleware, createDeepAgent, createFilesystemMiddleware, createPatchToolCallsMiddleware, createSettings, createSkillsMiddleware, createSubAgentMiddleware, findProjectRoot, isSandboxBackend, listSkills, parseSkillMetadata };
|
|
3379
|
+
//# sourceMappingURL=index.js.map
|