deepagents-cli 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of deepagents-cli might be problematic. Click here for more details.
- deepagents/__init__.py +18 -0
- deepagents/cli.py +582 -0
- deepagents/default_agent_prompt.md +91 -0
- deepagents/graph.py +168 -0
- deepagents/middleware/__init__.py +13 -0
- deepagents/middleware/common.py +16 -0
- deepagents/middleware/filesystem.py +1159 -0
- deepagents/middleware/local_filesystem.py +741 -0
- deepagents/middleware/subagents.py +480 -0
- deepagents/prompts.py +327 -0
- deepagents/skills.py +85 -0
- deepagents_cli-0.0.1.dist-info/METADATA +555 -0
- deepagents_cli-0.0.1.dist-info/RECORD +17 -0
- deepagents_cli-0.0.1.dist-info/WHEEL +5 -0
- deepagents_cli-0.0.1.dist-info/entry_points.txt +2 -0
- deepagents_cli-0.0.1.dist-info/licenses/LICENSE +21 -0
- deepagents_cli-0.0.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1159 @@
|
|
|
1
|
+
"""Middleware for providing filesystem tools to an agent."""
|
|
2
|
+
# ruff: noqa: E501
|
|
3
|
+
|
|
4
|
+
from collections.abc import Awaitable, Callable, Sequence
|
|
5
|
+
from typing import TYPE_CHECKING, Annotated, Any
|
|
6
|
+
from typing_extensions import NotRequired
|
|
7
|
+
|
|
8
|
+
if TYPE_CHECKING:
|
|
9
|
+
from langgraph.runtime import Runtime
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
from datetime import UTC, datetime
|
|
13
|
+
from typing import TYPE_CHECKING, Literal
|
|
14
|
+
|
|
15
|
+
from langchain.agents.middleware.types import (
|
|
16
|
+
AgentMiddleware,
|
|
17
|
+
AgentState,
|
|
18
|
+
ModelRequest,
|
|
19
|
+
ModelResponse,
|
|
20
|
+
)
|
|
21
|
+
from langchain.tools import ToolRuntime
|
|
22
|
+
from langchain.tools.tool_node import ToolCallRequest
|
|
23
|
+
from langchain_core.messages import ToolMessage
|
|
24
|
+
from langchain_core.tools import BaseTool, tool
|
|
25
|
+
from langgraph.config import get_config
|
|
26
|
+
from langgraph.runtime import Runtime
|
|
27
|
+
from langgraph.store.base import BaseStore, Item
|
|
28
|
+
from langgraph.types import Command
|
|
29
|
+
from typing_extensions import TypedDict
|
|
30
|
+
from deepagents.middleware.common import TOO_LARGE_TOOL_MSG
|
|
31
|
+
from deepagents.prompts import EDIT_DESCRIPTION, TOOL_DESCRIPTION
|
|
32
|
+
|
|
33
|
+
# Path prefix that routes a file operation to the longterm (store-backed) filesystem.
MEMORIES_PREFIX = "/memories/"
# Returned/displayed when a file exists but is empty or whitespace-only.
EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents"
# Lines longer than this are truncated in "tab"-style numbered output.
MAX_LINE_LENGTH = 2000
# Width used to right-align line numbers in "tab"-style numbered output.
LINE_NUMBER_WIDTH = 6
# Defaults for read_file pagination: 0-indexed starting line and max lines per read.
DEFAULT_READ_OFFSET = 0
DEFAULT_READ_LIMIT = 2000
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class FileData(TypedDict):
    """Data structure for storing file contents with metadata.

    Stored per absolute path in the agent state's ``files`` mapping, and
    serialized to a plain dict when written to the longterm store.
    """

    content: list[str]
    """Lines of the file."""

    created_at: str
    """ISO 8601 timestamp of file creation."""

    modified_at: str
    """ISO 8601 timestamp of last modification."""
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def _file_data_reducer(left: dict[str, FileData] | None, right: dict[str, FileData | None]) -> dict[str, FileData]:
|
|
55
|
+
"""Merge file updates with support for deletions.
|
|
56
|
+
|
|
57
|
+
This reducer enables file deletion by treating `None` values in the right
|
|
58
|
+
dictionary as deletion markers. It's designed to work with LangGraph's
|
|
59
|
+
state management where annotated reducers control how state updates merge.
|
|
60
|
+
|
|
61
|
+
Args:
|
|
62
|
+
left: Existing files dictionary. May be `None` during initialization.
|
|
63
|
+
right: New files dictionary to merge. Files with `None` values are
|
|
64
|
+
treated as deletion markers and removed from the result.
|
|
65
|
+
|
|
66
|
+
Returns:
|
|
67
|
+
Merged dictionary where right overwrites left for matching keys,
|
|
68
|
+
and `None` values in right trigger deletions.
|
|
69
|
+
|
|
70
|
+
Example:
|
|
71
|
+
```python
|
|
72
|
+
existing = {"/file1.txt": FileData(...), "/file2.txt": FileData(...)}
|
|
73
|
+
updates = {"/file2.txt": None, "/file3.txt": FileData(...)}
|
|
74
|
+
result = file_data_reducer(existing, updates)
|
|
75
|
+
# Result: {"/file1.txt": FileData(...), "/file3.txt": FileData(...)}
|
|
76
|
+
```
|
|
77
|
+
"""
|
|
78
|
+
if left is None:
|
|
79
|
+
# Filter out None values when initializing
|
|
80
|
+
return {k: v for k, v in right.items() if v is not None}
|
|
81
|
+
|
|
82
|
+
# Merge, filtering out None values (deletions)
|
|
83
|
+
result = {**left}
|
|
84
|
+
for key, value in right.items():
|
|
85
|
+
if value is None:
|
|
86
|
+
result.pop(key, None)
|
|
87
|
+
else:
|
|
88
|
+
result[key] = value
|
|
89
|
+
return result
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _validate_path(path: str, *, allowed_prefixes: Sequence[str] | None = None) -> str:
|
|
93
|
+
"""Validate and normalize file path for security.
|
|
94
|
+
|
|
95
|
+
Ensures paths are safe to use by preventing directory traversal attacks
|
|
96
|
+
and enforcing consistent formatting. All paths are normalized to use
|
|
97
|
+
forward slashes and start with a leading slash.
|
|
98
|
+
|
|
99
|
+
Args:
|
|
100
|
+
path: The path to validate and normalize.
|
|
101
|
+
allowed_prefixes: Optional list of allowed path prefixes. If provided,
|
|
102
|
+
the normalized path must start with one of these prefixes.
|
|
103
|
+
|
|
104
|
+
Returns:
|
|
105
|
+
Normalized canonical path starting with `/` and using forward slashes.
|
|
106
|
+
|
|
107
|
+
Raises:
|
|
108
|
+
ValueError: If path contains traversal sequences (`..` or `~`) or does
|
|
109
|
+
not start with an allowed prefix when `allowed_prefixes` is specified.
|
|
110
|
+
|
|
111
|
+
Example:
|
|
112
|
+
```python
|
|
113
|
+
validate_path("foo/bar") # Returns: "/foo/bar"
|
|
114
|
+
validate_path("/./foo//bar") # Returns: "/foo/bar"
|
|
115
|
+
validate_path("../etc/passwd") # Raises ValueError
|
|
116
|
+
validate_path("/data/file.txt", allowed_prefixes=["/data/"]) # OK
|
|
117
|
+
validate_path("/etc/file.txt", allowed_prefixes=["/data/"]) # Raises ValueError
|
|
118
|
+
```
|
|
119
|
+
"""
|
|
120
|
+
# Reject paths with traversal attempts
|
|
121
|
+
if ".." in path or path.startswith("~"):
|
|
122
|
+
msg = f"Path traversal not allowed: {path}"
|
|
123
|
+
raise ValueError(msg)
|
|
124
|
+
|
|
125
|
+
# Normalize path (resolve ., //, etc.)
|
|
126
|
+
normalized = os.path.normpath(path)
|
|
127
|
+
|
|
128
|
+
# Convert to forward slashes for consistency
|
|
129
|
+
normalized = normalized.replace("\\", "/")
|
|
130
|
+
|
|
131
|
+
# Ensure path starts with /
|
|
132
|
+
if not normalized.startswith("/"):
|
|
133
|
+
normalized = f"/{normalized}"
|
|
134
|
+
|
|
135
|
+
# Check allowed prefixes if specified
|
|
136
|
+
if allowed_prefixes is not None and not any(normalized.startswith(prefix) for prefix in allowed_prefixes):
|
|
137
|
+
msg = f"Path must start with one of {allowed_prefixes}: {path}"
|
|
138
|
+
raise ValueError(msg)
|
|
139
|
+
|
|
140
|
+
return normalized
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _format_content_with_line_numbers(
|
|
144
|
+
content: str | list[str],
|
|
145
|
+
*,
|
|
146
|
+
format_style: Literal["pipe", "tab"] = "pipe",
|
|
147
|
+
start_line: int = 1,
|
|
148
|
+
) -> str:
|
|
149
|
+
r"""Format file content with line numbers for display.
|
|
150
|
+
|
|
151
|
+
Converts file content to a numbered format similar to `cat -n` output,
|
|
152
|
+
with support for two different formatting styles.
|
|
153
|
+
|
|
154
|
+
Args:
|
|
155
|
+
content: File content as a string or list of lines.
|
|
156
|
+
format_style: Format style for line numbers:
|
|
157
|
+
- `"pipe"`: Compact format like `"1|content"`
|
|
158
|
+
- `"tab"`: Right-aligned format like `" 1\tcontent"` (lines truncated at 2000 chars)
|
|
159
|
+
start_line: Starting line number (default: 1).
|
|
160
|
+
|
|
161
|
+
Returns:
|
|
162
|
+
Formatted content with line numbers prepended to each line.
|
|
163
|
+
|
|
164
|
+
Example:
|
|
165
|
+
```python
|
|
166
|
+
content = "Hello\nWorld"
|
|
167
|
+
format_content_with_line_numbers(content, format_style="pipe")
|
|
168
|
+
# Returns: "1|Hello\n2|World"
|
|
169
|
+
|
|
170
|
+
format_content_with_line_numbers(content, format_style="tab", start_line=10)
|
|
171
|
+
# Returns: " 10\tHello\n 11\tWorld"
|
|
172
|
+
```
|
|
173
|
+
"""
|
|
174
|
+
if isinstance(content, str):
|
|
175
|
+
lines = content.split("\n")
|
|
176
|
+
# Remove trailing empty line from split
|
|
177
|
+
if lines and lines[-1] == "":
|
|
178
|
+
lines = lines[:-1]
|
|
179
|
+
else:
|
|
180
|
+
lines = content
|
|
181
|
+
|
|
182
|
+
if format_style == "pipe":
|
|
183
|
+
return "\n".join(f"{i + start_line}|{line}" for i, line in enumerate(lines))
|
|
184
|
+
|
|
185
|
+
# Tab format with defined width and line truncation
|
|
186
|
+
return "\n".join(f"{i + start_line:{LINE_NUMBER_WIDTH}d}\t{line[:MAX_LINE_LENGTH]}" for i, line in enumerate(lines))
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def _create_file_data(
|
|
190
|
+
content: str | list[str],
|
|
191
|
+
*,
|
|
192
|
+
created_at: str | None = None,
|
|
193
|
+
) -> FileData:
|
|
194
|
+
r"""Create a FileData object with automatic timestamp generation.
|
|
195
|
+
|
|
196
|
+
Args:
|
|
197
|
+
content: File content as a string or list of lines.
|
|
198
|
+
created_at: Optional creation timestamp in ISO 8601 format.
|
|
199
|
+
If `None`, uses the current UTC time.
|
|
200
|
+
|
|
201
|
+
Returns:
|
|
202
|
+
FileData object with content and timestamps.
|
|
203
|
+
|
|
204
|
+
Example:
|
|
205
|
+
```python
|
|
206
|
+
file_data = create_file_data("Hello\nWorld")
|
|
207
|
+
# Returns: {"content": ["Hello", "World"], "created_at": "2024-...",
|
|
208
|
+
# "modified_at": "2024-..."}
|
|
209
|
+
```
|
|
210
|
+
"""
|
|
211
|
+
lines = content.split("\n") if isinstance(content, str) else content
|
|
212
|
+
now = datetime.now(UTC).isoformat()
|
|
213
|
+
|
|
214
|
+
return {
|
|
215
|
+
"content": lines,
|
|
216
|
+
"created_at": created_at or now,
|
|
217
|
+
"modified_at": now,
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def _update_file_data(
|
|
222
|
+
file_data: FileData,
|
|
223
|
+
content: str | list[str],
|
|
224
|
+
) -> FileData:
|
|
225
|
+
"""Update FileData with new content while preserving creation timestamp.
|
|
226
|
+
|
|
227
|
+
Args:
|
|
228
|
+
file_data: Existing FileData object to update.
|
|
229
|
+
content: New file content as a string or list of lines.
|
|
230
|
+
|
|
231
|
+
Returns:
|
|
232
|
+
Updated FileData object with new content and updated `modified_at`
|
|
233
|
+
timestamp. The `created_at` timestamp is preserved from the original.
|
|
234
|
+
|
|
235
|
+
Example:
|
|
236
|
+
```python
|
|
237
|
+
original = create_file_data("Hello")
|
|
238
|
+
updated = update_file_data(original, "Hello World")
|
|
239
|
+
# updated["created_at"] == original["created_at"]
|
|
240
|
+
# updated["modified_at"] > original["modified_at"]
|
|
241
|
+
```
|
|
242
|
+
"""
|
|
243
|
+
lines = content.split("\n") if isinstance(content, str) else content
|
|
244
|
+
now = datetime.now(UTC).isoformat()
|
|
245
|
+
|
|
246
|
+
return {
|
|
247
|
+
"content": lines,
|
|
248
|
+
"created_at": file_data["created_at"],
|
|
249
|
+
"modified_at": now,
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
def _file_data_to_string(file_data: FileData) -> str:
|
|
254
|
+
r"""Convert FileData to plain string content.
|
|
255
|
+
|
|
256
|
+
Joins the lines stored in FileData with newline characters to produce
|
|
257
|
+
a single string representation of the file content.
|
|
258
|
+
|
|
259
|
+
Args:
|
|
260
|
+
file_data: FileData object containing lines of content.
|
|
261
|
+
|
|
262
|
+
Returns:
|
|
263
|
+
File content as a single string with lines joined by newlines.
|
|
264
|
+
|
|
265
|
+
Example:
|
|
266
|
+
```python
|
|
267
|
+
file_data = {
|
|
268
|
+
"content": ["Hello", "World"],
|
|
269
|
+
"created_at": "...",
|
|
270
|
+
"modified_at": "...",
|
|
271
|
+
}
|
|
272
|
+
file_data_to_string(file_data) # Returns: "Hello\nWorld"
|
|
273
|
+
```
|
|
274
|
+
"""
|
|
275
|
+
return "\n".join(file_data["content"])
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def _check_empty_content(content: str) -> str | None:
|
|
279
|
+
"""Check if file content is empty and return a warning message.
|
|
280
|
+
|
|
281
|
+
Args:
|
|
282
|
+
content: File content to check.
|
|
283
|
+
|
|
284
|
+
Returns:
|
|
285
|
+
Warning message string if content is empty or contains only whitespace,
|
|
286
|
+
`None` otherwise.
|
|
287
|
+
|
|
288
|
+
Example:
|
|
289
|
+
```python
|
|
290
|
+
check_empty_content("") # Returns: "System reminder: File exists but has empty contents"
|
|
291
|
+
check_empty_content(" ") # Returns: "System reminder: File exists but has empty contents"
|
|
292
|
+
check_empty_content("Hello") # Returns: None
|
|
293
|
+
```
|
|
294
|
+
"""
|
|
295
|
+
if not content or content.strip() == "":
|
|
296
|
+
return EMPTY_CONTENT_WARNING
|
|
297
|
+
return None
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
def _has_memories_prefix(file_path: str) -> bool:
    """Check if a file path is in the longterm memory filesystem.

    Longterm memory files are distinguished by the `/memories/` path prefix.

    Args:
        file_path: File path to check.

    Returns:
        `True` if the file path starts with `/memories/`, `False` otherwise.

    Example:
        ```python
        has_memories_prefix("/memories/notes.txt")  # Returns: True
        has_memories_prefix("/temp/file.txt")  # Returns: False
        ```
    """
    return file_path.startswith(MEMORIES_PREFIX)
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
def _append_memories_prefix(file_path: str) -> str:
|
|
321
|
+
"""Add the longterm memory prefix to a file path.
|
|
322
|
+
|
|
323
|
+
Args:
|
|
324
|
+
file_path: File path to prefix.
|
|
325
|
+
|
|
326
|
+
Returns:
|
|
327
|
+
File path with `/memories` prepended.
|
|
328
|
+
|
|
329
|
+
Example:
|
|
330
|
+
```python
|
|
331
|
+
append_memories_prefix("/notes.txt") # Returns: "/memories/notes.txt"
|
|
332
|
+
```
|
|
333
|
+
"""
|
|
334
|
+
return f"/memories{file_path}"
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
def _strip_memories_prefix(file_path: str) -> str:
    """Strip the longterm-memory prefix from a path, if present.

    Args:
        file_path: File path that may begin with the memories prefix.

    Returns:
        The path with the leading `/memories` removed (leading slash kept);
        unchanged when the prefix is absent.

    Example:
        ```python
        strip_memories_prefix("/memories/notes.txt")  # Returns: "/notes.txt"
        strip_memories_prefix("/notes.txt")  # Returns: "/notes.txt"
        ```
    """
    if not file_path.startswith(MEMORIES_PREFIX):
        return file_path
    # Drop "/memories" but retain the slash that begins the remainder.
    return file_path[len(MEMORIES_PREFIX) - 1 :]
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
class FilesystemState(AgentState):
    """State for the filesystem middleware.

    Extends the agent state with a virtual in-memory filesystem keyed by
    absolute path. The reducer annotation merges per-file updates and
    supports deletion via ``None`` markers.
    """

    files: Annotated[NotRequired[dict[str, FileData]], _file_data_reducer]
    """Files in the filesystem."""
|
|
362
|
+
|
|
363
|
+
|
|
364
|
+
LIST_FILES_TOOL_DESCRIPTION = """Lists all files in the filesystem, optionally filtering by directory.

Usage:
- The list_files tool will return a list of all files in the filesystem.
- You can optionally provide a path parameter to list files in a specific directory.
- This is very useful for exploring the file system and finding the right file to read or edit.
- You should almost ALWAYS use this tool before using the Read or Edit tools."""
# Each *_LONGTERM_SUPPLEMENT is appended to its base description only when
# longterm memory is enabled for the corresponding tool generator.
LIST_FILES_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT = f"\n- Files from the longterm filesystem will be prefixed with the {MEMORIES_PREFIX} path."

READ_FILE_TOOL_DESCRIPTION = TOOL_DESCRIPTION + "\n- You should ALWAYS make sure a file has been read before editing it."
READ_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT = f"\n- file_paths prefixed with the {MEMORIES_PREFIX} path will be read from the longterm filesystem."

EDIT_FILE_TOOL_DESCRIPTION = EDIT_DESCRIPTION
EDIT_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT = (
    f"\n- You can edit files in the longterm filesystem by prefixing the filename with the {MEMORIES_PREFIX} path."
)

# Fix: the longterm-memory bullet is no longer hard-coded here — previously it
# appeared even when longterm memory was disabled, and was duplicated by the
# supplement when enabled. Also fixes the "create the a new file" typo.
WRITE_FILE_TOOL_DESCRIPTION = """Writes to a new file in the filesystem.

Usage:
- The file_path parameter must be an absolute path, not a relative path
- The content parameter must be a string
- The write_file tool will create a new file.
- Prefer to edit existing files over creating new ones when possible."""
WRITE_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT = (
    f"\n- file_paths prefixed with the {MEMORIES_PREFIX} path will be written to the longterm filesystem."
)

FILESYSTEM_SYSTEM_PROMPT = """## Filesystem Tools `ls`, `read_file`, `write_file`, `edit_file`

You have access to a filesystem which you can interact with using these tools.
All file paths must start with a /.

- ls: list all files in the filesystem
- read_file: read a file from the filesystem
- write_file: write to a file in the filesystem
- edit_file: edit a file in the filesystem"""
FILESYSTEM_SYSTEM_PROMPT_LONGTERM_SUPPLEMENT = f"""

You also have access to a longterm filesystem in which you can store files that you want to keep around for longer than the current conversation.
In order to interact with the longterm filesystem, you can use those same tools, but filenames must be prefixed with the {MEMORIES_PREFIX} path.
Remember, to interact with the longterm filesystem, you must prefix the filename with the {MEMORIES_PREFIX} path."""
|
|
407
|
+
|
|
408
|
+
|
|
409
|
+
def _get_namespace() -> tuple[str] | tuple[str, str]:
    """Resolve the store namespace for longterm filesystem files.

    When an `assistant_id` is present in the config metadata, files are
    isolated per assistant under `(assistant_id, "filesystem")`; otherwise a
    shared `("filesystem",)` namespace is used.

    Returns:
        Namespace tuple for store operations, either
        `(assistant_id, "filesystem")` or `("filesystem",)`.
    """
    config = get_config()
    if config is not None:
        assistant_id = config.get("metadata", {}).get("assistant_id")
        if assistant_id is not None:
            return (assistant_id, "filesystem")
    # No config or no assistant_id: fall back to the shared namespace.
    return ("filesystem",)
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
def _get_store(runtime: Runtime[Any]) -> BaseStore:
|
|
430
|
+
"""Get the store from the runtime, raising an error if unavailable.
|
|
431
|
+
|
|
432
|
+
Args:
|
|
433
|
+
runtime: The LangGraph runtime containing the store.
|
|
434
|
+
|
|
435
|
+
Returns:
|
|
436
|
+
The BaseStore instance for longterm file storage.
|
|
437
|
+
|
|
438
|
+
Raises:
|
|
439
|
+
ValueError: If longterm memory is enabled but no store is available in runtime.
|
|
440
|
+
"""
|
|
441
|
+
if runtime.store is None:
|
|
442
|
+
msg = "Longterm memory is enabled, but no store is available"
|
|
443
|
+
raise ValueError(msg)
|
|
444
|
+
return runtime.store
|
|
445
|
+
|
|
446
|
+
|
|
447
|
+
def _convert_store_item_to_file_data(store_item: Item) -> FileData:
|
|
448
|
+
"""Convert a store Item to FileData format.
|
|
449
|
+
|
|
450
|
+
Args:
|
|
451
|
+
store_item: The store Item containing file data.
|
|
452
|
+
|
|
453
|
+
Returns:
|
|
454
|
+
FileData with content, created_at, and modified_at fields.
|
|
455
|
+
|
|
456
|
+
Raises:
|
|
457
|
+
ValueError: If required fields are missing or have incorrect types.
|
|
458
|
+
"""
|
|
459
|
+
if "content" not in store_item.value or not isinstance(store_item.value["content"], list):
|
|
460
|
+
msg = f"Store item does not contain valid content field. Got: {store_item.value.keys()}"
|
|
461
|
+
raise ValueError(msg)
|
|
462
|
+
if "created_at" not in store_item.value or not isinstance(store_item.value["created_at"], str):
|
|
463
|
+
msg = f"Store item does not contain valid created_at field. Got: {store_item.value.keys()}"
|
|
464
|
+
raise ValueError(msg)
|
|
465
|
+
if "modified_at" not in store_item.value or not isinstance(store_item.value["modified_at"], str):
|
|
466
|
+
msg = f"Store item does not contain valid modified_at field. Got: {store_item.value.keys()}"
|
|
467
|
+
raise ValueError(msg)
|
|
468
|
+
return FileData(
|
|
469
|
+
content=store_item.value["content"],
|
|
470
|
+
created_at=store_item.value["created_at"],
|
|
471
|
+
modified_at=store_item.value["modified_at"],
|
|
472
|
+
)
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
def _convert_file_data_to_store_item(file_data: FileData) -> dict[str, Any]:
|
|
476
|
+
"""Convert FileData to a dict suitable for store.put().
|
|
477
|
+
|
|
478
|
+
Args:
|
|
479
|
+
file_data: The FileData to convert.
|
|
480
|
+
|
|
481
|
+
Returns:
|
|
482
|
+
Dictionary with content, created_at, and modified_at fields.
|
|
483
|
+
"""
|
|
484
|
+
return {
|
|
485
|
+
"content": file_data["content"],
|
|
486
|
+
"created_at": file_data["created_at"],
|
|
487
|
+
"modified_at": file_data["modified_at"],
|
|
488
|
+
}
|
|
489
|
+
|
|
490
|
+
|
|
491
|
+
def _get_file_data_from_state(state: FilesystemState, file_path: str) -> FileData:
|
|
492
|
+
"""Retrieve file data from the agent's state.
|
|
493
|
+
|
|
494
|
+
Args:
|
|
495
|
+
state: The current filesystem state.
|
|
496
|
+
file_path: The path of the file to retrieve.
|
|
497
|
+
|
|
498
|
+
Returns:
|
|
499
|
+
The FileData for the requested file.
|
|
500
|
+
|
|
501
|
+
Raises:
|
|
502
|
+
ValueError: If the file is not found in state.
|
|
503
|
+
"""
|
|
504
|
+
mock_filesystem = state.get("files", {})
|
|
505
|
+
if file_path not in mock_filesystem:
|
|
506
|
+
msg = f"File '{file_path}' not found"
|
|
507
|
+
raise ValueError(msg)
|
|
508
|
+
return mock_filesystem[file_path]
|
|
509
|
+
|
|
510
|
+
|
|
511
|
+
def _ls_tool_generator(custom_description: str | None = None, *, long_term_memory: bool) -> BaseTool:
    """Generate the ls (list files) tool.

    Args:
        custom_description: Optional custom description for the tool.
        long_term_memory: Whether to enable longterm memory support.

    Returns:
        Configured ls tool that lists files from state and optionally from longterm store.
    """
    tool_description = LIST_FILES_TOOL_DESCRIPTION
    if custom_description:
        # A caller-supplied description wins outright; the longterm supplement
        # is only appended to the default description.
        tool_description = custom_description
    elif long_term_memory:
        tool_description += LIST_FILES_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT

    def _get_filenames_from_state(state: FilesystemState) -> list[str]:
        """Extract list of filenames from the filesystem state.

        Args:
            state: The current filesystem state.

        Returns:
            List of file paths in the state.
        """
        files_dict = state.get("files", {})
        return list(files_dict.keys())

    def _filter_files_by_path(filenames: list[str], path: str | None) -> list[str]:
        """Filter filenames by path prefix.

        Args:
            filenames: List of file paths to filter.
            path: Optional path prefix to filter by.

        Returns:
            Filtered list of file paths matching the prefix.
        """
        if path is None:
            return filenames
        normalized_path = _validate_path(path)
        return [f for f in filenames if f.startswith(normalized_path)]

    # Two variants are defined so the longterm store lookup (and its hard
    # requirement on a configured store) only exists when the feature is on.
    if long_term_memory:

        @tool(description=tool_description)
        def ls(runtime: ToolRuntime[None, FilesystemState], path: str | None = None) -> list[str]:
            files = _get_filenames_from_state(runtime.state)
            # Add filenames from longterm memory
            store = _get_store(runtime)
            namespace = _get_namespace()
            longterm_files = store.search(namespace)
            longterm_files_prefixed = [_append_memories_prefix(f.key) for f in longterm_files]
            files.extend(longterm_files_prefixed)
            return _filter_files_by_path(files, path)
    else:

        @tool(description=tool_description)
        def ls(runtime: ToolRuntime[None, FilesystemState], path: str | None = None) -> list[str]:
            files = _get_filenames_from_state(runtime.state)
            return _filter_files_by_path(files, path)

    return ls
|
|
574
|
+
|
|
575
|
+
|
|
576
|
+
def _read_file_tool_generator(custom_description: str | None = None, *, long_term_memory: bool) -> BaseTool:
    """Generate the read_file tool.

    Args:
        custom_description: Optional custom description for the tool.
        long_term_memory: Whether to enable longterm memory support.

    Returns:
        Configured read_file tool that reads files from state and optionally from longterm store.
    """
    tool_description = READ_FILE_TOOL_DESCRIPTION
    if custom_description:
        # A caller-supplied description wins outright; the longterm supplement
        # is only appended to the default description.
        tool_description = custom_description
    elif long_term_memory:
        tool_description += READ_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT

    def _read_file_data_content(file_data: FileData, offset: int, limit: int) -> str:
        """Read and format file content with line numbers.

        Args:
            file_data: The file data to read.
            offset: Line offset to start reading from (0-indexed).
            limit: Maximum number of lines to read.

        Returns:
            Formatted file content with line numbers, or an error message.
        """
        content = _file_data_to_string(file_data)
        empty_msg = _check_empty_content(content)
        if empty_msg:
            return empty_msg
        lines = content.splitlines()
        start_idx = offset
        end_idx = min(start_idx + limit, len(lines))
        if start_idx >= len(lines):
            return f"Error: Line offset {offset} exceeds file length ({len(lines)} lines)"
        selected_lines = lines[start_idx:end_idx]
        # Displayed line numbers are 1-indexed, hence start_idx + 1.
        return _format_content_with_line_numbers(selected_lines, format_style="tab", start_line=start_idx + 1)

    # Two variants: the longterm one routes /memories/-prefixed paths to the
    # store; the plain one reads only from state.
    if long_term_memory:

        @tool(description=tool_description)
        def read_file(
            file_path: str,
            runtime: ToolRuntime[None, FilesystemState],
            offset: int = DEFAULT_READ_OFFSET,
            limit: int = DEFAULT_READ_LIMIT,
        ) -> str:
            file_path = _validate_path(file_path)
            if _has_memories_prefix(file_path):
                stripped_file_path = _strip_memories_prefix(file_path)
                store = _get_store(runtime)
                namespace = _get_namespace()
                item: Item | None = store.get(namespace, stripped_file_path)
                if item is None:
                    return f"Error: File '{file_path}' not found"
                file_data = _convert_store_item_to_file_data(item)
            else:
                try:
                    file_data = _get_file_data_from_state(runtime.state, file_path)
                except ValueError as e:
                    # Tools report errors as strings rather than raising.
                    return str(e)
            return _read_file_data_content(file_data, offset, limit)
    else:

        @tool(description=tool_description)
        def read_file(
            file_path: str,
            runtime: ToolRuntime[None, FilesystemState],
            offset: int = DEFAULT_READ_OFFSET,
            limit: int = DEFAULT_READ_LIMIT,
        ) -> str:
            file_path = _validate_path(file_path)
            try:
                file_data = _get_file_data_from_state(runtime.state, file_path)
            except ValueError as e:
                return str(e)
            return _read_file_data_content(file_data, offset, limit)

    return read_file
|
|
657
|
+
|
|
658
|
+
|
|
659
|
+
def _write_file_tool_generator(custom_description: str | None = None, *, long_term_memory: bool) -> BaseTool:
    """Generate the write_file tool.

    Args:
        custom_description: Optional custom description for the tool.
        long_term_memory: Whether to enable longterm memory support.

    Returns:
        Configured write_file tool that creates new files in state or longterm store.
    """
    tool_description = WRITE_FILE_TOOL_DESCRIPTION
    if custom_description:
        # A caller-supplied description wins outright; the longterm supplement
        # is only appended to the default description.
        tool_description = custom_description
    elif long_term_memory:
        tool_description += WRITE_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT

    def _write_file_to_state(state: FilesystemState, tool_call_id: str, file_path: str, content: str) -> Command | str:
        """Write a new file to the filesystem state.

        Args:
            state: The current filesystem state.
            tool_call_id: ID of the tool call for generating ToolMessage.
            file_path: The path where the file should be written.
            content: The content to write to the file.

        Returns:
            Command to update state with new file, or error string if file exists.
        """
        mock_filesystem = state.get("files", {})
        existing = mock_filesystem.get(file_path)
        if existing:
            # write_file refuses overwrites; edits must go through edit_file.
            return f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path."
        new_file_data = _create_file_data(content)
        return Command(
            update={
                "files": {file_path: new_file_data},
                "messages": [ToolMessage(f"Updated file {file_path}", tool_call_id=tool_call_id)],
            }
        )

    # Two variants: the longterm one routes /memories/-prefixed paths to the
    # store; the plain one writes only to state.
    if long_term_memory:

        @tool(description=tool_description)
        def write_file(
            file_path: str,
            content: str,
            runtime: ToolRuntime[None, FilesystemState],
        ) -> Command | str:
            file_path = _validate_path(file_path)
            if _has_memories_prefix(file_path):
                stripped_file_path = _strip_memories_prefix(file_path)
                store = _get_store(runtime)
                namespace = _get_namespace()
                if store.get(namespace, stripped_file_path) is not None:
                    return f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path."
                new_file_data = _create_file_data(content)
                store.put(namespace, stripped_file_path, _convert_file_data_to_store_item(new_file_data))
                return f"Updated longterm memories file {file_path}"
            return _write_file_to_state(runtime.state, runtime.tool_call_id, file_path, content)
    else:

        @tool(description=tool_description)
        def write_file(
            file_path: str,
            content: str,
            runtime: ToolRuntime[None, FilesystemState],
        ) -> Command | str:
            file_path = _validate_path(file_path)
            return _write_file_to_state(runtime.state, runtime.tool_call_id, file_path, content)

    return write_file
|
|
731
|
+
|
|
732
|
+
|
|
733
|
+
def _edit_file_tool_generator(custom_description: str | None = None, *, long_term_memory: bool) -> BaseTool:
    """Generate the edit_file tool.

    Two variants of the tool are produced: when ``long_term_memory`` is True the
    tool can also edit files under the memories prefix, which live in the
    persistent store instead of agent state.

    Args:
        custom_description: Optional custom description for the tool.
        long_term_memory: Whether to enable longterm memory support.

    Returns:
        Configured edit_file tool that performs string replacements in files.
    """
    # Custom description wins outright; otherwise the longterm supplement is
    # appended only when longterm memory is enabled.
    tool_description = EDIT_FILE_TOOL_DESCRIPTION
    if custom_description:
        tool_description = custom_description
    elif long_term_memory:
        tool_description += EDIT_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT

    def _perform_file_edit(
        file_data: FileData,
        old_string: str,
        new_string: str,
        *,
        replace_all: bool = False,
    ) -> tuple[FileData, str] | str:
        """Perform string replacement on file data.

        Args:
            file_data: The file data to edit.
            old_string: String to find and replace.
            new_string: Replacement string.
            replace_all: If True, replace all occurrences.

        Returns:
            Tuple of (updated_file_data, success_message) on success,
            or error string on failure.
        """
        content = _file_data_to_string(file_data)
        occurrences = content.count(old_string)
        if occurrences == 0:
            return f"Error: String not found in file: '{old_string}'"
        # Ambiguous match without replace_all is rejected so the caller cannot
        # accidentally replace an unintended occurrence.
        if occurrences > 1 and not replace_all:
            return f"Error: String '{old_string}' appears {occurrences} times in file. Use replace_all=True to replace all instances, or provide a more specific string with surrounding context."
        # str.replace replaces every occurrence; at this point either
        # occurrences == 1 or replace_all was requested, so that is correct.
        new_content = content.replace(old_string, new_string)
        new_file_data = _update_file_data(file_data, new_content)
        result_msg = f"Successfully replaced {occurrences} instance(s) of the string"
        return new_file_data, result_msg

    if long_term_memory:

        @tool(description=tool_description)
        def edit_file(
            file_path: str,
            old_string: str,
            new_string: str,
            runtime: ToolRuntime[None, FilesystemState],
            *,
            replace_all: bool = False,
        ) -> Command | str:
            # Longterm-memory variant: files under the memories prefix are read
            # from and written back to the persistent store; everything else
            # goes through agent state as in the short-term variant below.
            file_path = _validate_path(file_path)
            is_longterm_memory = _has_memories_prefix(file_path)

            # Retrieve file data from appropriate storage
            if is_longterm_memory:
                stripped_file_path = _strip_memories_prefix(file_path)
                store = _get_store(runtime)
                namespace = _get_namespace()
                item: Item | None = store.get(namespace, stripped_file_path)
                if item is None:
                    return f"Error: File '{file_path}' not found"
                file_data = _convert_store_item_to_file_data(item)
            else:
                try:
                    file_data = _get_file_data_from_state(runtime.state, file_path)
                except ValueError as e:
                    return str(e)

            # Perform the edit
            result = _perform_file_edit(file_data, old_string, new_string, replace_all=replace_all)
            if isinstance(result, str):  # Error message
                return result

            new_file_data, result_msg = result
            full_msg = f"{result_msg} in '{file_path}'"

            # Save to appropriate storage. NOTE: store/namespace/stripped_file_path
            # are only bound when is_longterm_memory is True, which is exactly the
            # branch that uses them here.
            if is_longterm_memory:
                store.put(namespace, stripped_file_path, _convert_file_data_to_store_item(new_file_data))
                return full_msg

            # Short-term path: return a Command so state gets both the updated
            # file and the ToolMessage acknowledging the edit.
            return Command(
                update={
                    "files": {file_path: new_file_data},
                    "messages": [ToolMessage(full_msg, tool_call_id=runtime.tool_call_id)],
                }
            )
    else:

        @tool(description=tool_description)
        def edit_file(
            file_path: str,
            old_string: str,
            new_string: str,
            runtime: ToolRuntime[None, FilesystemState],
            *,
            replace_all: bool = False,
        ) -> Command | str:
            # Short-term-only variant: files live exclusively in agent state.
            file_path = _validate_path(file_path)

            # Retrieve file data from state
            try:
                file_data = _get_file_data_from_state(runtime.state, file_path)
            except ValueError as e:
                return str(e)

            # Perform the edit
            result = _perform_file_edit(file_data, old_string, new_string, replace_all=replace_all)
            if isinstance(result, str):  # Error message
                return result

            new_file_data, result_msg = result
            full_msg = f"{result_msg} in '{file_path}'"

            return Command(
                update={
                    "files": {file_path: new_file_data},
                    "messages": [ToolMessage(full_msg, tool_call_id=runtime.tool_call_id)],
                }
            )

    return edit_file
|
|
862
|
+
|
|
863
|
+
|
|
864
|
+
# Registry of filesystem tool factories, keyed by tool name. Each generator
# takes (custom_description, *, long_term_memory) and returns a configured
# BaseTool; _get_filesystem_tools iterates this mapping, and the middleware
# also uses its keys to recognize filesystem tool calls (which are exempt
# from large-result eviction).
TOOL_GENERATORS = {
    "ls": _ls_tool_generator,
    "read_file": _read_file_tool_generator,
    "write_file": _write_file_tool_generator,
    "edit_file": _edit_file_tool_generator,
}
|
|
870
|
+
|
|
871
|
+
|
|
872
|
+
def _get_filesystem_tools(custom_tool_descriptions: dict[str, str] | None = None, *, long_term_memory: bool) -> list[BaseTool]:
    """Get filesystem tools.

    Args:
        custom_tool_descriptions: Optional custom descriptions for tools, keyed
            by tool name; missing keys fall back to the default description.
        long_term_memory: Whether to enable longterm memory support.

    Returns:
        List of configured filesystem tools (ls, read_file, write_file, edit_file).
    """
    descriptions = custom_tool_descriptions if custom_tool_descriptions is not None else {}
    return [
        generator(descriptions.get(name), long_term_memory=long_term_memory)
        for name, generator in TOOL_GENERATORS.items()
    ]
|
|
889
|
+
|
|
890
|
+
|
|
891
|
+
class FilesystemMiddleware(AgentMiddleware):
    """Middleware for providing filesystem tools to an agent.

    This middleware adds four filesystem tools to the agent: ls, read_file, write_file,
    and edit_file. Files can be stored in two locations:
    - Short-term: In the agent's state (ephemeral, lasts only for the conversation)
    - Long-term: In a persistent store (persists across conversations when enabled)

    Args:
        long_term_memory: Whether to enable longterm memory support.
        system_prompt: Optional custom system prompt override.
        custom_tool_descriptions: Optional custom tool descriptions override.
        skills: Optional list of SkillDefinition to load into virtual filesystem.

    Raises:
        ValueError: If longterm memory is enabled but no store is available.

    Example:
        ```python
        from langchain.agents.middleware.filesystem import FilesystemMiddleware
        from langchain.agents import create_agent

        # Short-term memory only
        agent = create_agent(middleware=[FilesystemMiddleware(long_term_memory=False)])

        # With long-term memory
        agent = create_agent(middleware=[FilesystemMiddleware(long_term_memory=True)])

        # With skills
        skills = [{"name": "slack-gif", "files": {"SKILL.md": "..."}}]
        agent = create_agent(middleware=[FilesystemMiddleware(skills=skills)])
        ```
    """

    # State schema adds the "files" mapping used by the filesystem tools.
    state_schema = FilesystemState

    def __init__(
        self,
        *,
        long_term_memory: bool = False,
        system_prompt: str | None = None,
        custom_tool_descriptions: dict[str, str] | None = None,
        tool_token_limit_before_evict: int | None = 20000,
        skills: list[dict[str, Any]] | None = None,
    ) -> None:
        """Initialize the filesystem middleware.

        Args:
            long_term_memory: Whether to enable longterm memory support.
            system_prompt: Optional custom system prompt override.
            custom_tool_descriptions: Optional custom tool descriptions override.
            tool_token_limit_before_evict: Optional token limit before evicting a tool result to the filesystem.
                ``None`` disables eviction entirely.
            skills: Optional list of SkillDefinition to load into virtual filesystem.
        """
        self.long_term_memory = long_term_memory
        self.tool_token_limit_before_evict = tool_token_limit_before_evict
        self.skills = skills or []

        # Build system prompt with skills
        # An explicit system_prompt replaces the default entirely (the longterm
        # supplement is NOT appended in that case); the skills section is always
        # appended afterward.
        base_prompt = FILESYSTEM_SYSTEM_PROMPT
        if system_prompt is not None:
            base_prompt = system_prompt
        elif long_term_memory:
            base_prompt += FILESYSTEM_SYSTEM_PROMPT_LONGTERM_SUPPLEMENT

        skills_prompt = self._build_skills_prompt()
        self.system_prompt = base_prompt + skills_prompt

        self.tools = _get_filesystem_tools(custom_tool_descriptions, long_term_memory=long_term_memory)

    def _build_skills_prompt(self) -> str:
        """Build the skills section of the system prompt.

        Returns:
            System prompt text describing available skills, or empty string if no skills.
        """
        if not self.skills:
            return ""

        # Imported lazily to avoid a module-level dependency when no skills are used.
        from deepagents.skills import parse_skill_frontmatter

        prompt = "\n\n## Available Skills\n\nYou have access to the following skills:"

        for i, skill in enumerate(self.skills, 1):
            skill_name = skill['name']
            skill_path = f"/skills/{skill_name}/SKILL.md"

            # Try to extract description from SKILL.md if present
            description = ""
            skill_files = skill.get('files', {})
            if 'SKILL.md' in skill_files:
                try:
                    content = skill_files['SKILL.md']
                    if isinstance(content, str):
                        frontmatter = parse_skill_frontmatter(content)
                        description = frontmatter.get('description', '')
                except Exception:
                    # Best-effort: a malformed SKILL.md simply yields no description.
                    pass

            prompt += f"\n\n{i}. **{skill_name}** ({skill_path})"
            if description:
                prompt += f"\n - {description}"

        prompt += "\n\nTo use a skill, read its SKILL.md file using `read_file`. Skills may contain additional resources in scripts/, references/, and assets/ subdirectories."

        return prompt

    def before_agent(self, state: AgentState, runtime: Runtime[Any]) -> dict[str, Any] | None:
        """Load skills into virtual filesystem and validate store if needed.

        Args:
            state: The state of the agent.
            runtime: The LangGraph runtime.

        Returns:
            State update with skills loaded into filesystem, or None if no skills.

        Raises:
            ValueError: If long_term_memory is True but runtime.store is None.
        """
        # Fail fast: longterm memory requires a store at run time.
        if self.long_term_memory and runtime.store is None:
            msg = "Longterm memory is enabled, but no store is available"
            raise ValueError(msg)

        # Load skills into virtual filesystem
        if not self.skills:
            return None

        files_update = {}
        for skill in self.skills:
            skill_name = skill['name']
            skill_files = skill.get('files', {})

            for file_path, content in skill_files.items():
                # Write to /skills/<skill_name>/<file_path>
                full_path = f"/skills/{skill_name}/{file_path}"
                files_update[full_path] = _create_file_data(content)

        return {"files": files_update}

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Update the system prompt to include instructions on using the filesystem.

        Args:
            request: The model request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The model response from the handler.
        """
        # Append (never replace) so prompts contributed by other middleware survive.
        if self.system_prompt is not None:
            request.system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
        return handler(request)

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """(async) Update the system prompt to include instructions on using the filesystem.

        Args:
            request: The model request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The model response from the handler.
        """
        if self.system_prompt is not None:
            request.system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
        return await handler(request)

    def _intercept_large_tool_result(self, tool_result: ToolMessage | Command) -> ToolMessage | Command:
        """Evict oversized tool results into the virtual filesystem.

        When a tool result's string content exceeds the configured limit
        (compared against ``4 * tool_token_limit_before_evict`` characters —
        assumes roughly 4 characters per token, TODO confirm heuristic), the
        full content is stored under ``/large_tool_results/<tool_call_id>`` and
        the ToolMessage is replaced with a short pointer message containing a
        sample of the content.

        Args:
            tool_result: The raw ToolMessage or Command produced by the tool call.

        Returns:
            The original result if small enough, otherwise a Command whose
            update writes the content to state files and substitutes a
            truncated ToolMessage.
        """
        if isinstance(tool_result, ToolMessage) and isinstance(tool_result.content, str):
            content = tool_result.content
            if self.tool_token_limit_before_evict and len(content) > 4 * self.tool_token_limit_before_evict:
                file_path = f"/large_tool_results/{tool_result.tool_call_id}"
                file_data = _create_file_data(content)
                state_update = {
                    "messages": [
                        ToolMessage(
                            TOO_LARGE_TOOL_MSG.format(
                                tool_call_id=tool_result.tool_call_id,
                                file_path=file_path,
                                # First 10 entries of the stored content — presumably
                                # lines; verify against _create_file_data.
                                content_sample=_format_content_with_line_numbers(file_data["content"][:10], format_style="tab", start_line=1),
                            ),
                            tool_call_id=tool_result.tool_call_id,
                        )
                    ],
                    "files": {file_path: file_data},
                }
                return Command(update=state_update)
        elif isinstance(tool_result, Command):
            update = tool_result.update
            if update is None:
                return tool_result
            message_updates = update.get("messages", [])
            # NOTE(review): when the Command already carries a "files" dict, it is
            # mutated in place below before being merged into the new Command.
            file_updates = update.get("files", {})

            edited_message_updates = []
            for message in message_updates:
                if self.tool_token_limit_before_evict and isinstance(message, ToolMessage) and isinstance(message.content, str):
                    content = message.content
                    if len(content) > 4 * self.tool_token_limit_before_evict:
                        file_path = f"/large_tool_results/{message.tool_call_id}"
                        file_data = _create_file_data(content)
                        edited_message_updates.append(
                            ToolMessage(
                                TOO_LARGE_TOOL_MSG.format(
                                    tool_call_id=message.tool_call_id,
                                    file_path=file_path,
                                    content_sample=_format_content_with_line_numbers(file_data["content"][:10], format_style="tab", start_line=1),
                                ),
                                tool_call_id=message.tool_call_id,
                            )
                        )
                        file_updates[file_path] = file_data
                        continue
                # Non-oversized (or non-ToolMessage) entries pass through unchanged.
                edited_message_updates.append(message)
            return Command(update={**update, "messages": edited_message_updates, "files": file_updates})
        return tool_result

    def wrap_tool_call(
        self,
        request: ToolCallRequest,
        handler: Callable[[ToolCallRequest], ToolMessage | Command],
    ) -> ToolMessage | Command:
        """Check the size of the tool call result and evict to filesystem if too large.

        Args:
            request: The tool call request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The raw ToolMessage, or a pseudo tool message with the ToolResult in state.
        """
        # If no token limit specified, or if it is a filesystem tool, do not evict
        if self.tool_token_limit_before_evict is None or request.tool_call["name"] in TOOL_GENERATORS:
            return handler(request)

        tool_result = handler(request)
        return self._intercept_large_tool_result(tool_result)

    async def awrap_tool_call(
        self,
        request: ToolCallRequest,
        handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]],
    ) -> ToolMessage | Command:
        """(async) Check the size of the tool call result and evict to filesystem if too large.

        Args:
            request: The tool call request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The raw ToolMessage, or a pseudo tool message with the ToolResult in state.
        """
        # If no token limit specified, or if it is a filesystem tool, do not evict
        if self.tool_token_limit_before_evict is None or request.tool_call["name"] in TOOL_GENERATORS:
            return await handler(request)

        tool_result = await handler(request)
        return self._intercept_large_tool_result(tool_result)
|