deepagents-cli 0.0.3__py3-none-any.whl → 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of deepagents-cli might be problematic. Click here for more details.
- deepagents_cli/__init__.py +5 -0
- deepagents_cli/__main__.py +6 -0
- deepagents_cli/agent.py +267 -0
- deepagents_cli/cli.py +13 -0
- deepagents_cli/commands.py +86 -0
- deepagents_cli/config.py +138 -0
- deepagents_cli/execution.py +644 -0
- deepagents_cli/file_ops.py +347 -0
- deepagents_cli/input.py +249 -0
- deepagents_cli/main.py +217 -0
- deepagents_cli/py.typed +0 -0
- deepagents_cli/tools.py +140 -0
- deepagents_cli/ui.py +455 -0
- deepagents_cli-0.0.4.dist-info/METADATA +18 -0
- deepagents_cli-0.0.4.dist-info/RECORD +18 -0
- deepagents_cli-0.0.4.dist-info/entry_points.txt +3 -0
- deepagents_cli-0.0.4.dist-info/top_level.txt +1 -0
- deepagents/__init__.py +0 -7
- deepagents/cli.py +0 -567
- deepagents/default_agent_prompt.md +0 -64
- deepagents/graph.py +0 -144
- deepagents/memory/__init__.py +0 -17
- deepagents/memory/backends/__init__.py +0 -15
- deepagents/memory/backends/composite.py +0 -250
- deepagents/memory/backends/filesystem.py +0 -330
- deepagents/memory/backends/state.py +0 -206
- deepagents/memory/backends/store.py +0 -351
- deepagents/memory/backends/utils.py +0 -319
- deepagents/memory/protocol.py +0 -164
- deepagents/middleware/__init__.py +0 -13
- deepagents/middleware/agent_memory.py +0 -207
- deepagents/middleware/filesystem.py +0 -615
- deepagents/middleware/patch_tool_calls.py +0 -44
- deepagents/middleware/subagents.py +0 -481
- deepagents/pretty_cli.py +0 -289
- deepagents_cli-0.0.3.dist-info/METADATA +0 -551
- deepagents_cli-0.0.3.dist-info/RECORD +0 -24
- deepagents_cli-0.0.3.dist-info/entry_points.txt +0 -2
- deepagents_cli-0.0.3.dist-info/licenses/LICENSE +0 -21
- deepagents_cli-0.0.3.dist-info/top_level.txt +0 -1
- {deepagents_cli-0.0.3.dist-info → deepagents_cli-0.0.4.dist-info}/WHEEL +0 -0
|
@@ -1,615 +0,0 @@
|
|
|
1
|
-
"""Middleware for providing filesystem tools to an agent."""
|
|
2
|
-
# ruff: noqa: E501
|
|
3
|
-
|
|
4
|
-
from collections.abc import Awaitable, Callable, Sequence
|
|
5
|
-
from typing import Annotated
|
|
6
|
-
from typing_extensions import NotRequired
|
|
7
|
-
|
|
8
|
-
import os
|
|
9
|
-
from typing import Literal
|
|
10
|
-
|
|
11
|
-
from langchain.agents.middleware.types import (
|
|
12
|
-
AgentMiddleware,
|
|
13
|
-
AgentState,
|
|
14
|
-
ModelRequest,
|
|
15
|
-
ModelResponse,
|
|
16
|
-
)
|
|
17
|
-
from langchain.tools import ToolRuntime
|
|
18
|
-
from langchain.tools.tool_node import ToolCallRequest
|
|
19
|
-
from langchain_core.messages import ToolMessage
|
|
20
|
-
from langchain_core.tools import BaseTool, tool
|
|
21
|
-
from langgraph.types import Command
|
|
22
|
-
from typing_extensions import TypedDict
|
|
23
|
-
|
|
24
|
-
from deepagents.memory.protocol import MemoryBackend
|
|
25
|
-
from deepagents.memory.backends import StateBackend
|
|
26
|
-
from deepagents.memory.backends.utils import (
|
|
27
|
-
create_file_data,
|
|
28
|
-
format_content_with_line_numbers,
|
|
29
|
-
)
|
|
30
|
-
|
|
31
|
-
# Returned to the model in place of file contents when a read hits an existing but empty file.
EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents"
# Maximum characters per line in read output (per READ_FILE_TOOL_DESCRIPTION, longer lines are truncated).
MAX_LINE_LENGTH = 2000
# Column width for line numbers — presumably used by the cat -n style formatter; TODO confirm against utils.
LINE_NUMBER_WIDTH = 6
# Default starting line for read_file (0-based offset).
DEFAULT_READ_OFFSET = 0
# Default maximum number of lines returned by read_file.
DEFAULT_READ_LIMIT = 2000
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
class FileData(TypedDict):
    """Data structure for storing file contents with metadata.

    Used as the value type of the ``files`` mapping in `FilesystemState`.
    """

    content: list[str]
    """Lines of the file."""

    created_at: str
    """ISO 8601 timestamp of file creation."""

    modified_at: str
    """ISO 8601 timestamp of last modification."""
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
def _file_data_reducer(left: dict[str, FileData] | None, right: dict[str, FileData | None]) -> dict[str, FileData]:
    """Merge two file mappings, treating ``None`` values as deletion markers.

    Intended as an annotated LangGraph state reducer: ``right`` holds the
    incoming update, ``left`` the existing state. Entries in ``right`` whose
    value is ``None`` remove the corresponding key; all other entries
    overwrite (or add) their key.

    Args:
        left: Existing files mapping; ``None`` during state initialization.
        right: Incoming update; ``None``-valued entries delete keys.

    Returns:
        A new dictionary with ``right`` merged over ``left`` and all
        deletion markers applied.

    Example:
        ```python
        existing = {"/file1.txt": FileData(...), "/file2.txt": FileData(...)}
        updates = {"/file2.txt": None, "/file3.txt": FileData(...)}
        result = file_data_reducer(existing, updates)
        # Result: {"/file1.txt": FileData(...), "/file3.txt": FileData(...)}
        ```
    """
    # Start from a copy of the existing state (or empty when uninitialized),
    # then apply every update, honoring None entries as deletions.
    merged: dict[str, FileData] = {} if left is None else dict(left)
    for path, data in right.items():
        if data is None:
            merged.pop(path, None)
        else:
            merged[path] = data
    return merged
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
def _validate_path(path: str, *, allowed_prefixes: Sequence[str] | None = None) -> str:
|
|
88
|
-
"""Validate and normalize file path for security.
|
|
89
|
-
|
|
90
|
-
Ensures paths are safe to use by preventing directory traversal attacks
|
|
91
|
-
and enforcing consistent formatting. All paths are normalized to use
|
|
92
|
-
forward slashes and start with a leading slash.
|
|
93
|
-
|
|
94
|
-
Args:
|
|
95
|
-
path: The path to validate and normalize.
|
|
96
|
-
allowed_prefixes: Optional list of allowed path prefixes. If provided,
|
|
97
|
-
the normalized path must start with one of these prefixes.
|
|
98
|
-
|
|
99
|
-
Returns:
|
|
100
|
-
Normalized canonical path starting with `/` and using forward slashes.
|
|
101
|
-
|
|
102
|
-
Raises:
|
|
103
|
-
ValueError: If path contains traversal sequences (`..` or `~`) or does
|
|
104
|
-
not start with an allowed prefix when `allowed_prefixes` is specified.
|
|
105
|
-
|
|
106
|
-
Example:
|
|
107
|
-
```python
|
|
108
|
-
validate_path("foo/bar") # Returns: "/foo/bar"
|
|
109
|
-
validate_path("/./foo//bar") # Returns: "/foo/bar"
|
|
110
|
-
validate_path("../etc/passwd") # Raises ValueError
|
|
111
|
-
validate_path("/data/file.txt", allowed_prefixes=["/data/"]) # OK
|
|
112
|
-
validate_path("/etc/file.txt", allowed_prefixes=["/data/"]) # Raises ValueError
|
|
113
|
-
```
|
|
114
|
-
"""
|
|
115
|
-
if ".." in path or path.startswith("~"):
|
|
116
|
-
msg = f"Path traversal not allowed: {path}"
|
|
117
|
-
raise ValueError(msg)
|
|
118
|
-
|
|
119
|
-
normalized = os.path.normpath(path)
|
|
120
|
-
normalized = normalized.replace("\\", "/")
|
|
121
|
-
|
|
122
|
-
if not normalized.startswith("/"):
|
|
123
|
-
normalized = f"/{normalized}"
|
|
124
|
-
|
|
125
|
-
if allowed_prefixes is not None and not any(normalized.startswith(prefix) for prefix in allowed_prefixes):
|
|
126
|
-
msg = f"Path must start with one of {allowed_prefixes}: {path}"
|
|
127
|
-
raise ValueError(msg)
|
|
128
|
-
|
|
129
|
-
return normalized
|
|
130
|
-
|
|
131
|
-
class FilesystemState(AgentState):
    """State for the filesystem middleware."""

    # Annotated with _file_data_reducer so partial updates merge into the
    # existing mapping and None-valued entries delete files.
    files: Annotated[NotRequired[dict[str, FileData]], _file_data_reducer]
    """Files in the filesystem."""
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
# Tool description shown to the model for the ls tool.
LIST_FILES_TOOL_DESCRIPTION = """Lists all files in the filesystem, filtering by directory.

Usage:
- The path parameter must be an absolute path, not a relative path
- The list_files tool will return a list of all files in the specified directory.
- This is very useful for exploring the file system and finding the right file to read or edit.
- You should almost ALWAYS use this tool before using the Read or Edit tools."""

# Tool description shown to the model for the read_file tool.
READ_FILE_TOOL_DESCRIPTION = """Reads a file from the filesystem. You can access any file directly by using this tool.
Assume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned.

Usage:
- The file_path parameter must be an absolute path, not a relative path
- By default, it reads up to 2000 lines starting from the beginning of the file
- You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters
- Any lines longer than 2000 characters will be truncated
- Results are returned using cat -n format, with line numbers starting at 1
- You have the capability to call multiple tools in a single response. It is always better to speculatively read multiple files as a batch that are potentially useful.
- If you read a file that exists but has empty contents you will receive a system reminder warning in place of file contents.
- You should ALWAYS make sure a file has been read before editing it."""

# Tool description shown to the model for the edit_file tool.
EDIT_FILE_TOOL_DESCRIPTION = """Performs exact string replacements in files.

Usage:
- You must use your `Read` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file.
- When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + tab. Everything after that tab is the actual file content to match. Never include any part of the line number prefix in the old_string or new_string.
- ALWAYS prefer editing existing files. NEVER write new files unless explicitly required.
- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked.
- The edit will FAIL if `old_string` is not unique in the file. Either provide a larger string with more surrounding context to make it unique or use `replace_all` to change every instance of `old_string`.
- Use `replace_all` for replacing and renaming strings across the file. This parameter is useful if you want to rename a variable for instance."""
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
# Tool description shown to the model for the write_file tool.
# Fix: corrected the typo "will create the a new file" -> "will create a new file".
WRITE_FILE_TOOL_DESCRIPTION = """Writes to a new file in the filesystem.

Usage:
- The file_path parameter must be an absolute path, not a relative path
- The content parameter must be a string
- The write_file tool will create a new file.
- Prefer to edit existing files over creating new ones when possible."""
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
# Tool description shown to the model for the glob tool.
GLOB_TOOL_DESCRIPTION = """Find files matching a glob pattern.

Usage:
- The glob tool finds files by matching patterns with wildcards
- Supports standard glob patterns: `*` (any characters), `**` (any directories), `?` (single character)
- Patterns can be absolute (starting with `/`) or relative
- Returns a list of absolute file paths that match the pattern

Examples:
- `**/*.py` - Find all Python files
- `*.txt` - Find all text files in root
- `/subdir/**/*.md` - Find all markdown files under /subdir"""

# Tool description shown to the model for the grep tool.
GREP_TOOL_DESCRIPTION = """Search for a pattern in files.

Usage:
- The grep tool searches for text patterns across files
- The pattern parameter is the text to search for (literal string, not regex)
- The path parameter filters which directory to search in (default is `/` for all files)
- The glob parameter accepts a glob pattern to filter which files to search (e.g., `*.py`)
- The output_mode parameter controls the output format:
  - `files_with_matches`: List only file paths containing matches (default)
  - `content`: Show matching lines with file path and line numbers
  - `count`: Show count of matches per file

Examples:
- Search all files: `grep(pattern="TODO")`
- Search Python files only: `grep(pattern="import", glob="*.py")`
- Show matching lines: `grep(pattern="error", output_mode="content")`"""

# Section appended to the agent's system prompt by FilesystemMiddleware.
FILESYSTEM_SYSTEM_PROMPT = """## Filesystem Tools `ls`, `read_file`, `write_file`, `edit_file`, `glob`, `grep`

You have access to a filesystem which you can interact with using these tools.
All file paths must start with a /.

- ls: list files in a directory (requires absolute path)
- read_file: read a file from the filesystem
- write_file: write to a file in the filesystem
- edit_file: edit a file in the filesystem
- glob: find files matching a pattern (e.g., "**/*.py")
- grep: search for text within files"""
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
def _ls_tool_generator(
    backend: MemoryBackend | Callable[[ToolRuntime], MemoryBackend],
    custom_description: str | None = None,
) -> BaseTool:
    """Build the ls (list files) tool bound to *backend*.

    Args:
        backend: Backend instance, or a factory that takes the tool runtime
            and returns a backend.
        custom_description: Optional description overriding the default.

    Returns:
        The configured ls tool.
    """
    description = custom_description or LIST_FILES_TOOL_DESCRIPTION

    @tool(description=description)
    def ls(runtime: ToolRuntime[None, FilesystemState], path: str) -> list[str]:
        # A callable backend is a per-call factory; resolve it with the runtime.
        active_backend = backend(runtime) if callable(backend) else backend
        return active_backend.ls(_validate_path(path))

    return ls
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
def _read_file_tool_generator(
    backend: MemoryBackend | Callable[[ToolRuntime], MemoryBackend],
    custom_description: str | None = None,
) -> BaseTool:
    """Build the read_file tool bound to *backend*.

    Args:
        backend: Backend instance, or a factory that takes the tool runtime
            and returns a backend.
        custom_description: Optional description overriding the default.

    Returns:
        The configured read_file tool.
    """
    description = custom_description or READ_FILE_TOOL_DESCRIPTION

    @tool(description=description)
    def read_file(
        file_path: str,
        runtime: ToolRuntime[None, FilesystemState],
        offset: int = DEFAULT_READ_OFFSET,
        limit: int = DEFAULT_READ_LIMIT,
    ) -> str:
        # A callable backend is a per-call factory; resolve it with the runtime.
        active_backend = backend(runtime) if callable(backend) else backend
        safe_path = _validate_path(file_path)
        return active_backend.read(safe_path, offset=offset, limit=limit)

    return read_file
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
def _write_file_tool_generator(
    backend: MemoryBackend | Callable[[ToolRuntime], MemoryBackend],
    custom_description: str | None = None,
) -> BaseTool:
    """Build the write_file tool bound to *backend*.

    Args:
        backend: Backend instance, or a factory that takes the tool runtime
            and returns a backend.
        custom_description: Optional description overriding the default.

    Returns:
        The configured write_file tool.
    """
    description = custom_description or WRITE_FILE_TOOL_DESCRIPTION

    @tool(description=description)
    def write_file(
        file_path: str,
        content: str,
        runtime: ToolRuntime[None, FilesystemState],
    ) -> Command | str:
        # A callable backend is a per-call factory; resolve it with the runtime.
        active_backend = backend(runtime) if callable(backend) else backend
        safe_path = _validate_path(file_path)
        return active_backend.write(safe_path, content)

    return write_file
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
def _edit_file_tool_generator(
    backend: MemoryBackend | Callable[[ToolRuntime], MemoryBackend],
    custom_description: str | None = None,
) -> BaseTool:
    """Build the edit_file tool bound to *backend*.

    Args:
        backend: Backend instance, or a factory that takes the tool runtime
            and returns a backend.
        custom_description: Optional description overriding the default.

    Returns:
        The configured edit_file tool performing exact string replacement.
    """
    description = custom_description or EDIT_FILE_TOOL_DESCRIPTION

    @tool(description=description)
    def edit_file(
        file_path: str,
        old_string: str,
        new_string: str,
        runtime: ToolRuntime[None, FilesystemState],
        *,
        replace_all: bool = False,
    ) -> Command | str:
        # A callable backend is a per-call factory; resolve it with the runtime.
        active_backend = backend(runtime) if callable(backend) else backend
        safe_path = _validate_path(file_path)
        return active_backend.edit(safe_path, old_string, new_string, replace_all=replace_all)

    return edit_file
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
def _glob_tool_generator(
    backend: MemoryBackend | Callable[[ToolRuntime], MemoryBackend],
    custom_description: str | None = None,
) -> BaseTool:
    """Generate the glob tool.

    Args:
        backend: Backend to use for file storage, or a factory function that
            takes runtime and returns a backend.
        custom_description: Optional custom description for the tool.

    Returns:
        Configured glob tool that finds files by pattern using the backend.
    """
    tool_description = custom_description or GLOB_TOOL_DESCRIPTION

    @tool(description=tool_description)
    def glob(pattern: str, runtime: ToolRuntime[None, FilesystemState], path: str = "/") -> list[str]:
        # Resolve backend if it's a factory function
        resolved_backend = backend(runtime) if callable(backend) else backend
        # Fix: validate the search root like ls/read_file/write_file/edit_file do;
        # previously glob accepted unvalidated paths (traversal sequences included).
        validated_path = _validate_path(path)
        return resolved_backend.glob(pattern, path=validated_path)

    return glob
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
def _grep_tool_generator(
    backend: MemoryBackend | Callable[[ToolRuntime], MemoryBackend],
    custom_description: str | None = None,
) -> BaseTool:
    """Generate the grep tool.

    Args:
        backend: Backend to use for file storage, or a factory function that
            takes runtime and returns a backend.
        custom_description: Optional custom description for the tool.

    Returns:
        Configured grep tool that searches for patterns in files using the backend.
    """
    tool_description = custom_description or GREP_TOOL_DESCRIPTION

    @tool(description=tool_description)
    def grep(
        pattern: str,
        runtime: ToolRuntime[None, FilesystemState],
        path: str = "/",
        glob: str | None = None,
        output_mode: Literal["files_with_matches", "content", "count"] = "files_with_matches",
    ) -> str:
        # Resolve backend if it's a factory function
        resolved_backend = backend(runtime) if callable(backend) else backend
        # Fix: validate the search root like ls/read_file/write_file/edit_file do;
        # previously grep accepted unvalidated paths (traversal sequences included).
        validated_path = _validate_path(path)
        return resolved_backend.grep(pattern, path=validated_path, glob=glob, output_mode=output_mode)

    return grep
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
# Registry mapping tool names to their generator functions. Also serves as the
# set of filesystem-owned tool names (wrap_tool_call checks membership to skip
# evicting results produced by these tools).
TOOL_GENERATORS = {
    "ls": _ls_tool_generator,
    "read_file": _read_file_tool_generator,
    "write_file": _write_file_tool_generator,
    "edit_file": _edit_file_tool_generator,
    "glob": _glob_tool_generator,
    "grep": _grep_tool_generator,
}
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
def _get_filesystem_tools(
    backend: MemoryBackend,
    custom_tool_descriptions: dict[str, str] | None = None,
) -> list[BaseTool]:
    """Get filesystem tools.

    Args:
        backend: Backend to use for file storage, or a factory function that
            takes runtime and returns a backend.
        custom_tool_descriptions: Optional custom descriptions keyed by tool name.

    Returns:
        List of configured filesystem tools (ls, read_file, write_file, edit_file, glob, grep).
    """
    descriptions = custom_tool_descriptions or {}
    # Fix: the previous loop bound its variable to the name `tool`, shadowing
    # the imported `tool` decorator; a comprehension avoids the shadow entirely.
    return [
        generator(backend, descriptions.get(name))
        for name, generator in TOOL_GENERATORS.items()
    ]
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
TOO_LARGE_TOOL_MSG = """Tool result too large, the result of this tool call {tool_call_id} was saved in the filesystem at this path: {file_path}
|
|
421
|
-
You can read the result from the filesystem by using the read_file tool, but make sure to only read part of the result at a time.
|
|
422
|
-
You can do this by specifying an offset and limit in the read_file tool call.
|
|
423
|
-
For example, to read the first 100 lines, you can use the read_file tool with offset=0 and limit=100.
|
|
424
|
-
|
|
425
|
-
Here are the first 10 lines of the result:
|
|
426
|
-
{content_sample}
|
|
427
|
-
"""
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
class FilesystemMiddleware(AgentMiddleware):
    """Middleware for providing filesystem tools to an agent.

    This middleware adds six filesystem tools to the agent: ls, read_file, write_file,
    edit_file, glob, and grep. Files can be stored using any backend that implements
    the MemoryBackend protocol.

    Args:
        memory_backend: Backend for file storage. If not provided, defaults to StateBackend
            (ephemeral storage in agent state). For persistent storage or hybrid setups,
            use CompositeBackend with custom routes.
        system_prompt: Optional custom system prompt override.
        custom_tool_descriptions: Optional custom tool descriptions override.
        tool_token_limit_before_evict: Optional token limit before evicting a tool result to the filesystem.

    Example:
        ```python
        from deepagents.middleware.filesystem import FilesystemMiddleware
        from deepagents.memory.backends import StateBackend, StoreBackend, CompositeBackend
        from langchain.agents import create_agent

        # Ephemeral storage only (default)
        agent = create_agent(middleware=[FilesystemMiddleware()])

        # With hybrid storage (ephemeral + persistent /memories/)
        backend = CompositeBackend(
            default=StateBackend(),
            routes={"/memories/": StoreBackend()}
        )
        agent = create_agent(middleware=[FilesystemMiddleware(memory_backend=backend)])
        ```
    """

    state_schema = FilesystemState

    def __init__(
        self,
        *,
        memory_backend: MemoryBackend | None = None,
        system_prompt: str | None = None,
        custom_tool_descriptions: dict[str, str] | None = None,
        tool_token_limit_before_evict: int | None = 20000,
    ) -> None:
        """Initialize the filesystem middleware.

        Args:
            memory_backend: Backend for file storage. Defaults to StateBackend if not provided.
            system_prompt: Optional custom system prompt override.
            custom_tool_descriptions: Optional custom tool descriptions override.
            tool_token_limit_before_evict: Optional token limit before evicting a tool
                result to the filesystem. ``None`` disables eviction entirely.
        """
        self.tool_token_limit_before_evict = tool_token_limit_before_evict

        # Use provided backend or default to StateBackend factory
        self.backend = memory_backend if memory_backend is not None else (lambda runtime: StateBackend(runtime))

        # Set system prompt (allow full override)
        self.system_prompt = system_prompt if system_prompt is not None else FILESYSTEM_SYSTEM_PROMPT

        self.tools = _get_filesystem_tools(self.backend, custom_tool_descriptions)

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Update the system prompt to include instructions on using the filesystem.

        Args:
            request: The model request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The model response from the handler.
        """
        # Append the filesystem section to any existing system prompt rather
        # than replacing it; use it as-is when there is no existing prompt.
        if self.system_prompt is not None:
            request.system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
        return handler(request)

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """(async) Update the system prompt to include instructions on using the filesystem.

        Args:
            request: The model request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The model response from the handler.
        """
        # Mirrors wrap_model_call; keep the two implementations in sync.
        if self.system_prompt is not None:
            request.system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
        return await handler(request)

    def _intercept_large_tool_result(self, tool_result: ToolMessage | Command) -> ToolMessage | Command:
        """Evict oversized tool results into the virtual filesystem.

        Oversized string results (length > 4 * tool_token_limit_before_evict,
        i.e. roughly 4 characters per token) are replaced with a pointer
        message and the full content is written to /large_tool_results/<id>.
        Handles both plain ToolMessage results and Command results that carry
        ToolMessages in their state update.
        """
        if isinstance(tool_result, ToolMessage) and isinstance(tool_result.content, str):
            content = tool_result.content
            # Threshold check: the 4x multiplier converts the token limit to an
            # approximate character count.
            if self.tool_token_limit_before_evict and len(content) > 4 * self.tool_token_limit_before_evict:
                file_path = f"/large_tool_results/{tool_result.tool_call_id}"
                file_data = create_file_data(content)
                # Replace the original message with a pointer and stash the
                # full content in the files state via the reducer.
                state_update = {
                    "messages": [
                        ToolMessage(
                            TOO_LARGE_TOOL_MSG.format(
                                tool_call_id=tool_result.tool_call_id,
                                file_path=file_path,
                                content_sample=format_content_with_line_numbers(file_data["content"][:10], start_line=1),
                            ),
                            tool_call_id=tool_result.tool_call_id,
                        )
                    ],
                    "files": {file_path: file_data},
                }
                return Command(update=state_update)
        elif isinstance(tool_result, Command):
            update = tool_result.update
            if update is None:
                return tool_result
            message_updates = update.get("messages", [])
            # NOTE(review): when the update already has a "files" dict this is
            # that same object, so the assignment below mutates the incoming
            # Command's update in place — confirm this aliasing is intended.
            file_updates = update.get("files", {})

            edited_message_updates = []
            for message in message_updates:
                if self.tool_token_limit_before_evict and isinstance(message, ToolMessage) and isinstance(message.content, str):
                    content = message.content
                    if len(content) > 4 * self.tool_token_limit_before_evict:
                        file_path = f"/large_tool_results/{message.tool_call_id}"
                        file_data = create_file_data(content)
                        # Swap the oversized message for a pointer message.
                        edited_message_updates.append(
                            ToolMessage(
                                TOO_LARGE_TOOL_MSG.format(
                                    tool_call_id=message.tool_call_id,
                                    file_path=file_path,
                                    content_sample=format_content_with_line_numbers(file_data["content"][:10], start_line=1),
                                ),
                                tool_call_id=message.tool_call_id,
                            )
                        )
                        file_updates[file_path] = file_data
                        continue
                # Non-oversized (or non-ToolMessage) updates pass through untouched.
                edited_message_updates.append(message)
            return Command(update={**update, "messages": edited_message_updates, "files": file_updates})
        return tool_result

    def wrap_tool_call(
        self,
        request: ToolCallRequest,
        handler: Callable[[ToolCallRequest], ToolMessage | Command],
    ) -> ToolMessage | Command:
        """Check the size of the tool call result and evict to filesystem if too large.

        Args:
            request: The tool call request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The raw ToolMessage, or a pseudo tool message with the ToolResult in state.
        """
        # Skip interception when eviction is disabled, or when the tool is one
        # of this middleware's own filesystem tools (membership in TOOL_GENERATORS).
        if self.tool_token_limit_before_evict is None or request.tool_call["name"] in TOOL_GENERATORS:
            return handler(request)

        tool_result = handler(request)
        return self._intercept_large_tool_result(tool_result)

    async def awrap_tool_call(
        self,
        request: ToolCallRequest,
        handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]],
    ) -> ToolMessage | Command:
        """(async) Check the size of the tool call result and evict to filesystem if too large.

        Args:
            request: The tool call request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The raw ToolMessage, or a pseudo tool message with the ToolResult in state.
        """
        # Mirrors wrap_tool_call; keep the two implementations in sync.
        if self.tool_token_limit_before_evict is None or request.tool_call["name"] in TOOL_GENERATORS:
            return await handler(request)

        tool_result = await handler(request)
        return self._intercept_large_tool_result(tool_result)
|
|
@@ -1,44 +0,0 @@
|
|
|
1
|
-
"""Middleware to patch dangling tool calls in the messages history."""
|
|
2
|
-
|
|
3
|
-
from typing import Any
|
|
4
|
-
|
|
5
|
-
from langchain.agents.middleware import AgentMiddleware, AgentState
|
|
6
|
-
from langchain_core.messages import RemoveMessage, ToolMessage
|
|
7
|
-
from langgraph.graph.message import REMOVE_ALL_MESSAGES
|
|
8
|
-
from langgraph.runtime import Runtime
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
class PatchToolCallsMiddleware(AgentMiddleware):
    """Middleware to patch dangling tool calls in the messages history."""

    def before_agent(self, state: AgentState, runtime: Runtime[Any]) -> dict[str, Any] | None:  # noqa: ARG002
        """Before the agent runs, handle dangling tool calls from any AIMessage.

        A tool call is "dangling" when an AIMessage requested it but no
        ToolMessage with the same ``tool_call_id`` exists in the history.
        Each dangling call gets a synthetic cancellation ToolMessage inserted
        directly after its AIMessage so the history stays well-formed.

        Fixes over the previous implementation:
        - removed the redundant ``len(messages) == 0`` check (``not messages``
          already covers it);
        - replaced the per-tool-call linear scan (O(n^2)) with a single pass
          collecting answered ids into a set (O(n));
        - returns ``None`` when nothing is dangling instead of rewriting the
          entire history with an identical copy (net state is unchanged).

        Args:
            state: Current agent state containing the message history.
            runtime: LangGraph runtime (unused).

        Returns:
            A state update replacing the full message history when at least
            one dangling call was patched, otherwise ``None``.
        """
        messages = state["messages"]
        if not messages:
            return None

        # Ids of every tool result present anywhere in the history.
        answered_ids = {msg.tool_call_id for msg in messages if msg.type == "tool"}

        patched_messages = []
        patched_any = False
        for msg in messages:
            patched_messages.append(msg)
            if msg.type != "ai" or not msg.tool_calls:
                continue
            for tool_call in msg.tool_calls:
                if tool_call["id"] in answered_ids:
                    continue
                # Dangling tool call: synthesize a cancellation ToolMessage.
                patched_any = True
                patched_messages.append(
                    ToolMessage(
                        content=(
                            f"Tool call {tool_call['name']} with id {tool_call['id']} was "
                            "cancelled - another message came in before it could be completed."
                        ),
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"],
                    )
                )

        if not patched_any:
            return None
        # Replace the whole history so the patched ToolMessages land in order.
        return {"messages": [RemoveMessage(id=REMOVE_ALL_MESSAGES), *patched_messages]}
|