deepagents 0.3.7__py3-none-any.whl → 0.3.7a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -31,39 +31,6 @@ class FilesystemBackend(BackendProtocol):
  Files are accessed using their actual filesystem paths. Relative paths are
  resolved relative to the current working directory. Content is read/written
  as plain text, and metadata (timestamps) are derived from filesystem stats.
-
- !!! warning "Security Warning"
-
- This backend grants agents direct filesystem read/write access. Use with
- caution and only in appropriate environments.
-
- **Appropriate use cases:**
-
- - Local development CLIs (coding assistants, development tools)
- - CI/CD pipelines (see security considerations below)
-
- **Inappropriate use cases:**
-
- - Web servers or HTTP APIs - use `StateBackend`, `StoreBackend`, or
- `SandboxBackend` instead
-
- **Security risks:**
-
- - Agents can read any accessible file, including secrets (API keys,
- credentials, `.env` files)
- - Combined with network tools, secrets may be exfiltrated via SSRF attacks
- - File modifications are permanent and irreversible
-
- **Recommended safeguards:**
-
- 1. Enable Human-in-the-Loop (HITL) middleware to review sensitive operations
- 2. Exclude secrets from accessible filesystem paths (especially in CI/CD)
- 3. Use `SandboxBackend` for production environments requiring filesystem
- interaction
- 4. **Always** use `virtual_mode=True` with `root_dir` to enable path-based
- access restrictions (blocks `..`, `~`, and absolute paths outside root).
- Note that the default (`virtual_mode=False`) provides no security even with
- `root_dir` set.
  """

  def __init__(
@@ -77,29 +44,14 @@ class FilesystemBackend(BackendProtocol):
  Args:
  root_dir: Optional root directory for file operations.

- - If not provided, defaults to the current working directory.
- - When `virtual_mode=False` (default): Only affects relative path
- resolution. Provides **no security** - agents can access any file
- using absolute paths or `..` sequences.
- - When `virtual_mode=True`: All paths are restricted to this
- directory with traversal protection enabled.
-
- virtual_mode: Enable path-based access restrictions.
-
- When `True`, all paths are treated as virtual paths anchored to
- `root_dir`. Path traversal (`..`, `~`) is blocked and all resolved paths
- are verified to remain within `root_dir`.
-
- When `False` (default), **no security is provided**:
-
- - Absolute paths (e.g., `/etc/passwd`) bypass `root_dir` entirely
- - Relative paths with `..` can escape `root_dir`
- - Agents have unrestricted filesystem access
-
- **Security note:** `virtual_mode=True` provides path-based access
- control, not process isolation. It restricts which files can be
- accessed via paths, but does not sandbox the Python process itself.
+ If provided, all file paths will be resolved relative to this directory.
+ If not provided, uses the current working directory.
+ virtual_mode: Enables sandboxed operation where all paths are treated as
+ virtual paths rooted at `root_dir`.

+ Path traversal (using `..` or `~`) is disallowed and all resolved paths
+ must remain within the root directory. When `False` (default), absolute
+ paths are allowed as-is and relative paths resolve under cwd.
  max_file_size_mb: Maximum file size in megabytes for operations like
  grep's Python fallback search.

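The hunks above replace the detailed `root_dir`/`virtual_mode` security guidance with a shorter description. For orientation, a minimal sketch of how the documented parameters combine; the import path is an assumption (only the class body appears in this diff) and the values are illustrative:

```python
from deepagents.backends.filesystem import FilesystemBackend  # assumed import path

# With virtual_mode=True, paths are treated as virtual paths rooted at root_dir
# and traversal via `..` or `~` is rejected, per the docstring above.
backend = FilesystemBackend(
    root_dir="/workspace/project",
    virtual_mode=True,
    max_file_size_mb=10,  # documented limit for grep's Python fallback search
)
```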
deepagents/graph.py CHANGED
@@ -5,6 +5,7 @@ from typing import Any

  from langchain.agents import create_agent
  from langchain.agents.middleware import HumanInTheLoopMiddleware, InterruptOnConfig, TodoListMiddleware
+ from langchain.agents.middleware.summarization import SummarizationMiddleware
  from langchain.agents.middleware.types import AgentMiddleware
  from langchain.agents.structured_output import ResponseFormat
  from langchain.chat_models import init_chat_model
@@ -25,7 +26,6 @@ from deepagents.middleware.memory import MemoryMiddleware
  from deepagents.middleware.patch_tool_calls import PatchToolCallsMiddleware
  from deepagents.middleware.skills import SkillsMiddleware
  from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
- from deepagents.middleware.summarization import SummarizationMiddleware

  BASE_AGENT_PROMPT = "In order to complete the objective that the user asks of you, you have access to a number of standard tools."

@@ -38,7 +38,7 @@ def get_default_model() -> ChatAnthropic:
  """
  return ChatAnthropic(
  model_name="claude-sonnet-4-5-20250929",
- max_tokens=20000, # type: ignore[call-arg]
+ max_tokens=20000,
  )


@@ -63,14 +63,11 @@ def create_deep_agent(
  ) -> CompiledStateGraph:
  """Create a deep agent.

- !!! warning "Deep agents require a LLM that supports tool calling!"
+ Deep agents require a LLM that supports tool calling.

- By default, this agent has access to the following tools:
-
- - `write_todos`: manage a todo list
- - `ls`, `read_file`, `write_file`, `edit_file`, `glob`, `grep`: file operations
- - `execute`: run shell commands
- - `task`: call subagents
+ This agent will by default have access to a tool to write todos (`write_todos`),
+ seven file and execution tools: `ls`, `read_file`, `write_file`, `edit_file`, `glob`, `grep`, `execute`,
+ and a tool to call subagents (`task`).

  The `execute` tool allows running shell commands if the backend implements `SandboxBackendProtocol`.
  For non-sandbox backends, the `execute` tool will return an error message.
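For context on the reworded docstring above, a minimal sketch of calling `create_deep_agent`; the `tools` and `system_prompt` parameters are documented in this diff, while the import path (taken from the `deepagents/graph.py` file header), the argument values, and the invocation pattern are illustrative assumptions:

```python
from deepagents.graph import create_deep_agent  # module path per the file header above

agent = create_deep_agent(
    tools=[],  # custom tools, added on top of the built-ins listed above
    system_prompt="You are a careful coding assistant.",
)
# The returned CompiledStateGraph exposes invoke(); the message format follows
# LangChain conventions and is illustrative only.
result = agent.invoke({"messages": [{"role": "user", "content": "List the files in this repo."}]})
```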
@@ -85,14 +82,10 @@ def create_deep_agent(

  In addition to custom tools you provide, deep agents include built-in tools for planning,
  file management, and subagent spawning.
- system_prompt: Custom system instructions to prepend before the base deep agent
- prompt.
-
- If a string, it's concatenated with the base prompt.
- middleware: Additional middleware to apply after the standard middleware stack
- (`TodoListMiddleware`, `FilesystemMiddleware`, `SubAgentMiddleware`,
- `SummarizationMiddleware`, `AnthropicPromptCachingMiddleware`,
- `PatchToolCallsMiddleware`).
+ system_prompt: The additional instructions the agent should have.
+
+ Will go in the system prompt. Can be a string or a `SystemMessage`.
+ middleware: Additional middleware to apply after standard middleware.
  subagents: The subagents to use.

  Each subagent should be a `dict` with the following keys:
@@ -149,17 +142,9 @@ def create_deep_agent(
  ):
  trigger = ("fraction", 0.85)
  keep = ("fraction", 0.10)
- truncate_args_settings = {
- "trigger": ("fraction", 0.85),
- "keep": ("fraction", 0.10),
- }
  else:
  trigger = ("tokens", 170000)
  keep = ("messages", 6)
- truncate_args_settings = {
- "trigger": ("messages", 20),
- "keep": ("messages", 20),
- }

  # Build middleware stack for subagents (includes skills if provided)
  subagent_middleware: list[AgentMiddleware] = [
@@ -175,11 +160,9 @@ def create_deep_agent(
  FilesystemMiddleware(backend=backend),
  SummarizationMiddleware(
  model=model,
- backend=backend,
  trigger=trigger,
  keep=keep,
  trim_tokens_to_summarize=None,
- truncate_args_settings=truncate_args_settings,
  ),
  AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
  PatchToolCallsMiddleware(),
@@ -207,11 +190,9 @@
  ),
  SummarizationMiddleware(
  model=model,
- backend=backend,
  trigger=trigger,
  keep=keep,
  trim_tokens_to_summarize=None,
- truncate_args_settings=truncate_args_settings,
  ),
  AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
  PatchToolCallsMiddleware(),
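The graph.py hunks above switch `SummarizationMiddleware` to the langchain implementation and drop the `backend` and `truncate_args_settings` arguments from both call sites. A hedged sketch of the resulting configuration, using only argument names and values that appear in this diff; the `ChatAnthropic` import path is an assumption, since the diff only references the class:

```python
from langchain.agents.middleware.summarization import SummarizationMiddleware
from langchain_anthropic import ChatAnthropic  # assumed import path

model = ChatAnthropic(model_name="claude-sonnet-4-5-20250929", max_tokens=20000)

# Values mirror the "fraction" branch above; the other branch uses
# trigger=("tokens", 170000) and keep=("messages", 6).
summarization = SummarizationMiddleware(
    model=model,
    trigger=("fraction", 0.85),
    keep=("fraction", 0.10),
    trim_tokens_to_summarize=None,
)
```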
@@ -1,10 +1,9 @@
- """Middleware for the agent."""
+ """Middleware for the DeepAgent."""

  from deepagents.middleware.filesystem import FilesystemMiddleware
  from deepagents.middleware.memory import MemoryMiddleware
  from deepagents.middleware.skills import SkillsMiddleware
  from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
- from deepagents.middleware.summarization import SummarizationMiddleware

  __all__ = [
  "CompiledSubAgent",
@@ -13,5 +12,4 @@ __all__ = [
  "SkillsMiddleware",
  "SubAgent",
  "SubAgentMiddleware",
- "SummarizationMiddleware",
  ]
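This `__init__.py` hunk removes `SummarizationMiddleware` from the `deepagents.middleware` exports, matching the import switch in `deepagents/graph.py` above. Code that imported it from deepagents will presumably need to follow the same move; a minimal sketch:

```python
# Old import, removed in 0.3.7a1 per the hunk above:
# from deepagents.middleware import SummarizationMiddleware

# New import, as used by deepagents/graph.py in this diff:
from langchain.agents.middleware.summarization import SummarizationMiddleware
```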
@@ -154,16 +154,19 @@ class FilesystemState(AgentState):
  """Files in the filesystem."""


- LIST_FILES_TOOL_DESCRIPTION = """Lists all files in a directory.
+ LIST_FILES_TOOL_DESCRIPTION = """Lists all files in the filesystem, filtering by directory.

- This is useful for exploring the filesystem and finding the right file to read or edit.
- You should almost ALWAYS use this tool before using the read_file or edit_file tools."""
-
- READ_FILE_TOOL_DESCRIPTION = """Reads a file from the filesystem.
+ Usage:
+ - The path parameter must be an absolute path, not a relative path
+ - The list_files tool will return a list of all files in the specified directory.
+ - This is very useful for exploring the file system and finding the right file to read or edit.
+ - You should almost ALWAYS use this tool before using the Read or Edit tools."""

- Assume this tool is able to read all files. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned.
+ READ_FILE_TOOL_DESCRIPTION = """Reads a file from the filesystem. You can access any file directly by using this tool.
+ Assume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned.

  Usage:
+ - The file_path parameter must be an absolute path, not a relative path
  - By default, it reads up to 100 lines starting from the beginning of the file
  - **IMPORTANT for large files and codebase exploration**: Use pagination with offset and limit parameters to avoid context overflow
  - First scan: read_file(path, limit=100) to see file structure
@@ -179,46 +182,61 @@ Usage:
  EDIT_FILE_TOOL_DESCRIPTION = """Performs exact string replacements in files.

  Usage:
- - You must read the file before editing. This tool will error if you attempt an edit without reading the file first.
- - When editing, preserve the exact indentation (tabs/spaces) from the read output. Never include line number prefixes in old_string or new_string.
- - ALWAYS prefer editing existing files over creating new ones.
- - Only use emojis if the user explicitly requests it."""
+ - You must use your `Read` tool at least once in the conversation before editing. This tool will error if you attempt an edit without reading the file.
+ - When editing text from Read tool output, ensure you preserve the exact indentation (tabs/spaces) as it appears AFTER the line number prefix. The line number prefix format is: spaces + line number + tab. Everything after that tab is the actual file content to match. Never include any part of the line number prefix in the old_string or new_string.
+ - ALWAYS prefer editing existing files. NEVER write new files unless explicitly required.
+ - Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked.
+ - The edit will FAIL if `old_string` is not unique in the file. Either provide a larger string with more surrounding context to make it unique or use `replace_all` to change every instance of `old_string`.
+ - Use `replace_all` for replacing and renaming strings across the file. This parameter is useful if you want to rename a variable for instance."""


  WRITE_FILE_TOOL_DESCRIPTION = """Writes to a new file in the filesystem.

  Usage:
+ - The file_path parameter must be an absolute path, not a relative path
+ - The content parameter must be a string
  - The write_file tool will create the a new file.
- - Prefer to edit existing files (with the edit_file tool) over creating new ones when possible.
- """
+ - Prefer to edit existing files over creating new ones when possible."""
+

  GLOB_TOOL_DESCRIPTION = """Find files matching a glob pattern.

- Supports standard glob patterns: `*` (any characters), `**` (any directories), `?` (single character).
- Returns a list of absolute file paths that match the pattern.
+ Usage:
+ - The glob tool finds files by matching patterns with wildcards
+ - Supports standard glob patterns: `*` (any characters), `**` (any directories), `?` (single character)
+ - Patterns can be absolute (starting with `/`) or relative
+ - Returns a list of absolute file paths that match the pattern

  Examples:
  - `**/*.py` - Find all Python files
  - `*.txt` - Find all text files in root
  - `/subdir/**/*.md` - Find all markdown files under /subdir"""

- GREP_TOOL_DESCRIPTION = """Search for a text pattern across files.
+ GREP_TOOL_DESCRIPTION = """Search for a pattern in files.

- Searches for literal text (not regex) and returns matching files or content based on output_mode.
+ Usage:
+ - The grep tool searches for text patterns across files
+ - The pattern parameter is the text to search for (literal string, not regex)
+ - The path parameter filters which directory to search in (default is the current working directory)
+ - The glob parameter accepts a glob pattern to filter which files to search (e.g., `*.py`)
+ - The output_mode parameter controls the output format:
+ - `files_with_matches`: List only file paths containing matches (default)
+ - `content`: Show matching lines with file path and line numbers
+ - `count`: Show count of matches per file

  Examples:
  - Search all files: `grep(pattern="TODO")`
  - Search Python files only: `grep(pattern="import", glob="*.py")`
  - Show matching lines: `grep(pattern="error", output_mode="content")`"""

- EXECUTE_TOOL_DESCRIPTION = """Executes a shell command in an isolated sandbox environment.
+ EXECUTE_TOOL_DESCRIPTION = """Executes a given command in the sandbox environment with proper handling and security measures.

- Usage:
- Executes a given command in the sandbox environment with proper handling and security measures.
  Before executing the command, please follow these steps:
+
  1. Directory Verification:
  - If the command will create new directories or files, first use the ls tool to verify the parent directory exists and is the correct location
  - For example, before running "mkdir foo/bar", first use ls to check that "foo" exists and is the intended parent directory
+
  2. Command Execution:
  - Always quote file paths that contain spaces with double quotes (e.g., cd "path with spaces/file.txt")
  - Examples of proper quoting:
@@ -228,7 +246,9 @@ Before executing the command, please follow these steps:
  - python /path/with spaces/script.py (incorrect - will fail)
  - After ensuring proper quoting, execute the command
  - Capture the output of the command
+
  Usage notes:
+ - The command parameter is required
  - Commands run in an isolated sandbox environment
  - Returns combined stdout/stderr output with exit code
  - If the output is very large, it may be truncated
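The updated descriptions above require absolute paths and spell out the `replace_all` semantics. A hedged sketch of tool arguments that satisfy them; the parameter names come from the tool signatures later in this diff, and the values are made up:

```python
# Illustrative argument payloads only.
write_file_args = {
    "file_path": "/workspace/notes.md",  # absolute path, per the updated description
    "content": "# Notes\n",              # content must be a string
}
edit_file_args = {
    "file_path": "/workspace/notes.md",
    "old_string": "# Notes",             # must be unique in the file...
    "new_string": "# Project notes",
    "replace_all": False,                # ...unless replace_all=True
}
grep_args = {"pattern": "TODO", "glob": "*.py", "output_mode": "content"}
```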
@@ -303,10 +323,7 @@ def _ls_tool_generator(
  """
  tool_description = custom_description or LIST_FILES_TOOL_DESCRIPTION

- def sync_ls(
- runtime: ToolRuntime[None, FilesystemState],
- path: Annotated[str, "Absolute path to the directory to list. Must be absolute, not relative."],
- ) -> str:
+ def sync_ls(runtime: ToolRuntime[None, FilesystemState], path: str) -> str:
  """Synchronous wrapper for ls tool."""
  resolved_backend = _get_backend(backend, runtime)
  validated_path = _validate_path(path)
@@ -315,10 +332,7 @@ def _ls_tool_generator(
  result = truncate_if_too_long(paths)
  return str(result)

- async def async_ls(
- runtime: ToolRuntime[None, FilesystemState],
- path: Annotated[str, "Absolute path to the directory to list. Must be absolute, not relative."],
- ) -> str:
+ async def async_ls(runtime: ToolRuntime[None, FilesystemState], path: str) -> str:
  """Asynchronous wrapper for ls tool."""
  resolved_backend = _get_backend(backend, runtime)
  validated_path = _validate_path(path)
@@ -351,10 +365,10 @@ def _read_file_tool_generator(
  tool_description = custom_description or READ_FILE_TOOL_DESCRIPTION

  def sync_read_file(
- file_path: Annotated[str, "Absolute path to the file to read. Must be absolute, not relative."],
+ file_path: str,
  runtime: ToolRuntime[None, FilesystemState],
- offset: Annotated[int, "Line number to start reading from (0-indexed). Use for pagination of large files."] = DEFAULT_READ_OFFSET,
- limit: Annotated[int, "Maximum number of lines to read. Use for pagination of large files."] = DEFAULT_READ_LIMIT,
+ offset: int = DEFAULT_READ_OFFSET,
+ limit: int = DEFAULT_READ_LIMIT,
  ) -> str:
  """Synchronous wrapper for read_file tool."""
  resolved_backend = _get_backend(backend, runtime)
@@ -369,10 +383,10 @@ def _read_file_tool_generator(
  return result

  async def async_read_file(
- file_path: Annotated[str, "Absolute path to the file to read. Must be absolute, not relative."],
+ file_path: str,
  runtime: ToolRuntime[None, FilesystemState],
- offset: Annotated[int, "Line number to start reading from (0-indexed). Use for pagination of large files."] = DEFAULT_READ_OFFSET,
- limit: Annotated[int, "Maximum number of lines to read. Use for pagination of large files."] = DEFAULT_READ_LIMIT,
+ offset: int = DEFAULT_READ_OFFSET,
+ limit: int = DEFAULT_READ_LIMIT,
  ) -> str:
  """Asynchronous wrapper for read_file tool."""
  resolved_backend = _get_backend(backend, runtime)
@@ -410,8 +424,8 @@ def _write_file_tool_generator(
  tool_description = custom_description or WRITE_FILE_TOOL_DESCRIPTION

  def sync_write_file(
- file_path: Annotated[str, "Absolute path where the file should be created. Must be absolute, not relative."],
- content: Annotated[str, "The text content to write to the file. This parameter is required."],
+ file_path: str,
+ content: str,
  runtime: ToolRuntime[None, FilesystemState],
  ) -> Command | str:
  """Synchronous wrapper for write_file tool."""
@@ -436,8 +450,8 @@ def _write_file_tool_generator(
  return f"Updated file {res.path}"

  async def async_write_file(
- file_path: Annotated[str, "Absolute path where the file should be created. Must be absolute, not relative."],
- content: Annotated[str, "The text content to write to the file. This parameter is required."],
+ file_path: str,
+ content: str,
  runtime: ToolRuntime[None, FilesystemState],
  ) -> Command | str:
  """Asynchronous wrapper for write_file tool."""
@@ -485,12 +499,12 @@ def _edit_file_tool_generator(
  tool_description = custom_description or EDIT_FILE_TOOL_DESCRIPTION

  def sync_edit_file(
- file_path: Annotated[str, "Absolute path to the file to edit. Must be absolute, not relative."],
- old_string: Annotated[str, "The exact text to find and replace. Must be unique in the file unless replace_all is True."],
- new_string: Annotated[str, "The text to replace old_string with. Must be different from old_string."],
+ file_path: str,
+ old_string: str,
+ new_string: str,
  runtime: ToolRuntime[None, FilesystemState],
  *,
- replace_all: Annotated[bool, "If True, replace all occurrences of old_string. If False (default), old_string must be unique."] = False,
+ replace_all: bool = False,
  ) -> Command | str:
  """Synchronous wrapper for edit_file tool."""
  resolved_backend = _get_backend(backend, runtime)
@@ -513,12 +527,12 @@ def _edit_file_tool_generator(
  return f"Successfully replaced {res.occurrences} instance(s) of the string in '{res.path}'"

  async def async_edit_file(
- file_path: Annotated[str, "Absolute path to the file to edit. Must be absolute, not relative."],
- old_string: Annotated[str, "The exact text to find and replace. Must be unique in the file unless replace_all is True."],
- new_string: Annotated[str, "The text to replace old_string with. Must be different from old_string."],
+ file_path: str,
+ old_string: str,
+ new_string: str,
  runtime: ToolRuntime[None, FilesystemState],
  *,
- replace_all: Annotated[bool, "If True, replace all occurrences of old_string. If False (default), old_string must be unique."] = False,
+ replace_all: bool = False,
  ) -> Command | str:
  """Asynchronous wrapper for edit_file tool."""
  resolved_backend = _get_backend(backend, runtime)
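The hunks above strip the per-parameter `Annotated` descriptions and rely on the module-level description strings instead. A minimal sketch of the pattern, using hypothetical function names rather than anything from the package:

```python
from typing import Annotated

# Before (0.3.7): per-parameter guidance carried as Annotated metadata.
def read_file_before(
    file_path: Annotated[str, "Absolute path to the file to read."],
    offset: Annotated[int, "Line number to start reading from (0-indexed)."] = 0,
) -> str:
    ...

# After (0.3.7a1): plain type hints; the guidance now lives in the
# module-level *_TOOL_DESCRIPTION strings shown earlier in this diff.
def read_file_after(file_path: str, offset: int = 0) -> str:
    ...
```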
@@ -563,11 +577,7 @@ def _glob_tool_generator(
  """
  tool_description = custom_description or GLOB_TOOL_DESCRIPTION

- def sync_glob(
- pattern: Annotated[str, "Glob pattern to match files (e.g., '**/*.py', '*.txt', '/subdir/**/*.md')."],
- runtime: ToolRuntime[None, FilesystemState],
- path: Annotated[str, "Base directory to search from. Defaults to root '/'."] = "/",
- ) -> str:
+ def sync_glob(pattern: str, runtime: ToolRuntime[None, FilesystemState], path: str = "/") -> str:
  """Synchronous wrapper for glob tool."""
  resolved_backend = _get_backend(backend, runtime)
  infos = resolved_backend.glob_info(pattern, path=path)
@@ -575,11 +585,7 @@ def _glob_tool_generator(
  result = truncate_if_too_long(paths)
  return str(result)

- async def async_glob(
- pattern: Annotated[str, "Glob pattern to match files (e.g., '**/*.py', '*.txt', '/subdir/**/*.md')."],
- runtime: ToolRuntime[None, FilesystemState],
- path: Annotated[str, "Base directory to search from. Defaults to root '/'."] = "/",
- ) -> str:
+ async def async_glob(pattern: str, runtime: ToolRuntime[None, FilesystemState], path: str = "/") -> str:
  """Asynchronous wrapper for glob tool."""
  resolved_backend = _get_backend(backend, runtime)
  infos = await resolved_backend.aglob_info(pattern, path=path)
@@ -611,14 +617,11 @@ def _grep_tool_generator(
  tool_description = custom_description or GREP_TOOL_DESCRIPTION

  def sync_grep(
- pattern: Annotated[str, "Text pattern to search for (literal string, not regex)."],
+ pattern: str,
  runtime: ToolRuntime[None, FilesystemState],
- path: Annotated[str | None, "Directory to search in. Defaults to current working directory."] = None,
- glob: Annotated[str | None, "Glob pattern to filter which files to search (e.g., '*.py')."] = None,
- output_mode: Annotated[
- Literal["files_with_matches", "content", "count"],
- "Output format: 'files_with_matches' (file paths only, default), 'content' (matching lines with context), 'count' (match counts per file).",
- ] = "files_with_matches",
+ path: str | None = None,
+ glob: str | None = None,
+ output_mode: Literal["files_with_matches", "content", "count"] = "files_with_matches",
  ) -> str:
  """Synchronous wrapper for grep tool."""
  resolved_backend = _get_backend(backend, runtime)
@@ -629,14 +632,11 @@ def _grep_tool_generator(
  return truncate_if_too_long(formatted) # type: ignore[arg-type]

  async def async_grep(
- pattern: Annotated[str, "Text pattern to search for (literal string, not regex)."],
+ pattern: str,
  runtime: ToolRuntime[None, FilesystemState],
- path: Annotated[str | None, "Directory to search in. Defaults to current working directory."] = None,
- glob: Annotated[str | None, "Glob pattern to filter which files to search (e.g., '*.py')."] = None,
- output_mode: Annotated[
- Literal["files_with_matches", "content", "count"],
- "Output format: 'files_with_matches' (file paths only, default), 'content' (matching lines with context), 'count' (match counts per file).",
- ] = "files_with_matches",
+ path: str | None = None,
+ glob: str | None = None,
+ output_mode: Literal["files_with_matches", "content", "count"] = "files_with_matches",
  ) -> str:
  """Asynchronous wrapper for grep tool."""
  resolved_backend = _get_backend(backend, runtime)
@@ -693,7 +693,7 @@ def _execute_tool_generator(
  tool_description = custom_description or EXECUTE_TOOL_DESCRIPTION

  def sync_execute(
- command: Annotated[str, "Shell command to execute in the sandbox environment."],
+ command: str,
  runtime: ToolRuntime[None, FilesystemState],
  ) -> str:
  """Synchronous wrapper for execute tool."""
@@ -726,7 +726,7 @@ def _execute_tool_generator(
  return "".join(parts)

  async def async_execute(
- command: Annotated[str, "Shell command to execute in the sandbox environment."],
+ command: str,
  runtime: ToolRuntime[None, FilesystemState],
  ) -> str:
  """Asynchronous wrapper for execute tool."""
@@ -814,9 +814,8 @@ class FilesystemMiddleware(AgentMiddleware):
  """Middleware for providing filesystem and optional execution tools to an agent.

  This middleware adds filesystem tools to the agent: `ls`, `read_file`, `write_file`,
- `edit_file`, `glob`, and `grep`.
-
- Files can be stored using any backend that implements the `BackendProtocol`.
+ `edit_file`, `glob`, and `grep`. Files can be stored using any backend that implements
+ the `BackendProtocol`.

  If the backend implements `SandboxBackendProtocol`, an `execute` tool is also added
  for running shell commands.
@@ -837,6 +836,8 @@ class FilesystemMiddleware(AgentMiddleware):
  tool_token_limit_before_evict: Token limit before evicting a tool result to the
  filesystem.

+ Defaults to 20,000 tokens.
+
  When exceeded, writes the result using the configured backend and replaces it
  with a truncated preview and file reference.

@@ -1069,7 +1070,6 @@ class FilesystemMiddleware(AgentMiddleware):
  processed_message = ToolMessage(
  content=replacement_text,
  tool_call_id=message.tool_call_id,
- name=message.name,
  )
  return processed_message, result.files_update

@@ -1128,7 +1128,6 @@ class FilesystemMiddleware(AgentMiddleware):
  processed_message = ToolMessage(
  content=replacement_text,
  tool_call_id=message.tool_call_id,
- name=message.name,
  )
  return processed_message, result.files_update

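Taken together with the `FilesystemMiddleware(backend=backend)` call in `deepagents/graph.py` earlier in this diff, a hedged sketch of standalone construction; the `FilesystemBackend` import path is assumed, and passing `tool_token_limit_before_evict` to the constructor is inferred from the docstring above rather than shown in this diff:

```python
from deepagents.middleware.filesystem import FilesystemMiddleware
from deepagents.backends.filesystem import FilesystemBackend  # assumed import path

fs_middleware = FilesystemMiddleware(
    backend=FilesystemBackend(root_dir="/workspace/project", virtual_mode=True),
    tool_token_limit_before_evict=20_000,  # documented default; larger results are evicted to a file
)
```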
@@ -74,7 +74,7 @@ logger = logging.getLogger(__name__)


  class MemoryState(AgentState):
- """State schema for `MemoryMiddleware`.
+ """State schema for MemoryMiddleware.

  Attributes:
  memory_contents: Dict mapping source paths to their loaded content.
@@ -85,7 +85,7 @@ class MemoryState(AgentState):


  class MemoryStateUpdate(TypedDict):
- """State update for `MemoryMiddleware`."""
+ """State update for MemoryMiddleware."""

  memory_contents: dict[str, str]

@@ -153,15 +153,14 @@ MEMORY_SYSTEM_PROMPT = """<agent_memory>


  class MemoryMiddleware(AgentMiddleware):
- """Middleware for loading agent memory from `AGENTS.md` files.
+ """Middleware for loading agent memory from AGENTS.md files.

  Loads memory content from configured sources and injects into the system prompt.
-
  Supports multiple sources that are combined together.

  Args:
  backend: Backend instance or factory function for file operations.
- sources: List of `MemorySource` configurations specifying paths and names.
+ sources: List of MemorySource configurations specifying paths and names.
  """

  state_schema = MemoryState
@@ -177,12 +176,9 @@ class MemoryMiddleware(AgentMiddleware):
  Args:
  backend: Backend instance or factory function that takes runtime
  and returns a backend. Use a factory for StateBackend.
- sources: List of memory file paths to load (e.g., `["~/.deepagents/AGENTS.md",
- "./.deepagents/AGENTS.md"]`).
-
- Display names are automatically derived from the paths.
-
- Sources are loaded in order.
+ sources: List of memory file paths to load (e.g., ["~/.deepagents/AGENTS.md",
+ "./.deepagents/AGENTS.md"]). Display names are automatically derived
+ from the paths. Sources are loaded in order.
  """
  self._backend = backend
  self.sources = sources
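A hedged sketch of the `sources` usage documented above; the example paths come from the docstring, while the backend construction (and its import path) is illustrative:

```python
from deepagents.middleware.memory import MemoryMiddleware
from deepagents.backends.filesystem import FilesystemBackend  # assumed import path

memory = MemoryMiddleware(
    backend=FilesystemBackend(root_dir="."),  # backend instance or factory, per the docstring
    sources=["~/.deepagents/AGENTS.md", "./.deepagents/AGENTS.md"],  # loaded in order
)
```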
@@ -560,8 +560,6 @@ class SkillsMiddleware(AgentMiddleware):
  lines = []
  for skill in skills:
  lines.append(f"- **{skill['name']}**: {skill['description']}")
- if skill["allowed_tools"]:
- lines.append(f" -> Allowed tools: {', '.join(skill['allowed_tools'])}")
  lines.append(f" -> Read `{skill['path']}` for full instructions")

  return "\n".join(lines)
@@ -603,7 +601,7 @@ class SkillsMiddleware(AgentMiddleware):
  config: Runnable config.

  Returns:
- State update with `skills_metadata` populated, or `None` if already present
+ State update with skills_metadata populated, or None if already present
  """
  # Skip if skills_metadata is already present in state (even if empty)
  if "skills_metadata" in state:
@@ -638,7 +636,7 @@ class SkillsMiddleware(AgentMiddleware):
  config: Runnable config.

  Returns:
- State update with `skills_metadata` populated, or `None` if already present
+ State update with skills_metadata populated, or None if already present
  """
  # Skip if skills_metadata is already present in state (even if empty)
  if "skills_metadata" in state: