deepagents 0.3.6__py3-none-any.whl → 0.3.7a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepagents/backends/filesystem.py +100 -22
- deepagents/backends/sandbox.py +1 -1
- deepagents/backends/store.py +85 -0
- deepagents/backends/utils.py +1 -1
- deepagents/graph.py +18 -3
- deepagents/middleware/_utils.py +23 -0
- deepagents/middleware/filesystem.py +129 -9
- deepagents/middleware/memory.py +6 -8
- deepagents/middleware/skills.py +6 -7
- deepagents/middleware/subagents.py +18 -7
- {deepagents-0.3.6.dist-info → deepagents-0.3.7a1.dist-info}/METADATA +5 -3
- deepagents-0.3.7a1.dist-info/RECORD +21 -0
- deepagents-0.3.6.dist-info/RECORD +0 -20
- {deepagents-0.3.6.dist-info → deepagents-0.3.7a1.dist-info}/WHEEL +0 -0
- {deepagents-0.3.6.dist-info → deepagents-0.3.7a1.dist-info}/top_level.txt +0 -0
deepagents/backends/filesystem.py CHANGED
@@ -1,11 +1,4 @@
-"""FilesystemBackend
-
-Security and search upgrades:
-- Secure path resolution with root containment when in virtual_mode (sandboxed to cwd)
-- Prevent symlink-following on file I/O using O_NOFOLLOW when available
-- Ripgrep-powered grep with JSON parsing, plus Python fallback with regex
-  and optional glob include filtering, while preserving virtual path behavior
-"""
+"""`FilesystemBackend`: Read and write files directly from the filesystem."""
 
 import json
 import os
@@ -49,9 +42,20 @@ class FilesystemBackend(BackendProtocol):
         """Initialize filesystem backend.
 
         Args:
-            root_dir: Optional root directory for file operations.
-
-
+            root_dir: Optional root directory for file operations.
+
+                If provided, all file paths will be resolved relative to this directory.
+                If not provided, uses the current working directory.
+            virtual_mode: Enables sandboxed operation where all paths are treated as
+                virtual paths rooted at `root_dir`.
+
+                Path traversal (using `..` or `~`) is disallowed and all resolved paths
+                must remain within the root directory. When `False` (default), absolute
+                paths are allowed as-is and relative paths resolve under cwd.
+            max_file_size_mb: Maximum file size in megabytes for operations like
+                grep's Python fallback search.
+
+                Files exceeding this limit are skipped during search. Defaults to 10 MB.
         """
         self.cwd = Path(root_dir).resolve() if root_dir else Path.cwd()
         self.virtual_mode = virtual_mode
@@ -60,16 +64,22 @@ class FilesystemBackend(BackendProtocol):
     def _resolve_path(self, key: str) -> Path:
         """Resolve a file path with security checks.
 
-        When virtual_mode=True
-        self.cwd
-
+        When `virtual_mode=True`, treat incoming paths as virtual absolute paths under
+        `self.cwd`, disallow traversal (`..`, `~`) and ensure resolved path stays within
+        root.
+
+        When `virtual_mode=False`, preserve legacy behavior: absolute paths are allowed
         as-is; relative paths resolve under cwd.
 
         Args:
-            key: File path (absolute, relative, or virtual when virtual_mode=True)
+            key: File path (absolute, relative, or virtual when `virtual_mode=True`).
 
         Returns:
-            Resolved absolute Path object
+            Resolved absolute `Path` object.
+
+        Raises:
+            ValueError: If path traversal is attempted in `virtual_mode` or if the
+                resolved path escapes the root directory.
         """
         if self.virtual_mode:
             vpath = key if key.startswith("/") else "/" + key
@@ -94,8 +104,9 @@ class FilesystemBackend(BackendProtocol):
             path: Absolute directory path to list files from.
 
         Returns:
-            List of FileInfo
-
+            List of `FileInfo`-like dicts for files and directories directly in the
+                directory. Directories have a trailing `/` in their path and
+                `is_dir=True`.
         """
         dir_path = self._resolve_path(path)
         if not dir_path.exists() or not dir_path.is_dir():
@@ -242,7 +253,14 @@ class FilesystemBackend(BackendProtocol):
         content: str,
     ) -> WriteResult:
         """Create a new file with content.
-
+
+        Args:
+            file_path: Path where the new file will be created.
+            content: Text content to write to the file.
+
+        Returns:
+            `WriteResult` with path on success, or error message if the file
+                already exists or write fails. External storage sets `files_update=None`.
         """
         resolved_path = self._resolve_path(file_path)
 
@@ -273,7 +291,18 @@ class FilesystemBackend(BackendProtocol):
         replace_all: bool = False,
     ) -> EditResult:
         """Edit a file by replacing string occurrences.
-
+
+        Args:
+            file_path: Path to the file to edit.
+            old_string: The text to search for and replace.
+            new_string: The replacement text.
+            replace_all: If `True`, replace all occurrences. If `False` (default),
+                replace only if exactly one occurrence exists.
+
+        Returns:
+            `EditResult` with path and occurrence count on success, or error
+                message if file not found or replacement fails. External storage sets
+                `files_update=None`.
         """
         resolved_path = self._resolve_path(file_path)
 
@@ -311,6 +340,19 @@ class FilesystemBackend(BackendProtocol):
         path: str | None = None,
         glob: str | None = None,
     ) -> list[GrepMatch] | str:
+        """Search for a regex pattern in files.
+
+        Uses ripgrep if available, falling back to Python regex search.
+
+        Args:
+            pattern: Regular expression pattern to search for.
+            path: Directory or file path to search in. Defaults to current directory.
+            glob: Optional glob pattern to filter which files to search.
+
+        Returns:
+            List of GrepMatch dicts containing path, line number, and matched text.
+            Returns an error string if the regex pattern is invalid.
+        """
         # Validate regex
         try:
             re.compile(pattern)
@@ -338,6 +380,17 @@ class FilesystemBackend(BackendProtocol):
         return matches
 
     def _ripgrep_search(self, pattern: str, base_full: Path, include_glob: str | None) -> dict[str, list[tuple[int, str]]] | None:
+        """Search using ripgrep with JSON output parsing.
+
+        Args:
+            pattern: Regex pattern to search for.
+            base_full: Resolved base path to search in.
+            include_glob: Optional glob pattern to filter files.
+
+        Returns:
+            Dict mapping file paths to list of `(line_number, line_text)` tuples.
+            Returns `None` if ripgrep is unavailable or times out.
+        """
         cmd = ["rg", "--json"]
         if include_glob:
             cmd.extend(["--glob", include_glob])
@@ -383,6 +436,18 @@ class FilesystemBackend(BackendProtocol):
         return results
 
     def _python_search(self, pattern: str, base_full: Path, include_glob: str | None) -> dict[str, list[tuple[int, str]]]:
+        """Fallback search using Python regex when ripgrep is unavailable.
+
+        Recursively searches files, respecting `max_file_size_bytes` limit.
+
+        Args:
+            pattern: Regex pattern to search for.
+            base_full: Resolved base path to search in.
+            include_glob: Optional glob pattern to filter files by name.
+
+        Returns:
+            Dict mapping file paths to list of `(line_number, line_text)` tuples.
+        """
        try:
             regex = re.compile(pattern)
         except re.error:
@@ -392,7 +457,10 @@ class FilesystemBackend(BackendProtocol):
         root = base_full if base_full.is_dir() else base_full.parent
 
         for fp in root.rglob("*"):
-
+            try:
+                if not fp.is_file():
+                    continue
+            except (PermissionError, OSError):
                 continue
             if include_glob and not wcglob.globmatch(fp.name, include_glob, flags=wcglob.BRACE):
                 continue
@@ -419,6 +487,16 @@ class FilesystemBackend(BackendProtocol):
         return results
 
     def glob_info(self, pattern: str, path: str = "/") -> list[FileInfo]:
+        """Find files matching a glob pattern.
+
+        Args:
+            pattern: Glob pattern to match files against (e.g., `'*.py'`, `'**/*.txt'`).
+            path: Base directory to search from. Defaults to root (`/`).
+
+        Returns:
+            List of `FileInfo` dicts for matching files, sorted by path. Each dict
+                contains `path`, `is_dir`, `size`, and `modified_at` fields.
+        """
         if pattern.startswith("/"):
             pattern = pattern.lstrip("/")
 
@@ -432,7 +510,7 @@ class FilesystemBackend(BackendProtocol):
         for matched_path in search_path.rglob(pattern):
             try:
                 is_file = matched_path.is_file()
-            except OSError:
+            except (PermissionError, OSError):
                 continue
             if not is_file:
                 continue
deepagents/backends/sandbox.py CHANGED
deepagents/backends/store.py CHANGED
@@ -279,6 +279,30 @@ class StoreBackend(BackendProtocol):
 
         return format_read_response(file_data, offset, limit)
 
+    async def aread(
+        self,
+        file_path: str,
+        offset: int = 0,
+        limit: int = 2000,
+    ) -> str:
+        """Async version of read using native store async methods.
+
+        This avoids sync calls in async context by using store.aget directly.
+        """
+        store = self._get_store()
+        namespace = self._get_namespace()
+        item: Item | None = await store.aget(namespace, file_path)
+
+        if item is None:
+            return f"Error: File '{file_path}' not found"
+
+        try:
+            file_data = self._convert_store_item_to_file_data(item)
+        except ValueError as e:
+            return f"Error: {e}"
+
+        return format_read_response(file_data, offset, limit)
+
     def write(
         self,
         file_path: str,
@@ -301,6 +325,29 @@ class StoreBackend(BackendProtocol):
         store.put(namespace, file_path, store_value)
         return WriteResult(path=file_path, files_update=None)
 
+    async def awrite(
+        self,
+        file_path: str,
+        content: str,
+    ) -> WriteResult:
+        """Async version of write using native store async methods.
+
+        This avoids sync calls in async context by using store.aget/aput directly.
+        """
+        store = self._get_store()
+        namespace = self._get_namespace()
+
+        # Check if file exists using async method
+        existing = await store.aget(namespace, file_path)
+        if existing is not None:
+            return WriteResult(error=f"Cannot write to {file_path} because it already exists. Read and then make an edit, or write to a new path.")
+
+        # Create new file using async method
+        file_data = create_file_data(content)
+        store_value = self._convert_file_data_to_store_value(file_data)
+        await store.aput(namespace, file_path, store_value)
+        return WriteResult(path=file_path, files_update=None)
+
     def edit(
         self,
         file_path: str,
@@ -338,6 +385,44 @@ class StoreBackend(BackendProtocol):
         store.put(namespace, file_path, store_value)
         return EditResult(path=file_path, files_update=None, occurrences=int(occurrences))
 
+    async def aedit(
+        self,
+        file_path: str,
+        old_string: str,
+        new_string: str,
+        replace_all: bool = False,
+    ) -> EditResult:
+        """Async version of edit using native store async methods.
+
+        This avoids sync calls in async context by using store.aget/aput directly.
+        """
+        store = self._get_store()
+        namespace = self._get_namespace()
+
+        # Get existing file using async method
+        item = await store.aget(namespace, file_path)
+        if item is None:
+            return EditResult(error=f"Error: File '{file_path}' not found")
+
+        try:
+            file_data = self._convert_store_item_to_file_data(item)
+        except ValueError as e:
+            return EditResult(error=f"Error: {e}")
+
+        content = file_data_to_string(file_data)
+        result = perform_string_replacement(content, old_string, new_string, replace_all)
+
+        if isinstance(result, str):
+            return EditResult(error=result)
+
+        new_content, occurrences = result
+        new_file_data = update_file_data(file_data, new_content)
+
+        # Update file in store using async method
+        store_value = self._convert_file_data_to_store_value(new_file_data)
+        await store.aput(namespace, file_path, store_value)
+        return EditResult(path=file_path, files_update=None, occurrences=int(occurrences))
+
     # Removed legacy grep() convenience to keep lean surface
 
     def grep_raw(
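The new `aread`/`awrite`/`aedit` methods mirror the sync API but call `store.aget`/`store.aput` directly. A rough sketch of how a caller would use them; constructing the `StoreBackend` itself is not part of this diff, so `backend` is assumed to be an already-configured instance.

```python
# Sketch only: `backend` is assumed to be a configured StoreBackend instance.
async def update_plan(backend) -> None:
    # awrite refuses to overwrite an existing key, like the sync write().
    result = await backend.awrite("/notes/plan.md", "draft outline\n")
    if result.error:
        print(result.error)
        return

    # aedit performs the same string replacement as edit(), without blocking the event loop.
    edit = await backend.aedit("/notes/plan.md", "draft", "final", replace_all=True)
    if edit.error:
        print(edit.error)
        return
    print(f"replaced {edit.occurrences} occurrence(s)")

    # aread returns the formatted file contents, honoring offset/limit pagination.
    print(await backend.aread("/notes/plan.md", offset=0, limit=100))
```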
deepagents/backends/utils.py CHANGED
@@ -15,7 +15,7 @@ import wcmatch.glob as wcglob
 from deepagents.backends.protocol import FileInfo as _FileInfo, GrepMatch as _GrepMatch
 
 EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents"
-MAX_LINE_LENGTH =
+MAX_LINE_LENGTH = 5000
 LINE_NUMBER_WIDTH = 6
 TOOL_RESULT_TOKEN_LIMIT = 20000  # Same threshold as eviction
 TRUNCATION_GUIDANCE = "... [results truncated, try being more specific with your parameters]"
deepagents/graph.py CHANGED
@@ -12,6 +12,7 @@ from langchain.chat_models import init_chat_model
 from langchain_anthropic import ChatAnthropic
 from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware
 from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import SystemMessage
 from langchain_core.tools import BaseTool
 from langgraph.cache.base import BaseCache
 from langgraph.graph.state import CompiledStateGraph
@@ -45,7 +46,7 @@ def create_deep_agent(
     model: str | BaseChatModel | None = None,
     tools: Sequence[BaseTool | Callable | dict[str, Any]] | None = None,
     *,
-    system_prompt: str | None = None,
+    system_prompt: str | SystemMessage | None = None,
     middleware: Sequence[AgentMiddleware] = (),
     subagents: list[SubAgent | CompiledSubAgent] | None = None,
     skills: list[str] | None = None,
@@ -83,7 +84,7 @@ def create_deep_agent(
             file management, and subagent spawning.
         system_prompt: The additional instructions the agent should have.
 
-            Will go in the system prompt.
+            Will go in the system prompt. Can be a string or a `SystemMessage`.
         middleware: Additional middleware to apply after standard middleware.
         subagents: The subagents to use.
 
@@ -202,9 +203,23 @@ def create_deep_agent(
     if interrupt_on is not None:
         deepagent_middleware.append(HumanInTheLoopMiddleware(interrupt_on=interrupt_on))
 
+    # Combine system_prompt with BASE_AGENT_PROMPT
+    if system_prompt is None:
+        final_system_prompt: str | SystemMessage = BASE_AGENT_PROMPT
+    elif isinstance(system_prompt, SystemMessage):
+        # SystemMessage: append BASE_AGENT_PROMPT to content_blocks
+        new_content = [
+            *system_prompt.content_blocks,
+            {"type": "text", "text": f"\n\n{BASE_AGENT_PROMPT}"},
+        ]
+        final_system_prompt = SystemMessage(content=new_content)
+    else:
+        # String: simple concatenation
+        final_system_prompt = system_prompt + "\n\n" + BASE_AGENT_PROMPT
+
     return create_agent(
         model,
-        system_prompt=
+        system_prompt=final_system_prompt,
         tools=tools,
         middleware=deepagent_middleware,
         response_format=response_format,
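To make the new `system_prompt` handling concrete, here is a short sketch of both accepted forms; this assumes the usual top-level `create_deep_agent` export described in the README, and the prompt text is illustrative.

```python
from langchain_core.messages import SystemMessage

from deepagents import create_deep_agent

# A plain string still works: BASE_AGENT_PROMPT is appended via string concatenation.
agent = create_deep_agent(system_prompt="You are a careful research assistant.")

# New in 0.3.7a1: a SystemMessage is accepted as well; BASE_AGENT_PROMPT is appended
# to its content blocks instead.
agent = create_deep_agent(
    system_prompt=SystemMessage(content="You are a careful research assistant."),
)
```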
deepagents/middleware/_utils.py
@@ -0,0 +1,23 @@
+"""Utility functions for middleware."""
+
+from langchain_core.messages import SystemMessage
+
+
+def append_to_system_message(
+    system_message: SystemMessage | None,
+    text: str,
+) -> SystemMessage:
+    """Append text to a system message.
+
+    Args:
+        system_message: Existing system message or None.
+        text: Text to add to the system message.
+
+    Returns:
+        New SystemMessage with the text appended.
+    """
+    new_content: list[str | dict[str, str]] = list(system_message.content_blocks) if system_message else []
+    if new_content:
+        text = f"\n\n{text}"
+    new_content.append({"type": "text", "text": text})
+    return SystemMessage(content=new_content)
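Since the later middleware changes all route through `append_to_system_message`, here is a small sketch of its behavior as defined above (the message text is illustrative):

```python
from langchain_core.messages import SystemMessage

from deepagents.middleware._utils import append_to_system_message

# Appends a new text block (separated by a blank line) to an existing system message.
base = SystemMessage(content="You are a helpful agent.")
combined = append_to_system_message(base, "Prefer reading files before editing them.")

# With no existing message, the result contains just the appended text block.
fresh = append_to_system_message(None, "Prefer reading files before editing them.")
```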
deepagents/middleware/filesystem.py CHANGED
@@ -33,12 +33,12 @@ from deepagents.backends.utils import (
     sanitize_tool_call_id,
     truncate_if_too_long,
 )
+from deepagents.middleware._utils import append_to_system_message
 
 EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents"
-MAX_LINE_LENGTH = 2000
 LINE_NUMBER_WIDTH = 6
 DEFAULT_READ_OFFSET = 0
-DEFAULT_READ_LIMIT =
+DEFAULT_READ_LIMIT = 100
 
 
 class FileData(TypedDict):
@@ -167,14 +167,14 @@ Assume this tool is able to read all files on the machine. If the User provides
 
 Usage:
 - The file_path parameter must be an absolute path, not a relative path
-- By default, it reads up to
+- By default, it reads up to 100 lines starting from the beginning of the file
 - **IMPORTANT for large files and codebase exploration**: Use pagination with offset and limit parameters to avoid context overflow
 - First scan: read_file(path, limit=100) to see file structure
 - Read more sections: read_file(path, offset=100, limit=200) for next 200 lines
 - Only omit limit (read full file) when necessary for editing
 - Specify offset and limit: read_file(path, offset=0, limit=100) reads first 100 lines
-- Any lines longer than 2000 characters will be truncated
 - Results are returned using cat -n format, with line numbers starting at 1
+- Lines longer than 5,000 characters will be split into multiple lines with continuation markers (e.g., 5.1, 5.2, etc.). When you specify a limit, these continuation lines count towards the limit.
 - You have the capability to call multiple tools in a single response. It is always better to speculatively read multiple files as a batch that are potentially useful.
 - If you read a file that exists but has empty contents you will receive a system reminder warning in place of file contents.
 - You should ALWAYS make sure a file has been read before editing it."""
@@ -373,7 +373,14 @@ def _read_file_tool_generator(
         """Synchronous wrapper for read_file tool."""
         resolved_backend = _get_backend(backend, runtime)
         file_path = _validate_path(file_path)
-
+        result = resolved_backend.read(file_path, offset=offset, limit=limit)
+
+        lines = result.splitlines(keepends=True)
+        if len(lines) > limit:
+            lines = lines[:limit]
+            result = "".join(lines)
+
+        return result
 
     async def async_read_file(
         file_path: str,
@@ -384,7 +391,14 @@ def _read_file_tool_generator(
         """Asynchronous wrapper for read_file tool."""
         resolved_backend = _get_backend(backend, runtime)
         file_path = _validate_path(file_path)
-
+        result = await resolved_backend.aread(file_path, offset=offset, limit=limit)
+
+        lines = result.splitlines(keepends=True)
+        if len(lines) > limit:
+            lines = lines[:limit]
+            result = "".join(lines)
+
+        return result
 
     return StructuredTool.from_function(
         name="read_file",
@@ -933,7 +947,8 @@ class FilesystemMiddleware(AgentMiddleware):
         system_prompt = "\n\n".join(prompt_parts)
 
         if system_prompt:
-
+            new_system_message = append_to_system_message(request.system_message, system_prompt)
+            request = request.override(system_message=new_system_message)
 
         return handler(request)
 
@@ -980,7 +995,8 @@ class FilesystemMiddleware(AgentMiddleware):
         system_prompt = "\n\n".join(prompt_parts)
 
         if system_prompt:
-
+            new_system_message = append_to_system_message(request.system_message, system_prompt)
+            request = request.override(system_message=new_system_message)
 
         return await handler(request)
 
@@ -1057,6 +1073,64 @@ class FilesystemMiddleware(AgentMiddleware):
         )
         return processed_message, result.files_update
 
+    async def _aprocess_large_message(
+        self,
+        message: ToolMessage,
+        resolved_backend: BackendProtocol,
+    ) -> tuple[ToolMessage, dict[str, FileData] | None]:
+        """Async version of _process_large_message.
+
+        Uses async backend methods to avoid sync calls in async context.
+        See _process_large_message for full documentation.
+        """
+        # Early exit if eviction not configured
+        if not self.tool_token_limit_before_evict:
+            return message, None
+
+        # Convert content to string once for both size check and eviction
+        # Special case: single text block - extract text directly for readability
+        if (
+            isinstance(message.content, list)
+            and len(message.content) == 1
+            and isinstance(message.content[0], dict)
+            and message.content[0].get("type") == "text"
+            and "text" in message.content[0]
+        ):
+            content_str = str(message.content[0]["text"])
+        elif isinstance(message.content, str):
+            content_str = message.content
+        else:
+            # Multiple blocks or non-text content - stringify entire structure
+            content_str = str(message.content)
+
+        # Check if content exceeds eviction threshold
+        # Using 4 chars per token as a conservative approximation (actual ratio varies by content)
+        # This errs on the high side to avoid premature eviction of content that might fit
+        if len(content_str) <= 4 * self.tool_token_limit_before_evict:
+            return message, None
+
+        # Write content to filesystem using async method
+        sanitized_id = sanitize_tool_call_id(message.tool_call_id)
+        file_path = f"/large_tool_results/{sanitized_id}"
+        result = await resolved_backend.awrite(file_path, content_str)
+        if result.error:
+            return message, None
+
+        # Create truncated preview for the replacement message
+        content_sample = format_content_with_line_numbers([line[:1000] for line in content_str.splitlines()[:10]], start_line=1)
+        replacement_text = TOO_LARGE_TOOL_MSG.format(
+            tool_call_id=message.tool_call_id,
+            file_path=file_path,
+            content_sample=content_sample,
+        )
+
+        # Always return as plain string after eviction
+        processed_message = ToolMessage(
+            content=replacement_text,
+            tool_call_id=message.tool_call_id,
+        )
+        return processed_message, result.files_update
+
     def _intercept_large_tool_result(self, tool_result: ToolMessage | Command, runtime: ToolRuntime) -> ToolMessage | Command:
         """Intercept and process large tool results before they're added to state.
 
@@ -1113,6 +1187,52 @@ class FilesystemMiddleware(AgentMiddleware):
             return Command(update={**update, "messages": processed_messages, "files": accumulated_file_updates})
         raise AssertionError(f"Unreachable code reached in _intercept_large_tool_result: for tool_result of type {type(tool_result)}")
 
+    async def _aintercept_large_tool_result(self, tool_result: ToolMessage | Command, runtime: ToolRuntime) -> ToolMessage | Command:
+        """Async version of _intercept_large_tool_result.
+
+        Uses async backend methods to avoid sync calls in async context.
+        See _intercept_large_tool_result for full documentation.
+        """
+        if isinstance(tool_result, ToolMessage):
+            resolved_backend = self._get_backend(runtime)
+            processed_message, files_update = await self._aprocess_large_message(
+                tool_result,
+                resolved_backend,
+            )
+            return (
+                Command(
+                    update={
+                        "files": files_update,
+                        "messages": [processed_message],
+                    }
+                )
+                if files_update is not None
+                else processed_message
+            )
+
+        if isinstance(tool_result, Command):
+            update = tool_result.update
+            if update is None:
+                return tool_result
+            command_messages = update.get("messages", [])
+            accumulated_file_updates = dict(update.get("files", {}))
+            resolved_backend = self._get_backend(runtime)
+            processed_messages = []
+            for message in command_messages:
+                if not isinstance(message, ToolMessage):
+                    processed_messages.append(message)
+                    continue
+
+                processed_message, files_update = await self._aprocess_large_message(
+                    message,
+                    resolved_backend,
+                )
+                processed_messages.append(processed_message)
+                if files_update is not None:
+                    accumulated_file_updates.update(files_update)
+            return Command(update={**update, "messages": processed_messages, "files": accumulated_file_updates})
+        raise AssertionError(f"Unreachable code reached in _aintercept_large_tool_result: for tool_result of type {type(tool_result)}")
+
     def wrap_tool_call(
         self,
         request: ToolCallRequest,
@@ -1151,4 +1271,4 @@ class FilesystemMiddleware(AgentMiddleware):
             return await handler(request)
 
         tool_result = await handler(request)
-        return self.
+        return await self._aintercept_large_tool_result(tool_result, request.runtime)
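As a rough worked example of the eviction threshold used by `_aprocess_large_message` above: the middleware approximates 4 characters per token, so with the 20000-token value that `TOOL_RESULT_TOKEN_LIMIT` in `backends/utils.py` documents as the eviction threshold, tool results over roughly 80,000 characters are written to the backend and replaced with a preview. The numbers below only illustrate the arithmetic.

```python
# Approximation used by the middleware: ~4 characters per token.
tool_token_limit_before_evict = 20000                # tokens (TOOL_RESULT_TOKEN_LIMIT)
char_threshold = 4 * tool_token_limit_before_evict   # 80,000 characters

tool_result_text = "x" * 100_000
if len(tool_result_text) > char_threshold:
    # The real code writes the content to /large_tool_results/<sanitized tool_call_id>
    # via backend.awrite() and swaps in a short numbered preview of the first 10 lines.
    print("result would be evicted to the filesystem backend")
```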
deepagents/middleware/memory.py CHANGED
@@ -53,7 +53,6 @@ import logging
 from collections.abc import Awaitable, Callable
 from typing import TYPE_CHECKING, Annotated, NotRequired, TypedDict
 
-from langchain.messages import SystemMessage
 from langchain_core.runnables import RunnableConfig
 
 if TYPE_CHECKING:
@@ -69,6 +68,8 @@ from langchain.agents.middleware.types import (
 from langchain.tools import ToolRuntime
 from langgraph.runtime import Runtime
 
+from deepagents.middleware._utils import append_to_system_message
+
 logger = logging.getLogger(__name__)
 
 
@@ -354,23 +355,20 @@ class MemoryMiddleware(AgentMiddleware):
         return MemoryStateUpdate(memory_contents=contents)
 
     def modify_request(self, request: ModelRequest) -> ModelRequest:
-        """Inject memory content into the system
+        """Inject memory content into the system message.
 
         Args:
             request: Model request to modify.
 
         Returns:
-            Modified request with memory injected into system
+            Modified request with memory injected into system message.
         """
         contents = request.state.get("memory_contents", {})
         agent_memory = self._format_agent_memory(contents)
 
-
-            system_prompt = agent_memory + "\n\n" + request.system_prompt
-        else:
-            system_prompt = agent_memory
+        new_system_message = append_to_system_message(request.system_message, agent_memory)
 
-        return request.override(system_message=
+        return request.override(system_message=new_system_message)
 
     def wrap_model_call(
         self,
deepagents/middleware/skills.py CHANGED
@@ -114,6 +114,8 @@ from langchain_core.runnables import RunnableConfig
 from langgraph.prebuilt import ToolRuntime
 from langgraph.runtime import Runtime
 
+from deepagents.middleware._utils import append_to_system_message
+
 logger = logging.getLogger(__name__)
 
 # Security: Maximum size for SKILL.md files to prevent DoS attacks (10MB)
@@ -563,13 +565,13 @@ class SkillsMiddleware(AgentMiddleware):
         return "\n".join(lines)
 
     def modify_request(self, request: ModelRequest) -> ModelRequest:
-        """Inject skills documentation into a model request's system
+        """Inject skills documentation into a model request's system message.
 
         Args:
             request: Model request to modify
 
         Returns:
-            New model request with skills documentation injected into system
+            New model request with skills documentation injected into system message
         """
         skills_metadata = request.state.get("skills_metadata", [])
         skills_locations = self._format_skills_locations()
@@ -580,12 +582,9 @@ class SkillsMiddleware(AgentMiddleware):
             skills_list=skills_list,
         )
 
-
-            system_prompt = request.system_prompt + "\n\n" + skills_section
-        else:
-            system_prompt = skills_section
+        new_system_message = append_to_system_message(request.system_message, skills_section)
 
-        return request.override(
+        return request.override(system_message=new_system_message)
 
     def before_agent(self, state: SkillsState, runtime: Runtime, config: RunnableConfig) -> SkillsStateUpdate | None:
         """Load skills metadata before agent execution (synchronous).
deepagents/middleware/subagents.py CHANGED
@@ -13,6 +13,8 @@ from langchain_core.runnables import Runnable
 from langchain_core.tools import StructuredTool
 from langgraph.types import Command
 
+from deepagents.middleware._utils import append_to_system_message
+
 
 class SubAgent(TypedDict):
     """Specification for an agent.
@@ -84,7 +86,16 @@ class CompiledSubAgent(TypedDict):
     """What this subagent does."""
 
     runnable: Runnable
-    """
+    """A custom agent implementation.
+
+    Create a custom agent using either:
+
+    1. LangChain's `create_agent()`: https://docs.langchain.com/oss/python/langchain/quickstart
+    2. A custom graph using langgraph: https://docs.langchain.com/oss/python/langgraph/quickstart
+
+    If you're creating a custom graph, make sure the state schema includes a 'messages' key.
+    This is required for the subagent to communicate results back to the main agent.
+    """
 
 
 DEFAULT_SUBAGENT_PROMPT = "In order to complete the objective that the user asks of you, you have access to a number of standard tools."
@@ -513,10 +524,10 @@ class SubAgentMiddleware(AgentMiddleware):
         request: ModelRequest,
         handler: Callable[[ModelRequest], ModelResponse],
     ) -> ModelResponse:
-        """Update the system
+        """Update the system message to include instructions on using subagents."""
         if self.system_prompt is not None:
-
-            return handler(request.override(
+            new_system_message = append_to_system_message(request.system_message, self.system_prompt)
+            return handler(request.override(system_message=new_system_message))
         return handler(request)
 
     async def awrap_model_call(
@@ -524,8 +535,8 @@ class SubAgentMiddleware(AgentMiddleware):
         request: ModelRequest,
         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
     ) -> ModelResponse:
-        """(async) Update the system
+        """(async) Update the system message to include instructions on using subagents."""
         if self.system_prompt is not None:
-
-            return await handler(request.override(
+            new_system_message = append_to_system_message(request.system_message, self.system_prompt)
+            return await handler(request.override(system_message=new_system_message))
         return await handler(request)
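The expanded `runnable` docstring above requires the subagent's state schema to include a `messages` key; here is a minimal sketch of wiring a `CompiledSubAgent`-style dict with LangChain's `create_agent()` (the name, description, prompt, and model string are placeholders):

```python
from langchain.agents import create_agent

# create_agent() produces a graph whose state already includes "messages",
# which is what the CompiledSubAgent contract requires.
research_runnable = create_agent(
    model="anthropic:claude-sonnet-4-5-20250929",
    system_prompt="You research a topic and report findings concisely.",
)

research_subagent = {
    "name": "researcher",
    "description": "Handles open-ended research questions for the main agent.",
    "runnable": research_runnable,
}
```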
{deepagents-0.3.6.dist-info → deepagents-0.3.7a1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: deepagents
-Version: 0.3.6
+Version: 0.3.7a1
 Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
 License: MIT
 Project-URL: Homepage, https://docs.langchain.com/oss/python/deepagents/overview
@@ -46,7 +46,7 @@ poetry add deepagents
 
 ## Usage
 
-> **Note:** `deepagents` requires using a LLM that supports [tool calling](https://
+> **Note:** `deepagents` requires using a LLM that supports [tool calling](https://docs.langchain.com/oss/python/langchain/overview).
 
 This example uses [Tavily](https://tavily.com/) as an example search provider, but you can substitute any search API (e.g., DuckDuckGo, SerpAPI, Brave Search). To run the example below, you will need to `pip install tavily-python`.
 
@@ -125,7 +125,7 @@ There are several parameters you can pass to `create_deep_agent` to create your
 
 ### `model`
 
-By default, `deepagents` uses `claude-sonnet-4-5-20250929`. You can customize this by passing any [LangChain model object](https://
+By default, `deepagents` uses `claude-sonnet-4-5-20250929`. You can customize this by passing any [LangChain model object](https://docs.langchain.com/oss/python/integrations/providers/overview).
 
 > **Tip:** Use the `provider:model` format (e.g., `openai:gpt-5`) to quickly switch between models. See the [reference](https://reference.langchain.com/python/langchain/models/#langchain.chat_models.init_chat_model(model)) for more info.
 
@@ -237,6 +237,7 @@ class CompiledSubAgent(TypedDict):
 ```
 
 **`SubAgent` fields:**
+
 - **name**: This is the name of the subagent, and how the main agent will call the subagent
 - **description**: This is the description of the subagent that is shown to the main agent
 - **system_prompt**: This is the system prompt used for the subagent
@@ -246,6 +247,7 @@ class CompiledSubAgent(TypedDict):
 - **interrupt_on** A custom interrupt config that specifies human-in-the-loop interactions for your tools.
 
 **CompiledSubAgent fields:**
+
 - **name**: This is the name of the subagent, and how the main agent will call the subagent
 - **description**: This is the description of the subagent that is shown to the main agent
 - **runnable**: A pre-built LangGraph graph/agent that will be used as the subagent. **Important:** The runnable's state schema must include a `messages` key. This is required for the subagent to communicate results back to the main agent.
deepagents-0.3.7a1.dist-info/RECORD
@@ -0,0 +1,21 @@
+deepagents/__init__.py,sha256=LHQm0v_7N9Gd4pmpRjhnlOCMIK2O0jQ4cEU8RiXEI8k,447
+deepagents/graph.py,sha256=Rk1qDzPpjSKGHc1NJ1CQcbFrFoMlPgKIGFm2EWW3--0,9802
+deepagents/backends/__init__.py,sha256=BOKu2cQ1OdMyO_l2rLqZQiXppYFmQbx7OIQb7WYwvZc,457
+deepagents/backends/composite.py,sha256=WZ_dnn63BmrU19ZJ5-m728f99pSa0Uq_CnwZjwmxz1U,26198
+deepagents/backends/filesystem.py,sha256=Okr4zUJkDc_tx01Sya8sHTsYzahsRiDdFWWX2A_G-RQ,24617
+deepagents/backends/protocol.py,sha256=HUmIrwYGduPfDcs_wtOzVU2QPA9kICZuGO-sUwxzz5I,15997
+deepagents/backends/sandbox.py,sha256=PE4-DkVRU5Z3zp4NViHCkNHUHKiFUKh55UAXCicBvRo,10884
+deepagents/backends/state.py,sha256=Qq4uRjKg6POEqLl4tNnWnXzbmLBpu3bZdMkcUROIgHw,7899
+deepagents/backends/store.py,sha256=9gdUQqPWChYgHVoopOUaocUdyUbFBpf-PxhTiXRXCto,18219
+deepagents/backends/utils.py,sha256=CE_HXddNTr954auqFIVgYLLD4Gdsfr9U8b384g07Wuc,13932
+deepagents/middleware/__init__.py,sha256=2smUxjwghA3Eml_wp0kd4dAY-rwLyW-XQPBE3dAoo50,467
+deepagents/middleware/_utils.py,sha256=ojy62kQLASQ2GabevWJaPGLItyccdNxLMPpYV25Lf20,687
+deepagents/middleware/filesystem.py,sha256=z6jrmXuksloebN6ZlNbjKw847AC8IjLS-TYz6ELyXFY,52745
+deepagents/middleware/memory.py,sha256=1w-laeDHCiY0hxf5fURFnwcXvI78FhPo7_mYjvZudWE,15826
+deepagents/middleware/patch_tool_calls.py,sha256=PdNhxPaQqwnFkhEAZEE2kEzadTNAOO3_iJRA30WqpGE,1981
+deepagents/middleware/skills.py,sha256=3UhHWGzVlmX6qubvhFTx14LUhd6-ZK6nhkhxV8XrIMQ,24045
+deepagents/middleware/subagents.py,sha256=jU1a45CTlMNz1D3U9f3V5J2AoWzKMDXOf-55N6PNG00,26596
+deepagents-0.3.7a1.dist-info/METADATA,sha256=P8TN8XaOCgNTfy9R-7VLgVMl7S1WkAggGqJqtmubAj0,19764
+deepagents-0.3.7a1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+deepagents-0.3.7a1.dist-info/top_level.txt,sha256=drAzchOzPNePwpb3_pbPuvLuayXkN7SNqeIKMBWJoAo,11
+deepagents-0.3.7a1.dist-info/RECORD,,
deepagents-0.3.6.dist-info/RECORD
@@ -1,20 +0,0 @@
-deepagents/__init__.py,sha256=LHQm0v_7N9Gd4pmpRjhnlOCMIK2O0jQ4cEU8RiXEI8k,447
-deepagents/graph.py,sha256=oqcL-H14t-oYxc7RvMkNCge1twVjqogFcLCzH-4vSdc,9161
-deepagents/backends/__init__.py,sha256=BOKu2cQ1OdMyO_l2rLqZQiXppYFmQbx7OIQb7WYwvZc,457
-deepagents/backends/composite.py,sha256=WZ_dnn63BmrU19ZJ5-m728f99pSa0Uq_CnwZjwmxz1U,26198
-deepagents/backends/filesystem.py,sha256=kGBFuW3ie0LLz4wXCPGJm3WAiYpimJTgEwodJSnXWCs,21477
-deepagents/backends/protocol.py,sha256=HUmIrwYGduPfDcs_wtOzVU2QPA9kICZuGO-sUwxzz5I,15997
-deepagents/backends/sandbox.py,sha256=8Bi8itqjW2PpXORlIfT8thMN1aBXExgHz8cm8xwVaxI,10864
-deepagents/backends/state.py,sha256=Qq4uRjKg6POEqLl4tNnWnXzbmLBpu3bZdMkcUROIgHw,7899
-deepagents/backends/store.py,sha256=0mmeTsim4J8bjcf62dljwNrDv4PavT2KHdGbaBzVzRE,15197
-deepagents/backends/utils.py,sha256=wXMzfrUxp-ZAKlbl3QFZXlSSPRmIXQIUEehqsy2Agy8,13933
-deepagents/middleware/__init__.py,sha256=2smUxjwghA3Eml_wp0kd4dAY-rwLyW-XQPBE3dAoo50,467
-deepagents/middleware/filesystem.py,sha256=Qx9NuRZPNuK1NVYomPCRauDY0ZqZ5j_wro9MRYyvcx0,47563
-deepagents/middleware/memory.py,sha256=E1UAtBAyIxkyNHuG2-j_X_fClMbbSwobdKa3e_P0FDk,15883
-deepagents/middleware/patch_tool_calls.py,sha256=PdNhxPaQqwnFkhEAZEE2kEzadTNAOO3_iJRA30WqpGE,1981
-deepagents/middleware/skills.py,sha256=EABvIq4ES_NHGFL9USUnlH1WwiZgnAacoOLz2kkkVkw,24043
-deepagents/middleware/subagents.py,sha256=c_JxFb2Z3yBWEInqNEciC9Rlu4uGo1tpuN9tLXivWeY,26196
-deepagents-0.3.6.dist-info/METADATA,sha256=KpjDBOFZXmVrZYfuec3UiFsm193LLjfkigSlFhULJE4,19743
-deepagents-0.3.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-deepagents-0.3.6.dist-info/top_level.txt,sha256=drAzchOzPNePwpb3_pbPuvLuayXkN7SNqeIKMBWJoAo,11
-deepagents-0.3.6.dist-info/RECORD,,
File without changes
|
|
File without changes
|