deepagents-0.2.2-py3-none-any.whl → deepagents-0.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepagents/backends/utils.py +51 -52
- {deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/METADATA +7 -1
- {deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/RECORD +6 -7
- deepagents/middleware/agent_memory.py +0 -222
- {deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/WHEEL +0 -0
- {deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/licenses/LICENSE +0 -0
- {deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/top_level.txt +0 -0
deepagents/backends/utils.py  CHANGED

@@ -6,10 +6,11 @@ enable composition without fragile string parsing.
 """
 
 import re
-import wcmatch.glob as wcglob
 from datetime import UTC, datetime
 from pathlib import Path
-from typing import Any, Literal, TypedDict
+from typing import Any, Literal, TypedDict
+
+import wcmatch.glob as wcglob
 
 EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents"
 MAX_LINE_LENGTH = 10000
@@ -24,6 +25,7 @@ class FileInfo(TypedDict, total=False):
     Minimal contract used across backends. Only "path" is required.
     Other fields are best-effort and may be absent depending on backend.
     """
+
     path: str
     is_dir: bool
     size: int  # bytes (approx)

@@ -32,14 +34,15 @@ class FileInfo(TypedDict, total=False):
 
 class GrepMatch(TypedDict):
     """Structured grep match entry."""
+
     path: str
     line: int
     text: str
 
 
 def sanitize_tool_call_id(tool_call_id: str) -> str:
-    """Sanitize tool_call_id to prevent path traversal and separator issues.
-
+    r"""Sanitize tool_call_id to prevent path traversal and separator issues.
+
     Replaces dangerous characters (., /, \) with underscores.
     """
     sanitized = tool_call_id.replace(".", "_").replace("/", "_").replace("\\", "_")
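The only substantive edit to sanitize_tool_call_id is the r-prefix on its docstring: the \) sequence in the body line is not a recognized escape, and unrecognized escapes in plain string literals have been deprecated since Python 3.6 (SyntaxWarning on 3.12+). A small illustration of the equivalence, not code from the package:

# Both literals spell the same text; the raw form matches what the docstring
# now says verbatim, without relying on an invalid "\)" escape.
escaped = "Replaces dangerous characters (., /, \\) with underscores."
raw = r"Replaces dangerous characters (., /, \) with underscores."
assert escaped == raw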
@@ -94,10 +97,10 @@ def format_content_with_line_numbers(
 
 def check_empty_content(content: str) -> str | None:
     """Check if content is empty and return warning message.
-
+
     Args:
         content: Content to check
-
+
     Returns:
         Warning message if empty, None otherwise
     """

@@ -108,10 +111,10 @@ def check_empty_content(content: str) -> str | None:
 
 def file_data_to_string(file_data: dict[str, Any]) -> str:
     """Convert FileData to plain string content.
-
+
     Args:
         file_data: FileData dict with 'content' key
-
+
     Returns:
         Content as string with lines joined by newlines
     """

@@ -164,12 +167,12 @@ def format_read_response(
     limit: int,
 ) -> str:
     """Format file data for read response with line numbers.
-
+
     Args:
         file_data: FileData dict
         offset: Line offset (0-indexed)
         limit: Maximum number of lines
-
+
     Returns:
         Formatted content or error message
     """

@@ -177,14 +180,14 @@ def format_read_response(
     empty_msg = check_empty_content(content)
     if empty_msg:
         return empty_msg
-
+
     lines = content.splitlines()
     start_idx = offset
     end_idx = min(start_idx + limit, len(lines))
-
+
     if start_idx >= len(lines):
         return f"Error: Line offset {offset} exceeds file length ({len(lines)} lines)"
-
+
     selected_lines = lines[start_idx:end_idx]
     return format_content_with_line_numbers(selected_lines, start_line=start_idx + 1)
 
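Aside from blank-line cleanup, format_read_response keeps the offset/limit slicing shown above. As a quick reference, the selection behaves like this minimal sketch with invented data:

lines = ["alpha", "beta", "gamma", "delta"]
offset, limit = 1, 2                          # offset is 0-indexed
end_idx = min(offset + limit, len(lines))
selected = lines[offset:end_idx]              # ["beta", "gamma"]
# formatted output numbers these starting at offset + 1, i.e. lines 2 and 3
print(selected)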
@@ -196,24 +199,24 @@ def perform_string_replacement(
     replace_all: bool,
 ) -> tuple[str, int] | str:
     """Perform string replacement with occurrence validation.
-
+
     Args:
         content: Original content
         old_string: String to replace
         new_string: Replacement string
         replace_all: Whether to replace all occurrences
-
+
     Returns:
         Tuple of (new_content, occurrences) on success, or error message string
     """
     occurrences = content.count(old_string)
-
+
     if occurrences == 0:
         return f"Error: String not found in file: '{old_string}'"
-
+
     if occurrences > 1 and not replace_all:
         return f"Error: String '{old_string}' appears {occurrences} times in file. Use replace_all=True to replace all instances, or provide a more specific string with surrounding context."
-
+
     new_content = content.replace(old_string, new_string)
     return new_content, occurrences
 
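Only blank lines change in perform_string_replacement, but the occurrence-validation contract it documents is easy to restate; a hedged sketch with invented inputs:

content = "foo = 1\nfoo = 2\n"
occurrences = content.count("foo")            # 2
# with replace_all=False the call is rejected with the "appears N times" error above;
# with replace_all=True every occurrence is replaced and the count is returned
new_content = content.replace("foo", "bar")
print((new_content, occurrences))             # ('bar = 1\nbar = 2\n', 2)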
@@ -225,33 +228,33 @@ def truncate_if_too_long(result: list[str] | str) -> list[str] | str:
         if total_chars > TOOL_RESULT_TOKEN_LIMIT * 4:
             return result[: len(result) * TOOL_RESULT_TOKEN_LIMIT * 4 // total_chars] + [TRUNCATION_GUIDANCE]
         return result
-
-
-
-
+    # string
+    if len(result) > TOOL_RESULT_TOKEN_LIMIT * 4:
+        return result[: TOOL_RESULT_TOKEN_LIMIT * 4] + "\n" + TRUNCATION_GUIDANCE
+    return result
 
 
 def _validate_path(path: str | None) -> str:
     """Validate and normalize a path.
-
+
     Args:
         path: Path to validate
-
+
     Returns:
         Normalized path starting with /
-
+
     Raises:
         ValueError: If path is invalid
     """
     path = path or "/"
     if not path or path.strip() == "":
         raise ValueError("Path cannot be empty")
-
+
     normalized = path if path.startswith("/") else "/" + path
-
+
     if not normalized.endswith("/"):
         normalized += "/"
-
+
     return normalized
 
 
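The one behavioural change in this hunk is the new string branch of truncate_if_too_long: oversized string results are now cut to the same roughly-4-characters-per-token budget as list results, with the guidance text appended. A standalone sketch of that branch; the constant values here are assumptions for illustration, not the package's:

TOOL_RESULT_TOKEN_LIMIT = 5000              # assumed value, for illustration only
TRUNCATION_GUIDANCE = "[output truncated]"  # assumed placeholder text

def truncate_string_result(result: str) -> str:
    # mirror of the added branch: ~4 characters per token
    if len(result) > TOOL_RESULT_TOKEN_LIMIT * 4:
        return result[: TOOL_RESULT_TOKEN_LIMIT * 4] + "\n" + TRUNCATION_GUIDANCE
    return result

print(truncate_string_result("x" * 30000)[-20:])   # ends with the guidance text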
@@ -261,16 +264,16 @@ def _glob_search_files(
     path: str = "/",
 ) -> str:
     """Search files dict for paths matching glob pattern.
-
+
     Args:
         files: Dictionary of file paths to FileData.
         pattern: Glob pattern (e.g., "*.py", "**/*.ts").
         path: Base path to search from.
-
+
     Returns:
         Newline-separated file paths, sorted by modification time (most recent first).
         Returns "No files found" if no matches.
-
+
     Example:
         ```python
         files = {"/src/main.py": FileData(...), "/test.py": FileData(...)}
@@ -313,29 +316,28 @@ def _format_grep_results(
     output_mode: Literal["files_with_matches", "content", "count"],
 ) -> str:
     """Format grep search results based on output mode.
-
+
     Args:
         results: Dictionary mapping file paths to list of (line_num, line_content) tuples
        output_mode: Output format - "files_with_matches", "content", or "count"
-
+
     Returns:
         Formatted string output
     """
     if output_mode == "files_with_matches":
         return "\n".join(sorted(results.keys()))
-
+    if output_mode == "count":
         lines = []
         for file_path in sorted(results.keys()):
             count = len(results[file_path])
             lines.append(f"{file_path}: {count}")
         return "\n".join(lines)
-
-
-
-
-
-
-    return "\n".join(lines)
+    lines = []
+    for file_path in sorted(results.keys()):
+        lines.append(f"{file_path}:")
+        for line_num, line in results[file_path]:
+            lines.append(f"  {line_num}: {line}")
+    return "\n".join(lines)
 
 
 def _grep_search_files(
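The rewritten tail of _format_grep_results gives the "content" output mode the path-header-plus-indented-lines layout visible in the added lines. The same logic as a standalone snippet, with invented sample data:

results = {"/src/app.py": [(3, "import os"), (9, "os.getcwd()")]}
lines = []
for file_path in sorted(results.keys()):
    lines.append(f"{file_path}:")
    for line_num, line in results[file_path]:
        lines.append(f"  {line_num}: {line}")
print("\n".join(lines))
# /src/app.py:
#   3: import os
#   9: os.getcwd()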
@@ -346,17 +348,17 @@ def _grep_search_files(
     output_mode: Literal["files_with_matches", "content", "count"] = "files_with_matches",
 ) -> str:
     """Search file contents for regex pattern.
-
+
     Args:
         files: Dictionary of file paths to FileData.
         pattern: Regex pattern to search for.
         path: Base path to search from.
         glob: Optional glob pattern to filter files (e.g., "*.py").
         output_mode: Output format - "files_with_matches", "content", or "count".
-
+
     Returns:
         Formatted search results. Returns "No matches found" if no results.
-
+
     Example:
         ```python
         files = {"/file.py": FileData(content=["import os", "print('hi')"], ...)}

@@ -394,6 +396,7 @@ def _grep_search_files(
 
 # -------- Structured helpers for composition --------
 
+
 def grep_matches_from_files(
     files: dict[str, Any],
     pattern: str,
@@ -419,11 +422,7 @@ def grep_matches_from_files(
     filtered = {fp: fd for fp, fd in files.items() if fp.startswith(normalized_path)}
 
     if glob:
-        filtered = {
-            fp: fd
-            for fp, fd in filtered.items()
-            if wcglob.globmatch(Path(fp).name, glob, flags=wcglob.BRACE)
-        }
+        filtered = {fp: fd for fp, fd in filtered.items() if wcglob.globmatch(Path(fp).name, glob, flags=wcglob.BRACE)}
 
     matches: list[GrepMatch] = []
     for file_path, file_data in filtered.items():
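The glob filter is only reflowed onto one line; the match call itself is unchanged. For readers unfamiliar with it, wcmatch's globmatch with the BRACE flag behaves roughly as below (illustrative file names, not package code):

import wcmatch.glob as wcglob

# BRACE enables {a,b} alternation; matching is done against the bare file name,
# just as the diff does with Path(fp).name
print(wcglob.globmatch("main.py", "*.{py,ts}", flags=wcglob.BRACE))   # True
print(wcglob.globmatch("main.rs", "*.{py,ts}", flags=wcglob.BRACE))   # False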
@@ -433,16 +432,16 @@ def grep_matches_from_files(
     return matches
 
 
-def build_grep_results_dict(matches:
+def build_grep_results_dict(matches: list[GrepMatch]) -> dict[str, list[tuple[int, str]]]:
     """Group structured matches into the legacy dict form used by formatters."""
-    grouped:
+    grouped: dict[str, list[tuple[int, str]]] = {}
     for m in matches:
         grouped.setdefault(m["path"], []).append((m["line"], m["text"]))
     return grouped
 
 
 def format_grep_matches(
-    matches:
+    matches: list[GrepMatch],
     output_mode: Literal["files_with_matches", "content", "count"],
 ) -> str:
     """Format structured grep matches using existing formatting logic."""
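Both helpers now carry full type annotations in 0.2.4. A hedged sketch of the grouping step they describe, using the GrepMatch shape declared earlier in this file and invented sample data:

matches = [
    {"path": "/a.py", "line": 3, "text": "import os"},
    {"path": "/a.py", "line": 9, "text": "os.getcwd()"},
]

# build_grep_results_dict: list of GrepMatch -> {path: [(line, text), ...]}
grouped: dict[str, list[tuple[int, str]]] = {}
for m in matches:
    grouped.setdefault(m["path"], []).append((m["line"], m["text"]))

print(grouped)   # {'/a.py': [(3, 'import os'), (9, 'os.getcwd()')]}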
{deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/METADATA  CHANGED

@@ -1,8 +1,14 @@
 Metadata-Version: 2.4
 Name: deepagents
-Version: 0.2.2
+Version: 0.2.4
 Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
 License: MIT
+Project-URL: Homepage, https://docs.langchain.com/oss/python/deepagents/overview
+Project-URL: Documentation, https://reference.langchain.com/python/deepagents/
+Project-URL: Source, https://github.com/langchain-ai/deepagents
+Project-URL: Twitter, https://x.com/LangChainAI
+Project-URL: Slack, https://www.langchain.com/join-community
+Project-URL: Reddit, https://www.reddit.com/r/LangChain/
 Requires-Python: <4.0,>=3.11
 Description-Content-Type: text/markdown
 License-File: LICENSE
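The added Project-URL entries are ordinary core-metadata fields, so once the 0.2.4 wheel is installed they are visible to standard tooling; an illustrative snippet, not part of the package:

from importlib.metadata import metadata

md = metadata("deepagents")
print(md["Version"])                      # 0.2.4 with this wheel installed
for url in md.get_all("Project-URL") or []:
    print(url)                            # e.g. "Source, https://github.com/langchain-ai/deepagents"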
{deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/RECORD  CHANGED

@@ -6,15 +6,14 @@ deepagents/backends/filesystem.py,sha256=U9Tmf8BDTqKbw-gQjJkJOYG1nXQo9FhxyfzN97W
 deepagents/backends/protocol.py,sha256=fwqJa_Ec6F4BoNYz0bcPHL_fiKksxw2RoyA6x5wr7dc,4181
 deepagents/backends/state.py,sha256=BxMNm1kDpxtgzIzpuF78h1NuYh9VIpXqnUbbETGe4Y4,6584
 deepagents/backends/store.py,sha256=VsPSj6ayABPjkKiN6CcvOGm7YCWKuWP_ltJWvFJ1nF0,13358
-deepagents/backends/utils.py,sha256=
+deepagents/backends/utils.py,sha256=_0mkTs3fD4eaChlOjOivYtF9i_AuJHdV64jRlGRF5LE,14207
 deepagents/middleware/__init__.py,sha256=x7UHqGcrKlhzORNdChPvnUwa_PIJCbFUHY6zTKVfloI,418
-deepagents/middleware/agent_memory.py,sha256=BRP8Dyuzl1ms4Eja-3nRHI3g2vNWfK8tUW6zBr2JJOc,9196
 deepagents/middleware/filesystem.py,sha256=Zwpt6ILniHbNzfLWXrSGLbd__ZFkkO1xv0mGiRsNB7s,28144
 deepagents/middleware/patch_tool_calls.py,sha256=PdNhxPaQqwnFkhEAZEE2kEzadTNAOO3_iJRA30WqpGE,1981
 deepagents/middleware/resumable_shell.py,sha256=WbtjW81DItZgFiy5k4cSLAu5NTBHdOk2UEnFecuqlJU,3353
 deepagents/middleware/subagents.py,sha256=JxXwZvi41pBKKMguKlyVqwjCoydnZboWEgJGkWOCIY8,23503
-deepagents-0.2.
-deepagents-0.2.
-deepagents-0.2.
-deepagents-0.2.
-deepagents-0.2.
+deepagents-0.2.4.dist-info/licenses/LICENSE,sha256=c__BaxUCK69leo2yEKynf8lWndu8iwYwge1CbyqAe-E,1071
+deepagents-0.2.4.dist-info/METADATA,sha256=byw286YaV5O5vsd-USesxgs7coiW63tg5GE1wqe-eI0,19050
+deepagents-0.2.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+deepagents-0.2.4.dist-info/top_level.txt,sha256=drAzchOzPNePwpb3_pbPuvLuayXkN7SNqeIKMBWJoAo,11
+deepagents-0.2.4.dist-info/RECORD,,
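Each RECORD row is path, sha256=<urlsafe-base64 digest with padding stripped>, size in bytes. The new utils.py row can in principle be checked against an installed copy like this (the relative path is an assumption about the site-packages layout):

import base64
import hashlib
from pathlib import Path

data = Path("deepagents/backends/utils.py").read_bytes()   # run from site-packages
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
print(digest, len(data))   # expected to match the sha256=... value and 14207 for 0.2.4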
deepagents/middleware/agent_memory.py  DELETED

@@ -1,222 +0,0 @@
-"""Middleware for loading agent-specific long-term memory into the system prompt."""
-
-from collections.abc import Awaitable, Callable
-from typing import TYPE_CHECKING, Any
-
-if TYPE_CHECKING:
-    from langgraph.runtime import Runtime
-
-from langchain.agents.middleware.types import (
-    AgentMiddleware,
-    AgentState,
-    ModelRequest,
-    ModelResponse,
-)
-from typing_extensions import NotRequired, TypedDict
-
-from deepagents.backends.protocol import BackendProtocol
-
-
-class AgentMemoryState(AgentState):
-    """State for the agent memory middleware."""
-
-    agent_memory: NotRequired[str | None]
-    """Long-term memory content for the agent."""
-
-
-AGENT_MEMORY_FILE_PATH = "/agent.md"
-
-# Long-term Memory Documentation
-LONGTERM_MEMORY_SYSTEM_PROMPT = """
-
-## Long-term Memory
-
-You have access to a long-term memory system using the {memory_path} path prefix.
-Files stored in {memory_path} persist across sessions and conversations.
-
-Your system prompt is loaded from {memory_path}agent.md at startup. You can update your own instructions by editing this file.
-
-**When to CHECK/READ memories (CRITICAL - do this FIRST):**
-- **At the start of ANY new session**: Run `ls {memory_path}` to see what you know
-- **BEFORE answering questions**: If asked "what do you know about X?" or "how do I do Y?", check `ls {memory_path}` for relevant files FIRST
-- **When user asks you to do something**: Check if you have guides, examples, or patterns in {memory_path} before proceeding
-- **When user references past work or conversations**: Search {memory_path} for related content
-- **If you're unsure**: Check your memories rather than guessing or using only general knowledge
-
-**Memory-first response pattern:**
-1. User asks a question → Run `ls {memory_path}` to check for relevant files
-2. If relevant files exist → Read them with `read_file {memory_path}[filename]`
-3. Base your answer on saved knowledge (from memories) supplemented by general knowledge
-4. If no relevant memories exist → Use general knowledge, then consider if this is worth saving
-
-**When to update memories:**
-- **IMMEDIATELY when the user describes your role or how you should behave** (e.g., "you are a web researcher", "you are an expert in X")
-- **IMMEDIATELY when the user gives feedback on your work** - Before continuing, update memories to capture what was wrong and how to do it better
-- When the user explicitly asks you to remember something
-- When patterns or preferences emerge (coding styles, conventions, workflows)
-- After significant work where context would help in future sessions
-
-**Learning from feedback:**
-- When user says something is better/worse, capture WHY and encode it as a pattern
-- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions
-- When user says "you should remember X" or "be careful about Y", treat this as HIGH PRIORITY - update memories IMMEDIATELY
-- Look for the underlying principle behind corrections, not just the specific mistake
-- If it's something you "should have remembered", identify where that instruction should live permanently
-
-**What to store where:**
-- **{memory_path}agent.md**: Update this to modify your core instructions and behavioral patterns
-- **Other {memory_path} files**: Use for project-specific context, reference information, or structured notes
-- If you create additional memory files, add references to them in {memory_path}agent.md so you remember to consult them
-
-The portion of your system prompt that comes from {memory_path}agent.md is marked with `<agent_memory>` tags so you can identify what instructions come from your persistent memory.
-
-Example: `ls {memory_path}` to see what memories you have
-Example: `read_file '{memory_path}deep-agents-guide.md'` to recall saved knowledge
-Example: `edit_file('{memory_path}agent.md', ...)` to update your instructions
-Example: `write_file('{memory_path}project_context.md', ...)` for project-specific notes, then reference it in agent.md
-
-Remember: To interact with the longterm filesystem, you must prefix the filename with the {memory_path} path."""
-
-
-DEFAULT_MEMORY_SNIPPET = """<agent_memory>
-{agent_memory}
-</agent_memory>
-"""
-
-class AgentMemoryMiddleware(AgentMiddleware):
-    """Middleware for loading agent-specific long-term memory.
-
-    This middleware loads the agent's long-term memory from a file (agent.md)
-    and injects it into the system prompt. The memory is loaded once at the
-    start of the conversation and stored in state.
-
-    Args:
-        backend: Backend to use for loading the agent memory file.
-        system_prompt_template: Optional custom template for how to inject
-            the agent memory into the system prompt. Use {agent_memory} as
-            a placeholder. Defaults to a simple section header.
-
-    Example:
-        ```python
-        from deepagents.middleware.agent_memory import AgentMemoryMiddleware
-        from deepagents.memory.backends import FilesystemBackend
-        from pathlib import Path
-
-        # Set up backend pointing to agent's directory
-        agent_dir = Path.home() / ".deepagents" / "my-agent"
-        backend = FilesystemBackend(root_dir=agent_dir)
-
-        # Create middleware
-        middleware = AgentMemoryMiddleware(backend=backend)
-        ```
-    """
-
-    state_schema = AgentMemoryState
-
-    def __init__(
-        self,
-        *,
-        backend: BackendProtocol,
-        memory_path: str,
-        system_prompt_template: str | None = None,
-    ) -> None:
-        """Initialize the agent memory middleware.
-
-        Args:
-            backend: Backend to use for loading the agent memory file.
-            system_prompt_template: Optional custom template for injecting
-                agent memory into system prompt.
-        """
-        self.backend = backend
-        self.memory_path = memory_path
-        self.system_prompt_template = system_prompt_template or DEFAULT_MEMORY_SNIPPET
-
-    def before_agent(
-        self,
-        state: AgentMemoryState,
-        runtime,
-    ) -> AgentMemoryState:
-        """Load agent memory from file before agent execution.
-
-        Args:
-            state: Current agent state.
-            handler: Handler function to call after loading memory.
-
-        Returns:
-            Updated state with agent_memory populated.
-        """
-        # Only load memory if it hasn't been loaded yet
-        if "agent_memory" not in state or state.get("agent_memory") is None:
-            file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
-            return {"agent_memory": file_data}
-
-    async def abefore_agent(
-        self,
-        state: AgentMemoryState,
-        runtime,
-    ) -> AgentMemoryState:
-        """(async) Load agent memory from file before agent execution.
-
-        Args:
-            state: Current agent state.
-            handler: Handler function to call after loading memory.
-
-        Returns:
-            Updated state with agent_memory populated.
-        """
-        # Only load memory if it hasn't been loaded yet
-        if "agent_memory" not in state or state.get("agent_memory") is None:
-            file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
-            return {"agent_memory": file_data}
-
-    def wrap_model_call(
-        self,
-        request: ModelRequest,
-        handler: Callable[[ModelRequest], ModelResponse],
-    ) -> ModelResponse:
-        """Inject agent memory into the system prompt.
-
-        Args:
-            request: The model request being processed.
-            handler: The handler function to call with the modified request.
-
-        Returns:
-            The model response from the handler.
-        """
-        # Get agent memory from state
-        agent_memory = request.state.get("agent_memory", "")
-
-        memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
-        if request.system_prompt:
-            request.system_prompt = memory_section + "\n\n" + request.system_prompt
-        else:
-            request.system_prompt = memory_section
-        request.system_prompt = request.system_prompt + "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
-
-        return handler(request)
-
-    async def awrap_model_call(
-        self,
-        request: ModelRequest,
-        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
-    ) -> ModelResponse:
-        """(async) Inject agent memory into the system prompt.
-
-        Args:
-            request: The model request being processed.
-            handler: The handler function to call with the modified request.
-
-        Returns:
-            The model response from the handler.
-        """
-        # Get agent memory from state
-        agent_memory = request.state.get("agent_memory", "")
-
-        memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
-        if request.system_prompt:
-            request.system_prompt = memory_section + "\n\n" + request.system_prompt
-        else:
-            request.system_prompt = memory_section
-        request.system_prompt = request.system_prompt + "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
-
-        return await handler(request)
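The middleware module is deleted outright, and nothing with a similar name appears in the 0.2.4 RECORD above. Code that imported it will fail at import time; a quick check, assuming nothing else re-exports the name:

try:
    from deepagents.middleware import agent_memory  # removed in 0.2.4
except ModuleNotFoundError as exc:
    print(exc)   # No module named 'deepagents.middleware.agent_memory'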
{deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/WHEEL  UNCHANGED
{deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/licenses/LICENSE  UNCHANGED
{deepagents-0.2.2.dist-info → deepagents-0.2.4.dist-info}/top_level.txt  UNCHANGED