deepagents 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepagents/__init__.py +9 -1
- deepagents/backends/composite.py +136 -46
- deepagents/backends/state.py +50 -1
- deepagents/graph.py +68 -29
- deepagents/middleware/__init__.py +4 -0
- deepagents/middleware/memory.py +369 -0
- deepagents/middleware/skills.py +695 -0
- {deepagents-0.3.1.dist-info → deepagents-0.3.2.dist-info}/METADATA +2 -2
- {deepagents-0.3.1.dist-info → deepagents-0.3.2.dist-info}/RECORD +11 -9
- {deepagents-0.3.1.dist-info → deepagents-0.3.2.dist-info}/WHEEL +0 -0
- {deepagents-0.3.1.dist-info → deepagents-0.3.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,369 @@
|
|
|
1
|
+
"""Middleware for loading agent memory/context from AGENTS.md files.
|
|
2
|
+
|
|
3
|
+
This module implements support for the AGENTS.md specification (https://agents.md/),
|
|
4
|
+
loading memory/context from configurable sources and injecting into the system prompt.
|
|
5
|
+
|
|
6
|
+
## Overview
|
|
7
|
+
|
|
8
|
+
AGENTS.md files provide project-specific context and instructions to help AI agents
|
|
9
|
+
work effectively. Unlike skills (which are on-demand workflows), memory is always
|
|
10
|
+
loaded and provides persistent context.
|
|
11
|
+
|
|
12
|
+
## Usage
|
|
13
|
+
|
|
14
|
+
```python
|
|
15
|
+
from deepagents import MemoryMiddleware
|
|
16
|
+
from deepagents.backends.filesystem import FilesystemBackend
|
|
17
|
+
|
|
18
|
+
# Security: FilesystemBackend allows reading/writing from the entire filesystem.
|
|
19
|
+
# Either ensure the agent is running within a sandbox OR add human-in-the-loop (HIL)
|
|
20
|
+
# approval to file operations.
|
|
21
|
+
backend = FilesystemBackend(root_dir="/")
|
|
22
|
+
|
|
23
|
+
middleware = MemoryMiddleware(
|
|
24
|
+
backend=backend,
|
|
25
|
+
sources=[
|
|
26
|
+
"~/.deepagents/AGENTS.md",
|
|
27
|
+
"./.deepagents/AGENTS.md",
|
|
28
|
+
],
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
agent = create_deep_agent(middleware=[middleware])
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Memory Sources
|
|
35
|
+
|
|
36
|
+
Sources are simply paths to AGENTS.md files that are loaded in order and combined.
|
|
37
|
+
Multiple sources are concatenated in order, with all content included.
|
|
38
|
+
Later sources appear after earlier ones in the combined prompt.
|
|
39
|
+
|
|
40
|
+
## File Format
|
|
41
|
+
|
|
42
|
+
AGENTS.md files are standard Markdown with no required structure.
|
|
43
|
+
Common sections include:
|
|
44
|
+
- Project overview
|
|
45
|
+
- Build/test commands
|
|
46
|
+
- Code style guidelines
|
|
47
|
+
- Architecture notes
|
|
48
|
+
"""
|
|
49
|
+
|
|
50
|
+
from __future__ import annotations
|
|
51
|
+
|
|
52
|
+
import logging
|
|
53
|
+
from collections.abc import Awaitable, Callable
|
|
54
|
+
from typing import TYPE_CHECKING, Annotated, NotRequired, TypedDict
|
|
55
|
+
|
|
56
|
+
from langchain.messages import SystemMessage
|
|
57
|
+
from langchain_core.runnables import RunnableConfig
|
|
58
|
+
|
|
59
|
+
if TYPE_CHECKING:
|
|
60
|
+
from deepagents.backends.protocol import BACKEND_TYPES, BackendProtocol
|
|
61
|
+
|
|
62
|
+
from langchain.agents.middleware.types import (
|
|
63
|
+
AgentMiddleware,
|
|
64
|
+
AgentState,
|
|
65
|
+
ModelRequest,
|
|
66
|
+
ModelResponse,
|
|
67
|
+
PrivateStateAttr,
|
|
68
|
+
)
|
|
69
|
+
from langchain.tools import ToolRuntime
|
|
70
|
+
from langgraph.runtime import Runtime
|
|
71
|
+
|
|
72
|
+
logger = logging.getLogger(__name__)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class MemoryState(AgentState):
    """State schema for MemoryMiddleware.

    Attributes:
        memory_contents: Mapping from each source path to the content loaded
            from it. Marked private so it is excluded from the final agent
            state returned to callers.
    """

    # Populated once by (a)before_agent; key absence means memory was never loaded.
    memory_contents: NotRequired[Annotated[dict[str, str], PrivateStateAttr]]
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
class MemoryStateUpdate(TypedDict):
    """Partial state update emitted by MemoryMiddleware."""

    # Maps each successfully loaded source path to its file content.
    memory_contents: dict[str, str]
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
# Template injected ahead of the agent's system prompt. The two placeholders
# are filled by modify_request with the formatted source list and the
# concatenated contents of all loaded memory files.
MEMORY_SYSTEM_PROMPT = """
## Agent Memory

You have access to persistent memory that provides context and instructions.

{memory_locations}

{memory_contents}

**Memory Guidelines:**
- Memory content above provides project-specific context and instructions
- Follow any guidelines, conventions, or patterns described in memory
- Memory is read-only during this session (loaded at startup)
- If you need to update memory, use the appropriate file editing tools
"""
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
class MemoryMiddleware(AgentMiddleware):
    """Middleware for loading agent memory from AGENTS.md files.

    Loads memory content from configured sources and injects it into the
    system prompt. Supports multiple sources that are combined together.

    Args:
        backend: Backend instance or factory function for file operations.
        sources: List of AGENTS.md file paths to load, in order.
    """

    state_schema = MemoryState

    def __init__(
        self,
        *,
        backend: BACKEND_TYPES,
        sources: list[str],
    ) -> None:
        """Initialize the memory middleware.

        Args:
            backend: Backend instance or factory function that takes runtime
                and returns a backend. Use a factory for StateBackend.
            sources: List of memory file paths to load (e.g., ["~/.deepagents/AGENTS.md",
                "./.deepagents/AGENTS.md"]). Display names are automatically derived
                from the paths. Sources are loaded in order.
        """
        self._backend = backend
        self.sources = sources
        self.system_prompt_template = MEMORY_SYSTEM_PROMPT

    def _get_backend(self, state: MemoryState, runtime: Runtime, config: RunnableConfig) -> BackendProtocol:
        """Resolve backend from instance or factory.

        Args:
            state: Current agent state.
            runtime: Runtime context for factory functions.
            config: Runnable config to pass to backend factory.

        Returns:
            Resolved backend instance.
        """
        if callable(self._backend):
            # Construct an artificial tool runtime to resolve backend factory
            tool_runtime = ToolRuntime(
                state=state,
                context=runtime.context,
                stream_writer=runtime.stream_writer,
                store=runtime.store,
                config=config,
                tool_call_id=None,
            )
            return self._backend(tool_runtime)
        return self._backend

    def _format_memory_locations(self) -> str:
        """Format memory source locations for display."""
        if not self.sources:
            return "**Memory Sources:** None configured"

        lines = ["**Memory Sources:**"]
        lines.extend(f"- `{path}`" for path in self.sources)
        return "\n".join(lines)

    def _format_memory_contents(self, contents: dict[str, str]) -> str:
        """Format loaded memory contents for injection into prompt.

        Args:
            contents: Dict mapping source paths to content.

        Returns:
            Formatted string with all memory contents concatenated.
        """
        # Preserve configured source order; skip missing or empty entries.
        sections = [content for path in self.sources if (content := contents.get(path))]

        if not sections:
            return "(No memory loaded)"

        return "\n\n".join(sections)

    @staticmethod
    def _decode_download_response(results: list, path: str) -> str | None:
        """Validate a one-path download result and decode its content.

        Shared by the sync and async loaders so their error handling cannot
        drift apart.

        Args:
            results: Responses returned by the backend for a single-path request.
            path: Requested path, used in error messages.

        Returns:
            Decoded UTF-8 content, or None if the file has no content.

        Raises:
            AssertionError: If the backend did not return exactly one response.
            ValueError: If the backend reported an error for the path.
        """
        # Should get exactly one response for one path
        if len(results) != 1:
            raise AssertionError(f"Expected 1 response for path {path}, got {len(results)}")
        response = results[0]

        if response.error is not None:
            raise ValueError(f"Failed to download {path}: {response.error}")

        if response.content is not None:
            return response.content.decode("utf-8")

        return None

    async def _load_memory_from_backend(
        self,
        backend: BackendProtocol,
        path: str,
    ) -> str | None:
        """Load memory content from a backend path.

        Args:
            backend: Backend to load from.
            path: Path to the AGENTS.md file.

        Returns:
            File content if found, None otherwise.
        """
        results = await backend.adownload_files([path])
        return self._decode_download_response(results, path)

    def _load_memory_from_backend_sync(
        self,
        backend: BackendProtocol,
        path: str,
    ) -> str | None:
        """Load memory content from a backend path synchronously.

        Args:
            backend: Backend to load from.
            path: Path to the AGENTS.md file.

        Returns:
            File content if found, None otherwise.
        """
        results = backend.download_files([path])
        return self._decode_download_response(results, path)

    def before_agent(self, state: MemoryState, runtime: Runtime, config: RunnableConfig) -> MemoryStateUpdate | None:
        """Load memory content before agent execution (synchronous).

        Loads memory from all configured sources and stores in state.
        Only loads if not already present in state.

        Args:
            state: Current agent state.
            runtime: Runtime context.
            config: Runnable config.

        Returns:
            State update with memory_contents populated, or None if memory
            was already loaded.
        """
        # Skip if already loaded
        if "memory_contents" in state:
            return None

        backend = self._get_backend(state, runtime, config)
        contents: dict[str, str] = {}

        for path in self.sources:
            content = self._load_memory_from_backend_sync(backend, path)
            if content:
                contents[path] = content
                # Lazy %-args: message is only built when DEBUG is enabled.
                logger.debug("Loaded memory from: %s", path)

        return MemoryStateUpdate(memory_contents=contents)

    async def abefore_agent(self, state: MemoryState, runtime: Runtime, config: RunnableConfig) -> MemoryStateUpdate | None:
        """Load memory content before agent execution.

        Loads memory from all configured sources and stores in state.
        Only loads if not already present in state.

        Args:
            state: Current agent state.
            runtime: Runtime context.
            config: Runnable config.

        Returns:
            State update with memory_contents populated, or None if memory
            was already loaded.
        """
        # Skip if already loaded
        if "memory_contents" in state:
            return None

        backend = self._get_backend(state, runtime, config)
        contents: dict[str, str] = {}

        for path in self.sources:
            content = await self._load_memory_from_backend(backend, path)
            if content:
                contents[path] = content
                # Lazy %-args: message is only built when DEBUG is enabled.
                logger.debug("Loaded memory from: %s", path)

        return MemoryStateUpdate(memory_contents=contents)

    def modify_request(self, request: ModelRequest) -> ModelRequest:
        """Inject memory content into the system prompt.

        Args:
            request: Model request to modify.

        Returns:
            Modified request with memory injected into system prompt.
        """
        contents = request.state.get("memory_contents", {})
        memory_locations = self._format_memory_locations()
        memory_contents = self._format_memory_contents(contents)

        memory_section = self.system_prompt_template.format(
            memory_locations=memory_locations,
            memory_contents=memory_contents,
        )

        # Memory goes first so project context precedes any base instructions.
        if request.system_prompt:
            system_prompt = memory_section + "\n\n" + request.system_prompt
        else:
            system_prompt = memory_section

        return request.override(system_message=SystemMessage(system_prompt))

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Wrap model call to inject memory into system prompt.

        Args:
            request: Model request being processed.
            handler: Handler function to call with modified request.

        Returns:
            Model response from handler.
        """
        modified_request = self.modify_request(request)
        return handler(modified_request)

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """Async wrap model call to inject memory into system prompt.

        Args:
            request: Model request being processed.
            handler: Async handler function to call with modified request.

        Returns:
            Model response from handler.
        """
        modified_request = self.modify_request(request)
        return await handler(modified_request)
|