deepagents 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepagents/backends/__init__.py +1 -1
- deepagents/backends/composite.py +32 -42
- deepagents/backends/filesystem.py +92 -86
- deepagents/backends/protocol.py +39 -13
- deepagents/backends/state.py +59 -58
- deepagents/backends/store.py +74 -67
- deepagents/backends/utils.py +7 -21
- deepagents/graph.py +1 -1
- deepagents/middleware/filesystem.py +49 -47
- deepagents/middleware/resumable_shell.py +5 -4
- deepagents/middleware/subagents.py +1 -2
- {deepagents-0.2.4.dist-info → deepagents-0.2.5.dist-info}/METADATA +1 -7
- deepagents-0.2.5.dist-info/RECORD +38 -0
- deepagents-0.2.5.dist-info/top_level.txt +2 -0
- deepagents-cli/README.md +3 -0
- deepagents-cli/deepagents_cli/README.md +196 -0
- deepagents-cli/deepagents_cli/__init__.py +5 -0
- deepagents-cli/deepagents_cli/__main__.py +6 -0
- deepagents-cli/deepagents_cli/agent.py +278 -0
- deepagents-cli/deepagents_cli/agent_memory.py +226 -0
- deepagents-cli/deepagents_cli/commands.py +89 -0
- deepagents-cli/deepagents_cli/config.py +118 -0
- deepagents-cli/deepagents_cli/default_agent_prompt.md +110 -0
- deepagents-cli/deepagents_cli/execution.py +636 -0
- deepagents-cli/deepagents_cli/file_ops.py +347 -0
- deepagents-cli/deepagents_cli/input.py +270 -0
- deepagents-cli/deepagents_cli/main.py +226 -0
- deepagents-cli/deepagents_cli/py.typed +0 -0
- deepagents-cli/deepagents_cli/token_utils.py +63 -0
- deepagents-cli/deepagents_cli/tools.py +140 -0
- deepagents-cli/deepagents_cli/ui.py +489 -0
- deepagents-cli/tests/test_file_ops.py +119 -0
- deepagents-cli/tests/test_placeholder.py +5 -0
- deepagents-0.2.4.dist-info/RECORD +0 -19
- deepagents-0.2.4.dist-info/top_level.txt +0 -1
- {deepagents-0.2.4.dist-info → deepagents-0.2.5.dist-info}/WHEEL +0 -0
- {deepagents-0.2.4.dist-info → deepagents-0.2.5.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,278 @@
|
|
|
1
|
+
"""Agent management and creation for the CLI."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import shutil
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from deepagents import create_deep_agent
|
|
8
|
+
from deepagents.backends import CompositeBackend
|
|
9
|
+
from deepagents.backends.filesystem import FilesystemBackend
|
|
10
|
+
from deepagents.middleware.resumable_shell import ResumableShellToolMiddleware
|
|
11
|
+
from langchain.agents.middleware import HostExecutionPolicy
|
|
12
|
+
from langgraph.checkpoint.memory import InMemorySaver
|
|
13
|
+
|
|
14
|
+
from .agent_memory import AgentMemoryMiddleware
|
|
15
|
+
from .config import COLORS, config, console, get_default_coding_instructions
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def list_agents():
    """Print every agent found under ~/.deepagents, flagging incomplete ones."""
    agents_dir = Path.home() / ".deepagents"

    # An absent or empty directory means no agents have been created yet.
    if not agents_dir.exists() or not any(agents_dir.iterdir()):
        console.print("[yellow]No agents found.[/yellow]")
        console.print(
            "[dim]Agents will be created in ~/.deepagents/ when you first use them.[/dim]",
            style=COLORS["dim"],
        )
        return

    console.print("\n[bold]Available Agents:[/bold]\n", style=COLORS["primary"])

    for entry in sorted(agents_dir.iterdir()):
        if not entry.is_dir():
            continue
        name = entry.name
        # An agent directory without agent.md is flagged as incomplete.
        if (entry / "agent.md").exists():
            console.print(f"  • [bold]{name}[/bold]", style=COLORS["primary"])
        else:
            console.print(
                f"  • [bold]{name}[/bold] [dim](incomplete)[/dim]", style=COLORS["tool"]
            )
        console.print(f"    {entry}", style=COLORS["dim"])

    console.print()
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def reset_agent(agent_name: str, source_agent: str | None = None):
    """Reset an agent's instructions to the default or copy them from another agent.

    Args:
        agent_name: Name of the agent directory under ~/.deepagents to reset.
        source_agent: Optional name of another agent whose agent.md should be
            copied. When omitted, the default coding instructions are used.
    """
    agents_dir = Path.home() / ".deepagents"
    agent_dir = agents_dir / agent_name

    if source_agent:
        source_md = agents_dir / source_agent / "agent.md"

        if not source_md.exists():
            console.print(
                f"[bold red]Error:[/bold red] Source agent '{source_agent}' not found or has no agent.md"
            )
            return

        source_content = source_md.read_text()
        action_desc = f"contents of agent '{source_agent}'"
    else:
        source_content = get_default_coding_instructions()
        action_desc = "default"

    # Wipe any existing agent state before writing the fresh agent.md.
    if agent_dir.exists():
        shutil.rmtree(agent_dir)
        console.print(f"Removed existing agent directory: {agent_dir}", style=COLORS["tool"])

    agent_dir.mkdir(parents=True, exist_ok=True)
    (agent_dir / "agent.md").write_text(source_content)

    console.print(f"✓ Agent '{agent_name}' reset to {action_desc}", style=COLORS["primary"])
    console.print(f"Location: {agent_dir}\n", style=COLORS["dim"])
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def get_system_prompt() -> str:
    """Get the base system prompt for the agent.

    Returns:
        The system prompt string (without agent.md content)
    """
    # Only {Path.cwd()} is interpolated; the rest is static guidance text
    # delivered to the model on every turn.
    return f"""### Current Working Directory

The filesystem backend is currently operating in: `{Path.cwd()}`

### Memory System Reminder

Your long-term memory is stored in /memories/ and persists across sessions.

**IMPORTANT - Check memories before answering:**
- When asked "what do you know about X?" → Run `ls /memories/` FIRST, then read relevant files
- When starting a task → Check if you have guides or examples in /memories/
- At the beginning of new sessions → Consider checking `ls /memories/` to see what context you have

Base your answers on saved knowledge (from /memories/) when available, supplemented by general knowledge.

### Human-in-the-Loop Tool Approval

Some tool calls require user approval before execution. When a tool call is rejected by the user:
1. Accept their decision immediately - do NOT retry the same command
2. Explain that you understand they rejected the action
3. Suggest an alternative approach or ask for clarification
4. Never attempt the exact same rejected command again

Respect the user's decisions and work with them collaboratively.

### Web Search Tool Usage

When you use the web_search tool:
1. The tool will return search results with titles, URLs, and content excerpts
2. You MUST read and process these results, then respond naturally to the user
3. NEVER show raw JSON or tool results directly to the user
4. Synthesize the information from multiple sources into a coherent answer
5. Cite your sources by mentioning page titles or URLs when relevant
6. If the search doesn't find what you need, explain what you found and ask clarifying questions

The user only sees your text responses - not tool results. Always provide a complete, natural language answer after using web_search.

### Todo List Management

When using the write_todos tool:
1. Keep the todo list MINIMAL - aim for 3-6 items maximum
2. Only create todos for complex, multi-step tasks that truly need tracking
3. Break down work into clear, actionable items without over-fragmenting
4. For simple tasks (1-2 steps), just do them directly without creating todos
5. When first creating a todo list for a task, ALWAYS ask the user if the plan looks good before starting work
   - Create the todos, let them render, then ask: "Does this plan look good?" or similar
   - Wait for the user's response before marking the first todo as in_progress
   - If they want changes, adjust the plan accordingly
6. Update todo status promptly as you complete each item

The todo list is a planning tool - use it judiciously to avoid overwhelming the user with excessive task tracking."""
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def create_agent_with_config(model, assistant_id: str, tools: list):
    """Create and configure an agent with the specified model and tools.

    Args:
        model: Chat model that drives the agent.
        assistant_id: Agent name; selects ~/.deepagents/<assistant_id>/ as the
            long-term memory directory.
        tools: Extra tools to expose to the agent.

    Returns:
        The compiled deep agent with an in-memory checkpointer attached.
    """
    # Shell commands run on the host, rooted at the current working directory.
    shell_middleware = ResumableShellToolMiddleware(
        workspace_root=os.getcwd(), execution_policy=HostExecutionPolicy()
    )

    # For long-term memory, point to ~/.deepagents/AGENT_NAME/ with /memories/ prefix
    agent_dir = Path.home() / ".deepagents" / assistant_id
    agent_dir.mkdir(parents=True, exist_ok=True)
    agent_md = agent_dir / "agent.md"
    if not agent_md.exists():
        # First run for this agent: seed agent.md with the default instructions.
        source_content = get_default_coding_instructions()
        agent_md.write_text(source_content)

    # Long-term backend - rooted at agent directory
    # This handles both /memories/ files and /agent.md
    long_term_backend = FilesystemBackend(root_dir=agent_dir, virtual_mode=True)

    # Composite backend: current working directory for default, agent directory for /memories/
    backend = CompositeBackend(
        default=FilesystemBackend(), routes={"/memories/": long_term_backend}
    )

    # Use the same backend for agent memory middleware
    agent_middleware = [
        AgentMemoryMiddleware(backend=long_term_backend, memory_path="/memories/"),
        shell_middleware,
    ]

    # Get the system prompt
    system_prompt = get_system_prompt()

    # Helper functions for formatting tool descriptions in HITL prompts
    def format_write_file_description(tool_call: dict) -> str:
        """Format write_file tool call for approval prompt."""
        args = tool_call.get("args", {})
        file_path = args.get("file_path", "unknown")
        content = args.get("content", "")

        # Distinguish create vs overwrite by probing the host filesystem.
        action = "Overwrite" if os.path.exists(file_path) else "Create"
        line_count = len(content.splitlines())
        size = len(content.encode("utf-8"))

        return f"File: {file_path}\nAction: {action} file\nLines: {line_count} · Bytes: {size}"

    def format_edit_file_description(tool_call: dict) -> str:
        """Format edit_file tool call for approval prompt."""
        args = tool_call.get("args", {})
        file_path = args.get("file_path", "unknown")
        old_string = args.get("old_string", "")
        new_string = args.get("new_string", "")
        replace_all = bool(args.get("replace_all", False))

        # Net character change of one replacement (not scaled by occurrence count).
        delta = len(new_string) - len(old_string)

        return (
            f"File: {file_path}\n"
            f"Action: Replace text ({'all occurrences' if replace_all else 'single occurrence'})\n"
            f"Snippet delta: {delta:+} characters"
        )

    def format_web_search_description(tool_call: dict) -> str:
        """Format web_search tool call for approval prompt."""
        args = tool_call.get("args", {})
        query = args.get("query", "unknown")
        max_results = args.get("max_results", 5)

        return f"Query: {query}\nMax results: {max_results}\n\n⚠️ This will use Tavily API credits"

    def format_task_description(tool_call: dict) -> str:
        """Format task (subagent) tool call for approval prompt."""
        args = tool_call.get("args", {})
        description = args.get("description", "unknown")
        prompt = args.get("prompt", "")

        # Truncate prompt if too long
        prompt_preview = prompt[:300]
        if len(prompt) > 300:
            prompt_preview += "..."

        return (
            f"Task: {description}\n\n"
            f"Instructions to subagent:\n"
            f"{'─' * 40}\n"
            f"{prompt_preview}\n"
            f"{'─' * 40}\n\n"
            f"⚠️ Subagent will have access to file operations and shell commands"
        )

    # Configure human-in-the-loop for potentially destructive tools
    from langchain.agents.middleware import InterruptOnConfig

    shell_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: (
            f"Shell Command: {tool_call['args'].get('command', 'N/A')}\n"
            f"Working Directory: {os.getcwd()}"
        ),
    }

    write_file_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_write_file_description(tool_call),
    }

    edit_file_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_edit_file_description(tool_call),
    }

    web_search_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_web_search_description(tool_call),
    }

    task_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_task_description(tool_call),
    }

    agent = create_deep_agent(
        model=model,
        system_prompt=system_prompt,
        tools=tools,
        backend=backend,
        middleware=agent_middleware,
        interrupt_on={
            "shell": shell_interrupt_config,
            "write_file": write_file_interrupt_config,
            "edit_file": edit_file_interrupt_config,
            "web_search": web_search_interrupt_config,
            "task": task_interrupt_config,
        },
    ).with_config(config)

    # Conversation state is kept in memory only; it does not persist across runs.
    agent.checkpointer = InMemorySaver()

    return agent
|
|
@@ -0,0 +1,226 @@
|
|
|
1
|
+
"""Middleware for loading agent-specific long-term memory into the system prompt."""
|
|
2
|
+
|
|
3
|
+
from collections.abc import Awaitable, Callable
|
|
4
|
+
from typing import NotRequired
|
|
5
|
+
|
|
6
|
+
from deepagents.backends.protocol import BackendProtocol
|
|
7
|
+
from langchain.agents.middleware.types import (
|
|
8
|
+
AgentMiddleware,
|
|
9
|
+
AgentState,
|
|
10
|
+
ModelRequest,
|
|
11
|
+
ModelResponse,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class AgentMemoryState(AgentState):
    """State for the agent memory middleware."""

    # Populated once by before_agent/abefore_agent from /agent.md on the backend.
    agent_memory: NotRequired[str | None]
    """Long-term memory content for the agent."""
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# Backend-relative path of the file holding the agent's persistent instructions.
AGENT_MEMORY_FILE_PATH = "/agent.md"

# Long-term Memory Documentation
# Appended to every system prompt; {memory_path} is filled in by the middleware.
LONGTERM_MEMORY_SYSTEM_PROMPT = """

## Long-term Memory

You have access to a long-term memory system using the {memory_path} path prefix.
Files stored in {memory_path} persist across sessions and conversations.

Your system prompt is loaded from {memory_path}agent.md at startup. You can update your own instructions by editing this file.

**When to CHECK/READ memories (CRITICAL - do this FIRST):**
- **At the start of ANY new session**: Run `ls {memory_path}` to see what you know
- **BEFORE answering questions**: If asked "what do you know about X?" or "how do I do Y?", check `ls {memory_path}` for relevant files FIRST
- **When user asks you to do something**: Check if you have guides, examples, or patterns in {memory_path} before proceeding
- **When user references past work or conversations**: Search {memory_path} for related content
- **If you're unsure**: Check your memories rather than guessing or using only general knowledge

**Memory-first response pattern:**
1. User asks a question → Run `ls {memory_path}` to check for relevant files
2. If relevant files exist → Read them with `read_file {memory_path}[filename]`
3. Base your answer on saved knowledge (from memories) supplemented by general knowledge
4. If no relevant memories exist → Use general knowledge, then consider if this is worth saving

**When to update memories:**
- **IMMEDIATELY when the user describes your role or how you should behave** (e.g., "you are a web researcher", "you are an expert in X")
- **IMMEDIATELY when the user gives feedback on your work** - Before continuing, update memories to capture what was wrong and how to do it better
- When the user explicitly asks you to remember something
- When patterns or preferences emerge (coding styles, conventions, workflows)
- After significant work where context would help in future sessions

**Learning from feedback:**
- When user says something is better/worse, capture WHY and encode it as a pattern
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions
- When user says "you should remember X" or "be careful about Y", treat this as HIGH PRIORITY - update memories IMMEDIATELY
- Look for the underlying principle behind corrections, not just the specific mistake
- If it's something you "should have remembered", identify where that instruction should live permanently

**What to store where:**
- **{memory_path}agent.md**: Update this to modify your core instructions and behavioral patterns
- **Other {memory_path} files**: Use for project-specific context, reference information, or structured notes
- If you create additional memory files, add references to them in {memory_path}agent.md so you remember to consult them

The portion of your system prompt that comes from {memory_path}agent.md is marked with `<agent_memory>` tags so you can identify what instructions come from your persistent memory.

Example: `ls {memory_path}` to see what memories you have
Example: `read_file '{memory_path}deep-agents-guide.md'` to recall saved knowledge
Example: `edit_file('{memory_path}agent.md', ...)` to update your instructions
Example: `write_file('{memory_path}project_context.md', ...)` for project-specific notes, then reference it in agent.md

Remember: To interact with the longterm filesystem, you must prefix the filename with the {memory_path} path."""


# Default wrapper marking the memory-derived portion of the system prompt.
DEFAULT_MEMORY_SNIPPET = """<agent_memory>
{agent_memory}
</agent_memory>
"""
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class AgentMemoryMiddleware(AgentMiddleware):
    """Middleware for loading agent-specific long-term memory.

    This middleware loads the agent's long-term memory from a file (agent.md)
    and injects it into the system prompt. The memory is loaded once at the
    start of the conversation and stored in state.

    Args:
        backend: Backend to use for loading the agent memory file.
        memory_path: Path prefix (e.g. ``/memories/``) under which long-term
            memory files live; it is interpolated into the memory usage
            documentation appended to the system prompt.
        system_prompt_template: Optional custom template for how to inject
            the agent memory into the system prompt. Use {agent_memory} as
            a placeholder. Defaults to a simple ``<agent_memory>`` wrapper.

    Example:
        ```python
        from deepagents.backends.filesystem import FilesystemBackend
        from pathlib import Path

        # Set up backend pointing to agent's directory
        agent_dir = Path.home() / ".deepagents" / "my-agent"
        backend = FilesystemBackend(root_dir=agent_dir)

        # Create middleware
        middleware = AgentMemoryMiddleware(backend=backend, memory_path="/memories/")
        ```
    """

    state_schema = AgentMemoryState

    def __init__(
        self,
        *,
        backend: BackendProtocol,
        memory_path: str,
        system_prompt_template: str | None = None,
    ) -> None:
        """Initialize the agent memory middleware.

        Args:
            backend: Backend used to read the agent memory file.
            memory_path: Path prefix for long-term memory files.
            system_prompt_template: Optional custom template for injecting
                agent memory into the system prompt.
        """
        self.backend = backend
        self.memory_path = memory_path
        self.system_prompt_template = system_prompt_template or DEFAULT_MEMORY_SNIPPET

    def _load_memory_update(self, state: AgentMemoryState) -> AgentMemoryState | None:
        """Read agent.md via the backend if memory is not loaded yet.

        Returns a state update dict, or None when ``agent_memory`` is already
        present (missing key and explicit None are treated the same).
        """
        if state.get("agent_memory") is not None:
            return None
        return {"agent_memory": self.backend.read(AGENT_MEMORY_FILE_PATH)}

    def before_agent(
        self,
        state: AgentMemoryState,
        runtime,
    ) -> AgentMemoryState | None:
        """Load agent memory from file before agent execution.

        Args:
            state: Current agent state.
            runtime: Framework-provided runtime context (unused).

        Returns:
            A state update with ``agent_memory`` populated, or ``None`` when
            the memory has already been loaded.
        """
        return self._load_memory_update(state)

    async def abefore_agent(
        self,
        state: AgentMemoryState,
        runtime,
    ) -> AgentMemoryState | None:
        """(async) Load agent memory from file before agent execution.

        NOTE(review): the backend read here is synchronous — confirm whether
        BackendProtocol exposes an async read that should be awaited instead.

        Args:
            state: Current agent state.
            runtime: Framework-provided runtime context (unused).

        Returns:
            A state update with ``agent_memory`` populated, or ``None`` when
            the memory has already been loaded.
        """
        return self._load_memory_update(state)

    def _inject_memory(self, request: ModelRequest) -> None:
        """Rewrite ``request.system_prompt`` in place.

        Prepends the formatted agent-memory snippet to any existing system
        prompt, then appends the long-term memory documentation.
        """
        agent_memory = request.state.get("agent_memory", "")
        memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
        if request.system_prompt:
            request.system_prompt = memory_section + "\n\n" + request.system_prompt
        else:
            request.system_prompt = memory_section
        request.system_prompt = (
            request.system_prompt
            + "\n\n"
            + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
        )

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Inject agent memory into the system prompt.

        Args:
            request: The model request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The model response from the handler.
        """
        self._inject_memory(request)
        return handler(request)

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """(async) Inject agent memory into the system prompt.

        Args:
            request: The model request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The model response from the handler.
        """
        self._inject_memory(request)
        return await handler(request)
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
"""Command handlers for slash commands and bash execution."""
|
|
2
|
+
|
|
3
|
+
import subprocess
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
from langgraph.checkpoint.memory import InMemorySaver
|
|
7
|
+
|
|
8
|
+
from .config import COLORS, DEEP_AGENTS_ASCII, console
|
|
9
|
+
from .ui import TokenTracker, show_interactive_help
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def handle_command(command: str, agent, token_tracker: TokenTracker) -> str | bool:
    """Handle slash commands. Returns 'exit' to exit, True if handled, False to pass to agent."""
    # Normalize: lowercase, trim whitespace, drop any leading slashes.
    cmd = command.lower().strip().lstrip("/")

    if cmd in ["quit", "exit", "q"]:
        return "exit"

    if cmd == "clear":
        # Reset agent conversation state
        agent.checkpointer = InMemorySaver()

        # Reset token tracking to baseline
        token_tracker.reset()

        # Clear screen and show fresh UI
        console.clear()
        console.print(DEEP_AGENTS_ASCII, style=f"bold {COLORS['primary']}")
        console.print()
        console.print(
            "... Fresh start! Screen cleared and conversation reset.", style=COLORS["agent"]
        )
        console.print()
        return True

    if cmd == "help":
        show_interactive_help()
        return True

    if cmd == "tokens":
        token_tracker.display_session()
        return True

    # Unrecognized command: report it and treat the input as handled.
    console.print()
    console.print(f"[yellow]Unknown command: /{cmd}[/yellow]")
    console.print("[dim]Type /help for available commands.[/dim]")
    console.print()
    return True

    # NOTE(review): unreachable as written — the unknown-command branch above
    # always returns True, so callers never receive the documented False.
    return False
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def execute_bash_command(command: str) -> bool:
    """Run a shell command from the CLI, echoing its output. Always returns True."""
    # Trim whitespace and drop the leading "!" prefix(es).
    cmd = command.strip().lstrip("!")

    if not cmd:
        return True

    try:
        console.print()
        console.print(f"[dim]$ {cmd}[/dim]")

        # Run through the shell from the current directory, with a hard cap.
        completed = subprocess.run(
            cmd, check=False, shell=True, capture_output=True, text=True, timeout=30, cwd=Path.cwd()
        )

        # Echo captured streams without interpreting rich markup.
        if completed.stdout:
            console.print(completed.stdout, style=COLORS["dim"], markup=False)
        if completed.stderr:
            console.print(completed.stderr, style="red", markup=False)

        # Surface failures via the exit code.
        if completed.returncode != 0:
            console.print(f"[dim]Exit code: {completed.returncode}[/dim]")

        console.print()
    except subprocess.TimeoutExpired:
        console.print("[red]Command timed out after 30 seconds[/red]")
        console.print()
    except Exception as e:
        console.print(f"[red]Error executing command: {e}[/red]")
        console.print()
    return True
|