deepagents-cli 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of deepagents-cli might be problematic.

deepagents_cli/agent.py CHANGED
@@ -7,11 +7,11 @@ from pathlib import Path
  from deepagents import create_deep_agent
  from deepagents.backends import CompositeBackend
  from deepagents.backends.filesystem import FilesystemBackend
- from deepagents.middleware.agent_memory import AgentMemoryMiddleware
  from deepagents.middleware.resumable_shell import ResumableShellToolMiddleware
  from langchain.agents.middleware import HostExecutionPolicy
  from langgraph.checkpoint.memory import InMemorySaver

+ from .agent_memory import AgentMemoryMiddleware
  from .config import COLORS, config, console, get_default_coding_instructions


deepagents_cli/agent_memory.py ADDED
@@ -0,0 +1,222 @@
+ """Middleware for loading agent-specific long-term memory into the system prompt."""
+
+ from collections.abc import Awaitable, Callable
+ from typing import TYPE_CHECKING, Any
+
+ if TYPE_CHECKING:
+     from langgraph.runtime import Runtime
+
+ from langchain.agents.middleware.types import (
+     AgentMiddleware,
+     AgentState,
+     ModelRequest,
+     ModelResponse,
+ )
+ from typing_extensions import NotRequired, TypedDict
+
+ from deepagents.backends.protocol import BackendProtocol
+
+
+ class AgentMemoryState(AgentState):
+     """State for the agent memory middleware."""
+
+     agent_memory: NotRequired[str | None]
+     """Long-term memory content for the agent."""
+
+
+ AGENT_MEMORY_FILE_PATH = "/agent.md"
+
+ # Long-term Memory Documentation
+ LONGTERM_MEMORY_SYSTEM_PROMPT = """
+
+ ## Long-term Memory
+
+ You have access to a long-term memory system using the {memory_path} path prefix.
+ Files stored in {memory_path} persist across sessions and conversations.
+
+ Your system prompt is loaded from {memory_path}agent.md at startup. You can update your own instructions by editing this file.
+
+ **When to CHECK/READ memories (CRITICAL - do this FIRST):**
+ - **At the start of ANY new session**: Run `ls {memory_path}` to see what you know
+ - **BEFORE answering questions**: If asked "what do you know about X?" or "how do I do Y?", check `ls {memory_path}` for relevant files FIRST
+ - **When user asks you to do something**: Check if you have guides, examples, or patterns in {memory_path} before proceeding
+ - **When user references past work or conversations**: Search {memory_path} for related content
+ - **If you're unsure**: Check your memories rather than guessing or using only general knowledge
+
+ **Memory-first response pattern:**
+ 1. User asks a question → Run `ls {memory_path}` to check for relevant files
+ 2. If relevant files exist → Read them with `read_file {memory_path}[filename]`
+ 3. Base your answer on saved knowledge (from memories) supplemented by general knowledge
+ 4. If no relevant memories exist → Use general knowledge, then consider if this is worth saving
+
+ **When to update memories:**
+ - **IMMEDIATELY when the user describes your role or how you should behave** (e.g., "you are a web researcher", "you are an expert in X")
+ - **IMMEDIATELY when the user gives feedback on your work** - Before continuing, update memories to capture what was wrong and how to do it better
+ - When the user explicitly asks you to remember something
+ - When patterns or preferences emerge (coding styles, conventions, workflows)
+ - After significant work where context would help in future sessions
+
+ **Learning from feedback:**
+ - When user says something is better/worse, capture WHY and encode it as a pattern
+ - Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions
+ - When user says "you should remember X" or "be careful about Y", treat this as HIGH PRIORITY - update memories IMMEDIATELY
+ - Look for the underlying principle behind corrections, not just the specific mistake
+ - If it's something you "should have remembered", identify where that instruction should live permanently
+
+ **What to store where:**
+ - **{memory_path}agent.md**: Update this to modify your core instructions and behavioral patterns
+ - **Other {memory_path} files**: Use for project-specific context, reference information, or structured notes
+ - If you create additional memory files, add references to them in {memory_path}agent.md so you remember to consult them
+
+ The portion of your system prompt that comes from {memory_path}agent.md is marked with `<agent_memory>` tags so you can identify what instructions come from your persistent memory.
+
+ Example: `ls {memory_path}` to see what memories you have
+ Example: `read_file '{memory_path}deep-agents-guide.md'` to recall saved knowledge
+ Example: `edit_file('{memory_path}agent.md', ...)` to update your instructions
+ Example: `write_file('{memory_path}project_context.md', ...)` for project-specific notes, then reference it in agent.md
+
+ Remember: To interact with the longterm filesystem, you must prefix the filename with the {memory_path} path."""
+
+
+ DEFAULT_MEMORY_SNIPPET = """<agent_memory>
+ {agent_memory}
+ </agent_memory>
+ """
+
+ class AgentMemoryMiddleware(AgentMiddleware):
+     """Middleware for loading agent-specific long-term memory.
+
+     This middleware loads the agent's long-term memory from a file (agent.md)
+     and injects it into the system prompt. The memory is loaded once at the
+     start of the conversation and stored in state.
+
+     Args:
+         backend: Backend to use for loading the agent memory file.
+         memory_path: Path prefix (e.g. "/memories/") under which the agent's
+             persistent files live.
+         system_prompt_template: Optional custom template for how to inject
+             the agent memory into the system prompt. Use {agent_memory} as
+             a placeholder. Defaults to a simple section header.
+
+     Example:
+         ```python
+         from pathlib import Path
+
+         from deepagents.backends.filesystem import FilesystemBackend
+         from deepagents_cli.agent_memory import AgentMemoryMiddleware
+
+         # Set up backend pointing to agent's directory
+         agent_dir = Path.home() / ".deepagents" / "my-agent"
+         backend = FilesystemBackend(root_dir=agent_dir)
+
+         # Create middleware
+         middleware = AgentMemoryMiddleware(backend=backend, memory_path="/memories/")
+         ```
+     """
+
+     state_schema = AgentMemoryState
+
+     def __init__(
+         self,
+         *,
+         backend: BackendProtocol,
+         memory_path: str,
+         system_prompt_template: str | None = None,
+     ) -> None:
+         """Initialize the agent memory middleware.
+
+         Args:
+             backend: Backend to use for loading the agent memory file.
+             memory_path: Path prefix under which long-term memory files live.
+             system_prompt_template: Optional custom template for injecting
+                 agent memory into system prompt.
+         """
+         self.backend = backend
+         self.memory_path = memory_path
+         self.system_prompt_template = system_prompt_template or DEFAULT_MEMORY_SNIPPET
+
+     def before_agent(
+         self,
+         state: AgentMemoryState,
+         runtime,
+     ) -> AgentMemoryState:
+         """Load agent memory from file before agent execution.
+
+         Args:
+             state: Current agent state.
+             runtime: The agent runtime (unused here).
+
+         Returns:
+             Updated state with agent_memory populated, or None if the
+             memory was already loaded.
+         """
+         # Only load memory if it hasn't been loaded yet
+         if "agent_memory" not in state or state.get("agent_memory") is None:
+             file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
+             return {"agent_memory": file_data}
+
+     async def abefore_agent(
+         self,
+         state: AgentMemoryState,
+         runtime,
+     ) -> AgentMemoryState:
+         """(async) Load agent memory from file before agent execution.
+
+         Args:
+             state: Current agent state.
+             runtime: The agent runtime (unused here).
+
+         Returns:
+             Updated state with agent_memory populated, or None if the
+             memory was already loaded.
+         """
+         # Only load memory if it hasn't been loaded yet
+         if "agent_memory" not in state or state.get("agent_memory") is None:
+             file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
+             return {"agent_memory": file_data}
+
+     def wrap_model_call(
+         self,
+         request: ModelRequest,
+         handler: Callable[[ModelRequest], ModelResponse],
+     ) -> ModelResponse:
+         """Inject agent memory into the system prompt.
+
+         Args:
+             request: The model request being processed.
+             handler: The handler function to call with the modified request.
+
+         Returns:
+             The model response from the handler.
+         """
+         # Get agent memory from state
+         agent_memory = request.state.get("agent_memory", "")
+
+         memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
+         if request.system_prompt:
+             request.system_prompt = memory_section + "\n\n" + request.system_prompt
+         else:
+             request.system_prompt = memory_section
+         request.system_prompt = request.system_prompt + "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
+
+         return handler(request)
+
+     async def awrap_model_call(
+         self,
+         request: ModelRequest,
+         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+     ) -> ModelResponse:
+         """(async) Inject agent memory into the system prompt.
+
+         Args:
+             request: The model request being processed.
+             handler: The handler function to call with the modified request.
+
+         Returns:
+             The model response from the handler.
+         """
+         # Get agent memory from state
+         agent_memory = request.state.get("agent_memory", "")
+
+         memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
+         if request.system_prompt:
+             request.system_prompt = memory_section + "\n\n" + request.system_prompt
+         else:
+             request.system_prompt = memory_section
+         request.system_prompt = request.system_prompt + "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
+
+         return await handler(request)
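
Taken together, the new module moves agent memory handling into the CLI package itself. As a rough, hedged sketch of how the pieces above fit together (the `root_dir` keyword on `FilesystemBackend` is taken from the class docstring example and the `/memories/` prefix from `token_utils.py` further down; treat both as assumptions rather than documented API):

```python
# Minimal sketch, not part of the package: wiring AgentMemoryMiddleware as the
# signatures in this diff suggest.
from pathlib import Path

from deepagents.backends.filesystem import FilesystemBackend  # import path as used in agent.py above
from deepagents_cli.agent_memory import AgentMemoryMiddleware

agent_dir = Path.home() / ".deepagents" / "my-agent"  # hypothetical agent directory
backend = FilesystemBackend(root_dir=agent_dir)       # root_dir kwarg assumed from the docstring example

middleware = AgentMemoryMiddleware(backend=backend, memory_path="/memories/")

# before_agent() reads "/agent.md" through the backend into state["agent_memory"];
# wrap_model_call() then layers the final system prompt as:
#   <agent_memory>...</agent_memory> + base prompt + LONGTERM_MEMORY_SYSTEM_PROMPT
```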
deepagents_cli/config.py CHANGED
@@ -94,7 +94,7 @@ def get_default_coding_instructions() -> str:
      These are the immutable base instructions that cannot be modified by the agent.
      Long-term memory (agent.md) is handled separately by the middleware.
      """
-     default_prompt_path = Path(__file__).parent.parent / "default_agent_prompt.md"
+     default_prompt_path = Path(__file__).parent / "default_agent_prompt.md"
      return default_prompt_path.read_text()


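The one-line path fix above goes hand in hand with the RECORD change at the bottom of this diff: `default_agent_prompt.md` is now shipped inside the `deepagents_cli` package, so the prompt must be resolved relative to `config.py` itself. A small illustrative sketch (the install location is hypothetical):

```python
# Illustrative only: how the old and new expressions resolve for an installed config.py.
from pathlib import Path

config_file = Path("/site-packages/deepagents_cli/config.py")  # hypothetical install path

old_path = config_file.parent.parent / "default_agent_prompt.md"
new_path = config_file.parent / "default_agent_prompt.md"

print(old_path)  # /site-packages/default_agent_prompt.md -> not shipped, read_text() would raise
print(new_path)  # /site-packages/deepagents_cli/default_agent_prompt.md -> packaged in 0.0.7
```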
deepagents_cli/default_agent_prompt.md ADDED
@@ -0,0 +1,110 @@
+ You are an AI assistant that helps users with various tasks including coding, research, and analysis.
+
+ # Core Role
+ Your core role and behavior may be updated based on user feedback and instructions. When a user tells you how you should behave or what your role should be, update this memory file immediately to reflect that guidance.
+
+ ## Memory-First Protocol
+ You have access to a persistent memory system. ALWAYS follow this protocol:
+
+ **At session start:**
+ - Check `ls /memories/` to see what knowledge you have stored
+ - If your role description references specific topics, check /memories/ for relevant guides
+
+ **Before answering questions:**
+ - If asked "what do you know about X?" or "how do I do Y?" → Check `ls /memories/` FIRST
+ - If relevant memory files exist → Read them and base your answer on saved knowledge
+ - Prefer saved knowledge over general knowledge when available
+
+ **When learning new information:**
+ - If user teaches you something or asks you to remember → Save to `/memories/[topic].md`
+ - Use descriptive filenames: `/memories/deep-agents-guide.md` not `/memories/notes.md`
+ - After saving, verify by reading back the key points
+
+ **Important:** Your memories persist across sessions. Information stored in /memories/ is more reliable than general knowledge for topics you've specifically studied.
+
+ # Tone and Style
+ Be concise and direct. Answer in fewer than 4 lines unless the user asks for detail.
+ After working on a file, just stop - don't explain what you did unless asked.
+ Avoid unnecessary introductions or conclusions.
+
+ When you run non-trivial bash commands, briefly explain what they do.
+
+ ## Proactiveness
+ Take action when asked, but don't surprise users with unrequested actions.
+ If asked how to approach something, answer first before taking action.
+
+ ## Following Conventions
+ - Check existing code for libraries and frameworks before assuming availability
+ - Mimic existing code style, naming conventions, and patterns
+ - Never add comments unless asked
+
+ ## Task Management
+ Use write_todos for complex multi-step tasks (3+ steps). Mark tasks in_progress before starting, completed immediately after finishing.
+ For simple 1-2 step tasks, just do them without todos.
+
+ ## File Reading Best Practices
+
+ **CRITICAL**: When exploring codebases or reading multiple files, ALWAYS use pagination to prevent context overflow.
+
+ **Pattern for codebase exploration:**
+ 1. First scan: `read_file(path, limit=100)` - See file structure and key sections
+ 2. Targeted read: `read_file(path, offset=100, limit=200)` - Read specific sections if needed
+ 3. Full read: Only use `read_file(path)` without limit when necessary for editing
+
+ **When to paginate:**
+ - Reading any file >500 lines
+ - Exploring unfamiliar codebases (always start with limit=100)
+ - Reading multiple files in sequence
+ - Any research or investigation task
+
+ **When full read is OK:**
+ - Small files (<500 lines)
+ - Files you need to edit immediately after reading
+ - After confirming file size with first scan
+
+ **Example workflow:**
+ ```
+ Bad: read_file(/src/large_module.py) # Floods context with 2000+ lines
+ Good: read_file(/src/large_module.py, limit=100) # Scan structure first
+       read_file(/src/large_module.py, offset=100, limit=100) # Read relevant section
+ ```
+
+ ## Working with Subagents (task tool)
+ When delegating to subagents:
+ - **Use filesystem for large I/O**: If input instructions are large (>500 words) OR expected output is large, communicate via files
+   - Write input context/instructions to a file, tell subagent to read it
+   - Ask subagent to write their output to a file, then read it after they return
+   - This prevents token bloat and keeps context manageable in both directions
+ - **Parallelize independent work**: When tasks are independent, spawn parallel subagents to work simultaneously
+ - **Clear specifications**: Tell subagent exactly what format/structure you need in their response or output file
+ - **Main agent synthesizes**: Subagents gather/execute, main agent integrates results into final deliverable
+
+ ## Tools
+
+ ### execute_bash
+ Execute shell commands. Always quote paths with spaces.
+ Examples: `pytest /foo/bar/tests` (good), `cd /foo/bar && pytest tests` (bad)
+
+ ### File Tools
+ - read_file: Read file contents (use absolute paths)
+ - edit_file: Replace exact strings in files (must read first, provide unique old_string)
+ - write_file: Create or overwrite files
+ - ls: List directory contents
+ - glob: Find files by pattern (e.g., "**/*.py")
+ - grep: Search file contents
+
+ Always use absolute paths starting with /.
+
+ ### web_search
+ Search for documentation, error solutions, and code examples.
+
+ ### http_request
+ Make HTTP requests to APIs (GET, POST, etc.).
+
+ ## Code References
+ When referencing code, use format: `file_path:line_number`
+
+ ## Documentation
+ - Do NOT create excessive markdown summary/documentation files after completing work
+ - Focus on the work itself, not documenting what you did
+ - Only create documentation when explicitly requested
deepagents_cli/token_utils.py CHANGED
@@ -58,6 +58,6 @@ def calculate_baseline_tokens(model, agent_dir: Path, system_prompt: str) -> int
  def get_memory_system_prompt() -> str:
      """Get the long-term memory system prompt text."""
      # Import from agent_memory middleware
-     from deepagents.middleware.agent_memory import LONGTERM_MEMORY_SYSTEM_PROMPT
+     from .agent_memory import LONGTERM_MEMORY_SYSTEM_PROMPT

      return LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path="/memories/")
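
Note that the `{memory_path}` placeholder in `LONGTERM_MEMORY_SYSTEM_PROMPT` is used as a prefix with the trailing slash included (`{memory_path}agent.md`), which is why the value passed here is `"/memories/"`. A quick hedged check of the formatted output:

```python
# Hedged sketch: formatting the memory documentation as get_memory_system_prompt() does.
from deepagents_cli.agent_memory import LONGTERM_MEMORY_SYSTEM_PROMPT

prompt = LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path="/memories/")

# "{memory_path}agent.md" renders as "/memories/agent.md"; passing "/memories"
# without the trailing slash would produce "/memoriesagent.md" instead.
assert "/memories/agent.md" in prompt
```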
@@ -1,11 +1,11 @@
  Metadata-Version: 2.4
  Name: deepagents-cli
- Version: 0.0.5
+ Version: 0.0.7
  Summary: Deepagents CLI
  License: MIT
  Requires-Python: <4.0,>=3.11
  Description-Content-Type: text/markdown
- Requires-Dist: deepagents==0.2.3
+ Requires-Dist: deepagents==0.2.4
  Requires-Dist: requests
  Requires-Dist: rich>=13.0.0
  Requires-Dist: prompt-toolkit>=3.0.52
@@ -1,19 +1,21 @@
  deepagents_cli/__init__.py,sha256=2W9tHzQianR_Q0ku9hc_ZI3kUYsilXQ6I_kUnpg9bzg,108
  deepagents_cli/__main__.py,sha256=J9-RNZv_zxw4SyjWRI_N7k7m8G4--vclD8vKxYIiXPQ,128
- deepagents_cli/agent.py,sha256=4XCBPb_f-WN4uYCXUYvLath_9-uNYq0_mC2xXFiA4Ho,11087
+ deepagents_cli/agent.py,sha256=8GFi9XtfedYTL0J-f72ARb_glklIIFCyzI_9buAg3h4,11066
+ deepagents_cli/agent_memory.py,sha256=BRP8Dyuzl1ms4Eja-3nRHI3g2vNWfK8tUW6zBr2JJOc,9196
  deepagents_cli/cli.py,sha256=UE8l9crBCykfYSVb_NN9bhIhc0ECxsKnOjMC6DeoNdM,274
  deepagents_cli/commands.py,sha256=rHsHij1xGs13N_m46YcBP2M2_MRGqRbSlnWaZaMnoco,2606
- deepagents_cli/config.py,sha256=xVVc0JAERIgPKUr77sJ7ZTkT9LRgvZg7i4H6Fy_h8ts,5023
+ deepagents_cli/config.py,sha256=lSwgL0uSq28GEODnZ2V_tstwY_QahIo1LcqA5RZ6zk4,5016
+ deepagents_cli/default_agent_prompt.md,sha256=Pi9SvgOAa74qhgDUMexm2-S_OfUO_CTic3zdbUnuB4s,4964
  deepagents_cli/execution.py,sha256=nW_jFWoLJtvDfqeaPCGQJd3c8QwnsNV8SU4Y60eqtKE,27137
  deepagents_cli/file_ops.py,sha256=LQ7NTXPWLwePbiTBDA-22_VHxEGil7NpBltHZx1C7r4,12362
  deepagents_cli/input.py,sha256=B0bEJ_n5oPh6ra09lxOwYpebjko-z-_R4QOpSTX4fUs,9187
  deepagents_cli/main.py,sha256=iaN6FNwUwTwoTMrHOKqRrXtjTjuM0YXthAfPlcz-GeU,7144
  deepagents_cli/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- deepagents_cli/token_utils.py,sha256=45l3DjXgZAGAWgxXCnAx2nkY9t940B4M_rpAQmkLxUw,2460
+ deepagents_cli/token_utils.py,sha256=tEeghLW2-vsitc-ba9cklKYnjLgRpvK7rK7PRiUu8jA,2439
  deepagents_cli/tools.py,sha256=Av92Luq-vGgUr25DqErGi7aI6y6DdFSXLigffhNLxYk,4287
  deepagents_cli/ui.py,sha256=Sjs40onvNpZP7OLxitlspL0o4SJj96aFboVxw4R_Aos,18668
- deepagents_cli-0.0.5.dist-info/METADATA,sha256=UW0GdBPkumAwstBmzYujqppRN06EO51dQkNrwH1tT64,434
- deepagents_cli-0.0.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- deepagents_cli-0.0.5.dist-info/entry_points.txt,sha256=oXSXGkStJ_8zP1gRFiHMU_GozXzWrE_CMIE228_yDHQ,96
- deepagents_cli-0.0.5.dist-info/top_level.txt,sha256=jEtsyDRyzAREUkw_cNOYGJqp72yvMICTBUaMV110w80,15
- deepagents_cli-0.0.5.dist-info/RECORD,,
+ deepagents_cli-0.0.7.dist-info/METADATA,sha256=iG5whX5HTqTowo1Pwk1I75OvPwqGRuPFCGXBIKsFGhM,434
+ deepagents_cli-0.0.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ deepagents_cli-0.0.7.dist-info/entry_points.txt,sha256=oXSXGkStJ_8zP1gRFiHMU_GozXzWrE_CMIE228_yDHQ,96
+ deepagents_cli-0.0.7.dist-info/top_level.txt,sha256=jEtsyDRyzAREUkw_cNOYGJqp72yvMICTBUaMV110w80,15
+ deepagents_cli-0.0.7.dist-info/RECORD,,