deepagents-cli 0.0.3__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of deepagents-cli might be problematic.

Files changed (41)
  1. deepagents_cli/__init__.py +5 -0
  2. deepagents_cli/__main__.py +6 -0
  3. deepagents_cli/agent.py +267 -0
  4. deepagents_cli/cli.py +13 -0
  5. deepagents_cli/commands.py +86 -0
  6. deepagents_cli/config.py +138 -0
  7. deepagents_cli/execution.py +644 -0
  8. deepagents_cli/file_ops.py +347 -0
  9. deepagents_cli/input.py +249 -0
  10. deepagents_cli/main.py +217 -0
  11. deepagents_cli/py.typed +0 -0
  12. deepagents_cli/tools.py +140 -0
  13. deepagents_cli/ui.py +455 -0
  14. deepagents_cli-0.0.4.dist-info/METADATA +18 -0
  15. deepagents_cli-0.0.4.dist-info/RECORD +18 -0
  16. deepagents_cli-0.0.4.dist-info/entry_points.txt +3 -0
  17. deepagents_cli-0.0.4.dist-info/top_level.txt +1 -0
  18. deepagents/__init__.py +0 -7
  19. deepagents/cli.py +0 -567
  20. deepagents/default_agent_prompt.md +0 -64
  21. deepagents/graph.py +0 -144
  22. deepagents/memory/__init__.py +0 -17
  23. deepagents/memory/backends/__init__.py +0 -15
  24. deepagents/memory/backends/composite.py +0 -250
  25. deepagents/memory/backends/filesystem.py +0 -330
  26. deepagents/memory/backends/state.py +0 -206
  27. deepagents/memory/backends/store.py +0 -351
  28. deepagents/memory/backends/utils.py +0 -319
  29. deepagents/memory/protocol.py +0 -164
  30. deepagents/middleware/__init__.py +0 -13
  31. deepagents/middleware/agent_memory.py +0 -207
  32. deepagents/middleware/filesystem.py +0 -615
  33. deepagents/middleware/patch_tool_calls.py +0 -44
  34. deepagents/middleware/subagents.py +0 -481
  35. deepagents/pretty_cli.py +0 -289
  36. deepagents_cli-0.0.3.dist-info/METADATA +0 -551
  37. deepagents_cli-0.0.3.dist-info/RECORD +0 -24
  38. deepagents_cli-0.0.3.dist-info/entry_points.txt +0 -2
  39. deepagents_cli-0.0.3.dist-info/licenses/LICENSE +0 -21
  40. deepagents_cli-0.0.3.dist-info/top_level.txt +0 -1
  41. {deepagents_cli-0.0.3.dist-info → deepagents_cli-0.0.4.dist-info}/WHEEL +0 -0
@@ -1,164 +0,0 @@
- """Protocol definition for pluggable memory backends.
-
- This module defines the MemoryBackend protocol that all backend implementations
- must follow. Backends can store files in different locations (state, filesystem,
- database, etc.) and provide a uniform interface for file operations.
- """
-
- from typing import TYPE_CHECKING, Optional, Protocol, runtime_checkable
- from langgraph.types import Command
-
- if TYPE_CHECKING:
-     from langchain.tools import ToolRuntime
-
-
- @runtime_checkable
- class MemoryBackend(Protocol):
-     """Protocol for pluggable memory backends.
-
-     Backends can store files in different locations (state, filesystem, database, etc.)
-     and provide a uniform interface for file operations.
-
-     All file data is represented as dicts with the following structure:
-     {
-         "content": list[str],    # Lines of text content
-         "created_at": str,       # ISO format timestamp
-         "modified_at": str,      # ISO format timestamp
-     }
-     """
-
-     def ls(self, path: str) -> list[str]:
-         """List all file paths in a directory.
-
-         Args:
-             path: Absolute path to directory (e.g., "/", "/subdir/", "/memories/")
-
-         Returns:
-             List of absolute file paths in the specified directory.
-         """
-         ...
-
-     def read(
-         self,
-         file_path: str,
-         offset: int = 0,
-         limit: int = 2000,
-     ) -> str:
-         """Read file content with line numbers.
-
-         Args:
-             file_path: Absolute file path (e.g., "/notes.txt", "/memories/agent.md")
-             offset: Line offset to start reading from (0-indexed)
-             limit: Maximum number of lines to read
-
-         Returns:
-             Formatted file content with line numbers (cat -n style), or error message.
-             Returns "Error: File '{file_path}' not found" if file doesn't exist.
-             Returns "System reminder: File exists but has empty contents" for empty files.
-         """
-         ...
-
-     def write(
-         self,
-         file_path: str,
-         content: str,
-     ) -> Command | str:
-         """Create a new file with content.
-
-         Args:
-             file_path: Absolute file path (e.g., "/notes.txt", "/memories/agent.md")
-             content: File content as a string
-
-         Returns:
-             - Command object for StateBackend (uses_state=True) to update LangGraph state
-             - Success message string for other backends, or error if file already exists
-
-         Error cases:
-             - Returns error message if file already exists (should use edit instead)
-         """
-         ...
-
-     def edit(
-         self,
-         file_path: str,
-         old_string: str,
-         new_string: str,
-         replace_all: bool = False,
-     ) -> Command | str:
-         """Edit a file by replacing string occurrences.
-
-         Args:
-             file_path: Absolute file path (e.g., "/notes.txt", "/memories/agent.md")
-             old_string: String to find and replace
-             new_string: Replacement string
-             replace_all: If True, replace all occurrences; if False, require unique match
-
-         Returns:
-             - Command object for StateBackend (uses_state=True) to update LangGraph state
-             - Success message string for other backends, or error message on failure
-
-         Error cases:
-             - "Error: File '{file_path}' not found" if file doesn't exist
-             - "Error: String not found in file: '{old_string}'" if string not found
-             - "Error: String '{old_string}' appears {n} times. Use replace_all=True..."
-               if multiple matches found and replace_all=False
-         """
-         ...
-
-     def delete(self, file_path: str) -> Command | None:
-         """Delete a file by path.
-
-         Args:
-             file_path: Absolute file path to delete
-
-         Returns:
-             - None for backends that modify storage directly (uses_state=False)
-             - Command object for StateBackend (uses_state=True) to update LangGraph state
-         """
-         ...
-
-     def grep(
-         self,
-         pattern: str,
-         path: str = "/",
-         glob: Optional[str] = None,
-         output_mode: str = "files_with_matches",
-     ) -> str:
-         """Search for a pattern in files.
-
-         TODO: This implementation is significantly less capable than Claude Code's Grep tool.
-         Missing features to add in the future:
-         - Context lines: -A (after), -B (before), -C (context) parameters
-         - Line numbers: -n parameter to show line numbers in output
-         - Case sensitivity: -i parameter for case-insensitive search
-         - Output limiting: head_limit parameter for large result sets
-         - File type filter: type parameter (e.g., "py", "js")
-         - Multiline support: multiline parameter for cross-line pattern matching
-         - Pattern semantics: Clarify if pattern is regex or literal string
-         See /memories/memory_backend_vs_claude_code_comparison.md for full details.
-
-         Args:
-             pattern: String pattern to search for (currently literal string)
-             path: Path to search in (default "/")
-             glob: Optional glob pattern to filter files (e.g., "*.py")
-             output_mode: Output format - "files_with_matches", "content", or "count"
-                 - files_with_matches: List file paths that contain matches
-                 - content: Show matching lines with file paths and line numbers
-                 - count: Show count of matches per file
-
-         Returns:
-             Formatted search results based on output_mode, or message if no matches found.
-         """
-         ...
-
-     def glob(self, pattern: str, path: str = "/") -> list[str]:
-         """Find files matching a glob pattern.
-
-         Args:
-             pattern: Glob pattern (e.g., "**/*.py", "*.txt", "/subdir/**/*.md")
-             path: Base path to search from (default: "/")
-
-         Returns:
-             List of absolute file paths matching the pattern.
-         """
-         ...
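
The removed protocol above defines the uniform file-operation surface (ls, read, write, edit, delete, grep, glob) that every memory backend exposed in 0.0.3. For illustration only, a minimal in-memory backend covering part of this protocol might look like the sketch below; the InMemoryBackend name and its storage details are hypothetical and not part of either package version, and a complete backend would also implement edit, delete, grep, and glob.

```python
from datetime import datetime, timezone


class InMemoryBackend:
    """Illustrative, partial backend: stores file data dicts keyed by absolute path."""

    def __init__(self) -> None:
        # Maps absolute path -> {"content": list[str], "created_at": str, "modified_at": str}
        self._files: dict[str, dict] = {}

    def ls(self, path: str) -> list[str]:
        # Simplified: returns every stored path under the prefix, including nested ones.
        prefix = path if path.endswith("/") else path + "/"
        return [p for p in self._files if p.startswith(prefix)]

    def read(self, file_path: str, offset: int = 0, limit: int = 2000) -> str:
        if file_path not in self._files:
            return f"Error: File '{file_path}' not found"
        lines = self._files[file_path]["content"][offset : offset + limit]
        if not lines:
            return "System reminder: File exists but has empty contents"
        # cat -n style output, as the protocol's read() docstring describes.
        return "\n".join(f"{offset + i + 1:6}\t{line}" for i, line in enumerate(lines))

    def write(self, file_path: str, content: str) -> str:
        if file_path in self._files:
            return f"Error: File '{file_path}' already exists; use edit instead"
        now = datetime.now(timezone.utc).isoformat()
        self._files[file_path] = {
            "content": content.splitlines(),
            "created_at": now,
            "modified_at": now,
        }
        return f"Created file '{file_path}'"
```
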
@@ -1,13 +0,0 @@
- """Middleware for the DeepAgent."""
-
- from deepagents.middleware.agent_memory import AgentMemoryMiddleware
- from deepagents.middleware.filesystem import FilesystemMiddleware
- from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
-
- __all__ = [
-     "AgentMemoryMiddleware",
-     "CompiledSubAgent",
-     "FilesystemMiddleware",
-     "SubAgent",
-     "SubAgentMiddleware",
- ]
@@ -1,207 +0,0 @@
- """Middleware for loading agent-specific long-term memory into the system prompt."""
-
- from collections.abc import Awaitable, Callable
- from typing import TYPE_CHECKING, Any
-
- if TYPE_CHECKING:
-     from langgraph.runtime import Runtime
-
- from langchain.agents.middleware.types import (
-     AgentMiddleware,
-     AgentState,
-     ModelRequest,
-     ModelResponse,
- )
- from typing_extensions import NotRequired, TypedDict
-
- from deepagents.memory.protocol import MemoryBackend
-
-
- class AgentMemoryState(AgentState):
-     """State for the agent memory middleware."""
-
-     agent_memory: NotRequired[str | None]
-     """Long-term memory content for the agent."""
-
-
- AGENT_MEMORY_FILE_PATH = "/agent.md"
-
- # Long-term Memory Documentation
- LONGTERM_MEMORY_SYSTEM_PROMPT = """
-
- ## Long-term Memory
-
- You have access to a long-term memory system using the {memory_path} path prefix.
- Files stored in {memory_path} persist across sessions and conversations.
-
- Your system prompt is loaded from {memory_path}agent.md at startup. You can update your own instructions by editing this file.
-
- **When to update memories:**
- - **IMMEDIATELY when the user describes your role or how you should behave** (e.g., "you are a web researcher", "you are an expert in X")
- - **IMMEDIATELY when the user gives feedback on your work** - Before continuing, update memories to capture what was wrong and how to do it better
- - When the user explicitly asks you to remember something
- - When patterns or preferences emerge (coding styles, conventions, workflows)
- - After significant work where context would help in future sessions
-
- **Learning from feedback:**
- - When user says something is better/worse, capture WHY and encode it as a pattern
- - Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions
- - When user says "you should remember X" or "be careful about Y", treat this as HIGH PRIORITY - update memories IMMEDIATELY
- - Look for the underlying principle behind corrections, not just the specific mistake
- - If it's something you "should have remembered", identify where that instruction should live permanently
-
- **What to store where:**
- - **{memory_path}agent.md**: Update this to modify your core instructions and behavioral patterns
- - **Other {memory_path} files**: Use for project-specific context, reference information, or structured notes
- - If you create additional memory files, add references to them in {memory_path}agent.md so you remember to consult them
-
- The portion of your system prompt that comes from {memory_path}agent.md is marked with `<agent_memory>` tags so you can identify what instructions come from your persistent memory.
-
- Example: `edit_file('{memory_path}agent.md', ...)` to update your instructions
- Example: `write_file('{memory_path}project_context.md', ...)` for project-specific notes, then reference it in agent.md
-
- Remember: To interact with the longterm filesystem, you must prefix the filename with the {memory_path} path."""
-
-
- DEFAULT_MEMORY_SNIPPET = """<agent_memory>
- {agent_memory}
- </agent_memory>
- """
-
- class AgentMemoryMiddleware(AgentMiddleware):
-     """Middleware for loading agent-specific long-term memory.
-
-     This middleware loads the agent's long-term memory from a file (agent.md)
-     and injects it into the system prompt. The memory is loaded once at the
-     start of the conversation and stored in state.
-
-     Args:
-         backend: Backend to use for loading the agent memory file.
-         system_prompt_template: Optional custom template for how to inject
-             the agent memory into the system prompt. Use {agent_memory} as
-             a placeholder. Defaults to a simple section header.
-
-     Example:
-         ```python
-         from deepagents.middleware.agent_memory import AgentMemoryMiddleware
-         from deepagents.memory.backends import FilesystemBackend
-         from pathlib import Path
-
-         # Set up backend pointing to agent's directory
-         agent_dir = Path.home() / ".deepagents" / "my-agent"
-         backend = FilesystemBackend(root_dir=agent_dir)
-
-         # Create middleware
-         middleware = AgentMemoryMiddleware(backend=backend)
-         ```
-     """
-
-     state_schema = AgentMemoryState
-
-     def __init__(
-         self,
-         *,
-         backend: MemoryBackend,
-         memory_path: str,
-         system_prompt_template: str | None = None,
-     ) -> None:
-         """Initialize the agent memory middleware.
-
-         Args:
-             backend: Backend to use for loading the agent memory file.
-             system_prompt_template: Optional custom template for injecting
-                 agent memory into system prompt.
-         """
-         self.backend = backend
-         self.memory_path = memory_path
-         self.system_prompt_template = system_prompt_template or DEFAULT_MEMORY_SNIPPET
-
-     def before_agent(
-         self,
-         state: AgentMemoryState,
-         runtime,
-     ) -> AgentMemoryState:
-         """Load agent memory from file before agent execution.
-
-         Args:
-             state: Current agent state.
-             handler: Handler function to call after loading memory.
-
-         Returns:
-             Updated state with agent_memory populated.
-         """
-         # Only load memory if it hasn't been loaded yet
-         if "agent_memory" not in state or state.get("agent_memory") is None:
-             file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
-             return {"agent_memory": file_data}
-
-     async def abefore_agent(
-         self,
-         state: AgentMemoryState,
-         runtime,
-     ) -> AgentMemoryState:
-         """(async) Load agent memory from file before agent execution.
-
-         Args:
-             state: Current agent state.
-             handler: Handler function to call after loading memory.
-
-         Returns:
-             Updated state with agent_memory populated.
-         """
-         # Only load memory if it hasn't been loaded yet
-         if "agent_memory" not in state or state.get("agent_memory") is None:
-             file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
-             return {"agent_memory": file_data}
-
-     def wrap_model_call(
-         self,
-         request: ModelRequest,
-         handler: Callable[[ModelRequest], ModelResponse],
-     ) -> ModelResponse:
-         """Inject agent memory into the system prompt.
-
-         Args:
-             request: The model request being processed.
-             handler: The handler function to call with the modified request.
-
-         Returns:
-             The model response from the handler.
-         """
-         # Get agent memory from state
-         agent_memory = request.state.get("agent_memory", "")
-
-         memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
-         if request.system_prompt:
-             request.system_prompt = memory_section + "\n\n" + request.system_prompt
-         else:
-             request.system_prompt = memory_section
-         request.system_prompt = request.system_prompt + "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
-
-         return handler(request)
-
-     async def awrap_model_call(
-         self,
-         request: ModelRequest,
-         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
-     ) -> ModelResponse:
-         """(async) Inject agent memory into the system prompt.
-
-         Args:
-             request: The model request being processed.
-             handler: The handler function to call with the modified request.
-
-         Returns:
-             The model response from the handler.
-         """
-         # Get agent memory from state
-         agent_memory = request.state.get("agent_memory", "")
-
-         memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
-         if request.system_prompt:
-             request.system_prompt = memory_section + "\n\n" + request.system_prompt
-         else:
-             request.system_prompt = memory_section
-         request.system_prompt = request.system_prompt + "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
-
-         return await handler(request)
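
The removed wrap_model_call and awrap_model_call hooks assemble the final system prompt in two steps: the formatted `<agent_memory>` snippet is prepended to any existing system prompt, then the long-term memory documentation, with `{memory_path}` filled in, is appended. A standalone sketch of that assembly logic is shown below; the helper name assemble_system_prompt and the sample values are illustrative, not part of the package.

```python
# Sketch of the prompt assembly performed in wrap_model_call (values are placeholders).
DEFAULT_MEMORY_SNIPPET = "<agent_memory>\n{agent_memory}\n</agent_memory>\n"


def assemble_system_prompt(
    agent_memory: str,
    base_system_prompt: str | None,
    memory_path: str,
    longterm_doc_template: str,
) -> str:
    memory_section = DEFAULT_MEMORY_SNIPPET.format(agent_memory=agent_memory)
    # Prepend the memory section to the existing prompt, or use it alone.
    prompt = (
        memory_section + "\n\n" + base_system_prompt
        if base_system_prompt
        else memory_section
    )
    # Append the long-term memory documentation with the path prefix filled in.
    return prompt + "\n\n" + longterm_doc_template.format(memory_path=memory_path)


prompt = assemble_system_prompt(
    agent_memory="Prefer concise answers.",
    base_system_prompt="You are a helpful assistant.",
    memory_path="/memories/",
    longterm_doc_template="Files stored in {memory_path} persist across sessions.",
)
```
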