deepagents-cli 0.0.2__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of deepagents-cli might be problematic. Click here for more details.

@@ -0,0 +1,5 @@
1
"""DeepAgents CLI - Interactive AI coding assistant."""

from .main import cli_main

# Public API of the package: only the console-script entrypoint is exported.
__all__ = ["cli_main"]
@@ -0,0 +1,6 @@
1
"""Allow running the CLI as: python -m deepagents.cli"""

from .main import cli_main

# Delegate straight to the same entrypoint the console script uses.
if __name__ == "__main__":
    cli_main()
@@ -0,0 +1,267 @@
1
+ """Agent management and creation for the CLI."""
2
+
3
+ import os
4
+ import shutil
5
+ from pathlib import Path
6
+
7
+ from deepagents import create_deep_agent
8
+ from deepagents.backends import CompositeBackend
9
+ from deepagents.backends.filesystem import FilesystemBackend
10
+ from deepagents.middleware.agent_memory import AgentMemoryMiddleware
11
+ from deepagents.middleware.resumable_shell import ResumableShellToolMiddleware
12
+ from langchain.agents.middleware import HostExecutionPolicy
13
+ from langgraph.checkpoint.memory import InMemorySaver
14
+
15
+ from .config import COLORS, config, console, get_default_coding_instructions
16
+
17
+
18
def list_agents():
    """Print every agent found under ~/.deepagents, or a hint when none exist."""
    agents_dir = Path.home() / ".deepagents"

    # Nothing to list: directory missing or empty.
    if not agents_dir.exists() or not any(agents_dir.iterdir()):
        console.print("[yellow]No agents found.[/yellow]")
        console.print(
            "[dim]Agents will be created in ~/.deepagents/ when you first use them.[/dim]",
            style=COLORS["dim"],
        )
        return

    console.print("\n[bold]Available Agents:[/bold]\n", style=COLORS["primary"])

    for entry in sorted(agents_dir.iterdir()):
        if not entry.is_dir():
            continue
        name = entry.name
        # An agent is considered complete once it has an agent.md file.
        if (entry / "agent.md").exists():
            console.print(f"  • [bold]{name}[/bold]", style=COLORS["primary"])
        else:
            console.print(
                f"  • [bold]{name}[/bold] [dim](incomplete)[/dim]", style=COLORS["tool"]
            )
        console.print(f"    {entry}", style=COLORS["dim"])

    console.print()
47
+
48
+
49
def reset_agent(agent_name: str, source_agent: str | None = None):
    """Reset an agent to default or copy from another agent.

    Args:
        agent_name: Agent whose directory under ~/.deepagents is rebuilt.
        source_agent: Optional agent whose agent.md is copied; when omitted,
            the default coding instructions are used instead.
    """
    agents_dir = Path.home() / ".deepagents"
    agent_dir = agents_dir / agent_name

    if source_agent:
        source_md = agents_dir / source_agent / "agent.md"

        # Bail out early if the source agent is missing or incomplete.
        if not source_md.exists():
            console.print(
                f"[bold red]Error:[/bold red] Source agent '{source_agent}' not found or has no agent.md"
            )
            return

        source_content = source_md.read_text()
        action_desc = f"contents of agent '{source_agent}'"
    else:
        source_content = get_default_coding_instructions()
        action_desc = "default"

    # Wipe any existing agent state before recreating the directory.
    if agent_dir.exists():
        shutil.rmtree(agent_dir)
        console.print(f"Removed existing agent directory: {agent_dir}", style=COLORS["tool"])

    agent_dir.mkdir(parents=True, exist_ok=True)
    agent_md = agent_dir / "agent.md"
    agent_md.write_text(source_content)

    console.print(f"✓ Agent '{agent_name}' reset to {action_desc}", style=COLORS["primary"])
    console.print(f"Location: {agent_dir}\n", style=COLORS["dim"])
80
+
81
+
82
def create_agent_with_config(model, assistant_id: str, tools: list):
    """Create and configure an agent with the specified model and tools.

    Args:
        model: Chat model instance the agent will run on.
        assistant_id: Agent name; its persistent state lives in
            ~/.deepagents/<assistant_id>/ (created here if missing).
        tools: Extra tools to expose to the agent.

    Returns:
        A configured deep agent with an in-memory checkpointer attached.
    """
    # Shell commands execute on the host, rooted at the current working directory.
    shell_middleware = ResumableShellToolMiddleware(
        workspace_root=os.getcwd(), execution_policy=HostExecutionPolicy()
    )

    # For long-term memory, point to ~/.deepagents/AGENT_NAME/ with /memories/ prefix
    agent_dir = Path.home() / ".deepagents" / assistant_id
    agent_dir.mkdir(parents=True, exist_ok=True)
    agent_md = agent_dir / "agent.md"
    if not agent_md.exists():
        # Seed a brand-new agent with the default coding instructions.
        source_content = get_default_coding_instructions()
        agent_md.write_text(source_content)

    # Long-term backend - rooted at agent directory
    # This handles both /memories/ files and /agent.md
    long_term_backend = FilesystemBackend(root_dir=agent_dir, virtual_mode=True)

    # Composite backend: current working directory for default, agent directory for /memories/
    backend = CompositeBackend(
        default=FilesystemBackend(), routes={"/memories/": long_term_backend}
    )

    # Use the same backend for agent memory middleware
    agent_middleware = [
        AgentMemoryMiddleware(backend=long_term_backend, memory_path="/memories/"),
        shell_middleware,
    ]
    # System prompt: working directory, memory usage, HITL etiquette, web-search
    # and todo-list conventions. {Path.cwd()} is interpolated at creation time.
    system_prompt = f"""### Current Working Directory

The filesystem backend is currently operating in: `{Path.cwd()}`

### Memory System Reminder

Your long-term memory is stored in /memories/ and persists across sessions.

**IMPORTANT - Check memories before answering:**
- When asked "what do you know about X?" → Run `ls /memories/` FIRST, then read relevant files
- When starting a task → Check if you have guides or examples in /memories/
- At the beginning of new sessions → Consider checking `ls /memories/` to see what context you have

Base your answers on saved knowledge (from /memories/) when available, supplemented by general knowledge.

### Human-in-the-Loop Tool Approval

Some tool calls require user approval before execution. When a tool call is rejected by the user:
1. Accept their decision immediately - do NOT retry the same command
2. Explain that you understand they rejected the action
3. Suggest an alternative approach or ask for clarification
4. Never attempt the exact same rejected command again

Respect the user's decisions and work with them collaboratively.

### Web Search Tool Usage

When you use the web_search tool:
1. The tool will return search results with titles, URLs, and content excerpts
2. You MUST read and process these results, then respond naturally to the user
3. NEVER show raw JSON or tool results directly to the user
4. Synthesize the information from multiple sources into a coherent answer
5. Cite your sources by mentioning page titles or URLs when relevant
6. If the search doesn't find what you need, explain what you found and ask clarifying questions

The user only sees your text responses - not tool results. Always provide a complete, natural language answer after using web_search.

### Todo List Management

When using the write_todos tool:
1. Keep the todo list MINIMAL - aim for 3-6 items maximum
2. Only create todos for complex, multi-step tasks that truly need tracking
3. Break down work into clear, actionable items without over-fragmenting
4. For simple tasks (1-2 steps), just do them directly without creating todos
5. When first creating a todo list for a task, ALWAYS ask the user if the plan looks good before starting work
   - Create the todos, let them render, then ask: "Does this plan look good?" or similar
   - Wait for the user's response before marking the first todo as in_progress
   - If they want changes, adjust the plan accordingly
6. Update todo status promptly as you complete each item

The todo list is a planning tool - use it judiciously to avoid overwhelming the user with excessive task tracking."""

    # Helper functions for formatting tool descriptions in HITL prompts
    def format_write_file_description(tool_call: dict) -> str:
        """Format write_file tool call for approval prompt."""
        args = tool_call.get("args", {})
        file_path = args.get("file_path", "unknown")
        content = args.get("content", "")

        # Distinguish overwriting an existing file from creating a new one.
        action = "Overwrite" if os.path.exists(file_path) else "Create"
        line_count = len(content.splitlines())
        size = len(content.encode("utf-8"))

        return f"File: {file_path}\nAction: {action} file\nLines: {line_count} · Bytes: {size}"

    def format_edit_file_description(tool_call: dict) -> str:
        """Format edit_file tool call for approval prompt."""
        args = tool_call.get("args", {})
        file_path = args.get("file_path", "unknown")
        old_string = args.get("old_string", "")
        new_string = args.get("new_string", "")
        replace_all = bool(args.get("replace_all", False))

        # Net character change of a single replacement (shown with a sign).
        delta = len(new_string) - len(old_string)

        return (
            f"File: {file_path}\n"
            f"Action: Replace text ({'all occurrences' if replace_all else 'single occurrence'})\n"
            f"Snippet delta: {delta:+} characters"
        )

    def format_web_search_description(tool_call: dict) -> str:
        """Format web_search tool call for approval prompt."""
        args = tool_call.get("args", {})
        query = args.get("query", "unknown")
        max_results = args.get("max_results", 5)

        return f"Query: {query}\nMax results: {max_results}\n\n⚠️ This will use Tavily API credits"

    def format_task_description(tool_call: dict) -> str:
        """Format task (subagent) tool call for approval prompt."""
        args = tool_call.get("args", {})
        description = args.get("description", "unknown")
        prompt = args.get("prompt", "")

        # Truncate prompt if too long
        prompt_preview = prompt[:300]
        if len(prompt) > 300:
            prompt_preview += "..."

        return (
            f"Task: {description}\n\n"
            f"Instructions to subagent:\n"
            f"{'─' * 40}\n"
            f"{prompt_preview}\n"
            f"{'─' * 40}\n\n"
            f"⚠️ Subagent will have access to file operations and shell commands"
        )

    # Configure human-in-the-loop for potentially destructive tools
    from langchain.agents.middleware import InterruptOnConfig

    shell_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: (
            f"Shell Command: {tool_call['args'].get('command', 'N/A')}\n"
            f"Working Directory: {os.getcwd()}"
        ),
    }

    write_file_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_write_file_description(tool_call),
    }

    edit_file_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_edit_file_description(tool_call),
    }

    web_search_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_web_search_description(tool_call),
    }

    task_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_task_description(tool_call),
    }

    agent = create_deep_agent(
        model=model,
        system_prompt=system_prompt,
        tools=tools,
        backend=backend,
        middleware=agent_middleware,
        interrupt_on={
            "shell": shell_interrupt_config,
            "write_file": write_file_interrupt_config,
            "edit_file": edit_file_interrupt_config,
            "web_search": web_search_interrupt_config,
            "task": task_interrupt_config,
        },
    ).with_config(config)

    # Conversation state is kept in memory only; it does not survive restarts.
    agent.checkpointer = InMemorySaver()

    return agent
deepagents_cli/cli.py ADDED
@@ -0,0 +1,13 @@
1
+ """Main CLI entrypoint for deepagents."""
2
+
3
+
4
def cli_main() -> None:
    """Main entrypoint for the deepagents CLI.

    This function is registered as a console script entrypoint in pyproject.toml.
    """
    greeting = "I'm alive!"
    print(greeting)


if __name__ == "__main__":
    cli_main()
@@ -0,0 +1,86 @@
1
+ """Command handlers for slash commands and bash execution."""
2
+
3
+ import subprocess
4
+ from pathlib import Path
5
+
6
+ from langgraph.checkpoint.memory import InMemorySaver
7
+
8
+ from .config import COLORS, DEEP_AGENTS_ASCII, console
9
+ from .ui import TokenTracker, show_interactive_help
10
+
11
+
12
def handle_command(command: str, agent, token_tracker: TokenTracker) -> str | bool:
    """Handle slash commands.

    Returns:
        "exit" when the user asked to quit; True when the command was handled
        (including unknown slash commands, which print an error). False is
        reserved for input that should be passed through to the agent.
    """
    cmd = command.lower().strip().lstrip("/")

    if cmd in {"quit", "exit", "q"}:
        return "exit"

    if cmd == "clear":
        # Reset agent conversation state by swapping in a fresh checkpointer.
        agent.checkpointer = InMemorySaver()

        # Clear screen and show fresh UI
        console.clear()
        console.print(DEEP_AGENTS_ASCII, style=f"bold {COLORS['primary']}")
        console.print()
        console.print(
            "... Fresh start! Screen cleared and conversation reset.", style=COLORS["agent"]
        )
        console.print()
        return True

    if cmd == "help":
        show_interactive_help()
        return True

    if cmd == "tokens":
        token_tracker.display_session()
        return True

    # Unknown slash command: report it and treat it as handled.
    # (Note: the original ended with an unreachable `return False` after this.)
    console.print()
    console.print(f"[yellow]Unknown command: /{cmd}[/yellow]")
    console.print("[dim]Type /help for available commands.[/dim]")
    console.print()
    return True
48
+
49
+
50
def execute_bash_command(command: str, timeout: int = 30) -> bool:
    """Execute a bash command and display output. Returns True if handled.

    Args:
        command: Raw user input; a leading "!" prefix is stripped.
        timeout: Seconds to wait before killing the command (default 30,
            preserving the previous hard-coded behavior).
    """
    cmd = command.strip().lstrip("!")

    # An empty command (just "!") is a no-op, but still counts as handled.
    if not cmd:
        return True

    try:
        console.print()
        console.print(f"[dim]$ {cmd}[/dim]")

        # Execute the command via the shell so pipes/globs work as the user expects.
        result = subprocess.run(
            cmd, check=False, shell=True, capture_output=True, text=True, timeout=timeout, cwd=Path.cwd()
        )

        # Display output
        if result.stdout:
            console.print(result.stdout, style=COLORS["dim"], markup=False)
        if result.stderr:
            console.print(result.stderr, style="red", markup=False)

        # Show return code if non-zero
        if result.returncode != 0:
            console.print(f"[dim]Exit code: {result.returncode}[/dim]")

        console.print()
        return True

    except subprocess.TimeoutExpired:
        console.print(f"[red]Command timed out after {timeout} seconds[/red]")
        console.print()
        return True
    except Exception as e:
        # Best-effort execution: surface the error to the user instead of crashing.
        console.print(f"[red]Error executing command: {e}[/red]")
        console.print()
        return True
@@ -0,0 +1,138 @@
1
+ """Configuration, constants, and model creation for the CLI."""
2
+
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import dotenv
8
+ from rich.console import Console
9
+
10
+ dotenv.load_dotenv()
11
+
12
# Color scheme
# Hex colors used by the Rich UI: emerald for primary/agent text, gray for
# de-emphasized output, amber for tool-related messages.
COLORS = {
    "primary": "#10b981",
    "dim": "#6b7280",
    "user": "#ffffff",
    "agent": "#10b981",
    "thinking": "#34d399",
    "tool": "#fbbf24",
}

# ASCII art banner
DEEP_AGENTS_ASCII = """
██████╗  ███████╗ ███████╗ ██████╗
██╔══██╗ ██╔════╝ ██╔════╝ ██╔══██╗
██║  ██║ █████╗   █████╗   ██████╔╝
██║  ██║ ██╔══╝   ██╔══╝   ██╔═══╝
██████╔╝ ███████╗ ███████╗ ██║
╚═════╝  ╚══════╝ ╚══════╝ ╚═╝

 █████╗   ██████╗  ███████╗ ███╗   ██╗ ████████╗ ███████╗
██╔══██╗ ██╔════╝  ██╔════╝ ████╗  ██║ ╚══██╔══╝ ██╔════╝
███████║ ██║  ███╗ █████╗   ██╔██╗ ██║    ██║    ███████╗
██╔══██║ ██║   ██║ ██╔══╝   ██║╚██╗██║    ██║    ╚════██║
██║  ██║ ╚██████╔╝ ███████╗ ██║ ╚████║    ██║    ███████║
╚═╝  ╚═╝  ╚═════╝  ╚══════╝ ╚═╝  ╚═══╝    ╚═╝    ╚══════╝
"""

# Interactive commands
# Slash commands accepted by the REPL, mapped to their help-text descriptions.
COMMANDS = {
    "clear": "Clear screen and reset conversation",
    "help": "Show help information",
    "tokens": "Show token usage for current session",
    "quit": "Exit the CLI",
    "exit": "Exit the CLI",
}

# Common bash commands for autocomplete
COMMON_BASH_COMMANDS = {
    "ls": "List directory contents",
    "ls -la": "List all files with details",
    "cd": "Change directory",
    "pwd": "Print working directory",
    "cat": "Display file contents",
    "grep": "Search text patterns",
    "find": "Find files",
    "mkdir": "Make directory",
    "rm": "Remove file",
    "cp": "Copy file",
    "mv": "Move/rename file",
    "echo": "Print text",
    "touch": "Create empty file",
    "head": "Show first lines",
    "tail": "Show last lines",
    "wc": "Count lines/words",
    "chmod": "Change permissions",
}

# Maximum argument length for display
MAX_ARG_LENGTH = 150

# Agent configuration
# Applied to the agent via .with_config(); generous recursion limit for long
# tool-call loops.
config = {"recursion_limit": 1000}

# Rich console instance
# Shared console; highlight=False keeps Rich from auto-colorizing output.
console = Console(highlight=False)
77
+
78
+
79
class SessionState:
    """Holds mutable session state (auto-approve mode, etc)."""

    def __init__(self, auto_approve: bool = False):
        # When True, tool calls are executed without prompting the user.
        self.auto_approve = auto_approve

    def toggle_auto_approve(self) -> bool:
        """Flip the auto-approve flag and return its new value."""
        new_state = not self.auto_approve
        self.auto_approve = new_state
        return new_state
89
+
90
+
91
def get_default_coding_instructions() -> str:
    """Get the default coding agent instructions.

    These are the immutable base instructions that cannot be modified by the agent.
    Long-term memory (agent.md) is handled separately by the middleware.
    """
    # The prompt file ships with the package, one directory above this module.
    prompt_file = Path(__file__).parent.parent / "default_agent_prompt.md"
    return prompt_file.read_text()
99
+
100
+
101
def create_model():
    """Create the appropriate model based on available API keys.

    OpenAI is preferred when both keys are present.

    Returns:
        ChatModel instance (OpenAI or Anthropic)

    Raises:
        SystemExit if no API key is configured
    """
    if os.environ.get("OPENAI_API_KEY"):
        from langchain_openai import ChatOpenAI

        chosen = os.environ.get("OPENAI_MODEL", "gpt-5-mini")
        console.print(f"[dim]Using OpenAI model: {chosen}[/dim]")
        return ChatOpenAI(model=chosen, temperature=0.7)

    if os.environ.get("ANTHROPIC_API_KEY"):
        from langchain_anthropic import ChatAnthropic

        chosen = os.environ.get("ANTHROPIC_MODEL", "claude-sonnet-4-5-20250929")
        console.print(f"[dim]Using Anthropic model: {chosen}[/dim]")
        return ChatAnthropic(model_name=chosen, max_tokens=20000)

    # No provider credentials: explain how to configure one, then abort.
    console.print("[bold red]Error:[/bold red] No API key configured.")
    console.print("\nPlease set one of the following environment variables:")
    console.print("  - OPENAI_API_KEY (for OpenAI models like gpt-5-mini)")
    console.print("  - ANTHROPIC_API_KEY (for Claude models)")
    console.print("\nExample:")
    console.print("  export OPENAI_API_KEY=your_api_key_here")
    console.print("\nOr add it to your .env file.")
    sys.exit(1)