deepagents-cli 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of deepagents-cli might be problematic; see the registry's advisory page for more details.

Files changed (42)
  1. deepagents_cli/__init__.py +5 -0
  2. deepagents_cli/__main__.py +6 -0
  3. deepagents_cli/agent.py +278 -0
  4. deepagents_cli/cli.py +13 -0
  5. deepagents_cli/commands.py +89 -0
  6. deepagents_cli/config.py +138 -0
  7. deepagents_cli/execution.py +644 -0
  8. deepagents_cli/file_ops.py +347 -0
  9. deepagents_cli/input.py +249 -0
  10. deepagents_cli/main.py +226 -0
  11. deepagents_cli/py.typed +0 -0
  12. deepagents_cli/token_utils.py +63 -0
  13. deepagents_cli/tools.py +140 -0
  14. deepagents_cli/ui.py +489 -0
  15. deepagents_cli-0.0.5.dist-info/METADATA +18 -0
  16. deepagents_cli-0.0.5.dist-info/RECORD +19 -0
  17. deepagents_cli-0.0.5.dist-info/entry_points.txt +3 -0
  18. deepagents_cli-0.0.5.dist-info/top_level.txt +1 -0
  19. deepagents/__init__.py +0 -7
  20. deepagents/cli.py +0 -567
  21. deepagents/default_agent_prompt.md +0 -64
  22. deepagents/graph.py +0 -144
  23. deepagents/memory/__init__.py +0 -17
  24. deepagents/memory/backends/__init__.py +0 -15
  25. deepagents/memory/backends/composite.py +0 -250
  26. deepagents/memory/backends/filesystem.py +0 -330
  27. deepagents/memory/backends/state.py +0 -206
  28. deepagents/memory/backends/store.py +0 -351
  29. deepagents/memory/backends/utils.py +0 -319
  30. deepagents/memory/protocol.py +0 -164
  31. deepagents/middleware/__init__.py +0 -13
  32. deepagents/middleware/agent_memory.py +0 -207
  33. deepagents/middleware/filesystem.py +0 -615
  34. deepagents/middleware/patch_tool_calls.py +0 -44
  35. deepagents/middleware/subagents.py +0 -481
  36. deepagents/pretty_cli.py +0 -289
  37. deepagents_cli-0.0.3.dist-info/METADATA +0 -551
  38. deepagents_cli-0.0.3.dist-info/RECORD +0 -24
  39. deepagents_cli-0.0.3.dist-info/entry_points.txt +0 -2
  40. deepagents_cli-0.0.3.dist-info/licenses/LICENSE +0 -21
  41. deepagents_cli-0.0.3.dist-info/top_level.txt +0 -1
  42. {deepagents_cli-0.0.3.dist-info → deepagents_cli-0.0.5.dist-info}/WHEEL +0 -0
@@ -0,0 +1,5 @@
1
+ """DeepAgents CLI - Interactive AI coding assistant."""
2
+
3
+ from .main import cli_main
4
+
5
+ __all__ = ["cli_main"]
@@ -0,0 +1,6 @@
1
+ """Allow running the CLI as: python -m deepagents.cli"""
2
+
3
+ from .main import cli_main
4
+
5
+ if __name__ == "__main__":
6
+ cli_main()
@@ -0,0 +1,278 @@
1
+ """Agent management and creation for the CLI."""
2
+
3
+ import os
4
+ import shutil
5
+ from pathlib import Path
6
+
7
+ from deepagents import create_deep_agent
8
+ from deepagents.backends import CompositeBackend
9
+ from deepagents.backends.filesystem import FilesystemBackend
10
+ from deepagents.middleware.agent_memory import AgentMemoryMiddleware
11
+ from deepagents.middleware.resumable_shell import ResumableShellToolMiddleware
12
+ from langchain.agents.middleware import HostExecutionPolicy
13
+ from langgraph.checkpoint.memory import InMemorySaver
14
+
15
+ from .config import COLORS, config, console, get_default_coding_instructions
16
+
17
+
18
def list_agents():
    """Print every agent found under ~/.deepagents, flagging incomplete ones.

    An agent directory without an ``agent.md`` file is shown as
    "(incomplete)"; directories are listed in sorted order.
    """
    agents_dir = Path.home() / ".deepagents"

    # Nothing to list when the directory is absent or completely empty.
    if not agents_dir.exists() or not any(agents_dir.iterdir()):
        console.print("[yellow]No agents found.[/yellow]")
        console.print(
            "[dim]Agents will be created in ~/.deepagents/ when you first use them.[/dim]",
            style=COLORS["dim"],
        )
        return

    console.print("\n[bold]Available Agents:[/bold]\n", style=COLORS["primary"])

    for entry in sorted(agents_dir.iterdir()):
        if not entry.is_dir():
            continue

        name = entry.name
        # Presence of agent.md is what marks an agent as fully set up.
        if (entry / "agent.md").exists():
            console.print(f" • [bold]{name}[/bold]", style=COLORS["primary"])
        else:
            console.print(
                f" • [bold]{name}[/bold] [dim](incomplete)[/dim]", style=COLORS["tool"]
            )
        console.print(f" {entry}", style=COLORS["dim"])

    console.print()
47
+
48
+
49
def reset_agent(agent_name: str, source_agent: str | None = None) -> None:
    """Reset an agent to default or copy from another agent.

    Args:
        agent_name: Agent whose ``~/.deepagents/<agent_name>/`` directory is
            destroyed and recreated.
        source_agent: Optional agent whose ``agent.md`` is copied in; when
            None, the default coding instructions are used instead.

    WARNING: any existing directory for ``agent_name`` is removed first.
    """
    agents_dir = Path.home() / ".deepagents"
    agent_dir = agents_dir / agent_name

    if source_agent:
        source_dir = agents_dir / source_agent
        source_md = source_dir / "agent.md"

        # Bail out early rather than destroying the target when the source
        # cannot supply instructions.
        if not source_md.exists():
            console.print(
                f"[bold red]Error:[/bold red] Source agent '{source_agent}' not found or has no agent.md"
            )
            return

        source_content = source_md.read_text()
        action_desc = f"contents of agent '{source_agent}'"
    else:
        source_content = get_default_coding_instructions()
        action_desc = "default"

    # Remove-then-recreate guarantees a clean slate (no stale memory files).
    if agent_dir.exists():
        shutil.rmtree(agent_dir)
        console.print(f"Removed existing agent directory: {agent_dir}", style=COLORS["tool"])

    agent_dir.mkdir(parents=True, exist_ok=True)
    agent_md = agent_dir / "agent.md"
    agent_md.write_text(source_content)

    console.print(f"✓ Agent '{agent_name}' reset to {action_desc}", style=COLORS["primary"])
    console.print(f"Location: {agent_dir}\n", style=COLORS["dim"])
80
+
81
+
82
def get_system_prompt() -> str:
    """Get the base system prompt for the agent.

    Returns:
        The system prompt string (without agent.md content). Only the
        current working directory is interpolated; the rest is static
        guidance text for the model.
    """
    return f"""### Current Working Directory

The filesystem backend is currently operating in: `{Path.cwd()}`

### Memory System Reminder

Your long-term memory is stored in /memories/ and persists across sessions.

**IMPORTANT - Check memories before answering:**
- When asked "what do you know about X?" → Run `ls /memories/` FIRST, then read relevant files
- When starting a task → Check if you have guides or examples in /memories/
- At the beginning of new sessions → Consider checking `ls /memories/` to see what context you have

Base your answers on saved knowledge (from /memories/) when available, supplemented by general knowledge.

### Human-in-the-Loop Tool Approval

Some tool calls require user approval before execution. When a tool call is rejected by the user:
1. Accept their decision immediately - do NOT retry the same command
2. Explain that you understand they rejected the action
3. Suggest an alternative approach or ask for clarification
4. Never attempt the exact same rejected command again

Respect the user's decisions and work with them collaboratively.

### Web Search Tool Usage

When you use the web_search tool:
1. The tool will return search results with titles, URLs, and content excerpts
2. You MUST read and process these results, then respond naturally to the user
3. NEVER show raw JSON or tool results directly to the user
4. Synthesize the information from multiple sources into a coherent answer
5. Cite your sources by mentioning page titles or URLs when relevant
6. If the search doesn't find what you need, explain what you found and ask clarifying questions

The user only sees your text responses - not tool results. Always provide a complete, natural language answer after using web_search.

### Todo List Management

When using the write_todos tool:
1. Keep the todo list MINIMAL - aim for 3-6 items maximum
2. Only create todos for complex, multi-step tasks that truly need tracking
3. Break down work into clear, actionable items without over-fragmenting
4. For simple tasks (1-2 steps), just do them directly without creating todos
5. When first creating a todo list for a task, ALWAYS ask the user if the plan looks good before starting work
- Create the todos, let them render, then ask: "Does this plan look good?" or similar
- Wait for the user's response before marking the first todo as in_progress
- If they want changes, adjust the plan accordingly
6. Update todo status promptly as you complete each item

The todo list is a planning tool - use it judiciously to avoid overwhelming the user with excessive task tracking."""
139
+
140
+
141
def create_agent_with_config(model, assistant_id: str, tools: list):
    """Create and configure an agent with the specified model and tools.

    Args:
        model: Chat model instance the agent runs on.
        assistant_id: Agent name; its persistent files live under
            ``~/.deepagents/<assistant_id>/``.
        tools: Additional tools passed through to ``create_deep_agent``.

    Returns:
        The configured agent with an ``InMemorySaver`` checkpointer attached.
    """
    # Shell commands execute directly on the host, rooted at the CWD.
    shell_middleware = ResumableShellToolMiddleware(
        workspace_root=os.getcwd(), execution_policy=HostExecutionPolicy()
    )

    # For long-term memory, point to ~/.deepagents/AGENT_NAME/ with /memories/ prefix
    agent_dir = Path.home() / ".deepagents" / assistant_id
    agent_dir.mkdir(parents=True, exist_ok=True)
    agent_md = agent_dir / "agent.md"
    if not agent_md.exists():
        # First run for this agent: seed agent.md with the default instructions.
        source_content = get_default_coding_instructions()
        agent_md.write_text(source_content)

    # Long-term backend - rooted at agent directory
    # This handles both /memories/ files and /agent.md
    long_term_backend = FilesystemBackend(root_dir=agent_dir, virtual_mode=True)

    # Composite backend: current working directory for default, agent directory for /memories/
    backend = CompositeBackend(
        default=FilesystemBackend(), routes={"/memories/": long_term_backend}
    )

    # Use the same backend for agent memory middleware
    agent_middleware = [
        AgentMemoryMiddleware(backend=long_term_backend, memory_path="/memories/"),
        shell_middleware,
    ]

    # Get the system prompt
    system_prompt = get_system_prompt()

    # Helper functions for formatting tool descriptions in HITL prompts
    def format_write_file_description(tool_call: dict) -> str:
        """Format write_file tool call for approval prompt."""
        args = tool_call.get("args", {})
        file_path = args.get("file_path", "unknown")
        content = args.get("content", "")

        # Distinguish create vs overwrite so the user knows the stakes.
        action = "Overwrite" if os.path.exists(file_path) else "Create"
        line_count = len(content.splitlines())
        size = len(content.encode("utf-8"))

        return f"File: {file_path}\nAction: {action} file\nLines: {line_count} · Bytes: {size}"

    def format_edit_file_description(tool_call: dict) -> str:
        """Format edit_file tool call for approval prompt."""
        args = tool_call.get("args", {})
        file_path = args.get("file_path", "unknown")
        old_string = args.get("old_string", "")
        new_string = args.get("new_string", "")
        replace_all = bool(args.get("replace_all", False))

        # Net character growth/shrink of a single replacement occurrence.
        delta = len(new_string) - len(old_string)

        return (
            f"File: {file_path}\n"
            f"Action: Replace text ({'all occurrences' if replace_all else 'single occurrence'})\n"
            f"Snippet delta: {delta:+} characters"
        )

    def format_web_search_description(tool_call: dict) -> str:
        """Format web_search tool call for approval prompt."""
        args = tool_call.get("args", {})
        query = args.get("query", "unknown")
        max_results = args.get("max_results", 5)

        return f"Query: {query}\nMax results: {max_results}\n\n⚠️ This will use Tavily API credits"

    def format_task_description(tool_call: dict) -> str:
        """Format task (subagent) tool call for approval prompt."""
        args = tool_call.get("args", {})
        description = args.get("description", "unknown")
        prompt = args.get("prompt", "")

        # Truncate prompt if too long
        prompt_preview = prompt[:300]
        if len(prompt) > 300:
            prompt_preview += "..."

        return (
            f"Task: {description}\n\n"
            f"Instructions to subagent:\n"
            f"{'─' * 40}\n"
            f"{prompt_preview}\n"
            f"{'─' * 40}\n\n"
            f"⚠️ Subagent will have access to file operations and shell commands"
        )

    # Configure human-in-the-loop for potentially destructive tools
    from langchain.agents.middleware import InterruptOnConfig

    shell_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: (
            f"Shell Command: {tool_call['args'].get('command', 'N/A')}\n"
            f"Working Directory: {os.getcwd()}"
        ),
    }

    write_file_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_write_file_description(tool_call),
    }

    edit_file_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_edit_file_description(tool_call),
    }

    web_search_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_web_search_description(tool_call),
    }

    task_interrupt_config: InterruptOnConfig = {
        "allowed_decisions": ["approve", "reject"],
        "description": lambda tool_call, state, runtime: format_task_description(tool_call),
    }

    agent = create_deep_agent(
        model=model,
        system_prompt=system_prompt,
        tools=tools,
        backend=backend,
        middleware=agent_middleware,
        interrupt_on={
            "shell": shell_interrupt_config,
            "write_file": write_file_interrupt_config,
            "edit_file": edit_file_interrupt_config,
            "web_search": web_search_interrupt_config,
            "task": task_interrupt_config,
        },
    ).with_config(config)

    # In-memory checkpointer: conversation state survives across turns but
    # not across process restarts.
    agent.checkpointer = InMemorySaver()

    return agent
deepagents_cli/cli.py ADDED
@@ -0,0 +1,13 @@
1
+ """Main CLI entrypoint for deepagents."""
2
+
3
+
4
def cli_main() -> None:
    """Console-script entrypoint for the deepagents CLI.

    Registered as the console script entrypoint in pyproject.toml; at the
    moment it only emits a liveness message.
    """
    print("I'm alive!")


if __name__ == "__main__":
    cli_main()
@@ -0,0 +1,89 @@
1
+ """Command handlers for slash commands and bash execution."""
2
+
3
+ import subprocess
4
+ from pathlib import Path
5
+
6
+ from langgraph.checkpoint.memory import InMemorySaver
7
+
8
+ from .config import COLORS, DEEP_AGENTS_ASCII, console
9
+ from .ui import TokenTracker, show_interactive_help
10
+
11
+
12
def handle_command(command: str, agent, token_tracker: TokenTracker) -> str | bool:
    """Handle a slash command.

    Args:
        command: Raw user input; leading "/" and surrounding whitespace are
            ignored, matching is case-insensitive.
        agent: The running agent; its checkpointer is replaced on /clear.
        token_tracker: Session token tracker; reset on /clear.

    Returns:
        "exit" when the user asked to quit, otherwise True — every input,
        including an unrecognized command, is consumed here. (A trailing
        ``return False`` in the original was unreachable and has been removed.)
    """
    cmd = command.lower().strip().lstrip("/")

    if cmd in ("quit", "exit", "q"):
        return "exit"

    if cmd == "clear":
        # Reset agent conversation state
        agent.checkpointer = InMemorySaver()

        # Reset token tracking to baseline
        token_tracker.reset()

        # Clear screen and show fresh UI
        console.clear()
        console.print(DEEP_AGENTS_ASCII, style=f"bold {COLORS['primary']}")
        console.print()
        console.print(
            "... Fresh start! Screen cleared and conversation reset.", style=COLORS["agent"]
        )
        console.print()
        return True

    if cmd == "help":
        show_interactive_help()
        return True

    if cmd == "tokens":
        token_tracker.display_session()
        return True

    # Unknown command: report it and consume the input.
    console.print()
    console.print(f"[yellow]Unknown command: /{cmd}[/yellow]")
    console.print("[dim]Type /help for available commands.[/dim]")
    console.print()
    return True
51
+
52
+
53
def execute_bash_command(command: str) -> bool:
    """Run a user-supplied shell line (prefixed with "!") and echo its output.

    Always returns True: the input was consumed here and is never forwarded
    to the agent.
    """
    cmd = command.strip().lstrip("!")

    # An empty line after stripping the "!" prefix is a no-op.
    if not cmd:
        return True

    console.print()
    console.print(f"[dim]$ {cmd}[/dim]")

    try:
        # shell=True is intentional here: the user is typing a shell command.
        completed = subprocess.run(
            cmd, check=False, shell=True, capture_output=True, text=True, timeout=30, cwd=Path.cwd()
        )
    except subprocess.TimeoutExpired:
        console.print("[red]Command timed out after 30 seconds[/red]")
        console.print()
        return True
    except Exception as e:
        console.print(f"[red]Error executing command: {e}[/red]")
        console.print()
        return True

    # Echo captured streams; markup=False so program output is shown verbatim.
    if completed.stdout:
        console.print(completed.stdout, style=COLORS["dim"], markup=False)
    if completed.stderr:
        console.print(completed.stderr, style="red", markup=False)

    # Surface a non-zero exit status explicitly.
    if completed.returncode != 0:
        console.print(f"[dim]Exit code: {completed.returncode}[/dim]")

    console.print()
    return True
@@ -0,0 +1,138 @@
1
+ """Configuration, constants, and model creation for the CLI."""
2
+
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import dotenv
8
+ from rich.console import Console
9
+
10
# Load environment variables from a local .env file, if one exists.
dotenv.load_dotenv()

# Color scheme (hex values consumed by Rich styles throughout the CLI)
COLORS = {
    "primary": "#10b981",
    "dim": "#6b7280",
    "user": "#ffffff",
    "agent": "#10b981",
    "thinking": "#34d399",
    "tool": "#fbbf24",
}

# ASCII art banner shown at startup and after /clear
DEEP_AGENTS_ASCII = """
██████╗ ███████╗ ███████╗ ██████╗
██╔══██╗ ██╔════╝ ██╔════╝ ██╔══██╗
██║ ██║ █████╗ █████╗ ██████╔╝
██║ ██║ ██╔══╝ ██╔══╝ ██╔═══╝
██████╔╝ ███████╗ ███████╗ ██║
╚═════╝ ╚══════╝ ╚══════╝ ╚═╝

█████╗ ██████╗ ███████╗ ███╗ ██╗ ████████╗ ███████╗
██╔══██╗ ██╔════╝ ██╔════╝ ████╗ ██║ ╚══██╔══╝ ██╔════╝
███████║ ██║ ███╗ █████╗ ██╔██╗ ██║ ██║ ███████╗
██╔══██║ ██║ ██║ ██╔══╝ ██║╚██╗██║ ██║ ╚════██║
██║ ██║ ╚██████╔╝ ███████╗ ██║ ╚████║ ██║ ███████║
╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═══╝ ╚═╝ ╚══════╝
"""

# Interactive slash commands and their help text
COMMANDS = {
    "clear": "Clear screen and reset conversation",
    "help": "Show help information",
    "tokens": "Show token usage for current session",
    "quit": "Exit the CLI",
    "exit": "Exit the CLI",
}

# Common bash commands for autocomplete (command -> short description)
COMMON_BASH_COMMANDS = {
    "ls": "List directory contents",
    "ls -la": "List all files with details",
    "cd": "Change directory",
    "pwd": "Print working directory",
    "cat": "Display file contents",
    "grep": "Search text patterns",
    "find": "Find files",
    "mkdir": "Make directory",
    "rm": "Remove file",
    "cp": "Copy file",
    "mv": "Move/rename file",
    "echo": "Print text",
    "touch": "Create empty file",
    "head": "Show first lines",
    "tail": "Show last lines",
    "wc": "Count lines/words",
    "chmod": "Change permissions",
}

# Maximum argument length for display (tool-call arguments are truncated)
MAX_ARG_LENGTH = 150

# Agent configuration passed to the agent via .with_config()
config = {"recursion_limit": 1000}

# Shared Rich console instance; highlight=False disables automatic
# syntax highlighting of printed values.
console = Console(highlight=False)
77
+
78
+
79
class SessionState:
    """Mutable per-session flags shared across the CLI loop.

    Currently tracks only auto-approve mode (skipping human-in-the-loop
    confirmation prompts).
    """

    def __init__(self, auto_approve: bool = False):
        # When True, tool calls run without asking the user for approval.
        self.auto_approve = auto_approve

    def toggle_auto_approve(self) -> bool:
        """Flip auto-approve mode and report the resulting state."""
        self.auto_approve = not self.auto_approve
        return self.auto_approve
89
+
90
+
91
def get_default_coding_instructions() -> str:
    """Get the default coding agent instructions.

    These are the immutable base instructions that cannot be modified by the agent.
    Long-term memory (agent.md) is handled separately by the middleware.

    Returns:
        The text of ``default_agent_prompt.md``.

    Raises:
        FileNotFoundError: if the prompt file does not exist at the
            computed path.
    """
    # NOTE(review): parent.parent resolves to the directory *containing* the
    # package, not the package directory itself — confirm that
    # default_agent_prompt.md actually ships there; the 0.0.5 wheel's file
    # listing does not show this file.
    default_prompt_path = Path(__file__).parent.parent / "default_agent_prompt.md"
    return default_prompt_path.read_text()
99
+
100
+
101
def create_model():
    """Pick a chat model based on which API-key environment variable is set.

    Preference order is OPENAI_API_KEY first, then ANTHROPIC_API_KEY.

    Returns:
        ChatModel instance (OpenAI or Anthropic)

    Raises:
        SystemExit if no API key is configured
    """
    if os.environ.get("OPENAI_API_KEY"):
        from langchain_openai import ChatOpenAI

        # Model name can be overridden via OPENAI_MODEL.
        model_name = os.environ.get("OPENAI_MODEL", "gpt-5-mini")
        console.print(f"[dim]Using OpenAI model: {model_name}[/dim]")
        return ChatOpenAI(model=model_name, temperature=0.7)

    if os.environ.get("ANTHROPIC_API_KEY"):
        from langchain_anthropic import ChatAnthropic

        # Model name can be overridden via ANTHROPIC_MODEL.
        model_name = os.environ.get("ANTHROPIC_MODEL", "claude-sonnet-4-5-20250929")
        console.print(f"[dim]Using Anthropic model: {model_name}[/dim]")
        return ChatAnthropic(model_name=model_name, max_tokens=20000)

    # Neither key present: explain how to configure one, then abort.
    console.print("[bold red]Error:[/bold red] No API key configured.")
    console.print("\nPlease set one of the following environment variables:")
    console.print(" - OPENAI_API_KEY (for OpenAI models like gpt-5-mini)")
    console.print(" - ANTHROPIC_API_KEY (for Claude models)")
    console.print("\nExample:")
    console.print(" export OPENAI_API_KEY=your_api_key_here")
    console.print("\nOr add it to your .env file.")
    sys.exit(1)