cmdo-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cmdo/__init__.py +3 -0
- cmdo/cli.py +174 -0
- cmdo/clipboard.py +14 -0
- cmdo/config.py +201 -0
- cmdo/context.py +93 -0
- cmdo/display.py +134 -0
- cmdo/executor.py +103 -0
- cmdo/llm/__init__.py +0 -0
- cmdo/llm/client.py +56 -0
- cmdo/llm/parser.py +56 -0
- cmdo/llm/prompt.py +70 -0
- cmdo/models.py +65 -0
- cmdo/safety/__init__.py +0 -0
- cmdo/safety/classifier.py +72 -0
- cmdo/safety/forbidden.py +35 -0
- cmdo_cli-0.1.0.dist-info/METADATA +136 -0
- cmdo_cli-0.1.0.dist-info/RECORD +21 -0
- cmdo_cli-0.1.0.dist-info/WHEEL +5 -0
- cmdo_cli-0.1.0.dist-info/entry_points.txt +2 -0
- cmdo_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
- cmdo_cli-0.1.0.dist-info/top_level.txt +1 -0
cmdo/__init__.py
ADDED
cmdo/cli.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
"""CLI entry point for cmdo."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import sys
|
|
6
|
+
|
|
7
|
+
import click
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
|
|
10
|
+
from cmdo import __version__
|
|
11
|
+
|
|
12
|
+
console = Console()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@click.command(context_settings={"ignore_unknown_options": True})
@click.argument("query", nargs=-1)
@click.option("--config", "do_config", is_flag=True, help="Configure cmdo")
@click.option("--show", is_flag=True, help="Show current configuration (use with --config)")
@click.option("--reset", is_flag=True, help="Reset configuration (use with --config)")
@click.option("--dry-run", "-d", is_flag=True, help="Generate command without executing")
@click.option("--yes", "-y", is_flag=True, help="Auto-confirm safe commands")
@click.option("--model", "-m", default=None, help="Override default model")
@click.option("--version", "-V", is_flag=True, help="Show version")
def main(
    query: tuple[str, ...],
    do_config: bool,
    show: bool,
    reset: bool,
    dry_run: bool,
    yes: bool,
    model: str | None,
    version: bool,
) -> None:
    """cmdo — Natural language to shell commands.

    \b
    Examples:
        cmdo "find all Python files larger than 1MB"
        cmdo "start a local HTTP server on port 8080"
        cmdo --dry-run "delete the temp folder"
        cmdo --yes "show current git branch"
    """
    # NOTE: the previous help text advertised `cmdo --explain ...`, but no
    # --explain option is defined on this command — the example was replaced
    # with flags that actually exist.
    if version:
        click.echo(f"cmdo v{__version__}")
        return

    # Config management. Imports are deferred so a plain `cmdo "..."` call
    # does not pay for config-wizard machinery.
    if do_config:
        if show:
            from cmdo.config import show_config

            show_config()
        elif reset:
            from cmdo.config import reset_config

            reset_config()
        else:
            from cmdo.config import configure

            configure()
        return

    # No query provided — print a short usage banner rather than an error.
    if not query:
        click.echo(f"cmdo v{__version__} — Natural language to shell commands")
        click.echo('Usage: cmdo "your instruction"')
        click.echo(" cmdo --config Set up or reconfigure")
        click.echo(" cmdo --help Show full help")
        return

    # Core flow: re-join the argv words into a single instruction string.
    query_str = " ".join(query)
    _run_query(query_str, dry_run=dry_run, auto_yes=yes, model_override=model)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def _run_query(
    query: str,
    *,
    dry_run: bool = False,
    auto_yes: bool = False,
    model_override: str | None = None,
) -> None:
    """Core flow: query → generate → display → confirm → execute.

    Args:
        query: Natural-language instruction to translate into a command.
        dry_run: Stop after displaying the generated command.
        auto_yes: Skip the confirmation prompt for non-DANGEROUS commands.
        model_override: Model name overriding the configured default.

    Exits the process: 0 on success; 1 when cancelled, blocked, or no
    command was produced; 2 on generation failure or a non-zero command
    exit code.
    """
    # Heavy imports are deferred so --version/--help/usage paths stay fast.
    from cmdo.clipboard import copy_to_clipboard
    from cmdo.config import ensure_configured
    from cmdo.context import gather_context
    from cmdo.display import (
        display_command,
        display_error,
        display_execution_result,
        display_forbidden,
        edit_command,
        prompt_user,
    )
    from cmdo.executor import execute_command
    from cmdo.llm.client import generate_command
    from cmdo.models import RiskLevel, UserAction
    from cmdo.safety.classifier import classify_risk, upgrade_risk
    from cmdo.safety.forbidden import check_forbidden

    # 1. Ensure configured (ensure_configured exits with guidance if not)
    config = ensure_configured()
    if model_override:
        config.model = model_override

    # 2. Gather context
    with console.status("[dim]Gathering context...[/dim]", spinner="dots"):
        context = gather_context()

    # 3. Generate command via LLM
    try:
        with console.status("[dim]Thinking...[/dim]", spinner="dots"):
            result = generate_command(query, context, config)
    except Exception as e:
        display_error(f"Failed to generate command: {e}")
        sys.exit(2)

    if not result.command:
        display_error("Could not generate a command for that request.")
        sys.exit(1)

    # 4. Safety checks: hard blocklist first, then local classification.
    #    upgrade_risk can only raise the LLM's self-assessment, never lower it.
    forbidden_msg = check_forbidden(result.command)
    if forbidden_msg:
        display_forbidden(forbidden_msg)
        sys.exit(1)

    local_risk, local_reason = classify_risk(result.command)
    result.risk_level = upgrade_risk(result.risk_level, local_risk)
    if local_reason and result.risk_reason is None:
        result.risk_reason = local_reason

    # 5. Display
    display_command(result)

    # 6. Dry run — stop here
    if dry_run:
        return

    # 7. Auto-confirm if --yes flag or config auto_confirm_safe — but
    #    DANGEROUS commands always go through the explicit prompt.
    if (auto_yes or config.auto_confirm_safe) and result.risk_level != RiskLevel.DANGEROUS:
        action = UserAction.EXECUTE
    else:
        action = prompt_user(result)

    # 8. Handle action
    if action == UserAction.CANCEL:
        console.print("[dim]Cancelled.[/dim]")
        sys.exit(1)

    if action == UserAction.COPY:
        if copy_to_clipboard(result.command):
            console.print("[green]📋 Copied to clipboard![/green]")
        else:
            console.print(f"[yellow]Could not copy. Here's the command:[/yellow]\n{result.command}")
        return

    if action == UserAction.EDIT:
        edited = edit_command(result.command)
        if edited != result.command:
            # Re-check safety on edited command — the user may have typed
            # something the blocklist forbids.
            forbidden_msg = check_forbidden(edited)
            if forbidden_msg:
                display_forbidden(forbidden_msg)
                sys.exit(1)
            result.command = edited

    # 9. Execute
    stepwise = action == UserAction.EXECUTE_STEPWISE
    exec_result = execute_command(result.command, stepwise=stepwise)
    display_execution_result(exec_result.exit_code, exec_result.duration)

    sys.exit(0 if exec_result.exit_code == 0 else 2)
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
# Allow direct execution (`python -m cmdo.cli`) in addition to the
# console-script entry point declared in entry_points.txt.
if __name__ == "__main__":
    main()
|
cmdo/clipboard.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"""Clipboard integration."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def copy_to_clipboard(command: str) -> bool:
    """Place *command* on the system clipboard.

    Returns True when the copy succeeded; False on any failure, including
    pyperclip being uninstalled or no clipboard backend being available.
    """
    try:
        import pyperclip
    except Exception:
        return False
    try:
        pyperclip.copy(command)
    except Exception:
        return False
    return True
|
cmdo/config.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
1
|
+
"""Configuration management for cmdo."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
import tomli_w
|
|
9
|
+
|
|
10
|
+
from cmdo.models import Config
|
|
11
|
+
|
|
12
|
+
try:
|
|
13
|
+
import tomllib
|
|
14
|
+
except ModuleNotFoundError:
|
|
15
|
+
import tomli as tomllib
|
|
16
|
+
|
|
17
|
+
# Config lives under the user's XDG-style directory: ~/.config/cmdo/config.toml
CONFIG_DIR = Path.home() / ".config" / "cmdo"
CONFIG_FILE = CONFIG_DIR / "config.toml"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _load_toml() -> dict | None:
    """Read and parse the on-disk config file.

    Returns the parsed TOML as a dict, or None when the file is missing,
    unreadable, or not valid TOML. Previously a corrupt config file raised
    an unhandled TOMLDecodeError and crashed every cmdo invocation; now it
    is treated the same as "not configured" so the user can re-run
    `cmdo --config`.
    """
    if not CONFIG_FILE.exists():
        return None
    try:
        with open(CONFIG_FILE, "rb") as f:
            return tomllib.load(f)
    except (OSError, tomllib.TOMLDecodeError):
        return None
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _save_config(config: Config) -> None:
    """Serialize *config* to CONFIG_FILE as TOML, creating CONFIG_DIR if needed."""
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)

    # Sections mirror the layout check_config() expects on load.
    llm_section = {
        "provider": config.provider,
        "api_key": config.api_key,
        "model": config.model,
        "base_url": config.base_url,
    }
    behavior_section = {
        "auto_confirm_safe": config.auto_confirm_safe,
        "danger_confirmation": config.danger_confirmation,
    }
    display_section = {
        "color": config.color,
        "explanation": config.explanation,
    }
    payload = {
        "llm": llm_section,
        "behavior": behavior_section,
        "display": display_section,
    }

    with open(CONFIG_FILE, "wb") as fh:
        tomli_w.dump(payload, fh)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def check_config() -> Config | None:
    """Load and return Config if valid, else None.

    Missing file, malformed sections, or an empty API key all yield None —
    callers treat that uniformly as "not configured".
    """
    data = _load_toml()
    if data is None:
        return None

    try:
        llm_section = data.get("llm", {})
        behavior_section = data.get("behavior", {})
        display_section = data.get("display", {})
        loaded = Config(
            provider=llm_section.get("provider", "openai"),
            api_key=llm_section.get("api_key", ""),
            model=llm_section.get("model", "gpt-5-mini"),
            base_url=llm_section.get("base_url", ""),
            auto_confirm_safe=behavior_section.get("auto_confirm_safe", False),
            danger_confirmation=behavior_section.get("danger_confirmation", "type"),
            color=display_section.get("color", True),
            explanation=display_section.get("explanation", True),
        )
    except (KeyError, TypeError):
        return None

    # A config without an API key is unusable — treat as unconfigured.
    if not loaded.api_key:
        return None
    return loaded
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def configure() -> Config:
    """Interactive setup wizard.

    Walks the user through provider choice, API key (validated against the
    live API), default model, and the auto-confirm preference, then persists
    the result to CONFIG_FILE.

    Returns:
        The saved Config.

    Raises:
        SystemExit: exit code 2 when no API key is entered or validation fails.
    """
    from rich.console import Console

    console = Console()
    console.print("\n[bold]🔧 cmdo setup[/bold]\n")

    # Provider (OpenAI only for MVP) — any other answer falls through to
    # openai with a notice rather than aborting.
    console.print("1. Choose your LLM provider:")
    console.print(" [1] OpenAI (GPT-5.4, GPT-5-mini)")
    console.print()
    choice = input(" > ").strip()
    if choice not in ("", "1"):
        console.print("[yellow]Only OpenAI is supported in this version.[/yellow]")
    provider = "openai"

    # API key
    console.print("\n2. Enter your OpenAI API key:")
    console.print(" [dim](Get one at https://platform.openai.com/api-keys)[/dim]")
    api_key = input(" > ").strip()
    if not api_key:
        console.print("[red]API key is required.[/red]")
        sys.exit(2)

    # Validate API key with a cheap models.list() round-trip before saving.
    console.print("\n [dim]Validating API key...[/dim]", end="")
    if _validate_api_key(api_key):
        console.print(" [green]✓[/green]")
    else:
        console.print(" [red]✗[/red]")
        console.print("[red] API key validation failed. Please check your key.[/red]")
        sys.exit(2)

    # Model — empty or unrecognized input defaults to gpt-5-mini.
    console.print("\n3. Choose default model:")
    console.print(" [1] gpt-5.4 (best quality, slower)")
    console.print(" [2] gpt-5-mini (fast, cheaper)")
    console.print(" [3] Custom (enter model name)")
    model_choice = input(" > ").strip()
    if model_choice == "1":
        model = "gpt-5.4"
    elif model_choice == "3":
        model = input(" Enter model name: ").strip()
        if not model:
            console.print("[yellow]No model entered, defaulting to gpt-5-mini.[/yellow]")
            model = "gpt-5-mini"
    else:
        model = "gpt-5-mini"

    # Auto-confirm — opt-in only; default (empty answer) is False.
    console.print("\n4. Auto-confirm safe commands? (skip [Y/n] for non-destructive)")
    auto_confirm = input(" [y/N] > ").strip().lower() == "y"

    config = Config(
        provider=provider,
        api_key=api_key,
        model=model,
        auto_confirm_safe=auto_confirm,
    )
    _save_config(config)
    console.print(f"\n[green]✅ Configuration saved to {CONFIG_FILE}[/green]")
    console.print(' Run [bold]cmdo "list all python files"[/bold] to try it out!\n')
    return config
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _validate_api_key(api_key: str) -> bool:
    """Check that *api_key* is usable by listing models via the OpenAI API.

    Any failure — missing openai package, rejected key, network error —
    yields False rather than raising.
    """
    try:
        from openai import OpenAI

        OpenAI(api_key=api_key).models.list()
    except Exception:
        return False
    return True
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def show_config() -> None:
    """Print the current configuration with the API key masked."""
    from rich.console import Console

    out = Console()
    cfg = check_config()
    if cfg is None:
        out.print("[yellow]No configuration found. Run `cmdo --config` to set up.[/yellow]")
        return

    # Keep a short prefix/suffix of the key for recognition; keys too short
    # to mask safely are hidden entirely.
    if len(cfg.api_key) > 11:
        masked_key = cfg.api_key[:7] + "..." + cfg.api_key[-4:]
    else:
        masked_key = "****"

    out.print("\n[bold]cmdo configuration[/bold]\n")
    out.print(f" Provider: {cfg.provider}")
    out.print(f" API Key: {masked_key}")
    out.print(f" Model: {cfg.model}")
    out.print(f" Auto-confirm safe: {cfg.auto_confirm_safe}")
    out.print(f" Color: {cfg.color}")
    out.print(f" Explanations: {cfg.explanation}")
    out.print()
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def reset_config() -> None:
    """Delete the config file after interactive confirmation.

    Ctrl-C or EOF at the prompt is treated as "no" — the rest of the CLI
    (display.py, executor.py) already handles interactive interrupts this
    way; previously this function let the KeyboardInterrupt escape as a
    raw traceback.
    """
    from rich.console import Console

    console = Console()
    if not CONFIG_FILE.exists():
        console.print("[yellow]No configuration file to reset.[/yellow]")
        return

    try:
        confirm = input("Are you sure you want to reset the configuration? [y/N] ").strip().lower()
    except (KeyboardInterrupt, EOFError):
        console.print()
        confirm = ""

    if confirm == "y":
        # missing_ok: tolerate the file disappearing between exists() and here.
        CONFIG_FILE.unlink(missing_ok=True)
        console.print("[green]Configuration reset. Run `cmdo --config` to reconfigure.[/green]")
    else:
        console.print("Cancelled.")
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
def ensure_configured() -> Config:
    """Return the loaded Config, or exit(2) with setup guidance when absent."""
    loaded = check_config()
    if loaded is not None:
        return loaded

    from rich.console import Console

    console = Console()
    console.print("[yellow]⚠ cmdo is not configured yet.[/yellow]")
    console.print("Run [bold]cmdo --config[/bold] to set up your LLM provider and API key.")
    sys.exit(2)
|
cmdo/context.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
"""Shell context gathering for cmdo."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import platform
|
|
7
|
+
import subprocess
|
|
8
|
+
|
|
9
|
+
from cmdo.models import ShellContext
|
|
10
|
+
|
|
11
|
+
# CLI tools probed by detect_tools() so the LLM prompt can state which
# utilities are actually installed on this machine.
COMMON_TOOLS = [
    "tar", "gzip", "zip", "unzip", "pigz",
    "docker", "docker-compose",
    "git", "gh",
    "python", "python3", "pip", "pip3",
    "node", "npm", "npx",
    "ffmpeg", "convert", "jq",
    "curl", "wget", "httpie",
    "rsync", "scp", "ssh",
    "awk", "sed", "grep",
    "kubectl", "terraform",
]
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _run(cmd: str, timeout: float = 2.0) -> str:
|
|
26
|
+
"""Run a shell command and return stdout, or empty string on failure."""
|
|
27
|
+
try:
|
|
28
|
+
result = subprocess.run(
|
|
29
|
+
cmd, shell=True, capture_output=True, text=True, timeout=timeout
|
|
30
|
+
)
|
|
31
|
+
return result.stdout.strip()
|
|
32
|
+
except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
|
|
33
|
+
return ""
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def detect_tools(names: list[str] | None = None) -> list[str]:
|
|
37
|
+
"""Check which CLI tools are installed."""
|
|
38
|
+
if names is None:
|
|
39
|
+
names = COMMON_TOOLS
|
|
40
|
+
available = []
|
|
41
|
+
for name in names:
|
|
42
|
+
if _run(f"which {name}"):
|
|
43
|
+
available.append(name)
|
|
44
|
+
return available
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def gather_context() -> ShellContext:
    """Collect environmental context for the LLM prompt.

    All probing is best-effort: every sub-command runs through _run, which
    returns "" on failure, so a broken environment never aborts the CLI.
    """
    # Operating-system description.
    system = platform.system()
    if system == "Darwin":
        os_info = f"macOS {platform.mac_ver()[0]}"
    elif system == "Linux":
        pretty = _run("cat /etc/os-release | grep PRETTY_NAME | cut -d'\"' -f2")
        os_info = pretty or f"Linux {platform.release()}"
    else:
        os_info = f"{system} {platform.release()}"

    # Shell name plus first line of its --version output, when available.
    shell = os.environ.get("SHELL", "")
    shell_name = os.path.basename(shell) if shell else "unknown"
    shell_version = _run(f"{shell} --version 2>&1 | head -1") if shell else ""
    if shell_version:
        shell_info = f"{shell_name} {shell_version}".strip()
    else:
        shell_info = shell_name

    # First 50 entries of the working directory (sorted for determinism).
    cwd = os.getcwd()
    try:
        entries = sorted(os.listdir(cwd))[:50]
    except OSError:
        entries = []

    # Current git branch, or None outside a repository.
    git_branch = _run("git rev-parse --abbrev-ref HEAD 2>/dev/null") or None

    # Python/node environment hints.
    hint_keys = ("CONDA_DEFAULT_ENV", "VIRTUAL_ENV", "PYENV_VERSION", "NVM_DIR")
    env_hints: dict[str, str] = {k: os.environ[k] for k in hint_keys if os.environ.get(k)}

    return ShellContext(
        os=os_info,
        shell=shell_info,
        cwd=cwd,
        cwd_listing=entries,
        user=os.environ.get("USER", "unknown"),
        path_tools=detect_tools(),
        env_hints=env_hints,
        git_branch=git_branch,
    )
|
cmdo/display.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
"""Terminal display and user interaction."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import readline # noqa: F401 — enables input() line editing
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
from rich.panel import Panel
|
|
10
|
+
from rich.text import Text
|
|
11
|
+
|
|
12
|
+
from cmdo.models import CommandResult, RiskLevel, UserAction
|
|
13
|
+
|
|
14
|
+
console = Console()
|
|
15
|
+
|
|
16
|
+
# Per-risk-level (rich color, emoji) pairs used when rendering a command.
RISK_STYLES = {
    RiskLevel.SAFE: ("green", "🤖"),
    RiskLevel.CAUTION: ("yellow", "🟡"),
    RiskLevel.DANGEROUS: ("red", "🔴"),
}
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def display_command(result: CommandResult) -> None:
    """Render the command result with color-coded formatting.

    Output: a bordered panel with the command (tinted by risk level), an
    optional risk warning, the explanation, and a low-confidence nudge when
    confidence drops below 50%.
    """
    color = RISK_STYLES.get(result.risk_level, ("white", "🤖"))[0]

    # Command box.
    console.print()
    console.print("🤖 Will run:")
    panel = Panel(
        Text(result.command, style=f"bold {color}"),
        border_style=color,
        expand=False,
        padding=(0, 1),
    )
    console.print(panel)

    # Risk warning.
    if result.risk_level == RiskLevel.DANGEROUS:
        reason = result.risk_reason or "This command may be destructive."
        console.print(f"[bold red]🔴 WARNING: DESTRUCTIVE — {reason}[/bold red]")
        console.print("[red] This action may be IRREVERSIBLE.[/red]")
    elif result.risk_level == RiskLevel.CAUTION:
        reason = result.risk_reason or "This command modifies system state."
        console.print(f"[yellow]🟡 CAUTION: {reason}[/yellow]")

    # Explanation.
    if result.explanation:
        console.print(f"\n[dim]📝 {result.explanation}[/dim]")

    # Low-confidence warning.
    if result.confidence < 0.5:
        pct = int(result.confidence * 100)
        console.print(f"\n[yellow]⚠ Low confidence ({pct}%). Please review carefully.[/yellow]")

    console.print()
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def prompt_user(result: CommandResult) -> UserAction:
    """Prompt user for action based on risk level.

    DANGEROUS commands require literally typing "yes"; everything else gets
    a one-key menu. Ctrl-C / EOF always maps to CANCEL.
    """
    dangerous = result.risk_level == RiskLevel.DANGEROUS
    if dangerous:
        console.print('[bold red]Type "yes" to confirm, or [n] to cancel:[/bold red]')
    elif result.is_multi_step:
        console.print(r"\[Y] Execute all \[s] Step-by-step \[e] Edit \[c] Copy \[n] Cancel")
    else:
        console.print(r"\[Y] Execute \[e] Edit \[c] Copy \[n] Cancel")

    try:
        answer = input("> ").strip().lower()
    except (KeyboardInterrupt, EOFError):
        console.print()
        return UserAction.CANCEL

    if dangerous:
        # Full word required — a lone "y" is not enough for destructive ops.
        return UserAction.EXECUTE if answer == "yes" else UserAction.CANCEL

    if answer in ("", "y"):
        return UserAction.EXECUTE
    if answer == "s" and result.is_multi_step:
        return UserAction.EXECUTE_STEPWISE
    if answer == "e":
        return UserAction.EDIT
    if answer == "c":
        return UserAction.COPY
    return UserAction.CANCEL
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def edit_command(command: str) -> str:
    """Allow user to edit the command inline.

    Pre-fills the readline buffer with *command* so the user edits in place.
    Returns the edited line; a blank submission, Ctrl-C, or EOF yields the
    original command unchanged.
    """
    console.print("[dim]Edit the command (press Enter when done):[/dim]")

    def prefill_input(text: str) -> str:
        # Seed the prompt via a readline pre-input hook; cleared in `finally`
        # so subsequent input() calls are not contaminated.
        import readline as rl
        def hook() -> None:
            rl.insert_text(text)
            rl.redisplay()
        rl.set_pre_input_hook(hook)
        try:
            result = input("> ")
        finally:
            rl.set_pre_input_hook()
        return result

    try:
        edited = prefill_input(command)
        # Blank input falls back to the original, never executing "".
        return edited.strip() or command
    except (KeyboardInterrupt, EOFError):
        console.print()
        return command
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def display_execution_result(exit_code: int, duration: float) -> None:
    """Show a one-line summary: green check on success, red cross otherwise."""
    if exit_code:
        console.print(f"[red]❌ Command failed (exit code {exit_code})[/red]")
    else:
        console.print(f"[green]✅ Done ({duration:.1f}s)[/green]")
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def display_error(message: str) -> None:
    """Display an error message.

    Args:
        message: Text to show; rendered in red with a ❌ prefix.
    """
    console.print(f"[red]❌ {message}[/red]")
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def display_forbidden(message: str) -> None:
    """Display a forbidden command message.

    Args:
        message: Blocklist reason (from cmdo.safety.forbidden) explaining
            why the command was refused.
    """
    console.print(f"\n[bold red]🚫 {message}[/bold red]")
    console.print("[red]This command is blocked for safety and cannot be executed.[/red]\n")
|
cmdo/executor.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
"""Command execution with real-time output streaming."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import signal
|
|
7
|
+
import subprocess
|
|
8
|
+
import time
|
|
9
|
+
|
|
10
|
+
from cmdo.models import ExecutionResult
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def execute_command(command: str, stepwise: bool = False) -> ExecutionResult:
    """Execute a shell command with real-time output streaming.

    Args:
        command: Shell command line to run.
        stepwise: When True, split on `&&`/`;` and confirm between steps.
    """
    runner = _execute_stepwise if stepwise else _execute_single
    return runner(command)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _execute_single(command: str) -> ExecutionResult:
    """Execute *command* under the user's shell, inheriting stdout/stderr.

    Two fixes over the previous version:
      * `process` is initialized to None — a KeyboardInterrupt landing
        before Popen returned previously raised UnboundLocalError inside
        the handler.
      * The child runs in its own session (`start_new_session=True`), so
        the SIGINT forwarded via os.killpg reaches only the child's process
        group. Before, the child shared cmdo's group and killpg re-signalled
        cmdo itself.
    """
    shell = os.environ.get("SHELL", "/bin/sh")
    start = time.time()
    was_interrupted = False
    process = None

    try:
        process = subprocess.Popen(
            [shell, "-c", command],
            stdout=None,  # inherit — stream to terminal
            stderr=None,  # inherit — stream to terminal
            start_new_session=True,  # own process group for targeted killpg
        )
        exit_code = process.wait()

    except KeyboardInterrupt:
        was_interrupted = True
        exit_code = -1
        if process is not None:
            # Forward SIGINT to the child's whole process group.
            try:
                os.killpg(os.getpgid(process.pid), signal.SIGINT)
            except (ProcessLookupError, OSError):
                pass
            # Give it a moment to exit cleanly, then hard-kill.
            try:
                exit_code = process.wait(timeout=2)
            except subprocess.TimeoutExpired:
                process.kill()
                exit_code = -1

    duration = time.time() - start
    return ExecutionResult(
        exit_code=exit_code,
        duration=duration,
        was_interrupted=was_interrupted,
    )
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _execute_stepwise(command: str) -> ExecutionResult:
    """Execute a multi-step command one step at a time.

    Splits *command* on `&&` / `;`, runs each piece through _execute_single,
    and asks for confirmation between steps. Stops early on failure or
    interruption; total duration accumulates across executed steps.

    NOTE(review): the regex split ignores quoting — `echo "a; b"` would be
    split mid-string. Fine for typical generated commands; confirm before
    relying on it for quoted payloads.
    """
    from rich.console import Console
    console = Console()

    # Split on && and ; (simple split — doesn't handle quoted strings perfectly)
    import re
    steps = re.split(r"\s*&&\s*|\s*;\s*", command)
    steps = [s.strip() for s in steps if s.strip()]

    total_duration = 0.0
    for i, step in enumerate(steps, 1):
        console.print(f"\n[bold]Step {i}/{len(steps)}:[/bold] {step}")
        result = _execute_single(step)
        total_duration += result.duration

        # Ctrl-C during a step aborts the whole sequence.
        if result.was_interrupted:
            return ExecutionResult(
                exit_code=-1,
                duration=total_duration,
                was_interrupted=True,
            )

        # A failing step stops the chain (mirrors && semantics).
        if result.exit_code != 0:
            console.print(f"[red]Step {i} failed (exit code {result.exit_code}). Stopping.[/red]")
            return ExecutionResult(
                exit_code=result.exit_code,
                duration=total_duration,
            )

        console.print(f"[green]Step {i} done ({result.duration:.1f}s)[/green]")

        # Between steps, the user may bail out; declining (or Ctrl-C/EOF at
        # the prompt) counts as a clean, interrupted stop with exit code 0.
        if i < len(steps):
            try:
                answer = input("Continue to next step? [Y/n] ").strip().lower()
                if answer == "n":
                    return ExecutionResult(
                        exit_code=0,
                        duration=total_duration,
                        was_interrupted=True,
                    )
            except (KeyboardInterrupt, EOFError):
                return ExecutionResult(
                    exit_code=0,
                    duration=total_duration,
                    was_interrupted=True,
                )

    return ExecutionResult(exit_code=0, duration=total_duration)
|
cmdo/llm/__init__.py
ADDED
|
File without changes
|
cmdo/llm/client.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""LLM API client for command generation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import time
|
|
6
|
+
|
|
7
|
+
from openai import APIConnectionError, APITimeoutError, OpenAI, RateLimitError
|
|
8
|
+
|
|
9
|
+
from cmdo.llm.parser import parse_response
|
|
10
|
+
from cmdo.llm.prompt import build_prompt
|
|
11
|
+
from cmdo.models import CommandResult, Config, ShellContext
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def generate_command(
    query: str, context: ShellContext, config: Config
) -> CommandResult:
    """Send query + context to LLM and return a structured CommandResult.

    Retries exactly once: on an empty parsed command, on rate limiting
    (after a 5s wait), and on timeout/connection errors. A second failure
    re-raises; a second empty command is returned as-is for the caller to
    reject.

    Args:
        query: Natural-language instruction from the user.
        context: Gathered shell environment, embedded into the prompt.
        config: Provider settings (api_key, optional base_url, model).

    Raises:
        RateLimitError, APITimeoutError, APIConnectionError: when the
            single retry also fails.
    """
    client = OpenAI(
        api_key=config.api_key,
        base_url=config.base_url or None,  # "" → fall back to default endpoint
    )
    messages = build_prompt(query, context)

    for attempt in range(2):
        try:
            response = client.chat.completions.create(
                model=config.model,
                messages=messages,
                max_completion_tokens=1024,
            )
            raw = response.choices[0].message.content or ""
            result = parse_response(raw)
            if result.command:
                return result
            # Empty command on first attempt — retry
            if attempt == 0:
                continue
            return result

        except RateLimitError:
            if attempt == 0:
                wait = 5
                from rich.console import Console

                Console().print(f"[yellow]⏳ Rate limited. Retrying in {wait}s...[/yellow]")
                time.sleep(wait)
                continue
            raise

        except (APITimeoutError, APIConnectionError):
            if attempt == 0:
                continue
            raise

    # Should not reach here, but just in case
    return CommandResult(command="", explanation="Failed to generate command.", confidence=0.0)
|
cmdo/llm/parser.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""Parse LLM responses into CommandResult."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import re
|
|
7
|
+
|
|
8
|
+
from cmdo.models import CommandResult, RiskLevel
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _extract_json(text: str) -> str:
|
|
12
|
+
"""Extract JSON from text that may contain markdown fences."""
|
|
13
|
+
# Try to find JSON in code block
|
|
14
|
+
match = re.search(r"```(?:json)?\s*\n?(.*?)\n?\s*```", text, re.DOTALL)
|
|
15
|
+
if match:
|
|
16
|
+
return match.group(1).strip()
|
|
17
|
+
# Try to find raw JSON object
|
|
18
|
+
match = re.search(r"\{.*\}", text, re.DOTALL)
|
|
19
|
+
if match:
|
|
20
|
+
return match.group(0)
|
|
21
|
+
return text
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _parse_risk_level(value: str) -> RiskLevel:
    """Map a risk-level string onto the enum, case/whitespace-insensitively.

    Unknown values fall back to SAFE (the local classifier can still
    upgrade the level afterwards).
    """
    normalized = value.strip().upper()
    known = {
        "DANGEROUS": RiskLevel.DANGEROUS,
        "CAUTION": RiskLevel.CAUTION,
    }
    return known.get(normalized, RiskLevel.SAFE)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def parse_response(raw: str) -> CommandResult:
    """Parse an LLM JSON response into a CommandResult.

    Hardened against slightly-off model output: previously a top-level JSON
    array/scalar crashed on `.get`, a non-numeric "confidence" crashed
    `float()`, and a non-string "risk_level" crashed `.upper()`. All such
    cases now degrade gracefully instead of aborting the CLI.
    """
    json_str = _extract_json(raw)
    try:
        data = json.loads(json_str)
    except json.JSONDecodeError:
        data = None

    # Fallback: treat the whole response as a command when there is no
    # usable JSON object.
    if not isinstance(data, dict):
        return CommandResult(
            command=raw.strip(),
            explanation="(Could not parse LLM response)",
            confidence=0.3,
        )

    # Coerce confidence defensively; keep the historical 0.8 default.
    try:
        confidence = float(data.get("confidence", 0.8))
    except (TypeError, ValueError):
        confidence = 0.8

    return CommandResult(
        command=data.get("command", ""),
        explanation=data.get("explanation", ""),
        risk_level=_parse_risk_level(str(data.get("risk_level", "SAFE"))),
        risk_reason=data.get("risk_reason"),
        alternatives=data.get("alternatives", []),
        is_multi_step=data.get("is_multi_step", False),
        estimated_duration=data.get("estimated_duration"),
        confidence=confidence,
    )
|
cmdo/llm/prompt.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"""Prompt construction for LLM command generation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from cmdo.models import ShellContext
|
|
6
|
+
|
|
7
|
+
SYSTEM_PROMPT = """\
|
|
8
|
+
You are a shell command generator. Given a natural language instruction \
|
|
9
|
+
and shell context, produce the exact command(s) to execute.
|
|
10
|
+
|
|
11
|
+
Rules:
|
|
12
|
+
1. Output ONLY valid shell commands for the user's OS and shell.
|
|
13
|
+
2. Prefer simple, standard tools over obscure ones.
|
|
14
|
+
3. Use tools confirmed available in the context. If the best tool \
|
|
15
|
+
is unavailable, use a fallback AND note it.
|
|
16
|
+
4. Never generate commands that require interactive input (use flags \
|
|
17
|
+
to avoid prompts, e.g., `rm -f` not `rm -i` when deletion is intended).
|
|
18
|
+
5. For file operations, use the cwd_listing to resolve ambiguous names.
|
|
19
|
+
6. Classify risk level:
|
|
20
|
+
- SAFE: read-only, create files, list, search, compress
|
|
21
|
+
- CAUTION: modify files, install packages, change permissions
|
|
22
|
+
- DANGEROUS: delete files, format disks, overwrite data, sudo operations, \
|
|
23
|
+
network-facing services, database drops, recursive force operations
|
|
24
|
+
7. If the request is ambiguous, prefer the SAFER interpretation.
|
|
25
|
+
8. If you cannot generate a command, say so — never hallucinate.
|
|
26
|
+
|
|
27
|
+
Respond in JSON:
|
|
28
|
+
{
|
|
29
|
+
"command": "...",
|
|
30
|
+
"explanation": "...",
|
|
31
|
+
"risk_level": "SAFE|CAUTION|DANGEROUS",
|
|
32
|
+
"risk_reason": "..." or null,
|
|
33
|
+
"alternatives": [...] or [],
|
|
34
|
+
"is_multi_step": true|false,
|
|
35
|
+
"estimated_duration": "..." or null,
|
|
36
|
+
"confidence": 0.0-1.0
|
|
37
|
+
}"""
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def _format_context(context: ShellContext) -> str:
|
|
41
|
+
"""Format shell context as a concise string for the LLM."""
|
|
42
|
+
lines = [
|
|
43
|
+
f"OS: {context.os}",
|
|
44
|
+
f"Shell: {context.shell}",
|
|
45
|
+
f"CWD: {context.cwd}",
|
|
46
|
+
f"User: {context.user}",
|
|
47
|
+
]
|
|
48
|
+
if context.cwd_listing:
|
|
49
|
+
listing = ", ".join(context.cwd_listing[:30])
|
|
50
|
+
lines.append(f"Files in CWD: {listing}")
|
|
51
|
+
if context.path_tools:
|
|
52
|
+
lines.append(f"Available tools: {', '.join(context.path_tools)}")
|
|
53
|
+
if context.env_hints:
|
|
54
|
+
hints = ", ".join(f"{k}={v}" for k, v in context.env_hints.items())
|
|
55
|
+
lines.append(f"Environment: {hints}")
|
|
56
|
+
if context.git_branch:
|
|
57
|
+
lines.append(f"Git branch: {context.git_branch}")
|
|
58
|
+
return "\n".join(lines)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def build_prompt(query: str, context: ShellContext) -> list[dict[str, str]]:
    """Assemble the chat message list for the OpenAI API call."""
    user_message = (
        f"Shell context:\n{_format_context(context)}\n\nInstruction: {query}"
    )
    return [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": user_message},
    ]
|
cmdo/models.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
"""Shared data models for cmdo."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from enum import Enum
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class RiskLevel(Enum):
    """Severity classification assigned to a generated shell command."""
    SAFE = "safe"  # read-only / non-destructive
    CAUTION = "caution"  # modifies state (files, packages, permissions)
    DANGEROUS = "dangerous"  # destructive or privileged operations
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class UserAction(Enum):
    """Choice the user makes at the confirmation prompt."""
    EXECUTE = "execute"  # run the command
    EXECUTE_STEPWISE = "step"  # presumably run multi-step commands one step at a time — confirm in executor
    EDIT = "edit"  # edit the command before running
    COPY = "copy"  # copy the command to the clipboard
    CANCEL = "cancel"  # abort without running
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class ShellContext:
    """Snapshot of the user's shell environment, rendered into the LLM prompt."""
    os: str = ""  # operating system name
    shell: str = ""  # shell in use
    cwd: str = ""  # current working directory
    cwd_listing: list[str] = field(default_factory=list)  # entries in cwd; the prompt shows at most 30
    user: str = ""  # current username
    path_tools: list[str] = field(default_factory=list)  # tools available to the user (rendered as "Available tools")
    env_hints: dict[str, str] = field(default_factory=dict)  # selected env vars, rendered key=value
    git_branch: str | None = None  # current git branch, if any
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass
class CommandResult:
    """Parsed LLM output describing a proposed shell command."""
    command: str = ""  # the shell command to run
    explanation: str = ""  # human-readable description of what the command does
    risk_level: RiskLevel = RiskLevel.SAFE  # SAFE / CAUTION / DANGEROUS
    risk_reason: str | None = None  # why the risk level was assigned; None when unspecified
    alternatives: list[str] = field(default_factory=list)  # alternative commands, possibly empty
    is_multi_step: bool = False  # whether the result contains multiple steps
    estimated_duration: str | None = None  # free-text duration estimate from the LLM
    confidence: float = 1.0  # model-reported confidence (parser defaults missing values to 0.8)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
class ExecutionResult:
    """Outcome of executing a command."""
    exit_code: int = 0  # process exit status
    stdout: str = ""  # captured standard output
    stderr: str = ""  # captured standard error
    duration: float = 0.0  # run time — presumably wall-clock seconds; confirm in executor
    was_interrupted: bool = False  # presumably set when the user interrupts (Ctrl-C) — confirm in executor
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@dataclass
class Config:
    """User configuration (stored in ~/.config/cmdo/config.toml per the README)."""
    provider: str = "openai"  # LLM provider identifier
    api_key: str = ""  # API key for the provider
    model: str = "gpt-5-mini"  # default model name
    base_url: str = ""  # custom API base URL; empty means provider default
    auto_confirm_safe: bool = False  # skip confirmation for SAFE commands
    danger_confirmation: str = "type"  # "type" = require typing the full word to confirm DANGEROUS commands (per README)
    color: bool = True  # colorized terminal output
    explanation: bool = True  # show the explanation line under the command
|
cmdo/safety/__init__.py
ADDED
|
File without changes
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""Local risk classification using pattern matching."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
|
|
7
|
+
from cmdo.models import RiskLevel
|
|
8
|
+
|
|
9
|
+
# Patterns ordered from most to least dangerous.
# Each entry is (regex, human-readable reason). classify_risk applies these
# case-insensitively, so SQL keywords like DROP/TRUNCATE also match lowercase.
DANGEROUS_PATTERNS: list[tuple[str, str]] = [
    # rm with recursive+force flags: separate in either order, or combined (-rf).
    (r"\brm\s+(-\w*r\w*\s+-\w*f\w*|-\w*f\w*\s+-\w*r\w*|-\w*rf\w*)", "Recursive forced deletion"),
    (r"\brm\s+-\w*r", "Recursive deletion"),
    (r"\bmkfs\b", "Filesystem format"),
    (r"\bdd\s+if=", "Direct disk write"),
    (r">\s*/dev/", "Write to device"),
    (r":\(\)\{\s*:\|:&\s*\};:", "Fork bomb"),
    (r"\bchmod\s+(-\w*R\w*\s+)?777\b", "Open permissions to everyone"),
    (r"\bDROP\s+(TABLE|DATABASE)\b", "Database destruction"),
    (r"\bTRUNCATE\s+", "Database truncation"),
    (r"\b(shutdown|reboot)\b", "System shutdown/reboot"),
    (r"\bsudo\s+rm\b", "Privileged deletion"),
    # curl/wget output piped straight into a shell, optionally via sudo.
    (r"\bcurl\b.*\|\s*(sudo\s+)?(ba)?sh\b", "Remote code execution"),
    (r"\bwget\b.*\|\s*(sudo\s+)?(ba)?sh\b", "Remote code execution"),
    # NOTE(review): \bformat\b also matches benign uses such as `--format=...`
    # — looks intentionally broad/conservative; confirm false-positive tolerance.
    (r"\bformat\b", "Disk format"),
    (r"\bfdisk\b", "Disk partition"),
    (r":>\s*\S+", "File truncation"),
    (r">\s*/etc/", "Overwrite system config"),
]

# Checked only after no DANGEROUS pattern matched (see classify_risk).
CAUTION_PATTERNS: list[tuple[str, str]] = [
    (r"\bsudo\b", "Requires elevated privileges"),
    (r"\bmv\b", "File move/rename (overwrite risk)"),
    (r"\bchmod\b", "Permission change"),
    (r"\bchown\b", "Ownership change"),
    (r"\bpip\s+install\b", "Package installation"),
    (r"\bnpm\s+install\s+-g\b", "Global package installation"),
    (r"\bbrew\s+install\b", "Package installation"),
    (r"\bapt\s+(install|remove)\b", "Package management"),
    (r"\bgit\s+push\s+--force\b", "Force push"),
    (r"\bgit\s+reset\s+--hard\b", "Hard reset"),
    (r"\bsed\s+-i\b", "In-place file edit"),
    (r"\bdocker\s+rm\b", "Container removal"),
    (r"\bkill\b", "Process termination"),
    (r"\bpkill\b", "Process termination"),
    (r"/etc/|/usr/", "System path modification"),
]
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def classify_risk(command: str) -> tuple[RiskLevel, str | None]:
    """Classify command risk level using local pattern matching.

    Returns a (risk_level, reason) tuple; reason is None for SAFE.
    """
    # Dangerous patterns are scanned first so they always win over caution.
    tiers = (
        (RiskLevel.DANGEROUS, DANGEROUS_PATTERNS),
        (RiskLevel.CAUTION, CAUTION_PATTERNS),
    )
    for level, patterns in tiers:
        for pattern, reason in patterns:
            if re.search(pattern, command, re.IGNORECASE):
                return level, reason
    return RiskLevel.SAFE, None
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def upgrade_risk(
    llm_level: RiskLevel, local_level: RiskLevel
) -> RiskLevel:
    """Return the higher of two risk levels (ties keep the LLM's level)."""
    severity = {RiskLevel.SAFE: 0, RiskLevel.CAUTION: 1, RiskLevel.DANGEROUS: 2}
    # max() returns its first argument on a tie, matching the original
    # "only upgrade when strictly greater" comparison.
    return max(llm_level, local_level, key=severity.__getitem__)
|
cmdo/safety/forbidden.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""Hard-blocked forbidden commands."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
|
|
7
|
+
# Hard-blocked patterns: (regex, user-facing explanation). check_forbidden
# applies these case-insensitively; per the README, matching commands are
# never executed, even with explicit user confirmation.
FORBIDDEN_PATTERNS: list[tuple[str, str]] = [
    (
        # Classic bash fork bomb: :(){ :|:& };:
        r":\(\)\{\s*:\|:&\s*\};:",
        "Fork bomb detected. This command would crash your system.",
    ),
    (
        # dd from /dev/zero or /dev/urandom directly onto a raw disk device.
        r"\bdd\s+if=/dev/(zero|urandom)\s+of=/dev/[hs]d[a-z]\b",
        "Disk wipe detected. This would destroy all data on the target drive.",
    ),
    (
        # rm -rf / (bare root path at end of command).
        r"\brm\s+-\w*rf?\w*\s+/\s*$",
        "Full system deletion detected. This would destroy your entire filesystem.",
    ),
    (
        # rm -rf /* (root glob).
        r"\brm\s+-\w*rf?\w*\s+/\*",
        "Full system deletion detected. This would destroy your entire filesystem.",
    ),
]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def check_forbidden(command: str) -> str | None:
    """Check if a command is hard-blocked.

    Returns the block message for the first matching forbidden pattern,
    or None when the command is allowed.
    """
    hits = (
        message
        for pattern, message in FORBIDDEN_PATTERNS
        if re.search(pattern, command, re.IGNORECASE)
    )
    return next(hits, None)
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cmdo-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Natural language to shell commands — just say what you want and cmdo does it.
|
|
5
|
+
Author: Yijiang Pang
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/YijiangPang/cmdo
|
|
8
|
+
Project-URL: Repository, https://github.com/YijiangPang/cmdo
|
|
9
|
+
Project-URL: Issues, https://github.com/YijiangPang/cmdo/issues
|
|
10
|
+
Keywords: cli,terminal,ai,shell,natural-language,command-line,openai,gpt
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Environment :: Console
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Intended Audience :: System Administrators
|
|
15
|
+
Classifier: Operating System :: MacOS
|
|
16
|
+
Classifier: Operating System :: POSIX :: Linux
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
21
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
22
|
+
Classifier: Topic :: System :: Shells
|
|
23
|
+
Classifier: Topic :: Utilities
|
|
24
|
+
Requires-Python: >=3.10
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
License-File: LICENSE
|
|
27
|
+
Requires-Dist: click>=8.0
|
|
28
|
+
Requires-Dist: rich>=13.0
|
|
29
|
+
Requires-Dist: openai>=1.0
|
|
30
|
+
Requires-Dist: tomli>=2.0; python_version < "3.11"
|
|
31
|
+
Requires-Dist: tomli-w>=1.0
|
|
32
|
+
Requires-Dist: pyperclip>=1.8
|
|
33
|
+
Provides-Extra: dev
|
|
34
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
35
|
+
Requires-Dist: pytest-mock>=3.0; extra == "dev"
|
|
36
|
+
Requires-Dist: build; extra == "dev"
|
|
37
|
+
Requires-Dist: twine; extra == "dev"
|
|
38
|
+
Dynamic: license-file
|
|
39
|
+
|
|
40
|
+
# cmdo
|
|
41
|
+
|
|
42
|
+
**Natural language to shell commands.** Just say what you want and cmdo does it.
|
|
43
|
+
|
|
44
|
+
```
|
|
45
|
+
$ cmdo "compress the checkpoints folder"
|
|
46
|
+
|
|
47
|
+
🤖 Will run:
|
|
48
|
+
╭──────────────────────────────────────────────╮
|
|
49
|
+
│ tar -czf checkpoints.tar.gz checkpoints/ │
|
|
50
|
+
╰──────────────────────────────────────────────╯
|
|
51
|
+
📝 Compresses the "checkpoints" directory into a gzipped tarball.
|
|
52
|
+
|
|
53
|
+
[Y] Execute [e] Edit [c] Copy [n] Cancel
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
## Features
|
|
57
|
+
|
|
58
|
+
- **Translates natural language** into accurate shell commands using GPT
|
|
59
|
+
- **Safety-first**: commands are classified as Safe / Caution / Dangerous with color-coded warnings
|
|
60
|
+
- **Hard-blocks** catastrophic commands (fork bombs, `rm -rf /`, disk wipes)
|
|
61
|
+
- **Context-aware**: knows your OS, shell, current directory, installed tools, and git branch
|
|
62
|
+
- **Works everywhere**: any terminal — iTerm, VSCode, Terminal.app, SSH, tmux
|
|
63
|
+
|
|
64
|
+
## Install
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
pip install cmdo-cli
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
Requires Python 3.10+.
|
|
71
|
+
|
|
72
|
+
## Quick Start
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
# First-time setup — enter your OpenAI API key
|
|
76
|
+
cmdo --config
|
|
77
|
+
|
|
78
|
+
# Use it
|
|
79
|
+
cmdo "find all Python files larger than 1MB"
|
|
80
|
+
cmdo "start a local HTTP server on port 8080"
|
|
81
|
+
cmdo "show disk usage sorted by size"
|
|
82
|
+
|
|
83
|
+
# Generate without executing
|
|
84
|
+
cmdo --dry-run "delete the temp folder"
|
|
85
|
+
|
|
86
|
+
# Auto-confirm safe commands
|
|
87
|
+
cmdo --yes "list all docker containers"
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
## Safety
|
|
91
|
+
|
|
92
|
+
Every generated command goes through two layers of safety checking:
|
|
93
|
+
|
|
94
|
+
1. **LLM classification** — the model labels each command as SAFE, CAUTION, or DANGEROUS
|
|
95
|
+
2. **Local pattern matching** — a regex-based classifier catches anything the LLM misses
|
|
96
|
+
|
|
97
|
+
| Risk Level | Confirmation | Auto-confirm (`--yes`) |
|
|
98
|
+
|---|---|---|
|
|
99
|
+
| **SAFE** (green) | Single keypress `Y` | Allowed |
|
|
100
|
+
| **CAUTION** (yellow) | Single keypress `Y` | Allowed |
|
|
101
|
+
| **DANGEROUS** (red) | Type full word `yes` | Never |
|
|
102
|
+
|
|
103
|
+
Certain commands are **hard-blocked** and can never be executed:
|
|
104
|
+
- Fork bombs
|
|
105
|
+
- Full disk wipes (`dd if=/dev/zero of=/dev/sda`)
|
|
106
|
+
- System-wide deletion (`rm -rf /`)
|
|
107
|
+
|
|
108
|
+
## Configuration
|
|
109
|
+
|
|
110
|
+
```bash
|
|
111
|
+
cmdo --config # Interactive setup wizard
|
|
112
|
+
cmdo --config --show # View current config (API key masked)
|
|
113
|
+
cmdo --config --reset # Reset to defaults
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
Config is stored in `~/.config/cmdo/config.toml`.
|
|
117
|
+
|
|
118
|
+
## CLI Reference
|
|
119
|
+
|
|
120
|
+
```
|
|
121
|
+
Usage: cmdo [OPTIONS] [QUERY]...
|
|
122
|
+
|
|
123
|
+
Options:
|
|
124
|
+
--config Configure cmdo
|
|
125
|
+
--show Show current configuration (use with --config)
|
|
126
|
+
--reset Reset configuration (use with --config)
|
|
127
|
+
-d, --dry-run Generate command without executing
|
|
128
|
+
-y, --yes Auto-confirm safe commands
|
|
129
|
+
-m, --model TEXT Override default model for one query
|
|
130
|
+
-V, --version Show version
|
|
131
|
+
--help Show help
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
## License
|
|
135
|
+
|
|
136
|
+
MIT
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
cmdo/__init__.py,sha256=Jt4rFOgqLrGQGWHwudm-1ANtlvyuLxz87SO7T0w4A4A,74
|
|
2
|
+
cmdo/cli.py,sha256=XXU97K-gtGX-vKdN-ifMcNisPpwjPa_B1YDrH2VZW5s,5458
|
|
3
|
+
cmdo/clipboard.py,sha256=u-KVpJgk9Na4GZj6Go_jSZ0KVPO7FgqA-IDMnbuP5Pw,311
|
|
4
|
+
cmdo/config.py,sha256=4KkiL6hM7rt9Nd-lqwV6zPREme3HxNZsZ473oR_Fnxo,6415
|
|
5
|
+
cmdo/context.py,sha256=XxLnitT1_Nq3TQWZmDkkX-15afMQEPwwh2l90XJ6Msw,2662
|
|
6
|
+
cmdo/display.py,sha256=ncGP1RmleyrX9VyGOnDzzwEu1n1_N9lE5ZheVI62Thk,4323
|
|
7
|
+
cmdo/executor.py,sha256=J_LT9VShZ0TVjyo2M71pG-PkV0Qz5K3tlR8dGCUW5Y0,3229
|
|
8
|
+
cmdo/models.py,sha256=QyFcT3WtC4TEbSnpufrxTH6CaEgUDttKzIkPoVWO7V4,1420
|
|
9
|
+
cmdo/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
10
|
+
cmdo/llm/client.py,sha256=xGZ3Zu4Q3Lxzp5xoPKljzDV7tme6v3oF4kbD0pS2muA,1739
|
|
11
|
+
cmdo/llm/parser.py,sha256=8CHuCp-qY71qhrzpRD5Bv3vJ4eUxrZkeASJCr_iqD_s,1740
|
|
12
|
+
cmdo/llm/prompt.py,sha256=LsWq4bXZ5ZsBnDw3dXLc54gcWXqoQsaSjLLWeD8bjg0,2572
|
|
13
|
+
cmdo/safety/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
14
|
+
cmdo/safety/classifier.py,sha256=DZJrw2BJcaPZF1xCEVA-BeMwjWNwuXMeBbzfIUwZhnw,2730
|
|
15
|
+
cmdo/safety/forbidden.py,sha256=n6x0BsU5uye5gzS6h_opIV25vYY8exj9YHYVERFsoP8,986
|
|
16
|
+
cmdo_cli-0.1.0.dist-info/licenses/LICENSE,sha256=jjc_D9cf5Mv3Lkf4EJAKytg4rN65jZIl6J5LHvys0x4,1064
|
|
17
|
+
cmdo_cli-0.1.0.dist-info/METADATA,sha256=FWjqSsLV80wlRb8RMrsUeZG7WOJJqZ0j8mDfJ_Vb6H8,4369
|
|
18
|
+
cmdo_cli-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
19
|
+
cmdo_cli-0.1.0.dist-info/entry_points.txt,sha256=CzKLGJtjwzHE1lry4iNM6GwKZu9RLW4BzX46R-nKSzI,39
|
|
20
|
+
cmdo_cli-0.1.0.dist-info/top_level.txt,sha256=DMcTFo6H1jZoijIshDn0tX-GKn9ok5q6KXtOnXC_GT4,5
|
|
21
|
+
cmdo_cli-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Yijiang
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
cmdo
|