cmdexy-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cmdexy/__init__.py ADDED
File without changes
cmdexy/cli/__init__.py ADDED
File without changes
cmdexy/cli/interactive.py ADDED
@@ -0,0 +1,28 @@
+ from rich.console import Console
+ from cmdexy.core.controller import Controller
+ import asyncio
+
+ console = Console()
+
+ async def interactive_session():
+     """Start the interactive AI session."""
+     try:
+         controller = Controller()
+     except Exception as e:
+         console.print(f"[bold red]Error:[/bold red] {e}")
+         return
+
+     console.print("[bold green]Welcome to cmdexy![/bold green] Type 'exit' to quit.")
+
+     while True:
+         try:
+             user_input = console.input("[bold blue]cmdexy >[/bold blue] ")
+             if user_input.lower() in ("exit", "quit"):
+                 break
+
+             await controller.process_input(user_input)
+
+         except KeyboardInterrupt:
+             break
+         except Exception as e:
+             console.print(f"[bold red]Error:[/bold red] {e}")
cmdexy/cli/run.py ADDED
@@ -0,0 +1,10 @@
+ from cmdexy.core.controller import Controller
+ import asyncio
+
+ async def run_instruction(instruction: str):
+     """Execute a single instruction."""
+     try:
+         controller = Controller()
+         await controller.process_input(instruction)
+     except Exception as e:
+         print(f"Error: {e}")
cmdexy/cli/wrapper.py ADDED
@@ -0,0 +1,42 @@
+ import sys
+ from rich.console import Console
+ from cmdexy.core.controller import Controller
+ from cmdexy.core.execution import ExecutionEngine
+ import asyncio
+
+ console = Console()
+
+ async def execute_wrapper(command_parts: list[str]):
+     """
+     Directly execute a command and monitor for failure.
+     If it fails, trigger AI error recovery.
+     """
+     controller = Controller()
+
+     # Reconstruct command
+     cmd = " ".join(command_parts)
+
+     # console.print(f"[dim]Executing: {cmd}[/dim]")
+
+     # Execute directly
+     exit_code, stdout, stderr = await controller.execution.run_shell_command(cmd)
+
+     # Decide how to report the output based on the exit code.
+     # Note: run_shell_command buffers output; a wrapper would ideally stream it,
+     # but for the MVP we simply print the buffered result.
+
+     if exit_code == 0:
+         if stdout: print(stdout)
+         if stderr: print(stderr, file=sys.stderr)
+     else:
+         # Failure case
+         if stdout: print(stdout)
+         if stderr: print(stderr, file=sys.stderr)
+
+         console.print(f"\n[bold red]Command Failed (Exit Code {exit_code})[/bold red]")
+
+         # Trigger Recovery
+         try:
+             await controller.trigger_error_recovery(cmd, stdout, stderr)
+         except Exception as e:
+             console.print(f"[red]Error during recovery:[/red] {e}")
cmdexy/core/__init__.py ADDED
File without changes
cmdexy/core/ai_engine.py ADDED
@@ -0,0 +1,132 @@
+ import cohere
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ from .config import ConfigManager
+
+ class AIEngine:
+     def __init__(self):
+         self.config = ConfigManager()
+         api_key = self.config.get_api_key()
+         if not api_key:
+             raise ValueError(
+                 "API Key not found or configured.\n"
+                 "Please run 'cmdexy config' to set it.\n"
+                 "Get your key from: https://dashboard.cohere.com/api-keys"
+             )
+         self.client = cohere.Client(api_key)
+
+     async def analyze_intent(self, user_input: str) -> str:
+         """
+         Simple intent classification using Cohere Chat.
+         """
+         prompt = f"""
+         Classify the following user input into one of these categories:
+         - EXECUTE_CODE: The user wants to write and run code (e.g. "calculate fibonacci", "plot this csv").
+         - SYSTEM_COMMAND: The user wants to run a shell command (e.g. "ls -la", "mkdir foo").
+         - QUESTION: The user is asking a general question.
+
+         Input: "{user_input}"
+         Category:"""
+
+         response = self.client.chat(
+             model='command-r-08-2024',
+             message=prompt,
+             temperature=0
+         )
+         return response.text.strip()
+
+     async def generate_code(self, instruction: str, language: str = "python") -> str:
+         """
+         Generate a script based on instruction.
+         """
+         prompt = f"""
+         Write a complete, runnable {language} script to: {instruction}.
+         Return ONLY the code, no markdown, no explanation.
+         If using third-party libraries, assume they are installed or standard.
+         """
+         response = self.client.chat(
+             model='command-r-08-2024',
+             message=prompt,
+             temperature=0.3
+         )
+         code = response.text.strip()
+         # Strip markdown fences if present
+         if code.startswith("```"):
+             code = code.split("\n", 1)[1]
+         if code.endswith("```"):
+             code = code.rsplit("\n", 1)[0]
+         # Additional cleanup for fences like ```python
+         if code.startswith("python"):
+             code = code.split("\n", 1)[1]
+
+         return code.strip()
+
+     async def generate_shell_command(self, instruction: str, os_context: str = "Unknown Linux") -> str:
+         """
+         Generate a shell command based on instruction and OS context.
+         """
+         prompt = f"""
+         You are an expert command line assistant for {os_context}.
+         Write a shell command to: {instruction}.
+         Return ONLY the command, no code blocks, no explanation.
+         Ensure the command is appropriate for the detected OS ({os_context}).
+
+         Guidelines:
+         - For creating or writing files with multiple lines, USE Heredoc syntax (cat << 'EOF' > filename).
+         - Avoid long one-line 'echo' chains for complex content.
+         - Ensure the usage of Heredoc is compatible with standard shells (bash/zsh).
+
+         Example:
+         Input: create hello.py with print hello
+         Output:
+         cat << 'EOF' > hello.py
+         print("Hello")
+         EOF
+         """
+         response = self.client.chat(
+             model='command-r-08-2024',
+             message=prompt,
+             temperature=0
+         )
+         return response.text.strip()
+
+     async def suggest_fix(self, command: str, error_log: str, context: str = "", os_context: str = "Unknown OS") -> str:
+         """
+         Analyze an error and suggest a fix.
+         Returns text that describes the fix and provides the corrected code block if applicable.
+         """
+         prompt = f"""
+         You are an expert command line assistant for {os_context}.
+         I ran this command: `{command}`
+         It failed with this error:
+         {error_log}
+
+         {f"Context: {context}" if context else ""}
+
+         Analyze the error.
+         If the error is in the code file (e.g. syntax error, name error):
+         1. Explain the fix briefly.
+         2. Provide the COMPLETELY CORRECTED file content wrapped in <FILE_CONTENT> tags.
+         Example:
+         <FILE_CONTENT>
+         def foo():
+             return 1
+         </FILE_CONTENT>
+
+         If it's a 'command not found' error or tool missing error, YOU MUST PROVIDE the installation command wrapped in <COMMAND_FIX>.
+         Example:
+         <COMMAND_FIX>
+         brew install cowsay
+         </COMMAND_FIX>
+
+         If it's just a wrong flag (e.g. ls -z), provide the corrected command in a standard code block.
+         """
+         response = self.client.chat(
+             model='command-r-08-2024',
+             message=prompt,
+             temperature=0
+         )
+         return response.text.strip()
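As a quick orientation for the class above, here is a minimal sketch of how `AIEngine` might be driven directly, outside the `Controller`. It assumes a Cohere API key is already configured (via `COHERE_API_KEY` or `~/.cmdexy/config.json`); the instruction text and the OS string are illustrative placeholders, not values from the package.

```python
import asyncio

from cmdexy.core.ai_engine import AIEngine

async def demo():
    # AIEngine.__init__ raises ValueError if no API key is configured.
    ai = AIEngine()

    # Classify the request, then generate an OS-appropriate shell command.
    instruction = "list all files in the current directory"  # illustrative input
    intent = await ai.analyze_intent(instruction)
    print(f"Intent: {intent}")

    if "SYSTEM_COMMAND" in intent:
        cmd = await ai.generate_shell_command(instruction, os_context="macOS 14.5 (arm64)")  # hypothetical OS string
        print(cmd)

asyncio.run(demo())
```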
cmdexy/core/config.py ADDED
@@ -0,0 +1,39 @@
+ import os
+ import json
+ import platform
+ from pathlib import Path
+
+ class ConfigManager:
+     def __init__(self):
+         self.config_dir = Path.home() / ".cmdexy"
+         self.config_file = self.config_dir / "config.json"
+         self.config = self._load_config()
+
+     def _load_config(self):
+         if not self.config_file.exists():
+             return {}
+         try:
+             with open(self.config_file, 'r') as f:
+                 return json.load(f)
+         except Exception:
+             return {}
+
+     def save_config(self, key: str, value: str):
+         self.config_dir.mkdir(parents=True, exist_ok=True)
+         self.config[key] = value
+         with open(self.config_file, 'w') as f:
+             json.dump(self.config, f, indent=2)
+         # Restrict permissions to owner only (0600 = rw-------)
+         os.chmod(self.config_file, 0o600)
+
+     def get_api_key(self):
+         # Priority: Env Var > Config File
+         return os.getenv("COHERE_API_KEY") or self.config.get("api_key")
+
+     def get_os_context(self):
+         # We compute this live but could cache it
+         system = platform.system()
+         if system == "Darwin":
+             # Return clearer macOS string for AI
+             return f"macOS {platform.mac_ver()[0]} ({platform.machine()})"
+         return f"{system} {platform.release()} ({platform.machine()})"
cmdexy/core/controller.py ADDED
@@ -0,0 +1,152 @@
+ from rich.console import Console
+ from cmdexy.core.ai_engine import AIEngine
+ from cmdexy.core.execution import ExecutionEngine
+ import platform
+ import os
+
+ console = Console()
+
+ class Controller:
+     def __init__(self):
+         self.ai = AIEngine()
+         self.execution = ExecutionEngine()
+         self.os_context = f"{platform.system()} {platform.release()} ({platform.machine()})"
+
+     async def process_input(self, user_input: str):
+         # 1. Analyze Intent
+         with console.status("[bold green]Acting...[/bold green]"):
+             intent = await self.ai.analyze_intent(user_input)
+
+         console.print(f"Intent detected: [bold cyan]{intent}[/bold cyan]")
+
+         if "EXECUTE_CODE" in intent:
+             with console.status("[bold green]Generating Code...[/bold green]"):
+                 code = await self.ai.generate_code(user_input)
+
+             console.print("[bold]Generated Code:[/bold]")
+             console.print(code, style="italic")
+
+             confirm = console.input("[yellow]Execute? (y/n): [/yellow]")
+             if confirm.lower() == 'y':
+                 result = await self.execution.execute_code("python", code)
+                 console.print("[bold]Result:[/bold]")
+                 console.print(result)
+             else:
+                 console.print("[red]Aborted.[/red]")
+
+         elif "SYSTEM_COMMAND" in intent:
+             with console.status("[bold green]Generating Command...[/bold green]"):
+                 cmd = await self.ai.generate_shell_command(user_input, self.os_context)
+
+             console.print(f"Generated Command: [bold yellow]{cmd}[/bold yellow]")
+
+             confirm = console.input("[yellow]Execute on HOST? (y/n): [/yellow]")
+             if confirm.lower() == 'y':
+                 exit_code, stdout, stderr = await self.execution.run_shell_command(cmd)
+
+                 if exit_code == 0:
+                     console.print(stdout)
+                 else:
+                     if stdout: console.print(stdout)
+                     console.print(f"[bold red]Command Failed (Exit Code {exit_code})[/bold red]")
+                     if stderr: console.print(stderr, style="red")
+
+
+                     # Interactive Error Recovery
+                     await self.trigger_error_recovery(cmd, stdout, stderr)
+
+         else:
+             # QUESTION
+             # Cohere Client is synchronous by default unless AsyncClient is used
+             ans = self.ai.client.chat(message=user_input, model='command-r-08-2024')
+             console.print(f"[bold]AI Answer:[/bold] {ans.text}")
+
+     async def trigger_error_recovery(self, cmd: str, stdout: str, stderr: str):
+         """
+         Public method to trigger the AI error analysis and fix loop.
+         Useful for direct command execution wrapper.
+         """
+         analyze = console.input("[yellow]Analyze Error? (y/n): [/yellow]")
+         if analyze.lower() == 'y':
+             with console.status("[bold yellow]Analyzing Error & Generating Fix...[/bold yellow]"):
+                 # If the command involved a file, try to read it for context
+                 # Generalized heuristic: check every word of the command args
+                 file_context = ""
+                 filename = None
+
+                 # Split command into words and check if they exist as files
+                 parts = cmd.split()
+                 for part in parts:
+                     # Clean potential quotes or flags
+                     candidate = part.strip("'\"")
+                     if os.path.isfile(candidate):
+                         try:
+                             with open(candidate, 'r') as f:
+                                 # Read content to check encoding and capture context
+                                 content = f.read()
+
+                             filename = candidate
+                             file_context = f"\nFile '{filename}' content:\n{content}"
+                             # Found a valid text file, use it as context
+                             break
+                         except UnicodeDecodeError:
+                             # Skip binary files (like executables)
+                             continue
+
+                 fix_suggestion = await self.ai.suggest_fix(cmd, stderr + stdout, file_context, self.os_context)
+
+             console.print("[bold]AI Suggestion:[/bold]")
+             console.print(fix_suggestion, style="cyan")
+
+             # Extract code block via XML tags (robust)
+             new_code = None
+             if "<FILE_CONTENT>" in fix_suggestion:
+                 try:
+                     new_code = fix_suggestion.split("<FILE_CONTENT>")[1].split("</FILE_CONTENT>")[0].strip()
+                     # Strip markdown fences if inside tags
+                     if new_code.startswith("```"):
+                         new_code = new_code.split("\n", 1)[1]
+                     if new_code.endswith("```"):
+                         new_code = new_code.rsplit("\n", 1)[0]
+                     if new_code.startswith("python"):  # simplified
+                         new_code = new_code[6:].lstrip()
+                 except IndexError:
+                     pass
+
+             if new_code and filename:
+                 apply_fix = console.input(f"[bold green]Apply fix to {filename} and retry? (y/n): [/bold green]")
+                 if apply_fix.lower() == 'y':
+                     with open(filename, 'w') as f:
+                         f.write(new_code)
+                     console.print(f"[green]Fixed {filename}. Retrying...[/green]")
+                     # Let's just run it once more
+                     # Note: For wrapper mode, this retry executes via shell on host
+                     exit_code, stdout, stderr = await self.execution.run_shell_command(cmd)
+                     if stdout: console.print(stdout)
+                     if exit_code != 0:
+                         console.print(f"[bold red]Retry Failed (Exit Code {exit_code})[/bold red]")
+                         if stderr: console.print(stderr, style="red")
+                         await self.trigger_error_recovery(cmd, stdout, stderr)
+             else:
+                 # Check for Command Fix
+                 fix_cmd = None
+                 if "<COMMAND_FIX>" in fix_suggestion:
+                     try:
+                         fix_cmd = fix_suggestion.split("<COMMAND_FIX>")[1].split("</COMMAND_FIX>")[0].strip()
+                     except IndexError:
+                         pass
+
+                 if fix_cmd:
+                     run_fix = console.input(f"[bold green]Run fix command: '{fix_cmd}'? (y/n): [/bold green]")
+                     if run_fix.lower() == 'y':
+                         console.print(f"[green]Running fix...[/green]")
+                         await self.execution.run_shell_command(fix_cmd)
+                         console.print(f"[green]Fix applied. Retrying original command...[/green]")
+                         exit_code, stdout, stderr = await self.execution.run_shell_command(cmd)
+                         if stdout: console.print(stdout)
+                         if exit_code != 0:
+                             console.print(f"[bold red]Retry Failed (Exit Code {exit_code})[/bold red]")
+                             if stderr: console.print(stderr, style="red")
+                             await self.trigger_error_recovery(cmd, stdout, stderr)
+         else:
+             console.print("[red]Aborted.[/red]")
cmdexy/core/execution.py ADDED
@@ -0,0 +1,75 @@
+ import docker
+ import os
+ import asyncio
+ from typing import Tuple
+
+ class ExecutionEngine:
+     def __init__(self):
+         try:
+             self.client = docker.from_env()
+         except:
+             print("Warning: Docker not available. Sandboxed execution will be limited.")
+             self.client = None
+
+     async def execute_code(self, language: str, code: str) -> str:
+         """
+         Execute code in a sandbox (Docker).
+         Handles interactive scripts by running in foreground if input() detected.
+         """
+         if not self.client:
+             return "Error: Docker not running."
+
+         # Support mainly Python for MVP
+         image = "python:3.10-slim"
+
+         # Interactive Mode Check
+         if "input(" in code:
+             # We must run this attached to the user's terminal
+             # Write code to a temp file on host first
+             temp_file = "temp_script.py"
+             with open(temp_file, "w") as f:
+                 f.write(code)
+
+             # Use subprocess to run docker run -it ...
+             # Mounting current dir to /app so script is accessible
+             cmd = ["docker", "run", "-it", "--rm", "-v", f"{os.getcwd()}:/app", "-w", "/app", image, "python", temp_file]
+
+             # Using asyncio.create_subprocess_exec doesn't handle stdin/tty well for interactive
+             # So we rely on standard subprocess for interactive session
+             import subprocess
+             try:
+                 subprocess.run(cmd)
+                 return "Interactive session ended."
+             except Exception as e:
+                 return f"Interactive Execution Failed: {e}"
+             finally:
+                 if os.path.exists(temp_file):
+                     os.remove(temp_file)
+
+         # Standard Detached Execution
+         try:
+             container = self.client.containers.run(
+                 image,
+                 command=f"python -c '{code}'",
+                 remove=True,
+                 detach=False,  # Wait for output
+                 working_dir="/app",
+                 stderr=True
+             )
+             return container.decode("utf-8")
+         except Exception as e:
+             return f"Execution Failed: {e}"
+
+     async def run_shell_command(self, command: str) -> Tuple[int, str, str]:
+         """
+         Execute a shell command on the HOST system.
+         Returns (exit_code, stdout, stderr).
+         """
+         process = await asyncio.create_subprocess_shell(
+             command,
+             stdout=asyncio.subprocess.PIPE,
+             stderr=asyncio.subprocess.PIPE
+         )
+
+         stdout, stderr = await process.communicate()
+         return process.returncode, stdout.decode().strip(), stderr.decode().strip()
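For reference, a minimal sketch of calling `run_shell_command` directly and unpacking its `(exit_code, stdout, stderr)` tuple; the `ls -la` command is just an example.

```python
import asyncio

from cmdexy.core.execution import ExecutionEngine

async def demo():
    engine = ExecutionEngine()  # prints a warning if Docker is not available
    exit_code, stdout, stderr = await engine.run_shell_command("ls -la")  # example command
    if exit_code == 0:
        print(stdout)
    else:
        print(f"Failed with exit code {exit_code}: {stderr}")

asyncio.run(demo())
```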
cmdexy/main.py ADDED
@@ -0,0 +1,95 @@
+ import sys
+ import typer
+ import asyncio
+ from rich.console import Console
+ from typing import List, Optional
+ from cmdexy.cli.wrapper import execute_wrapper
+ from cmdexy.cli.interactive import interactive_session
+ from cmdexy.cli.run import run_instruction
+ from cmdexy.core.config import ConfigManager
+
+ __version__ = "1.0.0"
+
+ def version_callback(value: bool):
+     if value:
+         print(f"cmdexy version {__version__}")
+         raise typer.Exit()
+
+ app = typer.Typer(help="cmdexy: AI-powered CLI Assistant",
+                   context_settings={"help_option_names": ["-h", "--help"]})
+ console = Console()
+
+ @app.callback(invoke_without_command=True)
+ def main(
+     ctx: typer.Context,
+     version: bool = typer.Option(None, "--version", "-v", callback=version_callback, is_eager=True, help="Show version and exit")
+ ):
+     """
+     cmdexy: AI-powered CLI Assistant
+
+     Run subcommands: config, int, run, shell
+     """
+     if ctx.invoked_subcommand is None:
+         # Show help if no subcommand
+         console.print("[dim]Use --help for more info[/dim]")
+         ctx.get_help()
+
+ @app.command(name="shell",
+              context_settings={"allow_extra_args": True, "ignore_unknown_options": True})
+ def shell_command(ctx: typer.Context):
+     """Run any shell command with AI error recovery. Example: cmdexy shell python3 hello.py"""
+     if not ctx.args:
+         console.print("[red]Usage: cmdexy shell <command> [args...][/red]")
+         raise typer.Exit(1)
+     asyncio.run(execute_wrapper(ctx.args))
+
+ def interactive():
+     """Start the interactive AI session."""
+     asyncio.run(interactive_session())
+
+ @app.command(name="int", hidden=True)
+ def interactive_alias():
+     """Alias for interactive."""
+     asyncio.run(interactive_session())
+
+ @app.command()
+ def run(instruction: str):
+     """Execute a single instruction."""
+     asyncio.run(run_instruction(instruction))
+
+ @app.command()
+ def config():
+     """Configure cmdexy (API Key, etc)."""
+     manager = ConfigManager()
+     console.print("[bold]Configure cmdexy[/bold]")
+
+     # API Key
+     current_key = manager.get_api_key()
+     if current_key:
+         console.print(f"Current API Key: [green]{current_key[:4]}...{current_key[-4:]}[/green]")
+
+     api_key = console.input("Enter Cohere API Key (leave blank to keep current): ", password=True)
+     if api_key.strip():
+         new_key = api_key.strip()
+         console.print("[yellow]Validating API Key...[/yellow]")
+         try:
+             # Validate by making a lightweight call
+             import cohere
+             client = cohere.Client(new_key)
+             # Use Chat API as Generate is deprecated
+             client.chat(message="hi", model="command-r-08-2024")
+
+             manager.save_config("api_key", new_key)
+             console.print("[green]API Key valid and saved successfully![/green]")
+         except Exception as e:
+             console.print(f"[bold red]Invalid API Key:[/bold red] {e}")
+             console.print("[red]Key NOT saved.[/red]")
+     else:
+         console.print("[yellow]API Key unchanged.[/yellow]")
+
+ def entry_point():
+     """Main entry point."""
+     app()
+
+ if __name__ == "__main__":
+     entry_point()
cmdexy-1.0.0.dist-info/METADATA ADDED
@@ -0,0 +1,236 @@
+ Metadata-Version: 2.4
+ Name: cmdexy
+ Version: 1.0.0
+ Summary: AI-powered CLI assistant with automatic error recovery and code fixing.
+ License: MIT
+ License-File: LICENSE
+ Keywords: cli,ai,assistant,error-recovery,cohere,automation
+ Author: Akhilesh S
+ Author-email: akhilesh2220@github.com
+ Requires-Python: >=3.10,<4.0
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Environment :: Console
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: Utilities
+ Requires-Dist: cohere (>=4.0.0,<5.0.0)
+ Requires-Dist: docker (>=7.0.0,<8.0.0)
+ Requires-Dist: pydantic (>=2.0.0,<3.0.0)
+ Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
+ Requires-Dist: rich (>=13.0.0,<14.0.0)
+ Requires-Dist: typer[all] (>=0.21.1,<0.22.0)
+ Project-URL: Homepage, https://github.com/akhilesh2220/cmdexy
+ Project-URL: Repository, https://github.com/akhilesh2220/cmdexy
+ Description-Content-Type: text/markdown
+
+ # cmdexy
+
+ **AI-powered CLI assistant with automatic error recovery and code fixing.**
+
+ [![GitHub](https://img.shields.io/badge/GitHub-akhilesh2220%2Fcmdexy-blue)](https://github.com/akhilesh2220/cmdexy)
+ [![Python](https://img.shields.io/badge/Python-3.10%2B-green)](https://python.org)
+ [![License](https://img.shields.io/badge/License-MIT-yellow)](LICENSE)
+
+ ---
+
+ ## ✨ Features
+
+ - 🤖 **AI-Powered Intent Analysis** - Understands what you want to do
+ - 🔧 **Automatic Error Recovery** - Detects failures and suggests fixes
+ - 📝 **Code Auto-Fixing** - Patches your files automatically
+ - 💻 **Shell Command Wrapper** - Monitor any command with AI assistance
+ - 🍎 **OS-Aware** - Generates platform-specific commands (macOS, Linux, Windows)
+ - 🔐 **Secure Config** - API keys stored safely in `~/.cmdexy/`
+
+ ---
+
+ ## 🚀 Installation
+
+ ### Prerequisites
+ - Python 3.10+
+ - [Cohere API Key](https://dashboard.cohere.com/api-keys) (free tier available)
+
+ ### Install from source
+
+ ```bash
+ # Clone the repository
+ git clone https://github.com/akhilesh2220/cmdexy.git
+ cd cmdexy
+
+ # Create virtual environment
+ python3 -m venv venv
+ source venv/bin/activate  # On Windows: venv\Scripts\activate
+
+ # Install dependencies
+ pip install -e .
+
+ # Configure your API key
+ cmdexy config
+ ```
+
+ ---
+
+ ## 📖 Usage
+
+ ### Configure API Key (First Time)
+ ```bash
+ cmdexy config
+ ```
+
+ ### Interactive Mode
+ Start an AI-powered shell session:
+ ```bash
+ cmdexy int
+ ```
+
+ Example session:
+ ```
+ cmdexy > create a python file that prints hello world
+ Intent detected: SYSTEM_COMMAND
+ Generated Command: cat <<EOF > hello.py
+ print("Hello, World!")
+ EOF
+ Execute on HOST? (y/n): y
+ ```
+
+ ### Run Single Instructions
+ ```bash
+ cmdexy run "list all files in current directory"
+ cmdexy run "create a flask app with one endpoint"
+ ```
+
+ ### Shell Wrapper (Error Recovery)
+ Run any command with AI-powered error monitoring:
+ ```bash
+ cmdexy shell python3 script.py
+ cmdexy shell ansible-playbook deploy.yml
+ cmdexy shell npm run build
+ ```
+
+ If the command fails, cmdexy will:
+ 1. Analyze the error using AI
+ 2. Suggest a fix (code patch or install command)
+ 3. Apply the fix automatically (with your permission)
+ 4. Retry the command
+
+ ### Check Version
+ ```bash
+ cmdexy --version
+ ```
+
+ ---
+
+ ## 🎯 Commands
+
+ | Command | Description |
+ |---------|-------------|
+ | `cmdexy config` | Configure API key and settings |
+ | `cmdexy int` | Start interactive AI session |
+ | `cmdexy run "<instruction>"` | Execute a single AI-powered instruction |
+ | `cmdexy shell <command>` | Run command with error recovery |
+ | `cmdexy --version` | Show version |
+ | `cmdexy --help` | Show help |
+
+ ---
+
+ ## 🔄 Error Recovery Flow
+
+ ```
+ ┌─────────────────┐
+ │   Run Command   │
+ └────────┬────────┘
+          │
+          ▼
+    ┌───────────┐  Success
+    │  Execute  │────────────► Done
+    └─────┬─────┘
+          │ Failure
+          ▼
+ ┌─────────────────┐
+ │ Analyze Error?  │──── No ───► Exit
+ └────────┬────────┘
+          │ Yes
+          ▼
+ ┌─────────────────┐
+ │   AI Analysis   │
+ └────────┬────────┘
+          │
+     ┌────┴────┐
+     ▼         ▼
+ ┌───────┐ ┌───────┐
+ │ Code  │ │  Cmd  │
+ │  Fix  │ │  Fix  │
+ └───┬───┘ └───┬───┘
+     │         │
+     ▼         ▼
+ ┌─────────────────┐
+ │  Apply & Retry  │
+ └─────────────────┘
+ ```
+
+ ---
+
+ ## 📁 Project Structure
+
+ ```
+ cmdexy/
+ ├── cmdexy/
+ │   ├── main.py            # CLI entry point
+ │   ├── cli/
+ │   │   ├── interactive.py # Interactive mode
+ │   │   ├── run.py         # Single instruction mode
+ │   │   └── wrapper.py     # Shell wrapper
+ │   └── core/
+ │       ├── ai_engine.py   # Cohere AI integration
+ │       ├── config.py      # Configuration manager
+ │       ├── controller.py  # Main orchestration logic
+ │       └── execution.py   # Command execution
+ ├── pyproject.toml
+ └── README.md
+ ```
+
+ ---
+
+ ## ⚙️ Configuration
+
+ Config is stored in `~/.cmdexy/config.json`:
+
+ ```json
+ {
+   "api_key": "your-cohere-api-key"
+ }
+ ```
+
+ You can also use an environment variable:
+ ```bash
+ export COHERE_API_KEY="your-key"
+ ```
+
+ ---
+
+ ## 🤝 Contributing
+
+ Contributions are welcome! Please feel free to submit a Pull Request.
+
+ ---
+
+ ## 📄 License
+
+ MIT License - see [LICENSE](LICENSE) for details.
+
+ ---
+
+ ## 🙏 Acknowledgments
+
+ - [Cohere](https://cohere.com/) for the AI API
+ - [Typer](https://typer.tiangolo.com/) for the CLI framework
+ - [Rich](https://rich.readthedocs.io/) for beautiful terminal output
+
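For readers cross-referencing the README's recovery steps with the modules above, a minimal programmatic equivalent of `cmdexy shell ...` might look like the sketch below; the wrapped `python3 script.py` command is illustrative.

```python
import asyncio

from cmdexy.cli.wrapper import execute_wrapper

# Roughly what `cmdexy shell python3 script.py` does: run the command on the host
# and, on a non-zero exit code, offer AI analysis, a suggested fix, and a retry.
asyncio.run(execute_wrapper(["python3", "script.py"]))
```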
cmdexy-1.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,16 @@
+ cmdexy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cmdexy/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cmdexy/cli/interactive.py,sha256=5yXFU7BfA2FGi53NbfiC9Qlh-pFnXJ4u6pYPh4YkDS4,835
+ cmdexy/cli/run.py,sha256=QOAEZO-dM-S9Z_Q7-Bt6nTHlIKi-CvafVM8gp3-yJHQ,298
+ cmdexy/cli/wrapper.py,sha256=nL-QW2z9fvDPPujQorIE9SJWb68239H5_xBfxUpwfb0,1369
+ cmdexy/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cmdexy/core/ai_engine.py,sha256=rt3StIUkxpF9pswahLQ6ULTCVwihIVUwaI528emysF4,4708
+ cmdexy/core/config.py,sha256=Di2F8btmGcw2ssLqJFXdbi_itxntFJMXREfMZ-PVgN8,1337
+ cmdexy/core/controller.py,sha256=fl91WjtGOhg0GOCurnUDrB0R71GA9Rk5pgDagDeLP7o,7554
+ cmdexy/core/execution.py,sha256=KSAEHoE9oWbk___m8GQ14SvUmjy0YMjV2yjF8pJwR7g,2725
+ cmdexy/main.py,sha256=b4f791Y-ICgt0jffhPZYm8QZOwAxcInu8r7orPkqBzk,3152
+ cmdexy-1.0.0.dist-info/METADATA,sha256=Wv9r-AE8naurvIk1OBWLGyJHQjoMUPt0kqTrc_c1Hfo,6334
+ cmdexy-1.0.0.dist-info/WHEEL,sha256=3ny-bZhpXrU6vSQ1UPG34FoxZBp3lVcvK0LkgUz6VLk,88
+ cmdexy-1.0.0.dist-info/entry_points.txt,sha256=lPu84sraJxKdF3gqUUv-Whh2G7W7J-DtW338CMjXLgQ,50
+ cmdexy-1.0.0.dist-info/licenses/LICENSE,sha256=mDA3fQorLDqAQb96IX-zfKbwzx0oIj1gSGOvGVOpdBc,1067
+ cmdexy-1.0.0.dist-info/RECORD,,
cmdexy-1.0.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: poetry-core 2.3.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
cmdexy-1.0.0.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
+ [console_scripts]
+ cmdexy=cmdexy.main:entry_point
+
cmdexy-1.0.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Akhilesh S
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.