llmshell-cli 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gpt_shell/llm_manager.py ADDED
@@ -0,0 +1,225 @@
+ """LLM backend manager with fallback support."""
+
+ from typing import Optional, List, Tuple
+ from pathlib import Path
+
+ from gpt_shell.config import Config
+ from gpt_shell.llm_client import (
+     LLMClient,
+     GPT4AllClient,
+     OpenAIClient,
+     OllamaClient,
+     CustomClient,
+ )
+
+
+ class LLMManager:
+     """Manages LLM backends and provides fallback logic."""
+
+     def __init__(self, config: Config):
+         """
+         Initialize LLM manager.
+
+         Args:
+             config: Configuration object
+         """
+         self.config = config
+         self.current_client: Optional[LLMClient] = None
+         self.backend_name: Optional[str] = None
+
+     def get_client(self, backend: Optional[str] = None) -> LLMClient:
+         """
+         Get LLM client for specified backend.
+
+         Args:
+             backend: Backend name. If None, uses config default
+
+         Returns:
+             LLM client instance
+
+         Raises:
+             RuntimeError: If no backends are available
+         """
+         if backend is None:
+             backend = self.config.get("llm_backend", "gpt4all")
+
+         # Try requested backend first
+         client = self._create_client(backend)
+         if client and client.is_available():
+             self.current_client = client
+             self.backend_name = backend
+             return client
+
+         # If requested backend not available, try fallback order
+         fallback_order = self._get_fallback_order(backend)
+         for fallback_backend in fallback_order:
+             client = self._create_client(fallback_backend)
+             if client and client.is_available():
+                 self.current_client = client
+                 self.backend_name = fallback_backend
+                 return client
+
+         # No backends available
+         raise RuntimeError(
+             "No LLM backends available. Please configure at least one backend:\n"
+             " - GPT4All: run 'llmshell model install' to download model\n"
+             " - OpenAI: set API key with 'llmshell config set backends.openai.api_key YOUR_KEY'\n"
+             " - Ollama: ensure Ollama is running locally\n"
+             " - Custom: configure custom API endpoint"
+         )
+
+     def _create_client(self, backend: str) -> Optional[LLMClient]:
+         """
+         Create client for specific backend.
+
+         Args:
+             backend: Backend name
+
+         Returns:
+             LLM client instance or None if configuration invalid
+         """
+         backend_config = self.config.get_backend_config(backend)
+
+         try:
+             if backend == "gpt4all":
+                 model_name = backend_config.get("model", "mistral-7b-instruct-v0.2.Q4_0.gguf")
+                 model_path = backend_config.get("model_path")
+
+                 # If no explicit path, check models directory
+                 if not model_path:
+                     models_dir = self.config.get_models_dir()
+                     potential_path = models_dir / model_name
+                     if potential_path.exists():
+                         model_path = str(models_dir)
+
+                 return GPT4AllClient(model_name=model_name, model_path=model_path)
+
+             elif backend == "openai":
+                 api_key = backend_config.get("api_key")
+                 if not api_key:
+                     return None
+                 model = backend_config.get("model", "gpt-4-turbo")
+                 base_url = backend_config.get("base_url")
+                 return OpenAIClient(api_key=api_key, model=model, base_url=base_url)
+
+             elif backend == "ollama":
+                 model = backend_config.get("model", "llama3")
+                 api_url = backend_config.get("api_url", "http://localhost:11434")
+                 return OllamaClient(model=model, api_url=api_url)
+
+             elif backend == "custom":
+                 api_url = backend_config.get("api_url")
+                 if not api_url:
+                     return None
+                 headers = backend_config.get("headers", {})
+                 return CustomClient(api_url=api_url, headers=headers)
+
+             else:
+                 return None
+
+         except Exception:
+             return None  # any error constructing a client means "not configured/available"
+
+     def _get_fallback_order(self, preferred: str) -> List[str]:
+         """
+         Get fallback order for backends.
+
+         Args:
+             preferred: Preferred backend (to exclude from fallbacks)
+
+         Returns:
+             List of backend names in fallback order
+         """
+         # Default fallback order: gpt4all -> ollama -> openai -> custom
+         all_backends = ["gpt4all", "ollama", "openai", "custom"]
+
+         # Remove preferred backend from fallbacks
+         fallbacks = [b for b in all_backends if b != preferred]
+
+         return fallbacks
+
+     def check_backends(self) -> List[Tuple[str, bool, str]]:
+         """
+         Check status of all configured backends.
+
+         Returns:
+             List of tuples (backend_name, is_available, status_message)
+         """
+         results = []
+
+         for backend in self.config.list_backends():
+             client = self._create_client(backend)
+             if client is None:
+                 results.append((backend, False, "Not configured"))
+             elif client.is_available():
+                 results.append((backend, True, "Available"))
+             else:
+                 error_msg = getattr(client, "error", "Not available")
+                 results.append((backend, False, error_msg))
+
+         return results
+
+     def generate_command(self, prompt: str, explain: bool = False, backend: Optional[str] = None) -> str:
+         """
+         Generate command using LLM.
+
+         Args:
+             prompt: Natural language prompt
+             explain: Whether to include explanation
+             backend: Specific backend to use (optional)
+
+         Returns:
+             Generated command
+         """
+         client = self.get_client(backend)
+         return client.generate_command(prompt, explain)
+
+     def get_current_backend(self) -> Optional[str]:
+         """
+         Get name of currently active backend.
+
+         Returns:
+             Backend name or None
+         """
+         return self.backend_name
+
+     def download_gpt4all_model(self, model_name: Optional[str] = None) -> Path:
+         """
+         Download GPT4All model.
+
+         Args:
+             model_name: Model name to download. If None, uses config default
+
+         Returns:
+             Path to downloaded model
+
+         Raises:
+             RuntimeError: If download fails
+         """
+         if model_name is None:
+             model_name = self.config.get("backends.gpt4all.model", "mistral-7b-instruct-v0.2.Q4_0.gguf")
+
+         models_dir = self.config.get_models_dir()
+
+         try:
+             from gpt4all import GPT4All
+
+             # Instantiating with allow_download=True downloads the model into models_dir
+             print(f"Downloading {model_name}...")
+             print("This may take a few minutes depending on your internet connection.")
+
+             GPT4All(
+                 model_name=model_name,
+                 model_path=str(models_dir),
+                 allow_download=True,
+             )
+
+             model_path = models_dir / model_name
+
+             # Update config with model path
+             self.config.set("backends.gpt4all.model_path", str(models_dir))
+
+             return model_path
+
+         except Exception as e:
+             raise RuntimeError(f"Failed to download model: {e}") from e
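
The heart of the module is `get_client`: a caller names one backend and transparently receives the first one that reports itself available. A minimal usage sketch of that fallback path, assuming (as `gpt_shell/main.py` below does) that `Config()` is constructible without arguments:

```python
# Sketch only, not part of the package. Assumes Config() takes no arguments,
# matching how gpt_shell/main.py constructs it.
from gpt_shell.config import Config
from gpt_shell.llm_manager import LLMManager

manager = LLMManager(Config())
try:
    # Request "openai"; if it is unconfigured or down, the manager silently
    # walks the fallback order gpt4all -> ollama -> custom instead.
    client = manager.get_client("openai")
    print(f"Resolved backend: {manager.get_current_backend()}")
except RuntimeError as exc:
    # Raised only when every backend is unavailable.
    print(exc)
```

Note the silent degradation: a caller that asked for `openai` only learns which backend actually served the request by querying `get_current_backend()`, which is exactly what `run_command` in `main.py` does.
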
gpt_shell/main.py ADDED
@@ -0,0 +1,327 @@
+ """Main CLI interface for llmshell using Typer."""
+
+ import typer
+ from typing import Optional
+ from pathlib import Path
+
+ from gpt_shell import __version__
+ from gpt_shell.config import Config
+ from gpt_shell.llm_manager import LLMManager
+ from gpt_shell.utils import (
+     print_success,
+     print_error,
+     print_warning,
+     print_info,
+     print_command,
+     print_config_table,
+     print_backend_status,
+     confirm_execution,
+     execute_command,
+     print_execution_result,
+     is_dangerous_command,
+     print_danger_warning,
+     console,
+ )
+
+ app = typer.Typer(
+     name="llmshell",
+     help="Convert natural language to shell commands using LLMs",
+     add_completion=False,
+ )
+
+
+ def get_manager() -> LLMManager:
+     """Get LLM manager instance."""
+     config = Config()
+     return LLMManager(config)
+
+
+ @app.command(name="run")
+ def run_command(
+     prompt: str = typer.Argument(..., help="Natural language description of command"),
+     execute: bool = typer.Option(False, "--execute", "-e", help="Execute command without confirmation"),
+     dry_run: bool = typer.Option(False, "--dry-run", "-d", help="Show command but don't execute"),
+     explain: bool = typer.Option(False, "--explain", "-x", help="Include explanation with command"),
+     backend: Optional[str] = typer.Option(None, "--backend", "-b", help="Specific backend to use"),
+ ):
+     """Generate and optionally execute shell commands from natural language."""
+     try:
+         manager = get_manager()
+
+         # Generate command
+         with console.status("[cyan]Thinking...", spinner="dots"):
+             command = manager.generate_command(prompt, explain=explain, backend=backend)
+
+         # Display generated command
+         if explain:
+             console.print("\n[bold cyan]Response:[/bold cyan]")
+             console.print(command)
+         else:
+             print_command(command)
+
+         # Show which backend was used
+         current_backend = manager.get_current_backend()
+         if current_backend:
+             print_info(f"Using backend: {current_backend}")
+
+         # Check for dangerous commands
+         is_dangerous = is_dangerous_command(command)
+         if is_dangerous:
+             print_danger_warning(command)
+
+         # Handle execution
+         if not dry_run and not explain:
+             should_execute = execute
+
+             # For dangerous commands, ALWAYS ask for confirmation
+             if is_dangerous:
+                 should_execute = False  # Force confirmation for dangerous commands
+                 print_warning("Dangerous command detected - confirmation required regardless of --execute flag")
+
+             if not should_execute:
+                 # Ask for confirmation
+                 config = Config()
+                 if not config.get("execution.confirmation_required", True) and not is_dangerous:
+                     should_execute = True
+                 else:
+                     should_execute = confirm_execution(command)
+
+             if should_execute:
+                 console.print()
+                 returncode, stdout, stderr = execute_command(command)
+                 print_execution_result(returncode, stdout, stderr)
+         elif dry_run:
+             print_info("Dry run mode - command not executed")
+
+     except RuntimeError as e:
+         print_error(str(e))
+         raise typer.Exit(code=1)
+     except KeyboardInterrupt:
+         print_warning("Interrupted by user")
+         raise typer.Exit(code=130)
+     except Exception as e:
+         print_error(f"Unexpected error: {e}")
+         raise typer.Exit(code=1)
+
+
+ @app.command(name="config")
+ def config_command(
+     action: Optional[str] = typer.Argument(None, help="Action: show, set, get"),
+     key: Optional[str] = typer.Argument(None, help="Configuration key"),
+     value: Optional[str] = typer.Argument(None, help="Configuration value"),
+ ):
+     """Manage llmshell configuration."""
+     config = Config()
+
+     if action is None or action == "show":
+         # Show current configuration
+         print_config_table(config.to_dict())
+         console.print(f"\n[dim]Config file: {config.config_path}[/dim]")
+
+     elif action == "set":
+         if not key or value is None:
+             print_error("Usage: llmshell config set KEY VALUE")
+             raise typer.Exit(code=1)
+
+         # Set configuration value
+         try:
+             # Try to parse as boolean or number (isdigit() does not match negatives, so "-5" stays a string)
+             if value.lower() in ["true", "false"]:
+                 value = value.lower() == "true"
+             elif value.isdigit():
+                 value = int(value)
+             elif value.replace(".", "", 1).isdigit():
+                 value = float(value)
+
+             config.set(key, value)
+             print_success(f"Set {key} = {value}")
+             print_info(f"Config saved to {config.config_path}")
+         except Exception as e:
+             print_error(f"Failed to set config: {e}")
+             raise typer.Exit(code=1)
+
+     elif action == "get":
+         if not key:
+             print_error("Usage: llmshell config get KEY")
+             raise typer.Exit(code=1)
+
+         # Get configuration value
+         value = config.get(key)
+         if value is not None:
+             console.print(f"[cyan]{key}[/cyan] = {value}")
+         else:
+             print_warning(f"Key '{key}' not found")
+             raise typer.Exit(code=1)
+
+     else:
+         print_error(f"Unknown action: {action}")
+         print_info("Available actions: show, set, get")
+         raise typer.Exit(code=1)
+
+
+ @app.command(name="model")
+ def model_command(
+     action: str = typer.Argument(..., help="Action: install, list, show-available"),
+     model_name: Optional[str] = typer.Option(None, "--name", "-n", help="Model name to install"),
+ ):
+     """Manage GPT4All models."""
+     if action == "install":
+         try:
+             manager = get_manager()
+
+             if model_name:
+                 print_info(f"Installing model: {model_name}")
+             else:
+                 config = Config()
+                 model_name = config.get("backends.gpt4all.model", "mistral-7b-instruct-v0.2.Q4_0.gguf")
+                 print_info(f"Installing default model: {model_name}")
+
+             model_path = manager.download_gpt4all_model(model_name)
+             print_success(f"Model installed successfully: {model_path}")
+
+         except Exception as e:
+             print_error(f"Failed to install model: {e}")
+             raise typer.Exit(code=1)
+
+     elif action == "list":
+         config = Config()
+         models_dir = config.get_models_dir()
+
+         if models_dir.exists():
+             models = list(models_dir.glob("*.gguf"))
+             if models:
+                 console.print("\n[bold cyan]Installed Models:[/bold cyan]")
+                 for model in models:
+                     console.print(f" • {model.name}")
+             else:
+                 print_warning("No models installed")
+                 print_info("Run 'llmshell model install' to download a model")
+         else:
+             print_warning("Models directory not found")
+             print_info("Run 'llmshell model install' to download a model")
+
+     elif action == "show-available":
+         # List popular available models from GPT4All
+         console.print("\n[bold cyan]Popular GPT4All Models:[/bold cyan]\n")
+
+         popular_models = [
+             ("Meta-Llama-3-8B-Instruct.Q4_0.gguf", "4.7GB", "Meta's Llama 3 8B - Fast, accurate, recommended"),
+             ("Mistral-7B-Instruct-v0.2.Q4_0.gguf", "4.1GB", "Mistral AI's 7B model - Good for code"),
+             ("Phi-3-mini-4k-instruct.Q4_0.gguf", "2.3GB", "Microsoft's compact model - Very fast"),
+             ("orca-mini-3b-gguf2-q4_0.gguf", "1.9GB", "Smaller model - Low resource usage"),
+             ("gpt4all-falcon-newbpe-q4_0.gguf", "3.9GB", "Falcon 7B - Good general purpose"),
+         ]
+
+         from rich.table import Table
+         table = Table(show_header=True, header_style="bold cyan")
+         table.add_column("Model Name", style="yellow", no_wrap=False)
+         table.add_column("Size", style="green", justify="right")
+         table.add_column("Description", style="white")
+
+         for name, size, desc in popular_models:
+             table.add_row(name, size, desc)
+
+         console.print(table)
+         console.print("\n[bold]To install a model:[/bold]")
+         console.print(" [cyan]llmshell model install --name MODEL_NAME[/cyan]")
+         console.print("\n[bold]Example:[/bold]")
+         console.print(" [cyan]llmshell model install --name Meta-Llama-3-8B-Instruct.Q4_0.gguf[/cyan]")
+         console.print("\n[dim]Note: Models are downloaded from GPT4All's official repository[/dim]")
+
+     else:
+         print_error(f"Unknown action: {action}")
+         print_info("Available actions: install, list, show-available")
+         raise typer.Exit(code=1)
+
+
+ @app.command(name="doctor")
+ def doctor_command():
+     """Diagnose llmshell setup and check backend availability."""
+     console.print("[bold cyan]Running diagnostics...[/bold cyan]\n")
+
+     # Check config
+     config = Config()
+     console.print(f"[green]✓[/green] Config file: {config.config_path}")
+
+     if not config.config_path.exists():
+         print_warning("Config file does not exist (will be created on first run)")
+
+     # Check models directory
+     models_dir = config.get_models_dir()
+     console.print(f"[green]✓[/green] Models directory: {models_dir}")
+
+     # Check for GPT4All models
+     if models_dir.exists():
+         models = list(models_dir.glob("*.gguf"))
+         if models:
+             console.print(f"[green]✓[/green] Found {len(models)} GPT4All model(s)")
+         else:
+             print_warning("No GPT4All models found")
+             print_info("Run 'llmshell model install' to download a model")
+
+     console.print()
+
+     # Check backends
+     manager = LLMManager(config)
+     backends = manager.check_backends()
+     print_backend_status(backends)
+
+     console.print()
+
+     # Check if at least one backend is available
+     available = any(ok for _, ok, _ in backends)
+     if available:
+         print_success("At least one backend is available")
+     else:
+         print_error("No backends are available")
+         console.print("\n[bold]Recommendations:[/bold]")
+         console.print(" 1. Install GPT4All model: [cyan]llmshell model install[/cyan]")
+         console.print(" 2. Configure OpenAI: [cyan]llmshell config set backends.openai.api_key YOUR_KEY[/cyan]")
+         console.print(" 3. Start Ollama server: [cyan]ollama serve[/cyan]")
+
+     # Show current backend
+     current_backend = config.get("llm_backend")
+     console.print(f"\n[bold]Current Backend:[/bold] {current_backend}")
+
+     # Test current backend
+     console.print("\n[bold cyan]Testing current backend...[/bold cyan]")
+     try:
+         with console.status("[cyan]Generating test command...", spinner="dots"):
+             test_result = manager.generate_command("list files", backend=current_backend)
+         print_success("Backend test successful")
+         print_command(test_result, "Test Output")
+     except Exception as e:
+         print_error(f"Backend test failed: {e}")
+
+
+ @app.command(name="version")
+ def version_command():
+     """Show version information."""
+     console.print(f"[bold cyan]llmshell[/bold cyan] version [green]{__version__}[/green]")
+
+
+ @app.callback(invoke_without_command=True)
+ def main(
+     ctx: typer.Context,
+     version: bool = typer.Option(False, "--version", "-v", help="Show version"),
+ ):
+     """
+     llmshell - Convert natural language to shell commands using LLMs.
+
+     Examples:
+         llmshell run "list all docker containers"
+         llmshell run "find python files" --execute
+         llmshell config show
+         llmshell model install
+         llmshell doctor
+     """
+     if version:
+         console.print(f"[bold cyan]llmshell[/bold cyan] version [green]{__version__}[/green]")
+         raise typer.Exit()
+
+     if ctx.invoked_subcommand is None:
+         console.print(ctx.get_help())
+
+
+ if __name__ == "__main__":
+     app()
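
Because `app` is a plain Typer application, the CLI can be smoke-tested in-process with Typer's bundled test runner; a minimal sketch (assumes `gpt_shell.utils.console` is a Rich console writing to standard output, so its output is captured by the runner):

```python
# Sketch: in-process smoke test of the CLI. The "version" command is chosen
# because it touches no LLM backend or network.
from typer.testing import CliRunner

from gpt_shell.main import app

runner = CliRunner()
result = runner.invoke(app, ["version"])
assert result.exit_code == 0
assert "llmshell" in result.stdout
```
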
gpt_shell/py.typed ADDED
@@ -0,0 +1 @@
+ # Marker file for PEP 561
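
As flagged in the `config set` handler above, the value coercion recognizes booleans and non-negative numerals only. Restated as a standalone helper (the name `coerce_value` is hypothetical, not part of the package) to make that behavior concrete:

```python
# Hypothetical restatement of the coercion in config_command's "set" branch.
def coerce_value(value: str):
    """Mirror llmshell's string-to-bool/int/float coercion for config values."""
    if value.lower() in ["true", "false"]:
        return value.lower() == "true"
    if value.isdigit():
        return int(value)
    if value.replace(".", "", 1).isdigit():
        return float(value)
    return value

assert coerce_value("true") is True
assert coerce_value("42") == 42
assert coerce_value("0.7") == 0.7
assert coerce_value("-5") == "-5"  # negatives fall through as strings
```
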