llmshell_cli-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gpt_shell/__init__.py ADDED
@@ -0,0 +1,10 @@
+ """llmshell-cli: A Python package for GPT shell interactions."""
+
+ __version__ = "0.0.1"
+
+ from gpt_shell.config import Config
+ from gpt_shell.core import GPTShell
+ from gpt_shell.llm_manager import LLMManager
+ from gpt_shell.llm_client import LLMClient
+
+ __all__ = ["Config", "GPTShell", "LLMManager", "LLMClient", "__version__"]
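
As a quick orientation, a minimal sketch of importing the public API exported above (this assumes the wheel is installed; GPTShell is re-exported from gpt_shell.core so that cli.py's `from gpt_shell import GPTShell` resolves):

    from gpt_shell import Config, GPTShell, __version__

    print(__version__)   # "0.0.1"
    cfg = Config()       # loads or creates ~/.llmshell/config.yaml
    shell = GPTShell()   # placeholder core class, defined in gpt_shell/core.py below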
gpt_shell/cli.py ADDED
@@ -0,0 +1,56 @@
+ """Command-line interface for llmshell."""
+
+ import argparse
+ import sys
+ from gpt_shell import GPTShell, __version__
+
+
+ def main():
+     """Main CLI entry point."""
+     parser = argparse.ArgumentParser(
+         description="GPT Shell - A tool for GPT shell interactions"
+     )
+     parser.add_argument(
+         "--version",
+         action="version",
+         version=f"llmshell {__version__}"
+     )
+     parser.add_argument(
+         "command",
+         nargs="?",
+         help="Command to execute"
+     )
+     parser.add_argument(
+         "-i", "--interactive",
+         action="store_true",
+         help="Start interactive mode"
+     )
+
+     args = parser.parse_args()
+
+     shell = GPTShell()
+
+     if args.interactive:
+         print(f"llmshell {__version__} - Interactive Mode")
+         print("Type 'exit' or 'quit' to exit\n")
+         while True:
+             try:
+                 command = input("llmshell> ")
+                 if command.lower() in ("exit", "quit"):
+                     break
+                 if command.strip():
+                     result = shell.execute(command)
+                     print(result)
+             except (KeyboardInterrupt, EOFError):
+                 print("\nExiting...")
+                 break
+     elif args.command:
+         result = shell.execute(args.command)
+         print(result)
+     else:
+         parser.print_help()
+         sys.exit(1)
+
+
+ if __name__ == "__main__":
+     main()
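
The console-script name for this CLI lives in the wheel's entry-point metadata, which this diff does not show, so as a hedged usage sketch the one-shot path can be exercised programmatically (the argv program name is a placeholder):

    import sys
    from gpt_shell.cli import main

    sys.argv = ["llmshell", "show disk usage"]
    main()   # with the placeholder GPTShell.execute(), prints "Executed: show disk usage"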
gpt_shell/config.py ADDED
@@ -0,0 +1,191 @@
+ """Configuration management for llmshell."""
+
+ import copy
+ import os
+ from pathlib import Path
+ from typing import Any, Dict, Optional
+ import yaml
+
+
+ DEFAULT_CONFIG = {
+     "llm_backend": "gpt4all",
+     "backends": {
+         "gpt4all": {
+             "model": "Meta-Llama-3-8B-Instruct.Q4_0.gguf",
+             "model_path": None,  # Auto-detect or download
+         },
+         "openai": {
+             "api_key": None,
+             "model": "gpt-4-turbo",
+             "base_url": None,
+         },
+         "ollama": {
+             "model": "llama3",
+             "api_url": "http://localhost:11434",
+         },
+         "custom": {
+             "api_url": None,
+             "headers": {},
+         },
+     },
+     "execution": {
+         "auto_execute": False,
+         "confirmation_required": True,
+     },
+     "output": {
+         "colored": True,
+         "verbose": False,
+     },
+ }
+
+
+ class Config:
+     """Configuration manager for llmshell."""
+
+     def __init__(self, config_path: Optional[Path] = None):
+         """
+         Initialize configuration manager.
+
+         Args:
+             config_path: Path to config file. Defaults to ~/.llmshell/config.yaml
+         """
+         if config_path is None:
+             config_path = Path.home() / ".llmshell" / "config.yaml"
+         self.config_path = config_path
+         self.config_dir = config_path.parent
+         self.config: Dict[str, Any] = {}
+         self.load()
+
+     def load(self) -> None:
+         """Load configuration from file, creating default if not exists."""
+         if not self.config_path.exists():
+             self.create_default()
+         else:
+             try:
+                 with open(self.config_path, "r") as f:
+                     loaded_config = yaml.safe_load(f)
+                     if loaded_config:
+                         self.config = self._merge_with_defaults(loaded_config)
+                     else:
+                         self.config = copy.deepcopy(DEFAULT_CONFIG)
+             except Exception as e:
+                 print(f"Warning: Error loading config: {e}")
+                 self.config = copy.deepcopy(DEFAULT_CONFIG)
+
+     def _merge_with_defaults(self, user_config: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Merge user config with defaults.
+
+         Args:
+             user_config: User-provided configuration
+
+         Returns:
+             Merged configuration
+         """
+         merged = copy.deepcopy(DEFAULT_CONFIG)  # deep copy: never mutate module defaults
+
+         def deep_merge(base: Dict, override: Dict) -> Dict:
+             """Recursively merge dictionaries."""
+             result = base.copy()
+             for key, value in override.items():
+                 if key in result and isinstance(result[key], dict) and isinstance(value, dict):
+                     result[key] = deep_merge(result[key], value)
+                 else:
+                     result[key] = value
+             return result
+
+         return deep_merge(merged, user_config)
+
+     def create_default(self) -> None:
+         """Create default configuration file."""
+         self.config_dir.mkdir(parents=True, exist_ok=True)
+         self.config = copy.deepcopy(DEFAULT_CONFIG)
+         self.save()
+
+     def save(self) -> None:
+         """Save current configuration to file."""
+         self.config_dir.mkdir(parents=True, exist_ok=True)
+         with open(self.config_path, "w") as f:
+             yaml.dump(self.config, f, default_flow_style=False, sort_keys=False)
+
+     def get(self, key: str, default: Any = None) -> Any:
+         """
+         Get configuration value by key.
+
+         Args:
+             key: Configuration key (supports dot notation, e.g., 'backends.openai.api_key')
+             default: Default value if key not found
+
+         Returns:
+             Configuration value
+         """
+         keys = key.split(".")
+         value = self.config
+         for k in keys:
+             if isinstance(value, dict) and k in value:
+                 value = value[k]
+             else:
+                 return default
+         return value
+
+     def set(self, key: str, value: Any) -> None:
+         """
+         Set configuration value.
+
+         Args:
+             key: Configuration key (supports dot notation)
+             value: Value to set
+         """
+         keys = key.split(".")
+         config = self.config
+
+         for k in keys[:-1]:
+             if k not in config:
+                 config[k] = {}
+             config = config[k]
+
+         config[keys[-1]] = value
+         self.save()
+
+     def get_backend_config(self, backend: Optional[str] = None) -> Dict[str, Any]:
+         """
+         Get configuration for specific backend.
+
+         Args:
+             backend: Backend name. If None, uses current llm_backend setting
+
+         Returns:
+             Backend configuration dictionary
+         """
+         if backend is None:
+             backend = self.config.get("llm_backend", "gpt4all")
+         return self.config.get("backends", {}).get(backend, {})
+
+     def list_backends(self) -> list[str]:
+         """
+         List available backends.
+
+         Returns:
+             List of backend names
+         """
+         return list(self.config.get("backends", {}).keys())
+
+     def get_models_dir(self) -> Path:
+         """
+         Get directory for storing downloaded models.
+
+         Returns:
+             Path to models directory
+         """
+         models_dir = self.config_dir / "models"
+         models_dir.mkdir(parents=True, exist_ok=True)
+         return models_dir
+
+     def to_dict(self) -> Dict[str, Any]:
+         """
+         Get configuration as dictionary.
+
+         Returns:
+             Configuration dictionary
+         """
+         return copy.deepcopy(self.config)
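
A short sketch of the dot-notation accessors defined above; the config path is a hypothetical one picked for the demo, and the values shown come straight from DEFAULT_CONFIG:

    from pathlib import Path
    from gpt_shell.config import Config

    cfg = Config(Path("/tmp/llmshell-demo/config.yaml"))   # hypothetical demo path
    print(cfg.get("backends.ollama.api_url"))   # http://localhost:11434
    cfg.set("llm_backend", "ollama")            # writes through to the YAML file
    print(cfg.get_backend_config())             # {'model': 'llama3', 'api_url': 'http://localhost:11434'}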
gpt_shell/core.py ADDED
@@ -0,0 +1,32 @@
+ """Core functionality for llmshell."""
+
+
+ class GPTShell:
+     """Main class for GPT shell interactions."""
+
+     def __init__(self):
+         """Initialize GPTShell."""
+         self.history = []
+
+     def execute(self, command: str) -> str:
+         """
+         Execute a command.
+
+         Args:
+             command: The command to execute
+
+         Returns:
+             The result of the command execution
+         """
+         self.history.append(command)
+         # TODO: Implement actual command execution logic
+         return f"Executed: {command}"
+
+     def get_history(self) -> list[str]:
+         """
+         Get command history.
+
+         Returns:
+             List of executed commands
+         """
+         return self.history.copy()
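
As the TODO notes, execute() is still a stub that records history and echoes the command; a quick sketch of the current behavior:

    from gpt_shell.core import GPTShell

    shell = GPTShell()
    print(shell.execute("df -h"))   # Executed: df -h  (nothing actually runs yet)
    print(shell.get_history())      # ['df -h']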
gpt_shell/llm_client.py ADDED
@@ -0,0 +1,325 @@
+ """LLM client abstraction for multiple backends."""
+
+ from abc import ABC, abstractmethod
+ from typing import Optional, Dict, Any
+ import json
+
+
+ SYSTEM_PROMPT = """You are a helpful Linux/Unix shell command generator.
+ Your task is to convert natural language requests into valid shell commands.
+
+ Rules:
+ 1. Output ONLY the command, no explanations unless specifically asked
+ 2. Use safe, standard Unix/Linux commands
+ 3. Prefer common tools (ls, grep, find, docker, etc.)
+ 4. If the request is unclear, output the most likely command
+ 5. For destructive operations (rm, etc.), include safety flags when possible
+
+ Examples:
+ Input: "list all docker containers"
+ Output: docker ps -a
+
+ Input: "find all python files in current directory"
+ Output: find . -name "*.py"
+
+ Input: "show disk usage"
+ Output: df -h
+
+ Now, respond to the user's request with just the command."""
+
+
+ class LLMClient(ABC):
+     """Abstract base class for LLM clients."""
+
+     @abstractmethod
+     def generate_command(self, prompt: str, explain: bool = False) -> str:
+         """
+         Generate a shell command from natural language prompt.
+
+         Args:
+             prompt: Natural language description
+             explain: If True, include explanation
+
+         Returns:
+             Generated command or command with explanation
+         """
+         pass
+
+     @abstractmethod
+     def is_available(self) -> bool:
+         """
+         Check if this client is available and properly configured.
+
+         Returns:
+             True if available, False otherwise
+         """
+         pass
+
+
+ class GPT4AllClient(LLMClient):
+     """GPT4All local LLM client."""
+
+     def __init__(self, model_name: str, model_path: Optional[str] = None):
+         """
+         Initialize GPT4All client.
+
+         Args:
+             model_name: Name of the model
+             model_path: Path to model file (optional)
+         """
+         self.model_name = model_name
+         self.model_path = model_path
+         self.model = None
+         self._available = False
+         self._initialize()
+
+     def _initialize(self):
+         """Initialize GPT4All model."""
+         try:
+             from gpt4all import GPT4All
+             self.model = GPT4All(
+                 model_name=self.model_name,
+                 model_path=self.model_path,
+                 allow_download=False,  # Don't auto-download, we'll handle this
+             )
+             self._available = True
+         except Exception as e:
+             self._available = False
+             self.error = str(e)
+
+     def generate_command(self, prompt: str, explain: bool = False) -> str:
+         """Generate command using GPT4All."""
+         if not self.is_available():
+             raise RuntimeError("GPT4All client is not available")
+
+         if explain:
+             user_prompt = f"{prompt}\n\nProvide the command and a brief explanation."
+         else:
+             user_prompt = prompt
+
+         try:
+             with self.model.chat_session(system_prompt=SYSTEM_PROMPT):
+                 response = self.model.generate(user_prompt, max_tokens=200, temp=0.1)
+
+             # Extract just the command (before ### or other delimiters)
+             cleaned_response = response.strip()
+
+             # Split by common delimiters
+             for delimiter in ['###', '```', '\n\n', 'End of']:
+                 if delimiter in cleaned_response:
+                     cleaned_response = cleaned_response.split(delimiter)[0].strip()
+                     break
+
+             return cleaned_response
+         except Exception as e:
+             raise RuntimeError(f"Error generating command: {e}") from e
+
+     def is_available(self) -> bool:
+         """Check if GPT4All is available."""
+         return self._available
+
+
+ class OpenAIClient(LLMClient):
+     """OpenAI API client."""
+
+     def __init__(self, api_key: str, model: str = "gpt-4-turbo", base_url: Optional[str] = None):
+         """
+         Initialize OpenAI client.
+
+         Args:
+             api_key: OpenAI API key
+             model: Model name
+             base_url: Optional custom base URL
+         """
+         self.api_key = api_key
+         self.model = model
+         self.base_url = base_url
+         self.client = None
+         self._available = False
+         self._initialize()
+
+     def _initialize(self):
+         """Initialize OpenAI client."""
+         try:
+             from openai import OpenAI
+             if self.api_key:
+                 kwargs = {"api_key": self.api_key}
+                 if self.base_url:
+                     kwargs["base_url"] = self.base_url
+                 self.client = OpenAI(**kwargs)
+                 self._available = True
+             else:
+                 self._available = False
+         except Exception as e:
+             self._available = False
+             self.error = str(e)
+
+     def generate_command(self, prompt: str, explain: bool = False) -> str:
+         """Generate command using OpenAI."""
+         if not self.is_available():
+             raise RuntimeError("OpenAI client is not available")
+
+         if explain:
+             user_prompt = f"{prompt}\n\nProvide the command and a brief explanation."
+         else:
+             user_prompt = prompt
+
+         try:
+             response = self.client.chat.completions.create(
+                 model=self.model,
+                 messages=[
+                     {"role": "system", "content": SYSTEM_PROMPT},
+                     {"role": "user", "content": user_prompt}
+                 ],
+                 max_tokens=200,
+                 temperature=0.1,
+             )
+             return response.choices[0].message.content.strip()
+         except Exception as e:
+             raise RuntimeError(f"Error generating command: {e}") from e
+
+     def is_available(self) -> bool:
+         """Check if OpenAI is available."""
+         return self._available and self.client is not None
+
+
+ class OllamaClient(LLMClient):
+     """Ollama API client."""
+
+     def __init__(self, model: str, api_url: str = "http://localhost:11434"):
+         """
+         Initialize Ollama client.
+
+         Args:
+             model: Model name
+             api_url: Ollama API URL
+         """
+         self.model = model
+         self.api_url = api_url.rstrip("/")
+         self._available = False
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if Ollama is available."""
+         try:
+             import requests
+             response = requests.get(f"{self.api_url}/api/tags", timeout=2)
+             if response.status_code == 200:
+                 # Check if our model is available
+                 models = response.json().get("models", [])
+                 model_names = [m.get("name", "").split(":")[0] for m in models]
+                 self._available = any(self.model in name for name in model_names)
+             else:
+                 self._available = False
+         except Exception:
+             self._available = False
+
+     def generate_command(self, prompt: str, explain: bool = False) -> str:
+         """Generate command using Ollama."""
+         if not self.is_available():
+             raise RuntimeError("Ollama client is not available")
+
+         import requests
+
+         if explain:
+             user_prompt = f"{prompt}\n\nProvide the command and a brief explanation."
+         else:
+             user_prompt = prompt
+
+         try:
+             response = requests.post(
+                 f"{self.api_url}/api/generate",
+                 json={
+                     "model": self.model,
+                     "prompt": f"{SYSTEM_PROMPT}\n\nUser request: {user_prompt}",
+                     "stream": False,
+                     "options": {
+                         "temperature": 0.1,
+                         "num_predict": 200,
+                     }
+                 },
+                 timeout=30,
+             )
+             response.raise_for_status()
+             result = response.json()
+             return result.get("response", "").strip()
+         except Exception as e:
+             raise RuntimeError(f"Error generating command: {e}") from e
+
+     def is_available(self) -> bool:
+         """Check if Ollama is available."""
+         if not self._available:
+             self._check_availability()
+         return self._available
+
+
+ class CustomClient(LLMClient):
+     """Custom API client for generic LLM endpoints."""
+
+     def __init__(self, api_url: str, headers: Optional[Dict[str, str]] = None):
+         """
+         Initialize custom API client.
+
+         Args:
+             api_url: API endpoint URL
+             headers: Optional headers (e.g., Authorization)
+         """
+         self.api_url = api_url
+         self.headers = headers or {}
+         self._available = False
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if custom API is available."""
+         try:
+             import requests
+             response = requests.get(self.api_url, headers=self.headers, timeout=2)
+             self._available = response.status_code in [200, 401, 405]  # Endpoint exists
+         except Exception:
+             self._available = False
+
+     def generate_command(self, prompt: str, explain: bool = False) -> str:
+         """Generate command using custom API."""
+         if not self.is_available():
+             raise RuntimeError("Custom API client is not available")
+
+         import requests
+
+         if explain:
+             user_prompt = f"{prompt}\n\nProvide the command and a brief explanation."
+         else:
+             user_prompt = prompt
+
+         try:
+             # Assume OpenAI-compatible format
+             response = requests.post(
+                 self.api_url,
+                 headers={**self.headers, "Content-Type": "application/json"},
+                 json={
+                     "messages": [
+                         {"role": "system", "content": SYSTEM_PROMPT},
+                         {"role": "user", "content": user_prompt}
+                     ],
+                     "max_tokens": 200,
+                     "temperature": 0.1,
+                 },
+                 timeout=30,
+             )
+             response.raise_for_status()
+             result = response.json()
+
+             # Try different response formats
+             if "choices" in result:
+                 return result["choices"][0]["message"]["content"].strip()
+             elif "response" in result:
+                 return result["response"].strip()
+             else:
+                 return str(result)
+         except Exception as e:
+             raise RuntimeError(f"Error generating command: {e}") from e
+
+     def is_available(self) -> bool:
+         """Check if custom API is available."""
+         if not self._available:
+             self._check_availability()
+         return self._available
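
A hedged usage sketch of the client hierarchy above: construct a backend, check availability, then generate. The model name and API key are placeholders, and the Ollama probe only succeeds if a local server is listening on the default port:

    from gpt_shell.llm_client import OllamaClient, OpenAIClient

    client = OllamaClient(model="llama3")   # probes {api_url}/api/tags on construction
    if not client.is_available():
        client = OpenAIClient(api_key="sk-...", model="gpt-4-turbo")  # placeholder key
    if client.is_available():
        print(client.generate_command("show disk usage"))   # e.g. df -h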