yaicli 0.0.6__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported public registries. It is provided for informational purposes only.
yaicli.py CHANGED
@@ -3,7 +3,6 @@ import json
  import platform
  import subprocess
  import time
- from enum import StrEnum
  from os import getenv
  from os.path import basename, pathsep
  from pathlib import Path
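Note: `StrEnum` only exists in the standard library on Python 3.11+, so dropping this import (and the `ModeEnum` class below, replaced by plain string constants) removes 0.0.6's implicit 3.11+ requirement. For reference, a minimal sketch of the pre-3.11 equivalent the old class would otherwise have needed:

```python
from enum import Enum

class ModeEnum(str, Enum):
    # The str + Enum mix-in behaves like 3.11's StrEnum on older interpreters:
    # members compare equal to their plain string values.
    CHAT = "chat"
    EXECUTE = "exec"
    TEMP = "temp"

assert ModeEnum.CHAT == "chat"
assert ModeEnum.EXECUTE.value == "exec"
```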
@@ -21,55 +20,56 @@ from rich.live import Live
  from rich.markdown import Markdown
  from rich.prompt import Confirm
 
+ SHELL_PROMPT = """Your are a Shell Command Generator.
+ Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
+ Rules:
+ 1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
+ 2. Output STRICTLY in plain text format
+ 3. NEVER use markdown, code blocks or explanations
+ 4. Chain multi-step commands in SINGLE LINE
+ 5. Return NOTHING except the ready-to-run command"""
 
- class ModeEnum(StrEnum):
-     CHAT = "chat"
-     EXECUTE = "exec"
-     TEMP = "temp"
-
+ DEFAULT_PROMPT = (
+     "You are yaili, a system management and programing assistant, "
+     "You are managing {_os} operating system with {_shell} shell. "
+     "Your responses should be concise and use Markdown format, "
+     "unless the user explicitly requests more details."
+ )
 
- class CasePreservingConfigParser(configparser.RawConfigParser):
-     def optionxform(self, optionstr):
-         return optionstr
+ CMD_CLEAR = "/clear"
+ CMD_EXIT = "/exit"
+
+ EXEC_MODE = "exec"
+ CHAT_MODE = "chat"
+ TEMP_MODE = "temp"
+
+ DEFAULT_CONFIG_MAP = {
+     "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "AI_BASE_URL"},
+     "API_KEY": {"value": "", "env_key": "AI_API_KEY"},
+     "MODEL": {"value": "gpt-4o", "env_key": "AI_MODEL"},
+     "SHELL_NAME": {"value": "auto", "env_key": "AI_SHELL_NAME"},
+     "OS_NAME": {"value": "auto", "env_key": "AI_OS_NAME"},
+     "COMPLETION_PATH": {"value": "chat/completions", "env_key": "AI_COMPLETION_PATH"},
+     "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "AI_ANSWER_PATH"},
+     "STREAM": {"value": "true", "env_key": "AI_STREAM"},
+ }
 
+ app = typer.Typer(
+     name="yaicli",
+     context_settings={"help_option_names": ["-h", "--help"]},
+     pretty_exceptions_enable=False,
+ )
 
- # Default configuration values (lowest priority)
- DEFAULT_CONFIG = {
-     "BASE_URL": "https://api.openai.com/v1",
-     "API_KEY": "",
-     "MODEL": "gpt-4o",
-     "SHELL_NAME": "auto",
-     "OS_NAME": "auto",
-     "COMPLETION_PATH": "chat/completions",
-     "ANSWER_PATH": "choices[0].message.content",
-     "STREAM": "true",
- }
 
- # Environment variable mapping (config key -> environment variable name)
- ENV_VAR_MAPPING = {
-     "BASE_URL": "AI_BASE_URL",
-     "API_KEY": "AI_API_KEY",
-     "MODEL": "AI_MODEL",
-     "SHELL_NAME": "AI_SHELL_NAME",
-     "OS_NAME": "AI_OS_NAME",
-     "COMPLETION_PATH": "AI_COMPLETION_PATH",
-     "ANSWER_PATH": "AI_ANSWER_PATH",
-     "STREAM": "AI_STREAM",
- }
+ class CasePreservingConfigParser(configparser.RawConfigParser):
+     """Case preserving config parser"""
 
+     def optionxform(self, optionstr):
+         return optionstr
 
- class YAICLI:
-     """Main class for YAICLI
-     Chat mode: interactive chat mode
-     One-shot mode:
-         Temp: ask a question and get a response once
-         Execute: generate and execute shell commands
-     """
 
-     # Configuration file path
+ class CLI:
      CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
-
-     # Default configuration template
      DEFAULT_CONFIG_INI = """[core]
  BASE_URL=https://api.openai.com/v1
  API_KEY=
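Note: 0.0.6 kept default values and environment-variable names in two parallel dicts (`DEFAULT_CONFIG` and `ENV_VAR_MAPPING`) that had to stay in sync by hand; `DEFAULT_CONFIG_MAP` folds both into a single entry per key. A minimal sketch showing that both of the old views fall out of the new map:

```python
DEFAULT_CONFIG_MAP = {
    "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "AI_BASE_URL"},
    "MODEL": {"value": "gpt-4o", "env_key": "AI_MODEL"},
}

# Equivalent of the removed DEFAULT_CONFIG dict (lowest-priority defaults)
default_config = {k: v["value"] for k, v in DEFAULT_CONFIG_MAP.items()}

# Equivalent of the removed ENV_VAR_MAPPING dict (config key -> env var name)
env_var_mapping = {k: v["env_key"] for k, v in DEFAULT_CONFIG_MAP.items()}

assert default_config["MODEL"] == "gpt-4o"
assert env_var_mapping["MODEL"] == "AI_MODEL"
```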
@@ -88,109 +88,22 @@ ANSWER_PATH=choices[0].message.content
  # false: non-streaming response
  STREAM=true"""
 
-     def __init__(self, verbose: bool = False):
+     def __init__(self, verbose: bool = False) -> None:
          self.verbose = verbose
          self.console = Console()
          self.bindings = KeyBindings()
          self.session = PromptSession(key_bindings=self.bindings)
-         self.current_mode = ModeEnum.TEMP.value
          self.config = {}
          self.history = []
          self.max_history_length = 25
+         self.current_mode = TEMP_MODE
 
-         # Setup key bindings
-         self._setup_key_bindings()
-
-     def _setup_key_bindings(self):
+     def _setup_key_bindings(self) -> None:
          """Setup keyboard shortcuts"""
 
-         @self.bindings.add(Keys.ControlI)  # Bind Ctrl+I to switch modes
-         def _(event: KeyPressEvent):
-             self.current_mode = (
-                 ModeEnum.CHAT.value
-                 if self.current_mode == ModeEnum.EXECUTE.value
-                 else ModeEnum.EXECUTE.value
-             )
-
-     def clear_history(self):
-         """Clear chat history"""
-         self.history = []
-
-     def detect_os(self) -> str:
-         """Detect operating system
-         Returns:
-             str: operating system name
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
-         if self.config.get("OS_NAME") != "auto":
-             return self.config["OS_NAME"]
-         current_platform = platform.system()
-         if current_platform == "Linux":
-             return "Linux/" + distro_name(pretty=True)
-         if current_platform == "Windows":
-             return "Windows " + platform.release()
-         if current_platform == "Darwin":
-             return "Darwin/MacOS " + platform.mac_ver()[0]
-         return current_platform
-
-     def detect_shell(self) -> str:
-         """Detect shell
-         Returns:
-             str: shell name
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
-         if self.config["SHELL_NAME"] != "auto":
-             return self.config["SHELL_NAME"]
-         import platform
-
-         current_platform = platform.system()
-         if current_platform in ("Windows", "nt"):
-             is_powershell = len(getenv("PSModulePath", "").split(pathsep)) >= 3
-             return "powershell.exe" if is_powershell else "cmd.exe"
-         return basename(getenv("SHELL", "/bin/sh"))
-
-     def build_cmd_prompt(self) -> str:
-         """Build command prompt
-         Returns:
-             str: command prompt
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
-         _os = self.detect_os()
-         _shell = self.detect_shell()
-         return f"""Your are a Shell Command Generator.
- Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
- Rules:
- 1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
- 2. Output STRICTLY in plain text format
- 3. NEVER use markdown, code blocks or explanations
- 4. Chain multi-step commands in SINGLE LINE
- 5. Return NOTHING except the ready-to-run command"""
-
-     def build_default_prompt(self) -> str:
-         """Build default prompt
-         Returns:
-             str: default prompt
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
-         _os = self.detect_os()
-         _shell = self.detect_shell()
-         return (
-             "You are yaili, a system management and programing assistant, "
-             f"You are managing {_os} operating system with {_shell} shell. "
-             "Your responses should be concise and use Markdown format, "
-             "unless the user explicitly requests more details."
-         )
-
-     def get_default_config(self) -> dict[str, str]:
-         """Get default configuration
-         Returns:
-             dict: default configuration with lowest priority
-         """
-         return DEFAULT_CONFIG.copy()
+         @self.bindings.add(Keys.ControlI)  # Bind TAB to switch modes
+         def _(event: KeyPressEvent) -> None:
+             self.current_mode = EXEC_MODE if self.current_mode == CHAT_MODE else CHAT_MODE
 
      def load_config(self) -> dict[str, str]:
          """Load LLM API configuration with priority:
@@ -202,431 +115,250 @@ Rules:
          dict: merged configuration
          """
          # Start with default configuration (lowest priority)
-         merged_config = self.get_default_config()
+         merged_config = {k: v["value"] for k, v in DEFAULT_CONFIG_MAP.items()}
 
-         # Load from configuration file (middle priority)
+         # Create default config file if it doesn't exist
          if not self.CONFIG_PATH.exists():
-             self.console.print(
-                 "[bold yellow]Configuration file not found. Creating default configuration file.[/bold yellow]"
-             )
+             self.console.print("[bold yellow]Creating default configuration file.[/bold yellow]")
              self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
              with open(self.CONFIG_PATH, "w") as f:
                  f.write(self.DEFAULT_CONFIG_INI)
          else:
+             # Load from configuration file (middle priority)
              config_parser = CasePreservingConfigParser()
             config_parser.read(self.CONFIG_PATH)
              if "core" in config_parser:
-                 # Update with values from config file
-                 for key, value in config_parser["core"].items():
-                     if value.strip():  # Only use non-empty values from config file
-                         merged_config[key] = value
+                 # Update with non-empty values from config file
+                 merged_config.update({k: v for k, v in config_parser["core"].items() if v.strip()})
 
          # Override with environment variables (highest priority)
-         for config_key, env_var in ENV_VAR_MAPPING.items():
-             env_value = getenv(env_var)
-             if env_value is not None:  # Only override if environment variable exists
-                 merged_config[config_key] = env_value
+         for key, config in DEFAULT_CONFIG_MAP.items():
+             env_value = getenv(config["env_key"])
+             if env_value is not None:
+                 merged_config[key] = env_value
 
-         # Ensure STREAM is lowercase string
          merged_config["STREAM"] = str(merged_config.get("STREAM", "true")).lower()
 
          self.config = merged_config
-         return self.config
-
-     def _call_api(self, url: str, headers: dict, data: dict) -> requests.Response:
-         """Call the API and return the response.
-         Args:
-             url: API endpoint URL
-             headers: request headers
-             data: request data
-         Returns:
-             requests.Response: response object
-         Raises:
-             requests.exceptions.RequestException: if there is an error with the request
-         """
-         response = requests.post(url, headers=headers, json=data)
-         response.raise_for_status()  # Raise an exception for non-200 status codes
-         return response
-
-     def get_llm_url(self) -> str:
-         """Get LLM API URL
-         Returns:
-             str: LLM API URL
-         Raises:
-             typer.Exit: if API key or base URL is not set
-         """
-         base = self.config.get("BASE_URL", "").rstrip("/")
-         if not base:
-             self.console.print(
-                 "[red]Base URL not found. Please set it in the configuration file. Default: https://api.openai.com/v1[/red]"
-             )
-             raise typer.Exit(code=1)
-         COMPLETION_PATH = self.config.get("COMPLETION_PATH", "").lstrip("/")
-         if not COMPLETION_PATH:
-             self.console.print(
-                 "[red]Completions path not set. Please set it in the configuration file. Default: `/chat/completions`[/red]"
-             )
-             raise typer.Exit(code=1)
-         return f"{base}/{COMPLETION_PATH}"
-
-     def build_data(self, prompt: str, mode: str = ModeEnum.TEMP.value) -> dict:
-         """Build request data
-         Args:
-             prompt: user input
-             mode: chat or execute mode
-         Returns:
-             dict: request data
-         """
-         if mode == ModeEnum.EXECUTE.value:
-             system_prompt = self.build_cmd_prompt()
-         else:
-             system_prompt = self.build_default_prompt()
+         return merged_config
 
-         # Build messages list, first add system prompt
-         messages = [{"role": "system", "content": system_prompt}]
+     def detect_os(self) -> str:
+         """Detect operating system + version"""
+         if self.config.get("OS_NAME") != "auto":
+             return self.config["OS_NAME"]
+         current_platform = platform.system()
+         if current_platform == "Linux":
+             return "Linux/" + distro_name(pretty=True)
+         if current_platform == "Windows":
+             return "Windows " + platform.release()
+         if current_platform == "Darwin":
+             return "Darwin/MacOS " + platform.mac_ver()[0]
+         return current_platform
 
-         # Add history records in chat mode
-         if mode == ModeEnum.CHAT.value and self.history:
-             messages.extend(self.history)
+     def detect_shell(self) -> str:
+         """Detect shell name"""
+         if self.config["SHELL_NAME"] != "auto":
+             return self.config["SHELL_NAME"]
 
-         # Add current user message
-         messages.append({"role": "user", "content": prompt})
+         current_platform = platform.system()
+         if current_platform in ("Windows", "nt"):
+             is_powershell = len(getenv("PSModulePath", "").split(pathsep)) >= 3
+             return "powershell.exe" if is_powershell else "cmd.exe"
+         return basename(getenv("SHELL", "/bin/sh"))
 
-         return {
-             "model": self.config["MODEL"],
-             "messages": messages,
+     def post(self, message: list[dict[str, str]]) -> requests.Response:
+         """Post message to LLM API and return response"""
+         url = self.config.get("BASE_URL", "").rstrip("/") + "/" + self.config.get("COMPLETION_PATH", "").lstrip("/")
+         body = {
+             "messages": message,
+             "model": self.config.get("MODEL", "gpt-4o"),
              "stream": self.config.get("STREAM", "true") == "true",
              "temperature": 0.7,
-             "top_p": 0.7,
+             "top_p": 1,
          }
+         response = requests.post(url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"})
+         try:
+             response.raise_for_status()
+         except requests.exceptions.HTTPError as e:
+             self.console.print(f"[red]Error calling API: {e}[/red]")
+             if self.verbose:
+                 self.console.print(f"Reason: {e.response.reason}")
+                 self.console.print(f"Response: {response.text}")
+             raise typer.Exit(code=1) from None
+         return response
 
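Note: `post()` collapses 0.0.6's `get_llm_url()`, `build_data()` and `_call_api()` into one method. With the default configuration it issues a standard OpenAI-style chat-completions request; a sketch of the equivalent call (placeholder key, actual request left commented out):

```python
import requests

base_url = "https://api.openai.com/v1"      # BASE_URL default
completion_path = "chat/completions"        # COMPLETION_PATH default
url = base_url.rstrip("/") + "/" + completion_path.lstrip("/")

body = {
    "messages": [{"role": "user", "content": "hello"}],
    "model": "gpt-4o",
    "stream": True,
    "temperature": 0.7,
    "top_p": 1,
}
headers = {"Authorization": "Bearer <API_KEY>"}  # placeholder, not a real key
# response = requests.post(url, json=body, headers=headers)
# response.raise_for_status()
```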
-     def stream_response(self, response: requests.Response) -> str:
-         """Stream response from LLM API
-         Args:
-             response: requests.Response object
-         Returns:
-             str: full completion text
-         """
+     def _print(self, response: requests.Response, stream: bool = True) -> str:
+         """Print response from LLM and return full completion"""
          full_completion = ""
-         # Streaming response loop
-         with Live(console=self.console) as live:
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-                 decoded_line = line.decode("utf-8")
-                 if decoded_line.startswith("data: "):
-                     decoded_line = decoded_line[6:]
-                     if decoded_line == "[DONE]":
+         if stream:
+             with Live() as live:
+                 for line in response.iter_lines():
+                     # Skip empty lines
+                     if not line:
+                         continue
+
+                     # Process server-sent events
+                     data = line.decode("utf-8")
+                     if not data.startswith("data: "):
+                         continue
+
+                     # Extract data portion
+                     data = data[6:]
+                     if data == "[DONE]":
                          break
+
+                     # Parse JSON and update display
                      try:
-                         json_data = json.loads(decoded_line)
+                         json_data = json.loads(data)
                          content = json_data["choices"][0]["delta"].get("content", "")
                          full_completion += content
-                         markdown = Markdown(markup=full_completion)
-                         live.update(markdown, refresh=True)
+                         live.update(Markdown(markup=full_completion), refresh=True)
                      except json.JSONDecodeError:
                          self.console.print("[red]Error decoding response JSON[/red]")
                          if self.verbose:
-                             self.console.print(f"[red]Error decoding JSON: {decoded_line}[/red]")
-                 time.sleep(0.05)
-
-         return full_completion
-
-     def call_llm_api(self, prompt: str) -> str:
-         """Call LLM API, return streaming output
-         Args:
-             prompt: user input
-         Returns:
-             str: streaming output
-         """
-         url = self.get_llm_url()
-         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = self.build_data(prompt)
-         try:
-             response = self._call_api(url, headers, data)
-         except requests.exceptions.RequestException as e:
-             self.console.print(f"[red]Error calling API: {e}[/red]")
-             if self.verbose and e.response:
-                 self.console.print(f"{e.response.text}")
-             raise typer.Exit(code=1) from None
-         if not response:
-             raise typer.Exit(code=1)
-
-         self.console.print("\n[bold green]Assistant:[/bold green]")
-         assistant_response = self.stream_response(
-             response
-         )  # Stream the response and get the full text
-         self.console.print()  # Add a newline after the completion
+                             self.console.print(f"[red]Error: {data}[/red]")
 
-         return assistant_response
-
-     def get_command_from_llm(self, prompt: str) -> Optional[str]:
-         """Request Shell command from LLM
-         Args:
-             prompt: user input
-         Returns:
-             str: shell command
-         """
-         url = self.get_llm_url()
-         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = self.build_data(prompt, mode=ModeEnum.EXECUTE.value)
-         data["stream"] = False
-         try:
-             response = self._call_api(url, headers, data)
-         except requests.exceptions.RequestException as e:
-             self.console.print(f"[red]Error calling API: {e}[/red]")
-             return None
-         if not response:
-             return None
-         ANSWER_PATH = self.config.get("ANSWER_PATH", None)
-         if not ANSWER_PATH:
-             ANSWER_PATH = "choices[0].message.content"
-             if self.verbose:
-                 self.console.print(
-                     "[bold yellow]Answer path not set. Using default: `choices[0].message.content`[/bold yellow]"
-                 )
-         content = jmespath.search(ANSWER_PATH, response.json())
-         return content.strip()
-
-     def execute_shell_command(self, command: str) -> int:
-         """Execute shell command
-         Args:
-             command: shell command
-         Returns:
-             int: return code
-         """
-         self.console.print(f"\n[bold green]Executing command: [/bold green] {command}\n")
-         result = subprocess.run(command, shell=True)
-         if result.returncode != 0:
-             self.console.print(
-                 f"\n[bold red]Command failed with return code: {result.returncode}[/bold red]"
+                     time.sleep(0.01)
+         else:
+             # Non-streaming response
+             full_completion = jmespath.search(
+                 self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json()
              )
-         return result.returncode
+             self.console.print(Markdown(full_completion))
+         self.console.print()  # Add a newline after the response to separate from the next input
+         return full_completion
 
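Note: the streaming branch of `_print()` parses OpenAI-style server-sent events: each payload line is `data: {json}`, delta fragments are accumulated, and a literal `data: [DONE]` ends the stream. A self-contained sketch of that parse over canned lines (no network required):

```python
import json

# Simulated SSE lines, as produced by response.iter_lines()
lines = [
    b'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    b"",
    b'data: {"choices": [{"delta": {"content": "lo"}}]}',
    b"data: [DONE]",
]

full_completion = ""
for line in lines:
    if not line:
        continue  # keep-alive blank lines are skipped
    data = line.decode("utf-8")
    if not data.startswith("data: "):
        continue
    data = data[6:]  # strip the "data: " prefix
    if data == "[DONE]":
        break
    delta = json.loads(data)["choices"][0]["delta"]
    full_completion += delta.get("content", "")

print(full_completion)  # -> Hello
```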
-     def get_prompt_tokens(self):
-         """Get prompt tokens based on current mode
-         Returns:
-             list: prompt tokens for prompt_toolkit
-         """
-         if self.current_mode == ModeEnum.CHAT.value:
+     def get_prompt_tokens(self) -> list[tuple[str, str]]:
+         """Return prompt tokens for current mode"""
+         if self.current_mode == CHAT_MODE:
              qmark = "💬"
-         elif self.current_mode == ModeEnum.EXECUTE.value:
+         elif self.current_mode == EXEC_MODE:
              qmark = "🚀"
          else:
              qmark = ""
          return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]
 
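Note: the REPL below passes `self.get_prompt_tokens` itself (not its result) to `session.prompt()`; prompt_toolkit re-invokes the callable on every redraw, which is what lets the mode indicator change live when Tab is pressed. A minimal sketch (the module-level `mode` stands in for `self.current_mode`):

```python
from prompt_toolkit import PromptSession

mode = "chat"

def get_prompt_tokens() -> list[tuple[str, str]]:
    # A list of (style_class, text) pairs is valid prompt_toolkit formatted text.
    qmark = "💬" if mode == "chat" else "🚀"
    return [("class:qmark", qmark), ("class:question", " > ")]

session = PromptSession()
# session.prompt(get_prompt_tokens)  # re-evaluated on each redraw
```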
-     def chat_mode(self, user_input: str):
-         """
-         This method handles the chat mode.
-         It adds the user input to the history and calls the API to get a response.
-         It then adds the response to the history and manages the history length.
-         Args:
-             user_input: user input
-         Returns:
-             ModeEnum: current mode
-         """
-         if self.current_mode != ModeEnum.CHAT.value:
-             return self.current_mode
-
-         # Add user message to history
-         self.history.append({"role": "user", "content": user_input})
-
-         # Call API and get response
-         assistant_response = self.call_llm_api(user_input)
-
-         # Add assistant response to history
-         if assistant_response:
-             self.history.append({"role": "assistant", "content": assistant_response})
-
-         # Manage history length, keep recent conversations
-         if (
-             len(self.history) > self.max_history_length * 2
-         ):  # Each conversation has user and assistant messages
-             self.history = self.history[-self.max_history_length * 2 :]
-
-         return ModeEnum.CHAT.value
-
-     def _filter_command(self, command: str) -> Optional[str]:
-         """Filter out unwanted characters from command
-
-         The LLM may return commands in markdown format with code blocks.
-         This method removes markdown formatting from the command.
-         It handles various formats including:
-         - Commands surrounded by ``` (plain code blocks)
-         - Commands with language specifiers like ```bash, ```zsh, etc.
-         - Commands with specific examples like ```ls -al```
-
-         example:
-         ```bash\nls -la\n``` ==> ls -al
-         ```zsh\nls -la\n``` ==> ls -al
-         ```ls -al``` ==> ls -al
-         ls -al ==> ls -al
-         ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
-         ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
-         """
-         if not command or not command.strip():
-             return ""
-
-         # Handle commands that are already without code blocks
-         if "```" not in command:
-             return command.strip()
-
-         # Handle code blocks with or without language specifiers
-         lines = command.strip().split("\n")
-
-         # Check if it's a single-line code block like ```ls -al```
-         if len(lines) == 1 and lines[0].startswith("```") and lines[0].endswith("```"):
-             return lines[0][3:-3].strip()
-
-         # Handle multi-line code blocks
-         if lines[0].startswith("```"):
-             # Remove the opening ``` line (with or without language specifier)
-             content_lines = lines[1:]
-
-             # If the last line is a closing ```, remove it
-             if content_lines and content_lines[-1].strip() == "```":
-                 content_lines = content_lines[:-1]
-
-             # Join the remaining lines and strip any extra whitespace
-             return "\n".join(line.strip() for line in content_lines if line.strip())
+     def _run_repl(self) -> None:
+         """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
+         # Show REPL instructions
+         self._setup_key_bindings()
+         self.console.print("[bold]Starting REPL loop[/bold]")
+         self.console.print("[bold]Press TAB to change in chat and exec mode[/bold]")
+         self.console.print("[bold]Type /clear to clear chat history[/bold]")
+         self.console.print("[bold]Type /his to see chat history[/bold]")
+         self.console.print("[bold]Press Ctrl+C or type /exit to exit[/bold]\n")
 
-     def execute_mode(self, user_input: str):
-         """
-         This method generates a shell command from the user input and executes it.
-         If the user confirms the command, it is executed.
-         Args:
-             user_input: user input
-         Returns:
-             ModeEnum: current mode
-         """
-         if user_input == "" or self.current_mode != ModeEnum.EXECUTE.value:
-             return self.current_mode
-
-         command = self.get_command_from_llm(user_input)
-         if not command:
-             self.console.print("[bold red]No command generated[/bold red]")
-             return self.current_mode
-         command = self._filter_command(command)
-         if not command:
-             self.console.print("[bold red]No command generated[/bold red]")
-             return self.current_mode
-         self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {command}")
-         confirm = Confirm.ask("Execute this command?")
-         if confirm:
-             self.execute_shell_command(command)
-         return ModeEnum.EXECUTE.value
-
-     def run_repl_loop(self):
          while True:
-             user_input = self.session.prompt(self.get_prompt_tokens)
-             # Skip empty input
-             if not user_input.strip():
+             # Get user input
+             user_input = self.session.prompt(self.get_prompt_tokens).strip()
+             if not user_input:
                  continue
 
-             if user_input.lower() in ("/exit", "/quit", "/q"):
+             # Handle exit commands
+             if user_input.lower() == CMD_EXIT:
                  break
 
-             # Handle special commands
-             if self.current_mode == ModeEnum.CHAT.value:
-                 if user_input.lower() == "/clear":
-                     self.clear_history()
-                     self.console.print("[bold yellow]Chat history cleared[/bold yellow]\n")
-                     continue
-                 else:
-                     self.chat_mode(user_input)
-             elif self.current_mode == ModeEnum.EXECUTE.value:
-                 self.execute_mode(user_input)
+             # Handle clear command
+             if user_input.lower() == CMD_CLEAR and self.current_mode == CHAT_MODE:
+                 self.history = []
+                 self.console.print("[bold yellow]Chat history cleared[/bold yellow]\n")
+                 continue
+             elif user_input.lower() == "/his":
+                 self.console.print(self.history)
+                 continue
+             # Create appropriate system prompt based on mode
+             system_prompt = SHELL_PROMPT if self.current_mode == EXEC_MODE else DEFAULT_PROMPT
+             system_content = system_prompt.format(_os=self.detect_os(), _shell=self.detect_shell())
+
+             # Create message with system prompt and history
+             message = [{"role": "system", "content": system_content}]
+             message.extend(self.history)
+
+             # Add current user message
+             message.append({"role": "user", "content": user_input})
+
+             # Get response from LLM
+             response = self.post(message)
+             self.console.print("\n[bold green]Assistant:[/bold green]")
+             content = self._print(response, stream=self.config["STREAM"] == "true")
+
+             # Add user input and assistant response to history
+             self.history.append({"role": "user", "content": user_input})
+             self.history.append({"role": "assistant", "content": content})
+
+             # Trim history if needed
+             if len(self.history) > self.max_history_length * 2:
+                 self.history = self.history[-self.max_history_length * 2 :]
+
+             # Handle command execution in exec mode
+             if self.current_mode == EXEC_MODE:
+                 self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
+                 if Confirm.ask("Execute this command?", default=False):
+                     returncode = subprocess.call(content, shell=True)
+                     if returncode != 0:
+                         self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")
 
          self.console.print("[bold green]Exiting...[/bold green]")
 
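Note: exec mode now runs the generated text with `subprocess.call(content, shell=True)`, which returns the command's exit status directly (0.0.6 reached the same value via `subprocess.run(...).returncode`), and `Confirm.ask(..., default=False)` makes "do not execute" the default answer. A minimal sketch of the confirm-and-run step:

```python
import subprocess

from rich.prompt import Confirm

command = "echo hello && exit 3"  # stand-in for an LLM-generated command

if Confirm.ask("Execute this command?", default=False):
    # subprocess.call runs the string through the shell and returns its exit status
    returncode = subprocess.call(command, shell=True)
    if returncode != 0:
        print(f"Command failed with return code {returncode}")
```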
-     def run_one_shot(self, prompt: str):
-         """Run one-shot mode with given prompt
-         Args:
-             prompt (str): Prompt to send to LLM
-         Returns:
-             None
-         """
-         if self.current_mode == ModeEnum.EXECUTE.value:
-             self.execute_mode(prompt)  # Execute mode for one-shot prompt
-         else:
-             self.call_llm_api(prompt)
-
-     def run(self, chat=False, shell=False, prompt: Optional[str] = None):
-         """Run the CLI application
-         Args:
-             chat (bool): Whether to run in chat mode
-             shell (bool): Whether to run in shell mode
-             prompt (Optional[str]): Prompt send to LLM
-
-         Returns:
-             None
-         """
-         # Load configuration
-         self.config = self.load_config()
-         if not self.config.get("API_KEY", None):
+     def run(self, chat: bool, shell: bool, prompt: str) -> None:
+         self.load_config()
+         if not self.config.get("API_KEY"):
+             self.console.print("[bold red]API key not set[/bold red]")
              self.console.print(
-                 "[red]API key not found. Please set it in the configuration file.[/red]"
+                 "[bold red]Please set API key in ~/.config/yaicli/config.ini or environment variable[/bold red]"
              )
-             return
+             raise typer.Exit(code=1)
+         _os = self.detect_os()
+         _shell = self.detect_shell()
 
-         # Check run mode from command line arguments
-         if all([chat, shell]):
-             self.console.print("[red]Cannot use both --chat and --shell[/red]")
+         # Handle chat mode
+         if chat:
+             self.current_mode = CHAT_MODE
+             self._run_repl()
              return
-         elif chat:
-             self.current_mode = ModeEnum.CHAT.value
-         elif shell:
-             self.current_mode = ModeEnum.EXECUTE.value
-
-         if self.verbose:
-             self.console.print("[bold yellow]Verbose mode enabled[/bold yellow]")
-             self.console.print(f"[bold yellow]Current mode: {self.current_mode}[/bold yellow]")
-             self.console.print(f"[bold yellow]Using model: {self.config['MODEL']}[/bold yellow]")
-
-         if self.current_mode in (ModeEnum.TEMP.value, ModeEnum.EXECUTE.value) and prompt:
-             self.run_one_shot(prompt)
-         elif self.current_mode == ModeEnum.CHAT.value:
-             self.run_repl_loop()
-
-
- # CLI application setup
- CONTEXT_SETTINGS = {
-     "help_option_names": ["-h", "--help"],
-     "show_default": True,
- }
 
- app = typer.Typer(
-     name="yaicli",
-     context_settings=CONTEXT_SETTINGS,
-     pretty_exceptions_enable=False,
-     short_help="yaicli. Your AI interface in cli.",
-     no_args_is_help=True,
-     invoke_without_command=True,
- )
+         # Create appropriate system prompt based on mode
+         system_prompt = SHELL_PROMPT if shell else DEFAULT_PROMPT
+         system_content = system_prompt.format(_os=_os, _shell=_shell)
+
+         # Create message with system prompt and user input
+         message = [
+             {"role": "system", "content": system_content},
+             {"role": "user", "content": prompt},
+         ]
+
+         # Get response from LLM
+         response = self.post(message)
+         self.console.print("\n[bold green]Assistant:[/bold green]")
+         content = self._print(response, stream=(not shell and self.config["STREAM"] == "true"))
+
+         # Handle shell mode execution
+         if shell:
+             self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
+             if Confirm.ask("Execute this command?", default=False):
+                 returncode = subprocess.call(content, shell=True)
+                 if returncode != 0:
+                     self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")
 
  @app.command()
  def main(
      ctx: typer.Context,
-     prompt: Annotated[
-         str, typer.Argument(show_default=False, help="The prompt send to the LLM")
-     ] = "",
-     verbose: Annotated[
-         bool, typer.Option("--verbose", "-V", help="Show verbose information")
-     ] = False,
+     prompt: Annotated[Optional[str], typer.Argument(show_default=False, help="The prompt send to the LLM")] = None,
+     verbose: Annotated[bool, typer.Option("--verbose", "-V", help="Show verbose information")] = False,
      chat: Annotated[bool, typer.Option("--chat", "-c", help="Start in chat mode")] = False,
-     shell: Annotated[
-         bool, typer.Option("--shell", "-s", help="Generate and execute shell command")
-     ] = False,
+     shell: Annotated[bool, typer.Option("--shell", "-s", help="Generate and execute shell command")] = False,
  ):
-     """yaicli. Your AI interface in cli."""
+     """yaicli - Your AI interface in cli."""
+     if prompt == "":
+         typer.echo("Empty prompt, ignored")
+         return
      if not prompt and not chat:
          typer.echo(ctx.get_help())
-         raise typer.Exit()
+         return
 
-     cli = YAICLI(verbose=verbose)
-     cli.run(chat=chat, shell=shell, prompt=prompt)
+     cli = CLI(verbose=verbose)
+     cli.run(chat=chat, shell=shell, prompt=prompt or "")
 
 
  if __name__ == "__main__":
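Note: taken together, `load_config()` still implements the same three-layer precedence as 0.0.6: hard-coded defaults, then non-empty values from `~/.config/yaicli/config.ini` (read with the case-preserving parser, since `RawConfigParser` lowercases option names by default), then `AI_*` environment variables. A runnable sketch of that merge:

```python
import configparser
from os import environ, getenv

class CasePreservingConfigParser(configparser.RawConfigParser):
    # RawConfigParser lowercases option names via optionxform();
    # the identity override keeps keys like BASE_URL intact.
    def optionxform(self, optionstr):
        return optionstr

defaults = {"MODEL": "gpt-4o", "STREAM": "true"}          # lowest priority

parser = CasePreservingConfigParser()
parser.read_string("[core]\nMODEL=gpt-4o-mini\n")          # middle priority
merged = {**defaults, **{k: v for k, v in parser["core"].items() if v.strip()}}

environ["AI_MODEL"] = "gpt-3.5-turbo"                      # highest priority
env_value = getenv("AI_MODEL")
if env_value is not None:
    merged["MODEL"] = env_value

print(merged)  # {'MODEL': 'gpt-3.5-turbo', 'STREAM': 'true'}
```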