yaicli 0.0.19__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yaicli.py DELETED
@@ -1,667 +0,0 @@
1
- import configparser
2
- import json
3
- import platform
4
- import subprocess
5
- import sys
6
- import time
7
- from os import getenv
8
- from os.path import basename, exists, pathsep, devnull
9
- from pathlib import Path
10
- from typing import Annotated, Any, Dict, Optional, Union
11
-
12
- import httpx
13
- import jmespath
14
- import typer
15
- from distro import name as distro_name
16
- from prompt_toolkit import PromptSession, prompt
17
- from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
18
- from prompt_toolkit.history import FileHistory, _StrOrBytesPath
19
- from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
20
- from prompt_toolkit.keys import Keys
21
- from rich.console import Console
22
- from rich.live import Live
23
- from rich.markdown import Markdown
24
- from rich.panel import Panel
25
- from rich.prompt import Prompt
26
-
27
# System prompt used in exec mode. Typos fixed: "Your are" -> "You are".
SHELL_PROMPT = """You are a Shell Command Generator.
Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
Rules:
1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
2. Output STRICTLY in plain text format
3. NEVER use markdown, code blocks or explanations
4. Chain multi-step commands in SINGLE LINE
5. Return NOTHING except the ready-to-run command"""

# System prompt used in chat/temp mode. Typo fixed: "programing" -> "programming".
DEFAULT_PROMPT = (
    "You are YAICLI, a system management and programming assistant, "
    "You are managing {_os} operating system with {_shell} shell. "
    "Your responses should be concise and use Markdown format, "
    "unless the user explicitly requests more details."
)

# Special REPL commands recognized by the chat loop.
CMD_CLEAR = "/clear"
CMD_EXIT = "/exit"
CMD_HISTORY = "/his"

# Run modes: exec generates shell commands, chat is the REPL, temp is one-shot Q&A.
EXEC_MODE = "exec"
CHAT_MODE = "chat"
TEMP_MODE = "temp"

# Default configuration: each entry carries the default string value, the
# overriding environment variable name, and the target Python type.
DEFAULT_CONFIG_MAP = {
    # Core API settings
    "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "YAI_BASE_URL", "type": str},
    "API_KEY": {"value": "", "env_key": "YAI_API_KEY", "type": str},
    "MODEL": {"value": "gpt-4o", "env_key": "YAI_MODEL", "type": str},
    # System detection hints
    "SHELL_NAME": {"value": "auto", "env_key": "YAI_SHELL_NAME", "type": str},
    "OS_NAME": {"value": "auto", "env_key": "YAI_OS_NAME", "type": str},
    # API response parsing
    "COMPLETION_PATH": {"value": "chat/completions", "env_key": "YAI_COMPLETION_PATH", "type": str},
    "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "YAI_ANSWER_PATH", "type": str},
    # API call parameters
    "STREAM": {"value": "true", "env_key": "YAI_STREAM", "type": bool},
    "TEMPERATURE": {"value": "0.7", "env_key": "YAI_TEMPERATURE", "type": float},
    "TOP_P": {"value": "1.0", "env_key": "YAI_TOP_P", "type": float},
    "MAX_TOKENS": {"value": "1024", "env_key": "YAI_MAX_TOKENS", "type": int},
    # UI/UX settings
    "CODE_THEME": {"value": "monokai", "env_key": "YAI_CODE_THEME", "type": str},
    "MAX_HISTORY": {"value": "500", "env_key": "YAI_MAX_HISTORY", "type": int},  # readline history file limit
    "AUTO_SUGGEST": {"value": "true", "env_key": "YAI_AUTO_SUGGEST", "type": bool},
}

# Template written to ~/.config/yaicli/config.ini on first run.
DEFAULT_CONFIG_INI = f"""[core]
PROVIDER=openai
BASE_URL={DEFAULT_CONFIG_MAP["BASE_URL"]["value"]}
API_KEY={DEFAULT_CONFIG_MAP["API_KEY"]["value"]}
MODEL={DEFAULT_CONFIG_MAP["MODEL"]["value"]}

# auto detect shell and os (or specify manually, e.g., bash, zsh, powershell.exe)
SHELL_NAME={DEFAULT_CONFIG_MAP["SHELL_NAME"]["value"]}
OS_NAME={DEFAULT_CONFIG_MAP["OS_NAME"]["value"]}

# API paths (usually no need to change for OpenAI compatible APIs)
COMPLETION_PATH={DEFAULT_CONFIG_MAP["COMPLETION_PATH"]["value"]}
ANSWER_PATH={DEFAULT_CONFIG_MAP["ANSWER_PATH"]["value"]}

# true: streaming response, false: non-streaming
STREAM={DEFAULT_CONFIG_MAP["STREAM"]["value"]}

# LLM parameters
TEMPERATURE={DEFAULT_CONFIG_MAP["TEMPERATURE"]["value"]}
TOP_P={DEFAULT_CONFIG_MAP["TOP_P"]["value"]}
MAX_TOKENS={DEFAULT_CONFIG_MAP["MAX_TOKENS"]["value"]}

# UI/UX
CODE_THEME={DEFAULT_CONFIG_MAP["CODE_THEME"]["value"]}
MAX_HISTORY={DEFAULT_CONFIG_MAP["MAX_HISTORY"]["value"]} # Max entries kept in history file
AUTO_SUGGEST={DEFAULT_CONFIG_MAP["AUTO_SUGGEST"]["value"]}
"""
100
-
101
# Typer application entry point. `-h` is accepted as an alias for `--help`;
# pretty (rich-rendered) exception tracebacks are disabled so errors print plainly.
app = typer.Typer(
    name="yaicli",
    context_settings={"help_option_names": ["-h", "--help"]},
    pretty_exceptions_enable=False,
)
106
-
107
-
108
class CasePreservingConfigParser(configparser.RawConfigParser):
    """RawConfigParser variant that keeps option names exactly as written.

    The stock parser lowercases option names via ``optionxform``; overriding it
    with the identity function preserves keys such as ``API_KEY`` verbatim.
    """

    def optionxform(self, optionstr):
        # Identity transform: return the option name unchanged.
        return optionstr
113
-
114
-
115
class LimitedFileHistory(FileHistory):
    """prompt_toolkit ``FileHistory`` that caps the number of records kept on disk."""

    def __init__(self, filename: _StrOrBytesPath, max_entries: int = 500, trim_every: int = 2):
        """Limited file history

        Args:
            filename (str): path to history file
            max_entries (int): maximum number of entries to keep
            trim_every (int): trim history every `trim_every` appends

        Example:
            >>> history = LimitedFileHistory("~/.yaicli_history", max_entries=500, trim_every=10)
            >>> history.append_string("echo hello")
            >>> history.append_string("echo world")
            >>> session = PromptSession(history=history)
        """
        self.max_entries = max_entries
        self._append_count = 0  # appends since the last trim
        self._trim_every = trim_every
        super().__init__(filename)

    def store_string(self, string: str) -> None:
        """Store one history entry, trimming the file every ``trim_every`` appends."""
        # Call the original method to deposit a new record
        super().store_string(string)

        self._append_count += 1
        if self._append_count >= self._trim_every:
            self._trim_history()
            self._append_count = 0

    def _trim_history(self):
        """Rewrite the history file keeping only the newest ``max_entries`` records."""
        if not exists(self.filename):
            return

        with open(self.filename, "r", encoding="utf-8") as f:
            lines = f.readlines()

        # By record: each record starts with "# timestamp" followed by a number of "+lines".
        entries = []
        current_entry = []

        for line in lines:
            if line.startswith("# "):
                if current_entry:
                    entries.append(current_entry)
                current_entry = [line]
            elif line.startswith("+") or line.strip() == "":
                current_entry.append(line)

        if current_entry:
            entries.append(current_entry)

        # Keep the most recent max_entries records (later records are newer)
        trimmed_entries = entries[-self.max_entries :]

        with open(self.filename, "w", encoding="utf-8") as f:
            for entry in trimmed_entries:
                f.writelines(entry)
171
-
172
-
173
class CLI:
    """Core application object.

    Loads layered configuration (env > config file > defaults), talks to an
    OpenAI-compatible chat-completions API, and runs either the interactive
    REPL (chat mode) or a single request (temp/exec mode).
    """

    CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()

    def __init__(self, verbose: bool = False) -> None:
        """Create the CLI.

        Args:
            verbose: when True, print extra diagnostics (config dump, raw API errors).
        """
        self.verbose = verbose
        self.console = Console()
        self.bindings = KeyBindings()
        # PromptSession emits a warning on non-tty stdin; silence stderr while
        # constructing it, then restore.
        _origin_stderr = None
        if not sys.stdin.isatty():
            _origin_stderr = sys.stderr
            sys.stderr = open(devnull, "w")
        self.session = PromptSession(key_bindings=self.bindings)
        # Restore stderr
        if _origin_stderr:
            sys.stderr.close()
            sys.stderr = _origin_stderr
        self.config: Dict[str, Any] = {}
        self.history: list[dict[str, str]] = []
        # Max number of messages kept in the in-memory chat context (not the file history).
        self.max_history_length = 25
        self.current_mode = TEMP_MODE

    def prepare_chat_loop(self) -> None:
        """Setup key bindings and persistent, size-limited input history for chat mode."""
        self._setup_key_bindings()
        # Initialize history file
        Path("~/.yaicli_history").expanduser().touch(exist_ok=True)
        self.session = PromptSession(
            key_bindings=self.bindings,
            # completer=WordCompleter(["/clear", "/exit", "/his"]),
            history=LimitedFileHistory(
                Path("~/.yaicli_history").expanduser(), max_entries=int(self.config["MAX_HISTORY"])
            ),
            auto_suggest=AutoSuggestFromHistory() if self.config["AUTO_SUGGEST"] else None,
            enable_history_search=True,
        )

    def _setup_key_bindings(self) -> None:
        """Setup keyboard shortcuts"""

        @self.bindings.add(Keys.ControlI)  # Bind TAB to switch modes
        def _(event: KeyPressEvent) -> None:
            self.current_mode = EXEC_MODE if self.current_mode == CHAT_MODE else CHAT_MODE

    def load_config(self) -> dict[str, Any]:
        """Load LLM API configuration with priority:
        1. Environment variables (highest priority)
        2. Configuration file
        3. Default values (lowest priority)

        Applies type conversion based on DEFAULT_CONFIG_MAP after merging sources.

        Returns:
            dict: merged configuration with appropriate types
        """
        # Start with default configuration string values (lowest priority);
        # these also serve as the fallback on type-conversion errors.
        default_values_str = {k: v["value"] for k, v in DEFAULT_CONFIG_MAP.items()}
        merged_config: Dict[str, Any] = default_values_str.copy()

        # Create default config file if it doesn't exist
        if not self.CONFIG_PATH.exists():
            self.console.print("[bold yellow]Creating default configuration file.[/bold yellow]")
            self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
            with open(self.CONFIG_PATH, "w", encoding="utf-8") as f:
                f.write(DEFAULT_CONFIG_INI)
        else:
            # Load from configuration file (middle priority)
            config_parser = CasePreservingConfigParser()
            config_parser.read(self.CONFIG_PATH, encoding="utf-8")
            if "core" in config_parser:
                # Update with non-empty values from config file (values are strings)
                merged_config.update(
                    {k: v for k, v in config_parser["core"].items() if k in DEFAULT_CONFIG_MAP and v.strip()}
                )

        # Override with environment variables (highest priority)
        for key, config_info in DEFAULT_CONFIG_MAP.items():
            env_value = getenv(config_info["env_key"])
            if env_value is not None:
                # Env values are strings
                merged_config[key] = env_value
            target_type = config_info["type"]
            # Fallback, shouldn't be needed here, but safe
            raw_value: Any = merged_config.get(key, default_values_str.get(key))
            converted_value = None
            try:
                if target_type is bool:
                    # Only the literal (case-insensitive) string "true" is truthy.
                    converted_value = str(raw_value).strip().lower() == "true"
                elif target_type in (int, float, str):
                    converted_value = target_type(raw_value)
            except (ValueError, TypeError) as e:
                self.console.print(
                    f"[yellow]Warning:[/yellow] Invalid value '{raw_value}' for '{key}'. "
                    f"Expected type '{target_type.__name__}'. Using default value '{default_values_str[key]}'. Error: {e}",
                    style="dim",
                )
                # Fallback to default string value
                converted_value = target_type(default_values_str[key])

            merged_config[key] = converted_value
        self.config = merged_config
        return self.config

    def detect_os(self) -> str:
        """Detect operating system + version (honoring the OS_NAME override)."""
        if self.config.get("OS_NAME") != "auto":
            return self.config["OS_NAME"]
        current_platform = platform.system()
        if current_platform == "Linux":
            return "Linux/" + distro_name(pretty=True)
        if current_platform == "Windows":
            return "Windows " + platform.release()
        if current_platform == "Darwin":
            return "Darwin/MacOS " + platform.mac_ver()[0]
        return current_platform

    def detect_shell(self) -> str:
        """Detect shell name (honoring the SHELL_NAME override)."""
        if self.config["SHELL_NAME"] != "auto":
            return self.config["SHELL_NAME"]

        current_platform = platform.system()
        if current_platform in ("Windows", "nt"):
            # PSModulePath has >= 3 entries when running under PowerShell.
            is_powershell = len(getenv("PSModulePath", "").split(pathsep)) >= 3
            return "powershell.exe" if is_powershell else "cmd.exe"
        return basename(getenv("SHELL", None) or "/bin/sh")

    def _filter_command(self, command: str) -> Optional[str]:
        """Filter out unwanted characters from command

        The LLM may return commands in markdown format with code blocks.
        This method removes markdown formatting from the command.
        It handles various formats including:
        - Commands surrounded by ``` (plain code blocks)
        - Commands with language specifiers like ```bash, ```zsh, etc.
        - Commands with specific examples like ```ls -al```

        example:
        ```bash\nls -la\n``` ==> ls -la
        ```zsh\nls -la\n``` ==> ls -la
        ```ls -la``` ==> ls -la
        ls -la ==> ls -la
        ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
        ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
        ```plaintext\nls -la\n``` ==> ls -la
        """
        if not command or not command.strip():
            return ""

        # Handle commands that are already without code blocks
        if "```" not in command:
            return command.strip()

        # Handle code blocks with or without language specifiers
        lines = command.strip().split("\n")

        # Check if it's a single-line code block like ```ls -al```
        if len(lines) == 1 and lines[0].startswith("```") and lines[0].endswith("```"):
            return lines[0][3:-3].strip()

        # Handle multi-line code blocks
        if lines[0].startswith("```"):
            # Remove the opening ``` line (with or without language specifier)
            content_lines = lines[1:]

            # If the last line is a closing ```, remove it
            if content_lines and content_lines[-1].strip() == "```":
                content_lines = content_lines[:-1]

            # Join the remaining lines and strip any extra whitespace
            return "\n".join(line.strip() for line in content_lines if line.strip())

        # BUG FIX: previously this path fell through and returned None when a
        # fence appeared somewhere other than the first line; return the
        # stripped text instead so callers never get an unexpected None.
        return command.strip()

    def _get_number_with_type(self, key, _type: type, default=None):
        """Get number with type from config; raise ValueError on bad values."""
        try:
            return _type(self.config.get(key, default))
        except ValueError:
            raise ValueError(f"[red]{key} should be {_type} type.[/red]")

    def post(self, message: list[dict[str, str]]) -> httpx.Response:
        """Post message to LLM API and return response.

        Raises:
            httpx.HTTPStatusError: on non-2xx responses (after printing the error).
        """
        url = self.config.get("BASE_URL", "").rstrip("/") + "/" + self.config.get("COMPLETION_PATH", "").lstrip("/")
        body = {
            "messages": message,
            "model": self.config.get("MODEL", "gpt-4o"),
            "stream": self.config["STREAM"],
            "temperature": self._get_number_with_type(key="TEMPERATURE", _type=float, default="0.7"),
            "top_p": self._get_number_with_type(key="TOP_P", _type=float, default="1.0"),
            "max_tokens": self._get_number_with_type(key="MAX_TOKENS", _type=int, default="1024"),
        }
        with httpx.Client(timeout=120.0) as client:
            response = client.post(
                url,
                json=body,
                headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"},
            )
        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as e:
            self.console.print(f"[red]Error calling API: {e}[/red]")
            if self.verbose:
                self.console.print(f"Reason: {e}\nResponse: {response.text}")
            raise e
        return response

    def get_reasoning_content(self, delta: dict) -> Optional[str]:
        """Return the reasoning text from a stream delta, or None if absent."""
        # reasoning: openrouter
        # reasoning_content: infi-ai/deepseek
        for k in ("reasoning_content", "reasoning"):
            if k in delta:
                return delta[k]
        return None

    def _parse_stream_line(self, line: Union[bytes, str]) -> Optional[dict]:
        """Parse a single SSE line ("data: {...}"); return the JSON dict or None."""
        if not line:
            return None

        line = str(line)
        if not line.startswith("data: "):
            return None

        line = line[6:]
        if line == "[DONE]":
            return None

        try:
            json_data = json.loads(line)
            if not json_data.get("choices"):
                return None

            return json_data
        except json.JSONDecodeError:
            self.console.print("[red]Error decoding response JSON[/red]")
            if self.verbose:
                self.console.print(f"[red]Error JSON data: {line}[/red]")
            return None

    def _process_reasoning_content(self, reason: str, full_completion: str, in_reasoning: bool) -> tuple[str, bool]:
        """Append reasoning text as a markdown blockquote; starts a fresh quote on entry."""
        if not in_reasoning:
            in_reasoning = True
            full_completion = "> Reasoning:\n> "
        full_completion += reason.replace("\n", "\n> ")
        return full_completion, in_reasoning

    def _process_regular_content(self, content: str, full_completion: str, in_reasoning: bool) -> tuple[str, bool]:
        """Append answer text; closes a reasoning blockquote when leaving it."""
        if in_reasoning:
            in_reasoning = False
            full_completion += "\n\n"
        full_completion += content
        return full_completion, in_reasoning

    def _print_stream(self, response: httpx.Response) -> str:
        """Print response from LLM in streaming mode; returns the full text."""
        self.console.print("Assistant:", style="bold green")
        full_content = ""
        in_reasoning = False
        # Alternate between "_" and " " to render a blinking cursor.
        cursor_chars = ["_", " "]
        cursor_index = 0

        with Live(console=self.console) as live:
            for line in response.iter_lines():
                json_data = self._parse_stream_line(line)
                if not json_data:
                    continue

                delta = json_data["choices"][0]["delta"]
                reason = self.get_reasoning_content(delta)

                if reason is not None:
                    full_content, in_reasoning = self._process_reasoning_content(reason, full_content, in_reasoning)
                else:
                    full_content, in_reasoning = self._process_regular_content(
                        delta.get("content", "") or "", full_content, in_reasoning
                    )

                cursor = cursor_chars[cursor_index]
                live.update(
                    Markdown(markup=full_content + cursor, code_theme=self.config["CODE_THEME"]),
                    refresh=True,
                )
                cursor_index = (cursor_index + 1) % 2
                time.sleep(0.005)  # Slow down the printing speed, avoiding screen flickering
            live.update(Markdown(markup=full_content, code_theme=self.config["CODE_THEME"]), refresh=True)
        return full_content

    def _print_normal(self, response: httpx.Response) -> str:
        """Print response from LLM in non-streaming mode; returns the answer text."""
        self.console.print("Assistant:", style="bold green")
        full_content = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
        if full_content is None:
            # BUG FIX: ANSWER_PATH may not match the payload; avoid TypeError on None + str.
            full_content = ""
        self.console.print(Markdown(full_content + "\n", code_theme=self.config["CODE_THEME"]))
        return full_content

    def get_prompt_tokens(self) -> list[tuple[str, str]]:
        """Return prompt tokens for current mode"""
        qmark = "💬" if self.current_mode == CHAT_MODE else "🚀" if self.current_mode == EXEC_MODE else ""
        return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]

    def _check_history_len(self) -> None:
        """Check history length and remove oldest messages if necessary"""
        if len(self.history) > self.max_history_length:
            self.history = self.history[-self.max_history_length :]

    def _handle_special_commands(self, user_input: str) -> Optional[bool]:
        """Handle special command return: True-continue loop, False-exit loop, None-non-special command"""
        if user_input.lower() == CMD_EXIT:
            return False
        if user_input.lower() == CMD_CLEAR and self.current_mode == CHAT_MODE:
            self.history.clear()
            self.console.print("Chat history cleared\n", style="bold yellow")
            return True
        if user_input.lower() == CMD_HISTORY:
            self.console.print(self.history)
            return True
        return None

    def _confirm_and_execute(self, content: str) -> None:
        """Review, edit and execute the command"""
        cmd = self._filter_command(content)
        if not cmd:
            self.console.print("No command generated", style="bold red")
            return
        self.console.print(Panel(cmd, title="Command", title_align="left", border_style="bold magenta", expand=False))
        _input = Prompt.ask(
            r"Execute command? \[e]dit, \[y]es, \[n]o",
            choices=["y", "n", "e"],
            default="n",
            case_sensitive=False,
            show_choices=False,
        )
        if _input == "y":  # execute cmd
            self.console.print("Output:", style="bold green")
            subprocess.call(cmd, shell=True)
        elif _input == "e":  # edit cmd
            cmd = prompt("Edit command, press enter to execute:\n", default=cmd)
            self.console.print("Output:", style="bold green")
            subprocess.call(cmd, shell=True)

    def _build_messages(self, user_input: str) -> list[dict[str, str]]:
        """Assemble the chat payload: system prompt + prior history + new user turn."""
        return [
            {"role": "system", "content": self.get_system_prompt()},
            *self.history,
            {"role": "user", "content": user_input},
        ]

    def _handle_llm_response(self, response: httpx.Response, user_input: str) -> str:
        """Print LLM response and update history"""
        content = self._print_stream(response) if self.config["STREAM"] else self._print_normal(response)
        self.history.extend([{"role": "user", "content": user_input}, {"role": "assistant", "content": content}])
        self._check_history_len()
        return content

    def _process_user_input(self, user_input: str) -> bool:
        """Process user input and generate response; returns False on error."""
        try:
            response = self.post(self._build_messages(user_input))
            content = self._handle_llm_response(response, user_input)
            if self.current_mode == EXEC_MODE:
                self._confirm_and_execute(content)
            return True
        except Exception as e:
            self.console.print(f"Error: {e}", style="red")
            return False

    def get_system_prompt(self) -> str:
        """Return system prompt for current mode"""
        prompt = SHELL_PROMPT if self.current_mode == EXEC_MODE else DEFAULT_PROMPT
        return prompt.format(_os=self.detect_os(), _shell=self.detect_shell())

    def _run_repl(self) -> None:
        """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
        self.prepare_chat_loop()
        self.console.print("""
 ██ ██ █████ ██ ██████ ██ ██
 ██ ██ ██ ██ ██ ██ ██ ██
 ████ ███████ ██ ██ ██ ██
 ██ ██ ██ ██ ██ ██ ██
 ██ ██ ██ ██ ██████ ███████ ██
""")
        self.console.print("↑/↓: navigate in history")
        self.console.print("Press TAB to change in chat and exec mode", style="bold")
        self.console.print("Type /clear to clear chat history", style="bold")
        self.console.print("Type /his to see chat history", style="bold")
        self.console.print("Press Ctrl+C or type /exit to exit\n", style="bold")

        while True:
            self.console.print(Markdown("---"))
            user_input = self.session.prompt(self.get_prompt_tokens).strip()
            if not user_input:
                continue

            # CONSISTENCY FIX: delegate /exit, /clear and /his to the shared
            # helper instead of duplicating its logic inline.
            special = self._handle_special_commands(user_input)
            if special is False:
                break
            if special is True:
                continue

            if not self._process_user_input(user_input):
                continue

        self.console.print("[bold green]Exiting...[/bold green]")

    def _run_once(self, prompt: str, shell: bool = False) -> None:
        """Run once with given prompt"""

        try:
            response = self.post(self._build_messages(prompt))
            content = self._handle_llm_response(response, prompt)
            if shell:
                self._confirm_and_execute(content)
        except Exception as e:
            self.console.print(f"[red]Error: {e}[/red]")

    def run(self, chat: bool, shell: bool, prompt: str) -> None:
        """Run the CLI"""
        self.load_config()
        if self.verbose:
            self.console.print(f"CODE_THEME: {self.config['CODE_THEME']}")
            self.console.print(f"ANSWER_PATH: {self.config['ANSWER_PATH']}")
            self.console.print(f"COMPLETION_PATH: {self.config['COMPLETION_PATH']}")
            self.console.print(f"BASE_URL: {self.config['BASE_URL']}")
            self.console.print(f"MODEL: {self.config['MODEL']}")
            self.console.print(f"SHELL_NAME: {self.config['SHELL_NAME']}")
            self.console.print(f"OS_NAME: {self.config['OS_NAME']}")
            self.console.print(f"STREAM: {self.config['STREAM']}")
            self.console.print(f"TEMPERATURE: {self.config['TEMPERATURE']}")
            self.console.print(f"TOP_P: {self.config['TOP_P']}")
            self.console.print(f"MAX_TOKENS: {self.config['MAX_TOKENS']}")
        if not self.config.get("API_KEY"):
            # BUG FIX: the env var is YAI_API_KEY (see DEFAULT_CONFIG_MAP), not AI_API_KEY.
            self.console.print(
                "[yellow]API key not set. Please set in ~/.config/yaicli/config.ini or YAI_API_KEY env[/]"
            )
            raise typer.Exit(code=1)
        if chat:
            self.current_mode = CHAT_MODE
            self._run_repl()
        else:
            self.current_mode = EXEC_MODE if shell else TEMP_MODE
            self._run_once(prompt, shell)
622
-
623
-
624
@app.command()
def main(
    ctx: typer.Context,
    prompt: Annotated[Optional[str], typer.Argument(show_default=False, help="The prompt sent to the LLM")] = None,
    chat: Annotated[
        bool, typer.Option("--chat", "-c", help="Start in chat mode", rich_help_panel="Run Options")
    ] = False,
    shell: Annotated[
        bool,
        typer.Option(
            "--shell",
            "-s",
            help="Generate and execute shell command",
            rich_help_panel="Run Options",
        ),
    ] = False,
    verbose: Annotated[
        bool,
        typer.Option("--verbose", "-V", help="Show verbose information", rich_help_panel="Run Options"),
    ] = False,
    template: Annotated[bool, typer.Option("--template", help="Show the config template.")] = False,
):
    """yaicli - Your AI interface in cli."""
    # BUG FIX: --template is independent of any prompt; handle it first so it
    # works even when stdin is piped or the prompt is empty.
    if template:
        typer.echo(DEFAULT_CONFIG_INI)
        return

    # Check for stdin input (from pipe or redirect)
    if not sys.stdin.isatty():
        stdin_content = sys.stdin.read()
        # BUG FIX: a missing positional prompt was previously interpolated as
        # the literal string "None"; only join when a prompt was actually given.
        prompt = f"{stdin_content}\n\n{prompt}" if prompt else stdin_content

    if prompt == "":
        typer.echo("Empty prompt, ignored")
        return
    if not prompt and not chat:
        typer.echo(ctx.get_help())
        return

    cli = CLI(verbose=verbose)
    cli.run(chat=chat, shell=shell, prompt=prompt or "")
664
-
665
-
666
# Allow running the module directly: `python yaicli.py ...`
if __name__ == "__main__":
    app()