yaicli-0.0.11-py3-none-any.whl → yaicli-0.0.13-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- pyproject.toml +1 -1
- {yaicli-0.0.11.dist-info → yaicli-0.0.13.dist-info}/METADATA +85 -29
- yaicli-0.0.13.dist-info/RECORD +7 -0
- yaicli.py +134 -130
- yaicli-0.0.11.dist-info/RECORD +0 -7
- {yaicli-0.0.11.dist-info → yaicli-0.0.13.dist-info}/WHEEL +0 -0
- {yaicli-0.0.11.dist-info → yaicli-0.0.13.dist-info}/entry_points.txt +0 -0
- {yaicli-0.0.11.dist-info → yaicli-0.0.13.dist-info}/licenses/LICENSE +0 -0
pyproject.toml
CHANGED

{yaicli-0.0.11.dist-info → yaicli-0.0.13.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: yaicli
- Version: 0.0.11
+ Version: 0.0.13
  Summary: A simple CLI tool to interact with LLM
  Project-URL: Homepage, https://github.com/belingud/yaicli
  Project-URL: Repository, https://github.com/belingud/yaicli
@@ -329,14 +329,17 @@ MAX_TOKENS=1024

  Below are the available configuration options and override environment variables:

- - **BASE_URL**: API endpoint URL (default: OpenAI API), env:
- - **API_KEY**: Your API key for the LLM provider, env:
- - **MODEL**: The model to use (e.g., gpt-4o, gpt-3.5-turbo), default: gpt-4o, env:
- - **SHELL_NAME**: Shell to use (auto for automatic detection), default: auto, env:
- - **OS_NAME**: OS to use (auto for automatic detection), default: auto, env:
- - **COMPLETION_PATH**: Path for completions endpoint, default: /chat/completions, env:
- - **ANSWER_PATH**: Json path expression to extract answer from response, default: choices[0].message.content, env:
- - **STREAM**: Enable/disable streaming responses, default: true, env:
+ - **BASE_URL**: API endpoint URL (default: OpenAI API), env: YAI_BASE_URL
+ - **API_KEY**: Your API key for the LLM provider, env: YAI_API_KEY
+ - **MODEL**: The model to use (e.g., gpt-4o, gpt-3.5-turbo), default: gpt-4o, env: YAI_MODEL
+ - **SHELL_NAME**: Shell to use (auto for automatic detection), default: auto, env: YAI_SHELL_NAME
+ - **OS_NAME**: OS to use (auto for automatic detection), default: auto, env: YAI_OS_NAME
+ - **COMPLETION_PATH**: Path for completions endpoint, default: /chat/completions, env: YAI_COMPLETION_PATH
+ - **ANSWER_PATH**: Json path expression to extract answer from response, default: choices[0].message.content, env: YAI_ANSWER_PATH
+ - **STREAM**: Enable/disable streaming responses, default: true, env: YAI_STREAM
+ - **TEMPERATURE**: Temperature for response generation (default: 0.7), env: YAI_TEMPERATURE
+ - **TOP_P**: Top-p sampling for response generation (default: 1.0), env: YAI_TOP_P
+ - **MAX_TOKENS**: Maximum number of tokens for response generation (default: 1024), env: YAI_MAX_TOKENS

  Default config of `COMPLETION_PATH` and `ANSWER_PATH` is OpenAI compatible. If you are using OpenAI or other OpenAI compatible LLM provider, you can use the default config.
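`ANSWER_PATH` is a JMESPath expression (the `jmespath` import is visible in the yaicli.py diff below). A minimal sketch of how the default `choices[0].message.content` pulls the answer out of an OpenAI-style response body, with made-up data:

```python
import jmespath

# Illustrative OpenAI-style response body (not real API output)
response_json = {
    "choices": [
        {"message": {"role": "assistant", "content": "The capital of France is Paris."}}
    ]
}

# The default ANSWER_PATH; a non-OpenAI provider could override it via YAI_ANSWER_PATH
answer = jmespath.search("choices[0].message.content", response_json)
print(answer)  # The capital of France is Paris.
```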
@@ -410,33 +413,41 @@ ai --verbose "Explain quantum computing"

  ### Command Line Options

+ Arguments:
  - `<PROMPT>`: Argument
-
-
- - `--shell` or `-s`: Generate and execute shell command
+
+ Options:
  - `--install-completion`: Install completion for the current shell
  - `--show-completion`: Show completion for the current shell, to copy it or customize the installation
  - `--help` or `-h`: Show this message and exit
+ - `--template`: Show the config template.
+
+ Run Options:
+ - `--verbose` or `-V`: Show verbose information
+ - `--chat` or `-c`: Start in chat mode
+ - `--shell` or `-s`: Generate and execute shell command

  ```bash
  ai -h

-
-
- yaicli
-
- ╭─ Arguments
- │ prompt [PROMPT] The prompt send to the LLM
-
- ╭─ Options
- │ --
- │ --
- │ --
- │ --
-
-
-
-
+ Usage: ai [OPTIONS] [PROMPT]
+
+ yaicli - Your AI interface in cli.
+
+ ╭─ Arguments ──────────────────────────────────────────────────────────────────────────────────────╮
+ │   prompt      [PROMPT]  The prompt send to the LLM                                               │
+ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
+ ╭─ Options ────────────────────────────────────────────────────────────────────────────────────────╮
+ │ --template                  Show the config template.                                            │
+ │ --install-completion        Install completion for the current shell.                            │
+ │ --show-completion           Show completion for the current shell, to copy it or customize the   │
+ │                             installation.                                                        │
+ │ --help  -h                  Show this message and exit.                                          │
+ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
+ ╭─ Run Option ─────────────────────────────────────────────────────────────────────────────────────╮
+ │ --chat     -c               Start in chat mode                                                   │
+ │ --shell    -s               Generate and execute shell command                                   │
+ │ --verbose  -V               Show verbose information                                             │
+ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯

  ```
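The grouped "Run Option" panel in the help output above is produced by typer's `rich_help_panel` option attribute, which the new `main()` signature in the yaicli.py diff below uses. A minimal standalone sketch of the same pattern, with illustrative option names:

```python
from typing import Annotated, Optional

import typer

app = typer.Typer()


@app.command()
def main(
    prompt: Annotated[Optional[str], typer.Argument(show_default=False, help="The prompt send to the LLM")] = None,
    # Options sharing a rich_help_panel value are rendered in their own box in --help
    chat: Annotated[bool, typer.Option("--chat", "-c", help="Start in chat mode", rich_help_panel="Run Options")] = False,
    shell: Annotated[bool, typer.Option("--shell", "-s", help="Generate and execute shell command", rich_help_panel="Run Options")] = False,
):
    typer.echo(f"chat={chat}, shell={shell}, prompt={prompt}")


if __name__ == "__main__":
    app()
```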
@@ -459,10 +470,45 @@ In Execute mode:

  ## Examples

+ ### Have a Chat
+
+ ```bash
+ $ ai "What is the capital of France?"
+ Assistant:
+ The capital of France is Paris.
+ ```
+
+ ### Command Gen and Run
+
+ ```bash
+ $ ai -s 'Check the current directory size'
+ Assistant:
+ du -sh .
+
+ Generated command: du -sh .
+ Execute this command? [y/n/e] (n): e
+ Edit command, press enter to execute:
+ du -sh ./
+ Output:
+ 109M ./
+ ```
+
  ### Chat Mode Example

  ```bash
  $ ai --chat
+
+  ██    ██  █████  ██  ██████ ██      ██
+   ██  ██  ██   ██ ██ ██      ██      ██
+    ████   ███████ ██ ██      ██      ██
+     ██    ██   ██ ██ ██      ██      ██
+     ██    ██   ██ ██  ██████ ███████ ██
+
+ Press TAB to change in chat and exec mode
+ Type /clear to clear chat history
+ Type /his to see chat history
+ Press Ctrl+C or type /exit to exit
+
  💬 > Tell me about the solar system

  Assistant:
@@ -481,7 +527,17 @@ Certainly! Here’s a brief overview of the solar system:

  • Dwarf Planets:
  • Pluto: Once considered the ninth planet, now classified as

-
+ 🚀 > Check the current directory size
+ Assistant:
+ du -sh .
+
+ Generated command: du -sh .
+ Execute this command? [y/n/e] (n): e
+ Edit command, press enter to execute:
+ du -sh ./
+ Output:
+ 109M ./
+ 🚀 >
  ```

  ### Execute Mode Example
yaicli-0.0.13.dist-info/RECORD
ADDED

@@ -0,0 +1,7 @@
+ pyproject.toml,sha256=15od1R0Bb-b7YKSSlz1SmzGoaNNbfHgv8y5Zr0gXfBU,1452
+ yaicli.py,sha256=Cby2e0HHoh7sAOIvAxEKoZA0TRS3A3ikkfZ6o3bem0o,20955
+ yaicli-0.0.13.dist-info/METADATA,sha256=5Yc9O8k_N66OpBTqKG9kVGUvXzj2-L3UrXaiZziWfVU,29445
+ yaicli-0.0.13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ yaicli-0.0.13.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
+ yaicli-0.0.13.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ yaicli-0.0.13.dist-info/RECORD,,
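Each RECORD row has the form `path,algorithm=urlsafe-base64-digest,size`, with the digest unpadded; the RECORD file lists itself with empty hash and size fields. A minimal sketch, not part of the package, of verifying one row against an installed file:

```python
import base64
import hashlib
from pathlib import Path


def verify_record_row(root: Path, row: str) -> bool:
    """Check one RECORD row of the form path,algo=digest,size."""
    path, hash_field, size = row.rsplit(",", 2)
    if not hash_field:  # RECORD lists itself with empty hash and size
        return True
    algo, _, expected = hash_field.partition("=")
    data = (root / path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest()).rstrip(b"=").decode()
    return digest == expected and len(data) == int(size)


# e.g. verify_record_row(site_packages_dir, "yaicli.py,sha256=Cby2e0...,20955")
```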
yaicli.py
CHANGED

@@ -12,12 +12,14 @@ import jmespath
  import typer
  from distro import name as distro_name
  from prompt_toolkit import PromptSession
+ from prompt_toolkit.completion import WordCompleter
+ from prompt_toolkit.history import FileHistory
  from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
  from prompt_toolkit.keys import Keys
  from rich.console import Console
  from rich.live import Live
  from rich.markdown import Markdown
- from rich.prompt import Confirm
+ from rich.prompt import Prompt

  SHELL_PROMPT = """Your are a Shell Command Generator.
  Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
@@ -37,20 +39,24 @@ DEFAULT_PROMPT = (

  CMD_CLEAR = "/clear"
  CMD_EXIT = "/exit"
+ CMD_HISTORY = "/his"

  EXEC_MODE = "exec"
  CHAT_MODE = "chat"
  TEMP_MODE = "temp"

  DEFAULT_CONFIG_MAP = {
-     "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "
-     "API_KEY": {"value": "", "env_key": "
-     "MODEL": {"value": "gpt-4o", "env_key": "
-     "SHELL_NAME": {"value": "auto", "env_key": "
-     "OS_NAME": {"value": "auto", "env_key": "
-     "COMPLETION_PATH": {"value": "chat/completions", "env_key": "
-     "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "
-     "STREAM": {"value": "true", "env_key": "
+     "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "YAI_BASE_URL"},
+     "API_KEY": {"value": "", "env_key": "YAI_API_KEY"},
+     "MODEL": {"value": "gpt-4o", "env_key": "YAI_MODEL"},
+     "SHELL_NAME": {"value": "auto", "env_key": "YAI_SHELL_NAME"},
+     "OS_NAME": {"value": "auto", "env_key": "YAI_OS_NAME"},
+     "COMPLETION_PATH": {"value": "chat/completions", "env_key": "YAI_COMPLETION_PATH"},
+     "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "YAI_ANSWER_PATH"},
+     "STREAM": {"value": "true", "env_key": "YAI_STREAM"},
+     "TEMPERATURE": {"value": "0.7", "env_key": "YAI_TEMPERATURE"},
+     "TOP_P": {"value": "1.0", "env_key": "YAI_TOP_P"},
+     "MAX_TOKENS": {"value": "1024", "env_key": "YAI_MAX_TOKENS"},
  }

  DEFAULT_CONFIG_INI = """[core]
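Each entry pairs a built-in default with the environment variable that overrides it. A minimal sketch of the usual env-over-file-over-default resolution such a map supports (illustrative, not necessarily yaicli's exact loader):

```python
import os

# Trimmed copy of DEFAULT_CONFIG_MAP above, for illustration
DEFAULTS = {
    "MODEL": {"value": "gpt-4o", "env_key": "YAI_MODEL"},
    "STREAM": {"value": "true", "env_key": "YAI_STREAM"},
}


def resolve(defaults: dict, file_config: dict) -> dict:
    """Environment variable wins, then the config file value, then the built-in default."""
    return {
        key: os.getenv(info["env_key"], file_config.get(key, info["value"]))
        for key, info in defaults.items()
    }


print(resolve(DEFAULTS, {"MODEL": "gpt-3.5-turbo"}))
# Without env overrides: {'MODEL': 'gpt-3.5-turbo', 'STREAM': 'true'}
```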
@@ -99,10 +105,27 @@ class CLI:
          self.bindings = KeyBindings()
          self.session = PromptSession(key_bindings=self.bindings)
          self.config = {}
-         self.history = []
+         self.history: list[dict[str, str]] = []
          self.max_history_length = 25
          self.current_mode = TEMP_MODE

+     def is_stream(self) -> bool:
+         """Check if streaming is enabled"""
+         return self.config["STREAM"] == "true"
+
+     def prepare_chat_loop(self) -> None:
+         """Setup key bindings and history for chat mode"""
+         self._setup_key_bindings()
+         # Initialize history
+         Path("~/.yaicli_history").expanduser().touch(exist_ok=True)
+         self.session = PromptSession(
+             key_bindings=self.bindings,
+             completer=WordCompleter(["/clear", "/exit", "/his"]),
+             complete_while_typing=True,
+             history=FileHistory(Path("~/.yaicli_history").expanduser()),
+             enable_history_search=True,
+         )
+
      def _setup_key_bindings(self) -> None:
          """Setup keyboard shortcuts"""
@@ -216,7 +239,7 @@ class CLI:
          # Join the remaining lines and strip any extra whitespace
          return "\n".join(line.strip() for line in content_lines if line.strip())

-     def
+     def _get_number_with_type(self, key, _type: type, default=None):
          """Get number with type from config"""
          try:
              return _type(self.config.get(key, default))
@@ -229,10 +252,10 @@ class CLI:
          body = {
              "messages": message,
              "model": self.config.get("MODEL", "gpt-4o"),
-             "stream": self.
-             "temperature": self.
-             "top_p": self.
-             "max_tokens": self.
+             "stream": self.is_stream(),
+             "temperature": self._get_number_with_type(key="TEMPERATURE", _type=float, default="0.7"),
+             "top_p": self._get_number_with_type(key="TOP_P", _type=float, default="1.0"),
+             "max_tokens": self._get_number_with_type(key="MAX_TOKENS", _type=int, default="1024"),
          }
          with httpx.Client(timeout=120.0) as client:
              response = client.post(
@@ -243,8 +266,7 @@ class CLI:
          except httpx.HTTPStatusError as e:
              self.console.print(f"[red]Error calling API: {e}[/red]")
              if self.verbose:
-                 self.console.print(f"Reason: {e}")
-                 self.console.print(f"Response: {response.text}")
+                 self.console.print(f"Reason: {e}\nResponse: {response.text}")
              raise e
          return response

@@ -300,6 +322,7 @@ class CLI:

      def _print_stream(self, response: httpx.Response) -> str:
          """Print response from LLM in streaming mode"""
+         self.console.print("Assistant:", style="bold green")
          full_completion = ""
          in_reasoning = False

@@ -317,40 +340,24 @@ class CLI:
                          reason, full_completion, in_reasoning
                      )
                  else:
-                     content = delta.get("content", "") or ""
                      full_completion, in_reasoning = self._process_regular_content(
-                         content, full_completion, in_reasoning
+                         delta.get("content", "") or "", full_completion, in_reasoning
                      )

                  live.update(Markdown(markup=full_completion), refresh=True)
-
+         # self.console.print()
          return full_completion

-     def
+     def _print_normal(self, response: httpx.Response) -> str:
          """Print response from LLM in non-streaming mode"""
+         self.console.print("Assistant:", style="bold green")
          full_completion = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
-         self.console.print(Markdown(full_completion))
-         return full_completion
-
-     def _print(self, response: httpx.Response, stream: bool = True) -> str:
-         """Print response from LLM and return full completion"""
-         if stream:
-             # Streaming response
-             full_completion = self._print_stream(response)
-         else:
-             # Non-streaming response
-             full_completion = self._print_non_stream(response)
-         self.console.print()  # Add a newline after the response to separate from the next input
+         self.console.print(Markdown(full_completion + '\n'))
          return full_completion

      def get_prompt_tokens(self) -> list[tuple[str, str]]:
          """Return prompt tokens for current mode"""
-         if self.current_mode == CHAT_MODE
-             qmark = "💬"
-         elif self.current_mode == EXEC_MODE:
-             qmark = "🚀"
-         else:
-             qmark = ""
+         qmark = "💬" if self.current_mode == CHAT_MODE else "🚀" if self.current_mode == EXEC_MODE else ""
          return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]

      def _check_history_len(self) -> None:
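The hunks above implement streamed rendering: accumulate each delta and re-render the whole answer as Markdown inside a rich `Live` region. A minimal sketch of that pattern, assuming an OpenAI-style `data: {...}` SSE body; URL, headers, and payload handling are illustrative, not yaicli's exact code:

```python
import json

import httpx
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

console = Console()


def print_stream(url: str, headers: dict, body: dict) -> str:
    """Accumulate streamed delta content and live-render it as Markdown."""
    full_completion = ""
    with httpx.Client(timeout=120.0) as client:
        with client.stream("POST", url, headers=headers, json=body) as response:
            response.raise_for_status()
            with Live(console=console) as live:
                for line in response.iter_lines():
                    if not line.startswith("data: ") or line == "data: [DONE]":
                        continue  # skip keep-alives and the end-of-stream marker
                    delta = json.loads(line[len("data: "):])["choices"][0].get("delta", {})
                    full_completion += delta.get("content") or ""
                    live.update(Markdown(full_completion), refresh=True)
    return full_completion
```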
@@ -358,10 +365,69 @@ class CLI:
          if len(self.history) > self.max_history_length:
              self.history = self.history[-self.max_history_length :]

+     def _handle_special_commands(self, user_input: str) -> Optional[bool]:
+         """Handle special command return: True-continue loop, False-exit loop, None-non-special command"""
+         if user_input.lower() == CMD_EXIT:
+             return False
+         if user_input.lower() == CMD_CLEAR and self.current_mode == CHAT_MODE:
+             self.history.clear()
+             self.console.print("Chat history cleared\n", style="bold yellow")
+             return True
+         if user_input.lower() == CMD_HISTORY:
+             self.console.print(self.history)
+             return True
+         return None
+
+     def _confirm_and_execute(self, content: str) -> None:
+         """Review, edit and execute the command"""
+         cmd = self._filter_command(content)
+         if not cmd:
+             self.console.print("No command generated", style="bold red")
+             return
+         self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {cmd}")
+         _input = Prompt.ask("Execute this command?", choices=['y', 'n', 'e'], default="n", case_sensitive=False)
+         if _input == 'y':  # execute cmd
+             self.console.print("Output:", style="bold green")
+             subprocess.call(cmd, shell=True)
+         elif _input == 'e':  # edit cmd
+             cmd = self.session.prompt("Edit command, press enter to execute:\n", key_bindings=None, default=cmd)
+             self.console.print("Output:", style="bold green")
+             subprocess.call(cmd, shell=True)
+
+     def _build_messages(self, user_input: str) -> list[dict[str, str]]:
+         return [
+             {"role": "system", "content": self.get_system_prompt()},
+             *self.history,
+             {"role": "user", "content": user_input}
+         ]
+
+     def _handle_llm_response(self, response: httpx.Response, user_input: str) -> str:
+         """Print LLM response and update history"""
+         content = self._print_stream(response) if self.is_stream() else self._print_normal(response)
+         self.history.extend([{"role": "user", "content": user_input}, {"role": "assistant", "content": content}])
+         self._check_history_len()
+         return content
+
+     def _process_user_input(self, user_input: str) -> bool:
+         """Process user input and generate response"""
+         try:
+             response = self.post(self._build_messages(user_input))
+             content = self._handle_llm_response(response, user_input)
+             if self.current_mode == EXEC_MODE:
+                 self._confirm_and_execute(content)
+             return True
+         except Exception as e:
+             self.console.print(f"Error: {e}", style="red")
+             return False
+
+     def get_system_prompt(self) -> str:
+         """Return system prompt for current mode"""
+         prompt = SHELL_PROMPT if self.current_mode == EXEC_MODE else DEFAULT_PROMPT
+         return prompt.format(_os=self.detect_os(), _shell=self.detect_shell())
+
      def _run_repl(self) -> None:
          """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
-
-         self._setup_key_bindings()
+         self.prepare_chat_loop()
          self.console.print("""
   ██    ██  █████  ██  ██████ ██      ██
    ██  ██  ██   ██ ██ ██      ██      ██
@@ -369,13 +435,13 @@ class CLI:
      ██    ██   ██ ██ ██      ██      ██
      ██    ██   ██ ██  ██████ ███████ ██
  """)
-         self.console.print("
-         self.console.print("
-         self.console.print("
-         self.console.print("
+         self.console.print("Press TAB to change in chat and exec mode", style="bold")
+         self.console.print("Type /clear to clear chat history", style="bold")
+         self.console.print("Type /his to see chat history", style="bold")
+         self.console.print("Press Ctrl+C or type /exit to exit\n", style="bold")

          while True:
-
+             self.console.print(Markdown("---"))
              user_input = self.session.prompt(self.get_prompt_tokens).strip()
              if not user_input:
                  continue
@@ -389,110 +455,38 @@ class CLI:
                  self.history = []
                  self.console.print("[bold yellow]Chat history cleared[/bold yellow]\n")
                  continue
-             elif user_input.lower() ==
+             elif user_input.lower() == CMD_HISTORY:
                  self.console.print(self.history)
                  continue
-
-             system_prompt = SHELL_PROMPT if self.current_mode == EXEC_MODE else DEFAULT_PROMPT
-             system_content = system_prompt.format(_os=self.detect_os(), _shell=self.detect_shell())
-
-             # Create message with system prompt and history
-             message = [{"role": "system", "content": system_content}]
-             message.extend(self.history)
-
-             # Add current user message
-             message.append({"role": "user", "content": user_input})
-
-             # Get response from LLM
-             try:
-                 response = self.post(message)
-             except ValueError as e:
-                 self.console.print(f"[red]Error: {e}[/red]")
-                 return
-             except httpx.ConnectError as e:
-                 self.console.print(f"[red]Error: {e}[/red]")
-                 continue
-             except httpx.HTTPStatusError:
-                 continue
-             self.console.print("\n[bold green]Assistant:[/bold green]")
-             try:
-                 content = self._print(response, stream=self.config["STREAM"] == "true")
-             except Exception as e:
-                 self.console.print(f"[red]Unknown Error: {e}[/red]")
+             if not self._process_user_input(user_input):
                  continue

-             # Add user input and assistant response to history
-             self.history.append({"role": "user", "content": user_input})
-             self.history.append({"role": "assistant", "content": content})
-
-             # Trim history if needed
-             self._check_history_len()
-
-             # Handle command execution in exec mode
-             if self.current_mode == EXEC_MODE:
-                 content = self._filter_command(content)
-                 if not content:
-                     self.console.print("[bold red]No command generated[/bold red]")
-                     continue
-                 self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
-                 if Confirm.ask("Execute this command?", default=False):
-                     subprocess.call(content, shell=True)
-
          self.console.print("[bold green]Exiting...[/bold green]")

      def _run_once(self, prompt: str, shell: bool = False) -> None:
          """Run once with given prompt"""
-         _os = self.detect_os()
-         _shell = self.detect_shell()
-         # Create appropriate system prompt based on mode
-         system_prompt = SHELL_PROMPT if shell else DEFAULT_PROMPT
-         system_content = system_prompt.format(_os=_os, _shell=_shell)
-
-         # Create message with system prompt and user input
-         message = [
-             {"role": "system", "content": system_content},
-             {"role": "user", "content": prompt},
-         ]

-         # Get response from LLM
          try:
-             response = self.post(
-
-
-
+             response = self.post(self._build_messages(prompt))
+             content = self._handle_llm_response(response, prompt)
+             if shell:
+                 self._confirm_and_execute(content)
          except Exception as e:
-             self.console.print(f"[red]
-             return
-             self.console.print("\n[bold green]Assistant:[/bold green]")
-             content = self._print(response, stream=self.config["STREAM"] == "true")
-
-             # Handle shell mode execution
-             if shell:
-                 content = self._filter_command(content)
-                 if not content:
-                     self.console.print("[bold red]No command generated[/bold red]")
-                     return
-                 self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
-                 if Confirm.ask("Execute this command?", default=False):
-                     returncode = subprocess.call(content, shell=True)
-                     if returncode != 0:
-                         self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")
+             self.console.print(f"[red]Error: {e}[/red]")

      def run(self, chat: bool, shell: bool, prompt: str) -> None:
          """Run the CLI"""
          self.load_config()
          if not self.config.get("API_KEY"):
-             self.console.print("[bold red]API key not set[/bold red]")
              self.console.print(
-                 "[
+                 "[yellow]API key not set. Please set in ~/.config/yaicli/config.ini or AI_API_KEY env[/]"
              )
              raise typer.Exit(code=1)
-
-         # Handle chat mode
          if chat:
              self.current_mode = CHAT_MODE
              self._run_repl()
          else:
+             self.current_mode = EXEC_MODE if shell else TEMP_MODE
              self._run_once(prompt, shell)

@@ -500,14 +494,24 @@ class CLI:
  def main(
      ctx: typer.Context,
      prompt: Annotated[Optional[str], typer.Argument(show_default=False, help="The prompt send to the LLM")] = None,
-
-
-
+     chat: Annotated[
+         bool, typer.Option("--chat", "-c", help="Start in chat mode", rich_help_panel="Run Options")
+     ] = False,
+     shell: Annotated[
+         bool, typer.Option("--shell", "-s", help="Generate and execute shell command", rich_help_panel="Run Options")
+     ] = False,
+     verbose: Annotated[
+         bool, typer.Option("--verbose", "-V", help="Show verbose information", rich_help_panel="Run Options")
+     ] = False,
+     template: Annotated[bool, typer.Option("--template", help="Show the config template.")] = False,
  ):
      """yaicli - Your AI interface in cli."""
      if prompt == "":
          typer.echo("Empty prompt, ignored")
          return
+     if template:
+         typer.echo(DEFAULT_CONFIG_INI)
+         return
      if not prompt and not chat:
          typer.echo(ctx.get_help())
          return
yaicli-0.0.11.dist-info/RECORD
DELETED

@@ -1,7 +0,0 @@
- pyproject.toml,sha256=X26aCrit45QBISeLMZi0zzjd4M3YOCK_fYA9L-gKmS8,1452
- yaicli.py,sha256=CEQlDJrQn7UEFLpAHbNFyo-chVoFi55P1fblealDhd8,20559
- yaicli-0.0.11.dist-info/METADATA,sha256=0ty4x5vHDyyVJsY5xk4yoII7I3d7d-PhoMWFSS_h-Yg,25934
- yaicli-0.0.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- yaicli-0.0.11.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
- yaicli-0.0.11.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- yaicli-0.0.11.dist-info/RECORD,,
{yaicli-0.0.11.dist-info → yaicli-0.0.13.dist-info}/WHEEL
File without changes

{yaicli-0.0.11.dist-info → yaicli-0.0.13.dist-info}/entry_points.txt
File without changes

{yaicli-0.0.11.dist-info → yaicli-0.0.13.dist-info}/licenses/LICENSE
File without changes