yaicli 0.0.10__tar.gz → 0.0.12__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.0.10
+Version: 0.0.12
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
@@ -214,14 +214,15 @@ Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.9
 Requires-Dist: distro>=1.9.0
+Requires-Dist: httpx>=0.28.1
 Requires-Dist: jmespath>=1.0.1
 Requires-Dist: prompt-toolkit>=3.0.50
-Requires-Dist: requests>=2.32.3
 Requires-Dist: rich>=13.9.4
+Requires-Dist: socksio>=1.0.0
 Requires-Dist: typer>=0.15.2
 Description-Content-Type: text/markdown

-# YAICLI - Your AI Command Line
+# YAICLI - Your AI Interface in Command Line

 [](https://pypi.org/project/yaicli/)

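The dependency change above swaps `requests` for `httpx` and adds `socksio`, the backend httpx uses for SOCKS proxy support (the `httpx[socks]` extra). A minimal sketch of what that combination enables; the proxy address is a hypothetical example:

```python
# With socksio installed, httpx can route traffic through a SOCKS proxy,
# e.g. one picked up from the ALL_PROXY environment variable or passed
# explicitly. The address below is illustrative only.
import httpx

client = httpx.Client(proxy="socks5://127.0.0.1:1080", timeout=120.0)
# response = client.post("https://api.openai.com/v1/chat/completions", json={...})
```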
@@ -318,6 +319,10 @@ ANSWER_PATH=choices[0].message.content
 # true: streaming response
 # false: non-streaming response
 STREAM=true
+
+TEMPERATURE=0.7
+TOP_P=1.0
+MAX_TOKENS=1024
 ```

 ### Configuration Options
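These three new keys surface directly in the request body that the code builds (see the `post()` hunk in `yaicli.py` further down); condensed for illustration:

```python
# Condensed from the post() change below: the string values from config.ini
# are coerced to numbers and sent in the completion request body.
body = {
    "model": "gpt-4o",
    "stream": True,
    "temperature": 0.7,  # from TEMPERATURE
    "top_p": 1.0,        # from TOP_P
    "max_tokens": 1024,  # from MAX_TOKENS
}
```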
@@ -405,33 +410,41 @@ ai --verbose "Explain quantum computing"

 ### Command Line Options

+Arguments:
 - `<PROMPT>`: Argument
-- `--verbose` or `-V`: Show verbose information
-- `--chat` or `-c`: Start in chat mode
-- `--shell` or `-s`: Generate and execute shell command
+
+Options:
 - `--install-completion`: Install completion for the current shell
 - `--show-completion`: Show completion for the current shell, to copy it or customize the installation
 - `--help` or `-h`: Show this message and exit
+- `--template`: Show the config template.
+
+Run Options:
+- `--verbose` or `-V`: Show verbose information
+- `--chat` or `-c`: Start in chat mode
+- `--shell` or `-s`: Generate and execute shell command

 ```bash
 ai -h

-
-
-yaicli
-
-╭─ Arguments
-│ prompt [PROMPT] The prompt send to the LLM
-
-╭─ Options
-│ --
-│ --
-│ --
-│ --
-
-
-
-
+Usage: ai [OPTIONS] [PROMPT]
+
+ yaicli - Your AI interface in cli.
+
+╭─ Arguments ───────────────────────────────────────────────────────────────────────╮
+│ prompt [PROMPT] The prompt send to the LLM │
+╰────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ──────────────────────────────────────────────────────────────────────────╮
+│ --template Show the config template. │
+│ --install-completion Install completion for the current shell. │
+│ --show-completion Show completion for the current shell, to copy it or customize the installation. │
+│ --help -h Show this message and exit. │
+╰────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Run Option ───────────────────────────────────────────────────────────────────────╮
+│ --chat -c Start in chat mode │
+│ --shell -s Generate and execute shell command │
+│ --verbose -V Show verbose information │
+╰────────────────────────────────────────────────────────────────────────────────────╯

 ```

@@ -514,4 +527,4 @@ Contributions of code, issue reports, or feature suggestions are welcome.

 ---

-*YAICLI - Making your terminal smarter*
+*YAICLI - Making your terminal smarter*
README.md

The README.md shipped in the sdist changes identically to the README body embedded in PKG-INFO above; the same four hunks repeat at @@ -1,4 +1,4 @@, @@ -95,6 +95,10 @@, @@ -182,33 +186,41 @@, and @@ -291,4 +303,4 @@.
pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "yaicli"
-version = "0.0.10"
+version = "0.0.12"
 description = "A simple CLI tool to interact with LLM"
 authors = [{ name = "belingud", email = "im.victor@qq.com" }]
 readme = "README.md"
@@ -31,10 +31,11 @@ keywords = [
 ]
 dependencies = [
     "distro>=1.9.0",
+    "httpx>=0.28.1",
     "jmespath>=1.0.1",
     "prompt-toolkit>=3.0.50",
-    "requests>=2.32.3",
     "rich>=13.9.4",
+    "socksio>=1.0.0",
     "typer>=0.15.2",
 ]
 [project.urls]
@@ -45,17 +46,22 @@ Documentation = "https://github.com/belingud/yaicli"
 [project.scripts]
 ai = "yaicli:app"

+[tool.pdm.scripts]
+bump = "bump2version {args}"
+changelog = "just changelog"
+
 [tool.uv]
 resolution = "highest"

 [dependency-groups]
 dev = ["bump2version>=1.0.1", "pytest>=8.3.5", "ruff>=0.11.2"]

+[tool.isort]
+profile = "black"
+
 [tool.ruff]
 line-length = 120
-
-ignore = ["E501"]
-
+fix = true

 [build-system]
 requires = ["hatchling>=1.18.0"]
yaicli.py

@@ -5,13 +5,15 @@ import subprocess
 from os import getenv
 from os.path import basename, pathsep
 from pathlib import Path
-from typing import Annotated, Optional
+from typing import Annotated, Optional, Union

+import httpx
 import jmespath
-import requests
 import typer
 from distro import name as distro_name
 from prompt_toolkit import PromptSession
+from prompt_toolkit.completion import WordCompleter
+from prompt_toolkit.history import FileHistory
 from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
 from prompt_toolkit.keys import Keys
 from rich.console import Console
@@ -37,6 +39,7 @@ DEFAULT_PROMPT = (

 CMD_CLEAR = "/clear"
 CMD_EXIT = "/exit"
+CMD_HISTORY = "/his"

 EXEC_MODE = "exec"
 CHAT_MODE = "chat"
@@ -53,23 +56,7 @@ DEFAULT_CONFIG_MAP = {
     "STREAM": {"value": "true", "env_key": "AI_STREAM"},
 }

-app = typer.Typer(
-    name="yaicli",
-    context_settings={"help_option_names": ["-h", "--help"]},
-    pretty_exceptions_enable=False,
-)
-
-
-class CasePreservingConfigParser(configparser.RawConfigParser):
-    """Case preserving config parser"""
-
-    def optionxform(self, optionstr):
-        return optionstr
-
-
-class CLI:
-    CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
-    DEFAULT_CONFIG_INI = """[core]
+DEFAULT_CONFIG_INI = """[core]
 PROVIDER=openai
 BASE_URL=https://api.openai.com/v1
 API_KEY=
@@ -86,7 +73,28 @@ ANSWER_PATH=choices[0].message.content

 # true: streaming response
 # false: non-streaming response
-STREAM=true"""
+STREAM=true
+
+TEMPERATURE=0.7
+TOP_P=1.0
+MAX_TOKENS=1024"""
+
+app = typer.Typer(
+    name="yaicli",
+    context_settings={"help_option_names": ["-h", "--help"]},
+    pretty_exceptions_enable=False,
+)
+
+
+class CasePreservingConfigParser(configparser.RawConfigParser):
+    """Case preserving config parser"""
+
+    def optionxform(self, optionstr):
+        return optionstr
+
+
+class CLI:
+    CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()

     def __init__(self, verbose: bool = False) -> None:
         self.verbose = verbose
@@ -98,6 +106,19 @@ STREAM=true"""
         self.max_history_length = 25
         self.current_mode = TEMP_MODE

+    def prepare_chat_loop(self) -> None:
+        """Setup key bindings and history for chat mode"""
+        self._setup_key_bindings()
+        # Initialize history
+        Path("~/.yaicli_history").expanduser().touch(exist_ok=True)
+        self.session = PromptSession(
+            key_bindings=self.bindings,
+            completer=WordCompleter(["/clear", "/exit", "/his"]),
+            complete_while_typing=True,
+            history=FileHistory(Path("~/.yaicli_history").expanduser()),
+            enable_history_search=True,
+        )
+
     def _setup_key_bindings(self) -> None:
         """Setup keyboard shortcuts"""

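For context, a self-contained sketch of what that `PromptSession` wiring provides (standalone script; the history path matches the diff):

```python
# Standalone sketch of prepare_chat_loop's session setup: a REPL prompt with
# persistent on-disk history and completion of the three slash commands.
from pathlib import Path

from prompt_toolkit import PromptSession
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import FileHistory

history_file = Path("~/.yaicli_history").expanduser()
history_file.touch(exist_ok=True)

session = PromptSession(
    completer=WordCompleter(["/clear", "/exit", "/his"]),
    complete_while_typing=True,              # suggest as soon as "/" is typed
    history=FileHistory(str(history_file)),  # input lines survive restarts
    enable_history_search=True,              # Up-arrow recalls entries matching the typed prefix
)
# text = session.prompt("> ")  # uncomment to try interactively
```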
@@ -122,7 +143,7 @@ STREAM=true"""
             self.console.print("[bold yellow]Creating default configuration file.[/bold yellow]")
             self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
             with open(self.CONFIG_PATH, "w") as f:
-                f.write(self.DEFAULT_CONFIG_INI)
+                f.write(DEFAULT_CONFIG_INI)
         else:
             # Load from configuration file (middle priority)
             config_parser = CasePreservingConfigParser()
@@ -179,10 +200,11 @@ STREAM=true"""
         example:
         ```bash\nls -la\n``` ==> ls -al
         ```zsh\nls -la\n``` ==> ls -al
-        ```ls -
-        ls -
+        ```ls -la``` ==> ls -la
+        ls -la ==> ls -la
         ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
         ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
+        ```plaintext\nls -la\n``` ==> ls -la
         """
         if not command or not command.strip():
             return ""
@@ -210,25 +232,35 @@ STREAM=true"""
         # Join the remaining lines and strip any extra whitespace
         return "\n".join(line.strip() for line in content_lines if line.strip())

-    def post(self, message: list[dict[str, str]]) -> requests.Response:
+    def _get_type_number(self, key, _type: type, default=None):
+        """Get number with type from config"""
+        try:
+            return _type(self.config.get(key, default))
+        except ValueError:
+            raise ValueError(f"[red]{key} should be {_type} type.[/red]")
+
+    def post(self, message: list[dict[str, str]]) -> httpx.Response:
         """Post message to LLM API and return response"""
         url = self.config.get("BASE_URL", "").rstrip("/") + "/" + self.config.get("COMPLETION_PATH", "").lstrip("/")
         body = {
             "messages": message,
             "model": self.config.get("MODEL", "gpt-4o"),
             "stream": self.config.get("STREAM", "true") == "true",
-            "temperature": 0.7,
-            "top_p": 1,
+            "temperature": self._get_type_number(key="TEMPERATURE", _type=float, default="0.7"),
+            "top_p": self._get_type_number(key="TOP_P", _type=float, default="1.0"),
+            "max_tokens": self._get_type_number(key="MAX_TOKENS", _type=int, default="1024"),
         }
-        response = requests.post(url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"})
+        with httpx.Client(timeout=120.0) as client:
+            response = client.post(
+                url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"}
+            )
         try:
             response.raise_for_status()
-        except requests.exceptions.HTTPError as e:
+        except httpx.HTTPStatusError as e:
             self.console.print(f"[red]Error calling API: {e}[/red]")
             if self.verbose:
-                self.console.print(f"Reason: {e
-
-            raise typer.Exit(code=1) from None
+                self.console.print(f"Reason: {e}\nResponse: {response.text}")
+            raise e
         return response

     def get_reasoning_content(self, delta: dict) -> Optional[str]:
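The coercion helper matters because everything loaded from `config.ini` or environment variables arrives as a string; a standalone illustration of its behavior (plain function over a hypothetical config dict standing in for `self.config`):

```python
# Sketch of the _get_type_number logic: cast string config values to numbers,
# fail with a clear message on junk. `config` is a hypothetical stand-in.
config = {"TEMPERATURE": "0.7", "MAX_TOKENS": "not-a-number"}

def get_type_number(key: str, _type: type, default: str):
    try:
        return _type(config.get(key, default))
    except ValueError:
        raise ValueError(f"{key} should be {_type} type.")

print(get_type_number("TEMPERATURE", float, "0.7"))  # 0.7
print(get_type_number("TOP_P", float, "1.0"))        # 1.0 (missing key falls back to default)
try:
    get_type_number("MAX_TOKENS", int, "1024")
except ValueError as e:
    print(e)  # MAX_TOKENS should be <class 'int'> type.
```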
@@ -239,60 +271,83 @@ STREAM=true"""
                 return delta[k]
         return None

-    def _print_stream(self, response: requests.Response) -> str:
+    def _parse_stream_line(self, line: Union[bytes, str]) -> Optional[dict]:
+        """Parse a single line from the stream response"""
+        if not line:
+            return None
+
+        if isinstance(line, bytes):
+            line = line.decode("utf-8")
+        if not line.startswith("data: "):
+            return None
+
+        line = line[6:]
+        if line == "[DONE]":
+            return None
+
+        try:
+            json_data = json.loads(line)
+            if not json_data.get("choices"):
+                return None
+
+            return json_data
+        except json.JSONDecodeError:
+            self.console.print("[red]Error decoding response JSON[/red]")
+            if self.verbose:
+                self.console.print(f"[red]Error JSON data: {line}[/red]")
+            return None
+
+    def _process_reasoning_content(self, reason: str, full_completion: str, in_reasoning: bool) -> tuple[str, bool]:
+        """Process reasoning content in the response"""
+        if not in_reasoning:
+            in_reasoning = True
+            full_completion = "> Reasoning:\n> "
+        full_completion += reason.replace("\n", "\n> ")
+        return full_completion, in_reasoning
+
+    def _process_regular_content(self, content: str, full_completion: str, in_reasoning: bool) -> tuple[str, bool]:
+        """Process regular content in the response"""
+        if in_reasoning:
+            in_reasoning = False
+            full_completion += "\n\n"
+        full_completion += content
+        return full_completion, in_reasoning
+
+    def _print_stream(self, response: httpx.Response) -> str:
         """Print response from LLM in streaming mode"""
         full_completion = ""
         in_reasoning = False

         with Live() as live:
             for line in response.iter_lines():
-                if not line:
+                json_data = self._parse_stream_line(line)
+                if not json_data:
                     continue

-                line = line.decode("utf-8")
-                if not line.startswith("data: "):
-                    continue
+                delta = json_data["choices"][0]["delta"]
+                reason = self.get_reasoning_content(delta)
+
+                if reason is not None:
+                    full_completion, in_reasoning = self._process_reasoning_content(
+                        reason, full_completion, in_reasoning
+                    )
+                else:
+                    content = delta.get("content", "") or ""
+                    full_completion, in_reasoning = self._process_regular_content(
+                        content, full_completion, in_reasoning
+                    )

-                data = line[6:]
-                if data == "[DONE]":
-                    break
-
-                try:
-                    json_data = json.loads(data)
-                    if not json_data.get("choices"):
-                        continue
-
-                    delta = json_data["choices"][0]["delta"]
-                    reason = self.get_reasoning_content(delta)
-
-                    if reason is not None:
-                        # reasoning started
-                        if not in_reasoning:
-                            in_reasoning = True
-                            full_completion = "> Reasoning:\n> "
-                        full_completion += reason.replace("\n", "\n> ")
-                    else:
-                        # reasoning stoped
-                        if in_reasoning:
-                            in_reasoning = False
-                            full_completion += "\n\n"
-                        content = delta.get("content", "") or ""
-                        full_completion += content
-                    live.update(Markdown(markup=full_completion), refresh=True)
-                except json.JSONDecodeError:
-                    self.console.print("[red]Error decoding response JSON[/red]")
-                    if self.verbose:
-                        self.console.print(f"[red]Error: {data}[/red]")
+                live.update(Markdown(markup=full_completion), refresh=True)

         return full_completion

-    def _print_non_stream(self, response: requests.Response) -> str:
+    def _print_non_stream(self, response: httpx.Response) -> str:
         """Print response from LLM in non-streaming mode"""
         full_completion = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
         self.console.print(Markdown(full_completion))
         return full_completion

-    def _print(self, response: requests.Response, stream: bool = True) -> str:
+    def _print(self, response: httpx.Response, stream: bool = True) -> str:
         """Print response from LLM and return full completion"""
         if stream:
             # Streaming response
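Each streamed chunk that `_parse_stream_line` consumes is an OpenAI-style server-sent-event line; a standalone sketch of the format (the payload is illustrative):

```python
# SSE lines arrive as b'data: <json>'; the stream terminates with 'data: [DONE]'.
import json

raw = b'data: {"choices": [{"delta": {"content": "Hello"}}]}'

line = raw.decode("utf-8")
if line.startswith("data: "):
    payload = line[6:]               # strip the "data: " prefix
    if payload != "[DONE]":
        chunk = json.loads(payload)
        delta = chunk["choices"][0]["delta"]
        print(delta.get("content"))  # -> Hello
```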
@@ -305,12 +360,7 @@ STREAM=true"""

     def get_prompt_tokens(self) -> list[tuple[str, str]]:
         """Return prompt tokens for current mode"""
-        if self.current_mode == CHAT_MODE:
-            qmark = "💬"
-        elif self.current_mode == EXEC_MODE:
-            qmark = "🚀"
-        else:
-            qmark = ""
+        qmark = "💬" if self.current_mode == CHAT_MODE else "🚀" if self.current_mode == EXEC_MODE else ""
         return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]

     def _check_history_len(self) -> None:
@@ -321,7 +371,7 @@ STREAM=true"""
     def _run_repl(self) -> None:
         """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
         # Show REPL instructions
-        self._setup_key_bindings()
+        self.prepare_chat_loop()
         self.console.print("""
  ██    ██  █████  ██  ██████ ██      ██
  ██  ██  ██   ██ ██ ██      ██      ██
@@ -349,7 +399,7 @@ STREAM=true"""
                 self.history = []
                 self.console.print("[bold yellow]Chat history cleared[/bold yellow]\n")
                 continue
-            elif user_input.lower() == "/his":
+            elif user_input.lower() == CMD_HISTORY:
                 self.console.print(self.history)
                 continue
             # Create appropriate system prompt based on mode
@@ -364,19 +414,25 @@ STREAM=true"""
             message.append({"role": "user", "content": user_input})

             # Get response from LLM
-            response = self.post(message)
+            try:
+                response = self.post(message)
+            except ValueError as e:
+                self.console.print(f"[red]Error: {e}[/red]")
+                return
+            except (httpx.ConnectError, httpx.HTTPStatusError) as e:
+                self.console.print(f"[red]Error: {e}[/red]")
+                continue
             self.console.print("\n[bold green]Assistant:[/bold green]")
             try:
                 content = self._print(response, stream=self.config["STREAM"] == "true")
             except Exception as e:
-                self.console.print(f"[red]Error: {e}[/red]")
+                self.console.print(f"[red]Unknown Error: {e}[/red]")
                 continue

             # Add user input and assistant response to history
             self.history.append({"role": "user", "content": user_input})
             self.history.append({"role": "assistant", "content": content})

-            # Trim history if needed
             self._check_history_len()

             # Handle command execution in exec mode
@@ -406,7 +462,14 @@ STREAM=true"""
         ]

         # Get response from LLM
-        response = self.post(message)
+        try:
+            response = self.post(message)
+        except (ValueError, httpx.ConnectError, httpx.HTTPStatusError) as e:
+            self.console.print(f"[red]Error: {e}[/red]")
+            return
+        except Exception as e:
+            self.console.print(f"[red]Unknown Error: {e}[/red]")
+            return
         self.console.print("\n[bold green]Assistant:[/bold green]")
         content = self._print(response, stream=self.config["STREAM"] == "true")

@@ -422,14 +485,12 @@ STREAM=true"""
         if returncode != 0:
             self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")

-
     def run(self, chat: bool, shell: bool, prompt: str) -> None:
         """Run the CLI"""
         self.load_config()
         if not self.config.get("API_KEY"):
-            self.console.print("[bold red]API key not set[/bold red]")
             self.console.print(
-                "[
+                "[yellow]API key not set. Please set in ~/.config/yaicli/config.ini or AI_API_KEY env[/]"
             )
             raise typer.Exit(code=1)

@@ -445,14 +506,24 @@ STREAM=true"""
 def main(
     ctx: typer.Context,
     prompt: Annotated[Optional[str], typer.Argument(show_default=False, help="The prompt send to the LLM")] = None,
-    chat: Annotated[bool, typer.Option("--chat", "-c", help="Start in chat mode")] = False,
-    shell: Annotated[bool, typer.Option("--shell", "-s", help="Generate and execute shell command")] = False,
-    verbose: Annotated[bool, typer.Option("--verbose", "-V", help="Show verbose information")] = False,
+    chat: Annotated[
+        bool, typer.Option("--chat", "-c", help="Start in chat mode", rich_help_panel="Run Option")
+    ] = False,
+    shell: Annotated[
+        bool, typer.Option("--shell", "-s", help="Generate and execute shell command", rich_help_panel="Run Option")
+    ] = False,
+    verbose: Annotated[
+        bool, typer.Option("--verbose", "-V", help="Show verbose information", rich_help_panel="Run Option")
+    ] = False,
+    template: Annotated[bool, typer.Option("--template", help="Show the config template.")] = False,
 ):
     """yaicli - Your AI interface in cli."""
     if prompt == "":
         typer.echo("Empty prompt, ignored")
         return
+    if template:
+        typer.echo(DEFAULT_CONFIG_INI)
+        return
     if not prompt and not chat:
         typer.echo(ctx.get_help())
         return
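The `rich_help_panel` argument is what produces the separate "Run Option" box in the `-h` output shown earlier; a minimal demo command (not the real app):

```python
# Minimal typer demo: options sharing a rich_help_panel name are grouped
# into their own panel in --help output.
from typing import Annotated

import typer

app = typer.Typer(context_settings={"help_option_names": ["-h", "--help"]})

@app.command()
def main(
    chat: Annotated[
        bool, typer.Option("--chat", "-c", help="Start in chat mode", rich_help_panel="Run Option")
    ] = False,
):
    typer.echo(f"chat={chat}")

if __name__ == "__main__":
    app()  # `python demo.py -h` shows an Options panel plus a Run Option panel
```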
Two further files in the package are unchanged between the two versions.