yaicli 0.0.10__tar.gz → 0.0.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- yaicli-0.0.10/PKG-INFO
+++ yaicli-0.0.11/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.0.10
+Version: 0.0.11
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
@@ -214,14 +214,15 @@ Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.9
 Requires-Dist: distro>=1.9.0
+Requires-Dist: httpx>=0.28.1
 Requires-Dist: jmespath>=1.0.1
 Requires-Dist: prompt-toolkit>=3.0.50
-Requires-Dist: requests>=2.32.3
 Requires-Dist: rich>=13.9.4
+Requires-Dist: socksio>=1.0.0
 Requires-Dist: typer>=0.15.2
 Description-Content-Type: text/markdown
 
-# YAICLI - Your AI Command Line
+# YAICLI - Your AI Interface in Command Line
 
 [](https://pypi.org/project/yaicli/)
 
@@ -318,6 +319,10 @@ ANSWER_PATH=choices[0].message.content
 # true: streaming response
 # false: non-streaming response
 STREAM=true
+
+TEMPERATURE=0.7
+TOP_P=1.0
+MAX_TOKENS=1024
 ```
 
 ### Configuration Options
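The three keys added to the sample config map one-to-one onto standard OpenAI-style request parameters. A hedged sketch of where they land in the request body (field names follow the OpenAI chat-completions convention; the values are the defaults shown above):

```python
# Sketch only: the request body the new keys feed into, assuming an
# OpenAI-compatible chat-completions endpoint.
body = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "hello"}],
    "stream": True,
    "temperature": 0.7,  # TEMPERATURE: sampling randomness; lower = more deterministic
    "top_p": 1.0,        # TOP_P: nucleus-sampling probability-mass cutoff
    "max_tokens": 1024,  # MAX_TOKENS: upper bound on generated tokens
}
```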
@@ -514,4 +519,4 @@ Contributions of code, issue reports, or feature suggestions are welcome.
 
 ---
 
-*YAICLI - Making your terminal smarter*
\ No newline at end of file
+*YAICLI - Making your terminal smarter*
--- yaicli-0.0.10/README.md
+++ yaicli-0.0.11/README.md
@@ -1,4 +1,4 @@
-# YAICLI - Your AI Command Line
+# YAICLI - Your AI Interface in Command Line
 
 [](https://pypi.org/project/yaicli/)
 
@@ -95,6 +95,10 @@ ANSWER_PATH=choices[0].message.content
 # true: streaming response
 # false: non-streaming response
 STREAM=true
+
+TEMPERATURE=0.7
+TOP_P=1.0
+MAX_TOKENS=1024
 ```
 
 ### Configuration Options
@@ -291,4 +295,4 @@ Contributions of code, issue reports, or feature suggestions are welcome.
 
 ---
 
-*YAICLI - Making your terminal smarter*
\ No newline at end of file
+*YAICLI - Making your terminal smarter*
--- yaicli-0.0.10/pyproject.toml
+++ yaicli-0.0.11/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "yaicli"
-version = "0.0.10"
+version = "0.0.11"
 description = "A simple CLI tool to interact with LLM"
 authors = [{ name = "belingud", email = "im.victor@qq.com" }]
 readme = "README.md"
@@ -31,10 +31,11 @@ keywords = [
 ]
 dependencies = [
     "distro>=1.9.0",
+    "httpx>=0.28.1",
     "jmespath>=1.0.1",
     "prompt-toolkit>=3.0.50",
-    "requests>=2.32.3",
     "rich>=13.9.4",
+    "socksio>=1.0.0",
     "typer>=0.15.2",
 ]
 [project.urls]
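Two dependency swaps happen here: httpx replaces requests as the HTTP client, and socksio is pinned because httpx only speaks SOCKS when that package is importable (it is normally pulled in via the `httpx[socks]` extra). A minimal sketch, assuming a purely illustrative local proxy address:

```python
# Minimal sketch: httpx routed through a SOCKS proxy, which requires socksio.
# The proxy URL below is a placeholder, not part of yaicli.
import httpx

client = httpx.Client(proxy="socks5://127.0.0.1:1080", timeout=120.0)
resp = client.get("https://api.openai.com/v1/models")  # any HTTPS endpoint works
print(resp.status_code)
```

Pinning socksio directly (instead of the extra) keeps proxy support working for users who set a SOCKS proxy in their environment.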
@@ -51,11 +52,12 @@ resolution = "highest"
 [dependency-groups]
 dev = ["bump2version>=1.0.1", "pytest>=8.3.5", "ruff>=0.11.2"]
 
+[tool.isort]
+profile = "black"
+
 [tool.ruff]
 line-length = 120
-
-ignore = ["E501"]
-
+fix = true
 
 [build-system]
 requires = ["hatchling>=1.18.0"]
--- yaicli-0.0.10/yaicli.py
+++ yaicli-0.0.11/yaicli.py
@@ -5,10 +5,10 @@ import subprocess
 from os import getenv
 from os.path import basename, pathsep
 from pathlib import Path
-from typing import Annotated, Optional
+from typing import Annotated, Optional, Union
 
+import httpx
 import jmespath
-import requests
 import typer
 from distro import name as distro_name
 from prompt_toolkit import PromptSession
@@ -53,23 +53,7 @@ DEFAULT_CONFIG_MAP = {
     "STREAM": {"value": "true", "env_key": "AI_STREAM"},
 }
 
-app = typer.Typer(
-    name="yaicli",
-    context_settings={"help_option_names": ["-h", "--help"]},
-    pretty_exceptions_enable=False,
-)
-
-
-class CasePreservingConfigParser(configparser.RawConfigParser):
-    """Case preserving config parser"""
-
-    def optionxform(self, optionstr):
-        return optionstr
-
-
-class CLI:
-    CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
-    DEFAULT_CONFIG_INI = """[core]
+DEFAULT_CONFIG_INI = """[core]
 PROVIDER=openai
 BASE_URL=https://api.openai.com/v1
 API_KEY=
@@ -86,7 +70,28 @@ ANSWER_PATH=choices[0].message.content
 
 # true: streaming response
 # false: non-streaming response
-STREAM=true"""
+STREAM=true
+
+TEMPERATURE=0.7
+TOP_P=1.0
+MAX_TOKENS=1024"""
+
+app = typer.Typer(
+    name="yaicli",
+    context_settings={"help_option_names": ["-h", "--help"]},
+    pretty_exceptions_enable=False,
+)
+
+
+class CasePreservingConfigParser(configparser.RawConfigParser):
+    """Case preserving config parser"""
+
+    def optionxform(self, optionstr):
+        return optionstr
+
+
+class CLI:
+    CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
 
     def __init__(self, verbose: bool = False) -> None:
         self.verbose = verbose
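The relocated CasePreservingConfigParser exists for one reason: RawConfigParser lowercases option names through optionxform by default, which would fold keys like BASE_URL to base_url and break the uppercase, environment-variable-style keys yaicli uses. A standalone sketch of the behavior:

```python
import configparser

class CasePreservingConfigParser(configparser.RawConfigParser):
    """Keep option names exactly as written in the ini file."""

    def optionxform(self, optionstr):
        return optionstr  # the default implementation returns optionstr.lower()

parser = CasePreservingConfigParser()
parser.read_string("[core]\nBASE_URL=https://api.openai.com/v1\n")
print(dict(parser["core"]))  # {'BASE_URL': 'https://api.openai.com/v1'} -- case preserved
```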
@@ -122,7 +127,7 @@ STREAM=true"""
             self.console.print("[bold yellow]Creating default configuration file.[/bold yellow]")
             self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
             with open(self.CONFIG_PATH, "w") as f:
-                f.write(self.DEFAULT_CONFIG_INI)
+                f.write(DEFAULT_CONFIG_INI)
         else:
             # Load from configuration file (middle priority)
             config_parser = CasePreservingConfigParser()
@@ -179,10 +184,11 @@ STREAM=true"""
         example:
         ```bash\nls -la\n``` ==> ls -al
         ```zsh\nls -la\n``` ==> ls -al
-        ```ls -la``` ==> ls -al
-        ls -la ==> ls -al
+        ```ls -la``` ==> ls -la
+        ls -la ==> ls -la
         ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
         ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
+        ```plaintext\nls -la\n``` ==> ls -la
         """
         if not command or not command.strip():
             return ""
@@ -210,25 +216,36 @@ STREAM=true"""
         # Join the remaining lines and strip any extra whitespace
         return "\n".join(line.strip() for line in content_lines if line.strip())
 
-    def post(self, message: list[dict[str, str]]) -> requests.Response:
+    def _get_type_number(self, key, _type: type, default=None):
+        """Get number with type from config"""
+        try:
+            return _type(self.config.get(key, default))
+        except ValueError:
+            raise ValueError(f"[red]{key} should be {_type} type.[/red]")
+
+    def post(self, message: list[dict[str, str]]) -> httpx.Response:
         """Post message to LLM API and return response"""
         url = self.config.get("BASE_URL", "").rstrip("/") + "/" + self.config.get("COMPLETION_PATH", "").lstrip("/")
         body = {
             "messages": message,
             "model": self.config.get("MODEL", "gpt-4o"),
             "stream": self.config.get("STREAM", "true") == "true",
-            "temperature": 0.7,
-            "top_p": 1,
+            "temperature": self._get_type_number(key="TEMPERATURE", _type=float, default="0.7"),
+            "top_p": self._get_type_number(key="TOP_P", _type=float, default="1.0"),
+            "max_tokens": self._get_type_number(key="MAX_TOKENS", _type=int, default="1024"),
         }
-        response = requests.post(url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"})
+        with httpx.Client(timeout=120.0) as client:
+            response = client.post(
+                url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"}
+            )
         try:
             response.raise_for_status()
-        except requests.exceptions.HTTPError as e:
+        except httpx.HTTPStatusError as e:
             self.console.print(f"[red]Error calling API: {e}[/red]")
             if self.verbose:
-                self.console.print(f"Reason: {e.response.reason}")
+                self.console.print(f"Reason: {e}")
             self.console.print(f"Response: {response.text}")
-            raise
+            raise e
         return response
 
     def get_reasoning_content(self, delta: dict) -> Optional[str]:
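The new _get_type_number helper is what lets TEMPERATURE, TOP_P, and MAX_TOKENS live as plain strings in the ini file while the request body receives real numbers. A standalone sketch of the coercion logic (a bare dict stands in for the CLI's config, an assumption made for brevity):

```python
config = {"TEMPERATURE": "0.7", "MAX_TOKENS": "not-a-number"}

def get_type_number(key, _type, default=None):
    """Coerce a string config value to _type, falling back to default."""
    try:
        return _type(config.get(key, default))
    except ValueError:
        raise ValueError(f"{key} should be {_type} type.")

print(get_type_number("TEMPERATURE", float, "0.7"))  # 0.7 as a float
print(get_type_number("TOP_P", float, "1.0"))        # key missing -> default -> 1.0
get_type_number("MAX_TOKENS", int, "1024")           # raises ValueError: bad value
```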
@@ -239,60 +256,83 @@ STREAM=true"""
             return delta[k]
         return None
 
-    def _print_stream(self, response: requests.Response) -> str:
+    def _parse_stream_line(self, line: Union[bytes, str]) -> Optional[dict]:
+        """Parse a single line from the stream response"""
+        if not line:
+            return None
+
+        if isinstance(line, bytes):
+            line = line.decode("utf-8")
+        if not line.startswith("data: "):
+            return None
+
+        line = line[6:]
+        if line == "[DONE]":
+            return None
+
+        try:
+            json_data = json.loads(line)
+            if not json_data.get("choices"):
+                return None
+
+            return json_data
+        except json.JSONDecodeError:
+            self.console.print("[red]Error decoding response JSON[/red]")
+            if self.verbose:
+                self.console.print(f"[red]Error JSON data: {line}[/red]")
+            return None
+
+    def _process_reasoning_content(self, reason: str, full_completion: str, in_reasoning: bool) -> tuple[str, bool]:
+        """Process reasoning content in the response"""
+        if not in_reasoning:
+            in_reasoning = True
+            full_completion = "> Reasoning:\n> "
+        full_completion += reason.replace("\n", "\n> ")
+        return full_completion, in_reasoning
+
+    def _process_regular_content(self, content: str, full_completion: str, in_reasoning: bool) -> tuple[str, bool]:
+        """Process regular content in the response"""
+        if in_reasoning:
+            in_reasoning = False
+            full_completion += "\n\n"
+        full_completion += content
+        return full_completion, in_reasoning
+
+    def _print_stream(self, response: httpx.Response) -> str:
         """Print response from LLM in streaming mode"""
         full_completion = ""
         in_reasoning = False
 
         with Live() as live:
             for line in response.iter_lines():
-                if not line:
+                json_data = self._parse_stream_line(line)
+                if not json_data:
                     continue
 
-                line = line.decode("utf-8")
-                if not line.startswith("data: "):
-                    continue
+                delta = json_data["choices"][0]["delta"]
+                reason = self.get_reasoning_content(delta)
 
-                data = line[6:]
-                if data == "[DONE]":
-                    break
-
-                try:
-                    json_data = json.loads(data)
-                    if not json_data.get("choices"):
-                        continue
-
-                    delta = json_data["choices"][0]["delta"]
-                    reason = self.get_reasoning_content(delta)
-
-                    if reason is not None:
-                        # reasoning started
-                        if not in_reasoning:
-                            in_reasoning = True
-                            full_completion = "> Reasoning:\n> "
-                        full_completion += reason.replace("\n", "\n> ")
-                    else:
-                        # reasoning stoped
-                        if in_reasoning:
-                            in_reasoning = False
-                            full_completion += "\n\n"
-                    content = delta.get("content", "") or ""
-                    full_completion += content
-                    live.update(Markdown(markup=full_completion), refresh=True)
-                except json.JSONDecodeError:
-                    self.console.print("[red]Error decoding response JSON[/red]")
-                    if self.verbose:
-                        self.console.print(f"[red]Error: {data}[/red]")
+                if reason is not None:
+                    full_completion, in_reasoning = self._process_reasoning_content(
+                        reason, full_completion, in_reasoning
+                    )
+                else:
+                    content = delta.get("content", "") or ""
+                    full_completion, in_reasoning = self._process_regular_content(
+                        content, full_completion, in_reasoning
+                    )
+
+                live.update(Markdown(markup=full_completion), refresh=True)
 
         return full_completion
 
-    def _print_non_stream(self, response: requests.Response) -> str:
+    def _print_non_stream(self, response: httpx.Response) -> str:
         """Print response from LLM in non-streaming mode"""
         full_completion = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
         self.console.print(Markdown(full_completion))
         return full_completion
 
-    def _print(self, response: requests.Response, stream: bool = True) -> str:
+    def _print(self, response: httpx.Response, stream: bool = True) -> str:
         """Print response from LLM and return full completion"""
         if stream:
             # Streaming response
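The refactor pulls the server-sent-events line handling out of _print_stream into _parse_stream_line. A self-contained sketch of that protocol, with invented sample lines:

```python
import json
from typing import Optional, Union

def parse_stream_line(line: Union[bytes, str]) -> Optional[dict]:
    """Return the decoded chunk dict, or None for anything non-actionable."""
    if not line:
        return None
    if isinstance(line, bytes):
        line = line.decode("utf-8")
    if not line.startswith("data: "):
        return None            # ignore comments / keep-alives
    line = line[6:]            # strip the "data: " prefix
    if line == "[DONE]":
        return None            # end-of-stream sentinel
    try:
        data = json.loads(line)
        return data if data.get("choices") else None
    except json.JSONDecodeError:
        return None

print(parse_stream_line('data: {"choices": [{"delta": {"content": "hi"}}]}'))
print(parse_stream_line("data: [DONE]"))  # None
print(parse_stream_line(": keep-alive"))  # None
```

Collapsing "skip" and "stop" into a single None return is what lets the rewritten loop body shrink to one guard clause.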
@@ -364,12 +404,21 @@ STREAM=true"""
             message.append({"role": "user", "content": user_input})
 
             # Get response from LLM
-            response = self.post(message)
+            try:
+                response = self.post(message)
+            except ValueError as e:
+                self.console.print(f"[red]Error: {e}[/red]")
+                return
+            except httpx.ConnectError as e:
+                self.console.print(f"[red]Error: {e}[/red]")
+                continue
+            except httpx.HTTPStatusError:
+                continue
             self.console.print("\n[bold green]Assistant:[/bold green]")
             try:
                 content = self._print(response, stream=self.config["STREAM"] == "true")
             except Exception as e:
-                self.console.print(f"[red]Error: {e}[/red]")
+                self.console.print(f"[red]Unknown Error: {e}[/red]")
                 continue
 
             # Add user input and assistant response to history
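The ordering of the new except clauses matters: ValueError (a malformed numeric config value) aborts the REPL, while network and HTTP-status errors only skip the current turn. A quick sketch of how the two httpx exception types arise (the dead local address and httpbin URL are illustrative only):

```python
import httpx

try:
    # port 9 is typically closed locally, so the connection is refused
    httpx.get("http://127.0.0.1:9", timeout=1.0)
except httpx.ConnectError as e:  # network-level failure: the REPL just continues
    print("connect failed:", e)

resp = httpx.get("https://httpbin.org/status/500")
try:
    resp.raise_for_status()      # 4xx/5xx -> HTTPStatusError
except httpx.HTTPStatusError as e:  # post() already printed details; loop continues
    print("bad status:", e.response.status_code)
```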
@@ -406,7 +455,14 @@ STREAM=true"""
         ]
 
         # Get response from LLM
-        response = self.post(message)
+        try:
+            response = self.post(message)
+        except (ValueError, httpx.ConnectError, httpx.HTTPStatusError) as e:
+            self.console.print(f"[red]Error: {e}[/red]")
+            return
+        except Exception as e:
+            self.console.print(f"[red]Unknown Error: {e}[/red]")
+            return
         self.console.print("\n[bold green]Assistant:[/bold green]")
         content = self._print(response, stream=self.config["STREAM"] == "true")
 
@@ -422,7 +478,6 @@ STREAM=true"""
         if returncode != 0:
             self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")
 
-
     def run(self, chat: bool, shell: bool, prompt: str) -> None:
         """Run the CLI"""
         self.load_config()