yaicli 0.0.16__tar.gz → 0.0.18__tar.gz
This diff shows the changes between the publicly released package versions yaicli 0.0.16 and 0.0.18, as they appear in their public registry, and is provided for informational purposes only.

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.0.16
+Version: 0.0.18
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
```
```diff
@@ -238,32 +238,28 @@ Support regular and deep thinking models.
 
 ## Features
 
-- **
-  -
-  -
-  -
+- **Smart Interaction Modes**:
+  - 💬 Chat Mode: Persistent dialogue with context tracking
+  - 🚀 Execute Mode: Generate & verify OS-specific commands (Windows/macOS/Linux)
+  - ⚡ Quick Query: Single-shot responses without entering REPL
 
-- **
-  -
-  -
+- **Environment Intelligence**:
+  - Auto-detects shell type (CMD/PowerShell/bash/zsh)
+  - Dynamic command validation with 3-step confirmation
+  - Pipe input support (`cat log.txt | ai "analyze errors"`)
 
-- **
-  -
-  -
-  -
+- **Enterprise LLM Support**:
+  - OpenAI API compatible endpoints
+  - Claude/Gemini/Cohere integration guides
+  - Custom JSON parsing with jmespath
 
-- **
-  -
-  -
-  - Adjustable response parameters
+- **Terminal Experience**:
+  - Real-time streaming with cursor animation
+  - LRU history management (500 entries default)
 
-- **
-  -
-  -
-  - `Ctrl+R` to search history
-
-- **History**:
-  - Save and recall previous queries
+- **DevOps Ready**:
+  - Layered configuration (Env > File > Defaults)
+  - Verbose debug mode with API tracing
 
 ## Installation
 
```
```diff
@@ -345,6 +341,7 @@ Below are the available configuration options and override environment variables
 - **TOP_P**: Top-p sampling for response generation (default: 1.0), env: YAI_TOP_P
 - **MAX_TOKENS**: Maximum number of tokens for response generation (default: 1024), env: YAI_MAX_TOKENS
 - **MAX_HISTORY**: Max history size, default: 500, env: YAI_MAX_HISTORY
+- **AUTO_SUGGEST**: Auto suggest from history, default: true, env: YAI_AUTO_SUGGEST
 
 Default config of `COMPLETION_PATH` and `ANSWER_PATH` is OpenAI compatible. If you are using OpenAI or other OpenAI compatible LLM provider, you can use the default config.
 
```
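After this change, a `[core]` section of `~/.config/yaicli/config.ini` that exercises the new key could look like the sketch below; only keys visible in this diff are shown, and each can still be overridden per-invocation through its `YAI_*` environment variable (e.g. `YAI_AUTO_SUGGEST=false`).

```ini
[core]
TEMPERATURE=0.7
TOP_P=1.0
MAX_TOKENS=1024
MAX_HISTORY=500
AUTO_SUGGEST=true
```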
The same feature-list rewrite is applied to the README itself:

```diff
@@ -14,32 +14,28 @@ Support regular and deep thinking models.
 
 ## Features
 
-- **
-  -
-  -
-  -
+- **Smart Interaction Modes**:
+  - 💬 Chat Mode: Persistent dialogue with context tracking
+  - 🚀 Execute Mode: Generate & verify OS-specific commands (Windows/macOS/Linux)
+  - ⚡ Quick Query: Single-shot responses without entering REPL
 
-- **
-  -
-  -
+- **Environment Intelligence**:
+  - Auto-detects shell type (CMD/PowerShell/bash/zsh)
+  - Dynamic command validation with 3-step confirmation
+  - Pipe input support (`cat log.txt | ai "analyze errors"`)
 
-- **
-  -
-  -
-  -
+- **Enterprise LLM Support**:
+  - OpenAI API compatible endpoints
+  - Claude/Gemini/Cohere integration guides
+  - Custom JSON parsing with jmespath
 
-- **
-  -
-  -
-  - Adjustable response parameters
+- **Terminal Experience**:
+  - Real-time streaming with cursor animation
+  - LRU history management (500 entries default)
 
-- **
-  -
-  -
-  - `Ctrl+R` to search history
-
-- **History**:
-  - Save and recall previous queries
+- **DevOps Ready**:
+  - Layered configuration (Env > File > Defaults)
+  - Verbose debug mode with API tracing
 
 ## Installation
 
```
```diff
@@ -121,6 +117,7 @@ Below are the available configuration options and override environment variables
 - **TOP_P**: Top-p sampling for response generation (default: 1.0), env: YAI_TOP_P
 - **MAX_TOKENS**: Maximum number of tokens for response generation (default: 1024), env: YAI_MAX_TOKENS
 - **MAX_HISTORY**: Max history size, default: 500, env: YAI_MAX_HISTORY
+- **AUTO_SUGGEST**: Auto suggest from history, default: true, env: YAI_AUTO_SUGGEST
 
 Default config of `COMPLETION_PATH` and `ANSWER_PATH` is OpenAI compatible. If you are using OpenAI or other OpenAI compatible LLM provider, you can use the default config.
 
```
```diff
@@ -7,14 +7,14 @@ import time
 from os import getenv
 from os.path import basename, exists, pathsep, devnull
 from pathlib import Path
-from typing import Annotated, Optional, Union
+from typing import Annotated, Any, Dict, Optional, Union
 
 import httpx
 import jmespath
 import typer
 from distro import name as distro_name
 from prompt_toolkit import PromptSession, prompt
-from prompt_toolkit.
+from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
 from prompt_toolkit.history import FileHistory, _StrOrBytesPath
 from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
 from prompt_toolkit.keys import Keys
```
```diff
@@ -62,6 +62,7 @@ DEFAULT_CONFIG_MAP = {
     "TOP_P": {"value": "1.0", "env_key": "YAI_TOP_P"},
     "MAX_TOKENS": {"value": "1024", "env_key": "YAI_MAX_TOKENS"},
     "MAX_HISTORY": {"value": "500", "env_key": "YAI_MAX_HISTORY"},
+    "AUTO_SUGGEST": {"value": "true", "env_key": "YAI_AUTO_SUGGEST"},
 }
 
 DEFAULT_CONFIG_INI = """[core]
```
```diff
@@ -88,7 +89,8 @@ TEMPERATURE=0.7
 TOP_P=1.0
 MAX_TOKENS=1024
 
-MAX_HISTORY=500"""
+MAX_HISTORY=500
+AUTO_SUGGEST=true"""
 
 app = typer.Typer(
     name="yaicli",
```
```diff
@@ -105,7 +107,7 @@ class CasePreservingConfigParser(configparser.RawConfigParser):
 
 
 class LimitedFileHistory(FileHistory):
-    def __init__(self, filename: _StrOrBytesPath, max_entries: int = 500, trim_every: int =
+    def __init__(self, filename: _StrOrBytesPath, max_entries: int = 500, trim_every: int = 2):
         """Limited file history
         Args:
             filename (str): path to history file
```
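The hunk above shows only the changed signature (the `trim_every` default is now 2); the body of `LimitedFileHistory` is outside this diff. For orientation, a history class like this can be built on prompt_toolkit's public `FileHistory` hooks, `store_string()` and `load_history_strings()` (which yields entries newest-first). The following is a generic sketch of that technique, not yaicli's actual implementation:

```python
from prompt_toolkit.history import FileHistory


class TrimmingFileHistory(FileHistory):
    """Hypothetical sketch: keep only the newest `max_entries` items on disk."""

    def __init__(self, filename, max_entries: int = 500, trim_every: int = 2):
        self.max_entries = max_entries
        self.trim_every = trim_every
        self._stores = 0
        super().__init__(filename)

    def store_string(self, string: str) -> None:
        super().store_string(string)  # append the entry to the history file
        self._stores += 1
        if self._stores % self.trim_every == 0:  # trim only every few writes
            self._trim()

    def _trim(self) -> None:
        # load_history_strings() yields entries newest-first
        newest = list(self.load_history_strings())[: self.max_entries]
        open(self.filename, "w").close()  # truncate the file
        for entry in reversed(newest):  # rewrite oldest -> newest
            super().store_string(entry)
```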
```diff
@@ -171,7 +173,7 @@ class CLI:
         self.bindings = KeyBindings()
         # Disable nonatty warning
         _origin_stderr = None
-        if not sys.
+        if not sys.stdin.isatty():
             _origin_stderr = sys.stderr
             sys.stderr = open(devnull, "w")
         self.session = PromptSession(key_bindings=self.bindings)
```
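The repaired guard keys off whether stdin is attached to a terminal, the same check that makes the README's pipe-input feature (`cat log.txt | ai "analyze errors"`) possible. A standalone illustration of the pattern (a hypothetical script, not yaicli code):

```python
import sys

if sys.stdin.isatty():
    # Interactive terminal: prompt the user as usual
    text = input("> ")
else:
    # stdin is a pipe or redirect, e.g. `cat log.txt | ai "analyze errors"`
    text = sys.stdin.read()

print(f"received {len(text)} characters")
```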
```diff
@@ -184,10 +186,6 @@ class CLI:
         self.max_history_length = 25
         self.current_mode = TEMP_MODE
 
-    def is_stream(self) -> bool:
-        """Check if streaming is enabled"""
-        return self.config["STREAM"] == "true"
-
     def prepare_chat_loop(self) -> None:
         """Setup key bindings and history for chat mode"""
         self._setup_key_bindings()
```
```diff
@@ -195,11 +193,12 @@ class CLI:
         Path("~/.yaicli_history").expanduser().touch(exist_ok=True)
         self.session = PromptSession(
             key_bindings=self.bindings,
-            completer=WordCompleter(["/clear", "/exit", "/his"]),
+            # completer=WordCompleter(["/clear", "/exit", "/his"]),
             complete_while_typing=True,
             history=LimitedFileHistory(
                 Path("~/.yaicli_history").expanduser(), max_entries=int(self.config["MAX_HISTORY"])
             ),
+            auto_suggest=AutoSuggestFromHistory() if self.config["AUTO_SUGGEST"] else None,
             enable_history_search=True,
         )
 
```
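`AutoSuggestFromHistory` is prompt_toolkit's stock fish-style suggester: it proposes the most recent history entry matching what has been typed, rendered as grey inline text. Reduced to a standalone sketch (the history path and prompt string are illustrative):

```python
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.history import FileHistory

session = PromptSession(
    history=FileHistory("demo_history.txt"),  # illustrative path
    auto_suggest=AutoSuggestFromHistory(),    # grey inline suggestion from history
    enable_history_search=True,               # history search keeps working
)
text = session.prompt("> ")  # Right arrow (or End) accepts the suggestion
```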
```diff
@@ -219,8 +218,9 @@ class CLI:
         Returns:
             dict: merged configuration
         """
+        boolean_keys = ["STREAM", "AUTO_SUGGEST"]
         # Start with default configuration (lowest priority)
-        merged_config = {k: v["value"] for k, v in DEFAULT_CONFIG_MAP.items()}
+        merged_config: Dict[str, Any] = {k: v["value"] for k, v in DEFAULT_CONFIG_MAP.items()}
 
         # Create default config file if it doesn't exist
         if not self.CONFIG_PATH.exists():
```
```diff
@@ -241,8 +241,9 @@ class CLI:
             env_value = getenv(config["env_key"])
             if env_value is not None:
                 merged_config[key] = env_value
-
-
+            # Convert boolean values
+            if key in boolean_keys:
+                merged_config[key] = str(merged_config[key]).lower() == "true"
 
         self.config = merged_config
         return merged_config
```
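Since every configuration layer stores values as strings, the conversion treats only the case-insensitive string "true" as truthy; "1", "yes", or "on" all become `False`. A standalone reproduction of the coercion (a sketch mirroring the diff's expression):

```python
def to_bool(value) -> bool:
    # Mirrors the diff: only "true" (any case) is truthy
    return str(value).lower() == "true"

assert to_bool("true") is True
assert to_bool("True") is True
assert to_bool("false") is False
assert to_bool("1") is False   # note: "1" does NOT enable the flag
assert to_bool(True) is True   # str(True).lower() == "true"
```

This also explains why `is_stream()` could be deleted: after `load_config()`, `self.config["STREAM"]` is already a real boolean.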
```diff
@@ -329,14 +330,16 @@ class CLI:
         body = {
             "messages": message,
             "model": self.config.get("MODEL", "gpt-4o"),
-            "stream": self.is_stream(),
+            "stream": self.config["STREAM"],
             "temperature": self._get_number_with_type(key="TEMPERATURE", _type=float, default="0.7"),
             "top_p": self._get_number_with_type(key="TOP_P", _type=float, default="1.0"),
             "max_tokens": self._get_number_with_type(key="MAX_TOKENS", _type=int, default="1024"),
         }
         with httpx.Client(timeout=120.0) as client:
             response = client.post(
-                url,
+                url,
+                json=body,
+                headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"},
             )
         try:
             response.raise_for_status()
```
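In 0.0.16 the `client.post()` call apparently passed little more than the URL (the old line is truncated in this listing); the fix supplies the JSON body and the bearer-token header. Stripped of the class context, the request against an OpenAI-compatible endpoint looks like this (a sketch; the URL and key are placeholders):

```python
import httpx

body = {
    "messages": [{"role": "user", "content": "hello"}],
    "model": "gpt-4o",
    "stream": False,
    "temperature": 0.7,
    "top_p": 1.0,
    "max_tokens": 1024,
}

with httpx.Client(timeout=120.0) as client:
    response = client.post(
        "https://api.openai.com/v1/chat/completions",  # placeholder endpoint
        json=body,
        headers={"Authorization": "Bearer YOUR_API_KEY"},  # placeholder key
    )
    response.raise_for_status()
    print(response.json()["choices"][0]["message"]["content"])
```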
```diff
@@ -421,7 +424,10 @@ class CLI:
             )
 
             cursor = cursor_chars[cursor_index]
-            live.update(Markdown(markup=full_content + cursor, code_theme=self.config["CODE_THEME"]), refresh=True)
+            live.update(
+                Markdown(markup=full_content + cursor, code_theme=self.config["CODE_THEME"]),
+                refresh=True,
+            )
             cursor_index = (cursor_index + 1) % 2
             time.sleep(0.005)  # Slow down the printing speed, avoiding screen flickering
         live.update(Markdown(markup=full_content, code_theme=self.config["CODE_THEME"]), refresh=True)
```
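The reflowed call drives rich's `Live` display: on each streamed chunk the accumulated text is re-rendered as Markdown with an alternating cursor character appended, and a final `update` drops the cursor. A minimal standalone version (a sketch; the actual `cursor_chars` values are outside this diff and assumed here):

```python
import time

from rich.live import Live
from rich.markdown import Markdown

cursor_chars = ["_", " "]  # assumed; the real characters are not in this diff
full_content = ""
cursor_index = 0

with Live(refresh_per_second=30) as live:
    for chunk in ["Streaming ", "*markdown* ", "demo."]:  # stand-in for LLM chunks
        full_content += chunk
        live.update(
            Markdown(full_content + cursor_chars[cursor_index]),
            refresh=True,
        )
        cursor_index = (cursor_index + 1) % 2
        time.sleep(0.3)
    live.update(Markdown(full_content), refresh=True)  # final render, no cursor
```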
```diff
@@ -488,7 +494,7 @@ class CLI:
 
     def _handle_llm_response(self, response: httpx.Response, user_input: str) -> str:
         """Print LLM response and update history"""
-        content = self._print_stream(response) if self.is_stream() else self._print_normal(response)
+        content = self._print_stream(response) if self.config["STREAM"] else self._print_normal(response)
         self.history.extend([{"role": "user", "content": user_input}, {"role": "assistant", "content": content}])
         self._check_history_len()
         return content
```
```diff
@@ -564,17 +570,17 @@ class CLI:
         """Run the CLI"""
         self.load_config()
         if self.verbose:
-            self.console.print(f"CODE_THEME:
-            self.console.print(f"ANSWER_PATH:
+            self.console.print(f"CODE_THEME: {self.config['CODE_THEME']}")
+            self.console.print(f"ANSWER_PATH: {self.config['ANSWER_PATH']}")
             self.console.print(f"COMPLETION_PATH: {self.config['COMPLETION_PATH']}")
-            self.console.print(f"BASE_URL:
-            self.console.print(f"MODEL:
-            self.console.print(f"SHELL_NAME:
-            self.console.print(f"OS_NAME:
-            self.console.print(f"STREAM:
-            self.console.print(f"TEMPERATURE:
-            self.console.print(f"TOP_P:
-            self.console.print(f"MAX_TOKENS:
+            self.console.print(f"BASE_URL: {self.config['BASE_URL']}")
+            self.console.print(f"MODEL: {self.config['MODEL']}")
+            self.console.print(f"SHELL_NAME: {self.config['SHELL_NAME']}")
+            self.console.print(f"OS_NAME: {self.config['OS_NAME']}")
+            self.console.print(f"STREAM: {self.config['STREAM']}")
+            self.console.print(f"TEMPERATURE: {self.config['TEMPERATURE']}")
+            self.console.print(f"TOP_P: {self.config['TOP_P']}")
+            self.console.print(f"MAX_TOKENS: {self.config['MAX_TOKENS']}")
         if not self.config.get("API_KEY"):
             self.console.print(
                 "[yellow]API key not set. Please set in ~/.config/yaicli/config.ini or AI_API_KEY env[/]"
```
```diff
@@ -596,10 +602,17 @@ def main(
         bool, typer.Option("--chat", "-c", help="Start in chat mode", rich_help_panel="Run Options")
     ] = False,
     shell: Annotated[
-        bool,
+        bool,
+        typer.Option(
+            "--shell",
+            "-s",
+            help="Generate and execute shell command",
+            rich_help_panel="Run Options",
+        ),
     ] = False,
     verbose: Annotated[
-        bool,
+        bool,
+        typer.Option("--verbose", "-V", help="Show verbose information", rich_help_panel="Run Options"),
     ] = False,
     template: Annotated[bool, typer.Option("--template", help="Show the config template.")] = False,
 ):
```
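Both flags now carry explicit `typer.Option` metadata, so `--shell/-s` and `--verbose/-V` are documented and grouped under the "Run Options" panel in `--help`. In isolation the pattern looks like this (a runnable sketch; `rich_help_panel` needs typer's rich integration installed):

```python
from typing import Annotated

import typer

app = typer.Typer(name="demo")


@app.command()
def main(
    shell: Annotated[
        bool,
        typer.Option("--shell", "-s", help="Generate and execute shell command", rich_help_panel="Run Options"),
    ] = False,
    verbose: Annotated[
        bool,
        typer.Option("--verbose", "-V", help="Show verbose information", rich_help_panel="Run Options"),
    ] = False,
):
    typer.echo(f"shell={shell} verbose={verbose}")


if __name__ == "__main__":
    app()
```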