yaicli 0.0.6__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyproject.toml +66 -0
- yaicli-0.0.8.dist-info/METADATA +459 -0
- yaicli-0.0.8.dist-info/RECORD +7 -0
- yaicli.py +264 -472
- yaicli-0.0.6.dist-info/METADATA +0 -240
- yaicli-0.0.6.dist-info/RECORD +0 -6
- {yaicli-0.0.6.dist-info → yaicli-0.0.8.dist-info}/WHEEL +0 -0
- {yaicli-0.0.6.dist-info → yaicli-0.0.8.dist-info}/entry_points.txt +0 -0
- {yaicli-0.0.6.dist-info → yaicli-0.0.8.dist-info}/licenses/LICENSE +0 -0
yaicli.py
CHANGED
@@ -3,7 +3,6 @@ import json
 import platform
 import subprocess
 import time
-from enum import StrEnum
 from os import getenv
 from os.path import basename, pathsep
 from pathlib import Path
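Note on this hunk: dropping `from enum import StrEnum` removes the file's only Python 3.11+ requirement. 0.0.8 replaces the old `ModeEnum` class (its definition is elided by the diff viewer, but `ModeEnum.CHAT`, `ModeEnum.EXECUTE`, and `ModeEnum.TEMP` appear in removed code below) with plain module-level string constants. A sketch of the two representations; the enum body and its exact values are reconstructed by assumption:

    # 0.0.6 style (assumed shape; StrEnum exists only on Python 3.11+)
    from enum import StrEnum

    class ModeEnum(StrEnum):
        CHAT = "chat"      # values assumed to match the 0.0.8 constants
        EXECUTE = "exec"
        TEMP = "temp"

    # 0.0.8 style: plain constants, no enum machinery required
    CHAT_MODE = "chat"
    EXEC_MODE = "exec"
    TEMP_MODE = "temp"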
@@ -21,56 +20,58 @@ from rich.live import Live
 from rich.markdown import Markdown
 from rich.prompt import Confirm

+SHELL_PROMPT = """Your are a Shell Command Generator.
+Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
+Rules:
+1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
+2. Output STRICTLY in plain text format
+3. NEVER use markdown, code blocks or explanations
+4. Chain multi-step commands in SINGLE LINE
+5. Return NOTHING except the ready-to-run command"""

-
-
-
-
-
+DEFAULT_PROMPT = (
+    "You are yaili, a system management and programing assistant, "
+    "You are managing {_os} operating system with {_shell} shell. "
+    "Your responses should be concise and use Markdown format, "
+    "unless the user explicitly requests more details."
+)

-
-
-
+CMD_CLEAR = "/clear"
+CMD_EXIT = "/exit"
+
+EXEC_MODE = "exec"
+CHAT_MODE = "chat"
+TEMP_MODE = "temp"
+
+DEFAULT_CONFIG_MAP = {
+    "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "AI_BASE_URL"},
+    "API_KEY": {"value": "", "env_key": "AI_API_KEY"},
+    "MODEL": {"value": "gpt-4o", "env_key": "AI_MODEL"},
+    "SHELL_NAME": {"value": "auto", "env_key": "AI_SHELL_NAME"},
+    "OS_NAME": {"value": "auto", "env_key": "AI_OS_NAME"},
+    "COMPLETION_PATH": {"value": "chat/completions", "env_key": "AI_COMPLETION_PATH"},
+    "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "AI_ANSWER_PATH"},
+    "STREAM": {"value": "true", "env_key": "AI_STREAM"},
+}

+app = typer.Typer(
+    name="yaicli",
+    context_settings={"help_option_names": ["-h", "--help"]},
+    pretty_exceptions_enable=False,
+)

-# Default configuration values (lowest priority)
-DEFAULT_CONFIG = {
-    "BASE_URL": "https://api.openai.com/v1",
-    "API_KEY": "",
-    "MODEL": "gpt-4o",
-    "SHELL_NAME": "auto",
-    "OS_NAME": "auto",
-    "COMPLETION_PATH": "chat/completions",
-    "ANSWER_PATH": "choices[0].message.content",
-    "STREAM": "true",
-}

-
-
-    "BASE_URL": "AI_BASE_URL",
-    "API_KEY": "AI_API_KEY",
-    "MODEL": "AI_MODEL",
-    "SHELL_NAME": "AI_SHELL_NAME",
-    "OS_NAME": "AI_OS_NAME",
-    "COMPLETION_PATH": "AI_COMPLETION_PATH",
-    "ANSWER_PATH": "AI_ANSWER_PATH",
-    "STREAM": "AI_STREAM",
-}
+class CasePreservingConfigParser(configparser.RawConfigParser):
+    """Case preserving config parser"""

+    def optionxform(self, optionstr):
+        return optionstr

-class YAICLI:
-    """Main class for YAICLI
-    Chat mode: interactive chat mode
-    One-shot mode:
-        Temp: ask a question and get a response once
-        Execute: generate and execute shell commands
-    """

-
+class CLI:
     CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
-
-    # Default configuration template
     DEFAULT_CONFIG_INI = """[core]
+PROVIDER=openai
 BASE_URL=https://api.openai.com/v1
 API_KEY=
 MODEL=gpt-4o
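Why `CasePreservingConfigParser` is needed: `configparser` lowercases option names by default (the stock `optionxform` is `str.lower`), so `BASE_URL` read from the INI file would come back as `base_url` and never override the upper-case keys in `DEFAULT_CONFIG_MAP`. A minimal demonstration of the override:

    import configparser

    class CasePreservingConfigParser(configparser.RawConfigParser):
        def optionxform(self, optionstr):
            return optionstr  # identity: keep option names exactly as written

    ini = "[core]\nBASE_URL=https://api.openai.com/v1\n"

    stock = configparser.RawConfigParser()
    stock.read_string(ini)
    print(list(stock["core"]))       # ['base_url']  (lowercased)

    preserving = CasePreservingConfigParser()
    preserving.read_string(ini)
    print(list(preserving["core"]))  # ['BASE_URL']  (case preserved)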
@@ -88,109 +89,22 @@ ANSWER_PATH=choices[0].message.content
 # false: non-streaming response
 STREAM=true"""

-    def __init__(self, verbose: bool = False):
+    def __init__(self, verbose: bool = False) -> None:
         self.verbose = verbose
         self.console = Console()
         self.bindings = KeyBindings()
         self.session = PromptSession(key_bindings=self.bindings)
-        self.current_mode = ModeEnum.TEMP.value
         self.config = {}
         self.history = []
         self.max_history_length = 25
+        self.current_mode = TEMP_MODE

-
-        self._setup_key_bindings()
-
-    def _setup_key_bindings(self):
+    def _setup_key_bindings(self) -> None:
         """Setup keyboard shortcuts"""

-        @self.bindings.add(Keys.ControlI)  # Bind
-        def _(event: KeyPressEvent):
-            self.current_mode = (
-                ModeEnum.CHAT.value
-                if self.current_mode == ModeEnum.EXECUTE.value
-                else ModeEnum.EXECUTE.value
-            )
-
-    def clear_history(self):
-        """Clear chat history"""
-        self.history = []
-
-    def detect_os(self) -> str:
-        """Detect operating system
-        Returns:
-            str: operating system name
-        Raises:
-            typer.Exit: if there is an error with the request
-        """
-        if self.config.get("OS_NAME") != "auto":
-            return self.config["OS_NAME"]
-        current_platform = platform.system()
-        if current_platform == "Linux":
-            return "Linux/" + distro_name(pretty=True)
-        if current_platform == "Windows":
-            return "Windows " + platform.release()
-        if current_platform == "Darwin":
-            return "Darwin/MacOS " + platform.mac_ver()[0]
-        return current_platform
-
-    def detect_shell(self) -> str:
-        """Detect shell
-        Returns:
-            str: shell name
-        Raises:
-            typer.Exit: if there is an error with the request
-        """
-        if self.config["SHELL_NAME"] != "auto":
-            return self.config["SHELL_NAME"]
-        import platform
-
-        current_platform = platform.system()
-        if current_platform in ("Windows", "nt"):
-            is_powershell = len(getenv("PSModulePath", "").split(pathsep)) >= 3
-            return "powershell.exe" if is_powershell else "cmd.exe"
-        return basename(getenv("SHELL", "/bin/sh"))
-
-    def build_cmd_prompt(self) -> str:
-        """Build command prompt
-        Returns:
-            str: command prompt
-        Raises:
-            typer.Exit: if there is an error with the request
-        """
-        _os = self.detect_os()
-        _shell = self.detect_shell()
-        return f"""Your are a Shell Command Generator.
-Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
-Rules:
-1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
-2. Output STRICTLY in plain text format
-3. NEVER use markdown, code blocks or explanations
-4. Chain multi-step commands in SINGLE LINE
-5. Return NOTHING except the ready-to-run command"""
-
-    def build_default_prompt(self) -> str:
-        """Build default prompt
-        Returns:
-            str: default prompt
-        Raises:
-            typer.Exit: if there is an error with the request
-        """
-        _os = self.detect_os()
-        _shell = self.detect_shell()
-        return (
-            "You are yaili, a system management and programing assistant, "
-            f"You are managing {_os} operating system with {_shell} shell. "
-            "Your responses should be concise and use Markdown format, "
-            "unless the user explicitly requests more details."
-        )
-
-    def get_default_config(self) -> dict[str, str]:
-        """Get default configuration
-        Returns:
-            dict: default configuration with lowest priority
-        """
-        return DEFAULT_CONFIG.copy()
+        @self.bindings.add(Keys.ControlI)  # Bind TAB to switch modes
+        def _(event: KeyPressEvent) -> None:
+            self.current_mode = EXEC_MODE if self.current_mode == CHAT_MODE else CHAT_MODE

     def load_config(self) -> dict[str, str]:
         """Load LLM API configuration with priority:
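On the key binding in this hunk: `Keys.ControlI` is effectively the TAB key, since TAB and Ctrl-I send the same control code (0x09) in a terminal; the rewrite collapses 0.0.6's five-line conditional into a single toggle expression. A standalone sketch of the same binding outside the class (the commented-out prompt call is illustrative only):

    from prompt_toolkit import PromptSession
    from prompt_toolkit.key_binding import KeyBindings
    from prompt_toolkit.key_binding.key_processor import KeyPressEvent
    from prompt_toolkit.keys import Keys

    CHAT_MODE, EXEC_MODE = "chat", "exec"
    mode = {"current": CHAT_MODE}

    bindings = KeyBindings()

    @bindings.add(Keys.ControlI)  # TAB arrives as Ctrl-I
    def _(event: KeyPressEvent) -> None:
        # Flip between chat and exec mode on every TAB press
        mode["current"] = EXEC_MODE if mode["current"] == CHAT_MODE else CHAT_MODE

    session = PromptSession(key_bindings=bindings)
    # session.prompt("> ")  # pressing TAB while typing toggles mode["current"]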
@@ -202,250 +116,56 @@ Rules:
             dict: merged configuration
         """
         # Start with default configuration (lowest priority)
-        merged_config =
+        merged_config = {k: v["value"] for k, v in DEFAULT_CONFIG_MAP.items()}

-        #
+        # Create default config file if it doesn't exist
         if not self.CONFIG_PATH.exists():
-            self.console.print(
-                "[bold yellow]Configuration file not found. Creating default configuration file.[/bold yellow]"
-            )
+            self.console.print("[bold yellow]Creating default configuration file.[/bold yellow]")
             self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
             with open(self.CONFIG_PATH, "w") as f:
                 f.write(self.DEFAULT_CONFIG_INI)
         else:
+            # Load from configuration file (middle priority)
             config_parser = CasePreservingConfigParser()
             config_parser.read(self.CONFIG_PATH)
             if "core" in config_parser:
-                # Update with values from config file
-                for key, value in config_parser["core"].items():
-                    if value.strip():  # Only use non-empty values from config file
-                        merged_config[key] = value
+                # Update with non-empty values from config file
+                merged_config.update({k: v for k, v in config_parser["core"].items() if v.strip()})

         # Override with environment variables (highest priority)
-        for
-            env_value = getenv(
-            if env_value is not None:
-                merged_config[
+        for key, config in DEFAULT_CONFIG_MAP.items():
+            env_value = getenv(config["env_key"])
+            if env_value is not None:
+                merged_config[key] = env_value

-        # Ensure STREAM is lowercase string
         merged_config["STREAM"] = str(merged_config.get("STREAM", "true")).lower()

         self.config = merged_config
-        return
-
-    def _call_api(self, url: str, headers: dict, data: dict) -> requests.Response:
-        """Call the API and return the response.
-        Args:
-            url: API endpoint URL
-            headers: request headers
-            data: request data
-        Returns:
-            requests.Response: response object
-        Raises:
-            requests.exceptions.RequestException: if there is an error with the request
-        """
-        response = requests.post(url, headers=headers, json=data)
-        response.raise_for_status()  # Raise an exception for non-200 status codes
-        return response
-
-    def get_llm_url(self) -> str:
-        """Get LLM API URL
-        Returns:
-            str: LLM API URL
-        Raises:
-            typer.Exit: if API key or base URL is not set
-        """
-        base = self.config.get("BASE_URL", "").rstrip("/")
-        if not base:
-            self.console.print(
-                "[red]Base URL not found. Please set it in the configuration file. Default: https://api.openai.com/v1[/red]"
-            )
-            raise typer.Exit(code=1)
-        COMPLETION_PATH = self.config.get("COMPLETION_PATH", "").lstrip("/")
-        if not COMPLETION_PATH:
-            self.console.print(
-                "[red]Completions path not set. Please set it in the configuration file. Default: `/chat/completions`[/red]"
-            )
-            raise typer.Exit(code=1)
-        return f"{base}/{COMPLETION_PATH}"
-
-    def build_data(self, prompt: str, mode: str = ModeEnum.TEMP.value) -> dict:
-        """Build request data
-        Args:
-            prompt: user input
-            mode: chat or execute mode
-        Returns:
-            dict: request data
-        """
-        if mode == ModeEnum.EXECUTE.value:
-            system_prompt = self.build_cmd_prompt()
-        else:
-            system_prompt = self.build_default_prompt()
-
-        # Build messages list, first add system prompt
-        messages = [{"role": "system", "content": system_prompt}]
-
-        # Add history records in chat mode
-        if mode == ModeEnum.CHAT.value and self.history:
-            messages.extend(self.history)
-
-        # Add current user message
-        messages.append({"role": "user", "content": prompt})
-
-        return {
-            "model": self.config["MODEL"],
-            "messages": messages,
-            "stream": self.config.get("STREAM", "true") == "true",
-            "temperature": 0.7,
-            "top_p": 0.7,
-        }
-
-    def stream_response(self, response: requests.Response) -> str:
-        """Stream response from LLM API
-        Args:
-            response: requests.Response object
-        Returns:
-            str: full completion text
-        """
-        full_completion = ""
-        # Streaming response loop
-        with Live(console=self.console) as live:
-            for line in response.iter_lines():
-                if not line:
-                    continue
-                decoded_line = line.decode("utf-8")
-                if decoded_line.startswith("data: "):
-                    decoded_line = decoded_line[6:]
-                    if decoded_line == "[DONE]":
-                        break
-                    try:
-                        json_data = json.loads(decoded_line)
-                        content = json_data["choices"][0]["delta"].get("content", "")
-                        full_completion += content
-                        markdown = Markdown(markup=full_completion)
-                        live.update(markdown, refresh=True)
-                    except json.JSONDecodeError:
-                        self.console.print("[red]Error decoding response JSON[/red]")
-                        if self.verbose:
-                            self.console.print(f"[red]Error decoding JSON: {decoded_line}[/red]")
-                time.sleep(0.05)
-
-        return full_completion
-
-    def call_llm_api(self, prompt: str) -> str:
-        """Call LLM API, return streaming output
-        Args:
-            prompt: user input
-        Returns:
-            str: streaming output
-        """
-        url = self.get_llm_url()
-        headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-        data = self.build_data(prompt)
-        try:
-            response = self._call_api(url, headers, data)
-        except requests.exceptions.RequestException as e:
-            self.console.print(f"[red]Error calling API: {e}[/red]")
-            if self.verbose and e.response:
-                self.console.print(f"{e.response.text}")
-            raise typer.Exit(code=1) from None
-        if not response:
-            raise typer.Exit(code=1)
-
-        self.console.print("\n[bold green]Assistant:[/bold green]")
-        assistant_response = self.stream_response(
-            response
-        )  # Stream the response and get the full text
-        self.console.print()  # Add a newline after the completion
+        return merged_config

-
-
-
-
-
-
-
-
-
-
-
-
-        data["stream"] = False
-        try:
-            response = self._call_api(url, headers, data)
-        except requests.exceptions.RequestException as e:
-            self.console.print(f"[red]Error calling API: {e}[/red]")
-            return None
-        if not response:
-            return None
-        ANSWER_PATH = self.config.get("ANSWER_PATH", None)
-        if not ANSWER_PATH:
-            ANSWER_PATH = "choices[0].message.content"
-            if self.verbose:
-                self.console.print(
-                    "[bold yellow]Answer path not set. Using default: `choices[0].message.content`[/bold yellow]"
-                )
-        content = jmespath.search(ANSWER_PATH, response.json())
-        return content.strip()
-
-    def execute_shell_command(self, command: str) -> int:
-        """Execute shell command
-        Args:
-            command: shell command
-        Returns:
-            int: return code
-        """
-        self.console.print(f"\n[bold green]Executing command: [/bold green] {command}\n")
-        result = subprocess.run(command, shell=True)
-        if result.returncode != 0:
-            self.console.print(
-                f"\n[bold red]Command failed with return code: {result.returncode}[/bold red]"
-            )
-        return result.returncode
-
-    def get_prompt_tokens(self):
-        """Get prompt tokens based on current mode
-        Returns:
-            list: prompt tokens for prompt_toolkit
-        """
-        if self.current_mode == ModeEnum.CHAT.value:
-            qmark = "💬"
-        elif self.current_mode == ModeEnum.EXECUTE.value:
-            qmark = "🚀"
-        else:
-            qmark = ""
-        return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]
-
-    def chat_mode(self, user_input: str):
-        """
-        This method handles the chat mode.
-        It adds the user input to the history and calls the API to get a response.
-        It then adds the response to the history and manages the history length.
-        Args:
-            user_input: user input
-        Returns:
-            ModeEnum: current mode
-        """
-        if self.current_mode != ModeEnum.CHAT.value:
-            return self.current_mode
-
-        # Add user message to history
-        self.history.append({"role": "user", "content": user_input})
-
-        # Call API and get response
-        assistant_response = self.call_llm_api(user_input)
-
-        # Add assistant response to history
-        if assistant_response:
-            self.history.append({"role": "assistant", "content": assistant_response})
+    def detect_os(self) -> str:
+        """Detect operating system + version"""
+        if self.config.get("OS_NAME") != "auto":
+            return self.config["OS_NAME"]
+        current_platform = platform.system()
+        if current_platform == "Linux":
+            return "Linux/" + distro_name(pretty=True)
+        if current_platform == "Windows":
+            return "Windows " + platform.release()
+        if current_platform == "Darwin":
+            return "Darwin/MacOS " + platform.mac_ver()[0]
+        return current_platform

-
-
-
-
-            self.history = self.history[-self.max_history_length * 2 :]
+    def detect_shell(self) -> str:
+        """Detect shell name"""
+        if self.config["SHELL_NAME"] != "auto":
+            return self.config["SHELL_NAME"]

-
+        current_platform = platform.system()
+        if current_platform in ("Windows", "nt"):
+            is_powershell = len(getenv("PSModulePath", "").split(pathsep)) >= 3
+            return "powershell.exe" if is_powershell else "cmd.exe"
+        return basename(getenv("SHELL", "/bin/sh"))

     def _filter_command(self, command: str) -> Optional[str]:
         """Filter out unwanted characters from command
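The rewritten `load_config` above keeps 0.0.6's three-tier precedence (hard-coded defaults, then non-empty config-file values, then environment variables) but expresses it as one dict comprehension, one `update`, and one loop over `DEFAULT_CONFIG_MAP`. A self-contained sketch of that merge order, using a trimmed copy of the map:

    from os import getenv

    # Trimmed copy of DEFAULT_CONFIG_MAP: default value + env var per key
    DEFAULT_CONFIG_MAP = {
        "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "AI_BASE_URL"},
        "MODEL": {"value": "gpt-4o", "env_key": "AI_MODEL"},
    }

    def merge(file_section: dict[str, str]) -> dict[str, str]:
        # 1. Defaults (lowest priority)
        merged = {k: v["value"] for k, v in DEFAULT_CONFIG_MAP.items()}
        # 2. Non-empty values from the config file (middle priority)
        merged.update({k: v for k, v in file_section.items() if v.strip()})
        # 3. Environment variables (highest priority)
        for key, cfg in DEFAULT_CONFIG_MAP.items():
            env_value = getenv(cfg["env_key"])
            if env_value is not None:
                merged[key] = env_value
        return merged

    # MODEL is taken from the "file"; BASE_URL keeps its default because "" is skipped
    print(merge({"BASE_URL": "", "MODEL": "gpt-4o-mini"}))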
@@ -491,142 +211,214 @@ Rules:
         # Join the remaining lines and strip any extra whitespace
         return "\n".join(line.strip() for line in content_lines if line.strip())

-    def
-        """
-
-
-
-
-
-
-
-
-
-
-
-        self.console.print("[
-
-
-
-
-
-
-
-
-
-
-
+    def post(self, message: list[dict[str, str]]) -> requests.Response:
+        """Post message to LLM API and return response"""
+        url = self.config.get("BASE_URL", "").rstrip("/") + "/" + self.config.get("COMPLETION_PATH", "").lstrip("/")
+        body = {
+            "messages": message,
+            "model": self.config.get("MODEL", "gpt-4o"),
+            "stream": self.config.get("STREAM", "true") == "true",
+            "temperature": 0.7,
+            "top_p": 1,
+        }
+        response = requests.post(url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"})
+        try:
+            response.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            self.console.print(f"[red]Error calling API: {e}[/red]")
+            if self.verbose:
+                self.console.print(f"Reason: {e.response.reason}")
+                self.console.print(f"Response: {response.text}")
+            raise typer.Exit(code=1) from None
+        return response
+
+    def _print(self, response: requests.Response, stream: bool = True) -> str:
+        """Print response from LLM and return full completion"""
+        full_completion = ""
+        if stream:
+            with Live() as live:
+                for line in response.iter_lines():
+                    # Skip empty lines
+                    if not line:
+                        continue
+
+                    # Process server-sent events
+                    data = line.decode("utf-8")
+                    if not data.startswith("data: "):
+                        continue
+
+                    # Extract data portion
+                    data = data[6:]
+                    if data == "[DONE]":
+                        break
+
+                    # Parse JSON and update display
+                    try:
+                        json_data = json.loads(data)
+                        content = json_data["choices"][0]["delta"].get("content", "")
+                        full_completion += content
+                        live.update(Markdown(markup=full_completion), refresh=True)
+                    except json.JSONDecodeError:
+                        self.console.print("[red]Error decoding response JSON[/red]")
+                        if self.verbose:
+                            self.console.print(f"[red]Error: {data}[/red]")
+
+                    time.sleep(0.01)
+        else:
+            # Non-streaming response
+            full_completion = jmespath.search(
+                self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json()
+            )
+            self.console.print(Markdown(full_completion))
+        self.console.print()  # Add a newline after the response to separate from the next input
+        return full_completion
+
+    def get_prompt_tokens(self) -> list[tuple[str, str]]:
+        """Return prompt tokens for current mode"""
+        if self.current_mode == CHAT_MODE:
+            qmark = "💬"
+        elif self.current_mode == EXEC_MODE:
+            qmark = "🚀"
+        else:
+            qmark = ""
+        return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]
+
+    def _check_history_len(self) -> None:
+        """Check history length and remove oldest messages if necessary"""
+        if len(self.history) > self.max_history_length:
+            self.history = self.history[-self.max_history_length :]
+
+    def _run_repl(self) -> None:
+        """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
+        # Show REPL instructions
+        self._setup_key_bindings()
+        self.console.print("[bold]Starting REPL loop[/bold]")
+        self.console.print("[bold]Press TAB to change in chat and exec mode[/bold]")
+        self.console.print("[bold]Type /clear to clear chat history[/bold]")
+        self.console.print("[bold]Type /his to see chat history[/bold]")
+        self.console.print("[bold]Press Ctrl+C or type /exit to exit[/bold]\n")
+
         while True:
-
-
-            if not user_input
+            # Get user input
+            user_input = self.session.prompt(self.get_prompt_tokens).strip()
+            if not user_input:
                 continue

-
+            # Handle exit commands
+            if user_input.lower() == CMD_EXIT:
                 break

-            # Handle
-            if self.current_mode ==
-
-
-
-
-
-
-
-
+            # Handle clear command
+            if user_input.lower() == CMD_CLEAR and self.current_mode == CHAT_MODE:
+                self.history = []
+                self.console.print("[bold yellow]Chat history cleared[/bold yellow]\n")
+                continue
+            elif user_input.lower() == "/his":
+                self.console.print(self.history)
+                continue
+            # Create appropriate system prompt based on mode
+            system_prompt = SHELL_PROMPT if self.current_mode == EXEC_MODE else DEFAULT_PROMPT
+            system_content = system_prompt.format(_os=self.detect_os(), _shell=self.detect_shell())
+
+            # Create message with system prompt and history
+            message = [{"role": "system", "content": system_content}]
+            message.extend(self.history)
+
+            # Add current user message
+            message.append({"role": "user", "content": user_input})
+
+            # Get response from LLM
+            response = self.post(message)
+            self.console.print("\n[bold green]Assistant:[/bold green]")
+            try:
+                content = self._print(response, stream=self.config["STREAM"] == "true")
+            except Exception as e:
+                self.console.print(f"[red]Error: {e}[/red]")
+                continue

-
+            # Add user input and assistant response to history
+            self.history.append({"role": "user", "content": user_input})
+            self.history.append({"role": "assistant", "content": content})

-
-
-        Args:
-            prompt (str): Prompt to send to LLM
-        Returns:
-            None
-        """
-        if self.current_mode == ModeEnum.EXECUTE.value:
-            self.execute_mode(prompt)  # Execute mode for one-shot prompt
-        else:
-            self.call_llm_api(prompt)
+            # Trim history if needed
+            self._check_history_len()

-
-
-
-
-
-
+            # Handle command execution in exec mode
+            if self.current_mode == EXEC_MODE:
+                content = self._filter_command(content)
+                if not content:
+                    self.console.print("[bold red]No command generated[/bold red]")
+                    continue
+                self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
+                if Confirm.ask("Execute this command?", default=False):
+                    subprocess.call(content, shell=True)

-
-
-
-
-        self.
-        if not self.config.get("API_KEY"
+        self.console.print("[bold green]Exiting...[/bold green]")
+
+    def run(self, chat: bool, shell: bool, prompt: str) -> None:
+        """Run the CLI"""
+        self.load_config()
+        if not self.config.get("API_KEY"):
+            self.console.print("[bold red]API key not set[/bold red]")
             self.console.print(
-                "[red]API key
+                "[bold red]Please set API key in ~/.config/yaicli/config.ini or environment variable[/bold red]"
             )
-
+            raise typer.Exit(code=1)
+        _os = self.detect_os()
+        _shell = self.detect_shell()

-        #
-        if
-            self.
+        # Handle chat mode
+        if chat:
+            self.current_mode = CHAT_MODE
+            self._run_repl()
             return
-        elif chat:
-            self.current_mode = ModeEnum.CHAT.value
-        elif shell:
-            self.current_mode = ModeEnum.EXECUTE.value
-
-        if self.verbose:
-            self.console.print("[bold yellow]Verbose mode enabled[/bold yellow]")
-            self.console.print(f"[bold yellow]Current mode: {self.current_mode}[/bold yellow]")
-            self.console.print(f"[bold yellow]Using model: {self.config['MODEL']}[/bold yellow]")
-
-        if self.current_mode in (ModeEnum.TEMP.value, ModeEnum.EXECUTE.value) and prompt:
-            self.run_one_shot(prompt)
-        elif self.current_mode == ModeEnum.CHAT.value:
-            self.run_repl_loop()
-
-
-# CLI application setup
-CONTEXT_SETTINGS = {
-    "help_option_names": ["-h", "--help"],
-    "show_default": True,
-}

-
-
-
-
-
-
-
-
+        # Create appropriate system prompt based on mode
+        system_prompt = SHELL_PROMPT if shell else DEFAULT_PROMPT
+        system_content = system_prompt.format(_os=_os, _shell=_shell)
+
+        # Create message with system prompt and user input
+        message = [
+            {"role": "system", "content": system_content},
+            {"role": "user", "content": prompt},
+        ]
+
+        # Get response from LLM
+        response = self.post(message)
+        self.console.print("\n[bold green]Assistant:[/bold green]")
+        content = self._print(response, stream=self.config["STREAM"] == "true")
+
+        # Handle shell mode execution
+        if shell:
+            content = self._filter_command(content)
+            if not content:
+                self.console.print("[bold red]No command generated[/bold red]")
+                return
+            self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
+            if Confirm.ask("Execute this command?", default=False):
+                returncode = subprocess.call(content, shell=True)
+                if returncode != 0:
+                    self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")


 @app.command()
 def main(
     ctx: typer.Context,
-    prompt: Annotated[
-
-    ] = "",
-    verbose: Annotated[
-        bool, typer.Option("--verbose", "-V", help="Show verbose information")
-    ] = False,
+    prompt: Annotated[Optional[str], typer.Argument(show_default=False, help="The prompt send to the LLM")] = None,
+    verbose: Annotated[bool, typer.Option("--verbose", "-V", help="Show verbose information")] = False,
     chat: Annotated[bool, typer.Option("--chat", "-c", help="Start in chat mode")] = False,
-    shell: Annotated[
-        bool, typer.Option("--shell", "-s", help="Generate and execute shell command")
-    ] = False,
+    shell: Annotated[bool, typer.Option("--shell", "-s", help="Generate and execute shell command")] = False,
 ):
-    """yaicli
+    """yaicli - Your AI interface in cli."""
+    if prompt == "":
+        typer.echo("Empty prompt, ignored")
+        return
     if not prompt and not chat:
         typer.echo(ctx.get_help())
-
+        return

-    cli =
-    cli.run(chat=chat, shell=shell, prompt=prompt)
+    cli = CLI(verbose=verbose)
+    cli.run(chat=chat, shell=shell, prompt=prompt or "")


 if __name__ == "__main__":
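A closing note on the new `_print` method: it consumes OpenAI-style server-sent events, where each payload line starts with `data: `, carries one JSON chunk whose `choices[0].delta.content` holds the next text fragment, and a literal `[DONE]` sentinel terminates the stream. A minimal offline sketch of the same parsing loop, fed canned bytes instead of a live `requests` response (the sample chunks are assumptions matching the shape the code expects):

    import json

    lines = [
        b'data: {"choices": [{"delta": {"content": "Hello"}}]}',
        b"",  # SSE keep-alive
        b'data: {"choices": [{"delta": {"content": " world"}}]}',
        b"data: [DONE]",
    ]

    full_completion = ""
    for line in lines:
        if not line:                       # skip keep-alive blanks
            continue
        data = line.decode("utf-8")
        if not data.startswith("data: "):  # only SSE data events matter
            continue
        data = data[6:]                    # strip the "data: " prefix
        if data == "[DONE]":               # sentinel: stream finished
            break
        chunk = json.loads(data)
        full_completion += chunk["choices"][0]["delta"].get("content", "")

    print(full_completion)  # -> Hello world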