yaicli 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yaicli.py CHANGED
@@ -3,7 +3,6 @@ import json
  import platform
  import subprocess
  import time
- from enum import StrEnum
  from os import getenv
  from os.path import basename, pathsep
  from pathlib import Path
@@ -21,40 +20,61 @@ from rich.live import Live
  from rich.markdown import Markdown
  from rich.prompt import Confirm

+ SHELL_PROMPT = """Your are a Shell Command Generator.
+ Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
+ Rules:
+ 1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
+ 2. Output STRICTLY in plain text format
+ 3. NEVER use markdown, code blocks or explanations
+ 4. Chain multi-step commands in SINGLE LINE
+ 5. Return NOTHING except the ready-to-run command"""

- class ModeEnum(StrEnum):
-     CHAT = "chat"
-     EXECUTE = "exec"
-     TEMP = "temp"
+ DEFAULT_PROMPT = (
+     "You are yaili, a system management and programing assistant, "
+     "You are managing {_os} operating system with {_shell} shell. "
+     "Your responses should be concise and use Markdown format, "
+     "unless the user explicitly requests more details."
+ )
+
+ CMD_CLEAR = "/clear"
+ CMD_EXIT = "/exit"
+
+ EXEC_MODE = "exec"
+ CHAT_MODE = "chat"
+ TEMP_MODE = "temp"
+
+ DEFAULT_CONFIG_MAP = {
+     "BASE_URL": {"value": "https://api.openai.com/v1", "env_key": "AI_BASE_URL"},
+     "API_KEY": {"value": "", "env_key": "AI_API_KEY"},
+     "MODEL": {"value": "gpt-4o", "env_key": "AI_MODEL"},
+     "SHELL_NAME": {"value": "auto", "env_key": "AI_SHELL_NAME"},
+     "OS_NAME": {"value": "auto", "env_key": "AI_OS_NAME"},
+     "COMPLETION_PATH": {"value": "chat/completions", "env_key": "AI_COMPLETION_PATH"},
+     "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "AI_ANSWER_PATH"},
+     "STREAM": {"value": "true", "env_key": "AI_STREAM"},
+ }
+
+ app = typer.Typer(
+     name="yaicli",
+     context_settings={"help_option_names": ["-h", "--help"]},
+     pretty_exceptions_enable=False,
+ )


  class CasePreservingConfigParser(configparser.RawConfigParser):
+     """Case preserving config parser"""
+
      def optionxform(self, optionstr):
          return optionstr


- class YAICLI:
-     """Main class for YAICLI
-     Chat mode: interactive chat mode
-     One-shot mode:
-         Temp mode: ask a question and get a response once
-         Execute mode: generate and execute shell commands
-     """
-     # Configuration file path
+ class CLI:
      CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
-
-     # Default configuration template
      DEFAULT_CONFIG_INI = """[core]
  BASE_URL=https://api.openai.com/v1
  API_KEY=
  MODEL=gpt-4o

- # default run mode, default: temp
- # chat: interactive chat mode
- # exec: generate and execute shell commands once
- # temp: ask a question and get a response once
- DEFAULT_MODE=temp
-
  # auto detect shell and os
  SHELL_NAME=auto
  OS_NAME=auto
@@ -68,39 +88,62 @@ ANSWER_PATH=choices[0].message.content
  # false: non-streaming response
  STREAM=true"""

-     def __init__(self, verbose: bool = False):
+     def __init__(self, verbose: bool = False) -> None:
          self.verbose = verbose
          self.console = Console()
          self.bindings = KeyBindings()
          self.session = PromptSession(key_bindings=self.bindings)
-         self.current_mode = ModeEnum.CHAT.value
          self.config = {}
          self.history = []
          self.max_history_length = 25
+         self.current_mode = TEMP_MODE

-         # Setup key bindings
-         self._setup_key_bindings()
-
-     def _setup_key_bindings(self):
+     def _setup_key_bindings(self) -> None:
          """Setup keyboard shortcuts"""

-         @self.bindings.add(Keys.ControlI)  # Bind Ctrl+I to switch modes
-         def _(event: KeyPressEvent):
-             self.current_mode = (
-                 ModeEnum.CHAT.value if self.current_mode == ModeEnum.EXECUTE.value else ModeEnum.EXECUTE.value
-             )
+         @self.bindings.add(Keys.ControlI)  # Bind TAB to switch modes
+         def _(event: KeyPressEvent) -> None:
+             self.current_mode = EXEC_MODE if self.current_mode == CHAT_MODE else CHAT_MODE

-     def clear_history(self):
-         """Clear chat history"""
-         self.history = []
+     def load_config(self) -> dict[str, str]:
+         """Load LLM API configuration with priority:
+         1. Environment variables (highest priority)
+         2. Configuration file
+         3. Default values (lowest priority)

-     def detect_os(self) -> str:
-         """Detect operating system
          Returns:
-             str: operating system name
-         Raises:
-             typer.Exit: if there is an error with the request
+             dict: merged configuration
          """
+         # Start with default configuration (lowest priority)
+         merged_config = {k: v["value"] for k, v in DEFAULT_CONFIG_MAP.items()}
+
+         # Create default config file if it doesn't exist
+         if not self.CONFIG_PATH.exists():
+             self.console.print("[bold yellow]Creating default configuration file.[/bold yellow]")
+             self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
+             with open(self.CONFIG_PATH, "w") as f:
+                 f.write(self.DEFAULT_CONFIG_INI)
+         else:
+             # Load from configuration file (middle priority)
+             config_parser = CasePreservingConfigParser()
+             config_parser.read(self.CONFIG_PATH)
+             if "core" in config_parser:
+                 # Update with non-empty values from config file
+                 merged_config.update({k: v for k, v in config_parser["core"].items() if v.strip()})
+
+         # Override with environment variables (highest priority)
+         for key, config in DEFAULT_CONFIG_MAP.items():
+             env_value = getenv(config["env_key"])
+             if env_value is not None:
+                 merged_config[key] = env_value
+
+         merged_config["STREAM"] = str(merged_config.get("STREAM", "true")).lower()
+
+         self.config = merged_config
+         return merged_config
+
+     def detect_os(self) -> str:
+         """Detect operating system + version"""
          if self.config.get("OS_NAME") != "auto":
              return self.config["OS_NAME"]
          current_platform = platform.system()
@@ -113,15 +156,9 @@ STREAM=true"""
          return current_platform

      def detect_shell(self) -> str:
-         """Detect shell
-         Returns:
-             str: shell name
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
+         """Detect shell name"""
          if self.config["SHELL_NAME"] != "auto":
              return self.config["SHELL_NAME"]
-         import platform

          current_platform = platform.system()
          if current_platform in ("Windows", "nt"):
@@ -129,462 +166,199 @@ STREAM=true"""
              return "powershell.exe" if is_powershell else "cmd.exe"
          return basename(getenv("SHELL", "/bin/sh"))

-     def build_cmd_prompt(self) -> str:
-         """Build command prompt
-         Returns:
-             str: command prompt
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
-         _os = self.detect_os()
-         _shell = self.detect_shell()
-         return f"""Your are a Shell Command Generator.
- Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
- Rules:
- 1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
- 2. Output STRICTLY in plain text format
- 3. NEVER use markdown, code blocks or explanations
- 4. Chain multi-step commands in SINGLE LINE
- 5. Return NOTHING except the ready-to-run command"""
-
-     def build_default_prompt(self) -> str:
-         """Build default prompt
-         Returns:
-             str: default prompt
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
-         _os = self.detect_os()
-         _shell = self.detect_shell()
-         return (
-             "You are yaili, a system management and programing assistant, "
-             f"You are managing {_os} operating system with {_shell} shell. "
-             "Your responses should be concise and use Markdown format, "
-             "unless the user explicitly requests more details."
-         )
-
-     def get_default_config(self) -> dict[str, str]:
-         """Get default configuration
-         Returns:
-             dict: default configuration
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
-         config = CasePreservingConfigParser()
-         try:
-             config.read_string(self.DEFAULT_CONFIG_INI)
-             config_dict = {k.upper(): v for k, v in config["core"].items()}
-             config_dict["STREAM"] = str(config_dict.get("STREAM", "true")).lower()
-             return config_dict
-         except configparser.Error as e:
-             self.console.print(f"[red]Error parsing config: {e}[/red]")
-             raise typer.Exit(code=1) from None
-
-     def load_config(self) -> dict[str, str]:
-         """Load LLM API configuration
-         Returns:
-             dict: configuration
-         Raises:
-             typer.Exit: if there is an error with the request
-         """
-         if not self.CONFIG_PATH.exists():
-             self.console.print(
-                 "[bold yellow]Configuration file not found. Creating default configuration file.[/bold yellow]"
-             )
-             self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
-             with open(self.CONFIG_PATH, "w") as f:
-                 f.write(self.DEFAULT_CONFIG_INI)
-             return self.config
-         config = CasePreservingConfigParser()
-         config.read(self.CONFIG_PATH)
-         self.config = dict(config["core"])
-         self.config["STREAM"] = str(self.config.get("STREAM", "true")).lower()
-         return self.config
-
-     def _call_api(self, url: str, headers: dict, data: dict) -> requests.Response:
-         """Call the API and return the response.
-         Args:
-             url: API endpoint URL
-             headers: request headers
-             data: request data
-         Returns:
-             requests.Response: response object
-         Raises:
-             requests.exceptions.RequestException: if there is an error with the request
-         """
-         response = requests.post(url, headers=headers, json=data)
-         response.raise_for_status()  # Raise an exception for non-200 status codes
-         return response
-
-     def get_llm_url(self) -> str:
-         """Get LLM API URL
-         Returns:
-             str: LLM API URL
-         Raises:
-             typer.Exit: if API key or base URL is not set
-         """
-         base = self.config.get("BASE_URL", "").rstrip("/")
-         if not base:
-             self.console.print(
-                 "[red]Base URL not found. Please set it in the configuration file. Default: https://api.openai.com/v1[/red]"
-             )
-             raise typer.Exit(code=1)
-         COMPLETION_PATH = self.config.get("COMPLETION_PATH", "").lstrip("/")
-         if not COMPLETION_PATH:
-             self.console.print(
-                 "[red]Completions path not set. Please set it in the configuration file. Default: `/chat/completions`[/red]"
-             )
-             raise typer.Exit(code=1)
-         return f"{base}/{COMPLETION_PATH}"
-
-     def build_data(self, prompt: str, mode: str = ModeEnum.TEMP.value) -> dict:
-         """Build request data
-         Args:
-             prompt: user input
-             mode: chat or execute mode
-         Returns:
-             dict: request data
-         """
-         if mode == ModeEnum.EXECUTE.value:
-             system_prompt = self.build_cmd_prompt()
-         else:
-             system_prompt = self.build_default_prompt()
-
-         # Build messages list, first add system prompt
-         messages = [{"role": "system", "content": system_prompt}]
-
-         # Add history records in chat mode
-         if mode == ModeEnum.CHAT.value and self.history:
-             messages.extend(self.history)
-
-         # Add current user message
-         messages.append({"role": "user", "content": prompt})
-
-         return {
-             "model": self.config["MODEL"],
-             "messages": messages,
+     def post(self, message: list[dict[str, str]]) -> requests.Response:
+         """Post message to LLM API and return response"""
+         url = self.config.get("BASE_URL", "").rstrip("/") + "/" + self.config.get("COMPLETION_PATH", "").lstrip("/")
+         body = {
+             "messages": message,
+             "model": self.config.get("MODEL", "gpt-4o"),
              "stream": self.config.get("STREAM", "true") == "true",
              "temperature": 0.7,
-             "top_p": 0.7,
-             "max_tokens": 200,
+             "top_p": 1,
          }
+         response = requests.post(url, json=body, headers={"Authorization": f"Bearer {self.config.get('API_KEY', '')}"})
+         try:
+             response.raise_for_status()
+         except requests.exceptions.HTTPError as e:
+             self.console.print(f"[red]Error calling API: {e}[/red]")
+             if self.verbose:
+                 self.console.print(f"Reason: {e.response.reason}")
+                 self.console.print(f"Response: {response.text}")
+             raise typer.Exit(code=1) from None
+         return response

-     def stream_response(self, response: requests.Response) -> str:
-         """Stream response from LLM API
-         Args:
-             response: requests.Response object
-         Returns:
-             str: full completion text
-         """
+     def _print(self, response: requests.Response, stream: bool = True) -> str:
+         """Print response from LLM and return full completion"""
          full_completion = ""
-         # Streaming response loop
-         with Live(console=self.console) as live:
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-                 decoded_line = line.decode("utf-8")
-                 if decoded_line.startswith("data: "):
-                     decoded_line = decoded_line[6:]
-                     if decoded_line == "[DONE]":
+         if stream:
+             with Live() as live:
+                 for line in response.iter_lines():
+                     # Skip empty lines
+                     if not line:
+                         continue
+
+                     # Process server-sent events
+                     data = line.decode("utf-8")
+                     if not data.startswith("data: "):
+                         continue
+
+                     # Extract data portion
+                     data = data[6:]
+                     if data == "[DONE]":
                          break
+
+                     # Parse JSON and update display
                      try:
-                         json_data = json.loads(decoded_line)
+                         json_data = json.loads(data)
                          content = json_data["choices"][0]["delta"].get("content", "")
                          full_completion += content
-                         markdown = Markdown(markup=full_completion)
-                         live.update(markdown, refresh=True)
+                         live.update(Markdown(markup=full_completion), refresh=True)
                      except json.JSONDecodeError:
                          self.console.print("[red]Error decoding response JSON[/red]")
                          if self.verbose:
-                             self.console.print(f"[red]Error decoding JSON: {decoded_line}[/red]")
-                 time.sleep(0.05)
+                             self.console.print(f"[red]Error: {data}[/red]")

+                     time.sleep(0.01)
+         else:
+             # Non-streaming response
+             full_completion = jmespath.search(
+                 self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json()
+             )
+             self.console.print(Markdown(full_completion))
+         self.console.print()  # Add a newline after the response to separate from the next input
          return full_completion

-     def call_llm_api(self, prompt: str) -> str:
-         """Call LLM API, return streaming output
-         Args:
-             prompt: user input
-         Returns:
-             str: streaming output
-         """
-         url = self.get_llm_url()
-         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = self.build_data(prompt)
-         try:
-             response = self._call_api(url, headers, data)
-         except requests.exceptions.RequestException as e:
-             self.console.print(f"[red]Error calling API: {e}[/red]")
-             if self.verbose and e.response:
-                 self.console.print(f"{e.response.text}")
-             raise typer.Exit(code=1) from None
-         if not response:
-             raise typer.Exit(code=1)
-
-         self.console.print("\n[bold green]Assistant:[/bold green]")
-         assistant_response = self.stream_response(response)  # Stream the response and get the full text
-         self.console.print()  # Add a newline after the completion
-
-         return assistant_response
-
-     def get_command_from_llm(self, prompt: str) -> Optional[str]:
-         """Request Shell command from LLM
-         Args:
-             prompt: user input
-         Returns:
-             str: shell command
-         """
-         url = self.get_llm_url()
-         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = self.build_data(prompt, mode=ModeEnum.EXECUTE.value)
-         data["stream"] = False
-         try:
-             response = self._call_api(url, headers, data)
-         except requests.exceptions.RequestException as e:
-             self.console.print(f"[red]Error calling API: {e}[/red]")
-             return None
-         if not response:
-             return None
-         ANSWER_PATH = self.config.get("ANSWER_PATH", None)
-         if not ANSWER_PATH:
-             ANSWER_PATH = "choices[0].message.content"
-             if self.verbose:
-                 self.console.print(
-                     "[bold yellow]Answer path not set. Using default: `choices[0].message.content`[/bold yellow]"
-                 )
-         content = jmespath.search(ANSWER_PATH, response.json())
-         return content.strip()
-
-     def execute_shell_command(self, command: str) -> int:
-         """Execute shell command
-         Args:
-             command: shell command
-         Returns:
-             int: return code
-         """
-         self.console.print(f"\n[bold green]Executing command: [/bold green] {command}\n")
-         result = subprocess.run(command, shell=True)
-         if result.returncode != 0:
-             self.console.print(f"\n[bold red]Command failed with return code: {result.returncode}[/bold red]")
-         return result.returncode
-
-     def get_prompt_tokens(self):
-         """Get prompt tokens based on current mode
-         Returns:
-             list: prompt tokens for prompt_toolkit
-         """
-         if self.current_mode == ModeEnum.CHAT.value:
+     def get_prompt_tokens(self) -> list[tuple[str, str]]:
+         """Return prompt tokens for current mode"""
+         if self.current_mode == CHAT_MODE:
              qmark = "💬"
-         elif self.current_mode == ModeEnum.EXECUTE.value:
+         elif self.current_mode == EXEC_MODE:
              qmark = "🚀"
          else:
              qmark = ""
          return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]

-     def chat_mode(self, user_input: str):
-         """
-         This method handles the chat mode.
-         It adds the user input to the history and calls the API to get a response.
-         It then adds the response to the history and manages the history length.
-         Args:
-             user_input: user input
-         Returns:
-             ModeEnum: current mode
-         """
-         if self.current_mode != ModeEnum.CHAT.value:
-             return self.current_mode
-
-         # Add user message to history
-         self.history.append({"role": "user", "content": user_input})
-
-         # Call API and get response
-         assistant_response = self.call_llm_api(user_input)
-
-         # Add assistant response to history
-         if assistant_response:
-             self.history.append({"role": "assistant", "content": assistant_response})
-
-         # Manage history length, keep recent conversations
-         if len(self.history) > self.max_history_length * 2:  # Each conversation has user and assistant messages
-             self.history = self.history[-self.max_history_length * 2 :]
-
-         return ModeEnum.CHAT.value
-
-     def _filter_command(self, command: str) -> Optional[str]:
-         """Filter out unwanted characters from command
-
-         The LLM may return commands in markdown format with code blocks.
-         This method removes markdown formatting from the command.
-         It handles various formats including:
-         - Commands surrounded by ``` (plain code blocks)
-         - Commands with language specifiers like ```bash, ```zsh, etc.
-         - Commands with specific examples like ```ls -al```
-
-         example:
-         ```bash\nls -la\n``` ==> ls -al
-         ```zsh\nls -la\n``` ==> ls -al
-         ```ls -al``` ==> ls -al
-         ls -al ==> ls -al
-         ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
-         ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
-         """
-         if not command or not command.strip():
-             return ""
-
-         # Handle commands that are already without code blocks
-         if "```" not in command:
-             return command.strip()
-
-         # Handle code blocks with or without language specifiers
-         lines = command.strip().split("\n")
-
-         # Check if it's a single-line code block like ```ls -al```
-         if len(lines) == 1 and lines[0].startswith("```") and lines[0].endswith("```"):
-             return lines[0][3:-3].strip()
-
-         # Handle multi-line code blocks
-         if lines[0].startswith("```"):
-             # Remove the opening ``` line (with or without language specifier)
-             content_lines = lines[1:]
-
-             # If the last line is a closing ```, remove it
-             if content_lines and content_lines[-1].strip() == "```":
-                 content_lines = content_lines[:-1]
-
-             # Join the remaining lines and strip any extra whitespace
-             return "\n".join(line.strip() for line in content_lines if line.strip())
+     def _run_repl(self) -> None:
+         """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
+         # Show REPL instructions
+         self._setup_key_bindings()
+         self.console.print("[bold]Starting REPL loop[/bold]")
+         self.console.print("[bold]Press TAB to change in chat and exec mode[/bold]")
+         self.console.print("[bold]Type /clear to clear chat history[/bold]")
+         self.console.print("[bold]Type /his to see chat history[/bold]")
+         self.console.print("[bold]Press Ctrl+C or type /exit to exit[/bold]\n")

-     def execute_mode(self, user_input: str):
-         """
-         This method generates a shell command from the user input and executes it.
-         If the user confirms the command, it is executed.
-         Args:
-             user_input: user input
-         Returns:
-             ModeEnum: current mode
-         """
-         if user_input == "" or self.current_mode != ModeEnum.EXECUTE.value:
-             return self.current_mode
-
-         command = self.get_command_from_llm(user_input)
-         if not command:
-             self.console.print("[bold red]No command generated[/bold red]")
-             return self.current_mode
-         command = self._filter_command(command)
-         if not command:
-             self.console.print("[bold red]No command generated[/bold red]")
-             return self.current_mode
-         self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {command}")
-         confirm = Confirm.ask("Execute this command?")
-         if confirm:
-             self.execute_shell_command(command)
-         return ModeEnum.EXECUTE.value
-
-     def run_repl_loop(self):
          while True:
-             user_input = self.session.prompt(self.get_prompt_tokens)
-             # Skip empty input
-             if not user_input.strip():
+             # Get user input
+             user_input = self.session.prompt(self.get_prompt_tokens).strip()
+             if not user_input:
                  continue

-             if user_input.lower() in ("/exit", "/quit", "/q"):
+             # Handle exit commands
+             if user_input.lower() == CMD_EXIT:
                  break

-             # Handle special commands
-             if self.current_mode == ModeEnum.CHAT.value:
-                 if user_input.lower() == "/clear":
-                     self.clear_history()
-                     self.console.print("[bold yellow]Chat history cleared[/bold yellow]\n")
-                     continue
-                 else:
-                     self.chat_mode(user_input)
-             elif self.current_mode == ModeEnum.EXECUTE.value:
-                 self.execute_mode(user_input)
+             # Handle clear command
+             if user_input.lower() == CMD_CLEAR and self.current_mode == CHAT_MODE:
+                 self.history = []
+                 self.console.print("[bold yellow]Chat history cleared[/bold yellow]\n")
+                 continue
+             elif user_input.lower() == "/his":
+                 self.console.print(self.history)
+                 continue
+             # Create appropriate system prompt based on mode
+             system_prompt = SHELL_PROMPT if self.current_mode == EXEC_MODE else DEFAULT_PROMPT
+             system_content = system_prompt.format(_os=self.detect_os(), _shell=self.detect_shell())
+
+             # Create message with system prompt and history
+             message = [{"role": "system", "content": system_content}]
+             message.extend(self.history)
+
+             # Add current user message
+             message.append({"role": "user", "content": user_input})
+
+             # Get response from LLM
+             response = self.post(message)
+             self.console.print("\n[bold green]Assistant:[/bold green]")
+             content = self._print(response, stream=self.config["STREAM"] == "true")
+
+             # Add user input and assistant response to history
+             self.history.append({"role": "user", "content": user_input})
+             self.history.append({"role": "assistant", "content": content})
+
+             # Trim history if needed
+             if len(self.history) > self.max_history_length * 2:
+                 self.history = self.history[-self.max_history_length * 2 :]
+
+             # Handle command execution in exec mode
+             if self.current_mode == EXEC_MODE:
+                 self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
+                 if Confirm.ask("Execute this command?", default=False):
+                     returncode = subprocess.call(content, shell=True)
+                     if returncode != 0:
+                         self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")

          self.console.print("[bold green]Exiting...[/bold green]")

-     def run_one_shot(self, prompt: str):
-         """Run one-shot mode with given prompt
-         Args:
-             prompt (str): Prompt to send to LLM
-         Returns:
-             None
-         """
-         if self.current_mode == ModeEnum.EXECUTE.value:
-             self.execute_mode(prompt)  # Execute mode for one-shot prompt
-         else:
-             self.call_llm_api(prompt)
-
-     def run(self, chat=False, shell=False, prompt: Optional[str] = None):
-         """Run the CLI application
-         Args:
-             chat (bool): Whether to run in chat mode
-             shell (bool): Whether to run in shell mode
-             prompt (Optional[str]): Prompt send to LLM
+     def run(self, chat: bool, shell: bool, prompt: str) -> None:
+         self.load_config()
+         if not self.config.get("API_KEY"):
+             self.console.print("[bold red]API key not set[/bold red]")
+             self.console.print(
+                 "[bold red]Please set API key in ~/.config/yaicli/config.ini or environment variable[/bold red]"
+             )
+             raise typer.Exit(code=1)
+         _os = self.detect_os()
+         _shell = self.detect_shell()

-         Returns:
-             None
-         """
-         # Load configuration
-         self.config = self.load_config()
-         if not self.config.get("API_KEY", None):
-             self.console.print("[red]API key not found. Please set it in the configuration file.[/red]")
+         # Handle chat mode
+         if chat:
+             self.current_mode = CHAT_MODE
+             self._run_repl()
              return

-         # Set initial mode
-         self.current_mode = self.config["DEFAULT_MODE"]
+         # Create appropriate system prompt based on mode
+         system_prompt = SHELL_PROMPT if shell else DEFAULT_PROMPT
+         system_content = system_prompt.format(_os=_os, _shell=_shell)

-         # Check run mode from command line arguments
-         if all([chat, shell]):
-             self.console.print("[red]Cannot use both --chat and --shell[/red]")
-             return
-         elif chat:
-             self.current_mode = ModeEnum.CHAT.value
-         elif shell:
-             self.current_mode = ModeEnum.EXECUTE.value
-
-         if self.verbose:
-             self.console.print("[bold yellow]Verbose mode enabled[/bold yellow]")
-             self.console.print(f"[bold yellow]Current mode: {self.current_mode}[/bold yellow]")
-             self.console.print(f"[bold yellow]Using model: {self.config['MODEL']}[/bold yellow]")
-
-         if self.current_mode in (ModeEnum.TEMP.value, ModeEnum.EXECUTE.value) and prompt:
-             self.run_one_shot(prompt)
-         elif self.current_mode == ModeEnum.CHAT.value:
-             self.run_repl_loop()
-
-
- # CLI application setup
- CONTEXT_SETTINGS = {
-     "help_option_names": ["-h", "--help"],
-     "show_default": True,
- }
+         # Create message with system prompt and user input
+         message = [
+             {"role": "system", "content": system_content},
+             {"role": "user", "content": prompt},
+         ]

- app = typer.Typer(
-     name="yaicli",
-     context_settings=CONTEXT_SETTINGS,
-     pretty_exceptions_enable=False,
-     short_help="yaicli. Your AI interface in cli.",
-     no_args_is_help=True,
-     invoke_without_command=True,
- )
+         # Get response from LLM
+         response = self.post(message)
+         self.console.print("\n[bold green]Assistant:[/bold green]")
+         content = self._print(response, stream=(not shell and self.config["STREAM"] == "true"))
+
+         # Handle shell mode execution
+         if shell:
+             self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
+             if Confirm.ask("Execute this command?", default=False):
+                 returncode = subprocess.call(content, shell=True)
+                 if returncode != 0:
+                     self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")


  @app.command()
  def main(
      ctx: typer.Context,
-     prompt: Annotated[str, typer.Argument(show_default=False, help="The prompt send to the LLM")] = "",
+     prompt: Annotated[Optional[str], typer.Argument(show_default=False, help="The prompt send to the LLM")] = None,
      verbose: Annotated[bool, typer.Option("--verbose", "-V", help="Show verbose information")] = False,
      chat: Annotated[bool, typer.Option("--chat", "-c", help="Start in chat mode")] = False,
      shell: Annotated[bool, typer.Option("--shell", "-s", help="Generate and execute shell command")] = False,
  ):
-     """yaicli. Your AI interface in cli."""
+     """yaicli - Your AI interface in cli."""
+     if prompt == "":
+         typer.echo("Empty prompt, ignored")
+         return
      if not prompt and not chat:
          typer.echo(ctx.get_help())
-         raise typer.Exit()
+         return

-     cli = YAICLI(verbose=verbose)
-     cli.run(chat=chat, shell=shell, prompt=prompt)
+     cli = CLI(verbose=verbose)
+     cli.run(chat=chat, shell=shell, prompt=prompt or "")


  if __name__ == "__main__":