yaicli 0.0.13__tar.gz → 0.0.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.0.13
+Version: 0.0.15
 Summary: A simple CLI tool to interact with LLM
 Project-URL: Homepage, https://github.com/belingud/yaicli
 Project-URL: Repository, https://github.com/belingud/yaicli
@@ -233,7 +233,7 @@ YAICLI is a compact yet potent command-line AI assistant, allowing you to engage
 
 Support regular and deep thinking models.
 
-> [!WARNING]
+> [!WARNING]
 > This is a work in progress, some features could change or be removed in the future.
 
 ## Features
@@ -319,6 +319,7 @@ ANSWER_PATH=choices[0].message.content
 # true: streaming response
 # false: non-streaming response
 STREAM=true
+CODE_THEME=monokia
 
 TEMPERATURE=0.7
 TOP_P=1.0
@@ -337,6 +338,7 @@ Below are the available configuration options and override environment variables
 - **COMPLETION_PATH**: Path for completions endpoint, default: /chat/completions, env: YAI_COMPLETION_PATH
 - **ANSWER_PATH**: Json path expression to extract answer from response, default: choices[0].message.content, env: YAI_ANSWER_PATH
 - **STREAM**: Enable/disable streaming responses, default: true, env: YAI_STREAM
+- **CODE_THEME**: Theme for code blocks, default: monokia, env: YAI_CODE_THEME
 - **TEMPERATURE**: Temperature for response generation (default: 0.7), env: YAI_TEMPERATURE
 - **TOP_P**: Top-p sampling for response generation (default: 1.0), env: YAI_TOP_P
 - **MAX_TOKENS**: Maximum number of tokens for response generation (default: 1024), env: YAI_MAX_TOKENS
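
The new `CODE_THEME` value is handed straight to rich's Markdown renderer, which accepts any Pygments style name (the README guide below links to the style gallery). A minimal standalone sketch of the mechanism — not yaicli's own code — is shown here; note that rich quietly falls back to a plain default style for unrecognized names, which is presumably why the misspelled `monokia` default still renders:

```python
from rich.console import Console
from rich.markdown import Markdown

console = Console()

# Any Pygments style name works here, e.g. "monokai", "dracula", "solarized-dark".
snippet = "```python\nprint('hello')\n```"
console.print(Markdown(snippet, code_theme="monokai"))
```
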
@@ -392,6 +394,12 @@ If you not sure how to config `COMPLETION_PATH` and `ANSWER_PATH`, here is a gui
 ```
 We are looking for the `text` field, so the path should be 1.Key `content`, 2.First obj `[0]`, 3.Key `text`. So it should be `content.[0].text`.
 
+**CODE_THEME**
+
+You can find the list of code theme here: https://pygments.org/styles/
+
+Default: monokia
+![alt text](artwork/monokia.png)
 
 ## Usage
 
@@ -430,10 +438,10 @@ Run Options:
 ```bash
 ai -h
 
-Usage: ai [OPTIONS] [PROMPT]
-
- yaicli - Your AI interface in cli.
-
+Usage: ai [OPTIONS] [PROMPT]
+
+ yaicli - Your AI interface in cli.
+
 ╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
 │ prompt [PROMPT] The prompt send to the LLM │
 ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
@@ -484,9 +492,10 @@ The capital of France is Paris.
 $ ai -s 'Check the current directory size'
 Assistant:
 du -sh .
-
-Generated command: du -sh .
-Execute this command? [y/n/e] (n): e
+╭─ Command ─╮
+│ du -sh .  │
+╰───────────╯
+Execute command? [e]dit, [y]es, [n]o (n): e
 Edit command, press enter to execute:
 du -sh ./
 Output:
@@ -530,9 +539,10 @@ Certainly! Here’s a brief overview of the solar system:
 🚀 > Check the current directory size
 Assistant:
 du -sh .
-
-Generated command: du -sh .
-Execute this command? [y/n/e] (n): e
+╭─ Command ─╮
+│ du -sh .  │
+╰───────────╯
+Execute command? [e]dit, [y]es, [n]o (n): e
 Edit command, press enter to execute:
 du -sh ./
 Output:
@@ -544,11 +554,13 @@ Output:
 
 ```bash
 $ ai --shell "Find all PDF files in my Downloads folder"
-
-Generated command: find ~/Downloads -type f -name "*.pdf"
-Execute this command? [y/n]: y
-
-Executing command: find ~/Downloads -type f -name "*.pdf"
+Assistant:
+find ~/Downloads -type f -name "*.pdf"
+╭─ Command ──────────────────────────────╮
+│ find ~/Downloads -type f -name "*.pdf" │
+╰────────────────────────────────────────╯
+Execute command? [e]dit, [y]es, [n]o (n): y
+Output:
 
 /Users/username/Downloads/document1.pdf
 /Users/username/Downloads/report.pdf
@@ -9,7 +9,7 @@ YAICLI is a compact yet potent command-line AI assistant, allowing you to engage
 
 Support regular and deep thinking models.
 
-> [!WARNING]
+> [!WARNING]
 > This is a work in progress, some features could change or be removed in the future.
 
 ## Features
@@ -95,6 +95,7 @@ ANSWER_PATH=choices[0].message.content
 # true: streaming response
 # false: non-streaming response
 STREAM=true
+CODE_THEME=monokia
 
 TEMPERATURE=0.7
 TOP_P=1.0
@@ -113,6 +114,7 @@ Below are the available configuration options and override environment variables
 - **COMPLETION_PATH**: Path for completions endpoint, default: /chat/completions, env: YAI_COMPLETION_PATH
 - **ANSWER_PATH**: Json path expression to extract answer from response, default: choices[0].message.content, env: YAI_ANSWER_PATH
 - **STREAM**: Enable/disable streaming responses, default: true, env: YAI_STREAM
+- **CODE_THEME**: Theme for code blocks, default: monokia, env: YAI_CODE_THEME
 - **TEMPERATURE**: Temperature for response generation (default: 0.7), env: YAI_TEMPERATURE
 - **TOP_P**: Top-p sampling for response generation (default: 1.0), env: YAI_TOP_P
 - **MAX_TOKENS**: Maximum number of tokens for response generation (default: 1024), env: YAI_MAX_TOKENS
@@ -168,6 +170,12 @@ If you not sure how to config `COMPLETION_PATH` and `ANSWER_PATH`, here is a gui
 ```
 We are looking for the `text` field, so the path should be 1.Key `content`, 2.First obj `[0]`, 3.Key `text`. So it should be `content.[0].text`.
 
+**CODE_THEME**
+
+You can find the list of code theme here: https://pygments.org/styles/
+
+Default: monokia
+![alt text](artwork/monokia.png)
 
 ## Usage
 
@@ -206,10 +214,10 @@ Run Options:
 ```bash
 ai -h
 
-Usage: ai [OPTIONS] [PROMPT]
-
- yaicli - Your AI interface in cli.
-
+Usage: ai [OPTIONS] [PROMPT]
+
+ yaicli - Your AI interface in cli.
+
 ╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
 │ prompt [PROMPT] The prompt send to the LLM │
 ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
@@ -260,9 +268,10 @@ The capital of France is Paris.
 $ ai -s 'Check the current directory size'
 Assistant:
 du -sh .
-
-Generated command: du -sh .
-Execute this command? [y/n/e] (n): e
+╭─ Command ─╮
+│ du -sh .  │
+╰───────────╯
+Execute command? [e]dit, [y]es, [n]o (n): e
 Edit command, press enter to execute:
 du -sh ./
 Output:
@@ -306,9 +315,10 @@ Certainly! Here’s a brief overview of the solar system:
 🚀 > Check the current directory size
 Assistant:
 du -sh .
-
-Generated command: du -sh .
-Execute this command? [y/n/e] (n): e
+╭─ Command ─╮
+│ du -sh .  │
+╰───────────╯
+Execute command? [e]dit, [y]es, [n]o (n): e
 Edit command, press enter to execute:
 du -sh ./
 Output:
@@ -320,11 +330,13 @@ Output:
 
 ```bash
 $ ai --shell "Find all PDF files in my Downloads folder"
-
-Generated command: find ~/Downloads -type f -name "*.pdf"
-Execute this command? [y/n]: y
-
-Executing command: find ~/Downloads -type f -name "*.pdf"
+Assistant:
+find ~/Downloads -type f -name "*.pdf"
+╭─ Command ──────────────────────────────╮
+│ find ~/Downloads -type f -name "*.pdf" │
+╰────────────────────────────────────────╯
+Execute command? [e]dit, [y]es, [n]o (n): y
+Output:
 
 /Users/username/Downloads/document1.pdf
 /Users/username/Downloads/report.pdf
@@ -1,6 +1,6 @@
 [project]
 name = "yaicli"
-version = "0.0.13"
+version = "0.0.15"
 description = "A simple CLI tool to interact with LLM"
 authors = [{ name = "belingud", email = "im.victor@qq.com" }]
 readme = "README.md"
@@ -2,6 +2,7 @@ import configparser
 import json
 import platform
 import subprocess
+import time
 from os import getenv
 from os.path import basename, pathsep
 from pathlib import Path
@@ -11,7 +12,7 @@ import httpx
 import jmespath
 import typer
 from distro import name as distro_name
-from prompt_toolkit import PromptSession
+from prompt_toolkit import PromptSession, prompt
 from prompt_toolkit.completion import WordCompleter
 from prompt_toolkit.history import FileHistory
 from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
@@ -19,6 +20,7 @@ from prompt_toolkit.keys import Keys
 from rich.console import Console
 from rich.live import Live
 from rich.markdown import Markdown
+from rich.panel import Panel
 from rich.prompt import Prompt
 
 SHELL_PROMPT = """Your are a Shell Command Generator.
@@ -54,6 +56,7 @@ DEFAULT_CONFIG_MAP = {
     "COMPLETION_PATH": {"value": "chat/completions", "env_key": "YAI_COMPLETION_PATH"},
     "ANSWER_PATH": {"value": "choices[0].message.content", "env_key": "YAI_ANSWER_PATH"},
     "STREAM": {"value": "true", "env_key": "YAI_STREAM"},
+    "CODE_THEME": {"value": "monokia", "env_key": "YAI_CODE_THEME"},
     "TEMPERATURE": {"value": "0.7", "env_key": "YAI_TEMPERATURE"},
     "TOP_P": {"value": "1.0", "env_key": "YAI_TOP_P"},
     "MAX_TOKENS": {"value": "1024", "env_key": "YAI_MAX_TOKENS"},
@@ -77,6 +80,7 @@ ANSWER_PATH=choices[0].message.content
 # true: streaming response
 # false: non-streaming response
 STREAM=true
+CODE_THEME=monokia
 
 TEMPERATURE=0.7
 TOP_P=1.0
@@ -192,7 +196,7 @@ class CLI:
         if current_platform in ("Windows", "nt"):
             is_powershell = len(getenv("PSModulePath", "").split(pathsep)) >= 3
             return "powershell.exe" if is_powershell else "cmd.exe"
-        return basename(getenv("SHELL", "/bin/sh"))
+        return basename(getenv("SHELL", None) or "/bin/sh")
 
     def _filter_command(self, command: str) -> Optional[str]:
         """Filter out unwanted characters from command
@@ -323,10 +327,12 @@ class CLI:
     def _print_stream(self, response: httpx.Response) -> str:
         """Print response from LLM in streaming mode"""
         self.console.print("Assistant:", style="bold green")
-        full_completion = ""
+        full_content = ""
         in_reasoning = False
+        cursor_chars = ["_", " "]
+        cursor_index = 0
 
-        with Live() as live:
+        with Live(console=self.console) as live:
             for line in response.iter_lines():
                 json_data = self._parse_stream_line(line)
                 if not json_data:
@@ -336,24 +342,25 @@ class CLI:
 
                 reason = self.get_reasoning_content(delta)
                 if reason is not None:
-                    full_completion, in_reasoning = self._process_reasoning_content(
-                        reason, full_completion, in_reasoning
-                    )
+                    full_content, in_reasoning = self._process_reasoning_content(reason, full_content, in_reasoning)
                 else:
-                    full_completion, in_reasoning = self._process_regular_content(
-                        delta.get("content", "") or "", full_completion, in_reasoning
+                    full_content, in_reasoning = self._process_regular_content(
+                        delta.get("content", "") or "", full_content, in_reasoning
                     )
 
-                live.update(Markdown(markup=full_completion), refresh=True)
-                # self.console.print()
-        return full_completion
+                cursor = cursor_chars[cursor_index]
+                live.update(Markdown(markup=full_content + cursor, code_theme=self.config["CODE_THEME"]), refresh=True)
+                cursor_index = (cursor_index + 1) % 2
+                time.sleep(0.005)  # Slow down the printing speed, avoiding screen flickering
+            live.update(Markdown(markup=full_content, code_theme=self.config["CODE_THEME"]), refresh=True)
+        return full_content
 
     def _print_normal(self, response: httpx.Response) -> str:
         """Print response from LLM in non-streaming mode"""
         self.console.print("Assistant:", style="bold green")
-        full_completion = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
-        self.console.print(Markdown(full_completion + '\n'))
-        return full_completion
+        full_content = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
+        self.console.print(Markdown(full_content + "\n", code_theme=self.config["CODE_THEME"]))
+        return full_content
 
     def get_prompt_tokens(self) -> list[tuple[str, str]]:
         """Return prompt tokens for current mode"""
@@ -384,13 +391,19 @@ class CLI:
         if not cmd:
             self.console.print("No command generated", style="bold red")
             return
-        self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {cmd}")
-        _input = Prompt.ask("Execute this command?", choices=['y', 'n', 'e'], default="n", case_sensitive=False)
-        if _input == 'y':  # execute cmd
+        self.console.print(Panel(cmd, title="Command", title_align="left", border_style="bold magenta", expand=False))
+        _input = Prompt.ask(
+            r"Execute command? \[e]dit, \[y]es, \[n]o",
+            choices=["y", "n", "e"],
+            default="n",
+            case_sensitive=False,
+            show_choices=False,
+        )
+        if _input == "y":  # execute cmd
             self.console.print("Output:", style="bold green")
             subprocess.call(cmd, shell=True)
-        elif _input == 'e':  # edit cmd
-            cmd = self.session.prompt("Edit command, press enter to execute:\n", key_bindings=None, default=cmd)
+        elif _input == "e":  # edit cmd
+            cmd = prompt("Edit command, press enter to execute:\n", default=cmd)
             self.console.print("Output:", style="bold green")
             subprocess.call(cmd, shell=True)
 
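
The new confirm flow combines two rich primitives: `Panel` with `expand=False` so the frame hugs the command, and `Prompt.ask` with `show_choices=False` so the bracketed hints in the question text replace rich's own `[y/n/e]` suffix (the `\[` escapes keep rich from parsing `[e]dit` as markup). The same pattern in isolation, with an invented sample command:

```python
from rich.console import Console
from rich.panel import Panel
from rich.prompt import Prompt

console = Console()
cmd = "du -sh ."  # stand-in for an LLM-generated command

# expand=False shrinks the frame to fit the command instead of the terminal width.
console.print(Panel(cmd, title="Command", title_align="left", border_style="bold magenta", expand=False))

choice = Prompt.ask(
    r"Execute command? \[e]dit, \[y]es, \[n]o",  # escaped so the brackets print literally
    choices=["y", "n", "e"],
    default="n",
    show_choices=False,
)
print("chose:", choice)
```

The switch from `self.session.prompt(...)` to prompt_toolkit's module-level `prompt(...)` in the edit branch likely also keeps edited commands out of the session's persistent file history, though the diff itself doesn't state the motivation.
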
@@ -398,7 +411,7 @@ class CLI:
         return [
             {"role": "system", "content": self.get_system_prompt()},
             *self.history,
-            {"role": "user", "content": user_input}
+            {"role": "user", "content": user_input},
         ]
 
     def _handle_llm_response(self, response: httpx.Response, user_input: str) -> str: