yaicli 0.0.7__py3-none-any.whl → 0.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyproject.toml ADDED
@@ -0,0 +1,66 @@
+ [project]
+ name = "yaicli"
+ version = "0.0.10"
+ description = "A simple CLI tool to interact with LLM"
+ authors = [{ name = "belingud", email = "im.victor@qq.com" }]
+ readme = "README.md"
+ requires-python = ">=3.9"
+ license = { file = "LICENSE" }
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "License :: OSI Approved :: MIT License",
+     "Operating System :: OS Independent",
+ ]
+ keywords = [
+     "cli",
+     "llm",
+     "ai",
+     "chatgpt",
+     "openai",
+     "gpt",
+     "llms",
+     "openai",
+     "terminal",
+     "interactive",
+     "interact",
+     "interact with llm",
+     "interact with chatgpt",
+     "interact with openai",
+     "interact with gpt",
+     "interact with llms",
+ ]
+ dependencies = [
+     "distro>=1.9.0",
+     "jmespath>=1.0.1",
+     "prompt-toolkit>=3.0.50",
+     "requests>=2.32.3",
+     "rich>=13.9.4",
+     "typer>=0.15.2",
+ ]
+ [project.urls]
+ Homepage = "https://github.com/belingud/yaicli"
+ Repository = "https://github.com/belingud/yaicli"
+ Documentation = "https://github.com/belingud/yaicli"
+
+ [project.scripts]
+ ai = "yaicli:app"
+
+ [tool.uv]
+ resolution = "highest"
+
+ [dependency-groups]
+ dev = ["bump2version>=1.0.1", "pytest>=8.3.5", "ruff>=0.11.2"]
+
+ [tool.ruff]
+ line-length = 120
+ select = ["E", "F", "W", "I", "B", "C90"]
+ ignore = ["E501"]
+
+
+ [build-system]
+ requires = ["hatchling>=1.18.0"]
+ build-backend = "hatchling.build"
+
+ [tool.hatch.build]
+ exclude = ["test_*.py", "tests/*", ".gitignore"]
+ include = ["yaicli.py", "pyproject.toml"]
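The `[project.scripts]` table is what makes this a CLI: `ai = "yaicli:app"` tells the installer to generate an `ai` executable that imports the `yaicli` module and invokes its module-level `app` object. A minimal sketch of the shape that entry point expects (illustrative only, not the package's actual code):

```python
# Minimal sketch: what `ai = "yaicli:app"` resolves to at install time.
# yaicli.py must expose a module-level Typer instance named `app`;
# the command body here is a placeholder, not yaicli's real logic.
import typer

app = typer.Typer()

@app.command()
def main(prompt: str = typer.Argument(None, help="Your question for the LLM")):
    typer.echo(f"prompt: {prompt}")

if __name__ == "__main__":
    app()
```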
yaicli-0.0.7.dist-info/METADATA → yaicli-0.0.10.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: yaicli
- Version: 0.0.7
+ Version: 0.0.10
  Summary: A simple CLI tool to interact with LLM
  Project-URL: Homepage, https://github.com/belingud/yaicli
  Project-URL: Repository, https://github.com/belingud/yaicli
@@ -223,7 +223,14 @@ Description-Content-Type: text/markdown

  # YAICLI - Your AI Command Line Interface

- YAICLI is a powerful command-line AI assistant tool that enables you to interact with Large Language Models (LLMs) like ChatGPT's gpt-4o through your terminal. It offers multiple operation modes for everyday conversations, generating and executing shell commands, and one-shot quick queries.
+ [![PyPI version](https://img.shields.io/pypi/v/yaicli?style=for-the-badge)](https://pypi.org/project/yaicli/)
+ ![GitHub License](https://img.shields.io/github/license/belingud/yaicli?style=for-the-badge)
+ ![PyPI - Downloads](https://img.shields.io/pypi/dm/yaicli?logo=pypi&style=for-the-badge)
+ ![Pepy Total Downloads](https://img.shields.io/pepy/dt/yaicli?style=for-the-badge&logo=python)
+
+ YAICLI is a compact yet powerful command-line AI assistant that lets you engage with Large Language Models (LLMs) such as OpenAI's gpt-4o directly from your terminal. It offers multiple operation modes for everyday conversations, generating and executing shell commands, and one-shot quick queries.
+
+ Supports both regular and deep-thinking (reasoning) models.

  > [!WARNING]
  > This is a work in progress, some features could change or be removed in the future.
@@ -252,6 +259,9 @@ YAICLI is a powerful command-line AI assistant tool that enables you to interact
  - **Keyboard Shortcuts**:
    - Tab to switch between Chat and Execute modes

+ - **History**:
+   - Save and recall previous queries
+
  ## Installation

  ### Prerequisites
@@ -291,6 +301,7 @@ The default configuration file is located at `~/.config/yaicli/config.ini`. Look

  ```ini
  [core]
+ PROVIDER=OPENAI
  BASE_URL=https://api.openai.com/v1
  API_KEY=your_api_key_here
  MODEL=gpt-4o
@@ -322,6 +333,58 @@ Below are the available configuration options and override environment variables
  - **ANSWER_PATH**: JSON path expression to extract the answer from the response, default: choices[0].message.content, env: AI_ANSWER_PATH
  - **STREAM**: Enable/disable streaming responses, default: true, env: AI_STREAM

+ The default values of `COMPLETION_PATH` and `ANSWER_PATH` are OpenAI-compatible. If you are using OpenAI or another OpenAI-compatible LLM provider, you can keep the defaults.
+
+ If you wish to use a provider that is not compatible with the OpenAI interface, you can use the following config:
+
+ - claude:
+   - BASE_URL: https://api.anthropic.com/v1
+   - COMPLETION_PATH: /messages
+   - ANSWER_PATH: content.0.text
+ - cohere:
+   - BASE_URL: https://api.cohere.com/v2
+   - COMPLETION_PATH: /chat
+   - ANSWER_PATH: message.content.[0].text
+ - google:
+   - BASE_URL: https://generativelanguage.googleapis.com/v1beta/openai
+   - COMPLETION_PATH: /chat/completions
+   - ANSWER_PATH: choices[0].message.content
+
+ You can use Google's OpenAI-compatible endpoint and leave `COMPLETION_PATH` and `ANSWER_PATH` at their defaults, with BASE_URL: https://generativelanguage.googleapis.com/v1beta/openai. See https://ai.google.dev/gemini-api/docs/openai
+
+ Claude also has an OpenAI-compatible interface, so you can use the Claude endpoint and leave `COMPLETION_PATH` and `ANSWER_PATH` at their defaults. See: https://docs.anthropic.com/en/api/openai-sdk
+
+ If you are not sure how to configure `COMPLETION_PATH` and `ANSWER_PATH`, here is a guide:
+ 1. **Find the API Endpoint**:
+    - Visit the documentation of the LLM provider you want to use.
+    - Find the API endpoint for the completion task. This is usually under the "API Reference" or "Developer Documentation" section.
+ 2. **Identify the Response Structure**:
+    - Look for the structure of the response. This typically includes fields like `choices`, `completion`, etc.
+ 3. **Identify the Path Expression**:
+    For example, a Claude response is structured like this:
+    ```json
+    {
+      "content": [
+        {
+          "text": "Hi! My name is Claude.",
+          "type": "text"
+        }
+      ],
+      "id": "msg_013Zva2CMHLNnXjNJJKqJ2EF",
+      "model": "claude-3-7-sonnet-20250219",
+      "role": "assistant",
+      "stop_reason": "end_turn",
+      "stop_sequence": null,
+      "type": "message",
+      "usage": {
+        "input_tokens": 2095,
+        "output_tokens": 503
+      }
+    }
+    ```
+    We are looking for the `text` field, so the path is the key `content`, then the first element `[0]`, then the key `text`: `content.[0].text`.

  ## Usage

  ### Basic Usage
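To make the path guide above concrete, here is a minimal sketch (not part of the package) of evaluating an `ANSWER_PATH`-style expression with the `jmespath` dependency against the Claude response shape; it uses the canonical jmespath index form `content[0].text`:

```python
# Minimal sketch: extracting the answer with a jmespath path expression.
# The response dict mirrors the Claude example above; this script is
# illustrative and not part of yaicli itself.
import jmespath

response = {
    "content": [{"text": "Hi! My name is Claude.", "type": "text"}],
    "role": "assistant",
    "stop_reason": "end_turn",
}

# For an OpenAI-shaped response the default path would be
# "choices[0].message.content"; for this shape we index into "content".
print(jmespath.search("content[0].text", response))  # Hi! My name is Claude.
```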
yaicli-0.0.10.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+ pyproject.toml,sha256=A7V65XMQYQCKkQjQAblk0YfMu300eE2HkDXGf_85CZU,1451
+ yaicli.py,sha256=jCM121KssbZWNAmC5-v-tVQtTUCfn_6MWOAC3KUuNys,18539
+ yaicli-0.0.10.dist-info/METADATA,sha256=h-p0RxU90dWoFZeipw1RlXP1VnXmRoWo6gy9t0bUL1E,25860
+ yaicli-0.0.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ yaicli-0.0.10.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
+ yaicli-0.0.10.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ yaicli-0.0.10.dist-info/RECORD,,
yaicli.py CHANGED
@@ -2,7 +2,6 @@ import configparser
  import json
  import platform
  import subprocess
- import time
  from os import getenv
  from os.path import basename, pathsep
  from pathlib import Path
@@ -30,7 +29,7 @@ Rules:
  5. Return NOTHING except the ready-to-run command"""

  DEFAULT_PROMPT = (
-     "You are yaili, a system management and programing assistant, "
+     "You are YAICLI, a system management and programing assistant, "
      "You are managing {_os} operating system with {_shell} shell. "
      "Your responses should be concise and use Markdown format, "
      "unless the user explicitly requests more details."
@@ -71,6 +70,7 @@ class CasePreservingConfigParser(configparser.RawConfigParser):
  class CLI:
      CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
      DEFAULT_CONFIG_INI = """[core]
+ PROVIDER=openai
  BASE_URL=https://api.openai.com/v1
  API_KEY=
  MODEL=gpt-4o
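The hunk context shows the config is read through a `CasePreservingConfigParser`. Its body is not part of this diff, but the name implies an `optionxform` override, since `RawConfigParser` lowercases option names by default while this config uses upper-case keys like `PROVIDER` and `BASE_URL`. A hedged sketch of that idea:

```python
# Sketch under one assumption: CasePreservingConfigParser overrides
# optionxform to keep option names as written (the override below is
# inferred from the class name, not copied from yaicli.py).
import configparser

class CasePreservingConfigParser(configparser.RawConfigParser):
    def optionxform(self, optionstr: str) -> str:
        return optionstr  # keep PROVIDER/BASE_URL/... in their original case

parser = CasePreservingConfigParser()
parser.read_string("[core]\nPROVIDER=openai\nBASE_URL=https://api.openai.com/v1\n")
print(dict(parser["core"]))
# {'PROVIDER': 'openai', 'BASE_URL': 'https://api.openai.com/v1'}
```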
@@ -166,6 +166,50 @@ STREAM=true"""
              return "powershell.exe" if is_powershell else "cmd.exe"
          return basename(getenv("SHELL", "/bin/sh"))

+     def _filter_command(self, command: str) -> Optional[str]:
+         """Filter out unwanted characters from command
+
+         The LLM may return commands in markdown format with code blocks.
+         This method removes markdown formatting from the command.
+         It handles various formats including:
+         - Commands surrounded by ``` (plain code blocks)
+         - Commands with language specifiers like ```bash, ```zsh, etc.
+         - Commands with specific examples like ```ls -al```
+
+         example:
+         ```bash\nls -la\n``` ==> ls -la
+         ```zsh\nls -la\n``` ==> ls -la
+         ```ls -al``` ==> ls -al
+         ls -al ==> ls -al
+         ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
+         ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
+         """
+         if not command or not command.strip():
+             return ""
+
+         # Handle commands that are already without code blocks
+         if "```" not in command:
+             return command.strip()
+
+         # Handle code blocks with or without language specifiers
+         lines = command.strip().split("\n")
+
+         # Check if it's a single-line code block like ```ls -al```
+         if len(lines) == 1 and lines[0].startswith("```") and lines[0].endswith("```"):
+             return lines[0][3:-3].strip()
+
+         # Handle multi-line code blocks
+         if lines[0].startswith("```"):
+             # Remove the opening ``` line (with or without language specifier)
+             content_lines = lines[1:]
+
+             # If the last line is a closing ```, remove it
+             if content_lines and content_lines[-1].strip() == "```":
+                 content_lines = content_lines[:-1]
+
+             # Join the remaining lines and strip any extra whitespace
+             return "\n".join(line.strip() for line in content_lines if line.strip())
+
      def post(self, message: list[dict[str, str]]) -> requests.Response:
          """Post message to LLM API and return response"""
          url = self.config.get("BASE_URL", "").rstrip("/") + "/" + self.config.get("COMPLETION_PATH", "").lstrip("/")
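A quick way to sanity-check the stripping rules documented in that docstring (assuming `cli` is an instantiated `CLI` object; the expected outputs follow from the logic of `_filter_command` above):

```python
# Assumes `cli` is an instantiated CLI object from yaicli.
cases = [
    "ls -la",                     # no code block: returned as-is
    "```ls -la```",               # single-line code block
    "```bash\nls -la\n```",       # fenced block with a language specifier
    "```\ncd /tmp\nls -la\n```",  # multi-line block without a specifier
]
for raw in cases:
    print(repr(cli._filter_command(raw)))
# 'ls -la'
# 'ls -la'
# 'ls -la'
# 'cd /tmp\nls -la'
```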
@@ -187,44 +231,75 @@ STREAM=true"""
              raise typer.Exit(code=1) from None
          return response

-     def _print(self, response: requests.Response, stream: bool = True) -> str:
-         """Print response from LLM and return full completion"""
+     def get_reasoning_content(self, delta: dict) -> Optional[str]:
+         # reasoning: openrouter
+         # reasoning_content: infi-ai/deepseek
+         for k in ("reasoning_content", "reasoning"):
+             if k in delta:
+                 return delta[k]
+         return None
+
+     def _print_stream(self, response: requests.Response) -> str:
+         """Print response from LLM in streaming mode"""
          full_completion = ""
-         if stream:
-             with Live() as live:
-                 for line in response.iter_lines():
-                     # Skip empty lines
-                     if not line:
-                         continue
+         in_reasoning = False

-                     # Process server-sent events
-                     data = line.decode("utf-8")
-                     if not data.startswith("data: "):
-                         continue
+         with Live() as live:
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 data = line.decode("utf-8")
+                 if not data.startswith("data: "):
+                     continue

-                     # Extract data portion
-                     data = data[6:]
-                     if data == "[DONE]":
-                         break
+                 data = data[6:]
+                 if data == "[DONE]":
+                     break

-                     # Parse JSON and update display
-                     try:
-                         json_data = json.loads(data)
-                         content = json_data["choices"][0]["delta"].get("content", "")
+                 try:
+                     json_data = json.loads(data)
+                     if not json_data.get("choices"):
+                         continue
+
+                     delta = json_data["choices"][0]["delta"]
+                     reason = self.get_reasoning_content(delta)
+
+                     if reason is not None:
+                         # reasoning started
+                         if not in_reasoning:
+                             in_reasoning = True
+                             full_completion = "> Reasoning:\n> "
+                         full_completion += reason.replace("\n", "\n> ")
+                     else:
+                         # reasoning stopped
+                         if in_reasoning:
+                             in_reasoning = False
+                             full_completion += "\n\n"
+                         content = delta.get("content", "") or ""
                          full_completion += content
-                         live.update(Markdown(markup=full_completion), refresh=True)
-                     except json.JSONDecodeError:
-                         self.console.print("[red]Error decoding response JSON[/red]")
-                         if self.verbose:
-                             self.console.print(f"[red]Error: {data}[/red]")
+                     live.update(Markdown(markup=full_completion), refresh=True)
+                 except json.JSONDecodeError:
+                     self.console.print("[red]Error decoding response JSON[/red]")
+                     if self.verbose:
+                         self.console.print(f"[red]Error: {data}[/red]")
+
+         return full_completion

-                 time.sleep(0.01)
+     def _print_non_stream(self, response: requests.Response) -> str:
+         """Print response from LLM in non-streaming mode"""
+         full_completion = jmespath.search(self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json())
+         self.console.print(Markdown(full_completion))
+         return full_completion
+
+     def _print(self, response: requests.Response, stream: bool = True) -> str:
+         """Print response from LLM and return full completion"""
+         if stream:
+             # Streaming response
+             full_completion = self._print_stream(response)
          else:
              # Non-streaming response
-             full_completion = jmespath.search(
-                 self.config.get("ANSWER_PATH", "choices[0].message.content"), response.json()
-             )
-             self.console.print(Markdown(full_completion))
+             full_completion = self._print_non_stream(response)
          self.console.print()  # Add a newline after the response to separate from the next input
          return full_completion
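The net effect of the reasoning handling above is that reasoning deltas are rendered as a Markdown blockquote ahead of the answer. Below is a standalone restatement of that accumulation logic, driven by hypothetical hand-written deltas instead of a live SSE stream:

```python
# Illustration only: the reasoning-aware accumulation from _print_stream,
# fed by fake deltas (no network, no Rich Live display).
deltas = [
    {"reasoning": "Think about the user's OS.\nIt is Linux."},
    {"reasoning": " Prefer GNU ls flags."},
    {"content": "Use `ls -la` to list all files."},
]

def get_reasoning_content(delta: dict):
    # Same key probing as CLI.get_reasoning_content
    for k in ("reasoning_content", "reasoning"):
        if k in delta:
            return delta[k]
    return None

full_completion, in_reasoning = "", False
for delta in deltas:
    reason = get_reasoning_content(delta)
    if reason is not None:
        if not in_reasoning:  # reasoning started: open a blockquote
            in_reasoning = True
            full_completion = "> Reasoning:\n> "
        full_completion += reason.replace("\n", "\n> ")
    else:
        if in_reasoning:  # reasoning stopped: close the blockquote
            in_reasoning = False
            full_completion += "\n\n"
        full_completion += delta.get("content", "") or ""

print(full_completion)
# > Reasoning:
# > Think about the user's OS.
# > It is Linux. Prefer GNU ls flags.
#
# Use `ls -la` to list all files.
```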
@@ -238,11 +313,22 @@ STREAM=true"""
              qmark = ""
          return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]

+     def _check_history_len(self) -> None:
+         """Check history length and remove oldest messages if necessary"""
+         if len(self.history) > self.max_history_length:
+             self.history = self.history[-self.max_history_length :]
+
      def _run_repl(self) -> None:
          """Run REPL loop, handling user input and generating responses, saving history, and executing commands"""
          # Show REPL instructions
          self._setup_key_bindings()
-         self.console.print("[bold]Starting REPL loop[/bold]")
+         self.console.print("""
+ ██    ██  █████  ██  ██████ ██      ██
+  ██  ██  ██   ██ ██ ██      ██      ██
+   ████   ███████ ██ ██      ██      ██
+    ██    ██   ██ ██ ██      ██      ██
+    ██    ██   ██ ██  ██████ ███████ ██
+ """)
          self.console.print("[bold]Press TAB to change in chat and exec mode[/bold]")
          self.console.print("[bold]Type /clear to clear chat history[/bold]")
          self.console.print("[bold]Type /his to see chat history[/bold]")
@@ -280,43 +366,35 @@ STREAM=true"""
              # Get response from LLM
              response = self.post(message)
              self.console.print("\n[bold green]Assistant:[/bold green]")
-             content = self._print(response, stream=self.config["STREAM"] == "true")
+             try:
+                 content = self._print(response, stream=self.config["STREAM"] == "true")
+             except Exception as e:
+                 self.console.print(f"[red]Error: {e}[/red]")
+                 continue

              # Add user input and assistant response to history
              self.history.append({"role": "user", "content": user_input})
              self.history.append({"role": "assistant", "content": content})

              # Trim history if needed
-             if len(self.history) > self.max_history_length * 2:
-                 self.history = self.history[-self.max_history_length * 2 :]
+             self._check_history_len()

              # Handle command execution in exec mode
              if self.current_mode == EXEC_MODE:
+                 content = self._filter_command(content)
+                 if not content:
+                     self.console.print("[bold red]No command generated[/bold red]")
+                     continue
                  self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
                  if Confirm.ask("Execute this command?", default=False):
-                     returncode = subprocess.call(content, shell=True)
-                     if returncode != 0:
-                         self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")
+                     subprocess.call(content, shell=True)

          self.console.print("[bold green]Exiting...[/bold green]")

-     def run(self, chat: bool, shell: bool, prompt: str) -> None:
-         self.load_config()
-         if not self.config.get("API_KEY"):
-             self.console.print("[bold red]API key not set[/bold red]")
-             self.console.print(
-                 "[bold red]Please set API key in ~/.config/yaicli/config.ini or environment variable[/bold red]"
-             )
-             raise typer.Exit(code=1)
+     def _run_once(self, prompt: str, shell: bool = False) -> None:
+         """Run once with given prompt"""
          _os = self.detect_os()
          _shell = self.detect_shell()
-
-         # Handle chat mode
-         if chat:
-             self.current_mode = CHAT_MODE
-             self._run_repl()
-             return
-
          # Create appropriate system prompt based on mode
          system_prompt = SHELL_PROMPT if shell else DEFAULT_PROMPT
          system_content = system_prompt.format(_os=_os, _shell=_shell)
@@ -330,10 +408,14 @@ STREAM=true"""
          # Get response from LLM
          response = self.post(message)
          self.console.print("\n[bold green]Assistant:[/bold green]")
-         content = self._print(response, stream=(not shell and self.config["STREAM"] == "true"))
+         content = self._print(response, stream=self.config["STREAM"] == "true")

          # Handle shell mode execution
          if shell:
+             content = self._filter_command(content)
+             if not content:
+                 self.console.print("[bold red]No command generated[/bold red]")
+                 return
              self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {content}")
              if Confirm.ask("Execute this command?", default=False):
                  returncode = subprocess.call(content, shell=True)
@@ -341,6 +423,24 @@ STREAM=true"""
                  self.console.print(f"[bold red]Command failed with return code {returncode}[/bold red]")


+     def run(self, chat: bool, shell: bool, prompt: str) -> None:
+         """Run the CLI"""
+         self.load_config()
+         if not self.config.get("API_KEY"):
+             self.console.print("[bold red]API key not set[/bold red]")
+             self.console.print(
+                 "[bold red]Please set API key in ~/.config/yaicli/config.ini or environment variable[/bold red]"
+             )
+             raise typer.Exit(code=1)
+
+         # Handle chat mode
+         if chat:
+             self.current_mode = CHAT_MODE
+             self._run_repl()
+         else:
+             self._run_once(prompt, shell)
+
+
  @app.command()
  def main(
      ctx: typer.Context,
yaicli-0.0.7.dist-info/RECORD DELETED
@@ -1,6 +0,0 @@
- yaicli.py,sha256=Wsi6VWGw3FfCkl8d6AR6p3nY8QbdZM0bx8JXFNrfZP8,14538
- yaicli-0.0.7.dist-info/METADATA,sha256=EHpi_s3X6a31V6z2EuGQ9hOgh1Karxi07f-H6Vbeekg,23101
- yaicli-0.0.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- yaicli-0.0.7.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
- yaicli-0.0.7.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- yaicli-0.0.7.dist-info/RECORD,,