yaicli 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
{yaicli-0.0.1.dist-info → yaicli-0.0.3.dist-info}/METADATA CHANGED
@@ -1,9 +1,10 @@
  Metadata-Version: 2.4
  Name: yaicli
- Version: 0.0.1
+ Version: 0.0.3
  Summary: A simple CLI tool to interact with LLM
  License-File: LICENSE
  Requires-Python: >=3.8
+ Requires-Dist: distro>=1.9.0
  Requires-Dist: jmespath>=1.0.1
  Requires-Dist: prompt-toolkit>=3.0.50
  Requires-Dist: requests>=2.32.3
yaicli-0.0.3.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ yaicli.py,sha256=4c-cGPzZlUPdqv8uZrWmiimlCp8Q8S9UH7jKHRWYI8U,16709
+ yaicli-0.0.3.dist-info/METADATA,sha256=3C60zYZfELyBiKNPqXyPKKqAh8cKe7FG42oc_Esr01Y,379
+ yaicli-0.0.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ yaicli-0.0.3.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
+ yaicli-0.0.3.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ yaicli-0.0.3.dist-info/RECORD,,
yaicli.py CHANGED
@@ -1,5 +1,6 @@
  import configparser
  import json
+ import platform
  import subprocess
  import time
  from enum import StrEnum
@@ -11,6 +12,7 @@ from typing import Annotated, Optional
  import jmespath
  import requests
  import typer
+ from distro import name as distro_name
  from prompt_toolkit import PromptSession
  from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
  from prompt_toolkit.keys import Keys
@@ -33,7 +35,7 @@ class CasePreservingConfigParser(configparser.RawConfigParser):

  class ShellAI:
      # Configuration file path
-     CONFIG_PATH = Path("~/.config/llmcli/config.ini").expanduser()
+     CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()

      # Default configuration template
      DEFAULT_CONFIG_INI = """[core]
@@ -61,7 +63,6 @@ ANSWER_PATH=choices[0].message.content
  STREAM=true"""

      def __init__(self, verbose: bool = False):
-         # Initialize terminal components
          self.verbose = verbose
          self.console = Console()
          self.bindings = KeyBindings()
@@ -83,15 +84,20 @@ STREAM=true"""
              else ModeEnum.EXECUTE.value
          )

-     def get_os(self):
+     def detect_os(self):
          """Detect operating system"""
          if self.config.get("OS_NAME") != "auto":
              return self.config.get("OS_NAME")
-         import platform
-
-         return platform.system()
-
-     def get_shell(self):
+         current_platform = platform.system()
+         if current_platform == "Linux":
+             return "Linux/" + distro_name(pretty=True)
+         if current_platform == "Windows":
+             return "Windows " + platform.release()
+         if current_platform == "Darwin":
+             return "Darwin/MacOS " + platform.mac_ver()[0]
+         return current_platform
+
+     def detect_shell(self):
          """Detect shell"""
          if self.config.get("SHELL_NAME") != "auto":
              return self.config.get("SHELL_NAME")
@@ -103,6 +109,29 @@ STREAM=true"""
              return "powershell.exe" if is_powershell else "cmd.exe"
          return basename(getenv("SHELL", "/bin/sh"))

+     def build_cmd_prompt(self):
+         _os = self.detect_os()
+         _shell = self.detect_shell()
+         return f"""Your are a Shell Command Generator.
+ Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
+ Rules:
+ 1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
+ 2. Output STRICTLY in plain text format
+ 3. NEVER use markdown, code blocks or explanations
+ 4. Chain multi-step commands in SINGLE LINE
+ 5. Return NOTHING except the ready-to-run command"""
+
+     def build_default_prompt(self):
+         """Build default prompt"""
+         _os = self.detect_os()
+         _shell = self.detect_shell()
+         return (
+             "You are yaili, a system management and programing assistant, "
+             f"You are managing {_os} operating system with {_shell} shell. "
+             "Your responses should be concise and use Markdown format, "
+             "unless the user explicitly requests more details."
+         )
+
      def get_default_config(self):
          """Get default configuration"""
          config = CasePreservingConfigParser()
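To make the new system prompt concrete: with assumed detection results _os = "Linux/Ubuntu 24.04 LTS" and _shell = "bash", the first two rendered lines of build_cmd_prompt() would read:

    Your are a Shell Command Generator.
    Generate a command EXCLUSIVELY for Linux/Ubuntu 24.04 LTS OS with bash shell.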
@@ -137,36 +166,42 @@ STREAM=true"""
          response.raise_for_status()  # Raise an exception for non-200 status codes
          return response

-     def call_llm_api(self, prompt):
-         """Call LLM API, return streaming output"""
+     def get_llm_url(self) -> Optional[str]:
+         """Get LLM API URL"""
          base = self.config.get("BASE_URL", "").rstrip("/")
          if not base:
              self.console.print(
                  "[red]Base URL not found. Please set it in the configuration file. Default: https://api.openai.com/v1[/red]"
              )
-             return
+             raise typer.Exit(code=1)
          COMPLETION_PATH = self.config.get("COMPLETION_PATH", "").lstrip("/")
          if not COMPLETION_PATH:
              self.console.print(
                  "[red]Completions path not set. Please set it in the configuration file. Default: `/chat/completions`[/red]"
              )
-             return
-         url = f"{base}/{COMPLETION_PATH}"
-         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = {
+             raise typer.Exit(code=1)
+         return f"{base}/{COMPLETION_PATH}"
+
+     def build_data(self, prompt: str, mode: str = ModeEnum.TEMP.value) -> dict:
+         """Build request data"""
+         if mode == ModeEnum.EXECUTE.value:
+             system_prompt = self.build_cmd_prompt()
+         else:
+             system_prompt = self.build_default_prompt()
+         return {
              "model": self.config["MODEL"],
-             "messages": [{"role": "user", "content": prompt}],
+             "messages": [
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": prompt},
+             ],
              "stream": self.config.get("STREAM", "true") == "true",
+             "temperature": 0.7,
+             "top_p": 0.7,
+             "max_tokens": 200,
          }
-         try:
-             response = self._call_api(url, headers, data)
-         except requests.exceptions.RequestException as e:
-             self.console.print(f"[red]Error calling API: {e}[/red]")
-             return
-         if not response:
-             return

-         self.console.print("\n[bold green]Assistant:[/bold green]")
+     def stream_response(self, response):
+         """Stream response from LLM API"""
          full_completion = ""
          # Streaming response loop
          with Live(console=self.console) as live:
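A sketch of the request body the new build_data() yields in default (chat) mode, under an assumed config of MODEL=gpt-4o-mini and STREAM=true (the model name and user text are illustrative):

    {
        "model": "gpt-4o-mini",
        "messages": [
            {"role": "system", "content": "<output of build_default_prompt()>"},
            {"role": "user", "content": "how do I check disk usage?"},
        ],
        "stream": True,
        "temperature": 0.7,  # now hard-coded by build_data()
        "top_p": 0.7,
        "max_tokens": 200,
    }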
@@ -190,24 +225,31 @@ STREAM=true"""
                          self.console.print(f"[red]Error decoding JSON: {decoded_line}[/red]")
              time.sleep(0.05)

+     def call_llm_api(self, prompt: str):
+         """Call LLM API, return streaming output"""
+         url = self.get_llm_url()
+         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
+         data = self.build_data(prompt)
+         try:
+             response = self._call_api(url, headers, data)
+         except requests.exceptions.RequestException as e:
+             self.console.print(f"[red]Error calling API: {e}[/red]")
+             if self.verbose and e.response:
+                 self.console.print(f"{e.response.text}")
+             raise typer.Exit(code=1) from None
+         if not response:
+             raise typer.Exit(code=1)
+
+         self.console.print("\n[bold green]Assistant:[/bold green]")
+         self.stream_response(response)  # Stream the response
          self.console.print()  # Add a newline after the completion

      def get_command_from_llm(self, prompt):
          """Request Shell command from LLM"""
-         url = f"{self.config['BASE_URL']}/chat/completions"
+         url = self.get_llm_url()
          headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = {
-             "model": self.config["MODEL"],
-             "messages": [
-                 {
-                     "role": "system",
-                     "content": "You are a command line assistant, return one Linux/macOS shell commands only, without explanation and triple-backtick code blocks.",
-                 },
-                 {"role": "user", "content": prompt},
-             ],
-             "stream": False,  # Always use non-streaming for command generation
-         }
-
+         data = self.build_data(prompt, mode=ModeEnum.EXECUTE.value)
+         data["stream"] = False
          try:
              response = self._call_api(url, headers, data)
          except requests.exceptions.RequestException as e:
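Command generation now reuses the shared payload builder and merely disables streaming; a minimal sketch of the equivalent call sequence (cli is an assumed ShellAI instance, and the prompt text is illustrative):

    # Names taken from the diff above; EXECUTE mode selects build_cmd_prompt().
    data = cli.build_data("find files larger than 1GB", mode=ModeEnum.EXECUTE.value)
    data["stream"] = False  # commands come back in one non-streaming reply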
@@ -397,7 +439,7 @@ def main(
      ] = False,
  ):
      """LLM CLI Tool"""
-     cli = ShellAI()
+     cli = ShellAI(verbose=verbose)
      cli.run(chat=chat, shell=shell, prompt=prompt)


yaicli-0.0.1.dist-info/RECORD DELETED
@@ -1,6 +0,0 @@
- yaicli.py,sha256=ODlhfiH3uqgcep1WElGCSWv5HIelZGKXeH9EI3qgHrA,14895
- yaicli-0.0.1.dist-info/METADATA,sha256=CKiylTH_4iI1V5LXKy6kzBAR09p6pKHjvDwC7m33ork,350
- yaicli-0.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- yaicli-0.0.1.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
- yaicli-0.0.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- yaicli-0.0.1.dist-info/RECORD,,