yaicli 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
{yaicli-0.0.1.dist-info → yaicli-0.0.2.dist-info}/METADATA RENAMED
@@ -1,9 +1,10 @@
 Metadata-Version: 2.4
 Name: yaicli
-Version: 0.0.1
+Version: 0.0.2
 Summary: A simple CLI tool to interact with LLM
 License-File: LICENSE
 Requires-Python: >=3.8
+Requires-Dist: distro>=1.9.0
 Requires-Dist: jmespath>=1.0.1
 Requires-Dist: prompt-toolkit>=3.0.50
 Requires-Dist: requests>=2.32.3
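The metadata changes are the version bump and one new runtime dependency, distro, which the OS-detection change in yaicli.py below uses to name the Linux distribution. A minimal sketch of what the dependency provides (output is illustrative and machine-dependent; typically an empty string on non-Linux hosts):

    # Sketch: what the new `distro` dependency reports.
    import distro

    print(distro.name(pretty=True))  # e.g. "Ubuntu 24.04 LTS"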
yaicli-0.0.2.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+yaicli.py,sha256=4c5UFeIUFI7lQERM31drdaj-bR6Q1DP3WBhm5qnu6b4,16620
+yaicli-0.0.2.dist-info/METADATA,sha256=N-j9mcia0VG5XmxoXfrLz5ziXWKE8lqbVgSMfv-qnCQ,379
+yaicli-0.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+yaicli-0.0.2.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
+yaicli-0.0.2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+yaicli-0.0.2.dist-info/RECORD,,
yaicli.py CHANGED
@@ -1,5 +1,6 @@
 import configparser
 import json
+import platform
 import subprocess
 import time
 from enum import StrEnum
@@ -11,6 +12,7 @@ from typing import Annotated, Optional
 import jmespath
 import requests
 import typer
+from distro import name as distro_name
 from prompt_toolkit import PromptSession
 from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
 from prompt_toolkit.keys import Keys
@@ -33,7 +35,7 @@ class CasePreservingConfigParser(configparser.RawConfigParser):
 
 class ShellAI:
     # Configuration file path
-    CONFIG_PATH = Path("~/.config/llmcli/config.ini").expanduser()
+    CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
 
     # Default configuration template
     DEFAULT_CONFIG_INI = """[core]
@@ -61,7 +63,6 @@ ANSWER_PATH=choices[0].message.content
 STREAM=true"""
 
     def __init__(self, verbose: bool = False):
-        # Initialize terminal components
        self.verbose = verbose
        self.console = Console()
        self.bindings = KeyBindings()
@@ -83,15 +84,20 @@ STREAM=true"""
            else ModeEnum.EXECUTE.value
        )
 
-    def get_os(self):
+    def detect_os(self):
        """Detect operating system"""
        if self.config.get("OS_NAME") != "auto":
            return self.config.get("OS_NAME")
-        import platform
-
-        return platform.system()
-
-    def get_shell(self):
+        current_platform = platform.system()
+        if current_platform == "Linux":
+            return "Linux/" + distro_name(pretty=True)
+        if current_platform == "Windows":
+            return "Windows " + platform.release()
+        if current_platform == "Darwin":
+            return "Darwin/MacOS " + platform.mac_ver()[0]
+        return current_platform
+
+    def detect_shell(self):
        """Detect shell"""
        if self.config.get("SHELL_NAME") != "auto":
            return self.config.get("SHELL_NAME")
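get_os() is renamed to detect_os() and now returns a platform-qualified string instead of the bare platform.system() value. A sketch of what it produces when auto-detection is on (example values are illustrative, not taken from the diff):

    # Sketch of the new detect_os() behavior per platform.
    import platform
    from distro import name as distro_name

    system = platform.system()
    if system == "Linux":
        print("Linux/" + distro_name(pretty=True))      # e.g. "Linux/Ubuntu 24.04 LTS"
    elif system == "Windows":
        print("Windows " + platform.release())          # e.g. "Windows 11"
    elif system == "Darwin":
        print("Darwin/MacOS " + platform.mac_ver()[0])  # e.g. "Darwin/MacOS 14.5"
    else:
        print(system)                                   # other platforms pass through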
@@ -103,6 +109,30 @@ STREAM=true"""
            return "powershell.exe" if is_powershell else "cmd.exe"
        return basename(getenv("SHELL", "/bin/sh"))
 
+    def build_cmd_prompt(self):
+        _os = self.detect_os()
+        _shell = self.detect_shell()
+        return f"""Your are a Shell Command Generator.
+Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
+Rules:
+1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
+2. Output STRICTLY in plain text format
+3. NEVER use markdown, code blocks or explanations
+4. Chain multi-step commands in SINGLE LINE
+5. Return NOTHING except the ready-to-run command"""
+
+    def build_default_prompt(self):
+        """Build default prompt"""
+        _os = self.detect_os()
+        _shell = self.detect_shell()
+        return (
+            "You are a system and code assistant, "
+            f"focusing on {_os} and {_shell}. "
+            "Assist with system management, script writing, and coding tasks. "
+            "Your responses should be concise and use Markdown format, "
+            "unless the user explicitly requests more details."
+        )
+
    def get_default_config(self):
        """Get default configuration"""
        config = CasePreservingConfigParser()
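To make the template concrete: assuming detect_os() yields "Linux/Ubuntu 24.04 LTS" and detect_shell() yields "bash" (hypothetical values), build_cmd_prompt() renders to the following system prompt (the "Your are" typo is present in the shipped string):

    Your are a Shell Command Generator.
    Generate a command EXCLUSIVELY for Linux/Ubuntu 24.04 LTS OS with bash shell.
    Rules:
    1. Use ONLY bash-specific syntax and connectors (&&, ||, |, etc)
    2. Output STRICTLY in plain text format
    3. NEVER use markdown, code blocks or explanations
    4. Chain multi-step commands in SINGLE LINE
    5. Return NOTHING except the ready-to-run command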
@@ -137,36 +167,42 @@ STREAM=true"""
        response.raise_for_status()  # Raise an exception for non-200 status codes
        return response
 
-    def call_llm_api(self, prompt):
-        """Call LLM API, return streaming output"""
+    def get_llm_url(self) -> Optional[str]:
+        """Get LLM API URL"""
        base = self.config.get("BASE_URL", "").rstrip("/")
        if not base:
            self.console.print(
                "[red]Base URL not found. Please set it in the configuration file. Default: https://api.openai.com/v1[/red]"
            )
-            return
+            raise typer.Exit(code=1)
        COMPLETION_PATH = self.config.get("COMPLETION_PATH", "").lstrip("/")
        if not COMPLETION_PATH:
            self.console.print(
                "[red]Completions path not set. Please set it in the configuration file. Default: `/chat/completions`[/red]"
            )
-            return
-        url = f"{base}/{COMPLETION_PATH}"
-        headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-        data = {
+            raise typer.Exit(code=1)
+        return f"{base}/{COMPLETION_PATH}"
+
+    def build_data(self, prompt: str, mode: str = ModeEnum.TEMP.value) -> dict:
+        """Build request data"""
+        if mode == ModeEnum.EXECUTE.value:
+            system_prompt = self.build_cmd_prompt()
+        else:
+            system_prompt = self.build_default_prompt()
+        return {
            "model": self.config["MODEL"],
-            "messages": [{"role": "user", "content": prompt}],
+            "messages": [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": prompt},
+            ],
            "stream": self.config.get("STREAM", "true") == "true",
+            "temperature": 0.7,
+            "top_p": 0.7,
+            "max_tokens": 200,
        }
-        try:
-            response = self._call_api(url, headers, data)
-        except requests.exceptions.RequestException as e:
-            self.console.print(f"[red]Error calling API: {e}[/red]")
-            return
-        if not response:
-            return
 
-        self.console.print("\n[bold green]Assistant:[/bold green]")
+    def stream_response(self, response):
+        """Stream response from LLM API"""
        full_completion = ""
        # Streaming response loop
        with Live(console=self.console) as live:
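Both request paths now build their payload through build_data(). With placeholder config values, the body sent to the completions endpoint looks roughly like this (model and prompt are hypothetical; temperature, top_p, and max_tokens are the new hard-coded values from the hunk):

    # Hypothetical result of build_data("how do I list open ports?"):
    payload = {
        "model": "gpt-4o-mini",  # from config MODEL (placeholder)
        "messages": [
            {"role": "system", "content": "<output of build_default_prompt()>"},
            {"role": "user", "content": "how do I list open ports?"},
        ],
        "stream": True,          # config STREAM == "true"
        "temperature": 0.7,
        "top_p": 0.7,
        "max_tokens": 200,
    }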
@@ -190,24 +226,29 @@ STREAM=true"""
                        self.console.print(f"[red]Error decoding JSON: {decoded_line}[/red]")
                time.sleep(0.05)
 
+    def call_llm_api(self, prompt: str):
+        """Call LLM API, return streaming output"""
+        url = self.get_llm_url()
+        headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
+        data = self.build_data(prompt)
+        try:
+            response = self._call_api(url, headers, data)
+        except requests.exceptions.RequestException as e:
+            self.console.print(f"[red]Error calling API: {e}[/red]")
+            raise typer.Exit(code=1) from None
+        if not response:
+            raise typer.Exit(code=1)
+
+        self.console.print("\n[bold green]Assistant:[/bold green]")
+        self.stream_response(response)  # Stream the response
        self.console.print()  # Add a newline after the completion
 
    def get_command_from_llm(self, prompt):
        """Request Shell command from LLM"""
-        url = f"{self.config['BASE_URL']}/chat/completions"
+        url = self.get_llm_url()
        headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-        data = {
-            "model": self.config["MODEL"],
-            "messages": [
-                {
-                    "role": "system",
-                    "content": "You are a command line assistant, return one Linux/macOS shell commands only, without explanation and triple-backtick code blocks.",
-                },
-                {"role": "user", "content": prompt},
-            ],
-            "stream": False,  # Always use non-streaming for command generation
-        }
-
+        data = self.build_data(prompt, mode=ModeEnum.EXECUTE.value)
+        data["stream"] = False
        try:
            response = self._call_api(url, headers, data)
        except requests.exceptions.RequestException as e:
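Net effect of the refactor: call_llm_api() and get_command_from_llm() now share get_llm_url() and build_data(), differing only in prompt mode and streaming, and configuration errors exit with code 1 instead of returning silently. A usage sketch (driver wiring is assumed, not shown in the diff):

    # Hypothetical driver; both paths go through get_llm_url()/build_data().
    app = ShellAI(verbose=False)
    app.call_llm_api("explain this traceback")        # default prompt, streams output
    app.get_command_from_llm("find files over 1GB")   # EXECUTE-mode prompt, stream=False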
yaicli-0.0.1.dist-info/RECORD DELETED
@@ -1,6 +0,0 @@
-yaicli.py,sha256=ODlhfiH3uqgcep1WElGCSWv5HIelZGKXeH9EI3qgHrA,14895
-yaicli-0.0.1.dist-info/METADATA,sha256=CKiylTH_4iI1V5LXKy6kzBAR09p6pKHjvDwC7m33ork,350
-yaicli-0.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-yaicli-0.0.1.dist-info/entry_points.txt,sha256=gdduQwAuu_LeDqnDU81Fv3NPmD2tRQ1FffvolIP3S1Q,34
-yaicli-0.0.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-yaicli-0.0.1.dist-info/RECORD,,
Files without changes: WHEEL, entry_points.txt, licenses/LICENSE (identical sha256 in both RECORDs)