yaicli 0.0.1__tar.gz → 0.0.3__tar.gz

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
yaicli-0.0.1/.bumpversion.cfg → yaicli-0.0.3/.bumpversion.cfg
@@ -1,5 +1,5 @@
  [bumpversion]
- current_version = 0.0.1
+ current_version = 0.0.3
  commit = True
  tag = True

yaicli-0.0.3/CHANGELOG.md ADDED
@@ -0,0 +1,33 @@
+
+ ---
+ ## [0.0.3](https://github.com/belingud/yaicli/compare/v0.0.2..v0.0.3) - 2025-04-02
+
+ ### 🚜 Refactor
+
+ - **(changelog)** switch from git log to git cliff for changelog generation - ([242a51d](https://github.com/belingud/yaicli/commit/242a51d93a25675041a7fe19c4a0db1c7c12a663)) - Belingud
+ - update CLI assistant description and error handling - ([1a8bcdc](https://github.com/belingud/yaicli/commit/1a8bcdc86c72a75259d266291bc9e5350fc81f61)) - Belingud
+ - fix spacing in clean target and simplify build target - ([2a1050e](https://github.com/belingud/yaicli/commit/2a1050e7a9eea8753c164553b13796e5510b7965)) - Belingud
+
+
+ ---
+ ## [0.0.2] - 2025-04-02
+
+ ### ⛰️ Features
+
+ - enhance OS detection and LLM API request handling - ([8c00de0](https://github.com/belingud/yaicli/commit/8c00de099e1c75fedcdd33e9d7e0443818538059)) - Belingud
+ - add new dependencies and configurable API paths - ([3be3f67](https://github.com/belingud/yaicli/commit/3be3f67b3bd9645a9a8c9909be0e5542612c1c71)) - Belingud
+
+ ### 🚜 Refactor
+
+ - **(config)** migrate config from JSON to INI format and enhance error handling - ([f556872](https://github.com/belingud/yaicli/commit/f556872fdb521adf8e20da6498fdaac26998a5b7)) - Belingud
+ - update config path to reflect project name change - ([e6bf761](https://github.com/belingud/yaicli/commit/e6bf761aa0fae20b643848c586c6dc55d6f324fb)) - Belingud
+ - rename project from llmcli to yaicli and update related files - ([cdb5a97](https://github.com/belingud/yaicli/commit/cdb5a97d06c9f042c1c8731c8f11a4a7284783f0)) - Belingud
+ - rename project from shellai to llmcli and update related files - ([db4ecb8](https://github.com/belingud/yaicli/commit/db4ecb85236b72fd7536c99b21d210037c09889e)) - Belingud
+ - reorganize imports and improve code readability - ([0f52c05](https://github.com/belingud/yaicli/commit/0f52c05918d177d4623ff2f07aff2fc2e3aa9e91)) - Belingud
+ - migrate llmcli to class-based ShellAI implementation - ([2509cba](https://github.com/belingud/yaicli/commit/2509cba5cad7c8626f794d88d971fff7eea9a404)) - Belingud
+
+ ### Build
+
+ - add bump2version for version management - ([de287f1](https://github.com/belingud/yaicli/commit/de287f1262eae64ae5fd0cb634dea93762a17282)) - Belingud
+
+
yaicli-0.0.3/Justfile ADDED
@@ -0,0 +1,33 @@
+ # Justfile for Python project management
+
+ # Clean build artifacts
+ clean:
+     @echo "Cleaning build artifacts..."
+     @rm -rf build/ dist/ *.egg-info/
+     @echo "Cleaning cache files..."
+     @find . -type d -name "__pycache__" -exec rm -rf {} +
+     @echo "Cleaning test artifacts..."
+     @rm -rf .pytest_cache/
+     @echo "Cleaning pdm build artifacts..."
+     @rm -rf .pdm_build/
+
+ # Run tests with pytest
+ test:
+     @echo "Running tests..."
+     @pytest tests/
+
+ # Build package with hatch (runs clean first)
+ build:
+     @echo "Building package..."
+     @rm -rf dist/
+     @uv build
+
+ # Install package in editable mode
+ install:
+     @echo "Installing packages..."
+     @pip install -e .
+
+ # Generate changelog from git log
+ changelog:
+     @echo "Generating changelog..."
+     @git cliff -l --prepend CHANGELOG.md
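For readers who don't use `just`, the `clean` recipe above amounts to deleting a fixed set of build and cache directories. A rough Python equivalent of that one recipe, for illustration only (the paths come from the Justfile itself; nothing below is part of the package):

```python
import shutil
from pathlib import Path


def clean() -> None:
    """Approximate the `clean` recipe: remove build, cache, and test artifacts."""
    for path in ("build", "dist", ".pytest_cache", ".pdm_build"):
        shutil.rmtree(path, ignore_errors=True)
    for egg_info in Path(".").glob("*.egg-info"):
        shutil.rmtree(egg_info, ignore_errors=True)
    # Equivalent of: find . -type d -name "__pycache__" -exec rm -rf {} +
    for pycache in Path(".").rglob("__pycache__"):
        shutil.rmtree(pycache, ignore_errors=True)


if __name__ == "__main__":
    clean()
```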
yaicli-0.0.1/PKG-INFO → yaicli-0.0.3/PKG-INFO
@@ -1,9 +1,10 @@
  Metadata-Version: 2.4
  Name: yaicli
- Version: 0.0.1
+ Version: 0.0.3
  Summary: A simple CLI tool to interact with LLM
  License-File: LICENSE
  Requires-Python: >=3.8
+ Requires-Dist: distro>=1.9.0
  Requires-Dist: jmespath>=1.0.1
  Requires-Dist: prompt-toolkit>=3.0.50
  Requires-Dist: requests>=2.32.3
yaicli-0.0.3/cliff.toml ADDED
@@ -0,0 +1,90 @@
+ # git-cliff ~ configuration file
+ # https://git-cliff.org/docs/configuration
+
+ [changelog]
+ # template for the changelog header
+ header = """"""
+ # template for the changelog body
+ # https://keats.github.io/tera/docs/#introduction
+ body = """
+ ---
+ {% if version %}\
+ {% if previous.version %}\
+ ## [{{ version | trim_start_matches(pat="v") }}]($REPO/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }}
+ {% else %}\
+ ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
+ {% endif %}\
+ {% else %}\
+ ## [unreleased]
+ {% endif %}\
+ {% for group, commits in commits | group_by(attribute="group") %}
+ ### {{ group | striptags | trim | upper_first }}
+ {% for commit in commits
+ | filter(attribute="scope")
+ | sort(attribute="scope") %}
+ - **({{commit.scope}})**{% if commit.breaking %} [**breaking**]{% endif %} \
+ {{ commit.message }} - ([{{ commit.id | truncate(length=7, end="") }}]($REPO/commit/{{ commit.id }})) - {{ commit.author.name }}
+ {%- endfor -%}
+ {% raw %}\n{% endraw %}\
+ {%- for commit in commits %}
+ {%- if commit.scope -%}
+ {% else -%}
+ - {% if commit.breaking %} [**breaking**]{% endif %}\
+ {{ commit.message }} - ([{{ commit.id | truncate(length=7, end="") }}]($REPO/commit/{{ commit.id }})) - {{ commit.author.name }}
+ {% endif -%}
+ {% endfor -%}
+ {% endfor %}\n
+ """
+ # template for the changelog footer
+ footer = """"""
+ # remove the leading and trailing whitespace from the templates
+ trim = true
+ # postprocessors
+ postprocessors = [
+ { pattern = '\$REPO', replace = "https://github.com/belingud/yaicli" }, # replace repository URL
+ ]
+
+ [git]
+ # parse the commits based on https://www.conventionalcommits.org
+ conventional_commits = true
+ # filter out the commits that are not conventional
+ filter_unconventional = true
+ # process each line of a commit as an individual commit
+ split_commits = false
+ # regex for preprocessing the commit messages
+ commit_preprocessors = [
+ # { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](https://github.com/orhun/git-cliff/issues/${2}))"}, # replace issue numbers
+ ]
+ # regex for parsing and grouping commits
+ commit_parsers = [
+ { message = "^feat", group = "<!-- 0 -->⛰️ Features" },
+ { message = "^fix", group = "<!-- 1 -->🐛 Bug Fixes" },
+ { message = "^doc", group = "<!-- 3 -->📚 Documentation" },
+ { message = "^perf", group = "<!-- 4 -->⚡ Performance" },
+ { message = "^refactor\\(clippy\\)", skip = true },
+ { message = "^refactor", group = "<!-- 2 -->🚜 Refactor" },
+ { message = "^style", group = "<!-- 5 -->🎨 Styling" },
+ { message = "^test", group = "<!-- 6 -->🧪 Testing" },
+ { message = "^chore\\(release\\): prepare for", skip = true },
+ { message = "^chore\\(deps.*\\)", skip = true },
+ { message = "^chore\\(pr\\)", skip = true },
+ { message = "^chore\\(pull\\)", skip = true },
+ { message = "^chore\\(npm\\).*yarn\\.lock", skip = true },
+ { message = "^chore|^ci", group = "<!-- 7 -->⚙️ Miscellaneous Tasks" },
+ { body = ".*security", group = "<!-- 8 -->🛡️ Security" },
+ { message = "^revert", group = "<!-- 9 -->◀️ Revert" },
+ ]
+ # protect breaking changes from being skipped due to matching a skipping commit_parser
+ protect_breaking_commits = false
+ # filter out the commits that are not matched by commit parsers
+ filter_commits = false
+ # regex for matching git tags
+ tag_pattern = "v[0-9].*"
+ # regex for skipping tags
+ skip_tags = "beta|alpha"
+ # regex for ignoring tags
+ ignore_tags = "rc|beta"
+ # sort the tags topologically
+ topo_order = false
+ # sort the commits inside sections by oldest/newest order
+ sort_commits = "newest"
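The `commit_parsers` table is what produced the grouping in the CHANGELOG.md hunk above: each commit subject is tested against the `message` patterns in order, and the first match assigns the group (the `<!-- n -->` prefixes only control section ordering and are stripped by the template's `striptags` filter). A minimal Python sketch of that first-match-wins classification, assuming Python's `re` semantics are close enough to git-cliff's regex engine for these simple patterns:

```python
import re

# A subset of the (pattern, group) pairs from commit_parsers above,
# in the same order; None stands in for `skip = true`.
PARSERS = [
    (r"^feat", "⛰️ Features"),
    (r"^fix", "🐛 Bug Fixes"),
    (r"^refactor\(clippy\)", None),   # skip = true
    (r"^refactor", "🚜 Refactor"),
    (r"^chore\(deps.*\)", None),      # skip = true
    (r"^chore|^ci", "⚙️ Miscellaneous Tasks"),
]


def classify(subject: str):
    """Return the changelog group for a commit subject (first match wins).

    Unmatched subjects return None here; git-cliff itself keeps them
    when filter_commits = false, as in this config.
    """
    for pattern, group in PARSERS:
        if re.search(pattern, subject):
            return group
    return None


print(classify("feat: add new dependencies"))         # ⛰️ Features
print(classify("refactor(config): migrate to INI"))   # 🚜 Refactor
print(classify("chore(deps): bump requests"))         # None (skipped)
```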
yaicli-0.0.3/debug.py ADDED
@@ -0,0 +1,12 @@
+ from yaicli import ShellAI
+
+
+ def main():
+     ai = ShellAI()
+     ai.load_config()
+     r = ai.get_command_from_llm("列出当前目录中所有的文件,包含隐藏文件")
+     print(r)
+
+
+ if __name__ == "__main__":
+     main()

(The Chinese prompt string translates to: "List all the files in the current directory, including hidden files.")
yaicli-0.0.1/pyproject.toml → yaicli-0.0.3/pyproject.toml
@@ -1,10 +1,11 @@
  [project]
  name = "yaicli"
- version = "0.0.1"
+ version = "0.0.3"
  description = "A simple CLI tool to interact with LLM"
  readme = "README.md"
  requires-python = ">=3.8"
  dependencies = [
+ "distro>=1.9.0",
  "jmespath>=1.0.1",
  "prompt-toolkit>=3.0.50",
  "requests>=2.32.3",
@@ -1,6 +1,6 @@
  import pytest

- from llmcli import ShellAI
+ from yaicli import ShellAI


  @pytest.fixture
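Only the import changes in this hunk; the fixture body lies outside it. For orientation, a plausible shape for such a fixture, together with a test against the fence-stripping helper `_filter_command` visible in the full listing at the end of this diff — the fixture body is an assumption, not package code:

```python
import pytest

from yaicli import ShellAI


@pytest.fixture
def shell_ai():
    # Hypothetical fixture body: construct the CLI object under test
    # without touching ~/.config by using the built-in defaults.
    ai = ShellAI(verbose=False)
    ai.config = ai.get_default_config()
    return ai


def test_filter_command_strips_fences(shell_ai):
    # _filter_command removes markdown code fences from LLM output.
    assert shell_ai._filter_command("```bash\nls -la\n```") == "ls -la"
```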
@@ -132,6 +132,15 @@ wheels = [
132
132
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6" },
133
133
  ]
134
134
 
135
+ [[package]]
136
+ name = "distro"
137
+ version = "1.9.0"
138
+ source = { registry = "https://repo.huaweicloud.com/repository/pypi/simple/" }
139
+ sdist = { url = "https://repo.huaweicloud.com/repository/pypi/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed" }
140
+ wheels = [
141
+ { url = "https://repo.huaweicloud.com/repository/pypi/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2" },
142
+ ]
143
+
135
144
  [[package]]
136
145
  name = "exceptiongroup"
137
146
  version = "1.2.2"
@@ -168,41 +177,6 @@ wheels = [
168
177
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980" },
169
178
  ]
170
179
 
171
- [[package]]
172
- name = "llm-cli"
173
- version = "0.0.1"
174
- source = { editable = "." }
175
- dependencies = [
176
- { name = "jmespath" },
177
- { name = "prompt-toolkit" },
178
- { name = "requests" },
179
- { name = "rich" },
180
- { name = "typer" },
181
- ]
182
-
183
- [package.dev-dependencies]
184
- dev = [
185
- { name = "bump2version" },
186
- { name = "pytest" },
187
- { name = "ruff" },
188
- ]
189
-
190
- [package.metadata]
191
- requires-dist = [
192
- { name = "jmespath", specifier = ">=1.0.1" },
193
- { name = "prompt-toolkit", specifier = ">=3.0.50" },
194
- { name = "requests", specifier = ">=2.32.3" },
195
- { name = "rich", specifier = ">=13.9.4" },
196
- { name = "typer", specifier = ">=0.15.2" },
197
- ]
198
-
199
- [package.metadata.requires-dev]
200
- dev = [
201
- { name = "bump2version", specifier = ">=1.0.1" },
202
- { name = "pytest", specifier = ">=8.3.5" },
203
- { name = "ruff", specifier = ">=0.11.2" },
204
- ]
205
-
206
180
  [[package]]
207
181
  name = "markdown-it-py"
208
182
  version = "3.0.0"
@@ -439,3 +413,40 @@ sdist = { url = "https://repo.huaweicloud.com/repository/pypi/packages/6c/63/535
439
413
  wheels = [
440
414
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859" },
441
415
  ]
416
+
417
+ [[package]]
418
+ name = "yaicli"
419
+ version = "0.0.3"
420
+ source = { editable = "." }
421
+ dependencies = [
422
+ { name = "distro" },
423
+ { name = "jmespath" },
424
+ { name = "prompt-toolkit" },
425
+ { name = "requests" },
426
+ { name = "rich" },
427
+ { name = "typer" },
428
+ ]
429
+
430
+ [package.dev-dependencies]
431
+ dev = [
432
+ { name = "bump2version" },
433
+ { name = "pytest" },
434
+ { name = "ruff" },
435
+ ]
436
+
437
+ [package.metadata]
438
+ requires-dist = [
439
+ { name = "distro", specifier = ">=1.9.0" },
440
+ { name = "jmespath", specifier = ">=1.0.1" },
441
+ { name = "prompt-toolkit", specifier = ">=3.0.50" },
442
+ { name = "requests", specifier = ">=2.32.3" },
443
+ { name = "rich", specifier = ">=13.9.4" },
444
+ { name = "typer", specifier = ">=0.15.2" },
445
+ ]
446
+
447
+ [package.metadata.requires-dev]
448
+ dev = [
449
+ { name = "bump2version", specifier = ">=1.0.1" },
450
+ { name = "pytest", specifier = ">=8.3.5" },
451
+ { name = "ruff", specifier = ">=0.11.2" },
452
+ ]
@@ -1,5 +1,6 @@
  import configparser
  import json
+ import platform
  import subprocess
  import time
  from enum import StrEnum
@@ -11,6 +12,7 @@ from typing import Annotated, Optional
  import jmespath
  import requests
  import typer
+ from distro import name as distro_name
  from prompt_toolkit import PromptSession
  from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
  from prompt_toolkit.keys import Keys
@@ -33,7 +35,7 @@ class CasePreservingConfigParser(configparser.RawConfigParser):

  class ShellAI:
      # Configuration file path
-     CONFIG_PATH = Path("~/.config/llmcli/config.ini").expanduser()
+     CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()

      # Default configuration template
      DEFAULT_CONFIG_INI = """[core]
@@ -61,7 +63,6 @@ ANSWER_PATH=choices[0].message.content
  STREAM=true"""

      def __init__(self, verbose: bool = False):
-         # Initialize terminal components
          self.verbose = verbose
          self.console = Console()
          self.bindings = KeyBindings()
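The `CasePreservingConfigParser` named in the hunk context above exists because stock `configparser` lowercases option names through `optionxform`, which would break lookups of keys such as `BASE_URL`. A quick standalone demonstration of the difference:

```python
import configparser

INI = "[core]\nBASE_URL=https://api.openai.com/v1\n"


class CasePreservingConfigParser(configparser.RawConfigParser):
    def optionxform(self, optionstr):
        return optionstr  # keep option names exactly as written


stock = configparser.RawConfigParser()
stock.read_string(INI)
print(dict(stock["core"]))       # {'base_url': '...'} -- key lowercased

preserving = CasePreservingConfigParser()
preserving.read_string(INI)
print(dict(preserving["core"]))  # {'BASE_URL': '...'} -- case kept
```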
@@ -83,15 +84,20 @@ STREAM=true"""
                  else ModeEnum.EXECUTE.value
              )

-     def get_os(self):
+     def detect_os(self):
          """Detect operating system"""
          if self.config.get("OS_NAME") != "auto":
              return self.config.get("OS_NAME")
-         import platform
-
-         return platform.system()
-
-     def get_shell(self):
+         current_platform = platform.system()
+         if current_platform == "Linux":
+             return "Linux/" + distro_name(pretty=True)
+         if current_platform == "Windows":
+             return "Windows " + platform.release()
+         if current_platform == "Darwin":
+             return "Darwin/MacOS " + platform.mac_ver()[0]
+         return current_platform
+
+     def detect_shell(self):
          """Detect shell"""
          if self.config.get("SHELL_NAME") != "auto":
              return self.config.get("SHELL_NAME")
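The reworked detection reports a distribution or OS version instead of the bare `platform.system()` string used before, which gives the LLM more context for generating commands. A standalone sketch of the same logic (printed values depend on the host and are illustrative only):

```python
import platform

from distro import name as distro_name  # pip install distro


def detect_os() -> str:
    """Mirror ShellAI.detect_os for an OS_NAME setting of 'auto'."""
    current_platform = platform.system()
    if current_platform == "Linux":
        # e.g. "Linux/Ubuntu 24.04 LTS"; the pretty name comes from distro
        return "Linux/" + distro_name(pretty=True)
    if current_platform == "Windows":
        return "Windows " + platform.release()          # e.g. "Windows 11"
    if current_platform == "Darwin":
        return "Darwin/MacOS " + platform.mac_ver()[0]  # e.g. "... 14.4"
    return current_platform


print(detect_os())
```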
@@ -103,6 +109,29 @@ STREAM=true"""
              return "powershell.exe" if is_powershell else "cmd.exe"
          return basename(getenv("SHELL", "/bin/sh"))

+     def build_cmd_prompt(self):
+         _os = self.detect_os()
+         _shell = self.detect_shell()
+         return f"""Your are a Shell Command Generator.
+ Generate a command EXCLUSIVELY for {_os} OS with {_shell} shell.
+ Rules:
+ 1. Use ONLY {_shell}-specific syntax and connectors (&&, ||, |, etc)
+ 2. Output STRICTLY in plain text format
+ 3. NEVER use markdown, code blocks or explanations
+ 4. Chain multi-step commands in SINGLE LINE
+ 5. Return NOTHING except the ready-to-run command"""
+
+     def build_default_prompt(self):
+         """Build default prompt"""
+         _os = self.detect_os()
+         _shell = self.detect_shell()
+         return (
+             "You are yaili, a system management and programing assistant, "
+             f"You are managing {_os} operating system with {_shell} shell. "
+             "Your responses should be concise and use Markdown format, "
+             "unless the user explicitly requests more details."
+         )
+
      def get_default_config(self):
          """Get default configuration"""
@@ -137,36 +166,42 @@ STREAM=true"""
          response.raise_for_status()  # Raise an exception for non-200 status codes
          return response

-     def call_llm_api(self, prompt):
-         """Call LLM API, return streaming output"""
+     def get_llm_url(self) -> Optional[str]:
+         """Get LLM API URL"""
          base = self.config.get("BASE_URL", "").rstrip("/")
          if not base:
              self.console.print(
                  "[red]Base URL not found. Please set it in the configuration file. Default: https://api.openai.com/v1[/red]"
              )
-             return
+             raise typer.Exit(code=1)
          COMPLETION_PATH = self.config.get("COMPLETION_PATH", "").lstrip("/")
          if not COMPLETION_PATH:
              self.console.print(
                  "[red]Completions path not set. Please set it in the configuration file. Default: `/chat/completions`[/red]"
              )
-             return
-         url = f"{base}/{COMPLETION_PATH}"
-         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = {
+             raise typer.Exit(code=1)
+         return f"{base}/{COMPLETION_PATH}"
+
+     def build_data(self, prompt: str, mode: str = ModeEnum.TEMP.value) -> dict:
+         """Build request data"""
+         if mode == ModeEnum.EXECUTE.value:
+             system_prompt = self.build_cmd_prompt()
+         else:
+             system_prompt = self.build_default_prompt()
+         return {
              "model": self.config["MODEL"],
-             "messages": [{"role": "user", "content": prompt}],
+             "messages": [
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": prompt},
+             ],
              "stream": self.config.get("STREAM", "true") == "true",
+             "temperature": 0.7,
+             "top_p": 0.7,
+             "max_tokens": 200,
          }
-         try:
-             response = self._call_api(url, headers, data)
-         except requests.exceptions.RequestException as e:
-             self.console.print(f"[red]Error calling API: {e}[/red]")
-             return
-         if not response:
-             return

-         self.console.print("\n[bold green]Assistant:[/bold green]")
+     def stream_response(self, response):
+         """Stream response from LLM API"""
          full_completion = ""
          # Streaming response loop
          with Live(console=self.console) as live:
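Taken together, `get_llm_url` and `build_data` assemble an OpenAI-compatible chat-completions request. A sketch of the resulting URL and payload, using placeholder config values in place of `~/.config/yaicli/config.ini` and an abbreviated system prompt:

```python
import json

# Placeholder values; the real ones come from the [core] section of config.ini.
config = {
    "BASE_URL": "https://api.openai.com/v1",
    "COMPLETION_PATH": "/chat/completions",
    "MODEL": "gpt-4o",
    "STREAM": "true",
}

# get_llm_url: join base and completion path, normalizing slashes.
url = config["BASE_URL"].rstrip("/") + "/" + config["COMPLETION_PATH"].lstrip("/")

# build_data: system prompt plus user prompt, with fixed sampling settings.
data = {
    "model": config["MODEL"],
    "messages": [
        {"role": "system", "content": "You are yaili, a system management ..."},
        {"role": "user", "content": "How do I check disk usage?"},
    ],
    "stream": config["STREAM"] == "true",
    "temperature": 0.7,  # hard-coded in build_data as of 0.0.3
    "top_p": 0.7,
    "max_tokens": 200,
}

print(url)  # https://api.openai.com/v1/chat/completions
print(json.dumps(data, indent=2))
```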
@@ -190,24 +225,31 @@ STREAM=true"""
                              self.console.print(f"[red]Error decoding JSON: {decoded_line}[/red]")
                  time.sleep(0.05)

+     def call_llm_api(self, prompt: str):
+         """Call LLM API, return streaming output"""
+         url = self.get_llm_url()
+         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
+         data = self.build_data(prompt)
+         try:
+             response = self._call_api(url, headers, data)
+         except requests.exceptions.RequestException as e:
+             self.console.print(f"[red]Error calling API: {e}[/red]")
+             if self.verbose and e.response:
+                 self.console.print(f"{e.response.text}")
+             raise typer.Exit(code=1) from None
+         if not response:
+             raise typer.Exit(code=1)
+
+         self.console.print("\n[bold green]Assistant:[/bold green]")
+         self.stream_response(response)  # Stream the response
          self.console.print()  # Add a newline after the completion

      def get_command_from_llm(self, prompt):
          """Request Shell command from LLM"""
-         url = f"{self.config['BASE_URL']}/chat/completions"
+         url = self.get_llm_url()
          headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = {
-             "model": self.config["MODEL"],
-             "messages": [
-                 {
-                     "role": "system",
-                     "content": "You are a command line assistant, return one Linux/macOS shell commands only, without explanation and triple-backtick code blocks.",
-                 },
-                 {"role": "user", "content": prompt},
-             ],
-             "stream": False,  # Always use non-streaming for command generation
-         }
-
+         data = self.build_data(prompt, mode=ModeEnum.EXECUTE.value)
+         data["stream"] = False
          try:
              response = self._call_api(url, headers, data)
          except requests.exceptions.RequestException as e:
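For non-streaming responses, `get_command_from_llm` extracts the command with a JMESPath expression taken from the `ANSWER_PATH` setting, which lets the tool adapt to APIs whose response shape differs from OpenAI's. A minimal demonstration with the documented default path:

```python
import jmespath

# Shape of a (truncated) non-streaming chat-completions response.
response_json = {
    "choices": [
        {"message": {"role": "assistant", "content": "ls -la"}}
    ]
}

ANSWER_PATH = "choices[0].message.content"  # the documented default
command = jmespath.search(ANSWER_PATH, response_json)
print(command)  # ls -la
```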
@@ -397,7 +439,7 @@ def main(
      ] = False,
  ):
      """LLM CLI Tool"""
-     cli = ShellAI()
+     cli = ShellAI(verbose=verbose)
      cli.run(chat=chat, shell=shell, prompt=prompt)


yaicli-0.0.1/cmd_llm.py DELETED
@@ -1,405 +0,0 @@
- import configparser
- import json
- import subprocess
- import time
- from enum import StrEnum
- from os import getenv
- from os.path import basename, pathsep
- from pathlib import Path
- from typing import Annotated, Optional
-
- import jmespath
- import requests
- import typer
- from prompt_toolkit import PromptSession
- from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
- from prompt_toolkit.keys import Keys
- from rich.console import Console
- from rich.live import Live
- from rich.markdown import Markdown
- from rich.prompt import Confirm
-
-
- class ModeEnum(StrEnum):
-     CHAT = "chat"
-     EXECUTE = "exec"
-     TEMP = "temp"
-
-
- class CasePreservingConfigParser(configparser.RawConfigParser):
-     def optionxform(self, optionstr):
-         return optionstr
-
-
- class ShellAI:
-     # Configuration file path
-     CONFIG_PATH = Path("~/.config/llmcli/config.ini").expanduser()
-
-     # Default configuration template
-     DEFAULT_CONFIG_INI = """[core]
- BASE_URL=https://api.openai.com/v1
- API_KEY=
- MODEL=gpt-4o
-
- # default run mode, default: temp
- # chat: interactive chat mode
- # exec: shell command generation mode
- # temp: one-shot mode
- DEFAULT_MODE=temp
-
- # auto detect shell and os
- SHELL_NAME=auto
- OS_NAME=auto
-
- # if you want to use custom completions path, you can set it here
- COMPLETION_PATH=/chat/completions
- # if you want to use custom answer path, you can set it here
- ANSWER_PATH=choices[0].message.content
-
- # true: streaming response
- # false: non-streaming response
- STREAM=true"""
-
-     def __init__(self, verbose: bool = False):
-         # Initialize terminal components
-         self.verbose = verbose
-         self.console = Console()
-         self.bindings = KeyBindings()
-         self.session = PromptSession(key_bindings=self.bindings)
-         self.current_mode = ModeEnum.CHAT.value
-         self.config = {}
-
-         # Setup key bindings
-         self._setup_key_bindings()
-
-     def _setup_key_bindings(self):
-         """Setup keyboard shortcuts"""
-
-         @self.bindings.add(Keys.ControlI)  # Bind Ctrl+I to switch modes
-         def _(event: KeyPressEvent):
-             self.current_mode = (
-                 ModeEnum.CHAT.value
-                 if self.current_mode == ModeEnum.EXECUTE.value
-                 else ModeEnum.EXECUTE.value
-             )
-
-     def get_os(self):
-         """Detect operating system"""
-         if self.config.get("OS_NAME") != "auto":
-             return self.config.get("OS_NAME")
-         import platform
-
-         return platform.system()
-
-     def get_shell(self):
-         """Detect shell"""
-         if self.config.get("SHELL_NAME") != "auto":
-             return self.config.get("SHELL_NAME")
-         import platform
-
-         current_platform = platform.system()
-         if current_platform in ("Windows", "nt"):
-             is_powershell = len(getenv("PSModulePath", "").split(pathsep)) >= 3
-             return "powershell.exe" if is_powershell else "cmd.exe"
-         return basename(getenv("SHELL", "/bin/sh"))
-
-     def get_default_config(self):
-         """Get default configuration"""
-         config = CasePreservingConfigParser()
-         try:
-             config.read_string(self.DEFAULT_CONFIG_INI)
-             config_dict = {k.upper(): v for k, v in config["core"].items()}
-             config_dict["STREAM"] = str(config_dict.get("STREAM", "true")).lower()
-             return config_dict
-         except configparser.Error as e:
-             self.console.print(f"[red]Error parsing config: {e}[/red]")
-             raise typer.Exit(code=1) from None
-
-     def load_config(self):
-         """Load LLM API configuration"""
-         if not self.CONFIG_PATH.exists():
-             self.console.print(
-                 "[bold yellow]Configuration file not found. Creating default configuration file.[/bold yellow]"
-             )
-             self.CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
-             with open(self.CONFIG_PATH, "w") as f:
-                 f.write(self.DEFAULT_CONFIG_INI)
-             return self.config
-         config = CasePreservingConfigParser()
-         config.read(self.CONFIG_PATH)
-         self.config = dict(config["core"])
-         self.config["STREAM"] = str(self.config.get("STREAM", "true")).lower()
-         return self.config
-
-     def _call_api(self, url, headers, data):
-         """Generic API call method"""
-         response = requests.post(url, headers=headers, json=data)
-         response.raise_for_status()  # Raise an exception for non-200 status codes
-         return response
-
-     def call_llm_api(self, prompt):
-         """Call LLM API, return streaming output"""
-         base = self.config.get("BASE_URL", "").rstrip("/")
-         if not base:
-             self.console.print(
-                 "[red]Base URL not found. Please set it in the configuration file. Default: https://api.openai.com/v1[/red]"
-             )
-             return
-         COMPLETION_PATH = self.config.get("COMPLETION_PATH", "").lstrip("/")
-         if not COMPLETION_PATH:
-             self.console.print(
-                 "[red]Completions path not set. Please set it in the configuration file. Default: `/chat/completions`[/red]"
-             )
-             return
-         url = f"{base}/{COMPLETION_PATH}"
-         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = {
-             "model": self.config["MODEL"],
-             "messages": [{"role": "user", "content": prompt}],
-             "stream": self.config.get("STREAM", "true") == "true",
-         }
-         try:
-             response = self._call_api(url, headers, data)
-         except requests.exceptions.RequestException as e:
-             self.console.print(f"[red]Error calling API: {e}[/red]")
-             return
-         if not response:
-             return
-
-         self.console.print("\n[bold green]Assistant:[/bold green]")
-         full_completion = ""
-         # Streaming response loop
-         with Live(console=self.console) as live:
-             for line in response.iter_lines():
-                 if not line:
-                     continue
-                 decoded_line = line.decode("utf-8")
-                 if decoded_line.startswith("data: "):
-                     decoded_line = decoded_line[6:]
-                     if decoded_line == "[DONE]":
-                         break
-                     try:
-                         json_data = json.loads(decoded_line)
-                         content = json_data["choices"][0]["delta"].get("content", "")
-                         full_completion += content
-                         markdown = Markdown(markup=full_completion)
-                         live.update(markdown, refresh=True)
-                     except json.JSONDecodeError:
-                         self.console.print("[red]Error decoding response JSON[/red]")
-                         if self.verbose:
-                             self.console.print(f"[red]Error decoding JSON: {decoded_line}[/red]")
-                 time.sleep(0.05)
-
-         self.console.print()  # Add a newline after the completion
-
-     def get_command_from_llm(self, prompt):
-         """Request Shell command from LLM"""
-         url = f"{self.config['BASE_URL']}/chat/completions"
-         headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
-         data = {
-             "model": self.config["MODEL"],
-             "messages": [
-                 {
-                     "role": "system",
-                     "content": "You are a command line assistant, return one Linux/macOS shell commands only, without explanation and triple-backtick code blocks.",
-                 },
-                 {"role": "user", "content": prompt},
-             ],
-             "stream": False,  # Always use non-streaming for command generation
-         }
-
-         try:
-             response = self._call_api(url, headers, data)
-         except requests.exceptions.RequestException as e:
-             self.console.print(f"[red]Error calling API: {e}[/red]")
-             return None
-         if not response:
-             return None
-         ANSWER_PATH = self.config.get("ANSWER_PATH", None)
-         if not ANSWER_PATH:
-             ANSWER_PATH = "choices[0].message.content"
-             if self.verbose:
-                 self.console.print(
-                     "[bold yellow]Answer path not set. Using default: `choices[0].message.content`[/bold yellow]"
-                 )
-         content = jmespath.search(ANSWER_PATH, response.json())
-         return content.strip()
-
-     def execute_shell_command(self, command):
-         """Execute shell command"""
-         self.console.print(f"\n[bold green]Executing command: [/bold green] {command}\n")
-         result = subprocess.run(command, shell=True)
-         if result.returncode != 0:
-             self.console.print(
-                 f"\n[bold red]Command failed with return code: {result.returncode}[/bold red]"
-             )
-
-     def get_prompt_tokens(self):
-         """Get prompt tokens based on current mode"""
-         if self.current_mode == ModeEnum.CHAT.value:
-             qmark = "💬"
-         elif self.current_mode == ModeEnum.EXECUTE.value:
-             qmark = "🚀"
-         else:
-             qmark = ""
-         return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]
-
-     def chat_mode(self, user_input: str):
-         """Interactive chat mode"""
-         if self.current_mode != ModeEnum.CHAT.value:
-             return self.current_mode
-
-         self.call_llm_api(user_input)
-         return ModeEnum.CHAT.value
-
-     def _filter_command(self, command):
-         """Filter out unwanted characters from command
-
-         The LLM may return commands in markdown format with code blocks.
-         This method removes markdown formatting from the command.
-         It handles various formats including:
-         - Commands surrounded by ``` (plain code blocks)
-         - Commands with language specifiers like ```bash, ```zsh, etc.
-         - Commands with specific examples like ```ls -al```
-
-         example:
-         ```bash\nls -la\n``` ==> ls -al
-         ```zsh\nls -la\n``` ==> ls -al
-         ```ls -al``` ==> ls -al
-         ls -al ==> ls -al
-         ```\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
-         ```bash\ncd /tmp\nls -la\n``` ==> cd /tmp\nls -la
-         """
-         if not command or not command.strip():
-             return ""
-
-         # Handle commands that are already without code blocks
-         if "```" not in command:
-             return command.strip()
-
-         # Handle code blocks with or without language specifiers
-         lines = command.strip().split("\n")
-
-         # Check if it's a single-line code block like ```ls -al```
-         if len(lines) == 1 and lines[0].startswith("```") and lines[0].endswith("```"):
-             return lines[0][3:-3].strip()
-
-         # Handle multi-line code blocks
-         if lines[0].startswith("```"):
-             # Remove the opening ``` line (with or without language specifier)
-             content_lines = lines[1:]
-
-             # If the last line is a closing ```, remove it
-             if content_lines and content_lines[-1].strip() == "```":
-                 content_lines = content_lines[:-1]
-
-             # Join the remaining lines and strip any extra whitespace
-             return "\n".join(line.strip() for line in content_lines if line.strip())
-
-     def execute_mode(self, user_input: str):
-         """Execute mode"""
-         if user_input == "" or self.current_mode != ModeEnum.EXECUTE.value:
-             return self.current_mode
-
-         command = self.get_command_from_llm(user_input)
-         command = self._filter_command(command)
-         if not command:
-             self.console.print("[bold red]No command generated[/bold red]")
-             return self.current_mode
-         self.console.print(f"\n[bold magenta]Generated command:[/bold magenta] {command}")
-         confirm = Confirm.ask("Execute this command?")
-         if confirm:
-             self.execute_shell_command(command)
-         return ModeEnum.EXECUTE.value
-
-     def run_repl_loop(self):
-         while True:
-             user_input = self.session.prompt(self.get_prompt_tokens)
-             # Skip empty input
-             if not user_input.strip():
-                 continue
-
-             if user_input.lower() in ("exit", "quit"):
-                 break
-
-             if self.current_mode == ModeEnum.CHAT.value:
-                 self.chat_mode(user_input)
-             elif self.current_mode == ModeEnum.EXECUTE.value:
-                 self.execute_mode(user_input)
-
-         self.console.print("[bold green]Exiting...[/bold green]")
-
-     def run_one_shot(self, prompt: str):
-         """Run one-shot mode with given prompt"""
-         if self.current_mode == ModeEnum.EXECUTE.value:
-             self.execute_mode(prompt)  # Execute mode for one-shot prompt
-         else:
-             self.call_llm_api(prompt)
-
-     def run(self, chat=False, shell=False, prompt: Optional[str] = None):
-         """Run the CLI application"""
-         # Load configuration
-         self.config = self.load_config()
-         if not self.config.get("API_KEY", None):
-             self.console.print(
-                 "[red]API key not found. Please set it in the configuration file.[/red]"
-             )
-             return
-
-         # Set initial mode
-         self.current_mode = self.config["DEFAULT_MODE"]
-
-         # Check run mode from command line arguments
-         if all([chat, shell]):
-             self.console.print("[red]Cannot use both --chat and --shell[/red]")
-             return
-         elif chat:
-             self.current_mode = ModeEnum.CHAT.value
-         elif shell:
-             self.current_mode = ModeEnum.EXECUTE.value
-
-         if self.verbose:
-             self.console.print("[bold yellow]Verbose mode enabled[/bold yellow]")
-             self.console.print(f"[bold yellow]Current mode: {self.current_mode}[/bold yellow]")
-             self.console.print(f"[bold yellow]Using model: {self.config['MODEL']}[/bold yellow]")
-
-         if self.current_mode in (ModeEnum.TEMP.value, ModeEnum.EXECUTE.value) and prompt:
-             self.run_one_shot(prompt)
-         elif self.current_mode == ModeEnum.CHAT.value:
-             self.run_repl_loop()
-
-
- # CLI application setup
- CONTEXT_SETTINGS = {
-     "help_option_names": ["-h", "--help"],
-     "show_default": True,
- }
-
- app = typer.Typer(
-     name="ShellAI",
-     context_settings=CONTEXT_SETTINGS,
-     pretty_exceptions_enable=False,
-     short_help="ShellAI Command Line Tool",
-     no_args_is_help=True,
-     invoke_without_command=True,
- )
-
-
- @app.command()
- def main(
-     prompt: Annotated[str, typer.Argument(show_default=False, help="The prompt send to the LLM")],
-     verbose: Annotated[
-         bool, typer.Option("--verbose", "-V", help="Show verbose information")
-     ] = False,
-     chat: Annotated[bool, typer.Option("--chat", "-c", help="Start in chat mode")] = False,
-     shell: Annotated[
-         bool, typer.Option("--shell", "-s", help="Generate and execute shell command")
-     ] = False,
- ):
-     """LLM CLI Tool"""
-     cli = ShellAI()
-     cli.run(chat=chat, shell=shell, prompt=prompt)
-
-
- if __name__ == "__main__":
-     app()
6 files without changes