yaicli 0.0.4__tar.gz → 0.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  [bumpversion]
2
- current_version = 0.0.4
2
+ current_version = 0.0.5
3
3
  commit = True
4
4
  tag = True
5
5
 
@@ -1,4 +1,17 @@
1
1
 
2
+ ---
3
+ ## [0.0.4](https://github.com/belingud/yaicli/compare/v0.0.3..v0.0.4) - 2025-04-02
4
+
5
+ ### 🚜 Refactor
6
+
7
+ - simplify multi-line statements for better readability - ([4870bb7](https://github.com/belingud/yaicli/commit/4870bb7460682791ca79bbe2bcc0cef94bf51c26)) - Belingud
8
+ - rename ShellAI to yaicli and improve CLI help handling - ([dad0905](https://github.com/belingud/yaicli/commit/dad090540a7c1f8c8b30e3c688643a76e504b0c0)) - Belingud
9
+
10
+ ### ⚙️ Miscellaneous Tasks
11
+
12
+ - update yaicli version to 0.0.3 - ([a689a02](https://github.com/belingud/yaicli/commit/a689a02b1c5e0cb278484267ef71f5f3acec8203)) - Belingud
13
+
14
+
2
15
  ---
3
16
  ## [0.0.3](https://github.com/belingud/yaicli/compare/v0.0.2..v0.0.3) - 2025-04-02
4
17
 
yaicli-0.0.5/PKG-INFO ADDED
@@ -0,0 +1,244 @@
1
+ Metadata-Version: 2.4
2
+ Name: yaicli
3
+ Version: 0.0.5
4
+ Summary: A simple CLI tool to interact with LLM
5
+ License-File: LICENSE
6
+ Requires-Python: >=3.9
7
+ Requires-Dist: distro>=1.9.0
8
+ Requires-Dist: jmespath>=1.0.1
9
+ Requires-Dist: prompt-toolkit>=3.0.50
10
+ Requires-Dist: requests>=2.32.3
11
+ Requires-Dist: rich>=13.9.4
12
+ Requires-Dist: typer>=0.15.2
13
+ Description-Content-Type: text/markdown
14
+
15
+ # YAICLI - Your AI Command Line Interface
16
+
17
+ YAICLI is a powerful command-line AI assistant tool that enables you to interact with Large Language Models (LLMs) through your terminal. It offers multiple operation modes for everyday conversations, generating and executing shell commands, and one-shot quick queries.
18
+
19
+ ## Features
20
+
21
+ - **Multiple Operation Modes**:
22
+ - **Chat Mode (💬)**: Interactive conversation with the AI assistant
23
+ - **Execute Mode (🚀)**: Generate and execute shell commands specific to your OS and shell
24
+ - **Temp Mode**: Quick queries without entering interactive mode
25
+
26
+ - **Smart Environment Detection**:
27
+ - Automatically detects your operating system and shell
28
+ - Customizes responses and commands for your specific environment
29
+
30
+ - **Rich Terminal Interface**:
31
+ - Markdown rendering for formatted responses
32
+ - Streaming responses for real-time feedback
33
+ - Color-coded output for better readability
34
+
35
+ - **Configurable**:
36
+ - Customizable API endpoints
37
+ - Support for different LLM providers
38
+ - Adjustable response parameters
39
+
40
+ - **Keyboard Shortcuts**:
41
+ - Tab to switch between Chat and Execute modes
42
+
43
+ ## Installation
44
+
45
+ ### Prerequisites
46
+
47
+ - Python 3.9 or higher
48
+ - pip (Python package manager)
49
+
50
+ ### Install from PyPI
51
+
52
+ ```bash
53
+ # Install by pip
54
+ pip install yaicli
55
+
56
+ # Install by pipx
57
+ pipx install yaicli
58
+
59
+ # Install by uv
60
+ uv tool install yaicli
61
+ ```
62
+
63
+ ### Install from Source
64
+
65
+ ```bash
66
+ git clone https://github.com/yourusername/yaicli.git
67
+ cd yaicli
68
+ pip install .
69
+ ```
70
+
71
+ ## Configuration
72
+
73
+ On first run, YAICLI will create a default configuration file at `~/.config/yaicli/config.ini`. You'll need to edit this file to add your API key and customize other settings.
74
+
75
+ Just run `ai`, and it will create the config file for you. Then you can edit it to add your API key.
76
+
77
+ ### Configuration File
78
+
79
+ ```ini
80
+ [core]
81
+ BASE_URL=https://api.openai.com/v1
82
+ API_KEY=your_api_key_here
83
+ MODEL=gpt-4o
84
+
85
+ # default run mode, default: temp
86
+ # chat: interactive chat mode
87
+ # exec: shell command generation mode
88
+ # temp: one-shot mode
89
+ DEFAULT_MODE=temp
90
+
91
+ # auto detect shell and os
92
+ SHELL_NAME=auto
93
+ OS_NAME=auto
94
+
95
+ # if you want to use custom completions path, you can set it here
96
+ COMPLETION_PATH=/chat/completions
97
+ # if you want to use custom answer path, you can set it here
98
+ ANSWER_PATH=choices[0].message.content
99
+
100
+ # true: streaming response
101
+ # false: non-streaming response
102
+ STREAM=true
103
+ ```
104
+
105
+ ### Configuration Options
106
+
107
+ - **BASE_URL**: API endpoint URL (default: OpenAI API)
108
+ - **API_KEY**: Your API key for the LLM provider
109
+ - **MODEL**: The model to use (e.g., gpt-4o, gpt-3.5-turbo), default: gpt-4o
110
+ - **DEFAULT_MODE**: Default operation mode (chat, exec, or temp), default: temp
111
+ - **SHELL_NAME**: Shell to use (auto for automatic detection), default: auto
112
+ - **OS_NAME**: OS to use (auto for automatic detection), default: auto
113
+ - **COMPLETION_PATH**: Path for completions endpoint, default: /chat/completions
114
+ - **ANSWER_PATH**: JSON path expression to extract answer from response, default: choices[0].message.content
115
+ - **STREAM**: Enable/disable streaming responses
116
+
117
+ ## Usage
118
+
119
+ ### Basic Usage
120
+
121
+ ```bash
122
+ # One-shot mode
123
+ ai "What is the capital of France?"
124
+
125
+ # Chat mode
126
+ ai --chat
127
+
128
+ # Shell command generation mode
129
+ ai --shell "Create a backup of my Documents folder"
130
+
131
+ # Verbose mode for debugging
132
+ ai --verbose "Explain quantum computing"
133
+ ```
134
+
135
+ ### Command Line Options
136
+
137
+ - `<PROMPT>`: Argument
138
+ - `--verbose` or `-V`: Show verbose information
139
+ - `--chat` or `-c`: Start in chat mode
140
+ - `--shell` or `-s`: Generate and execute shell command
141
+ - `--install-completion`: Install completion for the current shell
142
+ - `--show-completion`: Show completion for the current shell, to copy it or customize the installation
143
+ - `--help` or `-h`: Show this message and exit
144
+
145
+ ```bash
146
+ ai -h
147
+
148
+ Usage: ai [OPTIONS] [PROMPT]
149
+
150
+ yaicli. Your AI interface in cli.
151
+
152
+ ╭─ Arguments ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
153
+ │ prompt [PROMPT] The prompt send to the LLM │
154
+ ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
155
+ ╭─ Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
156
+ │ --verbose -V Show verbose information │
157
+ │ --chat -c Start in chat mode │
158
+ │ --shell -s Generate and execute shell command │
159
+ │ --install-completion Install completion for the current shell. │
160
+ │ --show-completion Show completion for the current shell, to copy it or customize the installation. │
161
+ │ --help -h Show this message and exit. │
162
+ ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
163
+
164
+
165
+ ```
166
+
167
+ ### Interactive Mode
168
+
169
+ In interactive mode (chat or shell), you can:
170
+ - Type your queries and get responses
171
+ - Use `Tab` to switch between Chat and Execute modes
172
+ - Type 'exit' or 'quit' to exit
173
+
174
+ ### Shell Command Generation
175
+
176
+ In Execute mode:
177
+ 1. Enter your request in natural language
178
+ 2. YAICLI will generate an appropriate shell command
179
+ 3. Review the command
180
+ 4. Confirm to execute or reject
181
+
182
+ ## Examples
183
+
184
+ ### Chat Mode Example
185
+
186
+ ```bash
187
+ $ ai --chat
188
+ 💬 > Tell me about the solar system
189
+
190
+ Assistant:
191
+ Certainly! Here’s a brief overview of the solar system:
192
+
193
+ • Sun: The central star of the solar system, providing light and energy.
194
+ • Planets:
195
+ • Mercury: Closest to the Sun, smallest planet.
196
+ • Venus: Second planet, known for its thick atmosphere and high surface temperature.
197
+ • Earth: Third planet, the only known planet to support life.
198
+ • Mars: Fourth planet, often called the "Red Planet" due to its reddish appearance.
199
+ • Jupiter: Largest planet, a gas giant with many moons.
200
+ • Saturn: Known for its prominent ring system, also a gas giant.
201
+ • Uranus: An ice giant, known for its unique axial tilt.
202
+ • Neptune: Another ice giant, known for its deep blue color.
203
+ • Dwarf Planets:
204
+ • Pluto: Once considered the ninth planet, now classified as
205
+
206
+ 💬 >
207
+ ```
208
+
209
+ ### Execute Mode Example
210
+
211
+ ```bash
212
+ $ ai --shell "Find all PDF files in my Downloads folder"
213
+
214
+ Generated command: find ~/Downloads -type f -name "*.pdf"
215
+ Execute this command? [y/n]: y
216
+
217
+ Executing command: find ~/Downloads -type f -name "*.pdf"
218
+
219
+ /Users/username/Downloads/document1.pdf
220
+ /Users/username/Downloads/report.pdf
221
+ ...
222
+ ```
223
+
224
+ ## Technical Implementation
225
+
226
+ YAICLI is built using several Python libraries:
227
+
228
+ - **Typer**: Provides the command-line interface
229
+ - **Rich**: Provides terminal content formatting and beautiful display
230
+ - **prompt_toolkit**: Provides interactive command-line input experience
231
+ - **requests**: Handles API requests
232
+ - **jmespath**: Parses JSON responses
233
+
234
+ ## Contributing
235
+
236
+ Contributions of code, issue reports, or feature suggestions are welcome.
237
+
238
+ ## License
239
+
240
+ [Apache License 2.0](LICENSE)
241
+
242
+ ---
243
+
244
+ *YAICLI - Making your terminal smarter*
yaicli-0.0.5/README.md ADDED
@@ -0,0 +1,230 @@
1
+ # YAICLI - Your AI Command Line Interface
2
+
3
+ YAICLI is a powerful command-line AI assistant tool that enables you to interact with Large Language Models (LLMs) through your terminal. It offers multiple operation modes for everyday conversations, generating and executing shell commands, and one-shot quick queries.
4
+
5
+ ## Features
6
+
7
+ - **Multiple Operation Modes**:
8
+ - **Chat Mode (💬)**: Interactive conversation with the AI assistant
9
+ - **Execute Mode (🚀)**: Generate and execute shell commands specific to your OS and shell
10
+ - **Temp Mode**: Quick queries without entering interactive mode
11
+
12
+ - **Smart Environment Detection**:
13
+ - Automatically detects your operating system and shell
14
+ - Customizes responses and commands for your specific environment
15
+
16
+ - **Rich Terminal Interface**:
17
+ - Markdown rendering for formatted responses
18
+ - Streaming responses for real-time feedback
19
+ - Color-coded output for better readability
20
+
21
+ - **Configurable**:
22
+ - Customizable API endpoints
23
+ - Support for different LLM providers
24
+ - Adjustable response parameters
25
+
26
+ - **Keyboard Shortcuts**:
27
+ - Tab to switch between Chat and Execute modes
28
+
29
+ ## Installation
30
+
31
+ ### Prerequisites
32
+
33
+ - Python 3.9 or higher
34
+ - pip (Python package manager)
35
+
36
+ ### Install from PyPI
37
+
38
+ ```bash
39
+ # Install by pip
40
+ pip install yaicli
41
+
42
+ # Install by pipx
43
+ pipx install yaicli
44
+
45
+ # Install by uv
46
+ uv tool install yaicli
47
+ ```
48
+
49
+ ### Install from Source
50
+
51
+ ```bash
52
+ git clone https://github.com/yourusername/yaicli.git
53
+ cd yaicli
54
+ pip install .
55
+ ```
56
+
57
+ ## Configuration
58
+
59
+ On first run, YAICLI will create a default configuration file at `~/.config/yaicli/config.ini`. You'll need to edit this file to add your API key and customize other settings.
60
+
61
+ Just run `ai`, and it will create the config file for you. Then you can edit it to add your API key.
62
+
63
+ ### Configuration File
64
+
65
+ ```ini
66
+ [core]
67
+ BASE_URL=https://api.openai.com/v1
68
+ API_KEY=your_api_key_here
69
+ MODEL=gpt-4o
70
+
71
+ # default run mode, default: temp
72
+ # chat: interactive chat mode
73
+ # exec: shell command generation mode
74
+ # temp: one-shot mode
75
+ DEFAULT_MODE=temp
76
+
77
+ # auto detect shell and os
78
+ SHELL_NAME=auto
79
+ OS_NAME=auto
80
+
81
+ # if you want to use custom completions path, you can set it here
82
+ COMPLETION_PATH=/chat/completions
83
+ # if you want to use custom answer path, you can set it here
84
+ ANSWER_PATH=choices[0].message.content
85
+
86
+ # true: streaming response
87
+ # false: non-streaming response
88
+ STREAM=true
89
+ ```
90
+
91
+ ### Configuration Options
92
+
93
+ - **BASE_URL**: API endpoint URL (default: OpenAI API)
94
+ - **API_KEY**: Your API key for the LLM provider
95
+ - **MODEL**: The model to use (e.g., gpt-4o, gpt-3.5-turbo), default: gpt-4o
96
+ - **DEFAULT_MODE**: Default operation mode (chat, exec, or temp), default: temp
97
+ - **SHELL_NAME**: Shell to use (auto for automatic detection), default: auto
98
+ - **OS_NAME**: OS to use (auto for automatic detection), default: auto
99
+ - **COMPLETION_PATH**: Path for completions endpoint, default: /chat/completions
100
+ - **ANSWER_PATH**: JSON path expression to extract answer from response, default: choices[0].message.content
101
+ - **STREAM**: Enable/disable streaming responses
102
+
103
+ ## Usage
104
+
105
+ ### Basic Usage
106
+
107
+ ```bash
108
+ # One-shot mode
109
+ ai "What is the capital of France?"
110
+
111
+ # Chat mode
112
+ ai --chat
113
+
114
+ # Shell command generation mode
115
+ ai --shell "Create a backup of my Documents folder"
116
+
117
+ # Verbose mode for debugging
118
+ ai --verbose "Explain quantum computing"
119
+ ```
120
+
121
+ ### Command Line Options
122
+
123
+ - `<PROMPT>`: Argument
124
+ - `--verbose` or `-V`: Show verbose information
125
+ - `--chat` or `-c`: Start in chat mode
126
+ - `--shell` or `-s`: Generate and execute shell command
127
+ - `--install-completion`: Install completion for the current shell
128
+ - `--show-completion`: Show completion for the current shell, to copy it or customize the installation
129
+ - `--help` or `-h`: Show this message and exit
130
+
131
+ ```bash
132
+ ai -h
133
+
134
+ Usage: ai [OPTIONS] [PROMPT]
135
+
136
+ yaicli. Your AI interface in cli.
137
+
138
+ ╭─ Arguments ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
139
+ │ prompt [PROMPT] The prompt send to the LLM │
140
+ ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
141
+ ╭─ Options ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
142
+ │ --verbose -V Show verbose information │
143
+ │ --chat -c Start in chat mode │
144
+ │ --shell -s Generate and execute shell command │
145
+ │ --install-completion Install completion for the current shell. │
146
+ │ --show-completion Show completion for the current shell, to copy it or customize the installation. │
147
+ │ --help -h Show this message and exit. │
148
+ ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
149
+
150
+
151
+ ```
152
+
153
+ ### Interactive Mode
154
+
155
+ In interactive mode (chat or shell), you can:
156
+ - Type your queries and get responses
157
+ - Use `Tab` to switch between Chat and Execute modes
158
+ - Type 'exit' or 'quit' to exit
159
+
160
+ ### Shell Command Generation
161
+
162
+ In Execute mode:
163
+ 1. Enter your request in natural language
164
+ 2. YAICLI will generate an appropriate shell command
165
+ 3. Review the command
166
+ 4. Confirm to execute or reject
167
+
168
+ ## Examples
169
+
170
+ ### Chat Mode Example
171
+
172
+ ```bash
173
+ $ ai --chat
174
+ 💬 > Tell me about the solar system
175
+
176
+ Assistant:
177
+ Certainly! Here’s a brief overview of the solar system:
178
+
179
+ • Sun: The central star of the solar system, providing light and energy.
180
+ • Planets:
181
+ • Mercury: Closest to the Sun, smallest planet.
182
+ • Venus: Second planet, known for its thick atmosphere and high surface temperature.
183
+ • Earth: Third planet, the only known planet to support life.
184
+ • Mars: Fourth planet, often called the "Red Planet" due to its reddish appearance.
185
+ • Jupiter: Largest planet, a gas giant with many moons.
186
+ • Saturn: Known for its prominent ring system, also a gas giant.
187
+ • Uranus: An ice giant, known for its unique axial tilt.
188
+ • Neptune: Another ice giant, known for its deep blue color.
189
+ • Dwarf Planets:
190
+ • Pluto: Once considered the ninth planet, now classified as
191
+
192
+ 💬 >
193
+ ```
194
+
195
+ ### Execute Mode Example
196
+
197
+ ```bash
198
+ $ ai --shell "Find all PDF files in my Downloads folder"
199
+
200
+ Generated command: find ~/Downloads -type f -name "*.pdf"
201
+ Execute this command? [y/n]: y
202
+
203
+ Executing command: find ~/Downloads -type f -name "*.pdf"
204
+
205
+ /Users/username/Downloads/document1.pdf
206
+ /Users/username/Downloads/report.pdf
207
+ ...
208
+ ```
209
+
210
+ ## Technical Implementation
211
+
212
+ YAICLI is built using several Python libraries:
213
+
214
+ - **Typer**: Provides the command-line interface
215
+ - **Rich**: Provides terminal content formatting and beautiful display
216
+ - **prompt_toolkit**: Provides interactive command-line input experience
217
+ - **requests**: Handles API requests
218
+ - **jmespath**: Parses JSON responses
219
+
220
+ ## Contributing
221
+
222
+ Contributions of code, issue reports, or feature suggestions are welcome.
223
+
224
+ ## License
225
+
226
+ [Apache License 2.0](LICENSE)
227
+
228
+ ---
229
+
230
+ *YAICLI - Making your terminal smarter*
@@ -0,0 +1,195 @@
1
+ # YAICLI - Your AI Command Line Interface
2
+
3
+ YAICLI is a powerful command-line AI assistant tool that allows you to interact with Large Language Models (LLMs) through your terminal. It provides multiple operation modes for daily conversations, generating and executing shell commands, and quick one-time queries.
4
+
5
+ ## Features
6
+
7
+ ### Multiple Operation Modes
8
+
9
+ 1. **Chat Mode**
10
+ - Interactive conversation interface
11
+ - Markdown-formatted responses
12
+ - Streaming output display
13
+
14
+ 2. **Execute Mode**
15
+ - Automatically generates shell commands from natural language descriptions
16
+ - Detects current operating system and shell environment to generate appropriate commands
17
+ - Automatically filters unnecessary Markdown formatting from command output
18
+ - Confirmation mechanism before execution for safety
19
+
20
+ 3. **Temp Mode (One-shot)**
21
+ - Quick single queries without maintaining a session
22
+
23
+ ### Intelligent Environment Detection
24
+
25
+ - Automatically detects operating system (Windows, MacOS, Linux and its distributions)
26
+ - Automatically detects shell type (bash, zsh, PowerShell, cmd, etc.)
27
+ - Customizes prompts and commands based on your environment
28
+
29
+ ### User Experience
30
+
31
+ - Streaming response display for real-time AI output viewing
32
+ - Keyboard shortcuts (e.g., Ctrl+I to switch modes)
33
+ - Command confirmation mechanism to prevent accidental execution
34
+
35
+ ## Requirements
36
+
37
+ - Python 3.9+
38
+ - Supported operating systems: Windows, MacOS, Linux
39
+
40
+ ## Dependencies
41
+
42
+ ```
43
+ configparser
44
+ json
45
+ platform
46
+ subprocess
47
+ time
48
+ jmespath
49
+ requests
50
+ typer
51
+ distro
52
+ prompt_toolkit
53
+ ```
54
+
55
+ ## Installation
56
+
57
+ 1. Clone the repository:
58
+ ```bash
59
+ git clone https://github.com/yourusername/yaicli.git
60
+ cd yaicli
61
+ ```
62
+
63
+ 2. Create virtual environment and install dependencies:
64
+ ```bash
65
+ uv sync
66
+ ```
67
+
68
+
69
+ ## Configuration
70
+
71
+ The first time you run YAICLI, it will create a default configuration file at `~/.config/yaicli/config.ini`. You'll need to edit this file and add your API key.
72
+
73
+ Example configuration file:
74
+
75
+ ```ini
76
+ [core]
77
+ BASE_URL=https://api.openai.com/v1
78
+ API_KEY=your-api-key-here
79
+ MODEL=gpt-4o
80
+
81
+ # default run mode, default: temp
82
+ # chat: interactive chat mode
83
+ # exec: shell command generation mode
84
+ # temp: one-shot mode
85
+ DEFAULT_MODE=temp
86
+
87
+ # auto detect shell and os
88
+ SHELL_NAME=auto
89
+ OS_NAME=auto
90
+
91
+ # if you want to use custom completions path, you can set it here
92
+ COMPLETION_PATH=/chat/completions
93
+ # if you want to use custom answer path, you can set it here
94
+ ANSWER_PATH=choices[0].message.content
95
+
96
+ # true: streaming response
97
+ # false: non-streaming response
98
+ STREAM=true
99
+ ```
100
+
101
+ ## Usage
102
+
103
+ ### Basic Usage
104
+
105
+ ```bash
106
+ # One-shot mode (default): Send a prompt and get a response
107
+ ai "How do I check memory usage on a Linux system?"
108
+
109
+ # Chat mode: Start an interactive chat session
110
+ ai --chat
111
+
112
+ # Execute mode: Generate and execute shell commands
113
+ ai --shell "Create a directory named project and create src and docs subdirectories in it"
114
+
115
+ # Enable verbose output
116
+ ai --verbose "Explain how Docker works"
117
+ ```
118
+
119
+ ### Interactive Commands
120
+
121
+ In chat or execute mode:
122
+
123
+ - Press `Ctrl+I` to switch between chat and execute modes
124
+ - Type `exit` or `quit` to exit the application
125
+
126
+ ### Mode Descriptions
127
+
128
+ 1. **Chat Mode (--chat)**
129
+ ```bash
130
+ ai --chat
131
+ ```
132
+ Starts an interactive session where you can converse with the AI. Responses will be displayed in Markdown format.
133
+
134
+ 2. **Execute Mode (--shell)**
135
+ ```bash
136
+ ai --shell "Find all files larger than 100MB"
137
+ ```
138
+ The AI will generate appropriate shell commands and execute them after your confirmation.
139
+
140
+ 3. **One-shot Mode (default)**
141
+ ```bash
142
+ ai "Explain the difference between TCP and UDP"
143
+ ```
144
+ Sends a single query, provides the response, and exits.
145
+
146
+ ## Advanced Usage
147
+
148
+ ### Custom System Recognition
149
+
150
+ If you want to specify a particular operating system or shell, you can modify the configuration file:
151
+
152
+ ```ini
153
+ [core]
154
+ # ...other configuration...
155
+ SHELL_NAME=bash # Force bash shell command syntax
156
+ OS_NAME=Ubuntu # Force commands targeting Ubuntu
157
+ ```
158
+
159
+ ### Custom API Endpoint
160
+
161
+ YAICLI uses the OpenAI API by default, but you can use compatible APIs by modifying the configuration:
162
+
163
+ ```ini
164
+ [core]
165
+ BASE_URL=https://your-api-endpoint/v1
166
+ COMPLETION_PATH=/chat/completions
167
+ ANSWER_PATH=custom.json.path.to.content
168
+ ```
169
+
170
+ ## Technical Implementation
171
+
172
+ YAICLI is built using several Python libraries:
173
+
174
+ - **Typer**: Provides the command-line interface
175
+ - **prompt_toolkit**: Provides interactive command-line input experience
176
+ - **requests**: Handles API requests
177
+ - **jmespath**: Parses JSON responses
178
+
179
+ ## Limitations and Notes
180
+
181
+ - Requires a valid OpenAI API key or compatible API
182
+ - Commands generated in execute mode should be carefully reviewed before execution
183
+ - API calls may incur charges depending on your service provider and model
184
+
185
+ ## Contributing
186
+
187
+ Contributions of code, issue reports, or feature suggestions are welcome.
188
+
189
+ ## License
190
+
191
+ Apache License 2.0
192
+
193
+ ---
194
+
195
+ *YAICLI - Making your terminal smarter*
@@ -1,9 +1,9 @@
1
1
  [project]
2
2
  name = "yaicli"
3
- version = "0.0.4"
3
+ version = "0.0.5"
4
4
  description = "A simple CLI tool to interact with LLM"
5
5
  readme = "README.md"
6
- requires-python = ">=3.8"
6
+ requires-python = ">=3.9"
7
7
  dependencies = [
8
8
  "distro>=1.9.0",
9
9
  "jmespath>=1.0.1",
@@ -0,0 +1,52 @@
1
+ import pytest
2
+
3
+ from yaicli import YAICLI
4
+
5
+
6
+ @pytest.fixture
7
+ def yaicli():
8
+ return YAICLI()
9
+
10
+ def test_filter_command_basic(yaicli):
11
+ """test basic command filtering"""
12
+ command = "ls -la"
13
+ assert yaicli._filter_command(command) == "ls -la"
14
+
15
+
16
+ @pytest.mark.parametrize("input_cmd,expected", [
17
+ ("```\nls -la\n```", "ls -la"),
18
+ ("```ls -la```", "ls -la"),
19
+ ])
20
+ def test_filter_command_with_code_block(yaicli, input_cmd, expected):
21
+ """test code block"""
22
+ assert yaicli._filter_command(input_cmd) == expected
23
+
24
+ @pytest.mark.parametrize("input_cmd,expected", [
25
+ ("```bash\nls -la\n```", "ls -la"),
26
+ ("```zsh\nls -la\n```", "ls -la"),
27
+ ("```shell\nls -la\n```", "ls -la"),
28
+ ("```sh\nls -la\n```", "ls -la"),
29
+ ])
30
+ def test_filter_command_with_shell_type(yaicli, input_cmd, expected):
31
+ """test shell type declaration code block"""
32
+ assert yaicli._filter_command(input_cmd) == expected
33
+
34
+ def test_filter_command_multiline(yaicli):
35
+ """test multiline command"""
36
+ command = "```\ncd /tmp\nls -la\n```"
37
+ assert yaicli._filter_command(command) == "cd /tmp\nls -la"
38
+
39
+ def test_filter_command_with_spaces(yaicli):
40
+ """test command with extra spaces"""
41
+ command = "```bash \n ls -la \n ```"
42
+ assert yaicli._filter_command(command) == "ls -la"
43
+
44
+ def test_filter_command_empty_block(yaicli):
45
+ """test empty code block"""
46
+ command = "```\n\n```"
47
+ assert yaicli._filter_command(command) == ""
48
+
49
+ def test_filter_command_nested_blocks(yaicli):
50
+ """test nested code block"""
51
+ command = "```bash\n```echo hello```\n```"
52
+ assert yaicli._filter_command(command) == "```echo hello```"
@@ -1,10 +1,6 @@
1
1
  version = 1
2
2
  revision = 1
3
- requires-python = ">=3.8"
4
- resolution-markers = [
5
- "python_full_version >= '3.9'",
6
- "python_full_version < '3.9'",
7
- ]
3
+ requires-python = ">=3.9"
8
4
 
9
5
  [[package]]
10
6
  name = "bump2version"
@@ -82,19 +78,6 @@ wheels = [
82
78
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd" },
83
79
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407" },
84
80
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971" },
85
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/10/bd/6517ea94f2672e801011d50b5d06be2a0deaf566aea27bcdcd47e5195357/charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c" },
86
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/e5/0d/815a2ba3f283b4eeaa5ece57acade365c5b4135f65a807a083c818716582/charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9" },
87
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/aa/17/c94be7ee0d142687e047fe1de72060f6d6837f40eedc26e87e6e124a3fc6/charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8" },
88
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/f7/33/557ac796c47165fc141e4fb71d7b0310f67e05cb420756f3a82e0a0068e0/charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6" },
89
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/1e/0d/38ef4ae41e9248d63fc4998d933cae22473b1b2ac4122cf908d0f5eb32aa/charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c" },
90
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/43/01/754cdb29dd0560f58290aaaa284d43eea343ad0512e6ad3b8b5c11f08592/charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a" },
91
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/ba/cd/861883ba5160c7a9bd242c30b2c71074cda2aefcc0addc91118e0d4e0765/charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd" },
92
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/6f/7f/0c0dad447819e90b93f8ed238cc8f11b91353c23c19e70fa80483a155bed/charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd" },
93
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/8e/09/9f8abcc6fff60fb727268b63c376c8c79cc37b833c2dfe1f535dfb59523b/charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824" },
94
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/be/e5/3f363dad2e24378f88ccf63ecc39e817c29f32e308ef21a7a6d9c1201165/charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca" },
95
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/e4/10/a78c0e91f487b4ad0ef7480ac765e15b774f83de2597f1b6ef0eaf7a2f99/charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b" },
96
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/d3/81/396e7d7f5d7420da8273c91175d2e9a3f569288e3611d521685e4b9ac9cc/charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e" },
97
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/40/bb/20affbbd9ea29c71ea123769dc568a6d42052ff5089c5fe23e21e21084a6/charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4" },
98
81
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/7f/c0/b913f8f02836ed9ab32ea643c6fe4d3325c3d8627cf6e78098671cafff86/charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41" },
99
82
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/0f/6c/2bee440303d705b6fb1e2ec789543edec83d32d258299b16eed28aad48e0/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f" },
100
83
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/3d/04/cb42585f07f6f9fd3219ffb6f37d5a39b4fd2db2355b23683060029c35f7/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2" },
@@ -262,8 +245,7 @@ dependencies = [
262
245
  { name = "certifi" },
263
246
  { name = "charset-normalizer" },
264
247
  { name = "idna" },
265
- { name = "urllib3", version = "2.2.3", source = { registry = "https://repo.huaweicloud.com/repository/pypi/simple/" }, marker = "python_full_version < '3.9'" },
266
- { name = "urllib3", version = "2.3.0", source = { registry = "https://repo.huaweicloud.com/repository/pypi/simple/" }, marker = "python_full_version >= '3.9'" },
248
+ { name = "urllib3" },
267
249
  ]
268
250
  sdist = { url = "https://repo.huaweicloud.com/repository/pypi/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760" }
269
251
  wheels = [
@@ -381,25 +363,10 @@ wheels = [
381
363
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/e0/86/39b65d676ec5732de17b7e3c476e45bb80ec64eb50737a8dce1a4178aba1/typing_extensions-4.13.0-py3-none-any.whl", hash = "sha256:c8dd92cc0d6425a97c18fbb9d1954e5ff92c1ca881a309c45f06ebc0b79058e5" },
382
364
  ]
383
365
 
384
- [[package]]
385
- name = "urllib3"
386
- version = "2.2.3"
387
- source = { registry = "https://repo.huaweicloud.com/repository/pypi/simple/" }
388
- resolution-markers = [
389
- "python_full_version < '3.9'",
390
- ]
391
- sdist = { url = "https://repo.huaweicloud.com/repository/pypi/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" }
392
- wheels = [
393
- { url = "https://repo.huaweicloud.com/repository/pypi/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac" },
394
- ]
395
-
396
366
  [[package]]
397
367
  name = "urllib3"
398
368
  version = "2.3.0"
399
369
  source = { registry = "https://repo.huaweicloud.com/repository/pypi/simple/" }
400
- resolution-markers = [
401
- "python_full_version >= '3.9'",
402
- ]
403
370
  sdist = { url = "https://repo.huaweicloud.com/repository/pypi/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d" }
404
371
  wheels = [
405
372
  { url = "https://repo.huaweicloud.com/repository/pypi/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df" },
@@ -33,7 +33,13 @@ class CasePreservingConfigParser(configparser.RawConfigParser):
33
33
  return optionstr
34
34
 
35
35
 
36
- class ShellAI:
36
+ class YAICLI:
37
+ """Main class for YAICLI
38
+ Chat mode: interactive chat mode
39
+ One-shot mode:
40
+ Temp mode: ask a question and get a response once
41
+ Execute mode: generate and execute shell commands
42
+ """
37
43
  # Configuration file path
38
44
  CONFIG_PATH = Path("~/.config/yaicli/config.ini").expanduser()
39
45
 
@@ -45,8 +51,8 @@ MODEL=gpt-4o
45
51
 
46
52
  # default run mode, default: temp
47
53
  # chat: interactive chat mode
48
- # exec: shell command generation mode
49
- # temp: one-shot mode
54
+ # exec: generate and execute shell commands once
55
+ # temp: ask a question and get a response once
50
56
  DEFAULT_MODE=temp
51
57
 
52
58
  # auto detect shell and os
@@ -69,6 +75,8 @@ STREAM=true"""
69
75
  self.session = PromptSession(key_bindings=self.bindings)
70
76
  self.current_mode = ModeEnum.CHAT.value
71
77
  self.config = {}
78
+ self.history = []
79
+ self.max_history_length = 25
72
80
 
73
81
  # Setup key bindings
74
82
  self._setup_key_bindings()
@@ -82,10 +90,19 @@ STREAM=true"""
82
90
  ModeEnum.CHAT.value if self.current_mode == ModeEnum.EXECUTE.value else ModeEnum.EXECUTE.value
83
91
  )
84
92
 
85
- def detect_os(self):
86
- """Detect operating system"""
93
+ def clear_history(self):
94
+ """Clear chat history"""
95
+ self.history = []
96
+
97
+ def detect_os(self) -> str:
98
+ """Detect operating system
99
+ Returns:
100
+ str: operating system name
101
+ Raises:
102
+ typer.Exit: if there is an error with the request
103
+ """
87
104
  if self.config.get("OS_NAME") != "auto":
88
- return self.config.get("OS_NAME")
105
+ return self.config["OS_NAME"]
89
106
  current_platform = platform.system()
90
107
  if current_platform == "Linux":
91
108
  return "Linux/" + distro_name(pretty=True)
@@ -95,10 +112,15 @@ STREAM=true"""
95
112
  return "Darwin/MacOS " + platform.mac_ver()[0]
96
113
  return current_platform
97
114
 
98
- def detect_shell(self):
99
- """Detect shell"""
100
- if self.config.get("SHELL_NAME") != "auto":
101
- return self.config.get("SHELL_NAME")
115
+ def detect_shell(self) -> str:
116
+ """Detect shell
117
+ Returns:
118
+ str: shell name
119
+ Raises:
120
+ typer.Exit: if there is an error with the request
121
+ """
122
+ if self.config["SHELL_NAME"] != "auto":
123
+ return self.config["SHELL_NAME"]
102
124
  import platform
103
125
 
104
126
  current_platform = platform.system()
@@ -107,7 +129,13 @@ STREAM=true"""
107
129
  return "powershell.exe" if is_powershell else "cmd.exe"
108
130
  return basename(getenv("SHELL", "/bin/sh"))
109
131
 
110
- def build_cmd_prompt(self):
132
+ def build_cmd_prompt(self) -> str:
133
+ """Build command prompt
134
+ Returns:
135
+ str: command prompt
136
+ Raises:
137
+ typer.Exit: if there is an error with the request
138
+ """
111
139
  _os = self.detect_os()
112
140
  _shell = self.detect_shell()
113
141
  return f"""Your are a Shell Command Generator.
@@ -119,8 +147,13 @@ Rules:
119
147
  4. Chain multi-step commands in SINGLE LINE
120
148
  5. Return NOTHING except the ready-to-run command"""
121
149
 
122
- def build_default_prompt(self):
123
- """Build default prompt"""
150
+ def build_default_prompt(self) -> str:
151
+ """Build default prompt
152
+ Returns:
153
+ str: default prompt
154
+ Raises:
155
+ typer.Exit: if there is an error with the request
156
+ """
124
157
  _os = self.detect_os()
125
158
  _shell = self.detect_shell()
126
159
  return (
@@ -130,8 +163,13 @@ Rules:
130
163
  "unless the user explicitly requests more details."
131
164
  )
132
165
 
133
- def get_default_config(self):
134
- """Get default configuration"""
166
+ def get_default_config(self) -> dict[str, str]:
167
+ """Get default configuration
168
+ Returns:
169
+ dict: default configuration
170
+ Raises:
171
+ typer.Exit: if there is an error with the request
172
+ """
135
173
  config = CasePreservingConfigParser()
136
174
  try:
137
175
  config.read_string(self.DEFAULT_CONFIG_INI)
@@ -142,8 +180,13 @@ Rules:
142
180
  self.console.print(f"[red]Error parsing config: {e}[/red]")
143
181
  raise typer.Exit(code=1) from None
144
182
 
145
- def load_config(self):
146
- """Load LLM API configuration"""
183
+ def load_config(self) -> dict[str, str]:
184
+ """Load LLM API configuration
185
+ Returns:
186
+ dict: configuration
187
+ Raises:
188
+ typer.Exit: if there is an error with the request
189
+ """
147
190
  if not self.CONFIG_PATH.exists():
148
191
  self.console.print(
149
192
  "[bold yellow]Configuration file not found. Creating default configuration file.[/bold yellow]"
@@ -158,14 +201,28 @@ Rules:
158
201
  self.config["STREAM"] = str(self.config.get("STREAM", "true")).lower()
159
202
  return self.config
160
203
 
161
- def _call_api(self, url, headers, data):
162
- """Generic API call method"""
204
+ def _call_api(self, url: str, headers: dict, data: dict) -> requests.Response:
205
+ """Call the API and return the response.
206
+ Args:
207
+ url: API endpoint URL
208
+ headers: request headers
209
+ data: request data
210
+ Returns:
211
+ requests.Response: response object
212
+ Raises:
213
+ requests.exceptions.RequestException: if there is an error with the request
214
+ """
163
215
  response = requests.post(url, headers=headers, json=data)
164
216
  response.raise_for_status() # Raise an exception for non-200 status codes
165
217
  return response
166
218
 
167
- def get_llm_url(self) -> Optional[str]:
168
- """Get LLM API URL"""
219
+ def get_llm_url(self) -> str:
220
+ """Get LLM API URL
221
+ Returns:
222
+ str: LLM API URL
223
+ Raises:
224
+ typer.Exit: if API key or base URL is not set
225
+ """
169
226
  base = self.config.get("BASE_URL", "").rstrip("/")
170
227
  if not base:
171
228
  self.console.print(
@@ -181,25 +238,44 @@ Rules:
181
238
  return f"{base}/{COMPLETION_PATH}"
182
239
 
183
240
  def build_data(self, prompt: str, mode: str = ModeEnum.TEMP.value) -> dict:
184
- """Build request data"""
241
+ """Build request data
242
+ Args:
243
+ prompt: user input
244
+ mode: chat or execute mode
245
+ Returns:
246
+ dict: request data
247
+ """
185
248
  if mode == ModeEnum.EXECUTE.value:
186
249
  system_prompt = self.build_cmd_prompt()
187
250
  else:
188
251
  system_prompt = self.build_default_prompt()
252
+
253
+ # Build messages list, first add system prompt
254
+ messages = [{"role": "system", "content": system_prompt}]
255
+
256
+ # Add history records in chat mode
257
+ if mode == ModeEnum.CHAT.value and self.history:
258
+ messages.extend(self.history)
259
+
260
+ # Add current user message
261
+ messages.append({"role": "user", "content": prompt})
262
+
189
263
  return {
190
264
  "model": self.config["MODEL"],
191
- "messages": [
192
- {"role": "system", "content": system_prompt},
193
- {"role": "user", "content": prompt},
194
- ],
265
+ "messages": messages,
195
266
  "stream": self.config.get("STREAM", "true") == "true",
196
267
  "temperature": 0.7,
197
268
  "top_p": 0.7,
198
269
  "max_tokens": 200,
199
270
  }
200
271
 
201
- def stream_response(self, response):
202
- """Stream response from LLM API"""
272
+ def stream_response(self, response: requests.Response) -> str:
273
+ """Stream response from LLM API
274
+ Args:
275
+ response: requests.Response object
276
+ Returns:
277
+ str: full completion text
278
+ """
203
279
  full_completion = ""
204
280
  # Streaming response loop
205
281
  with Live(console=self.console) as live:
@@ -223,8 +299,15 @@ Rules:
223
299
  self.console.print(f"[red]Error decoding JSON: {decoded_line}[/red]")
224
300
  time.sleep(0.05)
225
301
 
226
- def call_llm_api(self, prompt: str):
227
- """Call LLM API, return streaming output"""
302
+ return full_completion
303
+
304
+ def call_llm_api(self, prompt: str) -> str:
305
+ """Call LLM API, return streaming output
306
+ Args:
307
+ prompt: user input
308
+ Returns:
309
+ str: streaming output
310
+ """
228
311
  url = self.get_llm_url()
229
312
  headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
230
313
  data = self.build_data(prompt)
@@ -239,11 +322,18 @@ Rules:
239
322
  raise typer.Exit(code=1)
240
323
 
241
324
  self.console.print("\n[bold green]Assistant:[/bold green]")
242
- self.stream_response(response) # Stream the response
325
+ assistant_response = self.stream_response(response) # Stream the response and get the full text
243
326
  self.console.print() # Add a newline after the completion
244
327
 
245
- def get_command_from_llm(self, prompt):
246
- """Request Shell command from LLM"""
328
+ return assistant_response
329
+
330
+ def get_command_from_llm(self, prompt: str) -> Optional[str]:
331
+ """Request Shell command from LLM
332
+ Args:
333
+ prompt: user input
334
+ Returns:
335
+ str: shell command
336
+ """
247
337
  url = self.get_llm_url()
248
338
  headers = {"Authorization": f"Bearer {self.config['API_KEY']}"}
249
339
  data = self.build_data(prompt, mode=ModeEnum.EXECUTE.value)
@@ -265,15 +355,24 @@ Rules:
265
355
  content = jmespath.search(ANSWER_PATH, response.json())
266
356
  return content.strip()
267
357
 
268
- def execute_shell_command(self, command):
269
- """Execute shell command"""
358
+ def execute_shell_command(self, command: str) -> int:
359
+ """Execute shell command
360
+ Args:
361
+ command: shell command
362
+ Returns:
363
+ int: return code
364
+ """
270
365
  self.console.print(f"\n[bold green]Executing command: [/bold green] {command}\n")
271
366
  result = subprocess.run(command, shell=True)
272
367
  if result.returncode != 0:
273
368
  self.console.print(f"\n[bold red]Command failed with return code: {result.returncode}[/bold red]")
369
+ return result.returncode
274
370
 
275
371
  def get_prompt_tokens(self):
276
- """Get prompt tokens based on current mode"""
372
+ """Get prompt tokens based on current mode
373
+ Returns:
374
+ list: prompt tokens for prompt_toolkit
375
+ """
277
376
  if self.current_mode == ModeEnum.CHAT.value:
278
377
  qmark = "💬"
279
378
  elif self.current_mode == ModeEnum.EXECUTE.value:
@@ -283,14 +382,35 @@ Rules:
283
382
  return [("class:qmark", qmark), ("class:question", " {} ".format(">"))]
284
383
 
285
384
  def chat_mode(self, user_input: str):
286
- """Interactive chat mode"""
385
+ """
386
+ This method handles the chat mode.
387
+ It adds the user input to the history and calls the API to get a response.
388
+ It then adds the response to the history and manages the history length.
389
+ Args:
390
+ user_input: user input
391
+ Returns:
392
+ ModeEnum: current mode
393
+ """
287
394
  if self.current_mode != ModeEnum.CHAT.value:
288
395
  return self.current_mode
289
396
 
290
- self.call_llm_api(user_input)
397
+ # Add user message to history
398
+ self.history.append({"role": "user", "content": user_input})
399
+
400
+ # Call API and get response
401
+ assistant_response = self.call_llm_api(user_input)
402
+
403
+ # Add assistant response to history
404
+ if assistant_response:
405
+ self.history.append({"role": "assistant", "content": assistant_response})
406
+
407
+ # Manage history length, keep recent conversations
408
+ if len(self.history) > self.max_history_length * 2: # Each conversation has user and assistant messages
409
+ self.history = self.history[-self.max_history_length * 2 :]
410
+
291
411
  return ModeEnum.CHAT.value
292
412
 
293
- def _filter_command(self, command):
413
+ def _filter_command(self, command: str) -> Optional[str]:
294
414
  """Filter out unwanted characters from command
295
415
 
296
416
  The LLM may return commands in markdown format with code blocks.
@@ -335,11 +455,21 @@ Rules:
335
455
  return "\n".join(line.strip() for line in content_lines if line.strip())
336
456
 
337
457
  def execute_mode(self, user_input: str):
338
- """Execute mode"""
458
+ """
459
+ This method generates a shell command from the user input and executes it.
460
+ If the user confirms the command, it is executed.
461
+ Args:
462
+ user_input: user input
463
+ Returns:
464
+ ModeEnum: current mode
465
+ """
339
466
  if user_input == "" or self.current_mode != ModeEnum.EXECUTE.value:
340
467
  return self.current_mode
341
468
 
342
469
  command = self.get_command_from_llm(user_input)
470
+ if not command:
471
+ self.console.print("[bold red]No command generated[/bold red]")
472
+ return self.current_mode
343
473
  command = self._filter_command(command)
344
474
  if not command:
345
475
  self.console.print("[bold red]No command generated[/bold red]")
@@ -357,25 +487,44 @@ Rules:
357
487
  if not user_input.strip():
358
488
  continue
359
489
 
360
- if user_input.lower() in ("exit", "quit"):
490
+ if user_input.lower() in ("/exit", "/quit", "/q"):
361
491
  break
362
492
 
493
+ # Handle special commands
363
494
  if self.current_mode == ModeEnum.CHAT.value:
364
- self.chat_mode(user_input)
495
+ if user_input.lower() == "/clear":
496
+ self.clear_history()
497
+ self.console.print("[bold yellow]Chat history cleared[/bold yellow]\n")
498
+ continue
499
+ else:
500
+ self.chat_mode(user_input)
365
501
  elif self.current_mode == ModeEnum.EXECUTE.value:
366
502
  self.execute_mode(user_input)
367
503
 
368
504
  self.console.print("[bold green]Exiting...[/bold green]")
369
505
 
370
506
  def run_one_shot(self, prompt: str):
371
- """Run one-shot mode with given prompt"""
507
+ """Run one-shot mode with given prompt
508
+ Args:
509
+ prompt (str): Prompt to send to LLM
510
+ Returns:
511
+ None
512
+ """
372
513
  if self.current_mode == ModeEnum.EXECUTE.value:
373
514
  self.execute_mode(prompt) # Execute mode for one-shot prompt
374
515
  else:
375
516
  self.call_llm_api(prompt)
376
517
 
377
518
  def run(self, chat=False, shell=False, prompt: Optional[str] = None):
378
- """Run the CLI application"""
519
+ """Run the CLI application
520
+ Args:
521
+ chat (bool): Whether to run in chat mode
522
+ shell (bool): Whether to run in shell mode
523
+ prompt (Optional[str]): Prompt send to LLM
524
+
525
+ Returns:
526
+ None
527
+ """
379
528
  # Load configuration
380
529
  self.config = self.load_config()
381
530
  if not self.config.get("API_KEY", None):
@@ -434,7 +583,7 @@ def main(
434
583
  typer.echo(ctx.get_help())
435
584
  raise typer.Exit()
436
585
 
437
- cli = ShellAI(verbose=verbose)
586
+ cli = YAICLI(verbose=verbose)
438
587
  cli.run(chat=chat, shell=shell, prompt=prompt)
439
588
 
440
589
 
yaicli-0.0.4/PKG-INFO DELETED
@@ -1,15 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: yaicli
3
- Version: 0.0.4
4
- Summary: A simple CLI tool to interact with LLM
5
- License-File: LICENSE
6
- Requires-Python: >=3.8
7
- Requires-Dist: distro>=1.9.0
8
- Requires-Dist: jmespath>=1.0.1
9
- Requires-Dist: prompt-toolkit>=3.0.50
10
- Requires-Dist: requests>=2.32.3
11
- Requires-Dist: rich>=13.9.4
12
- Requires-Dist: typer>=0.15.2
13
- Description-Content-Type: text/markdown
14
-
15
- # llmcli
yaicli-0.0.4/README.md DELETED
@@ -1 +0,0 @@
1
- # llmcli
yaicli-0.0.4/tests DELETED
@@ -1,52 +0,0 @@
1
- import pytest
2
-
3
- from yaicli import ShellAI
4
-
5
-
6
- @pytest.fixture
7
- def shell_ai():
8
- return ShellAI()
9
-
10
- def test_filter_command_basic(shell_ai):
11
- """测试基本的命令过滤"""
12
- command = "ls -la"
13
- assert shell_ai._filter_command(command) == "ls -la"
14
-
15
-
16
- @pytest.mark.parametrize("input_cmd,expected", [
17
- ("```\nls -la\n```", "ls -la"),
18
- ("```ls -la```", "ls -la"),
19
- ])
20
- def test_filter_command_with_code_block(shell_ai, input_cmd, expected):
21
- """test code block"""
22
- assert shell_ai._filter_command(input_cmd) == expected
23
-
24
- @pytest.mark.parametrize("input_cmd,expected", [
25
- ("```bash\nls -la\n```", "ls -la"),
26
- ("```zsh\nls -la\n```", "ls -la"),
27
- ("```shell\nls -la\n```", "ls -la"),
28
- ("```sh\nls -la\n```", "ls -la"),
29
- ])
30
- def test_filter_command_with_shell_type(shell_ai, input_cmd, expected):
31
- """测试带有shell类型声明的代码块"""
32
- assert shell_ai._filter_command(input_cmd) == expected
33
-
34
- def test_filter_command_multiline(shell_ai):
35
- """测试多行命令"""
36
- command = "```\ncd /tmp\nls -la\n```"
37
- assert shell_ai._filter_command(command) == "cd /tmp\nls -la"
38
-
39
- def test_filter_command_with_spaces(shell_ai):
40
- """测试带有额外空格的命令"""
41
- command = "```bash \n ls -la \n ```"
42
- assert shell_ai._filter_command(command) == "ls -la"
43
-
44
- def test_filter_command_empty_block(shell_ai):
45
- """测试空代码块"""
46
- command = "```\n\n```"
47
- assert shell_ai._filter_command(command) == ""
48
-
49
- def test_filter_command_nested_blocks(shell_ai):
50
- """测试嵌套代码块"""
51
- command = "```bash\n```echo hello```\n```"
52
- assert shell_ai._filter_command(command) == "```echo hello```"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes