vexis-cli-1 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vexis_cli_1-1.0.0/LICENSE +21 -0
- vexis_cli_1-1.0.0/PKG-INFO +223 -0
- vexis_cli_1-1.0.0/README.md +181 -0
- vexis_cli_1-1.0.0/pyproject.toml +63 -0
- vexis_cli_1-1.0.0/setup.cfg +4 -0
- vexis_cli_1-1.0.0/setup.py +191 -0
- vexis_cli_1-1.0.0/src/ai_agent/__init__.py +27 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/__init__.py +12 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/command_output.py +195 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/command_parser.py +236 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/enhanced_task_verifier.py +357 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/save_command.py +257 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/task_completion_verifier.py +379 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/task_generator.py +167 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/task_robustness_manager.py +351 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/terminal_history.py +952 -0
- vexis_cli_1-1.0.0/src/ai_agent/core_processing/two_phase_engine.py +1071 -0
- vexis_cli_1-1.0.0/src/ai_agent/external_integration/__init__.py +15 -0
- vexis_cli_1-1.0.0/src/ai_agent/external_integration/google_provider.py +154 -0
- vexis_cli_1-1.0.0/src/ai_agent/external_integration/model_runner.py +367 -0
- vexis_cli_1-1.0.0/src/ai_agent/external_integration/ollama_provider.py +322 -0
- vexis_cli_1-1.0.0/src/ai_agent/external_integration/vision_api_client.py +211 -0
- vexis_cli_1-1.0.0/src/ai_agent/platform_abstraction/__init__.py +10 -0
- vexis_cli_1-1.0.0/src/ai_agent/platform_abstraction/platform_detector.py +712 -0
- vexis_cli_1-1.0.0/src/ai_agent/user_interface/__init__.py +10 -0
- vexis_cli_1-1.0.0/src/ai_agent/user_interface/main_app.py +168 -0
- vexis_cli_1-1.0.0/src/ai_agent/user_interface/two_phase_app.py +345 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/__init__.py +20 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/config.py +397 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/curses_menu.py +426 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/dependency_checker.py +755 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/environment_detector.py +573 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/exceptions.py +67 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/interactive_menu.py +319 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/logger.py +386 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/model_definitions.py +633 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/ollama_error_handler.py +683 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/ollama_manager.py +515 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/ollama_model_selector.py +34 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/yellow_selection/__init__.py +26 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/yellow_selection/clean_hierarchical_selector.py +196 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/yellow_selection/clean_interactive_menu.py +314 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/yellow_selection/config.py +105 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/yellow_selection/fallback_interactive_menu.py +273 -0
- vexis_cli_1-1.0.0/src/ai_agent/utils/yellow_selection/main.py +71 -0
- vexis_cli_1-1.0.0/src/vexis_cli_1.egg-info/PKG-INFO +223 -0
- vexis_cli_1-1.0.0/src/vexis_cli_1.egg-info/SOURCES.txt +49 -0
- vexis_cli_1-1.0.0/src/vexis_cli_1.egg-info/dependency_links.txt +1 -0
- vexis_cli_1-1.0.0/src/vexis_cli_1.egg-info/entry_points.txt +3 -0
- vexis_cli_1-1.0.0/src/vexis_cli_1.egg-info/requires.txt +17 -0
- vexis_cli_1-1.0.0/src/vexis_cli_1.egg-info/top_level.txt +1 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT LICENSE
|
|
2
|
+
|
|
3
|
+
COPYRIGHT (C) 2026 AINOHOgosya-TEAM (VEXIS-CLI)
|
|
4
|
+
|
|
5
|
+
PERMISSION IS HEREBY GRANTED, FREE OF CHARGE, TO ANY PERSON OBTAINING A COPY
|
|
6
|
+
OF THIS SOFTWARE AND ASSOCIATED DOCUMENTATION FILES (THE "SOFTWARE"), TO DEAL
|
|
7
|
+
IN THE SOFTWARE WITHOUT RESTRICTION, INCLUDING WITHOUT LIMITATION THE RIGHTS
|
|
8
|
+
TO USE, COPY, MODIFY, MERGE, PUBLISH, DISTRIBUTE, SUBLICENSE, AND/OR SELL
|
|
9
|
+
COPIES OF THE SOFTWARE, AND TO PERMIT PERSONS TO WHOM THE SOFTWARE IS
|
|
10
|
+
FURNISHED TO DO SO, SUBJECT TO THE FOLLOWING CONDITIONS:
|
|
11
|
+
|
|
12
|
+
THE ABOVE COPYRIGHT NOTICE AND THIS PERMISSION NOTICE SHALL BE INCLUDED IN ALL
|
|
13
|
+
COPIES OR SUBSTANTIAL PORTIONS OF THE SOFTWARE.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: vexis-cli-1
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: AI command-line agent for terminal automation and task execution
|
|
5
|
+
Home-page: https://github.com/AInohogosya-team/VEXIS-CLI-1
|
|
6
|
+
Author: AInohogosya-team
|
|
7
|
+
Author-email: AInohogosya@proton.me
|
|
8
|
+
License-Expression: MIT
|
|
9
|
+
Project-URL: Homepage, https://github.com/AInohogosya-team/VEXIS-CLI-1
|
|
10
|
+
Project-URL: Repository, https://github.com/AInohogosya-team/VEXIS-CLI-1
|
|
11
|
+
Project-URL: Bug Reports, https://github.com/AInohogosya-team/VEXIS-CLI-1/issues
|
|
12
|
+
Keywords: ai,automation,cli,terminal,command-line
|
|
13
|
+
Classifier: Development Status :: 5 - Production/Stable
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: Operating System :: OS Independent
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
21
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
22
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
23
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
24
|
+
Requires-Python: >=3.8
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
License-File: LICENSE
|
|
27
|
+
Requires-Dist: Pillow>=10.0.0
|
|
28
|
+
Requires-Dist: requests>=2.31.0
|
|
29
|
+
Requires-Dist: numpy>=1.24.0
|
|
30
|
+
Requires-Dist: structlog>=23.0.0
|
|
31
|
+
Requires-Dist: rich>=13.0.0
|
|
32
|
+
Requires-Dist: PyYAML>=6.0.0
|
|
33
|
+
Requires-Dist: ollama>=0.1.0
|
|
34
|
+
Requires-Dist: pyautogui>=0.9.54; sys_platform == "darwin"
|
|
35
|
+
Requires-Dist: pyobjc-framework-Cocoa>=9.0; sys_platform == "darwin"
|
|
36
|
+
Requires-Dist: pywin32>=306; sys_platform == "win32"
|
|
37
|
+
Requires-Dist: python-xlib>=0.33; sys_platform == "linux"
|
|
38
|
+
Dynamic: author-email
|
|
39
|
+
Dynamic: home-page
|
|
40
|
+
Dynamic: license-file
|
|
41
|
+
Dynamic: requires-python
|
|
42
|
+
|
|
43
|
+
<div align="center">
|
|
44
|
+
|
|
45
|
+
# VEXIS-CLI-1
|
|
46
|
+
|
|
47
|
+
</div>
|
|
48
|
+
|
|
49
|
+
<div align="center">
|
|
50
|
+
|
|
51
|
+

|
|
52
|
+

|
|
53
|
+

|
|
54
|
+

|
|
55
|
+
|
|
56
|
+
**AI-Powered Command Line Interface for Intelligent Automation**
|
|
57
|
+
|
|
58
|
+
[VEXIS-CLI-1](https://github.com/AInohogosya-team/VEXIS-CLI-1) is an AI agent derived from VEXIS-1.1 that performs tasks through command execution. It leverages large language models to intelligently interpret natural language commands and execute them through terminal operations, enabling automated workflow management and system administration.
|
|
59
|
+
|
|
60
|
+
[Quick Start](#quick-start) • [Documentation](#documentation) • [Models](#supported-ai-models) • [Configuration](#configuration) • [Contributing](#contributing)
|
|
61
|
+
|
|
62
|
+
</div>
|
|
63
|
+
|
|
64
|
+
## Key Features
|
|
65
|
+
|
|
66
|
+
### AI-Powered Intelligence
|
|
67
|
+
- **Natural Language Processing**: Execute commands using plain English descriptions
|
|
68
|
+
- **Context-Aware Execution**: Understands your workflow and adapts to your needs
|
|
69
|
+
- **Multi-Model Support**: Compatible with 150+ AI models from 20 major providers
|
|
70
|
+
- **Smart Verification**: Automatic task completion validation with confidence scoring
|
|
71
|
+
|
|
72
|
+
### Advanced Automation
|
|
73
|
+
- **Two-Phase Engine**: Planning and execution phases for reliable task completion
|
|
74
|
+
- **Cross-Platform Compatibility**: Works seamlessly on macOS, Linux, and Windows
|
|
75
|
+
- **GUI Automation**: Integrate terminal commands with graphical interface interactions
|
|
76
|
+
- **Screenshot Integration**: Visual context capture for enhanced understanding
|
|
77
|
+
|
|
78
|
+
### Developer Experience
|
|
79
|
+
- **Rich Terminal UI**: Beautiful, informative output with progress indicators
|
|
80
|
+
- **Flexible Configuration**: YAML-based settings with environment variable overrides
|
|
81
|
+
- **Extensible Architecture**: Plugin-ready design for custom integrations
|
|
82
|
+
- **Comprehensive Logging**: Structured logging for debugging and monitoring
|
|
83
|
+
|
|
84
|
+
## 🚀 Quick Start
|
|
85
|
+
|
|
86
|
+
### Prerequisites
|
|
87
|
+
|
|
88
|
+
- Python 3.8 or higher
|
|
89
|
+
- [Ollama](https://ollama.com/) installed and running (for local AI models)
|
|
90
|
+
- Git (for cloning the repository)
|
|
91
|
+
|
|
92
|
+
### Installation
|
|
93
|
+
|
|
94
|
+
1. **Clone the repository**
|
|
95
|
+
```bash
|
|
96
|
+
git clone https://github.com/AInohogosya-team/VEXIS-CLI-1.git
|
|
97
|
+
cd VEXIS-CLI-1
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
2. **Install dependencies**
|
|
101
|
+
```bash
|
|
102
|
+
pip install -r requirements.txt
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
3. **Set up Ollama** (optional, for local models)
|
|
106
|
+
```bash
|
|
107
|
+
# Install Ollama
|
|
108
|
+
curl -fsSL https://ollama.com/install.sh | sh
|
|
109
|
+
|
|
110
|
+
# Pull a recommended model
|
|
111
|
+
ollama pull llama3.2:latest
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
4. **Run VEXIS-CLI**
|
|
115
|
+
```bash
|
|
116
|
+
python run.py
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
### Your First Command
|
|
120
|
+
|
|
121
|
+
```bash
|
|
122
|
+
# Start the interactive interface
|
|
123
|
+
python run.py
|
|
124
|
+
|
|
125
|
+
# Or use direct commands
|
|
126
|
+
vexis-cli "List all Python files in the current directory"
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
## Supported AI Models
|
|
130
|
+
|
|
131
|
+
VEXIS-CLI-1 supports **150+ models** from **20 major providers** through Ollama:
|
|
132
|
+
|
|
133
|
+
### Core Providers
|
|
134
|
+
- **Meta**: Llama 3.1/3.2 series (8B, 70B, 1B, 3B variants)
|
|
135
|
+
- **Google**: Gemma 2/3 series (1B-27B parameters, multimodal capabilities)
|
|
136
|
+
- **DeepSeek**: R1/V3/Coder series (8B-671B, reasoning and coding specialists)
|
|
137
|
+
- **Microsoft**: Phi-3/4 series (3.8B-14B, efficient small models)
|
|
138
|
+
- **Mistral**: Mistral/Large/Ministral series (7B-675B, European open-source leader)
|
|
139
|
+
|
|
140
|
+
### Advanced Providers
|
|
141
|
+
- **Alibaba (Qwen)**: Qwen 2.5/3 series (0.5B-235B, multilingual with 128K+ context)
|
|
142
|
+
- **IBM**: Granite/Code series (350M-34B, enterprise-grade models)
|
|
143
|
+
- **BigCode**: StarCoder 2 series (3B-15B, specialized for code generation)
|
|
144
|
+
- **Cohere**: Command R series (7B-35B, retrieval-augmented generation)
|
|
145
|
+
- **01.AI**: Yi series (1.5B-34B, bilingual models)
|
|
146
|
+
|
|
147
|
+
### Specialized Models
|
|
148
|
+
- **Vision-Language**: LLaVA, Moondream, Qwen3-VL (7B-235B)
|
|
149
|
+
- **Coding**: DeepSeek Coder, Qwen Coder, Granite Code, StarCoder 2
|
|
150
|
+
- **Agentic**: Hermes 3, Reflection, Devstral Small 2 (3B-405B)
|
|
151
|
+
- **Cloud-Only**: GPT-OSS, Gemini 3, GLM-5, MiniMax, Kimi (20B-744B)
|
|
152
|
+
|
|
153
|
+
### Cloud & Local Models
|
|
154
|
+
- **Local Models**: Run entirely on your machine with Ollama
|
|
155
|
+
- **Cloud Models**: Access high-performance models via API
|
|
156
|
+
- **Hybrid Mode**: Seamlessly switch between local and cloud models
|
|
157
|
+
|
|
158
|
+
<details>
|
|
159
|
+
<summary>Complete Model List</summary>
|
|
160
|
+
|
|
161
|
+
**Popular Local Models:**
|
|
162
|
+
- `llama3.2:latest` (3B) - Balanced performance with 128K context
|
|
163
|
+
- `qwen2.5:7b` - Multilingual capabilities with 128K context
|
|
164
|
+
- `mistral:7b` - Fast and efficient with 32K context
|
|
165
|
+
- `deepseek-r1:8b` - Advanced reasoning with 128K context
|
|
166
|
+
- `gemma2:9b` - High-performing with 8K context
|
|
167
|
+
- `phi3:mini` - Efficient small model with 4K context
|
|
168
|
+
|
|
169
|
+
**High-Performance Cloud Models:**
|
|
170
|
+
- `deepseek-v3:671b` - State-of-the-art MoE with 160K context
|
|
171
|
+
- `qwen3:235b` - Advanced MoE with 256K context
|
|
172
|
+
- `mistral-large-3:675b-cloud` - Multimodal enterprise model
|
|
173
|
+
- `gpt-oss:120b-cloud` - Frontier performance
|
|
174
|
+
- `gemini-3-flash-preview:cloud` - Built for speed
|
|
175
|
+
|
|
176
|
+
</details>
|
|
177
|
+
|
|
178
|
+
## Usage Examples
|
|
179
|
+
|
|
180
|
+
### Quick Start
|
|
181
|
+
```bash
|
|
182
|
+
# Start the interactive interface
|
|
183
|
+
python run.py
|
|
184
|
+
|
|
185
|
+
# Or use direct commands
|
|
186
|
+
vexis-cli "List all Python files in the current directory"
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
For detailed usage examples, see our [Detailed Guide](DETAILED_GUIDE.md).
|
|
190
|
+
|
|
191
|
+
## Documentation
|
|
192
|
+
|
|
193
|
+
- [📖 Detailed Guide](DETAILED_GUIDE.md) - Comprehensive usage examples and advanced features
|
|
194
|
+
- [🔧 Troubleshooting](TROUBLESHOOTING.md) - Common issues and solutions
|
|
195
|
+
- [📚 API Reference](docs/API_REFERENCE.md)
|
|
196
|
+
- [🏗️ Architecture](docs/ARCHITECTURE.md)
|
|
197
|
+
- [⚙️ Configuration](docs/CONFIGURATION.md)
|
|
198
|
+
- [🤝 Contributing](docs/CONTRIBUTING.md)
|
|
199
|
+
|
|
200
|
+
## Community
|
|
201
|
+
|
|
202
|
+
- **GitHub Issues**: [Report bugs and request features](https://github.com/AInohogosya-team/VEXIS-CLI-1/issues)
|
|
203
|
+
- **Discussions**: [Join the community discussion](https://github.com/AInohogosya-team/VEXIS-CLI-1/discussions)
|
|
204
|
+
|
|
205
|
+
## License
|
|
206
|
+
|
|
207
|
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
|
208
|
+
|
|
209
|
+
## Support
|
|
210
|
+
|
|
211
|
+
- 📧 Email: AInohogosya@proton.me
|
|
212
|
+
- X: [AInohogosya](https://twitter.com/AInohogosya)
|
|
213
|
+
- Home Page: https://ainohogosya.github.io/home-page/
|
|
214
|
+
|
|
215
|
+
---
|
|
216
|
+
|
|
217
|
+
<div align="center">
|
|
218
|
+
|
|
219
|
+
[Back to top](#vexis-cli-1)
|
|
220
|
+
|
|
221
|
+
Made with ❤️ by the [AInohogosya-team](https://github.com/AInohogosya-team)
|
|
222
|
+
|
|
223
|
+
</div>
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
<div align="center">
|
|
2
|
+
|
|
3
|
+
# VEXIS-CLI-1
|
|
4
|
+
|
|
5
|
+
</div>
|
|
6
|
+
|
|
7
|
+
<div align="center">
|
|
8
|
+
|
|
9
|
+

|
|
10
|
+

|
|
11
|
+

|
|
12
|
+

|
|
13
|
+
|
|
14
|
+
**AI-Powered Command Line Interface for Intelligent Automation**
|
|
15
|
+
|
|
16
|
+
[VEXIS-CLI-1](https://github.com/AInohogosya-team/VEXIS-CLI-1) is an AI agent derived from VEXIS-1.1 that performs tasks through command execution. It leverages large language models to intelligently interpret natural language commands and execute them through terminal operations, enabling automated workflow management and system administration.
|
|
17
|
+
|
|
18
|
+
[Quick Start](#quick-start) • [Documentation](#documentation) • [Models](#supported-ai-models) • [Configuration](#configuration) • [Contributing](#contributing)
|
|
19
|
+
|
|
20
|
+
</div>
|
|
21
|
+
|
|
22
|
+
## Key Features
|
|
23
|
+
|
|
24
|
+
### AI-Powered Intelligence
|
|
25
|
+
- **Natural Language Processing**: Execute commands using plain English descriptions
|
|
26
|
+
- **Context-Aware Execution**: Understands your workflow and adapts to your needs
|
|
27
|
+
- **Multi-Model Support**: Compatible with 150+ AI models from 20 major providers
|
|
28
|
+
- **Smart Verification**: Automatic task completion validation with confidence scoring
|
|
29
|
+
|
|
30
|
+
### Advanced Automation
|
|
31
|
+
- **Two-Phase Engine**: Planning and execution phases for reliable task completion
|
|
32
|
+
- **Cross-Platform Compatibility**: Works seamlessly on macOS, Linux, and Windows
|
|
33
|
+
- **GUI Automation**: Integrate terminal commands with graphical interface interactions
|
|
34
|
+
- **Screenshot Integration**: Visual context capture for enhanced understanding
|
|
35
|
+
|
|
36
|
+
### Developer Experience
|
|
37
|
+
- **Rich Terminal UI**: Beautiful, informative output with progress indicators
|
|
38
|
+
- **Flexible Configuration**: YAML-based settings with environment variable overrides
|
|
39
|
+
- **Extensible Architecture**: Plugin-ready design for custom integrations
|
|
40
|
+
- **Comprehensive Logging**: Structured logging for debugging and monitoring
|
|
41
|
+
|
|
42
|
+
## 🚀 Quick Start
|
|
43
|
+
|
|
44
|
+
### Prerequisites
|
|
45
|
+
|
|
46
|
+
- Python 3.8 or higher
|
|
47
|
+
- [Ollama](https://ollama.com/) installed and running (for local AI models)
|
|
48
|
+
- Git (for cloning the repository)
|
|
49
|
+
|
|
50
|
+
### Installation
|
|
51
|
+
|
|
52
|
+
1. **Clone the repository**
|
|
53
|
+
```bash
|
|
54
|
+
git clone https://github.com/AInohogosya-team/VEXIS-CLI-1.git
|
|
55
|
+
cd VEXIS-CLI-1
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
2. **Install dependencies**
|
|
59
|
+
```bash
|
|
60
|
+
pip install -r requirements.txt
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
3. **Set up Ollama** (optional, for local models)
|
|
64
|
+
```bash
|
|
65
|
+
# Install Ollama
|
|
66
|
+
curl -fsSL https://ollama.com/install.sh | sh
|
|
67
|
+
|
|
68
|
+
# Pull a recommended model
|
|
69
|
+
ollama pull llama3.2:latest
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
4. **Run VEXIS-CLI**
|
|
73
|
+
```bash
|
|
74
|
+
python run.py
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
### Your First Command
|
|
78
|
+
|
|
79
|
+
```bash
|
|
80
|
+
# Start the interactive interface
|
|
81
|
+
python run.py
|
|
82
|
+
|
|
83
|
+
# Or use direct commands
|
|
84
|
+
vexis-cli "List all Python files in the current directory"
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
## Supported AI Models
|
|
88
|
+
|
|
89
|
+
VEXIS-CLI-1 supports **150+ models** from **20 major providers** through Ollama:
|
|
90
|
+
|
|
91
|
+
### Core Providers
|
|
92
|
+
- **Meta**: Llama 3.1/3.2 series (8B, 70B, 1B, 3B variants)
|
|
93
|
+
- **Google**: Gemma 2/3 series (1B-27B parameters, multimodal capabilities)
|
|
94
|
+
- **DeepSeek**: R1/V3/Coder series (8B-671B, reasoning and coding specialists)
|
|
95
|
+
- **Microsoft**: Phi-3/4 series (3.8B-14B, efficient small models)
|
|
96
|
+
- **Mistral**: Mistral/Large/Ministral series (7B-675B, European open-source leader)
|
|
97
|
+
|
|
98
|
+
### Advanced Providers
|
|
99
|
+
- **Alibaba (Qwen)**: Qwen 2.5/3 series (0.5B-235B, multilingual with 128K+ context)
|
|
100
|
+
- **IBM**: Granite/Code series (350M-34B, enterprise-grade models)
|
|
101
|
+
- **BigCode**: StarCoder 2 series (3B-15B, specialized for code generation)
|
|
102
|
+
- **Cohere**: Command R series (7B-35B, retrieval-augmented generation)
|
|
103
|
+
- **01.AI**: Yi series (1.5B-34B, bilingual models)
|
|
104
|
+
|
|
105
|
+
### Specialized Models
|
|
106
|
+
- **Vision-Language**: LLaVA, Moondream, Qwen3-VL (7B-235B)
|
|
107
|
+
- **Coding**: DeepSeek Coder, Qwen Coder, Granite Code, StarCoder 2
|
|
108
|
+
- **Agentic**: Hermes 3, Reflection, Devstral Small 2 (3B-405B)
|
|
109
|
+
- **Cloud-Only**: GPT-OSS, Gemini 3, GLM-5, MiniMax, Kimi (20B-744B)
|
|
110
|
+
|
|
111
|
+
### Cloud & Local Models
|
|
112
|
+
- **Local Models**: Run entirely on your machine with Ollama
|
|
113
|
+
- **Cloud Models**: Access high-performance models via API
|
|
114
|
+
- **Hybrid Mode**: Seamlessly switch between local and cloud models
|
|
115
|
+
|
|
116
|
+
<details>
|
|
117
|
+
<summary>Complete Model List</summary>
|
|
118
|
+
|
|
119
|
+
**Popular Local Models:**
|
|
120
|
+
- `llama3.2:latest` (3B) - Balanced performance with 128K context
|
|
121
|
+
- `qwen2.5:7b` - Multilingual capabilities with 128K context
|
|
122
|
+
- `mistral:7b` - Fast and efficient with 32K context
|
|
123
|
+
- `deepseek-r1:8b` - Advanced reasoning with 128K context
|
|
124
|
+
- `gemma2:9b` - High-performing with 8K context
|
|
125
|
+
- `phi3:mini` - Efficient small model with 4K context
|
|
126
|
+
|
|
127
|
+
**High-Performance Cloud Models:**
|
|
128
|
+
- `deepseek-v3:671b` - State-of-the-art MoE with 160K context
|
|
129
|
+
- `qwen3:235b` - Advanced MoE with 256K context
|
|
130
|
+
- `mistral-large-3:675b-cloud` - Multimodal enterprise model
|
|
131
|
+
- `gpt-oss:120b-cloud` - Frontier performance
|
|
132
|
+
- `gemini-3-flash-preview:cloud` - Built for speed
|
|
133
|
+
|
|
134
|
+
</details>
|
|
135
|
+
|
|
136
|
+
## Usage Examples
|
|
137
|
+
|
|
138
|
+
### Quick Start
|
|
139
|
+
```bash
|
|
140
|
+
# Start the interactive interface
|
|
141
|
+
python run.py
|
|
142
|
+
|
|
143
|
+
# Or use direct commands
|
|
144
|
+
vexis-cli "List all Python files in the current directory"
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
For detailed usage examples, see our [Detailed Guide](DETAILED_GUIDE.md).
|
|
148
|
+
|
|
149
|
+
## Documentation
|
|
150
|
+
|
|
151
|
+
- [📖 Detailed Guide](DETAILED_GUIDE.md) - Comprehensive usage examples and advanced features
|
|
152
|
+
- [🔧 Troubleshooting](TROUBLESHOOTING.md) - Common issues and solutions
|
|
153
|
+
- [📚 API Reference](docs/API_REFERENCE.md)
|
|
154
|
+
- [🏗️ Architecture](docs/ARCHITECTURE.md)
|
|
155
|
+
- [⚙️ Configuration](docs/CONFIGURATION.md)
|
|
156
|
+
- [🤝 Contributing](docs/CONTRIBUTING.md)
|
|
157
|
+
|
|
158
|
+
## Community
|
|
159
|
+
|
|
160
|
+
- **GitHub Issues**: [Report bugs and request features](https://github.com/AInohogosya-team/VEXIS-CLI-1/issues)
|
|
161
|
+
- **Discussions**: [Join the community discussion](https://github.com/AInohogosya-team/VEXIS-CLI-1/discussions)
|
|
162
|
+
|
|
163
|
+
## License
|
|
164
|
+
|
|
165
|
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
|
166
|
+
|
|
167
|
+
## Support
|
|
168
|
+
|
|
169
|
+
- 📧 Email: AInohogosya@proton.me
|
|
170
|
+
- X: [AInohogosya](https://twitter.com/AInohogosya)
|
|
171
|
+
- Home Page: https://ainohogosya.github.io/home-page/
|
|
172
|
+
|
|
173
|
+
---
|
|
174
|
+
|
|
175
|
+
<div align="center">
|
|
176
|
+
|
|
177
|
+
[Back to top](#vexis-cli-1)
|
|
178
|
+
|
|
179
|
+
Made with ❤️ by the [AInohogosya-team](https://github.com/AInohogosya-team)
|
|
180
|
+
|
|
181
|
+
</div>
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "vexis-cli-1"
|
|
7
|
+
version = "1.0.0"
|
|
8
|
+
description = "AI command-line agent for terminal automation and task execution"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
authors = [
|
|
12
|
+
{name = "AInohogosya-team"}
|
|
13
|
+
]
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 5 - Production/Stable",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"Operating System :: OS Independent",
|
|
18
|
+
"Programming Language :: Python :: 3",
|
|
19
|
+
"Programming Language :: Python :: 3.8",
|
|
20
|
+
"Programming Language :: Python :: 3.9",
|
|
21
|
+
"Programming Language :: Python :: 3.10",
|
|
22
|
+
"Programming Language :: Python :: 3.11",
|
|
23
|
+
"Programming Language :: Python :: 3.12",
|
|
24
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
25
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
26
|
+
]
|
|
27
|
+
keywords = ["ai", "automation", "cli", "terminal", "command-line"]
|
|
28
|
+
requires-python = ">=3.8"
|
|
29
|
+
dependencies = [
|
|
30
|
+
"Pillow>=10.0.0",
|
|
31
|
+
"requests>=2.31.0",
|
|
32
|
+
"numpy>=1.24.0",
|
|
33
|
+
"structlog>=23.0.0",
|
|
34
|
+
"rich>=13.0.0",
|
|
35
|
+
"PyYAML>=6.0.0",
|
|
36
|
+
"ollama>=0.1.0",
|
|
37
|
+
"pyautogui>=0.9.54; sys_platform=='darwin'",
|
|
38
|
+
"pyobjc-framework-Cocoa>=9.0; sys_platform=='darwin'",
|
|
39
|
+
"pywin32>=306; sys_platform=='win32'",
|
|
40
|
+
"python-xlib>=0.33; sys_platform=='linux'",
|
|
41
|
+
]
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
[project.scripts]
|
|
45
|
+
"vexis-cli" = "ai_agent.user_interface.main_app:main"
|
|
46
|
+
"vexis-cli-enhanced" = "ai_agent.user_interface.two_phase_app:main"
|
|
47
|
+
|
|
48
|
+
[project.urls]
|
|
49
|
+
Homepage = "https://github.com/AInohogosya-team/VEXIS-CLI-1"
|
|
50
|
+
Repository = "https://github.com/AInohogosya-team/VEXIS-CLI-1"
|
|
51
|
+
"Bug Reports" = "https://github.com/AInohogosya-team/VEXIS-CLI-1/issues"
|
|
52
|
+
|
|
53
|
+
[tool.setuptools.packages.find]
|
|
54
|
+
where = ["src"]
|
|
55
|
+
|
|
56
|
+
[tool.setuptools.package-data]
|
|
57
|
+
ai_agent = [
|
|
58
|
+
"config/*.yaml",
|
|
59
|
+
"config/*.json",
|
|
60
|
+
"templates/*.txt",
|
|
61
|
+
"scripts/*.sh",
|
|
62
|
+
"scripts/*.ps1",
|
|
63
|
+
]
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Quick Setup Script for VEXIS-CLI
|
|
4
|
+
Automatically checks and fixes common setup issues
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import subprocess
|
|
8
|
+
import sys
|
|
9
|
+
import os
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
# PyPI publishing configuration
|
|
13
|
+
from setuptools import setup, find_packages
|
|
14
|
+
|
|
15
|
+
# NOTE(review): this setup() call executes unconditionally at import time,
# *before* the "quick setup" helper functions defined below — confirm that
# mixing packaging metadata and an interactive setup script in one file is
# intentional.
setup(
    name="vexis-cli-1",
    version="1.0.0",
    description="AI command-line agent for terminal automation and task execution",
    # Read the README with an explicit encoding and without leaking a file
    # handle (the original used a bare open() with neither encoding nor close()).
    long_description=Path("README.md").read_text(encoding="utf-8"),
    long_description_content_type="text/markdown",
    author="AInohogosya-team",
    author_email="AInohogosya@proton.me",
    url="https://github.com/AInohogosya-team/VEXIS-CLI-1",
    license="MIT",
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    include_package_data=True,
    package_data={
        "ai_agent": [
            "config/*.yaml",
            "config/*.json",
            "templates/*.txt",
            "scripts/*.sh",
            "scripts/*.ps1",
        ]
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Topic :: Software Development :: Automation",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    keywords="ai automation cli terminal command-line",
    python_requires=">=3.8",
    install_requires=[
        "Pillow>=10.0.0",
        "requests>=2.31.0",
        "numpy>=1.24.0",
        "structlog>=23.0.0",
        "rich>=13.0.0",
        "PyYAML>=6.0.0",
        "ollama>=0.1.0",
        "pyautogui>=0.9.54; sys_platform=='darwin'",
        "pyobjc-framework-Cocoa>=9.0; sys_platform=='darwin'",
        "pywin32>=306; sys_platform=='win32'",
        "python-xlib>=0.33; sys_platform=='linux'",
    ],
    entry_points={
        "console_scripts": [
            "vexis-cli=ai_agent.user_interface.main_app:main",
            "vexis-cli-enhanced=ai_agent.user_interface.two_phase_app:main",
        ],
    },
)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def run_command(cmd, description, check=True):
    """Execute a shell command, printing progress and the outcome.

    Args:
        cmd: Shell command string (executed with ``shell=True``).
        description: Human-readable label shown before the command runs.
        check: When True, a non-zero exit status is treated as failure.

    Returns:
        bool: True on success, False when the command raised
        ``CalledProcessError`` (only possible when ``check`` is True).
    """
    print(f"🔧 {description}...")
    try:
        completed = subprocess.run(
            cmd, shell=True, capture_output=True, text=True, check=check
        )
    except subprocess.CalledProcessError as err:
        print(f"❌ Failed: {err.stderr.strip()}")
        return False
    output = completed.stdout.strip()
    if output:
        print(f"✅ {output}")
    return True
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def check_ollama():
    """Check whether Ollama is installed, offering to install it if missing.

    Returns:
        bool: True if Ollama is available (or was just installed),
        False otherwise.
    """
    print("\n=== Checking Ollama ===")

    try:
        result = subprocess.run(['ollama', '--version'], capture_output=True, text=True, timeout=5)
        print(f"✅ Ollama is installed: {result.stdout.strip()}")
        return True
    except (subprocess.TimeoutExpired, FileNotFoundError):
        print("❌ Ollama is not installed")
        try:
            response = input("Would you like to install Ollama? (y/n): ")
        except EOFError:
            # Non-interactive session (piped/closed stdin): the original
            # crashed here with an uncaught EOFError; treat it as "no".
            return False
        if response.lower() == 'y':
            return run_command("curl -fsSL https://ollama.com/install.sh | sh", "Installing Ollama")
        return False
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def check_models():
    """Check that at least one Ollama model is installed, offering to pull one.

    Returns:
        bool: True if a model is available (or was just installed),
        False otherwise.
    """
    print("\n=== Checking Models ===")

    try:
        result = subprocess.run(['ollama', 'list'], capture_output=True, text=True, timeout=10)
        if result.returncode == 0:
            lines = result.stdout.strip().split('\n')[1:]  # Skip header row
            models = [line.split()[0] for line in lines if line.strip()]

            if models:
                print(f"✅ Found {len(models)} models: {', '.join(models[:3])}")
                return True
            else:
                print("❌ No models found")
                try:
                    response = input("Would you like to install a lightweight model? (y/n): ")
                except EOFError:
                    # Non-interactive session: treat as "no" instead of
                    # crashing with an uncaught EOFError.
                    return False
                if response.lower() == 'y':
                    return run_command("ollama pull llama3.2:1b", "Installing llama3.2:1b model")
                return False
        # The original fell through here and implicitly returned None when
        # 'ollama list' exited non-zero; report it and return a real bool.
        print(f"❌ Error checking models: 'ollama list' exited with code {result.returncode}")
        return False
    except Exception as e:
        print(f"❌ Error checking models: {e}")
        return False
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def check_python_deps():
    """Verify the Python environment and install project requirements.

    Offers to create a virtual environment when not already inside one, then
    installs requirements.txt using the running interpreter's pip.

    Returns:
        bool: True when dependencies were installed successfully,
        False otherwise (including when the user must re-run after
        activating a freshly created virtual environment).
    """
    print("\n=== Checking Python Dependencies ===")

    # sys.prefix == sys.base_prefix means we are NOT inside a venv.
    if sys.prefix == sys.base_prefix:
        print("⚠️ Not in a virtual environment")
        try:
            response = input("Would you like to create a virtual environment? (y/n): ")
        except EOFError:
            # Non-interactive session: the original crashed here with an
            # uncaught EOFError; treat it as "no" and stop.
            return False
        if response.lower() == 'y':
            if not run_command("python3 -m venv venv", "Creating virtual environment"):
                return False
            print("✅ Virtual environment created")
            print("Please run: source venv/bin/activate && python3 run.py 'your command'")
            return False
    else:
        print("✅ Running in virtual environment")

    requirements_file = Path("requirements.txt")
    if requirements_file.exists():
        # Install with the interpreter running this script so the packages
        # land in the active environment.
        pip_cmd = f"{sys.executable} -m pip install -r requirements.txt"
        return run_command(pip_cmd, "Installing Python dependencies")
    else:
        print("⚠️ requirements.txt not found")
        return False
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def main():
    """Entry point: run each setup check in order and print a summary."""
    print("=== VEXIS-CLI Quick Setup ===")
    print("This script will check and fix common setup issues.\n")

    # Work from the directory containing this script so relative paths
    # (requirements.txt, venv/) resolve regardless of the caller's cwd.
    os.chdir(Path(__file__).parent)

    # Each check prints its own section and returns a truthy value on success.
    checks = (check_ollama, check_models, check_python_deps)
    ready = sum(1 for check in checks if check())

    print("\n=== Setup Summary ===")
    print(f"✅ {ready}/{len(checks)} components ready")

    if ready == len(checks):
        print("🎉 Setup complete! You can now run:")
        print("python3 run.py \"your instruction here\"")
    else:
        print("⚠️ Some issues remain. Please fix them manually.")
        print("You can also run: python3 check_models.py for detailed model status.")
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
# Run the interactive setup checks only when executed directly as a script.
if __name__ == "__main__":
    main()
|