tzamuncode 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tzamuncode-0.1.0/LICENSE +21 -0
- tzamuncode-0.1.0/PKG-INFO +200 -0
- tzamuncode-0.1.0/README.md +162 -0
- tzamuncode-0.1.0/pyproject.toml +55 -0
- tzamuncode-0.1.0/setup.cfg +4 -0
- tzamuncode-0.1.0/tzamuncode/__init__.py +10 -0
- tzamuncode-0.1.0/tzamuncode/agents/__init__.py +1 -0
- tzamuncode-0.1.0/tzamuncode/agents/coder.py +144 -0
- tzamuncode-0.1.0/tzamuncode/agents/tools.py +159 -0
- tzamuncode-0.1.0/tzamuncode/auth/__init__.py +1 -0
- tzamuncode-0.1.0/tzamuncode/auth/auth_manager.py +159 -0
- tzamuncode-0.1.0/tzamuncode/cli/__init__.py +1 -0
- tzamuncode-0.1.0/tzamuncode/cli/agentic_commands.py +131 -0
- tzamuncode-0.1.0/tzamuncode/cli/auth_commands.py +125 -0
- tzamuncode-0.1.0/tzamuncode/cli/commands.py +203 -0
- tzamuncode-0.1.0/tzamuncode/cli/enhanced_chat.py +312 -0
- tzamuncode-0.1.0/tzamuncode/cli/interactive_chat.py +323 -0
- tzamuncode-0.1.0/tzamuncode/cli/main.py +444 -0
- tzamuncode-0.1.0/tzamuncode/cli/realtime_chat.py +965 -0
- tzamuncode-0.1.0/tzamuncode/cli/realtime_chat_methods.py +200 -0
- tzamuncode-0.1.0/tzamuncode/cli/tui_chat.py +323 -0
- tzamuncode-0.1.0/tzamuncode/config/__init__.py +1 -0
- tzamuncode-0.1.0/tzamuncode/models/__init__.py +1 -0
- tzamuncode-0.1.0/tzamuncode/models/ollama.py +124 -0
- tzamuncode-0.1.0/tzamuncode/models/vllm_client.py +121 -0
- tzamuncode-0.1.0/tzamuncode/utils/__init__.py +1 -0
- tzamuncode-0.1.0/tzamuncode/utils/file_ops.py +59 -0
- tzamuncode-0.1.0/tzamuncode/utils/project_scanner.py +193 -0
- tzamuncode-0.1.0/tzamuncode.egg-info/PKG-INFO +200 -0
- tzamuncode-0.1.0/tzamuncode.egg-info/SOURCES.txt +32 -0
- tzamuncode-0.1.0/tzamuncode.egg-info/dependency_links.txt +1 -0
- tzamuncode-0.1.0/tzamuncode.egg-info/entry_points.txt +3 -0
- tzamuncode-0.1.0/tzamuncode.egg-info/requires.txt +18 -0
- tzamuncode-0.1.0/tzamuncode.egg-info/top_level.txt +1 -0
tzamuncode-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Tzamun Arabia IT Co.
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,200 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: tzamuncode
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: TzamunCode - AI Coding Assistant powered by local models
|
|
5
|
+
Author-email: "Tzamun Arabia IT Co." <info@tzamun.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://tzamun.com
|
|
8
|
+
Project-URL: Documentation, https://docs.tzamun.com/tzamuncode
|
|
9
|
+
Project-URL: Repository, https://github.com/tzamun/tzamuncode-cli
|
|
10
|
+
Keywords: ai,coding,assistant,ollama,cli
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Requires-Python: >=3.9
|
|
18
|
+
Description-Content-Type: text/markdown
|
|
19
|
+
License-File: LICENSE
|
|
20
|
+
Requires-Dist: typer>=0.9.0
|
|
21
|
+
Requires-Dist: rich>=13.0.0
|
|
22
|
+
Requires-Dist: textual>=0.40.0
|
|
23
|
+
Requires-Dist: readchar>=4.0.0
|
|
24
|
+
Requires-Dist: langchain>=0.1.0
|
|
25
|
+
Requires-Dist: langchain-community>=0.0.10
|
|
26
|
+
Requires-Dist: openai>=1.0.0
|
|
27
|
+
Requires-Dist: requests>=2.31.0
|
|
28
|
+
Requires-Dist: gitpython>=3.1.40
|
|
29
|
+
Requires-Dist: pathspec>=0.11.0
|
|
30
|
+
Requires-Dist: tiktoken>=0.5.0
|
|
31
|
+
Requires-Dist: prompt-toolkit>=3.0.0
|
|
32
|
+
Provides-Extra: dev
|
|
33
|
+
Requires-Dist: pytest>=7.4.0; extra == "dev"
|
|
34
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
35
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
36
|
+
Requires-Dist: mypy>=1.5.0; extra == "dev"
|
|
37
|
+
Dynamic: license-file
|
|
38
|
+
|
|
39
|
+
# TzamunCode CLI π
|
|
40
|
+
|
|
41
|
+
> AI Coding Assistant powered by local models - Built in Saudi Arabia 🇸🇦
|
|
42
|
+
|
|
43
|
+
**TzamunCode** is a privacy-first AI coding assistant that runs entirely on your local infrastructure using Ollama and vLLM. No cloud dependencies, no API costs, complete control.
|
|
44
|
+
|
|
45
|
+
## β¨ Features
|
|
46
|
+
|
|
47
|
+
- π€ **Agentic AI** - Multi-step planning and execution
|
|
48
|
+
- π **Code Generation** - Create files, functions, and entire projects
|
|
49
|
+
- βοΈ **Multi-file Editing** - Edit multiple files in one operation
|
|
50
|
+
- π§ **Tool Calling** - Git operations, file search, command execution
|
|
51
|
+
- π― **Context Aware** - Understands your project structure
|
|
52
|
+
- π **Privacy First** - Everything runs locally
|
|
53
|
+
- β‘ **Fast** - Powered by vLLM for optimized inference
|
|
54
|
+
- π **Multi-model** - Use any Ollama model (15+ available)
|
|
55
|
+
|
|
56
|
+
## π Quick Start
|
|
57
|
+
|
|
58
|
+
### Installation
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
# Clone the repository
|
|
62
|
+
git clone https://github.com/tzamun/tzamuncode-cli.git
|
|
63
|
+
cd tzamuncode-cli
|
|
64
|
+
|
|
65
|
+
# Install
|
|
66
|
+
pip install -e .
|
|
67
|
+
|
|
68
|
+
# Or install from PyPI (when published)
|
|
69
|
+
pip install tzamuncode
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
### Prerequisites
|
|
73
|
+
|
|
74
|
+
- Python 3.9+
|
|
75
|
+
- Ollama running locally (http://localhost:11434)
|
|
76
|
+
- At least one Ollama model installed
|
|
77
|
+
|
|
78
|
+
### Basic Usage
|
|
79
|
+
|
|
80
|
+
```bash
|
|
81
|
+
# Start interactive chat
|
|
82
|
+
tzamuncode chat
|
|
83
|
+
|
|
84
|
+
# Generate code
|
|
85
|
+
tzamuncode generate "Create a Flask REST API with authentication"
|
|
86
|
+
|
|
87
|
+
# Edit a file
|
|
88
|
+
tzamuncode edit app.py "Add error handling to all routes"
|
|
89
|
+
|
|
90
|
+
# Explain code
|
|
91
|
+
tzamuncode explain main.py
|
|
92
|
+
|
|
93
|
+
# Quick alias
|
|
94
|
+
tzc chat
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
## π Documentation
|
|
98
|
+
|
|
99
|
+
### Commands
|
|
100
|
+
|
|
101
|
+
#### `chat` - Interactive Chat
|
|
102
|
+
```bash
|
|
103
|
+
tzamuncode chat
|
|
104
|
+
tzamuncode chat --model qwen2.5:32b
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
#### `generate` - Code Generation
|
|
108
|
+
```bash
|
|
109
|
+
tzamuncode generate "Create a Python web scraper"
|
|
110
|
+
tzamuncode generate "Add unit tests for user.py" --output tests/
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
#### `edit` - File Editing
|
|
114
|
+
```bash
|
|
115
|
+
tzamuncode edit app.py "Refactor to use async/await"
|
|
116
|
+
tzamuncode edit . "Add type hints to all functions"
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
#### `explain` - Code Explanation
|
|
120
|
+
```bash
|
|
121
|
+
tzamuncode explain complex_function.py
|
|
122
|
+
tzamuncode explain --detailed auth.py
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
#### `review` - Code Review
|
|
126
|
+
```bash
|
|
127
|
+
tzamuncode review pull_request.diff
|
|
128
|
+
tzamuncode review --strict src/
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
### Configuration
|
|
132
|
+
|
|
133
|
+
Create `~/.tzamuncode/config.yaml`:
|
|
134
|
+
|
|
135
|
+
```yaml
|
|
136
|
+
# Default model
|
|
137
|
+
model: qwen2.5:32b
|
|
138
|
+
|
|
139
|
+
# Ollama settings
|
|
140
|
+
ollama:
|
|
141
|
+
base_url: http://localhost:11434
|
|
142
|
+
timeout: 120
|
|
143
|
+
|
|
144
|
+
# vLLM settings (optional, for faster inference)
|
|
145
|
+
vllm:
|
|
146
|
+
enabled: true
|
|
147
|
+
base_url: http://localhost:8000
|
|
148
|
+
model: deepseek-coder-7b
|
|
149
|
+
|
|
150
|
+
# Preferences
|
|
151
|
+
preferences:
|
|
152
|
+
show_diff: true
|
|
153
|
+
auto_apply: false
|
|
154
|
+
max_context: 64000
|
|
155
|
+
temperature: 0.7
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
## ποΈ Architecture
|
|
159
|
+
|
|
160
|
+
```
|
|
161
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
162
|
+
β TzamunCode CLI β
|
|
163
|
+
β (Typer + Rich UI) β
|
|
164
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
165
|
+
β
|
|
166
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
167
|
+
β Agentic Layer (LangChain) β
|
|
168
|
+
β - Multi-step planning β
|
|
169
|
+
β - Tool calling β
|
|
170
|
+
β - Context management β
|
|
171
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
172
|
+
β
|
|
173
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
174
|
+
β AI Backend β
|
|
175
|
+
β - Ollama (15+ models) β
|
|
176
|
+
β - vLLM (fast inference) β
|
|
177
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
## π€ Contributing
|
|
181
|
+
|
|
182
|
+
We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
|
183
|
+
|
|
184
|
+
## π License
|
|
185
|
+
|
|
186
|
+
MIT License - see [LICENSE](LICENSE) for details.
|
|
187
|
+
|
|
188
|
+
## π Built by Tzamun Arabia IT Co.
|
|
189
|
+
|
|
190
|
+
**TzamunCode** is part of the Tzamun AI ecosystem:
|
|
191
|
+
- **TzamunAI** - AI platform with 15+ models
|
|
192
|
+
- **TzamunERP** - ERPNext + AI integration
|
|
193
|
+
- **Auxly** - AI coding assistant for IDEs
|
|
194
|
+
- **AccessHub** - Privileged Access Management
|
|
195
|
+
|
|
196
|
+
Visit [tzamun.com](https://tzamun.com) to learn more.
|
|
197
|
+
|
|
198
|
+
---
|
|
199
|
+
|
|
200
|
+
Made with ❤️ in Saudi Arabia 🇸🇦
|
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
# TzamunCode CLI π
|
|
2
|
+
|
|
3
|
+
> AI Coding Assistant powered by local models - Built in Saudi Arabia 🇸🇦
|
|
4
|
+
|
|
5
|
+
**TzamunCode** is a privacy-first AI coding assistant that runs entirely on your local infrastructure using Ollama and vLLM. No cloud dependencies, no API costs, complete control.
|
|
6
|
+
|
|
7
|
+
## β¨ Features
|
|
8
|
+
|
|
9
|
+
- π€ **Agentic AI** - Multi-step planning and execution
|
|
10
|
+
- π **Code Generation** - Create files, functions, and entire projects
|
|
11
|
+
- βοΈ **Multi-file Editing** - Edit multiple files in one operation
|
|
12
|
+
- π§ **Tool Calling** - Git operations, file search, command execution
|
|
13
|
+
- π― **Context Aware** - Understands your project structure
|
|
14
|
+
- π **Privacy First** - Everything runs locally
|
|
15
|
+
- β‘ **Fast** - Powered by vLLM for optimized inference
|
|
16
|
+
- π **Multi-model** - Use any Ollama model (15+ available)
|
|
17
|
+
|
|
18
|
+
## π Quick Start
|
|
19
|
+
|
|
20
|
+
### Installation
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
# Clone the repository
|
|
24
|
+
git clone https://github.com/tzamun/tzamuncode-cli.git
|
|
25
|
+
cd tzamuncode-cli
|
|
26
|
+
|
|
27
|
+
# Install
|
|
28
|
+
pip install -e .
|
|
29
|
+
|
|
30
|
+
# Or install from PyPI (when published)
|
|
31
|
+
pip install tzamuncode
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
### Prerequisites
|
|
35
|
+
|
|
36
|
+
- Python 3.9+
|
|
37
|
+
- Ollama running locally (http://localhost:11434)
|
|
38
|
+
- At least one Ollama model installed
|
|
39
|
+
|
|
40
|
+
### Basic Usage
|
|
41
|
+
|
|
42
|
+
```bash
|
|
43
|
+
# Start interactive chat
|
|
44
|
+
tzamuncode chat
|
|
45
|
+
|
|
46
|
+
# Generate code
|
|
47
|
+
tzamuncode generate "Create a Flask REST API with authentication"
|
|
48
|
+
|
|
49
|
+
# Edit a file
|
|
50
|
+
tzamuncode edit app.py "Add error handling to all routes"
|
|
51
|
+
|
|
52
|
+
# Explain code
|
|
53
|
+
tzamuncode explain main.py
|
|
54
|
+
|
|
55
|
+
# Quick alias
|
|
56
|
+
tzc chat
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
## π Documentation
|
|
60
|
+
|
|
61
|
+
### Commands
|
|
62
|
+
|
|
63
|
+
#### `chat` - Interactive Chat
|
|
64
|
+
```bash
|
|
65
|
+
tzamuncode chat
|
|
66
|
+
tzamuncode chat --model qwen2.5:32b
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
#### `generate` - Code Generation
|
|
70
|
+
```bash
|
|
71
|
+
tzamuncode generate "Create a Python web scraper"
|
|
72
|
+
tzamuncode generate "Add unit tests for user.py" --output tests/
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
#### `edit` - File Editing
|
|
76
|
+
```bash
|
|
77
|
+
tzamuncode edit app.py "Refactor to use async/await"
|
|
78
|
+
tzamuncode edit . "Add type hints to all functions"
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
#### `explain` - Code Explanation
|
|
82
|
+
```bash
|
|
83
|
+
tzamuncode explain complex_function.py
|
|
84
|
+
tzamuncode explain --detailed auth.py
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
#### `review` - Code Review
|
|
88
|
+
```bash
|
|
89
|
+
tzamuncode review pull_request.diff
|
|
90
|
+
tzamuncode review --strict src/
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
### Configuration
|
|
94
|
+
|
|
95
|
+
Create `~/.tzamuncode/config.yaml`:
|
|
96
|
+
|
|
97
|
+
```yaml
|
|
98
|
+
# Default model
|
|
99
|
+
model: qwen2.5:32b
|
|
100
|
+
|
|
101
|
+
# Ollama settings
|
|
102
|
+
ollama:
|
|
103
|
+
base_url: http://localhost:11434
|
|
104
|
+
timeout: 120
|
|
105
|
+
|
|
106
|
+
# vLLM settings (optional, for faster inference)
|
|
107
|
+
vllm:
|
|
108
|
+
enabled: true
|
|
109
|
+
base_url: http://localhost:8000
|
|
110
|
+
model: deepseek-coder-7b
|
|
111
|
+
|
|
112
|
+
# Preferences
|
|
113
|
+
preferences:
|
|
114
|
+
show_diff: true
|
|
115
|
+
auto_apply: false
|
|
116
|
+
max_context: 64000
|
|
117
|
+
temperature: 0.7
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
## ποΈ Architecture
|
|
121
|
+
|
|
122
|
+
```
|
|
123
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
124
|
+
β TzamunCode CLI β
|
|
125
|
+
β (Typer + Rich UI) β
|
|
126
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
127
|
+
β
|
|
128
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
129
|
+
β Agentic Layer (LangChain) β
|
|
130
|
+
β - Multi-step planning β
|
|
131
|
+
β - Tool calling β
|
|
132
|
+
β - Context management β
|
|
133
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
134
|
+
β
|
|
135
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
136
|
+
β AI Backend β
|
|
137
|
+
β - Ollama (15+ models) β
|
|
138
|
+
β - vLLM (fast inference) β
|
|
139
|
+
βββββββββββββββββββββββββββββββββββββββ
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
## π€ Contributing
|
|
143
|
+
|
|
144
|
+
We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
|
145
|
+
|
|
146
|
+
## π License
|
|
147
|
+
|
|
148
|
+
MIT License - see [LICENSE](LICENSE) for details.
|
|
149
|
+
|
|
150
|
+
## π Built by Tzamun Arabia IT Co.
|
|
151
|
+
|
|
152
|
+
**TzamunCode** is part of the Tzamun AI ecosystem:
|
|
153
|
+
- **TzamunAI** - AI platform with 15+ models
|
|
154
|
+
- **TzamunERP** - ERPNext + AI integration
|
|
155
|
+
- **Auxly** - AI coding assistant for IDEs
|
|
156
|
+
- **AccessHub** - Privileged Access Management
|
|
157
|
+
|
|
158
|
+
Visit [tzamun.com](https://tzamun.com) to learn more.
|
|
159
|
+
|
|
160
|
+
---
|
|
161
|
+
|
|
162
|
+
Made with ❤️ in Saudi Arabia 🇸🇦
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "tzamuncode"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "TzamunCode - AI Coding Assistant powered by local models"
|
|
9
|
+
authors = [
|
|
10
|
+
{name = "Tzamun Arabia IT Co.", email = "info@tzamun.com"}
|
|
11
|
+
]
|
|
12
|
+
readme = "README.md"
|
|
13
|
+
requires-python = ">=3.9"
|
|
14
|
+
license = {text = "MIT"}
|
|
15
|
+
keywords = ["ai", "coding", "assistant", "ollama", "cli"]
|
|
16
|
+
classifiers = [
|
|
17
|
+
"Development Status :: 3 - Alpha",
|
|
18
|
+
"Intended Audience :: Developers",
|
|
19
|
+
"License :: OSI Approved :: MIT License",
|
|
20
|
+
"Programming Language :: Python :: 3.9",
|
|
21
|
+
"Programming Language :: Python :: 3.10",
|
|
22
|
+
"Programming Language :: Python :: 3.11",
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
dependencies = [
|
|
26
|
+
"typer>=0.9.0",
|
|
27
|
+
"rich>=13.0.0",
|
|
28
|
+
"textual>=0.40.0",
|
|
29
|
+
"readchar>=4.0.0",
|
|
30
|
+
"langchain>=0.1.0",
|
|
31
|
+
"langchain-community>=0.0.10",
|
|
32
|
+
"openai>=1.0.0",
|
|
33
|
+
"requests>=2.31.0",
|
|
34
|
+
"gitpython>=3.1.40",
|
|
35
|
+
"pathspec>=0.11.0",
|
|
36
|
+
"tiktoken>=0.5.0",
|
|
37
|
+
"prompt-toolkit>=3.0.0",
|
|
38
|
+
]
|
|
39
|
+
|
|
40
|
+
[project.optional-dependencies]
|
|
41
|
+
dev = [
|
|
42
|
+
"pytest>=7.4.0",
|
|
43
|
+
"black>=23.0.0",
|
|
44
|
+
"ruff>=0.1.0",
|
|
45
|
+
"mypy>=1.5.0",
|
|
46
|
+
]
|
|
47
|
+
|
|
48
|
+
[project.scripts]
|
|
49
|
+
tzamuncode = "tzamuncode.cli.main:app"
|
|
50
|
+
tzc = "tzamuncode.cli.main:app"
|
|
51
|
+
|
|
52
|
+
[project.urls]
|
|
53
|
+
Homepage = "https://tzamun.com"
|
|
54
|
+
Documentation = "https://docs.tzamun.com/tzamuncode"
|
|
55
|
+
Repository = "https://github.com/tzamun/tzamuncode-cli"
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Agentic AI module for TzamunCode"""
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agentic Coder - Advanced AI coding agent with file access and tool calling
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import List, Dict, Optional, Any
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
import json
|
|
8
|
+
|
|
9
|
+
from langchain.agents import initialize_agent, Tool, AgentType
|
|
10
|
+
from langchain.memory import ConversationBufferMemory
|
|
11
|
+
from langchain_community.llms import Ollama
|
|
12
|
+
|
|
13
|
+
from ..utils.file_ops import FileManager
|
|
14
|
+
from ..utils.project_scanner import ProjectScanner
|
|
15
|
+
from .tools import create_tools
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class AgenticCoder:
    """
    Advanced AI coding agent with file access and autonomous capabilities.

    Similar to Claude Code, this agent can:
    - Read and write files autonomously
    - Scan project structure
    - Edit multiple files
    - Execute git operations
    - Search codebase
    - Plan multi-step tasks
    """

    def __init__(
        self,
        model: str = "qwen2.5:32b",
        base_url: str = "http://localhost:11434",
        workspace: Optional[str] = None,
        temperature: float = 0.7,
        max_iterations: int = 10,
    ):
        """
        Build the agent: LLM client, tools, conversation memory, and the
        LangChain agent executor.

        Args:
            model: Ollama model name to use for reasoning.
            base_url: Base URL of the local Ollama server.
            workspace: Project root the agent operates in; defaults to the
                current working directory.
            temperature: Sampling temperature for the LLM (was previously
                hard-coded to 0.7).
            max_iterations: Upper bound on agent reasoning/tool-call steps
                before it gives up (was previously hard-coded to 10).
        """
        self.model = model
        self.base_url = base_url
        # All file operations and scans are rooted here.
        self.workspace = Path(workspace) if workspace else Path.cwd()

        # Initialize components
        self.file_manager = FileManager()
        self.project_scanner = ProjectScanner(self.workspace)

        # Initialize LLM
        self.llm = Ollama(
            model=model,
            base_url=base_url,
            temperature=temperature
        )

        # Create tools (read/write/list/search/git/scan) bound to this workspace
        self.tools = create_tools(
            file_manager=self.file_manager,
            project_scanner=self.project_scanner,
            workspace=self.workspace
        )

        # Conversation memory shared by execute() and chat() so multi-turn
        # context is preserved across calls.
        self.memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True
        )

        # Initialize agent executor. handle_parsing_errors keeps the agent
        # going when the LLM emits malformed tool-call output.
        self.agent = initialize_agent(
            tools=self.tools,
            llm=self.llm,
            agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True,
            memory=self.memory,
            handle_parsing_errors=True,
            max_iterations=max_iterations
        )

    def execute(self, instruction: str) -> Dict[str, Any]:
        """
        Execute an instruction using the agentic AI.

        The agent will autonomously:
        - Scan project if needed
        - Read relevant files
        - Plan the changes
        - Edit multiple files
        - Provide summary

        Args:
            instruction: What to do (e.g., "Add authentication to this Flask app")

        Returns:
            Dict with keys:
            - "success": bool
            - "result": agent output (only on success)
            - "error": stringified exception (only on failure)
            - "workspace": workspace path as a string (always)
        """
        # Add context about workspace; the tool list here mirrors what
        # create_tools() registers so the model knows what it can call.
        context = f"""You are working in: {self.workspace}

Available tools:
- read_file: Read any file in the project
- write_file: Write/create files
- list_files: List files in directories
- search_code: Search for code patterns
- git_status: Check git status
- scan_project: Understand project structure

Instruction: {instruction}

Think step by step:
1. Understand what needs to be done
2. Scan project structure if needed
3. Read relevant files
4. Plan the changes
5. Make the changes
6. Summarize what was done
"""

        try:
            # NOTE(review): Agent.run is deprecated in newer LangChain in
            # favor of invoke(); kept for compatibility with the pinned
            # langchain>=0.1.0 range.
            result = self.agent.run(context)

            return {
                "success": True,
                "result": result,
                "workspace": str(self.workspace)
            }
        except Exception as e:
            # Deliberately broad: any agent/LLM/tool failure is surfaced to
            # the caller as a structured error rather than a raised exception.
            return {
                "success": False,
                "error": str(e),
                "workspace": str(self.workspace)
            }

    def chat(self, message: str) -> str:
        """
        Chat with the agent while maintaining context.

        Unlike execute(), no workspace/tool preamble is added and exceptions
        propagate to the caller.

        Args:
            message: User message

        Returns:
            Agent response
        """
        return self.agent.run(message)

    def reset_memory(self):
        """Reset conversation memory (clears the shared chat history)."""
        self.memory.clear()