pygentix 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygentix-0.1.0/LICENSE +21 -0
- pygentix-0.1.0/PKG-INFO +328 -0
- pygentix-0.1.0/README.md +289 -0
- pygentix-0.1.0/pyproject.toml +61 -0
- pygentix-0.1.0/setup.cfg +4 -0
- pygentix-0.1.0/src/pygentix/__init__.py +23 -0
- pygentix-0.1.0/src/pygentix/chatgpt.py +153 -0
- pygentix-0.1.0/src/pygentix/copilot.py +62 -0
- pygentix-0.1.0/src/pygentix/core.py +310 -0
- pygentix-0.1.0/src/pygentix/gemini.py +207 -0
- pygentix-0.1.0/src/pygentix/ollama.py +58 -0
- pygentix-0.1.0/src/pygentix/output.py +106 -0
- pygentix-0.1.0/src/pygentix/py.typed +0 -0
- pygentix-0.1.0/src/pygentix/sqlalchemy.py +268 -0
- pygentix-0.1.0/src/pygentix.egg-info/PKG-INFO +328 -0
- pygentix-0.1.0/src/pygentix.egg-info/SOURCES.txt +22 -0
- pygentix-0.1.0/src/pygentix.egg-info/dependency_links.txt +1 -0
- pygentix-0.1.0/src/pygentix.egg-info/requires.txt +21 -0
- pygentix-0.1.0/src/pygentix.egg-info/top_level.txt +1 -0
- pygentix-0.1.0/tests/test_core.py +163 -0
- pygentix-0.1.0/tests/test_integration.py +280 -0
- pygentix-0.1.0/tests/test_output.py +237 -0
- pygentix-0.1.0/tests/test_sqlalchemy.py +288 -0
- pygentix-0.1.0/tests/test_vision.py +78 -0
pygentix-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Andre Perussi
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
pygentix-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pygentix
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Composable AI agent framework — drop-in LLM tool-calling, structured output, and SQLAlchemy integration for any Python project.
|
|
5
|
+
Author-email: Andre Perussi <dlperussi@hotmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/andreperussi/pygentix
|
|
8
|
+
Project-URL: Repository, https://github.com/andreperussi/pygentix
|
|
9
|
+
Project-URL: Issues, https://github.com/andreperussi/pygentix/issues
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
18
|
+
Classifier: Typing :: Typed
|
|
19
|
+
Requires-Python: >=3.10
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
License-File: LICENSE
|
|
22
|
+
Requires-Dist: sqlalchemy>=2.0
|
|
23
|
+
Provides-Extra: ollama
|
|
24
|
+
Requires-Dist: ollama>=0.4; extra == "ollama"
|
|
25
|
+
Provides-Extra: openai
|
|
26
|
+
Requires-Dist: openai>=1.0; extra == "openai"
|
|
27
|
+
Provides-Extra: gemini
|
|
28
|
+
Requires-Dist: google-genai>=1.0; extra == "gemini"
|
|
29
|
+
Provides-Extra: all
|
|
30
|
+
Requires-Dist: ollama>=0.4; extra == "all"
|
|
31
|
+
Requires-Dist: openai>=1.0; extra == "all"
|
|
32
|
+
Requires-Dist: google-genai>=1.0; extra == "all"
|
|
33
|
+
Provides-Extra: dev
|
|
34
|
+
Requires-Dist: ollama>=0.4; extra == "dev"
|
|
35
|
+
Requires-Dist: openai>=1.0; extra == "dev"
|
|
36
|
+
Requires-Dist: google-genai>=1.0; extra == "dev"
|
|
37
|
+
Requires-Dist: pytest>=8.0; extra == "dev"
|
|
38
|
+
Dynamic: license-file
|
|
39
|
+
|
|
40
|
+
# pygentix
|
|
41
|
+
|
|
42
|
+
A composable Python framework for building AI agents with **tool-calling**, **structured output**, and **SQLAlchemy integration** — across any LLM provider.
|
|
43
|
+
|
|
44
|
+
```
|
|
45
|
+
pip install pygentix # core only
|
|
46
|
+
pip install pygentix[ollama] # + Ollama backend
|
|
47
|
+
pip install pygentix[openai] # + OpenAI (ChatGPT) backend
|
|
48
|
+
pip install pygentix[gemini] # + Google Gemini backend
|
|
49
|
+
pip install pygentix[all] # every backend
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
> **Azure OpenAI / Copilot** uses the `openai` package — install `pygentix[openai]`.
|
|
53
|
+
|
|
54
|
+
---
|
|
55
|
+
|
|
56
|
+
## Quick Start
|
|
57
|
+
|
|
58
|
+
Pick a backend, register tools, and start a conversation:
|
|
59
|
+
|
|
60
|
+
```python
|
|
61
|
+
from pygentix import Ollama
|
|
62
|
+
|
|
63
|
+
agent = Ollama(model="qwen2.5:7b") # runs locally — no API key needed
|
|
64
|
+
|
|
65
|
+
@agent.uses
|
|
66
|
+
def get_weather(city: str) -> str:
|
|
67
|
+
"""Return the current weather for a city."""
|
|
68
|
+
return f"Sunny, 22 °C in {city}"
|
|
69
|
+
|
|
70
|
+
conv = agent.start_conversation()
|
|
71
|
+
response = conv.ask("What's the weather in Paris?")
|
|
72
|
+
print(response.message.content)
|
|
73
|
+
# → "It's sunny and 22 °C in Paris right now."
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
Every backend returns the same `ChatResponse` object, so switching providers is a one-line change:
|
|
77
|
+
|
|
78
|
+
```python
|
|
79
|
+
from pygentix import ChatGPT, Gemini, Copilot
|
|
80
|
+
|
|
81
|
+
agent = ChatGPT(model="gpt-4o-mini") # OpenAI
|
|
82
|
+
agent = Gemini(model="gemini-2.5-flash") # Google
|
|
83
|
+
agent = Copilot(model="gpt-4o") # Azure OpenAI
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
---
|
|
87
|
+
|
|
88
|
+
## Backends
|
|
89
|
+
|
|
90
|
+
| Class | Provider | Default model | Install extra |
|
|
91
|
+
|---|---|---|---|
|
|
92
|
+
| `Ollama` | [Ollama](https://ollama.com) (local) | `qwen2.5:7b` | `ollama` |
|
|
93
|
+
| `ChatGPT` | [OpenAI](https://platform.openai.com) | `gpt-4o-mini` | `openai` |
|
|
94
|
+
| `Gemini` | [Google AI](https://ai.google.dev) | `gemini-2.5-flash` | `gemini` |
|
|
95
|
+
| `Copilot` | [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service) | `gpt-4o` | `openai` |
|
|
96
|
+
|
|
97
|
+
### API keys
|
|
98
|
+
|
|
99
|
+
Cloud backends read their key from the environment (or accept it in the constructor). Ollama runs locally and needs no key.
|
|
100
|
+
|
|
101
|
+
| Backend | Environment variable | Constructor kwarg |
|
|
102
|
+
|---|---|---|
|
|
103
|
+
| `Ollama` | *(none — runs locally)* | — |
|
|
104
|
+
| `ChatGPT` | `OPENAI_API_KEY` | `api_key` |
|
|
105
|
+
| `Gemini` | `GEMINI_API_KEY` | `api_key` |
|
|
106
|
+
| `Copilot` | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_ENDPOINT` | `api_key`, `endpoint` |
|
|
107
|
+
|
|
108
|
+
```python
|
|
109
|
+
from pygentix import ChatGPT
|
|
110
|
+
|
|
111
|
+
agent = ChatGPT(api_key="sk-...") # explicit
|
|
112
|
+
agent = ChatGPT() # reads OPENAI_API_KEY
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
---
|
|
116
|
+
|
|
117
|
+
## Tool Calling
|
|
118
|
+
|
|
119
|
+
Decorate any Python function with `@agent.uses` to expose it as a tool the LLM can invoke:
|
|
120
|
+
|
|
121
|
+
```python
|
|
122
|
+
from pygentix import Ollama
|
|
123
|
+
|
|
124
|
+
agent = Ollama()
|
|
125
|
+
|
|
126
|
+
@agent.uses
|
|
127
|
+
def search_docs(query: str) -> str:
|
|
128
|
+
"""Search the documentation for relevant articles."""
|
|
129
|
+
return run_search(query)
|
|
130
|
+
|
|
131
|
+
@agent.uses
|
|
132
|
+
def send_email(to: str, subject: str, body: str) -> str:
|
|
133
|
+
"""Send an email to the specified address."""
|
|
134
|
+
return mailer.send(to, subject, body)
|
|
135
|
+
|
|
136
|
+
conv = agent.start_conversation()
|
|
137
|
+
response = conv.ask("Find docs about authentication and email them to alice@co.com")
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
The framework introspects the function's signature and docstring to build the tool definition automatically. When the model decides to call a tool, the framework executes it and feeds the result back — looping until the model produces a final answer.
|
|
141
|
+
|
|
142
|
+
---
|
|
143
|
+
|
|
144
|
+
## Vision / Image Understanding
|
|
145
|
+
|
|
146
|
+
Pass images alongside your question to any vision-capable model:
|
|
147
|
+
|
|
148
|
+
```python
|
|
149
|
+
from pygentix import Ollama
|
|
150
|
+
|
|
151
|
+
agent = Ollama(model="llama3.2-vision") # local vision model
|
|
152
|
+
conv = agent.start_conversation()
|
|
153
|
+
|
|
154
|
+
response = conv.ask("How many cats are in this photo?", images=["photo.jpeg"])
|
|
155
|
+
print(response.message.content)
|
|
156
|
+
# → "There are 3 cats in the photo."
|
|
157
|
+
```
|
|
158
|
+
|
|
159
|
+
The `images` parameter accepts a list of file paths and works across all backends:
|
|
160
|
+
|
|
161
|
+
| Backend | Vision model examples |
|
|
162
|
+
|---|---|
|
|
163
|
+
| `Ollama` | `llama3.2-vision`, `moondream` |
|
|
164
|
+
| `ChatGPT` | `gpt-4o`, `gpt-4o-mini` |
|
|
165
|
+
| `Gemini` | `gemini-2.5-flash`, `gemini-2.5-pro` |
|
|
166
|
+
| `Copilot` | `gpt-4o` (via Azure) |
|
|
167
|
+
|
|
168
|
+
---
|
|
169
|
+
|
|
170
|
+
## Structured Output
|
|
171
|
+
|
|
172
|
+
Use `OutputAgent` to guarantee responses follow a JSON schema:
|
|
173
|
+
|
|
174
|
+
```python
|
|
175
|
+
from pygentix import Ollama, OutputAgent
|
|
176
|
+
|
|
177
|
+
class MyAgent(Ollama, OutputAgent):
|
|
178
|
+
pass
|
|
179
|
+
|
|
180
|
+
agent = MyAgent()
|
|
181
|
+
|
|
182
|
+
@agent.output
|
|
183
|
+
class Answer:
|
|
184
|
+
answer: str
|
|
185
|
+
confidence: float = 0.0
|
|
186
|
+
sources: list = []
|
|
187
|
+
|
|
188
|
+
conv = agent.start_conversation()
|
|
189
|
+
response = conv.ask("What is the capital of France?")
|
|
190
|
+
|
|
191
|
+
parsed = agent.parse_output(response)
|
|
192
|
+
print(parsed.answer) # "Paris"
|
|
193
|
+
print(parsed.confidence) # 0.95
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
The schema can also be a raw dict — pass any valid JSON Schema to `agent.output({"type": "object", ...})`.
|
|
197
|
+
|
|
198
|
+
---
|
|
199
|
+
|
|
200
|
+
## SQLAlchemy Integration
|
|
201
|
+
|
|
202
|
+
`SqlAlchemyAgent` gives the LLM read/write access to your database through auto-generated tools:
|
|
203
|
+
|
|
204
|
+
```python
|
|
205
|
+
from sqlalchemy import Column, Integer, String, create_engine
|
|
206
|
+
from sqlalchemy.orm import declarative_base
|
|
207
|
+
|
|
208
|
+
from pygentix import Ollama, OutputAgent, SqlAlchemyAgent
|
|
209
|
+
|
|
210
|
+
Base = declarative_base()
|
|
211
|
+
|
|
212
|
+
class Product(Base):
|
|
213
|
+
__tablename__ = "products"
|
|
214
|
+
id = Column(Integer, primary_key=True)
|
|
215
|
+
name = Column(String)
|
|
216
|
+
price = Column(Integer)
|
|
217
|
+
|
|
218
|
+
engine = create_engine("sqlite:///shop.db")
|
|
219
|
+
Base.metadata.create_all(engine)
|
|
220
|
+
|
|
221
|
+
class ShopAgent(Ollama, SqlAlchemyAgent, OutputAgent):
|
|
222
|
+
pass
|
|
223
|
+
|
|
224
|
+
agent = ShopAgent(engine=engine)
|
|
225
|
+
agent.reads(Product) # enables run_query
|
|
226
|
+
agent.writes(Product) # enables run_insert, run_update, run_delete
|
|
227
|
+
|
|
228
|
+
@agent.output
|
|
229
|
+
class Response:
|
|
230
|
+
answer: str
|
|
231
|
+
data: list = []
|
|
232
|
+
|
|
233
|
+
conv = agent.start_conversation()
|
|
234
|
+
conv.ask("Add a product called 'Widget' priced at 9")
|
|
235
|
+
response = conv.ask("List all products under $20")
|
|
236
|
+
|
|
237
|
+
parsed = agent.parse_output(response)
|
|
238
|
+
for item in parsed.data:
|
|
239
|
+
print(item)
|
|
240
|
+
```
|
|
241
|
+
|
|
242
|
+
The agent automatically generates `run_query`, `run_insert`, `run_update`, and `run_delete` tools, handles type coercion (strings → ints, dates, etc.), and serializes results back to the model.
|
|
243
|
+
|
|
244
|
+
---
|
|
245
|
+
|
|
246
|
+
## Mixing Backends
|
|
247
|
+
|
|
248
|
+
Every agent is a composable mixin — swap the backend class and everything else stays the same:
|
|
249
|
+
|
|
250
|
+
```python
|
|
251
|
+
from pygentix import Ollama, ChatGPT, Gemini, Copilot, SqlAlchemyAgent, OutputAgent
|
|
252
|
+
|
|
253
|
+
class LocalAgent(Ollama, SqlAlchemyAgent, OutputAgent):
|
|
254
|
+
"""Runs entirely on your machine via Ollama."""
|
|
255
|
+
|
|
256
|
+
class CloudAgent(ChatGPT, SqlAlchemyAgent, OutputAgent):
|
|
257
|
+
"""Uses OpenAI for inference."""
|
|
258
|
+
|
|
259
|
+
class GoogleAgent(Gemini, SqlAlchemyAgent, OutputAgent):
|
|
260
|
+
"""Uses Google Gemini for inference."""
|
|
261
|
+
|
|
262
|
+
class EnterpriseAgent(Copilot, SqlAlchemyAgent, OutputAgent):
|
|
263
|
+
"""Routes through your Azure OpenAI deployment."""
|
|
264
|
+
```
|
|
265
|
+
|
|
266
|
+
---
|
|
267
|
+
|
|
268
|
+
## Multi-turn Conversations
|
|
269
|
+
|
|
270
|
+
A `Conversation` maintains the full message history, so follow-up questions have context:
|
|
271
|
+
|
|
272
|
+
```python
|
|
273
|
+
from pygentix import Ollama, SqlAlchemyAgent
|
|
274
|
+
|
|
275
|
+
# ... define models, engine, etc.
|
|
276
|
+
|
|
277
|
+
agent = Ollama(engine=engine)
|
|
278
|
+
conv = agent.start_conversation()
|
|
279
|
+
conv.ask("Create a user named Alice with email alice@example.com")
|
|
280
|
+
conv.ask("Now create one for Bob at bob@example.com")
|
|
281
|
+
response = conv.ask("List all users")
|
|
282
|
+
```
|
|
283
|
+
|
|
284
|
+
---
|
|
285
|
+
|
|
286
|
+
## API Reference
|
|
287
|
+
|
|
288
|
+
### Core
|
|
289
|
+
|
|
290
|
+
| Symbol | Description |
|
|
291
|
+
|---|---|
|
|
292
|
+
| `Agent` | Abstract base class — subclass to create a backend |
|
|
293
|
+
| `ChatResponse` | Normalized response every backend returns |
|
|
294
|
+
| `Conversation` | Multi-turn conversation manager |
|
|
295
|
+
| `Function` | Introspectable wrapper around a tool callable |
|
|
296
|
+
|
|
297
|
+
### Backends
|
|
298
|
+
|
|
299
|
+
| Symbol | Description |
|
|
300
|
+
|---|---|
|
|
301
|
+
| `Ollama` | Local inference via Ollama |
|
|
302
|
+
| `ChatGPT` | OpenAI Chat Completions |
|
|
303
|
+
| `Gemini` | Google Gemini (via `google-genai`) |
|
|
304
|
+
| `Copilot` | Azure OpenAI |
|
|
305
|
+
|
|
306
|
+
### Mixins
|
|
307
|
+
|
|
308
|
+
| Symbol | Description |
|
|
309
|
+
|---|---|
|
|
310
|
+
| `OutputAgent` | JSON schema enforcement for responses |
|
|
311
|
+
| `SqlAlchemyAgent` | Database CRUD tools from ORM models |
|
|
312
|
+
|
|
313
|
+
---
|
|
314
|
+
|
|
315
|
+
## Development
|
|
316
|
+
|
|
317
|
+
```bash
|
|
318
|
+
git clone https://github.com/andreperussi/pygentix.git
|
|
319
|
+
cd pygentix
|
|
320
|
+
pip install -e ".[dev]"
|
|
321
|
+
pytest
|
|
322
|
+
```
|
|
323
|
+
|
|
324
|
+
---
|
|
325
|
+
|
|
326
|
+
## License
|
|
327
|
+
|
|
328
|
+
MIT
|
pygentix-0.1.0/README.md
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
# pygentix
|
|
2
|
+
|
|
3
|
+
A composable Python framework for building AI agents with **tool-calling**, **structured output**, and **SQLAlchemy integration** — across any LLM provider.
|
|
4
|
+
|
|
5
|
+
```
|
|
6
|
+
pip install pygentix # core only
|
|
7
|
+
pip install pygentix[ollama] # + Ollama backend
|
|
8
|
+
pip install pygentix[openai] # + OpenAI (ChatGPT) backend
|
|
9
|
+
pip install pygentix[gemini] # + Google Gemini backend
|
|
10
|
+
pip install pygentix[all] # every backend
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
> **Azure OpenAI / Copilot** uses the `openai` package — install `pygentix[openai]`.
|
|
14
|
+
|
|
15
|
+
---
|
|
16
|
+
|
|
17
|
+
## Quick Start
|
|
18
|
+
|
|
19
|
+
Pick a backend, register tools, and start a conversation:
|
|
20
|
+
|
|
21
|
+
```python
|
|
22
|
+
from pygentix import Ollama
|
|
23
|
+
|
|
24
|
+
agent = Ollama(model="qwen2.5:7b") # runs locally — no API key needed
|
|
25
|
+
|
|
26
|
+
@agent.uses
|
|
27
|
+
def get_weather(city: str) -> str:
|
|
28
|
+
"""Return the current weather for a city."""
|
|
29
|
+
return f"Sunny, 22 °C in {city}"
|
|
30
|
+
|
|
31
|
+
conv = agent.start_conversation()
|
|
32
|
+
response = conv.ask("What's the weather in Paris?")
|
|
33
|
+
print(response.message.content)
|
|
34
|
+
# → "It's sunny and 22 °C in Paris right now."
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
Every backend returns the same `ChatResponse` object, so switching providers is a one-line change:
|
|
38
|
+
|
|
39
|
+
```python
|
|
40
|
+
from pygentix import ChatGPT, Gemini, Copilot
|
|
41
|
+
|
|
42
|
+
agent = ChatGPT(model="gpt-4o-mini") # OpenAI
|
|
43
|
+
agent = Gemini(model="gemini-2.5-flash") # Google
|
|
44
|
+
agent = Copilot(model="gpt-4o") # Azure OpenAI
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
---
|
|
48
|
+
|
|
49
|
+
## Backends
|
|
50
|
+
|
|
51
|
+
| Class | Provider | Default model | Install extra |
|
|
52
|
+
|---|---|---|---|
|
|
53
|
+
| `Ollama` | [Ollama](https://ollama.com) (local) | `qwen2.5:7b` | `ollama` |
|
|
54
|
+
| `ChatGPT` | [OpenAI](https://platform.openai.com) | `gpt-4o-mini` | `openai` |
|
|
55
|
+
| `Gemini` | [Google AI](https://ai.google.dev) | `gemini-2.5-flash` | `gemini` |
|
|
56
|
+
| `Copilot` | [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service) | `gpt-4o` | `openai` |
|
|
57
|
+
|
|
58
|
+
### API keys
|
|
59
|
+
|
|
60
|
+
Cloud backends read their key from the environment (or accept it in the constructor). Ollama runs locally and needs no key.
|
|
61
|
+
|
|
62
|
+
| Backend | Environment variable | Constructor kwarg |
|
|
63
|
+
|---|---|---|
|
|
64
|
+
| `Ollama` | *(none — runs locally)* | — |
|
|
65
|
+
| `ChatGPT` | `OPENAI_API_KEY` | `api_key` |
|
|
66
|
+
| `Gemini` | `GEMINI_API_KEY` | `api_key` |
|
|
67
|
+
| `Copilot` | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_ENDPOINT` | `api_key`, `endpoint` |
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
from pygentix import ChatGPT
|
|
71
|
+
|
|
72
|
+
agent = ChatGPT(api_key="sk-...") # explicit
|
|
73
|
+
agent = ChatGPT() # reads OPENAI_API_KEY
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
---
|
|
77
|
+
|
|
78
|
+
## Tool Calling
|
|
79
|
+
|
|
80
|
+
Decorate any Python function with `@agent.uses` to expose it as a tool the LLM can invoke:
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from pygentix import Ollama
|
|
84
|
+
|
|
85
|
+
agent = Ollama()
|
|
86
|
+
|
|
87
|
+
@agent.uses
|
|
88
|
+
def search_docs(query: str) -> str:
|
|
89
|
+
"""Search the documentation for relevant articles."""
|
|
90
|
+
return run_search(query)
|
|
91
|
+
|
|
92
|
+
@agent.uses
|
|
93
|
+
def send_email(to: str, subject: str, body: str) -> str:
|
|
94
|
+
"""Send an email to the specified address."""
|
|
95
|
+
return mailer.send(to, subject, body)
|
|
96
|
+
|
|
97
|
+
conv = agent.start_conversation()
|
|
98
|
+
response = conv.ask("Find docs about authentication and email them to alice@co.com")
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
The framework introspects the function's signature and docstring to build the tool definition automatically. When the model decides to call a tool, the framework executes it and feeds the result back — looping until the model produces a final answer.
|
|
102
|
+
|
|
103
|
+
---
|
|
104
|
+
|
|
105
|
+
## Vision / Image Understanding
|
|
106
|
+
|
|
107
|
+
Pass images alongside your question to any vision-capable model:
|
|
108
|
+
|
|
109
|
+
```python
|
|
110
|
+
from pygentix import Ollama
|
|
111
|
+
|
|
112
|
+
agent = Ollama(model="llama3.2-vision") # local vision model
|
|
113
|
+
conv = agent.start_conversation()
|
|
114
|
+
|
|
115
|
+
response = conv.ask("How many cats are in this photo?", images=["photo.jpeg"])
|
|
116
|
+
print(response.message.content)
|
|
117
|
+
# → "There are 3 cats in the photo."
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
The `images` parameter accepts a list of file paths and works across all backends:
|
|
121
|
+
|
|
122
|
+
| Backend | Vision model examples |
|
|
123
|
+
|---|---|
|
|
124
|
+
| `Ollama` | `llama3.2-vision`, `moondream` |
|
|
125
|
+
| `ChatGPT` | `gpt-4o`, `gpt-4o-mini` |
|
|
126
|
+
| `Gemini` | `gemini-2.5-flash`, `gemini-2.5-pro` |
|
|
127
|
+
| `Copilot` | `gpt-4o` (via Azure) |
|
|
128
|
+
|
|
129
|
+
---
|
|
130
|
+
|
|
131
|
+
## Structured Output
|
|
132
|
+
|
|
133
|
+
Use `OutputAgent` to guarantee responses follow a JSON schema:
|
|
134
|
+
|
|
135
|
+
```python
|
|
136
|
+
from pygentix import Ollama, OutputAgent
|
|
137
|
+
|
|
138
|
+
class MyAgent(Ollama, OutputAgent):
|
|
139
|
+
pass
|
|
140
|
+
|
|
141
|
+
agent = MyAgent()
|
|
142
|
+
|
|
143
|
+
@agent.output
|
|
144
|
+
class Answer:
|
|
145
|
+
answer: str
|
|
146
|
+
confidence: float = 0.0
|
|
147
|
+
sources: list = []
|
|
148
|
+
|
|
149
|
+
conv = agent.start_conversation()
|
|
150
|
+
response = conv.ask("What is the capital of France?")
|
|
151
|
+
|
|
152
|
+
parsed = agent.parse_output(response)
|
|
153
|
+
print(parsed.answer) # "Paris"
|
|
154
|
+
print(parsed.confidence) # 0.95
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
The schema can also be a raw dict — pass any valid JSON Schema to `agent.output({"type": "object", ...})`.
|
|
158
|
+
|
|
159
|
+
---
|
|
160
|
+
|
|
161
|
+
## SQLAlchemy Integration
|
|
162
|
+
|
|
163
|
+
`SqlAlchemyAgent` gives the LLM read/write access to your database through auto-generated tools:
|
|
164
|
+
|
|
165
|
+
```python
|
|
166
|
+
from sqlalchemy import Column, Integer, String, create_engine
|
|
167
|
+
from sqlalchemy.orm import declarative_base
|
|
168
|
+
|
|
169
|
+
from pygentix import Ollama, OutputAgent, SqlAlchemyAgent
|
|
170
|
+
|
|
171
|
+
Base = declarative_base()
|
|
172
|
+
|
|
173
|
+
class Product(Base):
|
|
174
|
+
__tablename__ = "products"
|
|
175
|
+
id = Column(Integer, primary_key=True)
|
|
176
|
+
name = Column(String)
|
|
177
|
+
price = Column(Integer)
|
|
178
|
+
|
|
179
|
+
engine = create_engine("sqlite:///shop.db")
|
|
180
|
+
Base.metadata.create_all(engine)
|
|
181
|
+
|
|
182
|
+
class ShopAgent(Ollama, SqlAlchemyAgent, OutputAgent):
|
|
183
|
+
pass
|
|
184
|
+
|
|
185
|
+
agent = ShopAgent(engine=engine)
|
|
186
|
+
agent.reads(Product) # enables run_query
|
|
187
|
+
agent.writes(Product) # enables run_insert, run_update, run_delete
|
|
188
|
+
|
|
189
|
+
@agent.output
|
|
190
|
+
class Response:
|
|
191
|
+
answer: str
|
|
192
|
+
data: list = []
|
|
193
|
+
|
|
194
|
+
conv = agent.start_conversation()
|
|
195
|
+
conv.ask("Add a product called 'Widget' priced at 9")
|
|
196
|
+
response = conv.ask("List all products under $20")
|
|
197
|
+
|
|
198
|
+
parsed = agent.parse_output(response)
|
|
199
|
+
for item in parsed.data:
|
|
200
|
+
print(item)
|
|
201
|
+
```
|
|
202
|
+
|
|
203
|
+
The agent automatically generates `run_query`, `run_insert`, `run_update`, and `run_delete` tools, handles type coercion (strings → ints, dates, etc.), and serializes results back to the model.
|
|
204
|
+
|
|
205
|
+
---
|
|
206
|
+
|
|
207
|
+
## Mixing Backends
|
|
208
|
+
|
|
209
|
+
Every agent is a composable mixin — swap the backend class and everything else stays the same:
|
|
210
|
+
|
|
211
|
+
```python
|
|
212
|
+
from pygentix import Ollama, ChatGPT, Gemini, Copilot, SqlAlchemyAgent, OutputAgent
|
|
213
|
+
|
|
214
|
+
class LocalAgent(Ollama, SqlAlchemyAgent, OutputAgent):
|
|
215
|
+
"""Runs entirely on your machine via Ollama."""
|
|
216
|
+
|
|
217
|
+
class CloudAgent(ChatGPT, SqlAlchemyAgent, OutputAgent):
|
|
218
|
+
"""Uses OpenAI for inference."""
|
|
219
|
+
|
|
220
|
+
class GoogleAgent(Gemini, SqlAlchemyAgent, OutputAgent):
|
|
221
|
+
"""Uses Google Gemini for inference."""
|
|
222
|
+
|
|
223
|
+
class EnterpriseAgent(Copilot, SqlAlchemyAgent, OutputAgent):
|
|
224
|
+
"""Routes through your Azure OpenAI deployment."""
|
|
225
|
+
```
|
|
226
|
+
|
|
227
|
+
---
|
|
228
|
+
|
|
229
|
+
## Multi-turn Conversations
|
|
230
|
+
|
|
231
|
+
A `Conversation` maintains the full message history, so follow-up questions have context:
|
|
232
|
+
|
|
233
|
+
```python
|
|
234
|
+
from pygentix import Ollama, SqlAlchemyAgent
|
|
235
|
+
|
|
236
|
+
# ... define models, engine, etc.
|
|
237
|
+
|
|
238
|
+
agent = Ollama(engine=engine)
|
|
239
|
+
conv = agent.start_conversation()
|
|
240
|
+
conv.ask("Create a user named Alice with email alice@example.com")
|
|
241
|
+
conv.ask("Now create one for Bob at bob@example.com")
|
|
242
|
+
response = conv.ask("List all users")
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
---
|
|
246
|
+
|
|
247
|
+
## API Reference
|
|
248
|
+
|
|
249
|
+
### Core
|
|
250
|
+
|
|
251
|
+
| Symbol | Description |
|
|
252
|
+
|---|---|
|
|
253
|
+
| `Agent` | Abstract base class — subclass to create a backend |
|
|
254
|
+
| `ChatResponse` | Normalized response every backend returns |
|
|
255
|
+
| `Conversation` | Multi-turn conversation manager |
|
|
256
|
+
| `Function` | Introspectable wrapper around a tool callable |
|
|
257
|
+
|
|
258
|
+
### Backends
|
|
259
|
+
|
|
260
|
+
| Symbol | Description |
|
|
261
|
+
|---|---|
|
|
262
|
+
| `Ollama` | Local inference via Ollama |
|
|
263
|
+
| `ChatGPT` | OpenAI Chat Completions |
|
|
264
|
+
| `Gemini` | Google Gemini (via `google-genai`) |
|
|
265
|
+
| `Copilot` | Azure OpenAI |
|
|
266
|
+
|
|
267
|
+
### Mixins
|
|
268
|
+
|
|
269
|
+
| Symbol | Description |
|
|
270
|
+
|---|---|
|
|
271
|
+
| `OutputAgent` | JSON schema enforcement for responses |
|
|
272
|
+
| `SqlAlchemyAgent` | Database CRUD tools from ORM models |
|
|
273
|
+
|
|
274
|
+
---
|
|
275
|
+
|
|
276
|
+
## Development
|
|
277
|
+
|
|
278
|
+
```bash
|
|
279
|
+
git clone https://github.com/andreperussi/pygentix.git
|
|
280
|
+
cd pygentix
|
|
281
|
+
pip install -e ".[dev]"
|
|
282
|
+
pytest
|
|
283
|
+
```
|
|
284
|
+
|
|
285
|
+
---
|
|
286
|
+
|
|
287
|
+
## License
|
|
288
|
+
|
|
289
|
+
MIT
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "pygentix"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Composable AI agent framework — drop-in LLM tool-calling, structured output, and SQLAlchemy integration for any Python project."
|
|
9
|
+
authors = [
|
|
10
|
+
{ name = "Andre Perussi", email = "dlperussi@hotmail.com" },
|
|
11
|
+
]
|
|
12
|
+
license = { text = "MIT" }
|
|
13
|
+
readme = "README.md"
|
|
14
|
+
requires-python = ">=3.10"
|
|
15
|
+
classifiers = [
|
|
16
|
+
"Development Status :: 3 - Alpha",
|
|
17
|
+
"Intended Audience :: Developers",
|
|
18
|
+
"License :: OSI Approved :: MIT License",
|
|
19
|
+
"Programming Language :: Python :: 3",
|
|
20
|
+
"Programming Language :: Python :: 3.10",
|
|
21
|
+
"Programming Language :: Python :: 3.11",
|
|
22
|
+
"Programming Language :: Python :: 3.12",
|
|
23
|
+
"Programming Language :: Python :: 3.13",
|
|
24
|
+
"Typing :: Typed",
|
|
25
|
+
]
|
|
26
|
+
dependencies = [
|
|
27
|
+
"sqlalchemy>=2.0",
|
|
28
|
+
]
|
|
29
|
+
|
|
30
|
+
[project.optional-dependencies]
|
|
31
|
+
ollama = ["ollama>=0.4"]
|
|
32
|
+
openai = ["openai>=1.0"]
|
|
33
|
+
gemini = ["google-genai>=1.0"]
|
|
34
|
+
all = [
|
|
35
|
+
"ollama>=0.4",
|
|
36
|
+
"openai>=1.0",
|
|
37
|
+
"google-genai>=1.0",
|
|
38
|
+
]
|
|
39
|
+
dev = [
|
|
40
|
+
"ollama>=0.4",
|
|
41
|
+
"openai>=1.0",
|
|
42
|
+
"google-genai>=1.0",
|
|
43
|
+
"pytest>=8.0",
|
|
44
|
+
]
|
|
45
|
+
|
|
46
|
+
[project.urls]
|
|
47
|
+
Homepage = "https://github.com/andreperussi/pygentix"
|
|
48
|
+
Repository = "https://github.com/andreperussi/pygentix"
|
|
49
|
+
Issues = "https://github.com/andreperussi/pygentix/issues"
|
|
50
|
+
|
|
51
|
+
[tool.setuptools]
|
|
52
|
+
package-dir = { "" = "src" }
|
|
53
|
+
|
|
54
|
+
[tool.setuptools.packages.find]
|
|
55
|
+
where = ["src"]
|
|
56
|
+
|
|
57
|
+
[tool.setuptools.package-data]
|
|
58
|
+
pygentix = ["py.typed"]
|
|
59
|
+
|
|
60
|
+
[tool.pytest.ini_options]
|
|
61
|
+
testpaths = ["tests"]
|
pygentix-0.1.0/setup.cfg
ADDED