voxagent-0.1.0.tar.gz
This diff shows the content of the publicly released voxagent 0.1.0 package as it appears in its public registry; it is provided for informational purposes only. Every file listed below is newly added in this release.
- voxagent-0.1.0/.gitignore +17 -0
- voxagent-0.1.0/PKG-INFO +186 -0
- voxagent-0.1.0/README.md +131 -0
- voxagent-0.1.0/examples/README.md +47 -0
- voxagent-0.1.0/pyproject.toml +132 -0
- voxagent-0.1.0/src/voxagent/__init__.py +143 -0
- voxagent-0.1.0/src/voxagent/_version.py +5 -0
- voxagent-0.1.0/src/voxagent/agent/__init__.py +32 -0
- voxagent-0.1.0/src/voxagent/agent/abort.py +178 -0
- voxagent-0.1.0/src/voxagent/agent/core.py +902 -0
- voxagent-0.1.0/src/voxagent/code/__init__.py +9 -0
- voxagent-0.1.0/src/voxagent/mcp/__init__.py +16 -0
- voxagent-0.1.0/src/voxagent/mcp/manager.py +188 -0
- voxagent-0.1.0/src/voxagent/mcp/tool.py +152 -0
- voxagent-0.1.0/src/voxagent/providers/__init__.py +110 -0
- voxagent-0.1.0/src/voxagent/providers/anthropic.py +498 -0
- voxagent-0.1.0/src/voxagent/providers/augment.py +293 -0
- voxagent-0.1.0/src/voxagent/providers/auth.py +116 -0
- voxagent-0.1.0/src/voxagent/providers/base.py +268 -0
- voxagent-0.1.0/src/voxagent/providers/chatgpt.py +415 -0
- voxagent-0.1.0/src/voxagent/providers/claudecode.py +162 -0
- voxagent-0.1.0/src/voxagent/providers/cli_base.py +265 -0
- voxagent-0.1.0/src/voxagent/providers/codex.py +183 -0
- voxagent-0.1.0/src/voxagent/providers/failover.py +90 -0
- voxagent-0.1.0/src/voxagent/providers/google.py +532 -0
- voxagent-0.1.0/src/voxagent/providers/groq.py +96 -0
- voxagent-0.1.0/src/voxagent/providers/ollama.py +425 -0
- voxagent-0.1.0/src/voxagent/providers/openai.py +435 -0
- voxagent-0.1.0/src/voxagent/providers/registry.py +175 -0
- voxagent-0.1.0/src/voxagent/py.typed +1 -0
- voxagent-0.1.0/src/voxagent/security/__init__.py +14 -0
- voxagent-0.1.0/src/voxagent/security/events.py +75 -0
- voxagent-0.1.0/src/voxagent/security/filter.py +169 -0
- voxagent-0.1.0/src/voxagent/security/registry.py +87 -0
- voxagent-0.1.0/src/voxagent/session/__init__.py +39 -0
- voxagent-0.1.0/src/voxagent/session/compaction.py +237 -0
- voxagent-0.1.0/src/voxagent/session/lock.py +103 -0
- voxagent-0.1.0/src/voxagent/session/model.py +109 -0
- voxagent-0.1.0/src/voxagent/session/storage.py +184 -0
- voxagent-0.1.0/src/voxagent/streaming/__init__.py +52 -0
- voxagent-0.1.0/src/voxagent/streaming/emitter.py +286 -0
- voxagent-0.1.0/src/voxagent/streaming/events.py +255 -0
- voxagent-0.1.0/src/voxagent/subagent/__init__.py +20 -0
- voxagent-0.1.0/src/voxagent/subagent/context.py +124 -0
- voxagent-0.1.0/src/voxagent/subagent/definition.py +172 -0
- voxagent-0.1.0/src/voxagent/tools/__init__.py +32 -0
- voxagent-0.1.0/src/voxagent/tools/context.py +50 -0
- voxagent-0.1.0/src/voxagent/tools/decorator.py +175 -0
- voxagent-0.1.0/src/voxagent/tools/definition.py +131 -0
- voxagent-0.1.0/src/voxagent/tools/executor.py +109 -0
- voxagent-0.1.0/src/voxagent/tools/policy.py +89 -0
- voxagent-0.1.0/src/voxagent/tools/registry.py +89 -0
- voxagent-0.1.0/src/voxagent/types/__init__.py +46 -0
- voxagent-0.1.0/src/voxagent/types/messages.py +134 -0
- voxagent-0.1.0/src/voxagent/types/run.py +176 -0
voxagent-0.1.0/PKG-INFO
ADDED
@@ -0,0 +1,186 @@
Metadata-Version: 2.4
Name: voxagent
Version: 0.1.0
Summary: A lightweight, model-agnostic LLM provider abstraction with streaming and tool support
Project-URL: Homepage, https://github.com/lensator/voxagent
Project-URL: Documentation, https://github.com/lensator/voxagent#readme
Project-URL: Repository, https://github.com/lensator/voxagent
Project-URL: Issues, https://github.com/lensator/voxagent/issues
Project-URL: Changelog, https://github.com/lensator/voxagent/blob/main/CHANGELOG.md
Author: voxDomus team
License-Expression: MIT
Keywords: agent,ai,anthropic,llm,mcp,openai,streaming,tools
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Typing :: Typed
Requires-Python: >=3.11
Requires-Dist: anyio>=4.0
Requires-Dist: httpx>=0.25
Requires-Dist: pydantic>=2.0
Provides-Extra: all
Requires-Dist: anthropic>=0.25; extra == 'all'
Requires-Dist: google-generativeai>=0.5; extra == 'all'
Requires-Dist: groq>=0.4; extra == 'all'
Requires-Dist: mcp>=1.0; extra == 'all'
Requires-Dist: ollama>=0.2; extra == 'all'
Requires-Dist: openai>=1.0; extra == 'all'
Requires-Dist: tiktoken>=0.5; extra == 'all'
Provides-Extra: anthropic
Requires-Dist: anthropic>=0.25; extra == 'anthropic'
Provides-Extra: dev
Requires-Dist: mypy>=1.0; extra == 'dev'
Requires-Dist: pytest-asyncio>=0.21; extra == 'dev'
Requires-Dist: pytest-cov>=4.0; extra == 'dev'
Requires-Dist: pytest>=7.0; extra == 'dev'
Requires-Dist: ruff>=0.1.0; extra == 'dev'
Provides-Extra: google
Requires-Dist: google-generativeai>=0.5; extra == 'google'
Provides-Extra: groq
Requires-Dist: groq>=0.4; extra == 'groq'
Provides-Extra: mcp
Requires-Dist: mcp>=1.0; extra == 'mcp'
Provides-Extra: ollama
Requires-Dist: ollama>=0.2; extra == 'ollama'
Provides-Extra: openai
Requires-Dist: openai>=1.0; extra == 'openai'
Requires-Dist: tiktoken>=0.5; extra == 'openai'
Description-Content-Type: text/markdown

# voxagent

[](https://badge.fury.io/py/voxagent)
[](https://www.python.org/downloads/)
[](https://opensource.org/licenses/MIT)
[](https://peps.python.org/pep-0561/)

A lightweight, model-agnostic LLM provider abstraction with streaming and tool support.

## Features

- **Multi-Provider**: Unified interface for OpenAI, Anthropic, Google, Groq, Ollama
- **Streaming**: Typed `StreamChunk` union (TextDelta, ToolUse, MessageEnd, Error)
- **Tool System**: `@tool` decorator for easy function-to-tool conversion
- **MCP Integration**: First-class Model Context Protocol support
- **Type Safe**: Full type hints with `py.typed` marker
- **Minimal Dependencies**: Core requires only `pydantic`, `httpx`, `anyio`

## Installation

```bash
# Core only (no provider SDKs)
pip install voxagent

# With specific providers
pip install voxagent[openai]
pip install voxagent[anthropic]
pip install voxagent[google]
pip install voxagent[ollama]

# All providers
pip install voxagent[all]
```

## Quick Start

```python
import asyncio
from voxagent import Agent

async def main():
    agent = Agent(model="openai:gpt-4o")
    result = await agent.run("Hello, world!")
    print(result.output)

asyncio.run(main())
```

## Streaming

```python
from voxagent import Agent
from voxagent.providers import TextDeltaChunk

agent = Agent(model="anthropic:claude-3-5-sonnet")

async for chunk in agent.stream("Tell me a story"):
    if isinstance(chunk, TextDeltaChunk):
        print(chunk.delta, end="", flush=True)
```

## Tools

```python
from voxagent import Agent
from voxagent.tools import tool

@tool()
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    return f"Sunny, 72°F in {city}"

agent = Agent(
    model="openai:gpt-4o",
    tools=[get_weather],
)

result = await agent.run("What's the weather in Paris?")
```
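
The `@tool()` decorator presumably infers the tool name, description, and parameter schema from the function signature and docstring. The sketch below is not part of the package; it assumes that inference extends to multiple typed parameters and default values.

```python
# Illustrative only (not shipped with voxagent): a tool with several typed
# parameters. The parameter schema is assumed to be derived from the type
# hints and the description from the docstring.
from voxagent import Agent
from voxagent.tools import tool

@tool()
def convert_currency(amount: float, source: str, target: str = "USD") -> str:
    """Convert an amount between two currency codes (demo rate only)."""
    rate = 1.1  # placeholder rate for illustration
    return f"{amount:.2f} {source} ≈ {amount * rate:.2f} {target}"

agent = Agent(model="openai:gpt-4o", tools=[convert_currency])
```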

## Supported Providers

| Provider | Model Format | Example |
|----------|--------------|---------|
| OpenAI | `openai:model` | `openai:gpt-4o` |
| Anthropic | `anthropic:model` | `anthropic:claude-3-5-sonnet` |
| Google | `google:model` | `google:gemini-1.5-pro` |
| Groq | `groq:model` | `groq:llama-3.1-70b` |
| Ollama | `ollama:model` | `ollama:llama3.2` |

## API Reference

### Agent

```python
from voxagent import Agent

agent = Agent(
    model="provider:model",  # Required: provider:model string
    system_prompt="...",     # Optional: system instructions
    tools=[...],             # Optional: list of tools
    temperature=0.7,         # Optional: sampling temperature
)

# Single response
result = await agent.run("prompt")

# Streaming
async for chunk in agent.stream("prompt"):
    ...
```

### StreamChunk Types

```python
from voxagent.providers import (
    TextDeltaChunk,   # Text content
    ToolUseChunk,     # Tool invocation
    MessageEndChunk,  # End of message
    ErrorChunk,       # Error occurred
)
```
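
Every item yielded by `stream()` is one member of this union, so callers can dispatch on the chunk type. A minimal sketch; only `TextDeltaChunk.delta` is documented above, so the other chunk types are treated opaquely here:

```python
import asyncio

from voxagent import Agent
from voxagent.providers import (
    TextDeltaChunk,
    ToolUseChunk,
    MessageEndChunk,
    ErrorChunk,
)

async def consume() -> None:
    agent = Agent(model="openai:gpt-4o")
    async for chunk in agent.stream("Summarize the plot of Hamlet"):
        if isinstance(chunk, TextDeltaChunk):
            print(chunk.delta, end="", flush=True)  # incremental text
        elif isinstance(chunk, ToolUseChunk):
            print("\n[tool call requested]")  # fields not shown in this README
        elif isinstance(chunk, ErrorChunk):
            print("\n[stream error]")  # fields not shown in this README
        elif isinstance(chunk, MessageEndChunk):
            print("\n[done]")

asyncio.run(consume())
```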

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md) for development setup and guidelines.

## License

MIT License - see [LICENSE](LICENSE) for details.
voxagent-0.1.0/README.md
ADDED
@@ -0,0 +1,131 @@
# voxagent

[](https://badge.fury.io/py/voxagent)
[](https://www.python.org/downloads/)
[](https://opensource.org/licenses/MIT)
[](https://peps.python.org/pep-0561/)

A lightweight, model-agnostic LLM provider abstraction with streaming and tool support.

## Features

- **Multi-Provider**: Unified interface for OpenAI, Anthropic, Google, Groq, Ollama
- **Streaming**: Typed `StreamChunk` union (TextDelta, ToolUse, MessageEnd, Error)
- **Tool System**: `@tool` decorator for easy function-to-tool conversion
- **MCP Integration**: First-class Model Context Protocol support
- **Type Safe**: Full type hints with `py.typed` marker
- **Minimal Dependencies**: Core requires only `pydantic`, `httpx`, `anyio`

## Installation

```bash
# Core only (no provider SDKs)
pip install voxagent

# With specific providers
pip install voxagent[openai]
pip install voxagent[anthropic]
pip install voxagent[google]
pip install voxagent[ollama]

# All providers
pip install voxagent[all]
```

## Quick Start

```python
import asyncio
from voxagent import Agent

async def main():
    agent = Agent(model="openai:gpt-4o")
    result = await agent.run("Hello, world!")
    print(result.output)

asyncio.run(main())
```

## Streaming

```python
from voxagent import Agent
from voxagent.providers import TextDeltaChunk

agent = Agent(model="anthropic:claude-3-5-sonnet")

async for chunk in agent.stream("Tell me a story"):
    if isinstance(chunk, TextDeltaChunk):
        print(chunk.delta, end="", flush=True)
```

## Tools

```python
from voxagent import Agent
from voxagent.tools import tool

@tool()
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    return f"Sunny, 72°F in {city}"

agent = Agent(
    model="openai:gpt-4o",
    tools=[get_weather],
)

result = await agent.run("What's the weather in Paris?")
```

## Supported Providers

| Provider | Model Format | Example |
|----------|--------------|---------|
| OpenAI | `openai:model` | `openai:gpt-4o` |
| Anthropic | `anthropic:model` | `anthropic:claude-3-5-sonnet` |
| Google | `google:model` | `google:gemini-1.5-pro` |
| Groq | `groq:model` | `groq:llama-3.1-70b` |
| Ollama | `ollama:model` | `ollama:llama3.2` |
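
Because the provider is chosen entirely by the `provider:model` string, switching backends is a one-line change. A small illustrative sketch (not from the package) that reuses the model strings from the table above and assumes the matching extras and API keys are installed and set:

```python
import asyncio

from voxagent import Agent

# Same prompt against different backends; only the model string changes.
MODELS = [
    "openai:gpt-4o",
    "anthropic:claude-3-5-sonnet",
    "ollama:llama3.2",  # local, no API key needed
]

async def compare(prompt: str) -> None:
    for model in MODELS:
        result = await Agent(model=model).run(prompt)
        print(f"--- {model} ---\n{result.output}\n")

asyncio.run(compare("In one sentence, what is an LLM?"))
```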

## API Reference

### Agent

```python
from voxagent import Agent

agent = Agent(
    model="provider:model",  # Required: provider:model string
    system_prompt="...",     # Optional: system instructions
    tools=[...],             # Optional: list of tools
    temperature=0.7,         # Optional: sampling temperature
)

# Single response
result = await agent.run("prompt")

# Streaming
async for chunk in agent.stream("prompt"):
    ...
```

### StreamChunk Types

```python
from voxagent.providers import (
    TextDeltaChunk,   # Text content
    ToolUseChunk,     # Tool invocation
    MessageEndChunk,  # End of message
    ErrorChunk,       # Error occurred
)
```

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md) for development setup and guidelines.

## License

MIT License - see [LICENSE](LICENSE) for details.
voxagent-0.1.0/examples/README.md
ADDED
@@ -0,0 +1,47 @@
# voxagent Examples

This directory contains working examples demonstrating voxagent features.

## Prerequisites

```bash
# Install voxagent with all providers
pip install voxagent[all]

# Or install specific providers
pip install voxagent[openai]     # For OpenAI examples
pip install voxagent[anthropic]  # For Anthropic examples
pip install voxagent[ollama]     # For Ollama examples
```

## Examples

| File | Description | Provider |
|------|-------------|----------|
| `01_openai_streaming.py` | Basic streaming with OpenAI | OpenAI |
| `02_anthropic_tools.py` | Tool calling with Anthropic | Anthropic |
| `03_ollama_local.py` | Local models with Ollama | Ollama |
| `04_mcp_integration.py` | MCP server integration | Any |
| `05_multi_provider.py` | Provider switching & failover | Multiple |

## Running Examples

```bash
# Set your API keys
export OPENAI_API_KEY="sk-..."
export ANTHROPIC_API_KEY="sk-ant-..."

# Run an example
python 01_openai_streaming.py
```

## Environment Variables

| Variable | Provider | Required |
|----------|----------|----------|
| `OPENAI_API_KEY` | OpenAI | Yes |
| `ANTHROPIC_API_KEY` | Anthropic | Yes |
| `GOOGLE_API_KEY` | Google | Yes |
| `GROQ_API_KEY` | Groq | Yes |
| (none) | Ollama | No (local) |
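
The example scripts themselves are not part of this diff, so their contents are unknown. As a rough, hypothetical sketch, `01_openai_streaming.py` might look like the following, using only the API documented in the top-level README (requires `OPENAI_API_KEY`):

```python
"""Hypothetical sketch of 01_openai_streaming.py; the real file is not shown in this diff."""
import asyncio

from voxagent import Agent
from voxagent.providers import TextDeltaChunk

async def main() -> None:
    # Requires OPENAI_API_KEY to be set, as noted in the table above.
    agent = Agent(model="openai:gpt-4o")
    async for chunk in agent.stream("Write a haiku about asynchronous Python"):
        if isinstance(chunk, TextDeltaChunk):
            print(chunk.delta, end="", flush=True)
    print()

if __name__ == "__main__":
    asyncio.run(main())
```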
voxagent-0.1.0/pyproject.toml
ADDED
@@ -0,0 +1,132 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "voxagent"
dynamic = ["version"]
description = "A lightweight, model-agnostic LLM provider abstraction with streaming and tool support"
readme = "README.md"
license = "MIT"
requires-python = ">=3.11"
authors = [
    { name = "voxDomus team" },
]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Typing :: Typed",
]
keywords = ["ai", "agent", "llm", "openai", "anthropic", "streaming", "mcp", "tools"]

# Core dependencies - minimal footprint
dependencies = [
    "pydantic>=2.0",
    "httpx>=0.25",
    "anyio>=4.0",
]

[project.optional-dependencies]
# Provider-specific dependencies
openai = [
    "openai>=1.0",
    "tiktoken>=0.5",
]
anthropic = [
    "anthropic>=0.25",
]
google = [
    "google-generativeai>=0.5",
]
groq = [
    "groq>=0.4",
]
ollama = [
    "ollama>=0.2",
]
# MCP support
mcp = [
    "mcp>=1.0",
]
# All providers
all = [
    "voxagent[openai,anthropic,google,groq,ollama,mcp]",
]
# Development dependencies
dev = [
    "pytest>=7.0",
    "pytest-asyncio>=0.21",
    "pytest-cov>=4.0",
    "ruff>=0.1.0",
    "mypy>=1.0",
]

[project.urls]
Homepage = "https://github.com/lensator/voxagent"
Documentation = "https://github.com/lensator/voxagent#readme"
Repository = "https://github.com/lensator/voxagent"
Issues = "https://github.com/lensator/voxagent/issues"
Changelog = "https://github.com/lensator/voxagent/blob/main/CHANGELOG.md"

# Hatch configuration for src layout
[tool.hatch.version]
path = "src/voxagent/_version.py"

[tool.hatch.build.targets.wheel]
packages = ["src/voxagent"]

[tool.hatch.build.targets.sdist]
include = [
    "src/voxagent/**/*.py",
    "src/voxagent/py.typed",
    "README.md",
    "LICENSE",
]
exclude = [
    "__pycache__",
    "*.pyc",
]

# Ruff configuration
[tool.ruff]
target-version = "py311"
line-length = 100
src = ["src"]

[tool.ruff.lint]
select = [
    "E",   # pycodestyle errors
    "W",   # pycodestyle warnings
    "F",   # Pyflakes
    "I",   # isort
    "B",   # flake8-bugbear
    "C4",  # flake8-comprehensions
    "UP",  # pyupgrade
]
ignore = [
    "E501", # line too long (handled by formatter)
]

[tool.ruff.lint.isort]
known-first-party = ["voxagent"]

# Mypy configuration
[tool.mypy]
python_version = "3.11"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true

# Pytest configuration
[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
addopts = "-v --tb=short"
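
`[tool.hatch.version]` points hatchling at `src/voxagent/_version.py` (a 5-line file in this release), and `__init__.py` below imports `__version__` and `__version_info__` from it. The file's exact contents are not shown in this diff; a plausible minimal sketch would be:

```python
# Hypothetical reconstruction of src/voxagent/_version.py; the real file is
# only 5 lines and must define the two names imported by __init__.py.
"""Version information for voxagent."""

__version__ = "0.1.0"
__version_info__ = tuple(int(part) for part in __version__.split("."))
```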
voxagent-0.1.0/src/voxagent/__init__.py
ADDED
@@ -0,0 +1,143 @@
"""voxagent - A lightweight, model-agnostic LLM provider abstraction.

voxagent provides:
- Multi-Provider: Unified interface for OpenAI, Anthropic, Google, Groq, Ollama
- Streaming: Typed StreamChunk union (TextDelta, ToolUse, MessageEnd, Error)
- Tool System: @tool decorator, typed definitions, abort signal propagation
- MCP Integration: First-class Model Context Protocol support
- Sub-Agent Support: Hierarchical agent composition with depth-limited delegation
- Session Management: File-based sessions with context compaction

Quick Start:
    >>> from voxagent import Agent
    >>> agent = Agent(model="openai:gpt-4o")
    >>> result = await agent.run("Hello!")

With Tools:
    >>> from voxagent import Agent
    >>> from voxagent.tools import tool
    >>>
    >>> @tool()
    ... def get_weather(city: str) -> str:
    ...     '''Get weather for a city.'''
    ...     return f"Sunny in {city}"
    >>>
    >>> agent = Agent(model="anthropic:claude-3-5-sonnet", tools=[get_weather])
    >>> result = await agent.run("What's the weather in Paris?")

Streaming:
    >>> from voxagent import Agent
    >>> from voxagent.providers import TextDeltaChunk
    >>>
    >>> agent = Agent(model="openai:gpt-4o")
    >>> async for chunk in agent.stream("Tell me a story"):
    ...     if isinstance(chunk, TextDeltaChunk):
    ...         print(chunk.delta, end="")

For more information, see: https://github.com/voxdomus/voxagent
"""

from ._version import __version__, __version_info__

# =============================================================================
# Lazy imports for top-level convenience
# =============================================================================
# We use __getattr__ to avoid importing the full dependency chain on module load.
# This keeps `import voxagent` fast and allows users to import only what they need.


def __getattr__(name: str) -> object:
    """Lazy import for top-level classes."""
    # Core Agent class
    if name == "Agent":
        from .agent import Agent

        return Agent

    # Provider base classes and chunks
    if name in (
        "BaseProvider",
        "StreamChunk",
        "TextDeltaChunk",
        "ToolUseChunk",
        "MessageEndChunk",
        "ErrorChunk",
        "AbortSignal",
    ):
        from . import providers

        return getattr(providers, name)

    # Tool system
    if name in ("tool", "ToolDefinition", "ToolContext"):
        from . import tools

        return getattr(tools, name)

    # Message types
    if name in ("Message", "ToolCall", "ToolResult"):
        from . import types

        return getattr(types, name)

    # Sub-agent support
    if name == "SubAgentDefinition":
        from .subagent import SubAgentDefinition

        return SubAgentDefinition
    if name == "SubAgentContext":
        from .subagent import SubAgentContext

        return SubAgentContext
    if name == "MaxDepthExceededError":
        from .subagent import MaxDepthExceededError

        return MaxDepthExceededError

    # MCP
    if name == "MCPServerManager":
        from .mcp import MCPServerManager

        return MCPServerManager

    # Registry
    if name in ("ProviderRegistry", "get_default_registry"):
        from . import providers

        return getattr(providers, name)

    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


__all__ = [
    # Version
    "__version__",
    "__version_info__",
    # Core
    "Agent",
    # Providers
    "BaseProvider",
    "StreamChunk",
    "TextDeltaChunk",
    "ToolUseChunk",
    "MessageEndChunk",
    "ErrorChunk",
    "AbortSignal",
    "ProviderRegistry",
    "get_default_registry",
    # Tools
    "tool",
    "ToolDefinition",
    "ToolContext",
    # Types
    "Message",
    "ToolCall",
    "ToolResult",
    # Sub-agents
    "SubAgentDefinition",
    "SubAgentContext",
    "MaxDepthExceededError",
    # MCP
    "MCPServerManager",
]
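
The module-level `__getattr__` (PEP 562) keeps `import voxagent` cheap: nothing under `voxagent.agent`, `voxagent.providers`, etc. is imported until the corresponding attribute is first accessed. A small sketch of the observable behavior, based on the code above:

```python
import sys

import voxagent

# Only the package itself and _version are loaded at this point.
print("voxagent.agent" in sys.modules)   # False - not imported yet

agent_cls = voxagent.Agent               # triggers __getattr__("Agent")
print("voxagent.agent" in sys.modules)   # True - lazily imported on access

print(voxagent.__version__)              # "0.1.0", re-exported from _version.py
```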
voxagent-0.1.0/src/voxagent/agent/__init__.py
ADDED
@@ -0,0 +1,32 @@
"""Agent core module for voxagent.

This subpackage provides:
- Agent class for managing AI agent interactions
- AbortController for abort signal management
- TimeoutHandler for timeout-based abort triggering
- Error recovery and failover handling
"""

from voxagent.agent.abort import (
    AbortController,
    AllProfilesExhausted,
    FailoverError,
    FailoverReason,
    TimeoutHandler,
    handle_context_overflow,
)
from voxagent.agent.core import Agent

# Import providers to ensure they are registered with the default registry
import voxagent.providers  # noqa: F401

__all__ = [
    "AbortController",
    "Agent",
    "AllProfilesExhausted",
    "FailoverError",
    "FailoverReason",
    "TimeoutHandler",
    "handle_context_overflow",
]