llm-actor 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_actor-0.1.0/.gitignore +14 -0
- llm_actor-0.1.0/LICENSE +21 -0
- llm_actor-0.1.0/PKG-INFO +179 -0
- llm_actor-0.1.0/README.md +148 -0
- llm_actor-0.1.0/pyproject.toml +117 -0
- llm_actor-0.1.0/src/llm_actor/__init__.py +76 -0
- llm_actor-0.1.0/src/llm_actor/actors/__init__.py +0 -0
- llm_actor-0.1.0/src/llm_actor/actors/pool.py +451 -0
- llm_actor-0.1.0/src/llm_actor/actors/worker.py +297 -0
- llm_actor-0.1.0/src/llm_actor/client/__init__.py +0 -0
- llm_actor-0.1.0/src/llm_actor/client/adapters/__init__.py +25 -0
- llm_actor-0.1.0/src/llm_actor/client/adapters/anthropic.py +174 -0
- llm_actor-0.1.0/src/llm_actor/client/adapters/gigachat.py +188 -0
- llm_actor-0.1.0/src/llm_actor/client/adapters/openai.py +176 -0
- llm_actor-0.1.0/src/llm_actor/client/adapters/openai_compatible.py +17 -0
- llm_actor-0.1.0/src/llm_actor/client/interface.py +81 -0
- llm_actor-0.1.0/src/llm_actor/client/llm.py +259 -0
- llm_actor-0.1.0/src/llm_actor/client/retry.py +107 -0
- llm_actor-0.1.0/src/llm_actor/client/tool_loop.py +139 -0
- llm_actor-0.1.0/src/llm_actor/core/__init__.py +0 -0
- llm_actor-0.1.0/src/llm_actor/core/messages.py +24 -0
- llm_actor-0.1.0/src/llm_actor/core/request.py +22 -0
- llm_actor-0.1.0/src/llm_actor/core/tools.py +126 -0
- llm_actor-0.1.0/src/llm_actor/exceptions.py +121 -0
- llm_actor-0.1.0/src/llm_actor/logger.py +134 -0
- llm_actor-0.1.0/src/llm_actor/metrics.py +68 -0
- llm_actor-0.1.0/src/llm_actor/py.typed +0 -0
- llm_actor-0.1.0/src/llm_actor/resilience/__init__.py +0 -0
- llm_actor-0.1.0/src/llm_actor/resilience/circuit_breaker.py +111 -0
- llm_actor-0.1.0/src/llm_actor/service.py +246 -0
- llm_actor-0.1.0/src/llm_actor/settings.py +54 -0
- llm_actor-0.1.0/src/llm_actor/tracing.py +45 -0
llm_actor-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
llm_actor-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: llm-actor
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: LLM actor: pool, circuit breaker, retries, metrics
|
|
5
|
+
Project-URL: Homepage, https://github.com/SergeyArc/llm-actor
|
|
6
|
+
Project-URL: Repository, https://github.com/SergeyArc/llm-actor
|
|
7
|
+
Project-URL: Issues, https://github.com/SergeyArc/llm-actor/issues
|
|
8
|
+
Author-email: Sergey Arc <sergey-vd@outlook.com>
|
|
9
|
+
License: MIT
|
|
10
|
+
License-File: LICENSE
|
|
11
|
+
Keywords: actor,anthropic,circuit-breaker,concurrent,gigachat,llm,openai,parallel,resilience
|
|
12
|
+
Classifier: Development Status :: 4 - Beta
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
17
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
19
|
+
Requires-Python: <3.14,>=3.13
|
|
20
|
+
Requires-Dist: loguru>=0.7.0
|
|
21
|
+
Requires-Dist: opentelemetry-api>=1.25.0
|
|
22
|
+
Requires-Dist: pydantic-settings~=2.5.2
|
|
23
|
+
Requires-Dist: pydantic~=2.9.2
|
|
24
|
+
Provides-Extra: gigachat
|
|
25
|
+
Requires-Dist: gigachat; extra == 'gigachat'
|
|
26
|
+
Provides-Extra: metrics
|
|
27
|
+
Requires-Dist: prometheus-client>=0.19.0; extra == 'metrics'
|
|
28
|
+
Provides-Extra: openai
|
|
29
|
+
Requires-Dist: openai; extra == 'openai'
|
|
30
|
+
Description-Content-Type: text/markdown
|
|
31
|
+
|
|
32
|
+
# LLM Actor: Resilient Throughput, Structured Output & Tool Calling
|
|
33
|
+
|
|
34
|
+
<p align="center">
|
|
35
|
+
<a href="https://pypi.org/project/llm-actor/"><img src="https://img.shields.io/pypi/v/llm-actor.svg" alt="PyPI version"></a>
|
|
36
|
+
<a href="https://github.com/SergeyArc/llm-actor/actions/workflows/test.yml"><img src="https://github.com/SergeyArc/llm-actor/actions/workflows/test.yml/badge.svg" alt="Tests status"></a>
|
|
37
|
+
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
|
|
38
|
+
<a href="https://www.python.org/downloads/release/python-3130/"><img src="https://img.shields.io/badge/python-3.13-blue.svg" alt="Python 3.13"></a>
|
|
39
|
+
</p>
|
|
40
|
+
|
|
41
|
+
<p align="center">
|
|
42
|
+
<i>Documentation: <b>English</b> | <a href="docs/README.ru.md">Russian</a></i>
|
|
43
|
+
</p>
|
|
44
|
+
|
|
45
|
+
**LLM Actor** is a high-performance orchestration layer designed to handle Large Language Model (LLM) requests at scale. Inspired by the **Actor Model**, it solves the "last mile" of production LLM integration: handling concurrency, ensuring resilience, and providing **guaranteed structured output** and **agentic tool calling** without the boilerplate.
|
|
46
|
+
|
|
47
|
+
---
|
|
48
|
+
|
|
49
|
+
## Why LLM Actor?
|
|
50
|
+
|
|
51
|
+
Most developers start with simple API calls. But when you move to production, you quickly hit:
|
|
52
|
+
- **Rate Limit Exhaustion**: No global coordination for token usage.
|
|
53
|
+
- **Provider Outages**: One slow response can hang your entire app.
|
|
54
|
+
- **Unreliable Parsing**: Hard to get **guaranteed Structured Output** from raw strings.
|
|
55
|
+
- **Complex Agentic Flows**: Orchestrating **Tool Calling** (especially in parallel) is error-prone.
|
|
56
|
+
- **Lack of Priority**: Background tasks block high-priority user UI requests.
|
|
57
|
+
|
|
58
|
+
**LLM Actor** fixes this. It’s not just a wrapper; it’s a **resilient worker pool** built to sit between your application logic and your LLM providers.
|
|
59
|
+
|
|
60
|
+
---
|
|
61
|
+
|
|
62
|
+
## Key Features
|
|
63
|
+
|
|
64
|
+
- **High Throughput Actor Pool**: Efficiently manage hundreds of concurrent requests using a dedicated worker pool.
|
|
65
|
+
- **Intelligent Resilience**:
|
|
66
|
+
- **Circuit Breaker**: Detects provider failures and "fails fast" to protect your infrastructure.
|
|
67
|
+
- **Exponential Backoff**: Automatic retries for transient HTTP errors (429, 502, 503).
|
|
68
|
+
- **Semantic Validation**: Typed response validation with Pydantic; auto-retry on schema mismatch.
|
|
69
|
+
- **Built-in Tool Calling Loop**: Native support for complex agentic flows. Run multiple tools **in parallel** to slash latency.
|
|
70
|
+
- **Global Priority Queue**: Assign priorities to tasks. Ensure user-facing interactions always jump to the front of the line.
|
|
71
|
+
- **Multi-Provider & Self-Hosted**:
|
|
72
|
+
- Native support: OpenAI, Anthropic, Sber GigaChat.
|
|
73
|
+
- Proxy support: **vLLM**, **Ollama**, and any OpenAI-compatible endpoint.
|
|
74
|
+
- **Deep Observability**: Full **OpenTelemetry** integration. Trace every request from the queue through the actor to the final provider response.
|
|
75
|
+
|
|
76
|
+
---
|
|
77
|
+
|
|
78
|
+
## Installation
|
|
79
|
+
|
|
80
|
+
```bash
|
|
81
|
+
# Install core package
|
|
82
|
+
pip install llm-actor
|
|
83
|
+
|
|
84
|
+
# Install with your preferred providers
|
|
85
|
+
pip install "llm-actor[openai,gigachat]"
|
|
86
|
+
|
|
87
|
+
# Full installation (all providers + metrics)
|
|
88
|
+
pip install "llm-actor[openai,gigachat,metrics]"
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
---
|
|
92
|
+
|
|
93
|
+
## Quick Start: 60 Seconds to Scale
|
|
94
|
+
|
|
95
|
+
Create a service and start processing tasks with priority and auto-recovery:
|
|
96
|
+
|
|
97
|
+
```python
|
|
98
|
+
from llm_actor import LLMActorService, LLMActorSettings, Priority
|
|
99
|
+
from pydantic import BaseModel
|
|
100
|
+
|
|
101
|
+
# 1. Setup Service
|
|
102
|
+
service = LLMActorService.from_openai(
|
|
103
|
+
api_key="sk-...",
|
|
104
|
+
model="gpt-4o",
|
|
105
|
+
settings=LLMActorSettings(LLM_NUM_ACTORS=10) # 10 concurrent workers
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
# 2. Define Output Schema
|
|
109
|
+
class UserProfile(BaseModel):
|
|
110
|
+
name: str
|
|
111
|
+
skills: list[str]
|
|
112
|
+
|
|
113
|
+
# 3. Use via Context Manager (handles Start/Stop automatically)
|
|
114
|
+
async with service:
|
|
115
|
+
# 4. Queue a High-Priority Task
|
|
116
|
+
request = service.request(
|
|
117
|
+
"Extract profile from: Alex is a Senior Python Dev with LLM expertise.",
|
|
118
|
+
response_model=UserProfile,
|
|
119
|
+
priority=Priority.HIGH
|
|
120
|
+
)
|
|
121
|
+
|
|
122
|
+
# 5. Get Your Results (Blocking or Async)
|
|
123
|
+
result = request.get()
|
|
124
|
+
print(f"Found: {result.name} with skills: {result.skills}")
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
---
|
|
128
|
+
|
|
129
|
+
## Provider Support Matrix
|
|
130
|
+
|
|
131
|
+
| Provider | Generations | Parallel Tools | Tested |
|
|
132
|
+
|---|---|---|---|
|
|
133
|
+
| **OpenAI / compatible** | Yes | Yes | Full |
|
|
134
|
+
| **Anthropic** | Yes | Yes | Full |
|
|
135
|
+
| **vLLM / Ollama** | Yes | Yes* | Full |
|
|
136
|
+
| **Sber GigaChat** | Yes | Partial | Experimental |
|
|
137
|
+
|
|
138
|
+
*\*Tool calling in vLLM requires specific server-side flags.*
|
|
139
|
+
|
|
140
|
+
---
|
|
141
|
+
|
|
142
|
+
## Built for Reliability
|
|
143
|
+
|
|
144
|
+
| Mechanism | Description |
|
|
145
|
+
|---|---|
|
|
146
|
+
| **Actor Supervision** | If a worker process crashes, it's automatically restarted by the supervisor. |
|
|
147
|
+
| **Backpressure** | Prevents system overload by limiting the number of active tasks. |
|
|
148
|
+
| **Otel Tracing** | Visualize latency including "Queue Wait Time" vs "In-LLM Time". |
|
|
149
|
+
|
|
150
|
+
---
|
|
151
|
+
|
|
152
|
+
## Contributing
|
|
153
|
+
|
|
154
|
+
We love contributions! Whether it's adding a new provider adapter, fixing a bug, or improving documentation.
|
|
155
|
+
|
|
156
|
+
1. Fork the repo.
|
|
157
|
+
2. Install dev dependencies: `uv sync --all-extras --group dev`
|
|
158
|
+
3. Run tests: `pytest tests/unit`
|
|
159
|
+
4. Submit your PR!
|
|
160
|
+
|
|
161
|
+
---
|
|
162
|
+
|
|
163
|
+
## License
|
|
164
|
+
|
|
165
|
+
Distributed under the **MIT License**. See `LICENSE` for more information.
|
|
166
|
+
|
|
167
|
+
---
|
|
168
|
+
|
|
169
|
+
## Examples & Advanced Usage
|
|
170
|
+
|
|
171
|
+
Check out the [examples/](examples/) directory for complete, runnable scripts:
|
|
172
|
+
|
|
173
|
+
1. **[Basic Generation](examples/01_basic_generation.py)**: Quick start with any provider.
|
|
174
|
+
2. **[Structured Output](examples/02_structured_output.py)**: Extract data into Pydantic models.
|
|
175
|
+
3. **[Tool Calling](examples/03_tool_calling.py)**: Orchestrate complex agentic loops with parallel tool execution.
|
|
176
|
+
|
|
177
|
+
---
|
|
178
|
+
|
|
179
|
+
<p align="center">Built for the AI Developer Community.</p>
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
# LLM Actor: Resilient Throughput, Structured Output & Tool Calling
|
|
2
|
+
|
|
3
|
+
<p align="center">
|
|
4
|
+
<a href="https://pypi.org/project/llm-actor/"><img src="https://img.shields.io/pypi/v/llm-actor.svg" alt="PyPI version"></a>
|
|
5
|
+
<a href="https://github.com/SergeyArc/llm-actor/actions/workflows/test.yml"><img src="https://github.com/SergeyArc/llm-actor/actions/workflows/test.yml/badge.svg" alt="Tests status"></a>
|
|
6
|
+
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
|
|
7
|
+
<a href="https://www.python.org/downloads/release/python-3130/"><img src="https://img.shields.io/badge/python-3.13-blue.svg" alt="Python 3.13"></a>
|
|
8
|
+
</p>
|
|
9
|
+
|
|
10
|
+
<p align="center">
|
|
11
|
+
<i>Documentation: <b>English</b> | <a href="docs/README.ru.md">Russian</a></i>
|
|
12
|
+
</p>
|
|
13
|
+
|
|
14
|
+
**LLM Actor** is a high-performance orchestration layer designed to handle Large Language Model (LLM) requests at scale. Inspired by the **Actor Model**, it solves the "last mile" of production LLM integration: handling concurrency, ensuring resilience, and providing **guaranteed structured output** and **agentic tool calling** without the boilerplate.
|
|
15
|
+
|
|
16
|
+
---
|
|
17
|
+
|
|
18
|
+
## Why LLM Actor?
|
|
19
|
+
|
|
20
|
+
Most developers start with simple API calls. But when you move to production, you quickly hit:
|
|
21
|
+
- **Rate Limit Exhaustion**: No global coordination for token usage.
|
|
22
|
+
- **Provider Outages**: One slow response can hang your entire app.
|
|
23
|
+
- **Unreliable Parsing**: Hard to get **guaranteed Structured Output** from raw strings.
|
|
24
|
+
- **Complex Agentic Flows**: Orchestrating **Tool Calling** (especially in parallel) is error-prone.
|
|
25
|
+
- **Lack of Priority**: Background tasks block high-priority user UI requests.
|
|
26
|
+
|
|
27
|
+
**LLM Actor** fixes this. It’s not just a wrapper; it’s a **resilient worker pool** built to sit between your application logic and your LLM providers.
|
|
28
|
+
|
|
29
|
+
---
|
|
30
|
+
|
|
31
|
+
## Key Features
|
|
32
|
+
|
|
33
|
+
- **High Throughput Actor Pool**: Efficiently manage hundreds of concurrent requests using a dedicated worker pool.
|
|
34
|
+
- **Intelligent Resilience**:
|
|
35
|
+
- **Circuit Breaker**: Detects provider failures and "fails fast" to protect your infrastructure.
|
|
36
|
+
- **Exponential Backoff**: Automatic retries for transient HTTP errors (429, 502, 503).
|
|
37
|
+
- **Semantic Validation**: Typed response validation with Pydantic; auto-retry on schema mismatch.
|
|
38
|
+
- **Built-in Tool Calling Loop**: Native support for complex agentic flows. Run multiple tools **in parallel** to slash latency.
|
|
39
|
+
- **Global Priority Queue**: Assign priorities to tasks. Ensure user-facing interactions always jump to the front of the line.
|
|
40
|
+
- **Multi-Provider & Self-Hosted**:
|
|
41
|
+
- Native support: OpenAI, Anthropic, Sber GigaChat.
|
|
42
|
+
- Proxy support: **vLLM**, **Ollama**, and any OpenAI-compatible endpoint.
|
|
43
|
+
- **Deep Observability**: Full **OpenTelemetry** integration. Trace every request from the queue through the actor to the final provider response.
|
|
44
|
+
|
|
45
|
+
---
|
|
46
|
+
|
|
47
|
+
## Installation
|
|
48
|
+
|
|
49
|
+
```bash
|
|
50
|
+
# Install core package
|
|
51
|
+
pip install llm-actor
|
|
52
|
+
|
|
53
|
+
# Install with your preferred providers
|
|
54
|
+
pip install "llm-actor[openai,gigachat]"
|
|
55
|
+
|
|
56
|
+
# Full installation (all providers + metrics)
|
|
57
|
+
pip install "llm-actor[openai,gigachat,metrics]"
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
---
|
|
61
|
+
|
|
62
|
+
## Quick Start: 60 Seconds to Scale
|
|
63
|
+
|
|
64
|
+
Create a service and start processing tasks with priority and auto-recovery:
|
|
65
|
+
|
|
66
|
+
```python
|
|
67
|
+
from llm_actor import LLMActorService, LLMActorSettings, Priority
|
|
68
|
+
from pydantic import BaseModel
|
|
69
|
+
|
|
70
|
+
# 1. Setup Service
|
|
71
|
+
service = LLMActorService.from_openai(
|
|
72
|
+
api_key="sk-...",
|
|
73
|
+
model="gpt-4o",
|
|
74
|
+
settings=LLMActorSettings(LLM_NUM_ACTORS=10) # 10 concurrent workers
|
|
75
|
+
)
|
|
76
|
+
|
|
77
|
+
# 2. Define Output Schema
|
|
78
|
+
class UserProfile(BaseModel):
|
|
79
|
+
name: str
|
|
80
|
+
skills: list[str]
|
|
81
|
+
|
|
82
|
+
# 3. Use via Context Manager (handles Start/Stop automatically)
|
|
83
|
+
async with service:
|
|
84
|
+
# 4. Queue a High-Priority Task
|
|
85
|
+
request = service.request(
|
|
86
|
+
"Extract profile from: Alex is a Senior Python Dev with LLM expertise.",
|
|
87
|
+
response_model=UserProfile,
|
|
88
|
+
priority=Priority.HIGH
|
|
89
|
+
)
|
|
90
|
+
|
|
91
|
+
# 5. Get Your Results (Blocking or Async)
|
|
92
|
+
result = request.get()
|
|
93
|
+
print(f"Found: {result.name} with skills: {result.skills}")
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
---
|
|
97
|
+
|
|
98
|
+
## Provider Support Matrix
|
|
99
|
+
|
|
100
|
+
| Provider | Generations | Parallel Tools | Tested |
|
|
101
|
+
|---|---|---|---|
|
|
102
|
+
| **OpenAI / compatible** | Yes | Yes | Full |
|
|
103
|
+
| **Anthropic** | Yes | Yes | Full |
|
|
104
|
+
| **vLLM / Ollama** | Yes | Yes* | Full |
|
|
105
|
+
| **Sber GigaChat** | Yes | Partial | Experimental |
|
|
106
|
+
|
|
107
|
+
*\*Tool calling in vLLM requires specific server-side flags.*
|
|
108
|
+
|
|
109
|
+
---
|
|
110
|
+
|
|
111
|
+
## Built for Reliability
|
|
112
|
+
|
|
113
|
+
| Mechanism | Description |
|
|
114
|
+
|---|---|
|
|
115
|
+
| **Actor Supervision** | If a worker process crashes, it's automatically restarted by the supervisor. |
|
|
116
|
+
| **Backpressure** | Prevents system overload by limiting the number of active tasks. |
|
|
117
|
+
| **Otel Tracing** | Visualize latency including "Queue Wait Time" vs "In-LLM Time". |
|
|
118
|
+
|
|
119
|
+
---
|
|
120
|
+
|
|
121
|
+
## Contributing
|
|
122
|
+
|
|
123
|
+
We love contributions! Whether it's adding a new provider adapter, fixing a bug, or improving documentation.
|
|
124
|
+
|
|
125
|
+
1. Fork the repo.
|
|
126
|
+
2. Install dev dependencies: `uv sync --all-extras --group dev`
|
|
127
|
+
3. Run tests: `pytest tests/unit`
|
|
128
|
+
4. Submit your PR!
|
|
129
|
+
|
|
130
|
+
---
|
|
131
|
+
|
|
132
|
+
## License
|
|
133
|
+
|
|
134
|
+
Distributed under the **MIT License**. See `LICENSE` for more information.
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
## Examples & Advanced Usage
|
|
139
|
+
|
|
140
|
+
Check out the [examples/](examples/) directory for complete, runnable scripts:
|
|
141
|
+
|
|
142
|
+
1. **[Basic Generation](examples/01_basic_generation.py)**: Quick start with any provider.
|
|
143
|
+
2. **[Structured Output](examples/02_structured_output.py)**: Extract data into Pydantic models.
|
|
144
|
+
3. **[Tool Calling](examples/03_tool_calling.py)**: Orchestrate complex agentic loops with parallel tool execution.
|
|
145
|
+
|
|
146
|
+
---
|
|
147
|
+
|
|
148
|
+
<p align="center">Built for the AI Developer Community.</p>
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "llm-actor"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "LLM actor: pool, circuit breaker, retries, metrics"
|
|
9
|
+
authors = [
|
|
10
|
+
    { name = "Sergey Arc", email = "sergey-vd@outlook.com" }
|
|
11
|
+
]
|
|
12
|
+
license = { text = "MIT" }
|
|
13
|
+
readme = "README.md"
|
|
14
|
+
requires-python = ">=3.13,<3.14"
|
|
15
|
+
dependencies = [
|
|
16
|
+
"pydantic~=2.9.2",
|
|
17
|
+
"pydantic-settings~=2.5.2",
|
|
18
|
+
"loguru>=0.7.0",
|
|
19
|
+
"opentelemetry-api>=1.25.0",
|
|
20
|
+
]
|
|
21
|
+
keywords = ["llm", "actor", "concurrent", "parallel", "resilience", "circuit-breaker", "openai", "anthropic", "gigachat"]
|
|
22
|
+
classifiers = [
|
|
23
|
+
"Development Status :: 4 - Beta",
|
|
24
|
+
"Intended Audience :: Developers",
|
|
25
|
+
"License :: OSI Approved :: MIT License",
|
|
26
|
+
"Programming Language :: Python :: 3",
|
|
27
|
+
"Programming Language :: Python :: 3.13",
|
|
28
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
29
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
30
|
+
]
|
|
31
|
+
|
|
32
|
+
[project.urls]
|
|
33
|
+
Homepage = "https://github.com/SergeyArc/llm-actor"
|
|
34
|
+
Repository = "https://github.com/SergeyArc/llm-actor"
|
|
35
|
+
Issues = "https://github.com/SergeyArc/llm-actor/issues"
|
|
36
|
+
|
|
37
|
+
[project.optional-dependencies]
|
|
38
|
+
gigachat = ["gigachat"]
|
|
39
|
+
metrics = ["prometheus-client>=0.19.0"]
|
|
40
|
+
openai = ["openai"]
|
|
41
|
+
|
|
42
|
+
[dependency-groups]
|
|
43
|
+
dev = [
|
|
44
|
+
"pytest>=8.0.0",
|
|
45
|
+
"pytest-asyncio>=0.23.0",
|
|
46
|
+
"mypy>=1.11.0",
|
|
47
|
+
"prometheus-client>=0.19.0",
|
|
48
|
+
"ruff>=0.6.0",
|
|
49
|
+
"opentelemetry-sdk>=1.25.0",
|
|
50
|
+
]
|
|
51
|
+
|
|
52
|
+
[tool.hatch.build.targets.wheel]
|
|
53
|
+
packages = ["src/llm_actor"]
|
|
54
|
+
|
|
55
|
+
[tool.hatch.build.targets.sdist]
|
|
56
|
+
include = ["src/llm_actor"]
|
|
57
|
+
|
|
58
|
+
[tool.ruff]
|
|
59
|
+
line-length = 100
|
|
60
|
+
target-version = "py313"
|
|
61
|
+
|
|
62
|
+
[tool.ruff.lint]
|
|
63
|
+
select = [
|
|
64
|
+
"E",
|
|
65
|
+
"F",
|
|
66
|
+
"I",
|
|
67
|
+
"B",
|
|
68
|
+
"C4",
|
|
69
|
+
"UP",
|
|
70
|
+
"C90",
|
|
71
|
+
"N",
|
|
72
|
+
]
|
|
73
|
+
ignore = ["E501"]
|
|
74
|
+
|
|
75
|
+
[tool.ruff.format]
|
|
76
|
+
quote-style = "double"
|
|
77
|
+
indent-style = "space"
|
|
78
|
+
line-ending = "lf"
|
|
79
|
+
|
|
80
|
+
[tool.mypy]
|
|
81
|
+
python_version = "3.13"
|
|
82
|
+
mypy_path = "src"
|
|
83
|
+
files = ["src"]
|
|
84
|
+
check_untyped_defs = true
|
|
85
|
+
disallow_any_generics = true
|
|
86
|
+
disallow_incomplete_defs = true
|
|
87
|
+
disallow_untyped_defs = true
|
|
88
|
+
no_implicit_optional = true
|
|
89
|
+
warn_redundant_casts = true
|
|
90
|
+
warn_unused_ignores = true
|
|
91
|
+
warn_return_any = true
|
|
92
|
+
warn_unused_configs = true
|
|
93
|
+
strict_equality = true
|
|
94
|
+
show_error_codes = true
|
|
95
|
+
disallow_any_unimported = true
|
|
96
|
+
plugins = ["pydantic.mypy"]
|
|
97
|
+
|
|
98
|
+
[[tool.mypy.overrides]]
|
|
99
|
+
module = "anthropic"
|
|
100
|
+
ignore_missing_imports = true
|
|
101
|
+
|
|
102
|
+
[[tool.mypy.overrides]]
|
|
103
|
+
module = "openai"
|
|
104
|
+
ignore_missing_imports = true
|
|
105
|
+
|
|
106
|
+
[[tool.mypy.overrides]]
|
|
107
|
+
module = "gigachat"
|
|
108
|
+
ignore_missing_imports = true
|
|
109
|
+
|
|
110
|
+
[[tool.mypy.overrides]]
|
|
111
|
+
module = "gigachat.models"
|
|
112
|
+
ignore_missing_imports = true
|
|
113
|
+
|
|
114
|
+
[tool.pytest.ini_options]
|
|
115
|
+
pythonpath = ["src"]
|
|
116
|
+
testpaths = ["tests"]
|
|
117
|
+
asyncio_mode = "auto"
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
from llm_actor.actors.pool import HealthStatus, SupervisedActorPool
|
|
2
|
+
from llm_actor.actors.worker import ModelActor
|
|
3
|
+
from llm_actor.client.interface import (
|
|
4
|
+
LLMClientInterface,
|
|
5
|
+
LLMClientWithCircuitBreakerInterface,
|
|
6
|
+
ToolCapableClientInterface,
|
|
7
|
+
)
|
|
8
|
+
from llm_actor.client.llm import LLMClientWithCircuitBreaker, build_json_prompt
|
|
9
|
+
from llm_actor.client.retry import LLMClientWithRetry
|
|
10
|
+
from llm_actor.core.messages import ActorMessage
|
|
11
|
+
from llm_actor.core.request import LLMRequest
|
|
12
|
+
from llm_actor.core.tools import LLMResponse, Tool, ToolCall, ToolResult
|
|
13
|
+
from llm_actor.exceptions import (
|
|
14
|
+
ActorFailedError,
|
|
15
|
+
CircuitBreakerOpenError,
|
|
16
|
+
LLMActorError,
|
|
17
|
+
LLMServiceError,
|
|
18
|
+
LLMServiceGeneralError,
|
|
19
|
+
LLMServiceHTTPError,
|
|
20
|
+
LLMServiceOverloadedError,
|
|
21
|
+
LLMServiceTimeoutError,
|
|
22
|
+
LLMServiceUnavailableError,
|
|
23
|
+
OverloadError,
|
|
24
|
+
PoolShuttingDownError,
|
|
25
|
+
ToolExecutionError,
|
|
26
|
+
ToolExecutionTimeoutError,
|
|
27
|
+
ToolLoopMaxIterationsError,
|
|
28
|
+
)
|
|
29
|
+
from llm_actor.logger import ActorLogger
|
|
30
|
+
from llm_actor.metrics import (
|
|
31
|
+
MetricsCollector,
|
|
32
|
+
default_metrics_collector,
|
|
33
|
+
is_prometheus_metrics_available,
|
|
34
|
+
)
|
|
35
|
+
from llm_actor.resilience.circuit_breaker import CircuitBreaker
|
|
36
|
+
from llm_actor.service import LLMActorService
|
|
37
|
+
from llm_actor.settings import LLMActorSettings
|
|
38
|
+
|
|
39
|
+
__all__ = [
|
|
40
|
+
"ActorMessage",
|
|
41
|
+
"LLMRequest",
|
|
42
|
+
"LLMResponse",
|
|
43
|
+
"ActorFailedError",
|
|
44
|
+
"ActorLogger",
|
|
45
|
+
"CircuitBreaker",
|
|
46
|
+
"CircuitBreakerOpenError",
|
|
47
|
+
"HealthStatus",
|
|
48
|
+
"LLMActorError",
|
|
49
|
+
"LLMActorService",
|
|
50
|
+
"LLMActorSettings",
|
|
51
|
+
"LLMClientInterface",
|
|
52
|
+
"LLMClientWithCircuitBreaker",
|
|
53
|
+
"build_json_prompt",
|
|
54
|
+
"LLMClientWithCircuitBreakerInterface",
|
|
55
|
+
"ToolCapableClientInterface",
|
|
56
|
+
"LLMClientWithRetry",
|
|
57
|
+
"LLMServiceError",
|
|
58
|
+
"LLMServiceGeneralError",
|
|
59
|
+
"LLMServiceHTTPError",
|
|
60
|
+
"LLMServiceOverloadedError",
|
|
61
|
+
"LLMServiceTimeoutError",
|
|
62
|
+
"LLMServiceUnavailableError",
|
|
63
|
+
"MetricsCollector",
|
|
64
|
+
"default_metrics_collector",
|
|
65
|
+
"is_prometheus_metrics_available",
|
|
66
|
+
"ModelActor",
|
|
67
|
+
"OverloadError",
|
|
68
|
+
"PoolShuttingDownError",
|
|
69
|
+
"SupervisedActorPool",
|
|
70
|
+
"Tool",
|
|
71
|
+
"ToolCall",
|
|
72
|
+
"ToolExecutionError",
|
|
73
|
+
"ToolExecutionTimeoutError",
|
|
74
|
+
"ToolLoopMaxIterationsError",
|
|
75
|
+
"ToolResult",
|
|
76
|
+
]
|
|
File without changes
|