kalibr 1.2.3__tar.gz → 1.2.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kalibr-1.2.5/PKG-INFO +187 -0
- kalibr-1.2.5/README.md +120 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/__init__.py +2 -0
- kalibr-1.2.5/kalibr/router.py +370 -0
- kalibr-1.2.5/kalibr.egg-info/PKG-INFO +187 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr.egg-info/SOURCES.txt +5 -1
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr_langchain/__init__.py +3 -1
- kalibr-1.2.5/kalibr_langchain/chat_model.py +103 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/pyproject.toml +1 -1
- kalibr-1.2.5/tests/test_langchain_routing.py +122 -0
- kalibr-1.2.5/tests/test_router.py +56 -0
- kalibr-1.2.3/PKG-INFO +0 -384
- kalibr-1.2.3/README.md +0 -317
- kalibr-1.2.3/kalibr.egg-info/PKG-INFO +0 -384
- {kalibr-1.2.3 → kalibr-1.2.5}/LICENSE +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/__main__.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/capsule_middleware.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/cli/__init__.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/cli/capsule_cmd.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/cli/deploy_cmd.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/cli/main.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/cli/run.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/cli/serve.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/client.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/collector.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/context.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/cost_adapter.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/decorators.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/instrumentation/__init__.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/instrumentation/anthropic_instr.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/instrumentation/base.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/instrumentation/google_instr.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/instrumentation/openai_instr.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/instrumentation/registry.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/intelligence.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/kalibr.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/kalibr_app.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/middleware/__init__.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/middleware/auto_tracer.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/models.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/redaction.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/schemas.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/simple_tracer.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/tokens.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/trace_capsule.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/trace_models.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/tracer.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/types.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr/utils.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr.egg-info/dependency_links.txt +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr.egg-info/entry_points.txt +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr.egg-info/requires.txt +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr.egg-info/top_level.txt +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr_crewai/__init__.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr_crewai/callbacks.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr_crewai/instrumentor.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr_langchain/async_callback.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr_langchain/callback.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr_openai_agents/__init__.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/kalibr_openai_agents/processor.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/setup.cfg +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/tests/test_capsule_builder.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/tests/test_instrumentation.py +0 -0
- {kalibr-1.2.3 → kalibr-1.2.5}/tests/test_intelligence.py +0 -0
kalibr-1.2.5/PKG-INFO
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
Metadata-Version: 2.2
|
|
2
|
+
Name: kalibr
|
|
3
|
+
Version: 1.2.5
|
|
4
|
+
Summary: Unified LLM Observability & Multi-Model AI Integration Framework - Deploy to GPT, Claude, Gemini, Copilot with full telemetry.
|
|
5
|
+
Author-email: Kalibr Team <support@kalibr.systems>
|
|
6
|
+
License: Apache-2.0
|
|
7
|
+
Project-URL: Homepage, https://github.com/kalibr-ai/kalibr-sdk-python
|
|
8
|
+
Project-URL: Documentation, https://kalibr.systems/docs
|
|
9
|
+
Project-URL: Repository, https://github.com/kalibr-ai/kalibr-sdk-python
|
|
10
|
+
Project-URL: Issues, https://github.com/kalibr-ai/kalibr-sdk-python/issues
|
|
11
|
+
Keywords: ai,mcp,gpt,claude,gemini,copilot,openai,anthropic,google,microsoft,observability,telemetry,tracing,llm,schema-generation,api,multi-model,langchain,crewai
|
|
12
|
+
Classifier: Development Status :: 4 - Beta
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
21
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
22
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
23
|
+
Requires-Python: >=3.9
|
|
24
|
+
Description-Content-Type: text/markdown
|
|
25
|
+
License-File: LICENSE
|
|
26
|
+
Requires-Dist: httpx>=0.27.0
|
|
27
|
+
Requires-Dist: tiktoken>=0.8.0
|
|
28
|
+
Requires-Dist: fastapi>=0.110.1
|
|
29
|
+
Requires-Dist: uvicorn>=0.25.0
|
|
30
|
+
Requires-Dist: pydantic>=2.6.4
|
|
31
|
+
Requires-Dist: typer>=0.9.0
|
|
32
|
+
Requires-Dist: python-multipart>=0.0.9
|
|
33
|
+
Requires-Dist: rich>=10.0.0
|
|
34
|
+
Requires-Dist: requests>=2.31.0
|
|
35
|
+
Requires-Dist: opentelemetry-api>=1.20.0
|
|
36
|
+
Requires-Dist: opentelemetry-sdk>=1.20.0
|
|
37
|
+
Requires-Dist: opentelemetry-exporter-otlp>=1.20.0
|
|
38
|
+
Provides-Extra: langchain
|
|
39
|
+
Requires-Dist: langchain-core>=0.1.0; extra == "langchain"
|
|
40
|
+
Provides-Extra: langchain-openai
|
|
41
|
+
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-openai"
|
|
42
|
+
Requires-Dist: langchain-openai>=0.1.0; extra == "langchain-openai"
|
|
43
|
+
Provides-Extra: langchain-anthropic
|
|
44
|
+
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-anthropic"
|
|
45
|
+
Requires-Dist: langchain-anthropic>=0.1.0; extra == "langchain-anthropic"
|
|
46
|
+
Provides-Extra: langchain-google
|
|
47
|
+
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-google"
|
|
48
|
+
Requires-Dist: langchain-google-genai>=0.0.10; extra == "langchain-google"
|
|
49
|
+
Provides-Extra: langchain-all
|
|
50
|
+
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-all"
|
|
51
|
+
Requires-Dist: langchain-openai>=0.1.0; extra == "langchain-all"
|
|
52
|
+
Requires-Dist: langchain-anthropic>=0.1.0; extra == "langchain-all"
|
|
53
|
+
Requires-Dist: langchain-google-genai>=0.0.10; extra == "langchain-all"
|
|
54
|
+
Provides-Extra: crewai
|
|
55
|
+
Requires-Dist: crewai>=0.28.0; extra == "crewai"
|
|
56
|
+
Provides-Extra: openai-agents
|
|
57
|
+
Requires-Dist: openai-agents>=0.0.3; extra == "openai-agents"
|
|
58
|
+
Provides-Extra: integrations
|
|
59
|
+
Requires-Dist: langchain-core>=0.1.0; extra == "integrations"
|
|
60
|
+
Requires-Dist: crewai>=0.28.0; extra == "integrations"
|
|
61
|
+
Requires-Dist: openai-agents>=0.0.3; extra == "integrations"
|
|
62
|
+
Provides-Extra: dev
|
|
63
|
+
Requires-Dist: pytest>=7.4.0; extra == "dev"
|
|
64
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
65
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
66
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
67
|
+
|
|
68
|
+
# Kalibr
|
|
69
|
+
|
|
70
|
+
Adaptive routing for AI agents. Kalibr learns which models, tools, and configs work best for each task and routes automatically.
|
|
71
|
+
|
|
72
|
+
[](https://pypi.org/project/kalibr/)
|
|
73
|
+
[](https://pypi.org/project/kalibr/)
|
|
74
|
+
[](LICENSE)
|
|
75
|
+
|
|
76
|
+
## Installation
|
|
77
|
+
```bash
|
|
78
|
+
pip install kalibr
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
## Quick Start
|
|
82
|
+
```python
|
|
83
|
+
from kalibr import Router
|
|
84
|
+
|
|
85
|
+
router = Router(
|
|
86
|
+
goal="book_meeting",
|
|
87
|
+
paths=["gpt-4o", "claude-sonnet-4-20250514", "gpt-4o-mini"],
|
|
88
|
+
success_when=lambda output: "confirmed" in output.lower()
|
|
89
|
+
)
|
|
90
|
+
|
|
91
|
+
response = router.completion(
|
|
92
|
+
messages=[{"role": "user", "content": "Book a meeting with John tomorrow"}]
|
|
93
|
+
)
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
Kalibr picks the best model, makes the call, checks success, and learns for next time.
|
|
97
|
+
|
|
98
|
+
## Paths
|
|
99
|
+
|
|
100
|
+
A path is a model + optional tools + optional params:
|
|
101
|
+
```python
|
|
102
|
+
# Just models
|
|
103
|
+
paths = ["gpt-4o", "claude-sonnet-4-20250514"]
|
|
104
|
+
|
|
105
|
+
# With tools
|
|
106
|
+
paths = [
|
|
107
|
+
{"model": "gpt-4o", "tools": ["web_search"]},
|
|
108
|
+
{"model": "claude-sonnet-4-20250514", "tools": ["web_search", "browser"]},
|
|
109
|
+
]
|
|
110
|
+
|
|
111
|
+
# With params
|
|
112
|
+
paths = [
|
|
113
|
+
{"model": "gpt-4o", "params": {"temperature": 0.7}},
|
|
114
|
+
{"model": "gpt-4o", "params": {"temperature": 0.2}},
|
|
115
|
+
]
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
## Manual Outcome Reporting
|
|
119
|
+
```python
|
|
120
|
+
router = Router(goal="book_meeting", paths=["gpt-4o", "claude-sonnet-4-20250514"])
|
|
121
|
+
response = router.completion(messages=[...])
|
|
122
|
+
|
|
123
|
+
meeting_created = check_calendar_api()
|
|
124
|
+
router.report(success=meeting_created)
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
## LangChain Integration
|
|
128
|
+
```bash
|
|
129
|
+
pip install "kalibr[langchain]"
|
|
130
|
+
```
|
|
131
|
+
```python
|
|
132
|
+
from kalibr import Router
|
|
133
|
+
|
|
134
|
+
router = Router(goal="summarize", paths=["gpt-4o", "claude-sonnet-4-20250514"])
|
|
135
|
+
llm = router.as_langchain()
|
|
136
|
+
|
|
137
|
+
chain = prompt | llm | parser
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
## Auto-Instrumentation
|
|
141
|
+
|
|
142
|
+
Kalibr auto-instruments OpenAI, Anthropic, and Google SDKs when imported:
|
|
143
|
+
```python
|
|
144
|
+
import kalibr # Must be first import
|
|
145
|
+
from openai import OpenAI
|
|
146
|
+
|
|
147
|
+
client = OpenAI()
|
|
148
|
+
response = client.chat.completions.create(model="gpt-4o", messages=[...])
|
|
149
|
+
# Traced automatically
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
Disable with `KALIBR_AUTO_INSTRUMENT=false`.
|
|
153
|
+
|
|
154
|
+
## Other Integrations
|
|
155
|
+
```bash
|
|
156
|
+
pip install kalibr[crewai] # CrewAI
|
|
157
|
+
pip install kalibr[openai-agents] # OpenAI Agents SDK
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
## Configuration
|
|
161
|
+
|
|
162
|
+
| Variable | Description | Default |
|
|
163
|
+
|----------|-------------|---------|
|
|
164
|
+
| `KALIBR_API_KEY` | API key | Required |
|
|
165
|
+
| `KALIBR_TENANT_ID` | Tenant ID | `default` |
|
|
166
|
+
| `KALIBR_AUTO_INSTRUMENT` | Auto-instrument SDKs | `true` |
|
|
167
|
+
|
|
168
|
+
## Development
|
|
169
|
+
```bash
|
|
170
|
+
git clone https://github.com/kalibr-ai/kalibr-sdk-python.git
|
|
171
|
+
cd kalibr-sdk-python
|
|
172
|
+
pip install -e ".[dev]"
|
|
173
|
+
pytest
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
## Contributing
|
|
177
|
+
|
|
178
|
+
See [CONTRIBUTING.md](CONTRIBUTING.md).
|
|
179
|
+
|
|
180
|
+
## License
|
|
181
|
+
|
|
182
|
+
Apache-2.0
|
|
183
|
+
|
|
184
|
+
## Links
|
|
185
|
+
|
|
186
|
+
- [Docs](https://kalibr.systems/docs)
|
|
187
|
+
- [Dashboard](https://dashboard.kalibr.systems)
|
kalibr-1.2.5/README.md
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
# Kalibr
|
|
2
|
+
|
|
3
|
+
Adaptive routing for AI agents. Kalibr learns which models, tools, and configs work best for each task and routes automatically.
|
|
4
|
+
|
|
5
|
+
[](https://pypi.org/project/kalibr/)
|
|
6
|
+
[](https://pypi.org/project/kalibr/)
|
|
7
|
+
[](LICENSE)
|
|
8
|
+
|
|
9
|
+
## Installation
|
|
10
|
+
```bash
|
|
11
|
+
pip install kalibr
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
## Quick Start
|
|
15
|
+
```python
|
|
16
|
+
from kalibr import Router
|
|
17
|
+
|
|
18
|
+
router = Router(
|
|
19
|
+
goal="book_meeting",
|
|
20
|
+
paths=["gpt-4o", "claude-sonnet-4-20250514", "gpt-4o-mini"],
|
|
21
|
+
success_when=lambda output: "confirmed" in output.lower()
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
response = router.completion(
|
|
25
|
+
messages=[{"role": "user", "content": "Book a meeting with John tomorrow"}]
|
|
26
|
+
)
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
Kalibr picks the best model, makes the call, checks success, and learns for next time.
|
|
30
|
+
|
|
31
|
+
## Paths
|
|
32
|
+
|
|
33
|
+
A path is a model + optional tools + optional params:
|
|
34
|
+
```python
|
|
35
|
+
# Just models
|
|
36
|
+
paths = ["gpt-4o", "claude-sonnet-4-20250514"]
|
|
37
|
+
|
|
38
|
+
# With tools
|
|
39
|
+
paths = [
|
|
40
|
+
{"model": "gpt-4o", "tools": ["web_search"]},
|
|
41
|
+
{"model": "claude-sonnet-4-20250514", "tools": ["web_search", "browser"]},
|
|
42
|
+
]
|
|
43
|
+
|
|
44
|
+
# With params
|
|
45
|
+
paths = [
|
|
46
|
+
{"model": "gpt-4o", "params": {"temperature": 0.7}},
|
|
47
|
+
{"model": "gpt-4o", "params": {"temperature": 0.2}},
|
|
48
|
+
]
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
## Manual Outcome Reporting
|
|
52
|
+
```python
|
|
53
|
+
router = Router(goal="book_meeting", paths=["gpt-4o", "claude-sonnet-4-20250514"])
|
|
54
|
+
response = router.completion(messages=[...])
|
|
55
|
+
|
|
56
|
+
meeting_created = check_calendar_api()
|
|
57
|
+
router.report(success=meeting_created)
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
## LangChain Integration
|
|
61
|
+
```bash
|
|
62
|
+
pip install "kalibr[langchain]"
|
|
63
|
+
```
|
|
64
|
+
```python
|
|
65
|
+
from kalibr import Router
|
|
66
|
+
|
|
67
|
+
router = Router(goal="summarize", paths=["gpt-4o", "claude-sonnet-4-20250514"])
|
|
68
|
+
llm = router.as_langchain()
|
|
69
|
+
|
|
70
|
+
chain = prompt | llm | parser
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Auto-Instrumentation
|
|
74
|
+
|
|
75
|
+
Kalibr auto-instruments OpenAI, Anthropic, and Google SDKs when imported:
|
|
76
|
+
```python
|
|
77
|
+
import kalibr # Must be first import
|
|
78
|
+
from openai import OpenAI
|
|
79
|
+
|
|
80
|
+
client = OpenAI()
|
|
81
|
+
response = client.chat.completions.create(model="gpt-4o", messages=[...])
|
|
82
|
+
# Traced automatically
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
Disable with `KALIBR_AUTO_INSTRUMENT=false`.
|
|
86
|
+
|
|
87
|
+
## Other Integrations
|
|
88
|
+
```bash
|
|
89
|
+
pip install kalibr[crewai] # CrewAI
|
|
90
|
+
pip install kalibr[openai-agents] # OpenAI Agents SDK
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
## Configuration
|
|
94
|
+
|
|
95
|
+
| Variable | Description | Default |
|
|
96
|
+
|----------|-------------|---------|
|
|
97
|
+
| `KALIBR_API_KEY` | API key | Required |
|
|
98
|
+
| `KALIBR_TENANT_ID` | Tenant ID | `default` |
|
|
99
|
+
| `KALIBR_AUTO_INSTRUMENT` | Auto-instrument SDKs | `true` |
|
|
100
|
+
|
|
101
|
+
## Development
|
|
102
|
+
```bash
|
|
103
|
+
git clone https://github.com/kalibr-ai/kalibr-sdk-python.git
|
|
104
|
+
cd kalibr-sdk-python
|
|
105
|
+
pip install -e ".[dev]"
|
|
106
|
+
pytest
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
## Contributing
|
|
110
|
+
|
|
111
|
+
See [CONTRIBUTING.md](CONTRIBUTING.md).
|
|
112
|
+
|
|
113
|
+
## License
|
|
114
|
+
|
|
115
|
+
Apache-2.0
|
|
116
|
+
|
|
117
|
+
## Links
|
|
118
|
+
|
|
119
|
+
- [Docs](https://kalibr.systems/docs)
|
|
120
|
+
- [Dashboard](https://dashboard.kalibr.systems)
|
|
@@ -92,6 +92,7 @@ from .intelligence import (
|
|
|
92
92
|
register_path,
|
|
93
93
|
decide,
|
|
94
94
|
)
|
|
95
|
+
from .router import Router
|
|
95
96
|
|
|
96
97
|
if os.getenv("KALIBR_AUTO_INSTRUMENT", "true").lower() == "true":
|
|
97
98
|
# Setup OpenTelemetry collector
|
|
@@ -163,4 +164,5 @@ __all__ = [
|
|
|
163
164
|
"get_recommendation",
|
|
164
165
|
"register_path",
|
|
165
166
|
"decide",
|
|
167
|
+
"Router",
|
|
166
168
|
]
|
|
@@ -0,0 +1,370 @@
|
|
|
1
|
+
"""
Kalibr Router - Intelligent model routing with outcome learning.
"""

import os
import logging
from typing import Any, Callable, Dict, List, Optional, Union

logger = logging.getLogger(__name__)

# Path spec accepted from callers: either a bare model name ("gpt-4o") or a
# dict with "model" (or "model_id"), and optional "tools" / "params" keys.
PathSpec = Union[str, Dict[str, Any]]


class Router:
    """
    Routes LLM requests to the best model based on learned outcomes.

    Example:
        router = Router(
            goal="summarize",
            paths=["gpt-4o", "claude-3-sonnet"],
            success_when=lambda out: len(out) > 100
        )
        response = router.completion(messages=[...])
    """

    def __init__(
        self,
        goal: str,
        paths: Optional[List[PathSpec]] = None,
        success_when: Optional[Callable[[str], bool]] = None,
        exploration_rate: Optional[float] = None,
        auto_register: bool = True,
    ):
        """
        Initialize router.

        Args:
            goal: Name of the goal (e.g., "book_meeting", "summarize")
            paths: List of models or path configs. Examples:
                ["gpt-4o", "claude-3-sonnet"]
                [{"model": "gpt-4o", "tools": ["search"]}]
            success_when: Optional function to auto-evaluate success from output
            exploration_rate: Override exploration rate (0.0-1.0)
            auto_register: If True, register paths on init
        """
        self.goal = goal
        self.success_when = success_when
        self.exploration_rate = exploration_rate
        self._last_trace_id: Optional[str] = None
        self._last_decision: Optional[dict] = None
        self._outcome_reported = False

        # Normalize paths to a consistent list-of-dicts form; default to a
        # single "gpt-4o" path when none are given.
        self._paths = self._normalize_paths(paths or ["gpt-4o"])

        # Register paths with the intelligence service if requested.
        if auto_register:
            self._register_paths()

    def _normalize_paths(self, paths: List[PathSpec]) -> List[Dict[str, Any]]:
        """Convert path specs to a consistent {"model", "tools", "params"} format.

        Raises:
            ValueError: If a spec is neither a string nor a dict.
        """
        normalized: List[Dict[str, Any]] = []
        for p in paths:
            if isinstance(p, str):
                normalized.append({"model": p, "tools": None, "params": None})
            elif isinstance(p, dict):
                normalized.append({
                    # Accept both "model"/"model_id" and "tools"/"tool_id" keys.
                    "model": p.get("model") or p.get("model_id"),
                    "tools": p.get("tools") or p.get("tool_id"),
                    "params": p.get("params"),
                })
            else:
                raise ValueError(f"Invalid path spec: {p}")
        return normalized

    def _register_paths(self):
        """Register all configured paths with the intelligence service."""
        from kalibr.intelligence import register_path

        for path in self._paths:
            try:
                register_path(
                    goal=self.goal,
                    model_id=path["model"],
                    # register_path takes a single tool id; use the first tool
                    # when a list was supplied, otherwise pass through as-is.
                    tool_id=path["tools"][0] if isinstance(path["tools"], list) and path["tools"] else path["tools"],
                    params=path["params"],
                )
            except Exception as e:
                # Log but don't fail - path might already exist
                logger.debug(f"Path registration note: {e}")

    def completion(
        self,
        messages: List[Dict[str, str]],
        force_model: Optional[str] = None,
        **kwargs
    ) -> Any:
        """
        Make a completion request with intelligent routing.

        Args:
            messages: OpenAI-format messages
            force_model: Override routing and use this model
            **kwargs: Additional args passed to provider

        Returns:
            OpenAI-compatible ChatCompletion response

        Raises:
            Exception: Re-raises any provider error after reporting failure.
        """
        from kalibr.intelligence import decide
        from kalibr.context import get_trace_id

        # Reset state for new request
        self._outcome_reported = False

        # Get routing decision (or use forced model)
        if force_model:
            model_id = force_model
            tool_id = None
            params = {}
            self._last_decision = {"model_id": model_id, "forced": True}
        else:
            try:
                decision = decide(goal=self.goal)
                model_id = decision.get("model_id") or self._paths[0]["model"]
                tool_id = decision.get("tool_id")
                params = decision.get("params") or {}
                self._last_decision = decision
            except Exception as e:
                # Fallback to first path if routing fails
                logger.warning(f"Routing failed, using fallback: {e}")
                model_id = self._paths[0]["model"]
                tool_id = self._paths[0].get("tools")
                params = self._paths[0].get("params") or {}
                self._last_decision = {"model_id": model_id, "fallback": True, "error": str(e)}

        # Dispatch to provider; caller kwargs override routed params.
        try:
            response = self._dispatch(model_id, messages, tool_id, **{**params, **kwargs})
            self._last_trace_id = get_trace_id()

            # Auto-report if success_when provided
            if self.success_when and not self._outcome_reported:
                try:
                    output = response.choices[0].message.content or ""
                    success = self.success_when(output)
                    self.report(success=success)
                except Exception as e:
                    logger.warning(f"Auto-outcome evaluation failed: {e}")

            return response

        except Exception as e:
            # Auto-report failure, then re-raise the provider error.
            self._last_trace_id = get_trace_id()
            if not self._outcome_reported:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are not swallowed; reporting is best-effort only.
                try:
                    self.report(success=False, reason=f"provider_error: {type(e).__name__}")
                except Exception as report_err:
                    logger.debug(f"Failure outcome reporting failed: {report_err}")
            raise

    def report(
        self,
        success: bool,
        reason: Optional[str] = None,
        score: Optional[float] = None,
    ):
        """
        Report outcome for the last completion.

        Args:
            success: Whether the task succeeded
            reason: Optional failure reason
            score: Optional quality score (0.0-1.0)
        """
        if self._outcome_reported:
            logger.warning("Outcome already reported for this request")
            return

        from kalibr.intelligence import report_outcome
        from kalibr.context import get_trace_id

        trace_id = self._last_trace_id or get_trace_id()
        if not trace_id:
            logger.warning("No trace_id available for outcome reporting")
            return

        try:
            report_outcome(
                trace_id=trace_id,
                goal=self.goal,
                success=success,
                score=score,
                failure_reason=reason,
            )
            self._outcome_reported = True
        except Exception as e:
            logger.warning(f"Failed to report outcome: {e}")

    def add_path(
        self,
        model: str,
        tools: Optional[List[str]] = None,
        params: Optional[Dict] = None,
    ):
        """Add a new path dynamically and register it with the intelligence service."""
        from kalibr.intelligence import register_path

        path = {"model": model, "tools": tools, "params": params}
        self._paths.append(path)

        register_path(
            goal=self.goal,
            model_id=model,
            tool_id=tools[0] if tools else None,
            params=params,
        )

    def _dispatch(
        self,
        model_id: str,
        messages: List[Dict],
        tools: Optional[Any] = None,
        **kwargs
    ) -> Any:
        """Dispatch to the appropriate provider based on the model id prefix."""
        if model_id.startswith(("gpt-", "o1-", "o3-")):
            return self._call_openai(model_id, messages, tools, **kwargs)
        elif model_id.startswith("claude-"):
            return self._call_anthropic(model_id, messages, tools, **kwargs)
        elif model_id.startswith(("gemini-", "models/gemini")):
            return self._call_google(model_id, messages, tools, **kwargs)
        else:
            # Default to OpenAI-compatible
            logger.info(f"Unknown model prefix '{model_id}', trying OpenAI")
            return self._call_openai(model_id, messages, tools, **kwargs)

    def _call_openai(self, model: str, messages: List[Dict], tools: Any, **kwargs) -> Any:
        """Call OpenAI API."""
        try:
            from openai import OpenAI
        except ImportError:
            raise ImportError("Install 'openai' package: pip install openai")

        client = OpenAI()

        call_kwargs = {"model": model, "messages": messages, **kwargs}
        if tools:
            call_kwargs["tools"] = tools

        return client.chat.completions.create(**call_kwargs)

    def _call_anthropic(self, model: str, messages: List[Dict], tools: Any, **kwargs) -> Any:
        """Call Anthropic API and convert response to OpenAI format."""
        try:
            from anthropic import Anthropic
        except ImportError:
            raise ImportError("Install 'anthropic' package: pip install anthropic")

        client = Anthropic()

        # Convert messages: Anthropic takes the system prompt as a top-level
        # argument, not as a message role.
        system = None
        anthropic_messages = []
        for m in messages:
            if m["role"] == "system":
                system = m["content"]
            else:
                anthropic_messages.append({"role": m["role"], "content": m["content"]})

        # max_tokens is required by the Anthropic API; default to 4096.
        call_kwargs = {"model": model, "messages": anthropic_messages, "max_tokens": kwargs.pop("max_tokens", 4096)}
        if system:
            call_kwargs["system"] = system
        if tools:
            call_kwargs["tools"] = tools
        call_kwargs.update(kwargs)

        response = client.messages.create(**call_kwargs)

        # Convert to OpenAI format
        return self._anthropic_to_openai_response(response, model)

    def _call_google(self, model: str, messages: List[Dict], tools: Any, **kwargs) -> Any:
        """Call Google API and convert response to OpenAI format."""
        try:
            import google.generativeai as genai
        except ImportError:
            raise ImportError("Install 'google-generativeai' package: pip install google-generativeai")

        # Configure if API key available
        api_key = os.environ.get("GOOGLE_API_KEY")
        if api_key:
            genai.configure(api_key=api_key)

        # Strip only the leading "models/" namespace prefix (slicing, not
        # str.replace, so interior occurrences are never touched).
        model_name = model[len("models/"):] if model.startswith("models/") else model
        gmodel = genai.GenerativeModel(model_name)

        # Simple conversion - concatenate messages into a single prompt.
        prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages])

        response = gmodel.generate_content(prompt)

        # Convert to OpenAI format
        return self._google_to_openai_response(response, model)

    def _anthropic_to_openai_response(self, response: Any, model: str) -> Any:
        """Convert an Anthropic Messages response to an OpenAI-shaped namespace."""
        from types import SimpleNamespace

        content = ""
        if response.content:
            # Content blocks carry .text for text blocks; stringify otherwise.
            content = response.content[0].text if hasattr(response.content[0], "text") else str(response.content[0])

        return SimpleNamespace(
            id=response.id,
            model=model,
            choices=[
                SimpleNamespace(
                    index=0,
                    message=SimpleNamespace(
                        role="assistant",
                        content=content,
                    ),
                    finish_reason=response.stop_reason,
                )
            ],
            usage=SimpleNamespace(
                prompt_tokens=response.usage.input_tokens,
                completion_tokens=response.usage.output_tokens,
                total_tokens=response.usage.input_tokens + response.usage.output_tokens,
            ),
        )

    def _google_to_openai_response(self, response: Any, model: str) -> Any:
        """Convert a Google GenerateContentResponse to an OpenAI-shaped namespace."""
        from types import SimpleNamespace
        import uuid

        content = response.text if hasattr(response, "text") else str(response)

        # usage_metadata on google-generativeai responses is an object with
        # *_token_count attributes, not a dict (the previous code called
        # .get() on it, which raised AttributeError whenever usage metadata
        # was present). Support both attribute and dict shapes defensively.
        usage_meta = getattr(response, "usage_metadata", None)

        def _token_count(field: str) -> int:
            # Best-effort token extraction; missing metadata yields 0.
            if usage_meta is None:
                return 0
            if isinstance(usage_meta, dict):
                return usage_meta.get(field, 0)
            return getattr(usage_meta, field, 0)

        return SimpleNamespace(
            id=f"google-{uuid.uuid4().hex[:8]}",
            model=model,
            choices=[
                SimpleNamespace(
                    index=0,
                    message=SimpleNamespace(
                        role="assistant",
                        content=content,
                    ),
                    finish_reason="stop",
                )
            ],
            usage=SimpleNamespace(
                prompt_tokens=_token_count("prompt_token_count"),
                completion_tokens=_token_count("candidates_token_count"),
                total_tokens=_token_count("total_token_count"),
            ),
        )

    def as_langchain(self):
        """Return a LangChain-compatible chat model wrapping this router."""
        try:
            from kalibr_langchain.chat_model import KalibrChatModel
            return KalibrChatModel(router=self)
        except ImportError:
            raise ImportError("Install 'kalibr-langchain' package for LangChain integration")
|