kalibr 1.2.2__py3-none-any.whl → 1.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kalibr/__init__.py +2 -0
- kalibr/router.py +370 -0
- kalibr-1.2.4.dist-info/METADATA +233 -0
- {kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/RECORD +11 -9
- kalibr_crewai/instrumentor.py +32 -0
- kalibr_langchain/__init__.py +3 -1
- kalibr_langchain/chat_model.py +103 -0
- kalibr-1.2.2.dist-info/METADATA +0 -384
- {kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/LICENSE +0 -0
- {kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/WHEEL +0 -0
- {kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/entry_points.txt +0 -0
- {kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/top_level.txt +0 -0
kalibr/__init__.py
CHANGED
```diff
@@ -92,6 +92,7 @@ from .intelligence import (
     register_path,
     decide,
 )
+from .router import Router
 
 if os.getenv("KALIBR_AUTO_INSTRUMENT", "true").lower() == "true":
     # Setup OpenTelemetry collector
@@ -163,4 +164,5 @@ __all__ = [
     "get_recommendation",
     "register_path",
     "decide",
+    "Router",
 ]
```
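The net effect of this hunk is that the new Router class is re-exported from the package root. A minimal sketch of what that enables (goal, paths, and success check are illustrative values echoing the 1.2.4 README, not part of this hunk):

```python
# Sketch only; assumes KALIBR_API_KEY and provider credentials are configured.
from kalibr import Router  # new top-level export in 1.2.4

router = Router(
    goal="summarize",
    paths=["gpt-4o", "claude-3-sonnet"],
    success_when=lambda out: len(out) > 100,  # auto-reported after each call
)
```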
kalibr/router.py
ADDED
```python
"""
Kalibr Router - Intelligent model routing with outcome learning.
"""

import os
import logging
from typing import Any, Callable, Dict, List, Optional, Union

logger = logging.getLogger(__name__)

# Type for paths - either string or dict
PathSpec = Union[str, Dict[str, Any]]


class Router:
    """
    Routes LLM requests to the best model based on learned outcomes.

    Example:
        router = Router(
            goal="summarize",
            paths=["gpt-4o", "claude-3-sonnet"],
            success_when=lambda out: len(out) > 100
        )
        response = router.completion(messages=[...])
    """

    def __init__(
        self,
        goal: str,
        paths: Optional[List[PathSpec]] = None,
        success_when: Optional[Callable[[str], bool]] = None,
        exploration_rate: Optional[float] = None,
        auto_register: bool = True,
    ):
        """
        Initialize router.

        Args:
            goal: Name of the goal (e.g., "book_meeting", "summarize")
            paths: List of models or path configs. Examples:
                ["gpt-4o", "claude-3-sonnet"]
                [{"model": "gpt-4o", "tools": ["search"]}]
            success_when: Optional function to auto-evaluate success from output
            exploration_rate: Override exploration rate (0.0-1.0)
            auto_register: If True, register paths on init
        """
        self.goal = goal
        self.success_when = success_when
        self.exploration_rate = exploration_rate
        self._last_trace_id: Optional[str] = None
        self._last_decision: Optional[dict] = None
        self._outcome_reported = False

        # Normalize paths to list of dicts
        self._paths = self._normalize_paths(paths or ["gpt-4o"])

        # Register paths if requested
        if auto_register:
            self._register_paths()

    def _normalize_paths(self, paths: List[PathSpec]) -> List[Dict[str, Any]]:
        """Convert paths to consistent format."""
        normalized = []
        for p in paths:
            if isinstance(p, str):
                normalized.append({"model": p, "tools": None, "params": None})
            elif isinstance(p, dict):
                normalized.append({
                    "model": p.get("model") or p.get("model_id"),
                    "tools": p.get("tools") or p.get("tool_id"),
                    "params": p.get("params"),
                })
            else:
                raise ValueError(f"Invalid path spec: {p}")
        return normalized

    def _register_paths(self):
        """Register paths with intelligence service."""
        from kalibr.intelligence import register_path

        for path in self._paths:
            try:
                register_path(
                    goal=self.goal,
                    model_id=path["model"],
                    tool_id=path["tools"][0] if isinstance(path["tools"], list) and path["tools"] else path["tools"],
                    params=path["params"],
                )
            except Exception as e:
                # Log but don't fail - path might already exist
                logger.debug(f"Path registration note: {e}")

    def completion(
        self,
        messages: List[Dict[str, str]],
        force_model: Optional[str] = None,
        **kwargs
    ) -> Any:
        """
        Make a completion request with intelligent routing.

        Args:
            messages: OpenAI-format messages
            force_model: Override routing and use this model
            **kwargs: Additional args passed to provider

        Returns:
            OpenAI-compatible ChatCompletion response
        """
        from kalibr.intelligence import decide
        from kalibr.context import get_trace_id

        # Reset state for new request
        self._outcome_reported = False

        # Get routing decision (or use forced model)
        if force_model:
            model_id = force_model
            tool_id = None
            params = {}
            self._last_decision = {"model_id": model_id, "forced": True}
        else:
            try:
                decision = decide(goal=self.goal)
                model_id = decision.get("model_id") or self._paths[0]["model"]
                tool_id = decision.get("tool_id")
                params = decision.get("params") or {}
                self._last_decision = decision
            except Exception as e:
                # Fallback to first path if routing fails
                logger.warning(f"Routing failed, using fallback: {e}")
                model_id = self._paths[0]["model"]
                tool_id = self._paths[0].get("tools")
                params = self._paths[0].get("params") or {}
                self._last_decision = {"model_id": model_id, "fallback": True, "error": str(e)}

        # Dispatch to provider
        try:
            response = self._dispatch(model_id, messages, tool_id, **{**params, **kwargs})
            self._last_trace_id = get_trace_id()

            # Auto-report if success_when provided
            if self.success_when and not self._outcome_reported:
                try:
                    output = response.choices[0].message.content or ""
                    success = self.success_when(output)
                    self.report(success=success)
                except Exception as e:
                    logger.warning(f"Auto-outcome evaluation failed: {e}")

            return response

        except Exception as e:
            # Auto-report failure
            self._last_trace_id = get_trace_id()
            if not self._outcome_reported:
                try:
                    self.report(success=False, reason=f"provider_error: {type(e).__name__}")
                except:
                    pass
            raise

    def report(
        self,
        success: bool,
        reason: Optional[str] = None,
        score: Optional[float] = None,
    ):
        """
        Report outcome for the last completion.

        Args:
            success: Whether the task succeeded
            reason: Optional failure reason
            score: Optional quality score (0.0-1.0)
        """
        if self._outcome_reported:
            logger.warning("Outcome already reported for this request")
            return

        from kalibr.intelligence import report_outcome
        from kalibr.context import get_trace_id

        trace_id = self._last_trace_id or get_trace_id()
        if not trace_id:
            logger.warning("No trace_id available for outcome reporting")
            return

        try:
            report_outcome(
                trace_id=trace_id,
                goal=self.goal,
                success=success,
                score=score,
                failure_reason=reason,
            )
            self._outcome_reported = True
        except Exception as e:
            logger.warning(f"Failed to report outcome: {e}")

    def add_path(
        self,
        model: str,
        tools: Optional[List[str]] = None,
        params: Optional[Dict] = None,
    ):
        """Add a new path dynamically."""
        from kalibr.intelligence import register_path

        path = {"model": model, "tools": tools, "params": params}
        self._paths.append(path)

        register_path(
            goal=self.goal,
            model_id=model,
            tool_id=tools[0] if tools else None,
            params=params,
        )

    def _dispatch(
        self,
        model_id: str,
        messages: List[Dict],
        tools: Optional[Any] = None,
        **kwargs
    ) -> Any:
        """Dispatch to the appropriate provider."""
        if model_id.startswith(("gpt-", "o1-", "o3-")):
            return self._call_openai(model_id, messages, tools, **kwargs)
        elif model_id.startswith("claude-"):
            return self._call_anthropic(model_id, messages, tools, **kwargs)
        elif model_id.startswith(("gemini-", "models/gemini")):
            return self._call_google(model_id, messages, tools, **kwargs)
        else:
            # Default to OpenAI-compatible
            logger.info(f"Unknown model prefix '{model_id}', trying OpenAI")
            return self._call_openai(model_id, messages, tools, **kwargs)

    def _call_openai(self, model: str, messages: List[Dict], tools: Any, **kwargs) -> Any:
        """Call OpenAI API."""
        try:
            from openai import OpenAI
        except ImportError:
            raise ImportError("Install 'openai' package: pip install openai")

        client = OpenAI()

        call_kwargs = {"model": model, "messages": messages, **kwargs}
        if tools:
            call_kwargs["tools"] = tools

        return client.chat.completions.create(**call_kwargs)

    def _call_anthropic(self, model: str, messages: List[Dict], tools: Any, **kwargs) -> Any:
        """Call Anthropic API and convert response to OpenAI format."""
        try:
            from anthropic import Anthropic
        except ImportError:
            raise ImportError("Install 'anthropic' package: pip install anthropic")

        client = Anthropic()

        # Convert messages (handle system message)
        system = None
        anthropic_messages = []
        for m in messages:
            if m["role"] == "system":
                system = m["content"]
            else:
                anthropic_messages.append({"role": m["role"], "content": m["content"]})

        call_kwargs = {"model": model, "messages": anthropic_messages, "max_tokens": kwargs.pop("max_tokens", 4096)}
        if system:
            call_kwargs["system"] = system
        if tools:
            call_kwargs["tools"] = tools
        call_kwargs.update(kwargs)

        response = client.messages.create(**call_kwargs)

        # Convert to OpenAI format
        return self._anthropic_to_openai_response(response, model)

    def _call_google(self, model: str, messages: List[Dict], tools: Any, **kwargs) -> Any:
        """Call Google API and convert response to OpenAI format."""
        try:
            import google.generativeai as genai
        except ImportError:
            raise ImportError("Install 'google-generativeai' package: pip install google-generativeai")

        # Configure if API key available
        api_key = os.environ.get("GOOGLE_API_KEY")
        if api_key:
            genai.configure(api_key=api_key)

        # Convert messages to Google format
        model_name = model.replace("models/", "") if model.startswith("models/") else model
        gmodel = genai.GenerativeModel(model_name)

        # Simple conversion - concatenate messages
        prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages])

        response = gmodel.generate_content(prompt)

        # Convert to OpenAI format
        return self._google_to_openai_response(response, model)

    def _anthropic_to_openai_response(self, response: Any, model: str) -> Any:
        """Convert Anthropic response to OpenAI format."""
        from types import SimpleNamespace

        content = ""
        if response.content:
            content = response.content[0].text if hasattr(response.content[0], "text") else str(response.content[0])

        return SimpleNamespace(
            id=response.id,
            model=model,
            choices=[
                SimpleNamespace(
                    index=0,
                    message=SimpleNamespace(
                        role="assistant",
                        content=content,
                    ),
                    finish_reason=response.stop_reason,
                )
            ],
            usage=SimpleNamespace(
                prompt_tokens=response.usage.input_tokens,
                completion_tokens=response.usage.output_tokens,
                total_tokens=response.usage.input_tokens + response.usage.output_tokens,
            ),
        )

    def _google_to_openai_response(self, response: Any, model: str) -> Any:
        """Convert Google response to OpenAI format."""
        from types import SimpleNamespace
        import uuid

        content = response.text if hasattr(response, "text") else str(response)

        return SimpleNamespace(
            id=f"google-{uuid.uuid4().hex[:8]}",
            model=model,
            choices=[
                SimpleNamespace(
                    index=0,
                    message=SimpleNamespace(
                        role="assistant",
                        content=content,
                    ),
                    finish_reason="stop",
                )
            ],
            usage=SimpleNamespace(
                prompt_tokens=getattr(response, "usage_metadata", {}).get("prompt_token_count", 0),
                completion_tokens=getattr(response, "usage_metadata", {}).get("candidates_token_count", 0),
                total_tokens=getattr(response, "usage_metadata", {}).get("total_token_count", 0),
            ),
        )

    def as_langchain(self):
        """Return a LangChain-compatible chat model."""
        try:
            from kalibr_langchain.chat_model import KalibrChatModel
            return KalibrChatModel(router=self)
        except ImportError:
            raise ImportError("Install 'kalibr-langchain' package for LangChain integration")
```
kalibr-1.2.4.dist-info/METADATA
ADDED

````text
Metadata-Version: 2.2
Name: kalibr
Version: 1.2.4
Summary: Unified LLM Observability & Multi-Model AI Integration Framework - Deploy to GPT, Claude, Gemini, Copilot with full telemetry.
Author-email: Kalibr Team <support@kalibr.systems>
License: Apache-2.0
Project-URL: Homepage, https://github.com/kalibr-ai/kalibr-sdk-python
Project-URL: Documentation, https://kalibr.systems/docs
Project-URL: Repository, https://github.com/kalibr-ai/kalibr-sdk-python
Project-URL: Issues, https://github.com/kalibr-ai/kalibr-sdk-python/issues
Keywords: ai,mcp,gpt,claude,gemini,copilot,openai,anthropic,google,microsoft,observability,telemetry,tracing,llm,schema-generation,api,multi-model,langchain,crewai
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: httpx>=0.27.0
Requires-Dist: tiktoken>=0.8.0
Requires-Dist: fastapi>=0.110.1
Requires-Dist: uvicorn>=0.25.0
Requires-Dist: pydantic>=2.6.4
Requires-Dist: typer>=0.9.0
Requires-Dist: python-multipart>=0.0.9
Requires-Dist: rich>=10.0.0
Requires-Dist: requests>=2.31.0
Requires-Dist: opentelemetry-api>=1.20.0
Requires-Dist: opentelemetry-sdk>=1.20.0
Requires-Dist: opentelemetry-exporter-otlp>=1.20.0
Provides-Extra: langchain
Requires-Dist: langchain-core>=0.1.0; extra == "langchain"
Provides-Extra: langchain-openai
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-openai"
Requires-Dist: langchain-openai>=0.1.0; extra == "langchain-openai"
Provides-Extra: langchain-anthropic
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-anthropic"
Requires-Dist: langchain-anthropic>=0.1.0; extra == "langchain-anthropic"
Provides-Extra: langchain-google
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-google"
Requires-Dist: langchain-google-genai>=0.0.10; extra == "langchain-google"
Provides-Extra: langchain-all
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-all"
Requires-Dist: langchain-openai>=0.1.0; extra == "langchain-all"
Requires-Dist: langchain-anthropic>=0.1.0; extra == "langchain-all"
Requires-Dist: langchain-google-genai>=0.0.10; extra == "langchain-all"
Provides-Extra: crewai
Requires-Dist: crewai>=0.28.0; extra == "crewai"
Provides-Extra: openai-agents
Requires-Dist: openai-agents>=0.0.3; extra == "openai-agents"
Provides-Extra: integrations
Requires-Dist: langchain-core>=0.1.0; extra == "integrations"
Requires-Dist: crewai>=0.28.0; extra == "integrations"
Requires-Dist: openai-agents>=0.0.3; extra == "integrations"
Provides-Extra: dev
Requires-Dist: pytest>=7.4.0; extra == "dev"
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: ruff>=0.1.0; extra == "dev"

# Kalibr SDK

**Intelligent routing for AI agents.** Kalibr picks the best model for each request, learns from outcomes, and shifts traffic to what works.

## Installation

```bash
pip install kalibr
```

```bash
export KALIBR_API_KEY=kal_xxx  # Get from dashboard.kalibr.dev
```

## Quick Start

```python
from kalibr import Router

router = Router(
    goal="book_meeting",
    paths=["gpt-4o", "claude-3-sonnet", "gpt-4o-mini"],
    success_when=lambda output: "confirmed" in output.lower()
)

response = router.completion(
    messages=[{"role": "user", "content": "Book a meeting with John tomorrow"}]
)

print(response.choices[0].message.content)
```

That's it. Kalibr handles:
- ✅ Picking the best model (Thompson Sampling)
- ✅ Making the API call
- ✅ Checking success
- ✅ Learning for next time
- ✅ Tracing everything

## How It Works

1. **Define a goal** - What is your agent trying to do?
2. **Register paths** - Which models/tools can achieve it?
3. **Report outcomes** - Did it work?
4. **Kalibr routes** - Traffic shifts to winners

## Paths

A path is a model + optional tools + optional params:

```python
# Simple: just models
paths = ["gpt-4o", "claude-3-sonnet"]

# With tools
paths = [
    {"model": "gpt-4o", "tools": ["web_search"]},
    {"model": "claude-3-sonnet", "tools": ["web_search", "browser"]},
]

# With params
paths = [
    {"model": "gpt-4o", "params": {"temperature": 0.7}},
    {"model": "gpt-4o", "params": {"temperature": 0.2}},
]
```

## Success Criteria

### Auto-detect from output

```python
router = Router(
    goal="summarize",
    paths=["gpt-4o", "claude-3-sonnet"],
    success_when=lambda output: len(output) > 100
)
```

### Manual reporting

```python
router = Router(goal="book_meeting", paths=["gpt-4o", "claude-3-sonnet"])

response = router.completion(messages=[...])

# Your verification logic
meeting_created = check_calendar_api()

router.report(success=meeting_created)
```

## Framework Integration

### LangChain

```python
from kalibr import Router

router = Router(goal="summarize", paths=["gpt-4o", "claude-3-sonnet"])
llm = router.as_langchain()

chain = prompt | llm | parser
result = chain.invoke({"text": "..."})
```

### CrewAI

```python
from kalibr import Router

router = Router(goal="research", paths=["gpt-4o", "claude-3-sonnet"])

agent = Agent(
    role="Researcher",
    llm=router.as_langchain(),
    ...
)
```

## Observability (Included)

Every call is automatically traced:

- Token counts and costs
- Latency (p50, p95, p99)
- Tool usage
- Errors with stack traces

View in the [dashboard](https://dashboard.kalibr.dev) or use callback handlers directly:

```python
from kalibr_langchain import KalibrCallbackHandler

handler = KalibrCallbackHandler()
chain.invoke({"input": "..."}, config={"callbacks": [handler]})
```

## Pricing

| Tier | Routing Decisions | Price |
|------|-------------------|-------|
| Free | 1,000/month | $0 |
| Pro | 50,000/month | $49/month |
| Enterprise | Unlimited | Custom |

## API Reference

### Router

```python
Router(
    goal: str,                # Required: name of the goal
    paths: List[str | dict],  # Models/tools to route between
    success_when: Callable,   # Optional: auto-evaluate success
    exploration_rate: float,  # Optional: 0.0-1.0, default 0.1
)
```

### Methods

```python
router.completion(messages, **kwargs)  # Make routed request
router.report(success, reason=None)    # Report outcome manually
router.add_path(model, tools=None)     # Add path dynamically
router.as_langchain()                  # Get LangChain-compatible LLM
```

## Links

- [Documentation](https://docs.kalibr.dev)
- [Dashboard](https://dashboard.kalibr.dev)
- [GitHub](https://github.com/kalibr-ai/kalibr-sdk-python)

## License

MIT
````
{kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/RECORD
CHANGED

```diff
@@ -1,4 +1,4 @@
-kalibr/__init__.py,sha256=
+kalibr/__init__.py,sha256=_Aq0ZfsOGbLC-GMB43br5CPprmjwLfmeBZuuVHUOQOY,5189
 kalibr/__main__.py,sha256=jO96I4pqinwHg7ONRvNVKbySBh5pSIhOAiNrgSQrNlY,110
 kalibr/capsule_middleware.py,sha256=pXG_wORgCqo3wHjtkn_zY4doLyiDmTwJtB7XiZNnbPk,3163
 kalibr/client.py,sha256=6D1paakE6zgWJStaow3ak9t0R8afodQhSSpUO3WTs_8,9732
@@ -11,6 +11,7 @@ kalibr/kalibr.py,sha256=cNXC3W_TX5SvGsy1lRopkwFqsHOpyd1kkVjEMOz1Yr4,6084
 kalibr/kalibr_app.py,sha256=ItZwEh0FZPx9_BE-zPQajC2yxI2y9IHYwJD0k9tbHvY,2773
 kalibr/models.py,sha256=HwD_-iysZMSnCzMQYO1Qcf0aeXySupY7yJeBwl_dLS0,1024
 kalibr/redaction.py,sha256=XibxX4Lv1Ci0opE6Tb5ZI2GLbO0a8E9U66MAg60llnc,1139
+kalibr/router.py,sha256=Z_LYvRxfLK5bQD-VUc1arqfAoC_CqOwJR8yoz_sGmvY,13234
 kalibr/schemas.py,sha256=XLZNLkXca6jbj9AF6gDIyGVnIcr1SVOsNYaKvW-wbgE,3669
 kalibr/simple_tracer.py,sha256=VAhqxGhCMBz9rVFXfpJtRmt6SrM_cpUBKE5ygP9PC9Y,9779
 kalibr/tokens.py,sha256=istjgaxi9S4dMddjuGtoQaTnZYcWLCqdnxRjV86yNXA,1297
@@ -35,15 +36,16 @@ kalibr/middleware/__init__.py,sha256=qyDUn_irAX67MS-IkuDVxg4RmFnJHDf_BfIT3qfGoBI
 kalibr/middleware/auto_tracer.py,sha256=ZBSBM0O3a6rwVzfik1n5NUmQDah8_iaf86rU64aPYT4,13037
 kalibr_crewai/__init__.py,sha256=b0HFTiE80eArtSMBOIEKu1JM6KU0tCjEylKCVVVF29Q,1796
 kalibr_crewai/callbacks.py,sha256=_d1M4J-6XfKqrVIxnOgOQu57jpFKVv-VIsmPV0HNgZ4,20419
-kalibr_crewai/instrumentor.py,sha256
-kalibr_langchain/__init__.py,sha256=
+kalibr_crewai/instrumentor.py,sha256=-G_-xaqE3Op70MSEIaZjPYioGDxKRagwLbZmcmmvzFg,26793
+kalibr_langchain/__init__.py,sha256=voHgdkcZ6oo336YK_uAFBHyOB11EBbnDS92UDoXRZiI,1448
 kalibr_langchain/async_callback.py,sha256=_Mj_YrKbULNtfxixZ7iwiHyWEV9l178ZA5Oy5A5Pakk,27748
 kalibr_langchain/callback.py,sha256=SNM1aHOXdG55grHmGyTwbXOeM6hjZTub2REiZD2H-d8,35216
+kalibr_langchain/chat_model.py,sha256=Y4xsZGx9gZpDUF8NP-edJuYam4k0NBySdA6B5484MKk,3190
 kalibr_openai_agents/__init__.py,sha256=wL59LzGstptKigfQDrKKt_7hcMO1JGVQtVAsE0lz-Zw,1367
 kalibr_openai_agents/processor.py,sha256=F550sdRf3rpguP1yOlgAUQWDLPBy4hSACV3-zOyCpOU,18257
-kalibr-1.2.
-kalibr-1.2.
-kalibr-1.2.
-kalibr-1.2.
-kalibr-1.2.
-kalibr-1.2.
+kalibr-1.2.4.dist-info/LICENSE,sha256=5mwAnB38l3_PjmOQn6_L6cZnJvus143DUjMBPIH1yso,10768
+kalibr-1.2.4.dist-info/METADATA,sha256=aXtgLOmzI6z7R1Zb0pFYU2jy2V4PUcUJKVFm0nciSL8,7037
+kalibr-1.2.4.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
+kalibr-1.2.4.dist-info/entry_points.txt,sha256=Kojlc6WRX8V1qS9lOMdDPZpTUVHCtzGtHqXusErgmLY,47
+kalibr-1.2.4.dist-info/top_level.txt,sha256=dIfBOWUnnHGFDwgz5zfIx5_0bU3wOUgAbYr4JcFHZmo,59
+kalibr-1.2.4.dist-info/RECORD,,
```
kalibr_crewai/instrumentor.py
CHANGED
```diff
@@ -12,6 +12,8 @@ from datetime import datetime, timezone
 from functools import wraps
 from typing import Any, Callable, Dict, List, Optional
 
+from opentelemetry import trace as otel_trace
+
 from .callbacks import EventBatcher, _count_tokens, _get_provider_from_model
 
 # Import Kalibr cost adapters
@@ -275,6 +277,21 @@ class KalibrCrewAIInstrumentor:
             duration_ms = int((time.time() - start_time) * 1000)
             ts_end = datetime.now(timezone.utc)
 
+            # Enrich CrewAI's OTel span with Kalibr telemetry
+            try:
+                current_span = otel_trace.get_current_span()
+                if current_span and current_span.is_recording():
+                    current_span.set_attribute("kalibr.cost_usd", instrumentor._accumulated_cost)
+                    current_span.set_attribute("kalibr.input_tokens", instrumentor._accumulated_tokens["input"])
+                    current_span.set_attribute("kalibr.output_tokens", instrumentor._accumulated_tokens["output"])
+                    current_span.set_attribute("kalibr.total_tokens", instrumentor._accumulated_tokens["input"] + instrumentor._accumulated_tokens["output"])
+                    current_span.set_attribute("kalibr.model_id", model_name)
+                    current_span.set_attribute("kalibr.provider", provider)
+                    current_span.set_attribute("kalibr.duration_ms", duration_ms)
+                    current_span.set_attribute("kalibr.tenant_id", instrumentor.tenant_id)
+            except Exception:
+                pass  # Don't fail if span enrichment fails
+
             # Build output info
             output_preview = None
             if instrumentor.capture_output and result is not None:
@@ -376,6 +393,21 @@ class KalibrCrewAIInstrumentor:
             duration_ms = int((time.time() - start_time) * 1000)
             ts_end = datetime.now(timezone.utc)
 
+            # Enrich CrewAI's OTel span with Kalibr telemetry
+            try:
+                current_span = otel_trace.get_current_span()
+                if current_span and current_span.is_recording():
+                    current_span.set_attribute("kalibr.cost_usd", instrumentor._accumulated_cost)
+                    current_span.set_attribute("kalibr.input_tokens", instrumentor._accumulated_tokens["input"])
+                    current_span.set_attribute("kalibr.output_tokens", instrumentor._accumulated_tokens["output"])
+                    current_span.set_attribute("kalibr.total_tokens", instrumentor._accumulated_tokens["input"] + instrumentor._accumulated_tokens["output"])
+                    current_span.set_attribute("kalibr.model_id", model_name)
+                    current_span.set_attribute("kalibr.provider", provider)
+                    current_span.set_attribute("kalibr.duration_ms", duration_ms)
+                    current_span.set_attribute("kalibr.tenant_id", instrumentor.tenant_id)
+            except Exception:
+                pass  # Don't fail if span enrichment fails
+
             output_preview = None
             if instrumentor.capture_output and result is not None:
                 output_preview = str(result)[:500]
```
kalibr_langchain/__init__.py
CHANGED
```diff
@@ -1,4 +1,4 @@
-"""Kalibr LangChain Integration - Observability for LangChain applications.
+"""Kalibr LangChain Integration - Observability and Routing for LangChain applications.
 
 This package provides a callback handler that integrates LangChain with
 Kalibr's observability platform, capturing:
@@ -39,9 +39,11 @@ __version__ = "0.1.0"
 
 from .callback import KalibrCallbackHandler
 from .async_callback import AsyncKalibrCallbackHandler
+from .chat_model import KalibrChatModel
 
 __all__ = [
     "KalibrCallbackHandler",
     "AsyncKalibrCallbackHandler",
+    "KalibrChatModel",
     "__version__",
 ]
```
kalibr_langchain/chat_model.py
ADDED

```python
"""
Kalibr LangChain Chat Model - Routes requests through Kalibr.
"""

from typing import Any, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class KalibrChatModel(BaseChatModel):
    """
    LangChain chat model that routes through Kalibr.

    Example:
        from kalibr import Router

        router = Router(goal="summarize", paths=["gpt-4o", "claude-3"])
        llm = router.as_langchain()

        chain = prompt | llm | parser
        result = chain.invoke({"text": "..."})
    """

    router: Any  # Kalibr Router instance

    model_config = {"arbitrary_types_allowed": True}

    @property
    def _llm_type(self) -> str:
        return "kalibr"

    @property
    def _identifying_params(self) -> dict:
        return {"goal": self.router.goal}

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate a response using Kalibr routing."""

        # Convert LangChain messages to OpenAI format
        openai_messages = []
        for m in messages:
            role = self._get_role(m)
            openai_messages.append({"role": role, "content": m.content})

        # Add stop sequences if provided
        if stop:
            kwargs["stop"] = stop

        # Call router
        response = self.router.completion(messages=openai_messages, **kwargs)

        # Convert response to LangChain format
        content = response.choices[0].message.content or ""

        return ChatResult(
            generations=[
                ChatGeneration(
                    message=AIMessage(content=content),
                    generation_info={
                        "model": response.model,
                        "finish_reason": response.choices[0].finish_reason,
                    },
                )
            ],
            llm_output={
                "model": response.model,
                "usage": {
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens,
                } if hasattr(response, "usage") else {},
            },
        )

    def _get_role(self, message: BaseMessage) -> str:
        """Convert LangChain message type to OpenAI role."""
        from langchain_core.messages import (
            HumanMessage,
            AIMessage,
            SystemMessage,
            FunctionMessage,
            ToolMessage,
        )

        if isinstance(message, HumanMessage):
            return "user"
        elif isinstance(message, AIMessage):
            return "assistant"
        elif isinstance(message, SystemMessage):
            return "system"
        elif isinstance(message, (FunctionMessage, ToolMessage)):
            return "function"
        else:
            return "user"
```
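Because `_generate()` needs nothing beyond a Router, the model drops straight into an LCEL pipeline. A sketch assuming langchain-core is installed (prompt text is illustrative; `as_langchain()` comes from router.py above):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

from kalibr import Router

router = Router(goal="summarize", paths=["gpt-4o", "claude-3-sonnet"])
llm = router.as_langchain()  # returns KalibrChatModel(router=router)

prompt = ChatPromptTemplate.from_messages([
    ("system", "Summarize the following text."),
    ("human", "{text}"),
])

# Each invoke goes through Router.completion(), so routing decisions and
# outcome reporting also apply to LangChain traffic.
chain = prompt | llm | StrOutputParser()
summary = chain.invoke({"text": "..."})
```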
kalibr-1.2.2.dist-info/METADATA
DELETED
````text
Metadata-Version: 2.2
Name: kalibr
Version: 1.2.2
Summary: Unified LLM Observability & Multi-Model AI Integration Framework - Deploy to GPT, Claude, Gemini, Copilot with full telemetry.
Author-email: Kalibr Team <support@kalibr.systems>
License: Apache-2.0
Project-URL: Homepage, https://github.com/kalibr-ai/kalibr-sdk-python
Project-URL: Documentation, https://kalibr.systems/docs
Project-URL: Repository, https://github.com/kalibr-ai/kalibr-sdk-python
Project-URL: Issues, https://github.com/kalibr-ai/kalibr-sdk-python/issues
Keywords: ai,mcp,gpt,claude,gemini,copilot,openai,anthropic,google,microsoft,observability,telemetry,tracing,llm,schema-generation,api,multi-model,langchain,crewai
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: httpx>=0.27.0
Requires-Dist: tiktoken>=0.8.0
Requires-Dist: fastapi>=0.110.1
Requires-Dist: uvicorn>=0.25.0
Requires-Dist: pydantic>=2.6.4
Requires-Dist: typer>=0.9.0
Requires-Dist: python-multipart>=0.0.9
Requires-Dist: rich>=10.0.0
Requires-Dist: requests>=2.31.0
Requires-Dist: opentelemetry-api>=1.20.0
Requires-Dist: opentelemetry-sdk>=1.20.0
Requires-Dist: opentelemetry-exporter-otlp>=1.20.0
Provides-Extra: langchain
Requires-Dist: langchain-core>=0.1.0; extra == "langchain"
Provides-Extra: langchain-openai
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-openai"
Requires-Dist: langchain-openai>=0.1.0; extra == "langchain-openai"
Provides-Extra: langchain-anthropic
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-anthropic"
Requires-Dist: langchain-anthropic>=0.1.0; extra == "langchain-anthropic"
Provides-Extra: langchain-google
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-google"
Requires-Dist: langchain-google-genai>=0.0.10; extra == "langchain-google"
Provides-Extra: langchain-all
Requires-Dist: langchain-core>=0.1.0; extra == "langchain-all"
Requires-Dist: langchain-openai>=0.1.0; extra == "langchain-all"
Requires-Dist: langchain-anthropic>=0.1.0; extra == "langchain-all"
Requires-Dist: langchain-google-genai>=0.0.10; extra == "langchain-all"
Provides-Extra: crewai
Requires-Dist: crewai>=0.28.0; extra == "crewai"
Provides-Extra: openai-agents
Requires-Dist: openai-agents>=0.0.3; extra == "openai-agents"
Provides-Extra: integrations
Requires-Dist: langchain-core>=0.1.0; extra == "integrations"
Requires-Dist: crewai>=0.28.0; extra == "integrations"
Requires-Dist: openai-agents>=0.0.3; extra == "integrations"
Provides-Extra: dev
Requires-Dist: pytest>=7.4.0; extra == "dev"
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: ruff>=0.1.0; extra == "dev"

# Kalibr Python SDK

Production-grade observability and execution intelligence for LLM applications. Automatically instrument OpenAI, Anthropic, and Google AI SDKs with zero code changes.

[](https://pypi.org/project/kalibr/)
[](https://pypi.org/project/kalibr/)
[](LICENSE)

## Features

- **Zero-code instrumentation** - Automatic tracing for OpenAI, Anthropic, and Google AI SDKs
- **Outcome-conditioned routing** - Query for optimal models based on historical success rates
- **TraceCapsule** - Cross-agent context propagation for multi-agent systems
- **Cost tracking** - Real-time cost calculation for all LLM calls
- **Token monitoring** - Track input/output tokens across providers
- **Framework integrations** - LangChain, CrewAI, OpenAI Agents SDK

## Installation

```bash
pip install kalibr
```

## Quick Start

### Auto-instrumentation (Recommended)

Simply import `kalibr` at the start of your application - all LLM calls are automatically traced:

```python
import kalibr  # Must be FIRST import
from openai import OpenAI

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}]
)
# That's it. The call is automatically traced.
```

### Manual Tracing with @trace Decorator

For more control, use the `@trace` decorator:

```python
from kalibr import trace
from openai import OpenAI

@trace(operation="summarize", provider="openai", model="gpt-4o")
def summarize_text(text: str) -> str:
    client = OpenAI()
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "Summarize the following text."},
            {"role": "user", "content": text}
        ]
    )
    return response.choices[0].message.content
```

### Multi-Provider Example

```python
import kalibr
from openai import OpenAI
from anthropic import Anthropic

# Both are automatically traced
openai_client = OpenAI()
anthropic_client = Anthropic()

gpt_response = openai_client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Explain quantum computing"}]
)

claude_response = anthropic_client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Explain machine learning"}]
)
```

## Outcome-Conditioned Routing

Query Kalibr for optimal model recommendations based on real execution outcomes:

```python
from kalibr import get_policy, report_outcome

# Before executing - get the best model for your goal
policy = get_policy(goal="book_meeting")
print(f"Use {policy['recommended_model']} - {policy['outcome_success_rate']:.0%} success rate")

# Execute with the recommended model
# ...

# After executing - report what happened
report_outcome(
    trace_id="abc123",
    goal="book_meeting",
    success=True
)
```

### With Constraints

```python
from kalibr import get_policy

policy = get_policy(
    goal="resolve_ticket",
    constraints={
        "max_cost_usd": 0.05,
        "max_latency_ms": 3000,
        "min_quality": 0.8
    }
)
```

### Intelligent Routing with decide()

Register execution paths and let Kalibr decide the best strategy:

```python
from kalibr import register_path, decide

# Register available paths
register_path(goal="book_meeting", model_id="gpt-4o", tool_id="calendar_api")
register_path(goal="book_meeting", model_id="claude-3-sonnet")

# Get intelligent routing decision
decision = decide(goal="book_meeting")
model = decision["model_id"]    # Selected based on outcomes
tool = decision.get("tool_id")  # If tool routing enabled
print(decision["exploration"])  # True if exploring new paths
```

### Goal Context

Tag traces with goals for outcome tracking:

```python
from kalibr import goal, set_goal, get_goal, clear_goal

# Context manager (recommended)
with goal("book_meeting"):
    response = openai.chat.completions.create(...)

# Or manual control
set_goal("book_meeting")
response = openai.chat.completions.create(...)
clear_goal()
```

## TraceCapsule - Cross-Agent Tracing

Propagate trace context across agent boundaries:

```python
from kalibr import TraceCapsule, get_or_create_capsule

# Agent 1: Create capsule and add hop
capsule = get_or_create_capsule()
capsule.append_hop({
    "provider": "openai",
    "operation": "chat_completion",
    "model": "gpt-4o",
    "duration_ms": 150,
    "cost_usd": 0.002,
    "status": "success"
})

# Pass to Agent 2 via HTTP header
headers = {"X-Kalibr-Capsule": capsule.to_json()}

# Agent 2: Receive and continue
capsule = TraceCapsule.from_json(headers["X-Kalibr-Capsule"])
capsule.append_hop({
    "provider": "anthropic",
    "operation": "chat_completion",
    "model": "claude-3-5-sonnet-20241022",
    "duration_ms": 200,
    "cost_usd": 0.003,
    "status": "success"
})
```

## Framework Integrations

### LangChain

```bash
pip install kalibr[langchain]
```

```python
from kalibr_langchain import KalibrCallbackHandler
from langchain_openai import ChatOpenAI

handler = KalibrCallbackHandler()
llm = ChatOpenAI(model="gpt-4o", callbacks=[handler])
response = llm.invoke("What is the capital of France?")
```

See [LangChain Integration Guide](kalibr_langchain/README.md) for full documentation.

### CrewAI

```bash
pip install kalibr[crewai]
```

```python
from kalibr_crewai import KalibrCrewAIInstrumentor
from crewai import Agent, Task, Crew

instrumentor = KalibrCrewAIInstrumentor()
instrumentor.instrument()

# Use CrewAI normally - all operations are traced
```

See [CrewAI Integration Guide](kalibr_crewai/README.md) for full documentation.

### OpenAI Agents SDK

```bash
pip install kalibr[openai-agents]
```

```python
from kalibr_openai_agents import setup_kalibr_tracing
from agents import Agent, Runner

setup_kalibr_tracing()

agent = Agent(name="Assistant", instructions="You are helpful.")
result = Runner.run_sync(agent, "Hello!")
```

See [OpenAI Agents Integration Guide](kalibr_openai_agents/README.md) for full documentation.

## Configuration

Configure via environment variables:

| Variable | Description | Default |
|----------|-------------|---------|
| `KALIBR_API_KEY` | API key for authentication | *Required* |
| `KALIBR_TENANT_ID` | Tenant identifier | `default` |
| `KALIBR_COLLECTOR_URL` | Collector endpoint URL | `https://api.kalibr.systems/api/ingest` |
| `KALIBR_INTELLIGENCE_URL` | Intelligence API URL | `https://dashboard.kalibr.systems/intelligence` |
| `KALIBR_SERVICE_NAME` | Service name for spans | `kalibr-app` |
| `KALIBR_ENVIRONMENT` | Environment (prod/staging/dev) | `prod` |
| `KALIBR_WORKFLOW_ID` | Workflow identifier | `default` |
| `KALIBR_AUTO_INSTRUMENT` | Enable auto-instrumentation | `true` |

## CLI Commands

```bash
# Show version
kalibr version

# Validate configuration
kalibr validate

# Check connection status
kalibr status

# Package for deployment
kalibr package

# Update schemas
kalibr update_schemas
```

## Supported Providers

| Provider | Models | Auto-Instrumentation |
|----------|--------|---------------------|
| OpenAI | GPT-4, GPT-4o, GPT-3.5 | Yes |
| Anthropic | Claude 3.5 Sonnet, Claude 3 Opus/Sonnet/Haiku | Yes |
| Google | Gemini Pro, Gemini Flash | Yes |

## Development

```bash
git clone https://github.com/kalibr-ai/kalibr-sdk-python.git
cd kalibr-sdk-python

pip install -e ".[dev]"

# Run tests
pytest

# Format code
black kalibr/
ruff check kalibr/
```

## Contributing

We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md).

## License

Apache 2.0 - see [LICENSE](LICENSE).

## Links

- [Documentation](https://kalibr.systems/docs)
- [Dashboard](https://dashboard.kalibr.systems)
- [GitHub](https://github.com/kalibr-ai/kalibr-sdk-python)
- [PyPI](https://pypi.org/project/kalibr/)
````
{kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/LICENSE
File without changes

{kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/WHEEL
File without changes

{kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/entry_points.txt
File without changes

{kalibr-1.2.2.dist-info → kalibr-1.2.4.dist-info}/top_level.txt
File without changes