kalibr 1.1.2a0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
kalibr/__init__.py CHANGED
@@ -1,36 +1,24 @@
- """Kalibr SDK v1.1.0 - Unified LLM Observability & Multi-Model AI Integration Framework
-
- This SDK combines:
- 1. Full LLM Observability with tracing, cost tracking, and analytics
- 2. Multi-Model AI Integration (GPT, Claude, Gemini, Copilot)
- 3. One-line deployment with Docker and runtime router
- 4. Schema generation for all major AI platforms
- 5. **NEW in 1.1.0**: Auto-instrumentation of LLM SDKs (OpenAI, Anthropic, Google)
+ """Kalibr SDK v1.2.0 - LLM Observability & Tracing Framework

  Features:
  - **Auto-Instrumentation**: Zero-config tracing of OpenAI, Anthropic, Google SDK calls
  - **OpenTelemetry**: OTel-compatible spans with OTLP export
  - **Tracing**: Complete telemetry with @trace decorator
  - **Cost Tracking**: Multi-vendor cost calculation (OpenAI, Anthropic, etc.)
- - **Deployment**: One-command deployment to Fly.io, Render, or local
- - **Schema Generation**: Auto-generate schemas for GPT Actions, Claude MCP, Gemini, Copilot
  - **Error Handling**: Automatic error capture with stack traces
  - **Analytics**: ClickHouse-backed analytics and alerting

- Usage - Auto-Instrumentation (NEW):
- from kalibr import Kalibr
+ Usage - Auto-Instrumentation:
+ from kalibr import auto_instrument
  import openai # Automatically instrumented!

- app = Kalibr(title="My API")
+ auto_instrument(["openai", "anthropic", "google"])

- @app.action("chat", "Chat with GPT")
- def chat(message: str):
- # This OpenAI call is automatically traced!
- response = openai.chat.completions.create(
- model="gpt-4",
- messages=[{"role": "user", "content": message}]
- )
- return response.choices[0].message.content
+ # All LLM calls are now traced automatically
+ response = openai.chat.completions.create(
+ model="gpt-4",
+ messages=[{"role": "user", "content": "Hello!"}]
+ )

  Usage - Manual Tracing:
  from kalibr import trace
@@ -44,13 +32,11 @@ Usage - Manual Tracing:
  return response

  CLI Usage:
- kalibr serve my_app.py # Run locally
- kalibr deploy my_app.py --runtime fly # Deploy to Fly.io
  kalibr run my_app.py # Run with auto-tracing
  kalibr version # Show version
  """

- __version__ = "1.1.0-alpha"
+ __version__ = "1.2.0"

  # Auto-instrument LLM SDKs on import (can be disabled via env var)
  import os
@@ -79,25 +65,22 @@ from .cost_adapter import (
  )
  from .instrumentation import auto_instrument, get_instrumented_providers

- # ============================================================================
- # SDK & DEPLOYMENT (from 1.0.30)
- # ============================================================================
- from .kalibr import Kalibr
- from .kalibr_app import KalibrApp
  from .models import EventData, TraceConfig
- from .schemas import (
- generate_copilot_schema,
- generate_gemini_schema,
- generate_mcp_schema,
- get_base_url,
- get_supported_models,
- )
  from .simple_tracer import trace
  from .trace_capsule import TraceCapsule, get_or_create_capsule
  from .tracer import SpanContext, Tracer
- from .types import FileUpload, Session
  from .utils import load_config_from_env

+ # ============================================================================
+ # INTELLIGENCE & OUTCOME ROUTING (v1.2.0)
+ # ============================================================================
+ from .intelligence import (
+ KalibrIntelligence,
+ get_policy,
+ report_outcome,
+ get_recommendation,
+ )
+
  if os.getenv("KALIBR_AUTO_INSTRUMENT", "true").lower() == "true":
  # Setup OpenTelemetry collector
  try:
@@ -145,21 +128,6 @@ __all__ = [
  # Utils
  "load_config_from_env",
  # ========================================================================
- # SDK & DEPLOYMENT
- # ========================================================================
- # SDK Classes
- "Kalibr",
- "KalibrApp",
- # Types
- "FileUpload",
- "Session",
- # Schema Generation
- "get_base_url",
- "generate_mcp_schema",
- "generate_gemini_schema",
- "generate_copilot_schema",
- "get_supported_models",
- # ========================================================================
  # PHASE 1: SDK INSTRUMENTATION & OPENTELEMETRY (v1.1.0)
  # ========================================================================
  # Auto-instrumentation
@@ -169,4 +137,11 @@ __all__ = [
  "setup_collector",
  "get_tracer_provider",
  "is_collector_configured",
+ # ========================================================================
+ # INTELLIGENCE & OUTCOME ROUTING (v1.2.0)
+ # ========================================================================
+ "KalibrIntelligence",
+ "get_policy",
+ "report_outcome",
+ "get_recommendation",
  ]
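
Taken together, the 1.2.0 __init__.py removes the SDK/deployment and schema-generation exports and adds the intelligence helpers to the public API. A minimal sketch of how the exported names combine into the outcome loop described in the new docstring (the goal string and trace ID below are illustrative placeholders, not values from the package):

    from kalibr import auto_instrument, get_policy, report_outcome

    auto_instrument(["openai", "anthropic"])      # zero-config SDK tracing
    policy = get_policy(goal="book_meeting")      # ask for the best-performing path
    model = policy["recommended_model"]
    # ... run the LLM call with this model, keeping the trace ID from your tracer ...
    report_outcome(trace_id="abc123", goal="book_meeting", success=True)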
kalibr/cli/capsule_cmd.py CHANGED
@@ -23,7 +23,7 @@ def capsule(
  None,
  "--api-url",
  "-u",
- help="Kalibr API base URL (default: from env KALIBR_API_URL or http://localhost:8001)",
+ help="Kalibr API base URL (default: from env KALIBR_API_URL or https://api.kalibr.systems)",
  envvar="KALIBR_API_URL",
  ),
  output: Optional[Path] = typer.Option(
@@ -63,10 +63,10 @@ def capsule(
  kalibr capsule abc-123-def --export --output capsule.json

  # Specify custom API URL
- kalibr capsule abc-123-def -u https://api.kalibr.io
+ kalibr capsule abc-123-def -u https://api.kalibr.systems
  """
  # Determine API base URL
- base_url = api_url or "http://localhost:8001"
+ base_url = api_url or "https://api.kalibr.systems"
  base_url = base_url.rstrip("/")

  # Build endpoint URL
kalibr/cli/main.py CHANGED
@@ -30,9 +30,9 @@ def version():
  from kalibr import __version__

  console.print(f"[bold]Kalibr SDK version:[/bold] {__version__}")
- console.print("Enhanced multi-model AI integration framework")
- console.print("Supports: GPT Actions, Claude MCP, Gemini Extensions, Copilot Plugins")
- console.print("GitHub: https://github.com/devonakelley/kalibr-sdk")
+ console.print("LLM Observability & Execution Intelligence")
+ console.print("Auto-instrumentation for OpenAI, Anthropic, Google AI")
+ console.print("GitHub: https://github.com/kalibr-ai/kalibr-sdk-python")


  @app.command()
kalibr/cli/run.py CHANGED
@@ -47,7 +47,7 @@ def run(
  kalibr run weather.py --runtime fly.io

  # Custom backend
- kalibr run weather.py --backend-url https://api.kalibr.io
+ kalibr run weather.py --backend-url https://api.kalibr.systems
  """
  # Validate file exists
  agent_path = Path(file_path).resolve()
@@ -56,7 +56,7 @@ def run(
  raise typer.Exit(1)

  # Configure backend
- backend = backend_url or os.getenv("KALIBR_BACKEND_URL", "http://localhost:8001")
+ backend = backend_url or os.getenv("KALIBR_BACKEND_URL", "https://api.kalibr.systems")
  api_key = os.getenv("KALIBR_API_KEY")
  if not api_key:
  console.print("[yellow]⚠️ KALIBR_API_KEY not set. Set it for trace authentication.[/yellow]")
kalibr/client.py CHANGED
@@ -70,7 +70,7 @@ class KalibrClient:
  self.api_key = api_key or env_config.get("auth_token", "")
  self.endpoint = endpoint or env_config.get(
- "api_endpoint", "http://localhost:8001/api/v1/traces"
+ "api_endpoint", "https://api.kalibr.systems/api/v1/traces"
  )
  self.tenant_id = tenant_id or env_config.get("tenant_id", "default")
  self.environment = environment or env_config.get("environment", "prod")
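
KalibrClient keeps the same constructor; only the fallback trace endpoint moves from localhost to the hosted API. A sketch of overriding it explicitly, assuming the keyword names visible above (api_key, endpoint, tenant_id, environment) are the public constructor arguments:

    from kalibr.client import KalibrClient

    # Send traces to a self-hosted collector instead of api.kalibr.systems
    client = KalibrClient(
        api_key="sk-...",                                # or set KALIBR_API_KEY
        endpoint="http://localhost:8001/api/v1/traces",  # or set KALIBR_API_ENDPOINT
        tenant_id="default",
        environment="dev",
    )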
kalibr/intelligence.py ADDED
@@ -0,0 +1,317 @@
+ """Kalibr Intelligence Client - Query execution intelligence and report outcomes.
+
+ This module enables the outcome-conditioned routing loop:
+ 1. Before executing: query get_policy() to get the best path for your goal
+ 2. After executing: call report_outcome() to teach Kalibr what worked
+
+ Example:
+ from kalibr import get_policy, report_outcome
+
+ # Before executing - get best path
+ policy = get_policy(goal="book_meeting")
+ model = policy["recommended_model"] # Use this model
+
+ # After executing - report what happened
+ report_outcome(
+ trace_id=trace_id,
+ goal="book_meeting",
+ success=True
+ )
+ """
+
+ from __future__ import annotations
+
+ import os
+ from typing import Any, Optional
+
+ import httpx
+
+ # Default intelligence API endpoint
+ DEFAULT_INTELLIGENCE_URL = "https://kalibr-intelligence.fly.dev"
+
+
+ class KalibrIntelligence:
+ """Client for Kalibr Intelligence API.
+
+ Provides methods to query execution policies and report outcomes
+ for the outcome-conditioned routing loop.
+
+ Args:
+ api_key: Kalibr API key (or set KALIBR_API_KEY env var)
+ tenant_id: Tenant identifier (or set KALIBR_TENANT_ID env var)
+ base_url: Intelligence API base URL (or set KALIBR_INTELLIGENCE_URL env var)
+ timeout: Request timeout in seconds
+ """
+
+ def __init__(
+ self,
+ api_key: str | None = None,
+ tenant_id: str | None = None,
+ base_url: str | None = None,
+ timeout: float = 10.0,
+ ):
+ self.api_key = api_key or os.getenv("KALIBR_API_KEY", "")
+ self.tenant_id = tenant_id or os.getenv("KALIBR_TENANT_ID", "")
+ self.base_url = (
+ base_url
+ or os.getenv("KALIBR_INTELLIGENCE_URL", DEFAULT_INTELLIGENCE_URL)
+ ).rstrip("/")
+ self.timeout = timeout
+ self._client = httpx.Client(timeout=timeout)
+
+ def _request(
+ self,
+ method: str,
+ path: str,
+ json: dict | None = None,
+ ) -> httpx.Response:
+ """Make authenticated request to intelligence API."""
+ headers = {
+ "X-API-Key": self.api_key,
+ "X-Tenant-ID": self.tenant_id,
+ "Content-Type": "application/json",
+ }
+
+ url = f"{self.base_url}{path}"
+ response = self._client.request(method, url, json=json, headers=headers)
+ response.raise_for_status()
+ return response
+
+ def get_policy(
+ self,
+ goal: str,
+ task_type: str | None = None,
+ constraints: dict | None = None,
+ window_hours: int = 168,
+ ) -> dict[str, Any]:
+ """Get execution policy for a goal.
+
+ Returns the historically best-performing path for achieving
+ the specified goal, based on outcome data.
+
+ Args:
+ goal: The goal to optimize for (e.g., "book_meeting", "resolve_ticket")
+ task_type: Optional task type filter (e.g., "code", "summarize")
+ constraints: Optional constraints dict with keys:
+ - max_cost_usd: Maximum cost per request
+ - max_latency_ms: Maximum latency
+ - min_quality: Minimum quality score (0-1)
+ - min_confidence: Minimum statistical confidence (0-1)
+ - max_risk: Maximum risk score (0-1)
+ window_hours: Time window for pattern analysis (default 1 week)
+
+ Returns:
+ dict with:
+ - goal: The goal queried
+ - recommended_model: Best model for this goal
+ - recommended_provider: Provider for the recommended model
+ - outcome_success_rate: Historical success rate (0-1)
+ - outcome_sample_count: Number of outcomes in the data
+ - confidence: Statistical confidence in recommendation
+ - risk_score: Risk score (lower is better)
+ - reasoning: Human-readable explanation
+ - alternatives: List of alternative models
+
+ Raises:
+ httpx.HTTPStatusError: If the API returns an error
+
+ Example:
+ policy = intelligence.get_policy(goal="book_meeting")
+ print(f"Use {policy['recommended_model']} - {policy['outcome_success_rate']:.0%} success rate")
+ """
+ response = self._request(
+ "POST",
+ "/api/v1/intelligence/policy",
+ json={
+ "goal": goal,
+ "task_type": task_type,
+ "constraints": constraints,
+ "window_hours": window_hours,
+ },
+ )
+ return response.json()
+
+ def report_outcome(
+ self,
+ trace_id: str,
+ goal: str,
+ success: bool,
+ score: float | None = None,
+ failure_reason: str | None = None,
+ metadata: dict | None = None,
+ ) -> dict[str, Any]:
+ """Report execution outcome for a goal.
+
+ This is the feedback loop that teaches Kalibr what works.
+ Call this after your agent completes (or fails) a task.
+
+ Args:
+ trace_id: The trace ID from the execution
+ goal: The goal this execution was trying to achieve
+ success: Whether the goal was achieved
+ score: Optional quality score (0-1) for more granular feedback
+ failure_reason: Optional reason for failure (helps with debugging)
+ metadata: Optional additional context as a dict
+
+ Returns:
+ dict with:
+ - status: "accepted" if successful
+ - trace_id: The trace ID recorded
+ - goal: The goal recorded
+
+ Raises:
+ httpx.HTTPStatusError: If the API returns an error
+
+ Example:
+ # Success case
+ report_outcome(trace_id="abc123", goal="book_meeting", success=True)
+
+ # Failure case with reason
+ report_outcome(
+ trace_id="abc123",
+ goal="book_meeting",
+ success=False,
+ failure_reason="calendar_conflict"
+ )
+ """
+ response = self._request(
+ "POST",
+ "/api/v1/intelligence/report-outcome",
+ json={
+ "trace_id": trace_id,
+ "goal": goal,
+ "success": success,
+ "score": score,
+ "failure_reason": failure_reason,
+ "metadata": metadata,
+ },
+ )
+ return response.json()
+
+ def get_recommendation(
+ self,
+ task_type: str,
+ goal: str | None = None,
+ optimize_for: str = "balanced",
+ constraints: dict | None = None,
+ window_hours: int = 168,
+ ) -> dict[str, Any]:
+ """Get model recommendation for a task type.
+
+ This is the original recommendation endpoint. For goal-based
+ optimization, prefer get_policy() instead.
+
+ Args:
+ task_type: Type of task (e.g., "summarize", "code", "qa")
+ goal: Optional goal for outcome-based optimization
+ optimize_for: Optimization target - one of:
+ - "cost": Minimize cost
+ - "quality": Maximize output quality
+ - "latency": Minimize response time
+ - "balanced": Balance all factors (default)
+ - "cost_efficiency": Maximize quality-per-dollar
+ - "outcome": Optimize for goal success rate
+ constraints: Optional constraints dict
+ window_hours: Time window for pattern analysis
+
+ Returns:
+ dict with recommendation, alternatives, stats, reasoning
+ """
+ response = self._request(
+ "POST",
+ "/api/v1/intelligence/recommend",
+ json={
+ "task_type": task_type,
+ "goal": goal,
+ "optimize_for": optimize_for,
+ "constraints": constraints,
+ "window_hours": window_hours,
+ },
+ )
+ return response.json()
+
+ def close(self):
+ """Close the HTTP client."""
+ self._client.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+ # Module-level singleton for convenience functions
+ _intelligence_client: KalibrIntelligence | None = None
+
+
+ def _get_intelligence_client() -> KalibrIntelligence:
+ """Get or create the singleton intelligence client."""
+ global _intelligence_client
+ if _intelligence_client is None:
+ _intelligence_client = KalibrIntelligence()
+ return _intelligence_client
+
+
+ def get_policy(goal: str, tenant_id: str | None = None, **kwargs) -> dict[str, Any]:
+ """Get execution policy for a goal.
+
+ Convenience function that uses the default intelligence client.
+ See KalibrIntelligence.get_policy for full documentation.
+
+ Args:
+ goal: The goal to optimize for
+ tenant_id: Optional tenant ID override (default: uses KALIBR_TENANT_ID env var)
+ **kwargs: Additional arguments (task_type, constraints, window_hours)
+
+ Returns:
+ Policy dict with recommended_model, outcome_success_rate, etc.
+
+ Example:
+ from kalibr import get_policy
+
+ policy = get_policy(goal="book_meeting")
+ model = policy["recommended_model"]
+ """
+ client = _get_intelligence_client()
+ if tenant_id:
+ # Create a new client with the specified tenant_id
+ client = KalibrIntelligence(tenant_id=tenant_id)
+ return client.get_policy(goal, **kwargs)
+
+
+ def report_outcome(trace_id: str, goal: str, success: bool, tenant_id: str | None = None, **kwargs) -> dict[str, Any]:
+ """Report execution outcome for a goal.
+
+ Convenience function that uses the default intelligence client.
+ See KalibrIntelligence.report_outcome for full documentation.
+
+ Args:
+ trace_id: The trace ID from the execution
+ goal: The goal this execution was trying to achieve
+ success: Whether the goal was achieved
+ tenant_id: Optional tenant ID override (default: uses KALIBR_TENANT_ID env var)
+ **kwargs: Additional arguments (score, failure_reason, metadata)
+
+ Returns:
+ Response dict with status confirmation
+
+ Example:
+ from kalibr import report_outcome
+
+ report_outcome(trace_id="abc123", goal="book_meeting", success=True)
+ """
+ client = _get_intelligence_client()
+ if tenant_id:
+ # Create a new client with the specified tenant_id
+ client = KalibrIntelligence(tenant_id=tenant_id)
+ return client.report_outcome(trace_id, goal, success, **kwargs)
+
+
+ def get_recommendation(task_type: str, **kwargs) -> dict[str, Any]:
+ """Get model recommendation for a task type.
+
+ Convenience function that uses the default intelligence client.
+ See KalibrIntelligence.get_recommendation for full documentation.
+ """
+ return _get_intelligence_client().get_recommendation(task_type, **kwargs)
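
The new module wires the query-execute-report cycle end to end. A sketch of the loop using the class directly, based on the signatures above (the goal, constraints, and trace ID are illustrative; obtain the real trace ID from your tracing context):

    from kalibr.intelligence import KalibrIntelligence

    with KalibrIntelligence(api_key="sk-...", tenant_id="acme") as intel:
        # 1. Query the policy before executing
        policy = intel.get_policy(
            goal="resolve_ticket",
            constraints={"max_cost_usd": 0.05, "min_confidence": 0.8},
        )
        model = policy["recommended_model"]

        # 2. Execute the task with that model
        success = run_agent(model)  # hypothetical helper for your own agent logic

        # 3. Report the outcome so future policies improve
        intel.report_outcome(
            trace_id="trace-123",   # placeholder; use the trace ID from your tracer
            goal="resolve_ticket",
            success=success,
            score=0.9 if success else None,
        )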
@@ -54,7 +54,7 @@ class AutoTracerMiddleware(BaseHTTPMiddleware):
  # Collector config
  self.collector_url = collector_url or os.getenv(
- "KALIBR_COLLECTOR_URL", "http://localhost:8001/api/ingest"
+ "KALIBR_COLLECTOR_URL", "https://api.kalibr.systems/api/ingest"
  )
  self.api_key = api_key or os.getenv("KALIBR_API_KEY", "")
  self.tenant_id = tenant_id or os.getenv("KALIBR_TENANT_ID", "default")
kalibr/simple_tracer.py CHANGED
@@ -53,7 +53,7 @@ def send_event(payload: dict):
  print("[Kalibr SDK] ❌ requests library not available")
  return

- url = os.getenv("KALIBR_COLLECTOR_URL", "http://localhost:8001/api/ingest")
+ url = os.getenv("KALIBR_COLLECTOR_URL", "https://api.kalibr.systems/api/ingest")
  api_key = os.getenv("KALIBR_API_KEY")
  if not api_key:
  print("[Kalibr SDK] ⚠️ KALIBR_API_KEY not set, traces will not be sent")
@@ -123,7 +123,7 @@ def trace(
  parent_span_id = kwargs.pop("parent_span_id", None) # None or base62 string

  # Load environment config
- tenant_id = os.getenv("KALIBR_TENANT_ID", "emergent")
+ tenant_id = os.getenv("KALIBR_TENANT_ID", "default")
  workflow_id = os.getenv("KALIBR_WORKFLOW_ID", "multi_agent_demo")
  sandbox_id = os.getenv("SANDBOX_ID", "vercel_vm_001")
  runtime_env = os.getenv("RUNTIME_ENV", "vercel_vm")
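
The tenant default here also changes from the internal "emergent" value to "default". A sketch of configuring the tracer through the environment variables read above; the bare-decorator form of @trace is assumed from the package docstring, not shown in this hunk:

    import os

    os.environ["KALIBR_COLLECTOR_URL"] = "https://api.kalibr.systems/api/ingest"
    os.environ["KALIBR_API_KEY"] = "sk-..."        # required, or traces are not sent
    os.environ["KALIBR_TENANT_ID"] = "acme"
    os.environ["KALIBR_WORKFLOW_ID"] = "support_bot"

    from kalibr import trace

    @trace  # assumed usage; see "Usage - Manual Tracing" in the package docstring
    def handle_ticket(text: str) -> str:
        return text.upper()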
kalibr/utils.py CHANGED
@@ -38,8 +38,8 @@ def load_config_from_env() -> Dict[str, str]:
  "workflow_id": os.getenv("KALIBR_WORKFLOW_ID", "default-workflow"),
  "sandbox_id": os.getenv("SANDBOX_ID", "local"),
  "runtime_env": os.getenv("RUNTIME_ENV", "local"),
- "api_endpoint": os.getenv("KALIBR_API_ENDPOINT", "http://localhost:8001/api/v1/traces"),
- "collector_url": os.getenv("KALIBR_COLLECTOR_URL", "http://localhost:8080/api/ingest"),
+ "api_endpoint": os.getenv("KALIBR_API_ENDPOINT", "https://api.kalibr.systems/api/v1/traces"),
+ "collector_url": os.getenv("KALIBR_COLLECTOR_URL", "https://api.kalibr.systems/api/ingest"),
  }
  return config
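
With these defaults, load_config_from_env() resolves to the hosted endpoints when nothing is set locally. A quick check of the keys visible in this hunk (the returned dict also carries the auth_token, tenant_id, and environment keys referenced in kalibr/client.py above):

    from kalibr.utils import load_config_from_env

    config = load_config_from_env()
    print(config["api_endpoint"])   # https://api.kalibr.systems/api/v1/traces by default
    print(config["collector_url"])  # https://api.kalibr.systems/api/ingest by default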