kalibr 1.0.28__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
Files changed (52)
  1. kalibr/__init__.py +170 -3
  2. kalibr/__main__.py +3 -203
  3. kalibr/capsule_middleware.py +108 -0
  4. kalibr/cli/__init__.py +5 -0
  5. kalibr/cli/capsule_cmd.py +174 -0
  6. kalibr/cli/deploy_cmd.py +114 -0
  7. kalibr/cli/main.py +67 -0
  8. kalibr/cli/run.py +200 -0
  9. kalibr/cli/serve.py +59 -0
  10. kalibr/client.py +293 -0
  11. kalibr/collector.py +173 -0
  12. kalibr/context.py +132 -0
  13. kalibr/cost_adapter.py +222 -0
  14. kalibr/decorators.py +140 -0
  15. kalibr/instrumentation/__init__.py +13 -0
  16. kalibr/instrumentation/anthropic_instr.py +282 -0
  17. kalibr/instrumentation/base.py +108 -0
  18. kalibr/instrumentation/google_instr.py +281 -0
  19. kalibr/instrumentation/openai_instr.py +265 -0
  20. kalibr/instrumentation/registry.py +153 -0
  21. kalibr/kalibr.py +144 -230
  22. kalibr/kalibr_app.py +53 -314
  23. kalibr/middleware/__init__.py +5 -0
  24. kalibr/middleware/auto_tracer.py +356 -0
  25. kalibr/models.py +41 -0
  26. kalibr/redaction.py +44 -0
  27. kalibr/schemas.py +116 -0
  28. kalibr/simple_tracer.py +255 -0
  29. kalibr/tokens.py +52 -0
  30. kalibr/trace_capsule.py +296 -0
  31. kalibr/trace_models.py +201 -0
  32. kalibr/tracer.py +354 -0
  33. kalibr/types.py +25 -93
  34. kalibr/utils.py +198 -0
  35. kalibr-1.1.0.dist-info/METADATA +97 -0
  36. kalibr-1.1.0.dist-info/RECORD +40 -0
  37. kalibr-1.1.0.dist-info/entry_points.txt +2 -0
  38. kalibr-1.1.0.dist-info/licenses/LICENSE +21 -0
  39. kalibr/deployment.py +0 -41
  40. kalibr/packager.py +0 -43
  41. kalibr/runtime_router.py +0 -138
  42. kalibr/schema_generators.py +0 -159
  43. kalibr/validator.py +0 -70
  44. kalibr-1.0.28.data/data/examples/README.md +0 -173
  45. kalibr-1.0.28.data/data/examples/basic_kalibr_example.py +0 -66
  46. kalibr-1.0.28.data/data/examples/enhanced_kalibr_example.py +0 -347
  47. kalibr-1.0.28.dist-info/METADATA +0 -175
  48. kalibr-1.0.28.dist-info/RECORD +0 -19
  49. kalibr-1.0.28.dist-info/entry_points.txt +0 -2
  50. kalibr-1.0.28.dist-info/licenses/LICENSE +0 -11
  51. {kalibr-1.0.28.dist-info → kalibr-1.1.0.dist-info}/WHEEL +0 -0
  52. {kalibr-1.0.28.dist-info → kalibr-1.1.0.dist-info}/top_level.txt +0 -0
kalibr/simple_tracer.py ADDED
@@ -0,0 +1,255 @@
+ """Simplified Kalibr tracer with direct event emission and capsule support.
+
+ Usage:
+     from kalibr.simple_tracer import trace
+
+     @trace(operation="summarize", provider="openai", model="gpt-4o")
+     def my_function(text):
+         return call_llm(text)
+
+ Capsule Usage (automatic when middleware is active):
+     from fastapi import FastAPI, Request
+     from kalibr.capsule_middleware import add_capsule_middleware
+     from kalibr import trace
+
+     app = FastAPI()
+     add_capsule_middleware(app)
+
+     @trace(operation="chat", provider="openai", model="gpt-4o")
+     def process_request(request: Request, prompt: str):
+         # Capsule automatically updated with this hop
+         return llm_call(prompt)
+ """
+
+ import json
+ import os
+ import random
+ import string
+ import time
+ import uuid
+ from datetime import datetime, timezone
+ from functools import wraps
+ from typing import Callable, Optional
+
+ try:
+     import requests
+ except ImportError:
+     print("[Kalibr SDK] ⚠️ requests not installed, install with: pip install requests")
+     requests = None
+
+
+ def generate_span_id() -> str:
+     """Generate UUIDv4 span ID for consistency."""
+     return str(uuid.uuid4())
+
+
+ def send_event(payload: dict):
+     """Send event directly to collector.
+
+     Args:
+         payload: Event data dict
+     """
+     if not requests:
+         print("[Kalibr SDK] ❌ requests library not available")
+         return
+
+     url = os.getenv("KALIBR_COLLECTOR_URL", "http://localhost:8001/api/ingest")
+     api_key = os.getenv("KALIBR_API_KEY", "test_key_12345")
+
+     format_pref = os.getenv("KALIBR_COLLECTOR_FORMAT", "ndjson").lower()
+     use_json_envelope = format_pref == "json"
+
+     headers = {"X-API-Key": api_key}
+     if use_json_envelope:
+         headers["Content-Type"] = "application/json"
+         body_cfg = {"events": [payload]}
+     else:
+         headers["Content-Type"] = "application/x-ndjson"
+         body_cfg = "\n".join(json.dumps(evt) for evt in [payload]) + "\n"
+
+     try:
+         if use_json_envelope:
+             response = requests.post(url, headers=headers, json=body_cfg, timeout=5)
+         else:
+             response = requests.post(url, headers=headers, data=body_cfg, timeout=5)
+         if not response.ok:
+             print(
+                 f"[Kalibr SDK] ❌ Collector rejected event: {response.status_code} - {response.text}"
+             )
+         else:
+             duration_ms = payload.get("duration_ms") or payload.get("latency_ms") or 0
+             total_cost = payload.get("total_cost_usd") or payload.get("cost_usd") or 0.0
+             print(
+                 f"[Kalibr SDK] ✅ Event sent: {payload.get('operation','event')} ({duration_ms}ms, ${total_cost:.6f})"
+             )
+     except Exception as e:
+         print(f"[Kalibr SDK] ❌ Failed to send event: {e}")
+
+
+ def trace(
+     operation: str, provider: str, model: str, input_tokens: Optional[int] = None, output_tokens: Optional[int] = None
+ ):
+     """Decorator to trace function calls with full telemetry.
+
+     Captures:
+     - Duration (ms)
+     - Tokens (estimated if not provided)
+     - Cost (USD)
+     - Errors
+     - Runtime metadata
+
+     Args:
+         operation: Operation type (e.g., "summarize", "refine", "analyze")
+         provider: LLM provider (e.g., "openai", "anthropic", "google")
+         model: Model name (e.g., "gpt-4o", "claude-3-sonnet")
+         input_tokens: Input token count (optional, will estimate)
+         output_tokens: Output token count (optional, will estimate)
+
+     Example:
+         @trace(operation="summarize", provider="openai", model="gpt-4o")
+         def summarize_text(text: str) -> str:
+             return openai.chat.completions.create(...)
+     """
+
+     def decorator(func: Callable) -> Callable:
+         @wraps(func)
+         def wrapper(*args, **kwargs):
+             # Generate IDs
+             trace_id = str(uuid.uuid4())
+             span_id = generate_span_id()  # UUIDv4 string
+             parent_span_id = kwargs.pop("parent_span_id", None)  # None or UUIDv4 string
+
+             # Load environment config
+             tenant_id = os.getenv("KALIBR_TENANT_ID", "emergent")
+             workflow_id = os.getenv("KALIBR_WORKFLOW_ID", "multi_agent_demo")
+             sandbox_id = os.getenv("SANDBOX_ID", "vercel_vm_001")
+             runtime_env = os.getenv("RUNTIME_ENV", "vercel_vm")
+
+             # Start timing
+             start_time = time.time()
+
+             # Execute function
+             result = None
+             status = "success"
+             error_type = None
+             error_message = None
+             exception_to_raise = None
+
+             try:
+                 result = func(*args, **kwargs)
+             except Exception as e:
+                 status = "error"
+                 error_type = type(e).__name__
+                 error_message = str(e)
+                 exception_to_raise = e
+                 print(f"[Kalibr SDK] ⚠️ Error in {func.__name__}: {error_type} - {error_message}")
+
+             # End timing
+             end_time = time.time()
+             duration_ms = int((end_time - start_time) * 1000)
+
+             # Token estimation
+             actual_input_tokens = input_tokens or kwargs.get("input_tokens", 1000)
+             actual_output_tokens = output_tokens or kwargs.get("output_tokens", 500)
+
+             # Cost calculation (simplified pricing)
+             # OpenAI GPT-4o: ~$2.50/1M input, ~$10/1M output
+             # Anthropic Claude-3-Sonnet: ~$3/1M input, ~$15/1M output
+             pricing_map = {
+                 "openai": {"gpt-4o": 0.00000250, "gpt-4": 0.00003000},
+                 "anthropic": {"claude-3-sonnet": 0.00000300, "claude-3-opus": 0.00001500},
+                 "google": {"gemini-pro": 0.00000125},
+             }
+
+             # Get unit price
+             provider_pricing = pricing_map.get(provider, {})
+             unit_price_usd = provider_pricing.get(model, 0.00002000)  # Default $0.00002/token
+
+             # Calculate total cost
+             total_cost_usd = (actual_input_tokens + actual_output_tokens) * unit_price_usd
+
+             # Build payload
+             payload = {
+                 "schema_version": "1.0",
+                 "trace_id": trace_id,
+                 "span_id": span_id,
+                 "parent_id": parent_span_id,  # Note: parent_id not parent_span_id
+                 "tenant_id": tenant_id,
+                 "workflow_id": workflow_id,
+                 "sandbox_id": sandbox_id,
+                 "runtime_env": runtime_env,
+                 "provider": provider,
+                 "model_name": model,
+                 "model_id": model,  # For backward compatibility
+                 "operation": operation,
+                 "endpoint": func.__name__,
+                 "input_tokens": actual_input_tokens,
+                 "output_tokens": actual_output_tokens,
+                 "duration_ms": duration_ms,
+                 "latency_ms": duration_ms,  # For backward compatibility
+                 "unit_price_usd": unit_price_usd,
+                 "total_cost_usd": round(total_cost_usd, 6),
+                 "cost_usd": round(total_cost_usd, 6),  # For backward compatibility
+                 "status": status,
+                 "error_type": error_type,
+                 "error_message": error_message,
+                 "timestamp": datetime.now(timezone.utc).isoformat(),
+                 "ts_start": datetime.now(timezone.utc).isoformat(),
+                 "ts_end": datetime.now(timezone.utc).isoformat(),
+                 "environment": os.getenv("KALIBR_ENVIRONMENT", "prod"),
+                 "service": os.getenv("KALIBR_SERVICE", "kalibr-app"),
+                 "vendor": provider,  # v0.2 compatibility
+                 "data_class": "economic",
+             }
+
+             # Send event to collector
+             send_event(payload)
+
+             # ========================================================================
+             # PHASE 6: Append hop to capsule if available (from middleware)
+             # ========================================================================
+             try:
+                 # Check if we're in a FastAPI context with capsule middleware
+                 from starlette.requests import Request
+
+                 # Try to get capsule from request context (if available)
+                 capsule = kwargs.get("__kalibr_capsule")
+
+                 if capsule:
+                     # Create hop from trace data
+                     hop = {
+                         "provider": provider,
+                         "operation": operation,
+                         "model": model,
+                         "duration_ms": duration_ms,
+                         "status": status,
+                         "cost_usd": round(total_cost_usd, 6),
+                         "input_tokens": actual_input_tokens,
+                         "output_tokens": actual_output_tokens,
+                     }
+
+                     if error_type:
+                         hop["error_type"] = error_type
+
+                     # Add agent name if available
+                     agent_name = os.getenv("KALIBR_AGENT_NAME", func.__name__)
+                     hop["agent_name"] = agent_name
+
+                     capsule.append_hop(hop)
+                     print(
+                         f"[Kalibr SDK] 📦 Appended hop to capsule: {operation} ({provider}/{model})"
+                     )
+             except Exception as e:
+                 # Capsule update is non-critical, just log
+                 print(f"[Kalibr SDK] ⚠️ Could not update capsule: {e}")
+             # ========================================================================
+
+             # Re-raise exception if there was one
+             if exception_to_raise:
+                 raise exception_to_raise
+
+             return result
+
+         return wrapper
+
+     return decorator
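For orientation, a minimal sketch of how the new decorator might be exercised. It only uses names defined in the module above; the collector URL and API key shown are the defaults read by send_event(), and the stubbed function body is illustrative rather than part of the package.

# Sketch only: drives kalibr.simple_tracer.trace against a stubbed "LLM" call.
import os

os.environ.setdefault("KALIBR_COLLECTOR_URL", "http://localhost:8001/api/ingest")  # send_event default
os.environ.setdefault("KALIBR_API_KEY", "test_key_12345")  # send_event default

from kalibr.simple_tracer import trace


@trace(operation="summarize", provider="openai", model="gpt-4o")
def summarize(text: str) -> str:
    # Stand-in for a real provider call; keeps the example self-contained.
    return text[:100]


if __name__ == "__main__":
    print(summarize("Long document ..."))  # one telemetry event is posted to the collector

Token counts fall back to the decorator defaults (1000 input / 500 output) here because none were supplied, and cost is computed from the built-in pricing map.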
kalibr/tokens.py ADDED
@@ -0,0 +1,52 @@
+ """Token counting utilities."""
+
+ from typing import Optional
+
+ import tiktoken
+
+ # Cache for tokenizer instances
+ _tokenizer_cache = {}
+
+
+ def count_tokens(text: str, model_id: str) -> int:
+     """Count tokens for given text and model.
+
+     Args:
+         text: Input text
+         model_id: Model identifier
+
+     Returns:
+         Token count (approximate)
+     """
+     if not text:
+         return 0
+
+     # Try to get exact tokenizer for OpenAI models
+     if "gpt" in model_id.lower():
+         try:
+             encoding = get_openai_encoding(model_id)
+             return len(encoding.encode(text))
+         except Exception:
+             pass
+
+     # Fallback: approximate (1 token ~= 4 chars)
+     return len(text) // 4
+
+
+ def get_openai_encoding(model_id: str):
+     """Get tiktoken encoding for OpenAI model."""
+     if model_id in _tokenizer_cache:
+         return _tokenizer_cache[model_id]
+
+     try:
+         # Map model to encoding
+         if "gpt-4" in model_id or "gpt-3.5" in model_id:
+             encoding = tiktoken.encoding_for_model("gpt-4")
+         else:
+             encoding = tiktoken.get_encoding("cl100k_base")
+
+         _tokenizer_cache[model_id] = encoding
+         return encoding
+     except Exception as e:
+         print(f"⚠️ Failed to load tokenizer for {model_id}: {e}")
+         raise
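A short usage sketch of the helpers above, assuming tiktoken is installed (it is a top-level import of the module). Model ids containing "gpt" take the exact tokenizer path; anything else falls back to the 4-characters-per-token heuristic.

# Sketch only: illustrates count_tokens() from kalibr/tokens.py above.
from kalibr.tokens import count_tokens

prompt = "Summarize the quarterly report in three bullet points."

# "gpt" in the id: uses tiktoken's gpt-4 / cl100k_base encoding.
print(count_tokens(prompt, "gpt-4o"))

# Non-GPT id: approximate path, len(text) // 4.
print(count_tokens(prompt, "claude-3-sonnet"))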
kalibr/trace_capsule.py ADDED
@@ -0,0 +1,296 @@
+ """
+ Kalibr Trace Capsule - Portable JSON payload for cross-MCP trace propagation.
+
+ A capsule carries observability context across agent hops, maintaining a rolling
+ window of recent operations and aggregate metrics.
+
+ Usage:
+     from kalibr.trace_capsule import TraceCapsule
+
+     # Create new capsule
+     capsule = TraceCapsule()
+
+     # Append hop
+     capsule.append_hop({
+         "provider": "openai",
+         "operation": "summarize",
+         "model": "gpt-4o",
+         "duration_ms": 1200,
+         "status": "success",
+         "cost_usd": 0.005
+     })
+
+     # Serialize for HTTP header
+     header_value = capsule.to_json()
+
+     # Deserialize from header
+     received_capsule = TraceCapsule.from_json(header_value)
+ """
+
+ import json
+ import uuid
+ from datetime import datetime, timezone
+ from typing import Any, Dict, List, Optional
+
+
+ class TraceCapsule:
+     """Portable JSON payload containing rolling trace history.
+
+     Attributes:
+         trace_id: Unique identifier for the trace chain
+         timestamp: ISO 8601 timestamp of last update
+         aggregate_cost_usd: Cumulative cost across all hops
+         aggregate_latency_ms: Cumulative latency across all hops
+         last_n_hops: Rolling window of last N hops (max 5)
+         tenant_id: Optional tenant identifier
+         workflow_id: Optional workflow identifier
+         metadata: Optional custom metadata
+     """
+
+     MAX_HOPS = 5  # Keep payload compact for HTTP headers
+
+     def __init__(
+         self,
+         trace_id: Optional[str] = None,
+         last_n_hops: Optional[List[Dict[str, Any]]] = None,
+         aggregate_cost_usd: float = 0.0,
+         aggregate_latency_ms: float = 0.0,
+         tenant_id: Optional[str] = None,
+         workflow_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         context_token: Optional[str] = None,
+         parent_context_token: Optional[str] = None,
+     ):
+         """Initialize a new TraceCapsule.
+
+         Args:
+             trace_id: Unique trace identifier (generates UUID if not provided)
+             last_n_hops: Existing hop history
+             aggregate_cost_usd: Starting cumulative cost
+             aggregate_latency_ms: Starting cumulative latency
+             tenant_id: Tenant identifier
+             workflow_id: Workflow identifier
+             metadata: Custom metadata
+             context_token: Context token for this runtime session (Phase 3C)
+             parent_context_token: Parent runtime's context token (Phase 3C)
+         """
+         self.trace_id = trace_id or str(uuid.uuid4())
+         self.timestamp = datetime.now(timezone.utc).isoformat()
+         self.aggregate_cost_usd = aggregate_cost_usd
+         self.aggregate_latency_ms = aggregate_latency_ms
+         self.last_n_hops: List[Dict[str, Any]] = last_n_hops or []
+         self.tenant_id = tenant_id
+         self.workflow_id = workflow_id
+         self.metadata = metadata or {}
+         # Phase 3C: Context token propagation (keep as UUID for consistency)
+         self.context_token = context_token or str(uuid.uuid4())
+         self.parent_context_token = parent_context_token
+
+     def append_hop(self, hop: Dict[str, Any]) -> None:
+         """Append a new hop to the capsule.
+
+         Maintains a rolling window of last N hops to keep payload compact.
+         Updates aggregate metrics automatically.
+
+         Args:
+             hop: Dictionary containing hop metadata
+                 Required fields: provider, operation, status
+                 Optional fields: model, duration_ms, cost_usd, input_tokens,
+                     output_tokens, error_type, agent_name
+
+         Example:
+             capsule.append_hop({
+                 "provider": "openai",
+                 "operation": "chat_completion",
+                 "model": "gpt-4o",
+                 "duration_ms": 1200,
+                 "status": "success",
+                 "cost_usd": 0.005,
+                 "input_tokens": 150,
+                 "output_tokens": 75,
+                 "agent_name": "code-writer"
+             })
+         """
+         # Add hop_index
+         hop["hop_index"] = len(self.last_n_hops)
+
+         # Append to history
+         self.last_n_hops.append(hop)
+
+         # Maintain rolling window (keep last N hops)
+         if len(self.last_n_hops) > self.MAX_HOPS:
+             self.last_n_hops.pop(0)
+
+         # Update aggregates
+         self.aggregate_cost_usd += hop.get("cost_usd", 0.0)
+         self.aggregate_latency_ms += hop.get("duration_ms", 0.0)
+
+         # Update timestamp
+         self.timestamp = datetime.now(timezone.utc).isoformat()
+
+     def get_last_hop(self) -> Optional[Dict[str, Any]]:
+         """Get the most recent hop.
+
+         Returns:
+             Last hop dictionary or None if no hops exist
+         """
+         return self.last_n_hops[-1] if self.last_n_hops else None
+
+     def get_hop_count(self) -> int:
+         """Get total number of hops in capsule.
+
+         Returns:
+             Number of hops in the rolling window
+         """
+         return len(self.last_n_hops)
+
+     def to_json(self) -> str:
+         """Serialize capsule to JSON string for HTTP header transmission.
+
+         Returns:
+             Compact JSON string representation
+         """
+         data = {
+             "trace_id": self.trace_id,
+             "timestamp": self.timestamp,
+             "aggregate_cost_usd": round(self.aggregate_cost_usd, 6),
+             "aggregate_latency_ms": round(self.aggregate_latency_ms, 2),
+             "last_n_hops": self.last_n_hops,
+         }
+
+         # Add optional fields only if present
+         if self.tenant_id:
+             data["tenant_id"] = self.tenant_id
+         if self.workflow_id:
+             data["workflow_id"] = self.workflow_id
+         if self.metadata:
+             data["metadata"] = self.metadata
+
+         # Phase 3C: Include context tokens
+         if self.context_token:
+             data["context_token"] = self.context_token
+         if self.parent_context_token:
+             data["parent_context_token"] = self.parent_context_token
+
+         return json.dumps(data, separators=(",", ":"))  # Compact JSON
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert capsule to dictionary.
+
+         Returns:
+             Dictionary representation of capsule
+         """
+         data = {
+             "trace_id": self.trace_id,
+             "timestamp": self.timestamp,
+             "aggregate_cost_usd": self.aggregate_cost_usd,
+             "aggregate_latency_ms": self.aggregate_latency_ms,
+             "last_n_hops": self.last_n_hops,
+         }
+
+         if self.tenant_id:
+             data["tenant_id"] = self.tenant_id
+         if self.workflow_id:
+             data["workflow_id"] = self.workflow_id
+         if self.metadata:
+             data["metadata"] = self.metadata
+
+         # Phase 3C: Include context tokens
+         if self.context_token:
+             data["context_token"] = self.context_token
+         if self.parent_context_token:
+             data["parent_context_token"] = self.parent_context_token
+
+         return data
+
+     @classmethod
+     def from_json(cls, s: str) -> "TraceCapsule":
+         """Deserialize capsule from JSON string.
+
+         Args:
+             s: JSON string from HTTP header
+
+         Returns:
+             TraceCapsule instance (a fresh, empty capsule if parsing fails;
+             errors are logged rather than raised)
+         """
+         try:
+             data = json.loads(s)
+             return cls(
+                 trace_id=data.get("trace_id"),
+                 last_n_hops=data.get("last_n_hops", []),
+                 aggregate_cost_usd=data.get("aggregate_cost_usd", 0.0),
+                 aggregate_latency_ms=data.get("aggregate_latency_ms", 0.0),
+                 tenant_id=data.get("tenant_id"),
+                 workflow_id=data.get("workflow_id"),
+                 metadata=data.get("metadata"),
+                 # Phase 3C: Context token propagation
+                 context_token=data.get("context_token"),
+                 parent_context_token=data.get("parent_context_token"),
+             )
+         except json.JSONDecodeError as e:
+             # Return empty capsule if parsing fails (graceful degradation)
+             print(f"⚠️ Failed to parse TraceCapsule: {e}")
+             return cls()
+         except Exception as e:
+             print(f"⚠️ Error creating TraceCapsule: {e}")
+             return cls()
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "TraceCapsule":
+         """Create capsule from dictionary.
+
+         Args:
+             data: Dictionary containing capsule data
+
+         Returns:
+             TraceCapsule instance
+         """
+         return cls(
+             trace_id=data.get("trace_id"),
+             last_n_hops=data.get("last_n_hops", []),
+             aggregate_cost_usd=data.get("aggregate_cost_usd", 0.0),
+             aggregate_latency_ms=data.get("aggregate_latency_ms", 0.0),
+             tenant_id=data.get("tenant_id"),
+             workflow_id=data.get("workflow_id"),
+             metadata=data.get("metadata"),
+         )
+
+     def __repr__(self) -> str:
+         """String representation of capsule."""
+         return (
+             f"TraceCapsule(trace_id={self.trace_id}, "
+             f"hops={len(self.last_n_hops)}, "
+             f"cost=${self.aggregate_cost_usd:.6f}, "
+             f"latency={self.aggregate_latency_ms:.2f}ms)"
+         )
+
+     def __str__(self) -> str:
+         """Human-readable string representation."""
+         hops_summary = ", ".join(
+             [f"{hop.get('provider', '?')}/{hop.get('operation', '?')}" for hop in self.last_n_hops]
+         )
+         return (
+             f"TraceCapsule[{self.trace_id}]: "
+             f"{len(self.last_n_hops)} hops ({hops_summary}), "
+             f"${self.aggregate_cost_usd:.4f}, "
+             f"{self.aggregate_latency_ms:.0f}ms"
+         )
+
+
+ # Convenience function for FastAPI integration
+ def get_or_create_capsule(header_value: Optional[str] = None) -> TraceCapsule:
+     """Get existing capsule from header or create new one.
+
+     Args:
+         header_value: Value of X-Kalibr-Capsule header
+
+     Returns:
+         TraceCapsule instance
+     """
+     if header_value:
+         return TraceCapsule.from_json(header_value)
+     return TraceCapsule()
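A short propagation sketch built only from the API above. The X-Kalibr-Capsule header name comes from get_or_create_capsule's docstring; the tenant and workflow ids are illustrative.

# Sketch only: one hop of capsule propagation using the TraceCapsule API above.
from kalibr.trace_capsule import TraceCapsule, get_or_create_capsule

# Upstream service: record a hop and serialize the capsule for an outbound header.
capsule = TraceCapsule(tenant_id="acme", workflow_id="demo")  # illustrative ids
capsule.append_hop({
    "provider": "openai",
    "operation": "summarize",
    "model": "gpt-4o",
    "duration_ms": 1200,
    "status": "success",
    "cost_usd": 0.005,
})
outgoing_headers = {"X-Kalibr-Capsule": capsule.to_json()}  # attach to the downstream HTTP call

# Downstream service: rebuild the capsule from the incoming header (or start a fresh one).
received = get_or_create_capsule(outgoing_headers.get("X-Kalibr-Capsule"))
print(received)  # TraceCapsule[<trace_id>]: 1 hops (openai/summarize), $0.0050, 1200ms

Because the window is capped at MAX_HOPS = 5, the serialized capsule stays small enough to ride along as an HTTP header while aggregate cost and latency keep accumulating across hops.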