traccia 0.1.2-py3-none-any.whl → 0.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. traccia/__init__.py +73 -0
  2. traccia/auto.py +736 -0
  3. traccia/auto_instrumentation.py +74 -0
  4. traccia/cli.py +349 -0
  5. traccia/config.py +693 -0
  6. traccia/context/__init__.py +33 -0
  7. traccia/context/context.py +67 -0
  8. traccia/context/propagators.py +283 -0
  9. traccia/errors.py +48 -0
  10. traccia/exporter/__init__.py +8 -0
  11. traccia/exporter/console_exporter.py +31 -0
  12. traccia/exporter/file_exporter.py +178 -0
  13. traccia/exporter/http_exporter.py +214 -0
  14. traccia/exporter/otlp_exporter.py +190 -0
  15. traccia/instrumentation/__init__.py +20 -0
  16. traccia/instrumentation/anthropic.py +92 -0
  17. traccia/instrumentation/decorator.py +263 -0
  18. traccia/instrumentation/fastapi.py +38 -0
  19. traccia/instrumentation/http_client.py +21 -0
  20. traccia/instrumentation/http_server.py +25 -0
  21. traccia/instrumentation/openai.py +178 -0
  22. traccia/instrumentation/requests.py +68 -0
  23. traccia/integrations/__init__.py +22 -0
  24. traccia/integrations/langchain/__init__.py +14 -0
  25. traccia/integrations/langchain/callback.py +418 -0
  26. traccia/integrations/langchain/utils.py +129 -0
  27. traccia/pricing_config.py +58 -0
  28. traccia/processors/__init__.py +35 -0
  29. traccia/processors/agent_enricher.py +159 -0
  30. traccia/processors/batch_processor.py +140 -0
  31. traccia/processors/cost_engine.py +71 -0
  32. traccia/processors/cost_processor.py +70 -0
  33. traccia/processors/drop_policy.py +44 -0
  34. traccia/processors/logging_processor.py +31 -0
  35. traccia/processors/rate_limiter.py +223 -0
  36. traccia/processors/sampler.py +22 -0
  37. traccia/processors/token_counter.py +216 -0
  38. traccia/runtime_config.py +106 -0
  39. traccia/tracer/__init__.py +15 -0
  40. traccia/tracer/otel_adapter.py +577 -0
  41. traccia/tracer/otel_utils.py +24 -0
  42. traccia/tracer/provider.py +155 -0
  43. traccia/tracer/span.py +286 -0
  44. traccia/tracer/span_context.py +16 -0
  45. traccia/tracer/tracer.py +243 -0
  46. traccia/utils/__init__.py +19 -0
  47. traccia/utils/helpers.py +95 -0
  48. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/METADATA +32 -15
  49. traccia-0.1.5.dist-info/RECORD +53 -0
  50. traccia-0.1.5.dist-info/top_level.txt +1 -0
  51. traccia-0.1.2.dist-info/RECORD +0 -6
  52. traccia-0.1.2.dist-info/top_level.txt +0 -1
  53. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/WHEEL +0 -0
  54. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/entry_points.txt +0 -0
  55. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/licenses/LICENSE +0 -0
traccia/integrations/langchain/callback.py
@@ -0,0 +1,418 @@
+ """Traccia callback handler for LangChain."""
+
+ from __future__ import annotations
+
+ import json
+ from typing import Any, Dict, List, Optional, Set
+ from uuid import UUID
+
+ try:
+     from langchain_core.callbacks import BaseCallbackHandler
+     from langchain_core.outputs import LLMResult
+     from langchain_core.messages import BaseMessage
+ except ImportError as e:
+     raise ModuleNotFoundError(
+         "LangChain integration requires langchain-core. "
+         "Install with: pip install traccia[langchain]"
+     ) from e
+
+ from traccia import get_tracer
+ from traccia.tracer.span import SpanStatus
+
+
+ class TracciaCallbackHandler(BaseCallbackHandler):
+     """
+     LangChain callback handler that creates Traccia spans for LLM and chain runs.
+
+     This handler integrates LangChain with Traccia's tracing system, creating spans
+     for LLM calls with the same attributes used by Traccia's OpenAI instrumentation.
+
+     Usage:
+         ```python
+         from traccia import init
+         from traccia.integrations.langchain import CallbackHandler  # or TracciaCallbackHandler
+         from langchain_openai import ChatOpenAI
+
+         # Initialize Traccia
+         init()
+
+         # Create Traccia handler (no args)
+         traccia_handler = CallbackHandler()
+
+         # Use with any LangChain runnable
+         llm = ChatOpenAI(model="gpt-4o-mini")
+         result = llm.invoke(
+             "Tell me a joke",
+             config={"callbacks": [traccia_handler]}
+         )
+         ```
+
+     Note:
+         Requires langchain-core to be installed:
+         ```bash
+         pip install traccia[langchain]
+         ```
+     """
+
+     def __init__(self):
+         """Initialize the callback handler."""
+         super().__init__()
+         self.tracer = get_tracer("traccia.langchain")
+
+         # Track active spans by run_id
+         self._spans: Dict[UUID, Any] = {}
+         self._context_tokens: Dict[UUID, Any] = {}
+
+         # Track parent relationships
+         self._parent_map: Dict[UUID, Optional[UUID]] = {}
+
+     def on_llm_start(
+         self,
+         serialized: Dict[str, Any],
+         prompts: List[str],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Handle LLM start event."""
+         self._parent_map[run_id] = parent_run_id
+
+         try:
+             # Extract attributes
+             attributes = self._build_llm_attributes(
+                 serialized, prompts, None, kwargs, metadata
+             )
+
+             # Start span
+             span = self.tracer.start_as_current_span(
+                 "llm.langchain.run",
+                 attributes=attributes
+             )
+
+             # Store span
+             self._spans[run_id] = span
+             self._context_tokens[run_id] = span
+
+         except Exception as e:
+             # Don't break LangChain execution
+             import logging
+             logging.getLogger(__name__).exception(f"Error in on_llm_start: {e}")
+
+     def on_chat_model_start(
+         self,
+         serialized: Dict[str, Any],
+         messages: List[List[BaseMessage]],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Handle chat model start event."""
+         self._parent_map[run_id] = parent_run_id
+
+         try:
+             # Convert messages to prompt format
+             message_dicts = []
+             for msg_list in messages:
+                 for msg in msg_list:
+                     message_dicts.append(self._convert_message_to_dict(msg))
+
+             # Extract attributes
+             attributes = self._build_llm_attributes(
+                 serialized, None, message_dicts, kwargs, metadata
+             )
+
+             # Start span
+             span = self.tracer.start_as_current_span(
+                 "llm.langchain.run",
+                 attributes=attributes
+             )
+
+             # Store span
+             self._spans[run_id] = span
+             self._context_tokens[run_id] = span
+
+         except Exception as e:
+             import logging
+             logging.getLogger(__name__).exception(f"Error in on_chat_model_start: {e}")
+
+     def on_llm_end(
+         self,
+         response: LLMResult,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Handle LLM end event."""
+         try:
+             span = self._spans.pop(run_id, None)
+             if span is None:
+                 return
+
+             # Extract usage and output
+             self._set_llm_response_attributes(span, response)
+
+             # End span
+             span.__exit__(None, None, None)
+
+             # Clean up context
+             self._context_tokens.pop(run_id, None)
+             self._parent_map.pop(run_id, None)
+
+         except Exception as e:
+             import logging
+             logging.getLogger(__name__).exception(f"Error in on_llm_end: {e}")
+
+     def on_llm_error(
+         self,
+         error: BaseException,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Handle LLM error event."""
+         try:
+             span = self._spans.pop(run_id, None)
+             if span is None:
+                 return
+
+             # Record exception
+             span._otel_span.record_exception(error)
+             span.set_status(SpanStatus.ERROR, str(error))
+
+             # End span
+             span.__exit__(type(error), error, None)
+
+             # Clean up
+             self._context_tokens.pop(run_id, None)
+             self._parent_map.pop(run_id, None)
+
+         except Exception as e:
+             import logging
+             logging.getLogger(__name__).exception(f"Error in on_llm_error: {e}")
+
+     def on_chain_start(
+         self,
+         serialized: Dict[str, Any],
+         inputs: Dict[str, Any],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Handle chain start event (optional Phase 2)."""
+         self._parent_map[run_id] = parent_run_id
+         # Phase 2: Can add chain spans here
+
+     def on_chain_end(
+         self,
+         outputs: Dict[str, Any],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Handle chain end event (optional Phase 2)."""
+         self._parent_map.pop(run_id, None)
+         # Phase 2: Can end chain spans here
+
+     def on_chain_error(
+         self,
+         error: BaseException,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Handle chain error event (optional Phase 2)."""
+         self._parent_map.pop(run_id, None)
+         # Phase 2: Can handle chain errors here
+
+     def _build_llm_attributes(
+         self,
+         serialized: Dict[str, Any],
+         prompts: Optional[List[str]],
+         messages: Optional[List[Dict[str, Any]]],
+         kwargs: Dict[str, Any],
+         metadata: Optional[Dict[str, Any]],
+     ) -> Dict[str, Any]:
+         """Build LLM span attributes."""
+         from traccia.integrations.langchain.utils import extract_model_name
+
+         attributes: Dict[str, Any] = {}
+
+         # Extract vendor
+         vendor = self._extract_vendor(serialized)
+         if vendor:
+             attributes["llm.vendor"] = vendor
+
+         # Extract model
+         model = extract_model_name(serialized, kwargs, metadata)
+         if model:
+             attributes["llm.model"] = model
+
+         # Set prompt
+         if messages:
+             # Chat messages
+             prompt_text = self._format_messages_as_prompt(messages)
+             if prompt_text:
+                 attributes["llm.prompt"] = prompt_text
+
+             # Store messages as JSON (truncated)
+             try:
+                 messages_json = json.dumps(messages)[:1000]
+                 attributes["llm.openai.messages"] = messages_json
+             except Exception:
+                 pass
+         elif prompts:
+             # Text prompts
+             if len(prompts) == 1:
+                 attributes["llm.prompt"] = prompts[0]
+             else:
+                 attributes["llm.prompt"] = json.dumps(prompts)[:1000]
+
+         return attributes
+
+     def _set_llm_response_attributes(
+         self,
+         span: Any,
+         response: LLMResult,
+     ) -> None:
+         """Set response attributes on span."""
+         usage = self._parse_usage(response)
+         if usage:
+             span.set_attribute("llm.usage.source", "provider_usage")
+             prompt_tokens = usage.get("prompt_tokens") or usage.get("input_tokens")
+             if prompt_tokens is not None:
+                 span.set_attribute("llm.usage.prompt_tokens", int(prompt_tokens))
+                 span.set_attribute("llm.usage.prompt_source", "provider_usage")
+             completion_tokens = usage.get("completion_tokens") or usage.get("output_tokens")
+             if completion_tokens is not None:
+                 span.set_attribute("llm.usage.completion_tokens", int(completion_tokens))
+                 span.set_attribute("llm.usage.completion_source", "provider_usage")
+             total = usage.get("total_tokens")
+             if total is not None:
+                 span.set_attribute("llm.usage.total_tokens", int(total))
+
+         # Extract completion (last generation text or message content)
+         if response.generations and len(response.generations) > 0:
+             last_gen = response.generations[-1]
+             if last_gen and len(last_gen) > 0:
+                 chunk = last_gen[-1]
+                 completion = getattr(chunk, "text", None) or (
+                     getattr(getattr(chunk, "message", None), "content", None)
+                 )
+                 if completion:
+                     span.set_attribute("llm.completion", str(completion))
+
+     def _extract_vendor(self, serialized: Dict[str, Any]) -> Optional[str]:
+         """Extract vendor from serialized LLM config."""
+         if not serialized or "id" not in serialized:
+             return None
+
+         id_list = serialized["id"]
+         if not isinstance(id_list, list) or len(id_list) == 0:
+             return None
+
+         # Get last component (class name)
+         class_name = id_list[-1].lower()
+
+         # Map to vendor
+         if "openai" in class_name:
+             return "openai"
+         elif "anthropic" in class_name:
+             return "anthropic"
+         elif "cohere" in class_name:
+             return "cohere"
+         elif "huggingface" in class_name:
+             return "huggingface"
+         elif "vertexai" in class_name or "vertex" in class_name:
+             return "google"
+         elif "bedrock" in class_name:
+             return "aws"
+
+         return "langchain"
+
+     def _convert_message_to_dict(self, message: BaseMessage) -> Dict[str, Any]:
+         """Convert LangChain message to dict."""
+         return {
+             "role": getattr(message, "type", "unknown"),
+             "content": str(message.content) if hasattr(message, "content") else str(message),
+         }
+
+     def _format_messages_as_prompt(self, messages: List[Dict[str, Any]]) -> str:
+         """Format messages as a prompt string."""
+         parts = []
+         for msg in messages:
+             role = msg.get("role", "unknown")
+             content = msg.get("content", "")
+             if content:
+                 parts.append(f"{role}: {content}")
+         return "\n".join(parts) if parts else ""
+
+     def _parse_usage(self, response: LLMResult) -> Optional[Dict[str, Any]]:
+         """
+         Extract token usage from LLMResult.
+         Checks llm_output['token_usage'], llm_output['usage'], and generation_info.
+         """
+         usage = None
+         if response.llm_output:
+             for key in ("token_usage", "usage"):
+                 if key in response.llm_output and response.llm_output[key]:
+                     raw = response.llm_output[key]
+                     usage = self._normalize_usage(raw)
+                     if usage:
+                         break
+         if not usage and response.generations:
+             for gen_list in response.generations:
+                 for chunk in gen_list:
+                     if getattr(chunk, "generation_info", None) and isinstance(
+                         chunk.generation_info, dict
+                     ):
+                         raw = chunk.generation_info.get("usage_metadata")
+                         if raw:
+                             usage = self._normalize_usage(raw)
+                             break
+                     msg = getattr(chunk, "message", None)
+                     if msg is not None:
+                         meta = getattr(msg, "response_metadata", None) or {}
+                         raw = meta.get("usage") if isinstance(meta, dict) else None
+                         if raw:
+                             usage = self._normalize_usage(raw)
+                             break
+                 if usage:
+                     break
+         return usage
+
+     @staticmethod
+     def _normalize_usage(raw: Any) -> Optional[Dict[str, Any]]:
+         """Normalize usage dict to prompt_tokens, completion_tokens, total_tokens."""
+         if raw is None:
+             return None
+         if hasattr(raw, "__dict__"):
+             raw = getattr(raw, "__dict__", raw)
+         if not isinstance(raw, dict):
+             return None
+         # Map common keys to Traccia/OpenAI style
+         prompt = raw.get("prompt_tokens") or raw.get("input_tokens") or raw.get("input")
+         completion = raw.get("completion_tokens") or raw.get("output_tokens") or raw.get("output")
+         total = raw.get("total_tokens") or raw.get("total")
+         if prompt is None and completion is None and total is None:
+             return None
+         out: Dict[str, Any] = {}
+         if prompt is not None:
+             out["prompt_tokens"] = int(prompt) if not isinstance(prompt, list) else sum(prompt)
+         if completion is not None:
+             out["completion_tokens"] = int(completion) if not isinstance(completion, list) else sum(completion)
+         if total is not None:
+             out["total_tokens"] = int(total) if not isinstance(total, list) else sum(total)
+         return out if out else None
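For reference, the usage-normalization path above accepts several provider payload shapes and maps them onto one OpenAI-style dict. A minimal sketch of that mapping; `_normalize_usage` is a private helper of the class, so the direct call below is purely illustrative and assumes `traccia[langchain]` is installed:

```python
from traccia.integrations.langchain.callback import TracciaCallbackHandler

# OpenAI-style and Anthropic-style usage payloads normalize to the same keys.
print(TracciaCallbackHandler._normalize_usage(
    {"prompt_tokens": 12, "completion_tokens": 30, "total_tokens": 42}
))
# {'prompt_tokens': 12, 'completion_tokens': 30, 'total_tokens': 42}

print(TracciaCallbackHandler._normalize_usage(
    {"input_tokens": 12, "output_tokens": 30}
))
# {'prompt_tokens': 12, 'completion_tokens': 30}
```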
traccia/integrations/langchain/utils.py
@@ -0,0 +1,129 @@
+ """Utility functions for LangChain integration."""
+
+ from __future__ import annotations
+
+ import re
+ from typing import Any, Dict, List, Literal, Optional, cast
+
+
+ def extract_model_name(
+     serialized: Optional[Dict[str, Any]],
+     kwargs: Dict[str, Any],
+     metadata: Optional[Dict[str, Any]] = None,
+ ) -> Optional[str]:
+     """
+     Extract model name from LangChain serialized config, invocation params, or metadata.
+
+     Args:
+         serialized: LangChain's serialized component dict
+         kwargs: Keyword arguments from the callback (contains invocation_params)
+         metadata: Optional metadata dict
+
+     Returns:
+         Model name string or None
+     """
+     # Check metadata first
+     if metadata:
+         model_from_meta = _parse_model_name_from_metadata(metadata)
+         if model_from_meta:
+             return model_from_meta
+
+     # Try known model paths by ID
+     models_by_id = [
+         ("ChatOpenAI", ["invocation_params", "model_name"], "kwargs"),
+         ("ChatOpenAI", ["invocation_params", "model"], "kwargs"),
+         ("OpenAI", ["invocation_params", "model_name"], "kwargs"),
+         ("AzureChatOpenAI", ["invocation_params", "model"], "kwargs"),
+         ("AzureChatOpenAI", ["invocation_params", "model_name"], "kwargs"),
+         ("AzureChatOpenAI", ["invocation_params", "azure_deployment"], "kwargs"),
+         ("ChatAnthropic", ["invocation_params", "model"], "kwargs"),
+         ("ChatAnthropic", ["invocation_params", "model_name"], "kwargs"),
+         ("ChatGoogleGenerativeAI", ["kwargs", "model"], "serialized"),
+         ("ChatVertexAI", ["kwargs", "model_name"], "serialized"),
+         ("BedrockChat", ["kwargs", "model_id"], "serialized"),
+         ("ChatBedrock", ["kwargs", "model_id"], "serialized"),
+     ]
+
+     for model_name, keys, select_from in models_by_id:
+         model = _extract_model_by_path_for_id(
+             model_name,
+             serialized,
+             kwargs,
+             keys,
+             cast(Literal["serialized", "kwargs"], select_from),
+         )
+         if model:
+             return model
+
+     # Try common paths as catch-all
+     common_paths = [
+         ["invocation_params", "model_name"],
+         ["invocation_params", "model"],
+         ["kwargs", "model_name"],
+         ["kwargs", "model"],
+     ]
+
+     for select in ["kwargs", "serialized"]:
+         for path in common_paths:
+             model = _extract_model_by_path(
+                 serialized, kwargs, path, cast(Literal["serialized", "kwargs"], select)
+             )
+             if model:
+                 return str(model)
+
+     return None
+
+
+ def _parse_model_name_from_metadata(metadata: Dict[str, Any]) -> Optional[str]:
+     """Extract model name from metadata if present."""
+     if not isinstance(metadata, dict):
+         return None
+     return metadata.get("ls_model_name", None)
+
+
+ def _extract_model_by_path_for_id(
+     id: str,
+     serialized: Optional[Dict[str, Any]],
+     kwargs: Dict[str, Any],
+     keys: List[str],
+     select_from: Literal["serialized", "kwargs"],
+ ) -> Optional[str]:
+     """Extract model if the serialized ID matches."""
+     if serialized is None and select_from == "serialized":
+         return None
+
+     if serialized:
+         serialized_id = serialized.get("id")
+         if (
+             serialized_id
+             and isinstance(serialized_id, list)
+             and len(serialized_id) > 0
+             and serialized_id[-1] == id
+         ):
+             result = _extract_model_by_path(serialized, kwargs, keys, select_from)
+             return str(result) if result is not None else None
+
+     return None
+
+
+ def _extract_model_by_path(
+     serialized: Optional[Dict[str, Any]],
+     kwargs: dict,
+     keys: List[str],
+     select_from: Literal["serialized", "kwargs"],
+ ) -> Optional[str]:
+     """Extract value by following a path in the dict."""
+     if serialized is None and select_from == "serialized":
+         return None
+
+     current_obj = kwargs if select_from == "kwargs" else serialized
+
+     for key in keys:
+         if current_obj and isinstance(current_obj, dict):
+             current_obj = current_obj.get(key)
+         else:
+             return None
+         if not current_obj:
+             return None
+
+     return str(current_obj) if current_obj else None
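A minimal sketch of how `extract_model_name` resolves the model from the structures LangChain hands to callbacks. The example dicts below are illustrative only; the exact serialized payload varies by LangChain version:

```python
from traccia.integrations.langchain.utils import extract_model_name

# Matched via the serialized id ("ChatOpenAI") plus invocation_params in kwargs.
serialized = {"id": ["langchain", "chat_models", "openai", "ChatOpenAI"]}
kwargs = {"invocation_params": {"model_name": "gpt-4o-mini"}}
print(extract_model_name(serialized, kwargs))  # gpt-4o-mini

# Metadata wins when LangChain provides ls_model_name.
print(extract_model_name(None, {}, {"ls_model_name": "claude-3-5-sonnet"}))  # claude-3-5-sonnet
```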
traccia/pricing_config.py
@@ -0,0 +1,58 @@
+ """Pricing configuration fetcher with optional env override.
+
+ Pricing should be treated as configuration, not source code: vendors update
+ prices and model versions frequently. The SDK therefore supports:
+ - defaults (stub)
+ - env override: AGENT_DASHBOARD_PRICING_JSON
+ - direct override via start_tracing(pricing_override=...)
+ """
+
+ from __future__ import annotations
+
+ import json
+ import os
+ from typing import Dict, Literal, Tuple
+
+ from traccia.processors.cost_engine import DEFAULT_PRICING
+
+
+ def fetch_remote_pricing() -> Dict[str, Dict[str, float]]:
+     """
+     Placeholder for remote pricing sync.
+     In production this would fetch from backend service; here we return defaults.
+     """
+     return DEFAULT_PRICING.copy()
+
+
+ PricingSource = Literal["default", "env", "override"]
+
+
+ def load_pricing_with_source(
+     override: Dict[str, Dict[str, float]] | None = None,
+ ) -> Tuple[Dict[str, Dict[str, float]], PricingSource]:
+     """
+     Return (pricing_table, source_of_latest_override).
+     """
+     pricing = fetch_remote_pricing()
+     source: PricingSource = "default"
+
+     env_override = os.getenv("AGENT_DASHBOARD_PRICING_JSON")
+     if env_override:
+         try:
+             env_pricing = json.loads(env_override)
+             if isinstance(env_pricing, dict):
+                 pricing.update(env_pricing)
+                 source = "env"
+         except Exception:
+             pass
+     if override:
+         pricing.update(override)
+         source = "override"
+     return pricing, source
+
+
+ def load_pricing(override: Dict[str, Dict[str, float]] | None = None) -> Dict[str, Dict[str, float]]:
+     """Backward-compatible helper returning only the pricing table."""
+     pricing, _ = load_pricing_with_source(override)
+     return pricing
+
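A sketch of the env-based override described in the module docstring above. The per-model key names ("input"/"output") are assumptions for illustration; use whatever keys `DEFAULT_PRICING` actually defines:

```python
import json
import os

from traccia.pricing_config import load_pricing_with_source

# Override one model's pricing without touching code (assumed inner keys).
os.environ["AGENT_DASHBOARD_PRICING_JSON"] = json.dumps(
    {"gpt-4o-mini": {"input": 0.15, "output": 0.60}}
)

pricing, source = load_pricing_with_source()
print(source)                  # env
print(pricing["gpt-4o-mini"])  # {'input': 0.15, 'output': 0.6}
```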
traccia/processors/__init__.py
@@ -0,0 +1,35 @@
+ """Span processors and supporting utilities."""
+
+ from traccia.processors.batch_processor import BatchSpanProcessor
+ from traccia.processors.drop_policy import (
+     DEFAULT_DROP_POLICY,
+     DropNewestPolicy,
+     DropOldestPolicy,
+     DropPolicy,
+ )
+ from traccia.processors.sampler import Sampler, SamplingResult
+ from traccia.processors.token_counter import TokenCountingProcessor, estimate_tokens_from_text
+ from traccia.processors.cost_engine import compute_cost, DEFAULT_PRICING
+ from traccia.processors.cost_processor import CostAnnotatingProcessor
+ from traccia.processors.logging_processor import LoggingSpanProcessor
+ from traccia.processors.agent_enricher import AgentEnrichmentProcessor
+ from traccia.processors.rate_limiter import RateLimiter, RateLimitingSpanProcessor
+
+ __all__ = [
+     "BatchSpanProcessor",
+     "DropPolicy",
+     "DropOldestPolicy",
+     "DropNewestPolicy",
+     "DEFAULT_DROP_POLICY",
+     "Sampler",
+     "SamplingResult",
+     "TokenCountingProcessor",
+     "estimate_tokens_from_text",
+     "compute_cost",
+     "DEFAULT_PRICING",
+     "CostAnnotatingProcessor",
+     "LoggingSpanProcessor",
+     "AgentEnrichmentProcessor",
+     "RateLimiter",
+     "RateLimitingSpanProcessor",
+ ]
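The re-exports above make the processors importable from the package namespace rather than from their individual modules; a names-only sketch (constructor arguments are not shown in this diff):

```python
from traccia.processors import (
    BatchSpanProcessor,
    CostAnnotatingProcessor,
    TokenCountingProcessor,
    compute_cost,
)
```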