traccia 0.1.2-py3-none-any.whl → 0.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. traccia/__init__.py +73 -0
  2. traccia/auto.py +736 -0
  3. traccia/auto_instrumentation.py +74 -0
  4. traccia/cli.py +349 -0
  5. traccia/config.py +693 -0
  6. traccia/context/__init__.py +33 -0
  7. traccia/context/context.py +67 -0
  8. traccia/context/propagators.py +283 -0
  9. traccia/errors.py +48 -0
  10. traccia/exporter/__init__.py +8 -0
  11. traccia/exporter/console_exporter.py +31 -0
  12. traccia/exporter/file_exporter.py +178 -0
  13. traccia/exporter/http_exporter.py +214 -0
  14. traccia/exporter/otlp_exporter.py +190 -0
  15. traccia/instrumentation/__init__.py +20 -0
  16. traccia/instrumentation/anthropic.py +92 -0
  17. traccia/instrumentation/decorator.py +263 -0
  18. traccia/instrumentation/fastapi.py +38 -0
  19. traccia/instrumentation/http_client.py +21 -0
  20. traccia/instrumentation/http_server.py +25 -0
  21. traccia/instrumentation/openai.py +178 -0
  22. traccia/instrumentation/requests.py +68 -0
  23. traccia/integrations/__init__.py +22 -0
  24. traccia/integrations/langchain/__init__.py +14 -0
  25. traccia/integrations/langchain/callback.py +418 -0
  26. traccia/integrations/langchain/utils.py +129 -0
  27. traccia/pricing_config.py +58 -0
  28. traccia/processors/__init__.py +35 -0
  29. traccia/processors/agent_enricher.py +159 -0
  30. traccia/processors/batch_processor.py +140 -0
  31. traccia/processors/cost_engine.py +71 -0
  32. traccia/processors/cost_processor.py +70 -0
  33. traccia/processors/drop_policy.py +44 -0
  34. traccia/processors/logging_processor.py +31 -0
  35. traccia/processors/rate_limiter.py +223 -0
  36. traccia/processors/sampler.py +22 -0
  37. traccia/processors/token_counter.py +216 -0
  38. traccia/runtime_config.py +106 -0
  39. traccia/tracer/__init__.py +15 -0
  40. traccia/tracer/otel_adapter.py +577 -0
  41. traccia/tracer/otel_utils.py +24 -0
  42. traccia/tracer/provider.py +155 -0
  43. traccia/tracer/span.py +286 -0
  44. traccia/tracer/span_context.py +16 -0
  45. traccia/tracer/tracer.py +243 -0
  46. traccia/utils/__init__.py +19 -0
  47. traccia/utils/helpers.py +95 -0
  48. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/METADATA +32 -15
  49. traccia-0.1.5.dist-info/RECORD +53 -0
  50. traccia-0.1.5.dist-info/top_level.txt +1 -0
  51. traccia-0.1.2.dist-info/RECORD +0 -6
  52. traccia-0.1.2.dist-info/top_level.txt +0 -1
  53. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/WHEEL +0 -0
  54. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/entry_points.txt +0 -0
  55. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/licenses/LICENSE +0 -0
traccia/auto.py ADDED
@@ -0,0 +1,736 @@
1
+ """Initialization helpers for wiring tracer provider, processors, and patches."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import atexit
6
+ import inspect
7
+ import os
8
+ import sys
9
+ from pathlib import Path
10
+ from typing import Optional, Any
11
+
12
+ from traccia.exporter import HttpExporter, ConsoleExporter, FileExporter, OTLPExporter
13
+ from traccia.exporter.http_exporter import DEFAULT_ENDPOINT
14
+ from traccia.instrumentation import patch_anthropic, patch_openai, patch_requests
15
+ from traccia.processors import (
16
+ BatchSpanProcessor,
17
+ Sampler,
18
+ TokenCountingProcessor,
19
+ CostAnnotatingProcessor,
20
+ LoggingSpanProcessor,
21
+ AgentEnrichmentProcessor,
22
+ )
23
+ from traccia import pricing_config
24
+ import threading
25
+ import time
26
+ from traccia.tracer.provider import TracerProvider
27
+ from traccia import config as sdk_config
28
+ from traccia import runtime_config
29
+ from traccia import auto_instrumentation
30
+
31
+ _started = False
32
+ _registered_shutdown = False
33
+ _active_processor: Optional[BatchSpanProcessor] = None
34
+ _init_method: Optional[str] = None # Track how SDK was initialized: "init" or "start_tracing"
35
+ _auto_trace_context: Optional[Any] = None # Context for auto-started trace
36
+ _auto_trace_name: str = "root" # Default name for auto-started trace
37
+
38
+
39
+ def init(
40
+ api_key: Optional[str] = None,
41
+ *,
42
+ auto_start_trace: bool = True,
43
+ auto_trace_name: str = "root",
44
+ config_file: Optional[str] = None,
45
+ **kwargs
46
+ ) -> TracerProvider:
47
+ """
48
+ Simplified initialization for Traccia SDK with config file support.
49
+
50
+ Configuration priority (highest to lowest):
51
+ 1. Explicit parameters (kwargs)
52
+ 2. Environment variables
53
+ 3. Config file (./traccia.toml or ~/.traccia/config.toml)
54
+
55
+ Args:
56
+ api_key: Optional API key (required for SaaS, optional for open-source)
57
+ auto_start_trace: If True, automatically start a root trace (default: True)
58
+ auto_trace_name: Name for auto-started trace (default: "root")
59
+ config_file: Optional explicit path to config file
60
+ **kwargs: All parameters from start_tracing() can be passed here
61
+
62
+ Returns:
63
+ TracerProvider instance
64
+
65
+ Example:
66
+ >>> import traccia
67
+ >>> traccia.init(api_key="...")
68
+ >>> # All spans created after this are children of auto-started trace
69
+ """
70
+ global _started, _init_method, _auto_trace_context, _auto_trace_name
71
+
72
+ # Check if already initialized
73
+ if _started:
74
+ if _init_method == "start_tracing":
75
+ import logging
76
+ logger = logging.getLogger(__name__)
77
+ logger.warning(
78
+ "SDK was initialized with start_tracing(). "
79
+ "Calling init() will not re-initialize. "
80
+ "Use stop_tracing() first if you need to re-initialize."
81
+ )
82
+ return _get_provider()
83
+
84
+ # Load config file if exists (lowest priority)
85
+ merged_config = {}
86
+ if config_file or sdk_config.find_config_file():
87
+ file_config = sdk_config.load_config_with_priority(config_file=config_file)
88
+ merged_config.update(file_config)
89
+
90
+ # Override with explicit parameters (highest priority)
91
+ if api_key is not None:
92
+ merged_config['api_key'] = api_key
93
+ for key, value in kwargs.items():
94
+ if value is not None:
95
+ merged_config[key] = value
96
+
97
+ # Handle auto_start_trace and auto_trace_name - these are init() specific, not start_tracing()
98
+ # Get auto_start_trace from merged config or use default
99
+ final_auto_start = merged_config.pop('auto_start_trace', auto_start_trace)
100
+ if isinstance(final_auto_start, str):
101
+ # Convert string to bool if needed
102
+ final_auto_start = final_auto_start.lower() in ('true', '1', 'yes')
103
+
104
+ # Store auto-trace config before calling start_tracing
105
+ _auto_trace_name = merged_config.pop('auto_trace_name', auto_trace_name)
106
+
107
+ # Map config file keys to start_tracing() parameter names
108
+ # Config file uses shorter names, start_tracing() uses full names
109
+ key_mapping = {
110
+ 'enable_console': 'enable_console_exporter',
111
+ 'enable_file': 'enable_file_exporter',
112
+ }
113
+ for old_key, new_key in key_mapping.items():
114
+ if old_key in merged_config:
115
+ merged_config[new_key] = merged_config.pop(old_key)
116
+
117
+ # Extract rate limiting config to pass separately to start_tracing
118
+ rate_limit_config = {
119
+ 'max_spans_per_second': merged_config.pop('max_spans_per_second', None),
120
+ 'max_block_ms': merged_config.pop('max_block_ms', 100),
121
+ }
122
+
123
+ # Add rate limiting config back into merged_config for start_tracing
124
+ merged_config.update(rate_limit_config)
125
+
126
+ # Initialize via start_tracing with full config
127
+ provider = start_tracing(**merged_config)
128
+ _init_method = "init"
129
+
130
+ # Auto-start trace if requested
131
+ if final_auto_start:
132
+ _auto_trace_context = _start_auto_trace(provider, _auto_trace_name)
133
+ if not _registered_shutdown:
134
+ atexit.register(_cleanup_auto_trace)
135
+
136
+ return provider
137
+
138
+
139
+ def _start_auto_trace(provider: TracerProvider, name: str = "root") -> Any:
140
+ """
141
+ Start an auto-managed root trace.
142
+
143
+ Args:
144
+ provider: TracerProvider instance
145
+ name: Name for the root trace span
146
+
147
+ Returns:
148
+ Span context for cleanup
149
+ """
150
+ import logging
151
+ logger = logging.getLogger(__name__)
152
+
153
+ try:
154
+ tracer = provider.get_tracer("traccia.auto")
155
+
156
+ # Create root span and make it current
157
+ span = tracer.start_span(
158
+ name=name,
159
+ attributes={"traccia.auto_started": True}
160
+ )
161
+
162
+ # Make this span the current span in the context
163
+ from opentelemetry import context
164
+ from opentelemetry.trace import set_span_in_context
165
+
166
+ token = context.attach(set_span_in_context(span))
167
+
168
+ logger.debug(f"Auto-started trace '{name}' created")
169
+
170
+ return {"span": span, "token": token}
171
+
172
+ except Exception as e:
173
+ logger.error(f"Failed to start auto-trace: {e}")
174
+ return None
175
+
176
+
177
+ def _cleanup_auto_trace() -> None:
178
+ """Cleanup auto-started trace on program exit."""
179
+ global _auto_trace_context
180
+
181
+ if _auto_trace_context and _auto_trace_context.get("span"):
182
+ import logging
183
+ logger = logging.getLogger(__name__)
184
+
185
+ try:
186
+ span = _auto_trace_context["span"]
187
+ if hasattr(span, "is_recording") and span.is_recording():
188
+ span.end()
189
+ logger.debug("Auto-started trace ended")
190
+
191
+ # Detach context
192
+ if _auto_trace_context.get("token"):
193
+ from opentelemetry import context
194
+ context.detach(_auto_trace_context["token"])
195
+
196
+ except Exception as e:
197
+ logger.error(f"Error cleaning up auto-trace: {e}")
198
+
199
+ finally:
200
+ _auto_trace_context = None
201
+
202
+
203
+ def end_auto_trace() -> None:
204
+ """
205
+ Explicitly end the auto-started trace.
206
+
207
+ This allows users to end the auto-trace and create their own root traces.
208
+ """
209
+ global _auto_trace_context
210
+
211
+ if _auto_trace_context:
212
+ _cleanup_auto_trace()
213
+
214
+
215
+ class trace:
216
+ """
217
+ Context manager for explicit trace management.
218
+
219
+ Ends auto-trace if active and starts a new explicit trace.
220
+
221
+ Example:
222
+ >>> import traccia
223
+ >>> traccia.init()
224
+ >>> with traccia.trace("custom-trace"):
225
+ ... # Your code here
226
+ ... pass
227
+ """
228
+
229
+ def __init__(self, name: str = "trace", **kwargs):
230
+ """
231
+ Initialize trace context manager.
232
+
233
+ Args:
234
+ name: Name for the trace span
235
+ **kwargs: Additional span attributes
236
+ """
237
+ self.name = name
238
+ self.kwargs = kwargs
239
+ self.span = None
240
+ self.token = None
241
+
242
+ def __enter__(self):
243
+ """Start the explicit trace."""
244
+ import logging
245
+ logger = logging.getLogger(__name__)
246
+
247
+ # End auto-trace if active
248
+ if _auto_trace_context:
249
+ logger.debug("Ending auto-trace to start explicit trace")
250
+ end_auto_trace()
251
+
252
+ # Start new explicit trace
253
+ try:
254
+ provider = _get_provider()
255
+ tracer = provider.get_tracer("traccia.explicit")
256
+
257
+ self.span = tracer.start_span(
258
+ name=self.name,
259
+ attributes=self.kwargs
260
+ )
261
+
262
+ # Make this span the current span
263
+ from opentelemetry import context
264
+ from opentelemetry.trace import set_span_in_context
265
+
266
+ self.token = context.attach(set_span_in_context(self.span._otel_span))
267
+
268
+ return self.span
269
+
270
+ except Exception as e:
271
+ logger.error(f"Failed to start explicit trace: {e}")
272
+ return None
273
+
274
+ def __exit__(self, exc_type, exc_val, exc_tb):
275
+ """End the explicit trace."""
276
+ if self.span:
277
+ try:
278
+ if exc_type:
279
+ # Record exception if one occurred
280
+ self.span.record_exception(exc_val)
281
+ from traccia.tracer.span import SpanStatus
282
+ self.span.set_status(SpanStatus.ERROR, str(exc_val))
283
+
284
+ self.span.end()
285
+ except Exception:
286
+ pass
287
+
288
+ if self.token:
289
+ try:
290
+ from opentelemetry import context
291
+ context.detach(self.token)
292
+ except Exception:
293
+ pass
294
+
295
+ return False # Don't suppress exceptions
296
+
297
+
298
+ def start_tracing(
299
+ *,
300
+ api_key: Optional[str] = None,
301
+ endpoint: Optional[str] = None,
302
+ sample_rate: float = 1.0,
303
+ max_queue_size: int = 5000,
304
+ max_export_batch_size: int = 512,
305
+ schedule_delay_millis: int = 5000,
306
+ exporter: Optional[Any] = None,
307
+ use_otlp: bool = True, # Use OTLP exporter by default
308
+ transport=None,
309
+ enable_patching: bool = True,
310
+ enable_token_counting: bool = True,
311
+ enable_costs: bool = True,
312
+ pricing_override=None,
313
+ pricing_refresh_seconds: Optional[int] = None,
314
+ enable_console_exporter: bool = False,
315
+ enable_file_exporter: bool = False,
316
+ file_exporter_path: str = "traces.jsonl",
317
+ reset_trace_file: bool = False,
318
+ load_env: bool = True,
319
+ enable_span_logging: bool = False,
320
+ auto_instrument_tools: bool = False,
321
+ tool_include: Optional[list] = None,
322
+ max_tool_spans: int = 100,
323
+ max_span_depth: int = 10,
324
+ session_id: Optional[str] = None,
325
+ user_id: Optional[str] = None,
326
+ tenant_id: Optional[str] = None,
327
+ project_id: Optional[str] = None,
328
+ agent_id: Optional[str] = None,
329
+ debug: bool = False,
330
+ attr_truncation_limit: Optional[int] = None,
331
+ service_name: Optional[str] = None,
332
+ max_spans_per_second: Optional[float] = None, # Rate limiting
333
+ max_block_ms: int = 100, # Rate limiting block time
334
+ ) -> TracerProvider:
335
+ """
336
+ Initialize global tracing:
337
+ - Builds HttpExporter (or uses provided one)
338
+ - Attaches BatchSpanProcessor with sampling and bounded queue
339
+ - Registers monkey patches (OpenAI, Anthropic, requests)
340
+ - Registers atexit shutdown hook
341
+ """
342
+ global _started, _active_processor, _init_method
343
+ if _started:
344
+ if _init_method == "init":
345
+ import logging
346
+ logger = logging.getLogger(__name__)
347
+ logger.warning(
348
+ "SDK was initialized with init(). "
349
+ "Calling start_tracing() will not re-initialize. "
350
+ "Use stop_tracing() first if you need to re-initialize."
351
+ )
352
+ return _get_provider()
353
+
354
+ if load_env:
355
+ sdk_config.load_dotenv()
356
+
357
+ # Load config from environment (backward compatible)
358
+ env_cfg = sdk_config.load_config_from_env()
359
+
360
+ # Apply any explicit overrides
361
+ if api_key:
362
+ env_cfg['api_key'] = api_key
363
+ if endpoint:
364
+ env_cfg['endpoint'] = endpoint
365
+
366
+ # Resolve agent configuration path automatically if not provided by env.
367
+ agent_cfg_path = _resolve_agent_config_path()
368
+ if agent_cfg_path:
369
+ os.environ.setdefault("AGENT_DASHBOARD_AGENT_CONFIG", agent_cfg_path)
370
+
371
+ provider = _get_provider()
372
+ key = env_cfg.get("api_key") or api_key
373
+ endpoint = env_cfg.get("endpoint") or endpoint
374
+ try:
375
+ sample_rate = float(env_cfg.get("sample_rate", sample_rate))
376
+ except Exception:
377
+ sample_rate = sample_rate
378
+
379
+ # Set runtime config for auto-instrumentation
380
+ runtime_config.set_auto_instrument_tools(auto_instrument_tools)
381
+ runtime_config.set_tool_include(tool_include or [])
382
+ runtime_config.set_max_tool_spans(max_tool_spans)
383
+ runtime_config.set_max_span_depth(max_span_depth)
384
+ runtime_config.set_session_id(session_id)
385
+ runtime_config.set_user_id(user_id)
386
+ runtime_config.set_tenant_id(_resolve_tenant_id(tenant_id))
387
+ runtime_config.set_project_id(_resolve_project_id(project_id))
388
+ runtime_config.set_agent_id(agent_id)
389
+ runtime_config.set_debug(_resolve_debug(debug))
390
+ runtime_config.set_attr_truncation_limit(attr_truncation_limit)
391
+
392
+ # Build resource attributes from runtime config
393
+ # This ensures tenant.id, project.id, etc. are included in OTLP exports
394
+ resource_attrs = {}
395
+
396
+ # Set service.name - required for proper service identification in Tempo/Grafana
397
+ # This prevents "unknown_service" from appearing
398
+ from opentelemetry.semconv.resource import ResourceAttributes
399
+ service_name_value = _resolve_service_name(service_name)
400
+ resource_attrs[ResourceAttributes.SERVICE_NAME] = service_name_value
401
+
402
+ if runtime_config.get_tenant_id():
403
+ resource_attrs["tenant.id"] = runtime_config.get_tenant_id()
404
+ if runtime_config.get_project_id():
405
+ resource_attrs["project.id"] = runtime_config.get_project_id()
406
+ if runtime_config.get_session_id():
407
+ resource_attrs["session.id"] = runtime_config.get_session_id()
408
+ if runtime_config.get_user_id():
409
+ resource_attrs["user.id"] = runtime_config.get_user_id()
410
+ if runtime_config.get_agent_id():
411
+ resource_attrs["agent.id"] = runtime_config.get_agent_id()
412
+ if runtime_config.get_debug():
413
+ resource_attrs["trace.debug"] = True
414
+
415
+ # Update provider resource dict (for HttpExporter compatibility)
416
+ if resource_attrs:
417
+ provider.resource.update(resource_attrs)
418
+
419
+ # For OTLP, we need to recreate the provider with updated resource
420
+ # since OTel Resource is immutable
421
+ if resource_attrs and use_otlp:
422
+ from opentelemetry.sdk.resources import Resource as OTelResource
423
+ from opentelemetry.sdk.trace import TracerProvider as OTelTracerProvider
424
+ # Merge with existing resource attributes
425
+ existing_resource = provider._otel_provider.resource
426
+ existing_attrs = dict(existing_resource.attributes) if existing_resource.attributes else {}
427
+ existing_attrs.update(resource_attrs)
428
+ # Create new resource with merged attributes
429
+ new_resource = OTelResource.create(existing_attrs)
430
+ # Recreate OTel provider with updated resource
431
+ provider._otel_provider = OTelTracerProvider(resource=new_resource)
432
+ # Re-add any existing export processors to the new provider
433
+ for proc in provider._export_processors:
434
+ provider._otel_provider.add_span_processor(proc)
435
+
436
+ # Use OTLP exporter by default, fall back to HttpExporter if use_otlp=False
437
+ if exporter:
438
+ network_exporter = exporter
439
+ elif use_otlp:
440
+ # Use OTLP exporter (OpenTelemetry standard)
441
+ network_exporter = OTLPExporter(
442
+ endpoint=endpoint or DEFAULT_ENDPOINT,
443
+ api_key=key,
444
+ )
445
+ else:
446
+ # Legacy HttpExporter for backward compatibility
447
+ network_exporter = HttpExporter(
448
+ endpoint=endpoint or DEFAULT_ENDPOINT,
449
+ api_key=key,
450
+ transport=transport,
451
+ )
452
+
453
+ if enable_console_exporter:
454
+ network_exporter = _combine_exporters(network_exporter, ConsoleExporter())
455
+
456
+ if enable_file_exporter:
457
+ # If reset_trace_file is True, clear the file when start_tracing is called
458
+ if reset_trace_file:
459
+ try:
460
+ with open(file_exporter_path, "w", encoding="utf-8") as f:
461
+ pass # Truncate file to empty
462
+ except Exception:
463
+ pass # Silently fail if file cannot be cleared
464
+ network_exporter = _combine_exporters(
465
+ network_exporter,
466
+ FileExporter(file_path=file_exporter_path, reset_on_start=False)
467
+ )
468
+
469
+ sampler = Sampler(sample_rate)
470
+ # Use the sampler at trace start (head sampling) and also to make the
471
+ # batch processor respect trace_flags.
472
+ try:
473
+ provider.set_sampler(sampler)
474
+ except Exception:
475
+ pass
476
+
477
+ # Ordering matters: enrich spans before batching/export.
478
+ if enable_token_counting:
479
+ provider.add_span_processor(TokenCountingProcessor())
480
+ cost_processor = None
481
+ if enable_costs:
482
+ pricing_table, pricing_source = pricing_config.load_pricing_with_source(pricing_override)
483
+ cost_processor = CostAnnotatingProcessor(
484
+ pricing_table=pricing_table, pricing_source=pricing_source
485
+ )
486
+ provider.add_span_processor(cost_processor)
487
+ if enable_span_logging:
488
+ provider.add_span_processor(LoggingSpanProcessor())
489
+ # Agent enrichment should run after cost/token processors so it can fill any gaps.
490
+ provider.add_span_processor(
491
+ AgentEnrichmentProcessor(agent_config_path=os.getenv("AGENT_DASHBOARD_AGENT_CONFIG"))
492
+ )
493
+
494
+ # For OTLP exporter, use OTel's BatchSpanProcessor directly
495
+ # For HttpExporter, use our custom BatchSpanProcessor
496
+ if use_otlp and isinstance(network_exporter, OTLPExporter) and hasattr(network_exporter, '_otel_exporter'):
497
+ # Use OTel's BatchSpanProcessor for OTLP export
498
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor as OTelBatchSpanProcessor
499
+ otel_processor = OTelBatchSpanProcessor(
500
+ network_exporter._otel_exporter,
501
+ max_queue_size=max_queue_size,
502
+ max_export_batch_size=max_export_batch_size,
503
+ schedule_delay_millis=schedule_delay_millis,
504
+ )
505
+
506
+ # Wrap with rate limiting if configured
507
+ if max_spans_per_second is not None and max_spans_per_second > 0:
508
+ from traccia.processors.rate_limiter import RateLimitingSpanProcessor
509
+ rate_limited_processor = RateLimitingSpanProcessor(
510
+ next_processor=otel_processor,
511
+ max_spans_per_second=max_spans_per_second,
512
+ max_block_ms=max_block_ms,
513
+ )
514
+ provider._otel_provider.add_span_processor(rate_limited_processor)
515
+ else:
516
+ provider._otel_provider.add_span_processor(otel_processor)
517
+ _active_processor = None # OTel handles this
518
+ else:
519
+ # Use our custom BatchSpanProcessor for HttpExporter
520
+ processor = BatchSpanProcessor(
521
+ exporter=network_exporter,
522
+ sampler=sampler,
523
+ max_queue_size=max_queue_size,
524
+ max_export_batch_size=max_export_batch_size,
525
+ schedule_delay_millis=schedule_delay_millis,
526
+ )
527
+
528
+ # Wrap with rate limiting if configured
529
+ if max_spans_per_second is not None and max_spans_per_second > 0:
530
+ from traccia.processors.rate_limiter import RateLimitingSpanProcessor
531
+ rate_limited_processor = RateLimitingSpanProcessor(
532
+ next_processor=processor,
533
+ max_spans_per_second=max_spans_per_second,
534
+ max_block_ms=max_block_ms,
535
+ )
536
+ provider.add_span_processor(rate_limited_processor)
537
+ _active_processor = rate_limited_processor
538
+ else:
539
+ provider.add_span_processor(processor)
540
+ _active_processor = processor
541
+
542
+ if _active_processor:
543
+ _register_shutdown(provider, _active_processor)
544
+ _start_pricing_refresh(cost_processor, pricing_override, pricing_refresh_seconds)
545
+
546
+ # Auto-instrument in-repo functions/tools if enabled
547
+ if auto_instrument_tools and tool_include:
548
+ try:
549
+ auto_instrumentation.instrument_functions(tool_include or [])
550
+ except Exception:
551
+ pass
552
+
553
+ if enable_patching:
554
+ try:
555
+ patch_openai()
556
+ except Exception:
557
+ pass
558
+ try:
559
+ patch_anthropic()
560
+ except Exception:
561
+ pass
562
+ try:
563
+ patch_requests()
564
+ except Exception:
565
+ pass
566
+
567
+ _started = True
568
+ if _init_method is None:
569
+ _init_method = "start_tracing"
570
+ return provider
571
+
572
+
573
+ def stop_tracing(flush_timeout: Optional[float] = None) -> None:
574
+ """Force flush and shutdown registered processors and provider."""
575
+ global _started, _init_method, _auto_trace_context
576
+
577
+ # End auto-trace if active
578
+ if _auto_trace_context:
579
+ _cleanup_auto_trace()
580
+
581
+ _stop_pricing_refresh()
582
+ provider = _get_provider()
583
+ if _active_processor:
584
+ try:
585
+ _active_processor.force_flush(timeout=flush_timeout)
586
+ finally:
587
+ _active_processor.shutdown()
588
+ provider.shutdown()
589
+ _started = False
590
+ _init_method = None
591
+
592
+
593
+ def _register_shutdown(provider: TracerProvider, processor: Optional[BatchSpanProcessor]) -> None:
594
+ global _registered_shutdown
595
+ if _registered_shutdown:
596
+ return
597
+
598
+ def _cleanup():
599
+ try:
600
+ if processor:
601
+ processor.force_flush()
602
+ processor.shutdown()
603
+ finally:
604
+ provider.shutdown()
605
+
606
+ atexit.register(_cleanup)
607
+ _registered_shutdown = True
608
+
609
+
610
+ def _resolve_service_name(service_name: Optional[str]) -> str:
611
+ """Resolve service.name using override, env, or inferred entrypoint."""
612
+ if service_name:
613
+ return service_name
614
+ env_name = os.getenv("OTEL_SERVICE_NAME") or os.getenv("SERVICE_NAME")
615
+ if env_name:
616
+ return env_name
617
+ # Use current working directory name
618
+ cwd_name = Path.cwd().name
619
+ if cwd_name:
620
+ return cwd_name
621
+ # Infer from entry script if available (e.g., "app.py" -> "app")
622
+ argv0 = sys.argv[0] if sys.argv else ""
623
+ if argv0 and argv0 not in ("-c", "-m"):
624
+ script_name = Path(argv0).name
625
+ if script_name:
626
+ return Path(script_name).stem or script_name
627
+ return "traccia_app"
628
+
629
+
630
+ def _get_provider() -> TracerProvider:
631
+ import traccia
632
+
633
+ return traccia.get_tracer_provider()
634
+
635
+
636
+ def _resolve_agent_config_path() -> Optional[str]:
637
+ """
638
+ Locate agent_config.json for users automatically:
639
+ 1) Respect AGENT_DASHBOARD_AGENT_CONFIG if set and file exists
640
+ 2) Use ./agent_config.json from current working directory if present
641
+ 3) Try to find agent_config.json adjacent to the first non-sdk caller
642
+ """
643
+ env_path = os.getenv("AGENT_DASHBOARD_AGENT_CONFIG")
644
+ if env_path:
645
+ path = Path(env_path)
646
+ if path.exists():
647
+ return str(path.resolve())
648
+
649
+ cwd_path = Path.cwd() / "agent_config.json"
650
+ if cwd_path.exists():
651
+ return str(cwd_path.resolve())
652
+
653
+ try:
654
+ for frame in inspect.stack():
655
+ frame_path = Path(frame.filename)
656
+ # Skip SDK internal files
657
+ if "traccia" in frame_path.parts:
658
+ continue
659
+ candidate = frame_path.parent / "agent_config.json"
660
+ if candidate.exists():
661
+ return str(candidate.resolve())
662
+ except Exception:
663
+ return None
664
+ return None
665
+
666
+
667
+ def _resolve_debug(cli_value: bool) -> bool:
668
+ raw = os.getenv("AGENT_DASHBOARD_DEBUG")
669
+ if raw is None:
670
+ return bool(cli_value)
671
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
672
+
673
+
674
+ def _resolve_tenant_id(cli_value: Optional[str]) -> str:
675
+ return (
676
+ cli_value
677
+ or os.getenv("AGENT_DASHBOARD_TENANT_ID")
678
+ or "study-agent-sf23jj56c34234"
679
+ )
680
+
681
+
682
+ def _resolve_project_id(cli_value: Optional[str]) -> str:
683
+ return cli_value or os.getenv("AGENT_DASHBOARD_PROJECT_ID") or "gmail"
684
+
685
+
686
+ def _combine_exporters(primary, secondary):
687
+ if primary is None:
688
+ return secondary
689
+ if secondary is None:
690
+ return primary
691
+
692
+ class _Multi:
693
+ def export(self, spans):
694
+ ok1 = primary.export(spans)
695
+ ok2 = secondary.export(spans)
696
+ return ok1 and ok2
697
+
698
+ def shutdown(self):
699
+ for exp in (primary, secondary):
700
+ if hasattr(exp, "shutdown"):
701
+ exp.shutdown()
702
+
703
+ return _Multi()
704
+
705
+
706
+ _pricing_refresh_stop: Optional[threading.Event] = None
707
+ _pricing_refresh_thread: Optional[threading.Thread] = None
708
+
709
+
710
+ def _start_pricing_refresh(cost_processor: Optional[CostAnnotatingProcessor], override, interval: Optional[int]) -> None:
711
+ global _pricing_refresh_stop, _pricing_refresh_thread
712
+ if not cost_processor or not interval or interval <= 0:
713
+ return
714
+ _pricing_refresh_stop = threading.Event()
715
+
716
+ def _loop():
717
+ while not _pricing_refresh_stop.is_set():
718
+ time.sleep(interval)
719
+ if _pricing_refresh_stop.is_set():
720
+ break
721
+ try:
722
+ table, source = pricing_config.load_pricing_with_source(override)
723
+ cost_processor.update_pricing_table(table, pricing_source=source)
724
+ except Exception:
725
+ continue
726
+
727
+ _pricing_refresh_thread = threading.Thread(target=_loop, daemon=True)
728
+ _pricing_refresh_thread.start()
729
+
730
+
731
+ def _stop_pricing_refresh() -> None:
732
+ if _pricing_refresh_stop:
733
+ _pricing_refresh_stop.set()
734
+ if _pricing_refresh_thread:
735
+ _pricing_refresh_thread.join(timeout=1)
736
+
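
For readers of this diff, here is a minimal usage sketch of the new auto-initialization API, assembled from the docstrings and signatures in traccia/auto.py above. It is illustrative only: the api_key value and span attributes are placeholders, and stop_tracing() is imported from traccia.auto because this diff does not show which names traccia/__init__.py re-exports.

    import traccia
    from traccia.auto import stop_tracing

    # init() resolves configuration with the documented priority
    # (explicit kwargs > environment variables > ./traccia.toml or
    # ~/.traccia/config.toml), wires exporters and processors via
    # start_tracing(), patches OpenAI/Anthropic/requests, and by default
    # auto-starts a root trace named "root".
    traccia.init(
        api_key="YOUR_API_KEY",        # placeholder; required for SaaS only
        enable_console_exporter=True,  # forwarded to start_tracing()
        sample_rate=1.0,
    )

    # Spans created from here on are children of the auto-started trace.

    # The trace context manager ends the auto-trace (if active) and opens
    # an explicit root span; extra kwargs become span attributes.
    with traccia.trace("custom-trace", user_id="example-user"):
        ...  # application code

    # Flush and shut down processors explicitly (an atexit hook also does this).
    stop_tracing()

Note that calling init() after start_tracing() (or vice versa) only logs a warning and returns the existing provider; stop_tracing() must be called first if re-initialization is needed.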
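
start_tracing() can also be called directly, bypassing init()'s config-file handling and auto-started root trace. The sketch below uses only parameters that appear in the signature in this diff; note that a network exporter is always constructed (OTLP by default, HttpExporter when use_otlp=False), so the console and file exporters are additional outputs rather than replacements. Values shown are illustrative, not recommendations.

    from traccia.auto import start_tracing, stop_tracing

    provider = start_tracing(
        enable_file_exporter=True,
        file_exporter_path="traces.jsonl",
        reset_trace_file=True,          # truncate the file at startup
        enable_console_exporter=True,
        sample_rate=0.5,                # head sampling via Sampler
        max_spans_per_second=50,        # wraps export in RateLimitingSpanProcessor
        max_block_ms=100,
    )

    ...  # traced application code

    stop_tracing()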