traccia 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,674 @@
1
+ Metadata-Version: 2.4
2
+ Name: traccia
3
+ Version: 0.1.0
4
+ Summary: Production-ready distributed tracing SDK for AI agents and LLM applications
5
+ License: Apache-2.0
6
+ Project-URL: Homepage, https://github.com/traccia-ai/traccia
7
+ Project-URL: Documentation, https://github.com/traccia-ai/traccia#readme
8
+ Project-URL: Repository, https://github.com/traccia-ai/traccia
9
+ Project-URL: Issues, https://github.com/traccia-ai/traccia/issues
10
+ Project-URL: Bug Tracker, https://github.com/traccia-ai/traccia/issues
11
+ Keywords: tracing,observability,opentelemetry,ai-agents,llm,distributed-tracing,monitoring,telemetry
12
+ Classifier: Development Status :: 4 - Beta
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: Apache Software License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.9
18
+ Classifier: Programming Language :: Python :: 3.10
19
+ Classifier: Programming Language :: Python :: 3.11
20
+ Classifier: Programming Language :: Python :: 3.12
21
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
22
+ Classifier: Topic :: System :: Monitoring
23
+ Classifier: Topic :: Software Development :: Testing
24
+ Requires-Python: >=3.9
25
+ Description-Content-Type: text/markdown
26
+ License-File: LICENSE
27
+ Requires-Dist: tiktoken>=0.7.0
28
+ Requires-Dist: opentelemetry-api>=1.20.0
29
+ Requires-Dist: opentelemetry-sdk>=1.20.0
30
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.20.0
31
+ Requires-Dist: opentelemetry-semantic-conventions>=0.40b0
32
+ Requires-Dist: tomli>=2.0.0; python_version < "3.11"
33
+ Requires-Dist: toml>=0.10.0
34
+ Requires-Dist: pydantic>=2.0.0
35
+ Dynamic: license-file
36
+
37
+ # Traccia
38
+
39
+ **Production-ready distributed tracing for AI agents and LLM applications**
40
+
41
+ Traccia is a lightweight, high-performance Python SDK for observability and tracing of AI agents, LLM applications, and complex distributed systems. Built on OpenTelemetry standards with specialized instrumentation for AI workloads.
42
+
43
+ ## ✨ Features
44
+
45
+ - **🔍 Automatic Instrumentation**: Auto-patch OpenAI, Anthropic, requests, and HTTP libraries
46
+ - **📊 LLM-Aware Tracing**: Track tokens, costs, prompts, and completions automatically
47
+ - **⚡ Zero-Config Start**: Simple `init()` call with automatic config discovery
48
+ - **🎯 Decorator-Based**: Trace any function with `@observe` decorator
49
+ - **🔧 Multiple Exporters**: OTLP (compatible with Grafana Tempo, Jaeger, Zipkin), Console, or File
50
+ - **🛡️ Production-Ready**: Rate limiting, error handling, config validation, robust flushing
51
+ - **📝 Type-Safe**: Full Pydantic validation for configuration
52
+ - **🚀 High Performance**: Efficient batching, async support, minimal overhead
53
+ - **🔐 Secure**: No secrets in logs, configurable data truncation
54
+
55
+ ---
56
+
57
+ ## 🚀 Quick Start
58
+
59
+ ### Installation
60
+
61
+ ```bash
62
+ pip install traccia
63
+ ```
64
+
65
+ ### Basic Usage
66
+
67
+ ```python
68
+ from traccia import init, observe
69
+
70
+ # Initialize (auto-loads from traccia.toml if present)
71
+ init()
72
+
73
+ # Trace any function
74
+ @observe()
75
+ def my_function(x, y):
76
+ return x + y
77
+
78
+ # That's it! Traces are automatically created and exported
79
+ result = my_function(2, 3)
80
+ ```
81
+
82
+ ### With LLM Calls
83
+
84
+ ```python
85
+ from traccia import init, observe
86
+ from openai import OpenAI
87
+
88
+ init() # Auto-patches OpenAI
89
+
90
+ client = OpenAI()
91
+
92
+ @observe(as_type="llm")
93
+ def generate_text(prompt: str) -> str:
94
+ response = client.chat.completions.create(
95
+ model="gpt-4",
96
+ messages=[{"role": "user", "content": prompt}]
97
+ )
98
+ return response.choices[0].message.content
99
+
100
+ # Automatically tracks: model, tokens, cost, prompt, completion, latency
101
+ text = generate_text("Write a haiku about Python")
102
+ ```
103
+
104
+ ---
105
+
106
+ ## 📖 Configuration
107
+
108
+ ### Configuration File
109
+
110
+ Create a `traccia.toml` file in your project root:
111
+
112
+ ```bash
113
+ traccia config init
114
+ ```
115
+
116
+ This creates a template config file:
117
+
118
+ ```toml
119
+ [tracing]
120
+ # API key (optional - for future Traccia UI, not needed for OTLP backends)
121
+ api_key = ""
122
+
123
+ # Endpoint URL for OTLP trace ingestion
124
+ # Works with Grafana Tempo, Jaeger, Zipkin, and other OTLP-compatible backends
125
+ endpoint = "http://localhost:4318/v1/traces"
126
+
127
+ sample_rate = 1.0 # 0.0 to 1.0
128
+ auto_start_trace = true # Auto-start root trace on init
129
+ auto_trace_name = "root" # Name for auto-started trace
130
+ use_otlp = true # Use OTLP exporter
131
+ # service_name = "my-app" # Optional service name
132
+
133
+ [exporters]
134
+ # Only enable ONE exporter at a time
135
+ enable_console = false # Print traces to console
136
+ enable_file = false # Write traces to file
137
+ file_exporter_path = "traces.jsonl"
138
+ reset_trace_file = false # Reset file on initialization
139
+
140
+ [instrumentation]
141
+ enable_patching = true # Auto-patch libraries (OpenAI, Anthropic, requests)
142
+ enable_token_counting = true # Count tokens for LLM calls
143
+ enable_costs = true # Calculate costs
144
+ auto_instrument_tools = false # Auto-instrument tool calls (experimental)
145
+ max_tool_spans = 100 # Max tool spans to create
146
+ max_span_depth = 10 # Max nested span depth
147
+
148
+ [rate_limiting]
149
+ # Optional: limit spans per second
150
+ # max_spans_per_second = 100.0
151
+ max_queue_size = 5000 # Max buffered spans
152
+ max_block_ms = 100 # Max ms to block before dropping
153
+ max_export_batch_size = 512 # Spans per export batch
154
+ schedule_delay_millis = 5000 # Delay between batches
155
+
156
+ [runtime]
157
+ # Optional runtime metadata
158
+ # session_id = ""
159
+ # user_id = ""
160
+ # tenant_id = ""
161
+ # project_id = ""
162
+
163
+ [logging]
164
+ debug = false # Enable debug logging
165
+ enable_span_logging = false # Enable span-level logging
166
+
167
+ [advanced]
168
+ # attr_truncation_limit = 1000 # Max attribute value length
169
+ ```
170
+
171
+ ### OTLP Backend Compatibility
172
+
173
+ Traccia is fully OTLP-compatible and works with:
174
+ - **Grafana Tempo** - `http://tempo:4318/v1/traces`
175
+ - **Jaeger** - `http://jaeger:4318/v1/traces`
176
+ - **Zipkin** - Configure via OTLP endpoint
177
+ - **SigNoz** - Self-hosted observability platform
178
+ - **Traccia Cloud** - Coming soon (will require API key)
179
+
180
+ ### Environment Variables
181
+
182
+ All config parameters can be set via environment variables with the `TRACCIA_` prefix:
183
+
184
+ **Tracing**: `TRACCIA_API_KEY`, `TRACCIA_ENDPOINT`, `TRACCIA_SAMPLE_RATE`, `TRACCIA_AUTO_START_TRACE`, `TRACCIA_AUTO_TRACE_NAME`, `TRACCIA_USE_OTLP`, `TRACCIA_SERVICE_NAME`
185
+
186
+ **Exporters**: `TRACCIA_ENABLE_CONSOLE`, `TRACCIA_ENABLE_FILE`, `TRACCIA_FILE_PATH`, `TRACCIA_RESET_TRACE_FILE`
187
+
188
+ **Instrumentation**: `TRACCIA_ENABLE_PATCHING`, `TRACCIA_ENABLE_TOKEN_COUNTING`, `TRACCIA_ENABLE_COSTS`, `TRACCIA_AUTO_INSTRUMENT_TOOLS`, `TRACCIA_MAX_TOOL_SPANS`, `TRACCIA_MAX_SPAN_DEPTH`
189
+
190
+ **Rate Limiting**: `TRACCIA_MAX_SPANS_PER_SECOND`, `TRACCIA_MAX_QUEUE_SIZE`, `TRACCIA_MAX_BLOCK_MS`, `TRACCIA_MAX_EXPORT_BATCH_SIZE`, `TRACCIA_SCHEDULE_DELAY_MILLIS`
191
+
192
+ **Runtime**: `TRACCIA_SESSION_ID`, `TRACCIA_USER_ID`, `TRACCIA_TENANT_ID`, `TRACCIA_PROJECT_ID`
193
+
194
+ **Logging**: `TRACCIA_DEBUG`, `TRACCIA_ENABLE_SPAN_LOGGING`
195
+
196
+ **Advanced**: `TRACCIA_ATTR_TRUNCATION_LIMIT`
197
+
198
+ **Priority**: Explicit parameters > Environment variables > Config file > Defaults
199
+
200
+ ### Programmatic Configuration
201
+
202
+ ```python
203
+ from traccia import init
204
+
205
+ # Override config programmatically
206
+ init(
207
+ endpoint="http://tempo:4318/v1/traces",
208
+ sample_rate=0.5,
209
+ enable_costs=True,
210
+ max_spans_per_second=100.0
211
+ )
212
+ ```
213
+
214
+ ---
215
+
216
+ ## 🎯 Usage Guide
217
+
218
+ ### The `@observe` Decorator
219
+
220
+ The `@observe` decorator is the primary way to instrument your code:
221
+
222
+ ```python
223
+ from traccia import observe
224
+
225
+ # Basic usage
226
+ @observe()
227
+ def process_data(data):
228
+ return transform(data)
229
+
230
+ # Custom span name
231
+ @observe(name="data_pipeline")
232
+ def process_data(data):
233
+ return transform(data)
234
+
235
+ # Add custom attributes
236
+ @observe(attributes={"version": "2.0", "env": "prod"})
237
+ def process_data(data):
238
+ return transform(data)
239
+
240
+ # Specify span type
241
+ @observe(as_type="llm") # "span", "llm", "tool"
242
+ def call_llm():
243
+ pass
244
+
245
+ # Skip capturing specific arguments
246
+ @observe(skip_args=["password", "secret"])
247
+ def authenticate(username, password):
248
+ pass
249
+
250
+ # Skip capturing result (for large returns)
251
+ @observe(skip_result=True)
252
+ def fetch_large_dataset():
253
+ return huge_data
254
+ ```
255
+
256
+ **Available Parameters**:
257
+ - `name` (str, optional): Custom span name (defaults to function name)
258
+ - `attributes` (dict, optional): Initial span attributes
259
+ - `as_type` (str): Span type - `"span"`, `"llm"`, or `"tool"`
260
+ - `skip_args` (list, optional): List of argument names to skip capturing
261
+ - `skip_result` (bool): Skip capturing the return value
262
+
263
+ ### Async Functions
264
+
265
+ `@observe` works seamlessly with async functions:
266
+
267
+ ```python
268
+ @observe()
269
+ async def async_task(x):
270
+ await asyncio.sleep(1)
271
+ return x * 2
272
+
273
+ result = await async_task(5)
274
+ ```
275
+
276
+ ### Manual Span Creation
277
+
278
+ For more control, create spans manually:
279
+
280
+ ```python
281
+ from traccia import get_tracer, span
282
+
283
+ # Using convenience function
284
+ with span("operation_name") as s:
285
+ s.set_attribute("key", "value")
286
+ s.add_event("checkpoint_reached")
287
+ do_work()
288
+
289
+ # Using tracer directly
290
+ tracer = get_tracer("my_service")
291
+ with tracer.start_as_current_span("operation") as s:
292
+ s.set_attribute("user_id", 123)
293
+ do_work()
294
+ ```
295
+
296
+ ### Error Handling
297
+
298
+ Traccia automatically captures and records errors:
299
+
300
+ ```python
301
+ @observe()
302
+ def failing_function():
303
+ raise ValueError("Something went wrong")
304
+
305
+ # Span will contain:
306
+ # - error.type: "ValueError"
307
+ # - error.message: "Something went wrong"
308
+ # - error.stack_trace: (truncated stack trace)
309
+ # - span status: ERROR
310
+ ```
311
+
312
+ ### Nested Spans
313
+
314
+ Spans are automatically nested based on call hierarchy:
315
+
316
+ ```python
317
+ @observe()
318
+ def parent_operation():
319
+ child_operation()
320
+ return "done"
321
+
322
+ @observe()
323
+ def child_operation():
324
+ grandchild_operation()
325
+
326
+ @observe()
327
+ def grandchild_operation():
328
+ pass
329
+
330
+ # Creates nested span hierarchy:
331
+ # parent_operation
332
+ # └── child_operation
333
+ # └── grandchild_operation
334
+ ```
335
+
336
+ ---
337
+
338
+ ## 🛠️ CLI Tools
339
+
340
+ Traccia includes a powerful CLI for configuration and diagnostics:
341
+
342
+ ### `traccia config init`
343
+
344
+ Create a new `traccia.toml` configuration file:
345
+
346
+ ```bash
347
+ traccia config init
348
+ traccia config init --force # Overwrite existing
349
+ ```
350
+
351
+ ### `traccia doctor`
352
+
353
+ Validate configuration and diagnose issues:
354
+
355
+ ```bash
356
+ traccia doctor
357
+
358
+ # Output:
359
+ # 🩺 Running Traccia configuration diagnostics...
360
+ #
361
+ # ✅ Found config file: ./traccia.toml
362
+ # ✅ Configuration is valid
363
+ #
364
+ # 📊 Configuration summary:
365
+ # • API Key: ❌ Not set (optional)
366
+ # • Endpoint: http://localhost:4318/v1/traces
367
+ # • Sample Rate: 1.0
368
+ # • OTLP Exporter: ✅ Enabled
369
+ ```
370
+
371
+ ### `traccia check`
372
+
373
+ Test connectivity to your exporter endpoint:
374
+
375
+ ```bash
376
+ traccia check
377
+ traccia check --endpoint http://tempo:4318/v1/traces
378
+ ```
379
+
380
+ ---
381
+
382
+ ## 🎨 Advanced Features
383
+
384
+ ### Rate Limiting
385
+
386
+ Protect your infrastructure with built-in rate limiting:
387
+
388
+ ```toml
389
+ [rate_limiting]
390
+ max_spans_per_second = 100.0 # Limit to 100 spans/sec
391
+ max_queue_size = 5000 # Max buffered spans
392
+ max_block_ms = 100 # Block up to 100ms before dropping
393
+ ```
394
+
395
+ **Behavior**:
396
+ 1. Try to acquire capacity immediately
397
+ 2. If unavailable, block for up to `max_block_ms`
398
+ 3. If still unavailable, drop span and log warning
399
+
400
+ When spans are dropped due to rate limiting, warnings are logged to help you monitor and adjust limits.
401
+
402
+ ### Sampling
403
+
404
+ Control trace volume with sampling:
405
+
406
+ ```python
407
+ # Sample 10% of traces
408
+ init(sample_rate=0.1)
409
+
410
+ # Sampling is applied at trace creation time
411
+ # Traces are either fully included or fully excluded
412
+ ```
413
+
414
+ ### Token Counting & Cost Calculation
415
+
416
+ Automatic for supported LLM providers (OpenAI, Anthropic):
417
+
418
+ ```python
419
+ @observe(as_type="llm")
420
+ def call_openai(prompt):
421
+ response = client.chat.completions.create(
422
+ model="gpt-4",
423
+ messages=[{"role": "user", "content": prompt}]
424
+ )
425
+ return response.choices[0].message.content
426
+
427
+ # Span automatically includes:
428
+ # - llm.token.prompt_tokens
429
+ # - llm.token.completion_tokens
430
+ # - llm.token.total_tokens
431
+ # - llm.cost.total (in USD)
432
+ ```
433
+
434
+ ---
435
+
436
+ ## 🔧 Troubleshooting
437
+
438
+ ### Enable Debug Logging
439
+
440
+ ```python
441
+ import logging
442
+ logging.basicConfig(level=logging.DEBUG)
443
+
444
+ # Or via config
445
+ init(debug=True)
446
+
447
+ # Or via env var
448
+ # TRACCIA_DEBUG=1 python your_script.py
449
+ ```
450
+
451
+ ### Common Issues
452
+
453
+ #### **Traces not appearing**
454
+
455
+ 1. Check connectivity: `traccia check`
456
+ 2. Validate config: `traccia doctor`
457
+ 3. Enable debug logging
458
+ 4. Verify endpoint is correct and accessible
459
+
460
+ #### **High memory usage**
461
+
462
+ - Reduce `max_queue_size` in rate limiting config
463
+ - Lower `sample_rate` to reduce volume
464
+ - Enable rate limiting with `max_spans_per_second`
465
+
466
+ #### **Spans being dropped**
467
+
468
+ - Check rate limiter logs for warnings
469
+ - Increase `max_spans_per_second` if set
470
+ - Increase `max_queue_size` if spans are queued
471
+ - Check `traccia doctor` output
472
+
473
+ #### **Import errors after upgrade**
474
+
475
+ If you're migrating from `traccia_sdk`:
476
+
477
+ ```python
478
+ # OLD (will raise helpful error)
479
+ from traccia_sdk import init # ❌ ImportError with migration guide
480
+
481
+ # NEW
482
+ from traccia import init # ✅ Correct
483
+ ```
484
+
485
+ ---
486
+
487
+ ## 📚 API Reference
488
+
489
+ ### Core Functions
490
+
491
+ #### `init(**kwargs) -> TracerProvider`
492
+
493
+ Initialize the Traccia SDK.
494
+
495
+ **Parameters**:
496
+ - `endpoint` (str, optional): OTLP endpoint URL
497
+ - `api_key` (str, optional): API key (optional, for future Traccia UI)
498
+ - `sample_rate` (float, optional): Sampling rate (0.0-1.0)
499
+ - `auto_start_trace` (bool, optional): Auto-start root trace
500
+ - `config_file` (str, optional): Path to config file
501
+ - `use_otlp` (bool, optional): Use OTLP exporter
502
+ - `enable_console` (bool, optional): Enable console exporter
503
+ - `enable_file` (bool, optional): Enable file exporter
504
+ - `enable_patching` (bool, optional): Auto-patch libraries
505
+ - `enable_token_counting` (bool, optional): Count tokens
506
+ - `enable_costs` (bool, optional): Calculate costs
507
+ - `max_spans_per_second` (float, optional): Rate limit
508
+ - `**kwargs`: Any other config parameter
509
+
510
+ **Returns**: TracerProvider instance
511
+
512
+ #### `stop_tracing(flush_timeout: float = 1.0) -> None`
513
+
514
+ Stop tracing and flush pending spans.
515
+
516
+ **Parameters**:
517
+ - `flush_timeout` (float): Max seconds to wait for flush
518
+
519
+ #### `get_tracer(name: str = "default") -> Tracer`
520
+
521
+ Get a tracer instance.
522
+
523
+ **Parameters**:
524
+ - `name` (str): Tracer name (typically module/service name)
525
+
526
+ **Returns**: Tracer instance
527
+
528
+ #### `span(name: str, attributes: dict = None) -> Span`
529
+
530
+ Create a span context manager.
531
+
532
+ **Parameters**:
533
+ - `name` (str): Span name
534
+ - `attributes` (dict, optional): Initial attributes
535
+
536
+ **Returns**: Span context manager
537
+
538
+ ### Decorator
539
+
540
+ #### `@observe(name=None, *, attributes=None, as_type="span", skip_args=None, skip_result=False)`
541
+
542
+ Decorate a function to create spans automatically.
543
+
544
+ **Parameters**:
545
+ - `name` (str, optional): Span name (default: function name)
546
+ - `attributes` (dict, optional): Initial attributes
547
+ - `as_type` (str): Span type (`"span"`, `"llm"`, `"tool"`)
548
+ - `skip_args` (list, optional): Arguments to skip capturing
549
+ - `skip_result` (bool): Skip capturing return value
550
+
551
+ ### Configuration
552
+
553
+ #### `load_config(config_file=None, overrides=None) -> TracciaConfig`
554
+
555
+ Load and validate configuration.
556
+
557
+ **Parameters**:
558
+ - `config_file` (str, optional): Path to config file
559
+ - `overrides` (dict, optional): Override values
560
+
561
+ **Returns**: Validated TracciaConfig instance
562
+
563
+ **Raises**: `ConfigError` if invalid
564
+
565
+ #### `validate_config(config_file=None, overrides=None) -> tuple[bool, str, TracciaConfig | None]`
566
+
567
+ Validate configuration, returning a status tuple instead of raising on error.
568
+
569
+ **Returns**: Tuple of (is_valid, message, config_or_none)
570
+
571
+ ---
572
+
573
+ ## 🏗️ Architecture
574
+
575
+ ### Data Flow
576
+
577
+ ```
578
+ Application Code (@observe)
579
+ ↓
580
+ Span Creation
581
+ ↓
582
+ Processors (token counting, cost, enrichment)
583
+ ↓
584
+ Rate Limiter (optional)
585
+ ↓
586
+ Batch Processor (buffering)
587
+ ↓
588
+ Exporter (OTLP/Console/File)
589
+ ↓
590
+ Backend (Grafana Tempo / Jaeger / Zipkin / etc.)
591
+ ```
592
+
593
+ ---
594
+
595
+ ## 🤝 Contributing
596
+
597
+ Contributions are welcome! Whether it's bug fixes, new features, documentation improvements, or examples - we appreciate your help.
598
+
599
+ ### How to Contribute
600
+
601
+ 1. **Fork the repository**
602
+ 2. **Create a feature branch**: `git checkout -b feature/amazing-feature`
603
+ 3. **Make your changes** and add tests
604
+ 4. **Run tests**: `pytest traccia/tests/`
605
+ 5. **Lint your code**: `ruff check traccia/`
606
+ 6. **Commit**: `git commit -m "Add amazing feature"`
607
+ 7. **Push**: `git push origin feature/amazing-feature`
608
+ 8. **Open a Pull Request**
609
+
610
+ ### Development Setup
611
+
612
+ ```bash
613
+ # Clone the repository
614
+ git clone https://github.com/traccia-ai/traccia.git
615
+ cd traccia
616
+
617
+ # Create virtual environment
618
+ python -m venv venv
619
+ source venv/bin/activate # On Windows: venv\Scripts\activate
620
+
621
+ # Install in editable mode with dev dependencies
622
+ pip install -e ".[dev]"
623
+
624
+ # Run tests
625
+ pytest traccia/tests/ -v
626
+
627
+ # Run with coverage
628
+ pytest traccia/tests/ --cov=traccia --cov-report=html
629
+ ```
630
+
631
+ ### Code Style
632
+
633
+ - Follow PEP 8
634
+ - Use type hints where appropriate
635
+ - Add docstrings for public APIs
636
+ - Write tests for new features
637
+ - Keep PRs focused and atomic
638
+
639
+ ### Areas We'd Love Help With
640
+
641
+ - **Integrations**: Add support for more LLM providers (Cohere, AI21, local models)
642
+ - **Backends**: Test and document setup with different OTLP backends
643
+ - **Examples**: Real-world examples of agent instrumentation
644
+ - **Documentation**: Tutorials, guides, video walkthroughs
645
+ - **Performance**: Optimize hot paths, reduce overhead
646
+ - **Testing**: Improve test coverage, add integration tests
647
+
648
+ ---
649
+
650
+ ## 📄 License
651
+
652
+ Apache 2.0 License - see [LICENSE](LICENSE) for details
653
+
654
+ ---
655
+
656
+ ## 🙏 Acknowledgments
657
+
658
+ Built with:
659
+ - [OpenTelemetry](https://opentelemetry.io/) - Vendor-neutral observability framework
660
+ - [Pydantic](https://pydantic.dev/) - Data validation
661
+ - [tiktoken](https://github.com/openai/tiktoken) - Token counting
662
+
663
+ Inspired by observability tools in the ecosystem and designed to work seamlessly with the OTLP standard.
664
+
665
+ ---
666
+
667
+ ## 📞 Support & Community
668
+
669
+ - **Issues**: [GitHub Issues](https://github.com/traccia-ai/traccia/issues) - Report bugs or request features
670
+ - **Discussions**: [GitHub Discussions](https://github.com/traccia-ai/traccia/discussions) - Ask questions, share ideas
671
+
672
+ ---
673
+
674
+ **Made with ❤️ for the AI agent community**
@@ -0,0 +1,14 @@
1
+ traccia/__init__.py,sha256=O2Hs3GFVcFsmeBlJwJhqPvFH9kB-pAJarEfB9G4xLZU,1998
2
+ traccia/auto.py,sha256=S-T6edev59c3HEgAU9ENQjOtL2duUJdjJzlBk6hGek0,25792
3
+ traccia/auto_instrumentation.py,sha256=e2Gzt2AtGSXbv6BSZpAApAtMcTlEwc08dgZgQMfrREU,2107
4
+ traccia/cli.py,sha256=lQJU-dAxxcWyOew7OCBi2u7RbMXcwOxtLyfzFwlZ4f0,12362
5
+ traccia/config.py,sha256=BCj_N_zkuRlfPMsuTO-LpcZDQdKQjJ6QHxAIWfCg0HI,24527
6
+ traccia/errors.py,sha256=CMIS01M3pnr3oRhtzQkyKYkDgYkJNlGd6D9Zg2AohA0,1158
7
+ traccia/pricing_config.py,sha256=ZTccshJbAySWJw9Rdvpj2SMaHkEio325t8NkfJfNzfY,1732
8
+ traccia/runtime_config.py,sha256=LjeKCPYKkbZiI38ih4OX4XMkW72hA29Or0hso4W63-M,2157
9
+ traccia-0.1.0.dist-info/licenses/LICENSE,sha256=HNl57LOj88EfKh-IWmeqWxsDRh_FF6lj0l-E2A4Hr8w,10757
10
+ traccia-0.1.0.dist-info/METADATA,sha256=vYjnTMp2sclYNc-BT5nGDheiCu0XQYAk9orDBijDXDo,17986
11
+ traccia-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
12
+ traccia-0.1.0.dist-info/entry_points.txt,sha256=SG7gacPRmFzLw2HYTblUZsmC_TO3n14-dNi28SL-C2k,45
13
+ traccia-0.1.0.dist-info/top_level.txt,sha256=Kc56JudupqSkzJPOnuQ6mPHJmhtike7pssNX0u_p59w,8
14
+ traccia-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ traccia = traccia.cli:main