traccia 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
traccia/auto.py CHANGED
@@ -123,10 +123,22 @@ def init(
123
123
  # Add rate limiting config back into merged_config for start_tracing
124
124
  merged_config.update(rate_limit_config)
125
125
 
126
+ # Extract openai_agents config (not passed to start_tracing)
127
+ openai_agents_enabled = merged_config.pop('openai_agents', True)
128
+
126
129
  # Initialize via start_tracing with full config
127
130
  provider = start_tracing(**merged_config)
128
131
  _init_method = "init"
129
132
 
133
+ # Auto-install OpenAI Agents SDK integration if available
134
+ if openai_agents_enabled:
135
+ try:
136
+ from traccia.integrations.openai_agents import install as install_openai_agents
137
+ install_openai_agents(enabled=True)
138
+ except Exception:
139
+ # Agents SDK not installed or error during install; skip silently
140
+ pass
141
+
130
142
  # Auto-start trace if requested
131
143
  if final_auto_start:
132
144
  _auto_trace_context = _start_auto_trace(provider, _auto_trace_name)
traccia/config.py CHANGED
@@ -34,6 +34,7 @@ ENV_VAR_MAPPING = {
34
34
  "auto_instrument_tools": ["TRACCIA_AUTO_INSTRUMENT_TOOLS"],
35
35
  "max_tool_spans": ["TRACCIA_MAX_TOOL_SPANS"],
36
36
  "max_span_depth": ["TRACCIA_MAX_SPAN_DEPTH"],
37
+ "openai_agents": ["TRACCIA_OPENAI_AGENTS"],
37
38
 
38
39
  # Rate limiting & Batching
39
40
  "max_spans_per_second": ["TRACCIA_MAX_SPANS_PER_SECOND"],
@@ -161,6 +162,10 @@ class InstrumentationConfig(BaseModel):
161
162
  gt=0,
162
163
  description="Maximum depth of nested spans"
163
164
  )
165
+ openai_agents: bool = Field(
166
+ default=True,
167
+ description="Auto-install OpenAI Agents SDK integration when available"
168
+ )
164
169
 
165
170
 
166
171
  class RateLimitConfig(BaseModel):
@@ -299,6 +304,7 @@ class TracciaConfig(BaseModel):
299
304
  "auto_instrument_tools": self.instrumentation.auto_instrument_tools,
300
305
  "max_tool_spans": self.instrumentation.max_tool_spans,
301
306
  "max_span_depth": self.instrumentation.max_span_depth,
307
+ "openai_agents": self.instrumentation.openai_agents,
302
308
  # Rate limiting & Batching
303
309
  "max_spans_per_second": self.rate_limiting.max_spans_per_second,
304
310
  "max_queue_size": self.rate_limiting.max_queue_size,
@@ -460,7 +466,7 @@ def load_config_from_env(flat: bool = False) -> Dict[str, Any]:
460
466
  env_config["exporters"][key] = value
461
467
 
462
468
  # Instrumentation section
463
- for key in ["enable_patching", "enable_token_counting", "enable_costs", "auto_instrument_tools"]:
469
+ for key in ["enable_patching", "enable_token_counting", "enable_costs", "auto_instrument_tools", "openai_agents"]:
464
470
  value = get_env_value(key)
465
471
  if value is not None:
466
472
  env_config["instrumentation"][key] = value.lower() in ("true", "1", "yes")
@@ -1,7 +1,12 @@
1
- """Instrumentation helpers and monkey patching."""
1
+ """Infrastructure and vendor instrumentation.
2
+
3
+ This module exposes helpers for HTTP client/server tracing (including FastAPI),
4
+ vendor SDK patching (OpenAI, Anthropic, requests), and decorators used for
5
+ auto-instrumentation.
6
+ """
2
7
 
3
8
  from traccia.instrumentation.decorator import observe
4
- from traccia.instrumentation.openai import patch_openai
9
+ from traccia.instrumentation.openai import patch_openai, patch_openai_responses
5
10
  from traccia.instrumentation.anthropic import patch_anthropic
6
11
  from traccia.instrumentation.requests import patch_requests
7
12
  from traccia.instrumentation.http_client import inject_headers as inject_http_headers
@@ -11,6 +16,7 @@ from traccia.instrumentation.fastapi import install_http_middleware
11
16
  __all__ = [
12
17
  "observe",
13
18
  "patch_openai",
19
+ "patch_openai_responses",
14
20
  "patch_anthropic",
15
21
  "patch_requests",
16
22
  "inject_http_headers",
@@ -1,4 +1,4 @@
1
- """OpenAI monkey patching for chat completions."""
1
+ """OpenAI monkey patching for chat completions and responses API."""
2
2
 
3
3
  from __future__ import annotations
4
4
 
@@ -6,6 +6,7 @@ from typing import Any, Dict, Optional, Callable
6
6
  from traccia.tracer.span import SpanStatus
7
7
 
8
8
  _patched = False
9
+ _responses_patched = False
9
10
 
10
11
 
11
12
  def _safe_get(obj, path: str, default=None):
@@ -168,6 +169,185 @@ def patch_openai() -> bool:
168
169
 
169
170
  if patched_any:
170
171
  _patched = True
172
+
173
+ # Also patch Responses API (used by OpenAI Agents SDK)
174
+ patch_openai_responses()
175
+
176
+ return patched_any
177
+
178
+
179
+ def patch_openai_responses() -> bool:
180
+ """Patch OpenAI Responses API for tracing."""
181
+ global _responses_patched
182
+ if _responses_patched:
183
+ return True
184
+ try:
185
+ import openai
186
+ except Exception:
187
+ return False
188
+
189
+ def _extract_responses_input(kwargs, args):
190
+ """Extract input from responses.create call."""
191
+ input_data = kwargs.get("input")
192
+ if input_data is None and len(args) >= 2:
193
+ input_data = args[1]
194
+ if not input_data:
195
+ return None, None
196
+
197
+ # input can be a string or list of ResponseInputItem
198
+ if isinstance(input_data, str):
199
+ return [{"role": "user", "content": input_data}], input_data
200
+ elif isinstance(input_data, list):
201
+ # Convert to slim representation
202
+ slim = []
203
+ parts = []
204
+ for item in list(input_data)[:50]:
205
+ if isinstance(item, dict):
206
+ role = item.get("role", "user")
207
+ content_items = item.get("content", [])
208
+
209
+ # Extract text from content items
210
+ text_parts = []
211
+ if isinstance(content_items, str):
212
+ text_parts.append(content_items)
213
+ elif isinstance(content_items, list):
214
+ for c in content_items:
215
+ if isinstance(c, dict) and c.get("type") == "text":
216
+ text_parts.append(c.get("text", ""))
217
+
218
+ content_str = " ".join(text_parts) if text_parts else ""
219
+ slim.append({"role": role, "content": content_str})
220
+ if content_str:
221
+ parts.append(f"{role}: {content_str}")
222
+
223
+ prompt_text = "\n".join(parts) if parts else None
224
+ return slim or None, prompt_text
225
+
226
+ return None, None
227
+
228
+ def _extract_responses_output(resp) -> Optional[str]:
229
+ """Extract output text from Response object."""
230
+ output = getattr(resp, "output", None) or _safe_get(resp, "output")
231
+ if not output:
232
+ return None
233
+
234
+ parts = []
235
+ for item in output:
236
+ if isinstance(item, dict):
237
+ content = item.get("content", [])
238
+ else:
239
+ content = getattr(item, "content", [])
240
+
241
+ # Extract text from content items
242
+ if isinstance(content, list):
243
+ for c in content:
244
+ if isinstance(c, dict) and c.get("type") == "text":
245
+ text = c.get("text", "")
246
+ if text:
247
+ parts.append(text)
248
+ elif hasattr(c, "type") and c.type == "text":
249
+ text = getattr(c, "text", "")
250
+ if text:
251
+ parts.append(text)
252
+
253
+ return "\n".join(parts) if parts else None
254
+
255
+ def _wrap_responses(create_fn: Callable):
256
+ if getattr(create_fn, "_agent_trace_patched", False):
257
+ return create_fn
258
+
259
+ async def wrapped_create(*args, **kwargs):
260
+ tracer = _get_tracer("openai.responses")
261
+ model = kwargs.get("model") or _safe_get(args, "0.model", None)
262
+ input_slim, prompt_text = _extract_responses_input(kwargs, args)
263
+
264
+ attributes: Dict[str, Any] = {
265
+ "llm.vendor": "openai",
266
+ "llm.api": "responses"
267
+ }
268
+ if model:
269
+ attributes["llm.model"] = model
270
+ if input_slim:
271
+ import json
272
+ try:
273
+ attributes["llm.openai.input"] = json.dumps(input_slim)[:1000]
274
+ except Exception:
275
+ attributes["llm.openai.input"] = str(input_slim)[:1000]
276
+ if prompt_text:
277
+ attributes["llm.prompt"] = prompt_text[:2000]
278
+
279
+ with tracer.start_as_current_span("llm.openai.responses", attributes=attributes) as span:
280
+ try:
281
+ resp = await create_fn(*args, **kwargs)
282
+
283
+ # Extract response details
284
+ resp_model = getattr(resp, "model", None) or _safe_get(resp, "model")
285
+ if resp_model and "llm.model" not in span.attributes:
286
+ span.set_attribute("llm.model", str(resp_model))
287
+
288
+ # Extract usage
289
+ usage = getattr(resp, "usage", None) or _safe_get(resp, "usage")
290
+ if usage:
291
+ span.set_attribute("llm.usage.source", "provider_usage")
292
+ input_tokens = getattr(usage, "input_tokens", None) or (usage.get("input_tokens") if isinstance(usage, dict) else None)
293
+ output_tokens = getattr(usage, "output_tokens", None) or (usage.get("output_tokens") if isinstance(usage, dict) else None)
294
+ total_tokens = getattr(usage, "total_tokens", None) or (usage.get("total_tokens") if isinstance(usage, dict) else None)
295
+
296
+ if input_tokens is not None:
297
+ span.set_attribute("llm.usage.prompt_tokens", input_tokens)
298
+ span.set_attribute("llm.usage.input_tokens", input_tokens)
299
+ span.set_attribute("llm.usage.prompt_source", "provider_usage")
300
+ if output_tokens is not None:
301
+ span.set_attribute("llm.usage.completion_tokens", output_tokens)
302
+ span.set_attribute("llm.usage.output_tokens", output_tokens)
303
+ span.set_attribute("llm.usage.completion_source", "provider_usage")
304
+ if total_tokens is not None:
305
+ span.set_attribute("llm.usage.total_tokens", total_tokens)
306
+
307
+ # Extract completion text
308
+ completion = _extract_responses_output(resp)
309
+ if completion:
310
+ span.set_attribute("llm.completion", completion[:2000])
311
+
312
+ # Extract status
313
+ status = getattr(resp, "status", None) or _safe_get(resp, "status")
314
+ if status:
315
+ span.set_attribute("llm.response.status", str(status))
316
+
317
+ return resp
318
+ except Exception as exc:
319
+ span.record_exception(exc)
320
+ span.set_status(SpanStatus.ERROR, str(exc))
321
+ raise
322
+
323
+ wrapped_create._agent_trace_patched = True
324
+ return wrapped_create
325
+
326
+ patched_any = False
327
+
328
+ # Patch AsyncOpenAI.responses.create
329
+ try:
330
+ from openai import AsyncOpenAI
331
+ if hasattr(AsyncOpenAI, "responses"):
332
+ responses = getattr(AsyncOpenAI, "responses")
333
+ if hasattr(responses, "create"):
334
+ # This is a property/descriptor, need to patch the underlying class
335
+ pass
336
+ except Exception:
337
+ pass
338
+
339
+ # Patch the Responses resource class directly
340
+ try:
341
+ from openai.resources.responses import AsyncResponses
342
+ if hasattr(AsyncResponses, "create"):
343
+ original_create = AsyncResponses.create
344
+ AsyncResponses.create = _wrap_responses(original_create)
345
+ patched_any = True
346
+ except Exception:
347
+ pass
348
+
349
+ if patched_any:
350
+ _responses_patched = True
171
351
  return patched_any
172
352
 
173
353
 
@@ -1,4 +1,4 @@
1
- """Traccia integrations for frameworks like LangChain, CrewAI, LlamaIndex."""
1
+ """Traccia integrations for frameworks like LangChain and the OpenAI Agents SDK."""
2
2
 
3
3
  __all__ = []
4
4
 
@@ -14,9 +14,26 @@ def _import_langchain():
14
14
  ) from e
15
15
 
16
16
 
17
+ def _import_openai_agents():
18
+ try:
19
+ from traccia.integrations.openai_agents import install
20
+ return install
21
+ except ImportError as e:
22
+ raise ModuleNotFoundError(
23
+ "OpenAI Agents integration requires openai-agents. "
24
+ "Install with: pip install openai-agents"
25
+ ) from e
26
+
27
+
17
28
  # Make available if imported
18
29
  try:
19
30
  from traccia.integrations.langchain import TracciaCallbackHandler
20
31
  __all__.append("TracciaCallbackHandler")
21
32
  except ImportError:
22
33
  pass
34
+
35
+ try:
36
+ from traccia.integrations.openai_agents import install as install_openai_agents
37
+ __all__.append("install_openai_agents")
38
+ except ImportError:
39
+ pass
@@ -0,0 +1,73 @@
1
+ """Traccia integration for OpenAI Agents SDK."""
2
+
3
+ from typing import Optional
4
+
5
+ _installed = False
6
+
7
+
8
+ def install(enabled: Optional[bool] = None) -> bool:
9
+ """
10
+ Install Traccia tracing for OpenAI Agents SDK.
11
+
12
+ This registers a TracingProcessor with the Agents SDK that captures
13
+ agent runs, tool calls, handoffs, and LLM generations as Traccia spans.
14
+
15
+ Args:
16
+ enabled: If False, skip installation. If None, check config.
17
+
18
+ Returns:
19
+ True if installed successfully, False otherwise.
20
+
21
+ Example:
22
+ ```python
23
+ from traccia import init
24
+ from traccia.integrations.openai_agents import install
25
+
26
+ init()
27
+ install() # Register Agents SDK tracing
28
+ ```
29
+
30
+ Note:
31
+ This is automatically called by `traccia.init()` when the `openai-agents`
32
+ package is installed, unless disabled via config.
33
+ """
34
+ global _installed
35
+
36
+ if _installed:
37
+ return True
38
+
39
+ # Check if explicitly disabled
40
+ if enabled is False:
41
+ return False
42
+
43
+ # Check config if not explicitly enabled
44
+ if enabled is None:
45
+ from traccia import runtime_config
46
+ # Check if disabled in config
47
+ if runtime_config.get_config_value("openai_agents") is False:
48
+ return False
49
+
50
+ try:
51
+ # Import Agents SDK components
52
+ from agents import add_trace_processor
53
+ from agents.tracing import TracingProcessor
54
+
55
+ # Import our processor
56
+ from traccia.integrations.openai_agents.processor import TracciaAgentsTracingProcessor
57
+
58
+ # Register the processor
59
+ processor = TracciaAgentsTracingProcessor()
60
+ add_trace_processor(processor)
61
+
62
+ _installed = True
63
+ return True
64
+
65
+ except ImportError:
66
+ # Agents SDK not installed, skip silently
67
+ return False
68
+ except Exception:
69
+ # Other errors, fail silently to avoid breaking app startup
70
+ return False
71
+
72
+
73
+ __all__ = ["install"]
@@ -0,0 +1,262 @@
1
+ """Traccia processor for OpenAI Agents SDK tracing."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from typing import Any, TYPE_CHECKING
7
+
8
+ if TYPE_CHECKING:
9
+ try:
10
+ from agents.tracing import Span, Trace
11
+ from agents.tracing.span_data import (
12
+ AgentSpanData,
13
+ FunctionSpanData,
14
+ GenerationSpanData,
15
+ HandoffSpanData,
16
+ GuardrailSpanData,
17
+ ResponseSpanData,
18
+ CustomSpanData,
19
+ )
20
+ except ImportError:
21
+ pass
22
+
23
+
24
+ class TracciaAgentsTracingProcessor:
25
+ """
26
+ Traccia processor for OpenAI Agents SDK.
27
+
28
+ Implements the Agents SDK TracingProcessor interface to capture agent
29
+ runs, tool calls, handoffs, and generations into Traccia spans.
30
+ """
31
+
32
+ def __init__(self):
33
+ """Initialize the processor."""
34
+ self._trace_map = {} # Map Agents trace_id -> Traccia trace context
35
+ self._span_map = {} # Map Agents span_id -> Traccia span
36
+ self._tracer = None
37
+
38
+ def _get_tracer(self):
39
+ """Get or create the Traccia tracer."""
40
+ if self._tracer is None:
41
+ import traccia
42
+ self._tracer = traccia.get_tracer("openai.agents")
43
+ return self._tracer
44
+
45
+ def on_trace_start(self, trace: Trace) -> None:
46
+ """Called when an Agents trace starts."""
47
+ try:
48
+ # Store trace context for span correlation
49
+ self._trace_map[trace.trace_id] = {
50
+ "trace_id": trace.trace_id,
51
+ "started_at": getattr(trace, "started_at", None),
52
+ }
53
+ except Exception:
54
+ # Don't break agent execution on tracing errors
55
+ pass
56
+
57
+ def on_trace_end(self, trace: Trace) -> None:
58
+ """Called when an Agents trace ends."""
59
+ try:
60
+ # Clean up trace mapping
61
+ self._trace_map.pop(trace.trace_id, None)
62
+ except Exception:
63
+ pass
64
+
65
+ def on_span_start(self, span: Span[Any]) -> None:
66
+ """Called when an Agents span starts."""
67
+ try:
68
+ tracer = self._get_tracer()
69
+ span_data = span.span_data
70
+
71
+ # Determine span name based on span type
72
+ span_name = self._get_span_name(span_data)
73
+
74
+ # Start Traccia span
75
+ attributes = self._extract_attributes(span_data)
76
+ traccia_span = tracer.start_span(span_name, attributes=attributes)
77
+
78
+ # Store mapping
79
+ self._span_map[span.span_id] = traccia_span
80
+ except Exception:
81
+ # Don't break agent execution
82
+ pass
83
+
84
+ def on_span_end(self, span: Span[Any]) -> None:
85
+ """Called when an Agents span ends."""
86
+ try:
87
+ traccia_span = self._span_map.pop(span.span_id, None)
88
+ if not traccia_span:
89
+ return
90
+
91
+ # Update attributes with final data
92
+ span_data = span.span_data
93
+ self._update_span_attributes(traccia_span, span_data)
94
+
95
+ # Record error if present
96
+ error = getattr(span, "error", None)
97
+ if error:
98
+ from traccia.tracer.span import SpanStatus
99
+ error_msg = str(error.get("message", "Unknown error") if isinstance(error, dict) else error)
100
+ traccia_span.set_status(SpanStatus.ERROR, error_msg)
101
+
102
+ # End the span
103
+ traccia_span.end()
104
+ except Exception:
105
+ # Ensure span ends even if there's an error
106
+ try:
107
+ if traccia_span:
108
+ traccia_span.end()
109
+ except:
110
+ pass
111
+
112
+ def _get_span_name(self, span_data: Any) -> str:
113
+ """Determine Traccia span name from Agents span data."""
114
+ span_type = getattr(span_data, "type", "unknown")
115
+
116
+ if span_type == "agent":
117
+ agent_name = getattr(span_data, "name", "unknown")
118
+ return f"agent.{agent_name}"
119
+ elif span_type == "generation":
120
+ return "llm.agents.generation"
121
+ elif span_type == "function":
122
+ func_name = getattr(span_data, "name", "unknown")
123
+ return f"agent.tool.{func_name}"
124
+ elif span_type == "handoff":
125
+ return "agent.handoff"
126
+ elif span_type == "guardrail":
127
+ guardrail_name = getattr(span_data, "name", "unknown")
128
+ return f"agent.guardrail.{guardrail_name}"
129
+ elif span_type == "response":
130
+ return "agent.response"
131
+ elif span_type == "custom":
132
+ custom_name = getattr(span_data, "name", "unknown")
133
+ return f"agent.custom.{custom_name}"
134
+ else:
135
+ return f"agent.{span_type}"
136
+
137
+ def _extract_attributes(self, span_data: Any) -> dict[str, Any]:
138
+ """Extract initial attributes from Agents span data."""
139
+ attrs = {
140
+ "agent.span.type": getattr(span_data, "type", "unknown"),
141
+ }
142
+
143
+ span_type = getattr(span_data, "type", None)
144
+
145
+ if span_type == "agent":
146
+ attrs["agent.name"] = getattr(span_data, "name", None)
147
+ tools = getattr(span_data, "tools", None)
148
+ if tools:
149
+ attrs["agent.tools"] = json.dumps(tools)[:500]
150
+ handoffs = getattr(span_data, "handoffs", None)
151
+ if handoffs:
152
+ attrs["agent.handoffs"] = json.dumps(handoffs)[:500]
153
+ output_type = getattr(span_data, "output_type", None)
154
+ if output_type:
155
+ attrs["agent.output_type"] = str(output_type)
156
+
157
+ elif span_type == "generation":
158
+ model = getattr(span_data, "model", None)
159
+ if model:
160
+ attrs["llm.model"] = str(model)
161
+ model_config = getattr(span_data, "model_config", None)
162
+ if model_config:
163
+ attrs["llm.model_config"] = json.dumps(model_config)[:500]
164
+
165
+ elif span_type == "function":
166
+ func_name = getattr(span_data, "name", None)
167
+ if func_name:
168
+ attrs["agent.tool.name"] = func_name
169
+
170
+ elif span_type == "handoff":
171
+ from_agent = getattr(span_data, "from_agent", None)
172
+ to_agent = getattr(span_data, "to_agent", None)
173
+ if from_agent:
174
+ attrs["agent.handoff.from"] = from_agent
175
+ if to_agent:
176
+ attrs["agent.handoff.to"] = to_agent
177
+
178
+ elif span_type == "guardrail":
179
+ guardrail_name = getattr(span_data, "name", None)
180
+ if guardrail_name:
181
+ attrs["agent.guardrail.name"] = guardrail_name
182
+
183
+ return attrs
184
+
185
+ def _update_span_attributes(self, traccia_span: Any, span_data: Any) -> None:
186
+ """Update Traccia span with final attributes from completed Agents span."""
187
+ try:
188
+ span_type = getattr(span_data, "type", None)
189
+
190
+ if span_type == "generation":
191
+ # Add usage info
192
+ usage = getattr(span_data, "usage", None)
193
+ if usage and isinstance(usage, dict):
194
+ input_tokens = usage.get("input_tokens")
195
+ output_tokens = usage.get("output_tokens")
196
+ if input_tokens is not None:
197
+ traccia_span.set_attribute("llm.usage.input_tokens", input_tokens)
198
+ traccia_span.set_attribute("llm.usage.prompt_tokens", input_tokens)
199
+ if output_tokens is not None:
200
+ traccia_span.set_attribute("llm.usage.output_tokens", output_tokens)
201
+ traccia_span.set_attribute("llm.usage.completion_tokens", output_tokens)
202
+ if input_tokens is not None and output_tokens is not None:
203
+ traccia_span.set_attribute("llm.usage.total_tokens", input_tokens + output_tokens)
204
+
205
+ # Add truncated input/output for observability
206
+ input_data = getattr(span_data, "input", None)
207
+ if input_data:
208
+ try:
209
+ input_str = json.dumps(input_data)[:1000]
210
+ traccia_span.set_attribute("llm.input", input_str)
211
+ except:
212
+ traccia_span.set_attribute("llm.input", str(input_data)[:1000])
213
+
214
+ output_data = getattr(span_data, "output", None)
215
+ if output_data:
216
+ try:
217
+ output_str = json.dumps(output_data)[:1000]
218
+ traccia_span.set_attribute("llm.output", output_str)
219
+ except:
220
+ traccia_span.set_attribute("llm.output", str(output_data)[:1000])
221
+
222
+ elif span_type == "function":
223
+ # Add function input/output
224
+ func_input = getattr(span_data, "input", None)
225
+ if func_input:
226
+ traccia_span.set_attribute("agent.tool.input", str(func_input)[:500])
227
+
228
+ func_output = getattr(span_data, "output", None)
229
+ if func_output:
230
+ traccia_span.set_attribute("agent.tool.output", str(func_output)[:500])
231
+
232
+ mcp_data = getattr(span_data, "mcp_data", None)
233
+ if mcp_data:
234
+ traccia_span.set_attribute("agent.tool.mcp", json.dumps(mcp_data)[:500])
235
+
236
+ elif span_type == "guardrail":
237
+ triggered = getattr(span_data, "triggered", False)
238
+ traccia_span.set_attribute("agent.guardrail.triggered", triggered)
239
+
240
+ elif span_type == "response":
241
+ response = getattr(span_data, "response", None)
242
+ if response:
243
+ response_id = getattr(response, "id", None)
244
+ if response_id:
245
+ traccia_span.set_attribute("agent.response.id", response_id)
246
+
247
+ except Exception:
248
+ # Don't break tracing on attribute errors
249
+ pass
250
+
251
+ def shutdown(self) -> None:
252
+ """Shutdown the processor."""
253
+ try:
254
+ self._trace_map.clear()
255
+ self._span_map.clear()
256
+ except Exception:
257
+ pass
258
+
259
+ def force_flush(self) -> None:
260
+ """Force flush any queued spans."""
261
+ # Traccia handles flushing at the provider level
262
+ pass
traccia/runtime_config.py CHANGED
@@ -1,6 +1,6 @@
1
1
  """Runtime configuration state management."""
2
2
 
3
- from typing import Optional, List
3
+ from typing import Optional, List, Any
4
4
 
5
5
  # Global runtime configuration state
6
6
  _config = {
@@ -15,6 +15,7 @@ _config = {
15
15
  "agent_id": None,
16
16
  "debug": False,
17
17
  "attr_truncation_limit": 1000,
18
+ "openai_agents": True,
18
19
  }
19
20
 
20
21
 
@@ -104,3 +105,23 @@ def set_attr_truncation_limit(value: int) -> None:
104
105
 
105
106
  def get_attr_truncation_limit() -> int:
106
107
  return _config["attr_truncation_limit"]
108
+
109
+
110
+ def get_openai_agents() -> bool:
111
+ """Get whether OpenAI Agents SDK integration is enabled."""
112
+ return _config.get("openai_agents", True)
113
+
114
+
115
+ def set_openai_agents(value: bool) -> None:
116
+ """Set whether OpenAI Agents SDK integration is enabled."""
117
+ _config["openai_agents"] = value
118
+
119
+
120
+ def set_config_value(key: str, value: Any) -> None:
121
+ """Set a runtime config value."""
122
+ _config[key] = value
123
+
124
+
125
+ def get_config_value(key: str, default: Any = None) -> Any:
126
+ """Get a runtime config value."""
127
+ return _config.get(key, default)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: traccia
3
- Version: 0.1.5
3
+ Version: 0.1.6
4
4
  Summary: Production-ready distributed tracing SDK for AI agents and LLM applications
5
5
  License: Apache-2.0
6
6
  Project-URL: Homepage, https://github.com/traccia-ai/traccia
@@ -45,6 +45,7 @@ Traccia is a lightweight, high-performance Python SDK for observability and trac
45
45
  ## ✨ Features
46
46
 
47
47
  - **🔍 Automatic Instrumentation**: Auto-patch OpenAI, Anthropic, requests, and HTTP libraries
48
+ - **🤖 Framework Integrations**: Support for LangChain and OpenAI Agents SDK
48
49
  - **📊 LLM-Aware Tracing**: Track tokens, costs, prompts, and completions automatically
49
50
  - **⚡ Zero-Config Start**: Simple `init()` call with automatic config discovery
50
51
  - **🎯 Decorator-Based**: Trace any function with `@observe` decorator
@@ -129,6 +130,33 @@ Spans for LLM/chat model runs are created automatically with the same attributes
129
130
 
130
131
  **Note:** `pip install traccia[langchain]` installs traccia plus `langchain-core`; you need this extra to use the callback handler. If you already have `langchain-core` (e.g. from `langchain` or `langchain-openai`), base `pip install traccia` may be enough at runtime, but `traccia[langchain]` is the supported way to get a compatible dependency.
131
132
 
133
+ ### OpenAI Agents SDK
134
+
135
+ Traccia **automatically** detects and instruments the OpenAI Agents SDK when installed. No extra code needed:
136
+
137
+ ```python
138
+ from traccia import init
139
+ from agents import Agent, Runner
140
+
141
+ init() # Automatically enables Agents SDK tracing
142
+
143
+ agent = Agent(
144
+ name="Assistant",
145
+ instructions="You are a helpful assistant"
146
+ )
147
+ result = Runner.run_sync(agent, "Write a haiku about recursion")
148
+ ```
149
+
150
+ **Configuration**: Auto-enabled by default when `openai-agents` is installed. To disable:
151
+
152
+ ```python
153
+ init(openai_agents=False) # Explicit parameter
154
+ # OR set environment variable: TRACCIA_OPENAI_AGENTS=false
155
+ # OR in traccia.toml under [instrumentation]: openai_agents = false
156
+ ```
157
+
158
+ **Compatibility**: If you have `openai-agents` installed but don't use it (e.g., using LangChain or pure OpenAI instead), the integration is registered but never invoked—no overhead or extra spans.
159
+
132
160
  ---
133
161
 
134
162
  ## 📖 Configuration
@@ -169,6 +197,7 @@ reset_trace_file = false # Reset file on initialization
169
197
  enable_patching = true # Auto-patch libraries (OpenAI, Anthropic, requests)
170
198
  enable_token_counting = true # Count tokens for LLM calls
171
199
  enable_costs = true # Calculate costs
200
+ openai_agents = true # Auto-enable OpenAI Agents SDK integration
172
201
  auto_instrument_tools = false # Auto-instrument tool calls (experimental)
173
202
  max_tool_spans = 100 # Max tool spans to create
174
203
  max_span_depth = 10 # Max nested span depth
@@ -607,6 +636,17 @@ Application Code (@observe)
607
636
  Backend (Grafana Tempo / Jaeger / Zipkin / etc.)
608
637
  ```
609
638
 
639
+ ### Instrumentation vs Integrations
640
+
641
+ - **`traccia.instrumentation.*`**: Infrastructure and vendor instrumentation.
642
+ - HTTP client/server helpers (including FastAPI middleware).
643
+ - Vendor SDK hooks and monkey patching (e.g., OpenAI, Anthropic, `requests`).
644
+ - Decorators and utilities used for auto-instrumenting arbitrary functions.
645
+
646
+ - **`traccia.integrations.*`**: AI/agent framework integrations.
647
+ - Adapters that plug into higher-level frameworks via their official extension points (e.g., LangChain callbacks).
648
+ - Work at the level of chains, tools, agents, and workflows rather than raw HTTP or SDK calls.
649
+
610
650
  ---
611
651
 
612
652
  ## 🤝 Contributing
@@ -1,11 +1,11 @@
1
1
  traccia/__init__.py,sha256=O2Hs3GFVcFsmeBlJwJhqPvFH9kB-pAJarEfB9G4xLZU,1998
2
- traccia/auto.py,sha256=S-T6edev59c3HEgAU9ENQjOtL2duUJdjJzlBk6hGek0,25792
2
+ traccia/auto.py,sha256=zr4zTMIPEn7epm1KO8APzpdzuaYroeW_ZKB8L41AFXk,26302
3
3
  traccia/auto_instrumentation.py,sha256=e2Gzt2AtGSXbv6BSZpAApAtMcTlEwc08dgZgQMfrREU,2107
4
4
  traccia/cli.py,sha256=lQJU-dAxxcWyOew7OCBi2u7RbMXcwOxtLyfzFwlZ4f0,12362
5
- traccia/config.py,sha256=BCj_N_zkuRlfPMsuTO-LpcZDQdKQjJ6QHxAIWfCg0HI,24527
5
+ traccia/config.py,sha256=BByH9VOjjXKHLeJlWXXEA2Vlq3k9AqriNggdrlUrff0,24798
6
6
  traccia/errors.py,sha256=CMIS01M3pnr3oRhtzQkyKYkDgYkJNlGd6D9Zg2AohA0,1158
7
7
  traccia/pricing_config.py,sha256=ZTccshJbAySWJw9Rdvpj2SMaHkEio325t8NkfJfNzfY,1732
8
- traccia/runtime_config.py,sha256=LjeKCPYKkbZiI38ih4OX4XMkW72hA29Or0hso4W63-M,2157
8
+ traccia/runtime_config.py,sha256=c6Cto1pQjKy4Jb6pSClQrgzQRv8flZrePNpK1wxbx3s,2735
9
9
  traccia/context/__init__.py,sha256=_tNDOoVZ3GgbYtw9I3F8YSKuPovQBa0orjtd7CtfI1w,738
10
10
  traccia/context/context.py,sha256=hBvsrYV_-2cHCC7jTH5iGKHzxZRGpMH2vc0oTW2cqAY,1967
11
11
  traccia/context/propagators.py,sha256=Tq3O-JYEBqkshrrhGYn-j_2V718gfLiFDaVzsDdVmms,8747
@@ -14,18 +14,20 @@ traccia/exporter/console_exporter.py,sha256=IKR9S_DJbOqB7aRaDHGtl7GlDiV3GLHTh7I9
14
14
  traccia/exporter/file_exporter.py,sha256=4TcHvhCs6yGCfU-HKVtA8oKjnlzlSfGiI0TMTdVvdsc,6922
15
15
  traccia/exporter/http_exporter.py,sha256=kLcDn0WoFbSc6pAawj_-MNqOASGMEiFKki-Y3FeieKU,8039
16
16
  traccia/exporter/otlp_exporter.py,sha256=sy74xOgKyLofG_S9QHTuQo4HB4LaKnVVWKYv1cHRZx0,7692
17
- traccia/instrumentation/__init__.py,sha256=Eiq9SCD7jucIQZsaWisHLG1-SFhebscMHRDyAN6Qjqk,736
17
+ traccia/instrumentation/__init__.py,sha256=kOs72xJCraJknhDAtbxgiVNUgwI-efAGGFB3tXJsRhI,967
18
18
  traccia/instrumentation/anthropic.py,sha256=R4vL1plLHgt0TXkDwt1Priic9HppIp2Oyt11IJZZ9Ug,3444
19
19
  traccia/instrumentation/decorator.py,sha256=BTZvtFpNA2MVuNkoMbbZtgVK_c_su1M3V1OIuBoTEl0,10049
20
20
  traccia/instrumentation/fastapi.py,sha256=LpenJ5aIrcTTWZb6_6GsQ_6cXrVMDjdfeyfQeGvDUTs,1271
21
21
  traccia/instrumentation/http_client.py,sha256=fBZx2RNcMxH73cmTrDjlQVYxerBUKQnw8OANyfak5HE,586
22
22
  traccia/instrumentation/http_server.py,sha256=RTMklczBEam8PE-pPVMUWBo8YtU7_ISa6-G0Zqjbptk,913
23
- traccia/instrumentation/openai.py,sha256=GmupQ96g7Jb_gJZZoQrF3UygNzqiEjAxGhpeaAp-3pY,7087
23
+ traccia/instrumentation/openai.py,sha256=nItQnjpq3GXNfM1VE3DIt2YIHRlg5e495KtvpQjFjFs,14862
24
24
  traccia/instrumentation/requests.py,sha256=I7Oc_px9RDtliQ2ktQVymPUc2Zkaz1NdepxWQygMb24,2267
25
- traccia/integrations/__init__.py,sha256=bbAVvZaqg0FcM9OVchpb_fHRrTQN8lylMiFTsXHa9sM,662
25
+ traccia/integrations/__init__.py,sha256=bkLPf2DyX7c6-_p12aKgdgsM9EOyF2EZ7y2-jNjxDzw,1150
26
26
  traccia/integrations/langchain/__init__.py,sha256=98umBkI8DePI4eUJBZtBUgjMsmaUNF2EfMPK2bWH3iM,524
27
27
  traccia/integrations/langchain/callback.py,sha256=xwHVMKRa6TduLm2bU4NNuDvK2AySQsmnQWWqA8whYOQ,14771
28
28
  traccia/integrations/langchain/utils.py,sha256=TjkEUKprTL2CfkSjVG4jSO0fLk_n3wvTgxv5NN_wHeI,4258
29
+ traccia/integrations/openai_agents/__init__.py,sha256=bFxSf5rvnKzl87Ux8MkFylUYY0vagil3cTTrb0HeNeg,2028
30
+ traccia/integrations/openai_agents/processor.py,sha256=DpwSISq9R3dUiS0so95T7bSCLwcRnG8IcsiXCsRg_fY,10450
29
31
  traccia/processors/__init__.py,sha256=-hwG9SXM6JbGJaIol2KnS3aWwr9P_TVbJ4AZtGTe2zY,1192
30
32
  traccia/processors/agent_enricher.py,sha256=Ur1QvqXSCWT9RgZQw6s53P6URfGmH0KsC7WRJlCYRew,6434
31
33
  traccia/processors/batch_processor.py,sha256=JTZzWQs5jXlIiqI7PqukfrHil3V81ZC4tRsqeGYv_v8,4832
@@ -45,9 +47,9 @@ traccia/tracer/span_context.py,sha256=Y6J5Rr6bu-WG2-K2tyncLyyjdAK74b3fbSpIkLOQor
45
47
  traccia/tracer/tracer.py,sha256=oxXJnkR95-UnQeVhdtOOW7q0iAqWf_M9YmvCEcvRhkg,9496
46
48
  traccia/utils/__init__.py,sha256=JelSlN-M6Sv8y_MLpYAz_kBUguBnxFSuzvhg4eFIdpg,357
47
49
  traccia/utils/helpers.py,sha256=yhihp2Jw2TLTPxffGee5nE8nRcZ5C2aWgyLHKZek3Cc,2107
48
- traccia-0.1.5.dist-info/licenses/LICENSE,sha256=HNl57LOj88EfKh-IWmeqWxsDRh_FF6lj0l-E2A4Hr8w,10757
49
- traccia-0.1.5.dist-info/METADATA,sha256=ginDaCSUr7lpnmd2FVuGuTFYSURcVuMrCGee2CNpOhc,19011
50
- traccia-0.1.5.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
51
- traccia-0.1.5.dist-info/entry_points.txt,sha256=SG7gacPRmFzLw2HYTblUZsmC_TO3n14-dNi28SL-C2k,45
52
- traccia-0.1.5.dist-info/top_level.txt,sha256=Kc56JudupqSkzJPOnuQ6mPHJmhtike7pssNX0u_p59w,8
53
- traccia-0.1.5.dist-info/RECORD,,
50
+ traccia-0.1.6.dist-info/licenses/LICENSE,sha256=HNl57LOj88EfKh-IWmeqWxsDRh_FF6lj0l-E2A4Hr8w,10757
51
+ traccia-0.1.6.dist-info/METADATA,sha256=IJnHGuC2bOBkvx0YdRIpwNWndQOg7x8AgJS2afj1Aj0,20676
52
+ traccia-0.1.6.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
53
+ traccia-0.1.6.dist-info/entry_points.txt,sha256=SG7gacPRmFzLw2HYTblUZsmC_TO3n14-dNi28SL-C2k,45
54
+ traccia-0.1.6.dist-info/top_level.txt,sha256=Kc56JudupqSkzJPOnuQ6mPHJmhtike7pssNX0u_p59w,8
55
+ traccia-0.1.6.dist-info/RECORD,,