traccia 0.1.2-py3-none-any.whl → 0.1.6-py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in those public registries.
- traccia/__init__.py +73 -0
- traccia/auto.py +748 -0
- traccia/auto_instrumentation.py +74 -0
- traccia/cli.py +349 -0
- traccia/config.py +699 -0
- traccia/context/__init__.py +33 -0
- traccia/context/context.py +67 -0
- traccia/context/propagators.py +283 -0
- traccia/errors.py +48 -0
- traccia/exporter/__init__.py +8 -0
- traccia/exporter/console_exporter.py +31 -0
- traccia/exporter/file_exporter.py +178 -0
- traccia/exporter/http_exporter.py +214 -0
- traccia/exporter/otlp_exporter.py +190 -0
- traccia/instrumentation/__init__.py +26 -0
- traccia/instrumentation/anthropic.py +92 -0
- traccia/instrumentation/decorator.py +263 -0
- traccia/instrumentation/fastapi.py +38 -0
- traccia/instrumentation/http_client.py +21 -0
- traccia/instrumentation/http_server.py +25 -0
- traccia/instrumentation/openai.py +358 -0
- traccia/instrumentation/requests.py +68 -0
- traccia/integrations/__init__.py +39 -0
- traccia/integrations/langchain/__init__.py +14 -0
- traccia/integrations/langchain/callback.py +418 -0
- traccia/integrations/langchain/utils.py +129 -0
- traccia/integrations/openai_agents/__init__.py +73 -0
- traccia/integrations/openai_agents/processor.py +262 -0
- traccia/pricing_config.py +58 -0
- traccia/processors/__init__.py +35 -0
- traccia/processors/agent_enricher.py +159 -0
- traccia/processors/batch_processor.py +140 -0
- traccia/processors/cost_engine.py +71 -0
- traccia/processors/cost_processor.py +70 -0
- traccia/processors/drop_policy.py +44 -0
- traccia/processors/logging_processor.py +31 -0
- traccia/processors/rate_limiter.py +223 -0
- traccia/processors/sampler.py +22 -0
- traccia/processors/token_counter.py +216 -0
- traccia/runtime_config.py +127 -0
- traccia/tracer/__init__.py +15 -0
- traccia/tracer/otel_adapter.py +577 -0
- traccia/tracer/otel_utils.py +24 -0
- traccia/tracer/provider.py +155 -0
- traccia/tracer/span.py +286 -0
- traccia/tracer/span_context.py +16 -0
- traccia/tracer/tracer.py +243 -0
- traccia/utils/__init__.py +19 -0
- traccia/utils/helpers.py +95 -0
- {traccia-0.1.2.dist-info → traccia-0.1.6.dist-info}/METADATA +72 -15
- traccia-0.1.6.dist-info/RECORD +55 -0
- traccia-0.1.6.dist-info/top_level.txt +1 -0
- traccia-0.1.2.dist-info/RECORD +0 -6
- traccia-0.1.2.dist-info/top_level.txt +0 -1
- {traccia-0.1.2.dist-info → traccia-0.1.6.dist-info}/WHEEL +0 -0
- {traccia-0.1.2.dist-info → traccia-0.1.6.dist-info}/entry_points.txt +0 -0
- {traccia-0.1.2.dist-info → traccia-0.1.6.dist-info}/licenses/LICENSE +0 -0
traccia/instrumentation/openai.py
@@ -0,0 +1,358 @@
+"""OpenAI monkey patching for chat completions and responses API."""
+
+from __future__ import annotations
+
+from typing import Any, Dict, Optional, Callable
+from traccia.tracer.span import SpanStatus
+
+_patched = False
+_responses_patched = False
+
+
+def _safe_get(obj, path: str, default=None):
+    cur = obj
+    for part in path.split("."):
+        if cur is None:
+            return default
+        if isinstance(cur, dict):
+            cur = cur.get(part)
+        else:
+            cur = getattr(cur, part, None)
+    return cur if cur is not None else default
+
+
+def patch_openai() -> bool:
+    """Patch OpenAI chat completions for both legacy and new client APIs."""
+    global _patched
+    if _patched:
+        return True
+    try:
+        import openai
+    except Exception:
+        return False
+
+    def _extract_messages(kwargs, args):
+        messages = kwargs.get("messages")
+        # For new client, first arg after self is messages
+        if messages is None and len(args) >= 2:
+            messages = args[1]
+        if not messages or not isinstance(messages, (list, tuple)):
+            return None
+        # Keep only JSON-friendly, small fields to avoid huge/sensitive payloads.
+        slim = []
+        for m in list(messages)[:50]:
+            if not isinstance(m, dict):
+                continue
+            role = m.get("role")
+            name = m.get("name")
+            content = m.get("content")
+            if isinstance(content, (list, dict)):
+                content = str(content)
+            elif content is not None and not isinstance(content, str):
+                content = str(content)
+            item = {"role": role, "content": content}
+            if name:
+                item["name"] = name
+            slim.append(item)
+        return slim or None
+
+    def _extract_prompt_text(messages_slim) -> Optional[str]:
+        if not messages_slim:
+            return None
+        parts = []
+        for m in messages_slim:
+            role = m.get("role")
+            content = m.get("content")
+            if not content:
+                continue
+            parts.append(f"{role}: {content}" if role else str(content))
+        return "\n".join(parts) if parts else None
+
+    def _extract_prompt(kwargs, args) -> Optional[str]:
+        messages = kwargs.get("messages")
+        if messages is None and len(args) >= 2:
+            messages = args[1]
+        if not messages:
+            return None
+        parts = []
+        for m in messages:
+            content = m.get("content")
+            role = m.get("role")
+            if content:
+                parts.append(f"{role}: {content}" if role else str(content))
+        return "\n".join(parts) if parts else None
+
+    def _wrap(create_fn: Callable):
+        if getattr(create_fn, "_agent_trace_patched", False):
+            return create_fn
+
+        def wrapped_create(*args, **kwargs):
+            tracer = _get_tracer("openai")
+            model = kwargs.get("model") or _safe_get(args, "0.model", None)
+            messages_slim = _extract_messages(kwargs, args)
+            prompt_text = _extract_prompt_text(messages_slim) or _extract_prompt(kwargs, args)
+            attributes: Dict[str, Any] = {"llm.vendor": "openai"}
+            if model:
+                attributes["llm.model"] = model
+            if messages_slim:
+                # Convert messages to JSON string for OTel compatibility
+                import json
+                try:
+                    attributes["llm.openai.messages"] = json.dumps(messages_slim)[:1000]
+                except Exception:
+                    attributes["llm.openai.messages"] = str(messages_slim)[:1000]
+            if prompt_text:
+                attributes["llm.prompt"] = prompt_text
+            with tracer.start_as_current_span("llm.openai.chat.completions", attributes=attributes) as span:
+                try:
+                    resp = create_fn(*args, **kwargs)
+                    # capture model from response if not already set
+                    resp_model = getattr(resp, "model", None) or (_safe_get(resp, "model"))
+                    if resp_model and "llm.model" not in span.attributes:
+                        span.set_attribute("llm.model", resp_model)
+                    usage = getattr(resp, "usage", None) or (resp.get("usage") if isinstance(resp, dict) else None)
+                    if usage:
+                        span.set_attribute("llm.usage.source", "provider_usage")
+                        for k in ("prompt_tokens", "completion_tokens", "total_tokens"):
+                            val = getattr(usage, k, None) if not isinstance(usage, dict) else usage.get(k)
+                            if val is not None:
+                                span.set_attribute(f"llm.usage.{k}", val)
+                        if "llm.usage.prompt_tokens" in span.attributes:
+                            span.set_attribute("llm.usage.prompt_source", "provider_usage")
+                        if "llm.usage.completion_tokens" in span.attributes:
+                            span.set_attribute("llm.usage.completion_source", "provider_usage")
+                    finish_reason = _safe_get(resp, "choices.0.finish_reason")
+                    if finish_reason:
+                        span.set_attribute("llm.finish_reason", finish_reason)
+                    completion = _safe_get(resp, "choices.0.message.content")
+                    if completion:
+                        span.set_attribute("llm.completion", completion)
+                    return resp
+                except Exception as exc:
+                    span.record_exception(exc)
+                    span.set_status(SpanStatus.ERROR, str(exc))
+                    raise
+
+        wrapped_create._agent_trace_patched = True
+        return wrapped_create
+
+    patched_any = False
+
+    # Legacy: openai.ChatCompletion.create
+    target_legacy = getattr(openai, "ChatCompletion", None) or getattr(openai, "chat", None)
+    if target_legacy:
+        create_fn = getattr(target_legacy, "create", None)
+        if create_fn:
+            setattr(target_legacy, "create", _wrap(create_fn))
+            patched_any = True
+
+    # New client: OpenAI.chat.completions.create
+    new_client_cls = getattr(openai, "OpenAI", None)
+    if new_client_cls and hasattr(new_client_cls, "chat"):
+        chat = getattr(new_client_cls, "chat", None)
+        if chat and hasattr(chat, "completions"):
+            completions = getattr(chat, "completions")
+            if hasattr(completions, "create"):
+                patched = _wrap(completions.create)
+                setattr(completions, "create", patched)
+                patched_any = True
+
+    # New client resource class: openai.resources.chat.completions.Completions
+    try:
+        from openai.resources.chat.completions import Completions  # type: ignore
+
+        if hasattr(Completions, "create"):
+            Completions.create = _wrap(Completions.create)
+            patched_any = True
+    except Exception:
+        pass
+
+    if patched_any:
+        _patched = True
+
+    # Also patch Responses API (used by OpenAI Agents SDK)
+    patch_openai_responses()
+
+    return patched_any
+
+
+def patch_openai_responses() -> bool:
+    """Patch OpenAI Responses API for tracing."""
+    global _responses_patched
+    if _responses_patched:
+        return True
+    try:
+        import openai
+    except Exception:
+        return False
+
+    def _extract_responses_input(kwargs, args):
+        """Extract input from responses.create call."""
+        input_data = kwargs.get("input")
+        if input_data is None and len(args) >= 2:
+            input_data = args[1]
+        if not input_data:
+            return None, None
+
+        # input can be a string or list of ResponseInputItem
+        if isinstance(input_data, str):
+            return [{"role": "user", "content": input_data}], input_data
+        elif isinstance(input_data, list):
+            # Convert to slim representation
+            slim = []
+            parts = []
+            for item in list(input_data)[:50]:
+                if isinstance(item, dict):
+                    role = item.get("role", "user")
+                    content_items = item.get("content", [])
+
+                    # Extract text from content items
+                    text_parts = []
+                    if isinstance(content_items, str):
+                        text_parts.append(content_items)
+                    elif isinstance(content_items, list):
+                        for c in content_items:
+                            if isinstance(c, dict) and c.get("type") == "text":
+                                text_parts.append(c.get("text", ""))
+
+                    content_str = " ".join(text_parts) if text_parts else ""
+                    slim.append({"role": role, "content": content_str})
+                    if content_str:
+                        parts.append(f"{role}: {content_str}")
+
+            prompt_text = "\n".join(parts) if parts else None
+            return slim or None, prompt_text
+
+        return None, None
+
+    def _extract_responses_output(resp) -> Optional[str]:
+        """Extract output text from Response object."""
+        output = getattr(resp, "output", None) or _safe_get(resp, "output")
+        if not output:
+            return None
+
+        parts = []
+        for item in output:
+            if isinstance(item, dict):
+                content = item.get("content", [])
+            else:
+                content = getattr(item, "content", [])
+
+            # Extract text from content items
+            if isinstance(content, list):
+                for c in content:
+                    if isinstance(c, dict) and c.get("type") == "text":
+                        text = c.get("text", "")
+                        if text:
+                            parts.append(text)
+                    elif hasattr(c, "type") and c.type == "text":
+                        text = getattr(c, "text", "")
+                        if text:
+                            parts.append(text)
+
+        return "\n".join(parts) if parts else None
+
+    def _wrap_responses(create_fn: Callable):
+        if getattr(create_fn, "_agent_trace_patched", False):
+            return create_fn
+
+        async def wrapped_create(*args, **kwargs):
+            tracer = _get_tracer("openai.responses")
+            model = kwargs.get("model") or _safe_get(args, "0.model", None)
+            input_slim, prompt_text = _extract_responses_input(kwargs, args)
+
+            attributes: Dict[str, Any] = {
+                "llm.vendor": "openai",
+                "llm.api": "responses"
+            }
+            if model:
+                attributes["llm.model"] = model
+            if input_slim:
+                import json
+                try:
+                    attributes["llm.openai.input"] = json.dumps(input_slim)[:1000]
+                except Exception:
+                    attributes["llm.openai.input"] = str(input_slim)[:1000]
+            if prompt_text:
+                attributes["llm.prompt"] = prompt_text[:2000]
+
+            with tracer.start_as_current_span("llm.openai.responses", attributes=attributes) as span:
+                try:
+                    resp = await create_fn(*args, **kwargs)
+
+                    # Extract response details
+                    resp_model = getattr(resp, "model", None) or _safe_get(resp, "model")
+                    if resp_model and "llm.model" not in span.attributes:
+                        span.set_attribute("llm.model", str(resp_model))
+
+                    # Extract usage
+                    usage = getattr(resp, "usage", None) or _safe_get(resp, "usage")
+                    if usage:
+                        span.set_attribute("llm.usage.source", "provider_usage")
+                        input_tokens = getattr(usage, "input_tokens", None) or (usage.get("input_tokens") if isinstance(usage, dict) else None)
+                        output_tokens = getattr(usage, "output_tokens", None) or (usage.get("output_tokens") if isinstance(usage, dict) else None)
+                        total_tokens = getattr(usage, "total_tokens", None) or (usage.get("total_tokens") if isinstance(usage, dict) else None)
+
+                        if input_tokens is not None:
+                            span.set_attribute("llm.usage.prompt_tokens", input_tokens)
+                            span.set_attribute("llm.usage.input_tokens", input_tokens)
+                            span.set_attribute("llm.usage.prompt_source", "provider_usage")
+                        if output_tokens is not None:
+                            span.set_attribute("llm.usage.completion_tokens", output_tokens)
+                            span.set_attribute("llm.usage.output_tokens", output_tokens)
+                            span.set_attribute("llm.usage.completion_source", "provider_usage")
+                        if total_tokens is not None:
+                            span.set_attribute("llm.usage.total_tokens", total_tokens)
+
+                    # Extract completion text
+                    completion = _extract_responses_output(resp)
+                    if completion:
+                        span.set_attribute("llm.completion", completion[:2000])
+
+                    # Extract status
+                    status = getattr(resp, "status", None) or _safe_get(resp, "status")
+                    if status:
+                        span.set_attribute("llm.response.status", str(status))
+
+                    return resp
+                except Exception as exc:
+                    span.record_exception(exc)
+                    span.set_status(SpanStatus.ERROR, str(exc))
+                    raise
+
+        wrapped_create._agent_trace_patched = True
+        return wrapped_create
+
+    patched_any = False
+
+    # Patch AsyncOpenAI.responses.create
+    try:
+        from openai import AsyncOpenAI
+        if hasattr(AsyncOpenAI, "responses"):
+            responses = getattr(AsyncOpenAI, "responses")
+            if hasattr(responses, "create"):
+                # This is a property/descriptor, need to patch the underlying class
+                pass
+    except Exception:
+        pass
+
+    # Patch the Responses resource class directly
+    try:
+        from openai.resources.responses import AsyncResponses
+        if hasattr(AsyncResponses, "create"):
+            original_create = AsyncResponses.create
+            AsyncResponses.create = _wrap_responses(original_create)
+            patched_any = True
+    except Exception:
+        pass
+
+    if patched_any:
+        _responses_patched = True
+    return patched_any
+
+
+def _get_tracer(name: str):
+    import traccia
+
+    return traccia.get_tracer(name)
+
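A minimal driver sketch for the hunk above. Only patch_openai() and the span attributes it sets come from this diff; the client call, model name, and the presence of an OPENAI_API_KEY in the environment are illustrative assumptions.

from traccia.instrumentation.openai import patch_openai

if patch_openai():  # returns False when the openai package is not importable
    import openai

    client = openai.OpenAI()
    # Completions.create is now wrapped: the call below runs inside an
    # "llm.openai.chat.completions" span that records llm.vendor, llm.model,
    # llm.prompt, llm.usage.* and llm.completion attributes.
    client.chat.completions.create(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": "ping"}],
    )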
traccia/instrumentation/requests.py
@@ -0,0 +1,68 @@
+"""Requests monkey patching for HTTP client tracing."""
+
+from __future__ import annotations
+
+from typing import Any, Dict
+from traccia.tracer.span import SpanStatus
+from traccia.context import get_current_span, inject_traceparent, inject_tracestate
+
+_patched = False
+
+
+def patch_requests() -> bool:
+    """Patch requests.Session.request; returns True if patched, False otherwise."""
+    global _patched
+    if _patched:
+        return True
+    try:
+        import requests
+    except Exception:
+        return False
+
+    original_request = requests.sessions.Session.request
+    if getattr(original_request, "_agent_trace_patched", False):
+        _patched = True
+        return True
+
+    def wrapped_request(self, method, url, *args, **kwargs):
+        # Skip instrumentation for OTLP exporter endpoints to prevent feedback loop
+        url_str = str(url) if url else ""
+        if "/v1/traces" in url_str or "/api/v1/traces" in url_str:
+            # This is likely an exporter endpoint - don't instrument it
+            import requests
+            return original_request(self, method, url, *args, **kwargs)
+
+        tracer = _get_tracer("requests")
+        attributes: Dict[str, Any] = {
+            "http.method": method,
+            "http.url": url,
+        }
+        headers = kwargs.get("headers")
+        if headers is None:
+            headers = {}
+            kwargs["headers"] = headers
+        current = get_current_span()
+        if current:
+            inject_traceparent(headers, current.context)
+            inject_tracestate(headers, current.context)
+        with tracer.start_as_current_span("http.client", attributes=attributes) as span:
+            try:
+                resp = original_request(self, method, url, *args, **kwargs)
+                span.set_attribute("http.status_code", getattr(resp, "status_code", None))
+                return resp
+            except Exception as exc:
+                span.record_exception(exc)
+                span.set_status(SpanStatus.ERROR, str(exc))
+                raise
+
+    wrapped_request._agent_trace_patched = True
+    requests.sessions.Session.request = wrapped_request
+    _patched = True
+    return True
+
+
+def _get_tracer(name: str):
+    import traccia
+
+    return traccia.get_tracer(name)
+
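A propagation sketch for the requests hunk: traccia.get_tracer and start_as_current_span are used exactly as the patched code uses them, while the parent-span name and demo URL are assumptions.

from traccia.instrumentation.requests import patch_requests

if patch_requests():
    import requests
    import traccia

    tracer = traccia.get_tracer("demo")
    with tracer.start_as_current_span("parent", attributes={}):
        # The wrapped Session.request injects W3C traceparent/tracestate
        # headers from the current span, then records http.method, http.url
        # and http.status_code on a child "http.client" span.
        requests.get("https://example.com")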
traccia/integrations/__init__.py
@@ -0,0 +1,39 @@
+"""Traccia integrations for frameworks like LangChain, OpenAI Agents SDK."""
+
+__all__ = []
+
+# Lazy imports for optional dependencies
+def _import_langchain():
+    try:
+        from traccia.integrations.langchain import TracciaCallbackHandler
+        return TracciaCallbackHandler
+    except ImportError as e:
+        raise ModuleNotFoundError(
+            "LangChain integration requires langchain-core. "
+            "Install with: pip install traccia[langchain]"
+        ) from e
+
+
+def _import_openai_agents():
+    try:
+        from traccia.integrations.openai_agents import install
+        return install
+    except ImportError as e:
+        raise ModuleNotFoundError(
+            "OpenAI Agents integration requires openai-agents. "
+            "Install with: pip install openai-agents"
+        ) from e
+
+
+# Make available if imported
+try:
+    from traccia.integrations.langchain import TracciaCallbackHandler
+    __all__.append("TracciaCallbackHandler")
+except ImportError:
+    pass
+
+try:
+    from traccia.integrations.openai_agents import install as install_openai_agents
+    __all__.append("install_openai_agents")
+except ImportError:
+    pass
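A behavior sketch for the optional-dependency pattern above: importing traccia.integrations always succeeds, and __all__ only advertises integrations whose dependencies are installed. The no-argument handler construction is an assumption; the constructor signature is not shown in this diff.

import traccia.integrations as integrations

if "TracciaCallbackHandler" in integrations.__all__:
    handler = integrations.TracciaCallbackHandler()  # constructor args assumed
else:
    # langchain-core is absent; the lazy _import_langchain() helper would
    # raise ModuleNotFoundError with this install hint:
    print("pip install traccia[langchain]")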
traccia/integrations/langchain/__init__.py
@@ -0,0 +1,14 @@
+"""Traccia LangChain integration via callback handler."""
+
+try:
+    from traccia.integrations.langchain.callback import TracciaCallbackHandler
+
+    # Convenience alias: from traccia.integrations.langchain import CallbackHandler
+    CallbackHandler = TracciaCallbackHandler
+
+    __all__ = ["TracciaCallbackHandler", "CallbackHandler"]
+except ImportError as e:
+    raise ModuleNotFoundError(
+        "LangChain integration requires langchain-core. "
+        "Install with: pip install traccia[langchain]"
+    ) from e
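A hypothetical wiring sketch for the handler exported above. The alias import is taken from the hunk; the constructor call and the per-invocation callbacks config follow LangChain's standard callbacks mechanism rather than anything shown in this diff.

from traccia.integrations.langchain import CallbackHandler  # alias defined above

handler = CallbackHandler()  # hypothetical: constructor signature not shown here
# LangChain components accept callbacks per call via the config mapping, e.g.:
#   chain.invoke({"topic": "tracing"}, config={"callbacks": [handler]})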