traccia-0.1.2-py3-none-any.whl → traccia-0.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. traccia/__init__.py +73 -0
  2. traccia/auto.py +736 -0
  3. traccia/auto_instrumentation.py +74 -0
  4. traccia/cli.py +349 -0
  5. traccia/config.py +693 -0
  6. traccia/context/__init__.py +33 -0
  7. traccia/context/context.py +67 -0
  8. traccia/context/propagators.py +283 -0
  9. traccia/errors.py +48 -0
  10. traccia/exporter/__init__.py +8 -0
  11. traccia/exporter/console_exporter.py +31 -0
  12. traccia/exporter/file_exporter.py +178 -0
  13. traccia/exporter/http_exporter.py +214 -0
  14. traccia/exporter/otlp_exporter.py +190 -0
  15. traccia/instrumentation/__init__.py +20 -0
  16. traccia/instrumentation/anthropic.py +92 -0
  17. traccia/instrumentation/decorator.py +263 -0
  18. traccia/instrumentation/fastapi.py +38 -0
  19. traccia/instrumentation/http_client.py +21 -0
  20. traccia/instrumentation/http_server.py +25 -0
  21. traccia/instrumentation/openai.py +178 -0
  22. traccia/instrumentation/requests.py +68 -0
  23. traccia/integrations/__init__.py +22 -0
  24. traccia/integrations/langchain/__init__.py +14 -0
  25. traccia/integrations/langchain/callback.py +418 -0
  26. traccia/integrations/langchain/utils.py +129 -0
  27. traccia/pricing_config.py +58 -0
  28. traccia/processors/__init__.py +35 -0
  29. traccia/processors/agent_enricher.py +159 -0
  30. traccia/processors/batch_processor.py +140 -0
  31. traccia/processors/cost_engine.py +71 -0
  32. traccia/processors/cost_processor.py +70 -0
  33. traccia/processors/drop_policy.py +44 -0
  34. traccia/processors/logging_processor.py +31 -0
  35. traccia/processors/rate_limiter.py +223 -0
  36. traccia/processors/sampler.py +22 -0
  37. traccia/processors/token_counter.py +216 -0
  38. traccia/runtime_config.py +106 -0
  39. traccia/tracer/__init__.py +15 -0
  40. traccia/tracer/otel_adapter.py +577 -0
  41. traccia/tracer/otel_utils.py +24 -0
  42. traccia/tracer/provider.py +155 -0
  43. traccia/tracer/span.py +286 -0
  44. traccia/tracer/span_context.py +16 -0
  45. traccia/tracer/tracer.py +243 -0
  46. traccia/utils/__init__.py +19 -0
  47. traccia/utils/helpers.py +95 -0
  48. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/METADATA +32 -15
  49. traccia-0.1.5.dist-info/RECORD +53 -0
  50. traccia-0.1.5.dist-info/top_level.txt +1 -0
  51. traccia-0.1.2.dist-info/RECORD +0 -6
  52. traccia-0.1.2.dist-info/top_level.txt +0 -1
  53. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/WHEEL +0 -0
  54. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/entry_points.txt +0 -0
  55. {traccia-0.1.2.dist-info → traccia-0.1.5.dist-info}/licenses/LICENSE +0 -0
traccia/instrumentation/decorator.py
@@ -0,0 +1,263 @@
+ """@observe decorator for instrumenting functions."""
+
+ from __future__ import annotations
+
+ import functools
+ import inspect
+ import traceback
+ from typing import Any, Callable, Dict, Iterable, Optional
+ from traccia.tracer.span import SpanStatus
+
+
+ def _capture_args(bound_args: inspect.BoundArguments, skip: Iterable[str]) -> Dict[str, Any]:
+     """Capture function arguments, converting complex types to OTel-compatible types."""
+     captured = {}
+     for name, value in bound_args.arguments.items():
+         if name in skip:
+             continue
+         # Skip 'self' - it's an object, not a valid OTel attribute
+         if name == "self":
+             continue
+         # Convert value to OTel-compatible type
+         captured[name] = _convert_to_otel_type(value)
+     return captured
+
+
+ def _convert_to_otel_type(value: Any) -> Any:
+     """
+     Convert a value to an OpenTelemetry-compatible type.
+
+     OTel attributes must be: bool, str, bytes, int, float, or sequences of those.
+     """
+     # Primitive types are fine
+     if isinstance(value, (bool, str, bytes, int, float)) or value is None:
+         return value
+
+     # For sequences, convert each element
+     if isinstance(value, (list, tuple)):
+         converted = []
+         for item in value:
+             if isinstance(item, (bool, str, bytes, int, float)) or item is None:
+                 converted.append(item)
+             else:
+                 # Convert complex types to string representation
+                 converted.append(str(item)[:1000])  # Truncate long strings
+         return converted[:100]  # Limit sequence length
+
+     # For dicts and other complex types, convert to JSON string
+     if isinstance(value, dict):
+         try:
+             import json
+             json_str = json.dumps(value, default=str)[:1000]  # Truncate
+             return json_str
+         except Exception:
+             return str(value)[:1000]
+
+     # For other types, convert to string
+     return str(value)[:1000]  # Truncate long strings
+
+
+ def _infer_type_from_attributes(attributes: Dict[str, Any]) -> Optional[str]:
+     """
+     Infer span type from attributes.
+
+     Returns:
+         - "llm" if LLM-related attributes found
+         - "tool" if tool-related attributes found
+         - None otherwise (will use default "span")
+     """
+     # Check for LLM indicators
+     if any(key in attributes for key in ["llm.model", "llm.vendor", "model"]):
+         return "llm"
+
+     # Check for tool indicators
+     if any(key in attributes for key in ["tool.name", "tool", "http.url"]):
+         return "tool"
+
+     return None
+
+
+ def _extract_llm_attributes(span_attrs: Dict[str, Any], bound_args: inspect.BoundArguments) -> None:
+     """
+     Extract LLM-related attributes from function arguments.
+
+     Extracts common LLM parameters like model, temperature, max_tokens, messages.
+     Fails silently if extraction fails or attributes not found.
+
+     Args:
+         span_attrs: Dictionary to add extracted attributes to
+         bound_args: Bound arguments from the function call
+     """
+     try:
+         args_dict = dict(bound_args.arguments)
+
+         # Extract model
+         if "model" in args_dict and "llm.model" not in span_attrs:
+             span_attrs["llm.model"] = str(args_dict["model"])
+
+         # Extract temperature
+         if "temperature" in args_dict and "llm.temperature" not in span_attrs:
+             temp = args_dict["temperature"]
+             if isinstance(temp, (int, float)):
+                 span_attrs["llm.temperature"] = temp
+
+         # Extract max_tokens
+         if "max_tokens" in args_dict and "llm.max_tokens" not in span_attrs:
+             max_tok = args_dict["max_tokens"]
+             if isinstance(max_tok, int):
+                 span_attrs["llm.max_tokens"] = max_tok
+
+         # Extract messages/prompt
+         if "messages" in args_dict and "llm.prompt" not in span_attrs:
+             messages = args_dict["messages"]
+             if isinstance(messages, (list, str)):
+                 # Convert messages to string representation
+                 prompt_str = _convert_to_otel_type(messages)
+                 span_attrs["llm.prompt"] = prompt_str
+         elif "prompt" in args_dict and "llm.prompt" not in span_attrs:
+             prompt = args_dict["prompt"]
+             if isinstance(prompt, str):
+                 span_attrs["llm.prompt"] = prompt[:1000]
+
+     except Exception:
+         # Fail silently - don't interrupt span creation if extraction fails
+         pass
+
+
+ def observe(
+     name: Optional[str] = None,
+     *,
+     attributes: Optional[Dict[str, Any]] = None,
+     tags: Optional[Iterable[str]] = None,
+     as_type: str = "span",
+     skip_args: Optional[Iterable[str]] = None,
+     skip_result: bool = False,
+ ) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
+     """
+     Decorate a function to create a span around its execution.
+
+     - Supports sync and async functions.
+     - Captures errors and records exception events.
+     - Optionally captures arguments/results (skip controls).
+     """
+
+     def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
+         span_name = name or func.__name__
+         arg_names = func.__code__.co_varnames
+         skip_args_set = set(skip_args or [])
+         tags_list = [str(tag) for tag in tags] if tags is not None else []
+
+         is_coro = inspect.iscoroutinefunction(func)
+
+         @functools.wraps(func)
+         def sync_wrapper(*args, **kwargs):
+             tracer = _get_tracer(func.__module__ or "default")
+             bound = inspect.signature(func).bind_partial(*args, **kwargs)
+             bound.apply_defaults()
+
+             span_attrs = dict(attributes or {})
+             if tags_list:
+                 span_attrs["span.tags"] = tags_list
+
+             # Capture function arguments first
+             span_attrs.update(_capture_args(bound, skip_args_set))
+
+             # Infer type from attributes if not explicitly set (or if set to default "span")
+             inferred_type = as_type
+             if as_type == "span":
+                 # Try to infer from attributes
+                 detected_type = _infer_type_from_attributes(span_attrs)
+                 if detected_type:
+                     inferred_type = detected_type
+
+             # Set span type
+             span_attrs["span.type"] = inferred_type
+
+             # Extract LLM attributes if this is an LLM call
+             if inferred_type == "llm":
+                 _extract_llm_attributes(span_attrs, bound)
+
+             with tracer.start_as_current_span(span_name, attributes=span_attrs) as span:
+                 try:
+                     result = func(*args, **kwargs)
+                     if not skip_result:
+                         # Convert result to OTel-compatible type
+                         otel_result = _convert_to_otel_type(result)
+                         span.set_attribute("result", otel_result)
+                     return result
+                 except Exception as exc:
+                     # Record detailed error information
+                     span.record_exception(exc)
+                     span.set_status(SpanStatus.ERROR, str(exc))
+
+                     # Add error attributes
+                     span.set_attribute("error.type", type(exc).__name__)
+                     span.set_attribute("error.message", str(exc))
+
+                     # Add truncated stack trace
+                     tb = traceback.format_exc()
+                     span.set_attribute("error.stack_trace", tb[:2000])  # Truncate to 2000 chars
+
+                     raise
+
+         @functools.wraps(func)
+         async def async_wrapper(*args, **kwargs):
+             tracer = _get_tracer(func.__module__ or "default")
+             bound = inspect.signature(func).bind_partial(*args, **kwargs)
+             bound.apply_defaults()
+
+             span_attrs = dict(attributes or {})
+             if tags_list:
+                 span_attrs["span.tags"] = tags_list
+
+             # Capture function arguments first
+             span_attrs.update(_capture_args(bound, skip_args_set))
+
+             # Infer type from attributes if not explicitly set (or if set to default "span")
+             inferred_type = as_type
+             if as_type == "span":
+                 # Try to infer from attributes
+                 detected_type = _infer_type_from_attributes(span_attrs)
+                 if detected_type:
+                     inferred_type = detected_type
+
+             # Set span type
+             span_attrs["span.type"] = inferred_type
+
+             # Extract LLM attributes if this is an LLM call
+             if inferred_type == "llm":
+                 _extract_llm_attributes(span_attrs, bound)
+
+             async with tracer.start_as_current_span(span_name, attributes=span_attrs) as span:
+                 try:
+                     result = await func(*args, **kwargs)
+                     if not skip_result:
+                         # Convert result to OTel-compatible type
+                         otel_result = _convert_to_otel_type(result)
+                         span.set_attribute("result", otel_result)
+                     return result
+                 except Exception as exc:
+                     # Record detailed error information
+                     span.record_exception(exc)
+                     span.set_status(SpanStatus.ERROR, str(exc))
+
+                     # Add error attributes
+                     span.set_attribute("error.type", type(exc).__name__)
+                     span.set_attribute("error.message", str(exc))
+
+                     # Add truncated stack trace
+                     tb = traceback.format_exc()
+                     span.set_attribute("error.stack_trace", tb[:2000])  # Truncate to 2000 chars
+
+                     raise
+
+         return async_wrapper if is_coro else sync_wrapper
+
+     return decorator
+
+
+ def _get_tracer(name: str):
+     import traccia
+
+     return traccia.get_tracer(name)
+
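A minimal usage sketch for the new @observe decorator (assumes a tracer provider has already been configured; the decorator resolves its tracer through traccia.get_tracer internally, and the function names below are illustrative):

    from traccia.instrumentation.decorator import observe

    @observe(tags=["billing"], skip_args=["api_key"], skip_result=True)
    def charge(customer_id: str, amount_cents: int, api_key: str) -> bool:
        # customer_id and amount_cents become span attributes; api_key is skipped.
        # Any exception is recorded on the span with error.* attributes, then re-raised.
        return amount_cents > 0

    @observe(as_type="llm")
    async def complete(model: str, messages: list) -> str:
        # With as_type="llm", model/messages are lifted into llm.* attributes.
        ...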
traccia/instrumentation/fastapi.py
@@ -0,0 +1,38 @@
+ """
+ FastAPI middleware helpers for tracing HTTP requests with the SDK.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any, Awaitable, Callable
+
+ from traccia.instrumentation import start_server_span
+
+
+ def install_http_middleware(app: Any, *, tracer_name: str = "agents-fastapi") -> None:
+     """
+     Attach an HTTP middleware that wraps each FastAPI request in a server span.
+
+     - Propagates incoming context from headers
+     - Records method/path and response status code
+     """
+
+     @app.middleware("http")
+     async def tracing_middleware(request, call_next: Callable[[Any], Awaitable[Any]]):  # type: ignore
+         # Lazy import to avoid circular import when traccia initializes.
+         from traccia import get_tracer
+         tracer = get_tracer(tracer_name)
+         headers = dict(request.headers)
+         attrs = {
+             "http.method": request.method,
+             "http.target": request.url.path,
+         }
+         async with start_server_span(tracer, "http.request", headers, attributes=attrs) as span:
+             response = await call_next(request)
+             try:
+                 span.set_attribute("http.status_code", response.status_code)
+             except Exception:
+                 pass
+             return response
+
+     return None
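A sketch of installing the middleware on an application (standard FastAPI; the tracer name is an arbitrary example):

    from fastapi import FastAPI
    from traccia.instrumentation.fastapi import install_http_middleware

    app = FastAPI()
    install_http_middleware(app, tracer_name="checkout-service")
    # Every request now runs inside a server span that honors incoming
    # traceparent/tracestate headers and records http.status_code.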
traccia/instrumentation/http_client.py
@@ -0,0 +1,21 @@
+ """HTTP client helpers for context propagation."""
+
+ from __future__ import annotations
+
+ from typing import Dict
+
+ from traccia.context import inject_traceparent, inject_tracestate, get_current_span
+
+
+ def inject_headers(headers: Dict[str, str]) -> Dict[str, str]:
+     """
+     Inject traceparent/tracestate into the provided headers dict if a current span exists.
+
+     Returns the same headers mapping for convenience.
+     """
+     span = get_current_span()
+     if span:
+         inject_traceparent(headers, span.context)
+         inject_tracestate(headers, span.context)
+     return headers
+
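For example, an outbound call can carry the active trace context like this (the URL is a placeholder):

    import requests
    from traccia.instrumentation.http_client import inject_headers

    headers = inject_headers({"Accept": "application/json"})
    # traceparent/tracestate are added only when a span is currently active.
    resp = requests.get("https://api.example.com/items", headers=headers)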
traccia/instrumentation/http_server.py
@@ -0,0 +1,25 @@
+ """HTTP server helpers for extracting context and creating server spans."""
+
+ from __future__ import annotations
+
+ from typing import Dict, Optional
+
+ from traccia.context import extract_trace_context
+ from traccia.tracer.tracer import Tracer
+ from traccia.tracer.span_context import SpanContext
+
+
+ def extract_parent_context(headers: Dict[str, str]) -> Optional[SpanContext]:
+     """Parse traceparent/tracestate from headers and return SpanContext if valid."""
+     return extract_trace_context(headers)
+
+
+ def start_server_span(tracer: Tracer, name: str, headers: Dict[str, str], attributes=None):
+     """
+     Convenience helper to start a server span with extracted parent context.
+
+     Returns the span context manager (caller should use 'with' or 'async with').
+     """
+     parent_ctx = extract_parent_context(headers)
+     return tracer.start_as_current_span(name, attributes=attributes, parent_context=parent_ctx)
+
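A sketch of the helper in a hand-rolled handler (the tracer name and header dict are illustrative; in an async framework the returned context manager would be entered with 'async with' instead):

    import traccia
    from traccia.instrumentation.http_server import start_server_span

    tracer = traccia.get_tracer("my-server")

    def handle(headers: dict) -> None:
        # The span becomes a child of the caller's trace if the headers carry
        # a valid traceparent; otherwise it starts a new trace.
        with start_server_span(tracer, "http.request", headers,
                               attributes={"http.method": "GET"}) as span:
            span.set_attribute("http.status_code", 200)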
traccia/instrumentation/openai.py
@@ -0,0 +1,178 @@
+ """OpenAI monkey patching for chat completions."""
+
+ from __future__ import annotations
+
+ from typing import Any, Dict, Optional, Callable
+ from traccia.tracer.span import SpanStatus
+
+ _patched = False
+
+
+ def _safe_get(obj, path: str, default=None):
+     cur = obj
+     for part in path.split("."):
+         if cur is None:
+             return default
+         if isinstance(cur, dict):
+             cur = cur.get(part)
+         else:
+             cur = getattr(cur, part, None)
+     return cur if cur is not None else default
+
+
+ def patch_openai() -> bool:
+     """Patch OpenAI chat completions for both legacy and new client APIs."""
+     global _patched
+     if _patched:
+         return True
+     try:
+         import openai
+     except Exception:
+         return False
+
+     def _extract_messages(kwargs, args):
+         messages = kwargs.get("messages")
+         # For new client, first arg after self is messages
+         if messages is None and len(args) >= 2:
+             messages = args[1]
+         if not messages or not isinstance(messages, (list, tuple)):
+             return None
+         # Keep only JSON-friendly, small fields to avoid huge/sensitive payloads.
+         slim = []
+         for m in list(messages)[:50]:
+             if not isinstance(m, dict):
+                 continue
+             role = m.get("role")
+             name = m.get("name")
+             content = m.get("content")
+             if isinstance(content, (list, dict)):
+                 content = str(content)
+             elif content is not None and not isinstance(content, str):
+                 content = str(content)
+             item = {"role": role, "content": content}
+             if name:
+                 item["name"] = name
+             slim.append(item)
+         return slim or None
+
+     def _extract_prompt_text(messages_slim) -> Optional[str]:
+         if not messages_slim:
+             return None
+         parts = []
+         for m in messages_slim:
+             role = m.get("role")
+             content = m.get("content")
+             if not content:
+                 continue
+             parts.append(f"{role}: {content}" if role else str(content))
+         return "\n".join(parts) if parts else None
+
+     def _extract_prompt(kwargs, args) -> Optional[str]:
+         messages = kwargs.get("messages")
+         if messages is None and len(args) >= 2:
+             messages = args[1]
+         if not messages:
+             return None
+         parts = []
+         for m in messages:
+             content = m.get("content")
+             role = m.get("role")
+             if content:
+                 parts.append(f"{role}: {content}" if role else str(content))
+         return "\n".join(parts) if parts else None
+
+     def _wrap(create_fn: Callable):
+         if getattr(create_fn, "_agent_trace_patched", False):
+             return create_fn
+
+         def wrapped_create(*args, **kwargs):
+             tracer = _get_tracer("openai")
+             model = kwargs.get("model") or _safe_get(args, "0.model", None)
+             messages_slim = _extract_messages(kwargs, args)
+             prompt_text = _extract_prompt_text(messages_slim) or _extract_prompt(kwargs, args)
+             attributes: Dict[str, Any] = {"llm.vendor": "openai"}
+             if model:
+                 attributes["llm.model"] = model
+             if messages_slim:
+                 # Convert messages to JSON string for OTel compatibility
+                 import json
+                 try:
+                     attributes["llm.openai.messages"] = json.dumps(messages_slim)[:1000]
+                 except Exception:
+                     attributes["llm.openai.messages"] = str(messages_slim)[:1000]
+             if prompt_text:
+                 attributes["llm.prompt"] = prompt_text
+             with tracer.start_as_current_span("llm.openai.chat.completions", attributes=attributes) as span:
+                 try:
+                     resp = create_fn(*args, **kwargs)
+                     # capture model from response if not already set
+                     resp_model = getattr(resp, "model", None) or (_safe_get(resp, "model"))
+                     if resp_model and "llm.model" not in span.attributes:
+                         span.set_attribute("llm.model", resp_model)
+                     usage = getattr(resp, "usage", None) or (resp.get("usage") if isinstance(resp, dict) else None)
+                     if usage:
+                         span.set_attribute("llm.usage.source", "provider_usage")
+                         for k in ("prompt_tokens", "completion_tokens", "total_tokens"):
+                             val = getattr(usage, k, None) if not isinstance(usage, dict) else usage.get(k)
+                             if val is not None:
+                                 span.set_attribute(f"llm.usage.{k}", val)
+                         if "llm.usage.prompt_tokens" in span.attributes:
+                             span.set_attribute("llm.usage.prompt_source", "provider_usage")
+                         if "llm.usage.completion_tokens" in span.attributes:
+                             span.set_attribute("llm.usage.completion_source", "provider_usage")
+                     finish_reason = _safe_get(resp, "choices.0.finish_reason")
+                     if finish_reason:
+                         span.set_attribute("llm.finish_reason", finish_reason)
+                     completion = _safe_get(resp, "choices.0.message.content")
+                     if completion:
+                         span.set_attribute("llm.completion", completion)
+                     return resp
+                 except Exception as exc:
+                     span.record_exception(exc)
+                     span.set_status(SpanStatus.ERROR, str(exc))
+                     raise
+
+         wrapped_create._agent_trace_patched = True
+         return wrapped_create
+
+     patched_any = False
+
+     # Legacy: openai.ChatCompletion.create
+     target_legacy = getattr(openai, "ChatCompletion", None) or getattr(openai, "chat", None)
+     if target_legacy:
+         create_fn = getattr(target_legacy, "create", None)
+         if create_fn:
+             setattr(target_legacy, "create", _wrap(create_fn))
+             patched_any = True
+
+     # New client: OpenAI.chat.completions.create
+     new_client_cls = getattr(openai, "OpenAI", None)
+     if new_client_cls and hasattr(new_client_cls, "chat"):
+         chat = getattr(new_client_cls, "chat", None)
+         if chat and hasattr(chat, "completions"):
+             completions = getattr(chat, "completions")
+             if hasattr(completions, "create"):
+                 patched = _wrap(completions.create)
+                 setattr(completions, "create", patched)
+                 patched_any = True
+
+     # New client resource class: openai.resources.chat.completions.Completions
+     try:
+         from openai.resources.chat.completions import Completions  # type: ignore
+
+         if hasattr(Completions, "create"):
+             Completions.create = _wrap(Completions.create)
+             patched_any = True
+     except Exception:
+         pass
+
+     if patched_any:
+         _patched = True
+     return patched_any
+
+
+ def _get_tracer(name: str):
+     import traccia
+
+     return traccia.get_tracer(name)
+
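Enabling the patch is a single call before the client is used; the snippet below follows the standard openai v1 client API and assumes OPENAI_API_KEY is set in the environment:

    from openai import OpenAI
    from traccia.instrumentation.openai import patch_openai

    if patch_openai():
        client = OpenAI()
        # Traced as an "llm.openai.chat.completions" span carrying llm.model,
        # llm.prompt, llm.usage.* and llm.finish_reason attributes.
        client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "ping"}],
        )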
traccia/instrumentation/requests.py
@@ -0,0 +1,68 @@
+ """Requests monkey patching for HTTP client tracing."""
+
+ from __future__ import annotations
+
+ from typing import Any, Dict
+ from traccia.tracer.span import SpanStatus
+ from traccia.context import get_current_span, inject_traceparent, inject_tracestate
+
+ _patched = False
+
+
+ def patch_requests() -> bool:
+     """Patch requests.Session.request; returns True if patched, False otherwise."""
+     global _patched
+     if _patched:
+         return True
+     try:
+         import requests
+     except Exception:
+         return False
+
+     original_request = requests.sessions.Session.request
+     if getattr(original_request, "_agent_trace_patched", False):
+         _patched = True
+         return True
+
+     def wrapped_request(self, method, url, *args, **kwargs):
+         # Skip instrumentation for OTLP exporter endpoints to prevent feedback loop
+         url_str = str(url) if url else ""
+         if "/v1/traces" in url_str or "/api/v1/traces" in url_str:
+             # This is likely an exporter endpoint - don't instrument it
+             import requests
+             return original_request(self, method, url, *args, **kwargs)
+
+         tracer = _get_tracer("requests")
+         attributes: Dict[str, Any] = {
+             "http.method": method,
+             "http.url": url,
+         }
+         headers = kwargs.get("headers")
+         if headers is None:
+             headers = {}
+             kwargs["headers"] = headers
+         current = get_current_span()
+         if current:
+             inject_traceparent(headers, current.context)
+             inject_tracestate(headers, current.context)
+         with tracer.start_as_current_span("http.client", attributes=attributes) as span:
+             try:
+                 resp = original_request(self, method, url, *args, **kwargs)
+                 span.set_attribute("http.status_code", getattr(resp, "status_code", None))
+                 return resp
+             except Exception as exc:
+                 span.record_exception(exc)
+                 span.set_status(SpanStatus.ERROR, str(exc))
+                 raise
+
+     wrapped_request._agent_trace_patched = True
+     requests.sessions.Session.request = wrapped_request
+     _patched = True
+     return True
+
+
+ def _get_tracer(name: str):
+     import traccia
+
+     return traccia.get_tracer(name)
+
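A sketch of turning the patch on (the URL is a placeholder; note that exporter endpoints containing /v1/traces are deliberately left untraced to avoid the feedback loop described above):

    import requests
    from traccia.instrumentation.requests import patch_requests

    patch_requests()
    # Traced as an "http.client" span with http.method, http.url and
    # http.status_code; traceparent/tracestate are injected when a span is active.
    requests.get("https://example.com/health")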
traccia/integrations/__init__.py
@@ -0,0 +1,22 @@
+ """Traccia integrations for frameworks like LangChain, CrewAI, LlamaIndex."""
+
+ __all__ = []
+
+ # Lazy imports for optional dependencies
+ def _import_langchain():
+     try:
+         from traccia.integrations.langchain import TracciaCallbackHandler
+         return TracciaCallbackHandler
+     except ImportError as e:
+         raise ModuleNotFoundError(
+             "LangChain integration requires langchain-core. "
+             "Install with: pip install traccia[langchain]"
+         ) from e
+
+
+ # Make available if imported
+ try:
+     from traccia.integrations.langchain import TracciaCallbackHandler
+     __all__.append("TracciaCallbackHandler")
+ except ImportError:
+     pass
traccia/integrations/langchain/__init__.py
@@ -0,0 +1,14 @@
+ """Traccia LangChain integration via callback handler."""
+
+ try:
+     from traccia.integrations.langchain.callback import TracciaCallbackHandler
+
+     # Convenience alias: from traccia.integrations.langchain import CallbackHandler
+     CallbackHandler = TracciaCallbackHandler
+
+     __all__ = ["TracciaCallbackHandler", "CallbackHandler"]
+ except ImportError as e:
+     raise ModuleNotFoundError(
+         "LangChain integration requires langchain-core. "
+         "Install with: pip install traccia[langchain]"
+     ) from e
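A sketch of attaching the handler to a LangChain model (ChatOpenAI from langchain-openai is only an example component, and the handler is assumed to be constructible with no arguments; any component that accepts callbacks should work the same way):

    from langchain_openai import ChatOpenAI
    from traccia.integrations.langchain import CallbackHandler

    llm = ChatOpenAI(model="gpt-4o-mini", callbacks=[CallbackHandler()])
    llm.invoke("Hello")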