netra-sdk 0.1.23__tar.gz → 0.1.25__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (51)
  1. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/PKG-INFO +3 -1
  2. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/README.md +1 -0
  3. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/__init__.py +22 -7
  4. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/instruments.py +1 -0
  5. netra_sdk-0.1.25/netra/instrumentation/pydantic_ai/__init__.py +200 -0
  6. netra_sdk-0.1.25/netra/instrumentation/pydantic_ai/utils.py +385 -0
  7. netra_sdk-0.1.25/netra/instrumentation/pydantic_ai/version.py +1 -0
  8. netra_sdk-0.1.25/netra/instrumentation/pydantic_ai/wrappers.py +687 -0
  9. netra_sdk-0.1.25/netra/version.py +1 -0
  10. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/pyproject.toml +2 -1
  11. netra_sdk-0.1.23/netra/version.py +0 -1
  12. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/LICENCE +0 -0
  13. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/__init__.py +0 -0
  14. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/anonymizer/__init__.py +0 -0
  15. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/anonymizer/anonymizer.py +0 -0
  16. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/anonymizer/base.py +0 -0
  17. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/anonymizer/fp_anonymizer.py +0 -0
  18. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/config.py +0 -0
  19. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/decorators.py +0 -0
  20. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/exceptions/__init__.py +0 -0
  21. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/exceptions/injection.py +0 -0
  22. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/exceptions/pii.py +0 -0
  23. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/input_scanner.py +0 -0
  24. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/aiohttp/__init__.py +0 -0
  25. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/aiohttp/version.py +0 -0
  26. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/cohere/__init__.py +0 -0
  27. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/cohere/version.py +0 -0
  28. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/fastapi/__init__.py +0 -0
  29. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/fastapi/version.py +0 -0
  30. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/google_genai/__init__.py +0 -0
  31. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/google_genai/config.py +0 -0
  32. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/google_genai/utils.py +0 -0
  33. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/google_genai/version.py +0 -0
  34. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/httpx/__init__.py +0 -0
  35. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/httpx/version.py +0 -0
  36. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/mistralai/__init__.py +0 -0
  37. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/mistralai/config.py +0 -0
  38. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/mistralai/utils.py +0 -0
  39. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/mistralai/version.py +0 -0
  40. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/openai/__init__.py +0 -0
  41. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/openai/version.py +0 -0
  42. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/openai/wrappers.py +0 -0
  43. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/weaviate/__init__.py +0 -0
  44. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/weaviate/version.py +0 -0
  45. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/pii.py +0 -0
  46. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/processors/__init__.py +0 -0
  47. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/processors/session_span_processor.py +0 -0
  48. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/scanner.py +0 -0
  49. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/session_manager.py +0 -0
  50. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/span_wrapper.py +0 -0
  51. {netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/tracer.py +0 -0

{netra_sdk-0.1.23 → netra_sdk-0.1.25}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: netra-sdk
- Version: 0.1.23
+ Version: 0.1.25
  Summary: A Python SDK for AI application observability that provides OpenTelemetry-based monitoring, tracing, and PII protection for LLM and vector database applications. Enables easy instrumentation, session tracking, and privacy-focused data collection for AI systems in production environments.
  License: Apache-2.0
  Keywords: netra,tracing,observability,sdk,ai,llm,vector,database
@@ -72,6 +72,7 @@ Requires-Dist: presidio-anonymizer (==2.2.358) ; extra == "presidio"
  Requires-Dist: stanza (>=1.10.1,<2.0.0) ; extra == "presidio"
  Requires-Dist: traceloop-sdk (>=0.40.7,<0.43.0)
  Requires-Dist: transformers (==4.51.3) ; extra == "presidio"
+ Requires-Dist: twine (>=6.1.0,<7.0.0)
  Project-URL: Bug Tracker, https://github.com/KeyValueSoftwareSystems/netra-sdk-py/issues
  Project-URL: Documentation, https://github.com/KeyValueSoftwareSystems/netra-sdk-py/blob/main/README.md
  Project-URL: Homepage, https://github.com/KeyValueSoftwareSystems/netra-sdk-py
@@ -286,6 +287,7 @@ class CustomerSupportAgent:
  - **LlamaIndex** - Data framework for LLM applications
  - **Haystack** - End-to-end NLP framework
  - **CrewAI** - Multi-agent AI systems
+ - **Pydantic AI** - Type-safe agent framework for LLM applications
  - **MCP (Model Context Protocol)** - AI model communication standard

  ## 🛡️ Privacy Protection & Security

{netra_sdk-0.1.23 → netra_sdk-0.1.25}/README.md
@@ -206,6 +206,7 @@ class CustomerSupportAgent:
  - **LlamaIndex** - Data framework for LLM applications
  - **Haystack** - End-to-end NLP framework
  - **CrewAI** - Multi-agent AI systems
+ - **Pydantic AI** - Type-safe agent framework for LLM applications
  - **MCP (Model Context Protocol)** - AI model communication standard

  ## 🛡️ Privacy Protection & Security
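
The README now lists Pydantic AI among the supported frameworks. As a hedged illustration of what that support looks like from the application side, the new instrumentor can also be applied by hand. This snippet is not part of the package; the model id and prompt are made up, and it assumes pydantic-ai plus an OpenTelemetry tracer provider are already configured:

```python
# Minimal sketch: manually applying the new instrumentor outside Netra's own init flow.
from netra.instrumentation.pydantic_ai import NetraPydanticAIInstrumentor
from pydantic_ai import Agent

instrumentor = NetraPydanticAIInstrumentor()
if not instrumentor.is_instrumented_by_opentelemetry:
    # Patches Agent.run / run_sync / iter / run_stream and Tool.__call__ via wrapt.
    instrumentor.instrument()

agent = Agent("openai:gpt-4o-mini", system_prompt="Be concise.")  # illustrative model id
result = agent.run_sync("What does OpenTelemetry do?")
print(result.output)  # the run and any tool calls are now emitted as OpenTelemetry spans
```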

{netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/__init__.py
@@ -34,16 +34,11 @@ def init_instrumentations(
          else:
              traceloop_block_instruments.add(getattr(Instruments, instrument.name))

-     # If no instruments are provided for instrumentation
-     if instruments is None:
-         traceloop_block_instruments = set(Instruments)
-         netra_custom_block_instruments = set(CustomInstruments)
-
-     # If only custom instruments from netra are provided for instrumentation
+     # If no instruments in traceloop are provided for instrumentation
      if instruments is not None and not traceloop_instruments and not traceloop_block_instruments:
          traceloop_block_instruments = set(Instruments)

-     # If only traceloop instruments are provided for instrumentation
+     # If no custom instruments in netra are provided for instrumentation
      if instruments is not None and not netra_custom_instruments and not netra_custom_block_instruments:
          netra_custom_block_instruments = set(CustomInstruments)

@@ -102,6 +97,10 @@ def init_instrumentations(
      if CustomInstruments.OPENAI in netra_custom_instruments:
          init_openai_instrumentation()

+     # Initialize Pydantic AI instrumentation.
+     if CustomInstruments.PYDANTIC_AI in netra_custom_instruments:
+         init_pydantic_ai_instrumentation()
+
      # Initialize aio_pika instrumentation.
      if CustomInstruments.AIO_PIKA in netra_custom_instruments:
          init_aio_pika_instrumentation()
@@ -1124,3 +1123,19 @@ def init_urllib3_instrumentation() -> bool:
          logging.error(f"Error initializing urllib3 instrumentor: {e}")
          Telemetry().log_exception(e)
          return False
+
+
+ def init_pydantic_ai_instrumentation() -> bool:
+     """Initialize pydantic-ai instrumentation."""
+     try:
+         if is_package_installed("pydantic-ai"):
+             from netra.instrumentation.pydantic_ai import NetraPydanticAIInstrumentor
+
+             instrumentor = NetraPydanticAIInstrumentor()
+             if not instrumentor.is_instrumented_by_opentelemetry:
+                 instrumentor.instrument()
+         return True
+     except Exception as e:
+         logging.error(f"Error initializing pydantic-ai instrumentation: {e}")
+         Telemetry().log_exception(e)
+         return False
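
The new initializer follows the same pattern as the other init_* helpers in this module: it guards on the optional dependency and logs failures instead of raising. A minimal sketch of calling it directly, assuming Netra's tracer setup has already run:

```python
# Sketch only: the helper catches and logs its own errors, so this call never raises.
from netra.instrumentation import init_pydantic_ai_instrumentation

init_pydantic_ai_instrumentation()
```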

{netra_sdk-0.1.23 → netra_sdk-0.1.25}/netra/instrumentation/instruments.py
@@ -10,6 +10,7 @@ class CustomInstruments(Enum):
      HTTPX = "httpx"
      MISTRALAI = "mistral_ai"
      OPENAI = "openai"
+     PYDANTIC_AI = "pydantic_ai"
      QDRANTDB = "qdrant_db"
      WEAVIATEDB = "weaviate_db"
      GOOGLE_GENERATIVEAI = "google_genai"

netra_sdk-0.1.25/netra/instrumentation/pydantic_ai/__init__.py (new file)
@@ -0,0 +1,200 @@
+ import logging
+ from typing import Collection
+
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from opentelemetry.instrumentation.utils import unwrap
+ from opentelemetry.trace import get_tracer
+ from wrapt import wrap_function_wrapper
+
+ from netra.instrumentation.pydantic_ai.version import __version__
+ from netra.instrumentation.pydantic_ai.wrappers import (
+     agent_iter_wrapper,
+     agent_run_stream_wrapper,
+     agent_run_sync_wrapper,
+     agent_run_wrapper,
+     tool_function_wrapper,
+ )
+
+ logger = logging.getLogger(__name__)
+
+ _instruments = ("pydantic-ai >= 0.0.1",)
+
+
+ class NetraPydanticAIInstrumentor(BaseInstrumentor):  # type: ignore[misc]
+     """
+     Custom Pydantic AI instrumentor for Netra SDK with enhanced support for:
+     - Agent.run, Agent.run_sync, Agent.iter, Agent.run_stream methods
+     - Tool function execution tracing
+     - OpenTelemetry semantic conventions for Generative AI
+     - Integration with Netra tracing and monitoring
+     """
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):  # type: ignore[no-untyped-def]
+         """Instrument Pydantic AI Agent methods and tool functions"""
+         tracer_provider = kwargs.get("tracer_provider")
+         tracer = get_tracer(__name__, __version__, tracer_provider)
+
+         # Instrument Agent.run method
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.agent",
+                 "Agent.run",
+                 agent_run_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("Agent.run method not available in this pydantic-ai version")
+
+         # Instrument Agent.run_sync method
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.agent",
+                 "Agent.run_sync",
+                 agent_run_sync_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("Agent.run_sync method not available in this pydantic-ai version")
+
+         # Instrument Agent.iter method
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.agent",
+                 "Agent.iter",
+                 agent_iter_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("Agent.iter method not available in this pydantic-ai version")
+
+         # Instrument Agent.run_stream method (if available)
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.agent",
+                 "Agent.run_stream",
+                 agent_run_stream_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("Agent.run_stream method not available in this pydantic-ai version")
+
+         # Instrument AgentRun methods
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.agent",
+                 "AgentRun.run",
+                 agent_run_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("AgentRun.run method not available in this pydantic-ai version")
+
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.agent",
+                 "AgentRun.run_sync",
+                 agent_run_sync_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("AgentRun.run_sync method not available in this pydantic-ai version")
+
+         # Instrument tool execution (if tools module exists)
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.tools",
+                 "Tool.__call__",
+                 tool_function_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("Tool.__call__ method not available in this pydantic-ai version")
+
+         # Instrument function tools (if function_tools module exists)
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.tools",
+                 "FunctionTool.__call__",
+                 tool_function_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("FunctionTool.__call__ method not available in this pydantic-ai version")
+
+         # Instrument model calls (if models module exists)
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.models.base",
+                 "Model.request",
+                 agent_run_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("Model.request method not available in this pydantic-ai version")
+
+         try:
+             wrap_function_wrapper(
+                 "pydantic_ai.models.base",
+                 "Model.request_stream",
+                 agent_run_stream_wrapper(tracer),
+             )
+         except (AttributeError, ModuleNotFoundError):
+             logger.debug("Model.request_stream method not available in this pydantic-ai version")
+
+     def _uninstrument(self, **kwargs):  # type: ignore[no-untyped-def]
+         """Uninstrument Pydantic AI Agent methods and tool functions"""
+         # Uninstrument Agent methods
+         try:
+             unwrap("pydantic_ai.agent", "Agent.run")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         try:
+             unwrap("pydantic_ai.agent", "Agent.run_sync")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         try:
+             unwrap("pydantic_ai.agent", "Agent.iter")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         try:
+             unwrap("pydantic_ai.agent", "Agent.run_stream")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         # Uninstrument AgentRun methods
+         try:
+             unwrap("pydantic_ai.agent", "AgentRun.run")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         try:
+             unwrap("pydantic_ai.agent", "AgentRun.run_sync")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         # Uninstrument tool methods
+         try:
+             unwrap("pydantic_ai.tools", "Tool.__call__")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         try:
+             unwrap("pydantic_ai.tools", "FunctionTool.__call__")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         # Uninstrument model methods
+         try:
+             unwrap("pydantic_ai.models.base", "Model.request")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+         try:
+             unwrap("pydantic_ai.models.base", "Model.request_stream")
+         except (AttributeError, ModuleNotFoundError):
+             pass
+
+
+ def should_suppress_instrumentation() -> bool:
+     """Check if instrumentation should be suppressed"""
+     from opentelemetry import context as context_api
+     from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+
+     return context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) is True
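
The wrapper factories imported at the top of this module live in wrappers.py (687 added lines, not shown in this view). A hedged sketch of the general shape such a wrapt-based factory takes; the span name, kind, and body below are illustrative assumptions, not the package's actual implementation:

```python
# Illustrative only: a wrapt wrapper factory in the style implied by agent_run_wrapper(tracer).
from opentelemetry.trace import SpanKind, Tracer
from opentelemetry.trace.status import Status, StatusCode

from netra.instrumentation.pydantic_ai.utils import should_suppress_instrumentation


def example_agent_run_wrapper(tracer: Tracer):
    """Return an async wrapper matching wrapt's (wrapped, instance, args, kwargs) signature."""

    async def wrapper(wrapped, instance, args, kwargs):
        # Respect the suppression flag checked elsewhere in this integration.
        if should_suppress_instrumentation():
            return await wrapped(*args, **kwargs)

        with tracer.start_as_current_span("pydantic_ai.agent.run", kind=SpanKind.CLIENT) as span:
            try:
                return await wrapped(*args, **kwargs)
            except Exception as exc:
                # Record the failure on the span and re-raise to the caller.
                span.set_status(Status(StatusCode.ERROR, str(exc)))
                raise

    return wrapper
```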

netra_sdk-0.1.25/netra/instrumentation/pydantic_ai/utils.py (new file)
@@ -0,0 +1,385 @@
+ from typing import Any, Dict, Optional
+
+ from opentelemetry import context as context_api
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+ from opentelemetry.semconv_ai import SpanAttributes
+ from opentelemetry.trace.status import Status, StatusCode
+
+ # Constants for consistent truncation and limits
+ MAX_CONTENT_LENGTH = 1000
+ MAX_ARGS_LENGTH = 500
+ MAX_ITEMS_TO_PROCESS = 5
+ MAX_FUNCTIONS_TO_PROCESS = 3
+
+
+ def _safe_set_attribute(span: Any, key: str, value: Any, max_length: Optional[int] = None) -> None:
+     """Safely set span attribute with optional truncation and null checks."""
+     if not span.is_recording() or value is None:
+         return
+
+     str_value = str(value)
+     if max_length and len(str_value) > max_length:
+         str_value = str_value[:max_length]
+
+     span.set_attribute(key, str_value)
+
+
+ def _safe_get_attribute(obj: Any, attr_name: str, default: Any = None) -> Any:
+     """Safely get attribute from object with default fallback."""
+     return getattr(obj, attr_name, default) if hasattr(obj, attr_name) else default
+
+
+ def _handle_span_error(span: Any, exception: Exception) -> None:
+     """Common error handling for spans."""
+     span.set_status(Status(StatusCode.ERROR, str(exception)))
+     _safe_set_attribute(span, "error.type", type(exception).__name__)
+     _safe_set_attribute(span, "error.message", str(exception))
+
+
+ def _set_timing_attributes(span: Any, start_time: float, end_time: float) -> None:
+     """Set timing attributes on span."""
+     duration_ms = (end_time - start_time) * 1000
+     _safe_set_attribute(span, "llm.response.duration", duration_ms)
+
+
+ def _set_assistant_response_content(span: Any, result: Any, finish_reason: str = "completed") -> None:
+     """Set assistant response content in OpenAI wrapper format."""
+     if not span.is_recording():
+         return
+
+     # Set the assistant response in the same format as OpenAI wrapper
+     index = 0  # Always use index 0 for pydantic_ai responses
+     _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.{index}.role", "assistant")
+     _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.{index}.finish_reason", finish_reason)
+
+     # Get the output content from the result
+     output = _safe_get_attribute(result, "output")
+     if output is not None:
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.{index}.content", output, MAX_CONTENT_LENGTH)
+
+
+ def set_pydantic_request_attributes(
+     span: Any,
+     kwargs: Dict[str, Any],
+     operation_type: str,
+     model_name: Optional[str] = None,
+     include_model: bool = False,
+ ) -> None:
+     """Set request attributes on span for pydantic_ai."""
+     if not span.is_recording():
+         return
+
+     # Set operation type
+     _safe_set_attribute(span, f"{SpanAttributes.LLM_REQUEST_TYPE}", operation_type)
+
+     # Set model only if explicitly requested (for CallToolsNode spans)
+     if include_model and model_name:
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_REQUEST_MODEL}", model_name)
+
+     # Set temperature and max_tokens if available
+     _safe_set_attribute(span, f"{SpanAttributes.LLM_REQUEST_TEMPERATURE}", kwargs.get("temperature"))
+     _safe_set_attribute(span, f"{SpanAttributes.LLM_REQUEST_MAX_TOKENS}", kwargs.get("max_tokens"))
+
+
+ def set_pydantic_response_attributes(span: Any, result: Any) -> None:
+     """Set response attributes on span for pydantic_ai."""
+     if not span.is_recording():
+         return
+
+     # Set response model if available
+     model_name = _safe_get_attribute(result, "model_name")
+     _safe_set_attribute(span, f"{SpanAttributes.LLM_RESPONSE_MODEL}", model_name)
+
+     # Set usage information
+     if hasattr(result, "usage"):
+         usage = result.usage()
+         if usage:
+             _safe_set_attribute(
+                 span, f"{SpanAttributes.LLM_USAGE_PROMPT_TOKENS}", _safe_get_attribute(usage, "request_tokens")
+             )
+             _safe_set_attribute(
+                 span, f"{SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}", _safe_get_attribute(usage, "response_tokens")
+             )
+             _safe_set_attribute(
+                 span, f"{SpanAttributes.LLM_USAGE_TOTAL_TOKENS}", _safe_get_attribute(usage, "total_tokens")
+             )
+
+             # Set any additional details from usage
+             details = _safe_get_attribute(usage, "details")
+             if details:
+                 for key, value in details.items():
+                     if value:
+                         _safe_set_attribute(span, f"gen_ai.usage.details.{key}", value)
+
+     # Set output content if available
+     output = _safe_get_attribute(result, "output")
+     if output is not None:
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant")
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", output, MAX_CONTENT_LENGTH)
+
+
+ def should_suppress_instrumentation() -> bool:
+     """Check if instrumentation should be suppressed"""
+     return context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) is True
+
+
+ def get_node_span_name(node: Any) -> str:
+     """Get appropriate span name for a node type"""
+     node_type = type(node).__name__
+     if "UserPrompt" in node_type:
+         return "pydantic_ai.node.user_prompt"
+     elif "ModelRequest" in node_type:
+         return "pydantic_ai.node.model_request"
+     elif "CallTools" in node_type:
+         return "pydantic_ai.node.call_tools"
+     elif "End" in node_type:
+         return "pydantic_ai.node.end"
+     else:
+         return f"pydantic_ai.node.{node_type.lower()}"
+
+
+ def set_node_attributes(span: Any, node: Any) -> None:
+     """Set attributes on span based on node type and content"""
+     if not span.is_recording():
+         return
+
+     node_type = type(node).__name__
+     _safe_set_attribute(span, "pydantic_ai.node.type", node_type)
+
+     # UserPromptNode attributes
+     if "UserPrompt" in node_type:
+         _set_user_prompt_node_attributes(span, node)
+
+     # ModelRequestNode attributes
+     elif "ModelRequest" in node_type:
+         _set_model_request_node_attributes(span, node)
+
+     # CallToolsNode attributes
+     elif "CallTools" in node_type:
+         _set_call_tools_node_attributes(span, node)
+
+     # End node attributes
+     elif "End" in node_type:
+         _set_end_node_attributes(span, node)
+
+     # Generic node attributes for any other node types
+     else:
+         _set_generic_node_attributes(span, node)
+
+
+ def _set_user_prompt_node_attributes(span: Any, node: Any) -> None:
+     """Set attributes specific to UserPromptNode."""
+     # User prompt content
+     user_prompt = _safe_get_attribute(node, "user_prompt")
+     if user_prompt:
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.0.role", "user")
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.0.content", user_prompt, MAX_CONTENT_LENGTH)
+         _safe_set_attribute(span, "pydantic_ai.user_prompt", user_prompt, MAX_CONTENT_LENGTH)
+
+     # Instructions
+     instructions = _safe_get_attribute(node, "instructions")
+     _safe_set_attribute(span, "pydantic_ai.instructions", instructions, MAX_CONTENT_LENGTH)
+
+     # Instructions functions
+     instructions_functions = _safe_get_attribute(node, "instructions_functions")
+     if instructions_functions:
+         _safe_set_attribute(span, "pydantic_ai.instructions_functions_count", len(instructions_functions))
+         for i, func in enumerate(instructions_functions[:MAX_FUNCTIONS_TO_PROCESS]):
+             func_name = _safe_get_attribute(func, "__name__")
+             _safe_set_attribute(span, f"pydantic_ai.instructions_functions.{i}.name", func_name)
+
+     # System prompts
+     system_prompts = _safe_get_attribute(node, "system_prompts")
+     if system_prompts:
+         _safe_set_attribute(span, "pydantic_ai.system_prompts_count", len(system_prompts))
+         for i, prompt in enumerate(system_prompts[:MAX_FUNCTIONS_TO_PROCESS]):
+             _safe_set_attribute(span, f"pydantic_ai.system_prompts.{i}", prompt, MAX_ARGS_LENGTH)
+
+     # System prompt functions
+     system_prompt_functions = _safe_get_attribute(node, "system_prompt_functions")
+     if system_prompt_functions:
+         _safe_set_attribute(span, "pydantic_ai.system_prompt_functions_count", len(system_prompt_functions))
+
+     # System prompt dynamic functions
+     system_prompt_dynamic_functions = _safe_get_attribute(node, "system_prompt_dynamic_functions")
+     if system_prompt_dynamic_functions:
+         _safe_set_attribute(
+             span, "pydantic_ai.system_prompt_dynamic_functions_count", len(system_prompt_dynamic_functions)
+         )
+         for key in list(system_prompt_dynamic_functions.keys())[:MAX_FUNCTIONS_TO_PROCESS]:
+             func_type = type(system_prompt_dynamic_functions[key]).__name__
+             _safe_set_attribute(span, f"pydantic_ai.system_prompt_dynamic_functions.{key}", func_type)
+
+
+ def _set_model_request_node_attributes(span: Any, node: Any) -> None:
+     """Set attributes specific to ModelRequestNode."""
+     request = _safe_get_attribute(node, "request")
+     if not request:
+         return
+
+     # Request parts
+     parts = _safe_get_attribute(request, "parts")
+     if parts:
+         _safe_set_attribute(span, "pydantic_ai.request.parts_count", len(parts))
+
+         for i, part in enumerate(parts[:MAX_ITEMS_TO_PROCESS]):
+             part_type = type(part).__name__
+             _safe_set_attribute(span, f"pydantic_ai.request.parts.{i}.type", part_type)
+
+             # Content for text parts
+             content = _safe_get_attribute(part, "content")
+             if content:
+                 _safe_set_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", content, MAX_CONTENT_LENGTH)
+                 _safe_set_attribute(span, f"pydantic_ai.request.parts.{i}.content", content, MAX_CONTENT_LENGTH)
+
+             # Role for message parts
+             role = _safe_get_attribute(part, "role")
+             if role:
+                 _safe_set_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", role)
+                 _safe_set_attribute(span, f"pydantic_ai.request.parts.{i}.role", role)
+
+             # Timestamp, tool call information
+             _safe_set_attribute(
+                 span, f"pydantic_ai.request.parts.{i}.timestamp", _safe_get_attribute(part, "timestamp")
+             )
+             _safe_set_attribute(
+                 span, f"pydantic_ai.request.parts.{i}.tool_name", _safe_get_attribute(part, "tool_name")
+             )
+             _safe_set_attribute(
+                 span, f"pydantic_ai.request.parts.{i}.tool_call_id", _safe_get_attribute(part, "tool_call_id")
+             )
+             _safe_set_attribute(
+                 span, f"pydantic_ai.request.parts.{i}.args", _safe_get_attribute(part, "args"), MAX_ARGS_LENGTH
+             )
+
+     # Request metadata
+     _safe_set_attribute(span, "pydantic_ai.request.model_name", _safe_get_attribute(request, "model_name"))
+     _safe_set_attribute(span, f"{SpanAttributes.LLM_REQUEST_TEMPERATURE}", _safe_get_attribute(request, "temperature"))
+     _safe_set_attribute(span, f"{SpanAttributes.LLM_REQUEST_MAX_TOKENS}", _safe_get_attribute(request, "max_tokens"))
+
+
+ def _set_call_tools_node_attributes(span: Any, node: Any) -> None:
+     """Set attributes specific to CallToolsNode."""
+     response = _safe_get_attribute(node, "model_response")
+     if not response:
+         return
+
+     # Response parts
+     parts = _safe_get_attribute(response, "parts")
+     if parts:
+         _safe_set_attribute(span, "pydantic_ai.response.parts_count", len(parts))
+
+         for i, part in enumerate(parts[:MAX_ITEMS_TO_PROCESS]):
+             part_type = type(part).__name__
+             _safe_set_attribute(span, f"pydantic_ai.response.parts.{i}.type", part_type)
+
+             # Content for text parts
+             content = _safe_get_attribute(part, "content")
+             if content:
+                 _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.{i}.content", content, MAX_CONTENT_LENGTH)
+                 _safe_set_attribute(span, f"pydantic_ai.response.parts.{i}.content", content, MAX_CONTENT_LENGTH)
+                 _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.{i}.role", "assistant")
+
+             # Tool call information
+             _safe_set_attribute(
+                 span, f"pydantic_ai.response.parts.{i}.tool_name", _safe_get_attribute(part, "tool_name")
+             )
+             _safe_set_attribute(
+                 span, f"pydantic_ai.response.parts.{i}.tool_call_id", _safe_get_attribute(part, "tool_call_id")
+             )
+             _safe_set_attribute(
+                 span, f"pydantic_ai.response.parts.{i}.args", _safe_get_attribute(part, "args"), MAX_ARGS_LENGTH
+             )
+
+     # Usage information
+     usage = _safe_get_attribute(response, "usage")
+     if usage:
+         _safe_set_attribute(span, "pydantic_ai.usage.requests", _safe_get_attribute(usage, "requests"))
+
+         # Token usage with dual attributes for compatibility
+         request_tokens = _safe_get_attribute(usage, "request_tokens")
+         if request_tokens is not None:
+             _safe_set_attribute(span, f"{SpanAttributes.LLM_USAGE_PROMPT_TOKENS}", request_tokens)
+             _safe_set_attribute(span, "pydantic_ai.usage.request_tokens", request_tokens)
+
+         response_tokens = _safe_get_attribute(usage, "response_tokens")
+         if response_tokens is not None:
+             _safe_set_attribute(span, f"{SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}", response_tokens)
+             _safe_set_attribute(span, "pydantic_ai.usage.response_tokens", response_tokens)
+
+         total_tokens = _safe_get_attribute(usage, "total_tokens")
+         if total_tokens is not None:
+             _safe_set_attribute(span, f"{SpanAttributes.LLM_USAGE_TOTAL_TOKENS}", total_tokens)
+             _safe_set_attribute(span, "pydantic_ai.usage.total_tokens", total_tokens)
+
+         # Additional usage details
+         details = _safe_get_attribute(usage, "details")
+         if details:
+             for key, value in details.items():
+                 if value is not None:
+                     _safe_set_attribute(span, f"pydantic_ai.usage.details.{key}", value)
+
+     # Model information (only for CallToolsNode)
+     model_name = _safe_get_attribute(response, "model_name")
+     if model_name:
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_RESPONSE_MODEL}", model_name)
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_REQUEST_MODEL}", model_name)
+         _safe_set_attribute(span, "pydantic_ai.response.model_name", model_name)
+
+     # Timestamp
+     _safe_set_attribute(span, "pydantic_ai.response.timestamp", _safe_get_attribute(response, "timestamp"))
+
+     # Tool execution results (if available)
+     tool_results = _safe_get_attribute(node, "tool_results")
+     if tool_results:
+         _safe_set_attribute(span, "pydantic_ai.tool_results_count", len(tool_results))
+         for i, result in enumerate(tool_results[:MAX_FUNCTIONS_TO_PROCESS]):
+             _safe_set_attribute(span, f"pydantic_ai.tool_results.{i}", result, MAX_ARGS_LENGTH)
+
+
+ def _set_end_node_attributes(span: Any, node: Any) -> None:
+     """Set attributes specific to End node."""
+     data = _safe_get_attribute(node, "data")
+     if not data:
+         return
+
+     # Final output
+     output = _safe_get_attribute(data, "output")
+     if output is not None:
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant")
+         _safe_set_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", output, MAX_CONTENT_LENGTH)
+         _safe_set_attribute(span, "pydantic_ai.final_output", output, MAX_CONTENT_LENGTH)
+
+     # Cost information
+     cost = _safe_get_attribute(data, "cost")
+     if cost is not None:
+         _safe_set_attribute(span, "pydantic_ai.cost", float(cost))
+
+     # Usage summary
+     usage = _safe_get_attribute(data, "usage")
+     if usage:
+         _safe_set_attribute(span, "pydantic_ai.final_usage.total_tokens", _safe_get_attribute(usage, "total_tokens"))
+         _safe_set_attribute(
+             span, "pydantic_ai.final_usage.request_tokens", _safe_get_attribute(usage, "request_tokens")
+         )
+         _safe_set_attribute(
+             span, "pydantic_ai.final_usage.response_tokens", _safe_get_attribute(usage, "response_tokens")
+         )
+
+     # Messages history
+     messages = _safe_get_attribute(data, "messages")
+     if messages:
+         _safe_set_attribute(span, "pydantic_ai.messages_count", len(messages))
+
+     # New messages
+     new_messages = _safe_get_attribute(data, "new_messages")
+     if new_messages:
+         _safe_set_attribute(span, "pydantic_ai.new_messages_count", len(new_messages))
+
+
+ def _set_generic_node_attributes(span: Any, node: Any) -> None:
+     """Set attributes for any other node types."""
+     # Try to extract common attributes that might be available
+     for attr_name in ["content", "message", "value", "result", "error"]:
+         attr_value = _safe_get_attribute(node, attr_name)
+         _safe_set_attribute(span, f"pydantic_ai.{attr_name}", attr_value, MAX_CONTENT_LENGTH)
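
The helpers in utils.py are plain functions and can be exercised on their own. A hedged sketch of the truncation and span-naming behaviour, assuming an OpenTelemetry SDK tracer is configured (the stand-in node class is illustrative):

```python
from opentelemetry import trace

from netra.instrumentation.pydantic_ai.utils import (
    MAX_CONTENT_LENGTH,
    _safe_set_attribute,
    get_node_span_name,
)

tracer = trace.get_tracer("example")

with tracer.start_as_current_span("pydantic_ai.node.user_prompt") as span:
    # Values longer than the limit are truncated to MAX_CONTENT_LENGTH characters.
    _safe_set_attribute(span, "pydantic_ai.user_prompt", "x" * 5000, MAX_CONTENT_LENGTH)


class UserPromptNode:  # stand-in for pydantic-ai's graph node type
    pass


# Span names are derived from the node's class name.
print(get_node_span_name(UserPromptNode()))  # -> "pydantic_ai.node.user_prompt"
```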

netra_sdk-0.1.25/netra/instrumentation/pydantic_ai/version.py (new file)
@@ -0,0 +1 @@
+ __version__ = "1.0.0"