sentry-sdk 2.42.1__py2.py3-none-any.whl → 2.43.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of sentry-sdk has been flagged for review.
Files changed (36)
  1. sentry_sdk/__init__.py +2 -0
  2. sentry_sdk/_metrics_batcher.py +1 -1
  3. sentry_sdk/consts.py +87 -2
  4. sentry_sdk/integrations/__init__.py +2 -0
  5. sentry_sdk/integrations/django/caching.py +16 -3
  6. sentry_sdk/integrations/google_genai/__init__.py +3 -0
  7. sentry_sdk/integrations/google_genai/utils.py +16 -6
  8. sentry_sdk/integrations/langchain.py +8 -2
  9. sentry_sdk/integrations/litellm.py +11 -4
  10. sentry_sdk/integrations/mcp.py +552 -0
  11. sentry_sdk/integrations/openai_agents/__init__.py +2 -0
  12. sentry_sdk/integrations/openai_agents/patches/__init__.py +1 -0
  13. sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
  14. sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
  15. sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
  16. sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
  17. sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +217 -0
  18. sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +105 -0
  19. sentry_sdk/integrations/pydantic_ai/patches/model_request.py +35 -0
  20. sentry_sdk/integrations/pydantic_ai/patches/tools.py +75 -0
  21. sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
  22. sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +253 -0
  23. sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
  24. sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
  25. sentry_sdk/integrations/pydantic_ai/utils.py +175 -0
  26. sentry_sdk/integrations/starlette.py +1 -1
  27. sentry_sdk/integrations/strawberry.py +10 -9
  28. sentry_sdk/logger.py +14 -2
  29. sentry_sdk/tracing_utils.py +1 -1
  30. {sentry_sdk-2.42.1.dist-info → sentry_sdk-2.43.0.dist-info}/METADATA +6 -1
  31. {sentry_sdk-2.42.1.dist-info → sentry_sdk-2.43.0.dist-info}/RECORD +36 -22
  32. /sentry_sdk/{_metrics.py → metrics.py} +0 -0
  33. {sentry_sdk-2.42.1.dist-info → sentry_sdk-2.43.0.dist-info}/WHEEL +0 -0
  34. {sentry_sdk-2.42.1.dist-info → sentry_sdk-2.43.0.dist-info}/entry_points.txt +0 -0
  35. {sentry_sdk-2.42.1.dist-info → sentry_sdk-2.43.0.dist-info}/licenses/LICENSE +0 -0
  36. {sentry_sdk-2.42.1.dist-info → sentry_sdk-2.43.0.dist-info}/top_level.txt +0 -0
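
The headline change in this release is the new pydantic_ai integration (files 14-25), alongside a new MCP integration (file 10); the pydantic_ai hunks are expanded below. For orientation, enabling the integration would presumably follow the usual sentry-sdk pattern. A minimal sketch, assuming the exported class is named PydanticAIIntegration (the pydantic_ai/__init__.py hunk that would define it is not expanded on this page, so the name is inferred from sentry-sdk naming conventions):

    import sentry_sdk
    # Assumed export; inferred from sentry-sdk integration naming conventions.
    from sentry_sdk.integrations.pydantic_ai import PydanticAIIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        traces_sample_rate=1.0,  # the spans below are only recorded when tracing is enabled
        integrations=[PydanticAIIntegration()],
    )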
sentry_sdk/integrations/pydantic_ai/patches/__init__.py
@@ -0,0 +1,4 @@
+ from .agent_run import _patch_agent_run  # noqa: F401
+ from .graph_nodes import _patch_graph_nodes  # noqa: F401
+ from .model_request import _patch_model_request  # noqa: F401
+ from .tools import _patch_tool_execution  # noqa: F401
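
These four re-exports are the package's patching entry points. A hedged sketch of how an integration's setup_once hook might apply them; the actual wiring lives in the pydantic_ai/__init__.py hunk, which is not expanded here, so this class is hypothetical:

    from sentry_sdk.integrations import Integration

    class PydanticAIIntegrationSketch(Integration):
        # Hypothetical wiring, not the shipped class.
        identifier = "pydantic_ai"

        @staticmethod
        def setup_once():
            # Apply all four monkeypatches once per process.
            from sentry_sdk.integrations.pydantic_ai.patches import (
                _patch_agent_run,
                _patch_graph_nodes,
                _patch_model_request,
                _patch_tool_execution,
            )
            _patch_agent_run()
            _patch_graph_nodes()
            _patch_model_request()
            _patch_tool_execution()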
sentry_sdk/integrations/pydantic_ai/patches/agent_run.py
@@ -0,0 +1,217 @@
+ from functools import wraps
+
+ import sentry_sdk
+ from sentry_sdk.tracing_utils import set_span_errored
+ from sentry_sdk.utils import event_from_exception
+
+ from ..spans import invoke_agent_span, update_invoke_agent_span
+
+ from typing import TYPE_CHECKING
+ from pydantic_ai.agent import Agent  # type: ignore
+
+ if TYPE_CHECKING:
+     from typing import Any, Callable, Optional
+
+
+ def _capture_exception(exc):
+     # type: (Any) -> None
+     set_span_errored()
+
+     event, hint = event_from_exception(
+         exc,
+         client_options=sentry_sdk.get_client().options,
+         mechanism={"type": "pydantic_ai", "handled": False},
+     )
+     sentry_sdk.capture_event(event, hint=hint)
+
+
+ class _StreamingContextManagerWrapper:
+     """Wrapper for streaming methods that return async context managers."""
+
+     def __init__(
+         self,
+         agent,
+         original_ctx_manager,
+         user_prompt,
+         model,
+         model_settings,
+         is_streaming=True,
+     ):
+         # type: (Any, Any, Any, Any, Any, bool) -> None
+         self.agent = agent
+         self.original_ctx_manager = original_ctx_manager
+         self.user_prompt = user_prompt
+         self.model = model
+         self.model_settings = model_settings
+         self.is_streaming = is_streaming
+         self._isolation_scope = None  # type: Any
+         self._span = None  # type: Optional[sentry_sdk.tracing.Span]
+         self._result = None  # type: Any
+
+     async def __aenter__(self):
+         # type: () -> Any
+         # Set up isolation scope and invoke_agent span
+         self._isolation_scope = sentry_sdk.isolation_scope()
+         self._isolation_scope.__enter__()
+
+         # Store agent reference and streaming flag
+         sentry_sdk.get_current_scope().set_context(
+             "pydantic_ai_agent", {"_agent": self.agent, "_streaming": self.is_streaming}
+         )
+
+         # Create invoke_agent span (will be closed in __aexit__)
+         self._span = invoke_agent_span(
+             self.user_prompt, self.agent, self.model, self.model_settings
+         )
+         self._span.__enter__()
+
+         # Enter the original context manager
+         result = await self.original_ctx_manager.__aenter__()
+         self._result = result
+         return result
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         # type: (Any, Any, Any) -> None
+         try:
+             # Exit the original context manager first
+             await self.original_ctx_manager.__aexit__(exc_type, exc_val, exc_tb)
+
+             # Update span with output if successful
+             if exc_type is None and self._result and hasattr(self._result, "output"):
+                 output = (
+                     self._result.output if hasattr(self._result, "output") else None
+                 )
+                 if self._span is not None:
+                     update_invoke_agent_span(self._span, output)
+         finally:
+             sentry_sdk.get_current_scope().remove_context("pydantic_ai_agent")
+             # Clean up invoke span
+             if self._span:
+                 self._span.__exit__(exc_type, exc_val, exc_tb)
+
+             # Clean up isolation scope
+             if self._isolation_scope:
+                 self._isolation_scope.__exit__(exc_type, exc_val, exc_tb)
+
+
+ def _create_run_wrapper(original_func, is_streaming=False):
+     # type: (Callable[..., Any], bool) -> Callable[..., Any]
+     """
+     Wraps the Agent.run method to create an invoke_agent span.
+
+     Args:
+         original_func: The original run method
+         is_streaming: Whether this is a streaming method (for future use)
+     """
+
+     @wraps(original_func)
+     async def wrapper(self, *args, **kwargs):
+         # type: (Any, *Any, **Any) -> Any
+         # Isolate each workflow so that when agents are run in asyncio tasks they
+         # don't touch each other's scopes
+         with sentry_sdk.isolation_scope():
+             # Store agent reference and streaming flag in Sentry scope for access in nested spans
+             # We store the full agent to allow access to tools and system prompts
+             sentry_sdk.get_current_scope().set_context(
+                 "pydantic_ai_agent", {"_agent": self, "_streaming": is_streaming}
+             )
+
+             # Extract parameters for the span
+             user_prompt = kwargs.get("user_prompt") or (args[0] if args else None)
+             model = kwargs.get("model")
+             model_settings = kwargs.get("model_settings")
+
+             # Create invoke_agent span
+             with invoke_agent_span(user_prompt, self, model, model_settings) as span:
+                 try:
+                     result = await original_func(self, *args, **kwargs)
+
+                     # Update span with output
+                     output = result.output if hasattr(result, "output") else None
+                     update_invoke_agent_span(span, output)
+
+                     return result
+                 except Exception as exc:
+                     _capture_exception(exc)
+                     raise exc from None
+                 finally:
+                     sentry_sdk.get_current_scope().remove_context("pydantic_ai_agent")
+
+     return wrapper
+
+
+ def _create_streaming_wrapper(original_func):
+     # type: (Callable[..., Any]) -> Callable[..., Any]
+     """
+     Wraps run_stream method that returns an async context manager.
+     """
+
+     @wraps(original_func)
+     def wrapper(self, *args, **kwargs):
+         # type: (Any, *Any, **Any) -> Any
+         # Extract parameters for the span
+         user_prompt = kwargs.get("user_prompt") or (args[0] if args else None)
+         model = kwargs.get("model")
+         model_settings = kwargs.get("model_settings")
+
+         # Call original function to get the context manager
+         original_ctx_manager = original_func(self, *args, **kwargs)
+
+         # Wrap it with our instrumentation
+         return _StreamingContextManagerWrapper(
+             agent=self,
+             original_ctx_manager=original_ctx_manager,
+             user_prompt=user_prompt,
+             model=model,
+             model_settings=model_settings,
+             is_streaming=True,
+         )
+
+     return wrapper
+
+
+ def _create_streaming_events_wrapper(original_func):
+     # type: (Callable[..., Any]) -> Callable[..., Any]
+     """
+     Wraps run_stream_events method - no span needed as it delegates to run().
+
+     Note: run_stream_events internally calls self.run() with an event_stream_handler,
+     so the invoke_agent span will be created by the run() wrapper.
+     """
+
+     @wraps(original_func)
+     async def wrapper(self, *args, **kwargs):
+         # type: (Any, *Any, **Any) -> Any
+         # Just call the original generator - it will call run() which has the instrumentation
+         try:
+             async for event in original_func(self, *args, **kwargs):
+                 yield event
+         except Exception as exc:
+             _capture_exception(exc)
+             raise exc from None
+
+     return wrapper
+
+
+ def _patch_agent_run():
+     # type: () -> None
+     """
+     Patches the Agent run methods to create spans for agent execution.
+
+     This patches both non-streaming (run, run_sync) and streaming
+     (run_stream, run_stream_events) methods.
+     """
+
+     # Store original methods
+     original_run = Agent.run
+     original_run_stream = Agent.run_stream
+     original_run_stream_events = Agent.run_stream_events
+
+     # Wrap and apply patches for non-streaming methods
+     Agent.run = _create_run_wrapper(original_run, is_streaming=False)
+
+     # Wrap and apply patches for streaming methods
+     Agent.run_stream = _create_streaming_wrapper(original_run_stream)
+     Agent.run_stream_events = _create_streaming_events_wrapper(
+         original_run_stream_events
+     )
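
With these patches applied, an ordinary pydantic-ai run is traced without user-facing changes. A minimal usage sketch; the model string and prompt are illustrative, while result.output matches the attribute the wrapper itself reads:

    import asyncio

    import sentry_sdk
    from pydantic_ai import Agent

    sentry_sdk.init(dsn="...", traces_sample_rate=1.0)
    agent = Agent("openai:gpt-4o")  # illustrative model name

    async def main():
        # The patched Agent.run opens an isolation scope plus an
        # invoke_agent span around the whole call.
        with sentry_sdk.start_transaction(name="agent-demo"):
            result = await agent.run("What is the capital of France?")
            print(result.output)

    asyncio.run(main())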
sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py
@@ -0,0 +1,105 @@
+ from contextlib import asynccontextmanager
+ from functools import wraps
+
+ import sentry_sdk
+
+ from ..spans import (
+     ai_client_span,
+     update_ai_client_span,
+ )
+ from pydantic_ai._agent_graph import ModelRequestNode  # type: ignore
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from typing import Any, Callable
+
+
+ def _extract_span_data(node, ctx):
+     # type: (Any, Any) -> tuple[list[Any], Any, Any]
+     """Extract common data needed for creating chat spans.
+
+     Returns:
+         Tuple of (messages, model, model_settings)
+     """
+     # Extract model and settings from context
+     model = None
+     model_settings = None
+     if hasattr(ctx, "deps"):
+         model = getattr(ctx.deps, "model", None)
+         model_settings = getattr(ctx.deps, "model_settings", None)
+
+     # Build full message list: history + current request
+     messages = []
+     if hasattr(ctx, "state") and hasattr(ctx.state, "message_history"):
+         messages.extend(ctx.state.message_history)
+
+     current_request = getattr(node, "request", None)
+     if current_request:
+         messages.append(current_request)
+
+     return messages, model, model_settings
+
+
+ def _patch_graph_nodes():
+     # type: () -> None
+     """
+     Patches the graph node execution to create appropriate spans.
+
+     ModelRequestNode -> Creates ai_client span for model requests
+     CallToolsNode -> Handles tool calls (spans created in tool patching)
+     """
+
+     # Patch ModelRequestNode to create ai_client spans
+     original_model_request_run = ModelRequestNode.run
+
+     @wraps(original_model_request_run)
+     async def wrapped_model_request_run(self, ctx):
+         # type: (Any, Any) -> Any
+         messages, model, model_settings = _extract_span_data(self, ctx)
+
+         with ai_client_span(messages, None, model, model_settings) as span:
+             result = await original_model_request_run(self, ctx)
+
+             # Extract response from result if available
+             model_response = None
+             if hasattr(result, "model_response"):
+                 model_response = result.model_response
+
+             update_ai_client_span(span, model_response)
+             return result
+
+     ModelRequestNode.run = wrapped_model_request_run
+
+     # Patch ModelRequestNode.stream for streaming requests
+     original_model_request_stream = ModelRequestNode.stream
+
+     def create_wrapped_stream(original_stream_method):
+         # type: (Callable[..., Any]) -> Callable[..., Any]
+         """Create a wrapper for ModelRequestNode.stream that creates chat spans."""
+
+         @asynccontextmanager
+         @wraps(original_stream_method)
+         async def wrapped_model_request_stream(self, ctx):
+             # type: (Any, Any) -> Any
+             messages, model, model_settings = _extract_span_data(self, ctx)
+
+             # Create chat span for streaming request
+             with ai_client_span(messages, None, model, model_settings) as span:
+                 # Call the original stream method
+                 async with original_stream_method(self, ctx) as stream:
+                     yield stream
+
+                 # After streaming completes, update span with response data
+                 # The ModelRequestNode stores the final response in _result
+                 model_response = None
+                 if hasattr(self, "_result") and self._result is not None:
+                     # _result is a NextNode containing the model_response
+                     if hasattr(self._result, "model_response"):
+                         model_response = self._result.model_response
+
+                 update_ai_client_span(span, model_response)
+
+         return wrapped_model_request_stream
+
+     ModelRequestNode.stream = create_wrapped_stream(original_model_request_stream)
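
The decorator stack here, @asynccontextmanager applied on top of @wraps, is easy to misread: it turns the instrumented generator back into an async context manager while preserving the original method's metadata, and any code after the yield runs only once the caller's async with body has finished. A reduced, self-contained sketch of the same pattern, with hypothetical parameter names (open_span is any context-manager factory, finalize any callback):

    from contextlib import asynccontextmanager
    from functools import wraps

    def wrap_acm_method_with_span(original_method, open_span, finalize):
        # Generic form of the pattern above; the wrapper stays usable as
        # `async with obj.method(...) as value:` just like the original.
        @asynccontextmanager
        @wraps(original_method)
        async def wrapped(self, *args, **kwargs):
            with open_span() as span:
                async with original_method(self, *args, **kwargs) as value:
                    yield value
                # Runs only after the caller's `async with` body exits, so the
                # span can still be updated with data gathered while streaming.
                finalize(span)
        return wrapped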
sentry_sdk/integrations/pydantic_ai/patches/model_request.py
@@ -0,0 +1,35 @@
+ from functools import wraps
+ from typing import TYPE_CHECKING
+
+ from pydantic_ai import models  # type: ignore
+
+ from ..spans import ai_client_span, update_ai_client_span
+
+
+ if TYPE_CHECKING:
+     from typing import Any
+
+
+ def _patch_model_request():
+     # type: () -> None
+     """
+     Patches model request execution to create AI client spans.
+
+     In pydantic-ai, model requests are handled through the Model interface.
+     We need to patch the request method on models to create spans.
+     """
+
+     # Patch the base Model class's request method
+     if hasattr(models, "Model"):
+         original_request = models.Model.request
+
+         @wraps(original_request)
+         async def wrapped_request(self, messages, *args, **kwargs):
+             # type: (Any, Any, *Any, **Any) -> Any
+             # Pass all messages (full conversation history)
+             with ai_client_span(messages, None, self, None) as span:
+                 result = await original_request(self, messages, *args, **kwargs)
+                 update_ai_client_span(span, result)
+                 return result
+
+         models.Model.request = wrapped_request
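
The hasattr(models, "Model") guard makes this patch a no-op on pydantic-ai versions that lack the base class, a common defensive idiom for integrations that must survive upstream refactors. The same idiom reduced to its essentials, with hypothetical names:

    from functools import wraps

    def patch_async_method_if_present(owner, method_name, before=None, after=None):
        # Hypothetical helper: only replace the method when the attribute
        # actually exists, so older library versions are silently left alone.
        original = getattr(owner, method_name, None)
        if original is None:
            return

        @wraps(original)
        async def wrapped(self, *args, **kwargs):
            if before:
                before(self, *args, **kwargs)
            result = await original(self, *args, **kwargs)
            if after:
                after(result)
            return result

        setattr(owner, method_name, wrapped)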
sentry_sdk/integrations/pydantic_ai/patches/tools.py
@@ -0,0 +1,75 @@
+ from functools import wraps
+
+ from pydantic_ai._tool_manager import ToolManager  # type: ignore
+
+ import sentry_sdk
+
+ from ..spans import execute_tool_span, update_execute_tool_span
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from typing import Any
+
+ try:
+     from pydantic_ai.mcp import MCPServer  # type: ignore
+
+     HAS_MCP = True
+ except ImportError:
+     HAS_MCP = False
+
+
+ def _patch_tool_execution():
+     # type: () -> None
+     """
+     Patch ToolManager._call_tool to create execute_tool spans.
+
+     This is the single point where ALL tool calls flow through in pydantic_ai,
+     regardless of toolset type (function, MCP, combined, wrapper, etc.).
+
+     By patching here, we avoid:
+     - Patching multiple toolset classes
+     - Dealing with signature mismatches from instrumented MCP servers
+     - Complex nested toolset handling
+     """
+
+     original_call_tool = ToolManager._call_tool
+
+     @wraps(original_call_tool)
+     async def wrapped_call_tool(self, call, allow_partial, wrap_validation_errors):
+         # type: (Any, Any, bool, bool) -> Any
+
+         # Extract tool info before calling original
+         name = call.tool_name
+         tool = self.tools.get(name) if self.tools else None
+
+         # Determine tool type by checking tool.toolset
+         tool_type = "function"  # default
+         if tool and HAS_MCP and isinstance(tool.toolset, MCPServer):
+             tool_type = "mcp"
+
+         # Get agent from Sentry scope
+         current_span = sentry_sdk.get_current_span()
+         if current_span and tool:
+             agent_data = (
+                 sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
+             )
+             agent = agent_data.get("_agent")
+
+             # Get args for span (before validation)
+             # call.args can be a string (JSON) or dict
+             args_dict = call.args if isinstance(call.args, dict) else {}
+
+             with execute_tool_span(name, args_dict, agent, tool_type=tool_type) as span:
+                 result = await original_call_tool(
+                     self, call, allow_partial, wrap_validation_errors
+                 )
+                 update_execute_tool_span(span, result)
+                 return result
+
+         # No span context - just call original
+         return await original_call_tool(
+             self, call, allow_partial, wrap_validation_errors
+         )
+
+     ToolManager._call_tool = wrapped_call_tool
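
Because every tool call, function-based or MCP-based, dispatches through this single _call_tool choke point, a plain function tool registered the usual pydantic-ai way gets an execute_tool span with tool_type "function" and no extra wiring. An illustrative registration, using pydantic-ai's documented tool_plain decorator with an illustrative model name:

    from pydantic_ai import Agent

    agent = Agent("openai:gpt-4o")  # illustrative model name

    @agent.tool_plain  # pydantic-ai decorator for tools that don't need RunContext
    def add(a: int, b: int) -> int:
        # When the model calls this tool, ToolManager._call_tool runs the
        # wrapper above and records an execute_tool span around the call.
        return a + b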
sentry_sdk/integrations/pydantic_ai/spans/__init__.py
@@ -0,0 +1,3 @@
+ from .ai_client import ai_client_span, update_ai_client_span  # noqa: F401
+ from .execute_tool import execute_tool_span, update_execute_tool_span  # noqa: F401
+ from .invoke_agent import invoke_agent_span, update_invoke_agent_span  # noqa: F401
sentry_sdk/integrations/pydantic_ai/spans/ai_client.py
@@ -0,0 +1,253 @@
+ import sentry_sdk
+ from sentry_sdk.ai.utils import set_data_normalized
+ from sentry_sdk.consts import OP, SPANDATA
+ from sentry_sdk.utils import safe_serialize
+
+ from ..consts import SPAN_ORIGIN
+ from ..utils import (
+     _set_agent_data,
+     _set_available_tools,
+     _set_model_data,
+     _should_send_prompts,
+     _get_model_name,
+ )
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from typing import Any, List, Dict
+     from pydantic_ai.usage import RequestUsage  # type: ignore
+
+ try:
+     from pydantic_ai.messages import (  # type: ignore
+         BaseToolCallPart,
+         BaseToolReturnPart,
+         SystemPromptPart,
+         UserPromptPart,
+         TextPart,
+         ThinkingPart,
+     )
+ except ImportError:
+     # Fallback if these classes are not available
+     BaseToolCallPart = None
+     BaseToolReturnPart = None
+     SystemPromptPart = None
+     UserPromptPart = None
+     TextPart = None
+     ThinkingPart = None
+
+
+ def _set_usage_data(span, usage):
+     # type: (sentry_sdk.tracing.Span, RequestUsage) -> None
+     """Set token usage data on a span."""
+     if usage is None:
+         return
+
+     if hasattr(usage, "input_tokens") and usage.input_tokens is not None:
+         span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
+
+     if hasattr(usage, "output_tokens") and usage.output_tokens is not None:
+         span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)
+
+     if hasattr(usage, "total_tokens") and usage.total_tokens is not None:
+         span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens)
+
+
+ def _set_input_messages(span, messages):
+     # type: (sentry_sdk.tracing.Span, Any) -> None
+     """Set input messages data on a span."""
+     if not _should_send_prompts():
+         return
+
+     if not messages:
+         return
+
+     try:
+         formatted_messages = []
+         system_prompt = None
+
+         # Extract system prompt from any ModelRequest with instructions
+         for msg in messages:
+             if hasattr(msg, "instructions") and msg.instructions:
+                 system_prompt = msg.instructions
+                 break
+
+         # Add system prompt as first message if present
+         if system_prompt:
+             formatted_messages.append(
+                 {"role": "system", "content": [{"type": "text", "text": system_prompt}]}
+             )
+
+         for msg in messages:
+             if hasattr(msg, "parts"):
+                 for part in msg.parts:
+                     role = "user"
+                     # Use isinstance checks with proper base classes
+                     if SystemPromptPart and isinstance(part, SystemPromptPart):
+                         role = "system"
+                     elif (
+                         (TextPart and isinstance(part, TextPart))
+                         or (ThinkingPart and isinstance(part, ThinkingPart))
+                         or (BaseToolCallPart and isinstance(part, BaseToolCallPart))
+                     ):
+                         role = "assistant"
+                     elif BaseToolReturnPart and isinstance(part, BaseToolReturnPart):
+                         role = "tool"
+
+                     content = []  # type: List[Dict[str, Any] | str]
+                     tool_calls = None
+                     tool_call_id = None
+
+                     # Handle ToolCallPart (assistant requesting tool use)
+                     if BaseToolCallPart and isinstance(part, BaseToolCallPart):
+                         tool_call_data = {}
+                         if hasattr(part, "tool_name"):
+                             tool_call_data["name"] = part.tool_name
+                         if hasattr(part, "args"):
+                             tool_call_data["arguments"] = safe_serialize(part.args)
+                         if tool_call_data:
+                             tool_calls = [tool_call_data]
+                     # Handle ToolReturnPart (tool result)
+                     elif BaseToolReturnPart and isinstance(part, BaseToolReturnPart):
+                         if hasattr(part, "tool_name"):
+                             tool_call_id = part.tool_name
+                         if hasattr(part, "content"):
+                             content.append({"type": "text", "text": str(part.content)})
+                     # Handle regular content
+                     elif hasattr(part, "content"):
+                         if isinstance(part.content, str):
+                             content.append({"type": "text", "text": part.content})
+                         elif isinstance(part.content, list):
+                             for item in part.content:
+                                 if isinstance(item, str):
+                                     content.append({"type": "text", "text": item})
+                                 else:
+                                     content.append(safe_serialize(item))
+                         else:
+                             content.append({"type": "text", "text": str(part.content)})
+
+                     # Add message if we have content or tool calls
+                     if content or tool_calls:
+                         message = {"role": role}  # type: Dict[str, Any]
+                         if content:
+                             message["content"] = content
+                         if tool_calls:
+                             message["tool_calls"] = tool_calls
+                         if tool_call_id:
+                             message["tool_call_id"] = tool_call_id
+                         formatted_messages.append(message)
+
+         if formatted_messages:
+             set_data_normalized(
+                 span, SPANDATA.GEN_AI_REQUEST_MESSAGES, formatted_messages, unpack=False
+             )
+     except Exception:
+         # If we fail to format messages, just skip it
+         pass
+
+
+ def _set_output_data(span, response):
+     # type: (sentry_sdk.tracing.Span, Any) -> None
+     """Set output data on a span."""
+     if not _should_send_prompts():
+         return
+
+     if not response:
+         return
+
+     set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_name)
+     try:
+         # Extract text from ModelResponse
+         if hasattr(response, "parts"):
+             texts = []
+             tool_calls = []
+
+             for part in response.parts:
+                 if TextPart and isinstance(part, TextPart) and hasattr(part, "content"):
+                     texts.append(part.content)
+                 elif BaseToolCallPart and isinstance(part, BaseToolCallPart):
+                     tool_call_data = {
+                         "type": "function",
+                     }
+                     if hasattr(part, "tool_name"):
+                         tool_call_data["name"] = part.tool_name
+                     if hasattr(part, "args"):
+                         tool_call_data["arguments"] = safe_serialize(part.args)
+                     tool_calls.append(tool_call_data)
+
+             if texts:
+                 set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, texts)
+
+             if tool_calls:
+                 span.set_data(
+                     SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls)
+                 )
+
+     except Exception:
+         # If we fail to format output, just skip it
+         pass
+
+
+ def ai_client_span(messages, agent, model, model_settings):
+     # type: (Any, Any, Any, Any) -> sentry_sdk.tracing.Span
+     """Create a span for an AI client call (model request).
+
+     Args:
+         messages: Full conversation history (list of messages)
+         agent: Agent object
+         model: Model object
+         model_settings: Model settings
+     """
+     # Determine model name for span name
+     model_obj = model
+     if agent and hasattr(agent, "model"):
+         model_obj = agent.model
+
+     model_name = _get_model_name(model_obj) or "unknown"
+
+     span = sentry_sdk.start_span(
+         op=OP.GEN_AI_CHAT,
+         name=f"chat {model_name}",
+         origin=SPAN_ORIGIN,
+     )
+
+     span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+
+     _set_agent_data(span, agent)
+     _set_model_data(span, model, model_settings)
+
+     # Set streaming flag
+     agent_data = sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
+     is_streaming = agent_data.get("_streaming", False)
+     span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, is_streaming)
+
+     # Add available tools if agent is available
+     agent_obj = agent
+     if not agent_obj:
+         # Try to get from Sentry scope
+         agent_data = (
+             sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
+         )
+         agent_obj = agent_data.get("_agent")
+
+     _set_available_tools(span, agent_obj)
+
+     # Set input messages (full conversation history)
+     if messages:
+         _set_input_messages(span, messages)
+
+     return span
+
+
+ def update_ai_client_span(span, model_response):
+     # type: (sentry_sdk.tracing.Span, Any) -> None
+     """Update the AI client span with response data."""
+     if not span:
+         return
+
+     # Set usage data if available
+     if model_response and hasattr(model_response, "usage"):
+         _set_usage_data(span, model_response.usage)
+
+     # Set output data
+     _set_output_data(span, model_response)
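
Note that both _set_input_messages and _set_output_data bail out unless _should_send_prompts() is true. In sentry-sdk's other gen-AI integrations that gate is tied to the send_default_pii client option (or a per-integration include_prompts flag), so prompt and response capture is presumably opt-in here as well; a hedged sketch:

    import sentry_sdk

    sentry_sdk.init(
        dsn="...",
        traces_sample_rate=1.0,
        # Assumption based on sentry-sdk's other gen-AI integrations: without
        # this, _should_send_prompts() is falsy and GEN_AI_REQUEST_MESSAGES /
        # GEN_AI_RESPONSE_TEXT are never attached to spans.
        send_default_pii=True,
    )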