sentry-sdk 2.42.0__py2.py3-none-any.whl → 2.43.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of sentry-sdk has been flagged as a potentially problematic release.
- sentry_sdk/__init__.py +2 -0
- sentry_sdk/_metrics_batcher.py +1 -1
- sentry_sdk/ai/utils.py +49 -2
- sentry_sdk/client.py +18 -1
- sentry_sdk/consts.py +87 -2
- sentry_sdk/integrations/__init__.py +2 -0
- sentry_sdk/integrations/anthropic.py +8 -5
- sentry_sdk/integrations/aws_lambda.py +2 -0
- sentry_sdk/integrations/django/caching.py +16 -3
- sentry_sdk/integrations/gcp.py +6 -1
- sentry_sdk/integrations/google_genai/__init__.py +3 -0
- sentry_sdk/integrations/google_genai/utils.py +16 -6
- sentry_sdk/integrations/langchain.py +49 -23
- sentry_sdk/integrations/langgraph.py +25 -11
- sentry_sdk/integrations/litellm.py +17 -6
- sentry_sdk/integrations/mcp.py +552 -0
- sentry_sdk/integrations/openai.py +33 -9
- sentry_sdk/integrations/openai_agents/__init__.py +2 -0
- sentry_sdk/integrations/openai_agents/patches/__init__.py +1 -0
- sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
- sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
- sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
- sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
- sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +217 -0
- sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +105 -0
- sentry_sdk/integrations/pydantic_ai/patches/model_request.py +35 -0
- sentry_sdk/integrations/pydantic_ai/patches/tools.py +75 -0
- sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
- sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +253 -0
- sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
- sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
- sentry_sdk/integrations/pydantic_ai/utils.py +175 -0
- sentry_sdk/integrations/redis/utils.py +4 -4
- sentry_sdk/integrations/starlette.py +1 -1
- sentry_sdk/integrations/strawberry.py +10 -9
- sentry_sdk/logger.py +14 -2
- sentry_sdk/scope.py +13 -6
- sentry_sdk/tracing_utils.py +1 -1
- sentry_sdk/utils.py +34 -2
- {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/METADATA +6 -1
- {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/RECORD +46 -32
- /sentry_sdk/{_metrics.py → metrics.py} +0 -0
- {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/WHEEL +0 -0
- {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/top_level.txt +0 -0
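Most of this release is a new pydantic_ai integration (alongside a new MCP integration, error tracing for openai_agents, and the promotion of the private _metrics.py to a public metrics.py module). A minimal setup sketch for the new integration, assuming it is registered like other Sentry integrations; the include_prompts constructor argument is inferred from the getattr call in pydantic_ai/utils.py below, and the DSN is a placeholder:

import sentry_sdk
from sentry_sdk.integrations.pydantic_ai import PydanticAIIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    # Per _should_send_prompts() below, prompt/response capture requires both
    # send_default_pii=True and include_prompts enabled on the integration.
    send_default_pii=True,
    integrations=[PydanticAIIntegration(include_prompts=True)],  # include_prompts assumed
)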
sentry_sdk/integrations/pydantic_ai/spans/ai_client.py (new file)
@@ -0,0 +1,253 @@
+import sentry_sdk
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.utils import safe_serialize
+
+from ..consts import SPAN_ORIGIN
+from ..utils import (
+    _set_agent_data,
+    _set_available_tools,
+    _set_model_data,
+    _should_send_prompts,
+    _get_model_name,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, List, Dict
+    from pydantic_ai.usage import RequestUsage  # type: ignore
+
+try:
+    from pydantic_ai.messages import (  # type: ignore
+        BaseToolCallPart,
+        BaseToolReturnPart,
+        SystemPromptPart,
+        UserPromptPart,
+        TextPart,
+        ThinkingPart,
+    )
+except ImportError:
+    # Fallback if these classes are not available
+    BaseToolCallPart = None
+    BaseToolReturnPart = None
+    SystemPromptPart = None
+    UserPromptPart = None
+    TextPart = None
+    ThinkingPart = None
+
+
+def _set_usage_data(span, usage):
+    # type: (sentry_sdk.tracing.Span, RequestUsage) -> None
+    """Set token usage data on a span."""
+    if usage is None:
+        return
+
+    if hasattr(usage, "input_tokens") and usage.input_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
+
+    if hasattr(usage, "output_tokens") and usage.output_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)
+
+    if hasattr(usage, "total_tokens") and usage.total_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens)
+
+
+def _set_input_messages(span, messages):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Set input messages data on a span."""
+    if not _should_send_prompts():
+        return
+
+    if not messages:
+        return
+
+    try:
+        formatted_messages = []
+        system_prompt = None
+
+        # Extract system prompt from any ModelRequest with instructions
+        for msg in messages:
+            if hasattr(msg, "instructions") and msg.instructions:
+                system_prompt = msg.instructions
+                break
+
+        # Add system prompt as first message if present
+        if system_prompt:
+            formatted_messages.append(
+                {"role": "system", "content": [{"type": "text", "text": system_prompt}]}
+            )
+
+        for msg in messages:
+            if hasattr(msg, "parts"):
+                for part in msg.parts:
+                    role = "user"
+                    # Use isinstance checks with proper base classes
+                    if SystemPromptPart and isinstance(part, SystemPromptPart):
+                        role = "system"
+                    elif (
+                        (TextPart and isinstance(part, TextPart))
+                        or (ThinkingPart and isinstance(part, ThinkingPart))
+                        or (BaseToolCallPart and isinstance(part, BaseToolCallPart))
+                    ):
+                        role = "assistant"
+                    elif BaseToolReturnPart and isinstance(part, BaseToolReturnPart):
+                        role = "tool"
+
+                    content = []  # type: List[Dict[str, Any] | str]
+                    tool_calls = None
+                    tool_call_id = None
+
+                    # Handle ToolCallPart (assistant requesting tool use)
+                    if BaseToolCallPart and isinstance(part, BaseToolCallPart):
+                        tool_call_data = {}
+                        if hasattr(part, "tool_name"):
+                            tool_call_data["name"] = part.tool_name
+                        if hasattr(part, "args"):
+                            tool_call_data["arguments"] = safe_serialize(part.args)
+                        if tool_call_data:
+                            tool_calls = [tool_call_data]
+                    # Handle ToolReturnPart (tool result)
+                    elif BaseToolReturnPart and isinstance(part, BaseToolReturnPart):
+                        if hasattr(part, "tool_name"):
+                            tool_call_id = part.tool_name
+                        if hasattr(part, "content"):
+                            content.append({"type": "text", "text": str(part.content)})
+                    # Handle regular content
+                    elif hasattr(part, "content"):
+                        if isinstance(part.content, str):
+                            content.append({"type": "text", "text": part.content})
+                        elif isinstance(part.content, list):
+                            for item in part.content:
+                                if isinstance(item, str):
+                                    content.append({"type": "text", "text": item})
+                                else:
+                                    content.append(safe_serialize(item))
+                        else:
+                            content.append({"type": "text", "text": str(part.content)})
+
+                    # Add message if we have content or tool calls
+                    if content or tool_calls:
+                        message = {"role": role}  # type: Dict[str, Any]
+                        if content:
+                            message["content"] = content
+                        if tool_calls:
+                            message["tool_calls"] = tool_calls
+                        if tool_call_id:
+                            message["tool_call_id"] = tool_call_id
+                        formatted_messages.append(message)
+
+        if formatted_messages:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, formatted_messages, unpack=False
+            )
+    except Exception:
+        # If we fail to format messages, just skip it
+        pass
+
+
+def _set_output_data(span, response):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Set output data on a span."""
+    if not _should_send_prompts():
+        return
+
+    if not response:
+        return
+
+    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_name)
+    try:
+        # Extract text from ModelResponse
+        if hasattr(response, "parts"):
+            texts = []
+            tool_calls = []
+
+            for part in response.parts:
+                if TextPart and isinstance(part, TextPart) and hasattr(part, "content"):
+                    texts.append(part.content)
+                elif BaseToolCallPart and isinstance(part, BaseToolCallPart):
+                    tool_call_data = {
+                        "type": "function",
+                    }
+                    if hasattr(part, "tool_name"):
+                        tool_call_data["name"] = part.tool_name
+                    if hasattr(part, "args"):
+                        tool_call_data["arguments"] = safe_serialize(part.args)
+                    tool_calls.append(tool_call_data)
+
+            if texts:
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, texts)
+
+            if tool_calls:
+                span.set_data(
+                    SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls)
+                )
+
+    except Exception:
+        # If we fail to format output, just skip it
+        pass
+
+
+def ai_client_span(messages, agent, model, model_settings):
+    # type: (Any, Any, Any, Any) -> sentry_sdk.tracing.Span
+    """Create a span for an AI client call (model request).
+
+    Args:
+        messages: Full conversation history (list of messages)
+        agent: Agent object
+        model: Model object
+        model_settings: Model settings
+    """
+    # Determine model name for span name
+    model_obj = model
+    if agent and hasattr(agent, "model"):
+        model_obj = agent.model
+
+    model_name = _get_model_name(model_obj) or "unknown"
+
+    span = sentry_sdk.start_span(
+        op=OP.GEN_AI_CHAT,
+        name=f"chat {model_name}",
+        origin=SPAN_ORIGIN,
+    )
+
+    span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+
+    _set_agent_data(span, agent)
+    _set_model_data(span, model, model_settings)
+
+    # Set streaming flag
+    agent_data = sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
+    is_streaming = agent_data.get("_streaming", False)
+    span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, is_streaming)
+
+    # Add available tools if agent is available
+    agent_obj = agent
+    if not agent_obj:
+        # Try to get from Sentry scope
+        agent_data = (
+            sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
+        )
+        agent_obj = agent_data.get("_agent")
+
+    _set_available_tools(span, agent_obj)
+
+    # Set input messages (full conversation history)
+    if messages:
+        _set_input_messages(span, messages)
+
+    return span
+
+
+def update_ai_client_span(span, model_response):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Update the AI client span with response data."""
+    if not span:
+        return
+
+    # Set usage data if available
+    if model_response and hasattr(model_response, "usage"):
+        _set_usage_data(span, model_response.usage)
+
+    # Set output data
+    _set_output_data(span, model_response)
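ai_client_span and update_ai_client_span are meant to bracket a single model request; the real call site is patches/model_request.py, which is listed above but not shown in this diff. A hypothetical pairing for illustration, with traced_model_request and original_request invented here:

from sentry_sdk.integrations.pydantic_ai.spans.ai_client import (
    ai_client_span,
    update_ai_client_span,
)

def traced_model_request(original_request, messages, agent, model, model_settings):
    # Sentry spans are context managers; leaving the block finishes the span.
    span = ai_client_span(messages, agent, model, model_settings)
    with span:
        response = original_request(messages)
        update_ai_client_span(span, response)
        return response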
sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py (new file)
@@ -0,0 +1,49 @@
+import sentry_sdk
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.utils import safe_serialize
+
+from ..consts import SPAN_ORIGIN
+from ..utils import _set_agent_data, _should_send_prompts
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+
+
+def execute_tool_span(tool_name, tool_args, agent, tool_type="function"):
+    # type: (str, Any, Any, str) -> sentry_sdk.tracing.Span
+    """Create a span for tool execution.
+
+    Args:
+        tool_name: The name of the tool being executed
+        tool_args: The arguments passed to the tool
+        agent: The agent executing the tool
+        tool_type: The type of tool ("function" for regular tools, "mcp" for MCP services)
+    """
+    span = sentry_sdk.start_span(
+        op=OP.GEN_AI_EXECUTE_TOOL,
+        name=f"execute_tool {tool_name}",
+        origin=SPAN_ORIGIN,
+    )
+
+    span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool")
+    span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, tool_type)
+    span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name)
+
+    _set_agent_data(span, agent)
+
+    if _should_send_prompts() and tool_args is not None:
+        span.set_data(SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_args))
+
+    return span
+
+
+def update_execute_tool_span(span, result):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Update the execute tool span with the result."""
+    if not span:
+        return
+
+    if _should_send_prompts() and result is not None:
+        span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result))
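The same pattern applies to tools: execute_tool_span opens the span and update_execute_tool_span attaches the result, with the real wiring in patches/tools.py (listed above, not shown). A hypothetical wrapper for illustration:

from sentry_sdk.integrations.pydantic_ai.spans.execute_tool import (
    execute_tool_span,
    update_execute_tool_span,
)

def traced_tool_call(run_tool, tool_name, tool_args, agent):
    # __enter__ returns the span and __exit__ finishes it.
    with execute_tool_span(tool_name, tool_args, agent) as span:
        result = run_tool(tool_args)
        update_execute_tool_span(span, result)
        return result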
sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py (new file)
@@ -0,0 +1,112 @@
+import sentry_sdk
+from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized
+from sentry_sdk.consts import OP, SPANDATA
+
+from ..consts import SPAN_ORIGIN
+from ..utils import (
+    _set_agent_data,
+    _set_available_tools,
+    _set_model_data,
+    _should_send_prompts,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+
+
+def invoke_agent_span(user_prompt, agent, model, model_settings):
+    # type: (Any, Any, Any, Any) -> sentry_sdk.tracing.Span
+    """Create a span for invoking the agent."""
+    # Determine agent name for span
+    name = "agent"
+    if agent and getattr(agent, "name", None):
+        name = agent.name
+
+    span = get_start_span_function()(
+        op=OP.GEN_AI_INVOKE_AGENT,
+        name=f"invoke_agent {name}",
+        origin=SPAN_ORIGIN,
+    )
+
+    span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+
+    _set_agent_data(span, agent)
+    _set_model_data(span, model, model_settings)
+    _set_available_tools(span, agent)
+
+    # Add user prompt and system prompts if available and prompts are enabled
+    if _should_send_prompts():
+        messages = []
+
+        # Add system prompts (both instructions and system_prompt)
+        system_texts = []
+
+        if agent:
+            # Check for system_prompt
+            system_prompts = getattr(agent, "_system_prompts", None) or []
+            for prompt in system_prompts:
+                if isinstance(prompt, str):
+                    system_texts.append(prompt)
+
+            # Check for instructions (stored in _instructions)
+            instructions = getattr(agent, "_instructions", None)
+            if instructions:
+                if isinstance(instructions, str):
+                    system_texts.append(instructions)
+                elif isinstance(instructions, (list, tuple)):
+                    for instr in instructions:
+                        if isinstance(instr, str):
+                            system_texts.append(instr)
+                        elif callable(instr):
+                            # Skip dynamic/callable instructions
+                            pass
+
+        # Add all system texts as system messages
+        for system_text in system_texts:
+            messages.append(
+                {
+                    "content": [{"text": system_text, "type": "text"}],
+                    "role": "system",
+                }
+            )
+
+        # Add user prompt
+        if user_prompt:
+            if isinstance(user_prompt, str):
+                messages.append(
+                    {
+                        "content": [{"text": user_prompt, "type": "text"}],
+                        "role": "user",
+                    }
+                )
+            elif isinstance(user_prompt, list):
+                # Handle list of user content
+                content = []
+                for item in user_prompt:
+                    if isinstance(item, str):
+                        content.append({"text": item, "type": "text"})
+                if content:
+                    messages.append(
+                        {
+                            "content": content,
+                            "role": "user",
+                        }
+                    )
+
+        if messages:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
+            )
+
+    return span
+
+
+def update_invoke_agent_span(span, output):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Update and close the invoke agent span."""
+    if span and _should_send_prompts() and output:
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_RESPONSE_TEXT, str(output), unpack=False
+        )
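Reading the branches above, an agent whose instructions resolve to the string "Be terse" (stored in _instructions, as the code expects) and invoked with the plain-string prompt "hi" would record gen_ai.request.messages in the following shape; this is derived from the code, not captured from a live run:

messages = [
    {"content": [{"text": "Be terse", "type": "text"}], "role": "system"},
    {"content": [{"text": "hi", "type": "text"}], "role": "user"},
]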
sentry_sdk/integrations/pydantic_ai/utils.py (new file)
@@ -0,0 +1,175 @@
+import sentry_sdk
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import SPANDATA
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import safe_serialize
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, List, Dict
+    from pydantic_ai.usage import RequestUsage  # type: ignore
+
+
+def _should_send_prompts():
+    # type: () -> bool
+    """
+    Check if prompts should be sent to Sentry.
+
+    This checks both send_default_pii and the include_prompts integration setting.
+    """
+    if not should_send_default_pii():
+        return False
+
+    from . import PydanticAIIntegration
+
+    # Get the integration instance from the client
+    integration = sentry_sdk.get_client().get_integration(PydanticAIIntegration)
+
+    if integration is None:
+        return False
+
+    return getattr(integration, "include_prompts", False)
+
+
+def _set_agent_data(span, agent):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Set agent-related data on a span.
+
+    Args:
+        span: The span to set data on
+        agent: Agent object (can be None, will try to get from Sentry scope if not provided)
+    """
+    # Extract agent name from agent object or Sentry scope
+    agent_obj = agent
+    if not agent_obj:
+        # Try to get from Sentry scope
+        agent_data = (
+            sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
+        )
+        agent_obj = agent_data.get("_agent")
+
+    if agent_obj and hasattr(agent_obj, "name") and agent_obj.name:
+        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_obj.name)
+
+
+def _get_model_name(model_obj):
+    # type: (Any) -> str | None
+    """Extract model name from a model object.
+
+    Args:
+        model_obj: Model object to extract name from
+
+    Returns:
+        Model name string or None if not found
+    """
+    if not model_obj:
+        return None
+
+    if hasattr(model_obj, "model_name"):
+        return model_obj.model_name
+    elif hasattr(model_obj, "name"):
+        try:
+            return model_obj.name()
+        except Exception:
+            return str(model_obj)
+    elif isinstance(model_obj, str):
+        return model_obj
+    else:
+        return str(model_obj)
+
+
+def _set_model_data(span, model, model_settings):
+    # type: (sentry_sdk.tracing.Span, Any, Any) -> None
+    """Set model-related data on a span.
+
+    Args:
+        span: The span to set data on
+        model: Model object (can be None, will try to get from agent if not provided)
+        model_settings: Model settings (can be None, will try to get from agent if not provided)
+    """
+    # Try to get agent from Sentry scope if we need it
+    agent_data = sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
+    agent_obj = agent_data.get("_agent")
+
+    # Extract model information
+    model_obj = model
+    if not model_obj and agent_obj and hasattr(agent_obj, "model"):
+        model_obj = agent_obj.model
+
+    if model_obj:
+        # Set system from model
+        if hasattr(model_obj, "system"):
+            span.set_data(SPANDATA.GEN_AI_SYSTEM, model_obj.system)
+
+        # Set model name
+        model_name = _get_model_name(model_obj)
+        if model_name:
+            span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+
+    # Extract model settings
+    settings = model_settings
+    if not settings and agent_obj and hasattr(agent_obj, "model_settings"):
+        settings = agent_obj.model_settings
+
+    if settings:
+        settings_map = {
+            "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+            "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+            "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+            "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+            "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+        }
+
+        # ModelSettings is a TypedDict (dict at runtime), so use dict access
+        if isinstance(settings, dict):
+            for setting_name, spandata_key in settings_map.items():
+                value = settings.get(setting_name)
+                if value is not None:
+                    span.set_data(spandata_key, value)
+        else:
+            # Fallback for object-style settings
+            for setting_name, spandata_key in settings_map.items():
+                if hasattr(settings, setting_name):
+                    value = getattr(settings, setting_name)
+                    if value is not None:
+                        span.set_data(spandata_key, value)
+
+
+def _set_available_tools(span, agent):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Set available tools data on a span from an agent's function toolset.
+
+    Args:
+        span: The span to set data on
+        agent: Agent object with _function_toolset attribute
+    """
+    if not agent or not hasattr(agent, "_function_toolset"):
+        return
+
+    try:
+        tools = []
+        # Get tools from the function toolset
+        if hasattr(agent._function_toolset, "tools"):
+            for tool_name, tool in agent._function_toolset.tools.items():
+                tool_info = {"name": tool_name}
+
+                # Add description from function_schema if available
+                if hasattr(tool, "function_schema"):
+                    schema = tool.function_schema
+                    if getattr(schema, "description", None):
+                        tool_info["description"] = schema.description
+
+                    # Add parameters from json_schema
+                    if getattr(schema, "json_schema", None):
+                        tool_info["parameters"] = schema.json_schema
+
+                tools.append(tool_info)
+
+        if tools:
+            span.set_data(
+                SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
+            )
+    except Exception:
+        # If we can't extract tools, just skip it
+        pass
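The fallback order in _get_model_name is easiest to see with toy inputs; _FakeModel is a stand-in invented for this sketch:

from sentry_sdk.integrations.pydantic_ai.utils import _get_model_name

class _FakeModel:
    model_name = "gpt-4o"  # the model_name attribute is checked first

assert _get_model_name(_FakeModel()) == "gpt-4o"
assert _get_model_name("gpt-4o") == "gpt-4o"  # plain strings pass through
assert _get_model_name(None) is None  # falsy input yields None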
sentry_sdk/integrations/redis/utils.py
@@ -20,12 +20,13 @@ def _get_safe_command(name, args):
     # type: (str, Sequence[Any]) -> str
     command_parts = [name]
 
+    name_low = name.lower()
+    send_default_pii = should_send_default_pii()
+
     for i, arg in enumerate(args):
         if i > _MAX_NUM_ARGS:
             break
 
-        name_low = name.lower()
-
         if name_low in _COMMANDS_INCLUDING_SENSITIVE_DATA:
             command_parts.append(SENSITIVE_DATA_SUBSTITUTE)
             continue
@@ -33,9 +34,8 @@ def _get_safe_command(name, args):
         arg_is_the_key = i == 0
         if arg_is_the_key:
             command_parts.append(repr(arg))
-
         else:
-            if should_send_default_pii():
+            if send_default_pii:
                 command_parts.append(repr(arg))
             else:
                 command_parts.append(SENSITIVE_DATA_SUBSTITUTE)
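The redis change is a small refactor: name.lower() and should_send_default_pii() are hoisted out of the per-argument loop, with no change in output. An illustrative call of the (private) helper; the expected output is inferred from the surrounding code rather than observed:

from sentry_sdk.integrations.redis.utils import _get_safe_command

# With send_default_pii off, the key (first argument) stays readable and
# later arguments are masked with the sensitive-data substitute:
print(_get_safe_command("SET", ["mykey", "secret-value"]))
# expected shape: SET 'mykey' [Filtered]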
sentry_sdk/integrations/starlette.py
@@ -326,7 +326,7 @@ def _add_user_to_sentry_scope(scope):
             user_info.setdefault("email", starlette_user.email)
 
     sentry_scope = sentry_sdk.get_isolation_scope()
-    sentry_scope.user = user_info
+    sentry_scope.set_user(user_info)
 
 
 def patch_authentication_middleware(middleware_class):
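The starlette fix replaces assignment to the deprecated user property with the setter API. The equivalent public call, with illustrative user_info values:

import sentry_sdk

scope = sentry_sdk.get_isolation_scope()
scope.set_user({"username": "jane", "email": "jane@example.com"})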
sentry_sdk/integrations/strawberry.py
@@ -1,5 +1,6 @@
 import functools
 import hashlib
+import warnings
 from inspect import isawaitable
 
 import sentry_sdk
@@ -95,17 +96,19 @@ def _patch_schema_init():
 
         extensions = kwargs.get("extensions") or []
 
+        should_use_async_extension = None  # type: Optional[bool]
         if integration.async_execution is not None:
             should_use_async_extension = integration.async_execution
         else:
             # try to figure it out ourselves
             should_use_async_extension = _guess_if_using_async(extensions)
 
-            logger.info(
-                "Assuming strawberry is running %s. If not, initialize it as StrawberryIntegration(async_execution=%s).",
-                "async" if should_use_async_extension else "sync",
-                "False" if should_use_async_extension else "True",
-            )
+        if should_use_async_extension is None:
+            warnings.warn(
+                "Assuming strawberry is running sync. If not, initialize the integration as StrawberryIntegration(async_execution=True).",
+                stacklevel=2,
+            )
+            should_use_async_extension = False
 
         # remove the built in strawberry sentry extension, if present
         extensions = [
@@ -382,12 +385,10 @@ def _make_response_event_processor(response_data):
 
 
 def _guess_if_using_async(extensions):
-    # type: (List[SchemaExtension]) -> bool
+    # type: (List[SchemaExtension]) -> Optional[bool]
     if StrawberrySentryAsyncExtension in extensions:
         return True
     elif StrawberrySentrySyncExtension in extensions:
         return False
 
-    return bool(
-        {"starlette", "starlite", "litestar", "fastapi"} & set(_get_installed_modules())
-    )
+    return None