sentry-sdk 2.42.0__py2.py3-none-any.whl → 2.43.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of sentry-sdk might be problematic.

Files changed (46)
  1. sentry_sdk/__init__.py +2 -0
  2. sentry_sdk/_metrics_batcher.py +1 -1
  3. sentry_sdk/ai/utils.py +49 -2
  4. sentry_sdk/client.py +18 -1
  5. sentry_sdk/consts.py +87 -2
  6. sentry_sdk/integrations/__init__.py +2 -0
  7. sentry_sdk/integrations/anthropic.py +8 -5
  8. sentry_sdk/integrations/aws_lambda.py +2 -0
  9. sentry_sdk/integrations/django/caching.py +16 -3
  10. sentry_sdk/integrations/gcp.py +6 -1
  11. sentry_sdk/integrations/google_genai/__init__.py +3 -0
  12. sentry_sdk/integrations/google_genai/utils.py +16 -6
  13. sentry_sdk/integrations/langchain.py +49 -23
  14. sentry_sdk/integrations/langgraph.py +25 -11
  15. sentry_sdk/integrations/litellm.py +17 -6
  16. sentry_sdk/integrations/mcp.py +552 -0
  17. sentry_sdk/integrations/openai.py +33 -9
  18. sentry_sdk/integrations/openai_agents/__init__.py +2 -0
  19. sentry_sdk/integrations/openai_agents/patches/__init__.py +1 -0
  20. sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
  21. sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
  22. sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
  23. sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
  24. sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +217 -0
  25. sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +105 -0
  26. sentry_sdk/integrations/pydantic_ai/patches/model_request.py +35 -0
  27. sentry_sdk/integrations/pydantic_ai/patches/tools.py +75 -0
  28. sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
  29. sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +253 -0
  30. sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
  31. sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
  32. sentry_sdk/integrations/pydantic_ai/utils.py +175 -0
  33. sentry_sdk/integrations/redis/utils.py +4 -4
  34. sentry_sdk/integrations/starlette.py +1 -1
  35. sentry_sdk/integrations/strawberry.py +10 -9
  36. sentry_sdk/logger.py +14 -2
  37. sentry_sdk/scope.py +13 -6
  38. sentry_sdk/tracing_utils.py +1 -1
  39. sentry_sdk/utils.py +34 -2
  40. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/METADATA +6 -1
  41. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/RECORD +46 -32
  42. /sentry_sdk/{_metrics.py → metrics.py} +0 -0
  43. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/WHEEL +0 -0
  44. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/entry_points.txt +0 -0
  45. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/licenses/LICENSE +0 -0
  46. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/mcp.py
@@ -0,0 +1,552 @@
+"""
+Sentry integration for MCP (Model Context Protocol) servers.
+
+This integration instruments MCP servers to create spans for tool, prompt,
+and resource handler execution, and captures errors that occur during execution.
+
+Supports the low-level `mcp.server.lowlevel.Server` API.
+"""
+
+import inspect
+from functools import wraps
+from typing import TYPE_CHECKING
+
+import sentry_sdk
+from sentry_sdk.ai.utils import get_start_span_function
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.utils import safe_serialize
+from sentry_sdk.scope import should_send_default_pii
+
+try:
+    from mcp.server.lowlevel import Server  # type: ignore[import-not-found]
+    from mcp.server.lowlevel.server import request_ctx  # type: ignore[import-not-found]
+except ImportError:
+    raise DidNotEnable("MCP SDK not installed")
+
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Optional
+
+
+class MCPIntegration(Integration):
+    identifier = "mcp"
+    origin = "auto.ai.mcp"
+
+    def __init__(self, include_prompts=True):
+        # type: (bool) -> None
+        """
+        Initialize the MCP integration.
+
+        Args:
+            include_prompts: Whether to include prompts (tool results and prompt content)
+                in span data. Requires send_default_pii=True. Default is True.
+        """
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        """
+        Patches MCP server classes to instrument handler execution.
+        """
+        _patch_lowlevel_server()
+
+
+def _get_request_context_data():
+    # type: () -> tuple[Optional[str], Optional[str], str]
+    """
+    Extract request ID, session ID, and transport type from the MCP request context.
+
+    Returns:
+        Tuple of (request_id, session_id, transport).
+        - request_id: May be None if not available
+        - session_id: May be None if not available
+        - transport: "tcp" for HTTP-based, "pipe" for stdio
+    """
+    request_id = None  # type: Optional[str]
+    session_id = None  # type: Optional[str]
+    transport = "pipe"  # type: str
+
+    try:
+        ctx = request_ctx.get()
+
+        if ctx is not None:
+            request_id = ctx.request_id
+            if hasattr(ctx, "request") and ctx.request is not None:
+                transport = "tcp"
+                request = ctx.request
+                if hasattr(request, "headers"):
+                    session_id = request.headers.get("mcp-session-id")
+
+    except LookupError:
+        # No request context available - default to pipe
+        pass
+
+    return request_id, session_id, transport
+
+
+def _get_span_config(handler_type, item_name):
+    # type: (str, str) -> tuple[str, str, str, Optional[str]]
+    """
+    Get span configuration based on handler type.
+
+    Returns:
+        Tuple of (span_data_key, span_name, mcp_method_name, result_data_key)
+        Note: result_data_key is None for resources
+    """
+    if handler_type == "tool":
+        span_data_key = SPANDATA.MCP_TOOL_NAME
+        mcp_method_name = "tools/call"
+        result_data_key = SPANDATA.MCP_TOOL_RESULT_CONTENT
+    elif handler_type == "prompt":
+        span_data_key = SPANDATA.MCP_PROMPT_NAME
+        mcp_method_name = "prompts/get"
+        result_data_key = SPANDATA.MCP_PROMPT_RESULT_MESSAGE_CONTENT
+    else:  # resource
+        span_data_key = SPANDATA.MCP_RESOURCE_URI
+        mcp_method_name = "resources/read"
+        result_data_key = None  # Resources don't capture result content
+
+    span_name = f"{mcp_method_name} {item_name}"
+    return span_data_key, span_name, mcp_method_name, result_data_key
+
+
+def _set_span_input_data(
+    span,
+    handler_name,
+    span_data_key,
+    mcp_method_name,
+    arguments,
+    request_id,
+    session_id,
+    transport,
+):
+    # type: (Any, str, str, str, dict[str, Any], Optional[str], Optional[str], str) -> None
+    """Set input span data for MCP handlers."""
+    # Set handler identifier
+    span.set_data(span_data_key, handler_name)
+    span.set_data(SPANDATA.MCP_METHOD_NAME, mcp_method_name)
+
+    # Set transport type
+    span.set_data(SPANDATA.MCP_TRANSPORT, transport)
+
+    # Set request_id if provided
+    if request_id:
+        span.set_data(SPANDATA.MCP_REQUEST_ID, request_id)
+
+    # Set session_id if provided
+    if session_id:
+        span.set_data(SPANDATA.MCP_SESSION_ID, session_id)
+
+    # Set request arguments (excluding common request context objects)
+    for k, v in arguments.items():
+        span.set_data(f"mcp.request.argument.{k}", safe_serialize(v))
+
+
+def _extract_tool_result_content(result):
+    # type: (Any) -> Any
+    """
+    Extract meaningful content from MCP tool result.
+
+    Tool handlers can return:
+    - tuple (UnstructuredContent, StructuredContent): Return the structured content (dict)
+    - dict (StructuredContent): Return as-is
+    - Iterable (UnstructuredContent): Extract text from content blocks
+    """
+    if result is None:
+        return None
+
+    # Handle CombinationContent: tuple of (UnstructuredContent, StructuredContent)
+    if isinstance(result, tuple) and len(result) == 2:
+        # Return the structured content (2nd element)
+        return result[1]
+
+    # Handle StructuredContent: dict
+    if isinstance(result, dict):
+        return result
+
+    # Handle UnstructuredContent: iterable of ContentBlock objects
+    # Try to extract text content
+    if hasattr(result, "__iter__") and not isinstance(result, (str, bytes, dict)):
+        texts = []
+        try:
+            for item in result:
+                # Try to get text attribute from ContentBlock objects
+                if hasattr(item, "text"):
+                    texts.append(item.text)
+                elif isinstance(item, dict) and "text" in item:
+                    texts.append(item["text"])
+        except Exception:
+            # If extraction fails, return the original
+            return result
+        return " ".join(texts) if texts else result
+
+    return result
+
+
+def _set_span_output_data(span, result, result_data_key, handler_type):
+    # type: (Any, Any, Optional[str], str) -> None
+    """Set output span data for MCP handlers."""
+    if result is None:
+        return
+
+    # Get integration to check PII settings
+    integration = sentry_sdk.get_client().get_integration(MCPIntegration)
+    if integration is None:
+        return
+
+    # Check if we should include sensitive data
+    should_include_data = should_send_default_pii() and integration.include_prompts
+
+    # For tools, extract the meaningful content
+    if handler_type == "tool":
+        extracted = _extract_tool_result_content(result)
+        if extracted is not None and should_include_data:
+            span.set_data(result_data_key, safe_serialize(extracted))
+        # Set content count if result is a dict
+        if isinstance(extracted, dict):
+            span.set_data(SPANDATA.MCP_TOOL_RESULT_CONTENT_COUNT, len(extracted))
+    elif handler_type == "prompt":
+        # For prompts, count messages and set role/content only for single-message prompts
+        try:
+            messages = None  # type: Optional[list[str]]
+            message_count = 0
+
+            # Check if result has messages attribute (GetPromptResult)
+            if hasattr(result, "messages") and result.messages:
+                messages = result.messages
+                message_count = len(messages)
+            # Also check if result is a dict with messages
+            elif isinstance(result, dict) and result.get("messages"):
+                messages = result["messages"]
+                message_count = len(messages)
+
+            # Always set message count if we found messages
+            if message_count > 0:
+                span.set_data(SPANDATA.MCP_PROMPT_RESULT_MESSAGE_COUNT, message_count)
+
+            # Only set role and content for single-message prompts if PII is allowed
+            if message_count == 1 and should_include_data and messages:
+                first_message = messages[0]
+                # Extract role
+                role = None
+                if hasattr(first_message, "role"):
+                    role = first_message.role
+                elif isinstance(first_message, dict) and "role" in first_message:
+                    role = first_message["role"]
+
+                if role:
+                    span.set_data(SPANDATA.MCP_PROMPT_RESULT_MESSAGE_ROLE, role)
+
+                # Extract content text
+                content_text = None
+                if hasattr(first_message, "content"):
+                    msg_content = first_message.content
+                    # Content can be a TextContent object or similar
+                    if hasattr(msg_content, "text"):
+                        content_text = msg_content.text
+                    elif isinstance(msg_content, dict) and "text" in msg_content:
+                        content_text = msg_content["text"]
+                    elif isinstance(msg_content, str):
+                        content_text = msg_content
+                elif isinstance(first_message, dict) and "content" in first_message:
+                    msg_content = first_message["content"]
+                    if isinstance(msg_content, dict) and "text" in msg_content:
+                        content_text = msg_content["text"]
+                    elif isinstance(msg_content, str):
+                        content_text = msg_content
+
+                if content_text:
+                    span.set_data(result_data_key, content_text)
+        except Exception:
+            # Silently ignore if we can't extract message info
+            pass
+    # Resources don't capture result content (result_data_key is None)
+
+
+# Handler data preparation and wrapping
+
+
+def _prepare_handler_data(handler_type, original_args):
+    # type: (str, tuple[Any, ...]) -> tuple[str, dict[str, Any], str, str, str, Optional[str]]
+    """
+    Prepare common handler data for both async and sync wrappers.
+
+    Returns:
+        Tuple of (handler_name, arguments, span_data_key, span_name, mcp_method_name, result_data_key)
+    """
+    # Extract handler-specific data based on handler type
+    if handler_type == "tool":
+        handler_name = original_args[0]  # tool_name
+        arguments = original_args[1] if len(original_args) > 1 else {}
+    elif handler_type == "prompt":
+        handler_name = original_args[0]  # name
+        arguments = original_args[1] if len(original_args) > 1 else {}
+        # Include name in arguments dict for span data
+        arguments = {"name": handler_name, **(arguments or {})}
+    else:  # resource
+        uri = original_args[0]
+        handler_name = str(uri) if uri else "unknown"
+        arguments = {}
+
+    # Get span configuration
+    span_data_key, span_name, mcp_method_name, result_data_key = _get_span_config(
+        handler_type, handler_name
+    )
+
+    return (
+        handler_name,
+        arguments,
+        span_data_key,
+        span_name,
+        mcp_method_name,
+        result_data_key,
+    )
+
+
+async def _async_handler_wrapper(handler_type, func, original_args):
+    # type: (str, Callable[..., Any], tuple[Any, ...]) -> Any
+    """
+    Async wrapper for MCP handlers.
+
+    Args:
+        handler_type: "tool", "prompt", or "resource"
+        func: The async handler function to wrap
+        original_args: Original arguments passed to the handler
+    """
+    (
+        handler_name,
+        arguments,
+        span_data_key,
+        span_name,
+        mcp_method_name,
+        result_data_key,
+    ) = _prepare_handler_data(handler_type, original_args)
+
+    # Start span and execute
+    with get_start_span_function()(
+        op=OP.MCP_SERVER,
+        name=span_name,
+        origin=MCPIntegration.origin,
+    ) as span:
+        # Get request ID, session ID, and transport from context
+        request_id, session_id, transport = _get_request_context_data()
+
+        # Set input span data
+        _set_span_input_data(
+            span,
+            handler_name,
+            span_data_key,
+            mcp_method_name,
+            arguments,
+            request_id,
+            session_id,
+            transport,
+        )
+
+        # For resources, extract and set protocol
+        if handler_type == "resource":
+            uri = original_args[0]
+            protocol = None
+            if hasattr(uri, "scheme"):
+                protocol = uri.scheme
+            elif handler_name and "://" in handler_name:
+                protocol = handler_name.split("://")[0]
+            if protocol:
+                span.set_data(SPANDATA.MCP_RESOURCE_PROTOCOL, protocol)
+
+        try:
+            # Execute the async handler
+            result = await func(*original_args)
+        except Exception as e:
+            # Set error flag for tools
+            if handler_type == "tool":
+                span.set_data(SPANDATA.MCP_TOOL_RESULT_IS_ERROR, True)
+            sentry_sdk.capture_exception(e)
+            raise
+
+        _set_span_output_data(span, result, result_data_key, handler_type)
+        return result
+
+
+def _sync_handler_wrapper(handler_type, func, original_args):
+    # type: (str, Callable[..., Any], tuple[Any, ...]) -> Any
+    """
+    Sync wrapper for MCP handlers.
+
+    Args:
+        handler_type: "tool", "prompt", or "resource"
+        func: The sync handler function to wrap
+        original_args: Original arguments passed to the handler
+    """
+    (
+        handler_name,
+        arguments,
+        span_data_key,
+        span_name,
+        mcp_method_name,
+        result_data_key,
+    ) = _prepare_handler_data(handler_type, original_args)
+
+    # Start span and execute
+    with get_start_span_function()(
+        op=OP.MCP_SERVER,
+        name=span_name,
+        origin=MCPIntegration.origin,
+    ) as span:
+        # Get request ID, session ID, and transport from context
+        request_id, session_id, transport = _get_request_context_data()
+
+        # Set input span data
+        _set_span_input_data(
+            span,
+            handler_name,
+            span_data_key,
+            mcp_method_name,
+            arguments,
+            request_id,
+            session_id,
+            transport,
+        )
+
+        # For resources, extract and set protocol
+        if handler_type == "resource":
+            uri = original_args[0]
+            protocol = None
+            if hasattr(uri, "scheme"):
+                protocol = uri.scheme
+            elif handler_name and "://" in handler_name:
+                protocol = handler_name.split("://")[0]
+            if protocol:
+                span.set_data(SPANDATA.MCP_RESOURCE_PROTOCOL, protocol)
+
+        try:
+            # Execute the sync handler
+            result = func(*original_args)
+        except Exception as e:
+            # Set error flag for tools
+            if handler_type == "tool":
+                span.set_data(SPANDATA.MCP_TOOL_RESULT_IS_ERROR, True)
+            sentry_sdk.capture_exception(e)
+            raise
+
+        _set_span_output_data(span, result, result_data_key, handler_type)
+        return result
+
+
+def _create_instrumented_handler(handler_type, func):
+    # type: (str, Callable[..., Any]) -> Callable[..., Any]
+    """
+    Create an instrumented version of a handler function (async or sync).
+
+    This function wraps the user's handler with a runtime wrapper that will create
+    Sentry spans and capture metrics when the handler is actually called.
+
+    The wrapper preserves the async/sync nature of the original function, which is
+    critical for Python's async/await to work correctly.
+
+    Args:
+        handler_type: "tool", "prompt", or "resource" - determines span configuration
+        func: The handler function to instrument (async or sync)
+
+    Returns:
+        A wrapped version of func that creates Sentry spans on execution
+    """
+    if inspect.iscoroutinefunction(func):
+
+        @wraps(func)
+        async def async_wrapper(*args):
+            # type: (*Any) -> Any
+            return await _async_handler_wrapper(handler_type, func, args)
+
+        return async_wrapper
+    else:
+
+        @wraps(func)
+        def sync_wrapper(*args):
+            # type: (*Any) -> Any
+            return _sync_handler_wrapper(handler_type, func, args)
+
+        return sync_wrapper
+
+
+def _create_instrumented_decorator(
+    original_decorator, handler_type, *decorator_args, **decorator_kwargs
+):
+    # type: (Callable[..., Any], str, *Any, **Any) -> Callable[..., Any]
+    """
+    Create an instrumented version of an MCP decorator.
+
+    This function intercepts MCP decorators (like @server.call_tool()) and injects
+    Sentry instrumentation into the handler registration flow. The returned decorator
+    will:
+    1. Receive the user's handler function
+    2. Wrap it with instrumentation via _create_instrumented_handler
+    3. Pass the instrumented version to the original MCP decorator
+
+    This ensures that when the handler is called at runtime, it's already wrapped
+    with Sentry spans and metrics collection.
+
+    Args:
+        original_decorator: The original MCP decorator method (e.g., Server.call_tool)
+        handler_type: "tool", "prompt", or "resource" - determines span configuration
+        decorator_args: Positional arguments to pass to the original decorator (e.g., self)
+        decorator_kwargs: Keyword arguments to pass to the original decorator
+
+    Returns:
+        A decorator function that instruments handlers before registering them
+    """
+
+    def instrumented_decorator(func):
+        # type: (Callable[..., Any]) -> Callable[..., Any]
+        # First wrap the handler with instrumentation
+        instrumented_func = _create_instrumented_handler(handler_type, func)
+        # Then register it with the original MCP decorator
+        return original_decorator(*decorator_args, **decorator_kwargs)(
+            instrumented_func
+        )
+
+    return instrumented_decorator
+
+
+def _patch_lowlevel_server():
+    # type: () -> None
+    """
+    Patches the mcp.server.lowlevel.Server class to instrument handler execution.
+    """
+    # Patch call_tool decorator
+    original_call_tool = Server.call_tool
+
+    def patched_call_tool(self, **kwargs):
+        # type: (Server, **Any) -> Callable[[Callable[..., Any]], Callable[..., Any]]
+        """Patched version of Server.call_tool that adds Sentry instrumentation."""
+        return lambda func: _create_instrumented_decorator(
+            original_call_tool, "tool", self, **kwargs
+        )(func)
+
+    Server.call_tool = patched_call_tool
+
+    # Patch get_prompt decorator
+    original_get_prompt = Server.get_prompt
+
+    def patched_get_prompt(self):
+        # type: (Server) -> Callable[[Callable[..., Any]], Callable[..., Any]]
+        """Patched version of Server.get_prompt that adds Sentry instrumentation."""
+        return lambda func: _create_instrumented_decorator(
+            original_get_prompt, "prompt", self
+        )(func)
+
+    Server.get_prompt = patched_get_prompt
+
+    # Patch read_resource decorator
+    original_read_resource = Server.read_resource
+
+    def patched_read_resource(self):
+        # type: (Server) -> Callable[[Callable[..., Any]], Callable[..., Any]]
+        """Patched version of Server.read_resource that adds Sentry instrumentation."""
+        return lambda func: _create_instrumented_decorator(
+            original_read_resource, "resource", self
+        )(func)
+
+    Server.read_resource = patched_read_resource
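
For orientation, here is a minimal sketch (not part of the diff) of how the new MCP integration added above might be enabled. The DSN, server name, and tool handler are hypothetical; MCPIntegration and its include_prompts option come from the new sentry_sdk/integrations/mcp.py module, and the low-level Server / call_tool API is the one the integration patches.

import sentry_sdk
from sentry_sdk.integrations.mcp import MCPIntegration
from mcp.server.lowlevel import Server

sentry_sdk.init(
    dsn="https://public@example.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,
    send_default_pii=True,  # needed for include_prompts data to be attached
    integrations=[MCPIntegration(include_prompts=True)],
)

server = Server("example-server")  # hypothetical server name


@server.call_tool()  # registration goes through the patched decorator
async def handle_tool(name, arguments):
    # At runtime this handler is wrapped, producing a "tools/call <name>" span
    # with the request arguments, transport, request/session IDs, and
    # (when PII is allowed) the tool result content.
    return {"echo": arguments}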
sentry_sdk/integrations/openai.py
@@ -3,7 +3,11 @@ from functools import wraps
 import sentry_sdk
 from sentry_sdk import consts
 from sentry_sdk.ai.monitoring import record_token_usage
-from sentry_sdk.ai.utils import set_data_normalized, normalize_message_roles
+from sentry_sdk.ai.utils import (
+    set_data_normalized,
+    normalize_message_roles,
+    truncate_and_annotate_messages,
+)
 from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
@@ -22,9 +26,14 @@ if TYPE_CHECKING:

 try:
     try:
-        from openai import NOT_GIVEN
+        from openai import NotGiven
     except ImportError:
-        NOT_GIVEN = None
+        NotGiven = None
+
+    try:
+        from openai import Omit
+    except ImportError:
+        Omit = None

     from openai.resources.chat.completions import Completions, AsyncCompletions
     from openai.resources import Embeddings, AsyncEmbeddings
@@ -183,9 +192,12 @@ def _set_input_data(span, kwargs, operation, integration):
         and integration.include_prompts
     ):
         normalized_messages = normalize_message_roles(messages)
-        set_data_normalized(
-            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False
-        )
+        scope = sentry_sdk.get_current_scope()
+        messages_data = truncate_and_annotate_messages(normalized_messages, span, scope)
+        if messages_data is not None:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages_data, unpack=False
+            )

     # Input attributes: Common
     set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
@@ -204,12 +216,12 @@ def _set_input_data(span, kwargs, operation, integration):
     for key, attribute in kwargs_keys_to_attributes.items():
         value = kwargs.get(key)

-        if value is not NOT_GIVEN and value is not None:
+        if value is not None and _is_given(value):
             set_data_normalized(span, attribute, value)

     # Input attributes: Tools
     tools = kwargs.get("tools")
-    if tools is not NOT_GIVEN and tools is not None and len(tools) > 0:
+    if tools is not None and _is_given(tools) and len(tools) > 0:
         set_data_normalized(
             span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
         )
@@ -231,7 +243,7 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):

     if hasattr(response, "choices"):
         if should_send_default_pii() and integration.include_prompts:
-            response_text = [choice.message.dict() for choice in response.choices]
+            response_text = [choice.message.model_dump() for choice in response.choices]
             if len(response_text) > 0:
                 set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_text)

@@ -689,3 +701,15 @@ def _wrap_async_responses_create(f):
         return await _execute_async(f, *args, **kwargs)

     return _sentry_patched_responses_async
+
+
+def _is_given(obj):
+    # type: (Any) -> bool
+    """
+    Check for givenness safely across different openai versions.
+    """
+    if NotGiven is not None and isinstance(obj, NotGiven):
+        return False
+    if Omit is not None and isinstance(obj, Omit):
+        return False
+    return True

sentry_sdk/integrations/openai_agents/__init__.py
@@ -5,6 +5,7 @@ from .patches import (
     _create_get_all_tools_wrapper,
     _create_run_wrapper,
     _patch_agent_run,
+    _patch_error_tracing,
 )

 try:
@@ -48,6 +49,7 @@ class OpenAIAgentsIntegration(Integration):
     @staticmethod
     def setup_once():
         # type: () -> None
+        _patch_error_tracing()
         _patch_tools()
         _patch_model()
         _patch_runner()

sentry_sdk/integrations/openai_agents/patches/__init__.py
@@ -2,3 +2,4 @@ from .models import _create_get_model_wrapper  # noqa: F401
 from .tools import _create_get_all_tools_wrapper  # noqa: F401
 from .runner import _create_run_wrapper  # noqa: F401
 from .agent_run import _patch_agent_run  # noqa: F401
+from .error_tracing import _patch_error_tracing  # noqa: F401
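
And a small illustrative sketch (not part of the diff) of the behavioral change behind the new _is_given helper in the OpenAI integration: request kwargs are now skipped when they are any openai sentinel (NotGiven or, on newer openai releases, Omit), rather than only when they are the NOT_GIVEN singleton. The asserts below assume an openai release that still exports NOT_GIVEN, as the pre-2.43.0 code required.

from openai import NOT_GIVEN  # openai's module-level NotGiven sentinel instance
from sentry_sdk.integrations.openai import _is_given

assert _is_given(0.7) is True         # concrete values are still recorded as span data
assert _is_given(NOT_GIVEN) is False  # sentinel values are filtered out before set_data_normalized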