sentry-sdk 2.42.0__py2.py3-none-any.whl → 2.43.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of sentry-sdk might be problematic.

Files changed (46)
  1. sentry_sdk/__init__.py +2 -0
  2. sentry_sdk/_metrics_batcher.py +1 -1
  3. sentry_sdk/ai/utils.py +49 -2
  4. sentry_sdk/client.py +18 -1
  5. sentry_sdk/consts.py +87 -2
  6. sentry_sdk/integrations/__init__.py +2 -0
  7. sentry_sdk/integrations/anthropic.py +8 -5
  8. sentry_sdk/integrations/aws_lambda.py +2 -0
  9. sentry_sdk/integrations/django/caching.py +16 -3
  10. sentry_sdk/integrations/gcp.py +6 -1
  11. sentry_sdk/integrations/google_genai/__init__.py +3 -0
  12. sentry_sdk/integrations/google_genai/utils.py +16 -6
  13. sentry_sdk/integrations/langchain.py +49 -23
  14. sentry_sdk/integrations/langgraph.py +25 -11
  15. sentry_sdk/integrations/litellm.py +17 -6
  16. sentry_sdk/integrations/mcp.py +552 -0
  17. sentry_sdk/integrations/openai.py +33 -9
  18. sentry_sdk/integrations/openai_agents/__init__.py +2 -0
  19. sentry_sdk/integrations/openai_agents/patches/__init__.py +1 -0
  20. sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
  21. sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
  22. sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
  23. sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
  24. sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +217 -0
  25. sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +105 -0
  26. sentry_sdk/integrations/pydantic_ai/patches/model_request.py +35 -0
  27. sentry_sdk/integrations/pydantic_ai/patches/tools.py +75 -0
  28. sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
  29. sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +253 -0
  30. sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
  31. sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
  32. sentry_sdk/integrations/pydantic_ai/utils.py +175 -0
  33. sentry_sdk/integrations/redis/utils.py +4 -4
  34. sentry_sdk/integrations/starlette.py +1 -1
  35. sentry_sdk/integrations/strawberry.py +10 -9
  36. sentry_sdk/logger.py +14 -2
  37. sentry_sdk/scope.py +13 -6
  38. sentry_sdk/tracing_utils.py +1 -1
  39. sentry_sdk/utils.py +34 -2
  40. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/METADATA +6 -1
  41. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/RECORD +46 -32
  42. /sentry_sdk/{_metrics.py → metrics.py} +0 -0
  43. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/WHEEL +0 -0
  44. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/entry_points.txt +0 -0
  45. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/licenses/LICENSE +0 -0
  46. {sentry_sdk-2.42.0.dist-info → sentry_sdk-2.43.0.dist-info}/top_level.txt +0 -0
sentry_sdk/__init__.py CHANGED
@@ -1,4 +1,5 @@
  from sentry_sdk import profiler
+ from sentry_sdk import metrics
  from sentry_sdk.scope import Scope
  from sentry_sdk.transport import Transport, HttpTransport
  from sentry_sdk.client import Client
@@ -48,6 +49,7 @@ __all__ = [ # noqa
      "trace",
      "monitor",
      "logger",
+     "metrics",
      "profiler",
      "start_session",
      "end_session",
sentry_sdk/_metrics_batcher.py CHANGED
@@ -12,7 +12,7 @@ if TYPE_CHECKING:


  class MetricsBatcher:
-     MAX_METRICS_BEFORE_FLUSH = 100
+     MAX_METRICS_BEFORE_FLUSH = 1000
      FLUSH_WAIT_TIME = 5.0

      def __init__(
sentry_sdk/ai/utils.py CHANGED
@@ -1,14 +1,18 @@
  import json
-
+ from collections import deque
  from typing import TYPE_CHECKING
+ from sys import getsizeof

  if TYPE_CHECKING:
-     from typing import Any, Callable
+     from typing import Any, Callable, Dict, List, Optional, Tuple
+
      from sentry_sdk.tracing import Span

  import sentry_sdk
  from sentry_sdk.utils import logger

+ MAX_GEN_AI_MESSAGE_BYTES = 20_000  # 20KB
+

  class GEN_AI_ALLOWED_MESSAGE_ROLES:
      SYSTEM = "system"
@@ -95,3 +99,46 @@ def get_start_span_function():
          current_span is not None and current_span.containing_transaction is not None
      )
      return sentry_sdk.start_span if transaction_exists else sentry_sdk.start_transaction
+
+
+ def _find_truncation_index(messages, max_bytes):
+     # type: (List[Dict[str, Any]], int) -> int
+     """
+     Find the index of the first message that would exceed the max bytes limit.
+     Compute the individual message sizes, and return the index of the first message from the back
+     of the list that would exceed the max bytes limit.
+     """
+     running_sum = 0
+     for idx in range(len(messages) - 1, -1, -1):
+         size = len(json.dumps(messages[idx], separators=(",", ":")).encode("utf-8"))
+         running_sum += size
+         if running_sum > max_bytes:
+             return idx + 1
+
+     return 0
+
+
+ def truncate_messages_by_size(messages, max_bytes=MAX_GEN_AI_MESSAGE_BYTES):
+     # type: (List[Dict[str, Any]], int) -> Tuple[List[Dict[str, Any]], int]
+     serialized_json = json.dumps(messages, separators=(",", ":"))
+     current_size = len(serialized_json.encode("utf-8"))
+
+     if current_size <= max_bytes:
+         return messages, 0
+
+     truncation_index = _find_truncation_index(messages, max_bytes)
+     return messages[truncation_index:], truncation_index
+
+
+ def truncate_and_annotate_messages(
+     messages, span, scope, max_bytes=MAX_GEN_AI_MESSAGE_BYTES
+ ):
+     # type: (Optional[List[Dict[str, Any]]], Any, Any, int) -> Optional[List[Dict[str, Any]]]
+     if not messages:
+         return None
+
+     truncated_messages, removed_count = truncate_messages_by_size(messages, max_bytes)
+     if removed_count > 0:
+         scope._gen_ai_original_message_count[span.span_id] = len(messages)
+
+     return truncated_messages
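
For context on the new helpers: they drop messages from the front of the list until what remains fits the default 20KB budget, so the most recent messages are kept. A minimal sketch of the expected behavior (toy messages; sizes chosen only for illustration):

from sentry_sdk.ai.utils import truncate_messages_by_size

# Each message is roughly 9KB once JSON-serialized, so only the last two fit
# under the default 20_000-byte budget and the oldest message is dropped.
messages = [
    {"role": "system", "content": "a" * 9_000},
    {"role": "user", "content": "b" * 9_000},
    {"role": "assistant", "content": "c" * 9_000},
]

kept, removed_count = truncate_messages_by_size(messages)
assert removed_count == 1
assert [m["role"] for m in kept] == ["user", "assistant"]
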
sentry_sdk/client.py CHANGED
@@ -598,14 +598,31 @@ class _Client(BaseClient):
          if event_scrubber:
              event_scrubber.scrub_event(event)

+         if scope is not None and scope._gen_ai_original_message_count:
+             spans = event.get("spans", [])  # type: List[Dict[str, Any]] | AnnotatedValue
+             if isinstance(spans, list):
+                 for span in spans:
+                     span_id = span.get("span_id", None)
+                     span_data = span.get("data", {})
+                     if (
+                         span_id
+                         and span_id in scope._gen_ai_original_message_count
+                         and SPANDATA.GEN_AI_REQUEST_MESSAGES in span_data
+                     ):
+                         span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES] = AnnotatedValue(
+                             span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES],
+                             {"len": scope._gen_ai_original_message_count[span_id]},
+                         )
          if previous_total_spans is not None:
              event["spans"] = AnnotatedValue(
                  event.get("spans", []), {"len": previous_total_spans}
              )
          if previous_total_breadcrumbs is not None:
              event["breadcrumbs"] = AnnotatedValue(
-                 event.get("breadcrumbs", []), {"len": previous_total_breadcrumbs}
+                 event.get("breadcrumbs", {"values": []}),
+                 {"len": previous_total_breadcrumbs},
              )
+
          # Postprocess the event here so that annotated types do
          # generally not surface in before_send
          if event is not None:
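
Tying this to the helper above: when messages were truncated, the scope remembers the pre-truncation count per span id, and at event time the client wraps the span's gen_ai.request.messages value in an AnnotatedValue so the original length travels with the event instead of being silently lost. A rough sketch of the resulting shape (values invented, not the SDK's exact serialization):

from sentry_sdk.utils import AnnotatedValue

# Hypothetical: 7 messages were captured, but only the 2 most recent fit the budget.
truncated = [
    {"role": "user", "content": "latest question"},
    {"role": "assistant", "content": "latest answer"},
]
annotated = AnnotatedValue(truncated, {"len": 7})  # "len" carries the original count
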
sentry_sdk/consts.py CHANGED
@@ -749,6 +749,90 @@ class SPANDATA:
      Example: "MainThread"
      """

+     MCP_TOOL_NAME = "mcp.tool.name"
+     """
+     The name of the MCP tool being called.
+     Example: "get_weather"
+     """
+
+     MCP_PROMPT_NAME = "mcp.prompt.name"
+     """
+     The name of the MCP prompt being retrieved.
+     Example: "code_review"
+     """
+
+     MCP_RESOURCE_URI = "mcp.resource.uri"
+     """
+     The URI of the MCP resource being accessed.
+     Example: "file:///path/to/resource"
+     """
+
+     MCP_METHOD_NAME = "mcp.method.name"
+     """
+     The MCP protocol method name being called.
+     Example: "tools/call", "prompts/get", "resources/read"
+     """
+
+     MCP_REQUEST_ID = "mcp.request.id"
+     """
+     The unique identifier for the MCP request.
+     Example: "req_123abc"
+     """
+
+     MCP_TOOL_RESULT_CONTENT = "mcp.tool.result.content"
+     """
+     The result/output content from an MCP tool execution.
+     Example: "The weather is sunny"
+     """
+
+     MCP_TOOL_RESULT_CONTENT_COUNT = "mcp.tool.result.content_count"
+     """
+     The number of items/keys in the MCP tool result.
+     Example: 5
+     """
+
+     MCP_TOOL_RESULT_IS_ERROR = "mcp.tool.result.is_error"
+     """
+     Whether the MCP tool execution resulted in an error.
+     Example: True
+     """
+
+     MCP_PROMPT_RESULT_MESSAGE_CONTENT = "mcp.prompt.result.message_content"
+     """
+     The message content from an MCP prompt retrieval.
+     Example: "Review the following code..."
+     """
+
+     MCP_PROMPT_RESULT_MESSAGE_ROLE = "mcp.prompt.result.message_role"
+     """
+     The role of the message in an MCP prompt retrieval (only set for single-message prompts).
+     Example: "user", "assistant", "system"
+     """
+
+     MCP_PROMPT_RESULT_MESSAGE_COUNT = "mcp.prompt.result.message_count"
+     """
+     The number of messages in an MCP prompt result.
+     Example: 1, 3
+     """
+
+     MCP_RESOURCE_PROTOCOL = "mcp.resource.protocol"
+     """
+     The protocol/scheme of the MCP resource URI.
+     Example: "file", "http", "https"
+     """
+
+     MCP_TRANSPORT = "mcp.transport"
+     """
+     The transport method used for MCP communication.
+     Example: "pipe" (stdio), "tcp" (HTTP/WebSocket/SSE)
+     """
+
+     MCP_SESSION_ID = "mcp.session.id"
+     """
+     The session identifier for the MCP connection.
+     Example: "a1b2c3d4e5f6"
+     """
+

  class SPANSTATUS:
      """
@@ -845,6 +929,7 @@ class OP:
      WEBSOCKET_SERVER = "websocket.server"
      SOCKET_CONNECTION = "socket.connection"
      SOCKET_DNS = "socket.dns"
+     MCP_SERVER = "mcp.server"


  # This type exists to trick mypy and PyCharm into thinking `init` and `Client`
@@ -909,7 +994,7 @@ class ClientConstructor:
          error_sampler=None,  # type: Optional[Callable[[Event, Hint], Union[float, bool]]]
          enable_db_query_source=True,  # type: bool
          db_query_source_threshold_ms=100,  # type: int
-         enable_http_request_source=False,  # type: bool
+         enable_http_request_source=True,  # type: bool
          http_request_source_threshold_ms=100,  # type: int
          spotlight=None,  # type: Optional[Union[bool, str]]
          cert_file=None,  # type: Optional[str]
@@ -1348,4 +1433,4 @@ DEFAULT_OPTIONS = _get_default_options()
  del _get_default_options


- VERSION = "2.42.0"
+ VERSION = "2.43.0"
sentry_sdk/integrations/__init__.py CHANGED
@@ -149,9 +149,11 @@ _MIN_VERSIONS = {
      "launchdarkly": (9, 8, 0),
      "litellm": (1, 77, 5),
      "loguru": (0, 7, 0),
+     "mcp": (1, 15, 0),
      "openai": (1, 0, 0),
      "openai_agents": (0, 0, 19),
      "openfeature": (0, 7, 1),
+     "pydantic_ai": (1, 0, 0),
      "quart": (0, 16, 0),
      "ray": (2, 7, 0),
      "requests": (2, 0, 0),
sentry_sdk/integrations/anthropic.py CHANGED
@@ -6,6 +6,7 @@ from sentry_sdk.ai.monitoring import record_token_usage
  from sentry_sdk.ai.utils import (
      set_data_normalized,
      normalize_message_roles,
+     truncate_and_annotate_messages,
      get_start_span_function,
  )
  from sentry_sdk.consts import OP, SPANDATA, SPANSTATUS
@@ -145,12 +146,14 @@ def _set_input_data(span, kwargs, integration):
              normalized_messages.append(message)

      role_normalized_messages = normalize_message_roles(normalized_messages)
-     set_data_normalized(
-         span,
-         SPANDATA.GEN_AI_REQUEST_MESSAGES,
-         role_normalized_messages,
-         unpack=False,
+     scope = sentry_sdk.get_current_scope()
+     messages_data = truncate_and_annotate_messages(
+         role_normalized_messages, span, scope
      )
+     if messages_data is not None:
+         set_data_normalized(
+             span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages_data, unpack=False
+         )

      set_data_normalized(
          span, SPANDATA.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False)
sentry_sdk/integrations/aws_lambda.py CHANGED
@@ -138,6 +138,8 @@ def _wrap_handler(handler):
          timeout_thread = TimeoutThread(
              waiting_time,
              configured_time / MILLIS_TO_SECONDS,
+             isolation_scope=scope,
+             current_scope=sentry_sdk.get_current_scope(),
          )

          # Starting the thread to raise timeout warning exception
sentry_sdk/integrations/django/caching.py CHANGED
@@ -45,7 +45,8 @@ def _patch_cache_method(cache, method_name, address, port):
      ):
          # type: (CacheHandler, str, Callable[..., Any], tuple[Any, ...], dict[str, Any], Optional[str], Optional[int]) -> Any
          is_set_operation = method_name.startswith("set")
-         is_get_operation = not is_set_operation
+         is_get_method = method_name == "get"
+         is_get_many_method = method_name == "get_many"

          op = OP.CACHE_PUT if is_set_operation else OP.CACHE_GET
          description = _get_span_description(method_name, args, kwargs)
@@ -69,8 +70,20 @@ def _patch_cache_method(cache, method_name, address, port):
                  span.set_data(SPANDATA.CACHE_KEY, key)

              item_size = None
-             if is_get_operation:
-                 if value:
+             if is_get_many_method:
+                 if value != {}:
+                     item_size = len(str(value))
+                     span.set_data(SPANDATA.CACHE_HIT, True)
+                 else:
+                     span.set_data(SPANDATA.CACHE_HIT, False)
+             elif is_get_method:
+                 default_value = None
+                 if len(args) >= 2:
+                     default_value = args[1]
+                 elif "default" in kwargs:
+                     default_value = kwargs["default"]
+
+                 if value != default_value:
                      item_size = len(str(value))
                      span.set_data(SPANDATA.CACHE_HIT, True)
                  else:
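
To illustrate the new hit/miss semantics (a sketch assuming a configured Django cache and the Django integration enabled): cache.get() now counts as a miss whenever it returns the caller's default, and cache.get_many() counts as a hit only when the returned dict is non-empty. The old check (if value:) also misreported cached falsy values such as 0 or "" as misses.

from django.core.cache import cache

cache.set("color", "blue")

cache.get("color")                    # hit: returned value differs from the implicit default (None)
cache.get("missing")                  # miss: returns the implicit default None
cache.get("missing", "fallback")      # miss: the explicit default is returned, so CACHE_HIT is False
cache.get_many(["color", "missing"])  # hit: the returned dict {"color": "blue"} is non-empty
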
sentry_sdk/integrations/gcp.py CHANGED
@@ -75,7 +75,12 @@ def _wrap_func(func):
      ):
          waiting_time = configured_time - TIMEOUT_WARNING_BUFFER

-         timeout_thread = TimeoutThread(waiting_time, configured_time)
+         timeout_thread = TimeoutThread(
+             waiting_time,
+             configured_time,
+             isolation_scope=scope,
+             current_scope=sentry_sdk.get_current_scope(),
+         )

          # Starting the thread to raise timeout warning exception
          timeout_thread.start()
sentry_sdk/integrations/google_genai/__init__.py CHANGED
@@ -92,6 +92,7 @@ def _wrap_generate_content_stream(f):
          chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
          set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
          chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+         chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)

          try:
              stream = f(self, *args, **kwargs)
@@ -165,6 +166,7 @@ def _wrap_async_generate_content_stream(f):
          chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
          set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
          chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+         chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)

          try:
              stream = await f(self, *args, **kwargs)
@@ -233,6 +235,7 @@ def _wrap_generate_content(f):
          chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
          chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
          chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+         chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
          set_span_data_for_request(
              chat_span, integration, model_name, contents, kwargs
          )
sentry_sdk/integrations/google_genai/utils.py CHANGED
@@ -15,7 +15,11 @@ from typing import (
  )

  import sentry_sdk
- from sentry_sdk.ai.utils import set_data_normalized
+ from sentry_sdk.ai.utils import (
+     set_data_normalized,
+     truncate_and_annotate_messages,
+     normalize_message_roles,
+ )
  from sentry_sdk.consts import OP, SPANDATA
  from sentry_sdk.scope import should_send_default_pii
  from sentry_sdk.utils import (
@@ -462,12 +466,18 @@ def set_span_data_for_request(span, integration, model, contents, kwargs):
          messages.append({"role": "user", "content": contents_text})

      if messages:
-         set_data_normalized(
-             span,
-             SPANDATA.GEN_AI_REQUEST_MESSAGES,
-             messages,
-             unpack=False,
+         normalized_messages = normalize_message_roles(messages)
+         scope = sentry_sdk.get_current_scope()
+         messages_data = truncate_and_annotate_messages(
+             normalized_messages, span, scope
          )
+         if messages_data is not None:
+             set_data_normalized(
+                 span,
+                 SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                 messages_data,
+                 unpack=False,
+             )

      # Extract parameters directly from config (not nested under generation_config)
      for param, span_key in [
sentry_sdk/integrations/langchain.py CHANGED
@@ -9,6 +9,7 @@ from sentry_sdk.ai.utils import (
      normalize_message_roles,
      set_data_normalized,
      get_start_span_function,
+     truncate_and_annotate_messages,
  )
  from sentry_sdk.consts import OP, SPANDATA
  from sentry_sdk.integrations import DidNotEnable, Integration
@@ -49,9 +50,15 @@ except ImportError:


  try:
-     from langchain.agents import AgentExecutor
+     # >=v1
+     from langchain_classic.agents import AgentExecutor  # type: ignore[import-not-found]
  except ImportError:
-     AgentExecutor = None
+     try:
+         # <v1
+         from langchain.agents import AgentExecutor
+     except ImportError:
+         AgentExecutor = None
+

  DATA_FIELDS = {
      "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
@@ -221,12 +228,17 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
                  }
                  for prompt in prompts
              ]
-             set_data_normalized(
-                 span,
-                 SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                 normalized_messages,
-                 unpack=False,
+             scope = sentry_sdk.get_current_scope()
+             messages_data = truncate_and_annotate_messages(
+                 normalized_messages, span, scope
              )
+             if messages_data is not None:
+                 set_data_normalized(
+                     span,
+                     SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                     messages_data,
+                     unpack=False,
+                 )

      def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
          # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
@@ -278,13 +290,17 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
                      self._normalize_langchain_message(message)
                  )
              normalized_messages = normalize_message_roles(normalized_messages)
-
-             set_data_normalized(
-                 span,
-                 SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                 normalized_messages,
-                 unpack=False,
+             scope = sentry_sdk.get_current_scope()
+             messages_data = truncate_and_annotate_messages(
+                 normalized_messages, span, scope
              )
+             if messages_data is not None:
+                 set_data_normalized(
+                     span,
+                     SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                     messages_data,
+                     unpack=False,
+                 )

      def on_chat_model_end(self, response, *, run_id, **kwargs):
          # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any
@@ -758,12 +774,17 @@ def _wrap_agent_executor_invoke(f):
                  and integration.include_prompts
              ):
                  normalized_messages = normalize_message_roles([input])
-                 set_data_normalized(
-                     span,
-                     SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                     normalized_messages,
-                     unpack=False,
+                 scope = sentry_sdk.get_current_scope()
+                 messages_data = truncate_and_annotate_messages(
+                     normalized_messages, span, scope
                  )
+                 if messages_data is not None:
+                     set_data_normalized(
+                         span,
+                         SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                         messages_data,
+                         unpack=False,
+                     )

              output = result.get("output")
              if (
@@ -813,12 +834,17 @@ def _wrap_agent_executor_stream(f):
                  and integration.include_prompts
              ):
                  normalized_messages = normalize_message_roles([input])
-                 set_data_normalized(
-                     span,
-                     SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                     normalized_messages,
-                     unpack=False,
+                 scope = sentry_sdk.get_current_scope()
+                 messages_data = truncate_and_annotate_messages(
+                     normalized_messages, span, scope
                  )
+                 if messages_data is not None:
+                     set_data_normalized(
+                         span,
+                         SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                         messages_data,
+                         unpack=False,
+                     )

              # Run the agent
              result = f(self, *args, **kwargs)
sentry_sdk/integrations/langgraph.py CHANGED
@@ -2,7 +2,11 @@ from functools import wraps
  from typing import Any, Callable, List, Optional

  import sentry_sdk
- from sentry_sdk.ai.utils import set_data_normalized, normalize_message_roles
+ from sentry_sdk.ai.utils import (
+     set_data_normalized,
+     normalize_message_roles,
+     truncate_and_annotate_messages,
+ )
  from sentry_sdk.consts import OP, SPANDATA
  from sentry_sdk.integrations import DidNotEnable, Integration
  from sentry_sdk.scope import should_send_default_pii
@@ -181,12 +185,17 @@ def _wrap_pregel_invoke(f):
          input_messages = _parse_langgraph_messages(args[0])
          if input_messages:
              normalized_input_messages = normalize_message_roles(input_messages)
-             set_data_normalized(
-                 span,
-                 SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                 normalized_input_messages,
-                 unpack=False,
+             scope = sentry_sdk.get_current_scope()
+             messages_data = truncate_and_annotate_messages(
+                 normalized_input_messages, span, scope
              )
+             if messages_data is not None:
+                 set_data_normalized(
+                     span,
+                     SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                     messages_data,
+                     unpack=False,
+                 )

          result = f(self, *args, **kwargs)

@@ -232,12 +241,17 @@ def _wrap_pregel_ainvoke(f):
          input_messages = _parse_langgraph_messages(args[0])
          if input_messages:
              normalized_input_messages = normalize_message_roles(input_messages)
-             set_data_normalized(
-                 span,
-                 SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                 normalized_input_messages,
-                 unpack=False,
+             scope = sentry_sdk.get_current_scope()
+             messages_data = truncate_and_annotate_messages(
+                 normalized_input_messages, span, scope
              )
+             if messages_data is not None:
+                 set_data_normalized(
+                     span,
+                     SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                     messages_data,
+                     unpack=False,
+                 )

          result = await f(self, *args, **kwargs)

sentry_sdk/integrations/litellm.py CHANGED
@@ -3,7 +3,11 @@ from typing import TYPE_CHECKING
  import sentry_sdk
  from sentry_sdk import consts
  from sentry_sdk.ai.monitoring import record_token_usage
- from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized
+ from sentry_sdk.ai.utils import (
+     get_start_span_function,
+     set_data_normalized,
+     truncate_and_annotate_messages,
+ )
  from sentry_sdk.consts import SPANDATA
  from sentry_sdk.integrations import DidNotEnable, Integration
  from sentry_sdk.scope import should_send_default_pii
@@ -48,8 +52,11 @@ def _input_callback(kwargs):
          model = full_model
          provider = "unknown"

-     messages = kwargs.get("messages", [])
-     operation = "chat" if messages else "embeddings"
+     call_type = kwargs.get("call_type", None)
+     if call_type == "embedding":
+         operation = "embeddings"
+     else:
+         operation = "chat"

      # Start a new span/transaction
      span = get_start_span_function()(
@@ -71,10 +78,14 @@ def _input_callback(kwargs):
      set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)

      # Record messages if allowed
+     messages = kwargs.get("messages", [])
      if messages and should_send_default_pii() and integration.include_prompts:
-         set_data_normalized(
-             span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
-         )
+         scope = sentry_sdk.get_current_scope()
+         messages_data = truncate_and_annotate_messages(messages, span, scope)
+         if messages_data is not None:
+             set_data_normalized(
+                 span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages_data, unpack=False
+             )

      # Record other parameters
      params = {
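
The operation is now derived from litellm's call_type instead of being inferred from whether messages are present. A small standalone sketch of the equivalent decision (classify_operation is a stand-in name, not SDK API):

def classify_operation(kwargs):
    # Mirrors the new logic: embeddings are detected via litellm's call_type,
    # everything else is treated as chat.
    return "embeddings" if kwargs.get("call_type") == "embedding" else "chat"

assert classify_operation({"call_type": "embedding", "input": ["hello"]}) == "embeddings"
assert classify_operation({"call_type": "completion", "messages": [{"role": "user", "content": "hi"}]}) == "chat"
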