sentry-sdk 2.36.0__py2.py3-none-any.whl → 2.37.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sentry-sdk might be problematic. Click here for more details.

sentry_sdk/ai/utils.py CHANGED
@@ -1,30 +1,33 @@
1
+ import json
2
+
1
3
  from typing import TYPE_CHECKING
2
4
 
3
5
  if TYPE_CHECKING:
4
6
  from typing import Any
7
+ from sentry_sdk.tracing import Span
5
8
 
6
- from sentry_sdk.tracing import Span
7
9
  from sentry_sdk.utils import logger
8
10
 
9
11
 
10
12
  def _normalize_data(data, unpack=True):
11
13
  # type: (Any, bool) -> Any
12
-
13
14
  # convert pydantic data (e.g. OpenAI v1+) to json compatible format
14
15
  if hasattr(data, "model_dump"):
15
16
  try:
16
- return data.model_dump()
17
+ return _normalize_data(data.model_dump(), unpack=unpack)
17
18
  except Exception as e:
18
19
  logger.warning("Could not convert pydantic data to JSON: %s", e)
19
- return data
20
+ return data if isinstance(data, (int, float, bool, str)) else str(data)
21
+
20
22
  if isinstance(data, list):
21
23
  if unpack and len(data) == 1:
22
24
  return _normalize_data(data[0], unpack=unpack) # remove empty dimensions
23
25
  return list(_normalize_data(x, unpack=unpack) for x in data)
26
+
24
27
  if isinstance(data, dict):
25
28
  return {k: _normalize_data(v, unpack=unpack) for (k, v) in data.items()}
26
29
 
27
- return data
30
+ return data if isinstance(data, (int, float, bool, str)) else str(data)
28
31
 
29
32
 
30
33
  def set_data_normalized(span, key, value, unpack=True):
@@ -33,4 +36,4 @@ def set_data_normalized(span, key, value, unpack=True):
33
36
  if isinstance(normalized, (int, float, bool, str)):
34
37
  span.set_data(key, normalized)
35
38
  else:
36
- span.set_data(key, str(normalized))
39
+ span.set_data(key, json.dumps(normalized))
sentry_sdk/consts.py CHANGED
@@ -792,6 +792,7 @@ class OP:
792
792
  FUNCTION_AWS = "function.aws"
793
793
  FUNCTION_GCP = "function.gcp"
794
794
  GEN_AI_CHAT = "gen_ai.chat"
795
+ GEN_AI_CREATE_AGENT = "gen_ai.create_agent"
795
796
  GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
796
797
  GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
797
798
  GEN_AI_HANDOFF = "gen_ai.handoff"
@@ -1329,4 +1330,4 @@ DEFAULT_OPTIONS = _get_default_options()
1329
1330
  del _get_default_options
1330
1331
 
1331
1332
 
1332
- VERSION = "2.36.0"
1333
+ VERSION = "2.37.1"
@@ -95,6 +95,7 @@ _AUTO_ENABLING_INTEGRATIONS = [
95
95
  "sentry_sdk.integrations.huey.HueyIntegration",
96
96
  "sentry_sdk.integrations.huggingface_hub.HuggingfaceHubIntegration",
97
97
  "sentry_sdk.integrations.langchain.LangchainIntegration",
98
+ "sentry_sdk.integrations.langgraph.LanggraphIntegration",
98
99
  "sentry_sdk.integrations.litestar.LitestarIntegration",
99
100
  "sentry_sdk.integrations.loguru.LoguruIntegration",
100
101
  "sentry_sdk.integrations.openai.OpenAIIntegration",
@@ -142,6 +143,7 @@ _MIN_VERSIONS = {
142
143
  "grpc": (1, 32, 0), # grpcio
143
144
  "huggingface_hub": (0, 22),
144
145
  "langchain": (0, 1, 0),
146
+ "langgraph": (0, 6, 6),
145
147
  "launchdarkly": (9, 8, 0),
146
148
  "loguru": (0, 7, 0),
147
149
  "openai": (1, 0, 0),
@@ -29,7 +29,6 @@ if TYPE_CHECKING:
29
29
 
30
30
 
31
31
  try:
32
- from langchain.agents import AgentExecutor
33
32
  from langchain_core.agents import AgentFinish
34
33
  from langchain_core.callbacks import (
35
34
  BaseCallbackHandler,
@@ -44,6 +43,11 @@ except ImportError:
44
43
  raise DidNotEnable("langchain not installed")
45
44
 
46
45
 
46
+ try:
47
+ from langchain.agents import AgentExecutor
48
+ except ImportError:
49
+ AgentExecutor = None
50
+
47
51
  DATA_FIELDS = {
48
52
  "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
49
53
  "function_call": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
@@ -51,7 +55,6 @@ DATA_FIELDS = {
51
55
  "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
52
56
  "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
53
57
  "tool_calls": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
54
- "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
55
58
  "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
56
59
  "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
57
60
  }
@@ -203,8 +206,12 @@ class SentryLangchainCallback(BaseCallbackHandler): # type: ignore[misc]
203
206
  if key in all_params and all_params[key] is not None:
204
207
  set_data_normalized(span, attribute, all_params[key], unpack=False)
205
208
 
209
+ _set_tools_on_span(span, all_params.get("tools"))
210
+
206
211
  if should_send_default_pii() and self.include_prompts:
207
- set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts)
212
+ set_data_normalized(
213
+ span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts, unpack=False
214
+ )
208
215
 
209
216
  def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
210
217
  # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
@@ -246,14 +253,20 @@ class SentryLangchainCallback(BaseCallbackHandler): # type: ignore[misc]
246
253
  if key in all_params and all_params[key] is not None:
247
254
  set_data_normalized(span, attribute, all_params[key], unpack=False)
248
255
 
256
+ _set_tools_on_span(span, all_params.get("tools"))
257
+
249
258
  if should_send_default_pii() and self.include_prompts:
259
+ normalized_messages = []
260
+ for list_ in messages:
261
+ for message in list_:
262
+ normalized_messages.append(
263
+ self._normalize_langchain_message(message)
264
+ )
250
265
  set_data_normalized(
251
266
  span,
252
267
  SPANDATA.GEN_AI_REQUEST_MESSAGES,
253
- [
254
- [self._normalize_langchain_message(x) for x in list_]
255
- for list_ in messages
256
- ],
268
+ normalized_messages,
269
+ unpack=False,
257
270
  )
258
271
 
259
272
  def on_chat_model_end(self, response, *, run_id, **kwargs):
@@ -351,9 +364,7 @@ class SentryLangchainCallback(BaseCallbackHandler): # type: ignore[misc]
351
364
 
352
365
  if should_send_default_pii() and self.include_prompts:
353
366
  set_data_normalized(
354
- span,
355
- SPANDATA.GEN_AI_RESPONSE_TEXT,
356
- finish.return_values.items(),
367
+ span, SPANDATA.GEN_AI_RESPONSE_TEXT, finish.return_values.items()
357
368
  )
358
369
 
359
370
  self._exit_span(span_data, run_id)
@@ -473,13 +484,11 @@ def _get_token_usage(obj):
473
484
  if usage is not None:
474
485
  return usage
475
486
 
476
- # check for usage in the object itself
477
487
  for name in possible_names:
478
488
  usage = _get_value(obj, name)
479
489
  if usage is not None:
480
490
  return usage
481
491
 
482
- # no usage found anywhere
483
492
  return None
484
493
 
485
494
 
@@ -531,6 +540,87 @@ def _get_request_data(obj, args, kwargs):
531
540
  return (agent_name, tools)
532
541
 
533
542
 
543
+ def _simplify_langchain_tools(tools):
544
+ # type: (Any) -> Optional[List[Any]]
545
+ """Parse and simplify tools into a cleaner format."""
546
+ if not tools:
547
+ return None
548
+
549
+ if not isinstance(tools, (list, tuple)):
550
+ return None
551
+
552
+ simplified_tools = []
553
+ for tool in tools:
554
+ try:
555
+ if isinstance(tool, dict):
556
+
557
+ if "function" in tool and isinstance(tool["function"], dict):
558
+ func = tool["function"]
559
+ simplified_tool = {
560
+ "name": func.get("name"),
561
+ "description": func.get("description"),
562
+ }
563
+ if simplified_tool["name"]:
564
+ simplified_tools.append(simplified_tool)
565
+ elif "name" in tool:
566
+ simplified_tool = {
567
+ "name": tool.get("name"),
568
+ "description": tool.get("description"),
569
+ }
570
+ simplified_tools.append(simplified_tool)
571
+ else:
572
+ name = (
573
+ tool.get("name")
574
+ or tool.get("tool_name")
575
+ or tool.get("function_name")
576
+ )
577
+ if name:
578
+ simplified_tools.append(
579
+ {
580
+ "name": name,
581
+ "description": tool.get("description")
582
+ or tool.get("desc"),
583
+ }
584
+ )
585
+ elif hasattr(tool, "name"):
586
+ simplified_tool = {
587
+ "name": getattr(tool, "name", None),
588
+ "description": getattr(tool, "description", None)
589
+ or getattr(tool, "desc", None),
590
+ }
591
+ if simplified_tool["name"]:
592
+ simplified_tools.append(simplified_tool)
593
+ elif hasattr(tool, "__name__"):
594
+ simplified_tools.append(
595
+ {
596
+ "name": tool.__name__,
597
+ "description": getattr(tool, "__doc__", None),
598
+ }
599
+ )
600
+ else:
601
+ tool_str = str(tool)
602
+ if tool_str and tool_str != "":
603
+ simplified_tools.append({"name": tool_str, "description": None})
604
+ except Exception:
605
+ continue
606
+
607
+ return simplified_tools if simplified_tools else None
608
+
609
+
610
def _set_tools_on_span(span, tools):
    # type: (Span, Any) -> None
    """Attach the simplified available-tools list to *span*, when given."""
    if tools is None:
        return

    parsed = _simplify_langchain_tools(tools)
    if not parsed:
        return

    set_data_normalized(
        span,
        SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
        parsed,
        unpack=False,
    )
622
+
623
+
534
624
  def _wrap_configure(f):
535
625
  # type: (Callable[..., Any]) -> Callable[..., Any]
536
626
 
@@ -601,7 +691,7 @@ def _wrap_configure(f):
601
691
  ]
602
692
  elif isinstance(local_callbacks, BaseCallbackHandler):
603
693
  local_callbacks = [local_callbacks, sentry_handler]
604
- else: # local_callbacks is a list
694
+ else:
605
695
  local_callbacks = [*local_callbacks, sentry_handler]
606
696
 
607
697
  return f(
@@ -638,10 +728,7 @@ def _wrap_agent_executor_invoke(f):
638
728
  span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
639
729
  span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
640
730
 
641
- if tools:
642
- set_data_normalized(
643
- span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False
644
- )
731
+ _set_tools_on_span(span, tools)
645
732
 
646
733
  # Run the agent
647
734
  result = f(self, *args, **kwargs)
@@ -653,11 +740,7 @@ def _wrap_agent_executor_invoke(f):
653
740
  and integration.include_prompts
654
741
  ):
655
742
  set_data_normalized(
656
- span,
657
- SPANDATA.GEN_AI_REQUEST_MESSAGES,
658
- [
659
- input,
660
- ],
743
+ span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False
661
744
  )
662
745
 
663
746
  output = result.get("output")
@@ -666,7 +749,7 @@ def _wrap_agent_executor_invoke(f):
666
749
  and should_send_default_pii()
667
750
  and integration.include_prompts
668
751
  ):
669
- span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
752
+ set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
670
753
 
671
754
  return result
672
755
 
@@ -698,10 +781,7 @@ def _wrap_agent_executor_stream(f):
698
781
  span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
699
782
  span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
700
783
 
701
- if tools:
702
- set_data_normalized(
703
- span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False
704
- )
784
+ _set_tools_on_span(span, tools)
705
785
 
706
786
  input = args[0].get("input") if len(args) >= 1 else None
707
787
  if (
@@ -710,11 +790,7 @@ def _wrap_agent_executor_stream(f):
710
790
  and integration.include_prompts
711
791
  ):
712
792
  set_data_normalized(
713
- span,
714
- SPANDATA.GEN_AI_REQUEST_MESSAGES,
715
- [
716
- input,
717
- ],
793
+ span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False
718
794
  )
719
795
 
720
796
  # Run the agent
@@ -737,7 +813,7 @@ def _wrap_agent_executor_stream(f):
737
813
  and should_send_default_pii()
738
814
  and integration.include_prompts
739
815
  ):
740
- span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
816
+ set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
741
817
 
742
818
  span.__exit__(None, None, None)
743
819
 
@@ -756,7 +832,7 @@ def _wrap_agent_executor_stream(f):
756
832
  and should_send_default_pii()
757
833
  and integration.include_prompts
758
834
  ):
759
- span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
835
+ set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
760
836
 
761
837
  span.__exit__(None, None, None)
762
838
 
@@ -0,0 +1,321 @@
1
+ from functools import wraps
2
+ from typing import Any, Callable, List, Optional
3
+
4
+ import sentry_sdk
5
+ from sentry_sdk.ai.utils import set_data_normalized
6
+ from sentry_sdk.consts import OP, SPANDATA
7
+ from sentry_sdk.integrations import DidNotEnable, Integration
8
+ from sentry_sdk.scope import should_send_default_pii
9
+ from sentry_sdk.utils import safe_serialize
10
+
11
+
12
+ try:
13
+ from langgraph.graph import StateGraph
14
+ from langgraph.pregel import Pregel
15
+ except ImportError:
16
+ raise DidNotEnable("langgraph not installed")
17
+
18
+
19
class LanggraphIntegration(Integration):
    """Sentry integration for LangGraph agent instrumentation."""

    # Identifier under which this integration is registered with the client.
    identifier = "langgraph"
    # Span origin stamped on every span created by this integration.
    origin = f"auto.ai.{identifier}"

    def __init__(self, include_prompts=True):
        # type: (LanggraphIntegration, bool) -> None
        # When True (and send_default_pii is enabled), prompt/response message
        # content is attached to spans; set False to omit message payloads.
        self.include_prompts = include_prompts

    @staticmethod
    def setup_once():
        # type: () -> None
        # LangGraph lets users create agents using a StateGraph or the Functional API.
        # StateGraphs are then compiled to a CompiledStateGraph. Both CompiledStateGraph and
        # the functional API execute on a Pregel instance. Pregel is the runtime for the graph
        # and the invocation happens on Pregel, so patching the invoke methods takes care of both.
        # The streaming methods are not patched, because due to some internal reasons, LangGraph
        # will automatically patch the streaming methods to run through invoke, and by doing this
        # we prevent duplicate spans for invocations.
        StateGraph.compile = _wrap_state_graph_compile(StateGraph.compile)
        # hasattr guards keep setup working if a langgraph version lacks one
        # of the invoke entry points.
        if hasattr(Pregel, "invoke"):
            Pregel.invoke = _wrap_pregel_invoke(Pregel.invoke)
        if hasattr(Pregel, "ainvoke"):
            Pregel.ainvoke = _wrap_pregel_ainvoke(Pregel.ainvoke)
42
+
43
+
44
+ def _get_graph_name(graph_obj):
45
+ # type: (Any) -> Optional[str]
46
+ for attr in ["name", "graph_name", "__name__", "_name"]:
47
+ if hasattr(graph_obj, attr):
48
+ name = getattr(graph_obj, attr)
49
+ if name and isinstance(name, str):
50
+ return name
51
+ return None
52
+
53
+
54
+ def _normalize_langgraph_message(message):
55
+ # type: (Any) -> Any
56
+ if not hasattr(message, "content"):
57
+ return None
58
+
59
+ parsed = {"role": getattr(message, "type", None), "content": message.content}
60
+
61
+ for attr in ["name", "tool_calls", "function_call", "tool_call_id"]:
62
+ if hasattr(message, attr):
63
+ value = getattr(message, attr)
64
+ if value is not None:
65
+ parsed[attr] = value
66
+
67
+ return parsed
68
+
69
+
70
def _parse_langgraph_messages(state):
    # type: (Any) -> Optional[List[Any]]
    """Pull the message list out of a graph state and normalize each entry.

    Supports dict states, objects with a ``messages`` attribute, and
    mapping-like objects exposing ``get``. Returns None when no usable
    messages are found.
    """
    if not state:
        return None

    raw_messages = None
    if isinstance(state, dict):
        raw_messages = state.get("messages")
    elif hasattr(state, "messages"):
        raw_messages = state.messages
    elif callable(getattr(state, "get", None)):
        try:
            raw_messages = state.get("messages")
        except Exception:
            pass

    if not raw_messages or not isinstance(raw_messages, (list, tuple)):
        return None

    parsed = []
    for raw in raw_messages:
        # Best-effort: skip any message that fails to normalize.
        try:
            entry = _normalize_langgraph_message(raw)
        except Exception:
            continue
        if entry:
            parsed.append(entry)

    return parsed or None
100
+
101
+
102
def _wrap_state_graph_compile(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap StateGraph.compile to emit a gen_ai.create_agent span."""

    @wraps(f)
    def new_compile(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
        if integration is None:
            # Integration disabled: call through untouched.
            return f(self, *args, **kwargs)
        with sentry_sdk.start_span(
            op=OP.GEN_AI_CREATE_AGENT,
            origin=LanggraphIntegration.origin,
        ) as span:
            compiled_graph = f(self, *args, **kwargs)

            # Agent name comes from the compiled graph; may be None.
            compiled_graph_name = getattr(compiled_graph, "name", None)
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "create_agent")
            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, compiled_graph_name)

            if compiled_graph_name:
                span.description = f"create_agent {compiled_graph_name}"
            else:
                span.description = "create_agent"

            if kwargs.get("model", None) is not None:
                span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, kwargs.get("model"))

            # Walk the compiled graph's node map looking for a "tools" node
            # and record its tool names, if the expected structure is present.
            # NOTE(review): assumes the prebuilt-agent layout where tools live
            # under nodes["tools"].data.tools_by_name — other graph shapes
            # simply record no tools.
            tools = None
            get_graph = getattr(compiled_graph, "get_graph", None)
            if get_graph and callable(get_graph):
                graph_obj = compiled_graph.get_graph()
                nodes = getattr(graph_obj, "nodes", None)
                if nodes and isinstance(nodes, dict):
                    tools_node = nodes.get("tools")
                    if tools_node:
                        data = getattr(tools_node, "data", None)
                        if data and hasattr(data, "tools_by_name"):
                            tools = list(data.tools_by_name.keys())

            if tools is not None:
                span.set_data(SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools)

            return compiled_graph

    return new_compile
146
+
147
+
148
def _wrap_pregel_invoke(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap Pregel.invoke to emit a gen_ai.invoke_agent span."""

    @wraps(f)
    def new_invoke(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
        if integration is None:
            # Integration disabled: call through untouched.
            return f(self, *args, **kwargs)

        graph_name = _get_graph_name(self)
        span_name = (
            f"invoke_agent {graph_name}".strip() if graph_name else "invoke_agent"
        )

        with sentry_sdk.start_span(
            op=OP.GEN_AI_INVOKE_AGENT,
            name=span_name,
            origin=LanggraphIntegration.origin,
        ) as span:
            if graph_name:
                span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name)
                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name)

            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

            # Store input messages to later compare with output
            # (only when PII sending and prompt capture are both enabled).
            input_messages = None
            if (
                len(args) > 0
                and should_send_default_pii()
                and integration.include_prompts
            ):
                input_messages = _parse_langgraph_messages(args[0])
                if input_messages:
                    set_data_normalized(
                        span,
                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
                        input_messages,
                        unpack=False,
                    )

            result = f(self, *args, **kwargs)

            # Record response text / tool calls derived from the new messages.
            _set_response_attributes(span, input_messages, result, integration)

            return result

    return new_invoke
197
+
198
+
199
def _wrap_pregel_ainvoke(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Async counterpart of _wrap_pregel_invoke for Pregel.ainvoke."""

    @wraps(f)
    async def new_ainvoke(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LanggraphIntegration)
        if integration is None:
            # Integration disabled: call through untouched.
            return await f(self, *args, **kwargs)

        graph_name = _get_graph_name(self)
        span_name = (
            f"invoke_agent {graph_name}".strip() if graph_name else "invoke_agent"
        )

        with sentry_sdk.start_span(
            op=OP.GEN_AI_INVOKE_AGENT,
            name=span_name,
            origin=LanggraphIntegration.origin,
        ) as span:
            if graph_name:
                span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name)
                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name)

            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

            # Capture input messages (only with PII + prompt capture enabled)
            # so the response handler can diff out the newly added messages.
            input_messages = None
            if (
                len(args) > 0
                and should_send_default_pii()
                and integration.include_prompts
            ):
                input_messages = _parse_langgraph_messages(args[0])
                if input_messages:
                    set_data_normalized(
                        span,
                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
                        input_messages,
                        unpack=False,
                    )

            result = await f(self, *args, **kwargs)

            # Record response text / tool calls derived from the new messages.
            _set_response_attributes(span, input_messages, result, integration)

            return result

    return new_ainvoke
247
+
248
+
249
+ def _get_new_messages(input_messages, output_messages):
250
+ # type: (Optional[List[Any]], Optional[List[Any]]) -> Optional[List[Any]]
251
+ """Extract only the new messages added during this invocation."""
252
+ if not output_messages:
253
+ return None
254
+
255
+ if not input_messages:
256
+ return output_messages
257
+
258
+ # only return the new messages, aka the output messages that are not in the input messages
259
+ input_count = len(input_messages)
260
+ new_messages = (
261
+ output_messages[input_count:] if len(output_messages) > input_count else []
262
+ )
263
+
264
+ return new_messages if new_messages else None
265
+
266
+
267
+ def _extract_llm_response_text(messages):
268
+ # type: (Optional[List[Any]]) -> Optional[str]
269
+ if not messages:
270
+ return None
271
+
272
+ for message in reversed(messages):
273
+ if isinstance(message, dict):
274
+ role = message.get("role")
275
+ if role in ["assistant", "ai"]:
276
+ content = message.get("content")
277
+ if content and isinstance(content, str):
278
+ return content
279
+
280
+ return None
281
+
282
+
283
+ def _extract_tool_calls(messages):
284
+ # type: (Optional[List[Any]]) -> Optional[List[Any]]
285
+ if not messages:
286
+ return None
287
+
288
+ tool_calls = []
289
+ for message in messages:
290
+ if isinstance(message, dict):
291
+ msg_tool_calls = message.get("tool_calls")
292
+ if msg_tool_calls and isinstance(msg_tool_calls, list):
293
+ tool_calls.extend(msg_tool_calls)
294
+
295
+ return tool_calls if tool_calls else None
296
+
297
+
298
def _set_response_attributes(span, input_messages, result, integration):
    # type: (Any, Optional[List[Any]], Any, LanggraphIntegration) -> None
    """Attach response text and tool-call data to an invoke_agent span.

    No-op unless both PII sending and prompt capture are enabled.
    """
    if not (should_send_default_pii() and integration.include_prompts):
        return

    # Diff the result state against the captured input to isolate the
    # messages produced during this invocation.
    parsed_response_messages = _parse_langgraph_messages(result)
    new_messages = _get_new_messages(input_messages, parsed_response_messages)

    # Prefer a plain assistant text; fall back to the raw new messages,
    # then to the whole result object as a last resort.
    llm_response_text = _extract_llm_response_text(new_messages)
    if llm_response_text:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, llm_response_text)
    elif new_messages:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, new_messages)
    else:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, result)

    # Tool calls are serialized up front so nested structures survive
    # normalization intact.
    tool_calls = _extract_tool_calls(new_messages)
    if tool_calls:
        set_data_normalized(
            span,
            SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
            safe_serialize(tool_calls),
            unpack=False,
        )
@@ -179,7 +179,9 @@ def _set_input_data(span, kwargs, operation, integration):
179
179
  and should_send_default_pii()
180
180
  and integration.include_prompts
181
181
  ):
182
- set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages)
182
+ set_data_normalized(
183
+ span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
184
+ )
183
185
 
184
186
  # Input attributes: Common
185
187
  set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
@@ -227,25 +229,46 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
227
229
  if should_send_default_pii() and integration.include_prompts:
228
230
  response_text = [choice.message.dict() for choice in response.choices]
229
231
  if len(response_text) > 0:
230
- set_data_normalized(
231
- span,
232
- SPANDATA.GEN_AI_RESPONSE_TEXT,
233
- safe_serialize(response_text),
234
- )
232
+ set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_text)
233
+
235
234
  _calculate_token_usage(messages, response, span, None, integration.count_tokens)
235
+
236
236
  if finish_span:
237
237
  span.__exit__(None, None, None)
238
238
 
239
239
  elif hasattr(response, "output"):
240
240
  if should_send_default_pii() and integration.include_prompts:
241
- response_text = [item.to_dict() for item in response.output]
242
- if len(response_text) > 0:
241
+ output_messages = {
242
+ "response": [],
243
+ "tool": [],
244
+ } # type: (dict[str, list[Any]])
245
+
246
+ for output in response.output:
247
+ if output.type == "function_call":
248
+ output_messages["tool"].append(output.dict())
249
+ elif output.type == "message":
250
+ for output_message in output.content:
251
+ try:
252
+ output_messages["response"].append(output_message.text)
253
+ except AttributeError:
254
+ # Unknown output message type, just return the json
255
+ output_messages["response"].append(output_message.dict())
256
+
257
+ if len(output_messages["tool"]) > 0:
243
258
  set_data_normalized(
244
259
  span,
245
- SPANDATA.GEN_AI_RESPONSE_TEXT,
246
- safe_serialize(response_text),
260
+ SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
261
+ output_messages["tool"],
262
+ unpack=False,
263
+ )
264
+
265
+ if len(output_messages["response"]) > 0:
266
+ set_data_normalized(
267
+ span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
247
268
  )
269
+
248
270
  _calculate_token_usage(messages, response, span, None, integration.count_tokens)
271
+
249
272
  if finish_span:
250
273
  span.__exit__(None, None, None)
251
274
 
@@ -1,4 +1,5 @@
1
1
  import sentry_sdk
2
+ from sentry_sdk.ai.utils import set_data_normalized
2
3
  from sentry_sdk.consts import SPANDATA
3
4
  from sentry_sdk.integrations import DidNotEnable
4
5
  from sentry_sdk.scope import should_send_default_pii
@@ -127,7 +128,9 @@ def _set_input_data(span, get_response_kwargs):
127
128
  if len(messages) > 0:
128
129
  request_messages.append({"role": role, "content": messages})
129
130
 
130
- span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(request_messages))
131
+ set_data_normalized(
132
+ span, SPANDATA.GEN_AI_REQUEST_MESSAGES, request_messages, unpack=False
133
+ )
131
134
 
132
135
 
133
136
  def _set_output_data(span, result):
@@ -157,6 +160,6 @@ def _set_output_data(span, result):
157
160
  )
158
161
 
159
162
  if len(output_messages["response"]) > 0:
160
- span.set_data(
161
- SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(output_messages["response"])
163
+ set_data_normalized(
164
+ span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
162
165
  )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sentry-sdk
3
- Version: 2.36.0
3
+ Version: 2.37.1
4
4
  Summary: Python client for Sentry (https://sentry.io)
5
5
  Home-page: https://github.com/getsentry/sentry-python
6
6
  Author: Sentry Team and Contributors
@@ -72,6 +72,8 @@ Provides-Extra: huggingface-hub
72
72
  Requires-Dist: huggingface_hub>=0.22; extra == "huggingface-hub"
73
73
  Provides-Extra: langchain
74
74
  Requires-Dist: langchain>=0.0.210; extra == "langchain"
75
+ Provides-Extra: langgraph
76
+ Requires-Dist: langgraph>=0.6.6; extra == "langgraph"
75
77
  Provides-Extra: launchdarkly
76
78
  Requires-Dist: launchdarkly-server-sdk>=9.8.0; extra == "launchdarkly"
77
79
  Provides-Extra: litestar
@@ -9,7 +9,7 @@ sentry_sdk/_werkzeug.py,sha256=m3GPf-jHd8v3eVOfBHaKw5f0uHoLkXrSO1EcY-8EisY,3734
9
9
  sentry_sdk/api.py,sha256=OkwQ2tA5YASJ77wLOteUdv_woPF4wL_JTOAMxe9z8k4,15282
10
10
  sentry_sdk/attachments.py,sha256=0Dylhm065O6hNFjB40fWCd5Hg4qWSXndmi1TPWglZkI,3109
11
11
  sentry_sdk/client.py,sha256=oQcolwFdLvuX4huUaCcpgABy3M5Yb4IhzymlzyrqfkE,38860
12
- sentry_sdk/consts.py,sha256=uw1W8s4s2lY_9fXS3EqsRXwV_95ArGu7ZIQCijcnmAM,49767
12
+ sentry_sdk/consts.py,sha256=cmxQIqENDy4786iER8LUoa7KncY9epWPsRnCecLM28s,49815
13
13
  sentry_sdk/debug.py,sha256=ddBehQlAuQC1sg1XO-N4N3diZ0x0iT5RWJwFdrtcsjw,1019
14
14
  sentry_sdk/envelope.py,sha256=Mgcib0uLm_5tSVzOrznRLdK9B3CjQ6TEgM1ZIZIfjWo,10355
15
15
  sentry_sdk/feature_flags.py,sha256=99JRig6TBkrkBzVCKqYcmVgjsuA_Hk-ul7jFHGhJplc,2233
@@ -32,12 +32,12 @@ sentry_sdk/utils.py,sha256=Ys7lnnvXZMIR9dcoT30CVxpUx2NnZifSy-TUNrCtMQA,61575
32
32
  sentry_sdk/worker.py,sha256=VSMaigRMbInVyupSFpBC42bft2oIViea-0C_d9ThnIo,4464
33
33
  sentry_sdk/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
34
  sentry_sdk/ai/monitoring.py,sha256=bS_KneWCAL9ehml5XiyficoPVx4DUUG6acbH3cjP3I8,5057
35
- sentry_sdk/ai/utils.py,sha256=11PMqGCfEzAVrI2aSx-rdCl0dNYFzUi0bio6L8iM3kU,1174
35
+ sentry_sdk/ai/utils.py,sha256=JPK6EM0ZcjWJU1IP2k40Jxf0yD_-ox_FlYxVIn7LVR4,1351
36
36
  sentry_sdk/crons/__init__.py,sha256=3Zt6g1-pZZ12uRKKsC8QLm3XgJ4K1VYxgVpNNUygOZY,221
37
37
  sentry_sdk/crons/api.py,sha256=mk-UB8Im2LU2rJFdE-TV302EaKnf8kAjwEL0bIV0Hzc,1767
38
38
  sentry_sdk/crons/consts.py,sha256=dXqJk5meBSu5rjlGpqAOlkpACnuUi7svQnAFoy1ZNUU,87
39
39
  sentry_sdk/crons/decorator.py,sha256=UrjeIqBCbvsuKrfjGkKJbbLBvjw2TQvDWcTO7WwAmrI,3913
40
- sentry_sdk/integrations/__init__.py,sha256=DHt4Ei3sOaEBdTjA4mEpc7pwhS8kGcQgViAZ7DcNzqU,10249
40
+ sentry_sdk/integrations/__init__.py,sha256=NNRTFZUcQacKGRCWT8IuVZk2wtifEhnQ34-6mHuekvw,10339
41
41
  sentry_sdk/integrations/_asgi_common.py,sha256=Ypg7IctB3iPPY60ebVlzChzgT8GeGpZ0YH8VvJNDlEY,3187
42
42
  sentry_sdk/integrations/_wsgi_common.py,sha256=A1-X7l1pZCcrbUhRHkmdKiK_EemEZjn7xToJIvlEuFM,7558
43
43
  sentry_sdk/integrations/aiohttp.py,sha256=_rfDKx1arvVQwcC20vh7HG80p8XtgzqKB3iBuPYZy8A,12895
@@ -71,13 +71,14 @@ sentry_sdk/integrations/graphene.py,sha256=I6ZJ8Apd9dO9XPVvZY7I46-v1eXOW1C1rAkWw
71
71
  sentry_sdk/integrations/httpx.py,sha256=WwUulqzBLoGGqWUUdQg_MThwQUKzBXnA-m3g_1GOpCE,5866
72
72
  sentry_sdk/integrations/huey.py,sha256=wlyxjeWqqJp1X5S3neD5FiZjXcyznm1dl8_u1wIo76U,5443
73
73
  sentry_sdk/integrations/huggingface_hub.py,sha256=ypTn17T0vufQwi7ODXONFkB8fMjUrU5b4Q6JZ34bnA4,6717
74
- sentry_sdk/integrations/langchain.py,sha256=8ht-fXKb9muG3HEnKhuSvna2G8f2_M9BBzm1jXjnqiQ,26413
74
+ sentry_sdk/integrations/langchain.py,sha256=-9uNE_y-0LWJcQ3UWnSTr2g55UYJEsqQx1aMQqRrtAA,29260
75
+ sentry_sdk/integrations/langgraph.py,sha256=YyDDc14gFCNVuqVmKwX8GRQ17T17WOx2SqqD4IHROPs,11015
75
76
  sentry_sdk/integrations/launchdarkly.py,sha256=bvtExuj68xPXZFsQeWTDR-ZBqP087tPuVzP1bNAOZHc,1935
76
77
  sentry_sdk/integrations/litestar.py,sha256=jao0f8v5JQagkBg15dUJTdWGPxpS3LmOV301-lwGkGc,11815
77
78
  sentry_sdk/integrations/logging.py,sha256=4JC2ehLqd5Tz_rad8YVb9KhZnPcDzLxLh-AjopyNVEc,13905
78
79
  sentry_sdk/integrations/loguru.py,sha256=fgivPdQn3rmsMeInUd2wbNlbXPAH9MKhjaqytRVKnsI,6215
79
80
  sentry_sdk/integrations/modules.py,sha256=vzLx3Erg77Vl4mnUvAgTg_3teAuWy7zylFpAidBI9I0,820
80
- sentry_sdk/integrations/openai.py,sha256=EQFY04x2BN0RZDbLvuOcg7OJ8mO2hHyzlDFx8tN_npg,23049
81
+ sentry_sdk/integrations/openai.py,sha256=MtvS7FM41etRn5kPqb37qSflo876L8mcV2skEFwBFlI,23890
81
82
  sentry_sdk/integrations/openfeature.py,sha256=-vvdrN4fK0Xhu2ip41bkPIPEqdzv8xzmLu9wRlI2xPA,1131
82
83
  sentry_sdk/integrations/pure_eval.py,sha256=OvT76XvllQ_J6ABu3jVNU6KD2QAxnXMtTZ7hqhXNhpY,4581
83
84
  sentry_sdk/integrations/pymongo.py,sha256=cPpMGEbXHlV6HTHgmIDL1F-x3w7ZMROXVb4eUhLs3bw,6380
@@ -123,7 +124,7 @@ sentry_sdk/integrations/grpc/aio/client.py,sha256=csOwlJb7fg9fBnzeNHxr-qpZEmU97I
123
124
  sentry_sdk/integrations/grpc/aio/server.py,sha256=SCkdikPZRdWyrlnZewsSGpPk4v6AsdSApVAbO-lf_Lk,4019
124
125
  sentry_sdk/integrations/openai_agents/__init__.py,sha256=-ydqG0sFIrvJlT9JHO58EZpCAzyy9J59Av8dxn0fHuw,1424
125
126
  sentry_sdk/integrations/openai_agents/consts.py,sha256=PTb3vlqkuMPktu21ALK72o5WMIX4-cewTEiTRdHKFdQ,38
126
- sentry_sdk/integrations/openai_agents/utils.py,sha256=ZtsID9kIF7pUYRqzJcGrtnhJZ838DxO2G7yhPdTHRUc,5499
127
+ sentry_sdk/integrations/openai_agents/utils.py,sha256=7OiJc2BW9yvuVZBd8o7A6sWq0jy2ri9dWwvqMjJG2cw,5571
127
128
  sentry_sdk/integrations/openai_agents/patches/__init__.py,sha256=I7C9JZ70Mf8PV3wPdFsxTqvcYl4TYUgSZYfNU2Spb7Y,231
128
129
  sentry_sdk/integrations/openai_agents/patches/agent_run.py,sha256=qPmZ--UMQpExxYGEbaDJ7tY_H9VQ6gv0lpim3_impWk,5733
129
130
  sentry_sdk/integrations/openai_agents/patches/models.py,sha256=DtwqCmSsYFlhRZquKM2jiTOnnAg97eyCTtJYZkWqdww,1405
@@ -159,9 +160,9 @@ sentry_sdk/profiler/__init__.py,sha256=3PI3bHk9RSkkOXZKN84DDedk_7M65EiqqaIGo-DYs
159
160
  sentry_sdk/profiler/continuous_profiler.py,sha256=s0DHkj3RZYRg9HnQQC0G44ku6DaFqRy30fZTMtTYvIs,22828
160
161
  sentry_sdk/profiler/transaction_profiler.py,sha256=e3MsUqs-YIp6-nmzpmBYGoWWIF7RyuSGu24Dj-8GXAU,27970
161
162
  sentry_sdk/profiler/utils.py,sha256=G5s4tYai9ATJqcHrQ3bOIxlK6jIaHzELrDtU5k3N4HI,6556
162
- sentry_sdk-2.36.0.dist-info/licenses/LICENSE,sha256=KhQNZg9GKBL6KQvHQNBGMxJsXsRdhLebVp4Sew7t3Qs,1093
163
- sentry_sdk-2.36.0.dist-info/METADATA,sha256=KWUgID6zOzPsptXdO2gMhSTw0JPnGtzm4RRYNW6FXyc,10278
164
- sentry_sdk-2.36.0.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
165
- sentry_sdk-2.36.0.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
166
- sentry_sdk-2.36.0.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
167
- sentry_sdk-2.36.0.dist-info/RECORD,,
163
+ sentry_sdk-2.37.1.dist-info/licenses/LICENSE,sha256=KhQNZg9GKBL6KQvHQNBGMxJsXsRdhLebVp4Sew7t3Qs,1093
164
+ sentry_sdk-2.37.1.dist-info/METADATA,sha256=OkawBoE3U9CFHg01etuOhSLvQQMmVygWbXUdTvGcAUQ,10358
165
+ sentry_sdk-2.37.1.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
166
+ sentry_sdk-2.37.1.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
167
+ sentry_sdk-2.37.1.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
168
+ sentry_sdk-2.37.1.dist-info/RECORD,,