sentry-sdk 2.37.1__py2.py3-none-any.whl → 2.39.0__py2.py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Files changed (31)
  1. sentry_sdk/__init__.py +4 -2
  2. sentry_sdk/_types.py +1 -1
  3. sentry_sdk/ai/utils.py +11 -1
  4. sentry_sdk/consts.py +5 -2
  5. sentry_sdk/envelope.py +1 -1
  6. sentry_sdk/integrations/__init__.py +2 -1
  7. sentry_sdk/integrations/anthropic.py +65 -15
  8. sentry_sdk/integrations/asyncio.py +2 -0
  9. sentry_sdk/integrations/cohere.py +4 -0
  10. sentry_sdk/integrations/dedupe.py +19 -3
  11. sentry_sdk/integrations/gql.py +22 -5
  12. sentry_sdk/integrations/huggingface_hub.py +278 -81
  13. sentry_sdk/integrations/langchain.py +17 -14
  14. sentry_sdk/integrations/openai.py +13 -8
  15. sentry_sdk/integrations/openai_agents/patches/agent_run.py +4 -4
  16. sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +2 -2
  17. sentry_sdk/integrations/openai_agents/spans/execute_tool.py +1 -1
  18. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +50 -6
  19. sentry_sdk/integrations/openai_agents/utils.py +3 -10
  20. sentry_sdk/integrations/threading.py +1 -1
  21. sentry_sdk/profiler/continuous_profiler.py +13 -3
  22. sentry_sdk/serializer.py +12 -1
  23. sentry_sdk/tracing.py +3 -3
  24. sentry_sdk/tracing_utils.py +32 -23
  25. sentry_sdk/utils.py +6 -0
  26. {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.39.0.dist-info}/METADATA +1 -1
  27. {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.39.0.dist-info}/RECORD +31 -31
  28. {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.39.0.dist-info}/WHEEL +0 -0
  29. {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.39.0.dist-info}/entry_points.txt +0 -0
  30. {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.39.0.dist-info}/licenses/LICENSE +0 -0
  31. {sentry_sdk-2.37.1.dist-info → sentry_sdk-2.39.0.dist-info}/top_level.txt +0 -0

sentry_sdk/integrations/huggingface_hub.py

@@ -1,24 +1,25 @@
+import inspect
 from functools import wraps
 
-from sentry_sdk import consts
+import sentry_sdk
 from sentry_sdk.ai.monitoring import record_token_usage
 from sentry_sdk.ai.utils import set_data_normalized
-from sentry_sdk.consts import SPANDATA
-
-from typing import Any, Iterable, Callable
-
-import sentry_sdk
-from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing_utils import set_span_errored
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
 )
 
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Iterable
+
 try:
     import huggingface_hub.inference._client
-
-    from huggingface_hub import ChatCompletionStreamOutput, TextGenerationOutput
 except ImportError:
     raise DidNotEnable("Huggingface not installed")
 
@@ -34,15 +35,26 @@ class HuggingfaceHubIntegration(Integration):
     @staticmethod
     def setup_once():
         # type: () -> None
+
+        # Other tasks that can be called: https://huggingface.co/docs/huggingface_hub/guides/inference#supported-providers-and-tasks
        huggingface_hub.inference._client.InferenceClient.text_generation = (
-            _wrap_text_generation(
-                huggingface_hub.inference._client.InferenceClient.text_generation
+            _wrap_huggingface_task(
+                huggingface_hub.inference._client.InferenceClient.text_generation,
+                OP.GEN_AI_GENERATE_TEXT,
+            )
+        )
+        huggingface_hub.inference._client.InferenceClient.chat_completion = (
+            _wrap_huggingface_task(
+                huggingface_hub.inference._client.InferenceClient.chat_completion,
+                OP.GEN_AI_CHAT,
             )
         )
 
 
 def _capture_exception(exc):
     # type: (Any) -> None
+    set_span_errored()
+
     event, hint = event_from_exception(
         exc,
         client_options=sentry_sdk.get_client().options,
@@ -51,34 +63,70 @@ def _capture_exception(exc):
     sentry_sdk.capture_event(event, hint=hint)
 
 
-def _wrap_text_generation(f):
-    # type: (Callable[..., Any]) -> Callable[..., Any]
+def _wrap_huggingface_task(f, op):
+    # type: (Callable[..., Any], str) -> Callable[..., Any]
     @wraps(f)
-    def new_text_generation(*args, **kwargs):
+    def new_huggingface_task(*args, **kwargs):
         # type: (*Any, **Any) -> Any
         integration = sentry_sdk.get_client().get_integration(HuggingfaceHubIntegration)
         if integration is None:
             return f(*args, **kwargs)
 
+        prompt = None
         if "prompt" in kwargs:
             prompt = kwargs["prompt"]
+        elif "messages" in kwargs:
+            prompt = kwargs["messages"]
         elif len(args) >= 2:
-            kwargs["prompt"] = args[1]
-            prompt = kwargs["prompt"]
-            args = (args[0],) + args[2:]
-        else:
-            # invalid call, let it return error
+            if isinstance(args[1], str) or isinstance(args[1], list):
+                prompt = args[1]
+
+        if prompt is None:
+            # invalid call, dont instrument, let it return error
             return f(*args, **kwargs)
 
-        model = kwargs.get("model")
-        streaming = kwargs.get("stream")
+        client = args[0]
+        model = client.model or kwargs.get("model") or ""
+        operation_name = op.split(".")[-1]
 
         span = sentry_sdk.start_span(
-            op=consts.OP.HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE,
-            name="Text Generation",
+            op=op,
+            name=f"{operation_name} {model}",
             origin=HuggingfaceHubIntegration.origin,
         )
         span.__enter__()
+
+        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, operation_name)
+
+        if model:
+            span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
+
+        # Input attributes
+        if should_send_default_pii() and integration.include_prompts:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompt, unpack=False
+            )
+
+        attribute_mapping = {
+            "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
+            "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+            "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+            "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+            "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+            "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+            "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
+            "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
+        }
+
+        for attribute, span_attribute in attribute_mapping.items():
+            value = kwargs.get(attribute, None)
+            if value is not None:
+                if isinstance(value, (int, float, bool, str)):
+                    span.set_data(span_attribute, value)
+                else:
+                    set_data_normalized(span, span_attribute, value, unpack=False)
+
+        # LLM Execution
        try:
            res = f(*args, **kwargs)
        except Exception as e:
@@ -86,96 +134,245 @@ def _wrap_text_generation(f):
             span.__exit__(None, None, None)
             raise e from None
 
+        # Output attributes
+        finish_reason = None
+        response_model = None
+        response_text_buffer: list[str] = []
+        tokens_used = 0
+        tool_calls = None
+        usage = None
+
         with capture_internal_exceptions():
-            if should_send_default_pii() and integration.include_prompts:
-                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, prompt)
+            if isinstance(res, str) and res is not None:
+                response_text_buffer.append(res)
 
-            set_data_normalized(span, SPANDATA.AI_MODEL_ID, model)
-            set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)
+            if hasattr(res, "generated_text") and res.generated_text is not None:
+                response_text_buffer.append(res.generated_text)
 
-            if isinstance(res, str):
-                if should_send_default_pii() and integration.include_prompts:
-                    set_data_normalized(
-                        span,
-                        SPANDATA.AI_RESPONSES,
-                        [res],
-                    )
-                span.__exit__(None, None, None)
-                return res
+            if hasattr(res, "model") and res.model is not None:
+                response_model = res.model
+
+            if hasattr(res, "details") and hasattr(res.details, "finish_reason"):
+                finish_reason = res.details.finish_reason
+
+            if (
+                hasattr(res, "details")
+                and hasattr(res.details, "generated_tokens")
+                and res.details.generated_tokens is not None
+            ):
+                tokens_used = res.details.generated_tokens
+
+            if hasattr(res, "usage") and res.usage is not None:
+                usage = res.usage
+
+            if hasattr(res, "choices") and res.choices is not None:
+                for choice in res.choices:
+                    if hasattr(choice, "finish_reason"):
+                        finish_reason = choice.finish_reason
+                    if hasattr(choice, "message") and hasattr(
+                        choice.message, "tool_calls"
+                    ):
+                        tool_calls = choice.message.tool_calls
+                    if (
+                        hasattr(choice, "message")
+                        and hasattr(choice.message, "content")
+                        and choice.message.content is not None
+                    ):
+                        response_text_buffer.append(choice.message.content)
 
-            if isinstance(res, TextGenerationOutput):
-                if should_send_default_pii() and integration.include_prompts:
+            if response_model is not None:
+                span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
+
+            if finish_reason is not None:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
+                    finish_reason,
+                )
+
+            if should_send_default_pii() and integration.include_prompts:
+                if tool_calls is not None and len(tool_calls) > 0:
                     set_data_normalized(
                         span,
-                        SPANDATA.AI_RESPONSES,
-                        [res.generated_text],
+                        SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                        tool_calls,
+                        unpack=False,
                     )
-                if res.details is not None and res.details.generated_tokens > 0:
-                    record_token_usage(
-                        span,
-                        total_tokens=res.details.generated_tokens,
-                    )
-                span.__exit__(None, None, None)
-                return res
 
-            if not isinstance(res, Iterable):
-                # we only know how to deal with strings and iterables, ignore
-                set_data_normalized(span, "unknown_response", True)
+                if len(response_text_buffer) > 0:
+                    text_response = "".join(response_text_buffer)
+                    if text_response:
+                        set_data_normalized(
+                            span,
+                            SPANDATA.GEN_AI_RESPONSE_TEXT,
+                            text_response,
+                        )
+
+            if usage is not None:
+                record_token_usage(
+                    span,
+                    input_tokens=usage.prompt_tokens,
+                    output_tokens=usage.completion_tokens,
+                    total_tokens=usage.total_tokens,
+                )
+            elif tokens_used > 0:
+                record_token_usage(
+                    span,
+                    total_tokens=tokens_used,
+                )
+
+        # If the response is not a generator (meaning a streaming response)
+        # we are done and can return the response
+        if not inspect.isgenerator(res):
            span.__exit__(None, None, None)
            return res
 
         if kwargs.get("details", False):
-            # res is Iterable[TextGenerationStreamOutput]
+            # text-generation stream output
             def new_details_iterator():
-                # type: () -> Iterable[ChatCompletionStreamOutput]
+                # type: () -> Iterable[Any]
+                finish_reason = None
+                response_text_buffer: list[str] = []
+                tokens_used = 0
+
                 with capture_internal_exceptions():
-                    tokens_used = 0
-                    data_buf: list[str] = []
-                    for x in res:
-                        if hasattr(x, "token") and hasattr(x.token, "text"):
-                            data_buf.append(x.token.text)
-                        if hasattr(x, "details") and hasattr(
-                            x.details, "generated_tokens"
+                    for chunk in res:
+                        if (
+                            hasattr(chunk, "token")
+                            and hasattr(chunk.token, "text")
+                            and chunk.token.text is not None
+                        ):
+                            response_text_buffer.append(chunk.token.text)
+
+                        if hasattr(chunk, "details") and hasattr(
+                            chunk.details, "finish_reason"
+                        ):
+                            finish_reason = chunk.details.finish_reason
+
+                        if (
+                            hasattr(chunk, "details")
+                            and hasattr(chunk.details, "generated_tokens")
+                            and chunk.details.generated_tokens is not None
                         ):
-                            tokens_used = x.details.generated_tokens
-                        yield x
-                    if (
-                        len(data_buf) > 0
-                        and should_send_default_pii()
-                        and integration.include_prompts
-                    ):
+                            tokens_used = chunk.details.generated_tokens
+
+                        yield chunk
+
+                    if finish_reason is not None:
                         set_data_normalized(
-                            span, SPANDATA.AI_RESPONSES, "".join(data_buf)
+                            span,
+                            SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
+                            finish_reason,
                         )
+
+                    if should_send_default_pii() and integration.include_prompts:
+                        if len(response_text_buffer) > 0:
+                            text_response = "".join(response_text_buffer)
+                            if text_response:
+                                set_data_normalized(
+                                    span,
+                                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                                    text_response,
+                                )
+
                     if tokens_used > 0:
                         record_token_usage(
                             span,
                             total_tokens=tokens_used,
                         )
+
                     span.__exit__(None, None, None)
 
             return new_details_iterator()
-        else:
-            # res is Iterable[str]
 
+        else:
+            # chat-completion stream output
             def new_iterator():
                 # type: () -> Iterable[str]
-                data_buf: list[str] = []
+                finish_reason = None
+                response_model = None
+                response_text_buffer: list[str] = []
+                tool_calls = None
+                usage = None
+
                 with capture_internal_exceptions():
-                    for s in res:
-                        if isinstance(s, str):
-                            data_buf.append(s)
-                        yield s
-                    if (
-                        len(data_buf) > 0
-                        and should_send_default_pii()
-                        and integration.include_prompts
-                    ):
+                    for chunk in res:
+                        if hasattr(chunk, "model") and chunk.model is not None:
+                            response_model = chunk.model
+
+                        if hasattr(chunk, "usage") and chunk.usage is not None:
+                            usage = chunk.usage
+
+                        if isinstance(chunk, str):
+                            if chunk is not None:
+                                response_text_buffer.append(chunk)
+
+                        if hasattr(chunk, "choices") and chunk.choices is not None:
+                            for choice in chunk.choices:
+                                if (
+                                    hasattr(choice, "delta")
+                                    and hasattr(choice.delta, "content")
+                                    and choice.delta.content is not None
+                                ):
+                                    response_text_buffer.append(
+                                        choice.delta.content
+                                    )
+
+                                if (
+                                    hasattr(choice, "finish_reason")
+                                    and choice.finish_reason is not None
+                                ):
+                                    finish_reason = choice.finish_reason
+
+                                if (
+                                    hasattr(choice, "delta")
+                                    and hasattr(choice.delta, "tool_calls")
+                                    and choice.delta.tool_calls is not None
+                                ):
+                                    tool_calls = choice.delta.tool_calls
+
+                        yield chunk
+
+                    if response_model is not None:
+                        span.set_data(
+                            SPANDATA.GEN_AI_RESPONSE_MODEL, response_model
+                        )
+
+                    if finish_reason is not None:
                         set_data_normalized(
-                            span, SPANDATA.AI_RESPONSES, "".join(data_buf)
+                            span,
+                            SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
+                            finish_reason,
                         )
+
+                    if should_send_default_pii() and integration.include_prompts:
+                        if tool_calls is not None and len(tool_calls) > 0:
+                            set_data_normalized(
+                                span,
+                                SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                                tool_calls,
+                                unpack=False,
+                            )
+
+                        if len(response_text_buffer) > 0:
+                            text_response = "".join(response_text_buffer)
+                            if text_response:
+                                set_data_normalized(
+                                    span,
+                                    SPANDATA.GEN_AI_RESPONSE_TEXT,
+                                    text_response,
+                                )
+
+                    if usage is not None:
+                        record_token_usage(
+                            span,
+                            input_tokens=usage.prompt_tokens,
+                            output_tokens=usage.completion_tokens,
+                            total_tokens=usage.total_tokens,
+                        )
+
                     span.__exit__(None, None, None)
 
             return new_iterator()
 
-    return new_text_generation
+    return new_huggingface_task
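
Taken together, the huggingface_hub.py changes replace the old text-generation-only wrapper with a shared _wrap_huggingface_task that instruments both text_generation and chat_completion and emits gen_ai.* span data. A minimal usage sketch (not part of the diff; the DSN and model are placeholders):

    import sentry_sdk
    from sentry_sdk.integrations.huggingface_hub import HuggingfaceHubIntegration
    from huggingface_hub import InferenceClient

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        traces_sample_rate=1.0,
        send_default_pii=True,  # gates GEN_AI_REQUEST_MESSAGES / GEN_AI_RESPONSE_TEXT
        integrations=[HuggingfaceHubIntegration(include_prompts=True)],
    )

    client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")  # example model

    with sentry_sdk.start_transaction(op="demo", name="hf-demo"):
        # Patched by _wrap_huggingface_task with OP.GEN_AI_CHAT, so this call is
        # recorded as an "<operation> <model>" span carrying gen_ai.* attributes.
        client.chat_completion(
            messages=[{"role": "user", "content": "Say hello"}],
            max_tokens=32,
        )

Prompts and response text are only attached when send_default_pii is enabled and the integration keeps include_prompts=True.
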
sentry_sdk/integrations/langchain.py

@@ -4,12 +4,11 @@ from functools import wraps
 
 import sentry_sdk
 from sentry_sdk.ai.monitoring import set_ai_pipeline_name
-from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
-from sentry_sdk.tracing import Span
-from sentry_sdk.tracing_utils import _get_value
+from sentry_sdk.tracing_utils import _get_value, set_span_errored
 from sentry_sdk.utils import logger, capture_internal_exceptions
 
 from typing import TYPE_CHECKING
@@ -26,6 +25,7 @@ if TYPE_CHECKING:
         Union,
     )
     from uuid import UUID
+    from sentry_sdk.tracing import Span
 
 
 try:
@@ -116,7 +116,7 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
 
         span_data = self.span_map[run_id]
         span = span_data.span
-        span.set_status("unknown")
+        set_span_errored(span)
 
         sentry_sdk.capture_exception(error, span.scope)
 
@@ -322,14 +322,15 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
             pass
 
         try:
-            tool_calls = getattr(generation.message, "tool_calls", None)
-            if tool_calls is not None and tool_calls != []:
-                set_data_normalized(
-                    span,
-                    SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
-                    tool_calls,
-                    unpack=False,
-                )
+            if should_send_default_pii() and self.include_prompts:
+                tool_calls = getattr(generation.message, "tool_calls", None)
+                if tool_calls is not None and tool_calls != []:
+                    set_data_normalized(
+                        span,
+                        SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                        tool_calls,
+                        unpack=False,
+                    )
         except AttributeError:
             pass
 
@@ -716,8 +717,9 @@ def _wrap_agent_executor_invoke(f):
             return f(self, *args, **kwargs)
 
         agent_name, tools = _get_request_data(self, args, kwargs)
+        start_span_function = get_start_span_function()
 
-        with sentry_sdk.start_span(
+        with start_span_function(
             op=OP.GEN_AI_INVOKE_AGENT,
             name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
             origin=LangchainIntegration.origin,
@@ -767,8 +769,9 @@ def _wrap_agent_executor_stream(f):
             return f(self, *args, **kwargs)
 
         agent_name, tools = _get_request_data(self, args, kwargs)
+        start_span_function = get_start_span_function()
 
-        span = sentry_sdk.start_span(
+        span = start_span_function(
             op=OP.GEN_AI_INVOKE_AGENT,
             name=f"invoke_agent {agent_name}".strip(),
             origin=LangchainIntegration.origin,
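
The langchain.py hunks make two behavioral changes: failed runs are marked through the new set_span_errored helper instead of span.set_status("unknown"), and response tool calls are now gated behind the same PII checks as prompts. A condensed sketch of that gating pattern (the helper name record_tool_calls is hypothetical; the constants and call shape come from the diff):

    from sentry_sdk.ai.utils import set_data_normalized
    from sentry_sdk.consts import SPANDATA
    from sentry_sdk.scope import should_send_default_pii

    def record_tool_calls(span, generation, include_prompts):
        # Attach tool calls only when PII sending is enabled AND the
        # integration opted into prompt capture.
        if should_send_default_pii() and include_prompts:
            tool_calls = getattr(generation.message, "tool_calls", None)
            if tool_calls:  # skips both None and []
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
                    tool_calls,
                    unpack=False,
                )
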
sentry_sdk/integrations/openai.py

@@ -7,6 +7,7 @@ from sentry_sdk.ai.utils import set_data_normalized
 from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing_utils import set_span_errored
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
@@ -83,6 +84,8 @@ def _capture_exception(exc, manual_span_cleanup=True):
     # Close an eventually open span
     # We need to do this by hand because we are not using the start_span context manager
     current_span = sentry_sdk.get_current_span()
+    set_span_errored(current_span)
+
     if manual_span_cleanup and current_span is not None:
         current_span.__exit__(None, None, None)
 
@@ -279,9 +282,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
 
         def new_iterator():
             # type: () -> Iterator[ChatCompletionChunk]
-            with capture_internal_exceptions():
-                count_tokens_manually = True
-                for x in old_iterator:
+            count_tokens_manually = True
+            for x in old_iterator:
+                with capture_internal_exceptions():
                     # OpenAI chat completion API
                     if hasattr(x, "choices"):
                         choice_index = 0
@@ -312,8 +315,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
                         )
                         count_tokens_manually = False
 
-                    yield x
+                yield x
 
+            with capture_internal_exceptions():
                 if len(data_buf) > 0:
                     all_responses = ["".join(chunk) for chunk in data_buf]
                     if should_send_default_pii() and integration.include_prompts:
@@ -334,9 +338,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
 
         async def new_iterator_async():
             # type: () -> AsyncIterator[ChatCompletionChunk]
-            with capture_internal_exceptions():
-                count_tokens_manually = True
-                async for x in old_iterator:
+            count_tokens_manually = True
+            async for x in old_iterator:
+                with capture_internal_exceptions():
                     # OpenAI chat completion API
                     if hasattr(x, "choices"):
                         choice_index = 0
@@ -367,8 +371,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
                         )
                         count_tokens_manually = False
 
-                    yield x
+                yield x
 
+            with capture_internal_exceptions():
                 if len(data_buf) > 0:
                     all_responses = ["".join(chunk) for chunk in data_buf]
                     if should_send_default_pii() and integration.include_prompts:
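
The openai.py hunks restructure both stream wrappers so that capture_internal_exceptions() guards each chunk's bookkeeping and the final accounting separately, while the yield itself runs unguarded. An illustrative reduction of that shape (hypothetical on_chunk/on_done callbacks stand in for the real per-chunk parsing and token counting):

    from sentry_sdk.utils import capture_internal_exceptions

    def instrumented(old_iterator, on_chunk, on_done):
        for x in old_iterator:
            with capture_internal_exceptions():
                on_chunk(x)  # instrumentation bugs must not break the stream
            yield x          # unguarded hand-off to the consumer
        with capture_internal_exceptions():
            on_done()        # final accounting (token counts, buffered responses)
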
sentry_sdk/integrations/openai_agents/patches/agent_run.py

@@ -26,12 +26,12 @@ def _patch_agent_run():
     original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs
     original_execute_final_output = agents._run_impl.RunImpl.execute_final_output
 
-    def _start_invoke_agent_span(context_wrapper, agent):
-        # type: (agents.RunContextWrapper, agents.Agent) -> None
+    def _start_invoke_agent_span(context_wrapper, agent, kwargs):
+        # type: (agents.RunContextWrapper, agents.Agent, dict[str, Any]) -> None
         """Start an agent invocation span"""
         # Store the agent on the context wrapper so we can access it later
         context_wrapper._sentry_current_agent = agent
-        invoke_agent_span(context_wrapper, agent)
+        invoke_agent_span(context_wrapper, agent, kwargs)
 
     def _end_invoke_agent_span(context_wrapper, agent, output=None):
         # type: (agents.RunContextWrapper, agents.Agent, Optional[Any]) -> None
@@ -72,7 +72,7 @@ def _patch_agent_run():
         if current_agent and current_agent != agent:
             _end_invoke_agent_span(context_wrapper, current_agent)
 
-        _start_invoke_agent_span(context_wrapper, agent)
+        _start_invoke_agent_span(context_wrapper, agent, kwargs)
 
         # Call original method with all the correct parameters
         result = await original_run_single_turn(*args, **kwargs)
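
The agent_run.py hunk threads the single-turn kwargs through to invoke_agent_span, whose matching change lives in invoke_agent.py (+50 -6, not shown above). A generic sketch of the monkey-patching pattern in play, with hypothetical names:

    import functools

    def patch_async_method(cls, name, before):
        # Keep a reference to the original, wrap it, and thread the call's
        # kwargs through to a pre-call hook (e.g. a span helper).
        original = getattr(cls, name)

        @functools.wraps(original)
        async def patched(*args, **kwargs):
            before(kwargs)  # e.g. _start_invoke_agent_span(..., kwargs)
            return await original(*args, **kwargs)

        setattr(cls, name, patched)
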
sentry_sdk/integrations/openai_agents/spans/agent_workflow.py

@@ -1,7 +1,7 @@
 import sentry_sdk
+from sentry_sdk.ai.utils import get_start_span_function
 
 from ..consts import SPAN_ORIGIN
-from ..utils import _get_start_span_function
 
 from typing import TYPE_CHECKING
 
@@ -13,7 +13,7 @@ def agent_workflow_span(agent):
     # type: (agents.Agent) -> sentry_sdk.tracing.Span
 
     # Create a transaction or a span if an transaction is already active
-    span = _get_start_span_function()(
+    span = get_start_span_function()(
         name=f"{agent.name} workflow",
         origin=SPAN_ORIGIN,
     )
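
Both langchain.py and agent_workflow.py now source get_start_span_function from sentry_sdk.ai.utils instead of a per-integration helper. Its body is not in this diff; judging from the comment above ("Create a transaction or a span if a transaction is already active"), a plausible sketch of the assumed behavior is:

    import sentry_sdk

    def get_start_span_function_sketch():
        # Start a transaction when nothing is running yet; otherwise nest a
        # child span under the active transaction. (Assumed, not copied from
        # sentry_sdk.ai.utils.)
        current_span = sentry_sdk.get_current_span()
        transaction_active = (
            current_span is not None
            and current_span.containing_transaction is not None
        )
        return (
            sentry_sdk.start_span if transaction_active else sentry_sdk.start_transaction
        )
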
sentry_sdk/integrations/openai_agents/spans/execute_tool.py

@@ -42,7 +42,7 @@ def update_execute_tool_span(span, agent, tool, result):
     if isinstance(result, str) and result.startswith(
         "An error occurred while running the tool"
     ):
-        span.set_status(SPANSTATUS.INTERNAL_ERROR)
+        span.set_status(SPANSTATUS.ERROR)
 
     if should_send_default_pii():
         span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, result)
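
Finally, execute_tool.py switches the failure status from SPANSTATUS.INTERNAL_ERROR to SPANSTATUS.ERROR (the latter appears to be new in this release; consts.py changes by +5 -2). An illustrative standalone use of the new constant; the span and tool output below are invented for the example, and OP.GEN_AI_EXECUTE_TOOL is assumed to be the op these tool spans use:

    import sentry_sdk
    from sentry_sdk.consts import OP, SPANSTATUS

    with sentry_sdk.start_span(
        op=OP.GEN_AI_EXECUTE_TOOL, name="execute_tool lookup"
    ) as span:
        result = "An error occurred while running the tool"  # simulated output
        if result.startswith("An error occurred while running the tool"):
            span.set_status(SPANSTATUS.ERROR)  # 2.37.1 used SPANSTATUS.INTERNAL_ERROR
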