sentry-sdk 2.40.0__py2.py3-none-any.whl → 2.42.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of sentry-sdk has been flagged as possibly problematic.
Files changed (39)
  1. sentry_sdk/_metrics.py +81 -0
  2. sentry_sdk/_metrics_batcher.py +156 -0
  3. sentry_sdk/_types.py +27 -22
  4. sentry_sdk/ai/__init__.py +7 -0
  5. sentry_sdk/ai/utils.py +48 -0
  6. sentry_sdk/client.py +81 -30
  7. sentry_sdk/consts.py +13 -8
  8. sentry_sdk/envelope.py +3 -3
  9. sentry_sdk/integrations/__init__.py +1 -0
  10. sentry_sdk/integrations/aiohttp.py +4 -1
  11. sentry_sdk/integrations/anthropic.py +10 -2
  12. sentry_sdk/integrations/google_genai/__init__.py +298 -0
  13. sentry_sdk/integrations/google_genai/consts.py +16 -0
  14. sentry_sdk/integrations/google_genai/streaming.py +155 -0
  15. sentry_sdk/integrations/google_genai/utils.py +566 -0
  16. sentry_sdk/integrations/httpx.py +16 -5
  17. sentry_sdk/integrations/langchain.py +29 -4
  18. sentry_sdk/integrations/langgraph.py +5 -3
  19. sentry_sdk/integrations/logging.py +1 -1
  20. sentry_sdk/integrations/loguru.py +1 -1
  21. sentry_sdk/integrations/openai.py +3 -2
  22. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +10 -2
  23. sentry_sdk/integrations/openai_agents/utils.py +35 -18
  24. sentry_sdk/integrations/ray.py +20 -4
  25. sentry_sdk/integrations/stdlib.py +8 -1
  26. sentry_sdk/integrations/threading.py +52 -8
  27. sentry_sdk/logger.py +1 -1
  28. sentry_sdk/tracing.py +0 -26
  29. sentry_sdk/tracing_utils.py +64 -24
  30. sentry_sdk/transport.py +1 -17
  31. sentry_sdk/types.py +3 -0
  32. sentry_sdk/utils.py +17 -1
  33. {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/METADATA +3 -1
  34. {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/RECORD +38 -33
  35. sentry_sdk/metrics.py +0 -971
  36. {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/WHEEL +0 -0
  37. {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/entry_points.txt +0 -0
  38. {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/licenses/LICENSE +0 -0
  39. {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/anthropic.py
@@ -3,7 +3,11 @@ from typing import TYPE_CHECKING
 
 import sentry_sdk
 from sentry_sdk.ai.monitoring import record_token_usage
-from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function
+from sentry_sdk.ai.utils import (
+    set_data_normalized,
+    normalize_message_roles,
+    get_start_span_function,
+)
 from sentry_sdk.consts import OP, SPANDATA, SPANSTATUS
 from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
@@ -140,8 +144,12 @@ def _set_input_data(span, kwargs, integration):
     else:
         normalized_messages.append(message)
 
+    role_normalized_messages = normalize_message_roles(normalized_messages)
     set_data_normalized(
-        span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False
+        span,
+        SPANDATA.GEN_AI_REQUEST_MESSAGES,
+        role_normalized_messages,
+        unpack=False,
     )
 
     set_data_normalized(
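
The normalize_message_roles helper that anthropic.py now imports ships in sentry_sdk/ai/utils.py (+48 lines, not shown in the hunks here). A minimal sketch of the kind of normalization it performs; the alias table below is assumed for illustration and may differ from the shipped mapping:

# Hypothetical sketch of role normalization; the shipped helper lives in
# sentry_sdk/ai/utils.py and its exact alias table may differ.
KNOWN_ROLES = {"system", "user", "assistant", "tool"}
ROLE_ALIASES = {"ai": "assistant", "human": "user"}  # assumed aliases

def normalize_message_roles(messages):
    # Copy each message and map vendor-specific roles onto canonical
    # gen_ai roles; unknown roles pass through unchanged.
    normalized = []
    for message in messages:
        if not isinstance(message, dict):
            normalized.append(message)
            continue
        message = dict(message)
        role = message.get("role")
        if role not in KNOWN_ROLES:
            message["role"] = ROLE_ALIASES.get(role, role)
        normalized.append(message)
    return normalized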
sentry_sdk/integrations/google_genai/__init__.py
@@ -0,0 +1,298 @@
+from functools import wraps
+from typing import (
+    Any,
+    AsyncIterator,
+    Callable,
+    Iterator,
+    List,
+)
+
+import sentry_sdk
+from sentry_sdk.ai.utils import get_start_span_function
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.tracing import SPANSTATUS
+
+
+try:
+    from google.genai.models import Models, AsyncModels
+except ImportError:
+    raise DidNotEnable("google-genai not installed")
+
+
+from .consts import IDENTIFIER, ORIGIN, GEN_AI_SYSTEM
+from .utils import (
+    set_span_data_for_request,
+    set_span_data_for_response,
+    _capture_exception,
+    prepare_generate_content_args,
+)
+from .streaming import (
+    set_span_data_for_streaming_response,
+    accumulate_streaming_response,
+)
+
+
+class GoogleGenAIIntegration(Integration):
+    identifier = IDENTIFIER
+    origin = ORIGIN
+
+    def __init__(self, include_prompts=True):
+        # type: (GoogleGenAIIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        # Patch sync methods
+        Models.generate_content = _wrap_generate_content(Models.generate_content)
+        Models.generate_content_stream = _wrap_generate_content_stream(
+            Models.generate_content_stream
+        )
+
+        # Patch async methods
+        AsyncModels.generate_content = _wrap_async_generate_content(
+            AsyncModels.generate_content
+        )
+        AsyncModels.generate_content_stream = _wrap_async_generate_content_stream(
+            AsyncModels.generate_content_stream
+        )
+
+
+def _wrap_generate_content_stream(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    def new_generate_content_stream(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+
+        _model, contents, model_name = prepare_generate_content_args(args, kwargs)
+
+        span = get_start_span_function()(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name="invoke_agent",
+            origin=ORIGIN,
+        )
+        span.__enter__()
+        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
+        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+        set_span_data_for_request(span, integration, model_name, contents, kwargs)
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        chat_span = sentry_sdk.start_span(
+            op=OP.GEN_AI_CHAT,
+            name=f"chat {model_name}",
+            origin=ORIGIN,
+        )
+        chat_span.__enter__()
+        chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+        chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+        chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+        set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
+        chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        try:
+            stream = f(self, *args, **kwargs)
+
+            # Create wrapper iterator to accumulate responses
+            def new_iterator():
+                # type: () -> Iterator[Any]
+                chunks = []  # type: List[Any]
+                try:
+                    for chunk in stream:
+                        chunks.append(chunk)
+                        yield chunk
+                except Exception as exc:
+                    _capture_exception(exc)
+                    chat_span.set_status(SPANSTATUS.ERROR)
+                    raise
+                finally:
+                    # Accumulate all chunks and set final response data on spans
+                    if chunks:
+                        accumulated_response = accumulate_streaming_response(chunks)
+                        set_span_data_for_streaming_response(
+                            chat_span, integration, accumulated_response
+                        )
+                        set_span_data_for_streaming_response(
+                            span, integration, accumulated_response
+                        )
+                    chat_span.__exit__(None, None, None)
+                    span.__exit__(None, None, None)
+
+            return new_iterator()
+
+        except Exception as exc:
+            _capture_exception(exc)
+            chat_span.__exit__(None, None, None)
+            span.__exit__(None, None, None)
+            raise
+
+    return new_generate_content_stream
+
+
+def _wrap_async_generate_content_stream(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    async def new_async_generate_content_stream(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
+        if integration is None:
+            return await f(self, *args, **kwargs)
+
+        _model, contents, model_name = prepare_generate_content_args(args, kwargs)
+
+        span = get_start_span_function()(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name="invoke_agent",
+            origin=ORIGIN,
+        )
+        span.__enter__()
+        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
+        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+        set_span_data_for_request(span, integration, model_name, contents, kwargs)
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        chat_span = sentry_sdk.start_span(
+            op=OP.GEN_AI_CHAT,
+            name=f"chat {model_name}",
+            origin=ORIGIN,
+        )
+        chat_span.__enter__()
+        chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+        chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+        chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+        set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
+        chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+        try:
+            stream = await f(self, *args, **kwargs)
+
+            # Create wrapper async iterator to accumulate responses
+            async def new_async_iterator():
+                # type: () -> AsyncIterator[Any]
+                chunks = []  # type: List[Any]
+                try:
+                    async for chunk in stream:
+                        chunks.append(chunk)
+                        yield chunk
+                except Exception as exc:
+                    _capture_exception(exc)
+                    chat_span.set_status(SPANSTATUS.ERROR)
+                    raise
+                finally:
+                    # Accumulate all chunks and set final response data on spans
+                    if chunks:
+                        accumulated_response = accumulate_streaming_response(chunks)
+                        set_span_data_for_streaming_response(
+                            chat_span, integration, accumulated_response
+                        )
+                        set_span_data_for_streaming_response(
+                            span, integration, accumulated_response
+                        )
+                    chat_span.__exit__(None, None, None)
+                    span.__exit__(None, None, None)
+
+            return new_async_iterator()
+
+        except Exception as exc:
+            _capture_exception(exc)
+            chat_span.__exit__(None, None, None)
+            span.__exit__(None, None, None)
+            raise
+
+    return new_async_generate_content_stream
+
+
+def _wrap_generate_content(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    def new_generate_content(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
+        if integration is None:
+            return f(self, *args, **kwargs)
+
+        model, contents, model_name = prepare_generate_content_args(args, kwargs)
+
+        with get_start_span_function()(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name="invoke_agent",
+            origin=ORIGIN,
+        ) as span:
+            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+            set_span_data_for_request(span, integration, model_name, contents, kwargs)
+
+            with sentry_sdk.start_span(
+                op=OP.GEN_AI_CHAT,
+                name=f"chat {model_name}",
+                origin=ORIGIN,
+            ) as chat_span:
+                chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+                chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+                chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+                set_span_data_for_request(
+                    chat_span, integration, model_name, contents, kwargs
+                )
+
+                try:
+                    response = f(self, *args, **kwargs)
+                except Exception as exc:
+                    _capture_exception(exc)
+                    chat_span.set_status(SPANSTATUS.ERROR)
+                    raise
+
+                set_span_data_for_response(chat_span, integration, response)
+                set_span_data_for_response(span, integration, response)
+
+                return response
+
+    return new_generate_content
+
+
+def _wrap_async_generate_content(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    async def new_async_generate_content(self, *args, **kwargs):
+        # type: (Any, Any, Any) -> Any
+        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
+        if integration is None:
+            return await f(self, *args, **kwargs)
+
+        model, contents, model_name = prepare_generate_content_args(args, kwargs)
+
+        with get_start_span_function()(
+            op=OP.GEN_AI_INVOKE_AGENT,
+            name="invoke_agent",
+            origin=ORIGIN,
+        ) as span:
+            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+            set_span_data_for_request(span, integration, model_name, contents, kwargs)
+
+            with sentry_sdk.start_span(
+                op=OP.GEN_AI_CHAT,
+                name=f"chat {model_name}",
+                origin=ORIGIN,
+            ) as chat_span:
+                chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
+                chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+                chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
+                set_span_data_for_request(
+                    chat_span, integration, model_name, contents, kwargs
+                )
+                try:
+                    response = await f(self, *args, **kwargs)
+                except Exception as exc:
+                    _capture_exception(exc)
+                    chat_span.set_status(SPANSTATUS.ERROR)
+                    raise
+
+                set_span_data_for_response(chat_span, integration, response)
+                set_span_data_for_response(span, integration, response)
+
+                return response
+
+    return new_async_generate_content
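
setup_once() above monkey-patches all four Models/AsyncModels entry points, so instrumenting an application is a matter of registering the integration (the one-line change to sentry_sdk/integrations/__init__.py in this release suggests it may also be auto-enabled when google-genai is importable). A sketch of explicit registration using the standard sentry_sdk.init() API; the DSN is a placeholder:

import sentry_sdk
from sentry_sdk.integrations.google_genai import GoogleGenAIIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,
    # Prompt/response text is only attached when PII sending is allowed
    # (see the should_send_default_pii() check in streaming.py below).
    send_default_pii=True,
    integrations=[GoogleGenAIIntegration(include_prompts=True)],
)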
sentry_sdk/integrations/google_genai/consts.py
@@ -0,0 +1,16 @@
+GEN_AI_SYSTEM = "gcp.gemini"
+
+# Mapping of tool attributes to their descriptions
+# These are all tools that are available in the Google GenAI API
+TOOL_ATTRIBUTES_MAP = {
+    "google_search_retrieval": "Google Search retrieval tool",
+    "google_search": "Google Search tool",
+    "retrieval": "Retrieval tool",
+    "enterprise_web_search": "Enterprise web search tool",
+    "google_maps": "Google Maps tool",
+    "code_execution": "Code execution tool",
+    "computer_use": "Computer use tool",
+}
+
+IDENTIFIER = "google_genai"
+ORIGIN = f"auto.ai.{IDENTIFIER}"
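
The keys of TOOL_ATTRIBUTES_MAP correspond to the optional built-in tool attributes on a google.genai tool config; the actual consumer lives in google_genai/utils.py (+566 lines, not shown in this diff). A hypothetical sketch of the lookup pattern, for illustration only:

from sentry_sdk.integrations.google_genai.consts import TOOL_ATTRIBUTES_MAP

def describe_configured_tools(tool):
    # Hypothetical helper: collect a description for every built-in tool
    # attribute that is set on this tool config object.
    return [
        description
        for attribute, description in TOOL_ATTRIBUTES_MAP.items()
        if getattr(tool, attribute, None) is not None
    ]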
sentry_sdk/integrations/google_genai/streaming.py
@@ -0,0 +1,155 @@
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    List,
+    TypedDict,
+    Optional,
+)
+
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import SPANDATA
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    safe_serialize,
+)
+from .utils import (
+    extract_tool_calls,
+    extract_finish_reasons,
+    extract_contents_text,
+    extract_usage_data,
+    UsageData,
+)
+
+if TYPE_CHECKING:
+    from sentry_sdk.tracing import Span
+    from google.genai.types import GenerateContentResponse
+
+
+class AccumulatedResponse(TypedDict):
+    id: Optional[str]
+    model: Optional[str]
+    text: str
+    finish_reasons: List[str]
+    tool_calls: List[dict[str, Any]]
+    usage_metadata: UsageData
+
+
+def accumulate_streaming_response(chunks):
+    # type: (List[GenerateContentResponse]) -> AccumulatedResponse
+    """Accumulate streaming chunks into a single response-like object."""
+    accumulated_text = []
+    finish_reasons = []
+    tool_calls = []
+    total_input_tokens = 0
+    total_output_tokens = 0
+    total_tokens = 0
+    total_cached_tokens = 0
+    total_reasoning_tokens = 0
+    response_id = None
+    model = None
+
+    for chunk in chunks:
+        # Extract text and tool calls
+        if getattr(chunk, "candidates", None):
+            for candidate in getattr(chunk, "candidates", []):
+                if hasattr(candidate, "content") and getattr(
+                    candidate.content, "parts", []
+                ):
+                    extracted_text = extract_contents_text(candidate.content)
+                    if extracted_text:
+                        accumulated_text.append(extracted_text)
+
+        extracted_finish_reasons = extract_finish_reasons(chunk)
+        if extracted_finish_reasons:
+            finish_reasons.extend(extracted_finish_reasons)
+
+        extracted_tool_calls = extract_tool_calls(chunk)
+        if extracted_tool_calls:
+            tool_calls.extend(extracted_tool_calls)
+
+        # Accumulate token usage
+        extracted_usage_data = extract_usage_data(chunk)
+        total_input_tokens += extracted_usage_data["input_tokens"]
+        total_output_tokens += extracted_usage_data["output_tokens"]
+        total_cached_tokens += extracted_usage_data["input_tokens_cached"]
+        total_reasoning_tokens += extracted_usage_data["output_tokens_reasoning"]
+        total_tokens += extracted_usage_data["total_tokens"]
+
+    accumulated_response = AccumulatedResponse(
+        text="".join(accumulated_text),
+        finish_reasons=finish_reasons,
+        tool_calls=tool_calls,
+        usage_metadata=UsageData(
+            input_tokens=total_input_tokens,
+            output_tokens=total_output_tokens,
+            input_tokens_cached=total_cached_tokens,
+            output_tokens_reasoning=total_reasoning_tokens,
+            total_tokens=total_tokens,
+        ),
+        id=response_id,
+        model=model,
+    )
+
+    return accumulated_response
+
+
+def set_span_data_for_streaming_response(span, integration, accumulated_response):
+    # type: (Span, Any, AccumulatedResponse) -> None
+    """Set span data for accumulated streaming response."""
+    if (
+        should_send_default_pii()
+        and integration.include_prompts
+        and accumulated_response.get("text")
+    ):
+        span.set_data(
+            SPANDATA.GEN_AI_RESPONSE_TEXT,
+            safe_serialize([accumulated_response["text"]]),
+        )
+
+    if accumulated_response.get("finish_reasons"):
+        set_data_normalized(
+            span,
+            SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
+            accumulated_response["finish_reasons"],
+        )
+
+    if accumulated_response.get("tool_calls"):
+        span.set_data(
+            SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+            safe_serialize(accumulated_response["tool_calls"]),
+        )
+
+    if accumulated_response.get("id"):
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, accumulated_response["id"])
+    if accumulated_response.get("model"):
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, accumulated_response["model"])
+
+    if accumulated_response["usage_metadata"]["input_tokens"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_INPUT_TOKENS,
+            accumulated_response["usage_metadata"]["input_tokens"],
+        )
+
+    if accumulated_response["usage_metadata"]["input_tokens_cached"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
+            accumulated_response["usage_metadata"]["input_tokens_cached"],
+        )
+
+    if accumulated_response["usage_metadata"]["output_tokens"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS,
+            accumulated_response["usage_metadata"]["output_tokens"],
+        )
+
+    if accumulated_response["usage_metadata"]["output_tokens_reasoning"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
+            accumulated_response["usage_metadata"]["output_tokens_reasoning"],
+        )
+
+    if accumulated_response["usage_metadata"]["total_tokens"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS,
+            accumulated_response["usage_metadata"]["total_tokens"],
+        )
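
accumulate_streaming_response reduces a chunk stream to one response-shaped dict: text fragments are concatenated in arrival order and the token counters are summed per field across chunks. (Note that response_id and model are initialized but never assigned from the chunks in this version, so streamed responses carry no id/model span data.) A distilled, self-contained illustration of the summing pattern, using plain dicts as stand-ins for the google.genai chunk types:

chunks = [
    {"text": "Hel", "usage": {"input_tokens": 12, "output_tokens": 3, "total_tokens": 15}},
    {"text": "lo!", "usage": {"input_tokens": 0, "output_tokens": 4, "total_tokens": 4}},
]

# Concatenate text in order; sum each usage counter across chunks.
accumulated_text = "".join(chunk["text"] for chunk in chunks)
totals = {
    key: sum(chunk["usage"][key] for chunk in chunks)
    for key in ("input_tokens", "output_tokens", "total_tokens")
}
assert accumulated_text == "Hello!"
assert totals == {"input_tokens": 12, "output_tokens": 7, "total_tokens": 19}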