sentry-sdk 2.40.0-py2.py3-none-any.whl → 2.42.0-py2.py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of sentry-sdk might be problematic.
- sentry_sdk/_metrics.py +81 -0
- sentry_sdk/_metrics_batcher.py +156 -0
- sentry_sdk/_types.py +27 -22
- sentry_sdk/ai/__init__.py +7 -0
- sentry_sdk/ai/utils.py +48 -0
- sentry_sdk/client.py +81 -30
- sentry_sdk/consts.py +13 -8
- sentry_sdk/envelope.py +3 -3
- sentry_sdk/integrations/__init__.py +1 -0
- sentry_sdk/integrations/aiohttp.py +4 -1
- sentry_sdk/integrations/anthropic.py +10 -2
- sentry_sdk/integrations/google_genai/__init__.py +298 -0
- sentry_sdk/integrations/google_genai/consts.py +16 -0
- sentry_sdk/integrations/google_genai/streaming.py +155 -0
- sentry_sdk/integrations/google_genai/utils.py +566 -0
- sentry_sdk/integrations/httpx.py +16 -5
- sentry_sdk/integrations/langchain.py +29 -4
- sentry_sdk/integrations/langgraph.py +5 -3
- sentry_sdk/integrations/logging.py +1 -1
- sentry_sdk/integrations/loguru.py +1 -1
- sentry_sdk/integrations/openai.py +3 -2
- sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +10 -2
- sentry_sdk/integrations/openai_agents/utils.py +35 -18
- sentry_sdk/integrations/ray.py +20 -4
- sentry_sdk/integrations/stdlib.py +8 -1
- sentry_sdk/integrations/threading.py +52 -8
- sentry_sdk/logger.py +1 -1
- sentry_sdk/tracing.py +0 -26
- sentry_sdk/tracing_utils.py +64 -24
- sentry_sdk/transport.py +1 -17
- sentry_sdk/types.py +3 -0
- sentry_sdk/utils.py +17 -1
- {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/METADATA +3 -1
- {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/RECORD +38 -33
- sentry_sdk/metrics.py +0 -971
- {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/WHEEL +0 -0
- {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-2.40.0.dist-info → sentry_sdk-2.42.0.dist-info}/top_level.txt +0 -0
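
The headline addition in this release is a new Google GenAI integration (the sentry_sdk/integrations/google_genai/ package), while the deprecated sentry_sdk/metrics.py module is removed (new _metrics.py and _metrics_batcher.py modules appear in its place). As a minimal sketch of enabling the new integration — the GoogleGenAIIntegration class name and its include_prompts keyword are assumptions inferred from this diff, not confirmed API:

import sentry_sdk
# Assumed name: the diff shows the google_genai package but not its public class.
from sentry_sdk.integrations.google_genai import GoogleGenAIIntegration

sentry_sdk.init(
    dsn="...",  # placeholder DSN
    traces_sample_rate=1.0,
    # Prompt/response capture is gated on PII consent plus include_prompts,
    # as seen in set_span_data_for_request/_response in utils.py below.
    send_default_pii=True,
    integrations=[GoogleGenAIIntegration(include_prompts=True)],  # assumed kwarg
)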
sentry_sdk/integrations/google_genai/utils.py
ADDED

@@ -0,0 +1,566 @@
+import copy
+import inspect
+from functools import wraps
+from .consts import ORIGIN, TOOL_ATTRIBUTES_MAP, GEN_AI_SYSTEM
+from typing import (
+    cast,
+    TYPE_CHECKING,
+    Iterable,
+    Any,
+    Callable,
+    List,
+    Optional,
+    Union,
+    TypedDict,
+)
+
+import sentry_sdk
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    event_from_exception,
+    safe_serialize,
+)
+from google.genai.types import GenerateContentConfig
+
+if TYPE_CHECKING:
+    from sentry_sdk.tracing import Span
+    from google.genai.types import (
+        GenerateContentResponse,
+        ContentListUnion,
+        Tool,
+        Model,
+    )
+
+
+class UsageData(TypedDict):
+    """Structure for token usage data."""
+
+    input_tokens: int
+    input_tokens_cached: int
+    output_tokens: int
+    output_tokens_reasoning: int
+    total_tokens: int
+
+
+def extract_usage_data(response):
+    # type: (Union[GenerateContentResponse, dict[str, Any]]) -> UsageData
+    """Extract usage data from response into a structured format.
+
+    Args:
+        response: The GenerateContentResponse object or dictionary containing usage metadata
+
+    Returns:
+        UsageData: Dictionary with input_tokens, input_tokens_cached,
+            output_tokens, and output_tokens_reasoning fields
+    """
+    usage_data = UsageData(
+        input_tokens=0,
+        input_tokens_cached=0,
+        output_tokens=0,
+        output_tokens_reasoning=0,
+        total_tokens=0,
+    )
+
+    # Handle dictionary response (from streaming)
+    if isinstance(response, dict):
+        usage = response.get("usage_metadata", {})
+        if not usage:
+            return usage_data
+
+        prompt_tokens = usage.get("prompt_token_count", 0) or 0
+        tool_use_prompt_tokens = usage.get("tool_use_prompt_token_count", 0) or 0
+        usage_data["input_tokens"] = prompt_tokens + tool_use_prompt_tokens
+
+        cached_tokens = usage.get("cached_content_token_count", 0) or 0
+        usage_data["input_tokens_cached"] = cached_tokens
+
+        reasoning_tokens = usage.get("thoughts_token_count", 0) or 0
+        usage_data["output_tokens_reasoning"] = reasoning_tokens
+
+        candidates_tokens = usage.get("candidates_token_count", 0) or 0
+        # python-genai reports output and reasoning tokens separately
+        # reasoning should be sub-category of output tokens
+        usage_data["output_tokens"] = candidates_tokens + reasoning_tokens
+
+        total_tokens = usage.get("total_token_count", 0) or 0
+        usage_data["total_tokens"] = total_tokens
+
+        return usage_data
+
+    if not hasattr(response, "usage_metadata"):
+        return usage_data
+
+    usage = response.usage_metadata
+
+    # Input tokens include both prompt and tool use prompt tokens
+    prompt_tokens = getattr(usage, "prompt_token_count", 0) or 0
+    tool_use_prompt_tokens = getattr(usage, "tool_use_prompt_token_count", 0) or 0
+    usage_data["input_tokens"] = prompt_tokens + tool_use_prompt_tokens
+
+    # Cached input tokens
+    cached_tokens = getattr(usage, "cached_content_token_count", 0) or 0
+    usage_data["input_tokens_cached"] = cached_tokens
+
+    # Reasoning tokens
+    reasoning_tokens = getattr(usage, "thoughts_token_count", 0) or 0
+    usage_data["output_tokens_reasoning"] = reasoning_tokens
+
+    # output_tokens = candidates_tokens + reasoning_tokens
+    # google-genai reports output and reasoning tokens separately
+    candidates_tokens = getattr(usage, "candidates_token_count", 0) or 0
+    usage_data["output_tokens"] = candidates_tokens + reasoning_tokens
+
+    total_tokens = getattr(usage, "total_token_count", 0) or 0
+    usage_data["total_tokens"] = total_tokens
+
+    return usage_data
+
+
+def _capture_exception(exc):
+    # type: (Any) -> None
+    """Capture exception with Google GenAI mechanism."""
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "google_genai", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def get_model_name(model):
+    # type: (Union[str, Model]) -> str
+    """Extract model name from model parameter."""
+    if isinstance(model, str):
+        return model
+    # Handle case where model might be an object with a name attribute
+    if hasattr(model, "name"):
+        return str(model.name)
+    return str(model)
+
+
+def extract_contents_text(contents):
+    # type: (ContentListUnion) -> Optional[str]
+    """Extract text from contents parameter which can have various formats."""
+    if contents is None:
+        return None
+
+    # Simple string case
+    if isinstance(contents, str):
+        return contents
+
+    # List of contents or parts
+    if isinstance(contents, list):
+        texts = []
+        for item in contents:
+            # Recursively extract text from each item
+            extracted = extract_contents_text(item)
+            if extracted:
+                texts.append(extracted)
+        return " ".join(texts) if texts else None
+
+    # Dictionary case
+    if isinstance(contents, dict):
+        if "text" in contents:
+            return contents["text"]
+        # Try to extract from parts if present in dict
+        if "parts" in contents:
+            return extract_contents_text(contents["parts"])
+
+    # Content object with parts - recurse into parts
+    if getattr(contents, "parts", None):
+        return extract_contents_text(contents.parts)
+
+    # Direct text attribute
+    if hasattr(contents, "text"):
+        return contents.text
+
+    return None
+
+
+def _format_tools_for_span(tools):
+    # type: (Iterable[Tool | Callable[..., Any]]) -> Optional[List[dict[str, Any]]]
+    """Format tools parameter for span data."""
+    formatted_tools = []
+    for tool in tools:
+        if callable(tool):
+            # Handle callable functions passed directly
+            formatted_tools.append(
+                {
+                    "name": getattr(tool, "__name__", "unknown"),
+                    "description": getattr(tool, "__doc__", None),
+                }
+            )
+        elif (
+            hasattr(tool, "function_declarations")
+            and tool.function_declarations is not None
+        ):
+            # Tool object with function declarations
+            for func_decl in tool.function_declarations:
+                formatted_tools.append(
+                    {
+                        "name": getattr(func_decl, "name", None),
+                        "description": getattr(func_decl, "description", None),
+                    }
+                )
+        else:
+            # Check for predefined tool attributes - each of these tools
+            # is an attribute of the tool object, by default set to None
+            for attr_name, description in TOOL_ATTRIBUTES_MAP.items():
+                if getattr(tool, attr_name, None):
+                    formatted_tools.append(
+                        {
+                            "name": attr_name,
+                            "description": description,
+                        }
+                    )
+                    break
+
+    return formatted_tools if formatted_tools else None
+
+
+def extract_tool_calls(response):
+    # type: (GenerateContentResponse) -> Optional[List[dict[str, Any]]]
+    """Extract tool/function calls from response candidates and automatic function calling history."""
+
+    tool_calls = []
+
+    # Extract from candidates, sometimes tool calls are nested under the content.parts object
+    if getattr(response, "candidates", []):
+        for candidate in response.candidates:
+            if not hasattr(candidate, "content") or not getattr(
+                candidate.content, "parts", []
+            ):
+                continue
+
+            for part in candidate.content.parts:
+                if getattr(part, "function_call", None):
+                    function_call = part.function_call
+                    tool_call = {
+                        "name": getattr(function_call, "name", None),
+                        "type": "function_call",
+                    }
+
+                    # Extract arguments if available
+                    if getattr(function_call, "args", None):
+                        tool_call["arguments"] = safe_serialize(function_call.args)
+
+                    tool_calls.append(tool_call)
+
+    # Extract from automatic_function_calling_history
+    # This is the history of tool calls made by the model
+    if getattr(response, "automatic_function_calling_history", None):
+        for content in response.automatic_function_calling_history:
+            if not getattr(content, "parts", None):
+                continue
+
+            for part in getattr(content, "parts", []):
+                if getattr(part, "function_call", None):
+                    function_call = part.function_call
+                    tool_call = {
+                        "name": getattr(function_call, "name", None),
+                        "type": "function_call",
+                    }
+
+                    # Extract arguments if available
+                    if hasattr(function_call, "args"):
+                        tool_call["arguments"] = safe_serialize(function_call.args)
+
+                    tool_calls.append(tool_call)
+
+    return tool_calls if tool_calls else None
+
+
+def _capture_tool_input(args, kwargs, tool):
+    # type: (tuple[Any, ...], dict[str, Any], Tool) -> dict[str, Any]
+    """Capture tool input from args and kwargs."""
+    tool_input = kwargs.copy() if kwargs else {}
+
+    # If we have positional args, try to map them to the function signature
+    if args:
+        try:
+            sig = inspect.signature(tool)
+            param_names = list(sig.parameters.keys())
+            for i, arg in enumerate(args):
+                if i < len(param_names):
+                    tool_input[param_names[i]] = arg
+        except Exception:
+            # Fallback if we can't get the signature
+            tool_input["args"] = args
+
+    return tool_input
+
+
+def _create_tool_span(tool_name, tool_doc):
+    # type: (str, Optional[str]) -> Span
+    """Create a span for tool execution."""
+    span = sentry_sdk.start_span(
+        op=OP.GEN_AI_EXECUTE_TOOL,
+        name=f"execute_tool {tool_name}",
+        origin=ORIGIN,
+    )
+    span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name)
+    span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, "function")
+    if tool_doc:
+        span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_doc)
+    return span
+
+
+def wrapped_tool(tool):
+    # type: (Tool | Callable[..., Any]) -> Tool | Callable[..., Any]
+    """Wrap a tool to emit execute_tool spans when called."""
+    if not callable(tool):
+        # Not a callable function, return as-is (predefined tools)
+        return tool
+
+    tool_name = getattr(tool, "__name__", "unknown")
+    tool_doc = tool.__doc__
+
+    if inspect.iscoroutinefunction(tool):
+        # Async function
+        @wraps(tool)
+        async def async_wrapped(*args, **kwargs):
+            # type: (Any, Any) -> Any
+            with _create_tool_span(tool_name, tool_doc) as span:
+                # Capture tool input
+                tool_input = _capture_tool_input(args, kwargs, tool)
+                with capture_internal_exceptions():
+                    span.set_data(
+                        SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input)
+                    )
+
+                try:
+                    result = await tool(*args, **kwargs)
+
+                    # Capture tool output
+                    with capture_internal_exceptions():
+                        span.set_data(
+                            SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result)
+                        )
+
+                    return result
+                except Exception as exc:
+                    _capture_exception(exc)
+                    raise
+
+        return async_wrapped
+    else:
+        # Sync function
+        @wraps(tool)
+        def sync_wrapped(*args, **kwargs):
+            # type: (Any, Any) -> Any
+            with _create_tool_span(tool_name, tool_doc) as span:
+                # Capture tool input
+                tool_input = _capture_tool_input(args, kwargs, tool)
+                with capture_internal_exceptions():
+                    span.set_data(
+                        SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input)
+                    )
+
+                try:
+                    result = tool(*args, **kwargs)
+
+                    # Capture tool output
+                    with capture_internal_exceptions():
+                        span.set_data(
+                            SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result)
+                        )
+
+                    return result
+                except Exception as exc:
+                    _capture_exception(exc)
+                    raise
+
+        return sync_wrapped
+
+
+def wrapped_config_with_tools(config):
+    # type: (GenerateContentConfig) -> GenerateContentConfig
+    """Wrap tools in config to emit execute_tool spans. Tools are sometimes passed directly as
+    callable functions as a part of the config object."""
+
+    if not config or not getattr(config, "tools", None):
+        return config
+
+    result = copy.copy(config)
+    result.tools = [wrapped_tool(tool) for tool in config.tools]
+
+    return result
+
+
+def _extract_response_text(response):
+    # type: (GenerateContentResponse) -> Optional[List[str]]
+    """Extract text from response candidates."""
+
+    if not response or not getattr(response, "candidates", []):
+        return None
+
+    texts = []
+    for candidate in response.candidates:
+        if not hasattr(candidate, "content") or not hasattr(candidate.content, "parts"):
+            continue
+
+        for part in candidate.content.parts:
+            if getattr(part, "text", None):
+                texts.append(part.text)
+
+    return texts if texts else None
+
+
+def extract_finish_reasons(response):
+    # type: (GenerateContentResponse) -> Optional[List[str]]
+    """Extract finish reasons from response candidates."""
+    if not response or not getattr(response, "candidates", []):
+        return None
+
+    finish_reasons = []
+    for candidate in response.candidates:
+        if getattr(candidate, "finish_reason", None):
+            # Convert enum value to string if necessary
+            reason = str(candidate.finish_reason)
+            # Remove enum prefix if present (e.g., "FinishReason.STOP" -> "STOP")
+            if "." in reason:
+                reason = reason.split(".")[-1]
+            finish_reasons.append(reason)
+
+    return finish_reasons if finish_reasons else None
+
+
+def set_span_data_for_request(span, integration, model, contents, kwargs):
+    # type: (Span, Any, str, ContentListUnion, dict[str, Any]) -> None
+    """Set span data for the request."""
+    span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
+    span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
+
+    if kwargs.get("stream", False):
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
+
+    config = kwargs.get("config")
+
+    if config is None:
+        return
+
+    config = cast(GenerateContentConfig, config)
+
+    # Set input messages/prompts if PII is allowed
+    if should_send_default_pii() and integration.include_prompts:
+        messages = []
+
+        # Add system instruction if present
+        if hasattr(config, "system_instruction"):
+            system_instruction = config.system_instruction
+            if system_instruction:
+                system_text = extract_contents_text(system_instruction)
+                if system_text:
+                    messages.append({"role": "system", "content": system_text})
+
+        # Add user message
+        contents_text = extract_contents_text(contents)
+        if contents_text:
+            messages.append({"role": "user", "content": contents_text})
+
+        if messages:
+            set_data_normalized(
+                span,
+                SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                messages,
+                unpack=False,
+            )
+
+    # Extract parameters directly from config (not nested under generation_config)
+    for param, span_key in [
+        ("temperature", SPANDATA.GEN_AI_REQUEST_TEMPERATURE),
+        ("top_p", SPANDATA.GEN_AI_REQUEST_TOP_P),
+        ("top_k", SPANDATA.GEN_AI_REQUEST_TOP_K),
+        ("max_output_tokens", SPANDATA.GEN_AI_REQUEST_MAX_TOKENS),
+        ("presence_penalty", SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY),
+        ("frequency_penalty", SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY),
+        ("seed", SPANDATA.GEN_AI_REQUEST_SEED),
+    ]:
+        if hasattr(config, param):
+            value = getattr(config, param)
+            if value is not None:
+                span.set_data(span_key, value)
+
+    # Set tools if available
+    if hasattr(config, "tools"):
+        tools = config.tools
+        if tools:
+            formatted_tools = _format_tools_for_span(tools)
+            if formatted_tools:
+                set_data_normalized(
+                    span,
+                    SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
+                    formatted_tools,
+                    unpack=False,
+                )
+
+
+def set_span_data_for_response(span, integration, response):
+    # type: (Span, Any, GenerateContentResponse) -> None
+    """Set span data for the response."""
+    if not response:
+        return
+
+    if should_send_default_pii() and integration.include_prompts:
+        response_texts = _extract_response_text(response)
+        if response_texts:
+            # Format as JSON string array as per documentation
+            span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(response_texts))
+
+    tool_calls = extract_tool_calls(response)
+    if tool_calls:
+        # Tool calls should be JSON serialized
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls))
+
+    finish_reasons = extract_finish_reasons(response)
+    if finish_reasons:
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons
+        )
+
+    if getattr(response, "response_id", None):
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response.response_id)
+
+    if getattr(response, "model_version", None):
+        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_version)
+
+    usage_data = extract_usage_data(response)
+
+    if usage_data["input_tokens"]:
+        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage_data["input_tokens"])
+
+    if usage_data["input_tokens_cached"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
+            usage_data["input_tokens_cached"],
+        )
+
+    if usage_data["output_tokens"]:
+        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage_data["output_tokens"])
+
+    if usage_data["output_tokens_reasoning"]:
+        span.set_data(
+            SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
+            usage_data["output_tokens_reasoning"],
+        )
+
+    if usage_data["total_tokens"]:
+        span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage_data["total_tokens"])
+
+
+def prepare_generate_content_args(args, kwargs):
+    # type: (tuple[Any, ...], dict[str, Any]) -> tuple[Any, Any, str]
+    """Extract and prepare common arguments for generate_content methods."""
+    model = args[0] if args else kwargs.get("model", "unknown")
+    contents = args[1] if len(args) > 1 else kwargs.get("contents")
+    model_name = get_model_name(model)
+
+    config = kwargs.get("config")
+    wrapped_config = wrapped_config_with_tools(config)
+    if wrapped_config is not config:
+        kwargs["config"] = wrapped_config
+
+    return model, contents, model_name
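
For reference, extract_usage_data above folds google-genai's separately reported counts into a single structure: input tokens are prompt plus tool-use prompt tokens, and output tokens are candidates plus reasoning ("thoughts") tokens. A minimal sketch against the dict shape used by the streaming path (field names are from the diff; the values are invented):

from sentry_sdk.integrations.google_genai.utils import extract_usage_data

usage = {
    "usage_metadata": {
        "prompt_token_count": 10,
        "tool_use_prompt_token_count": 2,
        "cached_content_token_count": 4,
        "thoughts_token_count": 6,
        "candidates_token_count": 20,
        "total_token_count": 42,
    }
}
data = extract_usage_data(usage)
assert data["input_tokens"] == 12             # prompt + tool-use prompt
assert data["input_tokens_cached"] == 4
assert data["output_tokens_reasoning"] == 6
assert data["output_tokens"] == 26            # candidates + reasoning
assert data["total_tokens"] == 42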
sentry_sdk/integrations/httpx.py
CHANGED

@@ -1,8 +1,13 @@
 import sentry_sdk
+from sentry_sdk import start_span
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import Integration, DidNotEnable
 from sentry_sdk.tracing import BAGGAGE_HEADER_NAME
-from sentry_sdk.tracing_utils import Baggage, should_propagate_trace
+from sentry_sdk.tracing_utils import (
+    Baggage,
+    should_propagate_trace,
+    add_http_request_source,
+)
 from sentry_sdk.utils import (
     SENSITIVE_DATA_SUBSTITUTE,
     capture_internal_exceptions,
@@ -52,7 +57,7 @@ def _install_httpx_client():
         with capture_internal_exceptions():
             parsed_url = parse_url(str(request.url), sanitize=False)

-        with sentry_sdk.start_span(
+        with start_span(
             op=OP.HTTP_CLIENT,
             name="%s %s"
             % (
@@ -88,7 +93,10 @@ def _install_httpx_client():
             span.set_http_status(rv.status_code)
             span.set_data("reason", rv.reason_phrase)

-            return rv
+            with capture_internal_exceptions():
+                add_http_request_source(span)
+
+            return rv

    Client.send = send

@@ -106,7 +114,7 @@ def _install_httpx_async_client():
         with capture_internal_exceptions():
             parsed_url = parse_url(str(request.url), sanitize=False)

-        with sentry_sdk.start_span(
+        with start_span(
             op=OP.HTTP_CLIENT,
             name="%s %s"
             % (
@@ -144,7 +152,10 @@ def _install_httpx_async_client():
             span.set_http_status(rv.status_code)
             span.set_data("reason", rv.reason_phrase)

-            return rv
+            with capture_internal_exceptions():
+                add_http_request_source(span)
+
+            return rv

    AsyncClient.send = send
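
The httpx change adds one behavior on top of import cleanups: after a response is received, add_http_request_source(span) runs under capture_internal_exceptions, attaching the outgoing request's source location to the http.client span before the response is returned. A minimal sketch of code that would exercise the patched sync path (the DSN is a placeholder):

import httpx
import sentry_sdk

sentry_sdk.init(dsn="...", traces_sample_rate=1.0)

# Client.send is patched by the integration, so this request produces an
# http.client span that now also carries the request's source location.
with sentry_sdk.start_transaction(op="task", name="demo"):
    httpx.Client().get("https://example.com")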
sentry_sdk/integrations/langchain.py
CHANGED

@@ -4,7 +4,12 @@ from functools import wraps

 import sentry_sdk
 from sentry_sdk.ai.monitoring import set_ai_pipeline_name
-from sentry_sdk.ai.utils import
+from sentry_sdk.ai.utils import (
+    GEN_AI_ALLOWED_MESSAGE_ROLES,
+    normalize_message_roles,
+    set_data_normalized,
+    get_start_span_function,
+)
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
@@ -209,8 +214,18 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
         _set_tools_on_span(span, all_params.get("tools"))

         if should_send_default_pii() and self.include_prompts:
+            normalized_messages = [
+                {
+                    "role": GEN_AI_ALLOWED_MESSAGE_ROLES.USER,
+                    "content": {"type": "text", "text": prompt},
+                }
+                for prompt in prompts
+            ]
             set_data_normalized(
-                span,
+                span,
+                SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                normalized_messages,
+                unpack=False,
             )

     def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
@@ -262,6 +277,8 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
                 normalized_messages.append(
                     self._normalize_langchain_message(message)
                 )
+        normalized_messages = normalize_message_roles(normalized_messages)
+
         set_data_normalized(
             span,
             SPANDATA.GEN_AI_REQUEST_MESSAGES,
@@ -740,8 +757,12 @@ def _wrap_agent_executor_invoke(f):
             and should_send_default_pii()
             and integration.include_prompts
         ):
+            normalized_messages = normalize_message_roles([input])
             set_data_normalized(
-                span,
+                span,
+                SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                normalized_messages,
+                unpack=False,
             )

             output = result.get("output")
@@ -791,8 +812,12 @@ def _wrap_agent_executor_stream(f):
             and should_send_default_pii()
             and integration.include_prompts
         ):
+            normalized_messages = normalize_message_roles([input])
             set_data_normalized(
-                span,
+                span,
+                SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                normalized_messages,
+                unpack=False,
             )

             # Run the agent
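
All of the langchain changes funnel prompts through one normalized message shape before storing them with set_data_normalized(..., unpack=False). A minimal sketch of the normalization added in SentryLangchainCallback above; the literal "user" value is an assumption for what GEN_AI_ALLOWED_MESSAGE_ROLES.USER resolves to:

prompts = ["What is the capital of France?"]

normalized_messages = [
    {
        "role": "user",  # GEN_AI_ALLOWED_MESSAGE_ROLES.USER (literal value assumed)
        "content": {"type": "text", "text": prompt},
    }
    for prompt in prompts
]
# Stored under SPANDATA.GEN_AI_REQUEST_MESSAGES with unpack=False, i.e. the
# list is serialized as a single value rather than exploded into indexed keys.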