sentry-sdk 0.18.0__py2.py3-none-any.whl → 2.46.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sentry_sdk/__init__.py +48 -6
- sentry_sdk/_compat.py +64 -56
- sentry_sdk/_init_implementation.py +84 -0
- sentry_sdk/_log_batcher.py +172 -0
- sentry_sdk/_lru_cache.py +47 -0
- sentry_sdk/_metrics_batcher.py +167 -0
- sentry_sdk/_queue.py +81 -19
- sentry_sdk/_types.py +311 -11
- sentry_sdk/_werkzeug.py +98 -0
- sentry_sdk/ai/__init__.py +7 -0
- sentry_sdk/ai/monitoring.py +137 -0
- sentry_sdk/ai/utils.py +144 -0
- sentry_sdk/api.py +409 -67
- sentry_sdk/attachments.py +75 -0
- sentry_sdk/client.py +849 -103
- sentry_sdk/consts.py +1389 -34
- sentry_sdk/crons/__init__.py +10 -0
- sentry_sdk/crons/api.py +62 -0
- sentry_sdk/crons/consts.py +4 -0
- sentry_sdk/crons/decorator.py +135 -0
- sentry_sdk/debug.py +12 -15
- sentry_sdk/envelope.py +112 -61
- sentry_sdk/feature_flags.py +71 -0
- sentry_sdk/hub.py +442 -386
- sentry_sdk/integrations/__init__.py +228 -58
- sentry_sdk/integrations/_asgi_common.py +108 -0
- sentry_sdk/integrations/_wsgi_common.py +131 -40
- sentry_sdk/integrations/aiohttp.py +221 -72
- sentry_sdk/integrations/anthropic.py +439 -0
- sentry_sdk/integrations/argv.py +4 -6
- sentry_sdk/integrations/ariadne.py +161 -0
- sentry_sdk/integrations/arq.py +247 -0
- sentry_sdk/integrations/asgi.py +237 -135
- sentry_sdk/integrations/asyncio.py +144 -0
- sentry_sdk/integrations/asyncpg.py +208 -0
- sentry_sdk/integrations/atexit.py +13 -18
- sentry_sdk/integrations/aws_lambda.py +233 -80
- sentry_sdk/integrations/beam.py +27 -35
- sentry_sdk/integrations/boto3.py +137 -0
- sentry_sdk/integrations/bottle.py +91 -69
- sentry_sdk/integrations/celery/__init__.py +529 -0
- sentry_sdk/integrations/celery/beat.py +293 -0
- sentry_sdk/integrations/celery/utils.py +43 -0
- sentry_sdk/integrations/chalice.py +35 -28
- sentry_sdk/integrations/clickhouse_driver.py +177 -0
- sentry_sdk/integrations/cloud_resource_context.py +280 -0
- sentry_sdk/integrations/cohere.py +274 -0
- sentry_sdk/integrations/dedupe.py +32 -8
- sentry_sdk/integrations/django/__init__.py +343 -89
- sentry_sdk/integrations/django/asgi.py +201 -22
- sentry_sdk/integrations/django/caching.py +204 -0
- sentry_sdk/integrations/django/middleware.py +80 -32
- sentry_sdk/integrations/django/signals_handlers.py +91 -0
- sentry_sdk/integrations/django/templates.py +69 -2
- sentry_sdk/integrations/django/transactions.py +39 -14
- sentry_sdk/integrations/django/views.py +69 -16
- sentry_sdk/integrations/dramatiq.py +226 -0
- sentry_sdk/integrations/excepthook.py +19 -13
- sentry_sdk/integrations/executing.py +5 -6
- sentry_sdk/integrations/falcon.py +128 -65
- sentry_sdk/integrations/fastapi.py +141 -0
- sentry_sdk/integrations/flask.py +114 -75
- sentry_sdk/integrations/gcp.py +67 -36
- sentry_sdk/integrations/gnu_backtrace.py +14 -22
- sentry_sdk/integrations/google_genai/__init__.py +301 -0
- sentry_sdk/integrations/google_genai/consts.py +16 -0
- sentry_sdk/integrations/google_genai/streaming.py +155 -0
- sentry_sdk/integrations/google_genai/utils.py +576 -0
- sentry_sdk/integrations/gql.py +162 -0
- sentry_sdk/integrations/graphene.py +151 -0
- sentry_sdk/integrations/grpc/__init__.py +168 -0
- sentry_sdk/integrations/grpc/aio/__init__.py +7 -0
- sentry_sdk/integrations/grpc/aio/client.py +95 -0
- sentry_sdk/integrations/grpc/aio/server.py +100 -0
- sentry_sdk/integrations/grpc/client.py +91 -0
- sentry_sdk/integrations/grpc/consts.py +1 -0
- sentry_sdk/integrations/grpc/server.py +66 -0
- sentry_sdk/integrations/httpx.py +178 -0
- sentry_sdk/integrations/huey.py +174 -0
- sentry_sdk/integrations/huggingface_hub.py +378 -0
- sentry_sdk/integrations/langchain.py +1132 -0
- sentry_sdk/integrations/langgraph.py +337 -0
- sentry_sdk/integrations/launchdarkly.py +61 -0
- sentry_sdk/integrations/litellm.py +287 -0
- sentry_sdk/integrations/litestar.py +315 -0
- sentry_sdk/integrations/logging.py +261 -85
- sentry_sdk/integrations/loguru.py +213 -0
- sentry_sdk/integrations/mcp.py +566 -0
- sentry_sdk/integrations/modules.py +6 -33
- sentry_sdk/integrations/openai.py +725 -0
- sentry_sdk/integrations/openai_agents/__init__.py +61 -0
- sentry_sdk/integrations/openai_agents/consts.py +1 -0
- sentry_sdk/integrations/openai_agents/patches/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/patches/agent_run.py +140 -0
- sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
- sentry_sdk/integrations/openai_agents/patches/models.py +50 -0
- sentry_sdk/integrations/openai_agents/patches/runner.py +45 -0
- sentry_sdk/integrations/openai_agents/patches/tools.py +77 -0
- sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +21 -0
- sentry_sdk/integrations/openai_agents/spans/ai_client.py +42 -0
- sentry_sdk/integrations/openai_agents/spans/execute_tool.py +48 -0
- sentry_sdk/integrations/openai_agents/spans/handoff.py +19 -0
- sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +86 -0
- sentry_sdk/integrations/openai_agents/utils.py +199 -0
- sentry_sdk/integrations/openfeature.py +35 -0
- sentry_sdk/integrations/opentelemetry/__init__.py +7 -0
- sentry_sdk/integrations/opentelemetry/consts.py +5 -0
- sentry_sdk/integrations/opentelemetry/integration.py +58 -0
- sentry_sdk/integrations/opentelemetry/propagator.py +117 -0
- sentry_sdk/integrations/opentelemetry/span_processor.py +391 -0
- sentry_sdk/integrations/otlp.py +82 -0
- sentry_sdk/integrations/pure_eval.py +20 -11
- sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
- sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
- sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
- sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +215 -0
- sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +110 -0
- sentry_sdk/integrations/pydantic_ai/patches/model_request.py +40 -0
- sentry_sdk/integrations/pydantic_ai/patches/tools.py +98 -0
- sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
- sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +246 -0
- sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
- sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
- sentry_sdk/integrations/pydantic_ai/utils.py +223 -0
- sentry_sdk/integrations/pymongo.py +214 -0
- sentry_sdk/integrations/pyramid.py +71 -60
- sentry_sdk/integrations/quart.py +237 -0
- sentry_sdk/integrations/ray.py +165 -0
- sentry_sdk/integrations/redis/__init__.py +48 -0
- sentry_sdk/integrations/redis/_async_common.py +116 -0
- sentry_sdk/integrations/redis/_sync_common.py +119 -0
- sentry_sdk/integrations/redis/consts.py +19 -0
- sentry_sdk/integrations/redis/modules/__init__.py +0 -0
- sentry_sdk/integrations/redis/modules/caches.py +118 -0
- sentry_sdk/integrations/redis/modules/queries.py +65 -0
- sentry_sdk/integrations/redis/rb.py +32 -0
- sentry_sdk/integrations/redis/redis.py +69 -0
- sentry_sdk/integrations/redis/redis_cluster.py +107 -0
- sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +50 -0
- sentry_sdk/integrations/redis/utils.py +148 -0
- sentry_sdk/integrations/rq.py +62 -52
- sentry_sdk/integrations/rust_tracing.py +284 -0
- sentry_sdk/integrations/sanic.py +248 -114
- sentry_sdk/integrations/serverless.py +13 -22
- sentry_sdk/integrations/socket.py +96 -0
- sentry_sdk/integrations/spark/spark_driver.py +115 -62
- sentry_sdk/integrations/spark/spark_worker.py +42 -50
- sentry_sdk/integrations/sqlalchemy.py +82 -37
- sentry_sdk/integrations/starlette.py +737 -0
- sentry_sdk/integrations/starlite.py +292 -0
- sentry_sdk/integrations/statsig.py +37 -0
- sentry_sdk/integrations/stdlib.py +100 -58
- sentry_sdk/integrations/strawberry.py +394 -0
- sentry_sdk/integrations/sys_exit.py +70 -0
- sentry_sdk/integrations/threading.py +142 -38
- sentry_sdk/integrations/tornado.py +68 -53
- sentry_sdk/integrations/trytond.py +15 -20
- sentry_sdk/integrations/typer.py +60 -0
- sentry_sdk/integrations/unleash.py +33 -0
- sentry_sdk/integrations/unraisablehook.py +53 -0
- sentry_sdk/integrations/wsgi.py +126 -125
- sentry_sdk/logger.py +96 -0
- sentry_sdk/metrics.py +81 -0
- sentry_sdk/monitor.py +120 -0
- sentry_sdk/profiler/__init__.py +49 -0
- sentry_sdk/profiler/continuous_profiler.py +730 -0
- sentry_sdk/profiler/transaction_profiler.py +839 -0
- sentry_sdk/profiler/utils.py +195 -0
- sentry_sdk/scope.py +1542 -112
- sentry_sdk/scrubber.py +177 -0
- sentry_sdk/serializer.py +152 -210
- sentry_sdk/session.py +177 -0
- sentry_sdk/sessions.py +202 -179
- sentry_sdk/spotlight.py +242 -0
- sentry_sdk/tracing.py +1202 -294
- sentry_sdk/tracing_utils.py +1236 -0
- sentry_sdk/transport.py +693 -189
- sentry_sdk/types.py +52 -0
- sentry_sdk/utils.py +1395 -228
- sentry_sdk/worker.py +30 -17
- sentry_sdk-2.46.0.dist-info/METADATA +268 -0
- sentry_sdk-2.46.0.dist-info/RECORD +189 -0
- {sentry_sdk-0.18.0.dist-info → sentry_sdk-2.46.0.dist-info}/WHEEL +1 -1
- sentry_sdk-2.46.0.dist-info/entry_points.txt +2 -0
- sentry_sdk-2.46.0.dist-info/licenses/LICENSE +21 -0
- sentry_sdk/_functools.py +0 -66
- sentry_sdk/integrations/celery.py +0 -275
- sentry_sdk/integrations/redis.py +0 -103
- sentry_sdk-0.18.0.dist-info/LICENSE +0 -9
- sentry_sdk-0.18.0.dist-info/METADATA +0 -66
- sentry_sdk-0.18.0.dist-info/RECORD +0 -65
- {sentry_sdk-0.18.0.dist-info → sentry_sdk-2.46.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,576 @@
|
|
|
1
|
+
import copy
|
|
2
|
+
import inspect
|
|
3
|
+
from functools import wraps
|
|
4
|
+
from .consts import ORIGIN, TOOL_ATTRIBUTES_MAP, GEN_AI_SYSTEM
|
|
5
|
+
from typing import (
|
|
6
|
+
cast,
|
|
7
|
+
TYPE_CHECKING,
|
|
8
|
+
Iterable,
|
|
9
|
+
Any,
|
|
10
|
+
Callable,
|
|
11
|
+
List,
|
|
12
|
+
Optional,
|
|
13
|
+
Union,
|
|
14
|
+
TypedDict,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
import sentry_sdk
|
|
18
|
+
from sentry_sdk.ai.utils import (
|
|
19
|
+
set_data_normalized,
|
|
20
|
+
truncate_and_annotate_messages,
|
|
21
|
+
normalize_message_roles,
|
|
22
|
+
)
|
|
23
|
+
from sentry_sdk.consts import OP, SPANDATA
|
|
24
|
+
from sentry_sdk.scope import should_send_default_pii
|
|
25
|
+
from sentry_sdk.utils import (
|
|
26
|
+
capture_internal_exceptions,
|
|
27
|
+
event_from_exception,
|
|
28
|
+
safe_serialize,
|
|
29
|
+
)
|
|
30
|
+
from google.genai.types import GenerateContentConfig
|
|
31
|
+
|
|
32
|
+
if TYPE_CHECKING:
|
|
33
|
+
from sentry_sdk.tracing import Span
|
|
34
|
+
from google.genai.types import (
|
|
35
|
+
GenerateContentResponse,
|
|
36
|
+
ContentListUnion,
|
|
37
|
+
Tool,
|
|
38
|
+
Model,
|
|
39
|
+
)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class UsageData(TypedDict):
    """Structure for token usage data.

    All counts default to 0 when the provider response omits them.
    """

    input_tokens: int  # prompt tokens + tool-use prompt tokens
    input_tokens_cached: int  # tokens served from cached content
    output_tokens: int  # candidate tokens + reasoning tokens (reasoning is a subset)
    output_tokens_reasoning: int  # "thoughts" tokens reported by google-genai
    total_tokens: int  # provider-reported total token count
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def extract_usage_data(response):
    # type: (Union[GenerateContentResponse, dict[str, Any]]) -> UsageData
    """Extract usage data from response into a structured format.

    Handles both ``GenerateContentResponse`` objects and plain dictionaries
    (the latter appear when accumulating streaming chunks).

    Args:
        response: The GenerateContentResponse object or dictionary containing
            usage metadata.

    Returns:
        UsageData: Dictionary with input_tokens, input_tokens_cached,
            output_tokens, output_tokens_reasoning and total_tokens fields.
    """
    usage_data = UsageData(
        input_tokens=0,
        input_tokens_cached=0,
        output_tokens=0,
        output_tokens_reasoning=0,
        total_tokens=0,
    )

    # Pick one accessor for both response shapes so the extraction logic
    # below is written once instead of duplicated per branch.
    if isinstance(response, dict):
        # Dictionary response (from streaming).
        usage = response.get("usage_metadata", {})
        if not usage:
            return usage_data

        def read(field):
            # type: (str) -> int
            return usage.get(field, 0) or 0

    else:
        if not hasattr(response, "usage_metadata"):
            return usage_data
        usage = response.usage_metadata

        def read(field):
            # type: (str) -> int
            return getattr(usage, field, 0) or 0

    # Input tokens include both prompt and tool-use prompt tokens.
    usage_data["input_tokens"] = read("prompt_token_count") + read(
        "tool_use_prompt_token_count"
    )

    # Cached input tokens.
    usage_data["input_tokens_cached"] = read("cached_content_token_count")

    # Reasoning tokens.
    reasoning_tokens = read("thoughts_token_count")
    usage_data["output_tokens_reasoning"] = reasoning_tokens

    # google-genai reports output and reasoning tokens separately;
    # reasoning should be a sub-category of output tokens.
    usage_data["output_tokens"] = read("candidates_token_count") + reasoning_tokens

    usage_data["total_tokens"] = read("total_token_count")

    return usage_data
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def _capture_exception(exc):
    # type: (Any) -> None
    """Report *exc* to Sentry as an unhandled google_genai error."""
    mechanism = {"type": "google_genai", "handled": False}
    client = sentry_sdk.get_client()
    event, hint = event_from_exception(
        exc,
        client_options=client.options,
        mechanism=mechanism,
    )
    sentry_sdk.capture_event(event, hint=hint)
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def get_model_name(model):
    # type: (Union[str, Model]) -> str
    """Return the model identifier for *model* as a string.

    Accepts a plain string, an object exposing a ``name`` attribute, or
    anything else (stringified as a last resort).
    """
    if isinstance(model, str):
        return model
    # Model-like objects carry their identifier in ``name``; fall back to
    # plain stringification for everything else.
    target = model.name if hasattr(model, "name") else model
    return str(target)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def extract_contents_text(contents):
    # type: (ContentListUnion) -> Optional[str]
    """Pull the human-readable text out of a ``contents`` value.

    ``contents`` may be a plain string, a list of items, a dict, or a
    Content-like object exposing ``parts``/``text``; nested structures are
    flattened recursively and joined with single spaces.
    """
    if contents is None:
        return None

    # Plain string: nothing to unwrap.
    if isinstance(contents, str):
        return contents

    # List of contents or parts: recurse into each entry.
    if isinstance(contents, list):
        collected = [
            text
            for text in (extract_contents_text(entry) for entry in contents)
            if text
        ]
        return " ".join(collected) if collected else None

    # Dict-shaped content: prefer an explicit "text" key, else try "parts".
    if isinstance(contents, dict):
        if "text" in contents:
            return contents["text"]
        if "parts" in contents:
            return extract_contents_text(contents["parts"])
        return None

    # Content object with parts - recurse into them.
    parts = getattr(contents, "parts", None)
    if parts:
        return extract_contents_text(parts)

    # Direct text attribute.
    if hasattr(contents, "text"):
        return contents.text

    return None
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def _format_tools_for_span(tools):
    # type: (Iterable[Tool | Callable[..., Any]]) -> Optional[List[dict[str, Any]]]
    """Build a list of ``{"name", "description"}`` entries describing *tools*."""
    entries = []  # type: List[dict[str, Any]]

    for tool in tools:
        if callable(tool):
            # Plain Python callables passed directly as tools.
            entries.append(
                {
                    "name": getattr(tool, "__name__", "unknown"),
                    "description": getattr(tool, "__doc__", None),
                }
            )
            continue

        declarations = getattr(tool, "function_declarations", None)
        if declarations is not None:
            # Tool object carrying explicit function declarations.
            for decl in declarations:
                entries.append(
                    {
                        "name": getattr(decl, "name", None),
                        "description": getattr(decl, "description", None),
                    }
                )
            continue

        # Predefined tools: each known capability is an attribute on the
        # tool object, None unless enabled; record the first one set.
        for attr_name, description in TOOL_ATTRIBUTES_MAP.items():
            if getattr(tool, attr_name, None):
                entries.append({"name": attr_name, "description": description})
                break

    return entries if entries else None
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def extract_tool_calls(response):
    # type: (GenerateContentResponse) -> Optional[List[dict[str, Any]]]
    """Extract tool/function calls from response candidates and the
    automatic function calling history.

    Returns:
        A list of ``{"name", "type"[, "arguments"]}`` dicts, or ``None``
        when the response contains no function calls.
    """

    def _tool_call_from_part(part):
        # type: (Any) -> Optional[dict[str, Any]]
        # Build one tool-call record from a content part, or None when the
        # part carries no function call.
        function_call = getattr(part, "function_call", None)
        if not function_call:
            return None
        tool_call = {
            "name": getattr(function_call, "name", None),
            "type": "function_call",
        }
        # Only serialize arguments when actually present. (The history path
        # previously used hasattr(), which serialized None args as "null".)
        if getattr(function_call, "args", None):
            tool_call["arguments"] = safe_serialize(function_call.args)
        return tool_call

    tool_calls = []

    # Extract from candidates; tool calls are nested under content.parts.
    for candidate in getattr(response, "candidates", None) or []:
        if not hasattr(candidate, "content"):
            continue
        for part in getattr(candidate.content, "parts", None) or []:
            tool_call = _tool_call_from_part(part)
            if tool_call is not None:
                tool_calls.append(tool_call)

    # Extract from automatic_function_calling_history — the history of tool
    # calls made by the model during automatic function calling.
    for content in getattr(response, "automatic_function_calling_history", None) or []:
        for part in getattr(content, "parts", None) or []:
            tool_call = _tool_call_from_part(part)
            if tool_call is not None:
                tool_calls.append(tool_call)

    return tool_calls if tool_calls else None
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
def _capture_tool_input(args, kwargs, tool):
|
|
281
|
+
# type: (tuple[Any, ...], dict[str, Any], Tool) -> dict[str, Any]
|
|
282
|
+
"""Capture tool input from args and kwargs."""
|
|
283
|
+
tool_input = kwargs.copy() if kwargs else {}
|
|
284
|
+
|
|
285
|
+
# If we have positional args, try to map them to the function signature
|
|
286
|
+
if args:
|
|
287
|
+
try:
|
|
288
|
+
sig = inspect.signature(tool)
|
|
289
|
+
param_names = list(sig.parameters.keys())
|
|
290
|
+
for i, arg in enumerate(args):
|
|
291
|
+
if i < len(param_names):
|
|
292
|
+
tool_input[param_names[i]] = arg
|
|
293
|
+
except Exception:
|
|
294
|
+
# Fallback if we can't get the signature
|
|
295
|
+
tool_input["args"] = args
|
|
296
|
+
|
|
297
|
+
return tool_input
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
def _create_tool_span(tool_name, tool_doc):
    # type: (str, Optional[str]) -> Span
    """Start and return an ``execute_tool`` span annotated with the tool's
    name, type, and (when available) its docstring description."""
    span = sentry_sdk.start_span(
        op=OP.GEN_AI_EXECUTE_TOOL,
        name=f"execute_tool {tool_name}",
        origin=ORIGIN,
    )
    # An empty/missing docstring is not recorded as a description.
    description = tool_doc or None
    for key, value in (
        (SPANDATA.GEN_AI_TOOL_NAME, tool_name),
        (SPANDATA.GEN_AI_TOOL_TYPE, "function"),
        (SPANDATA.GEN_AI_TOOL_DESCRIPTION, description),
    ):
        if value is not None:
            span.set_data(key, value)
    return span
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
def wrapped_tool(tool):
    # type: (Tool | Callable[..., Any]) -> Tool | Callable[..., Any]
    """Wrap a tool to emit execute_tool spans when called.

    Non-callable tools (predefined Tool objects) are returned unchanged.
    Callable tools get a matching sync or async wrapper that opens an
    execute_tool span per invocation, records the serialized input and
    output on the span, and reports exceptions before re-raising them.
    """
    if not callable(tool):
        # Not a callable function, return as-is (predefined tools)
        return tool

    tool_name = getattr(tool, "__name__", "unknown")
    tool_doc = tool.__doc__

    if inspect.iscoroutinefunction(tool):
        # Async function
        @wraps(tool)
        async def async_wrapped(*args, **kwargs):
            # type: (Any, Any) -> Any
            with _create_tool_span(tool_name, tool_doc) as span:
                # Capture tool input; guarded so telemetry failures never
                # break the tool call itself.
                tool_input = _capture_tool_input(args, kwargs, tool)
                with capture_internal_exceptions():
                    span.set_data(
                        SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input)
                    )

                try:
                    result = await tool(*args, **kwargs)

                    # Capture tool output
                    with capture_internal_exceptions():
                        span.set_data(
                            SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result)
                        )

                    return result
                except Exception as exc:
                    # Report the failure, then let it propagate to the caller.
                    _capture_exception(exc)
                    raise

        return async_wrapped
    else:
        # Sync function
        @wraps(tool)
        def sync_wrapped(*args, **kwargs):
            # type: (Any, Any) -> Any
            with _create_tool_span(tool_name, tool_doc) as span:
                # Capture tool input (best-effort; see async variant).
                tool_input = _capture_tool_input(args, kwargs, tool)
                with capture_internal_exceptions():
                    span.set_data(
                        SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input)
                    )

                try:
                    result = tool(*args, **kwargs)

                    # Capture tool output
                    with capture_internal_exceptions():
                        span.set_data(
                            SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result)
                        )

                    return result
                except Exception as exc:
                    # Report the failure, then let it propagate to the caller.
                    _capture_exception(exc)
                    raise

        return sync_wrapped
|
|
381
|
+
|
|
382
|
+
|
|
383
|
+
def wrapped_config_with_tools(config):
    # type: (GenerateContentConfig) -> GenerateContentConfig
    """Return a shallow copy of *config* whose tools are wrapped to emit
    execute_tool spans.

    Tools are sometimes passed directly as callable functions on the config
    object. When *config* is falsy or has no tools it is returned unchanged.
    """
    tools = getattr(config, "tools", None) if config else None
    if not tools:
        return config

    patched = copy.copy(config)
    patched.tools = [wrapped_tool(entry) for entry in tools]
    return patched
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
def _extract_response_text(response):
|
|
398
|
+
# type: (GenerateContentResponse) -> Optional[List[str]]
|
|
399
|
+
"""Extract text from response candidates."""
|
|
400
|
+
|
|
401
|
+
if not response or not getattr(response, "candidates", []):
|
|
402
|
+
return None
|
|
403
|
+
|
|
404
|
+
texts = []
|
|
405
|
+
for candidate in response.candidates:
|
|
406
|
+
if not hasattr(candidate, "content") or not hasattr(candidate.content, "parts"):
|
|
407
|
+
continue
|
|
408
|
+
|
|
409
|
+
for part in candidate.content.parts:
|
|
410
|
+
if getattr(part, "text", None):
|
|
411
|
+
texts.append(part.text)
|
|
412
|
+
|
|
413
|
+
return texts if texts else None
|
|
414
|
+
|
|
415
|
+
|
|
416
|
+
def extract_finish_reasons(response):
    # type: (GenerateContentResponse) -> Optional[List[str]]
    """Return each candidate's finish reason as a bare string list."""
    if not response or not getattr(response, "candidates", []):
        return None

    reasons = []
    for candidate in response.candidates:
        finish_reason = getattr(candidate, "finish_reason", None)
        if not finish_reason:
            continue
        # Enum values stringify as e.g. "FinishReason.STOP"; keep only the
        # segment after the final dot (no-op for plain strings).
        reason_text = str(finish_reason)
        reasons.append(reason_text.rsplit(".", 1)[-1])

    return reasons if reasons else None
|
|
433
|
+
|
|
434
|
+
|
|
435
|
+
def set_span_data_for_request(span, integration, model, contents, kwargs):
    # type: (Span, Any, str, ContentListUnion, dict[str, Any]) -> None
    """Set span data for the request.

    Records the GenAI system and model, the streaming flag, (PII
    permitting) the prompt messages, the generation parameters found on
    ``config``, and the available tools.
    """
    span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
    span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)

    if kwargs.get("stream", False):
        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

    config = kwargs.get("config")

    # Everything below reads request parameters off the config object.
    if config is None:
        return

    config = cast(GenerateContentConfig, config)

    # Set input messages/prompts if PII is allowed
    if should_send_default_pii() and integration.include_prompts:
        messages = []

        # Add system instruction if present
        if hasattr(config, "system_instruction"):
            system_instruction = config.system_instruction
            if system_instruction:
                system_text = extract_contents_text(system_instruction)
                if system_text:
                    messages.append({"role": "system", "content": system_text})

        # Add user message
        contents_text = extract_contents_text(contents)
        if contents_text:
            messages.append({"role": "user", "content": contents_text})

        if messages:
            # Normalize roles and truncate before attaching to the span.
            normalized_messages = normalize_message_roles(messages)
            scope = sentry_sdk.get_current_scope()
            messages_data = truncate_and_annotate_messages(
                normalized_messages, span, scope
            )
            if messages_data is not None:
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_REQUEST_MESSAGES,
                    messages_data,
                    unpack=False,
                )

    # Extract parameters directly from config (not nested under generation_config)
    for param, span_key in [
        ("temperature", SPANDATA.GEN_AI_REQUEST_TEMPERATURE),
        ("top_p", SPANDATA.GEN_AI_REQUEST_TOP_P),
        ("top_k", SPANDATA.GEN_AI_REQUEST_TOP_K),
        ("max_output_tokens", SPANDATA.GEN_AI_REQUEST_MAX_TOKENS),
        ("presence_penalty", SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY),
        ("frequency_penalty", SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY),
        ("seed", SPANDATA.GEN_AI_REQUEST_SEED),
    ]:
        if hasattr(config, param):
            value = getattr(config, param)
            if value is not None:
                span.set_data(span_key, value)

    # Set tools if available
    if hasattr(config, "tools"):
        tools = config.tools
        if tools:
            formatted_tools = _format_tools_for_span(tools)
            if formatted_tools:
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
                    formatted_tools,
                    unpack=False,
                )
|
|
509
|
+
|
|
510
|
+
|
|
511
|
+
def set_span_data_for_response(span, integration, response):
    # type: (Span, Any, GenerateContentResponse) -> None
    """Set span data for the response: texts, tool calls, finish reasons,
    response id/model, and token usage counts."""
    if not response:
        return

    # Response text may contain user data, so it is only attached when PII
    # is allowed and prompts are opted in.
    if should_send_default_pii() and integration.include_prompts:
        texts = _extract_response_text(response)
        if texts:
            # Format as JSON string array as per documentation
            span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(texts))

    calls = extract_tool_calls(response)
    if calls:
        # Tool calls should be JSON serialized
        span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(calls))

    reasons = extract_finish_reasons(response)
    if reasons:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, reasons)

    response_id = getattr(response, "response_id", None)
    if response_id:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response_id)

    model_version = getattr(response, "model_version", None)
    if model_version:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model_version)

    # Token usage: only non-zero counts are attached.
    usage = extract_usage_data(response)
    for field, span_key in (
        ("input_tokens", SPANDATA.GEN_AI_USAGE_INPUT_TOKENS),
        ("input_tokens_cached", SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED),
        ("output_tokens", SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS),
        ("output_tokens_reasoning", SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING),
        ("total_tokens", SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS),
    ):
        if usage[field]:
            span.set_data(span_key, usage[field])
|
|
562
|
+
|
|
563
|
+
|
|
564
|
+
def prepare_generate_content_args(args, kwargs):
    # type: (tuple[Any, ...], dict[str, Any]) -> tuple[Any, Any, str]
    """Extract and prepare common arguments for generate_content methods.

    Resolves model/contents from positional or keyword arguments, derives
    the model name, and swaps a tool-wrapping config into *kwargs* when the
    config carries tools (mutates *kwargs* in place).
    """
    if args:
        model = args[0]
    else:
        model = kwargs.get("model", "unknown")
    contents = args[1] if len(args) > 1 else kwargs.get("contents")
    model_name = get_model_name(model)

    original_config = kwargs.get("config")
    patched_config = wrapped_config_with_tools(original_config)
    if patched_config is not original_config:
        kwargs["config"] = patched_config

    return model, contents, model_name
|