sentry-sdk 0.7.5__py2.py3-none-any.whl → 2.46.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sentry_sdk/__init__.py +48 -30
- sentry_sdk/_compat.py +74 -61
- sentry_sdk/_init_implementation.py +84 -0
- sentry_sdk/_log_batcher.py +172 -0
- sentry_sdk/_lru_cache.py +47 -0
- sentry_sdk/_metrics_batcher.py +167 -0
- sentry_sdk/_queue.py +289 -0
- sentry_sdk/_types.py +338 -0
- sentry_sdk/_werkzeug.py +98 -0
- sentry_sdk/ai/__init__.py +7 -0
- sentry_sdk/ai/monitoring.py +137 -0
- sentry_sdk/ai/utils.py +144 -0
- sentry_sdk/api.py +496 -80
- sentry_sdk/attachments.py +75 -0
- sentry_sdk/client.py +1023 -103
- sentry_sdk/consts.py +1438 -66
- sentry_sdk/crons/__init__.py +10 -0
- sentry_sdk/crons/api.py +62 -0
- sentry_sdk/crons/consts.py +4 -0
- sentry_sdk/crons/decorator.py +135 -0
- sentry_sdk/debug.py +15 -14
- sentry_sdk/envelope.py +369 -0
- sentry_sdk/feature_flags.py +71 -0
- sentry_sdk/hub.py +611 -280
- sentry_sdk/integrations/__init__.py +276 -49
- sentry_sdk/integrations/_asgi_common.py +108 -0
- sentry_sdk/integrations/_wsgi_common.py +180 -44
- sentry_sdk/integrations/aiohttp.py +291 -42
- sentry_sdk/integrations/anthropic.py +439 -0
- sentry_sdk/integrations/argv.py +9 -8
- sentry_sdk/integrations/ariadne.py +161 -0
- sentry_sdk/integrations/arq.py +247 -0
- sentry_sdk/integrations/asgi.py +341 -0
- sentry_sdk/integrations/asyncio.py +144 -0
- sentry_sdk/integrations/asyncpg.py +208 -0
- sentry_sdk/integrations/atexit.py +17 -10
- sentry_sdk/integrations/aws_lambda.py +377 -62
- sentry_sdk/integrations/beam.py +176 -0
- sentry_sdk/integrations/boto3.py +137 -0
- sentry_sdk/integrations/bottle.py +221 -0
- sentry_sdk/integrations/celery/__init__.py +529 -0
- sentry_sdk/integrations/celery/beat.py +293 -0
- sentry_sdk/integrations/celery/utils.py +43 -0
- sentry_sdk/integrations/chalice.py +134 -0
- sentry_sdk/integrations/clickhouse_driver.py +177 -0
- sentry_sdk/integrations/cloud_resource_context.py +280 -0
- sentry_sdk/integrations/cohere.py +274 -0
- sentry_sdk/integrations/dedupe.py +48 -14
- sentry_sdk/integrations/django/__init__.py +584 -191
- sentry_sdk/integrations/django/asgi.py +245 -0
- sentry_sdk/integrations/django/caching.py +204 -0
- sentry_sdk/integrations/django/middleware.py +187 -0
- sentry_sdk/integrations/django/signals_handlers.py +91 -0
- sentry_sdk/integrations/django/templates.py +79 -5
- sentry_sdk/integrations/django/transactions.py +49 -22
- sentry_sdk/integrations/django/views.py +96 -0
- sentry_sdk/integrations/dramatiq.py +226 -0
- sentry_sdk/integrations/excepthook.py +50 -13
- sentry_sdk/integrations/executing.py +67 -0
- sentry_sdk/integrations/falcon.py +272 -0
- sentry_sdk/integrations/fastapi.py +141 -0
- sentry_sdk/integrations/flask.py +142 -88
- sentry_sdk/integrations/gcp.py +239 -0
- sentry_sdk/integrations/gnu_backtrace.py +99 -0
- sentry_sdk/integrations/google_genai/__init__.py +301 -0
- sentry_sdk/integrations/google_genai/consts.py +16 -0
- sentry_sdk/integrations/google_genai/streaming.py +155 -0
- sentry_sdk/integrations/google_genai/utils.py +576 -0
- sentry_sdk/integrations/gql.py +162 -0
- sentry_sdk/integrations/graphene.py +151 -0
- sentry_sdk/integrations/grpc/__init__.py +168 -0
- sentry_sdk/integrations/grpc/aio/__init__.py +7 -0
- sentry_sdk/integrations/grpc/aio/client.py +95 -0
- sentry_sdk/integrations/grpc/aio/server.py +100 -0
- sentry_sdk/integrations/grpc/client.py +91 -0
- sentry_sdk/integrations/grpc/consts.py +1 -0
- sentry_sdk/integrations/grpc/server.py +66 -0
- sentry_sdk/integrations/httpx.py +178 -0
- sentry_sdk/integrations/huey.py +174 -0
- sentry_sdk/integrations/huggingface_hub.py +378 -0
- sentry_sdk/integrations/langchain.py +1132 -0
- sentry_sdk/integrations/langgraph.py +337 -0
- sentry_sdk/integrations/launchdarkly.py +61 -0
- sentry_sdk/integrations/litellm.py +287 -0
- sentry_sdk/integrations/litestar.py +315 -0
- sentry_sdk/integrations/logging.py +307 -96
- sentry_sdk/integrations/loguru.py +213 -0
- sentry_sdk/integrations/mcp.py +566 -0
- sentry_sdk/integrations/modules.py +14 -31
- sentry_sdk/integrations/openai.py +725 -0
- sentry_sdk/integrations/openai_agents/__init__.py +61 -0
- sentry_sdk/integrations/openai_agents/consts.py +1 -0
- sentry_sdk/integrations/openai_agents/patches/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/patches/agent_run.py +140 -0
- sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
- sentry_sdk/integrations/openai_agents/patches/models.py +50 -0
- sentry_sdk/integrations/openai_agents/patches/runner.py +45 -0
- sentry_sdk/integrations/openai_agents/patches/tools.py +77 -0
- sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +21 -0
- sentry_sdk/integrations/openai_agents/spans/ai_client.py +42 -0
- sentry_sdk/integrations/openai_agents/spans/execute_tool.py +48 -0
- sentry_sdk/integrations/openai_agents/spans/handoff.py +19 -0
- sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +86 -0
- sentry_sdk/integrations/openai_agents/utils.py +199 -0
- sentry_sdk/integrations/openfeature.py +35 -0
- sentry_sdk/integrations/opentelemetry/__init__.py +7 -0
- sentry_sdk/integrations/opentelemetry/consts.py +5 -0
- sentry_sdk/integrations/opentelemetry/integration.py +58 -0
- sentry_sdk/integrations/opentelemetry/propagator.py +117 -0
- sentry_sdk/integrations/opentelemetry/span_processor.py +391 -0
- sentry_sdk/integrations/otlp.py +82 -0
- sentry_sdk/integrations/pure_eval.py +141 -0
- sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
- sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
- sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
- sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +215 -0
- sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +110 -0
- sentry_sdk/integrations/pydantic_ai/patches/model_request.py +40 -0
- sentry_sdk/integrations/pydantic_ai/patches/tools.py +98 -0
- sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
- sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +246 -0
- sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
- sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
- sentry_sdk/integrations/pydantic_ai/utils.py +223 -0
- sentry_sdk/integrations/pymongo.py +214 -0
- sentry_sdk/integrations/pyramid.py +112 -68
- sentry_sdk/integrations/quart.py +237 -0
- sentry_sdk/integrations/ray.py +165 -0
- sentry_sdk/integrations/redis/__init__.py +48 -0
- sentry_sdk/integrations/redis/_async_common.py +116 -0
- sentry_sdk/integrations/redis/_sync_common.py +119 -0
- sentry_sdk/integrations/redis/consts.py +19 -0
- sentry_sdk/integrations/redis/modules/__init__.py +0 -0
- sentry_sdk/integrations/redis/modules/caches.py +118 -0
- sentry_sdk/integrations/redis/modules/queries.py +65 -0
- sentry_sdk/integrations/redis/rb.py +32 -0
- sentry_sdk/integrations/redis/redis.py +69 -0
- sentry_sdk/integrations/redis/redis_cluster.py +107 -0
- sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +50 -0
- sentry_sdk/integrations/redis/utils.py +148 -0
- sentry_sdk/integrations/rq.py +95 -37
- sentry_sdk/integrations/rust_tracing.py +284 -0
- sentry_sdk/integrations/sanic.py +294 -123
- sentry_sdk/integrations/serverless.py +48 -19
- sentry_sdk/integrations/socket.py +96 -0
- sentry_sdk/integrations/spark/__init__.py +4 -0
- sentry_sdk/integrations/spark/spark_driver.py +316 -0
- sentry_sdk/integrations/spark/spark_worker.py +116 -0
- sentry_sdk/integrations/sqlalchemy.py +142 -0
- sentry_sdk/integrations/starlette.py +737 -0
- sentry_sdk/integrations/starlite.py +292 -0
- sentry_sdk/integrations/statsig.py +37 -0
- sentry_sdk/integrations/stdlib.py +235 -29
- sentry_sdk/integrations/strawberry.py +394 -0
- sentry_sdk/integrations/sys_exit.py +70 -0
- sentry_sdk/integrations/threading.py +158 -28
- sentry_sdk/integrations/tornado.py +84 -52
- sentry_sdk/integrations/trytond.py +50 -0
- sentry_sdk/integrations/typer.py +60 -0
- sentry_sdk/integrations/unleash.py +33 -0
- sentry_sdk/integrations/unraisablehook.py +53 -0
- sentry_sdk/integrations/wsgi.py +201 -119
- sentry_sdk/logger.py +96 -0
- sentry_sdk/metrics.py +81 -0
- sentry_sdk/monitor.py +120 -0
- sentry_sdk/profiler/__init__.py +49 -0
- sentry_sdk/profiler/continuous_profiler.py +730 -0
- sentry_sdk/profiler/transaction_profiler.py +839 -0
- sentry_sdk/profiler/utils.py +195 -0
- sentry_sdk/py.typed +0 -0
- sentry_sdk/scope.py +1713 -85
- sentry_sdk/scrubber.py +177 -0
- sentry_sdk/serializer.py +405 -0
- sentry_sdk/session.py +177 -0
- sentry_sdk/sessions.py +275 -0
- sentry_sdk/spotlight.py +242 -0
- sentry_sdk/tracing.py +1486 -0
- sentry_sdk/tracing_utils.py +1236 -0
- sentry_sdk/transport.py +806 -134
- sentry_sdk/types.py +52 -0
- sentry_sdk/utils.py +1625 -465
- sentry_sdk/worker.py +54 -25
- sentry_sdk-2.46.0.dist-info/METADATA +268 -0
- sentry_sdk-2.46.0.dist-info/RECORD +189 -0
- {sentry_sdk-0.7.5.dist-info → sentry_sdk-2.46.0.dist-info}/WHEEL +1 -1
- sentry_sdk-2.46.0.dist-info/entry_points.txt +2 -0
- sentry_sdk-2.46.0.dist-info/licenses/LICENSE +21 -0
- sentry_sdk/integrations/celery.py +0 -119
- sentry_sdk-0.7.5.dist-info/LICENSE +0 -9
- sentry_sdk-0.7.5.dist-info/METADATA +0 -36
- sentry_sdk-0.7.5.dist-info/RECORD +0 -39
- {sentry_sdk-0.7.5.dist-info → sentry_sdk-2.46.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
from functools import wraps
|
|
2
|
+
from typing import (
|
|
3
|
+
Any,
|
|
4
|
+
AsyncIterator,
|
|
5
|
+
Callable,
|
|
6
|
+
Iterator,
|
|
7
|
+
List,
|
|
8
|
+
)
|
|
9
|
+
|
|
10
|
+
import sentry_sdk
|
|
11
|
+
from sentry_sdk.ai.utils import get_start_span_function
|
|
12
|
+
from sentry_sdk.integrations import DidNotEnable, Integration
|
|
13
|
+
from sentry_sdk.consts import OP, SPANDATA
|
|
14
|
+
from sentry_sdk.tracing import SPANSTATUS
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
try:
|
|
18
|
+
from google.genai.models import Models, AsyncModels
|
|
19
|
+
except ImportError:
|
|
20
|
+
raise DidNotEnable("google-genai not installed")
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
from .consts import IDENTIFIER, ORIGIN, GEN_AI_SYSTEM
|
|
24
|
+
from .utils import (
|
|
25
|
+
set_span_data_for_request,
|
|
26
|
+
set_span_data_for_response,
|
|
27
|
+
_capture_exception,
|
|
28
|
+
prepare_generate_content_args,
|
|
29
|
+
)
|
|
30
|
+
from .streaming import (
|
|
31
|
+
set_span_data_for_streaming_response,
|
|
32
|
+
accumulate_streaming_response,
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class GoogleGenAIIntegration(Integration):
    """Sentry integration for the ``google-genai`` SDK.

    Wraps the (a)sync ``generate_content`` / ``generate_content_stream``
    entry points so each model call is traced with an ``invoke_agent`` span
    and a nested ``chat`` span.
    """

    identifier = IDENTIFIER
    origin = ORIGIN

    def __init__(self, include_prompts=True):
        # type: (GoogleGenAIIntegration, bool) -> None
        # When False, prompt and response text is never attached to spans,
        # regardless of the PII setting.
        self.include_prompts = include_prompts

    @staticmethod
    def setup_once():
        # type: () -> None
        # Replace the non-streaming entry points on both model classes
        # with traced wrappers.
        Models.generate_content = _wrap_generate_content(Models.generate_content)
        AsyncModels.generate_content = _wrap_async_generate_content(
            AsyncModels.generate_content
        )

        # Same for the streaming entry points.
        Models.generate_content_stream = _wrap_generate_content_stream(
            Models.generate_content_stream
        )
        AsyncModels.generate_content_stream = _wrap_async_generate_content_stream(
            AsyncModels.generate_content_stream
        )
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _wrap_generate_content_stream(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the sync ``generate_content_stream`` method with Sentry tracing.

    Opens an ``invoke_agent`` span plus a nested ``chat`` span and returns a
    wrapper iterator; the spans stay open for the lifetime of the stream and
    are closed when the iterator is exhausted (or raises).
    """

    @wraps(f)
    def new_generate_content_stream(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
        if integration is None:
            # Integration disabled: call through untraced.
            return f(self, *args, **kwargs)

        _model, contents, model_name = prepare_generate_content_args(args, kwargs)

        # Spans are entered manually (not via ``with``) because they must
        # outlive this function call: they are closed inside the wrapper
        # iterator's ``finally`` once the stream ends.
        span = get_start_span_function()(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="invoke_agent",
            origin=ORIGIN,
        )
        span.__enter__()
        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
        set_span_data_for_request(span, integration, model_name, contents, kwargs)
        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

        chat_span = sentry_sdk.start_span(
            op=OP.GEN_AI_CHAT,
            name=f"chat {model_name}",
            origin=ORIGIN,
        )
        chat_span.__enter__()
        chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
        chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
        chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
        set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
        chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
        chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)

        try:
            stream = f(self, *args, **kwargs)

            # Create wrapper iterator to accumulate responses
            def new_iterator():
                # type: () -> Iterator[Any]
                chunks = []  # type: List[Any]
                try:
                    for chunk in stream:
                        chunks.append(chunk)
                        yield chunk
                except Exception as exc:
                    _capture_exception(exc)
                    chat_span.set_status(SPANSTATUS.ERROR)
                    raise
                finally:
                    # Accumulate all chunks and set final response data on spans
                    if chunks:
                        accumulated_response = accumulate_streaming_response(chunks)
                        set_span_data_for_streaming_response(
                            chat_span, integration, accumulated_response
                        )
                        set_span_data_for_streaming_response(
                            span, integration, accumulated_response
                        )
                    # Close inner span before outer; exc info already captured
                    # above, so the spans are exited without exception details.
                    chat_span.__exit__(None, None, None)
                    span.__exit__(None, None, None)

            return new_iterator()

        except Exception as exc:
            # The initial call itself failed (before any chunk was produced):
            # report and close both spans here, since the iterator's
            # ``finally`` will never run.
            _capture_exception(exc)
            chat_span.__exit__(None, None, None)
            span.__exit__(None, None, None)
            raise

    return new_generate_content_stream
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def _wrap_async_generate_content_stream(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the async ``generate_content_stream`` method with Sentry tracing.

    Async counterpart of ``_wrap_generate_content_stream``: opens an
    ``invoke_agent`` span plus a nested ``chat`` span and returns an async
    wrapper iterator that closes both spans once the stream ends.
    """

    @wraps(f)
    async def new_async_generate_content_stream(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
        if integration is None:
            # Integration disabled: call through untraced.
            return await f(self, *args, **kwargs)

        _model, contents, model_name = prepare_generate_content_args(args, kwargs)

        # Spans are entered manually (not via ``with``) because they must
        # outlive this coroutine: they are closed inside the async wrapper
        # iterator's ``finally`` once the stream ends.
        span = get_start_span_function()(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="invoke_agent",
            origin=ORIGIN,
        )
        span.__enter__()
        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
        set_span_data_for_request(span, integration, model_name, contents, kwargs)
        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

        chat_span = sentry_sdk.start_span(
            op=OP.GEN_AI_CHAT,
            name=f"chat {model_name}",
            origin=ORIGIN,
        )
        chat_span.__enter__()
        chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
        chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
        chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
        set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
        chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
        chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)

        try:
            stream = await f(self, *args, **kwargs)

            # Create wrapper async iterator to accumulate responses
            async def new_async_iterator():
                # type: () -> AsyncIterator[Any]
                chunks = []  # type: List[Any]
                try:
                    async for chunk in stream:
                        chunks.append(chunk)
                        yield chunk
                except Exception as exc:
                    _capture_exception(exc)
                    chat_span.set_status(SPANSTATUS.ERROR)
                    raise
                finally:
                    # Accumulate all chunks and set final response data on spans
                    if chunks:
                        accumulated_response = accumulate_streaming_response(chunks)
                        set_span_data_for_streaming_response(
                            chat_span, integration, accumulated_response
                        )
                        set_span_data_for_streaming_response(
                            span, integration, accumulated_response
                        )
                    # Close inner span before outer; exc info already captured
                    # above, so the spans are exited without exception details.
                    chat_span.__exit__(None, None, None)
                    span.__exit__(None, None, None)

            return new_async_iterator()

        except Exception as exc:
            # The initial call itself failed (before any chunk was produced):
            # report and close both spans here, since the iterator's
            # ``finally`` will never run.
            _capture_exception(exc)
            chat_span.__exit__(None, None, None)
            span.__exit__(None, None, None)
            raise

    return new_async_generate_content_stream
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def _wrap_generate_content(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the sync ``generate_content`` method with Sentry tracing.

    Runs the call inside an ``invoke_agent`` span with a nested ``chat``
    span, records request/response data on both, and captures exceptions
    before re-raising them.
    """

    @wraps(f)
    def new_generate_content(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
        if integration is None:
            # Integration disabled: call through untraced.
            return f(self, *args, **kwargs)

        # The model object itself is unused here; ``_model`` matches the
        # naming in the streaming wrappers.
        _model, contents, model_name = prepare_generate_content_args(args, kwargs)

        with get_start_span_function()(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="invoke_agent",
            origin=ORIGIN,
        ) as span:
            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
            set_span_data_for_request(span, integration, model_name, contents, kwargs)

            with sentry_sdk.start_span(
                op=OP.GEN_AI_CHAT,
                name=f"chat {model_name}",
                origin=ORIGIN,
            ) as chat_span:
                chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
                chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
                chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
                chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
                set_span_data_for_request(
                    chat_span, integration, model_name, contents, kwargs
                )

                try:
                    response = f(self, *args, **kwargs)
                except Exception as exc:
                    _capture_exception(exc)
                    chat_span.set_status(SPANSTATUS.ERROR)
                    raise

                # Record response data on both spans before the context
                # managers close them.
                set_span_data_for_response(chat_span, integration, response)
                set_span_data_for_response(span, integration, response)

                return response

    return new_generate_content
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def _wrap_async_generate_content(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the async ``generate_content`` method with Sentry tracing.

    Async counterpart of ``_wrap_generate_content``: runs the awaited call
    inside an ``invoke_agent`` span with a nested ``chat`` span, records
    request/response data, and captures exceptions before re-raising.
    """

    @wraps(f)
    async def new_async_generate_content(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
        if integration is None:
            # Integration disabled: call through untraced.
            return await f(self, *args, **kwargs)

        # The model object itself is unused here; ``_model`` matches the
        # naming in the streaming wrappers.
        _model, contents, model_name = prepare_generate_content_args(args, kwargs)

        with get_start_span_function()(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="invoke_agent",
            origin=ORIGIN,
        ) as span:
            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
            set_span_data_for_request(span, integration, model_name, contents, kwargs)

            with sentry_sdk.start_span(
                op=OP.GEN_AI_CHAT,
                name=f"chat {model_name}",
                origin=ORIGIN,
            ) as chat_span:
                chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
                chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
                chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
                set_span_data_for_request(
                    chat_span, integration, model_name, contents, kwargs
                )
                try:
                    response = await f(self, *args, **kwargs)
                except Exception as exc:
                    _capture_exception(exc)
                    chat_span.set_status(SPANSTATUS.ERROR)
                    raise

                # Record response data on both spans before the context
                # managers close them.
                set_span_data_for_response(chat_span, integration, response)
                set_span_data_for_response(span, integration, response)

                return response

    return new_async_generate_content
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# Value reported as the GenAI "system" attribute on spans.
GEN_AI_SYSTEM = "gcp.gemini"

# Mapping of tool attributes to their descriptions
# These are all tools that are available in the Google GenAI API
TOOL_ATTRIBUTES_MAP = {
    "google_search_retrieval": "Google Search retrieval tool",
    "google_search": "Google Search tool",
    "retrieval": "Retrieval tool",
    "enterprise_web_search": "Enterprise web search tool",
    "google_maps": "Google Maps tool",
    "code_execution": "Code execution tool",
    "computer_use": "Computer use tool",
}

# Integration identifier and the span origin derived from it
# (Sentry convention: ``auto.ai.<identifier>``).
IDENTIFIER = "google_genai"
ORIGIN = f"auto.ai.{IDENTIFIER}"
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
from typing import (
|
|
2
|
+
TYPE_CHECKING,
|
|
3
|
+
Any,
|
|
4
|
+
List,
|
|
5
|
+
TypedDict,
|
|
6
|
+
Optional,
|
|
7
|
+
)
|
|
8
|
+
|
|
9
|
+
from sentry_sdk.ai.utils import set_data_normalized
|
|
10
|
+
from sentry_sdk.consts import SPANDATA
|
|
11
|
+
from sentry_sdk.scope import should_send_default_pii
|
|
12
|
+
from sentry_sdk.utils import (
|
|
13
|
+
safe_serialize,
|
|
14
|
+
)
|
|
15
|
+
from .utils import (
|
|
16
|
+
extract_tool_calls,
|
|
17
|
+
extract_finish_reasons,
|
|
18
|
+
extract_contents_text,
|
|
19
|
+
extract_usage_data,
|
|
20
|
+
UsageData,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
if TYPE_CHECKING:
|
|
24
|
+
from sentry_sdk.tracing import Span
|
|
25
|
+
from google.genai.types import GenerateContentResponse
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class AccumulatedResponse(TypedDict):
    """Aggregate view of a streamed generation, merged from all chunks."""

    # Response id, when available from the stream (may be None).
    id: Optional[str]
    # Model name/version, when available from the stream (may be None).
    model: Optional[str]
    # Concatenated text extracted from all chunks' candidates.
    text: str
    # Finish reasons collected across all chunks.
    finish_reasons: List[str]
    # Tool/function calls collected across all chunks.
    tool_calls: List[dict[str, Any]]
    # Token usage summed over all chunks.
    usage_metadata: UsageData
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def accumulate_streaming_response(chunks):
    # type: (List[GenerateContentResponse]) -> AccumulatedResponse
    """Accumulate streaming chunks into a single response-like object.

    Concatenates candidate text, collects finish reasons and tool calls,
    sums token usage, and records the response id / model version reported
    by the chunks (later chunks win).
    """
    accumulated_text = []
    finish_reasons = []
    tool_calls = []
    total_input_tokens = 0
    total_output_tokens = 0
    total_tokens = 0
    total_cached_tokens = 0
    total_reasoning_tokens = 0
    response_id = None
    model = None

    for chunk in chunks:
        # Extract text and tool calls
        if getattr(chunk, "candidates", None):
            for candidate in getattr(chunk, "candidates", []):
                if hasattr(candidate, "content") and getattr(
                    candidate.content, "parts", []
                ):
                    extracted_text = extract_contents_text(candidate.content)
                    if extracted_text:
                        accumulated_text.append(extracted_text)

        extracted_finish_reasons = extract_finish_reasons(chunk)
        if extracted_finish_reasons:
            finish_reasons.extend(extracted_finish_reasons)

        extracted_tool_calls = extract_tool_calls(chunk)
        if extracted_tool_calls:
            tool_calls.extend(extracted_tool_calls)

        # Fix: previously ``response_id``/``model`` were never assigned, so
        # the accumulated response always carried None for both.
        # NOTE(review): attribute names follow google-genai's
        # GenerateContentResponse (``response_id``, ``model_version``) —
        # confirm against the installed SDK version.
        if getattr(chunk, "response_id", None):
            response_id = chunk.response_id
        if getattr(chunk, "model_version", None):
            model = chunk.model_version

        # Accumulate token usage
        extracted_usage_data = extract_usage_data(chunk)
        total_input_tokens += extracted_usage_data["input_tokens"]
        total_output_tokens += extracted_usage_data["output_tokens"]
        total_cached_tokens += extracted_usage_data["input_tokens_cached"]
        total_reasoning_tokens += extracted_usage_data["output_tokens_reasoning"]
        total_tokens += extracted_usage_data["total_tokens"]

    accumulated_response = AccumulatedResponse(
        text="".join(accumulated_text),
        finish_reasons=finish_reasons,
        tool_calls=tool_calls,
        usage_metadata=UsageData(
            input_tokens=total_input_tokens,
            output_tokens=total_output_tokens,
            input_tokens_cached=total_cached_tokens,
            output_tokens_reasoning=total_reasoning_tokens,
            total_tokens=total_tokens,
        ),
        id=response_id,
        model=model,
    )

    return accumulated_response
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def set_span_data_for_streaming_response(span, integration, accumulated_response):
    # type: (Span, Any, AccumulatedResponse) -> None
    """Attach accumulated streaming-response data to *span*.

    Response text is only attached when PII sending is enabled and the
    integration was configured with ``include_prompts``. Token counts are
    only attached when non-zero.
    """
    if (
        should_send_default_pii()
        and integration.include_prompts
        and accumulated_response.get("text")
    ):
        span.set_data(
            SPANDATA.GEN_AI_RESPONSE_TEXT,
            safe_serialize([accumulated_response["text"]]),
        )

    reasons = accumulated_response.get("finish_reasons")
    if reasons:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, reasons)

    calls = accumulated_response.get("tool_calls")
    if calls:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(calls))

    response_id = accumulated_response.get("id")
    if response_id:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response_id)
    response_model = accumulated_response.get("model")
    if response_model:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)

    # Token counts: emit each non-zero counter under its span-data key,
    # in the same order as before.
    usage = accumulated_response["usage_metadata"]
    usage_keys = (
        ("input_tokens", SPANDATA.GEN_AI_USAGE_INPUT_TOKENS),
        ("input_tokens_cached", SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED),
        ("output_tokens", SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS),
        ("output_tokens_reasoning", SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING),
        ("total_tokens", SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS),
    )
    for usage_key, data_key in usage_keys:
        if usage[usage_key]:
            span.set_data(data_key, usage[usage_key])
|