sentry-sdk 0.7.5__py2.py3-none-any.whl → 2.46.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sentry_sdk/__init__.py +48 -30
- sentry_sdk/_compat.py +74 -61
- sentry_sdk/_init_implementation.py +84 -0
- sentry_sdk/_log_batcher.py +172 -0
- sentry_sdk/_lru_cache.py +47 -0
- sentry_sdk/_metrics_batcher.py +167 -0
- sentry_sdk/_queue.py +289 -0
- sentry_sdk/_types.py +338 -0
- sentry_sdk/_werkzeug.py +98 -0
- sentry_sdk/ai/__init__.py +7 -0
- sentry_sdk/ai/monitoring.py +137 -0
- sentry_sdk/ai/utils.py +144 -0
- sentry_sdk/api.py +496 -80
- sentry_sdk/attachments.py +75 -0
- sentry_sdk/client.py +1023 -103
- sentry_sdk/consts.py +1438 -66
- sentry_sdk/crons/__init__.py +10 -0
- sentry_sdk/crons/api.py +62 -0
- sentry_sdk/crons/consts.py +4 -0
- sentry_sdk/crons/decorator.py +135 -0
- sentry_sdk/debug.py +15 -14
- sentry_sdk/envelope.py +369 -0
- sentry_sdk/feature_flags.py +71 -0
- sentry_sdk/hub.py +611 -280
- sentry_sdk/integrations/__init__.py +276 -49
- sentry_sdk/integrations/_asgi_common.py +108 -0
- sentry_sdk/integrations/_wsgi_common.py +180 -44
- sentry_sdk/integrations/aiohttp.py +291 -42
- sentry_sdk/integrations/anthropic.py +439 -0
- sentry_sdk/integrations/argv.py +9 -8
- sentry_sdk/integrations/ariadne.py +161 -0
- sentry_sdk/integrations/arq.py +247 -0
- sentry_sdk/integrations/asgi.py +341 -0
- sentry_sdk/integrations/asyncio.py +144 -0
- sentry_sdk/integrations/asyncpg.py +208 -0
- sentry_sdk/integrations/atexit.py +17 -10
- sentry_sdk/integrations/aws_lambda.py +377 -62
- sentry_sdk/integrations/beam.py +176 -0
- sentry_sdk/integrations/boto3.py +137 -0
- sentry_sdk/integrations/bottle.py +221 -0
- sentry_sdk/integrations/celery/__init__.py +529 -0
- sentry_sdk/integrations/celery/beat.py +293 -0
- sentry_sdk/integrations/celery/utils.py +43 -0
- sentry_sdk/integrations/chalice.py +134 -0
- sentry_sdk/integrations/clickhouse_driver.py +177 -0
- sentry_sdk/integrations/cloud_resource_context.py +280 -0
- sentry_sdk/integrations/cohere.py +274 -0
- sentry_sdk/integrations/dedupe.py +48 -14
- sentry_sdk/integrations/django/__init__.py +584 -191
- sentry_sdk/integrations/django/asgi.py +245 -0
- sentry_sdk/integrations/django/caching.py +204 -0
- sentry_sdk/integrations/django/middleware.py +187 -0
- sentry_sdk/integrations/django/signals_handlers.py +91 -0
- sentry_sdk/integrations/django/templates.py +79 -5
- sentry_sdk/integrations/django/transactions.py +49 -22
- sentry_sdk/integrations/django/views.py +96 -0
- sentry_sdk/integrations/dramatiq.py +226 -0
- sentry_sdk/integrations/excepthook.py +50 -13
- sentry_sdk/integrations/executing.py +67 -0
- sentry_sdk/integrations/falcon.py +272 -0
- sentry_sdk/integrations/fastapi.py +141 -0
- sentry_sdk/integrations/flask.py +142 -88
- sentry_sdk/integrations/gcp.py +239 -0
- sentry_sdk/integrations/gnu_backtrace.py +99 -0
- sentry_sdk/integrations/google_genai/__init__.py +301 -0
- sentry_sdk/integrations/google_genai/consts.py +16 -0
- sentry_sdk/integrations/google_genai/streaming.py +155 -0
- sentry_sdk/integrations/google_genai/utils.py +576 -0
- sentry_sdk/integrations/gql.py +162 -0
- sentry_sdk/integrations/graphene.py +151 -0
- sentry_sdk/integrations/grpc/__init__.py +168 -0
- sentry_sdk/integrations/grpc/aio/__init__.py +7 -0
- sentry_sdk/integrations/grpc/aio/client.py +95 -0
- sentry_sdk/integrations/grpc/aio/server.py +100 -0
- sentry_sdk/integrations/grpc/client.py +91 -0
- sentry_sdk/integrations/grpc/consts.py +1 -0
- sentry_sdk/integrations/grpc/server.py +66 -0
- sentry_sdk/integrations/httpx.py +178 -0
- sentry_sdk/integrations/huey.py +174 -0
- sentry_sdk/integrations/huggingface_hub.py +378 -0
- sentry_sdk/integrations/langchain.py +1132 -0
- sentry_sdk/integrations/langgraph.py +337 -0
- sentry_sdk/integrations/launchdarkly.py +61 -0
- sentry_sdk/integrations/litellm.py +287 -0
- sentry_sdk/integrations/litestar.py +315 -0
- sentry_sdk/integrations/logging.py +307 -96
- sentry_sdk/integrations/loguru.py +213 -0
- sentry_sdk/integrations/mcp.py +566 -0
- sentry_sdk/integrations/modules.py +14 -31
- sentry_sdk/integrations/openai.py +725 -0
- sentry_sdk/integrations/openai_agents/__init__.py +61 -0
- sentry_sdk/integrations/openai_agents/consts.py +1 -0
- sentry_sdk/integrations/openai_agents/patches/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/patches/agent_run.py +140 -0
- sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
- sentry_sdk/integrations/openai_agents/patches/models.py +50 -0
- sentry_sdk/integrations/openai_agents/patches/runner.py +45 -0
- sentry_sdk/integrations/openai_agents/patches/tools.py +77 -0
- sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
- sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +21 -0
- sentry_sdk/integrations/openai_agents/spans/ai_client.py +42 -0
- sentry_sdk/integrations/openai_agents/spans/execute_tool.py +48 -0
- sentry_sdk/integrations/openai_agents/spans/handoff.py +19 -0
- sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +86 -0
- sentry_sdk/integrations/openai_agents/utils.py +199 -0
- sentry_sdk/integrations/openfeature.py +35 -0
- sentry_sdk/integrations/opentelemetry/__init__.py +7 -0
- sentry_sdk/integrations/opentelemetry/consts.py +5 -0
- sentry_sdk/integrations/opentelemetry/integration.py +58 -0
- sentry_sdk/integrations/opentelemetry/propagator.py +117 -0
- sentry_sdk/integrations/opentelemetry/span_processor.py +391 -0
- sentry_sdk/integrations/otlp.py +82 -0
- sentry_sdk/integrations/pure_eval.py +141 -0
- sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
- sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
- sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
- sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +215 -0
- sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +110 -0
- sentry_sdk/integrations/pydantic_ai/patches/model_request.py +40 -0
- sentry_sdk/integrations/pydantic_ai/patches/tools.py +98 -0
- sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
- sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +246 -0
- sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
- sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
- sentry_sdk/integrations/pydantic_ai/utils.py +223 -0
- sentry_sdk/integrations/pymongo.py +214 -0
- sentry_sdk/integrations/pyramid.py +112 -68
- sentry_sdk/integrations/quart.py +237 -0
- sentry_sdk/integrations/ray.py +165 -0
- sentry_sdk/integrations/redis/__init__.py +48 -0
- sentry_sdk/integrations/redis/_async_common.py +116 -0
- sentry_sdk/integrations/redis/_sync_common.py +119 -0
- sentry_sdk/integrations/redis/consts.py +19 -0
- sentry_sdk/integrations/redis/modules/__init__.py +0 -0
- sentry_sdk/integrations/redis/modules/caches.py +118 -0
- sentry_sdk/integrations/redis/modules/queries.py +65 -0
- sentry_sdk/integrations/redis/rb.py +32 -0
- sentry_sdk/integrations/redis/redis.py +69 -0
- sentry_sdk/integrations/redis/redis_cluster.py +107 -0
- sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +50 -0
- sentry_sdk/integrations/redis/utils.py +148 -0
- sentry_sdk/integrations/rq.py +95 -37
- sentry_sdk/integrations/rust_tracing.py +284 -0
- sentry_sdk/integrations/sanic.py +294 -123
- sentry_sdk/integrations/serverless.py +48 -19
- sentry_sdk/integrations/socket.py +96 -0
- sentry_sdk/integrations/spark/__init__.py +4 -0
- sentry_sdk/integrations/spark/spark_driver.py +316 -0
- sentry_sdk/integrations/spark/spark_worker.py +116 -0
- sentry_sdk/integrations/sqlalchemy.py +142 -0
- sentry_sdk/integrations/starlette.py +737 -0
- sentry_sdk/integrations/starlite.py +292 -0
- sentry_sdk/integrations/statsig.py +37 -0
- sentry_sdk/integrations/stdlib.py +235 -29
- sentry_sdk/integrations/strawberry.py +394 -0
- sentry_sdk/integrations/sys_exit.py +70 -0
- sentry_sdk/integrations/threading.py +158 -28
- sentry_sdk/integrations/tornado.py +84 -52
- sentry_sdk/integrations/trytond.py +50 -0
- sentry_sdk/integrations/typer.py +60 -0
- sentry_sdk/integrations/unleash.py +33 -0
- sentry_sdk/integrations/unraisablehook.py +53 -0
- sentry_sdk/integrations/wsgi.py +201 -119
- sentry_sdk/logger.py +96 -0
- sentry_sdk/metrics.py +81 -0
- sentry_sdk/monitor.py +120 -0
- sentry_sdk/profiler/__init__.py +49 -0
- sentry_sdk/profiler/continuous_profiler.py +730 -0
- sentry_sdk/profiler/transaction_profiler.py +839 -0
- sentry_sdk/profiler/utils.py +195 -0
- sentry_sdk/py.typed +0 -0
- sentry_sdk/scope.py +1713 -85
- sentry_sdk/scrubber.py +177 -0
- sentry_sdk/serializer.py +405 -0
- sentry_sdk/session.py +177 -0
- sentry_sdk/sessions.py +275 -0
- sentry_sdk/spotlight.py +242 -0
- sentry_sdk/tracing.py +1486 -0
- sentry_sdk/tracing_utils.py +1236 -0
- sentry_sdk/transport.py +806 -134
- sentry_sdk/types.py +52 -0
- sentry_sdk/utils.py +1625 -465
- sentry_sdk/worker.py +54 -25
- sentry_sdk-2.46.0.dist-info/METADATA +268 -0
- sentry_sdk-2.46.0.dist-info/RECORD +189 -0
- {sentry_sdk-0.7.5.dist-info → sentry_sdk-2.46.0.dist-info}/WHEEL +1 -1
- sentry_sdk-2.46.0.dist-info/entry_points.txt +2 -0
- sentry_sdk-2.46.0.dist-info/licenses/LICENSE +21 -0
- sentry_sdk/integrations/celery.py +0 -119
- sentry_sdk-0.7.5.dist-info/LICENSE +0 -9
- sentry_sdk-0.7.5.dist-info/METADATA +0 -36
- sentry_sdk-0.7.5.dist-info/RECORD +0 -39
- {sentry_sdk-0.7.5.dist-info → sentry_sdk-2.46.0.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/pydantic_ai/patches/agent_run.py
@@ -0,0 +1,215 @@
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.integrations import DidNotEnable
+
+from ..spans import invoke_agent_span, update_invoke_agent_span
+from ..utils import _capture_exception, pop_agent, push_agent
+
+from typing import TYPE_CHECKING
+
+try:
+    from pydantic_ai.agent import Agent  # type: ignore
+except ImportError:
+    raise DidNotEnable("pydantic-ai not installed")
+
+if TYPE_CHECKING:
+    from typing import Any, Callable, Optional
+
+
+class _StreamingContextManagerWrapper:
+    """Wrapper for streaming methods that return async context managers."""
+
+    def __init__(
+        self,
+        agent,
+        original_ctx_manager,
+        user_prompt,
+        model,
+        model_settings,
+        is_streaming=True,
+    ):
+        # type: (Any, Any, Any, Any, Any, bool) -> None
+        self.agent = agent
+        self.original_ctx_manager = original_ctx_manager
+        self.user_prompt = user_prompt
+        self.model = model
+        self.model_settings = model_settings
+        self.is_streaming = is_streaming
+        self._isolation_scope = None  # type: Any
+        self._span = None  # type: Optional[sentry_sdk.tracing.Span]
+        self._result = None  # type: Any
+
+    async def __aenter__(self):
+        # type: () -> Any
+        # Set up isolation scope and invoke_agent span
+        self._isolation_scope = sentry_sdk.isolation_scope()
+        self._isolation_scope.__enter__()
+
+        # Create invoke_agent span (will be closed in __aexit__)
+        self._span = invoke_agent_span(
+            self.user_prompt,
+            self.agent,
+            self.model,
+            self.model_settings,
+            self.is_streaming,
+        )
+        self._span.__enter__()
+
+        # Push agent to contextvar stack after span is successfully created and entered
+        # This ensures proper pairing with pop_agent() in __aexit__ even if exceptions occur
+        push_agent(self.agent, self.is_streaming)
+
+        # Enter the original context manager
+        result = await self.original_ctx_manager.__aenter__()
+        self._result = result
+        return result
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        # type: (Any, Any, Any) -> None
+        try:
+            # Exit the original context manager first
+            await self.original_ctx_manager.__aexit__(exc_type, exc_val, exc_tb)
+
+            # Update span with output if successful
+            if exc_type is None and self._result and hasattr(self._result, "output"):
+                output = (
+                    self._result.output if hasattr(self._result, "output") else None
+                )
+                if self._span is not None:
+                    update_invoke_agent_span(self._span, output)
+        finally:
+            # Pop agent from contextvar stack
+            pop_agent()
+
+            # Clean up invoke span
+            if self._span:
+                self._span.__exit__(exc_type, exc_val, exc_tb)
+
+            # Clean up isolation scope
+            if self._isolation_scope:
+                self._isolation_scope.__exit__(exc_type, exc_val, exc_tb)
+
+
+def _create_run_wrapper(original_func, is_streaming=False):
+    # type: (Callable[..., Any], bool) -> Callable[..., Any]
+    """
+    Wraps the Agent.run method to create an invoke_agent span.
+
+    Args:
+        original_func: The original run method
+        is_streaming: Whether this is a streaming method (for future use)
+    """
+
+    @wraps(original_func)
+    async def wrapper(self, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        # Isolate each workflow so that when agents are run in asyncio tasks they
+        # don't touch each other's scopes
+        with sentry_sdk.isolation_scope():
+            # Extract parameters for the span
+            user_prompt = kwargs.get("user_prompt") or (args[0] if args else None)
+            model = kwargs.get("model")
+            model_settings = kwargs.get("model_settings")
+
+            # Create invoke_agent span
+            with invoke_agent_span(
+                user_prompt, self, model, model_settings, is_streaming
+            ) as span:
+                # Push agent to contextvar stack after span is successfully created and entered
+                # This ensures proper pairing with pop_agent() in finally even if exceptions occur
+                push_agent(self, is_streaming)
+
+                try:
+                    result = await original_func(self, *args, **kwargs)
+
+                    # Update span with output
+                    output = result.output if hasattr(result, "output") else None
+                    update_invoke_agent_span(span, output)
+
+                    return result
+                except Exception as exc:
+                    _capture_exception(exc)
+                    raise exc from None
+                finally:
+                    # Pop agent from contextvar stack
+                    pop_agent()
+
+    return wrapper
+
+
+def _create_streaming_wrapper(original_func):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    """
+    Wraps run_stream method that returns an async context manager.
+    """
+
+    @wraps(original_func)
+    def wrapper(self, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        # Extract parameters for the span
+        user_prompt = kwargs.get("user_prompt") or (args[0] if args else None)
+        model = kwargs.get("model")
+        model_settings = kwargs.get("model_settings")
+
+        # Call original function to get the context manager
+        original_ctx_manager = original_func(self, *args, **kwargs)
+
+        # Wrap it with our instrumentation
+        return _StreamingContextManagerWrapper(
+            agent=self,
+            original_ctx_manager=original_ctx_manager,
+            user_prompt=user_prompt,
+            model=model,
+            model_settings=model_settings,
+            is_streaming=True,
+        )
+
+    return wrapper
+
+
+def _create_streaming_events_wrapper(original_func):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    """
+    Wraps run_stream_events method - no span needed as it delegates to run().
+
+    Note: run_stream_events internally calls self.run() with an event_stream_handler,
+    so the invoke_agent span will be created by the run() wrapper.
+    """
+
+    @wraps(original_func)
+    async def wrapper(self, *args, **kwargs):
+        # type: (Any, *Any, **Any) -> Any
+        # Just call the original generator - it will call run() which has the instrumentation
+        try:
+            async for event in original_func(self, *args, **kwargs):
+                yield event
+        except Exception as exc:
+            _capture_exception(exc)
+            raise exc from None
+
+    return wrapper
+
+
+def _patch_agent_run():
+    # type: () -> None
+    """
+    Patches the Agent run methods to create spans for agent execution.
+
+    This patches both non-streaming (run, run_sync) and streaming
+    (run_stream, run_stream_events) methods.
+    """
+
+    # Store original methods
+    original_run = Agent.run
+    original_run_stream = Agent.run_stream
+    original_run_stream_events = Agent.run_stream_events
+
+    # Wrap and apply patches for non-streaming methods
+    Agent.run = _create_run_wrapper(original_run, is_streaming=False)
+
+    # Wrap and apply patches for streaming methods
+    Agent.run_stream = _create_streaming_wrapper(original_run_stream)
+    Agent.run_stream_events = _create_streaming_events_wrapper(
+        original_run_stream_events
+    )
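For orientation, here is a minimal usage sketch of the patched entry points (not part of the diff). It assumes the pydantic-ai integration shipped in this release is enabled through `sentry_sdk.init()` (the enabling mechanism and integration class live elsewhere in the package, not in this hunk), that model credentials are configured, and that the DSN below is a placeholder; the streaming-result API (`stream_text`) comes from pydantic-ai itself.

```python
import asyncio

import sentry_sdk
from pydantic_ai import Agent

# Placeholder DSN; enabling the pydantic-ai integration is assumed to happen
# via sentry_sdk.init() and is not shown in this hunk.
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
    traces_sample_rate=1.0,
)

agent = Agent("openai:gpt-4o", system_prompt="Be concise.")


async def main():
    # After _patch_agent_run(), Agent.run is the wrapper above: it opens an
    # isolation scope, starts an invoke_agent span, and records result.output.
    result = await agent.run("What is the capital of France?")
    print(result.output)

    # Agent.run_stream instead returns a _StreamingContextManagerWrapper, so
    # the invoke_agent span stays open for the whole async-with block.
    async with agent.run_stream("Name three rivers.") as stream:
        async for chunk in stream.stream_text():
            print(chunk)


asyncio.run(main())
```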
sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py
@@ -0,0 +1,110 @@
+from contextlib import asynccontextmanager
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.integrations import DidNotEnable
+
+from ..spans import (
+    ai_client_span,
+    update_ai_client_span,
+)
+
+try:
+    from pydantic_ai._agent_graph import ModelRequestNode  # type: ignore
+except ImportError:
+    raise DidNotEnable("pydantic-ai not installed")
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Callable
+
+
+def _extract_span_data(node, ctx):
+    # type: (Any, Any) -> tuple[list[Any], Any, Any]
+    """Extract common data needed for creating chat spans.
+
+    Returns:
+        Tuple of (messages, model, model_settings)
+    """
+    # Extract model and settings from context
+    model = None
+    model_settings = None
+    if hasattr(ctx, "deps"):
+        model = getattr(ctx.deps, "model", None)
+        model_settings = getattr(ctx.deps, "model_settings", None)
+
+    # Build full message list: history + current request
+    messages = []
+    if hasattr(ctx, "state") and hasattr(ctx.state, "message_history"):
+        messages.extend(ctx.state.message_history)
+
+    current_request = getattr(node, "request", None)
+    if current_request:
+        messages.append(current_request)
+
+    return messages, model, model_settings
+
+
+def _patch_graph_nodes():
+    # type: () -> None
+    """
+    Patches the graph node execution to create appropriate spans.
+
+    ModelRequestNode -> Creates ai_client span for model requests
+    CallToolsNode -> Handles tool calls (spans created in tool patching)
+    """
+
+    # Patch ModelRequestNode to create ai_client spans
+    original_model_request_run = ModelRequestNode.run
+
+    @wraps(original_model_request_run)
+    async def wrapped_model_request_run(self, ctx):
+        # type: (Any, Any) -> Any
+        messages, model, model_settings = _extract_span_data(self, ctx)
+
+        with ai_client_span(messages, None, model, model_settings) as span:
+            result = await original_model_request_run(self, ctx)
+
+            # Extract response from result if available
+            model_response = None
+            if hasattr(result, "model_response"):
+                model_response = result.model_response
+
+            update_ai_client_span(span, model_response)
+            return result
+
+    ModelRequestNode.run = wrapped_model_request_run
+
+    # Patch ModelRequestNode.stream for streaming requests
+    original_model_request_stream = ModelRequestNode.stream
+
+    def create_wrapped_stream(original_stream_method):
+        # type: (Callable[..., Any]) -> Callable[..., Any]
+        """Create a wrapper for ModelRequestNode.stream that creates chat spans."""
+
+        @asynccontextmanager
+        @wraps(original_stream_method)
+        async def wrapped_model_request_stream(self, ctx):
+            # type: (Any, Any) -> Any
+            messages, model, model_settings = _extract_span_data(self, ctx)
+
+            # Create chat span for streaming request
+            with ai_client_span(messages, None, model, model_settings) as span:
+                # Call the original stream method
+                async with original_stream_method(self, ctx) as stream:
+                    yield stream
+
+                # After streaming completes, update span with response data
+                # The ModelRequestNode stores the final response in _result
+                model_response = None
+                if hasattr(self, "_result") and self._result is not None:
+                    # _result is a NextNode containing the model_response
+                    if hasattr(self._result, "model_response"):
+                        model_response = self._result.model_response
+
+                update_ai_client_span(span, model_response)
+
+        return wrapped_model_request_stream
+
+    ModelRequestNode.stream = create_wrapped_stream(original_model_request_stream)
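The `create_wrapped_stream` helper above relies on stacking `@asynccontextmanager` on a plain async generator so that the patched attribute still behaves like the original async-context-manager method when called as `node.stream(ctx)`. A self-contained sketch of that pattern with toy classes (no pydantic-ai or Sentry types involved):

```python
import asyncio
from contextlib import asynccontextmanager
from functools import wraps


class Node:
    @asynccontextmanager
    async def stream(self, ctx):
        # Stand-in for the original async-context-manager method.
        yield f"stream({ctx})"


original_stream = Node.stream


@asynccontextmanager
@wraps(original_stream)
async def wrapped_stream(self, ctx):
    # Instrumentation wraps "around" the original context manager, mirroring
    # wrapped_model_request_stream above (span start/update would go here).
    print("entering")
    async with original_stream(self, ctx) as stream:
        yield stream
    print("exited")


Node.stream = wrapped_stream


async def main():
    async with Node().stream("ctx") as s:
        print(s)  # -> stream(ctx)


asyncio.run(main())
```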
sentry_sdk/integrations/pydantic_ai/patches/model_request.py
@@ -0,0 +1,40 @@
+from functools import wraps
+from typing import TYPE_CHECKING
+
+from sentry_sdk.integrations import DidNotEnable
+
+try:
+    from pydantic_ai import models  # type: ignore
+except ImportError:
+    raise DidNotEnable("pydantic-ai not installed")
+
+from ..spans import ai_client_span, update_ai_client_span
+
+
+if TYPE_CHECKING:
+    from typing import Any
+
+
+def _patch_model_request():
+    # type: () -> None
+    """
+    Patches model request execution to create AI client spans.
+
+    In pydantic-ai, model requests are handled through the Model interface.
+    We need to patch the request method on models to create spans.
+    """
+
+    # Patch the base Model class's request method
+    if hasattr(models, "Model"):
+        original_request = models.Model.request
+
+        @wraps(original_request)
+        async def wrapped_request(self, messages, *args, **kwargs):
+            # type: (Any, Any, *Any, **Any) -> Any
+            # Pass all messages (full conversation history)
+            with ai_client_span(messages, None, self, None) as span:
+                result = await original_request(self, messages, *args, **kwargs)
+                update_ai_client_span(span, result)
+                return result
+
+        models.Model.request = wrapped_request
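The `ai_client_span` and `update_ai_client_span` helpers live in the sibling `spans` package (listed above as `pydantic_ai/spans/ai_client.py`, +246 lines) and are not part of this hunk. As rough orientation only, a hedged sketch of what such helpers could look like on top of the public `sentry_sdk.start_span()` API; the op string and data keys are illustrative assumptions, not the integration's actual constants:

```python
from contextlib import contextmanager

import sentry_sdk


@contextmanager
def ai_client_span_sketch(messages, model):
    # Hypothetical helper, not the integration's implementation.
    with sentry_sdk.start_span(op="gen_ai.chat", name="chat model request") as span:
        span.set_data("gen_ai.request.model", str(model))
        span.set_data("gen_ai.request.message_count", len(messages or []))
        yield span


def update_ai_client_span_sketch(span, model_response):
    # Record whatever the response object exposes; the shape is assumed here.
    if model_response is not None:
        span.set_data("gen_ai.response.summary", repr(model_response)[:200])
```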
sentry_sdk/integrations/pydantic_ai/patches/tools.py
@@ -0,0 +1,98 @@
+from functools import wraps
+
+from sentry_sdk.integrations import DidNotEnable
+import sentry_sdk
+
+from ..spans import execute_tool_span, update_execute_tool_span
+from ..utils import (
+    _capture_exception,
+    get_current_agent,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+
+try:
+    from pydantic_ai.mcp import MCPServer  # type: ignore
+
+    HAS_MCP = True
+except ImportError:
+    HAS_MCP = False
+
+try:
+    from pydantic_ai._tool_manager import ToolManager  # type: ignore
+except ImportError:
+    raise DidNotEnable("pydantic-ai not installed")
+
+
+def _patch_tool_execution():
+    # type: () -> None
+    """
+    Patch ToolManager._call_tool to create execute_tool spans.
+
+    This is the single point where ALL tool calls flow through in pydantic_ai,
+    regardless of toolset type (function, MCP, combined, wrapper, etc.).
+
+    By patching here, we avoid:
+    - Patching multiple toolset classes
+    - Dealing with signature mismatches from instrumented MCP servers
+    - Complex nested toolset handling
+    """
+
+    original_call_tool = ToolManager._call_tool
+
+    @wraps(original_call_tool)
+    async def wrapped_call_tool(self, call, *args, **kwargs):
+        # type: (Any, Any, *Any, **Any) -> Any
+
+        # Extract tool info before calling original
+        name = call.tool_name
+        tool = self.tools.get(name) if self.tools else None
+
+        # Determine tool type by checking tool.toolset
+        tool_type = "function"  # default
+        if tool and HAS_MCP and isinstance(tool.toolset, MCPServer):
+            tool_type = "mcp"
+
+        # Get agent from contextvar
+        agent = get_current_agent()
+
+        if agent and tool:
+            try:
+                args_dict = call.args_as_dict()
+            except Exception:
+                args_dict = call.args if isinstance(call.args, dict) else {}
+
+            # Create execute_tool span
+            # Nesting is handled by isolation_scope() to ensure proper parent-child relationships
+            with sentry_sdk.isolation_scope():
+                with execute_tool_span(
+                    name,
+                    args_dict,
+                    agent,
+                    tool_type=tool_type,
+                ) as span:
+                    try:
+                        result = await original_call_tool(
+                            self,
+                            call,
+                            *args,
+                            **kwargs,
+                        )
+                        update_execute_tool_span(span, result)
+                        return result
+                    except Exception as exc:
+                        _capture_exception(exc)
+                        raise exc from None
+
+        # No span context - just call original
+        return await original_call_tool(
+            self,
+            call,
+            *args,
+            **kwargs,
+        )
+
+    ToolManager._call_tool = wrapped_call_tool
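A short usage sketch of how a registered tool would flow through the patched `ToolManager._call_tool` (not part of the diff). It assumes the integration has been enabled so that `_patch_tool_execution()` has run, and that a DSN and model credentials are configured; the tool, model, and prompt are placeholders:

```python
import sentry_sdk
from pydantic_ai import Agent

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
    traces_sample_rate=1.0,
)

agent = Agent("openai:gpt-4o")


@agent.tool_plain
def get_weather(city: str) -> str:
    # With _patch_tool_execution() applied, this call goes through
    # wrapped_call_tool above and is reported as an execute_tool span
    # with tool_type="function".
    return f"Sunny in {city}"


result = agent.run_sync("What is the weather in Paris? Use the tool.")
print(result.output)
```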