sentry-sdk 2.38.0__py2.py3-none-any.whl → 2.40.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of sentry-sdk might be problematic.
- sentry_sdk/client.py +6 -6
- sentry_sdk/consts.py +15 -3
- sentry_sdk/envelope.py +28 -14
- sentry_sdk/feature_flags.py +0 -1
- sentry_sdk/hub.py +17 -9
- sentry_sdk/integrations/__init__.py +2 -0
- sentry_sdk/integrations/anthropic.py +18 -3
- sentry_sdk/integrations/asgi.py +3 -2
- sentry_sdk/integrations/cohere.py +4 -0
- sentry_sdk/integrations/dedupe.py +16 -2
- sentry_sdk/integrations/dramatiq.py +89 -31
- sentry_sdk/integrations/grpc/aio/client.py +2 -1
- sentry_sdk/integrations/grpc/client.py +3 -4
- sentry_sdk/integrations/huggingface_hub.py +3 -2
- sentry_sdk/integrations/langchain.py +12 -12
- sentry_sdk/integrations/launchdarkly.py +0 -1
- sentry_sdk/integrations/litellm.py +251 -0
- sentry_sdk/integrations/litestar.py +4 -4
- sentry_sdk/integrations/openai.py +13 -8
- sentry_sdk/integrations/openai_agents/spans/ai_client.py +4 -1
- sentry_sdk/integrations/openai_agents/spans/execute_tool.py +1 -1
- sentry_sdk/integrations/openai_agents/utils.py +28 -1
- sentry_sdk/integrations/pure_eval.py +3 -1
- sentry_sdk/integrations/spark/spark_driver.py +2 -1
- sentry_sdk/integrations/sqlalchemy.py +2 -6
- sentry_sdk/integrations/starlette.py +1 -3
- sentry_sdk/integrations/starlite.py +4 -4
- sentry_sdk/integrations/wsgi.py +3 -2
- sentry_sdk/metrics.py +17 -11
- sentry_sdk/profiler/utils.py +2 -6
- sentry_sdk/scope.py +6 -3
- sentry_sdk/serializer.py +13 -4
- sentry_sdk/session.py +4 -2
- sentry_sdk/sessions.py +4 -2
- sentry_sdk/tracing.py +38 -8
- sentry_sdk/tracing_utils.py +15 -4
- sentry_sdk/transport.py +8 -9
- sentry_sdk/utils.py +5 -3
- {sentry_sdk-2.38.0.dist-info → sentry_sdk-2.40.0.dist-info}/METADATA +3 -1
- {sentry_sdk-2.38.0.dist-info → sentry_sdk-2.40.0.dist-info}/RECORD +44 -43
- {sentry_sdk-2.38.0.dist-info → sentry_sdk-2.40.0.dist-info}/WHEEL +0 -0
- {sentry_sdk-2.38.0.dist-info → sentry_sdk-2.40.0.dist-info}/entry_points.txt +0 -0
- {sentry_sdk-2.38.0.dist-info → sentry_sdk-2.40.0.dist-info}/licenses/LICENSE +0 -0
- {sentry_sdk-2.38.0.dist-info → sentry_sdk-2.40.0.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/huggingface_hub.py CHANGED
@@ -7,6 +7,7 @@ from sentry_sdk.ai.utils import set_data_normalized
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing_utils import set_span_errored
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
@@ -52,6 +53,8 @@ class HuggingfaceHubIntegration(Integration):

 def _capture_exception(exc):
     # type: (Any) -> None
+    set_span_errored()
+
     event, hint = event_from_exception(
         exc,
         client_options=sentry_sdk.get_client().options,
@@ -127,8 +130,6 @@ def _wrap_huggingface_task(f, op):
         try:
             res = f(*args, **kwargs)
         except Exception as e:
-            # Error Handling
-            span.set_status("error")
             _capture_exception(e)
             span.__exit__(None, None, None)
             raise e from None
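This hunk, and the langchain, openai, and openai_agents hunks below, converge on a single helper, `set_span_errored` from `sentry_sdk.tracing_utils`, instead of scattering ad-hoc `span.set_status("error")` calls through each integration's exception handler. As a rough sketch of what such a helper plausibly looks like (this is an assumption about its shape, not the SDK's actual implementation):

```python
# Hypothetical sketch of set_span_errored; the real helper lives in
# sentry_sdk.tracing_utils and may differ in detail.
import sentry_sdk
from sentry_sdk.consts import SPANSTATUS


def set_span_errored(span=None):
    # type: (...) -> None
    """Mark the given span, or the current one, as errored."""
    span = span if span is not None else sentry_sdk.get_current_span()
    if span is not None:
        span.set_status(SPANSTATUS.ERROR)
```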
sentry_sdk/integrations/langchain.py CHANGED
@@ -8,8 +8,7 @@ from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
-from sentry_sdk.tracing import Span
-from sentry_sdk.tracing_utils import _get_value
+from sentry_sdk.tracing_utils import _get_value, set_span_errored
 from sentry_sdk.utils import logger, capture_internal_exceptions

 from typing import TYPE_CHECKING
@@ -26,6 +25,7 @@ if TYPE_CHECKING:
         Union,
     )
     from uuid import UUID
+    from sentry_sdk.tracing import Span


 try:
@@ -116,7 +116,7 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]

         span_data = self.span_map[run_id]
         span = span_data.span
-        span.set_status("error")
+        set_span_errored(span)

         sentry_sdk.capture_exception(error, span.scope)

@@ -322,14 +322,15 @@ class SentryLangchainCallback(BaseCallbackHandler):  # type: ignore[misc]
                 pass

             try:
-                [old lines 325-332 were not preserved in the source diff]
+                if should_send_default_pii() and self.include_prompts:
+                    tool_calls = getattr(generation.message, "tool_calls", None)
+                    if tool_calls is not None and tool_calls != []:
+                        set_data_normalized(
+                            span,
+                            SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
+                            tool_calls,
+                            unpack=False,
+                        )
             except AttributeError:
                 pass

@@ -553,7 +554,6 @@ def _simplify_langchain_tools(tools):
     for tool in tools:
         try:
             if isinstance(tool, dict):
-
                 if "function" in tool and isinstance(tool["function"], dict):
                     func = tool["function"]
                     simplified_tool = {
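The rewritten tool-call block reads `tool_calls` with a defaulted `getattr` and skips empty lists, rather than letting a missing attribute fall through to the surrounding `except AttributeError`. The same defensive pattern in isolation (toy objects, not langchain types):

```python
# Toy illustration of the defensive attribute read used above; Message is a
# stand-in class, not a langchain type.
class Message:
    pass  # no tool_calls attribute at all


tool_calls = getattr(Message(), "tool_calls", None)
if tool_calls is not None and tool_calls != []:
    print("record", tool_calls)
else:
    print("nothing to record")  # reached: attribute missing, no exception raised
```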
sentry_sdk/integrations/litellm.py ADDED
@@ -0,0 +1,251 @@
+from typing import TYPE_CHECKING
+
+import sentry_sdk
+from sentry_sdk import consts
+from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized
+from sentry_sdk.consts import SPANDATA
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import event_from_exception
+
+if TYPE_CHECKING:
+    from typing import Any, Dict
+    from datetime import datetime
+
+try:
+    import litellm  # type: ignore[import-not-found]
+except ImportError:
+    raise DidNotEnable("LiteLLM not installed")
+
+
+def _get_metadata_dict(kwargs):
+    # type: (Dict[str, Any]) -> Dict[str, Any]
+    """Get the metadata dictionary from the kwargs."""
+    litellm_params = kwargs.setdefault("litellm_params", {})
+
+    # we need this weird little dance, as metadata might be set but may be None initially
+    metadata = litellm_params.get("metadata")
+    if metadata is None:
+        metadata = {}
+        litellm_params["metadata"] = metadata
+    return metadata
+
+
+def _input_callback(kwargs):
+    # type: (Dict[str, Any]) -> None
+    """Handle the start of a request."""
+    integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration)
+
+    if integration is None:
+        return
+
+    # Get key parameters
+    full_model = kwargs.get("model", "")
+    try:
+        model, provider, _, _ = litellm.get_llm_provider(full_model)
+    except Exception:
+        model = full_model
+        provider = "unknown"
+
+    messages = kwargs.get("messages", [])
+    operation = "chat" if messages else "embeddings"
+
+    # Start a new span/transaction
+    span = get_start_span_function()(
+        op=(
+            consts.OP.GEN_AI_CHAT
+            if operation == "chat"
+            else consts.OP.GEN_AI_EMBEDDINGS
+        ),
+        name=f"{operation} {model}",
+        origin=LiteLLMIntegration.origin,
+    )
+    span.__enter__()
+
+    # Store span for later
+    _get_metadata_dict(kwargs)["_sentry_span"] = span
+
+    # Set basic data
+    set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, provider)
+    set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)
+
+    # Record messages if allowed
+    if messages and should_send_default_pii() and integration.include_prompts:
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
+        )
+
+    # Record other parameters
+    params = {
+        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
+        "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
+        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+        "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+        "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+    }
+    for key, attribute in params.items():
+        value = kwargs.get(key)
+        if value is not None:
+            set_data_normalized(span, attribute, value)
+
+    # Record LiteLLM-specific parameters
+    litellm_params = {
+        "api_base": kwargs.get("api_base"),
+        "api_version": kwargs.get("api_version"),
+        "custom_llm_provider": kwargs.get("custom_llm_provider"),
+    }
+    for key, value in litellm_params.items():
+        if value is not None:
+            set_data_normalized(span, f"gen_ai.litellm.{key}", value)
+
+
+def _success_callback(kwargs, completion_response, start_time, end_time):
+    # type: (Dict[str, Any], Any, datetime, datetime) -> None
+    """Handle successful completion."""
+
+    span = _get_metadata_dict(kwargs).get("_sentry_span")
+    if span is None:
+        return
+
+    integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration)
+    if integration is None:
+        return
+
+    try:
+        # Record model information
+        if hasattr(completion_response, "model"):
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_RESPONSE_MODEL, completion_response.model
+            )
+
+        # Record response content if allowed
+        if should_send_default_pii() and integration.include_prompts:
+            if hasattr(completion_response, "choices"):
+                response_messages = []
+                for choice in completion_response.choices:
+                    if hasattr(choice, "message"):
+                        if hasattr(choice.message, "model_dump"):
+                            response_messages.append(choice.message.model_dump())
+                        elif hasattr(choice.message, "dict"):
+                            response_messages.append(choice.message.dict())
+                        else:
+                            # Fallback for basic message objects
+                            msg = {}
+                            if hasattr(choice.message, "role"):
+                                msg["role"] = choice.message.role
+                            if hasattr(choice.message, "content"):
+                                msg["content"] = choice.message.content
+                            if hasattr(choice.message, "tool_calls"):
+                                msg["tool_calls"] = choice.message.tool_calls
+                            response_messages.append(msg)
+
+                if response_messages:
+                    set_data_normalized(
+                        span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_messages
+                    )
+
+        # Record token usage
+        if hasattr(completion_response, "usage"):
+            usage = completion_response.usage
+            record_token_usage(
+                span,
+                input_tokens=getattr(usage, "prompt_tokens", None),
+                output_tokens=getattr(usage, "completion_tokens", None),
+                total_tokens=getattr(usage, "total_tokens", None),
+            )
+
+    finally:
+        # Always finish the span and clean up
+        span.__exit__(None, None, None)
+
+
+def _failure_callback(kwargs, exception, start_time, end_time):
+    # type: (Dict[str, Any], Exception, datetime, datetime) -> None
+    """Handle request failure."""
+    span = _get_metadata_dict(kwargs).get("_sentry_span")
+    if span is None:
+        return
+
+    try:
+        # Capture the exception
+        event, hint = event_from_exception(
+            exception,
+            client_options=sentry_sdk.get_client().options,
+            mechanism={"type": "litellm", "handled": False},
+        )
+        sentry_sdk.capture_event(event, hint=hint)
+    finally:
+        # Always finish the span and clean up
+        span.__exit__(type(exception), exception, None)
+
+
+class LiteLLMIntegration(Integration):
+    """
+    LiteLLM integration for Sentry.
+
+    This integration automatically captures LiteLLM API calls and sends them to Sentry
+    for monitoring and error tracking. It supports all 100+ LLM providers that LiteLLM
+    supports, including OpenAI, Anthropic, Google, Cohere, and many others.
+
+    Features:
+    - Automatic exception capture for all LiteLLM calls
+    - Token usage tracking across all providers
+    - Provider detection and attribution
+    - Input/output message capture (configurable)
+    - Streaming response support
+    - Cost tracking integration
+
+    Usage:
+
+    ```python
+    import litellm
+    import sentry_sdk
+
+    # Initialize Sentry with the LiteLLM integration
+    sentry_sdk.init(
+        dsn="your-dsn",
+        send_default_pii=True,
+        integrations=[
+            sentry_sdk.integrations.LiteLLMIntegration(
+                include_prompts=True  # Set to False to exclude message content
+            )
+        ]
+    )
+
+    # All LiteLLM calls will now be monitored
+    response = litellm.completion(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Hello!"}]
+    )
+    ```
+
+    Configuration:
+    - include_prompts (bool): Whether to include prompts and responses in spans.
+      Defaults to True. Set to False to exclude potentially sensitive data.
+    """
+
+    identifier = "litellm"
+    origin = f"auto.ai.{identifier}"
+
+    def __init__(self, include_prompts=True):
+        # type: (LiteLLMIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        """Set up LiteLLM callbacks for monitoring."""
+        litellm.input_callback = litellm.input_callback or []
+        if _input_callback not in litellm.input_callback:
+            litellm.input_callback.append(_input_callback)
+
+        litellm.success_callback = litellm.success_callback or []
+        if _success_callback not in litellm.success_callback:
+            litellm.success_callback.append(_success_callback)
+
+        litellm.failure_callback = litellm.failure_callback or []
+        if _failure_callback not in litellm.failure_callback:
+            litellm.failure_callback.append(_failure_callback)
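The new integration has no wrapper around `litellm.completion`; it registers hook functions on LiteLLM's callback lists and threads its span through the request's `litellm_params["metadata"]` dict so the success/failure callbacks can retrieve it later. The `_get_metadata_dict` "dance" exists because callers may pass `litellm_params` with `metadata` explicitly set to `None`, which `setdefault` alone would not repair. The same logic in isolation, with invented kwargs:

```python
# Standalone illustration of _get_metadata_dict's None-handling; the kwargs
# here are made up, not a real LiteLLM request.
kwargs = {"model": "gpt-4o-mini", "litellm_params": {"metadata": None}}

litellm_params = kwargs.setdefault("litellm_params", {})
metadata = litellm_params.get("metadata")
if metadata is None:  # covers both a missing key and an explicit None
    metadata = {}
    litellm_params["metadata"] = metadata

metadata["_sentry_span"] = "<span stored here>"
assert kwargs["litellm_params"]["metadata"]["_sentry_span"] == "<span stored here>"
```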
sentry_sdk/integrations/litestar.py CHANGED
@@ -1,4 +1,6 @@
 from collections.abc import Set
+from copy import deepcopy
+
 import sentry_sdk
 from sentry_sdk.consts import OP
 from sentry_sdk.integrations import (
@@ -222,9 +224,7 @@ def patch_http_route_handle():
             return await old_handle(self, scope, receive, send)

         sentry_scope = sentry_sdk.get_isolation_scope()
-        request = scope["app"].request_class(
-            scope=scope, receive=receive, send=send
-        )  # type: Request[Any, Any]
+        request = scope["app"].request_class(scope=scope, receive=receive, send=send)  # type: Request[Any, Any]
         extracted_request_data = ConnectionDataExtractor(
             parse_body=True, parse_query=True
         )(request)
@@ -262,7 +262,7 @@ def patch_http_route_handle():

             event.update(
                 {
-                    "request": request_info,
+                    "request": deepcopy(request_info),
                     "transaction": tx_name,
                     "transaction_info": tx_info,
                 }
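Both Litestar and Starlite (below) now attach a `deepcopy` of `request_info` to the event. The plausible motivation: the event is mutated later in the pipeline (for example by scrubbing or serialization), and a shallow reference would let those mutations bleed into data shared with other events. A toy demonstration of the aliasing problem:

```python
# Toy data only: shows why a shallow reference into a shared dict is risky.
from copy import deepcopy

request_info = {"headers": {"x-api-key": "secret"}}

event_shallow = {"request": request_info}            # aliases request_info
event_deep = {"request": deepcopy(request_info)}     # independent copy

event_shallow["request"]["headers"]["x-api-key"] = "[Filtered]"

print(request_info["headers"]["x-api-key"])           # "[Filtered]" - mutated via the alias
print(event_deep["request"]["headers"]["x-api-key"])  # "secret" - unaffected
```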
sentry_sdk/integrations/openai.py CHANGED
@@ -7,6 +7,7 @@ from sentry_sdk.ai.utils import set_data_normalized
 from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations import DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing_utils import set_span_errored
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
@@ -83,6 +84,8 @@ def _capture_exception(exc, manual_span_cleanup=True):
     # Close an eventually open span
     # We need to do this by hand because we are not using the start_span context manager
     current_span = sentry_sdk.get_current_span()
+    set_span_errored(current_span)
+
     if manual_span_cleanup and current_span is not None:
         current_span.__exit__(None, None, None)

@@ -279,9 +282,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):

         def new_iterator():
             # type: () -> Iterator[ChatCompletionChunk]
-            with capture_internal_exceptions():
-                count_tokens_manually = True
-                for x in old_iterator:
+            count_tokens_manually = True
+            for x in old_iterator:
+                with capture_internal_exceptions():
                     # OpenAI chat completion API
                     if hasattr(x, "choices"):
                         choice_index = 0
@@ -312,8 +315,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
                         )
                         count_tokens_manually = False

-                    yield x
+                yield x

+            with capture_internal_exceptions():
                 if len(data_buf) > 0:
                     all_responses = ["".join(chunk) for chunk in data_buf]
                     if should_send_default_pii() and integration.include_prompts:
@@ -334,9 +338,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):

         async def new_iterator_async():
             # type: () -> AsyncIterator[ChatCompletionChunk]
-            with capture_internal_exceptions():
-                count_tokens_manually = True
-                async for x in old_iterator:
+            count_tokens_manually = True
+            async for x in old_iterator:
+                with capture_internal_exceptions():
                     # OpenAI chat completion API
                     if hasattr(x, "choices"):
                         choice_index = 0
@@ -367,8 +371,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
                         )
                         count_tokens_manually = False

-                    yield x
+                yield x

+            with capture_internal_exceptions():
                 if len(data_buf) > 0:
                     all_responses = ["".join(chunk) for chunk in data_buf]
                     if should_send_default_pii() and integration.include_prompts:
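The streaming change moves `yield x` out of the `capture_internal_exceptions()` block: the suppressing context manager now wraps only the per-chunk instrumentation, so an instrumentation failure can no longer swallow the chunk itself, and the tail accounting gets its own guarded block. The shape of the fix, reduced to a toy suppressor:

```python
# Toy stand-in for capture_internal_exceptions: a context manager that
# swallows errors raised in its body.
from contextlib import contextmanager


@contextmanager
def swallow_errors():
    try:
        yield
    except Exception:
        pass  # the real helper reports these to Sentry internally


def instrumented(chunks):
    for chunk in chunks:
        with swallow_errors():
            raise ValueError("broken instrumentation")  # per-chunk bookkeeping fails
        yield chunk  # outside the suppressor, so the consumer still gets every chunk


assert list(instrumented([1, 2, 3])) == [1, 2, 3]
```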
sentry_sdk/integrations/openai_agents/spans/ai_client.py CHANGED
@@ -7,6 +7,7 @@ from ..utils import (
     _set_input_data,
     _set_output_data,
     _set_usage_data,
+    _create_mcp_execute_tool_spans,
 )

 from typing import TYPE_CHECKING
@@ -28,12 +29,14 @@ def ai_client_span(agent, get_response_kwargs):
     # TODO-anton: remove hardcoded stuff and replace something that also works for embedding and so on
     span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")

+    _set_agent_data(span, agent)
+
     return span


 def update_ai_client_span(span, agent, get_response_kwargs, result):
     # type: (sentry_sdk.tracing.Span, Agent, dict[str, Any], Any) -> None
-    _set_agent_data(span, agent)
     _set_usage_data(span, result.usage)
     _set_input_data(span, get_response_kwargs)
     _set_output_data(span, result)
+    _create_mcp_execute_tool_spans(span, result)
sentry_sdk/integrations/openai_agents/spans/execute_tool.py CHANGED
@@ -42,7 +42,7 @@ def update_execute_tool_span(span, agent, tool, result):
     if isinstance(result, str) and result.startswith(
         "An error occurred while running the tool"
     ):
-        span.set_status(SPANSTATUS.INTERNAL_ERROR)
+        span.set_status(SPANSTATUS.ERROR)

     if should_send_default_pii():
         span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, result)
sentry_sdk/integrations/openai_agents/utils.py CHANGED
@@ -1,8 +1,9 @@
 import sentry_sdk
 from sentry_sdk.ai.utils import set_data_normalized
-from sentry_sdk.consts import SPANDATA
+from sentry_sdk.consts import SPANDATA, SPANSTATUS, OP
 from sentry_sdk.integrations import DidNotEnable
 from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.tracing_utils import set_span_errored
 from sentry_sdk.utils import event_from_exception, safe_serialize

 from typing import TYPE_CHECKING
@@ -20,6 +21,8 @@ except ImportError:

 def _capture_exception(exc):
     # type: (Any) -> None
+    set_span_errored()
+
     event, hint = event_from_exception(
         exc,
         client_options=sentry_sdk.get_client().options,
@@ -153,3 +156,27 @@ def _set_output_data(span, result):
         set_data_normalized(
             span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
         )
+
+
+def _create_mcp_execute_tool_spans(span, result):
+    # type: (sentry_sdk.tracing.Span, agents.Result) -> None
+    for output in result.output:
+        if output.__class__.__name__ == "McpCall":
+            with sentry_sdk.start_span(
+                op=OP.GEN_AI_EXECUTE_TOOL,
+                description=f"execute_tool {output.name}",
+                start_timestamp=span.start_timestamp,
+            ) as execute_tool_span:
+                set_data_normalized(execute_tool_span, SPANDATA.GEN_AI_TOOL_TYPE, "mcp")
+                set_data_normalized(
+                    execute_tool_span, SPANDATA.GEN_AI_TOOL_NAME, output.name
+                )
+                if should_send_default_pii():
+                    execute_tool_span.set_data(
+                        SPANDATA.GEN_AI_TOOL_INPUT, output.arguments
+                    )
+                    execute_tool_span.set_data(
+                        SPANDATA.GEN_AI_TOOL_OUTPUT, output.output
+                    )
+                if output.error:
+                    execute_tool_span.set_status(SPANSTATUS.ERROR)
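`_create_mcp_execute_tool_spans` creates its `execute_tool` child spans after the fact, once the agent result is available, pinning each one to the parent span's `start_timestamp` since the true tool timing is not recoverable from the result object. The same retroactive-span pattern in isolation (op strings and data keys here are illustrative):

```python
# Minimal sketch of creating a child span retroactively, backdated to the
# parent's start time; names and ops are illustrative, not the SDK constants.
import sentry_sdk

sentry_sdk.init(dsn="", traces_sample_rate=1.0)  # empty DSN: nothing is sent

with sentry_sdk.start_transaction(op="gen_ai.invoke_agent", name="agent run") as tx:
    # ... the agent runs; only afterwards do we learn a tool was called ...
    with sentry_sdk.start_span(
        op="gen_ai.execute_tool",
        description="execute_tool lookup_weather",
        start_timestamp=tx.start_timestamp,  # backdate: real timing is unknown
    ) as child:
        child.set_data("gen_ai.tool.type", "mcp")
```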
sentry_sdk/integrations/pure_eval.py CHANGED
@@ -116,7 +116,9 @@ def pure_eval_frame(frame):
             return (n.lineno, n.col_offset)

         nodes_before_stmt = [
-            node for node in nodes if start(node) < stmt.last_token.end  # type: ignore
+            node
+            for node in nodes
+            if start(node) < stmt.last_token.end  # type: ignore
         ]
         if nodes_before_stmt:
             # The position of the last node before or in the statement
sentry_sdk/integrations/sqlalchemy.py CHANGED
@@ -64,9 +64,7 @@ def _before_cursor_execute(
 @ensure_integration_enabled(SqlalchemyIntegration)
 def _after_cursor_execute(conn, cursor, statement, parameters, context, *args):
     # type: (Any, Any, Any, Any, Any, *Any) -> None
-    ctx_mgr = getattr(
-        context, "_sentry_sql_span_manager", None
-    )  # type: Optional[ContextManager[Any]]
+    ctx_mgr = getattr(context, "_sentry_sql_span_manager", None)  # type: Optional[ContextManager[Any]]

     if ctx_mgr is not None:
         context._sentry_sql_span_manager = None
@@ -92,9 +90,7 @@ def _handle_error(context, *args):
     # _after_cursor_execute does not get called for crashing SQL stmts. Judging
     # from SQLAlchemy codebase it does seem like any error coming into this
     # handler is going to be fatal.
-    ctx_mgr = getattr(
-        execution_context, "_sentry_sql_span_manager", None
-    )  # type: Optional[ContextManager[Any]]
+    ctx_mgr = getattr(execution_context, "_sentry_sql_span_manager", None)  # type: Optional[ContextManager[Any]]

     if ctx_mgr is not None:
         execution_context._sentry_sql_span_manager = None
sentry_sdk/integrations/starlette.py CHANGED
@@ -103,9 +103,7 @@ class StarletteIntegration(Integration):
         self.http_methods_to_capture = tuple(map(str.upper, http_methods_to_capture))

         if isinstance(failed_request_status_codes, Set):
-            self.failed_request_status_codes = (
-                failed_request_status_codes
-            )  # type: Container[int]
+            self.failed_request_status_codes = failed_request_status_codes  # type: Container[int]
         else:
             warnings.warn(
                 "Passing a list or None for failed_request_status_codes is deprecated. "
sentry_sdk/integrations/starlite.py CHANGED
@@ -1,3 +1,5 @@
+from copy import deepcopy
+
 import sentry_sdk
 from sentry_sdk.consts import OP
 from sentry_sdk.integrations import DidNotEnable, Integration
@@ -200,9 +202,7 @@ def patch_http_route_handle():
             return await old_handle(self, scope, receive, send)

         sentry_scope = sentry_sdk.get_isolation_scope()
-        request = scope["app"].request_class(
-            scope=scope, receive=receive, send=send
-        )  # type: Request[Any, Any]
+        request = scope["app"].request_class(scope=scope, receive=receive, send=send)  # type: Request[Any, Any]
         extracted_request_data = ConnectionDataExtractor(
             parse_body=True, parse_query=True
         )(request)
@@ -239,7 +239,7 @@ def patch_http_route_handle():

             event.update(
                 {
-                    "request": request_info,
+                    "request": deepcopy(request_info),
                     "transaction": tx_name,
                     "transaction_info": tx_info,
                 }
sentry_sdk/integrations/wsgi.py CHANGED
@@ -119,14 +119,15 @@ class SentryWsgiMiddleware:
                 origin=self.span_origin,
             )

-            with (
+            transaction_context = (
                 sentry_sdk.start_transaction(
                     transaction,
                     custom_sampling_context={"wsgi_environ": environ},
                 )
                 if transaction is not None
                 else nullcontext()
-            ):
+            )
+            with transaction_context:
                 try:
                     response = self.app(
                         environ,