sentry-sdk 2.39.0__py2.py3-none-any.whl → 2.41.0__py2.py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release: this version of sentry-sdk might be problematic.

Files changed (52)
  1. sentry_sdk/_metrics.py +81 -0
  2. sentry_sdk/_metrics_batcher.py +156 -0
  3. sentry_sdk/_types.py +27 -22
  4. sentry_sdk/ai/__init__.py +7 -0
  5. sentry_sdk/ai/utils.py +48 -0
  6. sentry_sdk/client.py +87 -36
  7. sentry_sdk/consts.py +15 -9
  8. sentry_sdk/envelope.py +31 -17
  9. sentry_sdk/feature_flags.py +0 -1
  10. sentry_sdk/hub.py +17 -9
  11. sentry_sdk/integrations/__init__.py +1 -0
  12. sentry_sdk/integrations/anthropic.py +10 -2
  13. sentry_sdk/integrations/asgi.py +3 -2
  14. sentry_sdk/integrations/dramatiq.py +89 -31
  15. sentry_sdk/integrations/grpc/aio/client.py +2 -1
  16. sentry_sdk/integrations/grpc/client.py +3 -4
  17. sentry_sdk/integrations/langchain.py +29 -5
  18. sentry_sdk/integrations/langgraph.py +5 -3
  19. sentry_sdk/integrations/launchdarkly.py +0 -1
  20. sentry_sdk/integrations/litellm.py +251 -0
  21. sentry_sdk/integrations/litestar.py +4 -4
  22. sentry_sdk/integrations/logging.py +1 -1
  23. sentry_sdk/integrations/loguru.py +1 -1
  24. sentry_sdk/integrations/openai.py +3 -2
  25. sentry_sdk/integrations/openai_agents/spans/ai_client.py +4 -1
  26. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +10 -2
  27. sentry_sdk/integrations/openai_agents/utils.py +60 -19
  28. sentry_sdk/integrations/pure_eval.py +3 -1
  29. sentry_sdk/integrations/spark/spark_driver.py +2 -1
  30. sentry_sdk/integrations/sqlalchemy.py +2 -6
  31. sentry_sdk/integrations/starlette.py +1 -3
  32. sentry_sdk/integrations/starlite.py +4 -4
  33. sentry_sdk/integrations/threading.py +52 -8
  34. sentry_sdk/integrations/wsgi.py +3 -2
  35. sentry_sdk/logger.py +1 -1
  36. sentry_sdk/profiler/utils.py +2 -6
  37. sentry_sdk/scope.py +6 -3
  38. sentry_sdk/serializer.py +1 -3
  39. sentry_sdk/session.py +4 -2
  40. sentry_sdk/sessions.py +4 -2
  41. sentry_sdk/tracing.py +36 -33
  42. sentry_sdk/tracing_utils.py +1 -3
  43. sentry_sdk/transport.py +9 -26
  44. sentry_sdk/types.py +3 -0
  45. sentry_sdk/utils.py +22 -4
  46. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/METADATA +3 -1
  47. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/RECORD +51 -49
  48. sentry_sdk/metrics.py +0 -965
  49. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/WHEEL +0 -0
  50. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/entry_points.txt +0 -0
  51. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/licenses/LICENSE +0 -0
  52. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/top_level.txt +0 -0
sentry_sdk/integrations/litellm.py (new file)
@@ -0,0 +1,251 @@
+ from typing import TYPE_CHECKING
+
+ import sentry_sdk
+ from sentry_sdk import consts
+ from sentry_sdk.ai.monitoring import record_token_usage
+ from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized
+ from sentry_sdk.consts import SPANDATA
+ from sentry_sdk.integrations import DidNotEnable, Integration
+ from sentry_sdk.scope import should_send_default_pii
+ from sentry_sdk.utils import event_from_exception
+
+ if TYPE_CHECKING:
+     from typing import Any, Dict
+     from datetime import datetime
+
+ try:
+     import litellm  # type: ignore[import-not-found]
+ except ImportError:
+     raise DidNotEnable("LiteLLM not installed")
+
+
+ def _get_metadata_dict(kwargs):
+     # type: (Dict[str, Any]) -> Dict[str, Any]
+     """Get the metadata dictionary from the kwargs."""
+     litellm_params = kwargs.setdefault("litellm_params", {})
+
+     # we need this weird little dance, as metadata might be set but may be None initially
+     metadata = litellm_params.get("metadata")
+     if metadata is None:
+         metadata = {}
+         litellm_params["metadata"] = metadata
+     return metadata
+
+
+ def _input_callback(kwargs):
+     # type: (Dict[str, Any]) -> None
+     """Handle the start of a request."""
+     integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration)
+
+     if integration is None:
+         return
+
+     # Get key parameters
+     full_model = kwargs.get("model", "")
+     try:
+         model, provider, _, _ = litellm.get_llm_provider(full_model)
+     except Exception:
+         model = full_model
+         provider = "unknown"
+
+     messages = kwargs.get("messages", [])
+     operation = "chat" if messages else "embeddings"
+
+     # Start a new span/transaction
+     span = get_start_span_function()(
+         op=(
+             consts.OP.GEN_AI_CHAT
+             if operation == "chat"
+             else consts.OP.GEN_AI_EMBEDDINGS
+         ),
+         name=f"{operation} {model}",
+         origin=LiteLLMIntegration.origin,
+     )
+     span.__enter__()
+
+     # Store span for later
+     _get_metadata_dict(kwargs)["_sentry_span"] = span
+
+     # Set basic data
+     set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, provider)
+     set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)
+
+     # Record messages if allowed
+     if messages and should_send_default_pii() and integration.include_prompts:
+         set_data_normalized(
+             span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
+         )
+
+     # Record other parameters
+     params = {
+         "model": SPANDATA.GEN_AI_REQUEST_MODEL,
+         "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
+         "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+         "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
+         "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+         "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+         "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+     }
+     for key, attribute in params.items():
+         value = kwargs.get(key)
+         if value is not None:
+             set_data_normalized(span, attribute, value)
+
+     # Record LiteLLM-specific parameters
+     litellm_params = {
+         "api_base": kwargs.get("api_base"),
+         "api_version": kwargs.get("api_version"),
+         "custom_llm_provider": kwargs.get("custom_llm_provider"),
+     }
+     for key, value in litellm_params.items():
+         if value is not None:
+             set_data_normalized(span, f"gen_ai.litellm.{key}", value)
+
+
+ def _success_callback(kwargs, completion_response, start_time, end_time):
+     # type: (Dict[str, Any], Any, datetime, datetime) -> None
+     """Handle successful completion."""
+
+     span = _get_metadata_dict(kwargs).get("_sentry_span")
+     if span is None:
+         return
+
+     integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration)
+     if integration is None:
+         return
+
+     try:
+         # Record model information
+         if hasattr(completion_response, "model"):
+             set_data_normalized(
+                 span, SPANDATA.GEN_AI_RESPONSE_MODEL, completion_response.model
+             )
+
+         # Record response content if allowed
+         if should_send_default_pii() and integration.include_prompts:
+             if hasattr(completion_response, "choices"):
+                 response_messages = []
+                 for choice in completion_response.choices:
+                     if hasattr(choice, "message"):
+                         if hasattr(choice.message, "model_dump"):
+                             response_messages.append(choice.message.model_dump())
+                         elif hasattr(choice.message, "dict"):
+                             response_messages.append(choice.message.dict())
+                         else:
+                             # Fallback for basic message objects
+                             msg = {}
+                             if hasattr(choice.message, "role"):
+                                 msg["role"] = choice.message.role
+                             if hasattr(choice.message, "content"):
+                                 msg["content"] = choice.message.content
+                             if hasattr(choice.message, "tool_calls"):
+                                 msg["tool_calls"] = choice.message.tool_calls
+                             response_messages.append(msg)
+
+                 if response_messages:
+                     set_data_normalized(
+                         span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_messages
+                     )
+
+         # Record token usage
+         if hasattr(completion_response, "usage"):
+             usage = completion_response.usage
+             record_token_usage(
+                 span,
+                 input_tokens=getattr(usage, "prompt_tokens", None),
+                 output_tokens=getattr(usage, "completion_tokens", None),
+                 total_tokens=getattr(usage, "total_tokens", None),
+             )
+
+     finally:
+         # Always finish the span and clean up
+         span.__exit__(None, None, None)
+
+
+ def _failure_callback(kwargs, exception, start_time, end_time):
+     # type: (Dict[str, Any], Exception, datetime, datetime) -> None
+     """Handle request failure."""
+     span = _get_metadata_dict(kwargs).get("_sentry_span")
+     if span is None:
+         return
+
+     try:
+         # Capture the exception
+         event, hint = event_from_exception(
+             exception,
+             client_options=sentry_sdk.get_client().options,
+             mechanism={"type": "litellm", "handled": False},
+         )
+         sentry_sdk.capture_event(event, hint=hint)
+     finally:
+         # Always finish the span and clean up
+         span.__exit__(type(exception), exception, None)
+
+
+ class LiteLLMIntegration(Integration):
+     """
+     LiteLLM integration for Sentry.
+
+     This integration automatically captures LiteLLM API calls and sends them to Sentry
+     for monitoring and error tracking. It supports all 100+ LLM providers that LiteLLM
+     supports, including OpenAI, Anthropic, Google, Cohere, and many others.
+
+     Features:
+     - Automatic exception capture for all LiteLLM calls
+     - Token usage tracking across all providers
+     - Provider detection and attribution
+     - Input/output message capture (configurable)
+     - Streaming response support
+     - Cost tracking integration
+
+     Usage:
+
+     ```python
+     import litellm
+     import sentry_sdk
+
+     # Initialize Sentry with the LiteLLM integration
+     sentry_sdk.init(
+         dsn="your-dsn",
+         send_default_pii=True,
+         integrations=[
+             sentry_sdk.integrations.LiteLLMIntegration(
+                 include_prompts=True  # Set to False to exclude message content
+             )
+         ]
+     )
+
+     # All LiteLLM calls will now be monitored
+     response = litellm.completion(
+         model="gpt-3.5-turbo",
+         messages=[{"role": "user", "content": "Hello!"}]
+     )
+     ```
+
+     Configuration:
+     - include_prompts (bool): Whether to include prompts and responses in spans.
+       Defaults to True. Set to False to exclude potentially sensitive data.
+     """
+
+     identifier = "litellm"
+     origin = f"auto.ai.{identifier}"
+
+     def __init__(self, include_prompts=True):
+         # type: (LiteLLMIntegration, bool) -> None
+         self.include_prompts = include_prompts
+
+     @staticmethod
+     def setup_once():
+         # type: () -> None
+         """Set up LiteLLM callbacks for monitoring."""
+         litellm.input_callback = litellm.input_callback or []
+         if _input_callback not in litellm.input_callback:
+             litellm.input_callback.append(_input_callback)
+
+         litellm.success_callback = litellm.success_callback or []
+         if _success_callback not in litellm.success_callback:
+             litellm.success_callback.append(_success_callback)
+
+         litellm.failure_callback = litellm.failure_callback or []
+         if _failure_callback not in litellm.failure_callback:
+             litellm.failure_callback.append(_failure_callback)
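Taken together, the three callbacks implement a span-per-call lifecycle: `_input_callback` opens the span and parks it in LiteLLM's per-call metadata, and whichever of `_success_callback` or `_failure_callback` fires later retrieves and closes it. A minimal self-contained sketch of that hand-off pattern (the `FakeSpan`/`on_*` names are illustrative, not SDK API, and the metadata handling is simplified):

```python
# Sketch of the span hand-off used above: open in the input hook, stash in
# per-call metadata, close in whichever completion hook fires later.
class FakeSpan:
    def __enter__(self):
        print("span opened")
        return self

    def __exit__(self, exc_type, exc, tb):
        print("span closed", "(error)" if exc_type else "(ok)")


def on_input(kwargs):
    span = FakeSpan()
    span.__enter__()  # entered manually; the exit happens in a later callback
    metadata = kwargs.setdefault("litellm_params", {}).setdefault("metadata", {})
    metadata["_sentry_span"] = span


def on_success(kwargs):
    span = kwargs["litellm_params"]["metadata"].pop("_sentry_span", None)
    if span is not None:
        span.__exit__(None, None, None)


call_kwargs = {"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "hi"}]}
on_input(call_kwargs)
on_success(call_kwargs)  # prints: span opened / span closed (ok)
```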
sentry_sdk/integrations/litestar.py
@@ -1,4 +1,6 @@
  from collections.abc import Set
+ from copy import deepcopy
+
  import sentry_sdk
  from sentry_sdk.consts import OP
  from sentry_sdk.integrations import (
@@ -222,9 +224,7 @@ def patch_http_route_handle():
              return await old_handle(self, scope, receive, send)

          sentry_scope = sentry_sdk.get_isolation_scope()
-         request = scope["app"].request_class(
-             scope=scope, receive=receive, send=send
-         )  # type: Request[Any, Any]
+         request = scope["app"].request_class(scope=scope, receive=receive, send=send)  # type: Request[Any, Any]
          extracted_request_data = ConnectionDataExtractor(
              parse_body=True, parse_query=True
          )(request)
@@ -262,7 +262,7 @@ def patch_http_route_handle():

              event.update(
                  {
-                     "request": request_info,
+                     "request": deepcopy(request_info),
                      "transaction": tx_name,
                      "transaction_info": tx_info,
                  }
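The substantive change here is `deepcopy(request_info)`: attaching the dict by reference means the event and the handler share the same nested structures, so any later mutation or scrubbing of one shows up in the other. The starlite integration gets the identical fix further down. A small illustration of the aliasing this avoids:

```python
from copy import deepcopy

request_info = {"headers": {"x-token": "abc"}}

event_by_ref = {"request": request_info}              # shares nested dicts
event_snapshot = {"request": deepcopy(request_info)}  # independent copy

# A mutation after the event was built leaks into the aliased version only:
request_info["headers"]["x-token"] = "[Filtered]"

print(event_by_ref["request"]["headers"]["x-token"])    # [Filtered]
print(event_snapshot["request"]["headers"]["x-token"])  # abc
```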
sentry_sdk/integrations/logging.py
@@ -409,7 +409,7 @@ class SentryLogsHandler(_BaseHandler):
              attrs["logger.name"] = record.name

          # noinspection PyProtectedMember
-         client._capture_experimental_log(
+         client._capture_log(
              {
                  "severity_text": otel_severity_text,
                  "severity_number": otel_severity_number,
sentry_sdk/integrations/loguru.py
@@ -193,7 +193,7 @@ def loguru_sentry_logs_handler(message):
      if record.get("name"):
          attrs["logger.name"] = record["name"]

-     client._capture_experimental_log(
+     client._capture_log(
          {
              "severity_text": otel_severity_text,
              "severity_number": otel_severity_number,
sentry_sdk/integrations/openai.py
@@ -3,7 +3,7 @@ from functools import wraps
  import sentry_sdk
  from sentry_sdk import consts
  from sentry_sdk.ai.monitoring import record_token_usage
- from sentry_sdk.ai.utils import set_data_normalized
+ from sentry_sdk.ai.utils import set_data_normalized, normalize_message_roles
  from sentry_sdk.consts import SPANDATA
  from sentry_sdk.integrations import DidNotEnable, Integration
  from sentry_sdk.scope import should_send_default_pii
@@ -182,8 +182,9 @@ def _set_input_data(span, kwargs, operation, integration):
          and should_send_default_pii()
          and integration.include_prompts
      ):
+         normalized_messages = normalize_message_roles(messages)
          set_data_normalized(
-             span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
+             span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False
          )

      # Input attributes: Common
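`normalize_message_roles` is a new helper in `sentry_sdk/ai/utils.py` (the +48-line entry in the file list); judging by its use here and in the agent integrations below, it maps free-form role names onto an allowed set before messages are recorded. A hypothetical sketch of that kind of normalization (the alias table and fallback are assumptions, not the SDK's actual mapping):

```python
ALLOWED_ROLES = {"system", "user", "assistant", "tool"}
ROLE_ALIASES = {"ai": "assistant", "human": "user"}  # assumed aliases


def normalize_role(role):
    # Map known aliases onto the canonical set; fall back for unknown roles.
    role = ROLE_ALIASES.get(role, role)
    return role if role in ALLOWED_ROLES else "user"  # assumed fallback


messages = [
    {"role": "human", "content": "hi"},
    {"role": "ai", "content": "hello"},
]
print([{**m, "role": normalize_role(m["role"])} for m in messages])
# roles become 'user' and 'assistant'
```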
sentry_sdk/integrations/openai_agents/spans/ai_client.py
@@ -7,6 +7,7 @@ from ..utils import (
      _set_input_data,
      _set_output_data,
      _set_usage_data,
+     _create_mcp_execute_tool_spans,
  )

  from typing import TYPE_CHECKING
@@ -28,12 +29,14 @@ def ai_client_span(agent, get_response_kwargs):
      # TODO-anton: remove hardcoded stuff and replace something that also works for embedding and so on
      span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")

+     _set_agent_data(span, agent)
+
      return span


  def update_ai_client_span(span, agent, get_response_kwargs, result):
      # type: (sentry_sdk.tracing.Span, Agent, dict[str, Any], Any) -> None
-     _set_agent_data(span, agent)
      _set_usage_data(span, result.usage)
      _set_input_data(span, get_response_kwargs)
      _set_output_data(span, result)
+     _create_mcp_execute_tool_spans(span, result)
sentry_sdk/integrations/openai_agents/spans/invoke_agent.py
@@ -1,5 +1,9 @@
  import sentry_sdk
- from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized
+ from sentry_sdk.ai.utils import (
+     get_start_span_function,
+     set_data_normalized,
+     normalize_message_roles,
+ )
  from sentry_sdk.consts import OP, SPANDATA
  from sentry_sdk.scope import should_send_default_pii
  from sentry_sdk.utils import safe_serialize
@@ -56,8 +60,12 @@ def invoke_agent_span(context, agent, kwargs):
          )

          if len(messages) > 0:
+             normalized_messages = normalize_message_roles(messages)
              set_data_normalized(
-                 span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
+                 span,
+                 SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                 normalized_messages,
+                 unpack=False,
              )

      _set_agent_data(span, agent)
sentry_sdk/integrations/openai_agents/utils.py
@@ -1,6 +1,11 @@
  import sentry_sdk
- from sentry_sdk.ai.utils import set_data_normalized
- from sentry_sdk.consts import SPANDATA
+ from sentry_sdk.ai.utils import (
+     GEN_AI_ALLOWED_MESSAGE_ROLES,
+     normalize_message_roles,
+     set_data_normalized,
+     normalize_message_role,
+ )
+ from sentry_sdk.consts import SPANDATA, SPANSTATUS, OP
  from sentry_sdk.integrations import DidNotEnable
  from sentry_sdk.scope import should_send_default_pii
  from sentry_sdk.tracing_utils import set_span_errored
@@ -94,35 +99,47 @@ def _set_input_data(span, get_response_kwargs):
      # type: (sentry_sdk.tracing.Span, dict[str, Any]) -> None
      if not should_send_default_pii():
          return
+     request_messages = []

-     messages_by_role = {
-         "system": [],
-         "user": [],
-         "assistant": [],
-         "tool": [],
-     }  # type: (dict[str, list[Any]])
      system_instructions = get_response_kwargs.get("system_instructions")
      if system_instructions:
-         messages_by_role["system"].append({"type": "text", "text": system_instructions})
+         request_messages.append(
+             {
+                 "role": GEN_AI_ALLOWED_MESSAGE_ROLES.SYSTEM,
+                 "content": [{"type": "text", "text": system_instructions}],
+             }
+         )

      for message in get_response_kwargs.get("input", []):
          if "role" in message:
-             messages_by_role[message.get("role")].append(
-                 {"type": "text", "text": message.get("content")}
+             normalized_role = normalize_message_role(message.get("role"))
+             request_messages.append(
+                 {
+                     "role": normalized_role,
+                     "content": [{"type": "text", "text": message.get("content")}],
+                 }
              )
          else:
              if message.get("type") == "function_call":
-                 messages_by_role["assistant"].append(message)
+                 request_messages.append(
+                     {
+                         "role": GEN_AI_ALLOWED_MESSAGE_ROLES.ASSISTANT,
+                         "content": [message],
+                     }
+                 )
              elif message.get("type") == "function_call_output":
-                 messages_by_role["tool"].append(message)
-
-     request_messages = []
-     for role, messages in messages_by_role.items():
-         if len(messages) > 0:
-             request_messages.append({"role": role, "content": messages})
+                 request_messages.append(
+                     {
+                         "role": GEN_AI_ALLOWED_MESSAGE_ROLES.TOOL,
+                         "content": [message],
+                     }
+                 )

      set_data_normalized(
-         span, SPANDATA.GEN_AI_REQUEST_MESSAGES, request_messages, unpack=False
+         span,
+         SPANDATA.GEN_AI_REQUEST_MESSAGES,
+         normalize_message_roles(request_messages),
+         unpack=False,
      )

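The visible effect of this rewrite: messages were previously bucketed by role (all system entries, then all user entries, and so on), which destroyed turn order in multi-turn input; the new code emits one entry per input message in original order. Roughly:

```python
input_messages = [
    {"role": "user", "content": "What is 2+2?"},
    {"role": "assistant", "content": "4"},
    {"role": "user", "content": "And 3+3?"},
]

# New shape: one entry per message, conversation order preserved.
request_messages = [
    {"role": m["role"], "content": [{"type": "text", "text": m["content"]}]}
    for m in input_messages
]
print([m["role"] for m in request_messages])
# ['user', 'assistant', 'user'] -- the old role-bucketed shape would have
# collapsed this to one 'user' entry and one 'assistant' entry.
```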
@@ -156,3 +173,27 @@ def _set_output_data(span, result):
          set_data_normalized(
              span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
          )
+
+
+ def _create_mcp_execute_tool_spans(span, result):
+     # type: (sentry_sdk.tracing.Span, agents.Result) -> None
+     for output in result.output:
+         if output.__class__.__name__ == "McpCall":
+             with sentry_sdk.start_span(
+                 op=OP.GEN_AI_EXECUTE_TOOL,
+                 description=f"execute_tool {output.name}",
+                 start_timestamp=span.start_timestamp,
+             ) as execute_tool_span:
+                 set_data_normalized(execute_tool_span, SPANDATA.GEN_AI_TOOL_TYPE, "mcp")
+                 set_data_normalized(
+                     execute_tool_span, SPANDATA.GEN_AI_TOOL_NAME, output.name
+                 )
+                 if should_send_default_pii():
+                     execute_tool_span.set_data(
+                         SPANDATA.GEN_AI_TOOL_INPUT, output.arguments
+                     )
+                     execute_tool_span.set_data(
+                         SPANDATA.GEN_AI_TOOL_OUTPUT, output.output
+                     )
+                 if output.error:
+                     execute_tool_span.set_status(SPANSTATUS.ERROR)
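Note the duck typing: the helper matches MCP tool calls by class name (`output.__class__.__name__ == "McpCall"`) instead of importing the type from the `agents` package, so it only depends on the attributes it reads (`name`, `arguments`, `output`, `error`). A stand-in object with those attributes is enough to exercise the check:

```python
class McpCall:  # stand-in; the real class comes from the `agents` package
    def __init__(self, name, arguments, output, error=None):
        self.name = name
        self.arguments = arguments
        self.output = output
        self.error = error


outputs = [McpCall("search_docs", '{"q": "sentry"}', "3 hits"), object()]
for item in outputs:
    # same check the helper uses: compare the class name, no import needed
    print(item.__class__.__name__ == "McpCall")
# True, then False
```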
sentry_sdk/integrations/pure_eval.py
@@ -116,7 +116,9 @@ def pure_eval_frame(frame):
          return (n.lineno, n.col_offset)

      nodes_before_stmt = [
-         node for node in nodes if start(node) < stmt.last_token.end  # type: ignore
+         node
+         for node in nodes
+         if start(node) < stmt.last_token.end  # type: ignore
      ]
      if nodes_before_stmt:
          # The position of the last node before or in the statement
sentry_sdk/integrations/spark/spark_driver.py
@@ -158,7 +158,8 @@ class SparkListener:
          pass

      def onExecutorBlacklistedForStage(  # noqa: N802
-         self, executorBlacklistedForStage  # noqa: N803
+         self,
+         executorBlacklistedForStage,  # noqa: N803
      ):
          # type: (Any) -> None
          pass
sentry_sdk/integrations/sqlalchemy.py
@@ -64,9 +64,7 @@ def _before_cursor_execute(
  @ensure_integration_enabled(SqlalchemyIntegration)
  def _after_cursor_execute(conn, cursor, statement, parameters, context, *args):
      # type: (Any, Any, Any, Any, Any, *Any) -> None
-     ctx_mgr = getattr(
-         context, "_sentry_sql_span_manager", None
-     )  # type: Optional[ContextManager[Any]]
+     ctx_mgr = getattr(context, "_sentry_sql_span_manager", None)  # type: Optional[ContextManager[Any]]

      if ctx_mgr is not None:
          context._sentry_sql_span_manager = None
@@ -92,9 +90,7 @@ def _handle_error(context, *args):
      # _after_cursor_execute does not get called for crashing SQL stmts. Judging
      # from SQLAlchemy codebase it does seem like any error coming into this
      # handler is going to be fatal.
-     ctx_mgr = getattr(
-         execution_context, "_sentry_sql_span_manager", None
-     )  # type: Optional[ContextManager[Any]]
+     ctx_mgr = getattr(execution_context, "_sentry_sql_span_manager", None)  # type: Optional[ContextManager[Any]]

      if ctx_mgr is not None:
          execution_context._sentry_sql_span_manager = None
sentry_sdk/integrations/starlette.py
@@ -103,9 +103,7 @@ class StarletteIntegration(Integration):
          self.http_methods_to_capture = tuple(map(str.upper, http_methods_to_capture))

          if isinstance(failed_request_status_codes, Set):
-             self.failed_request_status_codes = (
-                 failed_request_status_codes
-             )  # type: Container[int]
+             self.failed_request_status_codes = failed_request_status_codes  # type: Container[int]
          else:
              warnings.warn(
                  "Passing a list or None for failed_request_status_codes is deprecated. "
sentry_sdk/integrations/starlite.py
@@ -1,3 +1,5 @@
+ from copy import deepcopy
+
  import sentry_sdk
  from sentry_sdk.consts import OP
  from sentry_sdk.integrations import DidNotEnable, Integration
@@ -200,9 +202,7 @@ def patch_http_route_handle():
              return await old_handle(self, scope, receive, send)

          sentry_scope = sentry_sdk.get_isolation_scope()
-         request = scope["app"].request_class(
-             scope=scope, receive=receive, send=send
-         )  # type: Request[Any, Any]
+         request = scope["app"].request_class(scope=scope, receive=receive, send=send)  # type: Request[Any, Any]
          extracted_request_data = ConnectionDataExtractor(
              parse_body=True, parse_query=True
          )(request)
@@ -239,7 +239,7 @@ def patch_http_route_handle():

              event.update(
                  {
-                     "request": request_info,
+                     "request": deepcopy(request_info),
                      "transaction": tx_name,
                      "transaction_info": tx_info,
                  }
sentry_sdk/integrations/threading.py
@@ -2,6 +2,7 @@ import sys
  import warnings
  from functools import wraps
  from threading import Thread, current_thread
+ from concurrent.futures import ThreadPoolExecutor, Future

  import sentry_sdk
  from sentry_sdk.integrations import Integration
@@ -24,6 +25,7 @@ if TYPE_CHECKING:
      from sentry_sdk._types import ExcInfo

      F = TypeVar("F", bound=Callable[..., Any])
+     T = TypeVar("T", bound=Any)


  class ThreadingIntegration(Integration):
@@ -59,6 +61,15 @@ class ThreadingIntegration(Integration):
              django_version = None
              channels_version = None

+         is_async_emulated_with_threads = (
+             sys.version_info < (3, 9)
+             and channels_version is not None
+             and channels_version < "4.0.0"
+             and django_version is not None
+             and django_version >= (3, 0)
+             and django_version < (4, 0)
+         )
+
          @wraps(old_start)
          def sentry_start(self, *a, **kw):
              # type: (Thread, *Any, **Any) -> Any
@@ -67,14 +78,7 @@ class ThreadingIntegration(Integration):
                  return old_start(self, *a, **kw)

              if integration.propagate_scope:
-                 if (
-                     sys.version_info < (3, 9)
-                     and channels_version is not None
-                     and channels_version < "4.0.0"
-                     and django_version is not None
-                     and django_version >= (3, 0)
-                     and django_version < (4, 0)
-                 ):
+                 if is_async_emulated_with_threads:
                      warnings.warn(
                          "There is a known issue with Django channels 2.x and 3.x when using Python 3.8 or older. "
                          "(Async support is emulated using threads and some Sentry data may be leaked between those threads.) "
@@ -109,6 +113,9 @@ class ThreadingIntegration(Integration):
                  return old_start(self, *a, **kw)

          Thread.start = sentry_start  # type: ignore
+         ThreadPoolExecutor.submit = _wrap_threadpool_executor_submit(  # type: ignore
+             ThreadPoolExecutor.submit, is_async_emulated_with_threads
+         )


  def _wrap_run(isolation_scope_to_use, current_scope_to_use, old_run_func):
@@ -134,6 +141,43 @@ def _wrap_run(isolation_scope_to_use, current_scope_to_use, old_run_func):
      return run  # type: ignore


+ def _wrap_threadpool_executor_submit(func, is_async_emulated_with_threads):
+     # type: (Callable[..., Future[T]], bool) -> Callable[..., Future[T]]
+     """
+     Wrap submit call to propagate scopes on task submission.
+     """
+
+     @wraps(func)
+     def sentry_submit(self, fn, *args, **kwargs):
+         # type: (ThreadPoolExecutor, Callable[..., T], *Any, **Any) -> Future[T]
+         integration = sentry_sdk.get_client().get_integration(ThreadingIntegration)
+         if integration is None:
+             return func(self, fn, *args, **kwargs)
+
+         if integration.propagate_scope and is_async_emulated_with_threads:
+             isolation_scope = sentry_sdk.get_isolation_scope()
+             current_scope = sentry_sdk.get_current_scope()
+         elif integration.propagate_scope:
+             isolation_scope = sentry_sdk.get_isolation_scope().fork()
+             current_scope = sentry_sdk.get_current_scope().fork()
+         else:
+             isolation_scope = None
+             current_scope = None
+
+         def wrapped_fn(*args, **kwargs):
+             # type: (*Any, **Any) -> Any
+             if isolation_scope is not None and current_scope is not None:
+                 with use_isolation_scope(isolation_scope):
+                     with use_scope(current_scope):
+                         return fn(*args, **kwargs)
+
+             return fn(*args, **kwargs)
+
+         return func(self, wrapped_fn, *args, **kwargs)
+
+     return sentry_submit
+
+
  def _capture_exception():
      # type: () -> ExcInfo
      exc_info = sys.exc_info()
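With `ThreadPoolExecutor.submit` patched, scope data set before submission should travel into pool workers the same way it already does for bare `Thread.start`: each task runs under forked copies of the scopes active at submit time (except in the Django-channels edge case above, where the scopes are reused unforked). A hedged usage sketch:

```python
import sentry_sdk
from concurrent.futures import ThreadPoolExecutor

sentry_sdk.init(dsn="your-dsn")  # ThreadingIntegration is enabled by default
sentry_sdk.set_tag("request_id", "abc123")


def task():
    # Runs under forked copies of the scopes captured at submit() time,
    # so the tag above should be attached to anything captured here.
    sentry_sdk.capture_message("inside worker")


with ThreadPoolExecutor(max_workers=2) as pool:
    pool.submit(task).result()
```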
sentry_sdk/integrations/wsgi.py
@@ -119,14 +119,15 @@ class SentryWsgiMiddleware:
                  origin=self.span_origin,
              )

-             with (
+             transaction_context = (
                  sentry_sdk.start_transaction(
                      transaction,
                      custom_sampling_context={"wsgi_environ": environ},
                  )
                  if transaction is not None
                  else nullcontext()
-             ):
+             )
+             with transaction_context:
                  try:
                      response = self.app(
                          environ,
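The wsgi change is a readability refactor: the conditional expression that picks between a real transaction and a no-op context manager is bound to a name before the `with` statement instead of being inlined in it. The underlying pattern, entering a context only when one exists, looks like this in isolation (hypothetical minimal version):

```python
from contextlib import nullcontext


def handle(transaction=None):
    # Pick the context up front: the real transaction when tracing produced
    # one, otherwise a do-nothing stand-in with the same interface.
    transaction_context = transaction if transaction is not None else nullcontext()
    with transaction_context:
        return "response"


print(handle())  # works with no transaction: nullcontext() is a no-op
```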