sentry-sdk 3.0.0a4__py2.py3-none-any.whl → 3.0.0a6__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of sentry-sdk was flagged by the registry scanner.
Files changed (33)
  1. sentry_sdk/__init__.py +1 -0
  2. sentry_sdk/ai/utils.py +7 -8
  3. sentry_sdk/api.py +68 -0
  4. sentry_sdk/client.py +93 -17
  5. sentry_sdk/consts.py +126 -9
  6. sentry_sdk/crons/api.py +5 -0
  7. sentry_sdk/integrations/anthropic.py +133 -73
  8. sentry_sdk/integrations/asgi.py +10 -9
  9. sentry_sdk/integrations/asyncio.py +85 -20
  10. sentry_sdk/integrations/clickhouse_driver.py +55 -28
  11. sentry_sdk/integrations/fastapi.py +1 -7
  12. sentry_sdk/integrations/gnu_backtrace.py +6 -3
  13. sentry_sdk/integrations/langchain.py +462 -218
  14. sentry_sdk/integrations/litestar.py +1 -1
  15. sentry_sdk/integrations/openai_agents/patches/agent_run.py +0 -2
  16. sentry_sdk/integrations/openai_agents/patches/runner.py +18 -15
  17. sentry_sdk/integrations/quart.py +1 -1
  18. sentry_sdk/integrations/starlette.py +1 -5
  19. sentry_sdk/integrations/starlite.py +1 -1
  20. sentry_sdk/opentelemetry/scope.py +3 -1
  21. sentry_sdk/opentelemetry/span_processor.py +1 -0
  22. sentry_sdk/scope.py +11 -11
  23. sentry_sdk/tracing.py +100 -18
  24. sentry_sdk/tracing_utils.py +330 -33
  25. sentry_sdk/transport.py +357 -62
  26. sentry_sdk/utils.py +23 -5
  27. sentry_sdk/worker.py +197 -3
  28. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/METADATA +3 -1
  29. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/RECORD +33 -33
  30. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/WHEEL +0 -0
  31. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/entry_points.txt +0 -0
  32. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/licenses/LICENSE +0 -0
  33. {sentry_sdk-3.0.0a4.dist-info → sentry_sdk-3.0.0a6.dist-info}/top_level.txt +0 -0
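
The headline change in this diff is an experimental async transport in sentry_sdk/transport.py. Per the make_transport() hunk at the end of that file's diff below, it is only selected when the transport_async experiment flag is set, the AsyncioIntegration is enabled, a running event loop exists at init time, and httpcore plus anyio are importable (ASYNC_TRANSPORT_ENABLED). A minimal opt-in sketch based on those conditions; the DSN is a placeholder, and the flag is experimental, so treat this as illustrative rather than a stable API:

import asyncio

import sentry_sdk
from sentry_sdk.integrations.asyncio import AsyncioIntegration


async def main() -> None:
    # init() must run inside a running event loop so the async transport can
    # capture it via asyncio.get_running_loop(); otherwise the SDK logs a
    # warning and falls back to the sync transport.
    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        integrations=[AsyncioIntegration()],     # required, see make_transport()
        _experiments={"transport_async": True},  # experimental opt-in flag
    )
    ...


asyncio.run(main())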
sentry_sdk/transport.py CHANGED
@@ -6,6 +6,7 @@ import gzip
 import socket
 import ssl
 import time
+import asyncio
 from datetime import datetime, timedelta, timezone
 from collections import defaultdict
 from urllib.request import getproxies
@@ -17,18 +18,34 @@ except ImportError:
 
 try:
     import httpcore
+except ImportError:
+    httpcore = None  # type: ignore
+
+try:
     import h2  # noqa: F401
 
-    HTTP2_ENABLED = True
+    HTTP2_ENABLED = httpcore is not None
 except ImportError:
     HTTP2_ENABLED = False
 
+try:
+    import anyio  # noqa: F401
+
+    ASYNC_TRANSPORT_ENABLED = httpcore is not None
+except ImportError:
+    ASYNC_TRANSPORT_ENABLED = False
+
 import urllib3
 import certifi
 
 from sentry_sdk.consts import EndpointType
-from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions
-from sentry_sdk.worker import BackgroundWorker
+from sentry_sdk.utils import (
+    Dsn,
+    logger,
+    capture_internal_exceptions,
+    mark_sentry_task_internal,
+)
+from sentry_sdk.worker import BackgroundWorker, Worker, AsyncWorker
 from sentry_sdk.envelope import Envelope, Item, PayloadRef
 
 from typing import TYPE_CHECKING
@@ -162,8 +179,8 @@ def _parse_rate_limits(
             continue
 
 
-class BaseHttpTransport(Transport):
-    """The base HTTP transport."""
+class HttpTransportCore(Transport):
+    """Shared base class for sync and async transports."""
 
     TIMEOUT = 30  # seconds
 
@@ -173,7 +190,7 @@ class BaseHttpTransport(Transport):
         Transport.__init__(self, options)
         assert self.parsed_dsn is not None
         self.options: Dict[str, Any] = options
-        self._worker = BackgroundWorker(queue_size=options["transport_queue_size"])
+        self._worker = self._create_worker(options)
         self._auth = self.parsed_dsn.to_auth("sentry.python/%s" % VERSION)
         self._disabled_until: Dict[Optional[str], datetime] = {}
         # We only use this Retry() class for the `get_retry_after` method it exposes
@@ -224,6 +241,9 @@ class BaseHttpTransport(Transport):
         elif self._compression_algo == "br":
             self._compression_level = 4
 
+    def _create_worker(self, options: dict[str, Any]) -> Worker:
+        raise NotImplementedError()
+
     def record_lost_event(
         self: Self,
         reason: str,
@@ -286,12 +306,8 @@ class BaseHttpTransport(Transport):
                 seconds=retry_after
             )
 
-    def _send_request(
-        self: Self,
-        body: bytes,
-        headers: Dict[str, str],
-        endpoint_type: EndpointType = EndpointType.ENVELOPE,
-        envelope: Optional[Envelope] = None,
+    def _handle_request_error(
+        self: Self, envelope: Optional[Envelope], loss_reason: str = "network"
     ) -> None:
         def record_loss(reason: str) -> None:
             if envelope is None:
@@ -300,45 +316,45 @@ class BaseHttpTransport(Transport):
                 for item in envelope.items:
                     self.record_lost_event(reason, item=item)
 
+        self.on_dropped_event(loss_reason)
+        record_loss("network_error")
+
+    def _handle_response(
+        self: Self,
+        response: Union[urllib3.BaseHTTPResponse, httpcore.Response],
+        envelope: Optional[Envelope],
+    ) -> None:
+        self._update_rate_limits(response)
+
+        if response.status == 429:
+            # if we hit a 429. Something was rate limited but we already
+            # acted on this in `self._update_rate_limits`. Note that we
+            # do not want to record event loss here as we will have recorded
+            # an outcome in relay already.
+            self.on_dropped_event("status_429")
+            pass
+
+        elif response.status >= 300 or response.status < 200:
+            logger.error(
+                "Unexpected status code: %s (body: %s)",
+                response.status,
+                getattr(response, "data", getattr(response, "content", None)),
+            )
+            self._handle_request_error(
+                envelope=envelope, loss_reason="status_{}".format(response.status)
+            )
+
+    def _update_headers(
+        self: Self,
+        headers: Dict[str, str],
+    ) -> None:
+
         headers.update(
             {
                 "User-Agent": str(self._auth.client),
                 "X-Sentry-Auth": str(self._auth.to_header()),
             }
         )
-        try:
-            response = self._request(
-                "POST",
-                endpoint_type,
-                body,
-                headers,
-            )
-        except Exception:
-            self.on_dropped_event("network")
-            record_loss("network_error")
-            raise
-
-        try:
-            self._update_rate_limits(response)
-
-            if response.status == 429:
-                # if we hit a 429. Something was rate limited but we already
-                # acted on this in `self._update_rate_limits`. Note that we
-                # do not want to record event loss here as we will have recorded
-                # an outcome in relay already.
-                self.on_dropped_event("status_429")
-                pass
-
-            elif response.status >= 300 or response.status < 200:
-                logger.error(
-                    "Unexpected status code: %s (body: %s)",
-                    response.status,
-                    getattr(response, "data", getattr(response, "content", None)),
-                )
-                self.on_dropped_event("status_{}".format(response.status))
-                record_loss("network_error")
-        finally:
-            response.close()
 
     def on_dropped_event(self: Self, _reason: str) -> None:
         return None
@@ -375,11 +391,6 @@ class BaseHttpTransport(Transport):
             type="client_report",
         )
 
-    def _flush_client_reports(self: Self, force: bool = False) -> None:
-        client_report = self._fetch_pending_client_report(force=force, interval=60)
-        if client_report is not None:
-            self.capture_envelope(Envelope(items=[client_report]))
-
    def _check_disabled(self: Self, category: EventDataCategory) -> bool:
        def _disabled(bucket: Optional[EventDataCategory]) -> bool:
            ts = self._disabled_until.get(bucket)
@@ -398,7 +409,9 @@ class BaseHttpTransport(Transport):
     def is_healthy(self: Self) -> bool:
         return not (self._is_worker_full() or self._is_rate_limited())
 
-    def _send_envelope(self: Self, envelope: Envelope) -> None:
+    def _prepare_envelope(
+        self: Self, envelope: Envelope
+    ) -> Optional[Tuple[Envelope, io.BytesIO, Dict[str, str]]]:
 
         # remove all items from the envelope which are over quota
         new_items = []
@@ -442,13 +455,7 @@ class BaseHttpTransport(Transport):
         if content_encoding:
             headers["Content-Encoding"] = content_encoding
 
-        self._send_request(
-            body.getvalue(),
-            headers=headers,
-            endpoint_type=EndpointType.ENVELOPE,
-            envelope=envelope,
-        )
-        return None
+        return envelope, body, headers
 
     def _serialize_envelope(
         self: Self, envelope: Envelope
@@ -494,6 +501,9 @@ class BaseHttpTransport(Transport):
         httpcore.SOCKSProxy,
         httpcore.HTTPProxy,
         httpcore.ConnectionPool,
+        httpcore.AsyncSOCKSProxy,
+        httpcore.AsyncHTTPProxy,
+        httpcore.AsyncConnectionPool,
     ]:
         raise NotImplementedError()
 
@@ -506,6 +516,57 @@ class BaseHttpTransport(Transport):
     ) -> Union[urllib3.BaseHTTPResponse, httpcore.Response]:
         raise NotImplementedError()
 
+    def kill(self: Self) -> None:
+        logger.debug("Killing HTTP transport")
+        self._worker.kill()
+
+
+class BaseHttpTransport(HttpTransportCore):
+    """The base HTTP transport."""
+
+    def _send_envelope(self: Self, envelope: Envelope) -> None:
+        _prepared_envelope = self._prepare_envelope(envelope)
+        if _prepared_envelope is not None:
+            envelope, body, headers = _prepared_envelope
+            self._send_request(
+                body.getvalue(),
+                headers=headers,
+                endpoint_type=EndpointType.ENVELOPE,
+                envelope=envelope,
+            )
+        return None
+
+    def _send_request(
+        self: Self,
+        body: bytes,
+        headers: Dict[str, str],
+        endpoint_type: EndpointType,
+        envelope: Optional[Envelope],
+    ) -> None:
+        self._update_headers(headers)
+        try:
+            response = self._request(
+                "POST",
+                endpoint_type,
+                body,
+                headers,
+            )
+        except Exception:
+            self._handle_request_error(envelope=envelope, loss_reason="network")
+            raise
+        try:
+            self._handle_response(response=response, envelope=envelope)
+        finally:
+            response.close()
+
+    def _create_worker(self: Self, options: dict[str, Any]) -> Worker:
+        return BackgroundWorker(queue_size=options["transport_queue_size"])
+
+    def _flush_client_reports(self: Self, force: bool = False) -> None:
+        client_report = self._fetch_pending_client_report(force=force, interval=60)
+        if client_report is not None:
+            self.capture_envelope(Envelope(items=[client_report]))
+
     def capture_envelope(self: Self, envelope: Envelope) -> None:
         def send_envelope_wrapper() -> None:
             with capture_internal_exceptions():
@@ -528,10 +589,6 @@ class BaseHttpTransport(Transport):
             self._worker.submit(lambda: self._flush_client_reports(force=True))
         self._worker.flush(timeout, callback)
 
-    def kill(self: Self) -> None:
-        logger.debug("Killing HTTP transport")
-        self._worker.kill()
-
 
 class HttpTransport(BaseHttpTransport):
     if TYPE_CHECKING:
@@ -639,6 +696,223 @@ class HttpTransport(BaseHttpTransport):
         )
 
 
+if not ASYNC_TRANSPORT_ENABLED:
+    # Sorry, no AsyncHttpTransport for you
+    AsyncHttpTransport = HttpTransport
+
+else:
+
+    class AsyncHttpTransport(HttpTransportCore):  # type: ignore
+        def __init__(self: Self, options: Dict[str, Any]) -> None:
+            super().__init__(options)
+            # Requires event loop at init time
+            self.loop = asyncio.get_running_loop()
+
+        def _create_worker(self: Self, options: dict[str, Any]) -> Worker:
+            return AsyncWorker(queue_size=options["transport_queue_size"])
+
+        def _get_header_value(self: Self, response: Any, header: str) -> Optional[str]:
+            return next(
+                (
+                    val.decode("ascii")
+                    for key, val in response.headers
+                    if key.decode("ascii").lower() == header
+                ),
+                None,
+            )
+
+        async def _send_envelope(self: Self, envelope: Envelope) -> None:
+            _prepared_envelope = self._prepare_envelope(envelope)
+            if _prepared_envelope is not None:
+                envelope, body, headers = _prepared_envelope
+                await self._send_request(
+                    body.getvalue(),
+                    headers=headers,
+                    endpoint_type=EndpointType.ENVELOPE,
+                    envelope=envelope,
+                )
+            return None
+
+        async def _send_request(
+            self: Self,
+            body: bytes,
+            headers: Dict[str, str],
+            endpoint_type: EndpointType,
+            envelope: Optional[Envelope],
+        ) -> None:
+            self._update_headers(headers)
+            try:
+                response = await self._request(
+                    "POST",
+                    endpoint_type,
+                    body,
+                    headers,
+                )
+            except Exception:
+                self._handle_request_error(envelope=envelope, loss_reason="network")
+                raise
+            try:
+                self._handle_response(response=response, envelope=envelope)
+            finally:
+                await response.aclose()
+
+        async def _request(  # type: ignore[override]
+            self: Self,
+            method: str,
+            endpoint_type: EndpointType,
+            body: Any,
+            headers: Mapping[str, str],
+        ) -> httpcore.Response:
+            return await self._pool.request(
+                method,
+                self._auth.get_api_url(endpoint_type),
+                content=body,
+                headers=headers,  # type: ignore
+                extensions={
+                    "timeout": {
+                        "pool": self.TIMEOUT,
+                        "connect": self.TIMEOUT,
+                        "write": self.TIMEOUT,
+                        "read": self.TIMEOUT,
+                    }
+                },
+            )
+
+        async def _flush_client_reports(self: Self, force: bool = False) -> None:
+            client_report = self._fetch_pending_client_report(force=force, interval=60)
+            if client_report is not None:
+                self.capture_envelope(Envelope(items=[client_report]))
+
+        def _capture_envelope(self: Self, envelope: Envelope) -> None:
+            async def send_envelope_wrapper() -> None:
+                with capture_internal_exceptions():
+                    await self._send_envelope(envelope)
+                    await self._flush_client_reports()
+
+            if not self._worker.submit(send_envelope_wrapper):
+                self.on_dropped_event("full_queue")
+                for item in envelope.items:
+                    self.record_lost_event("queue_overflow", item=item)
+
+        def capture_envelope(self: Self, envelope: Envelope) -> None:
+            # Synchronous entry point
+            if self.loop and self.loop.is_running():
+                self.loop.call_soon_threadsafe(self._capture_envelope, envelope)
+            else:
+                # The event loop is no longer running
+                logger.warning("Async Transport is not running in an event loop.")
+                self.on_dropped_event("internal_sdk_error")
+                for item in envelope.items:
+                    self.record_lost_event("internal_sdk_error", item=item)
+
+        def flush(  # type: ignore[override]
+            self: Self,
+            timeout: float,
+            callback: Optional[Callable[[int, float], None]] = None,
+        ) -> Optional[asyncio.Task[None]]:
+            logger.debug("Flushing HTTP transport")
+
+            if timeout > 0:
+                self._worker.submit(lambda: self._flush_client_reports(force=True))
+                return self._worker.flush(timeout, callback)  # type: ignore[func-returns-value]
+            return None
+
+        def _get_pool_options(self: Self) -> Dict[str, Any]:
+            options: Dict[str, Any] = {
+                "http2": False,  # no HTTP2 for now
+                "retries": 3,
+            }
+
+            socket_options = (
+                self.options["socket_options"]
+                if self.options["socket_options"] is not None
+                else []
+            )
+
+            used_options = {(o[0], o[1]) for o in socket_options}
+            for default_option in KEEP_ALIVE_SOCKET_OPTIONS:
+                if (default_option[0], default_option[1]) not in used_options:
+                    socket_options.append(default_option)
+
+            options["socket_options"] = socket_options
+
+            ssl_context = ssl.create_default_context()
+            ssl_context.load_verify_locations(
+                self.options["ca_certs"]  # User-provided bundle from the SDK init
+                or os.environ.get("SSL_CERT_FILE")
+                or os.environ.get("REQUESTS_CA_BUNDLE")
+                or certifi.where()
+            )
+            cert_file = self.options["cert_file"] or os.environ.get("CLIENT_CERT_FILE")
+            key_file = self.options["key_file"] or os.environ.get("CLIENT_KEY_FILE")
+            if cert_file is not None:
+                ssl_context.load_cert_chain(cert_file, key_file)
+
+            options["ssl_context"] = ssl_context
+
+            return options
+
+        def _make_pool(
+            self: Self,
+        ) -> Union[
+            httpcore.AsyncSOCKSProxy,
+            httpcore.AsyncHTTPProxy,
+            httpcore.AsyncConnectionPool,
+        ]:
+            if self.parsed_dsn is None:
+                raise ValueError("Cannot create HTTP-based transport without valid DSN")
+            proxy = None
+            no_proxy = self._in_no_proxy(self.parsed_dsn)
+
+            # try HTTPS first
+            https_proxy = self.options["https_proxy"]
+            if self.parsed_dsn.scheme == "https" and (https_proxy != ""):
+                proxy = https_proxy or (not no_proxy and getproxies().get("https"))
+
+            # maybe fallback to HTTP proxy
+            http_proxy = self.options["http_proxy"]
+            if not proxy and (http_proxy != ""):
+                proxy = http_proxy or (not no_proxy and getproxies().get("http"))
+
+            opts = self._get_pool_options()
+
+            if proxy:
+                proxy_headers = self.options["proxy_headers"]
+                if proxy_headers:
+                    opts["proxy_headers"] = proxy_headers
+
+                if proxy.startswith("socks"):
+                    try:
+                        if "socket_options" in opts:
+                            socket_options = opts.pop("socket_options")
+                            if socket_options:
+                                logger.warning(
+                                    "You have defined socket_options but using a SOCKS proxy which doesn't support these. We'll ignore socket_options."
+                                )
+                        return httpcore.AsyncSOCKSProxy(proxy_url=proxy, **opts)
+                    except RuntimeError:
+                        logger.warning(
+                            "You have configured a SOCKS proxy (%s) but support for SOCKS proxies is not installed. Disabling proxy support.",
+                            proxy,
+                        )
+                else:
+                    return httpcore.AsyncHTTPProxy(proxy_url=proxy, **opts)
+
+            return httpcore.AsyncConnectionPool(**opts)
+
+        def kill(self: Self) -> Optional[asyncio.Task[None]]:  # type: ignore
+
+            logger.debug("Killing HTTP transport")
+            self._worker.kill()
+            try:
+                # Return the pool cleanup task so caller can await it if needed
+                with mark_sentry_task_internal():
+                    return self.loop.create_task(self._pool.aclose())  # type: ignore
+            except RuntimeError:
+                logger.warning("Event loop not running, aborting kill.")
+                return None
+
+
 if not HTTP2_ENABLED:
     # Sorry, no Http2Transport for you
     class Http2Transport(HttpTransport):
@@ -778,11 +1052,32 @@ def make_transport(options: Dict[str, Any]) -> Optional[Transport]:
     ref_transport = options["transport"]
 
     use_http2_transport = options.get("_experiments", {}).get("transport_http2", False)
+    use_async_transport = options.get("_experiments", {}).get("transport_async", False)
+    async_integration = any(
+        integration.__class__.__name__ == "AsyncioIntegration"
+        for integration in options.get("integrations") or []
+    )
 
     # By default, we use the http transport class
     transport_cls: Type[Transport] = (
         Http2Transport if use_http2_transport else HttpTransport
     )
+    if use_async_transport and ASYNC_TRANSPORT_ENABLED:
+        try:
+            asyncio.get_running_loop()
+            if async_integration:
+                transport_cls = AsyncHttpTransport
+            else:
+                logger.warning(
+                    "You tried to use AsyncHttpTransport but the AsyncioIntegration is not enabled. Falling back to sync transport."
+                )
+        except RuntimeError:
+            # No event loop running, fall back to sync transport
+            logger.warning("No event loop running, falling back to sync transport.")
+    elif use_async_transport:
+        logger.warning(
+            "You tried to use AsyncHttpTransport but don't have httpcore[asyncio] installed. Falling back to sync transport."
+        )
 
     if isinstance(ref_transport, Transport):
         return ref_transport
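
Unlike the sync transport, the async variant cannot block its own event loop, so flush() and kill() are annotated to hand back an Optional[asyncio.Task] instead of waiting: kill() schedules the pool cleanup with loop.create_task() under the new mark_sentry_task_internal() helper (see sentry_sdk/utils.py below). A rough sketch of how a caller holding a transport reference could drain it at shutdown; this is illustrative only, the transport object is SDK-internal, and flush() may still return None depending on the worker implementation:

async def drain_transport(transport) -> None:
    # flush() is annotated Optional[asyncio.Task[None]] on the async
    # transport; awaiting it (when present) waits for queued envelopes.
    flush_task = transport.flush(timeout=2.0)
    if flush_task is not None:
        await flush_task

    # kill() stops the worker and returns the task that closes the
    # httpcore pool, so shutdown can be awaited deterministically.
    cleanup_task = transport.kill()
    if cleanup_task is not None:
        await cleanup_task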
sentry_sdk/utils.py CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 import base64
+import contextvars
 import json
 import linecache
 import logging
@@ -12,6 +13,7 @@ import sys
 import threading
 import time
 from collections import namedtuple
+from contextlib import contextmanager
 from datetime import datetime, timezone
 from decimal import Decimal
 from functools import partial, partialmethod, wraps
@@ -44,6 +46,7 @@ if TYPE_CHECKING:
     Callable,
     ContextManager,
     Dict,
+    Generator,
     Iterator,
     List,
     NoReturn,
@@ -72,6 +75,25 @@ logger = logging.getLogger("sentry_sdk.errors")
 
 _installed_modules = None
 
+_is_sentry_internal_task = contextvars.ContextVar(
+    "is_sentry_internal_task", default=False
+)
+
+
+def is_internal_task() -> bool:
+    return _is_sentry_internal_task.get()
+
+
+@contextmanager
+def mark_sentry_task_internal() -> Generator[None, None, None]:
+    """Context manager to mark a task as Sentry internal."""
+    token = _is_sentry_internal_task.set(True)
+    try:
+        yield
+    finally:
+        _is_sentry_internal_task.reset(token)
+
+
 BASE64_ALPHABET = re.compile(r"^[a-zA-Z0-9/+=]*$")
 
 FALSY_ENV_VALUES = frozenset(("false", "f", "n", "no", "off", "0"))
@@ -1424,11 +1446,7 @@ class TimeoutThread(threading.Thread):
            integer_configured_timeout = integer_configured_timeout + 1
 
         # Raising Exception after timeout duration is reached
-        raise ServerlessTimeoutWarning(
-            "WARNING : Function is expected to get timed out. Configured timeout duration = {} seconds.".format(
-                integer_configured_timeout
-            )
-        )
+        raise ServerlessTimeoutWarning("WARNING: Function is about to time out.")
 
 
 def to_base64(original: str) -> Optional[str]:
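
The new helpers in sentry_sdk.utils exist so that tasks the SDK spawns for its own housekeeping (such as the pool cleanup in AsyncHttpTransport.kill() above) can be recognized later, for example by instrumentation that should skip them. Because asyncio tasks copy the current contextvars context when they are created, the flag set inside the with block travels with any task started there. A small usage sketch, assuming only the two helpers added in this diff:

import asyncio

from sentry_sdk.utils import is_internal_task, mark_sentry_task_internal


async def _cleanup() -> None:
    # The task copied the surrounding context at creation time, so the
    # flag set by mark_sentry_task_internal() is visible in here.
    print("inside SDK-internal task:", is_internal_task())  # True


async def main() -> None:
    with mark_sentry_task_internal():
        task = asyncio.get_running_loop().create_task(_cleanup())
    await task
    print("outside the block:", is_internal_task())  # False again


asyncio.run(main())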