sentry-sdk 3.0.0a2__py2.py3-none-any.whl → 3.0.0a4__py2.py3-none-any.whl

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release.


This version of sentry-sdk might be problematic.

Files changed (159)
  1. sentry_sdk/__init__.py +4 -0
  2. sentry_sdk/_compat.py +5 -12
  3. sentry_sdk/_init_implementation.py +7 -7
  4. sentry_sdk/_log_batcher.py +17 -29
  5. sentry_sdk/_lru_cache.py +7 -9
  6. sentry_sdk/_queue.py +2 -4
  7. sentry_sdk/_types.py +9 -16
  8. sentry_sdk/_werkzeug.py +5 -7
  9. sentry_sdk/ai/monitoring.py +45 -33
  10. sentry_sdk/ai/utils.py +8 -5
  11. sentry_sdk/api.py +91 -87
  12. sentry_sdk/attachments.py +10 -12
  13. sentry_sdk/client.py +119 -159
  14. sentry_sdk/consts.py +432 -223
  15. sentry_sdk/crons/api.py +16 -17
  16. sentry_sdk/crons/decorator.py +25 -27
  17. sentry_sdk/debug.py +4 -6
  18. sentry_sdk/envelope.py +46 -112
  19. sentry_sdk/feature_flags.py +9 -15
  20. sentry_sdk/integrations/__init__.py +24 -19
  21. sentry_sdk/integrations/_asgi_common.py +16 -18
  22. sentry_sdk/integrations/_wsgi_common.py +22 -33
  23. sentry_sdk/integrations/aiohttp.py +33 -31
  24. sentry_sdk/integrations/anthropic.py +43 -38
  25. sentry_sdk/integrations/argv.py +3 -4
  26. sentry_sdk/integrations/ariadne.py +16 -18
  27. sentry_sdk/integrations/arq.py +20 -29
  28. sentry_sdk/integrations/asgi.py +63 -37
  29. sentry_sdk/integrations/asyncio.py +15 -17
  30. sentry_sdk/integrations/asyncpg.py +1 -1
  31. sentry_sdk/integrations/atexit.py +6 -10
  32. sentry_sdk/integrations/aws_lambda.py +26 -36
  33. sentry_sdk/integrations/beam.py +10 -18
  34. sentry_sdk/integrations/boto3.py +20 -18
  35. sentry_sdk/integrations/bottle.py +25 -34
  36. sentry_sdk/integrations/celery/__init__.py +40 -59
  37. sentry_sdk/integrations/celery/beat.py +22 -26
  38. sentry_sdk/integrations/celery/utils.py +15 -17
  39. sentry_sdk/integrations/chalice.py +8 -10
  40. sentry_sdk/integrations/clickhouse_driver.py +22 -32
  41. sentry_sdk/integrations/cloud_resource_context.py +9 -16
  42. sentry_sdk/integrations/cohere.py +19 -25
  43. sentry_sdk/integrations/dedupe.py +5 -8
  44. sentry_sdk/integrations/django/__init__.py +69 -74
  45. sentry_sdk/integrations/django/asgi.py +25 -33
  46. sentry_sdk/integrations/django/caching.py +24 -20
  47. sentry_sdk/integrations/django/middleware.py +18 -21
  48. sentry_sdk/integrations/django/signals_handlers.py +12 -11
  49. sentry_sdk/integrations/django/templates.py +21 -18
  50. sentry_sdk/integrations/django/transactions.py +16 -11
  51. sentry_sdk/integrations/django/views.py +8 -12
  52. sentry_sdk/integrations/dramatiq.py +21 -21
  53. sentry_sdk/integrations/excepthook.py +10 -10
  54. sentry_sdk/integrations/executing.py +3 -4
  55. sentry_sdk/integrations/falcon.py +27 -42
  56. sentry_sdk/integrations/fastapi.py +13 -16
  57. sentry_sdk/integrations/flask.py +31 -38
  58. sentry_sdk/integrations/gcp.py +13 -16
  59. sentry_sdk/integrations/gnu_backtrace.py +7 -20
  60. sentry_sdk/integrations/gql.py +16 -17
  61. sentry_sdk/integrations/graphene.py +14 -13
  62. sentry_sdk/integrations/grpc/__init__.py +3 -2
  63. sentry_sdk/integrations/grpc/aio/client.py +2 -2
  64. sentry_sdk/integrations/grpc/aio/server.py +15 -14
  65. sentry_sdk/integrations/grpc/client.py +21 -11
  66. sentry_sdk/integrations/grpc/consts.py +2 -0
  67. sentry_sdk/integrations/grpc/server.py +12 -8
  68. sentry_sdk/integrations/httpx.py +11 -14
  69. sentry_sdk/integrations/huey.py +14 -21
  70. sentry_sdk/integrations/huggingface_hub.py +17 -17
  71. sentry_sdk/integrations/langchain.py +204 -114
  72. sentry_sdk/integrations/launchdarkly.py +13 -10
  73. sentry_sdk/integrations/litestar.py +40 -38
  74. sentry_sdk/integrations/logging.py +29 -36
  75. sentry_sdk/integrations/loguru.py +16 -20
  76. sentry_sdk/integrations/modules.py +3 -4
  77. sentry_sdk/integrations/openai.py +421 -204
  78. sentry_sdk/integrations/openai_agents/__init__.py +49 -0
  79. sentry_sdk/integrations/openai_agents/consts.py +1 -0
  80. sentry_sdk/integrations/openai_agents/patches/__init__.py +4 -0
  81. sentry_sdk/integrations/openai_agents/patches/agent_run.py +152 -0
  82. sentry_sdk/integrations/openai_agents/patches/models.py +52 -0
  83. sentry_sdk/integrations/openai_agents/patches/runner.py +42 -0
  84. sentry_sdk/integrations/openai_agents/patches/tools.py +84 -0
  85. sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
  86. sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +20 -0
  87. sentry_sdk/integrations/openai_agents/spans/ai_client.py +46 -0
  88. sentry_sdk/integrations/openai_agents/spans/execute_tool.py +47 -0
  89. sentry_sdk/integrations/openai_agents/spans/handoff.py +24 -0
  90. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +41 -0
  91. sentry_sdk/integrations/openai_agents/utils.py +153 -0
  92. sentry_sdk/integrations/openfeature.py +12 -8
  93. sentry_sdk/integrations/pure_eval.py +6 -10
  94. sentry_sdk/integrations/pymongo.py +14 -18
  95. sentry_sdk/integrations/pyramid.py +31 -36
  96. sentry_sdk/integrations/quart.py +23 -28
  97. sentry_sdk/integrations/ray.py +73 -64
  98. sentry_sdk/integrations/redis/__init__.py +7 -4
  99. sentry_sdk/integrations/redis/_async_common.py +18 -12
  100. sentry_sdk/integrations/redis/_sync_common.py +16 -15
  101. sentry_sdk/integrations/redis/modules/caches.py +17 -8
  102. sentry_sdk/integrations/redis/modules/queries.py +9 -8
  103. sentry_sdk/integrations/redis/rb.py +3 -2
  104. sentry_sdk/integrations/redis/redis.py +4 -4
  105. sentry_sdk/integrations/redis/redis_cluster.py +10 -8
  106. sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +3 -2
  107. sentry_sdk/integrations/redis/utils.py +21 -22
  108. sentry_sdk/integrations/rq.py +13 -16
  109. sentry_sdk/integrations/rust_tracing.py +10 -7
  110. sentry_sdk/integrations/sanic.py +34 -46
  111. sentry_sdk/integrations/serverless.py +22 -27
  112. sentry_sdk/integrations/socket.py +29 -17
  113. sentry_sdk/integrations/spark/__init__.py +1 -0
  114. sentry_sdk/integrations/spark/spark_driver.py +45 -83
  115. sentry_sdk/integrations/spark/spark_worker.py +7 -11
  116. sentry_sdk/integrations/sqlalchemy.py +22 -19
  117. sentry_sdk/integrations/starlette.py +89 -93
  118. sentry_sdk/integrations/starlite.py +31 -37
  119. sentry_sdk/integrations/statsig.py +5 -4
  120. sentry_sdk/integrations/stdlib.py +32 -28
  121. sentry_sdk/integrations/strawberry.py +63 -50
  122. sentry_sdk/integrations/sys_exit.py +7 -11
  123. sentry_sdk/integrations/threading.py +13 -15
  124. sentry_sdk/integrations/tornado.py +28 -32
  125. sentry_sdk/integrations/trytond.py +4 -3
  126. sentry_sdk/integrations/typer.py +8 -6
  127. sentry_sdk/integrations/unleash.py +5 -4
  128. sentry_sdk/integrations/wsgi.py +47 -46
  129. sentry_sdk/logger.py +13 -9
  130. sentry_sdk/monitor.py +16 -28
  131. sentry_sdk/opentelemetry/consts.py +11 -4
  132. sentry_sdk/opentelemetry/contextvars_context.py +17 -15
  133. sentry_sdk/opentelemetry/propagator.py +38 -21
  134. sentry_sdk/opentelemetry/sampler.py +51 -34
  135. sentry_sdk/opentelemetry/scope.py +46 -37
  136. sentry_sdk/opentelemetry/span_processor.py +43 -59
  137. sentry_sdk/opentelemetry/tracing.py +32 -12
  138. sentry_sdk/opentelemetry/utils.py +180 -196
  139. sentry_sdk/profiler/continuous_profiler.py +108 -97
  140. sentry_sdk/profiler/transaction_profiler.py +70 -97
  141. sentry_sdk/profiler/utils.py +11 -15
  142. sentry_sdk/scope.py +251 -264
  143. sentry_sdk/scrubber.py +22 -26
  144. sentry_sdk/serializer.py +48 -65
  145. sentry_sdk/session.py +44 -61
  146. sentry_sdk/sessions.py +35 -49
  147. sentry_sdk/spotlight.py +15 -21
  148. sentry_sdk/tracing.py +118 -184
  149. sentry_sdk/tracing_utils.py +103 -123
  150. sentry_sdk/transport.py +131 -157
  151. sentry_sdk/utils.py +278 -309
  152. sentry_sdk/worker.py +16 -28
  153. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/METADATA +1 -1
  154. sentry_sdk-3.0.0a4.dist-info/RECORD +168 -0
  155. sentry_sdk-3.0.0a2.dist-info/RECORD +0 -154
  156. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/WHEEL +0 -0
  157. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/entry_points.txt +0 -0
  158. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/licenses/LICENSE +0 -0
  159. {sentry_sdk-3.0.0a2.dist-info → sentry_sdk-3.0.0a4.dist-info}/top_level.txt +0 -0
@@ -25,6 +25,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
  """

+ from __future__ import annotations
  import atexit
  import os
  import platform
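
The recurring change in these hunks (which appear to belong to sentry_sdk/profiler/transaction_profiler.py, item 140 in the file list) is a migration from comment-style type hints to inline annotations. The new from __future__ import annotations line makes this safe because annotations are no longer evaluated at runtime, so names that only exist under TYPE_CHECKING can still be used in them. A minimal sketch of the before/after pattern, illustrative only:

    from __future__ import annotations  # PEP 563: annotations are not evaluated at runtime

    from typing import Optional


    class Profile:
        def __init__(self, sampled: Optional[bool], start_ns: int) -> None:
            # Old style:  self.sampled = sampled  # type: Optional[bool]
            # New style: the annotation lives in the assignment itself.
            self.sampled: Optional[bool] = sampled
            self.start_ns: int = start_ns
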
@@ -99,7 +100,7 @@ try:
  from gevent.monkey import get_original
  from gevent.threadpool import ThreadPool as _ThreadPool

- ThreadPool = _ThreadPool # type: Optional[Type[_ThreadPool]]
+ ThreadPool: Optional[Type[_ThreadPool]] = _ThreadPool
  thread_sleep = get_original("time", "sleep")
  except ImportError:
  thread_sleep = time.sleep
@@ -107,7 +108,7 @@ except ImportError:
  ThreadPool = None


- _scheduler = None # type: Optional[Scheduler]
+ _scheduler: Optional[Scheduler] = None


  # The minimum number of unique samples that must exist in a profile to be
@@ -115,8 +116,7 @@ _scheduler = None # type: Optional[Scheduler]
  PROFILE_MINIMUM_SAMPLES = 2


- def has_profiling_enabled(options):
- # type: (Dict[str, Any]) -> bool
+ def has_profiling_enabled(options: Dict[str, Any]) -> bool:
  profiles_sampler = options["profiles_sampler"]
  if profiles_sampler is not None:
  return True
@@ -128,8 +128,7 @@ def has_profiling_enabled(options):
  return False


- def setup_profiler(options):
- # type: (Dict[str, Any]) -> bool
+ def setup_profiler(options: Dict[str, Any]) -> bool:
  global _scheduler

  if _scheduler is not None:
@@ -172,8 +171,7 @@ def setup_profiler(options):
  return True


- def teardown_profiler():
- # type: () -> None
+ def teardown_profiler() -> None:

  global _scheduler

@@ -189,40 +187,38 @@ MAX_PROFILE_DURATION_NS = int(3e10) # 30 seconds
  class Profile:
  def __init__(
  self,
- sampled, # type: Optional[bool]
- start_ns, # type: int
- scheduler=None, # type: Optional[Scheduler]
- ):
- # type: (...) -> None
+ sampled: Optional[bool],
+ start_ns: int,
+ scheduler: Optional[Scheduler] = None,
+ ) -> None:
  self.scheduler = _scheduler if scheduler is None else scheduler

- self.event_id = uuid.uuid4().hex # type: str
+ self.event_id: str = uuid.uuid4().hex

- self.sampled = sampled # type: Optional[bool]
+ self.sampled: Optional[bool] = sampled

  # Various framework integrations are capable of overwriting the active thread id.
  # If it is set to `None` at the end of the profile, we fall back to the default.
- self._default_active_thread_id = get_current_thread_meta()[0] or 0 # type: int
- self.active_thread_id = None # type: Optional[int]
+ self._default_active_thread_id: int = get_current_thread_meta()[0] or 0
+ self.active_thread_id: Optional[int] = None

  try:
- self.start_ns = start_ns # type: int
+ self.start_ns: int = start_ns
  except AttributeError:
  self.start_ns = 0

- self.stop_ns = 0 # type: int
- self.active = False # type: bool
+ self.stop_ns: int = 0
+ self.active: bool = False

- self.indexed_frames = {} # type: Dict[FrameId, int]
- self.indexed_stacks = {} # type: Dict[StackId, int]
- self.frames = [] # type: List[ProcessedFrame]
- self.stacks = [] # type: List[ProcessedStack]
- self.samples = [] # type: List[ProcessedSample]
+ self.indexed_frames: Dict[FrameId, int] = {}
+ self.indexed_stacks: Dict[StackId, int] = {}
+ self.frames: List[ProcessedFrame] = []
+ self.stacks: List[ProcessedStack] = []
+ self.samples: List[ProcessedSample] = []

  self.unique_samples = 0

- def update_active_thread_id(self):
- # type: () -> None
+ def update_active_thread_id(self) -> None:
  self.active_thread_id = get_current_thread_meta()[0]
  logger.debug(
  "[Profiling] updating active thread id to {tid}".format(
@@ -230,8 +226,7 @@ class Profile:
  )
  )

- def _set_initial_sampling_decision(self, sampling_context):
- # type: (SamplingContext) -> None
+ def _set_initial_sampling_decision(self, sampling_context: SamplingContext) -> None:
  """
  Sets the profile's sampling decision according to the following
  precedence rules:
@@ -281,7 +276,8 @@ class Profile:
  self.sampled = False
  return

- if not is_valid_sample_rate(sample_rate, source="Profiling"):
+ sample_rate = is_valid_sample_rate(sample_rate, source="Profiling")
+ if sample_rate is None:
  logger.warning(
  "[Profiling] Discarding profile because of invalid sample rate."
  )
@@ -291,19 +287,18 @@ class Profile:
  # Now we roll the dice. random.random is inclusive of 0, but not of 1,
  # so strict < is safe here. In case sample_rate is a boolean, cast it
  # to a float (True becomes 1.0 and False becomes 0.0)
- self.sampled = random.random() < float(sample_rate)
+ self.sampled = random.random() < sample_rate

  if self.sampled:
  logger.debug("[Profiling] Initializing profile")
  else:
  logger.debug(
  "[Profiling] Discarding profile because it's not included in the random sample (sample rate = {sample_rate})".format(
- sample_rate=float(sample_rate)
+ sample_rate=sample_rate
  )
  )

- def start(self):
- # type: () -> None
+ def start(self) -> None:
  if not self.sampled or self.active:
  return

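The two hunks above also change how the sampling decision is computed: judging from the diff, is_valid_sample_rate now returns the normalized rate (or None for invalid input) instead of a boolean, which lets the caller drop the float() casts and compare random.random() against the rate directly. A rough sketch of the resulting pattern; normalize_sample_rate and should_sample are illustrative stand-ins, not SDK API:

    import random
    from typing import Optional, Union


    def normalize_sample_rate(rate: Union[int, float, bool, None]) -> Optional[float]:
        # Stand-in for is_valid_sample_rate: a float in [0.0, 1.0], or None if invalid.
        try:
            rate = float(rate)  # True/False become 1.0/0.0 here
        except (TypeError, ValueError):
            return None
        return rate if 0.0 <= rate <= 1.0 else None


    def should_sample(raw_rate: Union[int, float, bool, None]) -> bool:
        rate = normalize_sample_rate(raw_rate)
        if rate is None:
            return False  # invalid rate: discard, mirroring the logger.warning branch
        # random.random() is in [0.0, 1.0), so a strict < keeps rate == 0.0 always False
        return random.random() < rate
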
@@ -314,8 +309,7 @@ class Profile:
  self.start_ns = time.perf_counter_ns()
  self.scheduler.start_profiling(self)

- def stop(self):
- # type: () -> None
+ def stop(self) -> None:
  if not self.sampled or not self.active:
  return

@@ -324,8 +318,7 @@ class Profile:
  self.active = False
  self.stop_ns = time.perf_counter_ns()

- def __enter__(self):
- # type: () -> Profile
+ def __enter__(self) -> Profile:
  scope = sentry_sdk.get_isolation_scope()
  old_profile = scope.profile
  scope.profile = self
@@ -336,8 +329,9 @@ class Profile:

  return self

- def __exit__(self, ty, value, tb):
- # type: (Optional[Any], Optional[Any], Optional[Any]) -> None
+ def __exit__(
+ self, ty: Optional[Any], value: Optional[Any], tb: Optional[Any]
+ ) -> None:
  self.stop()

  scope, old_profile = self._context_manager_state
@@ -345,8 +339,7 @@ class Profile:

  scope.profile = old_profile

- def write(self, ts, sample):
- # type: (int, ExtractedSample) -> None
+ def write(self, ts: int, sample: ExtractedSample) -> None:
  if not self.active:
  return

@@ -389,18 +382,17 @@ class Profile:
  # When this happens, we abandon the current sample as it's bad.
  capture_internal_exception(sys.exc_info())

- def process(self):
- # type: () -> ProcessedProfile
+ def process(self) -> ProcessedProfile:

  # This collects the thread metadata at the end of a profile. Doing it
  # this way means that any threads that terminate before the profile ends
  # will not have any metadata associated with it.
- thread_metadata = {
+ thread_metadata: Dict[str, ProcessedThreadMetadata] = {
  str(thread.ident): {
  "name": str(thread.name),
  }
  for thread in threading.enumerate()
- } # type: Dict[str, ProcessedThreadMetadata]
+ }

  return {
  "frames": self.frames,
@@ -409,8 +401,7 @@ class Profile:
  "thread_metadata": thread_metadata,
  }

- def to_json(self, event_opt, options):
- # type: (Event, Dict[str, Any]) -> Dict[str, Any]
+ def to_json(self, event_opt: Event, options: Dict[str, Any]) -> Dict[str, Any]:
  profile = self.process()

  set_in_app_in_frames(
@@ -460,8 +451,7 @@ class Profile:
  ],
  }

- def valid(self):
- # type: () -> bool
+ def valid(self) -> bool:
  client = sentry_sdk.get_client()
  if not client.is_active():
  return False
@@ -488,39 +478,35 @@ class Profile:


  class Scheduler(ABC):
- mode = "unknown" # type: ProfilerMode
+ mode: ProfilerMode = "unknown"

- def __init__(self, frequency):
- # type: (int) -> None
+ def __init__(self, frequency: int) -> None:
  self.interval = 1.0 / frequency

  self.sampler = self.make_sampler()

  # cap the number of new profiles at any time so it does not grow infinitely
- self.new_profiles = deque(maxlen=128) # type: Deque[Profile]
- self.active_profiles = set() # type: Set[Profile]
+ self.new_profiles: Deque[Profile] = deque(maxlen=128)
+ self.active_profiles: Set[Profile] = set()

- def __enter__(self):
- # type: () -> Scheduler
+ def __enter__(self) -> Scheduler:
  self.setup()
  return self

- def __exit__(self, ty, value, tb):
- # type: (Optional[Any], Optional[Any], Optional[Any]) -> None
+ def __exit__(
+ self, ty: Optional[Any], value: Optional[Any], tb: Optional[Any]
+ ) -> None:
  self.teardown()

  @abstractmethod
- def setup(self):
- # type: () -> None
+ def setup(self) -> None:
  pass

  @abstractmethod
- def teardown(self):
- # type: () -> None
+ def teardown(self) -> None:
  pass

- def ensure_running(self):
- # type: () -> None
+ def ensure_running(self) -> None:
  """
  Ensure the scheduler is running. By default, this method is a no-op.
  The method should be overridden by any implementation for which it is
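
Past the annotation changes, this hunk also documents the lifecycle shared by the profiler schedulers: subclasses implement setup() and teardown(), the base class exposes them through __enter__/__exit__, and pending profiles sit in a deque capped at 128 entries so the queue cannot grow without bound. A self-contained skeleton of that shape, for illustration only (MiniScheduler is not the SDK's class):

    from __future__ import annotations

    from abc import ABC, abstractmethod
    from collections import deque
    from typing import Any, Deque, Optional


    class MiniScheduler(ABC):
        def __init__(self, frequency: int) -> None:
            self.interval = 1.0 / frequency
            # Bounded queue: once 128 profiles are pending, the oldest are dropped.
            self.new_profiles: Deque[object] = deque(maxlen=128)

        def __enter__(self) -> MiniScheduler:
            self.setup()
            return self

        def __exit__(
            self, ty: Optional[Any], value: Optional[Any], tb: Optional[Any]
        ) -> None:
            self.teardown()

        @abstractmethod
        def setup(self) -> None: ...

        @abstractmethod
        def teardown(self) -> None: ...
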
@@ -528,19 +514,16 @@ class Scheduler(ABC):
  """
  return None

- def start_profiling(self, profile):
- # type: (Profile) -> None
+ def start_profiling(self, profile: Profile) -> None:
  self.ensure_running()
  self.new_profiles.append(profile)

- def make_sampler(self):
- # type: () -> Callable[..., None]
+ def make_sampler(self) -> Callable[..., None]:
  cwd = os.getcwd()

  cache = LRUCache(max_size=256)

- def _sample_stack(*args, **kwargs):
- # type: (*Any, **Any) -> None
+ def _sample_stack(*args: Any, **kwargs: Any) -> None:
  """
  Take a sample of the stack on all the threads in the process.
  This should be called at a regular interval to collect samples.
@@ -611,32 +594,28 @@ class ThreadScheduler(Scheduler):
  the sampler at a regular interval.
  """

- mode = "thread" # type: ProfilerMode
+ mode: ProfilerMode = "thread"
  name = "sentry.profiler.ThreadScheduler"

- def __init__(self, frequency):
- # type: (int) -> None
+ def __init__(self, frequency: int) -> None:
  super().__init__(frequency=frequency)

  # used to signal to the thread that it should stop
  self.running = False
- self.thread = None # type: Optional[threading.Thread]
- self.pid = None # type: Optional[int]
+ self.thread: Optional[threading.Thread] = None
+ self.pid: Optional[int] = None
  self.lock = threading.Lock()

- def setup(self):
- # type: () -> None
+ def setup(self) -> None:
  pass

- def teardown(self):
- # type: () -> None
+ def teardown(self) -> None:
  if self.running:
  self.running = False
  if self.thread is not None:
  self.thread.join()

- def ensure_running(self):
- # type: () -> None
+ def ensure_running(self) -> None:
  """
  Check that the profiler has an active thread to run in, and start one if
  that's not the case.
@@ -674,8 +653,7 @@ class ThreadScheduler(Scheduler):
  self.thread = None
  return

- def run(self):
- # type: () -> None
+ def run(self) -> None:
  last = time.perf_counter()

  while self.running:
@@ -707,11 +685,10 @@ class GeventScheduler(Scheduler):
  results in a sample containing only the sampler's code.
  """

- mode = "gevent" # type: ProfilerMode
+ mode: ProfilerMode = "gevent"
  name = "sentry.profiler.GeventScheduler"

- def __init__(self, frequency):
- # type: (int) -> None
+ def __init__(self, frequency: int) -> None:

  if ThreadPool is None:
  raise ValueError("Profiler mode: {} is not available".format(self.mode))
@@ -720,27 +697,24 @@ class GeventScheduler(Scheduler):

  # used to signal to the thread that it should stop
  self.running = False
- self.thread = None # type: Optional[_ThreadPool]
- self.pid = None # type: Optional[int]
+ self.thread: Optional[_ThreadPool] = None
+ self.pid: Optional[int] = None

  # This intentionally uses the gevent patched threading.Lock.
  # The lock will be required when first trying to start profiles
  # as we need to spawn the profiler thread from the greenlets.
  self.lock = threading.Lock()

- def setup(self):
- # type: () -> None
+ def setup(self) -> None:
  pass

- def teardown(self):
- # type: () -> None
+ def teardown(self) -> None:
  if self.running:
  self.running = False
  if self.thread is not None:
  self.thread.join()

- def ensure_running(self):
- # type: () -> None
+ def ensure_running(self) -> None:
  pid = os.getpid()

  # is running on the right process
@@ -767,8 +741,7 @@ class GeventScheduler(Scheduler):
  self.thread = None
  return

- def run(self):
- # type: () -> None
+ def run(self) -> None:
  last = time.perf_counter()

  while self.running:
@@ -1,3 +1,4 @@
+ from __future__ import annotations
  import os
  from collections import deque

@@ -63,14 +64,12 @@ MAX_STACK_DEPTH = 128

  if PY311:

- def get_frame_name(frame):
- # type: (FrameType) -> str
+ def get_frame_name(frame: FrameType) -> str:
  return frame.f_code.co_qualname

  else:

- def get_frame_name(frame):
- # type: (FrameType) -> str
+ def get_frame_name(frame: FrameType) -> str:

  f_code = frame.f_code
  co_varnames = f_code.co_varnames
@@ -117,13 +116,11 @@ else:
  return name


- def frame_id(raw_frame):
- # type: (FrameType) -> FrameId
+ def frame_id(raw_frame: FrameType) -> FrameId:
  return (raw_frame.f_code.co_filename, raw_frame.f_lineno, get_frame_name(raw_frame))


- def extract_frame(fid, raw_frame, cwd):
- # type: (FrameId, FrameType, str) -> ProcessedFrame
+ def extract_frame(fid: FrameId, raw_frame: FrameType, cwd: str) -> ProcessedFrame:
  abs_path = raw_frame.f_code.co_filename

  try:
@@ -152,12 +149,11 @@ def extract_frame(fid, raw_frame, cwd):


  def extract_stack(
- raw_frame, # type: Optional[FrameType]
- cache, # type: LRUCache
- cwd, # type: str
- max_stack_depth=MAX_STACK_DEPTH, # type: int
- ):
- # type: (...) -> ExtractedStack
+ raw_frame: Optional[FrameType],
+ cache: LRUCache,
+ cwd: str,
+ max_stack_depth: int = MAX_STACK_DEPTH,
+ ) -> ExtractedStack:
  """
  Extracts the stack starting the specified frame. The extracted stack
  assumes the specified frame is the top of the stack, and works back
@@ -167,7 +163,7 @@ def extract_stack(
  only the first `MAX_STACK_DEPTH` frames will be returned.
  """

- raw_frames = deque(maxlen=max_stack_depth) # type: Deque[FrameType]
+ raw_frames: Deque[FrameType] = deque(maxlen=max_stack_depth)

  while raw_frame is not None:
  f_back = raw_frame.f_back
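
The final hunks appear to come from the profiler utilities module (sentry_sdk/profiler/utils.py, item 141 in the file list). extract_stack walks the f_back links from the given frame and relies on deque(maxlen=max_stack_depth) to cap how many frames are kept: once a bounded deque is full, appending discards entries from the opposite end. A minimal stand-alone illustration of that bounded walk; collect_frames is a hypothetical helper, not SDK API:

    import sys
    from collections import deque
    from types import FrameType
    from typing import Deque, Optional


    def collect_frames(
        raw_frame: Optional[FrameType], max_stack_depth: int = 128
    ) -> Deque[FrameType]:
        # The bounded deque evicts the earliest-appended frames once full,
        # so the walk never holds more than max_stack_depth entries.
        frames: Deque[FrameType] = deque(maxlen=max_stack_depth)
        while raw_frame is not None:
            frames.append(raw_frame)
            raw_frame = raw_frame.f_back
        return frames


    # Example: sample the current call stack of this thread.
    print(len(collect_frames(sys._getframe())))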