sentry-sdk 0.18.0__py2.py3-none-any.whl → 2.46.0__py2.py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registries.
Files changed (193)
  1. sentry_sdk/__init__.py +48 -6
  2. sentry_sdk/_compat.py +64 -56
  3. sentry_sdk/_init_implementation.py +84 -0
  4. sentry_sdk/_log_batcher.py +172 -0
  5. sentry_sdk/_lru_cache.py +47 -0
  6. sentry_sdk/_metrics_batcher.py +167 -0
  7. sentry_sdk/_queue.py +81 -19
  8. sentry_sdk/_types.py +311 -11
  9. sentry_sdk/_werkzeug.py +98 -0
  10. sentry_sdk/ai/__init__.py +7 -0
  11. sentry_sdk/ai/monitoring.py +137 -0
  12. sentry_sdk/ai/utils.py +144 -0
  13. sentry_sdk/api.py +409 -67
  14. sentry_sdk/attachments.py +75 -0
  15. sentry_sdk/client.py +849 -103
  16. sentry_sdk/consts.py +1389 -34
  17. sentry_sdk/crons/__init__.py +10 -0
  18. sentry_sdk/crons/api.py +62 -0
  19. sentry_sdk/crons/consts.py +4 -0
  20. sentry_sdk/crons/decorator.py +135 -0
  21. sentry_sdk/debug.py +12 -15
  22. sentry_sdk/envelope.py +112 -61
  23. sentry_sdk/feature_flags.py +71 -0
  24. sentry_sdk/hub.py +442 -386
  25. sentry_sdk/integrations/__init__.py +228 -58
  26. sentry_sdk/integrations/_asgi_common.py +108 -0
  27. sentry_sdk/integrations/_wsgi_common.py +131 -40
  28. sentry_sdk/integrations/aiohttp.py +221 -72
  29. sentry_sdk/integrations/anthropic.py +439 -0
  30. sentry_sdk/integrations/argv.py +4 -6
  31. sentry_sdk/integrations/ariadne.py +161 -0
  32. sentry_sdk/integrations/arq.py +247 -0
  33. sentry_sdk/integrations/asgi.py +237 -135
  34. sentry_sdk/integrations/asyncio.py +144 -0
  35. sentry_sdk/integrations/asyncpg.py +208 -0
  36. sentry_sdk/integrations/atexit.py +13 -18
  37. sentry_sdk/integrations/aws_lambda.py +233 -80
  38. sentry_sdk/integrations/beam.py +27 -35
  39. sentry_sdk/integrations/boto3.py +137 -0
  40. sentry_sdk/integrations/bottle.py +91 -69
  41. sentry_sdk/integrations/celery/__init__.py +529 -0
  42. sentry_sdk/integrations/celery/beat.py +293 -0
  43. sentry_sdk/integrations/celery/utils.py +43 -0
  44. sentry_sdk/integrations/chalice.py +35 -28
  45. sentry_sdk/integrations/clickhouse_driver.py +177 -0
  46. sentry_sdk/integrations/cloud_resource_context.py +280 -0
  47. sentry_sdk/integrations/cohere.py +274 -0
  48. sentry_sdk/integrations/dedupe.py +32 -8
  49. sentry_sdk/integrations/django/__init__.py +343 -89
  50. sentry_sdk/integrations/django/asgi.py +201 -22
  51. sentry_sdk/integrations/django/caching.py +204 -0
  52. sentry_sdk/integrations/django/middleware.py +80 -32
  53. sentry_sdk/integrations/django/signals_handlers.py +91 -0
  54. sentry_sdk/integrations/django/templates.py +69 -2
  55. sentry_sdk/integrations/django/transactions.py +39 -14
  56. sentry_sdk/integrations/django/views.py +69 -16
  57. sentry_sdk/integrations/dramatiq.py +226 -0
  58. sentry_sdk/integrations/excepthook.py +19 -13
  59. sentry_sdk/integrations/executing.py +5 -6
  60. sentry_sdk/integrations/falcon.py +128 -65
  61. sentry_sdk/integrations/fastapi.py +141 -0
  62. sentry_sdk/integrations/flask.py +114 -75
  63. sentry_sdk/integrations/gcp.py +67 -36
  64. sentry_sdk/integrations/gnu_backtrace.py +14 -22
  65. sentry_sdk/integrations/google_genai/__init__.py +301 -0
  66. sentry_sdk/integrations/google_genai/consts.py +16 -0
  67. sentry_sdk/integrations/google_genai/streaming.py +155 -0
  68. sentry_sdk/integrations/google_genai/utils.py +576 -0
  69. sentry_sdk/integrations/gql.py +162 -0
  70. sentry_sdk/integrations/graphene.py +151 -0
  71. sentry_sdk/integrations/grpc/__init__.py +168 -0
  72. sentry_sdk/integrations/grpc/aio/__init__.py +7 -0
  73. sentry_sdk/integrations/grpc/aio/client.py +95 -0
  74. sentry_sdk/integrations/grpc/aio/server.py +100 -0
  75. sentry_sdk/integrations/grpc/client.py +91 -0
  76. sentry_sdk/integrations/grpc/consts.py +1 -0
  77. sentry_sdk/integrations/grpc/server.py +66 -0
  78. sentry_sdk/integrations/httpx.py +178 -0
  79. sentry_sdk/integrations/huey.py +174 -0
  80. sentry_sdk/integrations/huggingface_hub.py +378 -0
  81. sentry_sdk/integrations/langchain.py +1132 -0
  82. sentry_sdk/integrations/langgraph.py +337 -0
  83. sentry_sdk/integrations/launchdarkly.py +61 -0
  84. sentry_sdk/integrations/litellm.py +287 -0
  85. sentry_sdk/integrations/litestar.py +315 -0
  86. sentry_sdk/integrations/logging.py +261 -85
  87. sentry_sdk/integrations/loguru.py +213 -0
  88. sentry_sdk/integrations/mcp.py +566 -0
  89. sentry_sdk/integrations/modules.py +6 -33
  90. sentry_sdk/integrations/openai.py +725 -0
  91. sentry_sdk/integrations/openai_agents/__init__.py +61 -0
  92. sentry_sdk/integrations/openai_agents/consts.py +1 -0
  93. sentry_sdk/integrations/openai_agents/patches/__init__.py +5 -0
  94. sentry_sdk/integrations/openai_agents/patches/agent_run.py +140 -0
  95. sentry_sdk/integrations/openai_agents/patches/error_tracing.py +77 -0
  96. sentry_sdk/integrations/openai_agents/patches/models.py +50 -0
  97. sentry_sdk/integrations/openai_agents/patches/runner.py +45 -0
  98. sentry_sdk/integrations/openai_agents/patches/tools.py +77 -0
  99. sentry_sdk/integrations/openai_agents/spans/__init__.py +5 -0
  100. sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +21 -0
  101. sentry_sdk/integrations/openai_agents/spans/ai_client.py +42 -0
  102. sentry_sdk/integrations/openai_agents/spans/execute_tool.py +48 -0
  103. sentry_sdk/integrations/openai_agents/spans/handoff.py +19 -0
  104. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +86 -0
  105. sentry_sdk/integrations/openai_agents/utils.py +199 -0
  106. sentry_sdk/integrations/openfeature.py +35 -0
  107. sentry_sdk/integrations/opentelemetry/__init__.py +7 -0
  108. sentry_sdk/integrations/opentelemetry/consts.py +5 -0
  109. sentry_sdk/integrations/opentelemetry/integration.py +58 -0
  110. sentry_sdk/integrations/opentelemetry/propagator.py +117 -0
  111. sentry_sdk/integrations/opentelemetry/span_processor.py +391 -0
  112. sentry_sdk/integrations/otlp.py +82 -0
  113. sentry_sdk/integrations/pure_eval.py +20 -11
  114. sentry_sdk/integrations/pydantic_ai/__init__.py +47 -0
  115. sentry_sdk/integrations/pydantic_ai/consts.py +1 -0
  116. sentry_sdk/integrations/pydantic_ai/patches/__init__.py +4 -0
  117. sentry_sdk/integrations/pydantic_ai/patches/agent_run.py +215 -0
  118. sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py +110 -0
  119. sentry_sdk/integrations/pydantic_ai/patches/model_request.py +40 -0
  120. sentry_sdk/integrations/pydantic_ai/patches/tools.py +98 -0
  121. sentry_sdk/integrations/pydantic_ai/spans/__init__.py +3 -0
  122. sentry_sdk/integrations/pydantic_ai/spans/ai_client.py +246 -0
  123. sentry_sdk/integrations/pydantic_ai/spans/execute_tool.py +49 -0
  124. sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py +112 -0
  125. sentry_sdk/integrations/pydantic_ai/utils.py +223 -0
  126. sentry_sdk/integrations/pymongo.py +214 -0
  127. sentry_sdk/integrations/pyramid.py +71 -60
  128. sentry_sdk/integrations/quart.py +237 -0
  129. sentry_sdk/integrations/ray.py +165 -0
  130. sentry_sdk/integrations/redis/__init__.py +48 -0
  131. sentry_sdk/integrations/redis/_async_common.py +116 -0
  132. sentry_sdk/integrations/redis/_sync_common.py +119 -0
  133. sentry_sdk/integrations/redis/consts.py +19 -0
  134. sentry_sdk/integrations/redis/modules/__init__.py +0 -0
  135. sentry_sdk/integrations/redis/modules/caches.py +118 -0
  136. sentry_sdk/integrations/redis/modules/queries.py +65 -0
  137. sentry_sdk/integrations/redis/rb.py +32 -0
  138. sentry_sdk/integrations/redis/redis.py +69 -0
  139. sentry_sdk/integrations/redis/redis_cluster.py +107 -0
  140. sentry_sdk/integrations/redis/redis_py_cluster_legacy.py +50 -0
  141. sentry_sdk/integrations/redis/utils.py +148 -0
  142. sentry_sdk/integrations/rq.py +62 -52
  143. sentry_sdk/integrations/rust_tracing.py +284 -0
  144. sentry_sdk/integrations/sanic.py +248 -114
  145. sentry_sdk/integrations/serverless.py +13 -22
  146. sentry_sdk/integrations/socket.py +96 -0
  147. sentry_sdk/integrations/spark/spark_driver.py +115 -62
  148. sentry_sdk/integrations/spark/spark_worker.py +42 -50
  149. sentry_sdk/integrations/sqlalchemy.py +82 -37
  150. sentry_sdk/integrations/starlette.py +737 -0
  151. sentry_sdk/integrations/starlite.py +292 -0
  152. sentry_sdk/integrations/statsig.py +37 -0
  153. sentry_sdk/integrations/stdlib.py +100 -58
  154. sentry_sdk/integrations/strawberry.py +394 -0
  155. sentry_sdk/integrations/sys_exit.py +70 -0
  156. sentry_sdk/integrations/threading.py +142 -38
  157. sentry_sdk/integrations/tornado.py +68 -53
  158. sentry_sdk/integrations/trytond.py +15 -20
  159. sentry_sdk/integrations/typer.py +60 -0
  160. sentry_sdk/integrations/unleash.py +33 -0
  161. sentry_sdk/integrations/unraisablehook.py +53 -0
  162. sentry_sdk/integrations/wsgi.py +126 -125
  163. sentry_sdk/logger.py +96 -0
  164. sentry_sdk/metrics.py +81 -0
  165. sentry_sdk/monitor.py +120 -0
  166. sentry_sdk/profiler/__init__.py +49 -0
  167. sentry_sdk/profiler/continuous_profiler.py +730 -0
  168. sentry_sdk/profiler/transaction_profiler.py +839 -0
  169. sentry_sdk/profiler/utils.py +195 -0
  170. sentry_sdk/scope.py +1542 -112
  171. sentry_sdk/scrubber.py +177 -0
  172. sentry_sdk/serializer.py +152 -210
  173. sentry_sdk/session.py +177 -0
  174. sentry_sdk/sessions.py +202 -179
  175. sentry_sdk/spotlight.py +242 -0
  176. sentry_sdk/tracing.py +1202 -294
  177. sentry_sdk/tracing_utils.py +1236 -0
  178. sentry_sdk/transport.py +693 -189
  179. sentry_sdk/types.py +52 -0
  180. sentry_sdk/utils.py +1395 -228
  181. sentry_sdk/worker.py +30 -17
  182. sentry_sdk-2.46.0.dist-info/METADATA +268 -0
  183. sentry_sdk-2.46.0.dist-info/RECORD +189 -0
  184. {sentry_sdk-0.18.0.dist-info → sentry_sdk-2.46.0.dist-info}/WHEEL +1 -1
  185. sentry_sdk-2.46.0.dist-info/entry_points.txt +2 -0
  186. sentry_sdk-2.46.0.dist-info/licenses/LICENSE +21 -0
  187. sentry_sdk/_functools.py +0 -66
  188. sentry_sdk/integrations/celery.py +0 -275
  189. sentry_sdk/integrations/redis.py +0 -103
  190. sentry_sdk-0.18.0.dist-info/LICENSE +0 -9
  191. sentry_sdk-0.18.0.dist-info/METADATA +0 -66
  192. sentry_sdk-0.18.0.dist-info/RECORD +0 -65
  193. {sentry_sdk-0.18.0.dist-info → sentry_sdk-2.46.0.dist-info}/top_level.txt +0 -0
sentry_sdk/profiler/continuous_profiler.py (new file)
@@ -0,0 +1,730 @@
+import atexit
+import os
+import random
+import sys
+import threading
+import time
+import uuid
+import warnings
+from collections import deque
+from datetime import datetime, timezone
+
+from sentry_sdk.consts import VERSION
+from sentry_sdk.envelope import Envelope
+from sentry_sdk._lru_cache import LRUCache
+from sentry_sdk.profiler.utils import (
+    DEFAULT_SAMPLING_FREQUENCY,
+    extract_stack,
+)
+from sentry_sdk.utils import (
+    capture_internal_exception,
+    is_gevent,
+    logger,
+    now,
+    set_in_app_in_frames,
+)
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+    from typing import Callable
+    from typing import Deque
+    from typing import Dict
+    from typing import List
+    from typing import Optional
+    from typing import Set
+    from typing import Type
+    from typing import Union
+    from typing_extensions import TypedDict
+    from sentry_sdk._types import ContinuousProfilerMode, SDKInfo
+    from sentry_sdk.profiler.utils import (
+        ExtractedSample,
+        FrameId,
+        StackId,
+        ThreadId,
+        ProcessedFrame,
+        ProcessedStack,
+    )
+
+    ProcessedSample = TypedDict(
+        "ProcessedSample",
+        {
+            "timestamp": float,
+            "thread_id": ThreadId,
+            "stack_id": int,
+        },
+    )
+
+
+try:
+    from gevent.monkey import get_original
+    from gevent.threadpool import ThreadPool as _ThreadPool
+
+    ThreadPool = _ThreadPool  # type: Optional[Type[_ThreadPool]]
+    thread_sleep = get_original("time", "sleep")
+except ImportError:
+    thread_sleep = time.sleep
+    ThreadPool = None
+
+
+_scheduler = None  # type: Optional[ContinuousScheduler]
+
+
+def setup_continuous_profiler(options, sdk_info, capture_func):
+    # type: (Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> bool
+    global _scheduler
+
+    already_initialized = _scheduler is not None
+
+    if already_initialized:
+        logger.debug("[Profiling] Continuous Profiler is already setup")
+        teardown_continuous_profiler()
+
+    if is_gevent():
+        # If gevent has patched the threading modules then we cannot rely on
+        # them to spawn a native thread for sampling.
+        # Instead we default to the GeventContinuousScheduler which is capable of
+        # spawning native threads within gevent.
+        default_profiler_mode = GeventContinuousScheduler.mode
+    else:
+        default_profiler_mode = ThreadContinuousScheduler.mode
+
+    if options.get("profiler_mode") is not None:
+        profiler_mode = options["profiler_mode"]
+    else:
+        # TODO: deprecate this and just use the existing `profiler_mode`
+        experiments = options.get("_experiments", {})
+
+        profiler_mode = (
+            experiments.get("continuous_profiling_mode") or default_profiler_mode
+        )
+
+    frequency = DEFAULT_SAMPLING_FREQUENCY
+
+    if profiler_mode == ThreadContinuousScheduler.mode:
+        _scheduler = ThreadContinuousScheduler(
+            frequency, options, sdk_info, capture_func
+        )
+    elif profiler_mode == GeventContinuousScheduler.mode:
+        _scheduler = GeventContinuousScheduler(
+            frequency, options, sdk_info, capture_func
+        )
+    else:
+        raise ValueError("Unknown continuous profiler mode: {}".format(profiler_mode))
+
+    logger.debug(
+        "[Profiling] Setting up continuous profiler in {mode} mode".format(
+            mode=_scheduler.mode
+        )
+    )
+
+    if not already_initialized:
+        atexit.register(teardown_continuous_profiler)
+
+    return True
+
+
+def is_profile_session_sampled():
+    # type: () -> bool
+    if _scheduler is None:
+        return False
+    return _scheduler.sampled
+
+
+def try_autostart_continuous_profiler():
+    # type: () -> None
+
+    # TODO: deprecate this as it'll be replaced by the auto lifecycle option
+
+    if _scheduler is None:
+        return
+
+    if not _scheduler.is_auto_start_enabled():
+        return
+
+    _scheduler.manual_start()
+
+
+def try_profile_lifecycle_trace_start():
+    # type: () -> Union[ContinuousProfile, None]
+    if _scheduler is None:
+        return None
+
+    return _scheduler.auto_start()
+
+
+def start_profiler():
+    # type: () -> None
+    if _scheduler is None:
+        return
+
+    _scheduler.manual_start()
+
+
+def start_profile_session():
+    # type: () -> None
+
+    warnings.warn(
+        "The `start_profile_session` function is deprecated. Please use `start_profile` instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    start_profiler()
+
+
+def stop_profiler():
+    # type: () -> None
+    if _scheduler is None:
+        return
+
+    _scheduler.manual_stop()
+
+
+def stop_profile_session():
+    # type: () -> None
+
+    warnings.warn(
+        "The `stop_profile_session` function is deprecated. Please use `stop_profile` instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    stop_profiler()
+
+
+def teardown_continuous_profiler():
+    # type: () -> None
+    stop_profiler()
+
+    global _scheduler
+    _scheduler = None
+
+
+def get_profiler_id():
+    # type: () -> Union[str, None]
+    if _scheduler is None:
+        return None
+    return _scheduler.profiler_id
+
+
+def determine_profile_session_sampling_decision(sample_rate):
+    # type: (Union[float, None]) -> bool
+
+    # `None` is treated as `0.0`
+    if not sample_rate:
+        return False
+
+    return random.random() < float(sample_rate)
+
+
+class ContinuousProfile:
+    active: bool = True
+
+    def stop(self):
+        # type: () -> None
+        self.active = False
+
+
+class ContinuousScheduler:
+    mode = "unknown"  # type: ContinuousProfilerMode
+
+    def __init__(self, frequency, options, sdk_info, capture_func):
+        # type: (int, Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> None
+        self.interval = 1.0 / frequency
+        self.options = options
+        self.sdk_info = sdk_info
+        self.capture_func = capture_func
+
+        self.lifecycle = self.options.get("profile_lifecycle")
+        profile_session_sample_rate = self.options.get("profile_session_sample_rate")
+        self.sampled = determine_profile_session_sampling_decision(
+            profile_session_sample_rate
+        )
+
+        self.sampler = self.make_sampler()
+        self.buffer = None  # type: Optional[ProfileBuffer]
+        self.pid = None  # type: Optional[int]
+
+        self.running = False
+        self.soft_shutdown = False
+
+        self.new_profiles = deque(maxlen=128)  # type: Deque[ContinuousProfile]
+        self.active_profiles = set()  # type: Set[ContinuousProfile]
+
+    def is_auto_start_enabled(self):
+        # type: () -> bool
+
+        # Ensure that the scheduler only autostarts once per process.
+        # This is necessary because many web servers use forks to spawn
+        # additional processes. If the profiler is only spawned in the
+        # master process, it often profiles only that process and not
+        # the ones where the requests are actually being handled.
+        if self.pid == os.getpid():
+            return False
+
+        experiments = self.options.get("_experiments")
+        if not experiments:
+            return False
+
+        return experiments.get("continuous_profiling_auto_start")
+
+    def auto_start(self):
+        # type: () -> Union[ContinuousProfile, None]
+        if not self.sampled:
+            return None
+
+        if self.lifecycle != "trace":
+            return None
+
+        logger.debug("[Profiling] Auto starting profiler")
+
+        profile = ContinuousProfile()
+
+        self.new_profiles.append(profile)
+        self.ensure_running()
+
+        return profile
+
+    def manual_start(self):
+        # type: () -> None
+        if not self.sampled:
+            return
+
+        if self.lifecycle != "manual":
+            return
+
+        self.ensure_running()
+
+    def manual_stop(self):
+        # type: () -> None
+        if self.lifecycle != "manual":
+            return
+
+        self.teardown()
+
+    def ensure_running(self):
+        # type: () -> None
+        raise NotImplementedError
+
+    def teardown(self):
+        # type: () -> None
+        raise NotImplementedError
+
+    def pause(self):
+        # type: () -> None
+        raise NotImplementedError
+
+    def reset_buffer(self):
+        # type: () -> None
+        self.buffer = ProfileBuffer(
+            self.options, self.sdk_info, PROFILE_BUFFER_SECONDS, self.capture_func
+        )
+
+    @property
+    def profiler_id(self):
+        # type: () -> Union[str, None]
+        if self.buffer is None:
+            return None
+        return self.buffer.profiler_id
+
+    def make_sampler(self):
+        # type: () -> Callable[..., bool]
+        cwd = os.getcwd()
+
+        cache = LRUCache(max_size=256)
+
+        if self.lifecycle == "trace":
+
+            def _sample_stack(*args, **kwargs):
+                # type: (*Any, **Any) -> bool
+                """
+                Take a sample of the stack on all the threads in the process.
+                This should be called at a regular interval to collect samples.
+                """
+
+                # no profiles taking place, so we can stop early
+                if not self.new_profiles and not self.active_profiles:
+                    return True
+
+                # This is the number of profiles we want to pop off.
+                # It's possible another thread adds a new profile to
+                # the list and we spend longer than we want inside
+                # the loop below.
+                #
+                # Also make sure to set this value before extracting
+                # frames so we do not write to any new profiles that
+                # were started after this point.
+                new_profiles = len(self.new_profiles)
+
+                ts = now()
+
+                try:
+                    sample = [
+                        (str(tid), extract_stack(frame, cache, cwd))
+                        for tid, frame in sys._current_frames().items()
+                    ]
+                except AttributeError:
+                    # For some reason, the frame we get doesn't have certain attributes.
+                    # When this happens, we abandon the current sample as it's bad.
+                    capture_internal_exception(sys.exc_info())
+                    return False
+
+                # Move the new profiles into the active_profiles set.
+                #
+                # We cannot add directly to the active_profiles set
+                # in `start_profiling` because it is called from other
+                # threads, which can cause a RuntimeError when the
+                # set size changes during iteration without a lock.
+                #
+                # We also want to avoid using a lock here so threads
+                # that are starting profiles are not blocked until it
+                # can acquire the lock.
+                for _ in range(new_profiles):
+                    self.active_profiles.add(self.new_profiles.popleft())
+                inactive_profiles = []
+
+                for profile in self.active_profiles:
+                    if not profile.active:
+                        # If a profile is marked inactive, we buffer it
+                        # to `inactive_profiles` so it can be removed.
+                        # We cannot remove it here as it would result
+                        # in a RuntimeError.
+                        inactive_profiles.append(profile)
+
+                for profile in inactive_profiles:
+                    self.active_profiles.remove(profile)
+
+                if self.buffer is not None:
+                    self.buffer.write(ts, sample)
+
+                return False
+
+        else:
+
+            def _sample_stack(*args, **kwargs):
+                # type: (*Any, **Any) -> bool
+                """
+                Take a sample of the stack on all the threads in the process.
+                This should be called at a regular interval to collect samples.
+                """
+
+                ts = now()
+
+                try:
+                    sample = [
+                        (str(tid), extract_stack(frame, cache, cwd))
+                        for tid, frame in sys._current_frames().items()
+                    ]
+                except AttributeError:
+                    # For some reason, the frame we get doesn't have certain attributes.
+                    # When this happens, we abandon the current sample as it's bad.
+                    capture_internal_exception(sys.exc_info())
+                    return False
+
+                if self.buffer is not None:
+                    self.buffer.write(ts, sample)
+
+                return False
+
+        return _sample_stack
+
+    def run(self):
+        # type: () -> None
+        last = time.perf_counter()
+
+        while self.running:
+            self.soft_shutdown = self.sampler()
+
+            # some time may have elapsed since the last time
+            # we sampled, so we need to account for that and
+            # not sleep for too long
+            elapsed = time.perf_counter() - last
+            if elapsed < self.interval:
+                thread_sleep(self.interval - elapsed)
+
+            # the soft shutdown happens here to give the profiler
+            # a chance to be reused
+            if self.soft_shutdown:
+                self.running = False
+
+                # make sure to explicitly exit the profiler here or there might
+                # be multiple profilers at once
+                break
+
+            # after sleeping, make sure to take the current
+            # timestamp so we can use it next iteration
+            last = time.perf_counter()
+
+        if self.buffer is not None:
+            self.buffer.flush()
+            self.buffer = None
+
+
+class ThreadContinuousScheduler(ContinuousScheduler):
+    """
+    This scheduler is based on running a daemon thread that will call
+    the sampler at a regular interval.
+    """
+
+    mode = "thread"  # type: ContinuousProfilerMode
+    name = "sentry.profiler.ThreadContinuousScheduler"
+
+    def __init__(self, frequency, options, sdk_info, capture_func):
+        # type: (int, Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> None
+        super().__init__(frequency, options, sdk_info, capture_func)
+
+        self.thread = None  # type: Optional[threading.Thread]
+        self.lock = threading.Lock()
+
+    def ensure_running(self):
+        # type: () -> None
+
+        self.soft_shutdown = False
+
+        pid = os.getpid()
+
+        # is running on the right process
+        if self.running and self.pid == pid:
+            return
+
+        with self.lock:
+            # another thread may have tried to acquire the lock
+            # at the same time so it may start another thread
+            # make sure to check again before proceeding
+            if self.running and self.pid == pid:
+                return
+
+            self.pid = pid
+            self.running = True
+
+            # if the profiler thread is changing,
+            # we should create a new buffer along with it
+            self.reset_buffer()
+
+            # make sure the thread is a daemon here otherwise this
+            # can keep the application running after other threads
+            # have exited
+            self.thread = threading.Thread(name=self.name, target=self.run, daemon=True)
+
+            try:
+                self.thread.start()
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self.running = False
+                self.thread = None
+
+    def teardown(self):
+        # type: () -> None
+        if self.running:
+            self.running = False
+
+        if self.thread is not None:
+            self.thread.join()
+            self.thread = None
+
+        self.buffer = None
+
+
+class GeventContinuousScheduler(ContinuousScheduler):
+    """
+    This scheduler is based on the thread scheduler but adapted to work with
+    gevent. When using gevent, it may monkey patch the threading modules
+    (`threading` and `_thread`). This results in the use of greenlets instead
+    of native threads.
+
+    This is an issue because the sampler CANNOT run in a greenlet because
+    1. Other greenlets doing sync work will prevent the sampler from running
+    2. The greenlet runs in the same thread as other greenlets so when taking
+    a sample, other greenlets will have been evicted from the thread. This
+    results in a sample containing only the sampler's code.
+    """
+
+    mode = "gevent"  # type: ContinuousProfilerMode
+
+    def __init__(self, frequency, options, sdk_info, capture_func):
+        # type: (int, Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> None
+
+        if ThreadPool is None:
+            raise ValueError("Profiler mode: {} is not available".format(self.mode))
+
+        super().__init__(frequency, options, sdk_info, capture_func)
+
+        self.thread = None  # type: Optional[_ThreadPool]
+        self.lock = threading.Lock()
+
+    def ensure_running(self):
+        # type: () -> None
+
+        self.soft_shutdown = False
+
+        pid = os.getpid()
+
+        # is running on the right process
+        if self.running and self.pid == pid:
+            return
+
+        with self.lock:
+            # another thread may have tried to acquire the lock
+            # at the same time so it may start another thread
+            # make sure to check again before proceeding
+            if self.running and self.pid == pid:
+                return
+
+            self.pid = pid
+            self.running = True
+
+            # if the profiler thread is changing,
+            # we should create a new buffer along with it
+            self.reset_buffer()
+
+            self.thread = ThreadPool(1)  # type: ignore[misc]
+            try:
+                self.thread.spawn(self.run)
+            except RuntimeError:
+                # Unfortunately at this point the interpreter is in a state that no
+                # longer allows us to spawn a thread and we have to bail.
+                self.running = False
+                self.thread = None
+
+    def teardown(self):
+        # type: () -> None
+        if self.running:
+            self.running = False
+
+        if self.thread is not None:
+            self.thread.join()
+            self.thread = None
+
+        self.buffer = None
+
+
+PROFILE_BUFFER_SECONDS = 60
+
+
+class ProfileBuffer:
+    def __init__(self, options, sdk_info, buffer_size, capture_func):
+        # type: (Dict[str, Any], SDKInfo, int, Callable[[Envelope], None]) -> None
+        self.options = options
+        self.sdk_info = sdk_info
+        self.buffer_size = buffer_size
+        self.capture_func = capture_func
+
+        self.profiler_id = uuid.uuid4().hex
+        self.chunk = ProfileChunk()
+
+        # Make sure to use the same clock to compute a sample's monotonic timestamp
+        # to ensure the timestamps are correctly aligned.
+        self.start_monotonic_time = now()
+
+        # Make sure the start timestamp is defined only once per profiler id.
+        # This prevents issues with clock drift within a single profiler session.
+        #
+        # Subtracting the start_monotonic_time here to find a fixed starting position
+        # for relative monotonic timestamps for each sample.
+        self.start_timestamp = (
+            datetime.now(timezone.utc).timestamp() - self.start_monotonic_time
+        )
+
+    def write(self, monotonic_time, sample):
+        # type: (float, ExtractedSample) -> None
+        if self.should_flush(monotonic_time):
+            self.flush()
+            self.chunk = ProfileChunk()
+            self.start_monotonic_time = now()
+
+        self.chunk.write(self.start_timestamp + monotonic_time, sample)
+
+    def should_flush(self, monotonic_time):
+        # type: (float) -> bool
+
+        # If the delta between the new monotonic time and the start monotonic time
+        # exceeds the buffer size, it means we should flush the chunk
+        return monotonic_time - self.start_monotonic_time >= self.buffer_size
+
+    def flush(self):
+        # type: () -> None
+        chunk = self.chunk.to_json(self.profiler_id, self.options, self.sdk_info)
+        envelope = Envelope()
+        envelope.add_profile_chunk(chunk)
+        self.capture_func(envelope)
+
+
+class ProfileChunk:
+    def __init__(self):
+        # type: () -> None
+        self.chunk_id = uuid.uuid4().hex
+
+        self.indexed_frames = {}  # type: Dict[FrameId, int]
+        self.indexed_stacks = {}  # type: Dict[StackId, int]
+        self.frames = []  # type: List[ProcessedFrame]
+        self.stacks = []  # type: List[ProcessedStack]
+        self.samples = []  # type: List[ProcessedSample]
+
+    def write(self, ts, sample):
+        # type: (float, ExtractedSample) -> None
+        for tid, (stack_id, frame_ids, frames) in sample:
+            try:
+                # Check if the stack is indexed first, this lets us skip
+                # indexing frames if it's not necessary
+                if stack_id not in self.indexed_stacks:
+                    for i, frame_id in enumerate(frame_ids):
+                        if frame_id not in self.indexed_frames:
+                            self.indexed_frames[frame_id] = len(self.indexed_frames)
+                            self.frames.append(frames[i])
+
+                    self.indexed_stacks[stack_id] = len(self.indexed_stacks)
+                    self.stacks.append(
+                        [self.indexed_frames[frame_id] for frame_id in frame_ids]
+                    )
+
+                self.samples.append(
+                    {
+                        "timestamp": ts,
+                        "thread_id": tid,
+                        "stack_id": self.indexed_stacks[stack_id],
+                    }
+                )
+            except AttributeError:
+                # For some reason, the frame we get doesn't have certain attributes.
+                # When this happens, we abandon the current sample as it's bad.
+                capture_internal_exception(sys.exc_info())
+
+    def to_json(self, profiler_id, options, sdk_info):
+        # type: (str, Dict[str, Any], SDKInfo) -> Dict[str, Any]
+        profile = {
+            "frames": self.frames,
+            "stacks": self.stacks,
+            "samples": self.samples,
+            "thread_metadata": {
+                str(thread.ident): {
+                    "name": str(thread.name),
+                }
+                for thread in threading.enumerate()
+            },
+        }
+
+        set_in_app_in_frames(
+            profile["frames"],
+            options["in_app_exclude"],
+            options["in_app_include"],
+            options["project_root"],
+        )
+
+        payload = {
+            "chunk_id": self.chunk_id,
+            "client_sdk": {
+                "name": sdk_info["name"],
+                "version": VERSION,
+            },
+            "platform": "python",
+            "profile": profile,
+            "profiler_id": profiler_id,
+            "version": "2",
+        }
+
+        for key in "release", "environment", "dist":
+            if options[key] is not None:
+                payload[key] = str(options[key]).strip()
+
+        return payload
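
For orientation, below is a minimal sketch (not part of the package or this diff) of how the entry points added in continuous_profiler.py could be exercised in isolation. The option keys mirror the ones read by ContinuousScheduler and ProfileChunk.to_json above; the concrete values, the capture callback, and the sdk_info dict are illustrative assumptions, not the SDK's documented configuration surface.

import time

from sentry_sdk.profiler.continuous_profiler import (
    setup_continuous_profiler,
    start_profiler,
    stop_profiler,
)


def capture(envelope):
    # Hypothetical stand-in for the client's capture function; a real
    # client would serialize the envelope and send it to Sentry.
    print("profile chunk envelope with", len(envelope.items), "item(s)")


options = {
    "profiler_mode": "thread",           # selects ThreadContinuousScheduler
    "profile_lifecycle": "manual",       # started/stopped by the application
    "profile_session_sample_rate": 1.0,  # always sample in this sketch
    "_experiments": {},
    "in_app_exclude": [],
    "in_app_include": [],
    "project_root": None,
    "release": None,
    "environment": None,
    "dist": None,
}
sdk_info = {"name": "sentry.python", "version": "2.46.0"}

setup_continuous_profiler(options, sdk_info, capture)
start_profiler()   # spawns the daemon sampling thread and a fresh ProfileBuffer
time.sleep(1)      # let the sampler collect a few stacks
stop_profiler()    # tears down the scheduler; the run loop flushes the buffer

In the SDK itself these pieces are wired up by the client rather than called directly; the sketch only illustrates the data flow the new module implements, from scheduler to buffer to envelope.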