sentry-sdk 2.39.0__py2.py3-none-any.whl → 2.41.0__py2.py3-none-any.whl

This diff compares the contents of two package versions as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of sentry-sdk might be problematic.

Files changed (52)
  1. sentry_sdk/_metrics.py +81 -0
  2. sentry_sdk/_metrics_batcher.py +156 -0
  3. sentry_sdk/_types.py +27 -22
  4. sentry_sdk/ai/__init__.py +7 -0
  5. sentry_sdk/ai/utils.py +48 -0
  6. sentry_sdk/client.py +87 -36
  7. sentry_sdk/consts.py +15 -9
  8. sentry_sdk/envelope.py +31 -17
  9. sentry_sdk/feature_flags.py +0 -1
  10. sentry_sdk/hub.py +17 -9
  11. sentry_sdk/integrations/__init__.py +1 -0
  12. sentry_sdk/integrations/anthropic.py +10 -2
  13. sentry_sdk/integrations/asgi.py +3 -2
  14. sentry_sdk/integrations/dramatiq.py +89 -31
  15. sentry_sdk/integrations/grpc/aio/client.py +2 -1
  16. sentry_sdk/integrations/grpc/client.py +3 -4
  17. sentry_sdk/integrations/langchain.py +29 -5
  18. sentry_sdk/integrations/langgraph.py +5 -3
  19. sentry_sdk/integrations/launchdarkly.py +0 -1
  20. sentry_sdk/integrations/litellm.py +251 -0
  21. sentry_sdk/integrations/litestar.py +4 -4
  22. sentry_sdk/integrations/logging.py +1 -1
  23. sentry_sdk/integrations/loguru.py +1 -1
  24. sentry_sdk/integrations/openai.py +3 -2
  25. sentry_sdk/integrations/openai_agents/spans/ai_client.py +4 -1
  26. sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +10 -2
  27. sentry_sdk/integrations/openai_agents/utils.py +60 -19
  28. sentry_sdk/integrations/pure_eval.py +3 -1
  29. sentry_sdk/integrations/spark/spark_driver.py +2 -1
  30. sentry_sdk/integrations/sqlalchemy.py +2 -6
  31. sentry_sdk/integrations/starlette.py +1 -3
  32. sentry_sdk/integrations/starlite.py +4 -4
  33. sentry_sdk/integrations/threading.py +52 -8
  34. sentry_sdk/integrations/wsgi.py +3 -2
  35. sentry_sdk/logger.py +1 -1
  36. sentry_sdk/profiler/utils.py +2 -6
  37. sentry_sdk/scope.py +6 -3
  38. sentry_sdk/serializer.py +1 -3
  39. sentry_sdk/session.py +4 -2
  40. sentry_sdk/sessions.py +4 -2
  41. sentry_sdk/tracing.py +36 -33
  42. sentry_sdk/tracing_utils.py +1 -3
  43. sentry_sdk/transport.py +9 -26
  44. sentry_sdk/types.py +3 -0
  45. sentry_sdk/utils.py +22 -4
  46. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/METADATA +3 -1
  47. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/RECORD +51 -49
  48. sentry_sdk/metrics.py +0 -965
  49. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/WHEEL +0 -0
  50. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/entry_points.txt +0 -0
  51. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/licenses/LICENSE +0 -0
  52. {sentry_sdk-2.39.0.dist-info → sentry_sdk-2.41.0.dist-info}/top_level.txt +0 -0
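The headline change in this pair of releases is the removal of the long-deprecated sentry_sdk.metrics module (965 lines, shown in full below), alongside new internal sentry_sdk._metrics and sentry_sdk._metrics_batcher modules and a new litellm integration. As a minimal sketch of what the removal means for downstream code, based only on the deletion shown in this diff: on 2.39.0 the module still imports but emits a DeprecationWarning (see the warnings.warn call at the top of the deleted file), while on 2.41.0 the import fails outright.

# Minimal sketch: how `import sentry_sdk.metrics` behaves across these
# two releases, based only on the deletion shown in this diff.
import warnings

try:
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        import sentry_sdk.metrics  # noqa: F401
    # 2.39.0: the module exists and warns at import time.
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
except ImportError:
    # 2.41.0: the module has been deleted from the wheel.
    print("sentry_sdk.metrics was removed; migrate off the old metrics API")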
sentry_sdk/metrics.py DELETED
@@ -1,965 +0,0 @@
- import io
- import os
- import random
- import re
- import sys
- import threading
- import time
- import warnings
- import zlib
- from abc import ABC, abstractmethod
- from contextlib import contextmanager
- from datetime import datetime, timezone
- from functools import wraps, partial
-
- import sentry_sdk
- from sentry_sdk.utils import (
-     ContextVar,
-     now,
-     nanosecond_time,
-     to_timestamp,
-     serialize_frame,
-     json_dumps,
- )
- from sentry_sdk.envelope import Envelope, Item
- from sentry_sdk.tracing import TransactionSource
-
- from typing import TYPE_CHECKING
-
- if TYPE_CHECKING:
-     from typing import Any
-     from typing import Callable
-     from typing import Dict
-     from typing import Generator
-     from typing import Iterable
-     from typing import List
-     from typing import Optional
-     from typing import Set
-     from typing import Tuple
-     from typing import Union
-
-     from sentry_sdk._types import BucketKey
-     from sentry_sdk._types import DurationUnit
-     from sentry_sdk._types import FlushedMetricValue
-     from sentry_sdk._types import MeasurementUnit
-     from sentry_sdk._types import MetricMetaKey
-     from sentry_sdk._types import MetricTagValue
-     from sentry_sdk._types import MetricTags
-     from sentry_sdk._types import MetricTagsInternal
-     from sentry_sdk._types import MetricType
-     from sentry_sdk._types import MetricValue
-
-
- warnings.warn(
-     "The sentry_sdk.metrics module is deprecated and will be removed in the next major release. "
-     "Sentry will reject all metrics sent after October 7, 2024. "
-     "Learn more: https://sentry.zendesk.com/hc/en-us/articles/26369339769883-Upcoming-API-Changes-to-Metrics",
-     DeprecationWarning,
-     stacklevel=2,
- )
-
- _in_metrics = ContextVar("in_metrics", default=False)
- _set = set  # set is shadowed below
-
- GOOD_TRANSACTION_SOURCES = frozenset(
-     [
-         TransactionSource.ROUTE,
-         TransactionSource.VIEW,
-         TransactionSource.COMPONENT,
-         TransactionSource.TASK,
-     ]
- )
-
- _sanitize_unit = partial(re.compile(r"[^a-zA-Z0-9_]+").sub, "")
- _sanitize_metric_key = partial(re.compile(r"[^a-zA-Z0-9_\-.]+").sub, "_")
- _sanitize_tag_key = partial(re.compile(r"[^a-zA-Z0-9_\-.\/]+").sub, "")
-
-
- def _sanitize_tag_value(value):
-     # type: (str) -> str
-     table = str.maketrans(
-         {
-             "\n": "\\n",
-             "\r": "\\r",
-             "\t": "\\t",
-             "\\": "\\\\",
-             "|": "\\u{7c}",
-             ",": "\\u{2c}",
-         }
-     )
-     return value.translate(table)
-
-
- def get_code_location(stacklevel):
-     # type: (int) -> Optional[Dict[str, Any]]
-     try:
-         frm = sys._getframe(stacklevel)
-     except Exception:
-         return None
-
-     return serialize_frame(
-         frm, include_local_variables=False, include_source_context=True
-     )
-
-
- @contextmanager
- def recursion_protection():
-     # type: () -> Generator[bool, None, None]
-     """Enters recursion protection and returns the old flag."""
-     old_in_metrics = _in_metrics.get()
-     _in_metrics.set(True)
-     try:
-         yield old_in_metrics
-     finally:
-         _in_metrics.set(old_in_metrics)
-
-
- def metrics_noop(func):
-     # type: (Any) -> Any
-     """Convenient decorator that uses `recursion_protection` to
-     make a function a noop.
-     """
-
-     @wraps(func)
-     def new_func(*args, **kwargs):
-         # type: (*Any, **Any) -> Any
-         with recursion_protection() as in_metrics:
-             if not in_metrics:
-                 return func(*args, **kwargs)
-
-     return new_func
-
-
- class Metric(ABC):
-     __slots__ = ()
-
-     @abstractmethod
-     def __init__(self, first):
-         # type: (MetricValue) -> None
-         pass
-
-     @property
-     @abstractmethod
-     def weight(self):
-         # type: () -> int
-         pass
-
-     @abstractmethod
-     def add(self, value):
-         # type: (MetricValue) -> None
-         pass
-
-     @abstractmethod
-     def serialize_value(self):
-         # type: () -> Iterable[FlushedMetricValue]
-         pass
-
-
- class CounterMetric(Metric):
-     __slots__ = ("value",)
-
-     def __init__(
-         self, first  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value = float(first)
-
-     @property
-     def weight(self):
-         # type: (...) -> int
-         return 1
-
-     def add(
-         self, value  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value += float(value)
-
-     def serialize_value(self):
-         # type: (...) -> Iterable[FlushedMetricValue]
-         return (self.value,)
-
-
- class GaugeMetric(Metric):
-     __slots__ = (
-         "last",
-         "min",
-         "max",
-         "sum",
-         "count",
-     )
-
-     def __init__(
-         self, first  # type: MetricValue
-     ):
-         # type: (...) -> None
-         first = float(first)
-         self.last = first
-         self.min = first
-         self.max = first
-         self.sum = first
-         self.count = 1
-
-     @property
-     def weight(self):
-         # type: (...) -> int
-         # Number of elements.
-         return 5
-
-     def add(
-         self, value  # type: MetricValue
-     ):
-         # type: (...) -> None
-         value = float(value)
-         self.last = value
-         self.min = min(self.min, value)
-         self.max = max(self.max, value)
-         self.sum += value
-         self.count += 1
-
-     def serialize_value(self):
-         # type: (...) -> Iterable[FlushedMetricValue]
-         return (
-             self.last,
-             self.min,
-             self.max,
-             self.sum,
-             self.count,
-         )
-
-
- class DistributionMetric(Metric):
-     __slots__ = ("value",)
-
-     def __init__(
-         self, first  # type: MetricValue
-     ):
-         # type(...) -> None
-         self.value = [float(first)]
-
-     @property
-     def weight(self):
-         # type: (...) -> int
-         return len(self.value)
-
-     def add(
-         self, value  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value.append(float(value))
-
-     def serialize_value(self):
-         # type: (...) -> Iterable[FlushedMetricValue]
-         return self.value
-
-
- class SetMetric(Metric):
-     __slots__ = ("value",)
-
-     def __init__(
-         self, first  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value = {first}
-
-     @property
-     def weight(self):
-         # type: (...) -> int
-         return len(self.value)
-
-     def add(
-         self, value  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value.add(value)
-
-     def serialize_value(self):
-         # type: (...) -> Iterable[FlushedMetricValue]
-         def _hash(x):
-             # type: (MetricValue) -> int
-             if isinstance(x, str):
-                 return zlib.crc32(x.encode("utf-8")) & 0xFFFFFFFF
-             return int(x)
-
-         return (_hash(value) for value in self.value)
-
-
- def _encode_metrics(flushable_buckets):
-     # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]]) -> bytes
-     out = io.BytesIO()
-     _write = out.write
-
-     # Note on sanitization: we intentionally sanitize in emission (serialization)
-     # and not during aggregation for performance reasons. This means that the
-     # envelope can in fact have duplicate buckets stored. This is acceptable for
-     # relay side emission and should not happen commonly.
-
-     for timestamp, buckets in flushable_buckets:
-         for bucket_key, metric in buckets.items():
-             metric_type, metric_name, metric_unit, metric_tags = bucket_key
-             metric_name = _sanitize_metric_key(metric_name)
-             metric_unit = _sanitize_unit(metric_unit)
-             _write(metric_name.encode("utf-8"))
-             _write(b"@")
-             _write(metric_unit.encode("utf-8"))
-
-             for serialized_value in metric.serialize_value():
-                 _write(b":")
-                 _write(str(serialized_value).encode("utf-8"))
-
-             _write(b"|")
-             _write(metric_type.encode("ascii"))
-
-             if metric_tags:
-                 _write(b"|#")
-                 first = True
-                 for tag_key, tag_value in metric_tags:
-                     tag_key = _sanitize_tag_key(tag_key)
-                     if not tag_key:
-                         continue
-                     if first:
-                         first = False
-                     else:
-                         _write(b",")
-                     _write(tag_key.encode("utf-8"))
-                     _write(b":")
-                     _write(_sanitize_tag_value(tag_value).encode("utf-8"))
-
-             _write(b"|T")
-             _write(str(timestamp).encode("ascii"))
-             _write(b"\n")
-
-     return out.getvalue()
-
-
- def _encode_locations(timestamp, code_locations):
-     # type: (int, Iterable[Tuple[MetricMetaKey, Dict[str, Any]]]) -> bytes
-     mapping = {}  # type: Dict[str, List[Any]]
-
-     for key, loc in code_locations:
-         metric_type, name, unit = key
-         mri = "{}:{}@{}".format(
-             metric_type, _sanitize_metric_key(name), _sanitize_unit(unit)
-         )
-
-         loc["type"] = "location"
-         mapping.setdefault(mri, []).append(loc)
-
-     return json_dumps({"timestamp": timestamp, "mapping": mapping})
-
-
- METRIC_TYPES = {
-     "c": CounterMetric,
-     "g": GaugeMetric,
-     "d": DistributionMetric,
-     "s": SetMetric,
- }  # type: dict[MetricType, type[Metric]]
-
- # some of these are dumb
- TIMING_FUNCTIONS = {
-     "nanosecond": nanosecond_time,
-     "microsecond": lambda: nanosecond_time() / 1000.0,
-     "millisecond": lambda: nanosecond_time() / 1000000.0,
-     "second": now,
-     "minute": lambda: now() / 60.0,
-     "hour": lambda: now() / 3600.0,
-     "day": lambda: now() / 3600.0 / 24.0,
-     "week": lambda: now() / 3600.0 / 24.0 / 7.0,
- }
-
-
- class LocalAggregator:
-     __slots__ = ("_measurements",)
-
-     def __init__(self):
-         # type: (...) -> None
-         self._measurements = (
-             {}
-         )  # type: Dict[Tuple[str, MetricTagsInternal], Tuple[float, float, int, float]]
-
-     def add(
-         self,
-         ty,  # type: MetricType
-         key,  # type: str
-         value,  # type: float
-         unit,  # type: MeasurementUnit
-         tags,  # type: MetricTagsInternal
-     ):
-         # type: (...) -> None
-         export_key = "%s:%s@%s" % (ty, key, unit)
-         bucket_key = (export_key, tags)
-
-         old = self._measurements.get(bucket_key)
-         if old is not None:
-             v_min, v_max, v_count, v_sum = old
-             v_min = min(v_min, value)
-             v_max = max(v_max, value)
-             v_count += 1
-             v_sum += value
-         else:
-             v_min = v_max = v_sum = value
-             v_count = 1
-         self._measurements[bucket_key] = (v_min, v_max, v_count, v_sum)
-
-     def to_json(self):
-         # type: (...) -> Dict[str, Any]
-         rv = {}  # type: Any
-         for (export_key, tags), (
-             v_min,
-             v_max,
-             v_count,
-             v_sum,
-         ) in self._measurements.items():
-             rv.setdefault(export_key, []).append(
-                 {
-                     "tags": _tags_to_dict(tags),
-                     "min": v_min,
-                     "max": v_max,
-                     "count": v_count,
-                     "sum": v_sum,
-                 }
-             )
-         return rv
-
-
- class MetricsAggregator:
-     ROLLUP_IN_SECONDS = 10.0
-     MAX_WEIGHT = 100000
-     FLUSHER_SLEEP_TIME = 5.0
-
-     def __init__(
-         self,
-         capture_func,  # type: Callable[[Envelope], None]
-         enable_code_locations=False,  # type: bool
-     ):
-         # type: (...) -> None
-         self.buckets = {}  # type: Dict[int, Any]
-         self._enable_code_locations = enable_code_locations
-         self._seen_locations = _set()  # type: Set[Tuple[int, MetricMetaKey]]
-         self._pending_locations = {}  # type: Dict[int, List[Tuple[MetricMetaKey, Any]]]
-         self._buckets_total_weight = 0
-         self._capture_func = capture_func
-         self._running = True
-         self._lock = threading.Lock()
-
-         self._flush_event = threading.Event()  # type: threading.Event
-         self._force_flush = False
-
-         # The aggregator shifts its flushing by up to an entire rollup window to
-         # avoid multiple clients trampling on end of a 10 second window as all the
-         # buckets are anchored to multiples of ROLLUP seconds. We randomize this
-         # number once per aggregator boot to achieve some level of offsetting
-         # across a fleet of deployed SDKs. Relay itself will also apply independent
-         # jittering.
-         self._flush_shift = random.random() * self.ROLLUP_IN_SECONDS
-
-         self._flusher = None  # type: Optional[threading.Thread]
-         self._flusher_pid = None  # type: Optional[int]
-
-     def _ensure_thread(self):
-         # type: (...) -> bool
-         """For forking processes we might need to restart this thread.
-         This ensures that our process actually has that thread running.
-         """
-         if not self._running:
-             return False
-
-         pid = os.getpid()
-         if self._flusher_pid == pid:
-             return True
-
-         with self._lock:
-             # Recheck to make sure another thread didn't get here and start the
-             # the flusher in the meantime
-             if self._flusher_pid == pid:
-                 return True
-
-             self._flusher_pid = pid
-
-             self._flusher = threading.Thread(target=self._flush_loop)
-             self._flusher.daemon = True
-
-             try:
-                 self._flusher.start()
-             except RuntimeError:
-                 # Unfortunately at this point the interpreter is in a state that no
-                 # longer allows us to spawn a thread and we have to bail.
-                 self._running = False
-                 return False
-
-         return True
-
-     def _flush_loop(self):
-         # type: (...) -> None
-         _in_metrics.set(True)
-         while self._running or self._force_flush:
-             if self._running:
-                 self._flush_event.wait(self.FLUSHER_SLEEP_TIME)
-             self._flush()
-
-     def _flush(self):
-         # type: (...) -> None
-         self._emit(self._flushable_buckets(), self._flushable_locations())
-
-     def _flushable_buckets(self):
-         # type: (...) -> (Iterable[Tuple[int, Dict[BucketKey, Metric]]])
-         with self._lock:
-             force_flush = self._force_flush
-             cutoff = time.time() - self.ROLLUP_IN_SECONDS - self._flush_shift
-             flushable_buckets = ()  # type: Iterable[Tuple[int, Dict[BucketKey, Metric]]]
-             weight_to_remove = 0
-
-             if force_flush:
-                 flushable_buckets = self.buckets.items()
-                 self.buckets = {}
-                 self._buckets_total_weight = 0
-                 self._force_flush = False
-             else:
-                 flushable_buckets = []
-                 for buckets_timestamp, buckets in self.buckets.items():
-                     # If the timestamp of the bucket is newer that the rollup we want to skip it.
-                     if buckets_timestamp <= cutoff:
-                         flushable_buckets.append((buckets_timestamp, buckets))
-
-                 # We will clear the elements while holding the lock, in order to avoid requesting it downstream again.
-                 for buckets_timestamp, buckets in flushable_buckets:
-                     for metric in buckets.values():
-                         weight_to_remove += metric.weight
-                     del self.buckets[buckets_timestamp]
-
-                 self._buckets_total_weight -= weight_to_remove
-
-         return flushable_buckets
-
-     def _flushable_locations(self):
-         # type: (...) -> Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]]
-         with self._lock:
-             locations = self._pending_locations
-             self._pending_locations = {}
-         return locations
-
-     @metrics_noop
-     def add(
-         self,
-         ty,  # type: MetricType
-         key,  # type: str
-         value,  # type: MetricValue
-         unit,  # type: MeasurementUnit
-         tags,  # type: Optional[MetricTags]
-         timestamp=None,  # type: Optional[Union[float, datetime]]
-         local_aggregator=None,  # type: Optional[LocalAggregator]
-         stacklevel=0,  # type: Optional[int]
-     ):
-         # type: (...) -> None
-         if not self._ensure_thread() or self._flusher is None:
-             return None
-
-         if timestamp is None:
-             timestamp = time.time()
-         elif isinstance(timestamp, datetime):
-             timestamp = to_timestamp(timestamp)
-
-         bucket_timestamp = int(
-             (timestamp // self.ROLLUP_IN_SECONDS) * self.ROLLUP_IN_SECONDS
-         )
-         serialized_tags = _serialize_tags(tags)
-         bucket_key = (
-             ty,
-             key,
-             unit,
-             serialized_tags,
-         )
-
-         with self._lock:
-             local_buckets = self.buckets.setdefault(bucket_timestamp, {})
-             metric = local_buckets.get(bucket_key)
-             if metric is not None:
-                 previous_weight = metric.weight
-                 metric.add(value)
-             else:
-                 metric = local_buckets[bucket_key] = METRIC_TYPES[ty](value)
-                 previous_weight = 0
-
-             added = metric.weight - previous_weight
-
-             if stacklevel is not None:
-                 self.record_code_location(ty, key, unit, stacklevel + 2, timestamp)
-
-             # Given the new weight we consider whether we want to force flush.
-             self._consider_force_flush()
-
-         # For sets, we only record that a value has been added to the set but not which one.
-         # See develop docs: https://develop.sentry.dev/sdk/metrics/#sets
-         if local_aggregator is not None:
-             local_value = float(added if ty == "s" else value)
-             local_aggregator.add(ty, key, local_value, unit, serialized_tags)
-
-     def record_code_location(
-         self,
-         ty,  # type: MetricType
-         key,  # type: str
-         unit,  # type: MeasurementUnit
-         stacklevel,  # type: int
-         timestamp=None,  # type: Optional[float]
-     ):
-         # type: (...) -> None
-         if not self._enable_code_locations:
-             return
-         if timestamp is None:
-             timestamp = time.time()
-         meta_key = (ty, key, unit)
-         start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace(
-             hour=0, minute=0, second=0, microsecond=0, tzinfo=None
-         )
-         start_of_day = int(to_timestamp(start_of_day))
-
-         if (start_of_day, meta_key) not in self._seen_locations:
-             self._seen_locations.add((start_of_day, meta_key))
-             loc = get_code_location(stacklevel + 3)
-             if loc is not None:
-                 # Group metadata by day to make flushing more efficient.
-                 # There needs to be one envelope item per timestamp.
-                 self._pending_locations.setdefault(start_of_day, []).append(
-                     (meta_key, loc)
-                 )
-
-     @metrics_noop
-     def need_code_location(
-         self,
-         ty,  # type: MetricType
-         key,  # type: str
-         unit,  # type: MeasurementUnit
-         timestamp,  # type: float
-     ):
-         # type: (...) -> bool
-         if self._enable_code_locations:
-             return False
-         meta_key = (ty, key, unit)
-         start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace(
-             hour=0, minute=0, second=0, microsecond=0, tzinfo=None
-         )
-         start_of_day = int(to_timestamp(start_of_day))
-         return (start_of_day, meta_key) not in self._seen_locations
-
-     def kill(self):
-         # type: (...) -> None
-         if self._flusher is None:
-             return
-
-         self._running = False
-         self._flush_event.set()
-         self._flusher = None
-
-     @metrics_noop
-     def flush(self):
-         # type: (...) -> None
-         self._force_flush = True
-         self._flush()
-
-     def _consider_force_flush(self):
-         # type: (...) -> None
-         # It's important to acquire a lock around this method, since it will touch shared data structures.
-         total_weight = len(self.buckets) + self._buckets_total_weight
-         if total_weight >= self.MAX_WEIGHT:
-             self._force_flush = True
-             self._flush_event.set()
-
-     def _emit(
-         self,
-         flushable_buckets,  # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]])
-         code_locations,  # type: Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]]
-     ):
-         # type: (...) -> Optional[Envelope]
-         envelope = Envelope()
-
-         if flushable_buckets:
-             encoded_metrics = _encode_metrics(flushable_buckets)
-             envelope.add_item(Item(payload=encoded_metrics, type="statsd"))
-
-         for timestamp, locations in code_locations.items():
-             encoded_locations = _encode_locations(timestamp, locations)
-             envelope.add_item(Item(payload=encoded_locations, type="metric_meta"))
-
-         if envelope.items:
-             self._capture_func(envelope)
-             return envelope
-         return None
-
-
- def _serialize_tags(
-     tags,  # type: Optional[MetricTags]
- ):
-     # type: (...) -> MetricTagsInternal
-     if not tags:
-         return ()
-
-     rv = []
-     for key, value in tags.items():
-         # If the value is a collection, we want to flatten it.
-         if isinstance(value, (list, tuple)):
-             for inner_value in value:
-                 if inner_value is not None:
-                     rv.append((key, str(inner_value)))
-         elif value is not None:
-             rv.append((key, str(value)))
-
-     # It's very important to sort the tags in order to obtain the
-     # same bucket key.
-     return tuple(sorted(rv))
-
-
- def _tags_to_dict(tags):
-     # type: (MetricTagsInternal) -> Dict[str, Any]
-     rv = {}  # type: Dict[str, Any]
-     for tag_name, tag_value in tags:
-         old_value = rv.get(tag_name)
-         if old_value is not None:
-             if isinstance(old_value, list):
-                 old_value.append(tag_value)
-             else:
-                 rv[tag_name] = [old_value, tag_value]
-         else:
-             rv[tag_name] = tag_value
-     return rv
-
-
- def _get_aggregator():
-     # type: () -> Optional[MetricsAggregator]
-     client = sentry_sdk.get_client()
-     return (
-         client.metrics_aggregator
-         if client.is_active() and client.metrics_aggregator is not None
-         else None
-     )
-
-
- def _get_aggregator_and_update_tags(key, value, unit, tags):
-     # type: (str, Optional[MetricValue], MeasurementUnit, Optional[MetricTags]) -> Tuple[Optional[MetricsAggregator], Optional[LocalAggregator], Optional[MetricTags]]
-     client = sentry_sdk.get_client()
-     if not client.is_active() or client.metrics_aggregator is None:
-         return None, None, tags
-
-     updated_tags = dict(tags or ())  # type: Dict[str, MetricTagValue]
-     updated_tags.setdefault("release", client.options["release"])
-     updated_tags.setdefault("environment", client.options["environment"])
-
-     scope = sentry_sdk.get_current_scope()
-     local_aggregator = None
-
-     # We go with the low-level API here to access transaction information as
-     # this one is the same between just errors and errors + performance
-     transaction_source = scope._transaction_info.get("source")
-     if transaction_source in GOOD_TRANSACTION_SOURCES:
-         transaction_name = scope._transaction
-         if transaction_name:
-             updated_tags.setdefault("transaction", transaction_name)
-         if scope._span is not None:
-             local_aggregator = scope._span._get_local_aggregator()
-
-     experiments = client.options.get("_experiments", {})
-     before_emit_callback = experiments.get("before_emit_metric")
-     if before_emit_callback is not None:
-         with recursion_protection() as in_metrics:
-             if not in_metrics:
-                 if not before_emit_callback(key, value, unit, updated_tags):
-                     return None, None, updated_tags
-
-     return client.metrics_aggregator, local_aggregator, updated_tags
-
-
- def increment(
-     key,  # type: str
-     value=1.0,  # type: float
-     unit="none",  # type: MeasurementUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> None
-     """Increments a counter."""
-     aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-         key, value, unit, tags
-     )
-     if aggregator is not None:
-         aggregator.add(
-             "c", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-         )
-
-
- # alias as incr is relatively common in python
- incr = increment
-
-
- class _Timing:
-     def __init__(
-         self,
-         key,  # type: str
-         tags,  # type: Optional[MetricTags]
-         timestamp,  # type: Optional[Union[float, datetime]]
-         value,  # type: Optional[float]
-         unit,  # type: DurationUnit
-         stacklevel,  # type: int
-     ):
-         # type: (...) -> None
-         self.key = key
-         self.tags = tags
-         self.timestamp = timestamp
-         self.value = value
-         self.unit = unit
-         self.entered = None  # type: Optional[float]
-         self._span = None  # type: Optional[sentry_sdk.tracing.Span]
-         self.stacklevel = stacklevel
-
-     def _validate_invocation(self, context):
-         # type: (str) -> None
-         if self.value is not None:
-             raise TypeError(
-                 "cannot use timing as %s when a value is provided" % context
-             )
-
-     def __enter__(self):
-         # type: (...) -> _Timing
-         self.entered = TIMING_FUNCTIONS[self.unit]()
-         self._validate_invocation("context-manager")
-         self._span = sentry_sdk.start_span(op="metric.timing", name=self.key)
-         if self.tags:
-             for key, value in self.tags.items():
-                 if isinstance(value, (tuple, list)):
-                     value = ",".join(sorted(map(str, value)))
-                 self._span.set_tag(key, value)
-         self._span.__enter__()
-
-         # report code locations here for better accuracy
-         aggregator = _get_aggregator()
-         if aggregator is not None:
-             aggregator.record_code_location("d", self.key, self.unit, self.stacklevel)
-
-         return self
-
-     def __exit__(self, exc_type, exc_value, tb):
-         # type: (Any, Any, Any) -> None
-         assert self._span, "did not enter"
-         aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-             self.key,
-             self.value,
-             self.unit,
-             self.tags,
-         )
-         if aggregator is not None:
-             elapsed = TIMING_FUNCTIONS[self.unit]() - self.entered  # type: ignore
-             aggregator.add(
-                 "d",
-                 self.key,
-                 elapsed,
-                 self.unit,
-                 tags,
-                 self.timestamp,
-                 local_aggregator,
-                 None,  # code locations are reported in __enter__
-             )
-
-         self._span.__exit__(exc_type, exc_value, tb)
-         self._span = None
-
-     def __call__(self, f):
-         # type: (Any) -> Any
-         self._validate_invocation("decorator")
-
-         @wraps(f)
-         def timed_func(*args, **kwargs):
-             # type: (*Any, **Any) -> Any
-             with timing(
-                 key=self.key,
-                 tags=self.tags,
-                 timestamp=self.timestamp,
-                 unit=self.unit,
-                 stacklevel=self.stacklevel + 1,
-             ):
-                 return f(*args, **kwargs)
-
-         return timed_func
-
-
- def timing(
-     key,  # type: str
-     value=None,  # type: Optional[float]
-     unit="second",  # type: DurationUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> _Timing
-     """Emits a distribution with the time it takes to run the given code block.
-
-     This method supports three forms of invocation:
-
-     - when a `value` is provided, it functions similar to `distribution` but with
-     - it can be used as a context manager
-     - it can be used as a decorator
-     """
-     if value is not None:
-         aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-             key, value, unit, tags
-         )
-         if aggregator is not None:
-             aggregator.add(
-                 "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-             )
-     return _Timing(key, tags, timestamp, value, unit, stacklevel)
-
-
- def distribution(
-     key,  # type: str
-     value,  # type: float
-     unit="none",  # type: MeasurementUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> None
-     """Emits a distribution."""
-     aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-         key, value, unit, tags
-     )
-     if aggregator is not None:
-         aggregator.add(
-             "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-         )
-
-
- def set(
-     key,  # type: str
-     value,  # type: Union[int, str]
-     unit="none",  # type: MeasurementUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> None
-     """Emits a set."""
-     aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-         key, value, unit, tags
-     )
-     if aggregator is not None:
-         aggregator.add(
-             "s", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-         )
-
-
- def gauge(
-     key,  # type: str
-     value,  # type: float
-     unit="none",  # type: MeasurementUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> None
-     """Emits a gauge."""
-     aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-         key, value, unit, tags
-     )
-     if aggregator is not None:
-         aggregator.add(
-             "g", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-         )
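
For reference, the deleted aggregator serialized each rollup bucket into Sentry's statsd-flavored text protocol and shipped it as a "statsd" envelope item (see _encode_metrics and _emit above). Below is a minimal sketch of that wire format, derived from the deleted code; the helper name and the concrete metric name, tag, and timestamp are illustrative, not part of the SDK.

# Sketch of the line format produced by the deleted _encode_metrics():
#   name@unit:value1:value2|type|#tag1:v1,tag2:v2|T<bucket timestamp>
def encode_bucket_line(name, unit, values, metric_type, tags, bucket_ts):
    line = "%s@%s" % (name, unit)
    line += "".join(":%s" % v for v in values)  # one ":value" per flushed value
    line += "|%s" % metric_type                 # c, g, d, or s
    if tags:
        line += "|#" + ",".join("%s:%s" % (k, v) for k, v in tags)
    return line + "|T%d" % bucket_ts

# A counter incremented twice within one 10-second rollup window collapses
# into a single bucket line (values here are made up):
print(encode_bucket_line("button_click", "none", [2.0], "c",
                         [("release", "1.0.0")], 1720000000))
# -> button_click@none:2.0|c|#release:1.0.0|T1720000000

Buckets were keyed to int((timestamp // ROLLUP_IN_SECONDS) * ROLLUP_IN_SECONDS), i.e. 10-second windows, and the flusher shifted its cutoff by a random fraction of the rollup window (_flush_shift) so a fleet of SDKs would not all flush in lockstep.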