sentry-sdk 2.40.0__py2.py3-none-any.whl → 2.41.0__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of sentry-sdk might be problematic.

sentry_sdk/metrics.py DELETED
@@ -1,971 +0,0 @@
- import io
- import os
- import random
- import re
- import sys
- import threading
- import time
- import warnings
- import zlib
- from abc import ABC, abstractmethod
- from contextlib import contextmanager
- from datetime import datetime, timezone
- from functools import wraps, partial
-
- import sentry_sdk
- from sentry_sdk.utils import (
-     ContextVar,
-     now,
-     nanosecond_time,
-     to_timestamp,
-     serialize_frame,
-     json_dumps,
- )
- from sentry_sdk.envelope import Envelope, Item
- from sentry_sdk.tracing import TransactionSource
-
- from typing import TYPE_CHECKING
-
- if TYPE_CHECKING:
-     from typing import Any
-     from typing import Callable
-     from typing import Dict
-     from typing import Generator
-     from typing import Iterable
-     from typing import List
-     from typing import Optional
-     from typing import Set
-     from typing import Tuple
-     from typing import Union
-
-     from sentry_sdk._types import BucketKey
-     from sentry_sdk._types import DurationUnit
-     from sentry_sdk._types import FlushedMetricValue
-     from sentry_sdk._types import MeasurementUnit
-     from sentry_sdk._types import MetricMetaKey
-     from sentry_sdk._types import MetricTagValue
-     from sentry_sdk._types import MetricTags
-     from sentry_sdk._types import MetricTagsInternal
-     from sentry_sdk._types import MetricType
-     from sentry_sdk._types import MetricValue
-
-
- warnings.warn(
-     "The sentry_sdk.metrics module is deprecated and will be removed in the next major release. "
-     "Sentry will reject all metrics sent after October 7, 2024. "
-     "Learn more: https://sentry.zendesk.com/hc/en-us/articles/26369339769883-Upcoming-API-Changes-to-Metrics",
-     DeprecationWarning,
-     stacklevel=2,
- )
-
- _in_metrics = ContextVar("in_metrics", default=False)
- _set = set  # set is shadowed below
-
- GOOD_TRANSACTION_SOURCES = frozenset(
-     [
-         TransactionSource.ROUTE,
-         TransactionSource.VIEW,
-         TransactionSource.COMPONENT,
-         TransactionSource.TASK,
-     ]
- )
-
- _sanitize_unit = partial(re.compile(r"[^a-zA-Z0-9_]+").sub, "")
- _sanitize_metric_key = partial(re.compile(r"[^a-zA-Z0-9_\-.]+").sub, "_")
- _sanitize_tag_key = partial(re.compile(r"[^a-zA-Z0-9_\-.\/]+").sub, "")
-
-
- def _sanitize_tag_value(value):
-     # type: (str) -> str
-     table = str.maketrans(
-         {
-             "\n": "\\n",
-             "\r": "\\r",
-             "\t": "\\t",
-             "\\": "\\\\",
-             "|": "\\u{7c}",
-             ",": "\\u{2c}",
-         }
-     )
-     return value.translate(table)
-
-
- def get_code_location(stacklevel):
-     # type: (int) -> Optional[Dict[str, Any]]
-     try:
-         frm = sys._getframe(stacklevel)
-     except Exception:
-         return None
-
-     return serialize_frame(
-         frm, include_local_variables=False, include_source_context=True
-     )
-
-
- @contextmanager
- def recursion_protection():
-     # type: () -> Generator[bool, None, None]
-     """Enters recursion protection and returns the old flag."""
-     old_in_metrics = _in_metrics.get()
-     _in_metrics.set(True)
-     try:
-         yield old_in_metrics
-     finally:
-         _in_metrics.set(old_in_metrics)
-
-
- def metrics_noop(func):
-     # type: (Any) -> Any
-     """Convenient decorator that uses `recursion_protection` to
-     make a function a noop.
-     """
-
-     @wraps(func)
-     def new_func(*args, **kwargs):
-         # type: (*Any, **Any) -> Any
-         with recursion_protection() as in_metrics:
-             if not in_metrics:
-                 return func(*args, **kwargs)
-
-     return new_func
-
-
- class Metric(ABC):
-     __slots__ = ()
-
-     @abstractmethod
-     def __init__(self, first):
-         # type: (MetricValue) -> None
-         pass
-
-     @property
-     @abstractmethod
-     def weight(self):
-         # type: () -> int
-         pass
-
-     @abstractmethod
-     def add(self, value):
-         # type: (MetricValue) -> None
-         pass
-
-     @abstractmethod
-     def serialize_value(self):
-         # type: () -> Iterable[FlushedMetricValue]
-         pass
-
-
- class CounterMetric(Metric):
-     __slots__ = ("value",)
-
-     def __init__(
-         self,
-         first,  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value = float(first)
-
-     @property
-     def weight(self):
-         # type: (...) -> int
-         return 1
-
-     def add(
-         self,
-         value,  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value += float(value)
-
-     def serialize_value(self):
-         # type: (...) -> Iterable[FlushedMetricValue]
-         return (self.value,)
-
-
- class GaugeMetric(Metric):
-     __slots__ = (
-         "last",
-         "min",
-         "max",
-         "sum",
-         "count",
-     )
-
-     def __init__(
-         self,
-         first,  # type: MetricValue
-     ):
-         # type: (...) -> None
-         first = float(first)
-         self.last = first
-         self.min = first
-         self.max = first
-         self.sum = first
-         self.count = 1
-
-     @property
-     def weight(self):
-         # type: (...) -> int
-         # Number of elements.
-         return 5
-
-     def add(
-         self,
-         value,  # type: MetricValue
-     ):
-         # type: (...) -> None
-         value = float(value)
-         self.last = value
-         self.min = min(self.min, value)
-         self.max = max(self.max, value)
-         self.sum += value
-         self.count += 1
-
-     def serialize_value(self):
-         # type: (...) -> Iterable[FlushedMetricValue]
-         return (
-             self.last,
-             self.min,
-             self.max,
-             self.sum,
-             self.count,
-         )
-
-
- class DistributionMetric(Metric):
-     __slots__ = ("value",)
-
-     def __init__(
-         self,
-         first,  # type: MetricValue
-     ):
-         # type(...) -> None
-         self.value = [float(first)]
-
-     @property
-     def weight(self):
-         # type: (...) -> int
-         return len(self.value)
-
-     def add(
-         self,
-         value,  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value.append(float(value))
-
-     def serialize_value(self):
-         # type: (...) -> Iterable[FlushedMetricValue]
-         return self.value
-
-
- class SetMetric(Metric):
-     __slots__ = ("value",)
-
-     def __init__(
-         self,
-         first,  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value = {first}
-
-     @property
-     def weight(self):
-         # type: (...) -> int
-         return len(self.value)
-
-     def add(
-         self,
-         value,  # type: MetricValue
-     ):
-         # type: (...) -> None
-         self.value.add(value)
-
-     def serialize_value(self):
-         # type: (...) -> Iterable[FlushedMetricValue]
-         def _hash(x):
-             # type: (MetricValue) -> int
-             if isinstance(x, str):
-                 return zlib.crc32(x.encode("utf-8")) & 0xFFFFFFFF
-             return int(x)
-
-         return (_hash(value) for value in self.value)
-
-
- def _encode_metrics(flushable_buckets):
-     # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]]) -> bytes
-     out = io.BytesIO()
-     _write = out.write
-
-     # Note on sanitization: we intentionally sanitize in emission (serialization)
-     # and not during aggregation for performance reasons. This means that the
-     # envelope can in fact have duplicate buckets stored. This is acceptable for
-     # relay side emission and should not happen commonly.
-
-     for timestamp, buckets in flushable_buckets:
-         for bucket_key, metric in buckets.items():
-             metric_type, metric_name, metric_unit, metric_tags = bucket_key
-             metric_name = _sanitize_metric_key(metric_name)
-             metric_unit = _sanitize_unit(metric_unit)
-             _write(metric_name.encode("utf-8"))
-             _write(b"@")
-             _write(metric_unit.encode("utf-8"))
-
-             for serialized_value in metric.serialize_value():
-                 _write(b":")
-                 _write(str(serialized_value).encode("utf-8"))
-
-             _write(b"|")
-             _write(metric_type.encode("ascii"))
-
-             if metric_tags:
-                 _write(b"|#")
-                 first = True
-                 for tag_key, tag_value in metric_tags:
-                     tag_key = _sanitize_tag_key(tag_key)
-                     if not tag_key:
-                         continue
-                     if first:
-                         first = False
-                     else:
-                         _write(b",")
-                     _write(tag_key.encode("utf-8"))
-                     _write(b":")
-                     _write(_sanitize_tag_value(tag_value).encode("utf-8"))
-
-             _write(b"|T")
-             _write(str(timestamp).encode("ascii"))
-             _write(b"\n")
-
-     return out.getvalue()
-
-
- def _encode_locations(timestamp, code_locations):
-     # type: (int, Iterable[Tuple[MetricMetaKey, Dict[str, Any]]]) -> bytes
-     mapping = {}  # type: Dict[str, List[Any]]
-
-     for key, loc in code_locations:
-         metric_type, name, unit = key
-         mri = "{}:{}@{}".format(
-             metric_type, _sanitize_metric_key(name), _sanitize_unit(unit)
-         )
-
-         loc["type"] = "location"
-         mapping.setdefault(mri, []).append(loc)
-
-     return json_dumps({"timestamp": timestamp, "mapping": mapping})
-
-
- METRIC_TYPES = {
-     "c": CounterMetric,
-     "g": GaugeMetric,
-     "d": DistributionMetric,
-     "s": SetMetric,
- }  # type: dict[MetricType, type[Metric]]
-
- # some of these are dumb
- TIMING_FUNCTIONS = {
-     "nanosecond": nanosecond_time,
-     "microsecond": lambda: nanosecond_time() / 1000.0,
-     "millisecond": lambda: nanosecond_time() / 1000000.0,
-     "second": now,
-     "minute": lambda: now() / 60.0,
-     "hour": lambda: now() / 3600.0,
-     "day": lambda: now() / 3600.0 / 24.0,
-     "week": lambda: now() / 3600.0 / 24.0 / 7.0,
- }
-
-
- class LocalAggregator:
-     __slots__ = ("_measurements",)
-
-     def __init__(self):
-         # type: (...) -> None
-         self._measurements = {}  # type: Dict[Tuple[str, MetricTagsInternal], Tuple[float, float, int, float]]
-
-     def add(
-         self,
-         ty,  # type: MetricType
-         key,  # type: str
-         value,  # type: float
-         unit,  # type: MeasurementUnit
-         tags,  # type: MetricTagsInternal
-     ):
-         # type: (...) -> None
-         export_key = "%s:%s@%s" % (ty, key, unit)
-         bucket_key = (export_key, tags)
-
-         old = self._measurements.get(bucket_key)
-         if old is not None:
-             v_min, v_max, v_count, v_sum = old
-             v_min = min(v_min, value)
-             v_max = max(v_max, value)
-             v_count += 1
-             v_sum += value
-         else:
-             v_min = v_max = v_sum = value
-             v_count = 1
-         self._measurements[bucket_key] = (v_min, v_max, v_count, v_sum)
-
-     def to_json(self):
-         # type: (...) -> Dict[str, Any]
-         rv = {}  # type: Any
-         for (export_key, tags), (
-             v_min,
-             v_max,
-             v_count,
-             v_sum,
-         ) in self._measurements.items():
-             rv.setdefault(export_key, []).append(
-                 {
-                     "tags": _tags_to_dict(tags),
-                     "min": v_min,
-                     "max": v_max,
-                     "count": v_count,
-                     "sum": v_sum,
-                 }
-             )
-         return rv
-
-
- class MetricsAggregator:
-     ROLLUP_IN_SECONDS = 10.0
-     MAX_WEIGHT = 100000
-     FLUSHER_SLEEP_TIME = 5.0
-
-     def __init__(
-         self,
-         capture_func,  # type: Callable[[Envelope], None]
-         enable_code_locations=False,  # type: bool
-     ):
-         # type: (...) -> None
-         self.buckets = {}  # type: Dict[int, Any]
-         self._enable_code_locations = enable_code_locations
-         self._seen_locations = _set()  # type: Set[Tuple[int, MetricMetaKey]]
-         self._pending_locations = {}  # type: Dict[int, List[Tuple[MetricMetaKey, Any]]]
-         self._buckets_total_weight = 0
-         self._capture_func = capture_func
-         self._running = True
-         self._lock = threading.Lock()
-
-         self._flush_event = threading.Event()  # type: threading.Event
-         self._force_flush = False
-
-         # The aggregator shifts its flushing by up to an entire rollup window to
-         # avoid multiple clients trampling on end of a 10 second window as all the
-         # buckets are anchored to multiples of ROLLUP seconds. We randomize this
-         # number once per aggregator boot to achieve some level of offsetting
-         # across a fleet of deployed SDKs. Relay itself will also apply independent
-         # jittering.
-         self._flush_shift = random.random() * self.ROLLUP_IN_SECONDS
-
-         self._flusher = None  # type: Optional[threading.Thread]
-         self._flusher_pid = None  # type: Optional[int]
-
-     def _ensure_thread(self):
-         # type: (...) -> bool
-         """For forking processes we might need to restart this thread.
-         This ensures that our process actually has that thread running.
-         """
-         if not self._running:
-             return False
-
-         pid = os.getpid()
-         if self._flusher_pid == pid:
-             return True
-
-         with self._lock:
-             # Recheck to make sure another thread didn't get here and start the
-             # the flusher in the meantime
-             if self._flusher_pid == pid:
-                 return True
-
-             self._flusher_pid = pid
-
-             self._flusher = threading.Thread(target=self._flush_loop)
-             self._flusher.daemon = True
-
-             try:
-                 self._flusher.start()
-             except RuntimeError:
-                 # Unfortunately at this point the interpreter is in a state that no
-                 # longer allows us to spawn a thread and we have to bail.
-                 self._running = False
-                 return False
-
-         return True
-
-     def _flush_loop(self):
-         # type: (...) -> None
-         _in_metrics.set(True)
-         while self._running or self._force_flush:
-             if self._running:
-                 self._flush_event.wait(self.FLUSHER_SLEEP_TIME)
-             self._flush()
-
-     def _flush(self):
-         # type: (...) -> None
-         self._emit(self._flushable_buckets(), self._flushable_locations())
-
-     def _flushable_buckets(self):
-         # type: (...) -> (Iterable[Tuple[int, Dict[BucketKey, Metric]]])
-         with self._lock:
-             force_flush = self._force_flush
-             cutoff = time.time() - self.ROLLUP_IN_SECONDS - self._flush_shift
-             flushable_buckets = ()  # type: Iterable[Tuple[int, Dict[BucketKey, Metric]]]
-             weight_to_remove = 0
-
-             if force_flush:
-                 flushable_buckets = self.buckets.items()
-                 self.buckets = {}
-                 self._buckets_total_weight = 0
-                 self._force_flush = False
-             else:
-                 flushable_buckets = []
-                 for buckets_timestamp, buckets in self.buckets.items():
-                     # If the timestamp of the bucket is newer that the rollup we want to skip it.
-                     if buckets_timestamp <= cutoff:
-                         flushable_buckets.append((buckets_timestamp, buckets))
-
-                 # We will clear the elements while holding the lock, in order to avoid requesting it downstream again.
-                 for buckets_timestamp, buckets in flushable_buckets:
-                     for metric in buckets.values():
-                         weight_to_remove += metric.weight
-                     del self.buckets[buckets_timestamp]
-
-                 self._buckets_total_weight -= weight_to_remove
-
-         return flushable_buckets
-
-     def _flushable_locations(self):
-         # type: (...) -> Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]]
-         with self._lock:
-             locations = self._pending_locations
-             self._pending_locations = {}
-         return locations
-
-     @metrics_noop
-     def add(
-         self,
-         ty,  # type: MetricType
-         key,  # type: str
-         value,  # type: MetricValue
-         unit,  # type: MeasurementUnit
-         tags,  # type: Optional[MetricTags]
-         timestamp=None,  # type: Optional[Union[float, datetime]]
-         local_aggregator=None,  # type: Optional[LocalAggregator]
-         stacklevel=0,  # type: Optional[int]
-     ):
-         # type: (...) -> None
-         if not self._ensure_thread() or self._flusher is None:
-             return None
-
-         if timestamp is None:
-             timestamp = time.time()
-         elif isinstance(timestamp, datetime):
-             timestamp = to_timestamp(timestamp)
-
-         bucket_timestamp = int(
-             (timestamp // self.ROLLUP_IN_SECONDS) * self.ROLLUP_IN_SECONDS
-         )
-         serialized_tags = _serialize_tags(tags)
-         bucket_key = (
-             ty,
-             key,
-             unit,
-             serialized_tags,
-         )
-
-         with self._lock:
-             local_buckets = self.buckets.setdefault(bucket_timestamp, {})
-             metric = local_buckets.get(bucket_key)
-             if metric is not None:
-                 previous_weight = metric.weight
-                 metric.add(value)
-             else:
-                 metric = local_buckets[bucket_key] = METRIC_TYPES[ty](value)
-                 previous_weight = 0
-
-             added = metric.weight - previous_weight
-
-             if stacklevel is not None:
-                 self.record_code_location(ty, key, unit, stacklevel + 2, timestamp)
-
-         # Given the new weight we consider whether we want to force flush.
-         self._consider_force_flush()
-
-         # For sets, we only record that a value has been added to the set but not which one.
-         # See develop docs: https://develop.sentry.dev/sdk/metrics/#sets
-         if local_aggregator is not None:
-             local_value = float(added if ty == "s" else value)
-             local_aggregator.add(ty, key, local_value, unit, serialized_tags)
-
-     def record_code_location(
-         self,
-         ty,  # type: MetricType
-         key,  # type: str
-         unit,  # type: MeasurementUnit
-         stacklevel,  # type: int
-         timestamp=None,  # type: Optional[float]
-     ):
-         # type: (...) -> None
-         if not self._enable_code_locations:
-             return
-         if timestamp is None:
-             timestamp = time.time()
-         meta_key = (ty, key, unit)
-         start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace(
-             hour=0, minute=0, second=0, microsecond=0, tzinfo=None
-         )
-         start_of_day = int(to_timestamp(start_of_day))
-
-         if (start_of_day, meta_key) not in self._seen_locations:
-             self._seen_locations.add((start_of_day, meta_key))
-             loc = get_code_location(stacklevel + 3)
-             if loc is not None:
-                 # Group metadata by day to make flushing more efficient.
-                 # There needs to be one envelope item per timestamp.
-                 self._pending_locations.setdefault(start_of_day, []).append(
-                     (meta_key, loc)
-                 )
-
-     @metrics_noop
-     def need_code_location(
-         self,
-         ty,  # type: MetricType
-         key,  # type: str
-         unit,  # type: MeasurementUnit
-         timestamp,  # type: float
-     ):
-         # type: (...) -> bool
-         if self._enable_code_locations:
-             return False
-         meta_key = (ty, key, unit)
-         start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace(
-             hour=0, minute=0, second=0, microsecond=0, tzinfo=None
-         )
-         start_of_day = int(to_timestamp(start_of_day))
-         return (start_of_day, meta_key) not in self._seen_locations
-
-     def kill(self):
-         # type: (...) -> None
-         if self._flusher is None:
-             return
-
-         self._running = False
-         self._flush_event.set()
-         self._flusher = None
-
-     @metrics_noop
-     def flush(self):
-         # type: (...) -> None
-         self._force_flush = True
-         self._flush()
-
-     def _consider_force_flush(self):
-         # type: (...) -> None
-         # It's important to acquire a lock around this method, since it will touch shared data structures.
-         total_weight = len(self.buckets) + self._buckets_total_weight
-         if total_weight >= self.MAX_WEIGHT:
-             self._force_flush = True
-             self._flush_event.set()
-
-     def _emit(
-         self,
-         flushable_buckets,  # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]])
-         code_locations,  # type: Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]]
-     ):
-         # type: (...) -> Optional[Envelope]
-         envelope = Envelope()
-
-         if flushable_buckets:
-             encoded_metrics = _encode_metrics(flushable_buckets)
-             envelope.add_item(Item(payload=encoded_metrics, type="statsd"))
-
-         for timestamp, locations in code_locations.items():
-             encoded_locations = _encode_locations(timestamp, locations)
-             envelope.add_item(Item(payload=encoded_locations, type="metric_meta"))
-
-         if envelope.items:
-             self._capture_func(envelope)
-             return envelope
-         return None
-
-
- def _serialize_tags(
-     tags,  # type: Optional[MetricTags]
- ):
-     # type: (...) -> MetricTagsInternal
-     if not tags:
-         return ()
-
-     rv = []
-     for key, value in tags.items():
-         # If the value is a collection, we want to flatten it.
-         if isinstance(value, (list, tuple)):
-             for inner_value in value:
-                 if inner_value is not None:
-                     rv.append((key, str(inner_value)))
-         elif value is not None:
-             rv.append((key, str(value)))
-
-     # It's very important to sort the tags in order to obtain the
-     # same bucket key.
-     return tuple(sorted(rv))
-
-
- def _tags_to_dict(tags):
-     # type: (MetricTagsInternal) -> Dict[str, Any]
-     rv = {}  # type: Dict[str, Any]
-     for tag_name, tag_value in tags:
-         old_value = rv.get(tag_name)
-         if old_value is not None:
-             if isinstance(old_value, list):
-                 old_value.append(tag_value)
-             else:
-                 rv[tag_name] = [old_value, tag_value]
-         else:
-             rv[tag_name] = tag_value
-     return rv
-
-
- def _get_aggregator():
-     # type: () -> Optional[MetricsAggregator]
-     client = sentry_sdk.get_client()
-     return (
-         client.metrics_aggregator
-         if client.is_active() and client.metrics_aggregator is not None
-         else None
-     )
-
-
- def _get_aggregator_and_update_tags(key, value, unit, tags):
-     # type: (str, Optional[MetricValue], MeasurementUnit, Optional[MetricTags]) -> Tuple[Optional[MetricsAggregator], Optional[LocalAggregator], Optional[MetricTags]]
-     client = sentry_sdk.get_client()
-     if not client.is_active() or client.metrics_aggregator is None:
-         return None, None, tags
-
-     updated_tags = dict(tags or ())  # type: Dict[str, MetricTagValue]
-     updated_tags.setdefault("release", client.options["release"])
-     updated_tags.setdefault("environment", client.options["environment"])
-
-     scope = sentry_sdk.get_current_scope()
-     local_aggregator = None
-
-     # We go with the low-level API here to access transaction information as
-     # this one is the same between just errors and errors + performance
-     transaction_source = scope._transaction_info.get("source")
-     if transaction_source in GOOD_TRANSACTION_SOURCES:
-         transaction_name = scope._transaction
-         if transaction_name:
-             updated_tags.setdefault("transaction", transaction_name)
-         if scope._span is not None:
-             local_aggregator = scope._span._get_local_aggregator()
-
-     experiments = client.options.get("_experiments", {})
-     before_emit_callback = experiments.get("before_emit_metric")
-     if before_emit_callback is not None:
-         with recursion_protection() as in_metrics:
-             if not in_metrics:
-                 if not before_emit_callback(key, value, unit, updated_tags):
-                     return None, None, updated_tags
-
-     return client.metrics_aggregator, local_aggregator, updated_tags
-
-
- def increment(
-     key,  # type: str
-     value=1.0,  # type: float
-     unit="none",  # type: MeasurementUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> None
-     """Increments a counter."""
-     aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-         key, value, unit, tags
-     )
-     if aggregator is not None:
-         aggregator.add(
-             "c", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-         )
-
-
- # alias as incr is relatively common in python
- incr = increment
-
-
- class _Timing:
-     def __init__(
-         self,
-         key,  # type: str
-         tags,  # type: Optional[MetricTags]
-         timestamp,  # type: Optional[Union[float, datetime]]
-         value,  # type: Optional[float]
-         unit,  # type: DurationUnit
-         stacklevel,  # type: int
-     ):
-         # type: (...) -> None
-         self.key = key
-         self.tags = tags
-         self.timestamp = timestamp
-         self.value = value
-         self.unit = unit
-         self.entered = None  # type: Optional[float]
-         self._span = None  # type: Optional[sentry_sdk.tracing.Span]
-         self.stacklevel = stacklevel
-
-     def _validate_invocation(self, context):
-         # type: (str) -> None
-         if self.value is not None:
-             raise TypeError(
-                 "cannot use timing as %s when a value is provided" % context
-             )
-
-     def __enter__(self):
-         # type: (...) -> _Timing
-         self.entered = TIMING_FUNCTIONS[self.unit]()
-         self._validate_invocation("context-manager")
-         self._span = sentry_sdk.start_span(op="metric.timing", name=self.key)
-         if self.tags:
-             for key, value in self.tags.items():
-                 if isinstance(value, (tuple, list)):
-                     value = ",".join(sorted(map(str, value)))
-                 self._span.set_tag(key, value)
-         self._span.__enter__()
-
-         # report code locations here for better accuracy
-         aggregator = _get_aggregator()
-         if aggregator is not None:
-             aggregator.record_code_location("d", self.key, self.unit, self.stacklevel)
-
-         return self
-
-     def __exit__(self, exc_type, exc_value, tb):
-         # type: (Any, Any, Any) -> None
-         assert self._span, "did not enter"
-         aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-             self.key,
-             self.value,
-             self.unit,
-             self.tags,
-         )
-         if aggregator is not None:
-             elapsed = TIMING_FUNCTIONS[self.unit]() - self.entered  # type: ignore
-             aggregator.add(
-                 "d",
-                 self.key,
-                 elapsed,
-                 self.unit,
-                 tags,
-                 self.timestamp,
-                 local_aggregator,
-                 None,  # code locations are reported in __enter__
-             )
-
-         self._span.__exit__(exc_type, exc_value, tb)
-         self._span = None
-
-     def __call__(self, f):
-         # type: (Any) -> Any
-         self._validate_invocation("decorator")
-
-         @wraps(f)
-         def timed_func(*args, **kwargs):
-             # type: (*Any, **Any) -> Any
-             with timing(
-                 key=self.key,
-                 tags=self.tags,
-                 timestamp=self.timestamp,
-                 unit=self.unit,
-                 stacklevel=self.stacklevel + 1,
-             ):
-                 return f(*args, **kwargs)
-
-         return timed_func
-
-
- def timing(
-     key,  # type: str
-     value=None,  # type: Optional[float]
-     unit="second",  # type: DurationUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> _Timing
-     """Emits a distribution with the time it takes to run the given code block.
-
-     This method supports three forms of invocation:
-
-     - when a `value` is provided, it functions similar to `distribution` but with
-     - it can be used as a context manager
-     - it can be used as a decorator
-     """
-     if value is not None:
-         aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-             key, value, unit, tags
-         )
-         if aggregator is not None:
-             aggregator.add(
-                 "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-             )
-     return _Timing(key, tags, timestamp, value, unit, stacklevel)
-
-
- def distribution(
-     key,  # type: str
-     value,  # type: float
-     unit="none",  # type: MeasurementUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> None
-     """Emits a distribution."""
-     aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-         key, value, unit, tags
-     )
-     if aggregator is not None:
-         aggregator.add(
-             "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-         )
-
-
- def set(
-     key,  # type: str
-     value,  # type: Union[int, str]
-     unit="none",  # type: MeasurementUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> None
-     """Emits a set."""
-     aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-         key, value, unit, tags
-     )
-     if aggregator is not None:
-         aggregator.add(
-             "s", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-         )
-
-
- def gauge(
-     key,  # type: str
-     value,  # type: float
-     unit="none",  # type: MeasurementUnit
-     tags=None,  # type: Optional[MetricTags]
-     timestamp=None,  # type: Optional[Union[float, datetime]]
-     stacklevel=0,  # type: int
- ):
-     # type: (...) -> None
-     """Emits a gauge."""
-     aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
-         key, value, unit, tags
-     )
-     if aggregator is not None:
-         aggregator.add(
-             "g", key, value, unit, tags, timestamp, local_aggregator, stacklevel
-         )