scout_apm-3.3.0-cp38-cp38-musllinux_1_2_i686.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. scout_apm/__init__.py +0 -0
  2. scout_apm/api/__init__.py +197 -0
  3. scout_apm/async_/__init__.py +1 -0
  4. scout_apm/async_/api.py +41 -0
  5. scout_apm/async_/instruments/__init__.py +0 -0
  6. scout_apm/async_/instruments/jinja2.py +13 -0
  7. scout_apm/async_/starlette.py +101 -0
  8. scout_apm/bottle.py +86 -0
  9. scout_apm/celery.py +153 -0
  10. scout_apm/compat.py +104 -0
  11. scout_apm/core/__init__.py +99 -0
  12. scout_apm/core/_objtrace.cpython-38-i386-linux-gnu.so +0 -0
  13. scout_apm/core/agent/__init__.py +0 -0
  14. scout_apm/core/agent/commands.py +250 -0
  15. scout_apm/core/agent/manager.py +319 -0
  16. scout_apm/core/agent/socket.py +211 -0
  17. scout_apm/core/backtrace.py +116 -0
  18. scout_apm/core/cli/__init__.py +0 -0
  19. scout_apm/core/cli/core_agent_manager.py +32 -0
  20. scout_apm/core/config.py +404 -0
  21. scout_apm/core/context.py +140 -0
  22. scout_apm/core/error.py +95 -0
  23. scout_apm/core/error_service.py +167 -0
  24. scout_apm/core/metadata.py +66 -0
  25. scout_apm/core/n_plus_one_tracker.py +41 -0
  26. scout_apm/core/objtrace.py +24 -0
  27. scout_apm/core/platform_detection.py +66 -0
  28. scout_apm/core/queue_time.py +99 -0
  29. scout_apm/core/sampler.py +149 -0
  30. scout_apm/core/samplers/__init__.py +0 -0
  31. scout_apm/core/samplers/cpu.py +76 -0
  32. scout_apm/core/samplers/memory.py +23 -0
  33. scout_apm/core/samplers/thread.py +41 -0
  34. scout_apm/core/stacktracer.py +30 -0
  35. scout_apm/core/threading.py +56 -0
  36. scout_apm/core/tracked_request.py +328 -0
  37. scout_apm/core/web_requests.py +167 -0
  38. scout_apm/django/__init__.py +7 -0
  39. scout_apm/django/apps.py +137 -0
  40. scout_apm/django/instruments/__init__.py +0 -0
  41. scout_apm/django/instruments/huey.py +30 -0
  42. scout_apm/django/instruments/sql.py +140 -0
  43. scout_apm/django/instruments/template.py +35 -0
  44. scout_apm/django/middleware.py +211 -0
  45. scout_apm/django/request.py +144 -0
  46. scout_apm/dramatiq.py +42 -0
  47. scout_apm/falcon.py +142 -0
  48. scout_apm/flask/__init__.py +118 -0
  49. scout_apm/flask/sqlalchemy.py +28 -0
  50. scout_apm/huey.py +54 -0
  51. scout_apm/hug.py +40 -0
  52. scout_apm/instruments/__init__.py +21 -0
  53. scout_apm/instruments/elasticsearch.py +263 -0
  54. scout_apm/instruments/jinja2.py +127 -0
  55. scout_apm/instruments/pymongo.py +105 -0
  56. scout_apm/instruments/redis.py +77 -0
  57. scout_apm/instruments/urllib3.py +80 -0
  58. scout_apm/rq.py +85 -0
  59. scout_apm/sqlalchemy.py +38 -0
  60. scout_apm-3.3.0.dist-info/LICENSE +21 -0
  61. scout_apm-3.3.0.dist-info/METADATA +82 -0
  62. scout_apm-3.3.0.dist-info/RECORD +65 -0
  63. scout_apm-3.3.0.dist-info/WHEEL +5 -0
  64. scout_apm-3.3.0.dist-info/entry_points.txt +2 -0
  65. scout_apm-3.3.0.dist-info/top_level.txt +1 -0
scout_apm/core/error_service.py
@@ -0,0 +1,167 @@
+ # coding=utf-8
+
+ import json
+ import logging
+ import os
+ import threading
+ import time
+
+ from scout_apm.compat import (
+     escape,
+     gzip_compress,
+     queue,
+     urlencode,
+     urljoin,
+     urllib3_cert_pool_manager,
+ )
+ from scout_apm.core.config import scout_config
+ from scout_apm.core.threading import SingletonThread
+
+ # Time unit - monkey-patched in tests to make them run faster
+ SECOND = 1
+
+ logger = logging.getLogger(__name__)
+
+
+ class ErrorServiceThread(SingletonThread):
+     _instance_lock = threading.Lock()
+     _stop_event = threading.Event()
+     _queue = queue.Queue(maxsize=500)
+
+     @classmethod
+     def _on_stop(cls):
+         super(ErrorServiceThread, cls)._on_stop()
+         # Unblock _queue.get()
+         try:
+             cls._queue.put(None, False)
+         except queue.Full as exc:
+             logger.debug("ErrorServiceThread full for stop: %r", exc, exc_info=exc)
+             pass
+
+     @classmethod
+     def send(cls, error):
+         try:
+             cls._queue.put(error, False)
+         except queue.Full as exc:
+             logger.debug("ErrorServiceThread full for send: %r", exc, exc_info=exc)
+
+         cls.ensure_started()
+
+     @classmethod
+     def wait_until_drained(cls, timeout_seconds=2.0, callback=None):
+         interval_seconds = min(timeout_seconds, 0.05)
+         start = time.time()
+         while True:
+             queue_size = cls._queue.qsize()
+             queue_empty = queue_size == 0
+             elapsed = time.time() - start
+             if queue_empty or elapsed >= timeout_seconds:
+                 break
+
+             if callback is not None:
+                 callback(queue_size)
+                 callback = None
+
+             cls.ensure_started()
+
+             time.sleep(interval_seconds)
+         return queue_empty
+
+     def run(self):
+         batch_size = scout_config.value("errors_batch_size") or 1
+         http = urllib3_cert_pool_manager()
+         try:
+             while True:
+                 errors = []
+                 try:
+                     # Attempt to fetch the batch size off of the queue.
+                     for _ in range(batch_size):
+                         error = self._queue.get(block=True, timeout=1 * SECOND)
+                         if error:
+                             errors.append(error)
+                 except queue.Empty:
+                     pass
+
+                 if errors and self._send(http, errors):
+                     for _ in range(len(errors)):
+                         self._queue.task_done()
+
+                 # Check for stop event after each read. This allows opening,
+                 # sending, and then immediately stopping.
+                 if self._stop_event.is_set():
+                     logger.debug("ErrorServiceThread stopping.")
+                     break
+         except Exception as exc:
+             logger.debug("ErrorServiceThread exception: %r", exc, exc_info=exc)
+         finally:
+             http.clear()
+             logger.debug("ErrorServiceThread stopped.")
+
+     def _send(self, http, errors):
+         try:
+             data = json.dumps(
+                 {
+                     "notifier": "scout_apm_python",
+                     "environment": scout_config.value("environment"),
+                     "root": scout_config.value("application_root"),
+                     "problems": errors,
+                 }
+             ).encode("utf-8")
+         except (ValueError, TypeError) as exc:
+             logger.debug(
+                 "Exception when serializing error message: %r", exc, exc_info=exc
+             )
+             return False
+
+         params = {
+             "key": scout_config.value("key"),
+             "name": escape(scout_config.value("name"), quote=False),
+         }
+         headers = {
+             "Agent-Hostname": scout_config.value("hostname") or "",
+             "Content-Encoding": "gzip",
+             "Content-Type": "application/json",
+             "X-Error-Count": "{}".format(len(errors)),
+         }
+
+         encoded_args = urlencode(params)
+         full_url = urljoin(
+             scout_config.value("errors_host"), "apps/error.scout"
+         ) + "?{}".format(encoded_args)
+
+         try:
+             # urllib3 requires all parameters to be the same type for
+             # python 2.7.
+             # Since gzip can only return a str, convert all unicode instances
+             # to str.
+             response = http.request(
+                 str("POST"),
+                 str(full_url),
+                 body=gzip_compress(data),
+                 headers={str(key): str(value) for key, value in headers.items()},
+             )
+             if response.status >= 400:
+                 logger.debug(
+                     (
+                         "ErrorServiceThread %r response error on _send:"
+                         + " %r on PID: %s on thread: %s"
+                     ),
+                     response.status,
+                     response.data,
+                     os.getpid(),
+                     threading.current_thread(),
+                 )
+                 return False
+         except Exception as exc:
+             logger.debug(
+                 (
+                     "ErrorServiceThread exception on _send:"
+                     + " %r on PID: %s on thread: %s"
+                 ),
+                 exc,
+                 os.getpid(),
+                 threading.current_thread(),
+                 exc_info=exc,
+             )
+             return False
+         return True
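
A minimal usage sketch of the error-reporting queue above (editor's illustration, not part of the wheel contents; assumes a configured scout_apm environment, and the payload fields are hypothetical since the thread only requires JSON-serializable items):

    from scout_apm.core.error_service import ErrorServiceThread

    # Any JSON-serializable object can be queued; _send() passes the batch
    # straight to json.dumps() under the "problems" key. Field names here
    # are made up for illustration.
    error_payload = {
        "exception_class": "ValueError",
        "message": "invalid literal for int() with base 10: 'abc'",
    }

    # Enqueue (silently dropping if the 500-item queue is full) and lazily
    # start the singleton background thread that batches and POSTs errors.
    ErrorServiceThread.send(error_payload)

    # At shutdown, block up to 2 seconds for the queue to empty.
    drained = ErrorServiceThread.wait_until_drained(timeout_seconds=2.0)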
scout_apm/core/metadata.py
@@ -0,0 +1,66 @@
+ # coding=utf-8
+
+ import datetime as dt
+ import sys
+ from os import getpid
+
+ from scout_apm.core.agent.commands import ApplicationEvent, format_dt_for_core_agent
+ from scout_apm.core.agent.socket import CoreAgentSocketThread
+ from scout_apm.core.config import scout_config
+
+
+ def report_app_metadata():
+     CoreAgentSocketThread.send(
+         ApplicationEvent(
+             event_type="scout.metadata",
+             event_value=get_metadata(),
+             source="Pid: " + str(getpid()),
+             timestamp=dt.datetime.now(dt.timezone.utc),
+         )
+     )
+
+
+ def get_metadata():
+     data = {
+         "language": "python",
+         "language_version": "{}.{}.{}".format(*sys.version_info[:3]),
+         "server_time": format_dt_for_core_agent(dt.datetime.now(dt.timezone.utc)),
+         "framework": scout_config.value("framework"),
+         "framework_version": scout_config.value("framework_version"),
+         "environment": "",
+         "app_server": scout_config.value("app_server"),
+         "hostname": scout_config.value("hostname"),
+         "database_engine": "",
+         "database_adapter": "",
+         "application_name": "",
+         "libraries": get_python_packages_versions(),
+         "paas": "",
+         "application_root": scout_config.value("application_root"),
+         "scm_subdirectory": scout_config.value("scm_subdirectory"),
+         "git_sha": scout_config.value("revision_sha"),
+     }
+     # Deprecated - see #327:
+     data["version"] = data["language_version"]
+     return data
+
+
+ def get_python_packages_versions():
+     try:
+         from importlib.metadata import distributions
+     except ImportError:
+         # For some reason it is unavailable
+         return []
+
+     return sorted(
+         (
+             distribution.metadata["Name"],
+             (distribution.metadata["Version"] or "Unknown"),
+         )
+         for distribution in distributions()
+         # Filter out distributions with None for name or value. This can be the
+         # case for packages without a METADATA or PKG-INFO file in their relevant
+         # distribution directory. According to comments in importlib.metadata
+         # internals this is possible for certain old packages, but I could only
+         # recreate it by deliberately deleting said files.
+         if distribution.metadata["Name"]
+     )
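
A small sketch of the library inventory reported above (editor's illustration; on CPython 3.8+, where importlib.metadata is in the standard library, get_python_packages_versions() runs standalone):

    from scout_apm.core.metadata import get_python_packages_versions

    # Sorted list of (name, version) tuples for every installed distribution
    # that has a readable Name field, e.g. ("psutil", "5.9.5").
    for name, version in get_python_packages_versions()[:5]:
        print(name, version)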
scout_apm/core/n_plus_one_tracker.py
@@ -0,0 +1,41 @@
+ # coding=utf-8
+
+ from collections import defaultdict
+
+
+ class NPlusOneTrackedItem(object):
+     __slots__ = ("count", "duration", "captured")
+
+     def __init__(self):
+         self.count = 0
+         self.duration = 0.0
+         self.captured = False
+
+
+ class NPlusOneTracker(object):
+     # Fetch backtraces on this number of same SQL calls
+     COUNT_THRESHOLD = 5
+
+     # Minimum time in seconds before we start performing any work.
+     DURATION_THRESHOLD = 0.150
+
+     __slots__ = ("_map",)
+
+     def __init__(self):
+         self._map = defaultdict(NPlusOneTrackedItem)
+
+     def should_capture_backtrace(self, sql, duration, count=1):
+         item = self._map[sql]
+         if item.captured:
+             return False
+
+         item.duration += duration
+         item.count += count
+
+         if (
+             item.duration >= self.DURATION_THRESHOLD
+             and item.count >= self.COUNT_THRESHOLD
+         ):
+             item.captured = True
+             return True
+         return False
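
A worked example of the two-threshold logic above (editor's illustration): a backtrace is captured at most once per statement, and only after the same SQL has run at least COUNT_THRESHOLD times and accumulated at least DURATION_THRESHOLD seconds:

    from scout_apm.core.n_plus_one_tracker import NPlusOneTracker

    tracker = NPlusOneTracker()
    sql = "SELECT * FROM users WHERE id = %s"

    # Four identical 50ms calls: total duration (0.20s) is past 0.150s,
    # but the count (4) is still under 5, so nothing is captured yet.
    for _ in range(4):
        assert tracker.should_capture_backtrace(sql, duration=0.05) is False

    # The fifth call crosses both thresholds, so it returns True exactly once.
    assert tracker.should_capture_backtrace(sql, duration=0.05) is True
    # After capture, the item is marked and all later calls return False.
    assert tracker.should_capture_backtrace(sql, duration=1.0) is False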
scout_apm/core/objtrace.py
@@ -0,0 +1,24 @@
+ # coding=utf-8
+
+ # Try to use the C extension, but if it isn't available, provide a dummy
+ # implementation.
+
+ try:
+     from scout_apm.core._objtrace import disable, enable, get_counts, reset_counts
+ except ImportError:  # pragma: no cover
+
+     def enable():
+         pass
+
+     def disable():
+         pass
+
+     def get_counts():
+         return (0, 0, 0, 0)
+
+     def reset_counts():
+         pass
+
+     is_extension = False
+ else:  # pragma: no cover
+     is_extension = True
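
A short sketch of the fallback-import pattern in use (editor's illustration; callers never branch on whether the C extension loaded, because the dummy functions satisfy the same interface):

    from scout_apm.core import objtrace

    objtrace.enable()
    garbage = [list() for _ in range(100)]  # allocate some objects
    counts = objtrace.get_counts()  # 4-tuple; all zeros under the dummy fallback
    objtrace.disable()
    objtrace.reset_counts()
    print("C extension loaded:", objtrace.is_extension, "counts:", counts)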
scout_apm/core/platform_detection.py
@@ -0,0 +1,66 @@
+ # coding=utf-8
+
+ import platform
+
+
+ def is_valid_triple(triple):
+     values = triple.split("-", 1)
+     return (
+         len(values) == 2
+         and values[0] in ("i686", "x86_64", "aarch64", "unknown")
+         and values[1]
+         in ("unknown-linux-gnu", "unknown-linux-musl", "apple-darwin", "unknown")
+         # Validate _apple_darwin_aarch64_override was applied.
+         and triple != "aarch64-apple-darwin"
+     )
+
+
+ def _apple_darwin_aarch64_override(triple):
+     """
+     If using an M1 ARM64 machine, still use x86_64.
+     See https://github.com/scoutapp/scout_apm_python/issues/683
+     """
+     if triple == "aarch64-apple-darwin":
+         return "x86_64-apple-darwin"
+     return triple
+
+
+ def get_triple():
+     return _apple_darwin_aarch64_override(
+         "{arch}-{platform}".format(arch=get_arch(), platform=get_platform())
+     )
+
+
+ def get_arch():
+     """
+     What CPU are we on?
+     """
+     arch = platform.machine()
+     if arch == "i686":
+         return "i686"
+     elif arch == "x86_64":
+         return "x86_64"
+     elif arch == "aarch64":
+         return "aarch64"
+     elif arch == "arm64":
+         # We treat these as synonymous and name them "aarch64" for consistency.
+         # macOS, for example, uses "arm64".
+         return "aarch64"
+     else:
+         return "unknown"
+
+
+ def get_platform():
+     """
+     What operating system (and libc variant, like glibc or musl) are we on?
+     """
+     system_name = platform.system()
+     if system_name == "Linux":
+         # Previously we'd use either "-gnu" or "-musl" to indicate which
+         # version of libc we were built against. We now default to musl
+         # since it reliably works on all platforms.
+         return "unknown-linux-musl"
+     elif system_name == "Darwin":
+         return "apple-darwin"
+     else:
+         return "unknown"
scout_apm/core/queue_time.py
@@ -0,0 +1,99 @@
+ # coding=utf-8
+
+ import datetime as dt
+ import logging
+ import time
+ import typing
+
+ from scout_apm.compat import datetime_to_timestamp
+ from scout_apm.core.tracked_request import TrackedRequest
+
+ logger = logging.getLogger(__name__)
+
+ # Cutoff epoch is used for determining ambiguous timestamp boundaries
+ CUTOFF_EPOCH_S = time.mktime((dt.date.today().year - 10, 1, 1, 0, 0, 0, 0, 0, 0))
+ CUTOFF_EPOCH_MS = CUTOFF_EPOCH_S * 1000.0
+ CUTOFF_EPOCH_US = CUTOFF_EPOCH_S * 1000000.0
+ CUTOFF_EPOCH_NS = CUTOFF_EPOCH_S * 1000000000.0
+
+
+ def _convert_ambiguous_timestamp_to_ns(timestamp: float) -> float:
+     """
+     Convert an ambiguous float timestamp that could be in nanoseconds,
+     microseconds, milliseconds, or seconds to nanoseconds. Return 0.0 for
+     values more than 10 years old.
+     """
+     if timestamp > CUTOFF_EPOCH_NS:
+         converted_timestamp = timestamp
+     elif timestamp > CUTOFF_EPOCH_US:
+         converted_timestamp = timestamp * 1000.0
+     elif timestamp > CUTOFF_EPOCH_MS:
+         converted_timestamp = timestamp * 1000000.0
+     elif timestamp > CUTOFF_EPOCH_S:
+         converted_timestamp = timestamp * 1000000000.0
+     else:
+         return 0.0
+     return converted_timestamp
+
+
+ def track_request_queue_time(
+     header_value: typing.Any, tracked_request: TrackedRequest
+ ) -> bool:
+     """
+     Attempt to parse a queue time header and store the result in the tracked request.
+
+     Returns:
+         bool: Whether we succeeded in marking queue time. Used for testing.
+     """
+     if header_value.startswith("t="):
+         header_value = header_value[2:]
+
+     try:
+         first_char = header_value[0]
+     except IndexError:
+         return False
+
+     if not first_char.isdigit():  # filter out negatives, nan, inf, etc.
+         return False
+
+     try:
+         ambiguous_start_timestamp = float(header_value)
+     except ValueError:
+         return False
+
+     start_timestamp_ns = _convert_ambiguous_timestamp_to_ns(ambiguous_start_timestamp)
+     if start_timestamp_ns == 0.0:
+         return False
+
+     tr_start_timestamp_ns = datetime_to_timestamp(tracked_request.start_time) * 1e9
+
+     # Ignore if in the future
+     if start_timestamp_ns > tr_start_timestamp_ns:
+         return False
+
+     queue_time_ns = int(tr_start_timestamp_ns - start_timestamp_ns)
+     tracked_request.tag("scout.queue_time_ns", queue_time_ns)
+     return True
+
+
+ def track_job_queue_time(
+     header_value: typing.Any, tracked_request: TrackedRequest
+ ) -> bool:
+     """
+     Attempt to parse a queue/latency time header and store the result in the request.
+
+     Returns:
+         bool: Whether we succeeded in marking queue time for the job. Used for testing.
+     """
+     if header_value is not None:
+         now = datetime_to_timestamp(dt.datetime.now(dt.timezone.utc)) * 1e9
+         try:
+             ambiguous_float_start = typing.cast(float, header_value)
+             start = _convert_ambiguous_timestamp_to_ns(ambiguous_float_start)
+             queue_time_ns = int(now - start)
+         except TypeError:
+             logger.debug("Invalid job queue time header: %r", header_value)
+             return False
+         else:
+             tracked_request.tag("scout.job_queue_time_ns", queue_time_ns)
+             return True
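
A sketch of the unit-guessing heuristic above (editor's illustration of the module-private helper): the same instant normalizes to roughly the same nanosecond value whichever unit a proxy happened to send:

    import time

    from scout_apm.core.queue_time import _convert_ambiguous_timestamp_to_ns

    now_s = time.time()
    for value in (now_s, now_s * 1e3, now_s * 1e6, now_s * 1e9):
        # Seconds, milliseconds, microseconds, and nanoseconds all land on
        # approximately the same nanosecond timestamp.
        print(_convert_ambiguous_timestamp_to_ns(value))

    # Anything older than the ten-year cutoff is rejected as noise.
    assert _convert_ambiguous_timestamp_to_ns(12345.0) == 0.0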
scout_apm/core/sampler.py
@@ -0,0 +1,149 @@
+ # coding=utf-8
+
+ import random
+ from typing import Dict, Optional, Tuple
+
+
+ class Sampler:
+     """
+     Handles sampling decision logic for Scout APM.
+
+     This class encapsulates all sampling-related functionality including:
+     - Loading and managing sampling configuration
+     - Pattern matching for operations (endpoints and jobs)
+     - Making sampling decisions based on operation type and patterns
+     """
+
+     # Constants for operation type detection
+     CONTROLLER_PREFIX = "Controller/"
+     JOB_PREFIX = "Job/"
+
+     def __init__(self, config):
+         """
+         Initialize sampler with Scout configuration.
+
+         Args:
+             config: ScoutConfig instance containing sampling configuration
+         """
+         self.config = config
+         self.sample_rate = config.value("sample_rate")
+         self.sample_endpoints = config.value("sample_endpoints")
+         self.sample_jobs = config.value("sample_jobs")
+         self.ignore_endpoints = set(
+             config.value("ignore_endpoints") + config.value("ignore")
+         )
+         self.ignore_jobs = set(config.value("ignore_jobs"))
+         self.endpoint_sample_rate = config.value("endpoint_sample_rate")
+         self.job_sample_rate = config.value("job_sample_rate")
+
+     def _any_sampling(self):
+         """
+         Check if any sampling is enabled.
+
+         Returns:
+             Boolean indicating if any sampling is enabled
+         """
+         return (
+             self.sample_rate < 100
+             or self.sample_endpoints
+             or self.sample_jobs
+             or self.ignore_endpoints
+             or self.ignore_jobs
+             or self.endpoint_sample_rate is not None
+             or self.job_sample_rate is not None
+         )
+
+     def _find_matching_rate(
+         self, name: str, patterns: Dict[str, float]
+     ) -> Optional[float]:
+         """
+         Finds the matching sample rate for a given operation name.
+
+         Args:
+             name: The operation name to match
+             patterns: Dictionary of pattern to sample rate mappings
+
+         Returns:
+             The sample rate for the matching pattern or None if no match found
+         """
+
+         for pattern, rate in patterns.items():
+             if name.startswith(pattern):
+                 return rate
+         return None
+
+     def _get_operation_type_and_name(
+         self, operation: str
+     ) -> Tuple[Optional[str], Optional[str]]:
+         """
+         Determines if an operation is an endpoint or job and extracts its name.
+
+         Args:
+             operation: The full operation string (e.g. "Controller/users/show")
+
+         Returns:
+             Tuple of (type, name) where type is either 'endpoint' or 'job',
+             and name is the operation name without the prefix
+         """
+         if operation.startswith(self.CONTROLLER_PREFIX):
+             return "endpoint", operation[len(self.CONTROLLER_PREFIX) :]
+         elif operation.startswith(self.JOB_PREFIX):
+             return "job", operation[len(self.JOB_PREFIX) :]
+         else:
+             return None, None
+
+     def get_effective_sample_rate(self, operation: str, is_ignored: bool) -> int:
+         """
+         Determines the effective sample rate for a given operation.
+
+         Prioritization:
+         1. Sampling rate for specific endpoint or job
+         2. Specified ignore pattern or flag for operation
+         3. Global endpoint or job sample rate
+         4. Global sample rate
+
+         Args:
+             operation: The operation string (e.g. "Controller/users/show")
+             is_ignored: boolean for if the specific transaction is ignored
+
+         Returns:
+             Integer between 0 and 100 representing sample rate
+         """
+         op_type, name = self._get_operation_type_and_name(operation)
+         patterns = self.sample_endpoints if op_type == "endpoint" else self.sample_jobs
+         ignores = self.ignore_endpoints if op_type == "endpoint" else self.ignore_jobs
+         default_operation_rate = (
+             self.endpoint_sample_rate if op_type == "endpoint" else self.job_sample_rate
+         )
+
+         if not op_type or not name:
+             return self.sample_rate
+         matching_rate = self._find_matching_rate(name, patterns)
+         if matching_rate is not None:
+             return matching_rate
+         for prefix in ignores:
+             if name.startswith(prefix) or is_ignored:
+                 return 0
+         if default_operation_rate is not None:
+             return default_operation_rate
+
+         # Fall back to global sample rate
+         return self.sample_rate
+
+     def should_sample(self, operation: str, is_ignored: bool) -> bool:
+         """
+         Determines if an operation should be sampled.
+         If no sampling is enabled, always return True.
+
+         Args:
+             operation: The operation string (e.g. "Controller/users/show"
+                 or "Job/mailer")
+
+         Returns:
+             Boolean indicating whether to sample this operation
+         """
+         if not self._any_sampling():
+             return True
+         return random.randint(1, 100) <= self.get_effective_sample_rate(
+             operation, is_ignored
+         )
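
A worked example of the prioritization order above (editor's illustration; StubConfig is a hypothetical stand-in that only mimics the ScoutConfig.value() lookups the constructor performs):

    from scout_apm.core.sampler import Sampler

    class StubConfig:  # hypothetical stand-in for ScoutConfig
        _values = {
            "sample_rate": 100,                  # global default: keep everything
            "sample_endpoints": {"users/": 80},  # per-prefix endpoint rate
            "sample_jobs": {},
            "ignore_endpoints": ["health"],
            "ignore": [],
            "ignore_jobs": [],
            "endpoint_sample_rate": None,
            "job_sample_rate": 20,               # all jobs sampled at 20%
        }

        def value(self, key):
            return self._values[key]

    sampler = Sampler(StubConfig())
    # 1. A specific endpoint pattern wins over everything else.
    assert sampler.get_effective_sample_rate("Controller/users/show", False) == 80
    # 2. An ignore prefix zeroes out the rate.
    assert sampler.get_effective_sample_rate("Controller/health", False) == 0
    # 3. With no pattern match, jobs fall through to the job-level default.
    assert sampler.get_effective_sample_rate("Job/mailer", False) == 20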
scout_apm/core/samplers/__init__.py (file without changes)
scout_apm/core/samplers/cpu.py
@@ -0,0 +1,76 @@
+ # coding=utf-8
+
+ import datetime as dt
+ import logging
+
+ import psutil
+
+ logger = logging.getLogger(__name__)
+
+
+ class Cpu(object):
+     metric_type = "CPU"
+     metric_name = "Utilization"
+     human_name = "Process CPU"
+
+     def __init__(self):
+         self.last_run = dt.datetime.now(dt.timezone.utc)
+         self.last_cpu_times = psutil.Process().cpu_times()
+         self.num_processors = psutil.cpu_count()
+         if self.num_processors is None:
+             logger.debug("Could not determine CPU count - assuming there is one.")
+             self.num_processors = 1
+
+     def run(self):
+         now = dt.datetime.now(dt.timezone.utc)
+         process = psutil.Process()  # get a handle on the current process
+         cpu_times = process.cpu_times()
+
+         wall_clock_elapsed = (now - self.last_run).total_seconds()
+         if wall_clock_elapsed < 0:
+             self.save_times(now, cpu_times)
+             logger.debug(
+                 "%s: Negative time elapsed. now: %s, last_run: %s.",
+                 self.human_name,
+                 now,
+                 self.last_run,
+             )
+             return None
+
+         utime_elapsed = cpu_times.user - self.last_cpu_times.user
+         stime_elapsed = cpu_times.system - self.last_cpu_times.system
+         process_elapsed = utime_elapsed + stime_elapsed
+
+         # This can happen right after a fork. This class starts up in
+         # pre-fork, records {u,s}time, then forks. This resets {u,s}time to 0
+         if process_elapsed < 0:
+             self.save_times(now, cpu_times)
+             logger.debug(
+                 "%s: Negative process time elapsed. "
+                 "utime: %s, stime: %s, total time: %s. "
+                 "This is normal to see when starting a forking web server.",
+                 self.human_name,
+                 utime_elapsed,
+                 stime_elapsed,
+                 process_elapsed,
+             )
+             return None
+
+         # Normalized to # of processors
+         normalized_wall_clock_elapsed = wall_clock_elapsed * self.num_processors
+
+         # If somehow we run for 0 seconds between calls, don't try to divide by 0
+         if normalized_wall_clock_elapsed == 0:
+             res = 0
+         else:
+             res = (process_elapsed / normalized_wall_clock_elapsed) * 100
+
+         self.save_times(now, cpu_times)
+
+         logger.debug("%s: %s [%s CPU(s)]", self.human_name, res, self.num_processors)
+
+         return res
+
+     def save_times(self, now, cpu_times):
+         self.last_run = now
+         self.last_cpu_times = cpu_times
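
A minimal sampling-loop sketch (editor's illustration; requires psutil). The first run() covers the interval since construction; each later call covers the interval since the previous call:

    import time

    from scout_apm.core.samplers.cpu import Cpu

    cpu = Cpu()
    busy = sum(i * i for i in range(2_000_000))  # burn some CPU time
    time.sleep(1)
    # Percentage of processor-normalized wall-clock time, or None for the
    # degenerate negative-elapsed cases handled above.
    print("Process CPU:", cpu.run())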