c2cwsgiutils 6.1.0.dev105__py3-none-any.whl → 6.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- c2cwsgiutils/__init__.py +14 -11
- c2cwsgiutils/acceptance/__init__.py +2 -3
- c2cwsgiutils/acceptance/connection.py +1 -2
- c2cwsgiutils/acceptance/image.py +17 -11
- c2cwsgiutils/acceptance/package-lock.json +306 -213
- c2cwsgiutils/acceptance/package.json +2 -2
- c2cwsgiutils/acceptance/print.py +7 -3
- c2cwsgiutils/acceptance/utils.py +1 -3
- c2cwsgiutils/auth.py +27 -25
- c2cwsgiutils/broadcast/__init__.py +15 -16
- c2cwsgiutils/broadcast/interface.py +3 -3
- c2cwsgiutils/broadcast/local.py +1 -0
- c2cwsgiutils/broadcast/redis.py +13 -12
- c2cwsgiutils/client_info.py +19 -1
- c2cwsgiutils/coverage_setup.py +4 -3
- c2cwsgiutils/db.py +35 -41
- c2cwsgiutils/db_maintenance_view.py +13 -13
- c2cwsgiutils/debug/__init__.py +2 -2
- c2cwsgiutils/debug/_listeners.py +2 -7
- c2cwsgiutils/debug/_views.py +20 -12
- c2cwsgiutils/debug/utils.py +9 -9
- c2cwsgiutils/errors.py +13 -15
- c2cwsgiutils/health_check.py +24 -30
- c2cwsgiutils/index.py +34 -13
- c2cwsgiutils/loader.py +21 -2
- c2cwsgiutils/logging_view.py +12 -12
- c2cwsgiutils/models_graph.py +0 -1
- c2cwsgiutils/pretty_json.py +0 -1
- c2cwsgiutils/prometheus.py +10 -10
- c2cwsgiutils/pyramid.py +0 -1
- c2cwsgiutils/pyramid_logging.py +1 -1
- c2cwsgiutils/redis_stats.py +9 -9
- c2cwsgiutils/redis_utils.py +19 -18
- c2cwsgiutils/request_tracking/__init__.py +13 -13
- c2cwsgiutils/request_tracking/_sql.py +0 -1
- c2cwsgiutils/scripts/genversion.py +5 -5
- c2cwsgiutils/scripts/stats_db.py +19 -17
- c2cwsgiutils/scripts/test_print.py +5 -5
- c2cwsgiutils/sentry.py +55 -20
- c2cwsgiutils/services.py +2 -2
- c2cwsgiutils/setup_process.py +0 -1
- c2cwsgiutils/sql_profiler/__init__.py +5 -6
- c2cwsgiutils/sql_profiler/_impl.py +18 -17
- c2cwsgiutils/sqlalchemylogger/README.md +30 -13
- c2cwsgiutils/sqlalchemylogger/handlers.py +12 -11
- c2cwsgiutils/stats_pyramid/__init__.py +1 -5
- c2cwsgiutils/stats_pyramid/_db_spy.py +2 -2
- c2cwsgiutils/stats_pyramid/_pyramid_spy.py +12 -1
- c2cwsgiutils/templates/index.html.mako +4 -1
- c2cwsgiutils/version.py +11 -5
- {c2cwsgiutils-6.1.0.dev105.dist-info → c2cwsgiutils-6.1.7.dist-info}/LICENSE +1 -1
- {c2cwsgiutils-6.1.0.dev105.dist-info → c2cwsgiutils-6.1.7.dist-info}/METADATA +18 -6
- c2cwsgiutils-6.1.7.dist-info/RECORD +67 -0
- {c2cwsgiutils-6.1.0.dev105.dist-info → c2cwsgiutils-6.1.7.dist-info}/WHEEL +1 -1
- c2cwsgiutils-6.1.0.dev105.dist-info/RECORD +0 -67
- {c2cwsgiutils-6.1.0.dev105.dist-info → c2cwsgiutils-6.1.7.dist-info}/entry_points.txt +0 -0
c2cwsgiutils/redis_utils.py
CHANGED
@@ -11,19 +11,19 @@ import yaml
 
 import c2cwsgiutils.config_utils
 
-LOG = logging.getLogger(__name__)
+_LOG = logging.getLogger(__name__)
 
 REDIS_URL_KEY = "C2C_REDIS_URL"
-REDIS_OPTIONS_KEY = "C2C_REDIS_OPTIONS"
+_REDIS_OPTIONS_KEY = "C2C_REDIS_OPTIONS"
 REDIS_SENTINELS_KEY = "C2C_REDIS_SENTINELS"
 REDIS_SERVICENAME_KEY = "C2C_REDIS_SERVICENAME"
-REDIS_DB_KEY = "C2C_REDIS_DB"
+_REDIS_DB_KEY = "C2C_REDIS_DB"
 
 REDIS_URL_KEY_PROP = "c2c.redis_url"
-REDIS_OPTIONS_KEY_PROP = "c2c.redis_options"
+_REDIS_OPTIONS_KEY_PROP = "c2c.redis_options"
 REDIS_SENTINELS_KEY_PROP = "c2c.redis_sentinels"
 REDIS_SERVICENAME_KEY_PROP = "c2c.redis_servicename"
-REDIS_DB_KEY_PROP = "c2c.redis_db"
+_REDIS_DB_KEY_PROP = "c2c.redis_db"
 
 _master: Optional["redis.client.Redis[str]"] = None
 _slave: Optional["redis.client.Redis[str]"] = None
@@ -32,7 +32,7 @@ _sentinel: Optional[redis.sentinel.Sentinel] = None
 
 def cleanup() -> None:
     """Cleanup the redis connections."""
-    global _master, _slave, _sentinel
+    global _master, _slave, _sentinel  # pylint: disable=global-statement
     _master = None
     _slave = None
     _sentinel = None
@@ -52,17 +52,17 @@ def get(
 
 
 def _init(settings: Optional[Mapping[str, Any]]) -> None:
-    global _master, _slave, _sentinel
+    global _master, _slave, _sentinel  # pylint: disable=global-statement
     sentinels = c2cwsgiutils.config_utils.env_or_settings(
         settings, REDIS_SENTINELS_KEY, REDIS_SENTINELS_KEY_PROP
     )
     service_name = c2cwsgiutils.config_utils.env_or_settings(
         settings, REDIS_SERVICENAME_KEY, REDIS_SERVICENAME_KEY_PROP
    )
-    db = c2cwsgiutils.config_utils.env_or_settings(settings, REDIS_DB_KEY, REDIS_DB_KEY_PROP)
+    db = c2cwsgiutils.config_utils.env_or_settings(settings, _REDIS_DB_KEY, _REDIS_DB_KEY_PROP)
     url = c2cwsgiutils.config_utils.env_or_settings(settings, REDIS_URL_KEY, REDIS_URL_KEY_PROP)
     redis_options_ = c2cwsgiutils.config_utils.env_or_settings(
-        settings, REDIS_OPTIONS_KEY, REDIS_OPTIONS_KEY_PROP
+        settings, _REDIS_OPTIONS_KEY, _REDIS_OPTIONS_KEY_PROP
     )
 
     redis_options = (
@@ -82,16 +82,16 @@ def _init(settings: Optional[Mapping[str, Any]]) -> None:
             db=db,
             **redis_options,
         )
-        LOG.info("Redis setup using: %s, %s, %s", sentinels, service_name, redis_options_)
+        _LOG.info("Redis setup using: %s, %s, %s", sentinels, service_name, redis_options_)
         _master = _sentinel.master_for(service_name)
         _slave = _sentinel.slave_for(service_name)
         return
     if url:
-        LOG.info("Redis setup using: %s, with options: %s", url, redis_options_)
+        _LOG.info("Redis setup using: %s, with options: %s", url, redis_options_)
         _master = redis.client.Redis.from_url(url, decode_responses=True, **redis_options)
         _slave = _master
     else:
-        LOG.info(
+        _LOG.info(
             "No Redis configuration found, use %s or %s to configure it", REDIS_URL_KEY, REDIS_SENTINELS_KEY
         )
 
@@ -114,22 +114,23 @@ class PubSubWorkerThread(threading.Thread):
             try:
                 pubsub.get_message(ignore_subscribe_messages=True, timeout=1)
                 if not last_was_ok:
-                    LOG.info("Redis is back")
+                    _LOG.info("Redis is back")
                     last_was_ok = True
             except redis.exceptions.RedisError:
                 if last_was_ok:
-                    LOG.warning("Redis connection problem")
+                    _LOG.warning("Redis connection problem")
                     last_was_ok = False
                 time.sleep(0.5)
             except Exception:  # pylint: disable=broad-except
-                LOG.warning("Unexpected error", exc_info=True)
-        LOG.info("Redis subscription worker stopped")
+                _LOG.warning("Unexpected error", exc_info=True)
+        _LOG.info("Redis subscription worker stopped")
         pubsub.close()
         self._running = False
 
     def stop(self) -> None:
-        # Stopping simply unsubscribes from all channels and patterns.
-        # The unsubscribe responses that are generated will short circuit
+        """Stop the worker."""
+        # Stopping simply unsubscribes from all channels and patterns.
+        # The unsubscribe responses that are generated will short circuit
         # the loop in run(), calling pubsub.close() to clean up the connection
         self.pubsub.unsubscribe()
         self.pubsub.punsubscribe()
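Note: only the un-prefixed constants (`REDIS_URL_KEY`, `REDIS_SENTINELS_KEY`, `REDIS_SERVICENAME_KEY` and their `*_PROP` counterparts) remain public after this rename. A minimal sketch of wiring a single Redis instance through the environment, assuming `redis_utils.get()` keeps returning the `(master, slave, sentinel)` triple and that `C2C_REDIS_OPTIONS` takes `key=value` pairs; the URL and option values are placeholders:

    import os

    from c2cwsgiutils import redis_utils

    # Placeholder URL/options; C2C_REDIS_SENTINELS + C2C_REDIS_SERVICENAME would be
    # used instead for a Sentinel setup.
    os.environ["C2C_REDIS_URL"] = "redis://localhost:6379"
    os.environ["C2C_REDIS_OPTIONS"] = "socket_timeout=3"

    master, slave, _sentinel = redis_utils.get(None)
    if master is not None:
        master.set("greeting", "hello")
        print(slave.get("greeting"))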
c2cwsgiutils/request_tracking/__init__.py
CHANGED
@@ -20,10 +20,10 @@ from pyramid.threadlocal import get_current_request
 
 from c2cwsgiutils import config_utils, prometheus
 
-ID_HEADERS: list[str] = []
+_ID_HEADERS: list[str] = []
 _HTTPAdapter_send = requests.adapters.HTTPAdapter.send
-LOG = logging.getLogger(__name__)
-DEFAULT_TIMEOUT: Optional[float] = None
+_LOG = logging.getLogger(__name__)
+_DEFAULT_TIMEOUT: Optional[float] = None
 _PROMETHEUS_REQUESTS_SUMMARY = prometheus_client.Summary(
     prometheus.build_metric_name("requests"),
     "Requests requests",
@@ -33,7 +33,7 @@ _PROMETHEUS_REQUESTS_SUMMARY = prometheus_client.Summary(
 
 
 def _gen_request_id(request: pyramid.request.Request) -> str:
-    for id_header in ID_HEADERS:
+    for id_header in _ID_HEADERS:
         if id_header in request.headers:
             return request.headers[id_header]  # type: ignore
     return str(uuid.uuid4())
@@ -50,15 +50,15 @@ def _patch_requests() -> None:
         proxies: Optional[Mapping[str, str]] = None,
     ) -> requests.Response:
         pyramid_request = get_current_request()
-        header = ID_HEADERS[0]
+        header = _ID_HEADERS[0]
         if pyramid_request is not None and header not in request.headers:
             request.headers[header] = pyramid_request.c2c_request_id
 
         if timeout is None:
-            if DEFAULT_TIMEOUT is not None:
-                timeout = DEFAULT_TIMEOUT
+            if _DEFAULT_TIMEOUT is not None:
+                timeout = _DEFAULT_TIMEOUT
             else:
-                LOG.warning("Doing a %s request without timeout to %s", request.method, request.url)
+                _LOG.warning("Doing a %s request without timeout to %s", request.method, request.url)
 
         assert request.url
         parsed = urllib.parse.urlparse(request.url)
@@ -94,20 +94,20 @@ def includeme(config: Optional[pyramid.config.Configurator] = None) -> None:
     Use a X-Request-ID (or other) header to track all the logs related to a request
     including on the sub services.
     """
-    global ID_HEADERS, DEFAULT_TIMEOUT
-    ID_HEADERS = ["X-Request-ID", "X-Correlation-ID", "Request-ID", "X-Varnish", "X-Amzn-Trace-Id"]
+    global _ID_HEADERS, _DEFAULT_TIMEOUT  # pylint: disable=global-statement
+    _ID_HEADERS = ["X-Request-ID", "X-Correlation-ID", "Request-ID", "X-Varnish", "X-Amzn-Trace-Id"]
     if config is not None:
         extra_header = config_utils.env_or_config(config, "C2C_REQUEST_ID_HEADER", "c2c.request_id_header")
         if extra_header:
-            ID_HEADERS.insert(0, extra_header)
+            _ID_HEADERS.insert(0, extra_header)
         config.add_request_method(_gen_request_id, "c2c_request_id", reify=True)
 
-    DEFAULT_TIMEOUT = config_utils.env_or_config(
+    _DEFAULT_TIMEOUT = config_utils.env_or_config(
         config, "C2C_REQUESTS_DEFAULT_TIMEOUT", "c2c.requests_default_timeout", type_=float
     )
     _patch_requests()
 
     if config_utils.env_or_config(config, "C2C_SQL_REQUEST_ID", "c2c.sql_request_id", False):
-        from . import _sql
+        from . import _sql  # pylint: disable=import-outside-toplevel
 
         _sql.init()
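Note: the renamed `_ID_HEADERS` and `_DEFAULT_TIMEOUT` globals are still fed from the same configuration keys. A sketch of enabling the module directly in a Pyramid app (in practice it is usually pulled in through the main c2cwsgiutils includeme); the header name and timeout value below are placeholders:

    from pyramid.config import Configurator

    settings = {
        # Extra header checked first when looking for an incoming request ID
        # (the C2C_REQUEST_ID_HEADER environment variable works the same way).
        "c2c.request_id_header": "X-My-Request-ID",
        # Outgoing requests made without an explicit timeout fall back to this
        # value instead of triggering the "without timeout" warning.
        "c2c.requests_default_timeout": "30",
    }

    with Configurator(settings=settings) as config:
        config.include("c2cwsgiutils.request_tracking")
        # requests.* calls issued while handling a Pyramid request now carry the
        # request ID header and use the default timeout.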
c2cwsgiutils/scripts/genversion.py
CHANGED
@@ -8,9 +8,9 @@ import sys
 import warnings
 from typing import Optional, cast
 
-SRC_VERSION_RE = re.compile(r"^.*\(([^=]*)===?([^=]*)\)$")
-VERSION_RE = re.compile(r"^([^=]*)==([^=]*)$")
-LOG = logging.getLogger(__name__)
+_SRC_VERSION_RE = re.compile(r"^.*\(([^=]*)===?([^=]*)\)$")
+_VERSION_RE = re.compile(r"^([^=]*)==([^=]*)$")
+_LOG = logging.getLogger(__name__)
 
 
 def _get_package_version(comp: str) -> tuple[Optional[str], Optional[str]]:
@@ -19,8 +19,8 @@ def _get_package_version(comp: str) -> tuple[Optional[str], Optional[str]]:
 
     See test_genversion.py for examples.
     """
-    src_matcher = SRC_VERSION_RE.match(comp)
-    matcher = src_matcher or VERSION_RE.match(comp)
+    src_matcher = _SRC_VERSION_RE.match(comp)
+    matcher = src_matcher or _VERSION_RE.match(comp)
     if matcher:
         return cast(tuple[str, str], matcher.groups())
     else:
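Note: the two regular expressions are unchanged apart from the rename, so their behaviour can be checked in isolation; the package strings below are made-up examples of the two supported forms (`pkg==version` and `name (pkg===version)`):

    import re

    # The two patterns above, copied from the new module constants.
    SRC_VERSION_RE = re.compile(r"^.*\(([^=]*)===?([^=]*)\)$")
    VERSION_RE = re.compile(r"^([^=]*)==([^=]*)$")

    print(VERSION_RE.match("requests==2.31.0").groups())            # ('requests', '2.31.0')
    print(SRC_VERSION_RE.match("my-app (mypkg===1.2.3)").groups())  # ('mypkg', '1.2.3')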
c2cwsgiutils/scripts/stats_db.py
CHANGED
@@ -6,7 +6,7 @@ import logging
 import os
 import sys
 import time
-from typing import TYPE_CHECKING, Optional
+from typing import Optional
 from wsgiref.simple_server import make_server
 
 import sqlalchemy
@@ -20,12 +20,9 @@ from zope.sqlalchemy import register
 import c2cwsgiutils.setup_process
 from c2cwsgiutils import prometheus
 
-if TYPE_CHECKING:
-    scoped_session = sqlalchemy.orm.scoped_session[sqlalchemy.orm.Session]
-else:
-    scoped_session = sqlalchemy.orm.scoped_session
+scoped_session = sqlalchemy.orm.scoped_session[sqlalchemy.orm.Session]
 
-LOG = logging.getLogger(__name__)
+_LOG = logging.getLogger(__name__)
 
 
 def _parse_args() -> argparse.Namespace:
@@ -72,6 +69,7 @@ class Reporter:
         self.gauges: dict[str, Gauge] = {}
 
     def get_gauge(self, kind: str, kind_help: str, labels: list[str]) -> Gauge:
+        """Get a gauge."""
         if kind not in self.gauges:
             self.gauges[kind] = Gauge(
                 prometheus.build_metric_name(f"database_{kind}"),
@@ -84,25 +82,30 @@ class Reporter:
     def do_report(
         self, metric: list[str], value: int, kind: str, kind_help: str, tags: dict[str, str]
     ) -> None:
-        LOG.debug("%s.%s -> %d", kind, ".".join(metric), value)
+        """Report a metric."""
+        _LOG.debug("%s.%s -> %d", kind, ".".join(metric), value)
         gauge = self.get_gauge(kind, kind_help, list(tags.keys()))
         gauge.labels(**tags).set(value)
 
     def commit(self) -> None:
+        """Commit the metrics."""
         if self.prometheus_push:
             push_to_gateway(self.args.prometheus_url, job="db_counts", registry=self.registry)
         else:
             port = int(os.environ.get("C2C_PROMETHEUS_PORT", "9090"))
             app = make_wsgi_app(self.registry)
             with make_server("", port, app) as httpd:
-                LOG.info("Waiting that Prometheus get the metrics served on port %s...", port)
+                _LOG.info("Waiting that Prometheus get the metrics served on port %s...", port)
                 httpd.handle_request()
 
     def error(self, metric: list[str], error_: Exception) -> None:
+        """Report an error."""
+        del metric
         if self._error is None:
             self._error = error_
 
     def report_error(self) -> None:
+        """Raise the error if any."""
         if self._error is not None:
             raise self._error
 
@@ -225,7 +228,6 @@ def _do_table_count(
 
 def do_extra(session: scoped_session, sql: str, kind: str, gauge_help: str, reporter: Reporter) -> None:
     """Do an extra report."""
-
     for metric, count in session.execute(sqlalchemy.text(sql)):
         reporter.do_report(
             str(metric).split("."), count, kind=kind, kind_help=gauge_help, tags={"metric": metric}
@@ -253,29 +255,29 @@ def _do_dtats_db(args: argparse.Namespace) -> None:
         params={"schemas": tuple(args.schema)},
     ).fetchall()
     for schema, table in tables:
-        LOG.info("Process table %s.%s.", schema, table)
+        _LOG.info("Process table %s.%s.", schema, table)
         try:
             do_table(session, schema, table, reporter)
         except Exception as e:  # pylint: disable=broad-except
-            LOG.exception("Process table %s.%s error.", schema, table)
+            _LOG.exception("Process table %s.%s error.", schema, table)
             reporter.error([schema, table], e)
 
     if args.extra:
         for pos, extra in enumerate(args.extra):
-            LOG.info("Process extra %s.", extra)
+            _LOG.info("Process extra %s.", extra)
             try:
                 do_extra(session, extra, "extra", "Extra metric", reporter)
             except Exception as e:  # pylint: disable=broad-except
-                LOG.exception("Process extra %s error.", extra)
+                _LOG.exception("Process extra %s error.", extra)
                 reporter.error(["extra", str(pos + 1)], e)
     if args.extra_gauge:
         for pos, extra in enumerate(args.extra_gauge):
             sql, gauge, gauge_help = extra
-            LOG.info("Process extra %s.", extra)
+            _LOG.info("Process extra %s.", extra)
             try:
                 do_extra(session, sql, gauge, gauge_help, reporter)
             except Exception as e:  # pylint: disable=broad-except
-                LOG.exception("Process extra %s error.", extra)
+                _LOG.exception("Process extra %s error.", extra)
                 reporter.error(["extra", str(len(args.extra) + pos + 1)], e)
 
     reporter.commit()
@@ -294,11 +296,11 @@ def main() -> None:
             success = True
             break
         except:  # pylint: disable=bare-except
-            LOG.exception("Exception during run")
+            _LOG.exception("Exception during run")
             time.sleep(float(os.environ.get("C2CWSGIUTILS_STATS_DB_SLEEP", 1)))
 
     if not success:
-        LOG.error("Not in success, exiting")
+        _LOG.error("Not in success, exiting")
         sys.exit(1)
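Note: `Reporter` publishes its gauges either by pushing them to a Prometheus push gateway or by serving them once over WSGI. A standalone sketch of the push variant using the same `prometheus_client` calls as `commit()` above; the metric name, labels and gateway address are placeholders:

    from prometheus_client import CollectorRegistry, Gauge, push_to_gateway

    # Build a dedicated registry so only these gauges are exported.
    registry = CollectorRegistry()
    gauge = Gauge("database_table_count", "Number of rows", ["schema", "table"], registry=registry)
    gauge.labels(schema="public", table="users").set(42)

    # Push mode: equivalent to running the script with a push gateway configured.
    push_to_gateway("pushgateway:9091", job="db_counts", registry=registry)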
c2cwsgiutils/scripts/test_print.py
CHANGED
@@ -8,7 +8,7 @@ import warnings
 import c2cwsgiutils.setup_process
 from c2cwsgiutils.acceptance.print import PrintConnection
 
-LOG = logging.getLogger(__name__)
+_LOG = logging.getLogger(__name__)
 
 
 def _parse_args() -> argparse.Namespace:
@@ -38,7 +38,7 @@ def main() -> None:
     if args.app is None:
         for app in print_.get_apps():
             if app != "default":
-                LOG.info("\n\n%s=================", app)
+                _LOG.info("\n\n%s=================", app)
                 test_app(print_, app)
     else:
         test_app(print_, args.app)
@@ -47,13 +47,13 @@ def main() -> None:
 def test_app(print_: PrintConnection, app: str) -> None:
     """Test the application."""
     capabilities = print_.get_capabilities(app)
-    LOG.debug("Capabilities:\n%s", pprint.pformat(capabilities))
+    _LOG.debug("Capabilities:\n%s", pprint.pformat(capabilities))
     examples = print_.get_example_requests(app)
     for name, request in examples.items():
-        LOG.info("\n%s-----------------", name)
+        _LOG.info("\n%s-----------------", name)
         pdf = print_.get_pdf(app, request)
         size = len(pdf.content)
-        LOG.info("Size=%d", size)
+        _LOG.info("Size=%d", size)
 
 
 if __name__ == "__main__":
c2cwsgiutils/sentry.py
CHANGED
@@ -6,7 +6,8 @@ from collections.abc import Generator, MutableMapping
 from typing import Any, Callable, Optional
 
 import pyramid.config
-import sentry_sdk
+import sentry_sdk.integrations
+from sentry_sdk.integrations.asyncio import AsyncioIntegration
 from sentry_sdk.integrations.logging import LoggingIntegration, ignore_logger
 from sentry_sdk.integrations.pyramid import PyramidIntegration
 from sentry_sdk.integrations.redis import RedisIntegration
@@ -15,14 +16,15 @@ from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
 
 from c2cwsgiutils import config_utils
 
-LOG = logging.getLogger(__name__)
-_client_setup = False
+_LOG = logging.getLogger(__name__)
+_CLIENT_SETUP = False
 
 
 def _create_before_send_filter(tags: MutableMapping[str, str]) -> Callable[[Any, Any], Any]:
     """Create a filter that adds tags to every events."""
 
     def do_filter(event: Any, hint: Any) -> Any:
+        del hint
         event.setdefault("tags", {}).update(tags)
         return event
 
@@ -31,17 +33,15 @@ def _create_before_send_filter(tags: MutableMapping[str, str]) -> Callable[[Any, Any], Any]:
 
 def init(config: Optional[pyramid.config.Configurator] = None) -> None:
     """Initialize the Sentry integration, for backward compatibility."""
-
     warnings.warn("init function is deprecated; use includeme instead")
     includeme(config)
 
 
 def includeme(config: Optional[pyramid.config.Configurator] = None) -> None:
     """Initialize the Sentry integration."""
-
-    global _client_setup
+    global _CLIENT_SETUP  # pylint: disable=global-statement
     sentry_url = config_utils.env_or_config(config, "SENTRY_URL", "c2c.sentry.url")
-    if sentry_url is not None and not _client_setup:
+    if sentry_url is not None and not _CLIENT_SETUP:
         client_info: MutableMapping[str, Any] = {
             key[14:].lower(): value for key, value in os.environ.items() if key.startswith("SENTRY_CLIENT_")
         }
@@ -55,6 +55,7 @@ def includeme(config: Optional[pyramid.config.Configurator] = None) -> None:
             "propagate_traces",
            "auto_enabling_integrations",
             "auto_session_tracking",
+            "enable_tracing",
         ):
             if key in client_info:
                 client_info[key] = client_info[key].lower() in ("1", "t", "true")
@@ -73,25 +74,58 @@ def includeme(config: Optional[pyramid.config.Configurator] = None) -> None:
         client_info["ignore_errors"] = client_info.pop("ignore_exceptions", "SystemExit").split(",")
         tags = {key[11:].lower(): value for key, value in os.environ.items() if key.startswith("SENTRY_TAG_")}
 
-        sentry_logging = LoggingIntegration(
-            level=logging.DEBUG,
-            event_level=config_utils.env_or_config(
-                config, "SENTRY_LEVEL", "c2c.sentry_level", "ERROR"
-            ).upper(),
-        )
         traces_sample_rate = float(
             config_utils.env_or_config(
                 config, "SENTRY_TRACES_SAMPLE_RATE", "c2c.sentry_traces_sample_rate", "0.0"
             )
         )
+        integrations: list[sentry_sdk.integrations.Integration] = []
+        if config_utils.config_bool(
+            config_utils.env_or_config(
+                config, "SENTRY_INTEGRATION_LOGGING", "c2c.sentry_integration_logging", "true"
+            )
+        ):
+            integrations.append(
+                LoggingIntegration(
+                    level=logging.DEBUG,
+                    event_level=config_utils.env_or_config(
+                        config, "SENTRY_LEVEL", "c2c.sentry_level", "ERROR"
+                    ).upper(),
+                )
+            )
+        if config_utils.config_bool(
+            config_utils.env_or_config(
+                config, "SENTRY_INTEGRATION_PYRAMID", "c2c.sentry_integration_pyramid", "true"
+            )
+        ):
+            integrations.append(PyramidIntegration())
+        if config_utils.config_bool(
+            config_utils.env_or_config(
+                config, "SENTRY_INTEGRATION_SQLALCHEMY", "c2c.sentry_integration_sqlalchemy", "true"
+            )
+        ):
+            integrations.append(SqlalchemyIntegration())
+        if config_utils.config_bool(
+            config_utils.env_or_config(
+                config, "SENTRY_INTEGRATION_REDIS", "c2c.sentry_integration_redis", "true"
+            )
+        ):
+            integrations.append(RedisIntegration())
+        if config_utils.config_bool(
+            config_utils.env_or_config(
+                config, "SENTRY_INTEGRATION_ASYNCIO", "c2c.sentry_integration_asyncio", "true"
+            )
+        ):
+            integrations.append(AsyncioIntegration())
+
         sentry_sdk.init(
             dsn=sentry_url,
-            integrations=[sentry_logging, PyramidIntegration(), SqlalchemyIntegration(), RedisIntegration()],
+            integrations=integrations,
             traces_sample_rate=traces_sample_rate,
             before_send=_create_before_send_filter(tags),
             **client_info,
         )
-        _client_setup = True
+        _CLIENT_SETUP = True
 
         excludes = config_utils.env_or_config(
             config, "SENTRY_EXCLUDES", "c2c.sentry.excludes", "sentry_sdk"
@@ -99,7 +133,7 @@ def includeme(config: Optional[pyramid.config.Configurator] = None) -> None:
         for exclude in excludes:
             ignore_logger(exclude)
 
-        LOG.info("Configured sentry reporting with client=%s and tags=%s", repr(client_info), repr(tags))
+        _LOG.info("Configured sentry reporting with client=%s and tags=%s", repr(client_info), repr(tags))
 
 
 @contextlib.contextmanager
@@ -110,7 +144,7 @@ def capture_exceptions() -> Generator[None, None, None]:
     You don't need to use that for exception terminating the process (those not caught). Sentry does that
     already.
     """
-    if _client_setup:
+    if _CLIENT_SETUP:
         try:
             yield
         except Exception:
@@ -122,12 +156,12 @@ def capture_exceptions() -> Generator[None, None, None]:
 
 def filter_wsgi_app(application: Callable[..., Any]) -> Callable[..., Any]:
     """If sentry is configured, add a Sentry filter around the application."""
-    if _client_setup:
+    if _CLIENT_SETUP:
         try:
-            LOG.info("Enable WSGI filter for Sentry")
+            _LOG.info("Enable WSGI filter for Sentry")
             return SentryWsgiMiddleware(application)
         except Exception:  # pylint: disable=broad-except
-            LOG.error("Failed enabling sentry. Continuing without it.", exc_info=True)
+            _LOG.error("Failed enabling sentry. Continuing without it.", exc_info=True)
             return application
     else:
         return application
@@ -135,4 +169,5 @@ def filter_wsgi_app(application: Callable[..., Any]) -> Callable[..., Any]:
 
 def filter_factory(*args: Any, **kwargs: Any) -> Callable[..., Any]:
     """Get the filter."""
+    del args, kwargs
     return filter_wsgi_app
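Note: each Sentry integration can now be disabled individually through a `SENTRY_INTEGRATION_*` environment variable (or the matching `c2c.sentry_integration_*` setting). A sketch with placeholder DSN and values:

    import os

    from pyramid.config import Configurator

    from c2cwsgiutils import sentry

    os.environ["SENTRY_URL"] = "https://public@sentry.example.com/1"  # placeholder DSN
    os.environ["SENTRY_INTEGRATION_ASYNCIO"] = "false"  # skip AsyncioIntegration
    os.environ["SENTRY_TRACES_SAMPLE_RATE"] = "0.1"

    with Configurator() as config:
        sentry.includeme(config)  # or config.include("c2cwsgiutils.sentry")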
c2cwsgiutils/services.py
CHANGED
@@ -5,7 +5,7 @@ from cornice import Service
 from pyramid.request import Request
 from pyramid.response import Response
 
-LOG = logging.getLogger(__name__)
+_LOG = logging.getLogger(__name__)
 
 
 def create(name: str, path: str, *args: Any, **kwargs: Any) -> Service:
@@ -31,6 +31,6 @@ def _cache_cors(response: Response, request: Request) -> Response:
     except Exception:
         # cornice catches exceptions from filters, and tries call back the filter with only the request.
         # This leads to a useless message in case of error...
-        LOG.error("Failed fixing cache headers for CORS", exc_info=True)
+        _LOG.error("Failed fixing cache headers for CORS", exc_info=True)
         raise
     return response
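Note: `create()` wraps a regular cornice `Service` and attaches the CORS/cache filter shown above. A sketch of declaring a hypothetical service with it; the name, path and view below are made up:

    from c2cwsgiutils import services

    hello_service = services.create("hello", "/hello")


    @hello_service.get()
    def hello_get(request):
        del request  # unused in this sketch
        return {"hello": True}

The resulting object is registered like any other cornice service, typically picked up by `config.scan()`.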
c2cwsgiutils/sql_profiler/__init__.py
CHANGED
@@ -11,10 +11,9 @@ import pyramid.request
 
 from c2cwsgiutils import auth
 
-ENV_KEY = "C2C_SQL_PROFILER_ENABLED"
-CONFIG_KEY = "c2c.sql_profiler_enabled"
-LOG = logging.getLogger(__name__)
-repository = None
+_ENV_KEY = "C2C_SQL_PROFILER_ENABLED"
+_CONFIG_KEY = "c2c.sql_profiler_enabled"
+_LOG = logging.getLogger(__name__)
 
 
 def init(config: pyramid.config.Configurator) -> None:
@@ -25,7 +24,7 @@ def init(config: pyramid.config.Configurator) -> None:
 
 def includeme(config: pyramid.config.Configurator) -> None:
     """Install a pyramid event handler that adds the request information."""
-    if auth.is_enabled(config, ENV_KEY, CONFIG_KEY):
-        from . import _impl
+    if auth.is_enabled(config, _ENV_KEY, _CONFIG_KEY):
+        from . import _impl  # pylint: disable=import-outside-toplevel
 
         _impl.init(config)
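Note: the profiler is only wired in when `C2C_SQL_PROFILER_ENABLED` (or `c2c.sql_profiler_enabled`) is truthy at startup; once installed, `_impl` adds a view that toggles it at runtime. A rough sketch of toggling it over HTTP, assuming the default `/c2c` base path and a local host/port, and leaving out the secret handling the debug endpoints normally require:

    import requests

    # ?enable=0 switches the profiler back off.
    response = requests.get(
        "http://localhost:8080/c2c/sql_profiler",
        params={"enable": "1"},
        timeout=10,
    )
    print(response.json())  # the view above returns {"status": 200, "enabled": ...}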
c2cwsgiutils/sql_profiler/_impl.py
CHANGED
@@ -16,8 +16,8 @@ import sqlalchemy.event
 
 from c2cwsgiutils import auth, broadcast, config_utils
 
-LOG = logging.getLogger(__name__)
-repository = None
+_LOG = logging.getLogger(__name__)
+_REPOSITORY = None
 
 
 class _Repository:
@@ -35,7 +35,8 @@ class _Repository:
         _context: Any,
         _executemany: Any,
     ) -> None:
-        if statement.startswith("SELECT ") and LOG.isEnabledFor(logging.INFO):
+        """Profile the SQL statement."""
+        if statement.startswith("SELECT ") and _LOG.isEnabledFor(logging.INFO):
             do_it = False
             with self._lock:
                 if statement not in self._repo:
@@ -43,8 +44,8 @@ class _Repository:
                     self._repo.add(statement)
             if do_it:
                 try:
-                    LOG.info("statement:\n%s", _indent(_beautify_sql(statement)))
-                    LOG.info("parameters: %s", repr(parameters))
+                    _LOG.info("statement:\n%s", _indent(_beautify_sql(statement)))
+                    _LOG.info("parameters: %s", repr(parameters))
                     with conn.engine.begin() as c:
                         output = "\n  ".join(
                             [
@@ -54,7 +55,7 @@ class _Repository:
                                 )
                             ]
                         )
-                    LOG.info(output)
+                    _LOG.info(output)
                 except Exception:  # nosec # pylint: disable=broad-except
                     pass
 
@@ -64,21 +65,21 @@ def _sql_profiler_view(request: pyramid.request.Request) -> Mapping[str, Any]:
     enable = request.params.get("enable")
     if enable is not None:
         broadcast.broadcast("c2c_sql_profiler", params={"enable": enable}, expect_answers=True)
-    return {"status": 200, "enabled": repository is not None}
+    return {"status": 200, "enabled": _REPOSITORY is not None}
 
 
 def _setup_profiler(enable: str) -> None:
-    global repository
+    global _REPOSITORY  # pylint: disable=global-statement
     if config_utils.config_bool(enable):
-        if repository is None:
-            LOG.info("Enabling the SQL profiler")
-            repository = _Repository()
-            sqlalchemy.event.listen(sqlalchemy.engine.Engine, "before_cursor_execute", repository.profile)
+        if _REPOSITORY is None:
+            _LOG.info("Enabling the SQL profiler")
+            _REPOSITORY = _Repository()
+            sqlalchemy.event.listen(sqlalchemy.engine.Engine, "before_cursor_execute", _REPOSITORY.profile)
     else:
-        if repository is not None:
-            LOG.info("Disabling the SQL profiler")
-            sqlalchemy.event.remove(sqlalchemy.engine.Engine, "before_cursor_execute", repository.profile)
-            repository = None
+        if _REPOSITORY is not None:
+            _LOG.info("Disabling the SQL profiler")
+            sqlalchemy.event.remove(sqlalchemy.engine.Engine, "before_cursor_execute", _REPOSITORY.profile)
+            _REPOSITORY = None
 
 
 def _beautify_sql(statement: str) -> str:
@@ -102,4 +103,4 @@ def init(config: pyramid.config.Configurator) -> None:
         "c2c_sql_profiler", config_utils.get_base_path(config) + r"/sql_profiler", request_method="GET"
     )
     config.add_view(_sql_profiler_view, route_name="c2c_sql_profiler", renderer="fast_json", http_cache=0)
-    LOG.info("Enabled the /sql_profiler API")
+    _LOG.info("Enabled the /sql_profiler API")