c2cwsgiutils 5.1.7.dev20230901073305__py3-none-any.whl → 5.2.1.dev197__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- c2cwsgiutils/__init__.py +13 -13
- c2cwsgiutils/acceptance/connection.py +5 -2
- c2cwsgiutils/acceptance/image.py +98 -4
- c2cwsgiutils/acceptance/package-lock.json +1933 -0
- c2cwsgiutils/acceptance/package.json +7 -0
- c2cwsgiutils/acceptance/print.py +4 -4
- c2cwsgiutils/acceptance/screenshot.js +62 -0
- c2cwsgiutils/acceptance/utils.py +14 -22
- c2cwsgiutils/auth.py +4 -4
- c2cwsgiutils/broadcast/__init__.py +15 -7
- c2cwsgiutils/broadcast/interface.py +3 -2
- c2cwsgiutils/broadcast/local.py +3 -2
- c2cwsgiutils/broadcast/redis.py +8 -7
- c2cwsgiutils/client_info.py +5 -5
- c2cwsgiutils/config_utils.py +2 -1
- c2cwsgiutils/coverage_setup.py +2 -2
- c2cwsgiutils/db.py +58 -37
- c2cwsgiutils/db_maintenance_view.py +2 -1
- c2cwsgiutils/debug/_listeners.py +10 -9
- c2cwsgiutils/debug/_views.py +12 -11
- c2cwsgiutils/debug/utils.py +5 -5
- c2cwsgiutils/errors.py +7 -6
- c2cwsgiutils/health_check.py +96 -85
- c2cwsgiutils/index.py +90 -105
- c2cwsgiutils/loader.py +3 -3
- c2cwsgiutils/logging_view.py +3 -2
- c2cwsgiutils/models_graph.py +8 -6
- c2cwsgiutils/prometheus.py +175 -57
- c2cwsgiutils/pyramid.py +4 -2
- c2cwsgiutils/pyramid_logging.py +2 -1
- c2cwsgiutils/redis_stats.py +13 -11
- c2cwsgiutils/redis_utils.py +15 -14
- c2cwsgiutils/request_tracking/__init__.py +36 -30
- c2cwsgiutils/request_tracking/_sql.py +3 -1
- c2cwsgiutils/scripts/genversion.py +4 -4
- c2cwsgiutils/scripts/stats_db.py +130 -68
- c2cwsgiutils/scripts/test_print.py +1 -1
- c2cwsgiutils/sentry.py +2 -1
- c2cwsgiutils/setup_process.py +13 -17
- c2cwsgiutils/sql_profiler/_impl.py +12 -5
- c2cwsgiutils/sqlalchemylogger/README.md +48 -0
- c2cwsgiutils/sqlalchemylogger/_models.py +7 -4
- c2cwsgiutils/sqlalchemylogger/examples/example.py +15 -0
- c2cwsgiutils/sqlalchemylogger/handlers.py +11 -8
- c2cwsgiutils/static/favicon-16x16.png +0 -0
- c2cwsgiutils/static/favicon-32x32.png +0 -0
- c2cwsgiutils/stats_pyramid/__init__.py +7 -11
- c2cwsgiutils/stats_pyramid/_db_spy.py +14 -11
- c2cwsgiutils/stats_pyramid/_pyramid_spy.py +29 -20
- c2cwsgiutils/templates/index.html.mako +50 -0
- c2cwsgiutils/version.py +49 -16
- c2cwsgiutils-5.2.1.dev197.dist-info/LICENSE +22 -0
- {c2cwsgiutils-5.1.7.dev20230901073305.dist-info → c2cwsgiutils-5.2.1.dev197.dist-info}/METADATA +187 -135
- c2cwsgiutils-5.2.1.dev197.dist-info/RECORD +67 -0
- {c2cwsgiutils-5.1.7.dev20230901073305.dist-info → c2cwsgiutils-5.2.1.dev197.dist-info}/WHEEL +1 -2
- c2cwsgiutils-5.2.1.dev197.dist-info/entry_points.txt +21 -0
- c2cwsgiutils/acceptance/composition.py +0 -129
- c2cwsgiutils/metrics.py +0 -110
- c2cwsgiutils/scripts/check_es.py +0 -130
- c2cwsgiutils/scripts/coverage_report.py +0 -36
- c2cwsgiutils/stats.py +0 -355
- c2cwsgiutils/stats_pyramid/_views.py +0 -16
- c2cwsgiutils-5.1.7.dev20230901073305.data/scripts/c2cwsgiutils-run +0 -32
- c2cwsgiutils-5.1.7.dev20230901073305.dist-info/LICENSE.txt +0 -28
- c2cwsgiutils-5.1.7.dev20230901073305.dist-info/RECORD +0 -69
- c2cwsgiutils-5.1.7.dev20230901073305.dist-info/entry_points.txt +0 -25
- c2cwsgiutils-5.1.7.dev20230901073305.dist-info/top_level.txt +0 -2
- tests/acceptance/__init__.py +0 -0
- tests/acceptance/test_utils.py +0 -13
c2cwsgiutils/scripts/stats_db.py
CHANGED
@@ -1,21 +1,29 @@
 #!/usr/bin/env python3
-"""
+"""Provide prometheus gauges for every tables of a database."""
+
 import argparse
 import logging
 import os
 import sys
 import time
-from typing import
+from typing import TYPE_CHECKING, Optional
+from wsgiref.simple_server import make_server

 import sqlalchemy
 import sqlalchemy.exc
 import sqlalchemy.orm
 import transaction
+from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
+from prometheus_client.exposition import make_wsgi_app
 from zope.sqlalchemy import register

 import c2cwsgiutils.setup_process
-from c2cwsgiutils import
-
+from c2cwsgiutils import prometheus
+
+if TYPE_CHECKING:
+    scoped_session = sqlalchemy.orm.scoped_session[sqlalchemy.orm.Session]
+else:
+    scoped_session = sqlalchemy.orm.scoped_session

 LOG = logging.getLogger(__name__)

@@ -28,13 +36,17 @@ def _parse_args() -> argparse.Namespace:
         "--schema", type=str, action="append", required=True, default=["public"], help="schema to dump"
     )
     parser.add_argument(
-        "--extra",
-
-
-        "
+        "--extra",
+        type=str,
+        action="append",
+        help="A SQL query that returns a metric name and a value",
     )
     parser.add_argument(
-        "--
+        "--extra-gauge",
+        type=str,
+        action="append",
+        nargs=3,
+        help="A SQL query that returns a metric name and a value, with gauge name and help",
     )
     parser.add_argument(
         "--prometheus-url", "--prometheus_url", type=str, help="Base URL for the Prometheus Pushgateway"
@@ -54,43 +66,39 @@ class Reporter:

     def __init__(self, args: argparse.Namespace) -> None:
         self._error: Optional[Exception] = None
-
-
-
+        self.registry = CollectorRegistry()
+        self.prometheus_push = args.prometheus_url is not None
+        self.args = args
+        self.gauges: dict[str, Gauge] = {}
+
+    def get_gauge(self, kind: str, kind_help: str, labels: list[str]) -> Gauge:
+        if kind not in self.gauges:
+            self.gauges[kind] = Gauge(
+                prometheus.build_metric_name(f"database_{kind}"),
+                kind_help,
+                labels,
+                registry=self.registry,
             )
-
-        self.statsd = None
-
-        if args.prometheus_url:
-            self.prometheus: Optional[PushgatewayGroupPublisher] = PushgatewayGroupPublisher(
-                args.prometheus_url,
-                "db_counts",
-                instance=args.prometheus_instance,
-                labels=stats.get_env_tags(),
-            )
-        else:
-            self.prometheus = None
+        return self.gauges[kind]

     def do_report(
-        self, metric:
+        self, metric: list[str], value: int, kind: str, kind_help: str, tags: dict[str, str]
     ) -> None:
         LOG.debug("%s.%s -> %d", kind, ".".join(metric), value)
-
-
-        if stats.USE_TAGS and tags is not None:
-            self.statsd.gauge([kind], value, tags=tags)
-        else:
-            self.statsd.gauge([kind] + metric, value)
-        if self.prometheus is not None:
-            self.prometheus.add("database_table_" + kind, value, metric_labels=tags)
+        gauge = self.get_gauge(kind, kind_help, list(tags.keys()))
+        gauge.labels(**tags).set(value)

     def commit(self) -> None:
-        if self.
-            self.
+        if self.prometheus_push:
+            push_to_gateway(self.args.prometheus_url, job="db_counts", registry=self.registry)
+        else:
+            port = int(os.environ.get("C2C_PROMETHEUS_PORT", "9090"))
+            app = make_wsgi_app(self.registry)
+            with make_server("", port, app) as httpd:
+                LOG.info("Waiting that Prometheus get the metrics served on port %s...", port)
+                httpd.handle_request()

-    def error(self, metric:
-        if self.statsd is not None:
-            self.statsd.counter(["error"] + metric, 1)
+    def error(self, metric: list[str], error_: Exception) -> None:
         if self._error is None:
             self._error = error_

@@ -99,16 +107,27 @@ class Reporter:
         raise self._error


-def do_table(
+def do_table(
+    session: scoped_session,
+    schema: str,
+    table: str,
+    reporter: Reporter,
+) -> None:
     """Do the stats on a table."""
     _do_table_count(reporter, schema, session, table)
     _do_table_size(reporter, schema, session, table)
     _do_indexes(reporter, schema, session, table)


-def _do_indexes(
+def _do_indexes(
+    reporter: Reporter,
+    schema: str,
+    session: scoped_session,
+    table: str,
+) -> None:
     for index_name, size_main, size_fsm, number_of_scans, tuples_read, tuples_fetched in session.execute(
-
+        sqlalchemy.text(
+            """
             SELECT
                 foo.indexname,
                 pg_relation_size(concat(quote_ident(foo.schemaname), '.', quote_ident(foo.indexrelname)), 'main'),
@@ -127,58 +146,90 @@ def _do_indexes(reporter: Reporter, schema: str, session: sqlalchemy.orm.scoped_
             ) AS foo
             ON t.tablename = foo.ctablename AND t.schemaname=foo.schemaname
             WHERE t.schemaname=:schema AND t.tablename=:table
-        """
+            """
+        ),
         params={"schema": schema, "table": table},
     ):
         for fork, value in (("main", size_main), ("fsm", size_fsm)):
             reporter.do_report(
                 [schema, table, index_name, fork],
                 value,
-                kind="
-
+                kind="table_index_size",
+                kind_help="Size of the index",
+                tags={"schema": schema, "table": table, "index": index_name, "fork": fork},
             )
         for action, value in (("scan", number_of_scans), ("read", tuples_read), ("fetch", tuples_fetched)):
             reporter.do_report(
                 [schema, table, index_name, action],
                 value,
-                kind="
-
+                kind="table_index_usage",
+                kind_help="Usage of the index",
+                tags={"schema": schema, "table": table, "index": index_name, "action": action},
             )


 def _do_table_size(
-    reporter: Reporter,
+    reporter: Reporter,
+    schema: str,
+    session: scoped_session,
+    table: str,
 ) -> None:
-
-
-
+    result = session.execute(
+        sqlalchemy.text(
+            """
             SELECT pg_table_size(c.oid) AS total_bytes
             FROM pg_class c
             LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
             WHERE relkind = 'r' AND nspname=:schema AND relname=:table
-        """
+            """
+        ),
         params={"schema": schema, "table": table},
     ).fetchone()
-
+    assert result is not None
+    size: int
+    (size,) = result
+    reporter.do_report(
+        [schema, table],
+        size,
+        kind="table_size",
+        kind_help="Size of the table",
+        tags={"schema": schema, "table": table},
+    )


 def _do_table_count(
-    reporter: Reporter,
+    reporter: Reporter,
+    schema: str,
+    session: scoped_session,
+    table: str,
 ) -> None:
-    quote = session.bind.dialect.identifier_preparer.quote
     # We request and estimation of the count as a real count is very slow on big tables
-    # and seems to cause
-
-
-
+    # and seems to cause replicating lags. This estimate is updated on ANALYZE and VACUUM.
+    result = session.execute(
+        sqlalchemy.text(
+            "SELECT reltuples FROM pg_class where "
+            "oid=(quote_ident(:schema) || '.' || quote_ident(:table))::regclass;"
+        ),
+        params={"schema": schema, "table": table},
     ).fetchone()
-
+    assert result is not None
+    (count,) = result
+    reporter.do_report(
+        [schema, table],
+        count,
+        kind="table_count",
+        kind_help="The number of row in the table",
+        tags={"schema": schema, "table": table},
+    )


-def do_extra(session:
+def do_extra(session: scoped_session, sql: str, kind: str, gauge_help: str, reporter: Reporter) -> None:
     """Do an extra report."""
-
-
+
+    for metric, count in session.execute(sqlalchemy.text(sql)):
+        reporter.do_report(
+            str(metric).split("."), count, kind=kind, kind_help=gauge_help, tags={"metric": metric}
+        )


 def _do_dtats_db(args: argparse.Namespace) -> None:
@@ -193,12 +244,14 @@ def _do_dtats_db(args: argparse.Namespace) -> None:
         raise

     tables = session.execute(
-
+        sqlalchemy.text(
+            """
             SELECT table_schema, table_name FROM information_schema.tables
             WHERE table_type='BASE TABLE' AND table_schema IN :schemas
-        """
+            """
+        ),
         params={"schemas": tuple(args.schema)},
-    )
+    ).fetchall()
     for schema, table in tables:
         LOG.info("Process table %s.%s.", schema, table)
         try:
@@ -211,10 +264,19 @@ def _do_dtats_db(args: argparse.Namespace) -> None:
         for pos, extra in enumerate(args.extra):
             LOG.info("Process extra %s.", extra)
             try:
-                do_extra(session, extra, reporter)
+                do_extra(session, extra, "extra", "Extra metric", reporter)
             except Exception as e:  # pylint: disable=broad-except
                 LOG.exception("Process extra %s error.", extra)
                 reporter.error(["extra", str(pos + 1)], e)
+    if args.extra_gauge:
+        for pos, extra in enumerate(args.extra_gauge):
+            sql, gauge, gauge_help = extra
+            LOG.info("Process extra %s.", extra)
+            try:
+                do_extra(session, sql, gauge, gauge_help, reporter)
+            except Exception as e:  # pylint: disable=broad-except
+                LOG.exception("Process extra %s error.", extra)
+                reporter.error(["extra", str(len(args.extra) + pos + 1)], e)

     reporter.commit()
     transaction.abort()
@@ -225,12 +287,12 @@ def main() -> None:
     """Run the command."""
     success = False
     args = _parse_args()
-    c2cwsgiutils.setup_process.
+    c2cwsgiutils.setup_process.init(args.config_uri)
     for _ in range(int(os.environ.get("C2CWSGIUTILS_STATS_DB_TRYNUMBER", 10))):
         try:
             _do_dtats_db(args)
             success = True
-
+            break
         except:  # pylint: disable=bare-except
             LOG.exception("Exception during run")
             time.sleep(float(os.environ.get("C2CWSGIUTILS_STATS_DB_SLEEP", 1)))
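Note: the rewritten `Reporter` above drops the statsd / `PushgatewayGroupPublisher` pair in favour of plain `prometheus_client` primitives. A minimal standalone sketch of that pattern, not code from the package; the metric name, labels, sample value and the `EXAMPLE_PUSHGATEWAY_URL` variable are illustrative assumptions:

```python
import os
from wsgiref.simple_server import make_server

from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from prometheus_client.exposition import make_wsgi_app

registry = CollectorRegistry()
# One gauge per metric "kind", with the tag keys as label names.
gauge = Gauge("database_table_size", "Size of the table", ["schema", "table"], registry=registry)
gauge.labels(schema="public", table="example").set(12345)

pushgateway_url = os.environ.get("EXAMPLE_PUSHGATEWAY_URL")  # hypothetical variable
if pushgateway_url:
    # Push mode: send the whole registry to a Prometheus Pushgateway.
    push_to_gateway(pushgateway_url, job="db_counts", registry=registry)
else:
    # Pull mode: expose the registry and exit after a single scrape.
    with make_server("", 9090, make_wsgi_app(registry)) as httpd:
        httpd.handle_request()
```

The two branches mirror the new `Reporter.commit()`: push when a Pushgateway URL is configured, otherwise serve the registry once so Prometheus can scrape it.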
c2cwsgiutils/scripts/test_print.py
CHANGED
@@ -33,7 +33,7 @@ def main() -> None:
     c2cwsgiutils.setup_process.bootstrap_application_from_options(args)
     if not args.verbose:
         logging.root.setLevel(logging.INFO)
-    print_ = PrintConnection(base_url=args.url, origin=args.
+    print_ = PrintConnection(base_url=args.url, origin=args.referrer if args.referrer else args.url)
     print_.wait_ready(app=args.app)
     if args.app is None:
         for app in print_.get_apps():
c2cwsgiutils/sentry.py
CHANGED
@@ -2,7 +2,8 @@ import contextlib
 import logging
 import os
 import warnings
-from
+from collections.abc import Generator, MutableMapping
+from typing import Any, Callable, Optional

 import pyramid.config
 import sentry_sdk
c2cwsgiutils/setup_process.py
CHANGED
@@ -1,13 +1,13 @@
 """
 Used by standalone (non-wsgi) processes to setup all the bits and pieces of c2cwsgiutils that could be useful.

-Must be imported at the very beginning of the process' life, before any other module is imported.
+Must be imported at the very beginning of the process's life, before any other module is imported.
 """


 import argparse
 import warnings
-from typing import Any, Callable,
+from typing import Any, Callable, Optional, TypedDict, cast

 import pyramid.config
 import pyramid.registry
@@ -16,7 +16,7 @@ import pyramid.router
 from pyramid.paster import bootstrap
 from pyramid.scripts.common import get_config_loader, parse_vars

-from c2cwsgiutils import broadcast, coverage_setup, redis_stats, sentry, sql_profiler
+from c2cwsgiutils import broadcast, coverage_setup, redis_stats, sentry, sql_profiler


 def fill_arguments(
@@ -51,7 +51,6 @@ def init(config_file: str = "c2c:///app/production.ini") -> None:
     coverage_setup.includeme()
     sentry.includeme(config)
     broadcast.includeme(config)
-    stats.init_backends(settings)
     redis_stats.includeme(config)
     sql_profiler.includeme(config)

@@ -63,18 +62,15 @@ def init_logging(config_file: str = "c2c:///app/production.ini") -> None:
     loader.setup_logging(None)


-PyramidEnv =
-    "
-
-
-
-
-
-
-
-    },
-    total=True,
-)
+class PyramidEnv(TypedDict, total=True):
+    """The return type of the bootstrap functions."""
+
+    root: Any
+    closer: Callable[..., Any]
+    registry: pyramid.registry.Registry
+    request: pyramid.request.Request
+    root_factory: object
+    app: Callable[[dict[str, str], Any], Any]


 def bootstrap_application_from_options(options: argparse.Namespace) -> PyramidEnv:
@@ -91,7 +87,7 @@ def bootstrap_application_from_options(options: argparse.Namespace) -> PyramidEn

 def bootstrap_application(
     config_uri: str = "c2c:///app/production.ini",
-    options: Optional[
+    options: Optional[dict[str, Any]] = None,
 ) -> PyramidEnv:
     """
     Initialize all the application.
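Note: `PyramidEnv` moves from the functional `TypedDict(...)` call to the equivalent class-based declaration. A small sketch of the two forms, with illustrative names rather than the package's own:

```python
from typing import Any, Callable, TypedDict

# Functional syntax (the style being removed):
EnvFunctional = TypedDict("EnvFunctional", {"root": Any, "closer": Callable[..., Any]}, total=True)


# Class syntax (the style PyramidEnv now uses):
class EnvClassBased(TypedDict, total=True):
    root: Any
    closer: Callable[..., Any]
```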
c2cwsgiutils/sql_profiler/_impl.py
CHANGED
@@ -5,8 +5,9 @@ That runs an "EXPLAIN ANALYZE" on every SELECT query going through SQLAlchemy.
 """
 import logging
 import re
+from collections.abc import Mapping
 from threading import Lock
-from typing import Any
+from typing import Any

 import pyramid.request
 import sqlalchemy.engine
@@ -22,7 +23,7 @@ class _Repository:
     def __init__(self) -> None:
         super().__init__()
         self._lock = Lock()
-        self._repo:
+        self._repo: set[str] = set()

     def profile(
         self,
@@ -43,9 +44,15 @@ class _Repository:
         try:
             LOG.info("statement:\n%s", _indent(_beautify_sql(statement)))
             LOG.info("parameters: %s", repr(parameters))
-
-
-
+            with conn.engine.begin() as c:
+                output = "\n ".join(
+                    [
+                        row[0]
+                        for row in c.execute(
+                            sqlalchemy.text(f"EXPLAIN ANALYZE {statement}"), parameters
+                        )
+                    ]
+                )
             LOG.info(output)
         except Exception:  # nosec # pylint: disable=broad-except
             pass
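Note: the profiler now wraps its `EXPLAIN ANALYZE` query in `sqlalchemy.text()` inside an explicit `engine.begin()` block. A standalone sketch of that pattern under the same assumptions (the `explain_analyze` helper name is illustrative, not part of the package):

```python
from collections.abc import Mapping
from typing import Any

import sqlalchemy


def explain_analyze(engine: sqlalchemy.engine.Engine, statement: str, parameters: Mapping[str, Any]) -> str:
    """Run EXPLAIN ANALYZE on a SELECT statement and return the plan as text."""
    with engine.begin() as conn:
        rows = conn.execute(sqlalchemy.text(f"EXPLAIN ANALYZE {statement}"), parameters)
        return "\n".join(row[0] for row in rows)
```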
c2cwsgiutils/sqlalchemylogger/README.md
ADDED
@@ -0,0 +1,48 @@
+This module is used to ship logging records to an SQL database.
+
+Currently only `sqlite` and `postgres_psycopg2` are fully supported.
+
+To add the logger in a pyramid ini file use something like:
+
+```
+[handlers]
+keys = sqlalchemy_logger
+
+[handler_sqlalchemy_logger]
+class = c2cwsgiutils.sqlalchemylogger.handlers.SQLAlchemyHandler
+#args = ({'url':'sqlite:///logger_db.sqlite3','tablename':'test'},'curl')
+args = ({'url':'postgresql://postgres:password@localhost:5432/test','tablename':'test','tableargs': {'schema':'xyz'}},'curl')
+level = NOTSET
+formatter = generic
+propagate = 0
+```
+
+if the credentials given in `args = ` section are sufficient, the handler will
+create the DB, schema and table it needs directly.
+
+In the above example the second parameter provided `'curl'` is a negative
+filter (any valid regex will work) to avoid writing the matching logs to the
+DB. Useful to filter out health-check specific `User-Agent` headers or so.
+
+To use the handler in a script, you might:
+
+```python
+import logging
+import time
+
+from c2cwsgiutils.sqlalchemylogger.handlers import SQLAlchemyHandler
+
+if __name__ == '__main__':
+    logging.basicConfig(
+        format='%(asctime)s : %(name)s : %(levelname)s : %(message)s',
+        level=logging.DEBUG,
+    )
+    logger = logging.getLogger(__name__)
+    logger_db_engine = {'url':'sqlite:///logger_db.sqlite3'}
+
+    logger.addHandler(SQLAlchemyHandler(logger_db_engine))
+    logger.info('bla')
+    # wait a few seconds because the second thread will write the
+    # logs after a timeout
+    time.sleep(2)
+```
c2cwsgiutils/sqlalchemylogger/_models.py
CHANGED
@@ -1,14 +1,14 @@
-from typing import Any,
+from typing import Any, Union

 from sqlalchemy import Column
-from sqlalchemy.
+from sqlalchemy.orm import declarative_base
 from sqlalchemy.sql import func
 from sqlalchemy.types import DateTime, Integer, String

 Base = declarative_base()


-def create_log_class(tablename: str = "logs", tableargs: Union[str,
+def create_log_class(tablename: str = "logs", tableargs: Union[str, dict[str, str]] = "") -> Any:
     """Get the sqlalchemy lgo class."""

     class Log(Base):  # type: ignore
@@ -21,7 +21,10 @@ def create_log_class(tablename: str = "logs", tableargs: Union[str, Dict[str, st
         level = Column(String)  # info, debug, or error?
         trace = Column(String)  # the full traceback printout
         msg = Column(String)  # any custom log you may have included
-        created_at = Column(
+        created_at = Column(  # the current timestamp
+            DateTime,
+            default=func.now(),  # pylint: disable=not-callable
+        )

         def __init__(self, logger: Any = None, level: Any = None, trace: Any = None, msg: Any = None) -> None:
             self.logger = logger
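Note: `declarative_base` is now imported from `sqlalchemy.orm`, its home since SQLAlchemy 1.4, instead of the legacy `sqlalchemy.ext.declarative` location. A minimal sketch of that import path with an illustrative model (not the package's `Log` class):

```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base  # moved here from sqlalchemy.ext.declarative in 1.4

Base = declarative_base()


class ExampleLog(Base):  # illustrative model, not the package's Log class
    __tablename__ = "example_logs"

    id = Column(Integer, primary_key=True)
    msg = Column(String)
```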
c2cwsgiutils/sqlalchemylogger/examples/example.py
ADDED
@@ -0,0 +1,15 @@
+import logging
+import time
+
+from c2cwsgiutils.sqlalchemylogger.handlers import SQLAlchemyHandler
+
+if __name__ == "__main__":
+    logging.basicConfig(
+        format="%(asctime)s : %(name)s : %(levelname)s : %(message)s",
+        level=logging.DEBUG,
+    )
+    logger = logging.getLogger(__name__)
+    logger_db_engine = {"url": "sqlite:///logger_db.sqlite3"}
+    logger.addHandler(SQLAlchemyHandler(logger_db_engine))
+    logger.info("bla")
+    time.sleep(10)
c2cwsgiutils/sqlalchemylogger/handlers.py
CHANGED
@@ -3,7 +3,7 @@ import queue
 import threading
 import time
 import traceback
-from typing import Any
+from typing import Any

 import sqlalchemy
 from sqlalchemy import create_engine
@@ -25,7 +25,7 @@ class SQLAlchemyHandler(logging.Handler):

     def __init__(
         self,
-        sqlalchemy_url:
+        sqlalchemy_url: dict[str, str],
         does_not_contain_expression: str = "",
         contains_expression: str = "",
     ) -> None:
@@ -54,7 +54,7 @@ class SQLAlchemyHandler(logging.Handler):
         LOG.debug("%s: starting processor thread", __name__)
         while True:
             logs = []
-            time_since_last = time.
+            time_since_last = time.perf_counter()
             while True:
                 with self.condition:
                     self.condition.wait(timeout=self.MAX_TIMEOUT)
@@ -66,17 +66,17 @@ class SQLAlchemyHandler(logging.Handler):
                 # by writing chunks of self.MAX_NB_LOGS size,
                 # but also do not wait forever before writing stuff (self.MAX_TIMOUT)
                 if (len(logs) >= self.MAX_NB_LOGS) or (
-                    time.
+                    time.perf_counter() >= (time_since_last + self.MAX_TIMEOUT)
                 ):
                     self._write_logs(logs)
                     break
         LOG.debug("%s: stopping processor thread", __name__)

-    def _write_logs(self, logs:
+    def _write_logs(self, logs: list[Any]) -> None:
         try:
             self.session.bulk_save_objects(logs)
             self.session.commit()
-        except
+        except SQLAlchemyError:
             try:
                 self.create_db()
                 self.session.rollback()
@@ -98,8 +98,11 @@ class SQLAlchemyHandler(logging.Handler):
         if not isinstance(self.Log.__table_args__, type(None)) and self.Log.__table_args__.get(
             "schema", None
         ):
-
-            self.engine.
+            with self.engine.begin() as connection:
+                if not self.engine.dialect.has_schema(connection, self.Log.__table_args__["schema"]):
+                    connection.execute(
+                        sqlalchemy.schema.CreateSchema(self.Log.__table_args__["schema"]),  # type: ignore
+                    )
         Base.metadata.create_all(self.engine)

     def emit(self, record: Any) -> None:
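Note: `create_db()` now checks for the target schema and creates it inside an explicit `engine.begin()` transaction. A standalone sketch of that idea; the `ensure_schema` helper name is an assumption, and it uses the generic Inspector API rather than the dialect-specific `has_schema` call shown above:

```python
import sqlalchemy
from sqlalchemy.schema import CreateSchema


def ensure_schema(engine: sqlalchemy.engine.Engine, schema_name: str) -> None:
    """Create the schema if it does not exist yet, inside one transaction."""
    with engine.begin() as connection:
        if schema_name not in sqlalchemy.inspect(connection).get_schema_names():
            connection.execute(CreateSchema(schema_name))
```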
c2cwsgiutils/static/favicon-16x16.png
Binary file
c2cwsgiutils/static/favicon-32x32.png
Binary file
c2cwsgiutils/stats_pyramid/__init__.py
CHANGED
@@ -1,40 +1,36 @@
 """Generate statsd metrics for pyramid and SQLAlchemy events."""
+
 import warnings

 import pyramid.config
 import pyramid.request

-from c2cwsgiutils import
+from c2cwsgiutils.stats_pyramid import _pyramid_spy


 def init(config: pyramid.config.Configurator) -> None:
     """Initialize the whole stats module, for backward compatibility."""
+
     warnings.warn("init function is deprecated; use includeme instead")
     includeme(config)


 def includeme(config: pyramid.config.Configurator) -> None:
     """
-    Initialize the whole stats module.
+    Initialize the whole stats pyramid module.

     Arguments:

         config: The Pyramid config
     """
-    stats.init_backends(config.get_settings())
-    if stats.BACKENDS:  # pragma: nocover
-        if "memory" in stats.BACKENDS:  # pragma: nocover
-            from . import _views

-
-
-
-            _pyramid_spy.init(config)
-            init_db_spy()
+    _pyramid_spy.init(config)
+    init_db_spy()


 def init_db_spy() -> None:
     """Initialize the database spy."""
+
     from . import _db_spy

     _db_spy.init()