pgbelt 0.7.11__tar.gz → 0.8.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pgbelt-0.7.11 → pgbelt-0.8.1}/PKG-INFO +4 -3
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/schema.py +15 -26
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/status.py +11 -1
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/sync.py +16 -38
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/util/dump.py +25 -61
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pyproject.toml +11 -11
- {pgbelt-0.7.11 → pgbelt-0.8.1}/LICENSE +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/README.md +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/__init__.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/__init__.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/convenience.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/helpers.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/login.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/preflight.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/setup.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/teardown.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/config/__init__.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/config/config.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/config/models.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/config/remote.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/main.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/util/__init__.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/util/asyncfuncs.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/util/logs.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/util/pglogical.py +0 -0
- {pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/util/postgres.py +0 -0
{pgbelt-0.7.11 → pgbelt-0.8.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pgbelt
-Version: 0.7.11
+Version: 0.8.1
 Summary: A CLI tool used to manage Postgres data migrations from beginning to end, for a single database or a fleet, leveraging pglogical replication.
 Author: Varjitt Jeeva
 Author-email: varjitt.jeeva@autodesk.com
@@ -10,11 +10,12 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: aiofiles (>=0.8,<24.2)
-Requires-Dist: asyncpg (>=0.27,<0.
+Requires-Dist: asyncpg (>=0.27,<0.31)
 Requires-Dist: pydantic (>=2.0,<3.0)
 Requires-Dist: tabulate (>=0.9.0,<0.10.0)
-Requires-Dist: typer (>=0.9,<0.
+Requires-Dist: typer (>=0.9,<0.14)
 Description-Content-Type: text/markdown
 
 # Pgbelt
{pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/schema.py

@@ -1,16 +1,16 @@
 from collections.abc import Awaitable
+from asyncpg import create_pool
 
 from pgbelt.cmd.helpers import run_with_configs
 from pgbelt.config.models import DbupgradeConfig
 from pgbelt.util.dump import apply_target_constraints
 from pgbelt.util.dump import apply_target_schema
 from pgbelt.util.dump import create_target_indexes
-from pgbelt.util.dump import dump_dst_not_valid_constraints
 from pgbelt.util.dump import dump_source_schema
-from pgbelt.util.dump import dump_dst_create_index
 from pgbelt.util.dump import remove_dst_not_valid_constraints
 from pgbelt.util.dump import remove_dst_indexes
 from pgbelt.util.logs import get_logger
+from pgbelt.util.postgres import run_analyze
 
 
 @run_with_configs
@@ -56,17 +56,6 @@ async def load_constraints(config_future: Awaitable[DbupgradeConfig]) -> None:
     await apply_target_constraints(conf, logger)
 
 
-@run_with_configs(skip_src=True)
-async def dump_constraints(config_future: Awaitable[DbupgradeConfig]) -> None:
-    """
-    Dumps the NOT VALID constraints from the target database onto disk, in
-    the schemas directory.
-    """
-    conf = await config_future
-    logger = get_logger(conf.db, conf.dc, "schema.dst")
-    await dump_dst_not_valid_constraints(conf, logger)
-
-
 @run_with_configs(skip_src=True)
 async def remove_constraints(config_future: Awaitable[DbupgradeConfig]) -> None:
     """
@@ -79,17 +68,6 @@ async def remove_constraints(config_future: Awaitable[DbupgradeConfig]) -> None:
     await remove_dst_not_valid_constraints(conf, logger)
 
 
-@run_with_configs(skip_src=True)
-async def dump_indexes(config_future: Awaitable[DbupgradeConfig]) -> None:
-    """
-    Dumps the CREATE INDEX statements from the target database onto disk, in
-    the schemas directory.
-    """
-    conf = await config_future
-    logger = get_logger(conf.db, conf.dc, "schema.dst")
-    await dump_dst_create_index(conf, logger)
-
-
 @run_with_configs(skip_src=True)
 async def remove_indexes(config_future: Awaitable[DbupgradeConfig]) -> None:
     """
@@ -109,19 +87,30 @@ async def create_indexes(config_future: Awaitable[DbupgradeConfig]) -> None:
     as the owner user. This must only be done after most data is synchronized
     (at minimum after the initializing phase) from the source to the destination
     database.
+
+    After creating indexes, the destination database should be analyzed to ensure
+    the query planner has the most up-to-date statistics for the indexes.
     """
     conf = await config_future
     logger = get_logger(conf.db, conf.dc, "schema.dst")
     await create_target_indexes(conf, logger, during_sync=False)
 
+    # Run ANALYZE after creating indexes (without statement timeout)
+    async with create_pool(
+        conf.dst.root_uri,
+        min_size=1,
+        server_settings={
+            "statement_timeout": "0",
+        },
+    ) as dst_pool:
+        await run_analyze(dst_pool, logger)
+
 
 COMMANDS = [
     dump_schema,
     load_schema,
     load_constraints,
-    dump_constraints,
     remove_constraints,
-    dump_indexes,
     remove_indexes,
     create_indexes,
 ]
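The new `create_indexes` flow opens a dedicated asyncpg pool whose sessions have `statement_timeout` disabled, since `ANALYZE` on a large, freshly indexed database can outlive a timeout inherited from the role or database. A minimal standalone sketch of the same pattern; the DSN and the final `ANALYZE` call are illustrative stand-ins for pgbelt's config-derived URI and its `run_analyze` helper:

```python
import asyncio

from asyncpg import create_pool

# Hypothetical destination URI; pgbelt derives this from conf.dst.root_uri.
DST_URI = "postgresql://root@dst-host:5432/mydb"


async def analyze_without_timeout() -> None:
    # server_settings entries are applied as session GUCs on every
    # connection the pool opens, so ANALYZE cannot be cancelled by an
    # inherited statement_timeout.
    async with create_pool(
        DST_URI, min_size=1, server_settings={"statement_timeout": "0"}
    ) as pool:
        print(await pool.fetchval("SHOW statement_timeout"))  # prints "0"
        await pool.execute("ANALYZE;")  # stand-in for run_analyze(pool, logger)


asyncio.run(analyze_without_timeout())
```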
{pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/status.py

@@ -116,10 +116,20 @@ async def status(conf_future: Awaitable[DbupgradeConfig]) -> dict[str, str]:
 
         result[0].update(result[1])
         result[0]["db"] = conf.db
-
+
+        # We should hide the progress in the following cases:
+        # 1. When src -> dst is replicating and dst -> src is any state (replicating, unconfigured, down)
+        #    a. We do this because the size when done still will be a tad smaller than SRC, showing <100%
+        # 2. When src -> dst is unconfigured and dst -> src is replicating (not down or unconfigured)
+        #    a. We do this because reverse-only occurs at the start of cutover and onwards, and seeing the progress at that stage is not useful.
+        if (result[0]["pg1_pg2"] == "replicating") or (  # 1
+            result[0]["pg1_pg2"] == "unconfigured"
+            and result[0]["pg2_pg1"] == "replicating"
+        ):  # 2
             result[2]["src_dataset_size"] = "n/a"
             result[2]["dst_dataset_size"] = "n/a"
             result[2]["progress"] = "n/a"
+
         result[0].update(result[2])
         return result[0]
     finally:
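The added branch is easiest to read as a predicate over the two replication directions. A minimal sketch under the same state strings; `hide_progress` is a hypothetical name, not a pgbelt function:

```python
def hide_progress(pg1_pg2: str, pg2_pg1: str) -> bool:
    # Case 1: forward replication is still running, so the destination will
    # always trail the source slightly and the percentage reads as <100%.
    # Case 2: reverse-only replication means cutover has begun; progress of
    # the original copy is no longer meaningful.
    return pg1_pg2 == "replicating" or (
        pg1_pg2 == "unconfigured" and pg2_pg1 == "replicating"
    )


assert hide_progress("replicating", "down")          # case 1
assert hide_progress("unconfigured", "replicating")  # case 2
assert not hide_progress("unconfigured", "down")     # progress still shown
```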
{pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/cmd/sync.py

@@ -108,40 +108,6 @@ async def load_tables(
     await load_dumped_tables(conf, tables, logger)
 
 
-@run_with_configs
-async def sync_tables(
-    config_future: Awaitable[DbupgradeConfig],
-    tables: list[str] = Option([], help="Specific tables to sync"),
-):
-    """
-    Dump and load all tables from the source database to the destination database.
-    Equivalent to running dump-tables followed by load-tables. Table data will be
-    saved locally in files.
-
-    You may also provide a list of tables to sync with the
-    --tables option and only these tables will be synced.
-    """
-    conf = await config_future
-    src_logger = get_logger(conf.db, conf.dc, "sync.src")
-    dst_logger = get_logger(conf.db, conf.dc, "sync.dst")
-
-    if tables:
-        dump_tables = tables.split(",")
-    else:
-        async with create_pool(conf.src.pglogical_uri, min_size=1) as src_pool:
-            _, dump_tables, _ = await analyze_table_pkeys(
-                src_pool, conf.schema_name, src_logger
-            )
-
-    if conf.tables:
-        dump_tables = [t for t in dump_tables if t in conf.tables]
-
-    await dump_source_tables(conf, dump_tables)
-    await load_dumped_tables(
-        conf, [] if not tables and not conf.tables else dump_tables, dst_logger
-    )
-
-
 @run_with_configs(skip_src=True)
 async def analyze(config_future: Awaitable[DbupgradeConfig]) -> None:
     """
@@ -150,7 +116,13 @@ async def analyze(config_future: Awaitable[DbupgradeConfig]) -> None:
     """
     conf = await config_future
     logger = get_logger(conf.db, conf.dc, "sync.dst")
-    async with create_pool(conf.dst.root_uri, min_size=1) as dst_pool:
+    async with create_pool(
+        conf.dst.root_uri,
+        min_size=1,
+        server_settings={
+            "statement_timeout": "0",
+        },
+    ) as dst_pool:
         await run_analyze(dst_pool, logger)
 
 
@@ -208,8 +180,15 @@ async def sync(
         create_pool(conf.src.pglogical_uri, min_size=1),
         create_pool(conf.dst.root_uri, min_size=1),
         create_pool(conf.dst.owner_uri, min_size=1),
+        create_pool(
+            conf.dst.root_uri,
+            min_size=1,
+            server_settings={
+                "statement_timeout": "0",
+            },
+        ),
     )
-    src_pool, dst_root_pool, dst_owner_pool = pools
+    src_pool, dst_root_pool, dst_owner_pool, dst_root_no_timeout_pool = pools
 
     try:
         src_logger = get_logger(conf.db, conf.dc, "sync.src")
@@ -253,7 +232,7 @@ async def sync(
                 conf.schema_name,
                 validation_logger,
             ),
-            run_analyze(dst_root_pool, dst_logger),
+            run_analyze(dst_root_no_timeout_pool, dst_logger),
         )
     finally:
         await gather(*[p.close() for p in pools])
@@ -263,7 +242,6 @@ COMMANDS = [
     sync_sequences,
     dump_tables,
     load_tables,
-    sync_tables,
     analyze,
     validate_data,
     sync,
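`sync` now opens a fourth pool against the same destination URI purely so the final `run_analyze` runs without a timeout; per-pool `server_settings` are why a separate pool is needed at all, since the existing root pool keeps its default timeout. A minimal sketch of that fan-out, with placeholder URIs standing in for the ones pgbelt builds from its `DbupgradeConfig`:

```python
import asyncio

from asyncpg import create_pool

SRC_URI = "postgresql://pglogical@src-host:5432/mydb"  # placeholder
DST_URI = "postgresql://root@dst-host:5432/mydb"       # placeholder


async def main() -> None:
    pools = await asyncio.gather(
        create_pool(SRC_URI, min_size=1),
        create_pool(DST_URI, min_size=1),
        # Same server, separate pool: these sessions never time out, so
        # long-running ANALYZE statements are safe here.
        create_pool(
            DST_URI, min_size=1, server_settings={"statement_timeout": "0"}
        ),
    )
    src_pool, dst_pool, dst_no_timeout_pool = pools
    try:
        await dst_no_timeout_pool.execute("ANALYZE;")
    finally:
        # Mirror the diff's cleanup: close every pool even on failure.
        await asyncio.gather(*[p.close() for p in pools])


asyncio.run(main())
```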
{pgbelt-0.7.11 → pgbelt-0.8.1}/pgbelt/util/dump.py

@@ -310,11 +310,14 @@ async def remove_dst_not_valid_constraints(
         if (config.tables and table in config.tables) or not config.tables:
             queries = queries + f"ALTER TABLE {table} DROP CONSTRAINT {constraint};"
 
-    command = ["psql", config.dst.owner_dsn, "-c", f"'{queries}'"]
+    if queries != "":
+        command = ["psql", config.dst.owner_dsn, "-c", f"'{queries}'"]
 
-    await _execute_subprocess(
-        command, "Finished removing NOT VALID constraints from the target.", logger
-    )
+        await _execute_subprocess(
+            command, "Finished removing NOT VALID constraints from the target.", logger
+        )
+    else:
+        logger.info("No NOT VALID detected for removal.")
 
 
 async def apply_target_constraints(config: DbupgradeConfig, logger: Logger) -> None:
@@ -336,53 +339,6 @@ async def apply_target_constraints(config: DbupgradeConfig, logger: Logger) -> None:
     )
 
 
-async def dump_dst_create_index(config: DbupgradeConfig, logger: Logger) -> None:
-    """
-    Dump CREATE INDEX statements from the target database.
-    Used when schema is loaded in outside of pgbelt.
-    """
-
-    logger.info("Dumping target CREATE INDEX statements...")
-
-    command = [
-        "pg_dump",
-        "--schema-only",
-        "--no-owner",
-        "-n",
-        config.schema_name,
-        config.dst.pglogical_dsn,
-    ]
-
-    out = await _execute_subprocess(command, "Retrieved target schema", logger)
-
-    # No username replacement needs to be done, so replace dst user with the same.
-    commands_raw = _parse_dump_commands(
-        out.decode("utf-8"), config.dst.owner_user.name, config.dst.owner_user.name
-    )
-
-    commands = []
-    for c in commands_raw:
-        if "CREATE" in command and "INDEX" in command:
-            regex_matches = search(
-                r"CREATE [UNIQUE ]*INDEX (?P<index>[a-zA-Z0-9._]+)+.*",
-                c,
-            )
-            if not regex_matches:
-                continue
-            commands.append(c)
-
-    try:
-        await makedirs(schema_dir(config.db, config.dc))
-    except FileExistsError:
-        pass
-
-    async with aopen(schema_file(config.db, config.dc, ONLY_INDEXES), "w") as out:
-        for command in commands:
-            await out.write(command)
-
-    logger.debug("Finished dumping CREATE INDEX statements from the target.")
-
-
 async def remove_dst_indexes(config: DbupgradeConfig, logger: Logger) -> None:
     """
     Remove the INDEXes from the schema of the target database.
@@ -395,7 +351,6 @@ async def remove_dst_indexes(config: DbupgradeConfig, logger: Logger) -> None:
 
     logger.info("Removing Indexes from the target...")
 
-    queries = ""
     for c in create_index_statements.split(";"):
         regex_matches = search(
             r"CREATE [UNIQUE ]*INDEX (?P<index>[a-zA-Z0-9._]+)+.*",
@@ -404,14 +359,20 @@
         if not regex_matches:
            continue
         index = regex_matches.groupdict()["index"]
-
-
-
-
-
-
-
+        if config.schema_name:
+            index = f"{config.schema_name}.{index}"
+
+        # DROP the index
+        # Note that the host DSN must have a statement timeout of 0.
+        # Example DSN: `host=server-hostname user=user dbname=db_name options='-c statement_timeout=3600000'`
+        host_dsn = config.dst.owner_dsn + " options='-c statement_timeout=0'"
+
+        # DROP INDEX IF EXISTS so no need to catch exceptions
+        command = ["psql", host_dsn, "-c", f"DROP INDEX IF EXISTS {index};"]
+        logger.info(f"Dropping index {index} on the target...")
+        await _execute_subprocess(
+            command, f"Finished dropping index {index} on the target.", logger
+        )
 
 
 async def create_target_indexes(
@@ -448,7 +409,10 @@ async def create_target_indexes(
         index = regex_matches.groupdict()["index"]
 
         # Create the index
-        command = ["psql", config.dst.owner_dsn, "-c", f"{c};"]
+        # Note that the host DSN must have a statement timeout of 0.
+        # Example DSN: `host=server-hostname user=user dbname=db_name options='-c statement_timeout=3600000'`
+        host_dsn = config.dst.owner_dsn + " options='-c statement_timeout=0'"
+        command = ["psql", host_dsn, "-c", f"{c};"]
         logger.info(f"Creating index {index} on the target...")
         try:
             await _execute_subprocess(
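Index creation and removal shell out to `psql`, so the timeout cannot be disabled through asyncpg here; instead the diff appends libpq's `options` keyword to the DSN, which passes `-c statement_timeout=0` to the server for that session. A minimal sketch of the same subprocess pattern; the DSN and index name are placeholders, where pgbelt uses `config.dst.owner_dsn` and its `_execute_subprocess` helper:

```python
import asyncio

OWNER_DSN = "host=dst-host user=owner dbname=mydb"  # placeholder


async def drop_index(index: str) -> None:
    # libpq's options parameter forwards per-session server flags, so this
    # psql session runs with statement_timeout disabled.
    dsn = OWNER_DSN + " options='-c statement_timeout=0'"
    proc = await asyncio.create_subprocess_exec(
        "psql", dsn, "-c", f"DROP INDEX IF EXISTS {index};"
    )
    if await proc.wait() != 0:
        raise RuntimeError(f"failed to drop index {index}")


asyncio.run(drop_index("public.my_big_index"))
```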
{pgbelt-0.7.11 → pgbelt-0.8.1}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pgbelt"
-version = "0.7.11"
+version = "0.8.1"
 description = "A CLI tool used to manage Postgres data migrations from beginning to end, for a single database or a fleet, leveraging pglogical replication."
 authors = ["Varjitt Jeeva <varjitt.jeeva@autodesk.com>"]
 readme = "README.md"
@@ -12,20 +12,20 @@ packages = [
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
 aiofiles = ">=0.8,<24.2"
-asyncpg = ">=0.27,<0.
+asyncpg = ">=0.27,<0.31"
 pydantic = ">=2.0,<3.0"
 tabulate = "^0.9.0"
-typer = ">=0.9,<0.
+typer = ">=0.9,<0.14"
 
 [tool.poetry.dev-dependencies]
-black = "~24.
-pre-commit = "~
+black = "~24.10.0"
+pre-commit = "~4.0.1"
 flake8 = "^7.1.1"
-pytest-cov = "~
+pytest-cov = "~6.0.0"
 pytest = "^8.3.3"
 coverage = {extras = ["toml"], version = "^7.6"}
-safety = "^3.2.
-mypy = "^1.
+safety = "^3.2.11"
+mypy = "^1.13"
 xdoctest = {extras = ["colors"], version = "^1.2.0"}
 flake8-bandit = "~4.1.1"
 flake8-bugbear = ">=21.9.2"
@@ -33,10 +33,10 @@ flake8-docstrings = "^1.6.0"
 flake8-rst-docstrings = "^0.3.0"
 pep8-naming = "^0.14.1"
 darglint = "^1.8.1"
-reorder-python-imports = "^3.
-pre-commit-hooks = "^
+reorder-python-imports = "^3.14.0"
+pre-commit-hooks = "^5.0.0"
 Pygments = "^2.18.0"
-pyupgrade = "^3.
+pyupgrade = "^3.19.0"
 pylint = "^3.3.1"
 pytest-asyncio = "~0.24.0"