pgbelt 0.6.2__py3-none-any.whl → 0.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pgbelt/cmd/convenience.py +5 -7
- pgbelt/cmd/preflight.py +457 -64
- pgbelt/cmd/setup.py +26 -7
- pgbelt/cmd/status.py +36 -0
- pgbelt/cmd/sync.py +40 -15
- pgbelt/cmd/teardown.py +2 -2
- pgbelt/config/models.py +5 -1
- pgbelt/util/dump.py +9 -5
- pgbelt/util/pglogical.py +27 -14
- pgbelt/util/postgres.py +177 -43
- {pgbelt-0.6.2.dist-info → pgbelt-0.7.1.dist-info}/METADATA +5 -1
- pgbelt-0.7.1.dist-info/RECORD +27 -0
- {pgbelt-0.6.2.dist-info → pgbelt-0.7.1.dist-info}/WHEEL +1 -1
- pgbelt-0.6.2.dist-info/RECORD +0 -27
- {pgbelt-0.6.2.dist-info → pgbelt-0.7.1.dist-info}/LICENSE +0 -0
- {pgbelt-0.6.2.dist-info → pgbelt-0.7.1.dist-info}/entry_points.txt +0 -0
pgbelt/util/postgres.py
CHANGED
@@ -6,25 +6,44 @@ from asyncpg.exceptions import UndefinedObjectError
 
 
 async def dump_sequences(
-    pool: Pool, targeted_sequences: list[str], logger: Logger
+    pool: Pool, targeted_sequences: list[str], schema: str, logger: Logger
 ) -> dict[str, int]:
     """
     return a dictionary of sequence names mapped to their last values
     """
     logger.info("Dumping sequence values...")
-
+    # Get all sequences in the schema
+    seqs = await pool.fetch(
+        f"""
+        SELECT '{schema}' || '.' || sequence_name
+        FROM information_schema.sequences
+        WHERE sequence_schema = '{schema}';
+        """
+    )
+
+    # Note: When in an exodus migration with a non-public schema, the sequence names must be prefixed with the schema name.
+    # This may not be done by the user, so we must do it here.
+    proper_sequence_names = None
+    if targeted_sequences is not None:
+        proper_sequence_names = []
+        for seq in targeted_sequences:
+            if f"{schema}." not in seq:
+                proper_sequence_names.append(f"{schema}.{seq}")
+            else:
+                proper_sequence_names.append(seq)
+        targeted_sequences = proper_sequence_names
 
     seq_vals = {}
+    final_seqs = []
+    # If we get a list of targeted sequences, we only want to dump whichever of those are found in the database and schema.
     if targeted_sequences:
-
-
-
-
-
-
-
-                f"SELECT last_value FROM {seq};"
-            )
+        final_seqs = [r[0] for r in seqs if r[0] in targeted_sequences]
+    else:  # Otherwise, we want to dump all sequences found in the schema.
+        final_seqs = [r[0] for r in seqs]
+
+    for seq in final_seqs:
+        res = await pool.fetchval(f"SELECT last_value FROM {seq};")
+        seq_vals[seq.strip()] = res
 
     logger.debug(f"Dumped sequences: {seq_vals}")
     return seq_vals
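The normalization above is easy to sanity-check without a database. A minimal sketch of the same prefixing rule, as a pure function (the schema and sequence names here are made up for illustration):

def qualify_sequences(targeted_sequences: list[str] | None, schema: str) -> list[str] | None:
    # Mirrors the diff's normalization: prefix any bare sequence name with
    # the schema so later lookups match the schema-qualified names returned
    # by the information_schema.sequences query above.
    if targeted_sequences is None:
        return None
    return [
        seq if f"{schema}." in seq else f"{schema}.{seq}"
        for seq in targeted_sequences
    ]


assert qualify_sequences(["id_seq", "app.order_id_seq"], "app") == [
    "app.id_seq",
    "app.order_id_seq",
]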
@@ -46,15 +65,20 @@ async def load_sequences(pool: Pool, seqs: dict[str, int], logger: Logger) -> None:
 
 
 async def compare_data(
-    src_pool: Pool,
+    src_pool: Pool,
+    dst_pool: Pool,
+    query: str,
+    tables: list[str],
+    schema: str,
+    logger: Logger,
 ) -> None:
     """
     Validate data between source and destination databases by doing the following:
-    1. Get all tables with primary keys
+    1. Get all tables with primary keys (from the source)
     2. For each of those tables, select * limit 100
     3. For each row, ensure the row in the destination is identical
     """
-    pkeys, _, pkeys_raw = await analyze_table_pkeys(src_pool, logger)
+    pkeys, _, pkeys_raw = await analyze_table_pkeys(src_pool, schema, logger)
 
     pkeys_dict = {}
     # {
@@ -77,22 +101,23 @@ async def compare_data(
         # If specific table list is defined and iterated table is not in that list, skip.
         if tables and (table not in tables):
             continue
+        full_table_name = f"{schema}.{table}"
 
-        logger.debug(f"Validating table {
+        logger.debug(f"Validating table {full_table_name}...")
         order_by_pkeys = ",".join(pkeys_dict[table])
 
         src_rows = await src_pool.fetch(
-            query.format(table=
+            query.format(table=full_table_name, order_by_pkeys=order_by_pkeys)
         )
 
         # There is a chance tables are empty...
        if len(src_rows) == 0:
            dst_rows = await dst_pool.fetch(
-                query.format(table=
+                query.format(table=full_table_name, order_by_pkeys=order_by_pkeys)
            )
            if len(dst_rows) != 0:
                raise AssertionError(
-                    f"Table {
+                    f"Table {full_table_name} has 0 rows in source but nonzero rows in target... Big problem. Please investigate."
                )
            else:
                continue
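For context, `query` here is a template string rendered once per table, and the new `full_table_name` is what makes the rendered SQL schema-qualified. A tiny illustrative render (the template below is in the style of the ones later in this file, and the table name is made up):

# Illustrative template in the style compare_100_rows builds; the real
# templates live further down in pgbelt/util/postgres.py.
template = "SELECT * FROM {table} ORDER BY {order_by_pkeys} LIMIT 100;"

print(template.format(table="app.users", order_by_pkeys="id"))
# -> SELECT * FROM app.users ORDER BY id LIMIT 100;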
@@ -114,7 +139,7 @@ async def compare_data(
             src_pkeys_string = src_pkeys_string[:-1]
             pkey_vals_dict[pkey] = src_pkeys_string
 
-        dst_query = f"SELECT * FROM {
+        dst_query = f"SELECT * FROM {full_table_name} WHERE "
 
         for k, v in pkey_vals_dict.items():
             dst_query = dst_query + f"{k} IN ({v}) AND "
@@ -131,7 +156,7 @@ async def compare_data(
 
         if len(src_rows) != len(dst_rows):
             raise AssertionError(
-                f'Row count of the sample taken from table "{
+                f'Row count of the sample taken from table "{full_table_name}" '
                 "does not match in source and destination!\n"
                 f"Query: {dst_query}"
             )
@@ -141,7 +166,7 @@ async def compare_data(
             if src_row != dst_row:
                 raise AssertionError(
                     "Row match failure between source and destination.\n"
-                    f"Table: {
+                    f"Table: {full_table_name}\n"
                     f"Source Row: {src_row}\n"
                     f"Dest Row: {dst_row}"
                 )
@@ -154,7 +179,7 @@ async def compare_data(
 
 
 async def compare_100_rows(
-    src_pool: Pool, dst_pool: Pool, tables: list[str], logger: Logger
+    src_pool: Pool, dst_pool: Pool, tables: list[str], schema: str, logger: Logger
 ) -> None:
     """
     Validate data between source and destination databases by doing the following:
@@ -174,11 +199,11 @@ async def compare_100_rows(
         ORDER BY {order_by_pkeys};
     """
 
-    await compare_data(src_pool, dst_pool, query, tables, logger)
+    await compare_data(src_pool, dst_pool, query, tables, schema, logger)
 
 
 async def compare_latest_100_rows(
-    src_pool: Pool, dst_pool: Pool, tables: list[str], logger: Logger
+    src_pool: Pool, dst_pool: Pool, tables: list[str], schema: str, logger: Logger
 ) -> None:
     """
     Validate data between source and destination databases by comparing the latest row:
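With `schema` now threaded through both validators, callers pass the config's schema alongside the two pools. A hedged usage sketch of the new signatures (the DSNs, schema, and logging setup below are placeholders, not values from the diff):

import asyncio
import logging

import asyncpg

from pgbelt.util.postgres import compare_100_rows, compare_latest_100_rows


async def main() -> None:
    logger = logging.getLogger("validate")
    # Placeholder DSNs; point these at the real source and destination.
    src_pool = await asyncpg.create_pool("postgresql://user:pw@src-host/db")
    dst_pool = await asyncpg.create_pool("postgresql://user:pw@dst-host/db")
    try:
        # An empty tables list means "no filter" in compare_data.
        await compare_100_rows(src_pool, dst_pool, [], "public", logger)
        await compare_latest_100_rows(src_pool, dst_pool, [], "public", logger)
    finally:
        await src_pool.close()
        await dst_pool.close()


asyncio.run(main())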
@@ -195,30 +220,31 @@ async def compare_latest_100_rows(
         LIMIT 100;
     """
 
-    await compare_data(src_pool, dst_pool, query, tables, logger)
+    await compare_data(src_pool, dst_pool, query, tables, schema, logger)
 
 
-async def table_empty(pool: Pool, table: str, logger: Logger) -> bool:
+async def table_empty(pool: Pool, table: str, schema: str, logger: Logger) -> bool:
     """
     return true if the table is empty
     """
     logger.info(f"Checking if table {table} is empty...")
-    result = await pool.fetch(f"SELECT * FROM {table} LIMIT 1;")
+    result = await pool.fetch(f"SELECT * FROM {schema}.{table} LIMIT 1;")
     return len(result) == 0
 
 
 async def analyze_table_pkeys(
-    pool: Pool, logger: Logger
+    pool: Pool, schema: str, logger: Logger
 ) -> tuple[list[str], list[str], Record]:
     """
-
-    with pkeys in
-    The third list is the raw rows of the
-
+    Return three lists of table names. the first element is all tables
+    with pkeys in the config's named schema and the second is all tables
+    without pkeys in that schema. The third list is the raw rows of the
+    primary key query with the table name, constraint name, position and
+    column name for the primary key.
     """
     logger.info("Checking table primary keys...")
     pkeys_raw = await pool.fetch(
-        """
+        f"""
         SELECT kcu.table_name,
                tco.constraint_name,
                kcu.ordinal_position as position,
@@ -229,7 +255,7 @@ async def analyze_table_pkeys(
             AND kcu.constraint_schema = tco.constraint_schema
             AND kcu.constraint_name = tco.constraint_name
         WHERE tco.constraint_type = 'PRIMARY KEY'
-            AND kcu.table_schema = '
+            AND kcu.table_schema = '{schema}'
         ORDER BY kcu.table_name,
                  position;
         """
@@ -237,11 +263,11 @@ async def analyze_table_pkeys(
     pkeys = [r[0] for r in pkeys_raw]
 
     all_tables = await pool.fetch(
-        """SELECT table_name
+        f"""SELECT table_name
         FROM
             information_schema.tables
         WHERE
-            table_schema = '
+            table_schema = '{schema}'
             AND table_name != 'pg_stat_statements'
         ORDER BY 1;"""
     )
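One pattern worth noting across these hunks: `schema` is interpolated into the SQL with f-strings rather than bound as a query parameter. The value comes from pgbelt's own config rather than untrusted input, so this is workable, but plain lookups like the one above also have a bound-parameter equivalent. A sketch using asyncpg's positional placeholders (the DSN is a placeholder, and this is an alternative form for illustration, not what the release ships):

import asyncio

import asyncpg


async def list_tables(dsn: str, schema: str) -> list[str]:
    # Same lookup as analyze_table_pkeys' all_tables query, but with the
    # schema bound as a parameter instead of interpolated into the SQL.
    conn = await asyncpg.connect(dsn)
    try:
        rows = await conn.fetch(
            """
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema = $1
              AND table_name != 'pg_stat_statements'
            ORDER BY 1;
            """,
            schema,
        )
        return [r["table_name"] for r in rows]
    finally:
        await conn.close()


# Placeholder DSN.
print(asyncio.run(list_tables("postgresql://user:pw@host/db", "public")))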
@@ -294,7 +320,13 @@ async def enable_login_users(pool: Pool, users: list[str], logger: Logger) -> None:
 
 
 async def precheck_info(
-    pool: Pool,
+    pool: Pool,
+    root_name: str,
+    owner_name: str,
+    target_tables: list[str],
+    target_sequences: list[str],
+    schema: str,
+    logger: Logger,
 ) -> dict:
     """
     Return a dictionary of information about the database used to determine
@@ -302,7 +334,7 @@ async def precheck_info(
     """
     logger.info("Checking db requirements...")
     result = {
-        "server_version": await pool.fetchval("SHOW server_version
+        "server_version": await pool.fetchval("SHOW server_version"),
         "max_replication_slots": await pool.fetchval("SHOW max_replication_slots;"),
         "max_worker_processes": await pool.fetchval("SHOW max_worker_processes;"),
         "max_wal_senders": await pool.fetchval("SHOW max_wal_senders;"),
@@ -311,9 +343,13 @@ async def precheck_info(
         ),
         "tables": [],
         "sequences": [],
-        "users":
+        "users": {},
+        "extensions": [],
     }
 
+    # server_version shows 13.14 (Debian 13.14-1.pgdg120+2) in the output. Remove the Debian part.
+    result["server_version"] = result["server_version"].split(" ")[0]
+
     try:
         result["rds.logical_replication"] = await pool.fetchval(
             "SHOW rds.logical_replication;"
@@ -333,9 +369,12 @@ async def precheck_info(
           AND n.nspname <> 'pg_catalog'
           AND n.nspname !~ '^pg_toast'
           AND n.nspname <> 'information_schema'
-
+          AND n.nspname <> 'pglogical'
         ORDER BY 1,2;"""
     )
+    # We filter the table list if the user has specified a list of tables to target.
+    if target_tables:
+        result["tables"] = [t for t in result["tables"] if t["Name"] in target_tables]
 
     result["sequences"] = await pool.fetch(
         """
@@ -349,10 +388,16 @@ async def precheck_info(
           AND n.nspname <> 'pg_catalog'
           AND n.nspname !~ '^pg_toast'
           AND n.nspname <> 'information_schema'
-
+          AND n.nspname <> 'pglogical'
         ORDER BY 1,2;"""
     )
 
+    # We filter the sequence list if the user has specified a list of sequences to target.
+    if target_sequences:
+        result["sequences"] = [
+            s for s in result["sequences"] if s["Name"] in target_sequences
+        ]
+
     users = await pool.fetch(
         f"""
         SELECT r.rolname, r.rolsuper, r.rolinherit,
@@ -364,15 +409,104 @@ async def precheck_info(
                 WHERE m.member = r.oid) as memberof
         , r.rolreplication
         , r.rolbypassrls
+        , has_schema_privilege(r.rolname, '{schema}', 'CREATE') AS can_create
         FROM pg_catalog.pg_roles r
         WHERE r.rolname !~ '^pg_' AND (r.rolname = '{root_name}' OR r.rolname = '{owner_name}')
         ORDER BY 1;"""
     )
 
+    # We only care about the root and owner users.
     for u in users:
         if u[0] == root_name:
-            result["root"] = u
+            result["users"]["root"] = u
         if u[0] == owner_name:
-            result["owner"] = u
+            result["users"]["owner"] = u
+
+    result["extensions"] = await pool.fetch(
+        """
+        SELECT extname
+        FROM pg_extension
+        ORDER BY extname;
+        """
+    )
 
     return result
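The shape of the returned dict changes in this release: the `root` and `owner` rows move under a new `users` key, and `extensions` is added. A hedged call sketch (the role names and schema below are placeholders; `pool` and `logger` are assumed to already exist):

from pgbelt.util.postgres import precheck_info


async def show_precheck(pool, logger) -> None:
    # Placeholder role names and schema, not values from the diff.
    info = await precheck_info(
        pool,
        root_name="postgres",
        owner_name="owner_user",
        target_tables=[],        # empty lists mean "don't filter"
        target_sequences=[],
        schema="public",
        logger=logger,
    )
    print(info["server_version"])                         # e.g. "13.14", Debian suffix stripped
    print(info["users"]["root"], info["users"]["owner"])  # pg_roles rows, now keyed under "users"
    print([e["extname"] for e in info["extensions"]])     # new: installed extensions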
+
+
+# TODO: Need to add schema here when working on non-public schema support.
+async def get_dataset_size(
+    tables: list[str], schema: str, pool: Pool, logger: Logger
+) -> str:
+    """
+    Get the total disk size of a dataset (via list of tables).
+
+    This function ALWAYS expects a list of tables. If not, the calling function should handle that.
+    """
+    logger.info("Getting the targeted dataset size...")
+
+    # Tables string must be of form "'table1', 'table2', ..."
+    tables_string = ", ".join([f"'{t}'" for t in tables])
+
+    query = f"""
+    SELECT
+        sum(pg_total_relation_size(schemaname || '.' || tablename)) AS total_relation_size
+    FROM
+        pg_tables
+    WHERE
+        schemaname = '{schema}'
+        AND tablename IN ({tables_string});
+    """
+
+    # Yes it's a duplicate, but it's a pretty one. Rather let Postgres do this than Python.
+    pretty_query = f"""
+    SELECT
+        pg_size_pretty(sum(pg_total_relation_size(schemaname || '.' || tablename))) AS total_relation_size
+    FROM
+        pg_tables
+    WHERE
+        schemaname = '{schema}'
+        AND tablename IN ({tables_string});
+    """
+
+    result = {
+        "db_size": await pool.fetchval(query),
+        "db_size_pretty": await pool.fetchval(pretty_query),
+    }
+
+    return result
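Two details worth noting in `get_dataset_size`: the annotation says `-> str` but the function actually returns a dict with `db_size` and `db_size_pretty`, and `pg_total_relation_size` counts indexes and TOAST data, so "dataset size" here is the full on-disk footprint rather than just heap bytes. A hedged call sketch (table names and schema are placeholders; `pool` and `logger` are assumed to exist):

from pgbelt.util.postgres import get_dataset_size


async def show_sizes(pool, logger) -> None:
    # Placeholder table names and schema.
    sizes = await get_dataset_size(["users", "orders"], "public", pool, logger)
    print(sizes["db_size"])         # total bytes, or None if no tables matched
    print(sizes["db_size_pretty"])  # e.g. "1543 MB", formatted by pg_size_pretty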
+
+
+async def initialization_progress(
+    tables: list[str],
+    src_schema: str,
+    dst_schema: str,
+    src_pool: Pool,
+    dst_pool: Pool,
+    src_logger: Logger,
+    dst_logger: Logger,
+) -> dict[str, str]:
+    """
+    Get the size progress of the initialization stage
+    """
+
+    src_dataset_size = await get_dataset_size(tables, src_schema, src_pool, src_logger)
+    dst_dataset_size = await get_dataset_size(tables, dst_schema, dst_pool, dst_logger)
+
+    # Eliminate None values
+    if src_dataset_size["db_size"] is None:
+        src_dataset_size["db_size"] = 0
+    if dst_dataset_size["db_size"] is None:
+        dst_dataset_size["db_size"] = 0
+
+    # Eliminate division by zero
+    if src_dataset_size["db_size"] == 0 and dst_dataset_size["db_size"] == 0:
+        progress = "0 %"
+    else:
+        progress = f"{str(round(int(dst_dataset_size['db_size'])/int(src_dataset_size['db_size'])*100 ,1))} %"
+
+    status = {
+        "src_dataset_size": src_dataset_size["db_size_pretty"] or "0 bytes",
+        "dst_dataset_size": dst_dataset_size["db_size_pretty"] or "0 bytes",
+        "progress": progress,
+    }
+    return status
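`initialization_progress` sizes the same table list on both sides and reports bulk-copy progress as a destination/source ratio, presumably feeding the expanded `pgbelt/cmd/status.py` in this release. Note that the guard only covers the case where both sizes are zero, so a zero-size source with a nonzero destination would still divide by zero. A worked example of the percentage math (the byte counts are hypothetical, standing in for the two `get_dataset_size` results):

# Hypothetical sizes, standing in for the two get_dataset_size results.
src_bytes = 2_000_000_000   # source dataset
dst_bytes = 1_234_567_890   # copied to the destination so far
progress = f"{round(dst_bytes / src_bytes * 100, 1)} %"
print(progress)  # 61.7 %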
{pgbelt-0.6.2.dist-info → pgbelt-0.7.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pgbelt
-Version: 0.6.2
+Version: 0.7.1
 Summary: A CLI tool used to manage Postgres data migrations from beginning to end, for a single database or a fleet, leveraging pglogical replication.
 Author: Varjitt Jeeva
 Author-email: varjitt.jeeva@autodesk.com
@@ -65,6 +65,10 @@ Install pgbelt locally:
 
 See [this doc](docs/quickstart.md)!
 
+## Playbook
+
+This playbook gets updated actively. If you have any issues, solutions could be found in [this playbook](docs/playbook.md).
+
 ## Contributing
 
 We welcome contributions! See [this doc](CONTRIBUTING.md) on how to do so, including setting up your local development environment.
pgbelt-0.7.1.dist-info/RECORD
ADDED
@@ -0,0 +1,27 @@
+pgbelt/__init__.py,sha256=0FxxDo2hLiQpxjY5NS48g2XC_nRMKPJf5X0aGIxmMo4,137
+pgbelt/cmd/__init__.py,sha256=28SxbIChyvJ1mLoI8XJQPrPPwKA6852V9jtnFm8R0Nw,462
+pgbelt/cmd/convenience.py,sha256=RY762BYoyOccmhHqSOaHWIPVBzY4kU0qZautTprGps0,5857
+pgbelt/cmd/helpers.py,sha256=anMAkZjJRy7n0EwewyMNJGJ4Jx9keSNZdzWqi3ICNgI,5292
+pgbelt/cmd/login.py,sha256=kyDT755YsScg3QA4ZlkyIq3J0WO4XqMepMchufjuaDE,3043
+pgbelt/cmd/preflight.py,sha256=-78Puqqf1rCxNQEyn4bQIAORez_tsy6zJAbSuO_de9s,20676
+pgbelt/cmd/schema.py,sha256=XAAj2BH6HVoA1LxuWw2kheesGDu41L3GAd5UahNp3nE,4760
+pgbelt/cmd/setup.py,sha256=CYaRGQG_ooz0BpP5g1YVmPvt8zcT4DSIdQM6nASr9Qc,5908
+pgbelt/cmd/status.py,sha256=LTvteFwA1jcfnGJdlA2siJn5TUtlf14ffMkGVSi5gi0,4730
+pgbelt/cmd/sync.py,sha256=8TSNbn1dpkyxlEMImY7FjOrzhriSXlorqE-HBjqbUfU,9027
+pgbelt/cmd/teardown.py,sha256=TTSmhmD1bqJ8-P5YyK5ZpCsmneczO0nvd2Q2s2oyXY4,3526
+pgbelt/config/__init__.py,sha256=SXok1aZcpMYJpX_hk5cuKO33CJ5s8IESkswNN9KsVSo,35
+pgbelt/config/config.py,sha256=Kw2H-G1Evfj0TXIbh3k06gE72dZEp_wXWJ2Icq_T54c,3817
+pgbelt/config/models.py,sha256=4qm23kdNNZiQOZrejD1yXP6h0Z38LNFSyI4d0i-IdUs,5830
+pgbelt/config/remote.py,sha256=RQ_dfL5g2ChVP6jeGWnpQMVKQZK1o2m9-ZPYBMaaGj4,5297
+pgbelt/main.py,sha256=YiagBiGt8pbNlukkRxROXnQX1Tx6ax7c6riuHRCrPYU,186
+pgbelt/util/__init__.py,sha256=-6KkvVMz-yGNQfeoo4CZZrgWKXYmFd4CMyoiao8OnFE,40
+pgbelt/util/asyncfuncs.py,sha256=7i_GpBmUNNZ8RUGvU-q5nclsoaCm6Lx8jLP8usYvmZc,583
+pgbelt/util/dump.py,sha256=AwyOAd9CP014gvsl-qlo1lbnXZfxoeN4ujZWUIq7KM8,14715
+pgbelt/util/logs.py,sha256=l2jT-WKZ-33eNDw4S4W1_eE4ISo4rtDRXYLVf4QTV4Y,1699
+pgbelt/util/pglogical.py,sha256=r4I95CG4eRFsEPnN30P4yxt8HeGMzKBTEm3fecfw4ws,13611
+pgbelt/util/postgres.py,sha256=909eSLhnhAkGe_4IVt-j72I3wXHArT5V_YikJnl6gdc,18178
+pgbelt-0.7.1.dist-info/LICENSE,sha256=FQ5cFkW02dKK3LmKH8z-rwn93tWSCh7lsxfNUiWcFsg,10758
+pgbelt-0.7.1.dist-info/METADATA,sha256=uXZgpAXeisAYU3M89TVK_w18R3VaEKteXZVQgQB_Kg8,2964
+pgbelt-0.7.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+pgbelt-0.7.1.dist-info/entry_points.txt,sha256=SCz_poPjkaVnWpJ-CeytAnDzbVc6l0WalOwitIqW_3g,40
+pgbelt-0.7.1.dist-info/RECORD,,
pgbelt-0.6.2.dist-info/RECORD
DELETED
@@ -1,27 +0,0 @@
-pgbelt/__init__.py,sha256=0FxxDo2hLiQpxjY5NS48g2XC_nRMKPJf5X0aGIxmMo4,137
-pgbelt/cmd/__init__.py,sha256=28SxbIChyvJ1mLoI8XJQPrPPwKA6852V9jtnFm8R0Nw,462
-pgbelt/cmd/convenience.py,sha256=xeweZUYPKarueYZQBD59meqXzAJ16uaC72pvEgua1Ec,5849
-pgbelt/cmd/helpers.py,sha256=anMAkZjJRy7n0EwewyMNJGJ4Jx9keSNZdzWqi3ICNgI,5292
-pgbelt/cmd/login.py,sha256=kyDT755YsScg3QA4ZlkyIq3J0WO4XqMepMchufjuaDE,3043
-pgbelt/cmd/preflight.py,sha256=CwP74UPpmnZ3RqJ1zFeVj3WReoXJd_2xVwWJVfTnuyg,8213
-pgbelt/cmd/schema.py,sha256=XAAj2BH6HVoA1LxuWw2kheesGDu41L3GAd5UahNp3nE,4760
-pgbelt/cmd/setup.py,sha256=v-phLiNQYFportKHvcG0EBfarfv_F2xekeL9P_wuI5c,5252
-pgbelt/cmd/status.py,sha256=r-Zv5QiO7oQQBTewnTXOdl-IslNGE6lAxlgArAYRTWU,3190
-pgbelt/cmd/sync.py,sha256=-ffZOCTWcFXjcpRe7MFfBRdV8tjSh6uoVALvykX85pU,8375
-pgbelt/cmd/teardown.py,sha256=Yps1iejx9amW2b3kmtKjw5-ySrk_qK3LfaH6EOM7NHE,3490
-pgbelt/config/__init__.py,sha256=SXok1aZcpMYJpX_hk5cuKO33CJ5s8IESkswNN9KsVSo,35
-pgbelt/config/config.py,sha256=Kw2H-G1Evfj0TXIbh3k06gE72dZEp_wXWJ2Icq_T54c,3817
-pgbelt/config/models.py,sha256=XrTZLps-jQvOaOdf9BWV5cx6xqHFRD7TnxtQMpohR1c,5432
-pgbelt/config/remote.py,sha256=RQ_dfL5g2ChVP6jeGWnpQMVKQZK1o2m9-ZPYBMaaGj4,5297
-pgbelt/main.py,sha256=YiagBiGt8pbNlukkRxROXnQX1Tx6ax7c6riuHRCrPYU,186
-pgbelt/util/__init__.py,sha256=-6KkvVMz-yGNQfeoo4CZZrgWKXYmFd4CMyoiao8OnFE,40
-pgbelt/util/asyncfuncs.py,sha256=7i_GpBmUNNZ8RUGvU-q5nclsoaCm6Lx8jLP8usYvmZc,583
-pgbelt/util/dump.py,sha256=p6M26s3FXnSCTmee4v7X0-azYvmcVz6DkZf8J8YxU0s,14357
-pgbelt/util/logs.py,sha256=l2jT-WKZ-33eNDw4S4W1_eE4ISo4rtDRXYLVf4QTV4Y,1699
-pgbelt/util/pglogical.py,sha256=URi4L_4PWYIz0iWw7HcXNfjzvWXPjQCm71cAOqnt3Qk,13059
-pgbelt/util/postgres.py,sha256=9AgnmHdYpWVypyBNTEQEpd4COoa1JEQXoELOR8-HrzQ,13668
-pgbelt-0.6.2.dist-info/LICENSE,sha256=FQ5cFkW02dKK3LmKH8z-rwn93tWSCh7lsxfNUiWcFsg,10758
-pgbelt-0.6.2.dist-info/METADATA,sha256=7bEElDGURHPEE3cVWbF19aPGhAXNQ0_YxPRpdZK_Fc8,2826
-pgbelt-0.6.2.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-pgbelt-0.6.2.dist-info/entry_points.txt,sha256=SCz_poPjkaVnWpJ-CeytAnDzbVc6l0WalOwitIqW_3g,40
-pgbelt-0.6.2.dist-info/RECORD,,
{pgbelt-0.6.2.dist-info → pgbelt-0.7.1.dist-info}/LICENSE
File without changes
{pgbelt-0.6.2.dist-info → pgbelt-0.7.1.dist-info}/entry_points.txt
File without changes