starrocks-br 0.2.0-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
starrocks_br/cli.py CHANGED
@@ -1,23 +1,34 @@
-import click
 import os
 import sys
-from datetime import datetime
+
+import click
+
+from . import (
+    concurrency,
+    db,
+    executor,
+    health,
+    labels,
+    logger,
+    planner,
+    repository,
+    restore,
+    schema,
+)
 from . import config as config_module
-from . import db
-from . import health
-from . import repository
-from . import concurrency
-from . import planner
-from . import labels
-from . import executor
-from . import restore
-from . import schema
-from . import logger
-
-
-def _handle_snapshot_exists_error(error_details: dict, label: str, config: str, repository: str, backup_type: str, group: str, baseline_backup: str = None) -> None:
+
+
+def _handle_snapshot_exists_error(
+    error_details: dict,
+    label: str,
+    config: str,
+    repository: str,
+    backup_type: str,
+    group: str,
+    baseline_backup: str = None,
+) -> None:
     """Handle snapshot_exists error by providing helpful guidance to the user.
-
+
     Args:
         error_details: Error details dict containing error_type and snapshot_name
         label: The backup label that was generated
@@ -27,7 +38,7 @@ def _handle_snapshot_exists_error(error_details: dict, label: str, config: str,
         group: Inventory group name
         baseline_backup: Optional baseline backup label (for incremental backups)
     """
-    snapshot_name = error_details.get('snapshot_name', label)
+    snapshot_name = error_details.get("snapshot_name", label)
     logger.error(f"Snapshot '{snapshot_name}' already exists in the repository.")
     logger.info("")
     logger.info("This typically happens when:")
@@ -35,15 +46,17 @@ def _handle_snapshot_exists_error(error_details: dict, label: str, config: str,
     logger.info(" • The backup completed on the server, but backup_history wasn't updated")
     logger.info("")
     logger.info("To resolve this, retry the backup with a custom label using --name:")
-
-    if backup_type == 'incremental':
+
+    if backup_type == "incremental":
         retry_cmd = f" starrocks-br backup incremental --config {config} --group {group} --name {snapshot_name}_retry"
         if baseline_backup:
             retry_cmd += f" --baseline-backup {baseline_backup}"
         logger.info(retry_cmd)
     else:
-        logger.info(f" starrocks-br backup full --config {config} --group {group} --name {snapshot_name}_retry")
-
+        logger.info(
+            f" starrocks-br backup full --config {config} --group {group} --name {snapshot_name}_retry"
+        )
+
     logger.info("")
     logger.tip("You can verify the existing backup by checking the repository or running:")
     logger.tip(f" SHOW SNAPSHOT ON {repository} WHERE Snapshot = '{snapshot_name}'")
@@ -55,32 +68,32 @@ def cli():
     pass
 
 
-@cli.command('init')
-@click.option('--config', required=True, help='Path to config YAML file')
+@cli.command("init")
+@click.option("--config", required=True, help="Path to config YAML file")
 def init(config):
     """Initialize ops database and control tables.
-
+
     Creates the ops database with required tables:
     - ops.table_inventory: Inventory groups mapping to databases/tables
     - ops.backup_history: Backup operation history
     - ops.restore_history: Restore operation history
     - ops.run_status: Job concurrency control
-
+
     Run this once before using backup/restore commands.
     """
     try:
         cfg = config_module.load_config(config)
         config_module.validate_config(cfg)
-
+
         database = db.StarRocksDB(
-            host=cfg['host'],
-            port=cfg['port'],
-            user=cfg['user'],
-            password=os.getenv('STARROCKS_PASSWORD'),
-            database=cfg['database'],
-            tls_config=cfg.get('tls'),
+            host=cfg["host"],
+            port=cfg["port"],
+            user=cfg["user"],
+            password=os.getenv("STARROCKS_PASSWORD"),
+            database=cfg["database"],
+            tls_config=cfg.get("tls"),
         )
-
+
         with database:
             logger.info("Initializing ops schema...")
             schema.initialize_ops_schema(database)
@@ -95,8 +108,10 @@ def init(config):
             logger.info(" VALUES ('my_full_dimension_tables', 'your_db', 'dim_products');")
             logger.info("")
             logger.info("2. Run your first backup:")
-            logger.info(" starrocks-br backup incremental --group my_daily_incremental --config config.yaml")
-
+            logger.info(
+                " starrocks-br backup incremental --group my_daily_incremental --config config.yaml"
+            )
+
     except FileNotFoundError as e:
         logger.error(f"Config file not found: {e}")
         sys.exit(1)
@@ -114,117 +129,139 @@ def backup():
     pass
 
 
-@backup.command('incremental')
-@click.option('--config', required=True, help='Path to config YAML file')
-@click.option('--baseline-backup', help='Specific backup label to use as baseline (optional). If not provided, uses the latest successful full backup.')
-@click.option('--group', required=True, help='Inventory group to backup from table_inventory. Supports wildcard \'*\'.')
-@click.option('--name', help='Optional logical name (label) for the backup. Supports -v#r placeholder for auto-versioning.')
+@backup.command("incremental")
+@click.option("--config", required=True, help="Path to config YAML file")
+@click.option(
+    "--baseline-backup",
+    help="Specific backup label to use as baseline (optional). If not provided, uses the latest successful full backup.",
+)
+@click.option(
+    "--group",
+    required=True,
+    help="Inventory group to backup from table_inventory. Supports wildcard '*'.",
+)
+@click.option(
+    "--name",
+    help="Optional logical name (label) for the backup. Supports -v#r placeholder for auto-versioning.",
+)
 def backup_incremental(config, baseline_backup, group, name):
     """Run incremental backup of partitions changed since the latest full backup.
-
+
     By default, uses the latest successful full backup as baseline.
     Optionally specify a specific backup label to use as baseline.
-
+
     Flow: load config → check health → ensure repository → reserve job slot →
     find baseline backup → find recent partitions → generate label → build backup command → execute backup
     """
     try:
         cfg = config_module.load_config(config)
         config_module.validate_config(cfg)
-
+
         database = db.StarRocksDB(
-            host=cfg['host'],
-            port=cfg['port'],
-            user=cfg['user'],
-            password=os.getenv('STARROCKS_PASSWORD'),
-            database=cfg['database'],
-            tls_config=cfg.get('tls'),
+            host=cfg["host"],
+            port=cfg["port"],
+            user=cfg["user"],
+            password=os.getenv("STARROCKS_PASSWORD"),
+            database=cfg["database"],
+            tls_config=cfg.get("tls"),
         )
-
+
         with database:
             was_created = schema.ensure_ops_schema(database)
             if was_created:
-                logger.warning("ops schema was auto-created. Please run 'starrocks-br init' after populating config.")
+                logger.warning(
+                    "ops schema was auto-created. Please run 'starrocks-br init' after populating config."
+                )
                 logger.warning("Remember to populate ops.table_inventory with your backup groups!")
-                sys.exit(1) # Exit if schema was just created, requires user action
-
+                sys.exit(1)  # Exit if schema was just created, requires user action
+
             healthy, message = health.check_cluster_health(database)
             if not healthy:
                 logger.error(f"Cluster health check failed: {message}")
                 sys.exit(1)
-
+
             logger.success(f"Cluster health: {message}")
-
-            repository.ensure_repository(database, cfg['repository'])
-
+
+            repository.ensure_repository(database, cfg["repository"])
+
             logger.success(f"Repository '{cfg['repository']}' verified")
-
+
             label = labels.determine_backup_label(
                 db=database,
-                backup_type='incremental',
-                database_name=cfg['database'],
-                custom_name=name
+                backup_type="incremental",
+                database_name=cfg["database"],
+                custom_name=name,
             )
-
+
             logger.success(f"Generated label: {label}")
-
+
             if baseline_backup:
                 logger.success(f"Using specified baseline backup: {baseline_backup}")
             else:
-                latest_backup = planner.find_latest_full_backup(database, cfg['database'])
+                latest_backup = planner.find_latest_full_backup(database, cfg["database"])
                 if latest_backup:
-                    logger.success(f"Using latest full backup as baseline: {latest_backup['label']} ({latest_backup['backup_type']})")
+                    logger.success(
+                        f"Using latest full backup as baseline: {latest_backup['label']} ({latest_backup['backup_type']})"
+                    )
                 else:
-                    logger.warning("No full backup found - this will be the first incremental backup")
-
+                    logger.warning(
+                        "No full backup found - this will be the first incremental backup"
+                    )
+
             partitions = planner.find_recent_partitions(
-                database, cfg['database'], baseline_backup_label=baseline_backup, group_name=group
+                database, cfg["database"], baseline_backup_label=baseline_backup, group_name=group
             )
-
+
             if not partitions:
                 logger.warning("No partitions found to backup")
                 sys.exit(1)
-
+
             logger.success(f"Found {len(partitions)} partition(s) to backup")
-
+
             backup_command = planner.build_incremental_backup_command(
-                partitions, cfg['repository'], label, cfg['database']
+                partitions, cfg["repository"], label, cfg["database"]
             )
-
+
+            concurrency.reserve_job_slot(database, scope="backup", label=label)
+
             planner.record_backup_partitions(database, label, partitions)
-
-            concurrency.reserve_job_slot(database, scope='backup', label=label)
-
-            logger.success(f"Job slot reserved")
+
+            logger.success("Job slot reserved")
             logger.info(f"Starting incremental backup for group '{group}'...")
             result = executor.execute_backup(
                 database,
                 backup_command,
-                repository=cfg['repository'],
-                backup_type='incremental',
-                scope='backup',
-                database=cfg['database']
+                repository=cfg["repository"],
+                backup_type="incremental",
+                scope="backup",
+                database=cfg["database"],
             )
-
-            if result['success']:
+
+            if result["success"]:
                 logger.success(f"Backup completed successfully: {result['final_status']['state']}")
                 sys.exit(0)
             else:
-                error_details = result.get('error_details')
-                if error_details and error_details.get('error_type') == 'snapshot_exists':
+                error_details = result.get("error_details")
+                if error_details and error_details.get("error_type") == "snapshot_exists":
                     _handle_snapshot_exists_error(
-                        error_details, label, config, cfg['repository'], 'incremental', group, baseline_backup
+                        error_details,
+                        label,
+                        config,
+                        cfg["repository"],
+                        "incremental",
+                        group,
+                        baseline_backup,
                     )
                     sys.exit(1)
-
-                state = result.get('final_status', {}).get('state', 'UNKNOWN')
+
+                state = result.get("final_status", {}).get("state", "UNKNOWN")
                 if state == "LOST":
                     logger.critical("Backup tracking lost!")
                     logger.warning("Another backup operation started during ours.")
                     logger.tip("Enable ops.run_status concurrency checks to prevent this.")
                     logger.error(f"{result['error_message']}")
                     sys.exit(1)
-
+
     except FileNotFoundError as e:
         logger.error(f"Config file not found: {e}")
         sys.exit(1)
@@ -239,100 +276,111 @@ def backup_incremental(config, baseline_backup, group, name):
             sys.exit(1)
 
 
-@backup.command('full')
-@click.option('--config', required=True, help='Path to config YAML file')
-@click.option('--group', required=True, help='Inventory group to backup from table_inventory. Supports wildcard \'*\'.')
-@click.option('--name', help='Optional logical name (label) for the backup. Supports -v#r placeholder for auto-versioning.')
+@backup.command("full")
+@click.option("--config", required=True, help="Path to config YAML file")
+@click.option(
+    "--group",
+    required=True,
+    help="Inventory group to backup from table_inventory. Supports wildcard '*'.",
+)
+@click.option(
+    "--name",
+    help="Optional logical name (label) for the backup. Supports -v#r placeholder for auto-versioning.",
+)
 def backup_full(config, group, name):
     """Run a full backup for a specified inventory group.
-
+
     Flow: load config → check health → ensure repository → reserve job slot →
     find tables by group → generate label → build backup command → execute backup
     """
     try:
         cfg = config_module.load_config(config)
         config_module.validate_config(cfg)
-
+
         database = db.StarRocksDB(
-            host=cfg['host'],
-            port=cfg['port'],
-            user=cfg['user'],
-            password=os.getenv('STARROCKS_PASSWORD'),
-            database=cfg['database'],
-            tls_config=cfg.get('tls'),
+            host=cfg["host"],
+            port=cfg["port"],
+            user=cfg["user"],
+            password=os.getenv("STARROCKS_PASSWORD"),
+            database=cfg["database"],
+            tls_config=cfg.get("tls"),
         )
-
+
         with database:
             was_created = schema.ensure_ops_schema(database)
             if was_created:
-                logger.warning("ops schema was auto-created. Please run 'starrocks-br init' after populating config.")
+                logger.warning(
+                    "ops schema was auto-created. Please run 'starrocks-br init' after populating config."
+                )
                 logger.warning("Remember to populate ops.table_inventory with your backup groups!")
-                sys.exit(1) # Exit if schema was just created, requires user action
-
+                sys.exit(1)  # Exit if schema was just created, requires user action
+
             healthy, message = health.check_cluster_health(database)
             if not healthy:
                 logger.error(f"Cluster health check failed: {message}")
                 sys.exit(1)
-
+
             logger.success(f"Cluster health: {message}")
-
-            repository.ensure_repository(database, cfg['repository'])
-
+
+            repository.ensure_repository(database, cfg["repository"])
+
             logger.success(f"Repository '{cfg['repository']}' verified")
-
+
             label = labels.determine_backup_label(
-                db=database,
-                backup_type='full',
-                database_name=cfg['database'],
-                custom_name=name
+                db=database, backup_type="full", database_name=cfg["database"], custom_name=name
             )
-
+
             logger.success(f"Generated label: {label}")
-
+
             backup_command = planner.build_full_backup_command(
-                database, group, cfg['repository'], label, cfg['database']
+                database, group, cfg["repository"], label, cfg["database"]
             )
-
+
             if not backup_command:
-                logger.warning(f"No tables found in group '{group}' for database '{cfg['database']}' to backup")
+                logger.warning(
+                    f"No tables found in group '{group}' for database '{cfg['database']}' to backup"
+                )
                 sys.exit(1)
-
+
             tables = planner.find_tables_by_group(database, group)
-            all_partitions = planner.get_all_partitions_for_tables(database, cfg['database'], tables)
+            all_partitions = planner.get_all_partitions_for_tables(
+                database, cfg["database"], tables
+            )
+
+            concurrency.reserve_job_slot(database, scope="backup", label=label)
+
             planner.record_backup_partitions(database, label, all_partitions)
-
-            concurrency.reserve_job_slot(database, scope='backup', label=label)
-
-            logger.success(f"Job slot reserved")
+
+            logger.success("Job slot reserved")
             logger.info(f"Starting full backup for group '{group}'...")
             result = executor.execute_backup(
                 database,
                 backup_command,
-                repository=cfg['repository'],
-                backup_type='full',
-                scope='backup',
-                database=cfg['database']
+                repository=cfg["repository"],
+                backup_type="full",
+                scope="backup",
+                database=cfg["database"],
             )
-
-            if result['success']:
+
+            if result["success"]:
                 logger.success(f"Backup completed successfully: {result['final_status']['state']}")
                 sys.exit(0)
             else:
-                error_details = result.get('error_details')
-                if error_details and error_details.get('error_type') == 'snapshot_exists':
+                error_details = result.get("error_details")
+                if error_details and error_details.get("error_type") == "snapshot_exists":
                     _handle_snapshot_exists_error(
-                        error_details, label, config, cfg['repository'], 'full', group
+                        error_details, label, config, cfg["repository"], "full", group
                     )
                     sys.exit(1)
-
-                state = result.get('final_status', {}).get('state', 'UNKNOWN')
+
+                state = result.get("final_status", {}).get("state", "UNKNOWN")
                 if state == "LOST":
                     logger.critical("Backup tracking lost!")
                     logger.warning("Another backup operation started during ours.")
                     logger.tip("Enable ops.run_status concurrency checks to prevent this.")
                     logger.error(f"{result['error_message']}")
                     sys.exit(1)
-
+
     except (FileNotFoundError, ValueError, RuntimeError, Exception) as e:
         if isinstance(e, FileNotFoundError):
             logger.error(f"Config file not found: {e}")
@@ -345,123 +393,140 @@ def backup_full(config, group, name):
             sys.exit(1)
 
 
-
-
-@cli.command('restore')
-@click.option('--config', required=True, help='Path to config YAML file')
-@click.option('--target-label', required=True, help='Backup label to restore to')
-@click.option('--group', help='Optional inventory group to filter tables to restore')
-@click.option('--table', help='Optional table name to restore (table name only, database comes from config). Cannot be used with --group.')
-@click.option('--rename-suffix', default='_restored', help='Suffix for temporary tables during restore (default: _restored)')
-@click.option('--yes', is_flag=True, help='Skip confirmation prompt and proceed automatically')
+@cli.command("restore")
+@click.option("--config", required=True, help="Path to config YAML file")
+@click.option("--target-label", required=True, help="Backup label to restore to")
+@click.option("--group", help="Optional inventory group to filter tables to restore")
+@click.option(
+    "--table",
+    help="Optional table name to restore (table name only, database comes from config). Cannot be used with --group.",
+)
+@click.option(
+    "--rename-suffix",
+    default="_restored",
+    help="Suffix for temporary tables during restore (default: _restored)",
+)
+@click.option("--yes", is_flag=True, help="Skip confirmation prompt and proceed automatically")
 def restore_command(config, target_label, group, table, rename_suffix, yes):
     """Restore data to a specific point in time using intelligent backup chain resolution.
-
+
     This command automatically determines the correct sequence of backups needed for restore:
     - For full backups: restores directly from the target backup
     - For incremental backups: restores the base full backup first, then applies the incremental
-
+
     The restore process uses temporary tables with the specified suffix for safety, then performs
     an atomic rename to make the restored data live.
-
+
     Flow: load config → check health → ensure repository → find restore pair → get tables from backup → execute restore flow
     """
     try:
         if group and table:
-            logger.error("Cannot specify both --group and --table. Use --table for single table restore or --group for inventory group restore.")
+            logger.error(
+                "Cannot specify both --group and --table. Use --table for single table restore or --group for inventory group restore."
+            )
             sys.exit(1)
-
+
         if table:
             table = table.strip()
             if not table:
                 logger.error("Table name cannot be empty")
                 sys.exit(1)
-
-            if '.' in table:
-                logger.error("Table name must not include database prefix. Use 'table_name' not 'database.table_name'. Database comes from config file.")
+
+            if "." in table:
+                logger.error(
+                    "Table name must not include database prefix. Use 'table_name' not 'database.table_name'. Database comes from config file."
+                )
                 sys.exit(1)
-
+
         cfg = config_module.load_config(config)
         config_module.validate_config(cfg)
-
+
         database = db.StarRocksDB(
-            host=cfg['host'],
-            port=cfg['port'],
-            user=cfg['user'],
-            password=os.getenv('STARROCKS_PASSWORD'),
-            database=cfg['database'],
-            tls_config=cfg.get('tls'),
+            host=cfg["host"],
+            port=cfg["port"],
+            user=cfg["user"],
+            password=os.getenv("STARROCKS_PASSWORD"),
+            database=cfg["database"],
+            tls_config=cfg.get("tls"),
         )
-
+
         with database:
             was_created = schema.ensure_ops_schema(database)
             if was_created:
-                logger.warning("ops schema was auto-created. Please run 'starrocks-br init' after populating config.")
+                logger.warning(
+                    "ops schema was auto-created. Please run 'starrocks-br init' after populating config."
+                )
                 logger.warning("Remember to populate ops.table_inventory with your backup groups!")
-                sys.exit(1) # Exit if schema was just created, requires user action
-
+                sys.exit(1)  # Exit if schema was just created, requires user action
+
             healthy, message = health.check_cluster_health(database)
             if not healthy:
                 logger.error(f"Cluster health check failed: {message}")
                 sys.exit(1)
-
+
             logger.success(f"Cluster health: {message}")
-
-            repository.ensure_repository(database, cfg['repository'])
-
+
+            repository.ensure_repository(database, cfg["repository"])
+
             logger.success(f"Repository '{cfg['repository']}' verified")
-
+
             logger.info(f"Finding restore sequence for target backup: {target_label}")
-
+
             try:
                 restore_pair = restore.find_restore_pair(database, target_label)
                 logger.success(f"Found restore sequence: {' -> '.join(restore_pair)}")
             except ValueError as e:
                 logger.error(f"Failed to find restore sequence: {e}")
                 sys.exit(1)
-
+
             logger.info("Determining tables to restore from backup manifest...")
-
+
             try:
                 tables_to_restore = restore.get_tables_from_backup(
-                    database,
-                    target_label,
-                    group=group,
-                    table=table,
-                    database=cfg['database'] if table else None
+                    database,
+                    target_label,
+                    group=group,
+                    table=table,
+                    database=cfg["database"] if table else None,
                 )
             except ValueError as e:
                 logger.error(str(e))
                 sys.exit(1)
-
+
             if not tables_to_restore:
                 if group:
-                    logger.warning(f"No tables found in backup '{target_label}' for group '{group}'")
+                    logger.warning(
+                        f"No tables found in backup '{target_label}' for group '{group}'"
+                    )
                 elif table:
-                    logger.warning(f"No tables found in backup '{target_label}' for table '{table}'")
+                    logger.warning(
+                        f"No tables found in backup '{target_label}' for table '{table}'"
+                    )
                 else:
                     logger.warning(f"No tables found in backup '{target_label}'")
                 sys.exit(1)
-
-            logger.success(f"Found {len(tables_to_restore)} table(s) to restore: {', '.join(tables_to_restore)}")
-
+
+            logger.success(
+                f"Found {len(tables_to_restore)} table(s) to restore: {', '.join(tables_to_restore)}"
+            )
+
             logger.info("Starting restore flow...")
             result = restore.execute_restore_flow(
                 database,
-                cfg['repository'],
+                cfg["repository"],
                 restore_pair,
                 tables_to_restore,
                 rename_suffix,
-                skip_confirmation=yes
+                skip_confirmation=yes,
             )
-
-            if result['success']:
-                logger.success(result['message'])
+
+            if result["success"]:
+                logger.success(result["message"])
                 sys.exit(0)
             else:
                 logger.error(f"Restore failed: {result['error_message']}")
                 sys.exit(1)
-
+
     except FileNotFoundError as e:
         logger.error(f"Config file not found: {e}")
         sys.exit(1)
@@ -476,6 +541,5 @@ def restore_command(config, target_label, group, table, rename_suffix, yes):
         sys.exit(1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     cli()
-