py-pve-cloud-backup 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of py-pve-cloud-backup might be problematic. Click here for more details.

@@ -0,0 +1,529 @@
1
+ import logging
2
+ import subprocess
3
+ import time
4
+ import os
5
+ import shutil
6
+ from tinydb import TinyDB, Query
7
+ import json
8
+ import paramiko
9
+ import base64
10
+ import re
11
+ import pickle
12
+ import base64
13
+ import uuid
14
+ from kubernetes import client
15
+ from kubernetes.client.rest import ApiException
16
+ from pprint import pformat
17
+ import fnmatch
18
+
19
+
20
# borg repository categories holding raw rbd images: proxmox VMs ("qemu"),
# containers ("lxc") and kubernetes volumes ("k8s")
RBD_REPO_TYPES = ["qemu", "lxc", "k8s"]

# module-wide logger; "bdd" is the tool's logger name used throughout
logger = logging.getLogger("bdd")

# borg would otherwise prompt interactively on these conditions; pre-answer "yes"
os.environ["BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK"] = "yes"  # we need this to stop borg cli from manual prompting
os.environ["BORG_RELOCATED_REPO_ACCESS_IS_OK"] = "yes"

# deployment environment selector; defaults to TESTING when unset
ENV = os.getenv("ENV", "TESTING")

# constants
# base directory all backup artifacts are written to / read from
BACKUP_DIR = os.getenv("BACKUP_DIR", "/tmp/pve-cloud-test-backup")

# TinyDB json file holding per-image backup metadata
IMAGE_META_DB_PATH = f"{BACKUP_DIR}/image-meta-db.json"

# TinyDB json file holding per-stack (VM/container) backup metadata
STACK_META_DB_PATH = f"{BACKUP_DIR}/stack-meta-db.json"
35
+
36
+
37
def group_image_metas(metas, type_keys, group_key, stack_filter=None):
    """Group image meta dicts by the value of *group_key*.

    Args:
        metas: iterable of meta dicts; each must carry "type", "stack" and
            the *group_key* key.
        type_keys: only metas whose "type" is contained here are kept.
        group_key: meta key to group by (e.g. "vmid" or "namespace").
        stack_filter: if truthy, keep only metas whose "stack" equals it.

    Returns:
        dict mapping each seen group_key value to the list of matching metas,
        in input order. Empty input yields an empty dict.
    """
    # same "bdd" logger instance the module uses; local handle keeps the
    # function self-contained
    log = logging.getLogger("bdd")

    metas_grouped = {}

    for meta in metas:
        log.debug(f"meta {meta}")

        # idiomatic "not in" instead of the original "not meta[...] in ..."
        if meta["type"] not in type_keys:
            continue  # skip non fitting

        if stack_filter and meta["stack"] != stack_filter:
            continue  # skip filtered out stack

        # setdefault replaces the manual "create list if key missing" dance
        metas_grouped.setdefault(meta[group_key], []).append(meta)

    return metas_grouped
56
+
57
+
58
+ # these functions are necessary to convert python k8s naming to camel case
59
# these functions are necessary to convert python k8s naming to camel case
def to_camel_case(snake_str):
    """Convert a snake_case identifier to camelCase."""
    head, *tail = snake_str.split('_')
    return head + ''.join(part.title() for part in tail)
62
+
63
+
64
+ # this one too
65
# this one too
def convert_keys_to_camel_case(obj):
    """Recursively rewrite every dict key in *obj* from snake_case to camelCase.

    Dicts are rebuilt with converted keys, lists are converted element-wise,
    and any other value is returned untouched.
    """
    if isinstance(obj, dict):
        return {to_camel_case(k): convert_keys_to_camel_case(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [convert_keys_to_camel_case(e) for e in obj]
    return obj
76
+
77
+
78
def restore_pvcs(metas_grouped, namespace_secret_dict, args, api_client):
    """Restore ceph-csi rbd backed volumes (rbd image + PVC + static PV) per namespace.

    For every namespace in metas_grouped this:
      1. optionally scales deployments/statefulsets to 0 (args.auto_scale),
      2. refuses to run while non-Succeeded pods exist,
      3. optionally patches/creates secrets from the backup (args.secret_pattern),
      4. optionally deletes all existing PVCs first (args.auto_delete),
      5. per image meta: streams the borg archive into `rbd import`, then
         recreates the PVC and a pre-bound PV whose csi volume handle points
         at the freshly imported image,
      6. scales workloads back to their saved replica counts (args.auto_scale).

    Args:
        metas_grouped: dict namespace -> list of image meta dicts.
        namespace_secret_dict: dict namespace -> list of backed-up secret dicts.
        args: parsed CLI namespace; reads namespaces, auto_scale, auto_delete,
            secret_pattern, pool_sc_mapping, backup_path and timestamp.
        api_client: configured kubernetes ApiClient.

    Raises:
        Exception: if the ceph-csi config map is missing, or active pods remain
            in a namespace about to be restored.
    """
    core_v1 = client.CoreV1Api(api_client=api_client)
    apps_v1 = client.AppsV1Api(api_client=api_client)
    storage_v1 = client.StorageV1Api(api_client=api_client)

    # get ceph storage classes (only those provisioned by ceph-csi rbd)
    ceph_storage_classes = {sc.metadata.name: sc for sc in storage_v1.list_storage_class().items if sc.provisioner == 'rbd.csi.ceph.com'}

    # load existing ceph pools and fetch their ids, needed for later pv restoring
    ls_call = subprocess.run(["ceph", "osd", "pool", "ls", "detail", "-f", "json"], check=True, text=True, capture_output=True)
    pool_details = json.loads(ls_call.stdout)

    # pool name -> numeric pool id, used below to reconstruct the csi volume handle
    pool_name_id = {}
    for pool_detail in pool_details:
        pool_name_id[pool_detail["pool_name"]] = pool_detail["pool_id"]

    # get the cluster id from ceph ns
    ceph_csi_config = core_v1.read_namespaced_config_map(name="ceph-csi-config", namespace="ceph-csi")

    if not ceph_csi_config:
        raise Exception("Could not find ceph-csi-config config map in ceph-csi namespace")

    ceph_cluster_id = json.loads(ceph_csi_config.data.get("config.json"))[0]["clusterID"]

    # empty string means no filtering: restore every namespace present in the metas
    filter_namespaces = [] if args.namespaces == "" else args.namespaces.split(",")

    for namespace, metas_group in metas_grouped.items():
        if filter_namespaces and namespace not in filter_namespaces:
            continue  # skip filtered out namespaces

        logger.info(f"trying to restore volumes of {namespace}")

        # original replica counts keyed "dp-<name>" / "ss-<name>" for the scale-up at the end
        auto_scale_replicas = {}
        if args.auto_scale:
            # auto downscale deployments and statefulsets of namespace
            deployments = apps_v1.list_namespaced_deployment(namespace)
            for d in deployments.items:
                name = d.metadata.name
                auto_scale_replicas[f"dp-{name}"] = d.spec.replicas  # save original replicas for upscale later
                logger.info(f"Scaling Deployment '{name}' to 0 replicas...")
                apps_v1.patch_namespaced_deployment_scale(
                    name=name,
                    namespace=namespace,
                    body={"spec": {"replicas": 0}}
                )

            statefulsets = apps_v1.list_namespaced_stateful_set(namespace)
            for s in statefulsets.items:
                name = s.metadata.name
                auto_scale_replicas[f"ss-{name}"] = s.spec.replicas
                logger.info(f"Scaling StatefulSet '{name}' to 0 replicas...")
                apps_v1.patch_namespaced_stateful_set_scale(
                    name=name,
                    namespace=namespace,
                    body={"spec": {"replicas": 0}}
                )

            # wait for termination
            # NOTE(review): "Terminating" is not an actual pod phase (it is derived
            # from deletionTimestamp), so this effectively waits on Running/Pending
            # pods only — confirm intent
            while True:
                pods = core_v1.list_namespaced_pod(namespace)
                remaining = [
                    pod.metadata.name
                    for pod in pods.items
                    if pod.status.phase in ["Running", "Pending", "Terminating"]
                ]
                if not remaining:
                    logger.info("All pods have terminated.")
                    break
                logger.info(f"Still active pods: {remaining}")
                time.sleep(5)

        # check if namespace has pods => throw exception and tell user to scale down any
        pods = core_v1.list_namespaced_pod(namespace=namespace)

        existing_pvcs = set(pvc.metadata.name for pvc in core_v1.list_namespaced_persistent_volume_claim(namespace).items)
        logger.debug(f"existing pvcs {existing_pvcs}")

        # if any pending / running pods exist fail (Succeeded = finished jobs are tolerated)
        pod_phases = [pod for pod in pods.items if pod.status.phase != "Succeeded"]
        if pod_phases:
            raise Exception(f"found pods in {namespace} - {pod_phases} - scale down all and force delete!")

        # process secret overwrites
        if args.secret_pattern:

            namespace_secrets = {secret["metadata"]["name"]: secret for secret in namespace_secret_dict[namespace]}

            # each pattern looks like "<namespace>/<fnmatch-glob>"
            for secret_pattern in args.secret_pattern:
                if secret_pattern.split("/")[0] == namespace:
                    # arg that is meant for this namespace restore
                    pattern = secret_pattern.split("/")[1]

                    for secret in namespace_secrets:
                        if fnmatch.fnmatch(secret, pattern):
                            logger.info(f"overwrite pattern matched {pattern}, trying to patch {secret}")
                            try:
                                core_v1.patch_namespaced_secret(name=secret, namespace=namespace, body={"data": namespace_secrets[secret]["data"]})
                            except ApiException as e:
                                # if it doesnt exist we simply create it
                                if e.status == 404:
                                    core_v1.create_namespaced_secret(
                                        namespace=namespace,
                                        body={"metadata": {"name": secret}, "data": namespace_secrets[secret]["data"]}
                                    )
                                    logger.info(f"secret {secret} did not exist, created it instead!")
                                else:
                                    raise

        if args.auto_delete:
            # wipe current PVCs so restored ones can reuse their original names
            pvcs = core_v1.list_namespaced_persistent_volume_claim(namespace)
            for pvc in pvcs.items:
                name = pvc.metadata.name
                logger.info(f"Deleting PVC: {name}")
                core_v1.delete_namespaced_persistent_volume_claim(
                    name=name,
                    namespace=namespace,
                    body=client.V1DeleteOptions()
                )

            while True:
                leftover = core_v1.list_namespaced_persistent_volume_claim(namespace).items
                if not leftover:
                    logger.info("All PVCs have been deleted.")
                    break
                logger.info(f"Still waiting on: {[p.metadata.name for p in leftover]}")
                time.sleep(5)

            # there are no more existing pvcs
            existing_pvcs = set()

        # extract raw rbd images, import and recreate pvc if necessary
        for meta in metas_group:
            logger.debug(f"restoring {meta}")

            image_name = meta["image_name"]

            type = meta["type"]  # NOTE(review): shadows the builtin `type` for the rest of this loop body

            # SECURITY NOTE(review): pickle.loads on backup payloads executes
            # arbitrary code if the backup store is ever tampered with — only
            # restore from trusted backups
            pvc_dict = pickle.loads(base64.b64decode(meta["pvc_dict_b64"]))
            logger.debug(f"pvc_dict:\n{pvc_dict}")
            pv_dict = pickle.loads(base64.b64decode(meta["pv_dict_b64"]))
            logger.debug(f"pv_dict:\n{pv_dict}")

            # extract from borg archive
            if args.backup_path:
                # we can use the absolute path provided (assumed to end with a slash)
                full_borg_archive = f"{args.backup_path}borg-{type}::{image_name}_{args.timestamp}"
            else:
                full_borg_archive = f"{os.getcwd()}/borg-{type}::{image_name}_{args.timestamp}"

            # import the image into ceph
            # move to new pool if mapping is defined
            pool = meta["pool"]
            storage_class = pvc_dict["spec"]["storage_class_name"]

            # each mapping looks like "<old-pool>:<new-pool>/<new-storage-class>"
            if args.pool_sc_mapping:
                for pool_mapping in args.pool_sc_mapping:
                    old_pool = pool_mapping.split(":")[0]
                    new_pool_sc = pool_mapping.split(":")[1]
                    if pool == old_pool:
                        pool = new_pool_sc.split("/")[0]
                        storage_class = new_pool_sc.split("/")[1]
                        logger.debug(f"new mapping specified old pool {old_pool}, new pool {pool}, new sc {storage_class}")
                        break

            # fresh csi-style image name so the provisioner treats it as one of its own
            new_csi_image_name = f"csi-vol-{uuid.uuid4()}"

            logger.info(f"extracting borg archive {full_borg_archive} into rbd import {pool}/{new_csi_image_name}")

            # stream borg extract stdout straight into rbd import — no temp file
            with subprocess.Popen(["borg", "extract", "--sparse", "--stdout", full_borg_archive], stdout=subprocess.PIPE) as proc:
                subprocess.run(["rbd", "import", "-", f"{pool}/{new_csi_image_name}"], check=True, stdin=proc.stdout)

            # restore from pickled pvc dicts
            new_pv_name = f"pvc-{uuid.uuid4()}"

            logger.debug(f"restoring pv with new pv name {new_pv_name} and csi image name {new_csi_image_name}")

            # create the new pvc based on the old - remove dynamic fields of old:
            if pvc_dict['metadata']['name'] in existing_pvcs:
                pvc_name = pvc_dict['metadata']['name']
                pvc_dict['metadata']['name'] = f"test-restore-{pvc_name}"
                logger.info(f"pvc {pvc_name} exists, creating it with test-restore- prefix")

            # clean the old pvc object so it can be submitted freshly
            pvc_dict['metadata']['annotations'].pop('pv.kubernetes.io/bind-completed', None)
            pvc_dict['metadata']['annotations'].pop('pv.kubernetes.io/bound-by-controller', None)
            pvc_dict['metadata'].pop('finalizers', None)
            pvc_dict['metadata'].pop('managed_fields', None)
            pvc_dict['metadata'].pop('resource_version', None)
            pvc_dict['metadata'].pop('uid', None)
            pvc_dict['metadata'].pop('creation_timestamp', None)
            pvc_dict.pop('status', None)
            pvc_dict.pop('kind', None)
            pvc_dict.pop('api_version', None)

            # set new values
            pvc_dict['spec']['storage_class_name'] = storage_class

            # we can give it a customized pv name so we know migrated ones - will still behave like a normal created pv
            pvc_dict['spec']['volume_name'] = new_pv_name

            # creation call
            logger.debug(f"creating new pvc:\n{pformat(pvc_dict)}")
            core_v1.create_namespaced_persistent_volume_claim(namespace=namespace, body=client.V1PersistentVolumeClaim(**convert_keys_to_camel_case(pvc_dict)))

            # cleanup the old pv aswell for recreation
            pv_dict.pop('api_version', None)
            pv_dict.pop('kind', None)
            pv_dict['metadata'].pop('creation_timestamp', None)
            pv_dict['metadata'].pop('finalizers', None)
            pv_dict['metadata'].pop('managed_fields', None)
            pv_dict['metadata'].pop('resource_version', None)
            pv_dict['metadata']['annotations'].pop('volume.kubernetes.io/provisioner-deletion-secret-name', None)
            pv_dict['metadata']['annotations'].pop('volume.kubernetes.io/provisioner-deletion-secret-namespace', None)
            pv_dict.pop('status', None)
            pv_dict['spec'].pop('claim_ref', None)
            pv_dict['spec'].pop('volume_attributes_class_name', None)
            pv_dict['spec'].pop('scale_io', None)
            pv_dict['spec']['csi'].pop('volume_handle', None)
            pv_dict['spec']['csi']['volume_attributes'].pop('imageName', None)
            pv_dict['spec']['csi']['volume_attributes'].pop('journalPool', None)
            pv_dict['spec']['csi']['volume_attributes'].pop('pool', None)

            # set values

            # get the storage class and set secrets from it
            ceph_storage_class = ceph_storage_classes[storage_class]
            pv_dict['metadata']['annotations']['volume.kubernetes.io/provisioner-deletion-secret-name'] = ceph_storage_class.parameters['csi.storage.k8s.io/provisioner-secret-name']
            pv_dict['metadata']['annotations']['volume.kubernetes.io/provisioner-deletion-secret-namespace'] = ceph_storage_class.parameters['csi.storage.k8s.io/provisioner-secret-namespace']

            pv_dict['spec']['csi']['node_stage_secret_ref']['name'] = ceph_storage_class.parameters['csi.storage.k8s.io/node-stage-secret-name']
            pv_dict['spec']['csi']['node_stage_secret_ref']['namespace'] = ceph_storage_class.parameters['csi.storage.k8s.io/node-stage-secret-namespace']

            pv_dict['spec']['csi']['controller_expand_secret_ref']['name'] = ceph_storage_class.parameters['csi.storage.k8s.io/controller-expand-secret-name']
            pv_dict['spec']['csi']['controller_expand_secret_ref']['namespace'] = ceph_storage_class.parameters['csi.storage.k8s.io/controller-expand-secret-namespace']

            pv_dict['spec']['csi']['volume_attributes']['clusterID'] = ceph_cluster_id

            # reconstruction of volume handle that the ceph csi provisioner understands
            pool_id = format(pool_name_id[pool], '016x')
            trimmed_new_csi_image_name = new_csi_image_name.removeprefix('csi-vol-')
            # NOTE(review): camelCase key here while sibling csi keys are snake_case;
            # convert_keys_to_camel_case leaves it unchanged so both spellings end
            # up as 'volumeHandle' — works, but inconsistent
            pv_dict['spec']['csi']['volumeHandle'] = f"0001-0024-{ceph_cluster_id}-{pool_id}-{trimmed_new_csi_image_name}"

            pv_dict['spec']['csi']['volume_attributes']['imageName'] = new_csi_image_name
            pv_dict['spec']['csi']['volume_attributes']['journalPool'] = pool
            pv_dict['spec']['csi']['volume_attributes']['pool'] = pool

            pv_dict['spec']['storage_class_name'] = storage_class

            pv_dict['metadata']['name'] = new_pv_name

            # creation call
            logger.debug(f"creating new pv:\n{pformat(pv_dict)}")
            core_v1.create_persistent_volume(body=client.V1PersistentVolume(**convert_keys_to_camel_case(pv_dict)))

        # scale back up again to the replica counts saved before the downscale
        if args.auto_scale:
            deployments = apps_v1.list_namespaced_deployment(namespace)
            for d in deployments.items:
                name = d.metadata.name
                logger.info(f"Scaling Deployment '{name}' back up...")
                apps_v1.patch_namespaced_deployment_scale(
                    name=name,
                    namespace=namespace,
                    body={"spec": {"replicas": auto_scale_replicas[f"dp-{name}"]}}
                )

            statefulsets = apps_v1.list_namespaced_stateful_set(namespace)
            for s in statefulsets.items:
                name = s.metadata.name
                logger.info(f"Scaling StatefulSet '{name}' back up...")
                apps_v1.patch_namespaced_stateful_set_scale(
                    name=name,
                    namespace=namespace,
                    body={"spec": {"replicas": auto_scale_replicas[f"ss-{name}"]}}
                )

        logger.info(f"restore of namespace {namespace} complete, you can now scale up your deployments again")
359
+
360
+
361
def restore_images(image_metas, stack_metas, args, proxmox):
    """Restore proxmox qemu/lxc guests: their conf file plus their rbd disk images.

    Args:
        image_metas: dict vmid -> list of image meta dicts (grouped by vmid).
        stack_metas: dict vmid -> stack meta dict (b64 vm conf, "type", "stack").
        args: parsed CLI namespace; reads stack_names, proxmox_host,
            proxmox_private_key, pool_mapping, backup_path and timestamp.
        proxmox: proxmoxer-style API client used to allocate the next free vmid.

    Raises:
        Exception: on a missing stack meta or a duplicated pool mapping.
    """
    # empty string means no filter: restore every stack
    stack_name_filter = [] if args.stack_names == "" else args.stack_names.split(",")

    # init paramiko for restoring pve vm conf files, either remote pve host or loopback connection
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if args.proxmox_host and args.proxmox_private_key:
        # remote connection
        ssh.connect(hostname=args.proxmox_host, username="root", pkey=paramiko.RSAKey.from_private_key_file(args.proxmox_private_key))
    else:
        # localhost connection (assumes root key auth to localhost is available)
        ssh.connect(hostname="localhost", username="root")

    # replace pools — each mapping looks like "<old-pool>:<new-pool>"
    pool_update_map = {}
    if args.pool_mapping:
        for pool_mapping in args.pool_mapping:
            old_pool = pool_mapping.split(":")[0]
            new_pool = pool_mapping.split(":")[1]

            if old_pool in pool_update_map:
                raise Exception(f"pool {old_pool} defined in more than one mapping!")

            pool_update_map[old_pool] = new_pool

    for vmid, metas_group in image_metas.items():
        stack_meta = stack_metas[vmid]

        if stack_meta is None:
            raise Exception(f"stack meta for vmid {vmid} is none!")

        if stack_name_filter and stack_meta["stack"] not in stack_name_filter:
            continue  # skip filtered out stacks

        # use paramiko to restore the vm conf
        vm_conf = base64.b64decode(stack_meta["vm_conf"]).decode("utf-8")

        # get a fresh, unused vmid for the import
        next_id = proxmox.cluster.nextid.get()

        # update disks in conf: rewrite each disk line to reference the new vmid
        for meta in metas_group:
            conf_key = meta["conf_key"]
            pool = meta["pool"]

            # replace the vm disk id trailing the "<conf_key>: ..." line
            search_regex = rf"^({conf_key}:.+?)\d+"
            replace_regex = rf"\g<1>{next_id}"

            logger.debug(f"search regex: {search_regex}, replace regex {replace_regex}, vm_conf:\n{vm_conf}")
            vm_conf = re.sub(search_regex, replace_regex, vm_conf, flags=re.MULTILINE)

            logger.debug(f"updated vm conf:\n{vm_conf}")

            # check if the pool also needs to be replaced
            if pool in pool_update_map:
                update_pool = pool_update_map[pool]
                vm_conf = re.sub(rf"^({conf_key}:\s*)[^:]+", rf"\g<1>{update_pool}", vm_conf, flags=re.MULTILINE)

        # for writing the file
        sftp = ssh.open_sftp()

        # conf file location depends on the guest type
        if stack_meta["type"] == "qemu":
            with sftp.file(f"/etc/pve/qemu-server/{next_id}.conf", "w") as file:
                file.write(vm_conf)
        elif stack_meta["type"] == "lxc":
            with sftp.file(f"/etc/pve/lxc/{next_id}.conf", "w") as file:
                file.write(vm_conf)

        sftp.close()

        # restore all the images
        for meta in metas_group:
            logger.debug(f"meta {meta}")

            meta_type = meta["type"]
            image_name = meta["image_name"]
            pool = meta["pool"]

            # rename "vm-<oldid>-..." to "vm-<newid>-..."
            new_image_name = re.sub(r"^(vm-)\d+", rf"\g<1>{next_id}", image_name)

            if pool in pool_update_map:
                pool = pool_update_map[pool]

            # extract the borg archive - borg extract always extracts to its working dir
            if args.backup_path:
                # we can use the absolute path provided (assumed to end with a slash)
                full_borg_archive = f"{args.backup_path}borg-{meta_type}::{image_name}_{args.timestamp}"
            else:
                full_borg_archive = f"{os.getcwd()}/borg-{meta_type}::{image_name}_{args.timestamp}"

            logger.info(f"extracting borg archive {full_borg_archive} into rbd import {pool}/{new_image_name}")

            # stream borg extract stdout straight into rbd import — no temp file
            with subprocess.Popen(["borg", "extract", "--sparse", "--stdout", full_borg_archive], stdout=subprocess.PIPE) as proc:
                subprocess.run(["rbd", "import", "-", f"{pool}/{new_image_name}"], check=True, stdin=proc.stdout)
458
+
459
+
460
def get_stack_metas(args, timestamp, meta_types, unique_group_key):
    """Load stack metas for one backup timestamp, keyed by *unique_group_key*.

    Raises Exception when two metas share the same key value.
    """
    db = TinyDB(f"{args.backup_path}stack-meta-db.json")
    query = Query()

    matches = db.search((query.timestamp == timestamp) & (query.type.one_of(meta_types)))

    keyed = {}
    for meta in matches:
        key = meta[unique_group_key]
        if key in keyed:
            raise Exception(f"duplicate key for meta {unique_group_key} {meta}")
        keyed[key] = meta

    return keyed
475
+
476
+
477
def get_image_metas(args, timestamp_filter=None):
    """Collect image metas for every borg archive, grouped by backup timestamp.

    Lists the archives of all RBD_REPO_TYPES borg repos under args.backup_path,
    looks up each archive's meta record in the image meta TinyDB, and returns a
    dict timestamp -> list of meta dicts. A timestamp whose archives are missing
    a meta record is dropped entirely.

    Args:
        args: parsed CLI namespace; reads backup_path.
        timestamp_filter: if given, only this timestamp is considered.

    Returns:
        dict mapping timestamp string to the list of matching image metas.
    """
    image_meta_db = TinyDB(f"{args.backup_path}image-meta-db.json")

    archives = []

    for borg_repo_type in RBD_REPO_TYPES:
        # check=True so a failed borg call surfaces instead of json failing on empty stdout
        list_result = subprocess.run(["borg", "list", f"{args.backup_path}/borg-{borg_repo_type}", "--json"], capture_output=True, check=True)
        archives.extend(json.loads(list_result.stdout)["archives"])

    Meta = Query()  # hoisted out of the loop; one query object suffices

    timestamp_archives = {}
    # BUGFIX: the original `del timestamp_archives[timestamp]` did not prevent a
    # later archive with the same timestamp from re-creating the bucket; remember
    # invalidated timestamps so they stay dropped
    invalid_timestamps = set()

    for archive in archives:
        # archive names look like "<image>_<timestamp>"
        image, timestamp = archive["archive"].split("_", 1)

        if timestamp_filter is not None and timestamp_filter != timestamp:
            continue  # skip filtered

        if timestamp in invalid_timestamps:
            continue  # timestamp already known to be incomplete

        image_meta = image_meta_db.get((Meta.image_name == image) & (Meta.timestamp == timestamp))

        if image_meta is None:
            # a meta-less archive invalidates the whole timestamp: drop any
            # partial bucket and make sure it cannot be resurrected
            logger.error(f"None meta found {timestamp}, image_name {image}, archive {archive}")
            timestamp_archives.pop(timestamp, None)
            invalid_timestamps.add(timestamp)
            continue

        timestamp_archives.setdefault(timestamp, []).append(image_meta)

    return timestamp_archives
508
+
509
+
510
def copy_backup_generic(source_dir='/opt/bdd', dest_dir=None):
    """Copy all non-hidden regular files from *source_dir* into *dest_dir*.

    Backward-compatible generalization: calling with no arguments behaves
    exactly like before (copies /opt/bdd into BACKUP_DIR).

    Args:
        source_dir: directory to copy from. Defaults to /opt/bdd.
        dest_dir: directory to copy into. Defaults to the module BACKUP_DIR.
    """
    if dest_dir is None:
        dest_dir = BACKUP_DIR

    for entry in os.listdir(source_dir):
        if entry.startswith("."):
            continue  # skip hidden files (dotfiles)

        full_source_path = os.path.join(source_dir, entry)
        full_dest_path = os.path.join(dest_dir, entry)

        # only regular files are copied; subdirectories are ignored
        if os.path.isfile(full_source_path):
            # copy2 also preserves file metadata (timestamps etc.)
            shutil.copy2(full_source_path, full_dest_path)
519
+
520
+
521
# try to mount any of the specified disks
def mount_disk():
    """Mount the first available backup disk (by filesystem UUID) onto BACKUP_DIR.

    ENV_MODE is expected to look like "<mode>:<uuid>,<uuid>,...". Each UUID is
    tried in order; the first successful mount wins. Returns None.
    """
    log = logging.getLogger("bdd")  # same logger instance the module uses

    env_mode = os.getenv("ENV_MODE")
    # BUGFIX: previously crashed with AttributeError when ENV_MODE was unset,
    # or IndexError when it contained no ":"
    if not env_mode or ":" not in env_mode:
        log.warning("ENV_MODE not set or malformed, skipping disk mount")
        return

    for disk_uuid in env_mode.split(":")[1].split(","):
        try:
            subprocess.run(["mount", f"UUID={disk_uuid}", BACKUP_DIR], check=True, text=True, capture_output=True)
            break  # first successful mount wins
        except subprocess.CalledProcessError as e:
            log.info(f"Error mounting disk: {e.stdout + e.stderr}")
    else:
        # loop finished without break: every candidate failed to mount
        log.error("could not mount any of the specified disks")
529
+