py-pve-cloud-backup 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of py-pve-cloud-backup might be problematic.

@@ -0,0 +1,609 @@
+ import paramiko
+ import base64
+ import logging
+ import pickle
+ import subprocess
+ import json
+ import re
+ import yaml
+ import os
+ from pprint import pformat
+ from kubernetes import client
+ from kubernetes.config.kube_config import KubeConfigLoader
+ import net
+ import asyncio
+
+
+ logger = logging.getLogger("fetcher")
+
+ VM_DISK_PATTERNS = [r"scsi\d+",
+                     # special win vm keys
+                     r"tpmstate\d+", r"efidisk\d+"]
+
+ LXC_DISK_PATTERNS = ["rootfs", r"mp\d+"]
+
+
+ # SSH into one master VM per k8s stack and fetch the kubeconfig stored there
+ def get_kubernetes_clients(backup_config, proxmox, pkey):
+     logger.info("getting k8s clients")
+
+     k8s_stacks = backup_config["k8s_stacks"].keys()
+
+     k8s_masters = {}
+     # collect one master node per stack
+     for node in proxmox.nodes.get():
+         node_name = node["node"]
+
+         if node["status"] == "offline":
+             logger.info(f"skipping offline node {node_name}")
+             continue
+
+         for qemu in proxmox.nodes(node_name).qemu.get():
+             if "tags" in qemu and any(tag in k8s_stacks for tag in qemu["tags"].split(";")) and 'master' in qemu["tags"].split(";"):
+                 # found a master
+                 logger.debug(f"found master {pformat(qemu)}")
+
+                 # find the stack tag
+                 stack_tag = None
+                 for tag in qemu["tags"].split(";"):
+                     for k8s_stack_tag in k8s_stacks:
+                         if tag == k8s_stack_tag:
+                             stack_tag = tag
+
+                 if stack_tag is None:
+                     raise Exception(f"something went terribly wrong, stack tag should never be None - qemu:\n{pformat(qemu)}")
+
+                 if stack_tag in k8s_masters:
+                     continue  # we already saved a master for this stack
+
+                 k8s_masters[stack_tag] = {"pve_host": node_name, "vmid": qemu["vmid"]}
+
+     logger.debug(f"collected masters:\n{pformat(k8s_masters)}")
+
+     k8s_kubeconfigs = {}
+
+     # now we can connect to each master via ssh and fetch the kubeconfig
+     for k8s_stack, master in k8s_masters.items():
+         ifaces = proxmox.nodes(master["pve_host"]).qemu(master["vmid"]).agent("network-get-interfaces").get()
+         logger.debug(f"k8s stack master {k8s_stack} interfaces {pformat(ifaces)}")
+
+         master_ipv4 = None
+
+         for iface in ifaces["result"]:
+             if iface["name"] == "lo":
+                 continue  # skip the loopback device
+
+             # the first non-loopback interface is the primary one
+             for ip_address in iface["ip-addresses"]:
+                 if ip_address["ip-address-type"] == "ipv4":
+                     master_ipv4 = ip_address["ip-address"]
+                     break
+
+             if master_ipv4 is None:
+                 raise Exception(f"could not get ipv4 for master {master} stack {k8s_stack}")
+
+             break
+
+         # now we can use that address to connect via ssh
+         ssh = paramiko.SSHClient()
+         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+         user = os.getenv("QEMU_ADMIN_USER", "admin")
+
+         logger.info(f"connecting to master {master_ipv4} - {user}")
+         ssh.connect(master_ipv4, username=user, pkey=pkey)
+
+         # since we need root we can't use sftp, and root login via ssh is disabled
+         _, stdout, _ = ssh.exec_command("sudo cat /etc/kubernetes/admin.conf")
+
+         config = stdout.read().decode('utf-8')
+
+         logger.debug(f"ssh sudo cat kubeconfig:\n{config}")
+
+         k8s_kubeconfigs[k8s_stack] = {"raw_kubeconfig": config, "master_ip": master_ipv4}
+
+     return k8s_kubeconfigs
+
+
+ def get_vm_configs(metas, pkey):
+     pve_vm_map = {}
+
+     # group by host
+     for meta in metas:
+         host_ip = meta["host_ip"]
+
+         if host_ip not in pve_vm_map:
+             pve_vm_map[host_ip] = []
+
+         pve_vm_map[host_ip].append(meta)
+
+     vm_conf_map = {}
+
+     for host_ip, metas in pve_vm_map.items():
+
+         ssh = paramiko.SSHClient()
+         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+         ssh.connect(host_ip, username="root", pkey=pkey)
+
+         for meta in metas:
+             sftp = ssh.open_sftp()
+
+             vmid = meta["vmid"]
+
+             if meta["type"] == "lxc":
+                 config_path = f"/etc/pve/lxc/{vmid}.conf"
+             elif meta["type"] == "qemu":
+                 config_path = f"/etc/pve/qemu-server/{vmid}.conf"
+
+             with sftp.open(config_path, "r") as file:
+                 config = file.read()
+
+             sftp.close()
+
+             vm_conf_map[vmid] = base64.b64encode(config).decode("utf-8")
+
+         ssh.close()
+
+     return vm_conf_map
+
+
+ # collect all pvc and pv information
+ def collect_raw_k8s_meta(backup_config, k8s_kubeconfigs):
+
+     k8s_stack_meta = {}
+
+     k8s_stack_namespace_secrets = {}
+
+     for k8s_stack, k8s_backup_config in backup_config["k8s_stacks"].items():
+
+         if k8s_backup_config["exclude_namespaces"] is not None and k8s_backup_config["include_namespaces"] is not None:
+             raise Exception(f"cannot specify include and exclude for k8s_stack {k8s_stack}")
+
+         kubeconfig = k8s_kubeconfigs[k8s_stack]
+         master_ipv4 = kubeconfig["master_ip"]
+         kubeconfig_dict = yaml.safe_load(kubeconfig["raw_kubeconfig"])
+
+         # override the connection ip as it is set to localhost on the machines
+         kubeconfig_dict["clusters"][0]["cluster"]["server"] = f"https://{master_ipv4}:6443"
+         logger.debug(f"kubeconfig dict {pformat(kubeconfig_dict)}")
+
+         # init kube client
+         loader = KubeConfigLoader(config_dict=kubeconfig_dict)
+         configuration = client.Configuration()
+         loader.load_and_set(configuration)
+
+         # create a client from this configuration
+         api_client = client.ApiClient(configuration)
+         v1 = client.CoreV1Api(api_client=api_client)
+
+         # collect pvc/pv metadata for this stack
+         k8s_backup_meta = []
+
+         # sub dict for secrets of each namespace
+         k8s_stack_namespace_secrets[k8s_stack] = {}
+
+         for namespace_item in v1.list_namespace().items:
+             namespace = namespace_item.metadata.name
+
+             if k8s_backup_config["exclude_namespaces"] is not None and namespace in k8s_backup_config["exclude_namespaces"]:
+                 continue
+
+             if k8s_backup_config["include_namespaces"] is not None and namespace not in k8s_backup_config["include_namespaces"]:
+                 continue
+
+             # collect secrets of the namespace
+             k8s_stack_namespace_secrets[k8s_stack][namespace] = [secret.to_dict() for secret in v1.list_namespaced_secret(namespace=namespace).items]
+
+             pvc_list = v1.list_namespaced_persistent_volume_claim(namespace=namespace)
+
+             for pvc in pvc_list.items:
+                 pvc_name = pvc.metadata.name
+                 volume_name = pvc.spec.volume_name
+                 status = pvc.status.phase
+
+                 if volume_name:
+                     pv = v1.read_persistent_volume(name=volume_name)
+                     pv_dict_b64 = base64.b64encode(pickle.dumps(pv.to_dict())).decode('utf-8')
+
+                     pvc_dict_b64 = base64.b64encode(pickle.dumps(pvc.to_dict())).decode('utf-8')
+
+                     k8s_backup_meta.append({"type": "k8s", "namespace": namespace, "pvc_name": pvc_name,
+                                             "image_name": pv.spec.csi.volume_attributes["imageName"], "pool": pv.spec.csi.volume_attributes["pool"],
+                                             "pvc_dict_b64": pvc_dict_b64, "pv_dict_b64": pv_dict_b64, "storage_class": pvc.spec.storage_class_name})
+                 else:
+                     logger.debug(f"PVC: {pvc_name} -> Not bound to a PV [Status: {status}]")
+
+         k8s_stack_meta[k8s_stack] = k8s_backup_meta
+
+     return k8s_stack_meta, k8s_stack_namespace_secrets
+
+
+ # collect all existing vms / lxcs that we want to back up
+ def collect_raw_vm_meta(proxmox, backup_config):
+     backup_vm_meta = []
+
+     # collect vm stacks to know which vms to back up
+     vm_stacks = []
+     if backup_config["other_stacks"] is not None:
+         vm_stacks.extend(backup_config["other_stacks"])
+     else:
+         logger.info("no other stacks to backup defined, skipping")
+     vm_stacks.extend(backup_config["k8s_stacks"].keys())
+
+     # k8s exclude/include logic
+     exclude_vms = {}
+     include_vms = {}
+     for stack_name, k8s_conf in backup_config["k8s_stacks"].items():
+         if k8s_conf["exclude_hostnames"] is not None and k8s_conf["include_hostnames"] is not None:
+             raise Exception(f"Cannot both include and exclude hostnames {stack_name}")
+
+         if k8s_conf["exclude_hostnames"] is not None:
+             exclude_vms[stack_name] = k8s_conf["exclude_hostnames"]
+
+         if k8s_conf["include_hostnames"] is not None:
+             include_vms[stack_name] = k8s_conf["include_hostnames"]
+
+     ceph_storage_pools = [storage['pool'] + ":" for storage in proxmox.storage.get() if storage['type'] == 'rbd']  # ":" appended for easy filtering of vm confs
+
+     for node in proxmox.nodes.get():
+         node_name = node["node"]
+         logger.info("collecting node " + node_name)
+
+         if node["status"] == "offline":
+             logger.info(f"skipping offline node {node_name}")
+             continue
+
+         # we later need to connect to the node via ssh and fetch the lxc/qemu config for backup
+         ifaces = proxmox.nodes(node_name).network.get()
+         node_ip_address = None
+         for iface in ifaces:
+             if 'gateway' in iface:
+                 if node_ip_address is not None:
+                     raise Exception(f"found multiple ifaces with gateways for node {node_name}")
+                 node_ip_address = iface.get("address")
+
+         if node_ip_address is None:
+             raise Exception(f"Could not find ip for node {node_name}")
+
+         for qemu in proxmox.nodes(node_name).qemu.get():
+             logger.debug(f"processing qemu {qemu}")
+             if "tags" not in qemu:
+                 continue  # non stack vm
+
+             stack_tag = next((tag for tag in qemu["tags"].split(";") if tag in vm_stacks), None)
+
+             if stack_tag:
+                 if stack_tag in exclude_vms and qemu["name"] in exclude_vms[stack_tag]:
+                     logger.debug("continue due to exclude")
+                     continue
+
+                 if stack_tag in include_vms and qemu["name"] not in include_vms[stack_tag]:
+                     logger.debug("continue due to include")
+                     continue
+
+                 vm_config = proxmox.nodes(node_name).qemu(qemu["vmid"]).config.get()
+
+                 # collect disk configs
+                 disk_confs = {}
+
+                 for key in vm_config:
+                     for disk_pattern in VM_DISK_PATTERNS:
+                         # only append disks that match a pattern and are actually managed by ceph
+                         if re.search(disk_pattern, key) and any(storage_pool in vm_config[key] for storage_pool in ceph_storage_pools):
+                             disk_confs[key] = vm_config[key]
+                             break
+
+                 if not disk_confs:
+                     raise Exception(f"No disk could be identified for {vm_config}")
+
+                 backup_vm_meta.append({"type": "qemu", "qemu_raw": qemu, "vmid": qemu["vmid"], "disk_confs": disk_confs, "tags": vm_config["tags"], "host_ip": node_ip_address})
+
+
+         for lxc in proxmox.nodes(node_name).lxc.get():
+             if "tags" not in lxc:
+                 continue  # non stack lxc
+
+             if backup_config["other_stacks"] is not None and any(tag in backup_config["other_stacks"] for tag in lxc["tags"].split(";")):
+
+                 lxc_config = proxmox.nodes(node_name).lxc(lxc["vmid"]).config.get()
+
+                 # collect disk configs
+                 disk_confs = {}
+                 for key in lxc_config:
+                     for disk_pattern in LXC_DISK_PATTERNS:
+                         if re.search(disk_pattern, key) and any(storage_pool in lxc_config[key] for storage_pool in ceph_storage_pools):
+                             disk_confs[key] = lxc_config[key]
+
+                 if not disk_confs:
+                     raise Exception(f"No disk could be identified for {lxc_config}")
+
+                 backup_vm_meta.append({"type": "lxc", "lxc_raw": lxc, "vmid": lxc["vmid"], "disk_confs": disk_confs, "tags": lxc_config["tags"], "host_ip": node_ip_address})
+
+     return backup_vm_meta
+
+
+ def pool_images(raw_vm_meta, raw_k8s_meta):
+     # images grouped by pool
+     unique_pools = set()
+
+     # collect pools from vms
+     for vm_meta in raw_vm_meta:
+         for disk_conf in vm_meta["disk_confs"].values():
+             pool = disk_conf.split(",")[0].split(":")[0]
+             unique_pools.add(pool)
+
+     # collect pools from k8s volumes
+     for k8s_stack, k8s_metas in raw_k8s_meta.items():
+         for k8s_meta in k8s_metas:
+             unique_pools.add(k8s_meta["pool"])
+
+     # create rbd groups
+     for pool in unique_pools:
+         try:
+             # check for errors, capture stderr output as text
+             subprocess.run(["rbd", "group", "create", f"{pool}/backups"], check=True, capture_output=True, text=True)
+         except subprocess.CalledProcessError as e:
+             logger.warning(e.stdout + e.stderr)  # not a problem if the group already exists, though that means an earlier cleanup failed
+
+     logger.info("adding images to groups")
+     # add all vm disks to the groups
+     for vm_meta in raw_vm_meta:
+         for disk_conf in vm_meta["disk_confs"].values():
+             image = disk_conf.split(",")[0].split(":")[1]
+             pool = disk_conf.split(",")[0].split(":")[0]
+             try:
+                 subprocess.run(["rbd", "group", "image", "add", f"{pool}/backups", f"{pool}/{image}"], check=True, capture_output=True, text=True)
+             except subprocess.CalledProcessError as e:
+                 logger.error(e.stdout + e.stderr)  # proper error printing
+                 raise
+
+     # add rbds from pvcs
+     for k8s_metas in raw_k8s_meta.values():
+         for k8s_meta in k8s_metas:
+             pool = k8s_meta["pool"]
+             image = k8s_meta["image_name"]
+             try:
+                 subprocess.run(["rbd", "group", "image", "add", f"{pool}/backups", f"{pool}/{image}"], check=True, capture_output=True, text=True)
+             except subprocess.CalledProcessError as e:
+                 logger.error(e.stdout + e.stderr)  # proper error printing
+                 raise
+
+     return unique_pools
+
+
+ def clone(pool, image, timestamp):
+     try:
+         command = subprocess.run(["rbd", "snap", "ls", "--all", "--format", "json", f"{pool}/{image}"], check=True, capture_output=True, text=True)
+         snaps = json.loads(command.stdout)
+         # nothing is logged on success
+     except subprocess.CalledProcessError as e:
+         logger.error(e.stdout + e.stderr)
+         raise
+
+     for snap in snaps:
+         if snap["namespace"]["type"] == "group" and snap["namespace"]["group snap"] == timestamp:
+             snap_id = snap["id"]
+             break
+
+     logger.debug(f"image {image} snap id {snap_id}")
+
+     # create temporary clone
+     try:
+         subprocess.run(["rbd", "clone", "--snap-id", str(snap_id), f"{pool}/{image}", f"{pool}/temp-clone-{timestamp}-{image}", "--rbd-default-clone-format", "2"], check=True, capture_output=True, text=True)
+     except subprocess.CalledProcessError as e:
+         logger.error(e.stdout + e.stderr)
+         raise
+
+
+ def snap_and_clone(raw_vm_meta, raw_k8s_meta, timestamp, unique_pools):
+     logger.info("creating snaps")
+     for pool in unique_pools:
+         try:
+             subprocess.run(["rbd", "group", "snap", "create", f"{pool}/backups@{timestamp}"], check=True, capture_output=True, text=True)
+             # nothing is logged on success
+         except subprocess.CalledProcessError as e:
+             logger.error(e.stdout + e.stderr)
+             raise
+
+     logger.info("creating clones")
+
+     # clone all the snapshots into new images so we can export them
+     # sadly there isn't yet a direct export function for group snapshots
+     for vm_meta in raw_vm_meta:
+         for disk_conf in vm_meta["disk_confs"].values():
+             image = disk_conf.split(",")[0].split(":")[1]
+             pool = disk_conf.split(",")[0].split(":")[0]
+             clone(pool, image, timestamp)
+
+     for k8s_metas in raw_k8s_meta.values():
+         for k8s_meta in k8s_metas:
+             pool = k8s_meta["pool"]
+             image = k8s_meta["image_name"]
+             clone(pool, image, timestamp)
+
+
+ async def send_exports(commands):
+     for send_command in commands:
+         backup_addr = send_command["backup_addr"]
+         params = send_command["params"]
+
+         request_dict = {
+             "borg_archive_type": params["type"],
+             "archive_name": params["image_name"],
+             "timestamp": params["timestamp"],
+             "stdin_name": params["image_name"] + ".raw"
+         }
+         logger.info(request_dict)
+
+         # to get full performance we need to read from the subprocess asynchronously as well
+         async def async_chunk_generator():
+             proc = await asyncio.create_subprocess_exec(
+                 *send_command["subprocess_args"],
+                 stdout=asyncio.subprocess.PIPE
+             )
+
+             while True:
+                 chunk = await proc.stdout.read(4 * 1024 * 1024 * 10)  # 40MB chunks
+                 if not chunk:
+                     break
+                 yield chunk
+
+             await proc.wait()
+
+         await net.archive_async(backup_addr, request_dict, async_chunk_generator)
+
+
+ async def send_backups(raw_vm_meta, raw_k8s_meta, timestamp, backup_addr):
+     typed_send_commands = {}
+     for stype in ["k8s", "lxc", "qemu"]:
+         typed_send_commands[stype] = []
+
+     for vm_meta in raw_vm_meta:
+         for disk_conf in vm_meta["disk_confs"].values():
+             image = disk_conf.split(",")[0].split(":")[1]
+             pool = disk_conf.split(",")[0].split(":")[0]
+
+             params = {"timestamp": timestamp, "image_name": image, "pool": pool, "type": vm_meta["type"]}
+
+             typed_send_commands[vm_meta["type"]].append({"params": params, "backup_addr": backup_addr, "subprocess_args": ["rbd", "export", f"{pool}/temp-clone-{timestamp}-{image}", "-"]})
+
+
+     for k8s_metas in raw_k8s_meta.values():
+         for k8s_meta in k8s_metas:
+             pool = k8s_meta["pool"]
+             image = k8s_meta["image_name"]
+
+             params = {"timestamp": timestamp, "image_name": image, "pool": pool, "type": k8s_meta["type"]}
+
+             typed_send_commands[k8s_meta["type"]].append({"params": params, "backup_addr": backup_addr, "subprocess_args": ["rbd", "export", f"{pool}/temp-clone-{timestamp}-{image}", "-"]})
+
+     # start one asyncio task per type, since borg on the bdd side is single threaded per archive
+     export_tasks = []
+     for commands in typed_send_commands.values():
+         export_tasks.append(asyncio.create_task(send_exports(commands)))
+
+     await asyncio.gather(*export_tasks)
+
+
+ async def post_image_meta(raw_vm_meta, raw_k8s_meta, timestamp, backup_config, backup_addr):
+     # post meta for vms
+     vm_stacks = []
+     if backup_config["other_stacks"] is not None:
+         vm_stacks.extend(backup_config["other_stacks"])
+
+     vm_stacks.extend(backup_config["k8s_stacks"].keys())
+
+     for vm_meta in raw_vm_meta:
+         stack_tag = None
+         for tag in vm_meta["tags"].split(";"):
+             if tag in vm_stacks:
+                 stack_tag = tag
+
+         if stack_tag is None:
+             raise Exception(f"stack tag for {vm_meta} could not be found! {vm_stacks}")
+
+         for conf_key, disk_conf in vm_meta["disk_confs"].items():
+             image = disk_conf.split(",")[0].split(":")[1]
+             pool = disk_conf.split(",")[0].split(":")[0]
+
+             body = {"timestamp": timestamp, "image_name": image, "pool": pool, "stack": stack_tag, "type": vm_meta["type"], "conf_key": conf_key,
+                     "disk_conf": disk_conf, "vmid": vm_meta["vmid"]}
+             logger.debug(f"posting {body}")
+             await net.image_meta(backup_addr, body)
+
+
+     # post meta for k8s volumes
+     for k8s_stack, k8s_metas in raw_k8s_meta.items():
+         for k8s_meta in k8s_metas:
+             pool = k8s_meta["pool"]
+             image = k8s_meta["image_name"]
+             body = {"timestamp": timestamp, "image_name": image, "pool": pool, "stack": k8s_stack, "type": k8s_meta["type"], "namespace": k8s_meta["namespace"],
+                     "pvc_dict_b64": k8s_meta['pvc_dict_b64'], "pv_dict_b64": k8s_meta['pv_dict_b64'], "pvc_name": k8s_meta["pvc_name"], "storage_class": k8s_meta["storage_class"]}
+
+             logger.debug(f"posting {body}")
+             await net.image_meta(backup_addr, body)
+
+
+ async def post_k8s_stack_meta(k8s_kubeconfigs, k8s_stack_namespace_secrets, timestamp, backup_addr):
+     for k8s_stack, kubeconfig in k8s_kubeconfigs.items():
+         namespace_secret_dict_b64 = base64.b64encode(pickle.dumps(k8s_stack_namespace_secrets[k8s_stack])).decode('utf-8')
+         body = {"timestamp": timestamp, "stack": k8s_stack, "type": "k8s", "raw_kubeconfig": kubeconfig["raw_kubeconfig"], "master_ip": kubeconfig["master_ip"], "namespace_secret_dict_b64": namespace_secret_dict_b64}
+         logger.debug(f"posting {body}")
+
+         await net.stack_meta(backup_addr, body)
+
+
+ async def post_vm_stack_meta(raw_vm_meta, vm_conf_map, backup_config, backup_addr, timestamp):
+     # post meta for vms
+     vm_stacks = []
+     if backup_config["other_stacks"] is not None:
+         vm_stacks.extend(backup_config["other_stacks"])
+
+     vm_stacks.extend(backup_config["k8s_stacks"].keys())
+
+     # group raw meta by vmid
+     vmid_vm_meta = {}
+     for vm_meta in raw_vm_meta:
+         vmid_vm_meta[vm_meta["vmid"]] = vm_meta
+
+     for vmid, vm_conf in vm_conf_map.items():
+         vm_meta = vmid_vm_meta[vmid]
+         stack_tag = None
+         for tag in vm_meta["tags"].split(";"):
+             if tag in vm_stacks:
+                 stack_tag = tag
+
+         if stack_tag is None:
+             raise Exception(f"stack tag for {vm_meta} could not be found! {vm_stacks}")
+
+         body = {"timestamp": timestamp, "stack": stack_tag, "type": vm_meta["type"], "vmid": vmid, "vm_conf": vm_conf}
+         logger.debug(f"posting {body}")
+
+         await net.stack_meta(backup_addr, body)
+
+
+ def cleanup(raw_vm_meta, raw_k8s_meta, timestamp, unique_pools):
+     logger.info("cleanup")
+     # delete tmp images
+     if raw_vm_meta is not None:
+         for vm_meta in raw_vm_meta:
+             for disk_conf in vm_meta["disk_confs"].values():
+                 image = disk_conf.split(",")[0].split(":")[1]
+                 pool = disk_conf.split(",")[0].split(":")[0]
+                 try:
+                     subprocess.run(["rbd", "rm", f"{pool}/temp-clone-{timestamp}-{image}"], check=True, capture_output=True, text=True)
+                 except subprocess.CalledProcessError as e:
+                     logger.warning(e.stdout + e.stderr)
+
+
+     if raw_k8s_meta is not None:
+         for k8s_stack, k8s_metas in raw_k8s_meta.items():
+             for k8s_meta in k8s_metas:
+                 pool = k8s_meta["pool"]
+                 image = k8s_meta["image_name"]
+                 try:
+                     subprocess.run(["rbd", "rm", f"{pool}/temp-clone-{timestamp}-{image}"], check=True, capture_output=True, text=True)
+                 except subprocess.CalledProcessError as e:
+                     logger.warning(e.stdout + e.stderr)
+
+
+     if unique_pools is not None:
+         # delete snaps
+         for pool in unique_pools:
+             logger.debug("removing snaps from pool " + pool)
+             try:
+                 subprocess.run(["rbd", "group", "snap", "rm", f"{pool}/backups@{timestamp}"], check=True, capture_output=True, text=True)
+                 # nothing is logged on success
+             except subprocess.CalledProcessError as e:
+                 logger.warning(e.stdout + e.stderr)
+
+         # delete groups
+         for pool in unique_pools:
+             logger.debug("removing backup group from pool " + pool)
+             try:
+                 subprocess.run(["rbd", "group", "rm", f"{pool}/backups"], check=True, capture_output=True, text=True)
+                 # nothing is logged on success
+             except subprocess.CalledProcessError as e:
+                 logger.warning(e.stdout + e.stderr)
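
For reference, the backup_config dictionary consumed by the functions above appears to have the following shape. This is a minimal sketch inferred from the keys the module reads (other_stacks, k8s_stacks, exclude/include_namespaces, exclude/include_hostnames); the stack names and values are hypothetical examples, not part of the package:

backup_config = {
    "other_stacks": ["gitlab"],                     # or None; extra PVE stacks selected by VM/LXC tag
    "k8s_stacks": {
        "prod-k8s": {                               # tag carried by the k8s stack's PVE guests
            "exclude_namespaces": ["kube-system"],  # mutually exclusive with include_namespaces
            "include_namespaces": None,
            "exclude_hostnames": None,              # mutually exclusive with include_hostnames
            "include_hostnames": None,
        },
    },
}
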
@@ -0,0 +1,45 @@
+ import os
+ import shutil
+ import subprocess
+ import tarfile
+ import logging
+ from net import archive
+
+
+ logger = logging.getLogger("fetcher")
+
+
+ async def backup_git(backup_addr, timestamp, git_repos):
+     for repo_url in git_repos:
+         repo_name = os.path.splitext(os.path.basename(repo_url))[0]
+
+         archive_path = f"{repo_name}.tar"
+
+         subprocess.run(["git", "clone", repo_url, repo_name], check=True)
+
+         with tarfile.open(archive_path, "w") as tar:
+             tar.add(repo_name, arcname=repo_name)
+
+         shutil.rmtree(repo_name)
+
+         logger.info(f"Repository archived successfully as {archive_path}")
+
+         request_dict = {
+             "borg_archive_type": "git",
+             "archive_name": repo_name,
+             "timestamp": timestamp,
+             "stdin_name": archive_path
+         }
+         logger.info(request_dict)
+
+         def chunk_generator():
+             with open(archive_path, "rb") as file:
+                 while True:
+                     chunk = file.read(4 * 1024 * 1024)  # 4MB chunks
+                     if not chunk:
+                         break
+                     yield chunk
+
+         await archive(backup_addr, request_dict, chunk_generator)
+
+         os.remove(archive_path)
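
The package's actual entry point is not part of this diff, but the functions above compose roughly as follows. This is a hedged sketch only: the module name fetcher, the use of proxmoxer.ProxmoxAPI, the SSH key path, backup_addr, and the timestamp format are all assumptions.

import asyncio
import datetime

import paramiko
from proxmoxer import ProxmoxAPI  # assumed PVE API client; matches the proxmox.nodes(...)... usage above

import fetcher  # hypothetical name for the first module in this diff


async def run_backup(backup_config, backup_addr):
    # hypothetical credentials and key location
    proxmox = ProxmoxAPI("pve.example.com", user="root@pam", password="...", verify_ssl=False)
    pkey = paramiko.RSAKey.from_private_key_file("/root/.ssh/id_rsa")
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    # gather metadata from PVE and the k8s stacks
    k8s_kubeconfigs = fetcher.get_kubernetes_clients(backup_config, proxmox, pkey)
    raw_k8s_meta, k8s_secrets = fetcher.collect_raw_k8s_meta(backup_config, k8s_kubeconfigs)
    raw_vm_meta = fetcher.collect_raw_vm_meta(proxmox, backup_config)
    vm_conf_map = fetcher.get_vm_configs(raw_vm_meta, pkey)

    unique_pools = None
    try:
        # group the RBD images, snapshot the groups, clone the snapshots, then stream the exports
        unique_pools = fetcher.pool_images(raw_vm_meta, raw_k8s_meta)
        fetcher.snap_and_clone(raw_vm_meta, raw_k8s_meta, timestamp, unique_pools)
        await fetcher.send_backups(raw_vm_meta, raw_k8s_meta, timestamp, backup_addr)
        await fetcher.post_image_meta(raw_vm_meta, raw_k8s_meta, timestamp, backup_config, backup_addr)
        await fetcher.post_k8s_stack_meta(k8s_kubeconfigs, k8s_secrets, timestamp, backup_addr)
        await fetcher.post_vm_stack_meta(raw_vm_meta, vm_conf_map, backup_config, backup_addr, timestamp)
    finally:
        # temporary clones, group snapshots and groups are removed even on failure
        fetcher.cleanup(raw_vm_meta, raw_k8s_meta, timestamp, unique_pools)


# asyncio.run(run_backup(backup_config, backup_addr))  # with a config shaped like the sketch above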