py-pve-cloud-backup 0.0.1__py3-none-any.whl

This diff shows the content of a publicly available package version as released to one of the supported registries. The information is provided for informational purposes only and reflects the package as it appears in its public registry.

Potentially problematic release.


This version of py-pve-cloud-backup might be problematic.

pve_cloud_backup/fetcher/main.py
@@ -0,0 +1,86 @@
+ from datetime import datetime
+ import logging
+ import funcs
+ from nextcloud import backup_nextcloud
+ from git import backup_git
+ from proxmoxer import ProxmoxAPI
+ import os
+ import yaml
+ from kubernetes import client, config
+ from pprint import pformat
+ import paramiko
+ import asyncio
+
+
+ logging.basicConfig(level=getattr(logging, os.getenv("LOG_LEVEL", "DEBUG").upper()))
+ logger = logging.getLogger("fetcher")
+
+ proxmox = ProxmoxAPI(
+     os.getenv("PROXMOXER_HOST"), user=os.getenv("PROXMOXER_USER"), backend='ssh_paramiko', private_key_file='/opt/id_proxmox'
+ )
+
+ with open("/opt/backup-conf.yaml", "r") as file:
+     backup_config = yaml.safe_load(file)
+
+ backup_addr = backup_config["backup_daemon_address"]
+
+ # main is prod and always runs in cluster
+ config.load_incluster_config()
+ v1 = client.CoreV1Api()
+
+
+ async def run():
+     timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+
+     # backup to borg file repos
+
+     # defined and not null
+     if backup_config["git_repos"]:
+         await backup_git(backup_addr, timestamp, backup_config["git_repos"])
+     else:
+         logger.info("No git repos to backup provided, skipping")
+
+     # defined and not null
+     if backup_config["nextcloud_files"]:
+         await backup_nextcloud(backup_addr, timestamp, backup_config["nextcloud_files"])
+     else:
+         logger.info("No nextcloud files to backup provided, skipping")
+
+     # backup vms and k8s
+     raw_k8s_meta = None
+     raw_vm_meta = None
+     unique_pools = None
+
+     try:
+         k8s_kubeconfigs = funcs.get_kubernetes_clients(backup_config, proxmox, paramiko.Ed25519Key.from_private_key_file("/opt/id_qemu"))
+         logger.debug(f"k8s_kubeconfigs:\n{pformat(k8s_kubeconfigs)}")
+
+         raw_k8s_meta, k8s_stack_namespace_secrets = funcs.collect_raw_k8s_meta(backup_config, k8s_kubeconfigs)
+         logger.debug(f"k8s_meta:\n{pformat(raw_k8s_meta)}")
+
+         raw_vm_meta = funcs.collect_raw_vm_meta(proxmox, backup_config)
+         logger.debug(f"vm_meta:\n{pformat(raw_vm_meta)}")
+
+         vm_conf_map = funcs.get_vm_configs(raw_vm_meta, paramiko.RSAKey.from_private_key_file("/opt/id_proxmox"))
+         logger.debug(f"vm_conf_map:\n{pformat(vm_conf_map)}")
+
+         # this simply adds all the images to groups inside of ceph
+         unique_pools = funcs.pool_images(raw_vm_meta, raw_k8s_meta)
+
+         # create group snapshots
+         funcs.snap_and_clone(raw_vm_meta, raw_k8s_meta, timestamp, unique_pools)
+         await funcs.send_backups(raw_vm_meta, raw_k8s_meta, timestamp, backup_addr)
+
+         await funcs.post_image_meta(raw_vm_meta, raw_k8s_meta, timestamp, backup_config, backup_addr)
+         await funcs.post_vm_stack_meta(raw_vm_meta, vm_conf_map, backup_config, backup_addr, timestamp)
+         await funcs.post_k8s_stack_meta(k8s_kubeconfigs, k8s_stack_namespace_secrets, timestamp, backup_addr)
+
+     finally:
+         # we always want to do the cleanup even if something failed
+         funcs.cleanup(raw_vm_meta, raw_k8s_meta, timestamp, unique_pools)
+
+
+ def main():
+     asyncio.run(run())
+
+
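The fetcher reads its configuration from /opt/backup-conf.yaml, which is mounted into the container rather than shipped with the wheel. The sketch below shows one shape of that file that would satisfy the keys accessed in main.py; every value is a made-up placeholder, and any additional keys consumed inside funcs.py are not visible in this diff.

import yaml

# Hypothetical configuration covering only the keys main.py reads directly:
# backup_daemon_address, git_repos, nextcloud_files. All values are placeholders.
EXAMPLE_CONF = """
backup_daemon_address: 192.0.2.10
git_repos:
  - https://git.example.org/infra/deploy.git
nextcloud_files:
  - Documents/notes.md
"""

backup_config = yaml.safe_load(EXAMPLE_CONF)
backup_addr = backup_config["backup_daemon_address"]  # target of every asyncio.open_connection()
# An empty or null list is treated as "nothing to back up" and skipped in run().
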
pve_cloud_backup/fetcher/net.py
@@ -0,0 +1,126 @@
+ import asyncio
+ from enum import Enum
+ import zstandard as zstd
+ import struct
+ import logging
+ import pickle
+
+ logger = logging.getLogger("fetcher")
+
+ class Command(Enum):
+     ARCHIVE = 1
+     IMAGE_META = 2
+     STACK_META = 3
+
+
+ async def archive_init(reader, writer, request_dict):
+     # initialize archive command
+     writer.write(struct.pack("B", Command.ARCHIVE.value))
+     await writer.drain()
+     logger.debug("send command")
+
+     # send the archive request dict
+     req_dict_pickled = pickle.dumps(request_dict)
+     writer.write(struct.pack("!I", len(req_dict_pickled)))
+     await writer.drain()
+     writer.write(req_dict_pickled)
+     await writer.drain()
+     logger.debug("send dict")
+
+     # wait for go signal, the server needs to acquire its write lock first,
+     # we don't
+     logger.debug("waiting for go from bdd")
+     signal = await reader.readexactly(1)
+     if signal != b'\x01':
+         logger.error("received incorrect go signal")
+         raise Exception("Incorrect go signal!")
+     logger.debug("received go")
+
+
+ async def send_cchunk(writer, compressed_chunk):
+     # the chunk was already compressed by the caller
+     if compressed_chunk:  # only send if something actually got compressed
+         # send size + chunk
+         writer.write(struct.pack("!I", len(compressed_chunk)))
+         await writer.drain()
+         writer.write(compressed_chunk)
+         await writer.drain()
+
+
+ async def archive_async(backup_addr, request_dict, chunk_generator):
+     logger.info(request_dict)
+     reader, writer = await asyncio.open_connection(backup_addr, 8888)
+
+     await archive_init(reader, writer, request_dict)
+
+     # initialize the asynchronous generator and start reading chunks, compress and send
+     # compressor = zlib.compressobj(level=1)
+     compressor = zstd.ZstdCompressor(level=1, threads=6).compressobj()
+     async for chunk in chunk_generator():
+         await send_cchunk(writer, compressor.compress(chunk))
+
+     # send what is left in the compressor; compress() doesn't always return a byte array, see bdd.py doc
+     # send size first again
+     await send_cchunk(writer, compressor.flush())
+
+     # send eof to server, signal that we are done
+     logger.debug("sending eof")
+     writer.write(struct.pack("!I", 0))
+     await writer.drain()
+
+     # close the writer here, stdout needs to be closed by the caller
+     writer.close()
+     await writer.wait_closed()
+
+
+ async def archive(backup_addr, request_dict, chunk_generator):
+     logger.info(request_dict)
+     reader, writer = await asyncio.open_connection(backup_addr, 8888)
+
+     await archive_init(reader, writer, request_dict)
+
+     # initialize the synchronous generator and start reading chunks, compress and send
+     # compressor = zlib.compressobj(level=1)
+     compressor = zstd.ZstdCompressor(level=1, threads=6).compressobj()
+     for chunk in chunk_generator():
+         await send_cchunk(writer, compressor.compress(chunk))
+
+     # send what is left in the compressor; compress() doesn't always return a byte array, see bdd.py doc
+     # send size first again
+     await send_cchunk(writer, compressor.flush())
+
+     # send eof to server, signal that we are done
+     logger.debug("sending eof")
+     writer.write(struct.pack("!I", 0))
+     await writer.drain()
+
+     # close the writer here, stdout needs to be closed by the caller
+     writer.close()
+     await writer.wait_closed()
+
+
+ async def meta(backup_addr, cmd, meta_dict):
+     reader, writer = await asyncio.open_connection(backup_addr, 8888)
+     writer.write(struct.pack("B", cmd.value))
+     await writer.drain()
+
+     meta_pickled = pickle.dumps(meta_dict)
+
+     # send size first
+     writer.write(struct.pack("!I", len(meta_pickled)))
+     await writer.drain()
+
+     # now send the dict
+     writer.write(meta_pickled)
+     await writer.drain()
+
+     writer.close()
+     await writer.wait_closed()
+
+
+ async def image_meta(backup_addr, meta_dict):
+     await meta(backup_addr, Command.IMAGE_META, meta_dict)
+
+
+ async def stack_meta(backup_addr, meta_dict):
+     await meta(backup_addr, Command.STACK_META, meta_dict)
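net.py implements the client side of a small framing protocol toward the backup daemon on port 8888: a one-byte command, a 4-byte big-endian length followed by a pickled request dict, a one-byte go signal from the daemon, then length-prefixed zstd-compressed data chunks terminated by a zero-length frame. The daemon itself (bdd.py) is not part of this diff; what follows is only a minimal sketch of a receiver matching this framing, with the handler name and the decompression step assumed rather than taken from the package.

import asyncio
import pickle
import struct
import zstandard as zstd

async def handle_archive(reader, writer):
    # one command byte (Command.ARCHIVE.value == 1 in net.py)
    (command,) = struct.unpack("B", await reader.readexactly(1))

    # length-prefixed pickled request dict
    (dict_len,) = struct.unpack("!I", await reader.readexactly(4))
    request_dict = pickle.loads(await reader.readexactly(dict_len))

    # go signal; the real daemon would acquire its write lock before sending this
    writer.write(b"\x01")
    await writer.drain()

    # length-prefixed zstd chunks until a zero-length frame marks EOF
    decompressor = zstd.ZstdDecompressor().decompressobj()
    while True:
        (size,) = struct.unpack("!I", await reader.readexactly(4))
        if size == 0:
            break
        data = decompressor.decompress(await reader.readexactly(size))
        # ... hand `data` to borg / storage, omitted here

    writer.close()
    await writer.wait_closed()

async def serve():
    server = await asyncio.start_server(handle_archive, "0.0.0.0", 8888)
    async with server:
        await server.serve_forever()

Note that the request dict travels as a pickle, so the daemon unpickles bytes received over the socket; that is only appropriate between fully trusted endpoints.
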
pve_cloud_backup/fetcher/nextcloud.py
@@ -0,0 +1,42 @@
+ import requests
+ from requests.auth import HTTPBasicAuth
+ import os
+ import logging
+ from net import archive
+
+
+ logger = logging.getLogger("fetcher")
+
+ password = None
+ if os.path.isfile("/opt/nextcloud.pass"):
+     with open("/opt/nextcloud.pass", "r", encoding="utf-8") as file:
+         password = file.read()
+ else:
+     logger.info("no nextcloud pass mounted, skipping nextcloud backup.")
+
+ username = os.getenv("NEXTCLOUD_USER")
+
+ nextcloud_base = os.getenv("NEXTCLOUD_BASE")
+
+
+ async def backup_nextcloud(backup_addr, timestamp, nextcloud_files):
+     if password is None:
+         logger.info("no nextcloud pass mounted, skipping nextcloud backup.")
+         return
+
+     for file in nextcloud_files:
+         request_dict = {
+             "borg_archive_type": "nextcloud",
+             "archive_name": file,
+             "timestamp": timestamp,
+             "stdin_name": file
+         }
+
+         def chunk_generator():
+             response = requests.get(f"{nextcloud_base}/remote.php/dav/files/{username}/{file}", auth=HTTPBasicAuth(username, password), stream=True)
+             for chunk in response.iter_content(chunk_size=4 * 1024 * 1024):
+                 if chunk:
+                     yield chunk
+
+         await archive(backup_addr, request_dict, chunk_generator)
+
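For orientation, a hypothetical standalone invocation mirroring what run() in main.py does, assuming NEXTCLOUD_USER and NEXTCLOUD_BASE are set and /opt/nextcloud.pass is mounted; the daemon address and file paths are placeholders:

import asyncio
from datetime import datetime

from nextcloud import backup_nextcloud  # same import path main.py uses

timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# each listed WebDAV path (relative to the user's files root) becomes one
# borg archive, streamed through the daemon at the given address
asyncio.run(backup_nextcloud("192.0.2.10", timestamp, ["Documents/notes.md", "exports/db-dump.sql.gz"]))
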
py_pve_cloud_backup-0.0.1.dist-info/METADATA
@@ -0,0 +1,12 @@
+ Metadata-Version: 2.4
+ Name: py-pve-cloud-backup
+ Version: 0.0.1
+ Author-email: Tobias Huebner <tobias.huebner@vmzberlin.com>
+ License-Expression: GPL-3.0-or-later
+ Requires-Dist: proxmoxer==2.2.0
+ Requires-Dist: kubernetes==34.1.0
+ Requires-Dist: PyYAML==6.0.2
+ Requires-Dist: paramiko==4.0.0
+ Requires-Dist: requests==2.32.5
+ Requires-Dist: zstandard==0.25.0
+ Requires-Dist: tinydb==4.8.2
py_pve_cloud_backup-0.0.1.dist-info/RECORD
@@ -0,0 +1,13 @@
+ pve_cloud_backup/daemon/bdd.py,sha256=iGc1v7RsPQ2Vd-WcthTrFzEjd2jLeI3YnXYXM876U2Y,5592
+ pve_cloud_backup/daemon/brctl.py,sha256=BKR2rxMAvfiLxuwfSN8xiZ4eux3TeQlMMbA096jgoKw,8855
+ pve_cloud_backup/daemon/shared.py,sha256=Afbbafs0FchUiofmhAKEr01zaE3jxL_nS_KUBViW6Rs,20729
+ pve_cloud_backup/fetcher/funcs.py,sha256=IXaDX0_9Fj2GnUIzAl5nDpk-umn27Sr5X3SYZCzeVKU,22199
+ pve_cloud_backup/fetcher/git.py,sha256=km_s5SoT-CIZ_RPGXoM9ieNMXXeUmyjLDn9DELh0QZA,1082
+ pve_cloud_backup/fetcher/main.py,sha256=viT04NeyWuDmMsm5_XkCRCGszGWmYnx--o9Z5IiZIaE,2889
+ pve_cloud_backup/fetcher/net.py,sha256=MVCqdb3ZFI8gbeUB2NrWrkykwuKAFOznH1gqtqyT4OE,3768
+ pve_cloud_backup/fetcher/nextcloud.py,sha256=HNb6rM-3t-GOZSJnWppKai1a4Yugx6lh9vuOlA4drdk,1166
+ py_pve_cloud_backup-0.0.1.dist-info/METADATA,sha256=SAcgrc7oK3HBnEch46ZWGR91wcgFwGijA2CE5if78eI,380
+ py_pve_cloud_backup-0.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ py_pve_cloud_backup-0.0.1.dist-info/entry_points.txt,sha256=0QG_j55-wiJS1_vLNsjju0Ly9ycpBSTOhUF-PlWizOM,148
+ py_pve_cloud_backup-0.0.1.dist-info/top_level.txt,sha256=w9KT8Yar3s02YhN-DBabObecqlIub36RT6lLZ4iG9dE,17
+ py_pve_cloud_backup-0.0.1.dist-info/RECORD,,
py_pve_cloud_backup-0.0.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
py_pve_cloud_backup-0.0.1.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+ [console_scripts]
+ brctl = pve_cloud_backup.daemon.brctl:main
+ daemon = pve_cloud_backup.daemon.bdd:main
+ fetcher = pve_cloud_backup.fetcher.main:main
py_pve_cloud_backup-0.0.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+ pve_cloud_backup