py-pve-cloud-backup 0.5.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,365 @@
1
+ import paramiko
2
+ import base64
3
+ import logging
4
+ import pickle
5
+ import subprocess
6
+ import json
7
+ import re
8
+ import yaml
9
+ import os
10
+ from pprint import pformat
11
+ from kubernetes import client
12
+ from kubernetes.config.kube_config import KubeConfigLoader
13
+ import pve_cloud_backup.fetcher.net as net
14
+ import asyncio
15
+
16
+
17
+ logger = logging.getLogger("fetcher")
18
+
19
+
20
+ # SSHs into one master VM per k8s stack and fetches the kubeconfig stored there
+ def get_kubernetes_clients(backup_config, proxmox, pkey):
+ logger.info("getting k8s clients")
23
+
24
+ k8s_stacks = backup_config["k8s_stacks"].keys()
25
+
26
+ k8s_masters = {}
27
+ # collect one master node per stack
28
+ for node in proxmox.nodes.get():
29
+ node_name = node["node"]
30
+
31
+ if node["status"] == "offline":
32
+ logger.info(f"skipping offline node {node_name}")
33
+ continue
34
+
35
+ for qemu in proxmox.nodes(node_name).qemu.get():
36
+ if "tags" in qemu and any(tag in k8s_stacks for tag in qemu["tags"].split(";")) and 'master' in qemu["tags"].split(";"):
37
+ # found a master
38
+ logger.debug(f"found master {pformat(qemu)}")
39
+
40
+ # find the stack tag
41
+ stack_tag = None
42
+ for tag in qemu["tags"].split(";"):
43
+ for k8s_stack_tag in k8s_stacks:
44
+ if tag == k8s_stack_tag:
45
+ stack_tag = tag
46
+
47
+ if stack_tag is None:
48
+ raise Exception(f"something went terribly wrong, stack tag should never be none - qemu:\n{pformat(qemu)}")
49
+
50
+ if stack_tag in k8s_masters:
51
+ continue # we already saved a master for this stack
52
+
53
+ k8s_masters[stack_tag] = {"pve_host": node_name, "vmid": qemu["vmid"]}
54
+
55
+ logger.debug(f"collected masters:\n{pformat(k8s_masters)}")
56
+
57
+ k8s_kubeconfigs = {}
58
+
59
+ # now we can connect to each master via ssh and fetch the kubeconfig
60
+ for k8s_stack, master in k8s_masters.items():
61
+ ifaces = proxmox.nodes(master["pve_host"]).qemu(master["vmid"]).agent("network-get-interfaces").get()
62
+ logger.debug(f"k8s stack master {k8s_stack} interfaces {pformat(ifaces)}")
63
+
64
+ master_ipv4 = None
65
+
66
+ for iface in ifaces["result"]:
67
+ if iface["name"] == "lo":
68
+ continue # skip the first loopback device
69
+
70
+ # after that comes the primary interface
71
+ for ip_address in iface["ip-addresses"]:
72
+ if ip_address["ip-address-type"] == "ipv4":
73
+ master_ipv4 = ip_address["ip-address"]
74
+ break
75
+
76
+ if master_ipv4 is None:
77
+ raise Exception(f"could not get ipv4 for master {master} stack {k8s_stack}")
78
+
79
+ break
80
+
81
+ # now we can use that address to connect via ssh
82
+ ssh = paramiko.SSHClient()
83
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
84
+
85
+ user = os.getenv("QEMU_ADMIN_USER", "admin")
86
+
87
+ logger.info(f"connecting to master {master_ipv4} - {user}")
88
+ ssh.connect(master_ipv4, username=user, pkey=pkey)
89
+
90
+ # we need root, but root ssh logins are disabled, so sftp is not an option; read the file via sudo cat instead
91
+ _, stdout, _ = ssh.exec_command("sudo cat /etc/kubernetes/admin.conf")
92
+
93
+ config = stdout.read().decode('utf-8')
94
+
95
+ logger.debug(f"ssh sudo cat kubeconfig:\n{config}")
96
+
97
+ k8s_kubeconfigs[k8s_stack] = { "raw_kubeconfig": config, "master_ip": master_ipv4 }
98
+
99
+ return k8s_kubeconfigs
100
+
101
+
102
+
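For orientation, the dict returned by get_kubernetes_clients is keyed by stack tag; a minimal sketch of the shape the rest of the pipeline expects, with placeholder values (the real kubeconfig is the full contents of /etc/kubernetes/admin.conf):

```python
# Hypothetical example of the structure returned above; stack tag, IP and
# kubeconfig text are placeholders, not real cluster data.
example_k8s_kubeconfigs = {
    "k8s-prod": {
        "raw_kubeconfig": "apiVersion: v1\nkind: Config\n# ...",
        "master_ip": "10.0.0.21",
    }
}
```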
103
+ # collect all pvc and pv information
104
+ def collect_raw_k8s_meta(backup_config, k8s_kubeconfigs):
105
+
106
+ k8s_stack_meta = {}
107
+
108
+ k8s_stack_namespace_secrets = {}
109
+
110
+ for k8s_stack, k8s_backup_config in backup_config["k8s_stacks"].items():
111
+
112
+ if k8s_backup_config["exclude_namespaces"] is not None and k8s_backup_config["include_namespaces"] is not None:
113
+ raise Exception(f"cannot specify include and exclude for k8s_stack {k8s_stack}")
114
+
115
+ kubeconfig = k8s_kubeconfigs[k8s_stack]
116
+ master_ipv4 = kubeconfig["master_ip"]
117
+ kubeconfig_dict = yaml.safe_load(kubeconfig["raw_kubeconfig"])
118
+
119
+ # override the connection ip as it is set to localhost on the machines
120
+ kubeconfig_dict["clusters"][0]["cluster"]["server"] = f"https://{master_ipv4}:6443"
121
+ logger.debug(f"kubeconfig dict {pformat(kubeconfig_dict)}")
122
+
123
+ # init kube client
124
+ loader = KubeConfigLoader(config_dict=kubeconfig_dict)
125
+ configuration = client.Configuration()
126
+ loader.load_and_set(configuration)
127
+
128
+ # Create a client from this configuration
129
+ api_client = client.ApiClient(configuration)
130
+ v1 = client.CoreV1Api(api_client=api_client)
131
+
132
+ # use the client to collect PVC/PV metadata per namespace
133
+ k8s_backup_meta = []
134
+
135
+ # sub dict for secrets of each namespace
136
+ k8s_stack_namespace_secrets[k8s_stack] = {}
137
+
138
+ for namespace_item in v1.list_namespace().items:
139
+ namespace = namespace_item.metadata.name
140
+
141
+ if k8s_backup_config["exclude_namespaces"] is not None and namespace in k8s_backup_config["exclude_namespaces"]:
142
+ continue
143
+
144
+ if k8s_backup_config["include_namespaces"] is not None and namespace not in k8s_backup_config["include_namespaces"]:
145
+ continue
146
+
147
+ # collect secrets of namespace
148
+ k8s_stack_namespace_secrets[k8s_stack][namespace] = [secret.to_dict() for secret in v1.list_namespaced_secret(namespace=namespace).items]
149
+
150
+ pvc_list = v1.list_namespaced_persistent_volume_claim(namespace=namespace)
151
+
152
+ for pvc in pvc_list.items:
153
+ pvc_name = pvc.metadata.name
154
+ volume_name = pvc.spec.volume_name
155
+ status = pvc.status.phase
156
+
157
+ if volume_name:
158
+ pv = v1.read_persistent_volume(name=volume_name)
159
+ pv_dict_b64 = base64.b64encode(pickle.dumps(pv.to_dict())).decode('utf-8')
160
+
161
+ pvc_dict_b64 = base64.b64encode(pickle.dumps(pvc.to_dict())).decode('utf-8')
162
+
163
+ k8s_backup_meta.append({"namespace": namespace, "pvc_name": pvc_name,
+ "image_name": pv.spec.csi.volume_attributes["imageName"], "pool": pv.spec.csi.volume_attributes["pool"],
+ "pvc_dict_b64": pvc_dict_b64, "pv_dict_b64": pv_dict_b64, "storage_class": pvc.spec.storage_class_name})
166
+ else:
167
+ logger.debug(f"PVC: {pvc_name} -> Not bound to a PV [Status: {status}]")
168
+
169
+ k8s_stack_meta[k8s_stack] = k8s_backup_meta
170
+
171
+ return k8s_stack_meta, k8s_stack_namespace_secrets
172
+
173
+
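The PVC and PV objects collected above travel as pickled, base64-encoded blobs. A minimal decoding sketch for the consumer side, assuming the data comes from this fetcher (pickle.loads must never be fed untrusted input):

```python
import base64
import pickle

def decode_pv_dict_b64(pv_dict_b64: str) -> dict:
    # reverse of base64.b64encode(pickle.dumps(pv.to_dict())) used above;
    # only safe for blobs produced by this fetcher itself
    return pickle.loads(base64.b64decode(pv_dict_b64))
```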
174
+ def pool_images(raw_k8s_meta):
175
+ # collect the set of Ceph pools referenced by the k8s volumes
176
+ unique_pools = set()
177
+
178
+ # collect pools from k8s volumes
179
+ for k8s_stack, k8s_metas in raw_k8s_meta.items():
180
+ for k8s_meta in k8s_metas:
181
+ unique_pools.add(k8s_meta["pool"])
182
+
183
+ # create rbd groups
184
+ for pool in unique_pools:
185
+ try:
186
+ # check for errors, capture stderr output as text
187
+ subprocess.run(["rbd", "group", "create", f"{pool}/backups"], check=True, capture_output=True, text=True)
188
+ except subprocess.CalledProcessError as e:
189
+ logger.warning(e.stdout + e.stderr) # not a problem if the group already exists (a previous cleanup failed)
190
+
191
+ # add rbds from pvcs
192
+ for k8s_metas in raw_k8s_meta.values():
193
+ for k8s_meta in k8s_metas:
194
+ pool = k8s_meta["pool"]
195
+ image = k8s_meta["image_name"]
196
+ try:
197
+ subprocess.run(["rbd", "group", "image", "add", f"{pool}/backups", f"{pool}/{image}"], check=True, capture_output=True, text=True)
198
+ except subprocess.CalledProcessError as e:
199
+ logger.error(e.stdout + e.stderr) # proper error printing
200
+ raise
201
+
202
+ return unique_pools
203
+
204
+
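To verify that the images really ended up in the per-pool group, a check along these lines could be used; it assumes the host has the same rbd CLI available as the calls above and that `rbd group image list` prints one image per line:

```python
import subprocess

def list_group_images(pool: str) -> list[str]:
    # sketch: list the images currently attached to the <pool>/backups group
    result = subprocess.run(
        ["rbd", "group", "image", "list", f"{pool}/backups"],
        check=True, capture_output=True, text=True,
    )
    return [line.strip() for line in result.stdout.splitlines() if line.strip()]
```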
205
+ def clone(pool, image, timestamp):
206
+ try:
207
+ command = subprocess.run(["rbd", "snap", "ls", "--all", "--format", "json", f"{pool}/{image}"], check=True, capture_output=True, text=True)
208
+ snaps = json.loads(command.stdout)
209
+ # rbd prints nothing on success, so there is nothing to log here
210
+ except subprocess.CalledProcessError as e:
211
+ logger.error(e.stdout + e.stderr)
212
+ raise
213
+
+ snap_id = None
+ for snap in snaps:
+ if snap["namespace"]["type"] == "group" and snap["namespace"]["group snap"] == timestamp:
+ snap_id = snap["id"]
+ break
+
+ # guard against the case where no matching group snapshot exists
+ if snap_id is None:
+ raise Exception(f"no group snapshot named {timestamp} found for {pool}/{image}")
+
+ logger.debug(f"image {image} snap id {snap_id}")
220
+
221
+ # create temporary clone
222
+ try:
223
+ subprocess.run(["rbd", "clone", "--snap-id", str(snap_id), f"{pool}/{image}", f"{pool}/temp-clone-{timestamp}-{image}", "--rbd-default-clone-format", "2"], check=True, capture_output=True, text=True)
224
+ except subprocess.CalledProcessError as e:
225
+ logger.error(e.stdout + e.stderr)
226
+ raise
227
+
228
+
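clone() matches group snapshots by the fields it reads from `rbd snap ls --all --format json`. A hypothetical entry reduced to exactly those fields (the real output carries more keys):

```python
# Assumed shape of one entry in the `rbd snap ls --all --format json` output,
# trimmed to the fields clone() actually inspects; values are placeholders.
example_snap_entry = {
    "id": 42,
    "namespace": {
        "type": "group",                      # group snapshot, not a user snapshot
        "group snap": "2024-01-01_00-00-00",  # the backup run timestamp
    },
}
```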
229
+ def snap_and_clone(raw_k8s_meta, timestamp, unique_pools):
230
+ logger.info("creating snaps")
231
+ for pool in unique_pools:
232
+ try:
233
+ subprocess.run(["rbd", "group", "snap", "create", f"{pool}/backups@{timestamp}"], check=True, capture_output=True, text=True)
234
+ # rbd prints nothing on success
235
+ except subprocess.CalledProcessError as e:
236
+ logger.error(e.stdout + e.stderr)
237
+ raise
238
+
239
+ logger.info("creating clones")
240
+
241
+ # clone all the snapshots into new images so we can export them
242
+ # sadly there is no direct export for group snapshots yet, so we go through clones
243
+ for k8s_metas in raw_k8s_meta.values():
244
+ for k8s_meta in k8s_metas:
245
+ pool = k8s_meta["pool"]
246
+ image = k8s_meta["image_name"]
247
+ clone(pool, image, timestamp)
248
+
249
+
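For reference, the Ceph naming scheme that pool_images, snap_and_clone, send_backups and cleanup all rely on, collected in one place (pool, image and timestamp are placeholders):

```python
pool, image, timestamp = "rbd-pool", "csi-vol-123", "2024-01-01_00-00-00"
group = f"{pool}/backups"                               # per-pool rbd group holding all images
group_snap = f"{pool}/backups@{timestamp}"              # group snapshot taken per backup run
temp_clone = f"{pool}/temp-clone-{timestamp}-{image}"   # writable clone that is exported and later removed
```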
250
+ async def send_export(send_command, semaphore):
251
+ async with semaphore:
252
+ backup_addr = send_command["backup_addr"]
253
+ params = send_command["params"]
254
+
255
+ request_dict = {
256
+ "borg_archive_type": params["type"],
257
+ "archive_name": params["image_name"],
258
+ "timestamp": params["timestamp"],
259
+ "stdin_name": params["image_name"] + ".raw",
260
+ "namespace": params["namespace"]
261
+ }
262
+ logger.info(request_dict)
263
+
264
+ # for full throughput the subprocess output has to be read asynchronously as well
265
+ async def async_chunk_generator():
266
+ proc = await asyncio.create_subprocess_exec(
267
+ *send_command["subprocess_args"],
268
+ stdout=asyncio.subprocess.PIPE
269
+ )
270
+
271
+ while True:
272
+ chunk = await proc.stdout.read(4 * 1024 * 1024 * 10) # read up to 40 MiB at a time
273
+ if not chunk:
274
+ break
275
+ yield chunk
276
+
277
+ await proc.wait()
278
+
279
+ await net.archive_async(backup_addr, request_dict, async_chunk_generator)
280
+
281
+
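The async subprocess streaming used in send_export can be exercised on its own; a small sketch of the same pattern, with the command left as a placeholder:

```python
import asyncio

async def stream_stdout(args, chunk_size=4 * 1024 * 1024):
    # same pattern as async_chunk_generator above: spawn the process,
    # read stdout in bounded chunks, then wait for the process to exit
    proc = await asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE)
    while True:
        chunk = await proc.stdout.read(chunk_size)
        if not chunk:
            break
        yield chunk
    await proc.wait()

# usage: async for chunk in stream_stdout(["rbd", "export", "pool/temp-clone-...-image", "-"]): ...
```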
282
+ async def send_backups(raw_k8s_meta, timestamp, backup_addr):
283
+ send_commands = []
284
+
285
+ for k8s_metas in raw_k8s_meta.values():
286
+ for k8s_meta in k8s_metas:
287
+ pool = k8s_meta["pool"]
288
+ image = k8s_meta["image_name"]
289
+
290
+ params = {"timestamp": timestamp, "image_name": image, "pool": pool, "type": "k8s", "namespace": k8s_meta["namespace"]}
291
+
292
+ send_commands.append({"params": params, "backup_addr": backup_addr, "subprocess_args": ["rbd", "export", f"{pool}/temp-clone-{timestamp}-{image}", "-"]})
293
+
294
+ semaphore = asyncio.Semaphore(int(os.getenv("SEND_PARALELLISM_NUM", "4")))
295
+
296
+ # one task per export, bounded by the semaphore; borg on the bdd side is single threaded per archive
297
+ export_tasks = [asyncio.create_task(send_export(command, semaphore)) for command in send_commands]
298
+
299
+ await asyncio.gather(*export_tasks)
300
+
301
+
302
+ async def post_image_meta(raw_k8s_meta, timestamp, backup_config, backup_addr):
303
+ for k8s_stack, k8s_metas in raw_k8s_meta.items():
304
+ for k8s_meta in k8s_metas:
305
+ pool = k8s_meta["pool"]
306
+ image = k8s_meta["image_name"]
307
+ body = {"timestamp": timestamp, "image_name": image, "pool": pool, "stack": k8s_stack, "type": "k8s", "namespace": k8s_meta["namespace"],
308
+ "pvc_dict_b64": k8s_meta['pvc_dict_b64'], "pv_dict_b64": k8s_meta['pv_dict_b64'], "pvc_name": k8s_meta["pvc_name"], "storage_class": k8s_meta["storage_class"]}
309
+
310
+ logger.debug(f"posting {body}")
311
+ await net.image_meta(backup_addr, body)
312
+
313
+
314
+ async def post_k8s_stack_meta(k8s_kubeconfigs, k8s_stack_namespace_secrets, timestamp, backup_addr):
315
+ for k8s_stack, kubeconfig in k8s_kubeconfigs.items():
316
+ namespace_secret_dict_b64 = base64.b64encode(pickle.dumps(k8s_stack_namespace_secrets[k8s_stack])).decode('utf-8')
317
+ body = {"timestamp": timestamp, "stack": k8s_stack, "type": "k8s", "raw_kubeconfig": kubeconfig["raw_kubeconfig"], "master_ip": kubeconfig["master_ip"], "namespace_secret_dict_b64": namespace_secret_dict_b64}
318
+ logger.debug(f"posting {body}")
319
+
320
+ await net.stack_meta(backup_addr, body)
321
+
322
+
323
+ def cleanup(raw_vm_meta, raw_k8s_meta, timestamp, unique_pools):
324
+ logger.info("cleanup")
325
+ # delete tmp images
326
+ if raw_vm_meta is not None:
327
+ for vm_meta in raw_vm_meta:
328
+ for disk_conf in vm_meta["disk_confs"].values():
329
+ image = disk_conf.split(",")[0].split(":")[1]
330
+ pool = disk_conf.split(",")[0].split(":")[0]
331
+ try:
332
+ subprocess.run(["rbd", "rm", f"{pool}/temp-clone-{timestamp}-{image}"], check=True, capture_output=True, text=True)
333
+ except subprocess.CalledProcessError as e:
334
+ logger.warning(e.stdout + e.stderr)
335
+
336
+
337
+ if raw_k8s_meta is not None:
338
+ for k8s_stack, k8s_metas in raw_k8s_meta.items():
339
+ for k8s_meta in k8s_metas:
340
+ pool = k8s_meta["pool"]
341
+ image = k8s_meta["image_name"]
342
+ try:
343
+ subprocess.run(["rbd", "rm", f"{pool}/temp-clone-{timestamp}-{image}"], check=True, capture_output=True, text=True)
344
+ except subprocess.CalledProcessError as e:
345
+ logger.warning(e.stdout + e.stderr)
346
+
347
+
348
+ if unique_pools is not None:
349
+ # delete snaps
350
+ for pool in unique_pools:
351
+ logger.debug("removing snaps from pool " + pool)
352
+ try:
353
+ subprocess.run(["rbd", "group", "snap", "rm", f"{pool}/backups@{timestamp}"], check=True, capture_output=True, text=True)
354
+ # rbd prints nothing on success
355
+ except subprocess.CalledProcessError as e:
356
+ logger.warning(e.stdout + e.stderr)
357
+
358
+ # delete groups
359
+ for pool in unique_pools:
360
+ logger.debug("removing backup group from pool " + pool)
361
+ try:
362
+ subprocess.run(["rbd", "group", "rm", f"{pool}/backups"], check=True, capture_output=True, text=True)
363
+ # rbd prints nothing on success
364
+ except subprocess.CalledProcessError as e:
365
+ logger.warning(e.stdout + e.stderr)
@@ -0,0 +1,45 @@
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ import tarfile
5
+ import logging
6
+ from pve_cloud_backup.fetcher.net import archive
7
+
8
+
9
+ logger = logging.getLogger("fetcher")
10
+
11
+
12
+ async def backup_git(backup_addr, timestamp, git_repos):
13
+ for repo_url in git_repos:
14
+ repo_name = os.path.splitext(os.path.basename(repo_url))[0]
15
+
16
+ archive_path = f"{repo_name}.tar"
17
+
18
+ subprocess.run(["git", "clone", repo_url, repo_name], check=True)
19
+
20
+ with tarfile.open(archive_path, "w") as tar:
21
+ tar.add(repo_name, arcname=repo_name)
22
+
23
+ shutil.rmtree(repo_name)
24
+
25
+ logger.info(f"Repository archived successfully as {archive_path}")
26
+
27
+ request_dict = {
28
+ "borg_archive_type": "git",
29
+ "archive_name": repo_name,
30
+ "timestamp": timestamp,
31
+ "stdin_name": archive_path
32
+ }
33
+ logger.info(request_dict)
34
+
35
+ def chunk_generator():
36
+ with open(archive_path, "rb") as file:
37
+ while True:
38
+ chunk = file.read(4 * 1024 * 1024) # 4 MiB
39
+ if not chunk:
40
+ break
41
+ yield chunk
42
+
43
+ await archive(backup_addr, request_dict, chunk_generator)
44
+
45
+ os.remove(archive_path)
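A hedged usage sketch for backup_git; the daemon address and repository URL are placeholders, and it assumes git is on PATH and the clone target directory does not already exist:

```python
import asyncio
from datetime import datetime
from pve_cloud_backup.fetcher.git import backup_git

timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# "bdd.example.internal" and the repository URL are placeholders
asyncio.run(backup_git("bdd.example.internal", timestamp, ["git@git.example.internal:org/repo.git"]))
```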
@@ -0,0 +1,86 @@
1
+ from datetime import datetime
2
+ import logging
3
+ import pve_cloud_backup.fetcher.funcs as funcs
4
+ from pve_cloud_backup.fetcher.nextcloud import backup_nextcloud
5
+ from pve_cloud_backup.fetcher.git import backup_git
6
+ from proxmoxer import ProxmoxAPI
7
+ import os
8
+ import yaml
9
+ from kubernetes import client, config
10
+ from pprint import pformat
11
+ import paramiko
12
+ import asyncio
13
+ from pve_cloud_backup.fetcher.patroni import backup_patroni
14
+
15
+
16
+
17
+ logging.basicConfig(level=getattr(logging, os.getenv("LOG_LEVEL", "DEBUG").upper()))
18
+ logger = logging.getLogger("fetcher")
19
+
20
+ proxmox = ProxmoxAPI(
21
+ os.getenv("PROXMOXER_HOST"), user=os.getenv("PROXMOXER_USER"), backend='ssh_paramiko', private_key_file='/opt/id_proxmox'
22
+ )
23
+
24
+ with open("/opt/backup-conf.yaml", "r") as file:
25
+ backup_config = yaml.safe_load(file)
26
+
27
+ backup_addr = backup_config["backup_daemon_address"]
28
+
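The schema of /opt/backup-conf.yaml is not included in this package diff; the sketch below is inferred from the keys read here and in funcs.py, written as the dict yaml.safe_load would produce. All values are placeholders, and the shape of patroni_stack is not visible in these files:

```python
# Inferred from how backup_config is consumed; not an official schema.
example_backup_config = {
    "backup_daemon_address": "bdd.example.internal",
    "git_repos": ["git@git.example.internal:org/repo.git"],
    "nextcloud_files": ["Documents/important.md"],
    "patroni_stack": "patroni-prod",  # exact structure unknown, placeholder only
    "k8s_stacks": {
        "k8s-prod": {
            # at most one of these two may be non-null, see collect_raw_k8s_meta
            "include_namespaces": None,
            "exclude_namespaces": ["kube-system"],
        }
    },
}
```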
29
+ # main is the production entry point and always runs in-cluster
30
+ config.load_incluster_config()
31
+ v1 = client.CoreV1Api()
32
+
33
+
34
+ async def run():
35
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
36
+
37
+ # backup to borg file repos
38
+
39
+ # defined and not null
40
+ if backup_config["git_repos"]:
41
+ await backup_git(backup_addr, timestamp, backup_config["git_repos"])
42
+ else:
43
+ logger.info("No git repos to backup provided, skipping")
44
+
45
+ # defined and not null
46
+ if backup_config["nextcloud_files"]:
47
+ await backup_nextcloud(backup_addr, timestamp, backup_config["nextcloud_files"])
48
+ else:
49
+ logger.info("No nextcloud files to backup provided, skipping")
50
+
51
+ if backup_config["patroni_stack"]:
52
+ await backup_patroni(backup_addr, timestamp, proxmox, backup_config["patroni_stack"], paramiko.Ed25519Key.from_private_key_file("/opt/id_qemu"))
53
+ else:
54
+ logger.info("No patroni stack provided, skipping pgdump.")
55
+
56
+ # backup vms and k8s
57
+ raw_k8s_meta = None
58
+ raw_vm_meta = None
59
+ unique_pools = None
60
+
61
+ try:
62
+ k8s_kubeconfigs = funcs.get_kubernetes_clients(backup_config, proxmox, paramiko.Ed25519Key.from_private_key_file("/opt/id_qemu"))
63
+ logger.debug(f"k8s_kubeconfigs:\n{pformat(k8s_kubeconfigs)}")
64
+
65
+ raw_k8s_meta, k8s_stack_namespace_secrets = funcs.collect_raw_k8s_meta(backup_config, k8s_kubeconfigs)
66
+ logger.debug(f"k8s_meta:\n{pformat(raw_k8s_meta)}")
67
+
68
+ # this creates the per-pool backup groups in Ceph and adds all images to them
69
+ unique_pools = funcs.pool_images(raw_k8s_meta)
70
+
71
+ # create group snapshots
72
+ funcs.snap_and_clone(raw_k8s_meta, timestamp, unique_pools)
73
+ await funcs.send_backups(raw_k8s_meta, timestamp, backup_addr)
74
+
75
+ await funcs.post_image_meta(raw_k8s_meta, timestamp, backup_config, backup_addr)
76
+ await funcs.post_k8s_stack_meta(k8s_kubeconfigs, k8s_stack_namespace_secrets, timestamp, backup_addr)
77
+
78
+ finally:
79
+ # we always want to do the cleanup even if something failed
80
+ funcs.cleanup(raw_vm_meta, raw_k8s_meta, timestamp, unique_pools)
81
+
82
+
83
+ def main():
84
+ asyncio.run(run())
85
+
86
+
@@ -0,0 +1,126 @@
1
+ import asyncio
2
+ from enum import Enum
3
+ import zstandard as zstd
4
+ import struct
5
+ import logging
6
+ import pickle
7
+
8
+ logger = logging.getLogger("fetcher")
9
+
10
+ class Command(Enum):
11
+ ARCHIVE = 1
12
+ IMAGE_META = 2
13
+ STACK_META = 3
14
+
15
+
16
+ async def archive_init(reader, writer, request_dict):
17
+ # initialize the archive command
18
+ writer.write(struct.pack("B", Command.ARCHIVE.value))
19
+ await writer.drain()
20
+ logger.debug("send command")
21
+
22
+ # send the archive request dict
23
+ req_dict_pickled = pickle.dumps(request_dict)
24
+ writer.write(struct.pack("!I", len(req_dict_pickled)))
25
+ await writer.drain()
26
+ writer.write(req_dict_pickled)
27
+ await writer.drain()
28
+ logger.debug("send dict")
29
+
30
+ # wait for the go signal, the server first needs to acquire its write lock
32
+ logger.debug("waiting for go from bdd")
33
+ signal = await reader.readexactly(1)
34
+ if signal != b'\x01':
35
+ logger.error("recieved incorrect go signal")
36
+ raise Exception("Incorrect go signal!")
37
+ logger.debug("received go")
38
+
39
+
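archive_init implies a fixed handshake on the bdd side: a one-byte command, a 4-byte big-endian length, the pickled request dict, then a single go byte back. The bdd server is not part of this file; a compatible receiver would look roughly like this sketch:

```python
import pickle
import struct

async def read_archive_request(reader, writer):
    # sketch of the server half of archive_init, not the actual bdd implementation
    command = struct.unpack("B", await reader.readexactly(1))[0]    # Command enum value
    dict_len = struct.unpack("!I", await reader.readexactly(4))[0]  # pickled dict size
    request_dict = pickle.loads(await reader.readexactly(dict_len))
    writer.write(b"\x01")  # go signal expected by the client
    await writer.drain()
    return command, request_dict
```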
40
+ async def send_cchunk(writer, compressed_chunk):
41
+ # send an already compressed chunk as a length-prefixed frame
42
+ if compressed_chunk: # only send if something actually got compressed
43
+ # send size + chunk
44
+ writer.write(struct.pack("!I", len(compressed_chunk)))
45
+ await writer.drain()
46
+ writer.write(compressed_chunk)
47
+ await writer.drain()
48
+
49
+
50
+ async def archive_async(backup_addr, request_dict, chunk_generator):
51
+ logger.info(request_dict)
52
+ reader, writer = await asyncio.open_connection(backup_addr, 8888)
53
+
54
+ await archive_init(reader, writer, request_dict)
55
+
56
+ # iterate the async chunk generator, compress each chunk and send it
57
+ # compressor = zlib.compressobj(level=1)
58
+ compressor = zstd.ZstdCompressor(level=1, threads=6).compressobj()
59
+ async for chunk in chunk_generator():
60
+ await send_cchunk(writer, compressor.compress(chunk))
61
+
62
+ # flush the compressor; compress() doesn't always return data right away, see the bdd.py doc
63
+ # send size first again
64
+ await send_cchunk(writer, compressor.flush())
65
+
66
+ # send eof to server, signal that we are done
67
+ logger.debug("sending eof")
68
+ writer.write(struct.pack("!I", 0))
69
+ await writer.drain()
70
+
71
+ # close the writer here, stdout needs to be closed by caller
72
+ writer.close()
73
+ await writer.wait_closed()
74
+
75
+
76
+ async def archive(backup_addr, request_dict, chunk_generator):
77
+ logger.info(request_dict)
78
+ reader, writer = await asyncio.open_connection(backup_addr, 8888)
79
+
80
+ await archive_init(reader, writer, request_dict)
81
+
82
+ # initialize the synchronous generator and start reading chunks, compress and send
83
+ # compressor = zlib.compressobj(level=1)
84
+ compressor = zstd.ZstdCompressor(level=1, threads=6).compressobj()
85
+ for chunk in chunk_generator():
86
+ await send_cchunk(writer, compressor.compress(chunk))
87
+
88
+ # flush the compressor; compress() doesn't always return data right away, see the bdd.py doc
89
+ # send size first again
90
+ await send_cchunk(writer, compressor.flush())
91
+
92
+ # send eof to server, signal that we are done
93
+ logger.debug("sending eof")
94
+ writer.write(struct.pack("!I", 0))
95
+ await writer.drain()
96
+
97
+ # close the writer here, stdout needs to be closed by caller
98
+ writer.close()
99
+ await writer.wait_closed()
100
+
101
+
102
+ async def meta(backup_addr, cmd, meta_dict):
103
+ reader, writer = await asyncio.open_connection(backup_addr, 8888)
104
+ writer.write(struct.pack("B", cmd.value))
105
+ await writer.drain()
106
+
107
+ meta_pickled = pickle.dumps(meta_dict)
108
+
109
+ # send size first
110
+ writer.write(struct.pack("!I", len(meta_pickled)))
111
+ await writer.drain()
112
+
113
+ # now send the dict
114
+ writer.write(meta_pickled)
115
+ await writer.drain()
116
+
117
+ writer.close()
118
+ await writer.wait_closed()
119
+
120
+
121
+ async def image_meta(backup_addr, meta_dict):
122
+ await meta(backup_addr, Command.IMAGE_META, meta_dict)
123
+
124
+
125
+ async def stack_meta(backup_addr, meta_dict):
126
+ await meta(backup_addr, Command.STACK_META, meta_dict)
@@ -0,0 +1,42 @@
1
+ import requests
2
+ from requests.auth import HTTPBasicAuth
3
+ import os
4
+ import logging
5
+ from pve_cloud_backup.fetcher.net import archive
6
+
7
+
8
+ logger = logging.getLogger("fetcher")
9
+
10
+ password = None
11
+ if os.path.isfile("/opt/nextcloud.pass"):
12
+ with open("/opt/nextcloud.pass", "r", encoding="utf-8") as file:
13
+ password = file.read()
14
+ else:
15
+ logger.info("no nextcloud pass mounted, skipping nextcloud backup.")
16
+
17
+ username = os.getenv("NEXTCLOUD_USER")
18
+
19
+ nextcloud_base = os.getenv("NEXTCLOUD_BASE")
20
+
21
+
22
+ async def backup_nextcloud(backup_addr, timestamp, nextcloud_files):
23
+ if password is None:
24
+ logger.info("no nextcloud pass mounted, skipping nextcloud backup.")
25
+ return
26
+
27
+ for file in nextcloud_files:
28
+ request_dict = {
29
+ "borg_archive_type": "nextcloud",
30
+ "archive_name": file,
31
+ "timestamp": timestamp,
32
+ "stdin_name": file
33
+ }
34
+
35
+ def chunk_generator():
36
+ response = requests.get(f"{nextcloud_base}/remote.php/dav/files/{username}/{file}", auth=HTTPBasicAuth(username, password), stream=True)
+ response.raise_for_status()  # fail early instead of archiving an HTTP error body
+ for chunk in response.iter_content(chunk_size=4 * 1024 * 1024):
38
+ if chunk:
39
+ yield chunk
40
+
41
+ await archive(backup_addr, request_dict, chunk_generator)
42
+
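A hedged usage sketch for backup_nextcloud; it assumes NEXTCLOUD_USER and NEXTCLOUD_BASE are set and /opt/nextcloud.pass is mounted, and the daemon address and file path below are placeholders:

```python
import asyncio
from datetime import datetime
from pve_cloud_backup.fetcher.nextcloud import backup_nextcloud

timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
asyncio.run(backup_nextcloud("bdd.example.internal", timestamp, ["Documents/important.md"]))
```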