py-pve-cloud 0.0.1__py3-none-any.whl → 0.14.5rc0__py3-none-any.whl

This diff covers publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Note: this version of py-pve-cloud has been flagged as potentially problematic.
Files changed (31)
  1. pve_cloud/_version.py +1 -0
  2. pve_cloud/cli/pvcli.py +187 -0
  3. pve_cloud/cli/pvclu.py +146 -0
  4. pve_cloud/lib/inventory.py +276 -0
  5. pve_cloud/lib/validate.py +25 -0
  6. {orm → pve_cloud/orm}/alchemy.py +18 -22
  7. pve_cloud/orm/alembic.ini +147 -0
  8. pve_cloud/orm/migrations/env.py +83 -0
  9. pve_cloud/orm/migrations/versions/04398db10434_external_cp_extra_sans.py +44 -0
  10. pve_cloud/orm/migrations/versions/0ad803c51325_machine_type_refactor.py +65 -0
  11. pve_cloud/orm/migrations/versions/24a548bfce3e_len_rules_enforcements.py +133 -0
  12. pve_cloud/orm/migrations/versions/27724e407e2b_proxy_fqdn.py +32 -0
  13. pve_cloud/orm/migrations/versions/3c95509a5de9_fix.py +44 -0
  14. pve_cloud/orm/migrations/versions/7868bcd05006_migrate_old.py +83 -0
  15. pve_cloud/orm/migrations/versions/7dea8c4ee39f_init.py +36 -0
  16. pve_cloud/orm/migrations/versions/944a8fd5d5bc_ext_ctrl_plns.py +46 -0
  17. pve_cloud/orm/migrations/versions/d9b711555be8_ext_control_plane.py +37 -0
  18. pve_cloud/orm/migrations/versions/e60b9cc63413_ingress_generic.py +33 -0
  19. pve_cloud/orm/migrations/versions/fdcb5aa33b76_slop_firewall_seperation.py +54 -0
  20. py_pve_cloud-0.14.5rc0.dist-info/METADATA +14 -0
  21. py_pve_cloud-0.14.5rc0.dist-info/RECORD +25 -0
  22. py_pve_cloud-0.14.5rc0.dist-info/entry_points.txt +3 -0
  23. py_pve_cloud-0.14.5rc0.dist-info/licenses/LICENSE.md +660 -0
  24. py_pve_cloud-0.14.5rc0.dist-info/top_level.txt +1 -0
  25. cli/pvclu.py +0 -68
  26. py_pve_cloud-0.0.1.dist-info/METADATA +0 -11
  27. py_pve_cloud-0.0.1.dist-info/RECORD +0 -8
  28. py_pve_cloud-0.0.1.dist-info/entry_points.txt +0 -2
  29. py_pve_cloud-0.0.1.dist-info/licenses/LICENSE +0 -674
  30. py_pve_cloud-0.0.1.dist-info/top_level.txt +0 -2
  31. {py_pve_cloud-0.0.1.dist-info → py_pve_cloud-0.14.5rc0.dist-info}/WHEEL +0 -0
pve_cloud/_version.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.14.5rc0"
pve_cloud/cli/pvcli.py ADDED
@@ -0,0 +1,187 @@
+ import argparse
+ import os
+
+ import paramiko
+ import yaml
+ from proxmoxer import ProxmoxAPI
+
+ from pve_cloud.cli.pvclu import get_ssh_master_kubeconfig
+ from pve_cloud.lib.inventory import *
+
+
+ def connect_cluster(args):
+     # try to load the current dynamic inventory
+     inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
+     if os.path.exists(inv_path):
+         with open(inv_path, "r") as file:
+             dynamic_inventory = yaml.safe_load(file)
+     else:
+         # initialize empty
+         dynamic_inventory = {}
+
+     # connect to the cluster via paramiko and check if the cloud files are already there
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+     ssh.connect(args.pve_host, username="root")
+
+     # since we need root we can't use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     if not cluster_vars:
+         # cluster has not been initialized yet
+         pve_cloud_domain = input(
+             "Cluster has not yet been fully initialized, assign the cluster a cloud domain and press ENTER:"
+         )
+     else:
+         pve_cloud_domain = cluster_vars["pve_cloud_domain"]
+
+     # init cloud domain if not there
+     if pve_cloud_domain not in dynamic_inventory:
+         dynamic_inventory[pve_cloud_domain] = {}
+
+     # connect to the passed host
+     proxmox = ProxmoxAPI(args.pve_host, user="root", backend="ssh_paramiko")
+
+     # try to get the cluster name
+     cluster_name = None
+     status_resp = proxmox.cluster.status.get()
+     for entry in status_resp:
+         if entry["id"] == "cluster":
+             cluster_name = entry["name"]
+             break
+
+     if cluster_name is None:
+         raise Exception("Could not get cluster name")
+
+     if cluster_name in dynamic_inventory[pve_cloud_domain] and not args.force:
+         print(
+             f"cluster {cluster_name} already in dynamic inventory, add --force to overwrite the current local inv."
+         )
+         return
+
+     # overwrite on force / create fresh
+     dynamic_inventory[pve_cloud_domain][cluster_name] = {}
+
+     # not present => add and save the dynamic inventory
+     cluster_hosts = proxmox.nodes.get()
+
+     for node in cluster_hosts:
+         node_name = node["node"]
+
+         if node["status"] == "offline":
+             print(f"skipping offline node {node_name}")
+             continue
+
+         # get the main ip
+         ifaces = proxmox.nodes(node_name).network.get()
+         node_ip_address = None
+         for iface in ifaces:
+             if "gateway" in iface:
+                 if node_ip_address is not None:
+                     raise Exception(
+                         f"found multiple ifaces with gateways for node {node_name}"
+                     )
+                 node_ip_address = iface.get("address")
+
+         if node_ip_address is None:
+             raise Exception(f"Could not find ip for node {node_name}")
+
+         print(f"adding {node_name}")
+         dynamic_inventory[pve_cloud_domain][cluster_name][node_name] = {
+             "ansible_user": "root",
+             "ansible_host": node_ip_address,
+         }
+
+     print(f"writing dyn inv to {inv_path}")
+     with open(inv_path, "w") as file:
+         yaml.dump(dynamic_inventory, file)
+
+
+ def print_kubeconfig(args):
+     if not os.path.exists(args.inventory):
+         print("The specified inventory file does not exist!")
+         return
+
+     with open(args.inventory, "r") as f:
+         inventory = yaml.safe_load(f)
+
+     target_pve = inventory["target_pve"]
+
+     target_cloud_domain = get_cloud_domain(target_pve)
+     pve_inventory = get_pve_inventory(target_cloud_domain)
+
+     # find the target cluster in the loaded inventory
+     target_cluster = None
+
+     for cluster in pve_inventory:
+         if target_pve.endswith(cluster + "." + target_cloud_domain):
+             target_cluster = cluster
+             break
+
+     if not target_cluster:
+         print("could not find target cluster in pve inventory!")
+         return
+
+     first_host = list(pve_inventory[target_cluster].keys())[0]
+
+     # connect to the first pve host in the dyn inv, assumes they are all online
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+     ssh.connect(
+         pve_inventory[target_cluster][first_host]["ansible_host"], username="root"
+     )
+
+     # since we need root we can't use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     print(get_ssh_master_kubeconfig(cluster_vars, inventory["stack_name"]))
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="PVE general purpose cli for setup tasks."
+     )
+
+     base_parser = argparse.ArgumentParser(add_help=False)
+
+     subparsers = parser.add_subparsers(dest="command", required=True)
+
+     connect_cluster_parser = subparsers.add_parser(
+         "connect-cluster",
+         help="Add an entire pve cluster to this machine for use.",
+         parents=[base_parser],
+     )
+     connect_cluster_parser.add_argument(
+         "--pve-host",
+         type=str,
+         help="PVE host to connect to; its entire cluster is added for the local machine.",
+         required=True,
+     )
+     connect_cluster_parser.add_argument(
+         "--force", action="store_true", help="Will re-read the cluster if set."
+     )
+     connect_cluster_parser.set_defaults(func=connect_cluster)
+
+     print_kconf_parser = subparsers.add_parser(
+         "print-kubeconfig",
+         help="Print the kubeconfig from a k8s cluster deployed with pve cloud.",
+         parents=[base_parser],
+     )
+     print_kconf_parser.add_argument(
+         "--inventory",
+         type=str,
+         help="PVE cloud kubespray inventory yaml file.",
+         required=True,
+     )
+     print_kconf_parser.set_defaults(func=print_kubeconfig)
+
+     args = parser.parse_args()
+     args.func(args)
+
+
+ if __name__ == "__main__":
+     main()
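
For orientation, connect_cluster keys the local inventory by cloud domain, then cluster name, then node. A minimal sketch of reading that file back; the domain, cluster, and node names in the comment are hypothetical placeholders:

import os

import yaml

# Read the dynamic inventory that `pvcli connect-cluster` writes.
inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
with open(inv_path, "r") as f:
    dynamic_inventory = yaml.safe_load(f)

# Expected shape, based on connect_cluster above (names are made up):
# {"cloud.example.com":                  # pve_cloud_domain
#     {"pve1":                           # cluster name from /cluster/status
#         {"node-a": {"ansible_user": "root", "ansible_host": "10.0.0.11"}}}}
for domain, clusters in dynamic_inventory.items():
    for cluster, nodes in clusters.items():
        print(domain, cluster, sorted(nodes))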
pve_cloud/cli/pvclu.py ADDED
@@ -0,0 +1,146 @@
+ import argparse
+ import re
+
+ import dns.resolver
+ import paramiko
+ import yaml
+
+ from pve_cloud.lib.inventory import *
+
+
+ def get_cluster_vars(pve_host):
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+     ssh.connect(pve_host, username="root")
+
+     # since we need root we can't use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     return cluster_vars
+
+
+ def get_cloud_env(pve_host):
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+     ssh.connect(pve_host, username="root")
+
+     # since we need root we can't use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/secrets/patroni.pass")
+
+     patroni_pass = stdout.read().decode("utf-8").strip()
+
+     # fetch the bind update key for ingress dns validation
+     _, stdout, _ = ssh.exec_command("sudo cat /etc/pve/cloud/secrets/internal.key")
+     bind_key_file = stdout.read().decode("utf-8")
+
+     bind_internal_key = re.search(r'secret\s+"([^"]+)";', bind_key_file).group(1)
+
+     return cluster_vars, patroni_pass, bind_internal_key
+
+
+ def get_online_pve_host_prsr(args):
+     print(
+         f"export PVE_ANSIBLE_HOST='{get_online_pve_host(args.target_pve, suppress_warnings=True)}'"
+     )
+
+
+ def get_ssh_master_kubeconfig(cluster_vars, stack_name):
+     resolver = dns.resolver.Resolver()
+     resolver.nameservers = [
+         cluster_vars["bind_master_ip"],
+         cluster_vars["bind_slave_ip"],
+     ]
+
+     ddns_answer = resolver.resolve(
+         f"masters-{stack_name}.{cluster_vars['pve_cloud_domain']}"
+     )
+     ddns_ips = [rdata.to_text() for rdata in ddns_answer]
+
+     if not ddns_ips:
+         raise Exception("No master could be found via DNS!")
+
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+     ssh.connect(ddns_ips[0], username="admin")
+
+     # since we need root we can't use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("sudo cat /etc/kubernetes/admin.conf")
+
+     admin_conf = yaml.safe_load(stdout.read().decode("utf-8"))
+     # rewrite variables for external access
+     admin_conf["clusters"][0]["cluster"]["server"] = f"https://{ddns_ips[0]}:6443"
+     admin_conf["clusters"][0]["name"] = stack_name
+
+     admin_conf["contexts"][0]["context"]["cluster"] = stack_name
+     admin_conf["contexts"][0]["name"] = stack_name
+
+     admin_conf["current-context"] = stack_name
+
+     return yaml.safe_dump(admin_conf)
+
+
+ def export_pg_conn_str(args):
+     cloud_domain = get_cloud_domain(args.target_pve, suppress_warnings=True)
+     pve_inventory = get_pve_inventory(cloud_domain, suppress_warnings=True)
+
+     # get the ansible ip for the first host in the target cluster
+     ansible_host = None
+     for cluster in pve_inventory:
+         if args.target_pve.startswith(cluster):
+             ansible_host = next(iter(pve_inventory[cluster].values()))["ansible_host"]
+
+     if not ansible_host:
+         raise RuntimeError(f"Could not find online host for {args.target_pve}!")
+
+     cluster_vars, patroni_pass, bind_internal_key = get_cloud_env(ansible_host)
+
+     print(
+         f"export PG_CONN_STR=\"postgres://postgres:{patroni_pass}@{cluster_vars['pve_haproxy_floating_ip_internal']}:5000/tf_states?sslmode=disable\""
+     )
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="PVE Cloud utility cli. Should be called with bash eval."
+     )
+
+     base_parser = argparse.ArgumentParser(add_help=False)
+
+     subparsers = parser.add_subparsers(dest="command", required=True)
+
+     export_envr_parser = subparsers.add_parser(
+         "export-psql", help="Export variables for k8s .envrc", parents=[base_parser]
+     )
+     export_envr_parser.add_argument(
+         "--target-pve", type=str, help="The target pve cluster.", required=True
+     )
+     export_envr_parser.set_defaults(func=export_pg_conn_str)
+
+     get_online_pve_host_parser = subparsers.add_parser(
+         "get-online-host",
+         help="Gets the ip for the first online proxmox host in the cluster.",
+         parents=[base_parser],
+     )
+     get_online_pve_host_parser.add_argument(
+         "--target-pve",
+         type=str,
+         help="The target pve cluster to get the first online ip of.",
+         required=True,
+     )
+     get_online_pve_host_parser.set_defaults(func=get_online_pve_host_prsr)
+
+     args = parser.parse_args()
+     args.func(args)
+
+
+ if __name__ == "__main__":
+     main()
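
pvclu only prints `export ...` lines, so it is intended to be wrapped in a shell eval such as `eval "$(pvclu export-psql --target-pve pve1.cloud.example.com)"`. A minimal Python sketch that captures the printed connection string instead; it assumes the wheel installs a `pvclu` console script (as the entry_points metadata suggests) and uses a hypothetical --target-pve value:

import re
import subprocess

# Run `pvclu export-psql` and pull PG_CONN_STR out of the printed export line.
out = subprocess.run(
    ["pvclu", "export-psql", "--target-pve", "pve1.cloud.example.com"],
    capture_output=True,
    text=True,
    check=True,
).stdout

match = re.search(r'export PG_CONN_STR="([^"]+)"', out)
if match:
    print("postgres connection string:", match.group(1))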
pve_cloud/lib/inventory.py ADDED
@@ -0,0 +1,276 @@
+ import os
+ import shutil
+ import socket
+ import subprocess
+
+ import yaml
+ from proxmoxer import ProxmoxAPI
+
+ from pve_cloud.lib.validate import raise_on_py_cloud_missmatch
+
+
+ def get_cloud_domain(target_pve, suppress_warnings=False):
+     if shutil.which("avahi-browse"):
+         avahi_disc = subprocess.run(
+             ["avahi-browse", "-rpt", "_pxc._tcp"],
+             stdout=subprocess.PIPE,
+             text=True,
+             check=True,
+         )
+         services = avahi_disc.stdout.splitlines()
+
+         # find cloud domain hosts and get the first online host per proxmox cluster
+         for service in services:
+             if service.startswith("="):
+                 # avahi service def
+                 svc_args = service.split(";")
+
+                 cloud_domain = None
+                 cluster_name = None
+
+                 for txt_arg in svc_args[9].split():
+                     txt_arg = txt_arg.replace('"', "")
+                     if txt_arg.startswith("cloud_domain"):
+                         cloud_domain = txt_arg.split("=")[1]
+
+                     if txt_arg.startswith("cluster_name"):
+                         cluster_name = txt_arg.split("=")[1]
+
+                 if not cloud_domain or not cluster_name:
+                     raise ValueError(
+                         f"Misconfigured proxmox cloud avahi service: {service}"
+                     )
+
+                 if target_pve.endswith(cloud_domain):
+                     return cloud_domain
+
+         raise RuntimeError("Could not get cloud domain via avahi mdns!")
+     else:
+         if not suppress_warnings:
+             print(
+                 "avahi-browse not available, falling back to the local inventory file from pvcli connect-cluster!"
+             )
+
+         with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
+             pve_inventory = yaml.safe_load(f)
+
+         for pve_cloud in pve_inventory:
+             for pve_cluster in pve_inventory[pve_cloud]:
+                 if pve_cluster + "." + pve_cloud == target_pve:
+                     return pve_cloud
+
+         raise Exception(f"Could not identify cloud domain for {target_pve}")
+
+
+ def get_online_pve_host(target_pve, suppress_warnings=False, skip_py_cloud_check=False):
+     if shutil.which("avahi-browse"):
+         avahi_disc = subprocess.run(
+             ["avahi-browse", "-rpt", "_pxc._tcp"],
+             stdout=subprocess.PIPE,
+             text=True,
+             check=True,
+         )
+         services = avahi_disc.stdout.splitlines()
+
+         for service in services:
+             if service.startswith("="):
+                 # avahi service def
+                 svc_args = service.split(";")
+                 host_ip = svc_args[7]
+
+                 cloud_domain = None
+                 cluster_name = None
+
+                 for txt_arg in svc_args[9].split():
+                     txt_arg = txt_arg.replace('"', "")
+                     if txt_arg.startswith("cloud_domain"):
+                         cloud_domain = txt_arg.split("=")[1]
+
+                     if txt_arg.startswith("cluster_name"):
+                         cluster_name = txt_arg.split("=")[1]
+
+                 if not cloud_domain or not cluster_name:
+                     raise ValueError(
+                         f"Misconfigured proxmox cloud avahi service: {service}"
+                     )
+
+                 # main pve cloud inventory
+                 if f"{cluster_name}.{cloud_domain}" == target_pve:
+                     if not skip_py_cloud_check:
+                         raise_on_py_cloud_missmatch(
+                             host_ip
+                         )  # validate that the versions on the dev machine and the cluster match
+
+                     return host_ip
+
+         raise RuntimeError(f"No online host found for {target_pve}!")
+     else:
+         if not suppress_warnings:
+             print(
+                 "avahi-browse not available, falling back to the local inventory file from pvcli connect-cluster!"
+             )
+
+         with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
+             pve_inventory = yaml.safe_load(f)
+
+         for pve_cloud in pve_inventory:
+             for pve_cluster in pve_inventory[pve_cloud]:
+                 if pve_cluster + "." + pve_cloud == target_pve:
+                     for pve_host in pve_inventory[pve_cloud][pve_cluster]:
+                         # check if the host is reachable
+                         pve_host_ip = pve_inventory[pve_cloud][pve_cluster][pve_host][
+                             "ansible_host"
+                         ]
+                         try:
+                             with socket.create_connection((pve_host_ip, 22), timeout=3):
+
+                                 if not skip_py_cloud_check:
+                                     raise_on_py_cloud_missmatch(
+                                         pve_host_ip
+                                     )  # validate that the versions on the dev machine and the cluster match
+
+                                 return pve_host_ip
+                         except Exception as e:
+                             # debug
+                             print(e, type(e))
+
+         raise RuntimeError(f"Could not find online pve host for {target_pve}")
+
+
+ def get_pve_inventory(
+     pve_cloud_domain, suppress_warnings=False, skip_py_cloud_check=False
+ ):
+     if shutil.which("avahi-browse"):
+         # avahi is available
+
+         # call avahi-browse -rpt _pxc._tcp and find an online host matching the pve cloud domain,
+         # connect via ssh and fetch all other hosts via the proxmox api => build the inventory
+         avahi_disc = subprocess.run(
+             ["avahi-browse", "-rpt", "_pxc._tcp"],
+             stdout=subprocess.PIPE,
+             text=True,
+             check=True,
+         )
+         services = avahi_disc.stdout.splitlines()
+
+         pve_inventory = {}
+
+         py_pve_cloud_performed_version_checks = set()
+
+         # find cloud domain hosts and get the first online host per proxmox cluster
+         cloud_domain_first_hosts = {}
+         for service in services:
+             if service.startswith("="):
+                 # avahi service def
+                 svc_args = service.split(";")
+                 host_ip = svc_args[7]
+
+                 cloud_domain = None
+                 cluster_name = None
+
+                 for txt_arg in svc_args[9].split():
+                     txt_arg = txt_arg.replace('"', "")
+                     if txt_arg.startswith("cloud_domain"):
+                         cloud_domain = txt_arg.split("=")[1]
+
+                     if txt_arg.startswith("cluster_name"):
+                         cluster_name = txt_arg.split("=")[1]
+
+                 if not cloud_domain or not cluster_name:
+                     raise ValueError(
+                         f"Misconfigured proxmox cloud avahi service: {service}"
+                     )
+
+                 # main pve cloud inventory
+                 if (
+                     cloud_domain == pve_cloud_domain
+                     and cluster_name not in cloud_domain_first_hosts
+                 ):
+                     if (
+                         not skip_py_cloud_check
+                         and f"{cluster_name}.{cloud_domain}"
+                         not in py_pve_cloud_performed_version_checks
+                     ):
+                         raise_on_py_cloud_missmatch(
+                             host_ip
+                         )  # validate that the versions on the dev machine and the cluster match
+                         py_pve_cloud_performed_version_checks.add(
+                             f"{cluster_name}.{cloud_domain}"
+                         )  # perform the version check only once per cluster
+
+                     cloud_domain_first_hosts[cluster_name] = host_ip
+
+         # iterate over the hosts and build the pve inv via the proxmox api
+         # todo: this needs to be hugely optimized, it blocks the grpc server
+         for cluster_first, first_host in cloud_domain_first_hosts.items():
+             proxmox = ProxmoxAPI(first_host, user="root", backend="ssh_paramiko")
+
+             cluster_name = None
+             status_resp = proxmox.cluster.status.get()
+             for entry in status_resp:
+                 if entry["id"] == "cluster":
+                     cluster_name = entry["name"]
+                     break
+
+             if cluster_name is None:
+                 raise RuntimeError("Could not get cluster name")
+
+             if cluster_name != cluster_first:
+                 raise ValueError(
+                     f"Proxmox cluster name misconfigured in avahi service {cluster_name}/{cluster_first}"
+                 )
+
+             pve_inventory[cluster_name] = {}
+
+             # fetch the other hosts via the api
+             cluster_hosts = proxmox.nodes.get()
+
+             for node in cluster_hosts:
+                 node_name = node["node"]
+
+                 if node["status"] == "offline":
+                     print(f"skipping offline node {node_name}")
+                     continue
+
+                 # get the main ip
+                 ifaces = proxmox.nodes(node_name).network.get()
+                 node_ip_address = None
+                 for iface in ifaces:
+                     if "gateway" in iface:
+                         if node_ip_address is not None:
+                             raise RuntimeError(
+                                 f"found multiple ifaces with gateways for node {node_name}"
+                             )
+                         node_ip_address = iface.get("address")
+
+                 if node_ip_address is None:
+                     raise RuntimeError(f"Could not find ip for node {node_name}")
+
+                 pve_inventory[cluster_name][node_name] = {
+                     "ansible_user": "root",
+                     "ansible_host": node_ip_address,
+                 }
+
+         return pve_inventory
+
+     else:
+         if not suppress_warnings:
+             print(
+                 "avahi-browse not available, falling back to the local inventory file from pvcli connect-cluster!"
+             )
+         # try to load the fallback manual inventory from disk
+         inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
+         if not os.path.exists(inv_path):
+             raise RuntimeError(
+                 "Local pve inventory file missing (~/.pve-cloud-dyn-inv.yaml), execute `pvcli connect-cluster` or set up avahi mdns discovery!"
+             )
+
+         with open(inv_path, "r") as file:
+             dynamic_inventory = yaml.safe_load(file)
+
+         if pve_cloud_domain not in dynamic_inventory:
+             raise RuntimeError(
+                 f"{pve_cloud_domain} not in local dynamic inventory (~/.pve-cloud-dyn-inv.yaml created by `pvcli connect-cluster`)!"
+             )
+
+         return dynamic_inventory[pve_cloud_domain]
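
All three avahi code paths above parse the same `avahi-browse -rpt` output: resolved records start with `=` and are semicolon-separated, with the host address at index 7 and the TXT records at index 9. A minimal sketch against an illustrative record (the names and IP are hypothetical):

# Parse one resolved avahi-browse record the way the helpers above do.
sample = (
    '=;eth0;IPv4;pve1;_pxc._tcp;local;pve1.local;10.0.0.11;42;'
    '"cloud_domain=cloud.example.com" "cluster_name=pve1"'
)

svc_args = sample.split(";")
host_ip = svc_args[7]  # resolved address

txt = {}
for txt_arg in svc_args[9].split():
    key, _, value = txt_arg.replace('"', "").partition("=")
    txt[key] = value

print(host_ip, txt["cloud_domain"], txt["cluster_name"])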
pve_cloud/lib/validate.py ADDED
@@ -0,0 +1,25 @@
+ import os
+
+ import paramiko
+ import pve_cloud._version
+ import yaml
+
+
+ def raise_on_py_cloud_missmatch(proxmox_host):
+     # don't raise in tdd
+     if os.getenv("PYTEST_CURRENT_TEST"):
+         return
+
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+     ssh.connect(proxmox_host, username="root")
+
+     # since we need root we can't use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     if cluster_vars["py_pve_cloud_version"] != pve_cloud._version.__version__:
+         raise RuntimeError(
+             f"Version mismatch! py_pve_cloud_version for cluster is {cluster_vars['py_pve_cloud_version']}, while you are using {pve_cloud._version.__version__}"
+         )
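
The check compares the locally installed package version against the py_pve_cloud_version recorded in the cluster's cluster_vars.yaml. A minimal sketch of that comparison, with an assumed YAML payload standing in for the file fetched over SSH:

import yaml

import pve_cloud._version

# Assumed cluster_vars.yaml content; a real cluster ships more keys.
cluster_vars = yaml.safe_load(
    'py_pve_cloud_version: "0.14.5rc0"\n'
    "pve_cloud_domain: cloud.example.com\n"
)

if cluster_vars["py_pve_cloud_version"] != pve_cloud._version.__version__:
    raise RuntimeError(
        f"cluster runs {cluster_vars['py_pve_cloud_version']}, "
        f"local install is {pve_cloud._version.__version__}"
    )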