py-pve-cloud 0.13.22__tar.gz → 0.14.2rc14__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
Files changed (33)
  1. {py_pve_cloud-0.13.22/src/py_pve_cloud.egg-info → py_pve_cloud-0.14.2rc14}/PKG-INFO +1 -1
  2. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/README.md +1 -1
  3. py_pve_cloud-0.14.2rc14/src/pve_cloud/_version.py +1 -0
  4. py_pve_cloud-0.14.2rc14/src/pve_cloud/cli/pvcli.py +187 -0
  5. py_pve_cloud-0.14.2rc14/src/pve_cloud/cli/pvclu.py +146 -0
  6. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/lib/inventory.py +103 -54
  7. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/lib/validate.py +8 -5
  8. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14/src/py_pve_cloud.egg-info}/PKG-INFO +1 -1
  9. py_pve_cloud-0.13.22/src/pve_cloud/_version.py +0 -1
  10. py_pve_cloud-0.13.22/src/pve_cloud/cli/pvcli.py +0 -158
  11. py_pve_cloud-0.13.22/src/pve_cloud/cli/pvclu.py +0 -122
  12. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/LICENSE.md +0 -0
  13. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/pyproject.toml +0 -0
  14. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/setup.cfg +0 -0
  15. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/alchemy.py +0 -0
  16. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/alembic.ini +0 -0
  17. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/env.py +0 -0
  18. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/04398db10434_external_cp_extra_sans.py +0 -0
  19. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/0ad803c51325_machine_type_refactor.py +0 -0
  20. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/24a548bfce3e_len_rules_enforcements.py +0 -0
  21. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/27724e407e2b_proxy_fqdn.py +0 -0
  22. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/3c95509a5de9_fix.py +0 -0
  23. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/7868bcd05006_migrate_old.py +0 -0
  24. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/7dea8c4ee39f_init.py +0 -0
  25. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/944a8fd5d5bc_ext_ctrl_plns.py +0 -0
  26. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/d9b711555be8_ext_control_plane.py +0 -0
  27. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/e60b9cc63413_ingress_generic.py +0 -0
  28. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/pve_cloud/orm/migrations/versions/fdcb5aa33b76_slop_firewall_seperation.py +0 -0
  29. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/py_pve_cloud.egg-info/SOURCES.txt +0 -0
  30. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/py_pve_cloud.egg-info/dependency_links.txt +0 -0
  31. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/py_pve_cloud.egg-info/entry_points.txt +0 -0
  32. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/py_pve_cloud.egg-info/requires.txt +0 -0
  33. {py_pve_cloud-0.13.22 → py_pve_cloud-0.14.2rc14}/src/py_pve_cloud.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: py-pve-cloud
- Version: 0.13.22
+ Version: 0.14.2rc14
  Author-email: Tobias Huebner <tobias.huebner@vmzberlin.com>
  License-Expression: GPL-3.0-or-later
  License-File: LICENSE.md
@@ -18,4 +18,4 @@ PROXY_IP=$(ssh root@$PVE_HOST_IP cat /etc/pve/cloud/cluster_vars.yaml | yq '.pve
  export PG_CONN_STR=postgresql+psycopg2://postgres:$PATRONI_PASS@$PROXY_IP:5000/pve_cloud?sslmode=disable
  ```

- To create a new migration the database needs to be on the latest version, run `alembic upgrade head` to upgrade it.
+ To create a new migration the database needs to be on the latest version, run `alembic upgrade head` to upgrade it.
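The `PG_CONN_STR` assembled in the README snippet above is a standard SQLAlchemy URL pointing at the Patroni-managed Postgres behind HAProxy on port 5000. A minimal sketch of consuming it from the environment, assuming SQLAlchemy is installed (as the package's `orm/alchemy.py` and alembic setup suggest):

```python
import os

from sqlalchemy import create_engine, text

# PG_CONN_STR is the variable exported by the README snippet above
engine = create_engine(os.environ["PG_CONN_STR"])

with engine.connect() as conn:
    # simple round-trip to confirm the HAProxy/Patroni endpoint is reachable
    print(conn.execute(text("SELECT version()")).scalar_one())
```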
@@ -0,0 +1 @@
+ __version__ = "0.14.2rc14"
@@ -0,0 +1,187 @@
+ import argparse
+ import os
+
+ import paramiko
+ import yaml
+ from proxmoxer import ProxmoxAPI
+
+ from pve_cloud.cli.pvclu import get_ssh_master_kubeconfig
+ from pve_cloud.lib.inventory import *
+
+
+ def connect_cluster(args):
+     # try load current dynamic inventory
+     inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
+     if os.path.exists(inv_path):
+         with open(inv_path, "r") as file:
+             dynamic_inventory = yaml.safe_load(file)
+     else:
+         # initialize empty
+         dynamic_inventory = {}
+
+     # connect to the cluster via paramiko and check if cloud files are already there
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+     ssh.connect(args.pve_host, username="root")
+
+     # since we need root we cant use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     if not cluster_vars:
+         # cluster has not been yet initialized
+         pve_cloud_domain = input(
+             "Cluster has not yet been fully initialized, assign the cluster a cloud domain and press ENTER:"
+         )
+     else:
+         pve_cloud_domain = cluster_vars["pve_cloud_domain"]
+
+     # init cloud domain if not there
+     if pve_cloud_domain not in dynamic_inventory:
+         dynamic_inventory[pve_cloud_domain] = {}
+
+     # connect to the passed host
+     proxmox = ProxmoxAPI(args.pve_host, user="root", backend="ssh_paramiko")
+
+     # try get the cluster name
+     cluster_name = None
+     status_resp = proxmox.cluster.status.get()
+     for entry in status_resp:
+         if entry["id"] == "cluster":
+             cluster_name = entry["name"]
+             break
+
+     if cluster_name is None:
+         raise Exception("Could not get cluster name")
+
+     if cluster_name in dynamic_inventory[pve_cloud_domain] and not args.force:
+         print(
+             f"cluster {cluster_name} already in dynamic inventory, add --force to overwrite current local inv."
+         )
+         return
+
+     # overwrite on force / create fresh
+     dynamic_inventory[pve_cloud_domain][cluster_name] = {}
+
+     # not present => add and safe the dynamic inventory
+     cluster_hosts = proxmox.nodes.get()
+
+     for node in cluster_hosts:
+         node_name = node["node"]
+
+         if node["status"] == "offline":
+             print(f"skipping offline node {node_name}")
+             continue
+
+         # get the main ip
+         ifaces = proxmox.nodes(node_name).network.get()
+         node_ip_address = None
+         for iface in ifaces:
+             if "gateway" in iface:
+                 if node_ip_address is not None:
+                     raise Exception(
+                         f"found multiple ifaces with gateways for node {node_name}"
+                     )
+                 node_ip_address = iface.get("address")
+
+         if node_ip_address is None:
+             raise Exception(f"Could not find ip for node {node_name}")
+
+         print(f"adding {node_name}")
+         dynamic_inventory[pve_cloud_domain][cluster_name][node_name] = {
+             "ansible_user": "root",
+             "ansible_host": node_ip_address,
+         }
+
+     print(f"writing dyn inv to {inv_path}")
+     with open(inv_path, "w") as file:
+         yaml.dump(dynamic_inventory, file)
+
+
+ def print_kubeconfig(args):
+     if not os.path.exists(args.inventory):
+         print("The specified inventory file does not exist!")
+         return
+
+     with open(args.inventory, "r") as f:
+         inventory = yaml.safe_load(f)
+
+     target_pve = inventory["target_pve"]
+
+     target_cloud_domain = get_cloud_domain(target_pve)
+     pve_inventory = get_pve_inventory(target_cloud_domain)
+
+     # find target cluster in loaded inventory
+     target_cluster = None
+
+     for cluster in pve_inventory:
+         if target_pve.endswith((cluster + "." + target_cloud_domain)):
+             target_cluster = cluster
+             break
+
+     if not target_cluster:
+         print("could not find target cluster in pve inventory!")
+         return
+
+     first_host = list(pve_inventory[target_cluster].keys())[0]
+
+     # connect to the first pve host in the dyn inv, assumes they are all online
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+     ssh.connect(
+         pve_inventory[target_cluster][first_host]["ansible_host"], username="root"
+     )
+
+     # since we need root we cant use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     print(get_ssh_master_kubeconfig(cluster_vars, inventory["stack_name"]))
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="PVE general purpose cli for setting up."
+     )
+
+     base_parser = argparse.ArgumentParser(add_help=False)
+
+     subparsers = parser.add_subparsers(dest="command", required=True)
+
+     connect_cluster_parser = subparsers.add_parser(
+         "connect-cluster",
+         help="Add an entire pve cluster to this machine for use.",
+         parents=[base_parser],
+     )
+     connect_cluster_parser.add_argument(
+         "--pve-host",
+         type=str,
+         help="PVE Host to connect to and add the entire cluster for the local machine.",
+         required=True,
+     )
+     connect_cluster_parser.add_argument(
+         "--force", action="store_true", help="Will read the cluster if set."
+     )
+     connect_cluster_parser.set_defaults(func=connect_cluster)
+
+     print_kconf_parser = subparsers.add_parser(
+         "print-kubeconfig",
+         help="Print the kubeconfig from a k8s cluster deployed with pve cloud.",
+         parents=[base_parser],
+     )
+     print_kconf_parser.add_argument(
+         "--inventory",
+         type=str,
+         help="PVE cloud kubespray inventory yaml file.",
+         required=True,
+     )
+     print_kconf_parser.set_defaults(func=print_kubeconfig)
+
+     args = parser.parse_args()
+     args.func(args)
+
+
+ if __name__ == "__main__":
+     main()
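The nested structure that `connect-cluster` writes to `~/.pve-cloud-dyn-inv.yaml` is cloud domain → cluster name → node name → Ansible vars, exactly as built in the code above. A minimal sketch of reading it back (the printed names are whatever your clusters use; nothing here is hard-coded by the package):

```python
import os

import yaml

# shape written by connect_cluster above:
# {cloud_domain: {cluster_name: {node_name: {"ansible_user": ..., "ansible_host": ...}}}}
with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")) as f:
    dyn_inv = yaml.safe_load(f)

for domain, clusters in dyn_inv.items():
    for cluster, nodes in clusters.items():
        for node, host_vars in nodes.items():
            print(f"{node}.{cluster}.{domain} -> {host_vars['ansible_host']}")
```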
@@ -0,0 +1,146 @@
+ import argparse
+ import re
+
+ import dns.resolver
+ import paramiko
+ import yaml
+
+ from pve_cloud.lib.inventory import *
+
+
+ def get_cluster_vars(pve_host):
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+     ssh.connect(pve_host, username="root")
+
+     # since we need root we cant use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     return cluster_vars
+
+
+ def get_cloud_env(pve_host):
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+     ssh.connect(pve_host, username="root")
+
+     # since we need root we cant use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/secrets/patroni.pass")
+
+     patroni_pass = stdout.read().decode("utf-8").strip()
+
+     # fetch bind update key for ingress dns validation
+     _, stdout, _ = ssh.exec_command("sudo cat /etc/pve/cloud/secrets/internal.key")
+     bind_key_file = stdout.read().decode("utf-8")
+
+     bind_internal_key = re.search(r'secret\s+"([^"]+)";', bind_key_file).group(1)
+
+     return cluster_vars, patroni_pass, bind_internal_key
+
+
+ def get_online_pve_host_prsr(args):
+     print(
+         f"export PVE_ANSIBLE_HOST='{get_online_pve_host(args.target_pve, suppress_warnings=True)}'"
+     )
+
+
+ def get_ssh_master_kubeconfig(cluster_vars, stack_name):
+     resolver = dns.resolver.Resolver()
+     resolver.nameservers = [
+         cluster_vars["bind_master_ip"],
+         cluster_vars["bind_slave_ip"],
+     ]
+
+     ddns_answer = resolver.resolve(
+         f"masters-{stack_name}.{cluster_vars['pve_cloud_domain']}"
+     )
+     ddns_ips = [rdata.to_text() for rdata in ddns_answer]
+
+     if not ddns_ips:
+         raise Exception("No master could be found via DNS!")
+
+     ssh = paramiko.SSHClient()
+     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+     ssh.connect(ddns_ips[0], username="admin")
+
+     # since we need root we cant use sftp and root via ssh is disabled
+     _, stdout, _ = ssh.exec_command("sudo cat /etc/kubernetes/admin.conf")
+
+     admin_conf = yaml.safe_load(stdout.read().decode("utf-8"))
+     # rewrite variables for external access
+     admin_conf["clusters"][0]["cluster"]["server"] = f"https://{ddns_ips[0]}:6443"
+     admin_conf["clusters"][0]["name"] = stack_name
+
+     admin_conf["contexts"][0]["context"]["cluster"] = stack_name
+     admin_conf["contexts"][0]["name"] = stack_name
+
+     admin_conf["current-context"] = stack_name
+
+     return yaml.safe_dump(admin_conf)
+
+
+ def export_pg_conn_str(args):
+     cloud_domain = get_cloud_domain(args.target_pve, suppress_warnings=True)
+     pve_inventory = get_pve_inventory(cloud_domain, suppress_warnings=True)
+
+     # get ansible ip for first host in target cluster
+     ansible_host = None
+     for cluster in pve_inventory:
+         if args.target_pve.startswith(cluster):
+             ansible_host = next(iter(pve_inventory[cluster].values()))["ansible_host"]
+
+     if not ansible_host:
+         raise RuntimeError(f"Could not find online host for {args.target_pve}!")
+
+     cluster_vars, patroni_pass, bind_internal_key = get_cloud_env(ansible_host)
+
+     print(
+         f"export PG_CONN_STR=\"postgres://postgres:{patroni_pass}@{cluster_vars['pve_haproxy_floating_ip_internal']}:5000/tf_states?sslmode=disable\""
+     )
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="PVE Cloud utility cli. Should be called with bash eval."
+     )
+
+     base_parser = argparse.ArgumentParser(add_help=False)
+
+     subparsers = parser.add_subparsers(dest="command", required=True)
+
+     export_envr_parser = subparsers.add_parser(
+         "export-psql", help="Export variables for k8s .envrc", parents=[base_parser]
+     )
+     export_envr_parser.add_argument(
+         "--target-pve", type=str, help="The target pve cluster.", required=True
+     )
+     export_envr_parser.set_defaults(func=export_pg_conn_str)
+
+     get_online_pve_host_parser = subparsers.add_parser(
+         "get-online-host",
+         help="Gets the ip for the first online proxmox host in the cluster.",
+         parents=[base_parser],
+     )
+     get_online_pve_host_parser.add_argument(
+         "--target-pve",
+         type=str,
+         help="The target pve cluster to get the first online ip of.",
+         required=True,
+     )
+     get_online_pve_host_parser.set_defaults(func=get_online_pve_host_prsr)
+
+     args = parser.parse_args()
+     args.func(args)
+
+
+ if __name__ == "__main__":
+     main()
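`pvclu` prints `export ...` lines rather than setting anything itself, hence the "Should be called with bash eval" description, e.g. `eval "$(pvclu export-psql --target-pve <cluster>.<cloud-domain>)"`. The DNS lookup that `get_ssh_master_kubeconfig` performs can be exercised in isolation with dnspython; a minimal sketch in which the IPs and names are placeholders, not values from the package:

```python
import dns.resolver

# placeholders for cluster_vars["bind_master_ip"] / cluster_vars["bind_slave_ip"]
nameservers = ["192.0.2.10", "192.0.2.11"]
stack_name = "demo"                  # placeholder stack name
cloud_domain = "cloud.example.com"   # placeholder pve_cloud_domain

resolver = dns.resolver.Resolver()
resolver.nameservers = nameservers

# same record get_ssh_master_kubeconfig resolves: one A record per k8s master
answer = resolver.resolve(f"masters-{stack_name}.{cloud_domain}")
print([rdata.to_text() for rdata in answer])
```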
@@ -1,15 +1,22 @@
+ import os
+ import shutil
+ import socket
  import subprocess
+
+ import yaml
  from proxmoxer import ProxmoxAPI
+
  from pve_cloud.lib.validate import raise_on_py_cloud_missmatch
- import shutil
- import yaml
- import os
- import socket


- def get_cloud_domain(target_pve, suppress_warnings = False):
+ def get_cloud_domain(target_pve, suppress_warnings=False):
      if shutil.which("avahi-browse"):
-         avahi_disc = subprocess.run(["avahi-browse", "-rpt", "_pxc._tcp"], stdout=subprocess.PIPE, text=True, check=True)
+         avahi_disc = subprocess.run(
+             ["avahi-browse", "-rpt", "_pxc._tcp"],
+             stdout=subprocess.PIPE,
+             text=True,
+             check=True,
+         )
          services = avahi_disc.stdout.splitlines()

          # find cloud domain hosts and get first online per proxmox cluster
@@ -22,7 +29,7 @@ def get_cloud_domain(target_pve, suppress_warnings = False):
              cluster_name = None

              for txt_arg in svc_args[9].split():
-                 txt_arg = txt_arg.replace('"', '')
+                 txt_arg = txt_arg.replace('"', "")
                  if txt_arg.startswith("cloud_domain"):
                      cloud_domain = txt_arg.split("=")[1]

@@ -30,16 +37,19 @@ def get_cloud_domain(target_pve, suppress_warnings = False):
                      cluster_name = txt_arg.split("=")[1]

              if not cloud_domain or not cluster_name:
-                 raise ValueError(f"Missconfigured proxmox cloud avahi service: {service}")
-
+                 raise ValueError(
+                     f"Missconfigured proxmox cloud avahi service: {service}"
+                 )
+
              if target_pve.endswith(cloud_domain):
                  return cloud_domain
-

          raise RuntimeError("Could not get cloud domain via avahi mdns!")
      else:
          if not suppress_warnings:
-             print("avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!")
+             print(
+                 "avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!"
+             )

          with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
              pve_inventory = yaml.safe_load(f)
@@ -48,13 +58,18 @@ def get_cloud_domain(target_pve, suppress_warnings = False):
          for pve_cluster in pve_inventory[pve_cloud]:
              if pve_cluster + "." + pve_cloud == target_pve:
                  return pve_cloud
-
+
          raise Exception(f"Could not identify cloud domain for {target_pve}")


- def get_online_pve_host(target_pve, suppress_warnings = False, skip_py_cloud_check=False):
+ def get_online_pve_host(target_pve, suppress_warnings=False, skip_py_cloud_check=False):
      if shutil.which("avahi-browse"):
-         avahi_disc = subprocess.run(["avahi-browse", "-rpt", "_pxc._tcp"], stdout=subprocess.PIPE, text=True, check=True)
+         avahi_disc = subprocess.run(
+             ["avahi-browse", "-rpt", "_pxc._tcp"],
+             stdout=subprocess.PIPE,
+             text=True,
+             check=True,
+         )
          services = avahi_disc.stdout.splitlines()

          for service in services:
@@ -67,7 +82,7 @@ def get_online_pve_host(target_pve, suppress_warnings = False, skip_py_cloud_che
              cluster_name = None

              for txt_arg in svc_args[9].split():
-                 txt_arg = txt_arg.replace('"', '')
+                 txt_arg = txt_arg.replace('"', "")
                  if txt_arg.startswith("cloud_domain"):
                      cloud_domain = txt_arg.split("=")[1]

@@ -75,19 +90,25 @@ def get_online_pve_host(target_pve, suppress_warnings = False, skip_py_cloud_che
                      cluster_name = txt_arg.split("=")[1]

              if not cloud_domain or not cluster_name:
-                 raise ValueError(f"Missconfigured proxmox cloud avahi service: {service}")
-
+                 raise ValueError(
+                     f"Missconfigured proxmox cloud avahi service: {service}"
+                 )
+
              # main pve cloud inventory
              if f"{cluster_name}.{cloud_domain}" == target_pve:
                  if not skip_py_cloud_check:
-                     raise_on_py_cloud_missmatch(host_ip) # validate that versions of dev machine and running on cluster match
+                     raise_on_py_cloud_missmatch(
+                         host_ip
+                     ) # validate that versions of dev machine and running on cluster match

                  return host_ip
-
+
          raise RuntimeError(f"No online host found for {target_pve}!")
      else:
          if not suppress_warnings:
-             print("avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!")
+             print(
+                 "avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!"
+             )

          with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
              pve_inventory = yaml.safe_load(f)
@@ -97,29 +118,39 @@ def get_online_pve_host(target_pve, suppress_warnings = False, skip_py_cloud_che
                  if pve_cluster + "." + pve_cloud == target_pve:
                      for pve_host in pve_inventory[pve_cloud][pve_cluster]:
                          # check if host is available
-                         pve_host_ip = pve_inventory[pve_cloud][pve_cluster][pve_host]["ansible_host"]
+                         pve_host_ip = pve_inventory[pve_cloud][pve_cluster][pve_host][
+                             "ansible_host"
+                         ]
                          try:
                              with socket.create_connection((pve_host_ip, 22), timeout=3):
-
+
                                  if not skip_py_cloud_check:
-                                     raise_on_py_cloud_missmatch(pve_host_ip) # validate that versions of dev machine and running on cluster match
+                                     raise_on_py_cloud_missmatch(
+                                         pve_host_ip
+                                     ) # validate that versions of dev machine and running on cluster match

                                  return pve_host_ip
                          except Exception as e:
                              # debug
                              print(e, type(e))
-                             pass
-
+
      raise RuntimeError(f"Could not find online pve host for {target_pve}")


- def get_pve_inventory(pve_cloud_domain, suppress_warnings = False, skip_py_cloud_check=False):
+ def get_pve_inventory(
+     pve_cloud_domain, suppress_warnings=False, skip_py_cloud_check=False
+ ):
      if shutil.which("avahi-browse"):
          # avahi is available

          # call avahi-browse -rpt _pxc._tcp and find online host matching pve cloud domain
          # connect via ssh and fetch all other hosts via proxmox api => build inventory
-         avahi_disc = subprocess.run(["avahi-browse", "-rpt", "_pxc._tcp"], stdout=subprocess.PIPE, text=True, check=True)
+         avahi_disc = subprocess.run(
+             ["avahi-browse", "-rpt", "_pxc._tcp"],
+             stdout=subprocess.PIPE,
+             text=True,
+             check=True,
+         )
          services = avahi_disc.stdout.splitlines()

          pve_inventory = {}
@@ -138,7 +169,7 @@ def get_pve_inventory(pve_cloud_domain, suppress_warnings = False, skip_py_cloud
              cluster_name = None

              for txt_arg in svc_args[9].split():
-                 txt_arg = txt_arg.replace('"', '')
+                 txt_arg = txt_arg.replace('"', "")
                  if txt_arg.startswith("cloud_domain"):
                      cloud_domain = txt_arg.split("=")[1]

@@ -146,36 +177,49 @@ def get_pve_inventory(pve_cloud_domain, suppress_warnings = False, skip_py_cloud
                      cluster_name = txt_arg.split("=")[1]

              if not cloud_domain or not cluster_name:
-                 raise ValueError(f"Missconfigured proxmox cloud avahi service: {service}")
-
+                 raise ValueError(
+                     f"Missconfigured proxmox cloud avahi service: {service}"
+                 )
+
              # main pve cloud inventory
-             if cloud_domain == pve_cloud_domain and cluster_name not in cloud_domain_first_hosts:
-                 if not skip_py_cloud_check and f"{cluster_name}.{cloud_domain}" not in py_pve_cloud_performed_version_checks:
-                     raise_on_py_cloud_missmatch(host_ip) # validate that versions of dev machine and running on cluster match
-                     py_pve_cloud_performed_version_checks.add(f"{cluster_name}.{cloud_domain}") # perform version check only once per cluster
+             if (
+                 cloud_domain == pve_cloud_domain
+                 and cluster_name not in cloud_domain_first_hosts
+             ):
+                 if (
+                     not skip_py_cloud_check
+                     and f"{cluster_name}.{cloud_domain}"
+                     not in py_pve_cloud_performed_version_checks
+                 ):
+                     raise_on_py_cloud_missmatch(
+                         host_ip
+                     ) # validate that versions of dev machine and running on cluster match
+                     py_pve_cloud_performed_version_checks.add(
+                         f"{cluster_name}.{cloud_domain}"
+                     ) # perform version check only once per cluster

                  cloud_domain_first_hosts[cluster_name] = host_ip
-
+
          # iterate over hosts and build pve inv via proxmox api
          # todo: this needs to be hugely optimized it blocks the grpc server
          for cluster_first, first_host in cloud_domain_first_hosts.items():
-             proxmox = ProxmoxAPI(
-                 first_host, user="root", backend='ssh_paramiko'
-             )
+             proxmox = ProxmoxAPI(first_host, user="root", backend="ssh_paramiko")

              cluster_name = None
              status_resp = proxmox.cluster.status.get()
              for entry in status_resp:
-                 if entry['id'] == "cluster":
-                     cluster_name = entry['name']
+                 if entry["id"] == "cluster":
+                     cluster_name = entry["name"]
                      break

              if cluster_name is None:
                  raise RuntimeError("Could not get cluster name")
-
+
              if cluster_name != cluster_first:
-                 raise ValueError(f"Proxmox cluster name missconfigured in avahi service {cluster_name}/{cluster_first}")
-
+                 raise ValueError(
+                     f"Proxmox cluster name missconfigured in avahi service {cluster_name}/{cluster_first}"
+                 )
+
              pve_inventory[cluster_name] = {}

              # fetch other hosts via api
@@ -187,41 +231,46 @@ def get_pve_inventory(pve_cloud_domain, suppress_warnings = False, skip_py_cloud
                  if node["status"] == "offline":
                      print(f"skipping offline node {node_name}")
                      continue
-
+
                  # get the main ip
                  ifaces = proxmox.nodes(node_name).network.get()
                  node_ip_address = None
                  for iface in ifaces:
-                     if 'gateway' in iface:
+                     if "gateway" in iface:
                          if node_ip_address is not None:
-                             raise RuntimeError(f"found multiple ifaces with gateways for node {node_name}")
+                             raise RuntimeError(
+                                 f"found multiple ifaces with gateways for node {node_name}"
+                             )
                          node_ip_address = iface.get("address")

                  if node_ip_address is None:
                      raise RuntimeError(f"Could not find ip for node {node_name}")
-
+
                  pve_inventory[cluster_name][node_name] = {
                      "ansible_user": "root",
-                     "ansible_host": node_ip_address
+                     "ansible_host": node_ip_address,
                  }
-

          return pve_inventory

      else:
          if not suppress_warnings:
-             print("avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!")
+             print(
+                 "avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!"
+             )
          # try load fallback manual inventory from disk
          inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
          if not os.path.exists(inv_path):
-             raise RuntimeError("Local pve inventory file missing (~/.pve-cloud-dyn-inv.yaml), execute `pvcli connect-cluster` or setup avahi mdns discovery!")
+             raise RuntimeError(
+                 "Local pve inventory file missing (~/.pve-cloud-dyn-inv.yaml), execute `pvcli connect-cluster` or setup avahi mdns discovery!"
+             )

          with open(inv_path, "r") as file:
              dynamic_inventory = yaml.safe_load(file)

          if pve_cloud_domain not in dynamic_inventory:
-             raise RuntimeError(f"{pve_cloud_domain} not in local dynamic inventory (~/.pve-cloud-dyn-inv.yaml created by `pvcli connect-cluster`)!")
+             raise RuntimeError(
+                 f"{pve_cloud_domain} not in local dynamic inventory (~/.pve-cloud-dyn-inv.yaml created by `pvcli connect-cluster`)!"
+             )

          return dynamic_inventory[pve_cloud_domain]
-
-
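A note on the parsing above: `avahi-browse -rpt` emits `;`-separated records, and for a resolved (`=`) record the fields run interface, protocol, name, type, domain, hostname, address, port, TXT, which is why the code indexes `svc_args[9]` for the TXT strings (the hunks never show the `host_ip` assignment, but it presumably comes from the address field at index 7). A stand-alone sketch against a fabricated record:

```python
# fabricated avahi-browse -rpt record; real output comes from the _pxc._tcp service
service = (
    '=;eth0;IPv4;pve1;_pxc._tcp;local;pve1.local;192.0.2.21;443;'
    '"cloud_domain=cloud.example.com" "cluster_name=pve1"'
)

svc_args = service.split(";")
host_ip = svc_args[7]  # resolved address field (assumption, see note above)

cloud_domain = None
cluster_name = None
for txt_arg in svc_args[9].split():
    txt_arg = txt_arg.replace('"', "")
    if txt_arg.startswith("cloud_domain"):
        cloud_domain = txt_arg.split("=")[1]
    if txt_arg.startswith("cluster_name"):
        cluster_name = txt_arg.split("=")[1]

print(host_ip, cloud_domain, cluster_name)  # 192.0.2.21 cloud.example.com pve1
```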
@@ -1,7 +1,8 @@
+ import os
+
  import paramiko
- import yaml
  import pve_cloud._version
- import os
+ import yaml


  def raise_on_py_cloud_missmatch(proxmox_host):
@@ -16,7 +17,9 @@ def raise_on_py_cloud_missmatch(proxmox_host):
      # since we need root we cant use sftp and root via ssh is disabled
      _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")

-     cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
-
+     cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
      if cluster_vars["py_pve_cloud_version"] != pve_cloud._version.__version__:
-         raise RuntimeError(f"Version missmatch! py_pve_cloud_version for cluster is {cluster_vars['py_pve_cloud_version']}, while you are using {pve_cloud._version.__version__}")
+         raise RuntimeError(
+             f"Version missmatch! py_pve_cloud_version for cluster is {cluster_vars['py_pve_cloud_version']}, while you are using {pve_cloud._version.__version__}"
+         )
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: py-pve-cloud
- Version: 0.13.22
+ Version: 0.14.2rc14
  Author-email: Tobias Huebner <tobias.huebner@vmzberlin.com>
  License-Expression: GPL-3.0-or-later
  License-File: LICENSE.md
@@ -1 +0,0 @@
- __version__ = "0.13.22"
@@ -1,158 +0,0 @@
- import argparse
- import yaml
- from proxmoxer import ProxmoxAPI
- import os
- import paramiko
- from pve_cloud.cli.pvclu import get_ssh_master_kubeconfig
- from pve_cloud.lib.inventory import *
-
-
-
- def connect_cluster(args):
-     # try load current dynamic inventory
-     inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
-     if os.path.exists(inv_path):
-         with open(inv_path, "r") as file:
-             dynamic_inventory = yaml.safe_load(file)
-     else:
-         # initialize empty
-         dynamic_inventory = {}
-
-     # connect to the cluster via paramiko and check if cloud files are already there
-     ssh = paramiko.SSHClient()
-     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-     ssh.connect(args.pve_host, username="root")
-
-     # since we need root we cant use sftp and root via ssh is disabled
-     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
-     cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
-
-     if not cluster_vars:
-         # cluster has not been yet initialized
-         pve_cloud_domain = input("Cluster has not yet been fully initialized, assign the cluster a cloud domain and press ENTER:")
-     else:
-         pve_cloud_domain = cluster_vars["pve_cloud_domain"]
-
-     # init cloud domain if not there
-     if pve_cloud_domain not in dynamic_inventory:
-         dynamic_inventory[pve_cloud_domain] = {}
-
-     # connect to the passed host
-     proxmox = ProxmoxAPI(
-         args.pve_host, user="root", backend='ssh_paramiko'
-     )
-
-     # try get the cluster name
-     cluster_name = None
-     status_resp = proxmox.cluster.status.get()
-     for entry in status_resp:
-         if entry['id'] == "cluster":
-             cluster_name = entry['name']
-             break
-
-     if cluster_name is None:
-         raise Exception("Could not get cluster name")
-
-     if cluster_name in dynamic_inventory[pve_cloud_domain] and not args.force:
-         print(f"cluster {cluster_name} already in dynamic inventory, add --force to overwrite current local inv.")
-         return
-
-     # overwrite on force / create fresh
-     dynamic_inventory[pve_cloud_domain][cluster_name] = {}
-
-     # not present => add and safe the dynamic inventory
-     cluster_hosts = proxmox.nodes.get()
-
-     for node in cluster_hosts:
-         node_name = node["node"]
-
-         if node["status"] == "offline":
-             print(f"skipping offline node {node_name}")
-             continue
-
-         # get the main ip
-         ifaces = proxmox.nodes(node_name).network.get()
-         node_ip_address = None
-         for iface in ifaces:
-             if 'gateway' in iface:
-                 if node_ip_address is not None:
-                     raise Exception(f"found multiple ifaces with gateways for node {node_name}")
-                 node_ip_address = iface.get("address")
-
-         if node_ip_address is None:
-             raise Exception(f"Could not find ip for node {node_name}")
-
-         print(f"adding {node_name}")
-         dynamic_inventory[pve_cloud_domain][cluster_name][node_name] = {
-             "ansible_user": "root",
-             "ansible_host": node_ip_address
-         }
-
-     print(f"writing dyn inv to {inv_path}")
-     with open(inv_path, "w") as file:
-         yaml.dump(dynamic_inventory, file)
-
-
- def print_kubeconfig(args):
-     if not os.path.exists(args.inventory):
-         print("The specified inventory file does not exist!")
-         return
-
-     with open(args.inventory, "r") as f:
-         inventory = yaml.safe_load(f)
-
-     target_pve = inventory["target_pve"]
-
-     target_cloud_domain = get_cloud_domain(target_pve)
-     pve_inventory = get_pve_inventory(target_cloud_domain)
-
-     # find target cluster in loaded inventory
-     target_cluster = None
-
-     for cluster in pve_inventory:
-         if target_pve.endswith((cluster + "." + target_cloud_domain)):
-             target_cluster = cluster
-             break
-
-     if not target_cluster:
-         print("could not find target cluster in pve inventory!")
-         return
-
-     first_host = list(pve_inventory[target_cluster].keys())[0]
-
-     # connect to the first pve host in the dyn inv, assumes they are all online
-     ssh = paramiko.SSHClient()
-     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-     ssh.connect(pve_inventory[target_cluster][first_host]["ansible_host"], username="root")
-
-     # since we need root we cant use sftp and root via ssh is disabled
-     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
-
-     cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
-
-     print(get_ssh_master_kubeconfig(cluster_vars, inventory["stack_name"]))
-
-
- def main():
-     parser = argparse.ArgumentParser(description="PVE general purpose cli for setting up.")
-
-     base_parser = argparse.ArgumentParser(add_help=False)
-
-     subparsers = parser.add_subparsers(dest="command", required=True)
-
-     connect_cluster_parser = subparsers.add_parser("connect-cluster", help="Add an entire pve cluster to this machine for use.", parents=[base_parser])
-     connect_cluster_parser.add_argument("--pve-host", type=str, help="PVE Host to connect to and add the entire cluster for the local machine.", required=True)
-     connect_cluster_parser.add_argument("--force", action="store_true", help="Will read the cluster if set.")
-     connect_cluster_parser.set_defaults(func=connect_cluster)
-
-     print_kconf_parser = subparsers.add_parser("print-kubeconfig", help="Print the kubeconfig from a k8s cluster deployed with pve cloud.", parents=[base_parser])
-     print_kconf_parser.add_argument("--inventory", type=str, help="PVE cloud kubespray inventory yaml file.", required=True)
-     print_kconf_parser.set_defaults(func=print_kubeconfig)
-
-     args = parser.parse_args()
-     args.func(args)
-
-
- if __name__ == "__main__":
-     main()
@@ -1,122 +0,0 @@
- import argparse
- import yaml
- import pprint
- import paramiko
- import dns.resolver
- import base64
- import re
- from pve_cloud.lib.inventory import *
-
-
- def get_cluster_vars(pve_host):
-     ssh = paramiko.SSHClient()
-     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-     ssh.connect(pve_host, username="root")
-
-     # since we need root we cant use sftp and root via ssh is disabled
-     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
-
-     cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
-
-     return cluster_vars
-
-
- def get_cloud_env(pve_host):
-     ssh = paramiko.SSHClient()
-     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-     ssh.connect(pve_host, username="root")
-
-     # since we need root we cant use sftp and root via ssh is disabled
-     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
-
-     cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
-
-     _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/secrets/patroni.pass")
-
-     patroni_pass = stdout.read().decode('utf-8').strip()
-
-     # fetch bind update key for ingress dns validation
-     _, stdout, _ = ssh.exec_command("sudo cat /etc/pve/cloud/secrets/internal.key")
-     bind_key_file = stdout.read().decode('utf-8')
-
-     bind_internal_key = re.search(r'secret\s+"([^"]+)";', bind_key_file).group(1)
-
-     return cluster_vars, patroni_pass, bind_internal_key
-
-
- def get_online_pve_host_prsr(args):
-     print(f"export PVE_ANSIBLE_HOST='{get_online_pve_host(args.target_pve, suppress_warnings=True)}'")
-
-
- def get_ssh_master_kubeconfig(cluster_vars, stack_name):
-     resolver = dns.resolver.Resolver()
-     resolver.nameservers = [cluster_vars['bind_master_ip'], cluster_vars['bind_slave_ip']]
-
-     ddns_answer = resolver.resolve(f"masters-{stack_name}.{cluster_vars['pve_cloud_domain']}")
-     ddns_ips = [rdata.to_text() for rdata in ddns_answer]
-
-     if not ddns_ips:
-         raise Exception("No master could be found via DNS!")
-
-     ssh = paramiko.SSHClient()
-     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-     ssh.connect(ddns_ips[0], username="admin")
-
-     # since we need root we cant use sftp and root via ssh is disabled
-     _, stdout, _ = ssh.exec_command("sudo cat /etc/kubernetes/admin.conf")
-
-     admin_conf = yaml.safe_load(stdout.read().decode('utf-8'))
-     # rewrite variables for external access
-     admin_conf["clusters"][0]["cluster"]["server"] = f"https://{ddns_ips[0]}:6443"
-     admin_conf["clusters"][0]["name"] = stack_name
-
-     admin_conf["contexts"][0]["context"]["cluster"] = stack_name
-     admin_conf["contexts"][0]["name"] = stack_name
-
-     admin_conf["current-context"] = stack_name
-
-     return yaml.safe_dump(admin_conf)
-
-
- def export_pg_conn_str(args):
-     cloud_domain = get_cloud_domain(args.target_pve, suppress_warnings=True)
-     pve_inventory = get_pve_inventory(cloud_domain, suppress_warnings=True)
-
-     # get ansible ip for first host in target cluster
-     ansible_host = None
-     for cluster in pve_inventory:
-         if args.target_pve.startswith(cluster):
-             ansible_host = next(iter(pve_inventory[cluster].values()))["ansible_host"]
-
-     if not ansible_host:
-         raise RuntimeError(f"Could not find online host for {args.target_pve}!")
-
-     cluster_vars, patroni_pass, bind_internal_key = get_cloud_env(ansible_host)
-
-     print(f"export PG_CONN_STR=\"postgres://postgres:{patroni_pass}@{cluster_vars['pve_haproxy_floating_ip_internal']}:5000/tf_states?sslmode=disable\"")
-
-
- def main():
-     parser = argparse.ArgumentParser(description="PVE Cloud utility cli. Should be called with bash eval.")
-
-     base_parser = argparse.ArgumentParser(add_help=False)
-
-     subparsers = parser.add_subparsers(dest="command", required=True)
-
-     export_envr_parser = subparsers.add_parser("export-psql", help="Export variables for k8s .envrc", parents=[base_parser])
-     export_envr_parser.add_argument("--target-pve", type=str, help="The target pve cluster.", required=True)
-     export_envr_parser.set_defaults(func=export_pg_conn_str)
-
-     get_online_pve_host_parser = subparsers.add_parser("get-online-host", help="Gets the ip for the first online proxmox host in the cluster.", parents=[base_parser])
-     get_online_pve_host_parser.add_argument("--target-pve", type=str, help="The target pve cluster to get the first online ip of.", required=True)
-     get_online_pve_host_parser.set_defaults(func=get_online_pve_host_prsr)
-
-     args = parser.parse_args()
-     args.func(args)
-
-
- if __name__ == "__main__":
-     main()