py-pve-cloud 0.5.16__py3-none-any.whl → 0.14.2rc26__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
pve_cloud/_version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.5.16"
+__version__ = "0.14.2rc26"
pve_cloud/cli/pvcli.py CHANGED
@@ -1,159 +1,187 @@
 import argparse
-import yaml
-from proxmoxer import ProxmoxAPI
 import os
+
 import paramiko
+import yaml
+from proxmoxer import ProxmoxAPI
+
 from pve_cloud.cli.pvclu import get_ssh_master_kubeconfig
+from pve_cloud.lib.inventory import *


 def connect_cluster(args):
-    # try load current dynamic inventory
-    inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
-    if os.path.exists(inv_path):
-        with open(inv_path, "r") as file:
-            dynamic_inventory = yaml.safe_load(file)
-    else:
-        # initialize empty
-        dynamic_inventory = {}
-
-    # connect to the cluster via paramiko and check if cloud files are already there
-    ssh = paramiko.SSHClient()
-    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-    ssh.connect(args.pve_host, username="root")
-
-    # since we need root we cant use sftp and root via ssh is disabled
-    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
-    cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
-
-    if not cluster_vars:
-        # cluster has not been yet initialized
-        pve_cloud_domain = input("Cluster has not yet been fully initialized, assign the cluster a cloud domain and press ENTER:")
-    else:
-        pve_cloud_domain = cluster_vars["pve_cloud_domain"]
-
-    # init cloud domain if not there
-    if pve_cloud_domain not in dynamic_inventory:
-        dynamic_inventory[pve_cloud_domain] = {}
-
-    # connect to the passed host
-    proxmox = ProxmoxAPI(
-        args.pve_host, user="root", backend='ssh_paramiko'
-    )
-
-    # try get the cluster name
-    cluster_name = None
-    status_resp = proxmox.cluster.status.get()
-    for entry in status_resp:
-        if entry['id'] == "cluster":
-            cluster_name = entry['name']
-            break
-
-    if cluster_name is None:
-        raise Exception("Could not get cluster name")
-
-    if cluster_name in dynamic_inventory[pve_cloud_domain] and not args.force:
-        print(f"cluster {cluster_name} already in dynamic inventory, add --force to overwrite current local inv.")
-        return
-
-    # overwrite on force / create fresh
-    dynamic_inventory[pve_cloud_domain][cluster_name] = {}
-
-    # not present => add and safe the dynamic inventory
-    cluster_hosts = proxmox.nodes.get()
-
-    for node in cluster_hosts:
-        node_name = node["node"]
-
-        if node["status"] == "offline":
-            print(f"skipping offline node {node_name}")
-            continue
-
-        # get the main ip
-        ifaces = proxmox.nodes(node_name).network.get()
-        node_ip_address = None
-        for iface in ifaces:
-            if 'gateway' in iface:
-                if node_ip_address is not None:
-                    raise Exception(f"found multiple ifaces with gateways for node {node_name}")
-                node_ip_address = iface.get("address")
-
-        if node_ip_address is None:
-            raise Exception(f"Could not find ip for node {node_name}")
-
-        dynamic_inventory[pve_cloud_domain][cluster_name][node_name] = {
-            "ansible_user": "root",
-            "ansible_host": node_ip_address
-        }
-
-    with open(inv_path, "w") as file:
-        yaml.dump(dynamic_inventory, file)
+    # try load current dynamic inventory
+    inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
+    if os.path.exists(inv_path):
+        with open(inv_path, "r") as file:
+            dynamic_inventory = yaml.safe_load(file)
+    else:
+        # initialize empty
+        dynamic_inventory = {}
+
+    # connect to the cluster via paramiko and check if cloud files are already there
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+    ssh.connect(args.pve_host, username="root")
+
+    # since we need root we cant use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+    cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+    if not cluster_vars:
+        # cluster has not been yet initialized
+        pve_cloud_domain = input(
+            "Cluster has not yet been fully initialized, assign the cluster a cloud domain and press ENTER:"
+        )
+    else:
+        pve_cloud_domain = cluster_vars["pve_cloud_domain"]
+
+    # init cloud domain if not there
+    if pve_cloud_domain not in dynamic_inventory:
+        dynamic_inventory[pve_cloud_domain] = {}
+
+    # connect to the passed host
+    proxmox = ProxmoxAPI(args.pve_host, user="root", backend="ssh_paramiko")
+
+    # try get the cluster name
+    cluster_name = None
+    status_resp = proxmox.cluster.status.get()
+    for entry in status_resp:
+        if entry["id"] == "cluster":
+            cluster_name = entry["name"]
+            break
+
+    if cluster_name is None:
+        raise Exception("Could not get cluster name")
+
+    if cluster_name in dynamic_inventory[pve_cloud_domain] and not args.force:
+        print(
+            f"cluster {cluster_name} already in dynamic inventory, add --force to overwrite current local inv."
+        )
+        return
+
+    # overwrite on force / create fresh
+    dynamic_inventory[pve_cloud_domain][cluster_name] = {}
+
+    # not present => add and safe the dynamic inventory
+    cluster_hosts = proxmox.nodes.get()
+
+    for node in cluster_hosts:
+        node_name = node["node"]
+
+        if node["status"] == "offline":
+            print(f"skipping offline node {node_name}")
+            continue
+
+        # get the main ip
+        ifaces = proxmox.nodes(node_name).network.get()
+        node_ip_address = None
+        for iface in ifaces:
+            if "gateway" in iface:
+                if node_ip_address is not None:
+                    raise Exception(
+                        f"found multiple ifaces with gateways for node {node_name}"
+                    )
+                node_ip_address = iface.get("address")
+
+        if node_ip_address is None:
+            raise Exception(f"Could not find ip for node {node_name}")
+
+        print(f"adding {node_name}")
+        dynamic_inventory[pve_cloud_domain][cluster_name][node_name] = {
+            "ansible_user": "root",
+            "ansible_host": node_ip_address,
+        }
+
+    print(f"writing dyn inv to {inv_path}")
+    with open(inv_path, "w") as file:
+        yaml.dump(dynamic_inventory, file)


 def print_kubeconfig(args):
-    if not os.path.exists(args.inventory):
-        print("The specified inventory file does not exist!")
-        return
-
-    with open(args.inventory, "r") as f:
-        inventory = yaml.safe_load(f)
+    if not os.path.exists(args.inventory):
+        print("The specified inventory file does not exist!")
+        return

-    target_pve = inventory["target_pve"]
+    with open(args.inventory, "r") as f:
+        inventory = yaml.safe_load(f)

-    # load dyn inv to init proxmoxer
-    inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
-    with open(inv_path, "r") as file:
-        dynamic_inventory = yaml.safe_load(file)
+    target_pve = inventory["target_pve"]

-    target_cloud = None
-    target_cluster = None
-    for cloud in dynamic_inventory:
-        for cluster in dynamic_inventory[cloud]:
-            if target_pve.endswith((cluster + "." + cloud)):
-                target_cloud = cloud
-                target_cluster = cluster
-                break
+    target_cloud_domain = get_cloud_domain(target_pve)
+    pve_inventory = get_pve_inventory(target_cloud_domain)

-    if not target_cloud:
-        print("could not find cloud in dyn inv!")
-        return
+    # find target cluster in loaded inventory
+    target_cluster = None

-    first_host = list(dynamic_inventory[target_cloud][target_cluster].keys())[0]
+    for cluster in pve_inventory:
+        if target_pve.endswith((cluster + "." + target_cloud_domain)):
+            target_cluster = cluster
+            break

-    # connect to the first pve host in the dyn inv, assumes they are all online
-    ssh = paramiko.SSHClient()
-    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-    ssh.connect(dynamic_inventory[target_cloud][target_cluster][first_host]["ansible_host"], username="root")
+    if not target_cluster:
+        print("could not find target cluster in pve inventory!")
+        return

-    # since we need root we cant use sftp and root via ssh is disabled
-    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+    first_host = list(pve_inventory[target_cluster].keys())[0]

-    cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
+    # connect to the first pve host in the dyn inv, assumes they are all online
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    ssh.connect(
+        pve_inventory[target_cluster][first_host]["ansible_host"], username="root"
+    )

-    print(get_ssh_master_kubeconfig(cluster_vars, inventory["stack_name"]))
+    # since we need root we cant use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")

+    cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))

-def main():
-    parser = argparse.ArgumentParser(description="PVE general purpose cli for setting up.")
-
-    base_parser = argparse.ArgumentParser(add_help=False)
-
-    subparsers = parser.add_subparsers(dest="command", required=True)
-
-    connect_cluster_parser = subparsers.add_parser("connect-cluster", help="Add an entire pve cluster to this machine for use.", parents=[base_parser])
-    connect_cluster_parser.add_argument("--pve-host", type=str, help="PVE Host to connect to and add the entire cluster for the local machine.", required=True)
-    connect_cluster_parser.add_argument("--force", action="store_true", help="Will read the cluster if set.")
-    connect_cluster_parser.set_defaults(func=connect_cluster)
+    print(get_ssh_master_kubeconfig(cluster_vars, inventory["stack_name"]))


-    print_kconf_parser = subparsers.add_parser("print-kubeconfig", help="Print the kubeconfig from a k8s cluster deployed with pve cloud.", parents=[base_parser])
-    print_kconf_parser.add_argument("--inventory", type=str, help="PVE cloud kubespray inventory yaml file.", required=True)
-    print_kconf_parser.set_defaults(func=print_kubeconfig)
-
-
-    args = parser.parse_args()
-    args.func(args)
+def main():
+    parser = argparse.ArgumentParser(
+        description="PVE general purpose cli for setting up."
+    )
+
+    base_parser = argparse.ArgumentParser(add_help=False)
+
+    subparsers = parser.add_subparsers(dest="command", required=True)
+
+    connect_cluster_parser = subparsers.add_parser(
+        "connect-cluster",
+        help="Add an entire pve cluster to this machine for use.",
+        parents=[base_parser],
+    )
+    connect_cluster_parser.add_argument(
+        "--pve-host",
+        type=str,
+        help="PVE Host to connect to and add the entire cluster for the local machine.",
+        required=True,
+    )
+    connect_cluster_parser.add_argument(
+        "--force", action="store_true", help="Will read the cluster if set."
+    )
+    connect_cluster_parser.set_defaults(func=connect_cluster)
+
+    print_kconf_parser = subparsers.add_parser(
+        "print-kubeconfig",
+        help="Print the kubeconfig from a k8s cluster deployed with pve cloud.",
+        parents=[base_parser],
+    )
+    print_kconf_parser.add_argument(
+        "--inventory",
+        type=str,
+        help="PVE cloud kubespray inventory yaml file.",
+        required=True,
+    )
+    print_kconf_parser.set_defaults(func=print_kubeconfig)
+
+    args = parser.parse_args()
+    args.func(args)


 if __name__ == "__main__":
-    main()
+    main()
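Note on the new connect_cluster(): besides the black-style reformatting, it now prints progress ("adding ...", "writing dyn inv to ...") and the CLI pulls shared inventory helpers from pve_cloud.lib.inventory. The sketch below is not part of the package; every domain, cluster, node name and IP is a made-up placeholder. It only illustrates the nesting that connect_cluster() builds and dumps to ~/.pve-cloud-dyn-inv.yaml: cloud domain, then cluster name, then node name, then the Ansible connection vars.

import yaml

# Hypothetical contents of ~/.pve-cloud-dyn-inv.yaml after "connect-cluster".
# Only the nesting and the ansible_user/ansible_host keys come from the code
# above; all concrete values here are placeholders.
dynamic_inventory = {
    "pve.example.internal": {  # pve_cloud_domain from /etc/pve/cloud/cluster_vars.yaml
        "cluster01": {  # cluster name taken from the /cluster/status API
            "node01": {
                "ansible_user": "root",
                "ansible_host": "10.0.0.11",  # address of the iface that carries a gateway
            },
            "node02": {
                "ansible_user": "root",
                "ansible_host": "10.0.0.12",
            },
        },
    },
}

print(yaml.safe_dump(dynamic_inventory))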
pve_cloud/cli/pvclu.py CHANGED
@@ -1,140 +1,146 @@
 import argparse
-import yaml
-import os
-import socket
-import paramiko
-import dns.resolver
-import base64
 import re

+import dns.resolver
+import paramiko
+import yaml

-def get_cloud_domain(target_pve):
-    with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
-        pve_inventory = yaml.safe_load(f)
+from pve_cloud.lib.inventory import *

-    for pve_cloud in pve_inventory:
-        for pve_cluster in pve_inventory[pve_cloud]:
-            if pve_cluster + "." + pve_cloud == target_pve:
-                return pve_cloud
-
-    raise Exception(f"Could not identify cloud domain for {target_pve}")

+def get_cluster_vars(pve_host):
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

-def get_cld_domain_prsr(args):
-    print(f"export PVE_CLOUD_DOMAIN='{get_cloud_domain(args.target_pve)}'")
+    ssh.connect(pve_host, username="root")

+    # since we need root we cant use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")

-def get_online_pve_host(target_pve):
-    with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
-        pve_inventory = yaml.safe_load(f)
+    cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))

-    for pve_cloud in pve_inventory:
-        for pve_cluster in pve_inventory[pve_cloud]:
-            if pve_cluster + "." + pve_cloud == target_pve:
-                for pve_host in pve_inventory[pve_cloud][pve_cluster]:
-                    # check if host is available
-                    pve_host_ip = pve_inventory[pve_cloud][pve_cluster][pve_host]["ansible_host"]
-                    try:
-                        with socket.create_connection((pve_host_ip, 22), timeout=3):
-                            return pve_host_ip
-                    except Exception as e:
-                        # debug
-                        print(e, type(e))
-                        pass
-
-    raise Exception(f"Could not find online pve host for {target_pve}")
+    return cluster_vars


 def get_cloud_env(pve_host):
-    ssh = paramiko.SSHClient()
-    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

-    ssh.connect(pve_host, username="root")
+    ssh.connect(pve_host, username="root")

-    # since we need root we cant use sftp and root via ssh is disabled
-    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+    # since we need root we cant use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")

-    cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
+    cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))

-    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/secrets/patroni.pass")
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/secrets/patroni.pass")

-    patroni_pass = stdout.read().decode('utf-8').strip()
+    patroni_pass = stdout.read().decode("utf-8").strip()

-    # fetch bind update key for ingress dns validation
-    _, stdout, _ = ssh.exec_command("sudo cat /etc/pve/cloud/secrets/internal.key")
-    bind_key_file = stdout.read().decode('utf-8')
+    # fetch bind update key for ingress dns validation
+    _, stdout, _ = ssh.exec_command("sudo cat /etc/pve/cloud/secrets/internal.key")
+    bind_key_file = stdout.read().decode("utf-8")

-    bind_internal_key = re.search(r'secret\s+"([^"]+)";', bind_key_file).group(1)
+    bind_internal_key = re.search(r'secret\s+"([^"]+)";', bind_key_file).group(1)

-    return cluster_vars, patroni_pass, bind_internal_key
+    return cluster_vars, patroni_pass, bind_internal_key


 def get_online_pve_host_prsr(args):
-    print(f"export PVE_ANSIBLE_HOST='{get_online_pve_host(args.target_pve)}'")
+    print(
+        f"export PVE_ANSIBLE_HOST='{get_online_pve_host(args.target_pve, suppress_warnings=True)}'"
+    )


 def get_ssh_master_kubeconfig(cluster_vars, stack_name):
-    resolver = dns.resolver.Resolver()
-    resolver.nameservers = [cluster_vars['bind_master_ip'], cluster_vars['bind_slave_ip']]
+    resolver = dns.resolver.Resolver()
+    resolver.nameservers = [
+        cluster_vars["bind_master_ip"],
+        cluster_vars["bind_slave_ip"],
+    ]

-    ddns_answer = resolver.resolve(f"masters-{stack_name}.{cluster_vars['pve_cloud_domain']}")
-    ddns_ips = [rdata.to_text() for rdata in ddns_answer]
+    ddns_answer = resolver.resolve(
+        f"masters-{stack_name}.{cluster_vars['pve_cloud_domain']}"
+    )
+    ddns_ips = [rdata.to_text() for rdata in ddns_answer]

-    if not ddns_ips:
-        raise Exception("No master could be found via DNS!")
+    if not ddns_ips:
+        raise Exception("No master could be found via DNS!")

-    ssh = paramiko.SSHClient()
-    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

-    ssh.connect(ddns_ips[0], username="admin")
+    ssh.connect(ddns_ips[0], username="admin")

-    # since we need root we cant use sftp and root via ssh is disabled
-    _, stdout, _ = ssh.exec_command("sudo cat /etc/kubernetes/admin.conf")
+    # since we need root we cant use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("sudo cat /etc/kubernetes/admin.conf")

-    return stdout.read().decode('utf-8').replace("https://127.0.0.1:6443", f"https://{ddns_ips[0]}:6443")
+    admin_conf = yaml.safe_load(stdout.read().decode("utf-8"))
+    # rewrite variables for external access
+    admin_conf["clusters"][0]["cluster"]["server"] = f"https://{ddns_ips[0]}:6443"
+    admin_conf["clusters"][0]["name"] = stack_name

+    admin_conf["contexts"][0]["context"]["cluster"] = stack_name
+    admin_conf["contexts"][0]["name"] = stack_name

-def export_envr(args):
-    ansible_host = get_online_pve_host(args.target_pve)
-    cloud_domain = get_cloud_domain(args.target_pve)
-    cluster_vars, patroni_pass, bind_internal_key = get_cloud_env(ansible_host)
-    print(f"export PVE_ANSIBLE_HOST='{ansible_host}'")
-    print(f"export PVE_CLOUD_DOMAIN='{cloud_domain}'")
+    admin_conf["current-context"] = stack_name

-    # tf vars
-    print(f"export PG_CONN_STR=\"postgres://postgres:{patroni_pass}@{cluster_vars['pve_haproxy_floating_ip_internal']}:5000/tf_states?sslmode=disable\"")
-    print(f"export TF_VAR_pve_cloud_domain='{cloud_domain}'")
-    print(f"export TF_VAR_pve_host='{ansible_host}'")
-    print(f"export TF_VAR_cluster_proxy_ip='{cluster_vars['pve_haproxy_floating_ip_internal']}'")
-    print(f"export TF_VAR_pve_cloud_pg_cstr=\"postgresql+psycopg2://postgres:{patroni_pass}@{cluster_vars['pve_haproxy_floating_ip_internal']}:5000/pve_cloud?sslmode=disable\"")
-    print(f"export TF_VAR_master_b64_kubeconf='{base64.b64encode(get_ssh_master_kubeconfig(cluster_vars, args.stack_name).encode('utf-8')).decode('utf-8')}'")
-    print(f"export TF_VAR_bind_master_ip='{cluster_vars['bind_master_ip']}'")
-    print(f"export TF_VAR_bind_internal_key='{bind_internal_key}'")
+    return yaml.safe_dump(admin_conf)

-
-def main():
-    parser = argparse.ArgumentParser(description="PVE Cloud utility cli. Should be called with bash eval.")

-    base_parser = argparse.ArgumentParser(add_help=False)
+def export_pg_conn_str(args):
+    cloud_domain = get_cloud_domain(args.target_pve, suppress_warnings=True)
+    pve_inventory = get_pve_inventory(cloud_domain, suppress_warnings=True)

-    subparsers = parser.add_subparsers(dest="command", required=True)
+    # get ansible ip for first host in target cluster
+    ansible_host = None
+    for cluster in pve_inventory:
+        if args.target_pve.startswith(cluster):
+            ansible_host = next(iter(pve_inventory[cluster].values()))["ansible_host"]

-    get_cld_domain_parser = subparsers.add_parser("get-cloud-domain", help="Get the cloud domain of a pve cluster.", parents=[base_parser])
-    get_cld_domain_parser.add_argument("--target-pve", type=str, help="The target pve cluster to get the cloud domain of.", required=True)
-    get_cld_domain_parser .set_defaults(func=get_cld_domain_prsr)
+    if not ansible_host:
+        raise RuntimeError(f"Could not find online host for {args.target_pve}!")

-    export_envr_parser = subparsers.add_parser("export-envrc", help="Export variables for k8s .envrc", parents=[base_parser])
-    export_envr_parser.add_argument("--target-pve", type=str, help="The target pve cluster.", required=True)
-    export_envr_parser.add_argument("--stack-name", type=str, help="Stack name of the deployment.", required=True)
-    export_envr_parser.set_defaults(func=export_envr)
+    cluster_vars, patroni_pass, bind_internal_key = get_cloud_env(ansible_host)

-    get_online_pve_host_parser = subparsers.add_parser("get-online-host", help="Gets the ip for the first online proxmox host in the cluster.", parents=[base_parser])
-    get_online_pve_host_parser.add_argument("--target-pve", type=str, help="The target pve cluster to get the first online ip of.", required=True)
-    get_online_pve_host_parser.set_defaults(func=get_online_pve_host_prsr)
+    print(
+        f"export PG_CONN_STR=\"postgres://postgres:{patroni_pass}@{cluster_vars['pve_haproxy_floating_ip_internal']}:5000/tf_states?sslmode=disable\""
+    )

-    args = parser.parse_args()
-    args.func(args)
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="PVE Cloud utility cli. Should be called with bash eval."
+    )
+
+    base_parser = argparse.ArgumentParser(add_help=False)
+
+    subparsers = parser.add_subparsers(dest="command", required=True)
+
+    export_envr_parser = subparsers.add_parser(
+        "export-psql", help="Export variables for k8s .envrc", parents=[base_parser]
+    )
+    export_envr_parser.add_argument(
+        "--target-pve", type=str, help="The target pve cluster.", required=True
+    )
+    export_envr_parser.set_defaults(func=export_pg_conn_str)
+
+    get_online_pve_host_parser = subparsers.add_parser(
+        "get-online-host",
+        help="Gets the ip for the first online proxmox host in the cluster.",
+        parents=[base_parser],
+    )
+    get_online_pve_host_parser.add_argument(
+        "--target-pve",
+        type=str,
+        help="The target pve cluster to get the first online ip of.",
+        required=True,
+    )
+    get_online_pve_host_parser.set_defaults(func=get_online_pve_host_prsr)
+
+    args = parser.parse_args()
+    args.func(args)


 if __name__ == "__main__":
-    main()
+    main()
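Both CLI modules now do "from pve_cloud.lib.inventory import *"; the helpers they rely on (get_cloud_domain, get_pve_inventory, get_online_pve_host) live in that module, which is not part of this diff. The sketch below is an inferred reconstruction based on the implementations removed above and on the new call sites; the exact signatures, the behaviour behind suppress_warnings, and the _load_inventory helper name are assumptions, not the published API.

import os
import socket

import yaml

INV_PATH = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")


def _load_inventory():
    # read the local dynamic inventory written by "connect-cluster" (hypothetical helper)
    with open(INV_PATH, "r") as f:
        return yaml.safe_load(f)


def get_cloud_domain(target_pve, suppress_warnings=False):
    # map "<cluster>.<cloud>" to its cloud domain, as the removed pvclu code did
    for cloud, clusters in _load_inventory().items():
        for cluster in clusters:
            if f"{cluster}.{cloud}" == target_pve:
                return cloud
    raise Exception(f"Could not identify cloud domain for {target_pve}")


def get_pve_inventory(cloud_domain, suppress_warnings=False):
    # all clusters (and their hosts) known locally for one cloud domain
    return _load_inventory()[cloud_domain]


def get_online_pve_host(target_pve, suppress_warnings=False):
    # first host in the target cluster that answers on tcp/22
    cloud = get_cloud_domain(target_pve, suppress_warnings=suppress_warnings)
    for cluster, hosts in get_pve_inventory(cloud, suppress_warnings=suppress_warnings).items():
        if f"{cluster}.{cloud}" != target_pve:
            continue
        for host_vars in hosts.values():
            try:
                with socket.create_connection((host_vars["ansible_host"], 22), timeout=3):
                    return host_vars["ansible_host"]
            except OSError:
                continue
    raise Exception(f"Could not find online pve host for {target_pve}")

Two behavioural changes in pvclu.py are worth noting: get_ssh_master_kubeconfig() now parses admin.conf as YAML and renames the cluster, context and current-context to the stack name instead of only string-replacing the server URL, which presumably lets kubeconfigs from several stacks be merged without name collisions; and the old export-envrc subcommand with its TF_VAR_* exports has been reduced to export-psql, whose single "export PG_CONN_STR=..." line is still meant to be consumed via bash eval, per the parser description.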