py_pve_cloud-0.5.16-py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- pve_cloud/_version.py +1 -0
- pve_cloud/cli/pvcli.py +159 -0
- pve_cloud/cli/pvclu.py +140 -0
- pve_cloud/lib/inventory.py +5 -0
- pve_cloud/orm/alchemy.py +181 -0
- pve_cloud/orm/alembic.ini +147 -0
- pve_cloud/orm/migrations/env.py +83 -0
- pve_cloud/orm/migrations/versions/04398db10434_external_cp_extra_sans.py +44 -0
- pve_cloud/orm/migrations/versions/24a548bfce3e_len_rules_enforcements.py +133 -0
- pve_cloud/orm/migrations/versions/27724e407e2b_proxy_fqdn.py +32 -0
- pve_cloud/orm/migrations/versions/3c95509a5de9_fix.py +44 -0
- pve_cloud/orm/migrations/versions/7868bcd05006_migrate_old.py +83 -0
- pve_cloud/orm/migrations/versions/7dea8c4ee39f_init.py +36 -0
- pve_cloud/orm/migrations/versions/944a8fd5d5bc_ext_ctrl_plns.py +46 -0
- pve_cloud/orm/migrations/versions/d9b711555be8_ext_control_plane.py +37 -0
- pve_cloud/orm/migrations/versions/fdcb5aa33b76_slop_firewall_seperation.py +54 -0
- py_pve_cloud-0.5.16.dist-info/METADATA +14 -0
- py_pve_cloud-0.5.16.dist-info/RECORD +22 -0
- py_pve_cloud-0.5.16.dist-info/WHEEL +5 -0
- py_pve_cloud-0.5.16.dist-info/entry_points.txt +3 -0
- py_pve_cloud-0.5.16.dist-info/licenses/LICENSE +674 -0
- py_pve_cloud-0.5.16.dist-info/top_level.txt +1 -0
pve_cloud/_version.py
ADDED
@@ -0,0 +1 @@
+__version__ = "0.5.16"

pve_cloud/cli/pvcli.py
ADDED
@@ -0,0 +1,159 @@
+import argparse
+import yaml
+from proxmoxer import ProxmoxAPI
+import os
+import paramiko
+from pve_cloud.cli.pvclu import get_ssh_master_kubeconfig
+
+
+def connect_cluster(args):
+    # try to load the current dynamic inventory
+    inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
+    if os.path.exists(inv_path):
+        with open(inv_path, "r") as file:
+            dynamic_inventory = yaml.safe_load(file)
+    else:
+        # initialize empty
+        dynamic_inventory = {}
+
+    # connect to the cluster via paramiko and check if the cloud files are already there
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+    ssh.connect(args.pve_host, username="root")
+
+    # since we need root we can't use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+    cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
+
+    if not cluster_vars:
+        # cluster has not been initialized yet
+        pve_cloud_domain = input("Cluster has not yet been fully initialized, assign the cluster a cloud domain and press ENTER:")
+    else:
+        pve_cloud_domain = cluster_vars["pve_cloud_domain"]
+
+    # init the cloud domain if not there
+    if pve_cloud_domain not in dynamic_inventory:
+        dynamic_inventory[pve_cloud_domain] = {}
+
+    # connect to the passed host
+    proxmox = ProxmoxAPI(
+        args.pve_host, user="root", backend='ssh_paramiko'
+    )
+
+    # try to get the cluster name
+    cluster_name = None
+    status_resp = proxmox.cluster.status.get()
+    for entry in status_resp:
+        if entry['id'] == "cluster":
+            cluster_name = entry['name']
+            break
+
+    if cluster_name is None:
+        raise Exception("Could not get cluster name")
+
+    if cluster_name in dynamic_inventory[pve_cloud_domain] and not args.force:
+        print(f"cluster {cluster_name} already in dynamic inventory, add --force to overwrite current local inv.")
+        return
+
+    # overwrite on force / create fresh
+    dynamic_inventory[pve_cloud_domain][cluster_name] = {}
+
+    # not present => add and save the dynamic inventory
+    cluster_hosts = proxmox.nodes.get()
+
+    for node in cluster_hosts:
+        node_name = node["node"]
+
+        if node["status"] == "offline":
+            print(f"skipping offline node {node_name}")
+            continue
+
+        # get the main ip
+        ifaces = proxmox.nodes(node_name).network.get()
+        node_ip_address = None
+        for iface in ifaces:
+            if 'gateway' in iface:
+                if node_ip_address is not None:
+                    raise Exception(f"found multiple ifaces with gateways for node {node_name}")
+                node_ip_address = iface.get("address")
+
+        if node_ip_address is None:
+            raise Exception(f"Could not find ip for node {node_name}")
+
+        dynamic_inventory[pve_cloud_domain][cluster_name][node_name] = {
+            "ansible_user": "root",
+            "ansible_host": node_ip_address
+        }
+
+    with open(inv_path, "w") as file:
+        yaml.dump(dynamic_inventory, file)
+
+
+def print_kubeconfig(args):
+    if not os.path.exists(args.inventory):
+        print("The specified inventory file does not exist!")
+        return
+
+    with open(args.inventory, "r") as f:
+        inventory = yaml.safe_load(f)
+
+    target_pve = inventory["target_pve"]
+
+    # load the dynamic inventory to init proxmoxer
+    inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
+    with open(inv_path, "r") as file:
+        dynamic_inventory = yaml.safe_load(file)
+
+    target_cloud = None
+    target_cluster = None
+    for cloud in dynamic_inventory:
+        for cluster in dynamic_inventory[cloud]:
+            if target_pve.endswith(cluster + "." + cloud):
+                target_cloud = cloud
+                target_cluster = cluster
+                break
+
+    if not target_cloud:
+        print("could not find cloud in dyn inv!")
+        return
+
+    first_host = list(dynamic_inventory[target_cloud][target_cluster].keys())[0]
+
+    # connect to the first pve host in the dyn inv, assumes they are all online
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    ssh.connect(dynamic_inventory[target_cloud][target_cluster][first_host]["ansible_host"], username="root")
+
+    # since we need root we can't use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+    cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
+
+    print(get_ssh_master_kubeconfig(cluster_vars, inventory["stack_name"]))
+
+
+def main():
+    parser = argparse.ArgumentParser(description="PVE general purpose cli for setup.")
+
+    base_parser = argparse.ArgumentParser(add_help=False)
+
+    subparsers = parser.add_subparsers(dest="command", required=True)
+
+    connect_cluster_parser = subparsers.add_parser("connect-cluster", help="Add an entire pve cluster to this machine for use.", parents=[base_parser])
+    connect_cluster_parser.add_argument("--pve-host", type=str, help="PVE host to connect to and add the entire cluster for the local machine.", required=True)
+    connect_cluster_parser.add_argument("--force", action="store_true", help="Re-read the cluster and overwrite the local inventory if set.")
+    connect_cluster_parser.set_defaults(func=connect_cluster)
+
+
+    print_kconf_parser = subparsers.add_parser("print-kubeconfig", help="Print the kubeconfig from a k8s cluster deployed with pve cloud.", parents=[base_parser])
+    print_kconf_parser.add_argument("--inventory", type=str, help="PVE cloud kubespray inventory yaml file.", required=True)
+    print_kconf_parser.set_defaults(func=print_kubeconfig)
+
+
+    args = parser.parse_args()
+    args.func(args)
+
+
+if __name__ == "__main__":
+    main()

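For orientation, connect-cluster persists a nested mapping keyed by cloud domain, then cluster name, then node. A minimal sketch of what ends up in ~/.pve-cloud-dyn-inv.yaml, shown as the Python dict handed to yaml.dump — the domain, cluster, node names and addresses are illustrative placeholders, not values from the package:

# Illustrative inventory structure only; all names and IPs are made up.
dynamic_inventory = {
    "cloud.example.com": {                      # pve_cloud_domain
        "pve-cluster-a": {                      # cluster name from /cluster/status
            "node1": {"ansible_user": "root", "ansible_host": "10.0.0.11"},
            "node2": {"ansible_user": "root", "ansible_host": "10.0.0.12"},
        },
    },
}
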
pve_cloud/cli/pvclu.py
ADDED
@@ -0,0 +1,140 @@
+import argparse
+import yaml
+import os
+import socket
+import paramiko
+import dns.resolver
+import base64
+import re
+
+
+def get_cloud_domain(target_pve):
+    with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
+        pve_inventory = yaml.safe_load(f)
+
+    for pve_cloud in pve_inventory:
+        for pve_cluster in pve_inventory[pve_cloud]:
+            if pve_cluster + "." + pve_cloud == target_pve:
+                return pve_cloud
+
+    raise Exception(f"Could not identify cloud domain for {target_pve}")
+
+
+def get_cld_domain_prsr(args):
+    print(f"export PVE_CLOUD_DOMAIN='{get_cloud_domain(args.target_pve)}'")
+
+
+def get_online_pve_host(target_pve):
+    with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
+        pve_inventory = yaml.safe_load(f)
+
+    for pve_cloud in pve_inventory:
+        for pve_cluster in pve_inventory[pve_cloud]:
+            if pve_cluster + "." + pve_cloud == target_pve:
+                for pve_host in pve_inventory[pve_cloud][pve_cluster]:
+                    # check if the host is reachable
+                    pve_host_ip = pve_inventory[pve_cloud][pve_cluster][pve_host]["ansible_host"]
+                    try:
+                        with socket.create_connection((pve_host_ip, 22), timeout=3):
+                            return pve_host_ip
+                    except Exception as e:
+                        # debug
+                        print(e, type(e))
+                        pass
+
+    raise Exception(f"Could not find online pve host for {target_pve}")
+
+
+def get_cloud_env(pve_host):
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+    ssh.connect(pve_host, username="root")
+
+    # since we need root we can't use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+    cluster_vars = yaml.safe_load(stdout.read().decode('utf-8'))
+
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/secrets/patroni.pass")
+
+    patroni_pass = stdout.read().decode('utf-8').strip()
+
+    # fetch the bind update key for ingress dns validation
+    _, stdout, _ = ssh.exec_command("sudo cat /etc/pve/cloud/secrets/internal.key")
+    bind_key_file = stdout.read().decode('utf-8')
+
+    bind_internal_key = re.search(r'secret\s+"([^"]+)";', bind_key_file).group(1)
+
+    return cluster_vars, patroni_pass, bind_internal_key
+
+
+def get_online_pve_host_prsr(args):
+    print(f"export PVE_ANSIBLE_HOST='{get_online_pve_host(args.target_pve)}'")
+
+
+def get_ssh_master_kubeconfig(cluster_vars, stack_name):
+    resolver = dns.resolver.Resolver()
+    resolver.nameservers = [cluster_vars['bind_master_ip'], cluster_vars['bind_slave_ip']]
+
+    ddns_answer = resolver.resolve(f"masters-{stack_name}.{cluster_vars['pve_cloud_domain']}")
+    ddns_ips = [rdata.to_text() for rdata in ddns_answer]
+
+    if not ddns_ips:
+        raise Exception("No master could be found via DNS!")
+
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+    ssh.connect(ddns_ips[0], username="admin")
+
+    # admin.conf is root-only on the master, so read it via sudo
+    _, stdout, _ = ssh.exec_command("sudo cat /etc/kubernetes/admin.conf")
+
+    return stdout.read().decode('utf-8').replace("https://127.0.0.1:6443", f"https://{ddns_ips[0]}:6443")
+
+
+def export_envr(args):
+    ansible_host = get_online_pve_host(args.target_pve)
+    cloud_domain = get_cloud_domain(args.target_pve)
+    cluster_vars, patroni_pass, bind_internal_key = get_cloud_env(ansible_host)
+    print(f"export PVE_ANSIBLE_HOST='{ansible_host}'")
+    print(f"export PVE_CLOUD_DOMAIN='{cloud_domain}'")
+
+    # tf vars
+    print(f"export PG_CONN_STR=\"postgres://postgres:{patroni_pass}@{cluster_vars['pve_haproxy_floating_ip_internal']}:5000/tf_states?sslmode=disable\"")
+    print(f"export TF_VAR_pve_cloud_domain='{cloud_domain}'")
+    print(f"export TF_VAR_pve_host='{ansible_host}'")
+    print(f"export TF_VAR_cluster_proxy_ip='{cluster_vars['pve_haproxy_floating_ip_internal']}'")
+    print(f"export TF_VAR_pve_cloud_pg_cstr=\"postgresql+psycopg2://postgres:{patroni_pass}@{cluster_vars['pve_haproxy_floating_ip_internal']}:5000/pve_cloud?sslmode=disable\"")
+    print(f"export TF_VAR_master_b64_kubeconf='{base64.b64encode(get_ssh_master_kubeconfig(cluster_vars, args.stack_name).encode('utf-8')).decode('utf-8')}'")
+    print(f"export TF_VAR_bind_master_ip='{cluster_vars['bind_master_ip']}'")
+    print(f"export TF_VAR_bind_internal_key='{bind_internal_key}'")
+
+
+def main():
+    parser = argparse.ArgumentParser(description="PVE Cloud utility cli. Should be called with bash eval.")
+
+    base_parser = argparse.ArgumentParser(add_help=False)
+
+    subparsers = parser.add_subparsers(dest="command", required=True)
+
+    get_cld_domain_parser = subparsers.add_parser("get-cloud-domain", help="Get the cloud domain of a pve cluster.", parents=[base_parser])
+    get_cld_domain_parser.add_argument("--target-pve", type=str, help="The target pve cluster to get the cloud domain of.", required=True)
+    get_cld_domain_parser.set_defaults(func=get_cld_domain_prsr)
+
+    export_envr_parser = subparsers.add_parser("export-envrc", help="Export variables for k8s .envrc", parents=[base_parser])
+    export_envr_parser.add_argument("--target-pve", type=str, help="The target pve cluster.", required=True)
+    export_envr_parser.add_argument("--stack-name", type=str, help="Stack name of the deployment.", required=True)
+    export_envr_parser.set_defaults(func=export_envr)
+
+    get_online_pve_host_parser = subparsers.add_parser("get-online-host", help="Gets the ip for the first online proxmox host in the cluster.", parents=[base_parser])
+    get_online_pve_host_parser.add_argument("--target-pve", type=str, help="The target pve cluster to get the first online ip of.", required=True)
+    get_online_pve_host_parser.set_defaults(func=get_online_pve_host_prsr)
+
+    args = parser.parse_args()
+    args.func(args)
+
+
+if __name__ == "__main__":
+    main()

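Per its parser description, this utility is meant to be consumed with bash eval (e.g. from an .envrc). As a minimal sketch of what such an invocation boils down to, driven from Python — the pvclu console-script name in the comment, the target pve and the stack name are illustrative assumptions, not values from the package:

import sys

from pve_cloud.cli import pvclu

# Roughly equivalent to:
#   eval "$(pvclu export-envrc --target-pve pve-cluster-a.cloud.example.com --stack-name demo)"
# "pve-cluster-a.cloud.example.com" and "demo" are placeholders.
sys.argv = ["pvclu", "export-envrc",
            "--target-pve", "pve-cluster-a.cloud.example.com",
            "--stack-name", "demo"]
pvclu.main()  # prints `export PVE_ANSIBLE_HOST=...`, `export TF_VAR_...` lines to stdout
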
pve_cloud/orm/alchemy.py
ADDED
@@ -0,0 +1,181 @@
+from sqlalchemy import Column, Integer, String, Boolean, SmallInteger, Text
+from sqlalchemy.dialects.postgresql import MACADDR, INET, JSONB, insert
+from sqlalchemy import create_engine, MetaData, Table, select, delete, update
+from sqlalchemy.orm import declarative_base
+from alembic.config import Config
+from alembic import command
+import os
+
+Base = declarative_base()
+
+class BindDomains(Base):
+    __tablename__ = "bind_domains"
+
+    domain = Column(String(253), primary_key=True)
+    stack_fqdn = Column(String(253), primary_key=True)
+
+
+class AcmeX509(Base):
+    __tablename__ = "acme_x509"
+
+    stack_fqdn = Column(String(253), primary_key=True)
+    config = Column(JSONB)
+    ec_csr = Column(JSONB)
+    ec_crt = Column(JSONB)
+    k8s = Column(JSONB)
+
+
+class KeaReservations(Base):
+    __tablename__ = "kea_reservations"
+
+    mac = Column(MACADDR, primary_key=True)
+    ip = Column(INET)  # nullable
+    hostname = Column(String(253), nullable=False)
+    client_classes = Column(String(1000))  # comma-separated client classes
+    stack_fqdn = Column(String(253), nullable=False)
+
+
+class KeaClientClassDefs(Base):
+    __tablename__ = "kea_client_class_defs"
+
+    stack_fqdn = Column(String(253), nullable=False)
+    class_name = Column(String(253), primary_key=True)
+    class_content = Column(JSONB, nullable=False)
+
+
+class K8SWorkers(Base):
+    __tablename__ = "k8s_workers"
+
+    ip = Column(INET, primary_key=True)
+    hostname = Column(String(253), nullable=False)
+    stack_fqdn = Column(String(253), nullable=False)
+    proxy_stack_fqdn = Column(String(253), nullable=False)
+
+
+class K8SMasters(Base):
+    __tablename__ = "k8s_masters"
+
+    ip = Column(INET, primary_key=True)
+    hostname = Column(String(253), nullable=False)
+    stack_fqdn = Column(String(253), nullable=False)
+    proxy_stack_fqdn = Column(String(253), nullable=False)
+
+
+class K8SIngressRules(Base):
+    __tablename__ = "k8s_ingress_rules"
+
+    zone = Column(String(253), primary_key=True)
+    name = Column(String(253), primary_key=True)
+    stack_fqdn = Column(String(253), primary_key=True)
+    proxy_stack_fqdn = Column(String(253), nullable=False)
+    external = Column(Boolean, default=False)
+    rule_len = Column(Integer, nullable=False)
+
+
+class K8STcpProxies(Base):
+    __tablename__ = "k8s_tcp_proxies"
+
+    proxy_name = Column(String(253), nullable=False)
+    haproxy_port = Column(SmallInteger, primary_key=True)
+    node_port = Column(SmallInteger, nullable=False)
+    stack_fqdn = Column(String(253), nullable=False)
+    proxy_snippet = Column(Text)
+    proxy_stack_fqdn = Column(String(253), primary_key=True)
+    external = Column(Boolean, default=False)
+
+
+class K8SExternalControlPlanes(Base):
+    __tablename__ = "k8s_ext_control_planes"
+
+    stack_fqdn = Column(String(253), primary_key=True)
+    extra_sans = Column(String(2530), nullable=False)  # comma-separated values
+    proxy_stack_fqdn = Column(String(253), nullable=False)
+
+
+# apply the migrations to the database
+def migrate(conn_str):
+    alembic_cfg = Config(os.path.join(os.path.dirname(__file__), 'alembic.ini'))
+
+    alembic_cfg.set_main_option("sqlalchemy.url", conn_str)
+    alembic_cfg.set_main_option("prepend_sys_path", os.path.dirname(__file__))
+
+    command.upgrade(alembic_cfg, "head")
+
+
+# generic read with simple where-equal functionality
+def alch_read(conn_str, table_name, where_equal_args):
+    engine = create_engine(conn_str)
+    metadata = MetaData()
+
+    table = Table(table_name, metadata, autoload_with=engine)
+
+    statement = select(table)
+    for col_name, val in where_equal_args.items():
+        statement = statement.where(table.c[col_name] == val)
+
+    with engine.connect() as conn:
+        result = conn.execute(statement)
+
+        return [dict(row._mapping) for row in result]
+
+
+def alch_write(conn_str, table_name, rows):
+    engine = create_engine(conn_str)
+    metadata = MetaData()
+
+    table = Table(table_name, metadata, autoload_with=engine)
+
+    stmt = insert(table)
+
+    with engine.begin() as conn:
+        conn.execute(stmt, rows)
+
+
+def alch_update(conn_str, table_name, values, whereclause):
+    engine = create_engine(conn_str)
+    metadata = MetaData()
+    table = Table(table_name, metadata, autoload_with=engine)
+
+    stmt = update(table).values(**values)
+
+    for col, val in whereclause.items():
+        stmt = stmt.where(table.c[col] == val)
+
+    with engine.begin() as conn:
+        conn.execute(stmt)
+
+
+def alch_upsert(conn_str, table_name, values, conflict_columns):
+    engine = create_engine(conn_str)
+    metadata = MetaData()
+    table = Table(table_name, metadata, autoload_with=engine)
+
+    stmt = insert(table).values(**values)
+
+    # update every column except the conflict columns that were passed in the values
+    # this is the same as doing SET column_x = EXCLUDED.column_x
+    update_dict = {c: getattr(stmt.excluded, c) for c in values if c not in conflict_columns}
+
+    stmt = stmt.on_conflict_do_update(
+        index_elements=[table.c[c] for c in conflict_columns],
+        set_=update_dict
+    )
+
+    with engine.begin() as conn:
+        conn.execute(stmt)
+
+
+def alch_delete(conn_str, table_name, where_equal_args):
+    engine = create_engine(conn_str)
+    metadata = MetaData()
+
+    table = Table(table_name, metadata, autoload_with=engine)
+
+    stmt = delete(table)
+    for col_name, val in where_equal_args.items():
+        stmt = stmt.where(table.c[col_name] == val)
+
+    with engine.begin() as conn:
+        result = conn.execute(stmt)
+        return result.rowcount
+

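For orientation, a minimal sketch of the generic helpers run against the kea_reservations table defined above. The connection string and all row values are placeholders; export-envrc in pvclu.py prints the real connection string for a deployment:

from pve_cloud.orm.alchemy import alch_read, alch_upsert

# Placeholder connection string, not a real host.
conn_str = "postgresql+psycopg2://postgres:secret@10.0.0.5:5000/pve_cloud"

# INSERT ... ON CONFLICT (mac) DO UPDATE: "mac" is the table's primary key.
alch_upsert(
    conn_str,
    "kea_reservations",
    {
        "mac": "52:54:00:aa:bb:cc",
        "ip": "10.0.1.20",
        "hostname": "worker-0",
        "client_classes": "class_a,class_b",  # comma-separated, per the column comment
        "stack_fqdn": "demo.pve-cluster-a.cloud.example.com",
    },
    conflict_columns=["mac"],
)

# simple where-equal read back
rows = alch_read(conn_str, "kea_reservations",
                 {"stack_fqdn": "demo.pve-cluster-a.cloud.example.com"})
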
pve_cloud/orm/alembic.ini
ADDED
@@ -0,0 +1,147 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts.
+# this is typically a path given in POSIX (e.g. forward slashes)
+# format, relative to the token %(here)s which refers to the location of this
+# ini file
+script_location = %(here)s/migrations
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+# for all available tokens
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory. for multiple paths, the path separator
+# is defined by "path_separator" below.
+prepend_sys_path = .
+
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to <script_location>/versions. When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "path_separator"
+# below.
+# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions
+
+# path_separator; This indicates what character is used to split lists of file
+# paths, including version_locations and prepend_sys_path within configparser
+# files such as alembic.ini.
+# The default rendered in new alembic.ini files is "os", which uses os.pathsep
+# to provide os-dependent path splitting.
+#
+# Note that in order to support legacy alembic.ini files, this default does NOT
+# take place if path_separator is not present in alembic.ini. If this
+# option is omitted entirely, fallback logic is as follows:
+#
+# 1. Parsing of the version_locations option falls back to using the legacy
+#    "version_path_separator" key, which if absent then falls back to the legacy
+#    behavior of splitting on spaces and/or commas.
+# 2. Parsing of the prepend_sys_path option falls back to the legacy
+#    behavior of splitting on spaces, commas, or colons.
+#
+# Valid values for path_separator are:
+#
+# path_separator = :
+# path_separator = ;
+# path_separator = space
+# path_separator = newline
+#
+# Use os.pathsep. Default configuration used for new projects.
+path_separator = os
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+# database URL. This is consumed by the user-maintained env.py script only.
+# other means of configuring database URLs may be customized within the env.py
+# file.
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts. See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module
+# hooks = ruff
+# ruff.type = module
+# ruff.module = ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Alternatively, use the exec runner to execute a binary found on your PATH
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration. This is also consumed by the user-maintained
+# env.py script only.
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARNING
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S

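Note that the placeholder sqlalchemy.url above is never used as-is: migrate() in alchemy.py overrides it at runtime with the connection string it is given. A minimal usage sketch (the connection string is a placeholder):

from pve_cloud.orm.alchemy import migrate

# Overrides sqlalchemy.url from this alembic.ini and upgrades the schema to "head".
migrate("postgresql+psycopg2://postgres:secret@10.0.0.5:5000/pve_cloud")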