py-pve-cloud 0.5.16__py3-none-any.whl → 0.14.2rc26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

pve_cloud/lib/inventory.py CHANGED
@@ -1,5 +1,276 @@
+import os
+import shutil
+import socket
+import subprocess
 
-def get_pve_cloud_inventory():
-    pass
+import yaml
+from proxmoxer import ProxmoxAPI
 
+from pve_cloud.lib.validate import raise_on_py_cloud_missmatch
 
+
+def get_cloud_domain(target_pve, suppress_warnings=False):
+    if shutil.which("avahi-browse"):
+        avahi_disc = subprocess.run(
+            ["avahi-browse", "-rpt", "_pxc._tcp"],
+            stdout=subprocess.PIPE,
+            text=True,
+            check=True,
+        )
+        services = avahi_disc.stdout.splitlines()
+
+        # find cloud domain hosts and get first online per proxmox cluster
+        for service in services:
+            if service.startswith("="):
+                # avahi service def
+                svc_args = service.split(";")
+
+                cloud_domain = None
+                cluster_name = None
+
+                for txt_arg in svc_args[9].split():
+                    txt_arg = txt_arg.replace('"', "")
+                    if txt_arg.startswith("cloud_domain"):
+                        cloud_domain = txt_arg.split("=")[1]
+
+                    if txt_arg.startswith("cluster_name"):
+                        cluster_name = txt_arg.split("=")[1]
+
+                if not cloud_domain or not cluster_name:
+                    raise ValueError(
+                        f"Missconfigured proxmox cloud avahi service: {service}"
+                    )
+
+                if target_pve.endswith(cloud_domain):
+                    return cloud_domain
+
+        raise RuntimeError("Could not get cloud domain via avahi mdns!")
+    else:
+        if not suppress_warnings:
+            print(
+                "avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!"
+            )
+
+        with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
+            pve_inventory = yaml.safe_load(f)
+
+        for pve_cloud in pve_inventory:
+            for pve_cluster in pve_inventory[pve_cloud]:
+                if pve_cluster + "." + pve_cloud == target_pve:
+                    return pve_cloud
+
+        raise Exception(f"Could not identify cloud domain for {target_pve}")
+
+
+def get_online_pve_host(target_pve, suppress_warnings=False, skip_py_cloud_check=False):
+    if shutil.which("avahi-browse"):
+        avahi_disc = subprocess.run(
+            ["avahi-browse", "-rpt", "_pxc._tcp"],
+            stdout=subprocess.PIPE,
+            text=True,
+            check=True,
+        )
+        services = avahi_disc.stdout.splitlines()
+
+        for service in services:
+            if service.startswith("="):
+                # avahi service def
+                svc_args = service.split(";")
+                host_ip = svc_args[7]
+
+                cloud_domain = None
+                cluster_name = None
+
+                for txt_arg in svc_args[9].split():
+                    txt_arg = txt_arg.replace('"', "")
+                    if txt_arg.startswith("cloud_domain"):
+                        cloud_domain = txt_arg.split("=")[1]
+
+                    if txt_arg.startswith("cluster_name"):
+                        cluster_name = txt_arg.split("=")[1]
+
+                if not cloud_domain or not cluster_name:
+                    raise ValueError(
+                        f"Missconfigured proxmox cloud avahi service: {service}"
+                    )
+
+                # main pve cloud inventory
+                if f"{cluster_name}.{cloud_domain}" == target_pve:
+                    if not skip_py_cloud_check:
+                        raise_on_py_cloud_missmatch(
+                            host_ip
+                        )  # validate that versions of dev machine and running on cluster match
+
+                    return host_ip
+
+        raise RuntimeError(f"No online host found for {target_pve}!")
+    else:
+        if not suppress_warnings:
+            print(
+                "avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!"
+            )
+
+        with open(os.path.expanduser("~/.pve-cloud-dyn-inv.yaml"), "r") as f:
+            pve_inventory = yaml.safe_load(f)
+
+        for pve_cloud in pve_inventory:
+            for pve_cluster in pve_inventory[pve_cloud]:
+                if pve_cluster + "." + pve_cloud == target_pve:
+                    for pve_host in pve_inventory[pve_cloud][pve_cluster]:
+                        # check if host is available
+                        pve_host_ip = pve_inventory[pve_cloud][pve_cluster][pve_host][
+                            "ansible_host"
+                        ]
+                        try:
+                            with socket.create_connection((pve_host_ip, 22), timeout=3):
+
+                                if not skip_py_cloud_check:
+                                    raise_on_py_cloud_missmatch(
+                                        pve_host_ip
+                                    )  # validate that versions of dev machine and running on cluster match
+
+                                return pve_host_ip
+                        except Exception as e:
+                            # debug
+                            print(e, type(e))
+
+        raise RuntimeError(f"Could not find online pve host for {target_pve}")
+
+
+def get_pve_inventory(
+    pve_cloud_domain, suppress_warnings=False, skip_py_cloud_check=False
+):
+    if shutil.which("avahi-browse"):
+        # avahi is available
+
+        # call avahi-browse -rpt _pxc._tcp and find online host matching pve cloud domain
+        # connect via ssh and fetch all other hosts via proxmox api => build inventory
+        avahi_disc = subprocess.run(
+            ["avahi-browse", "-rpt", "_pxc._tcp"],
+            stdout=subprocess.PIPE,
+            text=True,
+            check=True,
+        )
+        services = avahi_disc.stdout.splitlines()
+
+        pve_inventory = {}
+
+        py_pve_cloud_performed_version_checks = set()
+
+        # find cloud domain hosts and get first online per proxmox cluster
+        cloud_domain_first_hosts = {}
+        for service in services:
+            if service.startswith("="):
+                # avahi service def
+                svc_args = service.split(";")
+                host_ip = svc_args[7]
+
+                cloud_domain = None
+                cluster_name = None
+
+                for txt_arg in svc_args[9].split():
+                    txt_arg = txt_arg.replace('"', "")
+                    if txt_arg.startswith("cloud_domain"):
+                        cloud_domain = txt_arg.split("=")[1]
+
+                    if txt_arg.startswith("cluster_name"):
+                        cluster_name = txt_arg.split("=")[1]
+
+                if not cloud_domain or not cluster_name:
+                    raise ValueError(
+                        f"Missconfigured proxmox cloud avahi service: {service}"
+                    )
+
+                # main pve cloud inventory
+                if (
+                    cloud_domain == pve_cloud_domain
+                    and cluster_name not in cloud_domain_first_hosts
+                ):
+                    if (
+                        not skip_py_cloud_check
+                        and f"{cluster_name}.{cloud_domain}"
+                        not in py_pve_cloud_performed_version_checks
+                    ):
+                        raise_on_py_cloud_missmatch(
+                            host_ip
+                        )  # validate that versions of dev machine and running on cluster match
+                        py_pve_cloud_performed_version_checks.add(
+                            f"{cluster_name}.{cloud_domain}"
+                        )  # perform version check only once per cluster
+
+                    cloud_domain_first_hosts[cluster_name] = host_ip
+
+        # iterate over hosts and build pve inv via proxmox api
+        # todo: this needs to be hugely optimized it blocks the grpc server
+        for cluster_first, first_host in cloud_domain_first_hosts.items():
+            proxmox = ProxmoxAPI(first_host, user="root", backend="ssh_paramiko")
+
+            cluster_name = None
+            status_resp = proxmox.cluster.status.get()
+            for entry in status_resp:
+                if entry["id"] == "cluster":
+                    cluster_name = entry["name"]
+                    break
+
+            if cluster_name is None:
+                raise RuntimeError("Could not get cluster name")
+
+            if cluster_name != cluster_first:
+                raise ValueError(
+                    f"Proxmox cluster name missconfigured in avahi service {cluster_name}/{cluster_first}"
+                )
+
+            pve_inventory[cluster_name] = {}
+
+            # fetch other hosts via api
+            cluster_hosts = proxmox.nodes.get()
+
+            for node in cluster_hosts:
+                node_name = node["node"]
+
+                if node["status"] == "offline":
+                    print(f"skipping offline node {node_name}")
+                    continue
+
+                # get the main ip
+                ifaces = proxmox.nodes(node_name).network.get()
+                node_ip_address = None
+                for iface in ifaces:
+                    if "gateway" in iface:
+                        if node_ip_address is not None:
+                            raise RuntimeError(
+                                f"found multiple ifaces with gateways for node {node_name}"
+                            )
+                        node_ip_address = iface.get("address")
+
+                if node_ip_address is None:
+                    raise RuntimeError(f"Could not find ip for node {node_name}")
+
+                pve_inventory[cluster_name][node_name] = {
+                    "ansible_user": "root",
+                    "ansible_host": node_ip_address,
+                }
+
+        return pve_inventory
+
+    else:
+        if not suppress_warnings:
+            print(
+                "avahi-browse not available, falling back to local inventory file from pvcli connect-cluster!"
+            )
+        # try load fallback manual inventory from disk
+        inv_path = os.path.expanduser("~/.pve-cloud-dyn-inv.yaml")
+        if not os.path.exists(inv_path):
+            raise RuntimeError(
+                "Local pve inventory file missing (~/.pve-cloud-dyn-inv.yaml), execute `pvcli connect-cluster` or setup avahi mdns discovery!"
+            )
+
+        with open(inv_path, "r") as file:
+            dynamic_inventory = yaml.safe_load(file)
+
+        if pve_cloud_domain not in dynamic_inventory:
+            raise RuntimeError(
+                f"{pve_cloud_domain} not in local dynamic inventory (~/.pve-cloud-dyn-inv.yaml created by `pvcli connect-cluster`)!"
+            )
+
+        return dynamic_inventory[pve_cloud_domain]
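
The inventory helpers above discover Proxmox hosts by running avahi-browse -rpt _pxc._tcp and parsing its machine-readable output: resolved records start with "=", field 7 carries the host address, and field 9 carries the TXT data from which cloud_domain and cluster_name are read; when avahi is unavailable, the functions fall back to the ~/.pve-cloud-dyn-inv.yaml file written by pvcli connect-cluster. A minimal sketch of that parsing step follows; the sample record and every value in it are hypothetical, inferred only from the field indices used in the code above.

```python
# Sketch: the TXT parsing used by the inventory helpers, run against a
# hypothetical avahi-browse -rpt record (all values invented for illustration).
sample = (
    '=;eth0;IPv4;pve1;_pxc._tcp;local;pve1.local;10.0.0.11;8006;'
    '"cloud_domain=cloud.example" "cluster_name=pve1"'
)

svc_args = sample.split(";")
host_ip = svc_args[7]  # resolved address field

cloud_domain = cluster_name = None
for txt_arg in svc_args[9].split():  # TXT field: space-separated key=value pairs
    txt_arg = txt_arg.replace('"', "")
    if txt_arg.startswith("cloud_domain"):
        cloud_domain = txt_arg.split("=")[1]
    if txt_arg.startswith("cluster_name"):
        cluster_name = txt_arg.split("=")[1]

print(host_ip, cloud_domain, cluster_name)  # -> 10.0.0.11 cloud.example pve1
```

On the avahi path, get_pve_inventory() then queries the first reachable host of each matching cluster over the Proxmox API (paramiko SSH backend) and returns an Ansible-style mapping of cluster name to node name to {"ansible_user": "root", "ansible_host": <ip>}.
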
pve_cloud/lib/validate.py ADDED
@@ -0,0 +1,25 @@
+import os
+
+import paramiko
+import pve_cloud._version
+import yaml
+
+
+def raise_on_py_cloud_missmatch(proxmox_host):
+    # dont raise in tdd
+    if os.getenv("PYTEST_CURRENT_TEST"):
+        return
+
+    ssh = paramiko.SSHClient()
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    ssh.connect(proxmox_host, username="root")
+
+    # since we need root we cant use sftp and root via ssh is disabled
+    _, stdout, _ = ssh.exec_command("cat /etc/pve/cloud/cluster_vars.yaml")
+
+    cluster_vars = yaml.safe_load(stdout.read().decode("utf-8"))
+
+    if cluster_vars["py_pve_cloud_version"] != pve_cloud._version.__version__:
+        raise RuntimeError(
+            f"Version missmatch! py_pve_cloud_version for cluster is {cluster_vars['py_pve_cloud_version']}, while you are using {pve_cloud._version.__version__}"
+        )
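
The new validate module is the version guard the inventory helpers call: it connects to a node as root over SSH (key or agent auth is implied, since no password is passed), reads /etc/pve/cloud/cluster_vars.yaml, and raises if the cluster's py_pve_cloud_version differs from the locally installed package version; under pytest it returns immediately. A hedged usage sketch, with an invented node IP and an assumed example of the one YAML key the check relies on:

```python
# Sketch only. On the node, /etc/pve/cloud/cluster_vars.yaml is expected to
# contain at least the following key (example value, other keys omitted):
#
#   py_pve_cloud_version: 0.14.2rc26
#
import pve_cloud._version
from pve_cloud.lib.validate import raise_on_py_cloud_missmatch

print("local wheel version:", pve_cloud._version.__version__)

# 10.0.0.11 is a hypothetical node IP; root SSH key access to the node is required.
# Returns silently on a match, raises RuntimeError on a mismatch.
raise_on_py_cloud_missmatch("10.0.0.11")
```
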
pve_cloud/orm/alchemy.py CHANGED
@@ -1,5 +1,5 @@
 from sqlalchemy import Column, Integer, String, Boolean, SmallInteger, Text
-from sqlalchemy.dialects.postgresql import MACADDR, INET, JSONB, insert
+from sqlalchemy.dialects.postgresql import MACADDR, INET, JSONB, insert, ENUM
 from sqlalchemy import create_engine, MetaData, Table, select, delete, update
 from sqlalchemy.orm import declarative_base
 from alembic.config import Config
@@ -33,6 +33,7 @@ class KeaReservations(Base):
     hostname = Column(String(253), nullable=False)
     client_classes = Column(String(1000))  # csv seperated client classes
     stack_fqdn = Column(String(253), nullable=False)
+    machine_type = Column(String(50), nullable=False)
 
 
 class KeaClientClassDefs(Base):
@@ -43,24 +44,7 @@ class KeaClientClassDefs(Base):
     class_content = Column(JSONB, nullable=False)
 
 
-class K8SWorkers(Base):
-    __tablename__ = "k8s_workers"
-
-    ip = Column(INET, primary_key=True)
-    hostname = Column(String(253), nullable=False)
-    stack_fqdn = Column(String(253), nullable=False)
-    proxy_stack_fqdn = Column(String(253), nullable=False)
-
-
-class K8SMasters(Base):
-    __tablename__ = "k8s_masters"
-
-    ip = Column(INET, primary_key=True)
-    hostname = Column(String(253), nullable=False)
-    stack_fqdn = Column(String(253), nullable=False)
-    proxy_stack_fqdn = Column(String(253), nullable=False)
-
-
+# todo: rename just ingress rules
 class K8SIngressRules(Base):
     __tablename__ = "k8s_ingress_rules"
 
@@ -70,8 +54,10 @@ class K8SIngressRules(Base):
     proxy_stack_fqdn = Column(String(253), nullable=False)
     external = Column(Boolean, default=False)
     rule_len = Column(Integer, nullable=False)
+    is_k8s = Column(Boolean, nullable=False)
 
 
+# todo: rename to just tcp proxies
 class K8STcpProxies(Base):
     __tablename__ = "k8s_tcp_proxies"
 
@@ -79,6 +65,8 @@ class K8STcpProxies(Base):
     haproxy_port = Column(SmallInteger, primary_key=True)
     node_port = Column(SmallInteger, nullable=False)
     stack_fqdn = Column(String(253), nullable=False)
+    # determines backend routing in haproxy, if false will go
+    is_k8s = Column(Boolean, nullable=False)
     proxy_snippet = Column(Text)
     proxy_stack_fqdn = Column(String(253), primary_key=True)
     external = Column(Boolean, default=False)
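
The ORM changes fold the former k8s_workers and k8s_masters tables into kea_reservations via a new machine_type column, and add an is_k8s flag to the ingress-rule and TCP-proxy tables so haproxy routing is no longer assumed to target Kubernetes backends. A minimal sketch of querying the consolidated table under the new schema; only the database URL is an assumption here, while the machine_type value comes from the migration below:

```python
# Sketch under assumptions: the connection URL is invented; the machine_type
# values ('k8s_master', 'k8s_worker') are the ones used by the migration below.
from sqlalchemy import create_engine, select
from sqlalchemy.orm import Session

from pve_cloud.orm.alchemy import KeaReservations

engine = create_engine("postgresql+psycopg2://user:pass@localhost/pve_cloud")  # assumed URL

with Session(engine) as session:
    # reservations that previously lived in the dropped k8s_masters table
    masters = session.scalars(
        select(KeaReservations).where(KeaReservations.machine_type == "k8s_master")
    ).all()
    for r in masters:
        print(r.hostname, r.stack_fqdn, r.machine_type)
```

Discriminating on machine_type in one table removes the column-for-column duplication the two dropped models carried.
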
pve_cloud/orm/migrations/versions/0ad803c51325_machine_type_refactor.py ADDED
@@ -0,0 +1,65 @@
+"""machine type refactor
+
+Revision ID: 0ad803c51325
+Revises: fdcb5aa33b76
+Create Date: 2025-12-22 13:46:51.857049
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = '0ad803c51325'
+down_revision: Union[str, Sequence[str], None] = 'fdcb5aa33b76'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+
+    op.add_column('k8s_tcp_proxies', sa.Column('is_k8s', sa.Boolean(), nullable=False, server_default='true'))
+    op.add_column('kea_reservations', sa.Column('machine_type', sa.String(length=50), nullable=False, server_default='k8s_worker'))
+
+    # remove defaults
+    op.alter_column('k8s_tcp_proxies', 'is_k8s', server_default=None)
+    op.alter_column('kea_reservations', 'machine_type', server_default=None)
+
+    # set master type
+    op.execute("""
+        UPDATE kea_reservations kr
+        SET machine_type = 'k8s_master'
+        FROM k8s_masters km
+        WHERE kr.ip = km.ip
+    """)
+
+    op.drop_table('k8s_masters')
+    op.drop_table('k8s_workers')
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column('kea_reservations', 'machine_type')
+    op.drop_column('k8s_tcp_proxies', 'is_k8s')
+    op.create_table('k8s_workers',
+    sa.Column('ip', postgresql.INET(), autoincrement=False, nullable=False),
+    sa.Column('hostname', sa.VARCHAR(length=253), autoincrement=False, nullable=False),
+    sa.Column('stack_fqdn', sa.VARCHAR(length=253), autoincrement=False, nullable=False),
+    sa.Column('proxy_stack_fqdn', sa.VARCHAR(length=253), autoincrement=False, nullable=False),
+    sa.PrimaryKeyConstraint('ip', name=op.f('k8s_workers_pkey'))
+    )
+    op.create_table('k8s_masters',
+    sa.Column('ip', postgresql.INET(), autoincrement=False, nullable=False),
+    sa.Column('hostname', sa.VARCHAR(length=253), autoincrement=False, nullable=False),
+    sa.Column('stack_fqdn', sa.VARCHAR(length=253), autoincrement=False, nullable=False),
+    sa.Column('proxy_stack_fqdn', sa.VARCHAR(length=253), autoincrement=False, nullable=False),
+    sa.PrimaryKeyConstraint('ip', name=op.f('k8s_masters_pkey'))
+    )
+    # ### end Alembic commands ###
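
The upgrade adds both NOT NULL columns with a temporary server_default so existing rows satisfy the constraint, drops the default again, backfills machine_type from the old k8s_masters table, and only then drops k8s_masters and k8s_workers. A sketch of applying the revision chain programmatically through Alembic's command API, using the alembic.ini bundled under pve_cloud/orm (the path comes from the RECORD; the URL override and the invocation itself are assumptions about how the package is meant to be driven):

```python
# Sketch: programmatic upgrade to the newest revision using the packaged config.
import os

from alembic import command
from alembic.config import Config
from pve_cloud.orm import alchemy

# locate the alembic.ini shipped next to the ORM module (both listed in the RECORD)
ini_path = os.path.join(os.path.dirname(alchemy.__file__), "alembic.ini")
cfg = Config(ini_path)

# assumed URL; the diff does not show how the packaged ini expects the URL to be supplied
cfg.set_main_option("sqlalchemy.url", "postgresql+psycopg2://user:pass@localhost/pve_cloud")

command.upgrade(cfg, "head")  # applies 0ad803c51325 and e60b9cc63413 among any pending revisions
```
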
pve_cloud/orm/migrations/versions/e60b9cc63413_ingress_generic.py ADDED
@@ -0,0 +1,33 @@
+"""ingress generic
+
+Revision ID: e60b9cc63413
+Revises: 0ad803c51325
+Create Date: 2025-12-22 21:29:59.987939
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = 'e60b9cc63413'
+down_revision: Union[str, Sequence[str], None] = '0ad803c51325'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column('k8s_ingress_rules', sa.Column('is_k8s', sa.Boolean(), nullable=False, server_default='true'))
+    op.alter_column('k8s_ingress_rules', 'is_k8s', server_default=None)  # default create as true since to this point its only for k8s
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column('k8s_ingress_rules', 'is_k8s')
+    # ### end Alembic commands ###

py_pve_cloud-0.14.2rc26.dist-info/METADATA CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.4
 Name: py-pve-cloud
-Version: 0.5.16
+Version: 0.14.2rc26
 Author-email: Tobias Huebner <tobias.huebner@vmzberlin.com>
 License-Expression: GPL-3.0-or-later
-License-File: LICENSE
+License-File: LICENSE.md
 Requires-Dist: PyYAML==6.0.2
 Requires-Dist: psycopg2-binary==2.9.10
 Requires-Dist: SQLAlchemy==2.0.43

py_pve_cloud-0.14.2rc26.dist-info/RECORD ADDED
@@ -0,0 +1,25 @@
+pve_cloud/_version.py,sha256=PeTrwMVliRFapYHocR8c3EJDBUJO0_wu89ViTvJwiS4,27
+pve_cloud/cli/pvcli.py,sha256=Sew-_ZVmugskLYncEzVE_-F1R90O9vM9rPZ2DuxumQQ,5977
+pve_cloud/cli/pvclu.py,sha256=IZRhgbVbEP5wo3gHpiEs3zEu_xmgcpuQAPRWPi18APc,4660
+pve_cloud/lib/inventory.py,sha256=76JVyYbVDKGHYnEab4DollNwxzZTzvgVBdqlPMIkGcw,10598
+pve_cloud/lib/validate.py,sha256=IQ4_1pknjFt_tbqhl497aoaVDnUSgxq5eSLkantdub8,830
+pve_cloud/orm/alchemy.py,sha256=fGDBpO22quf_d7jCeAkDPYxwXPDFSUH99DKe2JuJOUk,5031
+pve_cloud/orm/alembic.ini,sha256=7140n-YUj06aAIHOHACm8U0xhUFUoBZ4Jw23KlYB9EA,4865
+pve_cloud/orm/migrations/env.py,sha256=xtOgjF1KLmRUkG1-yb4eV4F2JzarDKFU1tdWJovNHDc,2200
+pve_cloud/orm/migrations/versions/04398db10434_external_cp_extra_sans.py,sha256=HUrnRni5D_xpqVruXoywekA-u3bIjZb-LvyMi8Ea65E,1465
+pve_cloud/orm/migrations/versions/0ad803c51325_machine_type_refactor.py,sha256=Tp3_KPp9SlEAc1Nh7BdIll_rTAWbQUFP1Evjdx_g-So,2480
+pve_cloud/orm/migrations/versions/24a548bfce3e_len_rules_enforcements.py,sha256=rWVywDbSLs16bEcncdXuwcij1axp-K0mleTXqFRhs3s,5238
+pve_cloud/orm/migrations/versions/27724e407e2b_proxy_fqdn.py,sha256=va2I9uPkn6QEQf_sn986-ogX9HeN08ScmFhzqk-eYsY,918
+pve_cloud/orm/migrations/versions/3c95509a5de9_fix.py,sha256=yAfRQerPhlHoRrW6F2aAi2ZyY5IIuGUOCieyg7IK544,1491
+pve_cloud/orm/migrations/versions/7868bcd05006_migrate_old.py,sha256=rU8Bw2tYDynM1Ny0zFolDbcn6Oe6JAhndbpJAewiTac,3475
+pve_cloud/orm/migrations/versions/7dea8c4ee39f_init.py,sha256=iMDyHhtyvpSywMnLhiSEL3W12YSm6sPa18XRgzQcwDg,954
+pve_cloud/orm/migrations/versions/944a8fd5d5bc_ext_ctrl_plns.py,sha256=LnVAShLaU1asz1L7TYs7oI9SnLxPp2IOA6K83kHNkN0,1674
+pve_cloud/orm/migrations/versions/d9b711555be8_ext_control_plane.py,sha256=uBqv1r5pLX-RjqciKYx0zvWyygJMa5u58DTnVfIEAF0,1073
+pve_cloud/orm/migrations/versions/e60b9cc63413_ingress_generic.py,sha256=fSVHYbRvkssMsxnuZ3TIOflBseuuQhgYa1G5Xrn5m88,1044
+pve_cloud/orm/migrations/versions/fdcb5aa33b76_slop_firewall_seperation.py,sha256=1qM2weneVfnPFG921lnQSJ70QF9mW4oZmn0ZJguBO8U,1948
+py_pve_cloud-0.14.2rc26.dist-info/licenses/LICENSE.md,sha256=ADUqsZhl4juwq34PRTMiBqumpm11s_PMli_dZQjWPqQ,34260
+py_pve_cloud-0.14.2rc26.dist-info/METADATA,sha256=Fk7mX718ZSU7rukjaF3KJ6l3rKtVAZkN6-eKxvLuUvI,433
+py_pve_cloud-0.14.2rc26.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+py_pve_cloud-0.14.2rc26.dist-info/entry_points.txt,sha256=VvncsKmTJ46irz-9wQZ4Zo1FgNBjRltGDBKR9ht18mE,84
+py_pve_cloud-0.14.2rc26.dist-info/top_level.txt,sha256=mpT7ttGRyZJVt_obhPLBHyIBcjKhUdJ-qVsMEVX5WJg,10
+py_pve_cloud-0.14.2rc26.dist-info/RECORD,,