osi-dump 0.1.2__py3-none-any.whl → 0.1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osi_dump/api/octavia.py +34 -0
- osi_dump/batch_handler/flavor_batch_handler.py +51 -0
- osi_dump/batch_handler/image_batch_handler.py +51 -0
- osi_dump/batch_handler/load_balancer_batch_handler.py +59 -0
- osi_dump/batch_handler/role_assignment_batch_handler.py +61 -0
- osi_dump/batch_handler/router_batch_handler.py +49 -0
- osi_dump/cli.py +71 -0
- osi_dump/exporter/flavor/__init__.py +0 -0
- osi_dump/exporter/flavor/excel_flavor_exporter.py +30 -0
- osi_dump/exporter/flavor/flavor_exporter.py +7 -0
- osi_dump/exporter/image/__init__.py +0 -0
- osi_dump/exporter/image/excel_image_exporter.py +28 -0
- osi_dump/exporter/image/image_exporter.py +7 -0
- osi_dump/exporter/load_balancer/__init__.py +0 -0
- osi_dump/exporter/load_balancer/excel_load_balancer_exporter.py +35 -0
- osi_dump/exporter/load_balancer/load_balancer_exporter.py +7 -0
- osi_dump/exporter/role_assignment/__init__.py +0 -0
- osi_dump/exporter/role_assignment/excel_role_assignment_exporter.py +35 -0
- osi_dump/exporter/role_assignment/role_assignment_exporter.py +7 -0
- osi_dump/exporter/router/__init__.py +0 -0
- osi_dump/exporter/router/excel_router_exporter.py +30 -0
- osi_dump/exporter/router/router_exporter.py +7 -0
- osi_dump/importer/flavor/__init__.py +0 -0
- osi_dump/importer/flavor/flavor_importer.py +9 -0
- osi_dump/importer/flavor/openstack_flavor_importer.py +64 -0
- osi_dump/importer/hypervisor/openstack_hypervisor_importer.py +10 -1
- osi_dump/importer/image/__init__.py +0 -0
- osi_dump/importer/image/image_importer.py +9 -0
- osi_dump/importer/image/openstack_image_importer.py +77 -0
- osi_dump/importer/instance/openstack_instance_importer.py +1 -0
- osi_dump/importer/load_balancer/__init__.py +0 -0
- osi_dump/importer/load_balancer/load_balancer_importer.py +9 -0
- osi_dump/importer/load_balancer/openstack_load_balancer_importer.py +72 -0
- osi_dump/importer/project/openstack_project_importer.py +6 -6
- osi_dump/importer/role_assignment/__init__.py +0 -0
- osi_dump/importer/role_assignment/openstack_role_assignment_importer.py +100 -0
- osi_dump/importer/role_assignment/role_assignment_importer.py +9 -0
- osi_dump/importer/router/__init__.py +0 -0
- osi_dump/importer/router/openstack_router_importer.py +86 -0
- osi_dump/importer/router/router_importer.py +9 -0
- osi_dump/model/flavor.py +20 -0
- osi_dump/model/hypervisor.py +2 -0
- osi_dump/model/image.py +26 -0
- osi_dump/model/instance.py +1 -0
- osi_dump/model/load_balancer.py +17 -0
- osi_dump/model/role_assignment.py +17 -0
- osi_dump/model/router.py +20 -0
- osi_dump/util/__init__.py +1 -0
- osi_dump/util/panda_excel.py +22 -0
- {osi_dump-0.1.2.dist-info → osi_dump-0.1.2.2.dist-info}/METADATA +1 -1
- osi_dump-0.1.2.2.dist-info/RECORD +105 -0
- {osi_dump-0.1.2.dist-info → osi_dump-0.1.2.2.dist-info}/WHEEL +1 -1
- osi_dump-0.1.2.dist-info/RECORD +0 -63
- {osi_dump-0.1.2.dist-info → osi_dump-0.1.2.2.dist-info}/entry_points.txt +0 -0
- {osi_dump-0.1.2.dist-info → osi_dump-0.1.2.2.dist-info}/top_level.txt +0 -0
import logging

# Import the submodule explicitly: a bare ``import concurrent`` only imports
# the package and does NOT guarantee ``concurrent.futures`` is available.
import concurrent.futures

from openstack.connection import Connection
from openstack.compute.v2.flavor import Flavor as OSFlavor

from osi_dump.importer.flavor.flavor_importer import FlavorImporter
from osi_dump.model.flavor import Flavor

logger = logging.getLogger(__name__)


class OpenStackFlavorImporter(FlavorImporter):
    """Imports compute flavors from an OpenStack deployment."""

    def __init__(self, connection: Connection):
        self.connection = connection

    def import_flavors(self) -> list[Flavor]:
        """Import flavor information from OpenStack.

        Raises:
            Exception: Raises exception if fetching flavors failed

        Returns:
            list[Flavor]: Flavors found on the target cloud
        """

        logger.info(f"Importing flavors for {self.connection.auth['auth_url']}")

        try:
            osflavors: list[OSFlavor] = list(self.connection.list_flavors())
        except Exception as e:
            raise Exception(
                f"Can not fetch flavors for {self.connection.auth['auth_url']}"
            ) from e

        flavors: list[Flavor] = []

        # Fan the per-flavor conversions out over a thread pool; results are
        # collected in completion order (output order is not guaranteed).
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self._get_flavor_info, osflavor)
                for osflavor in osflavors
            ]
            for future in concurrent.futures.as_completed(futures):
                flavors.append(future.result())

        logger.info(f"Imported flavors for {self.connection.auth['auth_url']}")

        return flavors

    def _get_flavor_info(self, flavor: OSFlavor) -> Flavor:
        """Map a single OpenStack flavor onto the osi_dump Flavor model."""

        ret_flavor = Flavor(
            flavor_id=flavor.id,
            flavor_name=flavor.name,
            ram=flavor.ram,
            vcpus=flavor.vcpus,
            disk=flavor.disk,
            swap=flavor.swap,
            public=flavor.is_public,
            properties=flavor.extra_specs,
        )

        return ret_flavor
@@ -33,7 +33,9 @@ class OpenStackHypervisorImporter(HypervisorImporter):
|
|
33
33
|
logger.info(f"Importing hypervisors for {self.connection.auth['auth_url']}")
|
34
34
|
|
35
35
|
try:
|
36
|
-
oshypervisors: list[OSHypervisor] = list(
|
36
|
+
oshypervisors: list[OSHypervisor] = list(
|
37
|
+
self.connection.compute.hypervisors(details=True)
|
38
|
+
)
|
37
39
|
except Exception as e:
|
38
40
|
raise Exception(
|
39
41
|
f"Can not fetch hypervisor for {self.connection.auth['auth_url']}"
|
@@ -63,6 +65,12 @@ class OpenStackHypervisorImporter(HypervisorImporter):
|
|
63
65
|
)
|
64
66
|
)
|
65
67
|
|
68
|
+
servers = list(
|
69
|
+
self.connection.compute.servers(
|
70
|
+
details=True, all_project=True, hypervisor_hostname=hypervisor.name
|
71
|
+
)
|
72
|
+
)
|
73
|
+
|
66
74
|
usage_data = get_usage(self.connection, resource_provider_id=hypervisor.id)
|
67
75
|
|
68
76
|
vcpu = rpi[0]
|
@@ -81,6 +89,7 @@ class OpenStackHypervisorImporter(HypervisorImporter):
|
|
81
89
|
vcpus_usage=usage_data["VCPU"],
|
82
90
|
memory_usage=usage_data["MEMORY_MB"],
|
83
91
|
local_disk_usage=usage_data["DISK_GB"],
|
92
|
+
vm_count=len(servers),
|
84
93
|
)
|
85
94
|
|
86
95
|
return ret_hypervisor
|
File without changes
|
import logging

# Import the submodule explicitly: a bare ``import concurrent`` only imports
# the package and does NOT guarantee ``concurrent.futures`` is available.
import concurrent.futures

from openstack.connection import Connection
from openstack.image.v2.image import Image as OSImage

from osi_dump.importer.image.image_importer import ImageImporter
from osi_dump.model.image import Image

logger = logging.getLogger(__name__)


class OpenStackImageImporter(ImageImporter):
    """Imports Glance images from an OpenStack deployment."""

    def __init__(self, connection: Connection):
        self.connection = connection

    def import_images(self) -> list[Image]:
        """Import image information from OpenStack.

        Raises:
            Exception: Raises exception if fetching images failed

        Returns:
            list[Image]: Images found on the target cloud
        """

        logger.info(f"Importing images for {self.connection.auth['auth_url']}")

        try:

            os_images: list[OSImage] = list(self.connection.list_images(show_all=True))
        except Exception as e:
            raise Exception(
                f"Can not fetch images for {self.connection.auth['auth_url']}"
            ) from e

        # Note: these are converted osi_dump models, not SDK objects.
        images: list[Image] = []

        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self._get_image_info, image) for image in os_images
            ]
            for future in concurrent.futures.as_completed(futures):
                images.append(future.result())

        logger.info(f"Imported images for {self.connection.auth['auth_url']}")

        return images

    def _get_image_info(self, os_image: OSImage) -> Image:
        """Map a single OpenStack image onto the osi_dump Image model.

        Checksum/store bookkeeping properties are stripped from the exported
        ``properties`` dict. A copy is taken so the SDK object is not mutated.
        """

        properties: dict = dict(os_image.properties)

        properties.pop("owner_specified.openstack.md5", None)
        properties.pop("owner_specified.openstack.sha256", None)
        properties.pop("owner_specified.openstack.object", None)
        properties.pop("stores", None)

        image = Image(
            image_id=os_image.id,
            disk_format=os_image.disk_format,
            min_disk=os_image.min_disk,
            min_ram=os_image.min_ram,
            image_name=os_image.name,
            owner=os_image.owner,
            # Pass the cleaned copy (the original code popped into a local but
            # then passed os_image.properties, relying on shared references).
            properties=properties,
            protected=os_image.is_protected,
            status=os_image.status,
            size=os_image.size,
            virtual_size=os_image.virtual_size,
            visibility=os_image.visibility,
            created_at=os_image.created_at,
            updated_at=os_image.updated_at,
        )

        return image
@@ -96,6 +96,7 @@ class OpenStackInstanceImporter(InstanceImporter):
|
|
96
96
|
private_v4_ips=private_v4_ips,
|
97
97
|
floating_ip=floating_ip,
|
98
98
|
status=server.status,
|
99
|
+
hypervisor=server.hypervisor_hostname,
|
99
100
|
ram=server.flavor["ram"],
|
100
101
|
vcpus=server.flavor["vcpus"],
|
101
102
|
created_at=server.created_at,
|
File without changes
|
import logging

# Import the submodule explicitly: a bare ``import concurrent`` only imports
# the package and does NOT guarantee ``concurrent.futures`` is available.
import concurrent.futures

from openstack.connection import Connection

from openstack.load_balancer.v2.load_balancer import LoadBalancer as OSLoadBalancer

from osi_dump.importer.load_balancer.load_balancer_importer import (
    LoadBalancerImporter,
)
from osi_dump.model.load_balancer import LoadBalancer

import osi_dump.api.octavia as octavia_api

logger = logging.getLogger(__name__)


class OpenStackLoadBalancerImporter(LoadBalancerImporter):
    """Imports Octavia load balancers from an OpenStack deployment."""

    def __init__(self, connection: Connection):
        self.connection = connection

    def import_load_balancers(self) -> list[LoadBalancer]:
        """Import load_balancers information from Openstack

        Raises:
            Exception: Raises exception if fetching load_balancer failed

        Returns:
            list[LoadBalancer]: Load balancers found on the target cloud
        """

        logger.info(f"Importing load_balancers for {self.connection.auth['auth_url']}")

        try:
            osload_balancers: list[OSLoadBalancer] = octavia_api.get_load_balancers(
                connection=self.connection
            )
        except Exception as e:
            raise Exception(
                f"Can not fetch load_balancers for {self.connection.auth['auth_url']} {e}"
            ) from e

        load_balancers: list[LoadBalancer] = []

        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self._get_load_balancer_info, load_balancer)
                for load_balancer in osload_balancers
            ]
            for future in concurrent.futures.as_completed(futures):
                load_balancers.append(future.result())

        logger.info(f"Imported load_balancers for {self.connection.auth['auth_url']}")

        return load_balancers

    def _get_load_balancer_info(self, load_balancer: OSLoadBalancer) -> LoadBalancer:
        """Map one Octavia load balancer (dict-like) onto the osi_dump model.

        Performs an extra Octavia API call per load balancer to resolve its
        amphorae.
        """

        amphoraes = octavia_api.get_amphoraes(
            connection=self.connection, load_balancer_id=load_balancer["id"]
        )

        load_balancer_ret = LoadBalancer(
            id=load_balancer["id"],
            load_balancer_name=load_balancer["name"],
            status=load_balancer["operating_status"],
            project_id=load_balancer["project_id"],
            amphoraes=amphoraes,
        )

        return load_balancer_ret
@@ -76,12 +76,12 @@ class OpenStackProjectImporter(ProjectImporter):
|
|
76
76
|
quota_ram=compute_quotas.ram,
|
77
77
|
usage_vcpu=compute_quotas.usage["cores"],
|
78
78
|
quota_vcpu=compute_quotas.cores,
|
79
|
-
usage_volume=storage_quotas.volumes,
|
80
|
-
quota_volume=storage_quotas.
|
81
|
-
usage_snapshot=storage_quotas.snapshots,
|
82
|
-
quota_snapshot=storage_quotas.
|
83
|
-
usage_storage=storage_quotas.gigabytes,
|
84
|
-
quota_storage=storage_quotas.
|
79
|
+
usage_volume=storage_quotas.usage["volumes"],
|
80
|
+
quota_volume=storage_quotas.volumes,
|
81
|
+
usage_snapshot=storage_quotas.usage["snapshots"],
|
82
|
+
quota_snapshot=storage_quotas.snapshots,
|
83
|
+
usage_storage=storage_quotas.usage["gigabytes"],
|
84
|
+
quota_storage=storage_quotas.gigabytes,
|
85
85
|
)
|
86
86
|
|
87
87
|
return project_ret
|
File without changes
|
import logging

# Import the submodule explicitly: a bare ``import concurrent`` only imports
# the package and does NOT guarantee ``concurrent.futures`` is available.
import concurrent.futures

from openstack.connection import Connection
from openstack.identity.v3.role_assignment import RoleAssignment as OSRoleAssignment

from osi_dump.importer.role_assignment.role_assignment_importer import (
    RoleAssignmentImporter,
)
from osi_dump.model.role_assignment import RoleAssignment

logger = logging.getLogger(__name__)


class OpenStackRoleAssignmentImporter(RoleAssignmentImporter):
    """Imports Keystone role assignments from an OpenStack deployment."""

    def __init__(self, connection: Connection):
        self.connection = connection

    def import_role_assignments(self) -> list[RoleAssignment]:
        """Import role_assignments information from Openstack

        Raises:
            Exception: Raises exception if fetching role_assignment failed

        Returns:
            list[RoleAssignment]: Role assignments found on the target cloud
        """

        logger.info(
            f"Importing role_assignments for {self.connection.auth['auth_url']}"
        )

        try:
            osrole_assignments: list[OSRoleAssignment] = list(
                self.connection.identity.role_assignments()
            )
        except Exception as e:
            raise Exception(
                f"Can not fetch role_assignments for {self.connection.auth['auth_url']}"
            ) from e

        role_assignments: list[RoleAssignment] = []

        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self._get_role_assignment_info, role_assignment)
                for role_assignment in osrole_assignments
            ]
            for future in concurrent.futures.as_completed(futures):
                role_assignments.append(future.result())

        logger.info(f"Imported role_assignments for {self.connection.auth['auth_url']}")

        return role_assignments

    def _get_role_assignment_info(
        self, role_assignment: OSRoleAssignment
    ) -> RoleAssignment:
        """Map one OpenStack role assignment onto the osi_dump model.

        Any field that cannot be resolved is logged and exported as ``None``
        rather than failing the whole import.
        """

        user_id = None
        role_id = None

        try:
            user_id = role_assignment.user["id"]
        except Exception as e:
            logger.warning(f"Can not get user id: {e}")

        try:
            role_id = role_assignment.role["id"]
        except Exception as e:
            logger.warning(f"Can not get role id: {e}")

        user_name = None
        role_name = None

        # Names require extra identity API lookups; each is best-effort.
        try:
            role_name = self.connection.identity.get_role(
                role_assignment.role["id"]
            ).name

        except Exception as e:
            logger.warning(f"Can not get role name: {e}")

        try:
            user_name = self.connection.identity.get_user(
                role_assignment.user["id"]
            ).name
        except Exception as e:
            logger.warning(f"Can not get user name: {e}")

        role_assignment_ret = RoleAssignment(
            user_id=user_id,
            user_name=user_name,
            role_id=role_id,
            role_name=role_name,
            scope=role_assignment.scope,
        )

        return role_assignment_ret
File without changes
|
import logging

# Import the submodule explicitly: a bare ``import concurrent`` only imports
# the package and does NOT guarantee ``concurrent.futures`` is available.
import concurrent.futures

from openstack.connection import Connection
from openstack.network.v2.router import Router as OSRouter

from osi_dump.importer.router.router_importer import (
    RouterImporter,
)
from osi_dump.model.router import Router

logger = logging.getLogger(__name__)


class OpenStackRouterImporter(RouterImporter):
    """Imports Neutron routers from an OpenStack deployment."""

    def __init__(self, connection: Connection):
        self.connection = connection

    def import_routers(self) -> list[Router]:
        """Import routers information from Openstack

        Raises:
            Exception: Raises exception if fetching router failed

        Returns:
            list[Router]: Routers found on the target cloud
        """

        logger.info(f"Importing routers for {self.connection.auth['auth_url']}")

        try:
            osrouters: list[OSRouter] = list(self.connection.network.routers())
        except Exception as e:
            raise Exception(
                f"Can not fetch routers for {self.connection.auth['auth_url']}"
            ) from e

        routers: list[Router] = []

        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self._get_router_info, router) for router in osrouters
            ]
            for future in concurrent.futures.as_completed(futures):
                routers.append(future.result())

        logger.info(f"Imported routers for {self.connection.auth['auth_url']}")

        return routers

    def _get_router_info(self, router: OSRouter) -> Router:
        """Map one OpenStack router onto the osi_dump Router model.

        ``external_gateway_info`` typically looks like::

            {"network_id": "49760654-...", "enable_snat": true,
             "external_fixed_ips": [{"subnet_id": "c044a5c0-...",
                                     "ip_address": "10.0.2.188"}]}

        Routers without an external gateway (or with an unexpected shape)
        export ``None`` for the external network id/ip; this is logged, not
        raised.
        """

        external_net_id = None

        try:
            external_net_id = router.external_gateway_info["network_id"]
        except Exception:
            logger.warning(f"Could not get external net id for router: {router.id}")

        external_net_ip = None

        try:
            external_net_ip = router.external_gateway_info["external_fixed_ips"][0][
                "ip_address"
            ]

        except Exception:
            logger.warning(f"Could not get external net ip for router {router.id}")

        router_ret = Router(
            router_id=router.id,
            name=router.name,
            external_net_id=external_net_id,
            external_net_ip=external_net_ip,
            status=router.status,
            project_id=router.project_id,
            created_at=router.created_at,
            updated_at=router.updated_at,
        )

        return router_ret
osi_dump/model/flavor.py
ADDED
from typing import Optional

from pydantic import BaseModel, ConfigDict


class Flavor(BaseModel):
    """Compute flavor as exported by osi_dump."""

    # strict=True: reject implicit type coercion during validation.
    model_config = ConfigDict(strict=True)

    flavor_id: str

    flavor_name: str

    # Extra specs attached to the flavor, if any.
    properties: Optional[dict]

    ram: int
    vcpus: int
    disk: int
    swap: Optional[int]

    public: bool
osi_dump/model/hypervisor.py
CHANGED
osi_dump/model/image.py
ADDED
from typing import Optional

from pydantic import BaseModel, ConfigDict


class Image(BaseModel):
    """Glance image as exported by osi_dump."""

    # strict=True: reject implicit type coercion during validation.
    model_config = ConfigDict(strict=True)

    image_id: str

    disk_format: str
    min_disk: int
    min_ram: int
    image_name: Optional[str]
    owner: str

    # Image properties/metadata, if any.
    properties: Optional[dict]

    protected: bool
    status: str
    size: int
    virtual_size: Optional[int]
    visibility: str

    # Timestamps are kept as the API's string representation.
    created_at: str
    updated_at: str
osi_dump/model/instance.py
CHANGED
from typing import Optional

from pydantic import BaseModel, ConfigDict


class LoadBalancer(BaseModel):
    """Octavia load balancer as exported by osi_dump."""

    # strict=True: reject implicit type coercion during validation.
    model_config = ConfigDict(strict=True)

    id: str

    load_balancer_name: Optional[str]

    status: str

    # Identifiers of the amphorae backing this load balancer.
    amphoraes: list[str]

    project_id: Optional[str]
from typing import Optional

from pydantic import BaseModel, ConfigDict


class RoleAssignment(BaseModel):
    """Keystone role assignment as exported by osi_dump.

    All fields are optional: the importer exports ``None`` for anything it
    could not resolve.
    """

    # strict=True: reject implicit type coercion during validation.
    model_config = ConfigDict(strict=True)

    user_id: Optional[str]

    user_name: Optional[str]

    role_id: Optional[str]

    role_name: Optional[str]

    scope: Optional[dict]
osi_dump/model/router.py
ADDED
from typing import Optional

from pydantic import BaseModel, ConfigDict


class Router(BaseModel):
    """Neutron router as exported by osi_dump."""

    # strict=True: reject implicit type coercion during validation.
    model_config = ConfigDict(strict=True)

    router_id: str

    name: Optional[str]

    # None when the router has no (readable) external gateway.
    external_net_id: Optional[str]
    external_net_ip: Optional[str]

    status: str
    project_id: Optional[str]

    # Timestamps are kept as the API's string representation.
    created_at: str
    updated_at: str
osi_dump/util/__init__.py
CHANGED
import pandas as pd

from pandas import DataFrame


def expand_list_column(df: DataFrame, column: str) -> DataFrame:
    """Expand a column of lists into one column per list element.

    Shorter lists are right-padded with ``None`` so all rows align. The new
    columns are named ``{column}_1`` .. ``{column}_N`` and replace the
    original list column.

    Args:
        df: Source frame; ``df[column]`` must hold list values.
        column: Name of the list column to expand.

    Returns:
        DataFrame: A new frame with ``column`` replaced by the expanded
        columns; ``df`` itself is not modified.
    """
    # Empty frame: max() over no rows yields NaN and the padding arithmetic
    # below would fail, so just drop the list column and return.
    if df.empty:
        return df.drop(column, axis=1)

    # The longest list determines how many columns are needed.
    max_len = int(df[column].apply(len).max())

    # Pad every list to max_len, then fan the lists out into a frame that
    # shares df's index so join() aligns row-by-row.
    expanded_df = pd.DataFrame(
        df[column].apply(lambda x: x + [None] * (max_len - len(x))).tolist(),
        index=df.index,
    )

    # Rename the positional columns to "<column>_<1-based index>".
    expanded_df.columns = [f"{column}_{i + 1}" for i in range(expanded_df.shape[1])]

    # Drop the original list column and attach the expanded columns.
    return df.drop(column, axis=1).join(expanded_df)