osism 0.20250331.0__py3-none-any.whl → 0.20250425.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
osism/api.py CHANGED
@@ -7,11 +7,10 @@ from uuid import UUID
 
 from fastapi import FastAPI, Header, Request, Response
 from pydantic import BaseModel
-import pynetbox
 from starlette.middleware.cors import CORSMiddleware
 
 from osism.tasks import reconciler
-from osism import settings, utils
+from osism import utils
 from osism.services.listener import BaremetalEvents
 
 
@@ -78,20 +77,6 @@ logger = logging.getLogger("api")
 baremetal_events = BaremetalEvents()
 
 
-@app.on_event("startup")
-async def startup_event():
-    if settings.NETBOX_URL and settings.NETBOX_TOKEN:
-        utils.nb = pynetbox.api(settings.NETBOX_URL, token=settings.NETBOX_TOKEN)
-
-        if settings.IGNORE_SSL_ERRORS:
-            import requests
-
-            requests.packages.urllib3.disable_warnings()
-            session = requests.Session()
-            session.verify = False
-            utils.nb.http_session = session
-
-
 @app.get("/")
 async def root():
     return {"message": "Hello World"}
osism/commands/manage.py CHANGED
@@ -12,7 +12,7 @@ import requests
 from osism.data import TEMPLATE_IMAGE_CLUSTERAPI, TEMPLATE_IMAGE_OCTAVIA
 from osism.tasks import openstack, handle_task
 
-SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.29", "1.30", "1.31"]
+SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.30", "1.31", "1.32"]
 
 
 class ImageClusterapi(Command):
@@ -51,7 +51,7 @@ class ImageClusterapi(Command):
         parser.add_argument(
             "--filter",
             type=str,
-            help="Filter the version to be managed (e.g. 1.31)",
+            help="Filter the version to be managed (e.g. 1.32)",
             default=None,
         )
         return parser
@@ -188,7 +188,9 @@ class ImageOctavia(Command):
             "--deactivate",
         ]
 
-        task_signature = openstack.image_manager.si(*arguments, configs=result)
+        task_signature = openstack.image_manager.si(
+            *arguments, configs=result, ignore_env=True
+        )
         task = task_signature.apply_async()
         if wait:
             logger.info(
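
The ImageOctavia change adds ignore_env=True to the Celery task signature. A minimal sketch of the pattern, with a hypothetical task and an assumed broker URL: .si() builds an immutable signature, so only the arguments given here reach the task, and .apply_async() enqueues it.

    from celery import Celery

    app = Celery("sketch", broker="redis://localhost:6379/0")  # broker URL is an assumption

    @app.task
    def image_manager(*arguments, configs=None, ignore_env=False):
        # Placeholder body for illustration only
        return arguments, configs, ignore_env

    # .si() = immutable signature: no parent task results are prepended to the args.
    task_signature = image_manager.si("--deactivate", configs=[], ignore_env=True)
    task = task_signature.apply_async()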
osism/commands/netbox.py CHANGED
@@ -3,29 +3,33 @@
 from cliff.command import Command
 from loguru import logger
 
-from osism.tasks import conductor, netbox, reconciler, openstack, handle_task
+from osism.tasks import conductor, netbox, reconciler, handle_task
 
 
 class Ironic(Command):
     def get_parser(self, prog_name):
         parser = super(Ironic, self).get_parser(prog_name)
+        parser.add_argument(
+            "--no-wait",
+            help="Do not wait until the sync has been completed",
+            action="store_true",
+        )
+        parser.add_argument(
+            "--force-update",
+            help="Force update of baremetal nodes (Used to update non-comparable items like passwords)",
+            action="store_true",
+        )
         return parser
 
     def take_action(self, parsed_args):
-        # Get Ironic parameters from the conductor
-        task = conductor.get_ironic_parameters.delay()
-        task.wait(timeout=None, interval=0.5)
-        ironic_parameters = task.get()
+        wait = not parsed_args.no_wait
 
-        # Add all unregistered systems from the Netbox in Ironic
-        netbox.get_devices_not_yet_registered_in_ironic.apply_async(
-            (), link=openstack.baremetal_create_nodes.s(ironic_parameters)
+        task = conductor.sync_netbox_with_ironic.delay(
+            force_update=parsed_args.force_update
         )
-
-        # Synchronize the current status in Ironic with the Netbox
-        # openstack.baremetal_node_list.apply_async((), link=netbox.synchronize_device_state.s())
-
-        # Remove systems from Ironic that are no longer present in the Netbox
+        if wait:
+            logger.info(f"Task {task.task_id} is running. Wait. No more output.")
+            task.wait(timeout=None, interval=0.5)
 
 
 class Sync(Command):
@@ -33,7 +37,6 @@ class Sync(Command):
         parser = super(Sync, self).get_parser(prog_name)
         parser.add_argument(
             "--no-wait",
-            default=False,
             help="Do not wait until the sync has been completed",
             action="store_true",
         )
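
The netbox ironic command now delegates the whole NetBox-to-Ironic sync to a single conductor task instead of chaining netbox and openstack tasks on the client side. The reworked command body boils down to this call, shown here as it could be issued programmatically (a reachable task broker is assumed):

    from osism.tasks import conductor

    # Same call the command now issues; force_update mirrors --force-update
    task = conductor.sync_netbox_with_ironic.delay(force_update=True)
    task.wait(timeout=None, interval=0.5)  # skipped when --no-wait is given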
osism/commands/server.py CHANGED
@@ -93,36 +93,131 @@ class ServerMigrate(Command):
 class ServerList(Command):
     def get_parser(self, prog_name):
         parser = super(ServerList, self).get_parser(prog_name)
+        parser.add_argument(
+            "--domain",
+            help="List all servers of a specific domain",
+            type=str,
+            default=None,
+        )
+        parser.add_argument(
+            "--project",
+            help="List all servers of a specific project",
+            type=str,
+            default=None,
+        )
+        parser.add_argument(
+            "--project-domain", help="Domain of the project", type=str, default=None
+        )
         return parser
 
     def take_action(self, parsed_args):
         conn = get_cloud_connection()
+        domain = parsed_args.domain
+        project = parsed_args.project
+        project_domain = parsed_args.project_domain
 
         result = []
-        for server in conn.compute.servers(all_projects=True, status="build"):
-            duration = datetime.now(timezone.utc) - dateutil.parser.parse(
-                server.created_at
+        if domain:
+            _domain = conn.identity.find_domain(domain)
+            if not _domain:
+                logger.error(f"Domain {domain} not found")
+                return
+            projects = list(conn.identity.projects(domain_id=_domain.id))
+
+            for project in projects:
+                query = {"project_id": project.id}
+                for server in conn.compute.servers(all_projects=True, **query):
+                    result.append(
+                        [
+                            project.name,
+                            project.id,
+                            server.id,
+                            server.name,
+                            server.flavor["original_name"],
+                            server.status,
+                        ]
+                    )
+
+            print(
+                tabulate(
+                    result,
+                    headers=["Project", "Project ID", "ID", "Name", "Flavor", "Status"],
+                    tablefmt="psql",
+                )
             )
-            if duration.total_seconds() > 7200:
-                logger.info(
-                    f"Server {server.id} hangs in BUILD status for more than 2 hours"
+
+        elif project:
+            if project_domain:
+                _project_domain = conn.identity.find_domain(project_domain)
+                if not _project_domain:
+                    logger.error(f"Project domain {project_domain} not found")
+                    return
+                query = {"domain_id": _project_domain.id}
+                _project = conn.identity.find_project(project, **query)
+            else:
+                _project = conn.identity.find_project(project)
+            if not _project:
+                logger.error(f"Project {project} not found")
+                return
+            query = {"project_id": _project.id}
+
+            for server in conn.compute.servers(all_projects=True, **query):
+                result.append(
+                    [
+                        server.id,
+                        server.name,
+                        server.flavor["original_name"],
+                        server.status,
+                    ]
                 )
-            result.append([server.id, server.name, server.status])
 
-        for server in conn.compute.servers(all_projects=True, status="error"):
-            duration = datetime.now(timezone.utc) - dateutil.parser.parse(
-                server.created_at
-            )
-            if duration.total_seconds() > 7200:
-                logger.info(
-                    f"Server {server.id} hangs in ERROR status for more than 2 hours"
+            print(
+                tabulate(
+                    result,
+                    headers=["ID", "Name", "Flavor", "Status"],
+                    tablefmt="psql",
                 )
-            result.append([server.id, server.name, server.status])
+            )
 
-        print(
-            tabulate(
-                result,
-                headers=["ID", "Name", "Status"],
-                tablefmt="psql",
+        else:
+            for server in conn.compute.servers(all_projects=True, status="build"):
+                duration = datetime.now(timezone.utc) - dateutil.parser.parse(
+                    server.created_at
+                )
+                if duration.total_seconds() > 7200:
+                    logger.info(
+                        f"Server {server.id} hangs in BUILD status for more than 2 hours"
+                    )
+                result.append(
+                    [
+                        server.id,
+                        server.name,
+                        server.flavor["original_name"],
+                        server.status,
+                    ]
+                )
+
+            for server in conn.compute.servers(all_projects=True, status="error"):
+                duration = datetime.now(timezone.utc) - dateutil.parser.parse(
+                    server.created_at
+                )
+                if duration.total_seconds() > 7200:
+                    logger.info(
+                        f"Server {server.id} hangs in ERROR status for more than 2 hours"
+                    )
+                result.append(
+                    [
+                        server.id,
+                        server.name,
+                        server.flavor["original_name"],
+                        server.status,
+                    ]
+                )
+
+            print(
+                tabulate(
+                    result,
+                    headers=["ID", "Name", "Flavor", "Status"],
+                    tablefmt="psql",
+                )
             )
-        )
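
Both new filter branches resolve names to IDs through the identity service before querying compute, so an unknown domain or project fails fast with an error instead of silently yielding an empty list. A self-contained sketch of the lookup pattern with the openstacksdk (connection setup and the project name are placeholders):

    import openstack

    conn = openstack.connect()  # assumes a configured clouds.yaml / OS_CLOUD
    _project = conn.identity.find_project("customer-project")  # placeholder name
    if _project:
        for server in conn.compute.servers(all_projects=True, project_id=_project.id):
            print(server.id, server.name, server.flavor["original_name"], server.status)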
osism/commands/volume.py CHANGED
@@ -14,63 +14,160 @@ from osism.commands import get_cloud_connection
 class VolumeList(Command):
     def get_parser(self, prog_name):
         parser = super(VolumeList, self).get_parser(prog_name)
+        parser.add_argument(
+            "--domain",
+            help="List all volumes of a specific domain",
+            type=str,
+            default=None,
+        )
+        parser.add_argument(
+            "--project",
+            help="List all volumes of a specific project",
+            type=str,
+            default=None,
+        )
+        parser.add_argument(
+            "--project-domain", help="Domain of the project", type=str, default=None
+        )
         return parser
 
     def take_action(self, parsed_args):
         conn = get_cloud_connection()
+        domain = parsed_args.domain
+        project = parsed_args.project
+        project_domain = parsed_args.project_domain
 
         result = []
-        for volume in conn.block_storage.volumes(all_projects=True, status="detaching"):
-            created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
-            duration = datetime.now(timezone.utc) - created_at
-            if duration.total_seconds() > 7200:
-                logger.info(
-                    f"Volume {volume.id} hangs in DETACHING status for more than 2 hours"
-                )
-            result.append([volume.id, volume.name, volume.status])
-
-        for volume in conn.block_storage.volumes(all_projects=True, status="creating"):
-            created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
-            duration = datetime.now(timezone.utc) - created_at
-            if duration.total_seconds() > 7200:
-                logger.info(
-                    f"Volume {volume.id} hangs in CREATING status for more than 2 hours"
-                )
-            result.append([volume.id, volume.name, volume.status])
-
-        for volume in conn.block_storage.volumes(
-            all_projects=True, status="error_deleting"
-        ):
-            created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
-            duration = datetime.now(timezone.utc) - created_at
-            if duration.total_seconds() > 7200:
-                logger.info(
-                    f"Volume {volume.id} hangs in ERROR_DELETING status for more than 2 hours"
+        if domain:
+            _domain = conn.identity.find_domain(domain)
+            if not _domain:
+                logger.error(f"Domain {domain} not found")
+                return
+            projects = list(conn.identity.projects(domain_id=_domain.id))
+
+            for project in projects:
+                query = {"project_id": project.id}
+                for volume in conn.block_storage.volumes(all_projects=True, **query):
+                    result.append(
+                        [
+                            project.name,
+                            project.id,
+                            volume.id,
+                            volume.name,
+                            volume.volume_type,
+                            volume.status,
+                        ]
+                    )
+
+            print(
+                tabulate(
+                    result,
+                    headers=["Project", "Project ID", "ID", "Name", "Type", "Status"],
+                    tablefmt="psql",
                 )
-            result.append([volume.id, volume.name, volume.status])
-
-        for volume in conn.block_storage.volumes(all_projects=True, status="deleting"):
-            created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
-            duration = datetime.now(timezone.utc) - created_at
-            if duration.total_seconds() > 7200:
-                logger.info(
-                    f"Volume {volume.id} hangs in DELETING status for more than 2 hours"
+            )
+
+        elif project:
+            if project_domain:
+                _project_domain = conn.identity.find_domain(project_domain)
+                if not _project_domain:
+                    logger.error(f"Project domain {project_domain} not found")
+                    return
+                query = {"domain_id": _project_domain.id}
+                _project = conn.identity.find_project(project, **query)
+            else:
+                _project = conn.identity.find_project(project)
+            if not _project:
+                logger.error(f"Project {project} not found")
+                return
+            query = {"project_id": _project.id}
+
+            for volume in conn.block_storage.volumes(all_projects=True, **query):
+                result.append(
+                    [
+                        volume.id,
+                        volume.name,
+                        volume.volume_type,
+                        volume.status,
+                    ]
                 )
-            result.append([volume.id, volume.name, volume.status])
-
-        for volume in conn.block_storage.volumes(all_projects=True, status="error"):
-            created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
-            duration = datetime.now(timezone.utc) - created_at
-            if duration.total_seconds() > 7200:
-                logger.info(
-                    f"Volume {volume.id} hangs in ERROR status for more than 2 hours"
+
+            print(
+                tabulate(
+                    result,
+                    headers=["ID", "Name", "Type", "Status"],
+                    tablefmt="psql",
                 )
-            result.append([volume.id, volume.name, volume.status])
+            )
+
+        else:
+            for volume in conn.block_storage.volumes(
+                all_projects=True, status="detaching"
+            ):
+                created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
+                duration = datetime.now(timezone.utc) - created_at
+                if duration.total_seconds() > 7200:
+                    logger.info(
+                        f"Volume {volume.id} hangs in DETACHING status for more than 2 hours"
+                    )
+                result.append(
+                    [volume.id, volume.name, volume.volume_type, volume.status]
+                )
+
+            for volume in conn.block_storage.volumes(
+                all_projects=True, status="creating"
+            ):
+                created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
+                duration = datetime.now(timezone.utc) - created_at
+                if duration.total_seconds() > 7200:
+                    logger.info(
+                        f"Volume {volume.id} hangs in CREATING status for more than 2 hours"
+                    )
+                result.append(
+                    [volume.id, volume.name, volume.volume_type, volume.status]
+                )
+
+            for volume in conn.block_storage.volumes(
+                all_projects=True, status="error_deleting"
+            ):
+                created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
+                duration = datetime.now(timezone.utc) - created_at
+                if duration.total_seconds() > 7200:
+                    logger.info(
+                        f"Volume {volume.id} hangs in ERROR_DELETING status for more than 2 hours"
+                    )
+                result.append(
+                    [volume.id, volume.name, volume.volume_type, volume.status]
+                )
 
-        print(
-            tabulate(
-                result,
-                headers=["ID", "Name", "Status"],
-                tablefmt="psql",
+            for volume in conn.block_storage.volumes(
+                all_projects=True, status="deleting"
+            ):
+                created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
+                duration = datetime.now(timezone.utc) - created_at
+                if duration.total_seconds() > 7200:
+                    logger.info(
+                        f"Volume {volume.id} hangs in DELETING status for more than 2 hours"
+                    )
+                result.append(
+                    [volume.id, volume.name, volume.volume_type, volume.status]
+                )
+
+            for volume in conn.block_storage.volumes(all_projects=True, status="error"):
+                created_at = pytz.utc.localize(dateutil.parser.parse(volume.created_at))
+                duration = datetime.now(timezone.utc) - created_at
+                if duration.total_seconds() > 7200:
+                    logger.info(
+                        f"Volume {volume.id} hangs in ERROR status for more than 2 hours"
+                    )
+                result.append(
+                    [volume.id, volume.name, volume.volume_type, volume.status]
+                )
+
+            print(
+                tabulate(
+                    result,
+                    headers=["ID", "Name", "Type", "Status"],
+                    tablefmt="psql",
+                )
             )
-        )
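
The unfiltered branch keeps the existing stuck-resource heuristic: any volume in a transient status older than two hours is reported. The age check, isolated from the loop (the timestamp is a made-up example value):

    from datetime import datetime, timezone

    import dateutil.parser
    import pytz

    created_at = pytz.utc.localize(dateutil.parser.parse("2025-04-25T08:00:00"))  # example
    duration = datetime.now(timezone.utc) - created_at
    if duration.total_seconds() > 7200:  # 2 hours, the threshold used above
        print("volume hangs in a transient status for more than 2 hours")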
osism/core/enums.py CHANGED
@@ -188,7 +188,7 @@ MAP_ROLE2ROLE = {
     [
         "common",
         [
-            ["loadbalancer", ["opensearch", "mariadb-ng"]],
+            ["loadbalancer", ["letsencrypt", "opensearch", "mariadb-ng"]],
             ["openvswitch", ["ovn"]],
             "memcached",
             "redis",
@@ -199,6 +199,19 @@ MAP_ROLE2ROLE = {
     "collection-kubernetes": [
         ["kubernetes", ["kubeconfig", ["copy-kubeconfig"]]],
     ],
+    "collection-openstack-core": [
+        "horizon",
+        [
+            "keystone",
+            [
+                "glance",
+                "cinder",
+                ["neutron", ["octavia"]],
+                "designate",
+                ["placement", ["nova"]],
+            ],
+        ],
+    ],
     "collection-openstack": [
         "horizon",
         [
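
The new collection-openstack-core entry follows the same nesting convention as the neighbouring collections: a [role, [children]] pair apparently places the children after the parent role in the run order. A small sketch that flattens such an entry depth-first, assuming that reading:

    def flatten(entry):
        # Yield role names in declaration order, descending into [role, [children]] pairs.
        if isinstance(entry, str):
            yield entry
            return
        for item in entry:
            yield from flatten(item)

    core = [
        "horizon",
        ["keystone", ["glance", "cinder", ["neutron", ["octavia"]], "designate", ["placement", ["nova"]]]],
    ]
    print(list(flatten(core)))
    # ['horizon', 'keystone', 'glance', 'cinder', 'neutron', 'octavia', 'designate', 'placement', 'nova']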