osism 0.20250804.0__py3-none-any.whl → 0.20250824.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/api.py +154 -2
- osism/commands/baremetal.py +168 -0
- osism/commands/netbox.py +2 -2
- osism/services/event_bridge.py +304 -0
- osism/services/listener.py +130 -19
- osism/services/websocket_manager.py +271 -0
- osism/settings.py +1 -1
- osism/tasks/conductor/ironic.py +22 -17
- osism/tasks/conductor/netbox.py +58 -1
- osism/tasks/conductor/sonic/config_generator.py +341 -26
- osism/tasks/conductor/sonic/connections.py +123 -0
- osism/tasks/conductor/sonic/interface.py +3 -1
- osism/tasks/openstack.py +35 -15
- osism/utils/__init__.py +2 -2
- {osism-0.20250804.0.dist-info → osism-0.20250824.0.dist-info}/METADATA +7 -6
- {osism-0.20250804.0.dist-info → osism-0.20250824.0.dist-info}/RECORD +22 -20
- {osism-0.20250804.0.dist-info → osism-0.20250824.0.dist-info}/entry_points.txt +4 -0
- osism-0.20250824.0.dist-info/licenses/AUTHORS +1 -0
- osism-0.20250824.0.dist-info/pbr.json +1 -0
- osism-0.20250804.0.dist-info/licenses/AUTHORS +0 -1
- osism-0.20250804.0.dist-info/pbr.json +0 -1
- {osism-0.20250804.0.dist-info → osism-0.20250824.0.dist-info}/WHEEL +0 -0
- {osism-0.20250804.0.dist-info → osism-0.20250824.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250804.0.dist-info → osism-0.20250824.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,271 @@
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0

import asyncio
import json
import logging
from datetime import datetime, timezone
from typing import Dict, Any, Optional, List
from uuid import uuid4

from fastapi import WebSocket, WebSocketDisconnect
|
11
|
+
|
12
|
+
logger = logging.getLogger("osism.websocket")
|
13
|
+
|
14
|
+
|
15
|
+
class EventMessage:
    """Represents an event message for WebSocket streaming.

    Each instance is assigned a random UUID and a UTC timestamp in
    ISO-8601 format with a trailing "Z" (e.g. "2024-01-01T12:00:00.000000Z").
    """

    def __init__(
        self,
        event_type: str,
        source: str,
        data: Dict[str, Any],
        node_name: Optional[str] = None,
    ):
        """Create a new event message.

        Args:
            event_type: Dotted event type, e.g. "baremetal.node.update".
            source: Originating system of the event (e.g. "openstack").
            data: Arbitrary event payload.
            node_name: Optional node the event relates to; used by
                connection-level node filtering.
        """
        self.id = str(uuid4())
        # datetime.utcnow() is deprecated since Python 3.12 and returns a
        # naive datetime. Use an aware UTC timestamp instead and keep the
        # historical "...Z" suffix format for consumers.
        self.timestamp = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
        self.event_type = event_type
        self.source = source
        self.node_name = node_name
        self.data = data

    def to_dict(self) -> Dict[str, Any]:
        """Convert event message to dictionary for JSON serialization."""
        return {
            "id": self.id,
            "timestamp": self.timestamp,
            "event_type": self.event_type,
            "source": self.source,
            "node_name": self.node_name,
            "data": self.data,
        }

    def to_json(self) -> str:
        """Convert event message to JSON string."""
        return json.dumps(self.to_dict())
|
46
|
+
|
47
|
+
|
48
|
+
class WebSocketConnection:
    """A single WebSocket client together with its event filtering state.

    All filter lists start out empty, which means "no restriction": an
    event is delivered unless one of the non-empty filter lists rejects it.
    """

    def __init__(self, websocket: WebSocket):
        self.websocket = websocket
        # Event types this client wants (empty = all event types)
        self.event_filters: List[str] = []
        # Node names this client wants (empty = all nodes)
        self.node_filters: List[str] = []
        # Service types this client wants (empty = all services)
        self.service_filters: List[str] = []

    def matches_filters(self, event: "EventMessage") -> bool:
        """Return True if *event* passes every active filter of this client."""
        # No active filters at all: everything passes.
        if not (self.event_filters or self.node_filters or self.service_filters):
            return True

        # Event-type filter: reject when set and the type is not listed.
        if self.event_filters and event.event_type not in self.event_filters:
            return False

        # Node filter: reject when set and the node is unknown or not listed.
        if self.node_filters:
            if event.node_name is None or event.node_name not in self.node_filters:
                return False

        # Service filter: the service is the first dotted component of the
        # event type (e.g. "baremetal" from "baremetal.node.update").
        if self.service_filters:
            service = event.event_type.split(".")[0] if event.event_type else "unknown"
            if service not in self.service_filters:
                return False

        return True
|
80
|
+
|
81
|
+
|
82
|
+
class WebSocketManager:
    """Manages WebSocket connections and event broadcasting.

    Events are pushed onto an asyncio.Queue and drained by a single
    background broadcaster task which fans each event out to every
    connection whose filters match.
    """

    def __init__(self):
        # Store active WebSocket connections with filtering support
        self.connections: Dict[WebSocket, WebSocketConnection] = {}
        # Event queue for broadcasting (unbounded, so add_event never blocks)
        self.event_queue: asyncio.Queue = asyncio.Queue()
        # Background task for event broadcasting
        self._broadcaster_task: Optional[asyncio.Task] = None
        # Lock guarding access to self.connections
        self._lock = asyncio.Lock()

    async def connect(self, websocket: WebSocket) -> None:
        """Accept a new WebSocket connection.

        Accepts the handshake, registers the connection with empty filters,
        and (re)starts the broadcaster task if it is not currently running.
        """
        await websocket.accept()
        async with self._lock:
            self.connections[websocket] = WebSocketConnection(websocket)
        logger.info(f"WebSocket connected. Total connections: {len(self.connections)}")

        # Start broadcaster if this is the first connection
        # NOTE(review): this check runs outside self._lock, so two concurrent
        # connects could in principle each start a broadcaster task — confirm
        # connects are serialized on one event loop or that duplicates are
        # harmless here.
        if not self._broadcaster_task or self._broadcaster_task.done():
            self._broadcaster_task = asyncio.create_task(self._broadcast_events())

    async def disconnect(self, websocket: WebSocket) -> None:
        """Remove a WebSocket connection.

        Safe to call for connections already removed: dict.pop with a
        default never raises.
        """
        async with self._lock:
            self.connections.pop(websocket, None)
        logger.info(
            f"WebSocket disconnected. Total connections: {len(self.connections)}"
        )

    async def update_filters(
        self,
        websocket: WebSocket,
        event_filters: Optional[List[str]] = None,
        node_filters: Optional[List[str]] = None,
        service_filters: Optional[List[str]] = None,
    ) -> None:
        """Update filters for a specific WebSocket connection.

        Only filter lists passed as non-None are replaced; passing None
        leaves the corresponding filter unchanged, while an empty list
        clears that filter (= match everything for that dimension).
        Unknown websockets are silently ignored.
        """
        async with self._lock:
            if websocket in self.connections:
                connection = self.connections[websocket]
                if event_filters is not None:
                    connection.event_filters = event_filters
                if node_filters is not None:
                    connection.node_filters = node_filters
                if service_filters is not None:
                    connection.service_filters = service_filters
                logger.debug(
                    f"Updated filters for WebSocket: events={connection.event_filters}, "
                    f"nodes={connection.node_filters}, services={connection.service_filters}"
                )

    async def add_event(self, event: EventMessage) -> None:
        """Add an event to the broadcast queue (never blocks: queue is unbounded)."""
        await self.event_queue.put(event)

    async def broadcast_event_from_notification(
        self, event_type: str, payload: Dict[str, Any]
    ) -> None:
        """Create and broadcast an event from RabbitMQ notification.

        Extracts a node name / resource id from the service-specific
        oslo-style payload wrapper ("ironic_object.data", "nova_object.data",
        ...), annotates the payload with service_type/resource_id and queues
        the resulting EventMessage. Any failure is logged, never raised.
        """
        try:
            logger.info(f"Processing event for WebSocket broadcast: {event_type}")
            logger.debug(f"Active WebSocket connections: {len(self.connections)}")

            # Extract relevant identifiers from different service types
            node_name = None
            resource_id = None
            # Service is the first dotted component, e.g. "baremetal" from
            # "baremetal.node.power_state_updated".
            service_type = event_type.split(".")[0] if event_type else "unknown"

            # Extract identifiers based on service type
            if service_type == "baremetal" and "ironic_object.data" in payload:
                ironic_data = payload["ironic_object.data"]
                node_name = ironic_data.get("name")
                resource_id = ironic_data.get("uuid")
            elif service_type in ["compute", "nova"] and "nova_object.data" in payload:
                nova_data = payload["nova_object.data"]
                node_name = nova_data.get("host") or nova_data.get("name")
                resource_id = nova_data.get("uuid")
            elif service_type in ["network", "neutron"]:
                # Neutron events may have different payload structures
                if "neutron_object.data" in payload:
                    neutron_data = payload["neutron_object.data"]
                    resource_id = neutron_data.get("id") or neutron_data.get("uuid")
                    node_name = neutron_data.get("name") or neutron_data.get(
                        "device_id"
                    )
            elif service_type == "volume" and "cinder_object.data" in payload:
                cinder_data = payload["cinder_object.data"]
                resource_id = cinder_data.get("id") or cinder_data.get("uuid")
                node_name = cinder_data.get("name") or cinder_data.get("display_name")
            elif service_type == "image" and "glance_object.data" in payload:
                glance_data = payload["glance_object.data"]
                resource_id = glance_data.get("id") or glance_data.get("uuid")
                node_name = glance_data.get("name")
            elif service_type == "identity" and "keystone_object.data" in payload:
                keystone_data = payload["keystone_object.data"]
                resource_id = keystone_data.get("id") or keystone_data.get("uuid")
                node_name = keystone_data.get("name")

            # Create event message with enhanced metadata. A shallow copy is
            # enough because only top-level keys are added.
            event_data = payload.copy()
            event_data["service_type"] = service_type
            event_data["resource_id"] = resource_id

            event = EventMessage(
                event_type=event_type,
                source="openstack",
                data=event_data,
                node_name=node_name,
            )

            await self.add_event(event)
            logger.info(
                f"Added {service_type} event to WebSocket queue: {event_type} for resource {node_name or resource_id}"
            )
            logger.debug(f"Event queue size: {self.event_queue.qsize()}")

        except Exception as e:
            # Best-effort: a malformed notification must not break the caller.
            logger.error(f"Error creating event from notification: {e}")

    async def _broadcast_events(self) -> None:
        """Background task to broadcast events to all connected clients.

        Runs until cancelled. Events dequeued while no client is connected
        are dropped. Connections that fail to receive are removed.
        """
        logger.info("Starting WebSocket event broadcaster")

        while True:
            try:
                # Wait for an event
                event = await self.event_queue.get()

                if not self.connections:
                    # No connections, skip broadcasting (event is dropped)
                    continue

                # Broadcast to filtered connections
                message = event.to_json()
                disconnected_connections = set()
                sent_count = 0

                # Snapshot under the lock, then send outside it so a slow
                # client cannot block connect/disconnect.
                async with self._lock:
                    connections_copy = dict(self.connections)

                for websocket, connection in connections_copy.items():
                    try:
                        # Check if event matches connection filters
                        if connection.matches_filters(event):
                            await websocket.send_text(message)
                            sent_count += 1
                    except WebSocketDisconnect:
                        disconnected_connections.add(websocket)
                    except Exception as e:
                        # Any send failure is treated as a dead connection.
                        logger.error(f"Error sending message to WebSocket: {e}")
                        disconnected_connections.add(websocket)

                # Remove disconnected connections
                if disconnected_connections:
                    async with self._lock:
                        for websocket in disconnected_connections:
                            self.connections.pop(websocket, None)
                    logger.info(
                        f"Removed {len(disconnected_connections)} disconnected WebSocket(s). "
                        f"Active connections: {len(self.connections)}"
                    )

                logger.info(
                    f"Broadcasted event {event.event_type} to {sent_count}/{len(self.connections)} connection(s)"
                )

            except asyncio.CancelledError:
                logger.info("WebSocket broadcaster task cancelled")
                break
            except Exception as e:
                logger.error(f"Error in WebSocket broadcaster: {e}")
                # Continue broadcasting even if there's an error

    async def send_heartbeat(self) -> None:
        """Send heartbeat to all connected clients.

        No-op when no clients are connected; otherwise queues a
        "heartbeat" EventMessage like any other event.
        """
        if not self.connections:
            return

        heartbeat_event = EventMessage(
            event_type="heartbeat", source="osism", data={"message": "ping"}
        )

        await self.add_event(heartbeat_event)
|
268
|
+
|
269
|
+
|
270
|
+
# Global WebSocket manager instance: module-level singleton so that all
# importers share a single connection table and event queue.
websocket_manager = WebSocketManager()
|
osism/settings.py
CHANGED
@@ -24,7 +24,7 @@ REDIS_DB: int = int(os.getenv("REDIS_DB", "0"))
|
|
24
24
|
|
25
25
|
|
26
26
|
NETBOX_URL = os.getenv("NETBOX_API", os.getenv("NETBOX_URL"))
|
27
|
-
NETBOX_TOKEN = os.getenv("NETBOX_TOKEN"
|
27
|
+
NETBOX_TOKEN = str(os.getenv("NETBOX_TOKEN") or read_secret("NETBOX_TOKEN") or "")
|
28
28
|
IGNORE_SSL_ERRORS = os.getenv("IGNORE_SSL_ERRORS", "True") == "True"
|
29
29
|
|
30
30
|
# 43200 seconds = 12 hours
|
osism/tasks/conductor/ironic.py
CHANGED
@@ -160,27 +160,36 @@ def sync_ironic(request_id, get_ironic_parameters, node_name=None, force_update=
|
|
160
160
|
|
161
161
|
# Filter nodes by node_name if specified
|
162
162
|
if node_name:
|
163
|
-
nodes = [node for node in nodes if node["
|
163
|
+
nodes = [node for node in nodes if node["name"] == node_name]
|
164
164
|
|
165
165
|
for node in nodes:
|
166
166
|
osism_utils.push_task_output(
|
167
|
-
request_id, f"Looking for {node['
|
167
|
+
request_id, f"Looking for {node['name']} in NetBox\n"
|
168
168
|
)
|
169
|
-
if node["
|
169
|
+
if node["name"] not in device_names:
|
170
170
|
if (
|
171
|
-
not node["
|
172
|
-
and node["
|
173
|
-
|
171
|
+
not node["instance_uuid"]
|
172
|
+
and node["provision_state"]
|
173
|
+
in ["enroll", "manageable", "available", "clean failed"]
|
174
|
+
and node["power_state"] in ["power off", None]
|
174
175
|
):
|
175
176
|
osism_utils.push_task_output(
|
176
177
|
request_id,
|
177
|
-
f"Cleaning up baremetal node not found in NetBox: {node['
|
178
|
+
f"Cleaning up baremetal node not found in NetBox: {node['name']}\n",
|
178
179
|
)
|
180
|
+
if node["provision_state"] == "clean failed":
|
181
|
+
# NOTE: Move node to manageable to allow deletion
|
182
|
+
node = openstack.baremetal_node_set_provision_state(
|
183
|
+
node["uuid"], "manage"
|
184
|
+
)
|
185
|
+
node = openstack.baremetal_node_wait_for_nodes_provision_state(
|
186
|
+
node["uuid"], "manageable"
|
187
|
+
)
|
179
188
|
for port in openstack.baremetal_port_list(
|
180
|
-
details=False, attributes=dict(node_uuid=node["
|
189
|
+
details=False, attributes=dict(node_uuid=node["uuid"])
|
181
190
|
):
|
182
191
|
openstack.baremetal_port_delete(port.id)
|
183
|
-
openstack.baremetal_node_delete(node["
|
192
|
+
openstack.baremetal_node_delete(node["uuid"])
|
184
193
|
else:
|
185
194
|
osism_utils.push_task_output(
|
186
195
|
f"Cannot remove baremetal node because it is still provisioned or running: {node}"
|
@@ -218,13 +227,9 @@ def sync_ironic(request_id, get_ironic_parameters, node_name=None, force_update=
|
|
218
227
|
node = openstack.baremetal_node_create(device.name, node_attributes)
|
219
228
|
else:
|
220
229
|
# NOTE: The listener service only reacts to changes in the baremetal node. Explicitly sync provision and power state in case updates were missed by the listener.
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
):
|
225
|
-
netbox.set_provision_state(device.name, node["provision_state"])
|
226
|
-
if device.custom_fields["power_state"] != node["power_state"]:
|
227
|
-
netbox.set_power_state(device.name, node["power_state"])
|
230
|
+
# This sync is done unconditionally, because we do not know the state of secondary netboxes at this point
|
231
|
+
netbox.set_provision_state(device.name, node["provision_state"])
|
232
|
+
netbox.set_power_state(device.name, node["power_state"])
|
228
233
|
# NOTE: Check whether the baremetal node needs to be updated
|
229
234
|
node_updates = {}
|
230
235
|
deep_compare(node_attributes, node, node_updates)
|
@@ -280,7 +285,7 @@ def sync_ironic(request_id, get_ironic_parameters, node_name=None, force_update=
|
|
280
285
|
request_id,
|
281
286
|
f"Validation of management interface successful for baremetal node for {device.name}\n",
|
282
287
|
)
|
283
|
-
if node["provision_state"]
|
288
|
+
if node["provision_state"] in ["enroll", "clean failed"]:
|
284
289
|
osism_utils.push_task_output(
|
285
290
|
request_id,
|
286
291
|
f"Transitioning baremetal node to manageable state for {device.name}\n",
|
osism/tasks/conductor/netbox.py
CHANGED
@@ -1,7 +1,8 @@
|
|
1
1
|
# SPDX-License-Identifier: Apache-2.0
|
2
2
|
|
3
|
-
|
3
|
+
import ipaddress
|
4
4
|
import yaml
|
5
|
+
from loguru import logger
|
5
6
|
|
6
7
|
from osism import settings, utils
|
7
8
|
from osism.tasks import netbox
|
@@ -309,3 +310,59 @@ def get_device_loopbacks(device):
|
|
309
310
|
)
|
310
311
|
|
311
312
|
return {"loopbacks": loopbacks}
|
313
|
+
|
314
|
+
|
315
|
+
def get_device_interface_ips(device):
    """Get IPv4 addresses assigned to device interfaces.

    Management-only and virtual interfaces are skipped; for every other
    interface only the first valid IPv4 address found is recorded.

    Args:
        device: NetBox device object

    Returns:
        dict: Dictionary mapping interface names to their IPv4 addresses
            {
                'interface_name': 'ip_address/prefix_length',
                ...
            }
    """
    result = {}

    try:
        # Walk every interface of the device.
        for iface in utils.nb.dcim.interfaces.filter(device_id=device.id):
            # Skip management interfaces and virtual interfaces for now.
            is_virtual = (
                hasattr(iface, "type") and iface.type and iface.type.value == "virtual"
            )
            if iface.mgmt_only or is_virtual:
                continue

            # Inspect the IP addresses assigned to this interface.
            candidates = utils.nb.ipam.ip_addresses.filter(
                assigned_object_id=iface.id,
            )
            for candidate in candidates:
                address = candidate.address
                if not address:
                    continue
                try:
                    parsed = ipaddress.ip_interface(address)
                except (ValueError, ipaddress.AddressValueError):
                    # Skip invalid IP addresses.
                    continue
                if parsed.version == 4:
                    result[iface.name] = address
                    logger.debug(
                        f"Found IPv4 address {address} on interface {iface.name} of device {device.name}"
                    )
                    # Only use the first IPv4 address found.
                    break

    except Exception as e:
        # Best-effort: return whatever was collected before the failure.
        logger.warning(
            f"Could not get interface IP addresses for device {device.name}: {e}"
        )

    return result
|