osism 0.20250804.0-py3-none-any.whl → 0.20250823.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/api.py +55 -1
- osism/commands/baremetal.py +168 -0
- osism/commands/netbox.py +2 -2
- osism/settings.py +1 -1
- osism/tasks/conductor/ironic.py +22 -17
- osism/tasks/conductor/netbox.py +58 -1
- osism/tasks/conductor/sonic/config_generator.py +341 -26
- osism/tasks/conductor/sonic/connections.py +123 -0
- osism/tasks/conductor/sonic/interface.py +3 -1
- osism/tasks/openstack.py +35 -15
- osism/utils/__init__.py +2 -2
- {osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/METADATA +6 -6
- {osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/RECORD +19 -19
- {osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/entry_points.txt +4 -0
- osism-0.20250823.0.dist-info/licenses/AUTHORS +1 -0
- osism-0.20250823.0.dist-info/pbr.json +1 -0
- osism-0.20250804.0.dist-info/licenses/AUTHORS +0 -1
- osism-0.20250804.0.dist-info/pbr.json +0 -1
- {osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/WHEEL +0 -0
- {osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/top_level.txt +0 -0
osism/api.py
CHANGED
@@ -10,7 +10,7 @@ from fastapi import FastAPI, Header, Request, Response, HTTPException, status
 from pydantic import BaseModel, Field
 from starlette.middleware.cors import CORSMiddleware
 
-from osism.tasks import reconciler
+from osism.tasks import reconciler, openstack
 from osism import utils
 from osism.services.listener import BaremetalEvents
 
@@ -109,6 +109,35 @@ class DeviceSearchResult(BaseModel):
     device: Optional[str] = Field(None, description="Device name if found")
 
 
+class BaremetalNode(BaseModel):
+    uuid: str = Field(..., description="Unique identifier of the node")
+    name: Optional[str] = Field(None, description="Name of the node")
+    power_state: Optional[str] = Field(None, description="Current power state")
+    provision_state: Optional[str] = Field(None, description="Current provision state")
+    maintenance: bool = Field(..., description="Whether node is in maintenance mode")
+    instance_uuid: Optional[str] = Field(
+        None, description="UUID of associated instance"
+    )
+    driver: Optional[str] = Field(None, description="Driver used for the node")
+    resource_class: Optional[str] = Field(
+        None, description="Resource class of the node"
+    )
+    properties: Dict[str, Any] = Field(
+        default_factory=dict, description="Node properties"
+    )
+    extra: Dict[str, Any] = Field(
+        default_factory=dict, description="Extra node information"
+    )
+    last_error: Optional[str] = Field(None, description="Last error message")
+    created_at: Optional[str] = Field(None, description="Creation timestamp")
+    updated_at: Optional[str] = Field(None, description="Last update timestamp")
+
+
+class BaremetalNodesResponse(BaseModel):
+    nodes: list[BaremetalNode] = Field(..., description="List of baremetal nodes")
+    count: int = Field(..., description="Total number of nodes")
+
+
 def find_device_by_identifier(identifier: str):
     """Find a device in NetBox by various identifiers."""
     if not utils.nb:
@@ -188,6 +217,31 @@ async def write_sink_events(request: Request) -> SinkResponse:
     )
 
 
+@app.get(
+    "/v1/baremetal/nodes", response_model=BaremetalNodesResponse, tags=["baremetal"]
+)
+async def get_baremetal_nodes_list() -> BaremetalNodesResponse:
+    """Get list of all baremetal nodes managed by Ironic.
+
+    Returns information similar to the 'baremetal list' command,
+    including node details, power state, provision state, and more.
+    """
+    try:
+        # Use the generalized function to get baremetal nodes
+        nodes_data = openstack.get_baremetal_nodes()
+
+        # Convert to response model
+        nodes = [BaremetalNode(**node) for node in nodes_data]
+
+        return BaremetalNodesResponse(nodes=nodes, count=len(nodes))
+    except Exception as e:
+        logger.error(f"Error retrieving baremetal nodes: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"Failed to retrieve baremetal nodes: {str(e)}",
+        )
+
+
 @app.post("/v1/notifications/baremetal", status_code=204, tags=["notifications"])
 async def notifications_baremetal(notification: NotificationBaremetal) -> None:
     """Handle baremetal notifications."""
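The new endpoint exposes the same fields the `baremetal list` command prints. A minimal client sketch follows; the host and port are assumptions that depend on how the osism API service is deployed, only the endpoint path and response shape come from the diff above:

    import requests

    # Hypothetical host/port; /v1/baremetal/nodes is the path added above.
    response = requests.get("http://localhost:8000/v1/baremetal/nodes", timeout=30)
    response.raise_for_status()
    payload = response.json()

    # BaremetalNodesResponse guarantees "nodes" and "count".
    for node in payload["nodes"]:
        print(node["uuid"], node["name"], node["power_state"], node["provision_state"])
    print(f"total: {payload['count']}")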
osism/commands/baremetal.py
CHANGED
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 from cliff.command import Command
+from argparse import BooleanOptionalAction
 
 import tempfile
 import os
@@ -533,3 +534,170 @@ class BaremetalPing(Command):
         except Exception as e:
             logger.error(f"Error during ping operation: {e}")
             return
+
+
+class BaremetalBurnIn(Command):
+    def get_parser(self, prog_name):
+        parser = super(BaremetalBurnIn, self).get_parser(prog_name)
+
+        parser.add_argument(
+            "name",
+            nargs="?",
+            type=str,
+            help="Run burn-in on given baremetal node when in provision state available",
+        )
+        parser.add_argument(
+            "--all",
+            default=False,
+            help="Run burn-in on all baremetal nodes in provision state available",
+            action="store_true",
+        )
+        parser.add_argument(
+            "--cpu",
+            default=True,
+            help="Enable CPU burn-in",
+            action=BooleanOptionalAction,
+        )
+        parser.add_argument(
+            "--memory",
+            default=True,
+            help="Enable memory burn-in",
+            action=BooleanOptionalAction,
+        )
+        parser.add_argument(
+            "--disk",
+            default=True,
+            help="Enable disk burn-in",
+            action=BooleanOptionalAction,
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        all_nodes = parsed_args.all
+        name = parsed_args.name
+
+        stressor = {}
+        stressor["cpu"] = parsed_args.cpu
+        stressor["memory"] = parsed_args.memory
+        stressor["disk"] = parsed_args.disk
+
+        if not all_nodes and not name:
+            logger.error("Please specify a node name or use --all")
+            return
+
+        clean_steps = []
+        for step, activated in stressor.items():
+            if activated:
+                clean_steps.append({"step": "burnin_" + step, "interface": "deploy"})
+        if not clean_steps:
+            logger.error(
+                f"Please specify at least one of {', '.join(stressor.keys())} for burn-in"
+            )
+            return
+
+        conn = get_cloud_connection()
+
+        if all_nodes:
+            burn_in_nodes = list(conn.baremetal.nodes(details=True))
+        else:
+            node = conn.baremetal.find_node(name, ignore_missing=True, details=True)
+            if not node:
+                logger.warning(f"Could not find node {name}")
+                return
+            burn_in_nodes = [node]
+
+        for node in burn_in_nodes:
+            if not node:
+                continue
+
+            if node.provision_state in ["available"]:
+                # NOTE: Burn-In is available in the "manageable" provision state, so we move the node into this state
+                try:
+                    node = conn.baremetal.set_node_provision_state(node.id, "manage")
+                    node = conn.baremetal.wait_for_nodes_provision_state(
+                        [node.id], "manageable"
+                    )[0]
+                except Exception as exc:
+                    logger.warning(
+                        f"Node {node.name} ({node.id}) could not be moved to manageable state: {exc}"
+                    )
+                    continue
+
+            if node.provision_state in ["manageable"]:
+                try:
+                    conn.baremetal.set_node_provision_state(
+                        node.id, "clean", clean_steps=clean_steps
+                    )
+                except Exception as exc:
+                    logger.warning(
+                        f"Burn-In of node {node.name} ({node.id}) failed: {exc}"
+                    )
+                    continue
+            else:
+                logger.warning(
+                    f"Node {node.name} ({node.id}) not in supported state! Provision state: {node.provision_state}, maintenance mode: {node['maintenance']}"
+                )
+                continue
+
+
+class BaremetalMaintenanceSet(Command):
+    def get_parser(self, prog_name):
+        parser = super(BaremetalMaintenanceSet, self).get_parser(prog_name)
+
+        parser.add_argument(
+            "name",
+            nargs="?",
+            type=str,
+            help="Set maintenance on given baremetal node",
+        )
+        parser.add_argument(
+            "--reason",
+            default=None,
+            type=str,
+            help="Reason for maintenance",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        name = parsed_args.name
+        reason = parsed_args.reason
+
+        conn = get_cloud_connection()
+        node = conn.baremetal.find_node(name, ignore_missing=True, details=True)
+        if not node:
+            logger.warning(f"Could not find node {name}")
+            return
+        try:
+            conn.baremetal.set_node_maintenance(node, reason=reason)
+        except Exception as exc:
+            logger.error(
+                f"Setting maintenance mode on node {node.name} ({node.id}) failed: {exc}"
+            )
+
+
+class BaremetalMaintenanceUnset(Command):
+    def get_parser(self, prog_name):
+        parser = super(BaremetalMaintenanceUnset, self).get_parser(prog_name)
+
+        parser.add_argument(
+            "name",
+            nargs="?",
+            type=str,
+            help="Unset maintenance on given baremetal node",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        name = parsed_args.name
+
+        conn = get_cloud_connection()
+        node = conn.baremetal.find_node(name, ignore_missing=True, details=True)
+        if not node:
+            logger.warning(f"Could not find node {name}")
+            return
+        try:
+            conn.baremetal.unset_node_maintenance(node)
+        except Exception as exc:
+            logger.error(
+                f"Unsetting maintenance mode on node {node.name} ({node.id}) failed: {exc}"
+            )
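BaremetalBurnIn relies on argparse.BooleanOptionalAction (standard library, Python 3.9+), which generates a paired --flag/--no-flag for each option. A standalone sketch of the behavior the three stressor flags inherit:

    from argparse import ArgumentParser, BooleanOptionalAction

    parser = ArgumentParser()
    parser.add_argument("--cpu", default=True, action=BooleanOptionalAction)

    print(parser.parse_args([]).cpu)            # True (the default)
    print(parser.parse_args(["--cpu"]).cpu)     # True (explicit)
    print(parser.parse_args(["--no-cpu"]).cpu)  # False (generated negative flag)

This is why the command can default all three burn-in stressors to enabled while still letting an operator disable individual ones with --no-cpu, --no-memory, or --no-disk.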
osism/commands/netbox.py
CHANGED
@@ -228,9 +228,9 @@ class Console(Command):
         if not os.path.exists(nbcli_file):
             try:
                 with open("/run/secrets/NETBOX_TOKEN", "r") as fp:
-                    token = fp.read().strip()
+                    token = str(fp.read().strip())
             except FileNotFoundError:
-                token =
+                token = ""
 
         url = os.environ.get("NETBOX_API", None)
 
osism/settings.py
CHANGED
@@ -24,7 +24,7 @@ REDIS_DB: int = int(os.getenv("REDIS_DB", "0"))
 
 
 NETBOX_URL = os.getenv("NETBOX_API", os.getenv("NETBOX_URL"))
-NETBOX_TOKEN = os.getenv("NETBOX_TOKEN"
+NETBOX_TOKEN = str(os.getenv("NETBOX_TOKEN") or read_secret("NETBOX_TOKEN") or "")
 IGNORE_SSL_ERRORS = os.getenv("IGNORE_SSL_ERRORS", "True") == "True"
 
 # 43200 seconds = 12 hours
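The rewritten NETBOX_TOKEN line resolves the token from the environment first, then from a secret file, then falls back to an empty string. A self-contained sketch of the same or-chain; the read_secret body below is a stand-in for osism's helper of the same name and is an assumption:

    import os

    def read_secret(name):
        # Stand-in for osism's read_secret: read a Docker-style secret file.
        try:
            with open(f"/run/secrets/{name}") as fp:
                return fp.read().strip()
        except FileNotFoundError:
            return None

    # None and "" are both falsy, so the chain skips unset sources and
    # always yields a string, never None.
    NETBOX_TOKEN = str(os.getenv("NETBOX_TOKEN") or read_secret("NETBOX_TOKEN") or "")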
osism/tasks/conductor/ironic.py
CHANGED
@@ -160,27 +160,36 @@ def sync_ironic(request_id, get_ironic_parameters, node_name=None, force_update=
 
     # Filter nodes by node_name if specified
     if node_name:
-        nodes = [node for node in nodes if node["
+        nodes = [node for node in nodes if node["name"] == node_name]
 
     for node in nodes:
         osism_utils.push_task_output(
-            request_id, f"Looking for {node['
+            request_id, f"Looking for {node['name']} in NetBox\n"
         )
-        if node["
+        if node["name"] not in device_names:
             if (
-                not node["
-                and node["
-
+                not node["instance_uuid"]
+                and node["provision_state"]
+                in ["enroll", "manageable", "available", "clean failed"]
+                and node["power_state"] in ["power off", None]
             ):
                 osism_utils.push_task_output(
                     request_id,
-                    f"Cleaning up baremetal node not found in NetBox: {node['
+                    f"Cleaning up baremetal node not found in NetBox: {node['name']}\n",
                 )
+                if node["provision_state"] == "clean failed":
+                    # NOTE: Move node to manageable to allow deletion
+                    node = openstack.baremetal_node_set_provision_state(
+                        node["uuid"], "manage"
+                    )
+                    node = openstack.baremetal_node_wait_for_nodes_provision_state(
+                        node["uuid"], "manageable"
+                    )
                 for port in openstack.baremetal_port_list(
-                    details=False, attributes=dict(node_uuid=node["
+                    details=False, attributes=dict(node_uuid=node["uuid"])
                 ):
                     openstack.baremetal_port_delete(port.id)
-                openstack.baremetal_node_delete(node["
+                openstack.baremetal_node_delete(node["uuid"])
             else:
                 osism_utils.push_task_output(
                     f"Cannot remove baremetal node because it is still provisioned or running: {node}"
@@ -218,13 +227,9 @@ def sync_ironic(request_id, get_ironic_parameters, node_name=None, force_update=
             node = openstack.baremetal_node_create(device.name, node_attributes)
         else:
             # NOTE: The listener service only reacts to changes in the baremetal node. Explicitly sync provision and power state in case updates were missed by the listener.
-
-
-
-            ):
-                netbox.set_provision_state(device.name, node["provision_state"])
-            if device.custom_fields["power_state"] != node["power_state"]:
-                netbox.set_power_state(device.name, node["power_state"])
+            # This sync is done unconditionally, because we do not know the state of secondary netboxes at this point
+            netbox.set_provision_state(device.name, node["provision_state"])
+            netbox.set_power_state(device.name, node["power_state"])
             # NOTE: Check whether the baremetal node needs to be updated
             node_updates = {}
             deep_compare(node_attributes, node, node_updates)
@@ -280,7 +285,7 @@ def sync_ironic(request_id, get_ironic_parameters, node_name=None, force_update=
             request_id,
             f"Validation of management interface successful for baremetal node for {device.name}\n",
         )
-        if node["provision_state"]
+        if node["provision_state"] in ["enroll", "clean failed"]:
            osism_utils.push_task_output(
                request_id,
                f"Transitioning baremetal node to manageable state for {device.name}\n",
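The new branch handles nodes stuck in "clean failed": Ironic does not allow deleting a node from that state, so the conductor first moves it to "manageable" and only then removes its ports and the node itself. A rough equivalent using openstacksdk directly, for orientation only (osism wraps these calls as Celery tasks; the node name and the exact port-filter keyword are assumptions here):

    import openstack

    conn = openstack.connect()
    node = conn.baremetal.find_node("node-001", ignore_missing=True, details=True)

    if node and node.provision_state == "clean failed":
        # Move the node to "manageable" so that deletion is allowed.
        conn.baremetal.set_node_provision_state(node.id, "manage")
        conn.baremetal.wait_for_nodes_provision_state([node.id], "manageable")

    if node:
        # Delete the node's ports first, then the node itself.
        for port in conn.baremetal.ports(node=node.id):
            conn.baremetal.delete_port(port.id)
        conn.baremetal.delete_node(node.id)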
osism/tasks/conductor/netbox.py
CHANGED
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: Apache-2.0
 
-
+import ipaddress
 import yaml
+from loguru import logger
 
 from osism import settings, utils
 from osism.tasks import netbox
@@ -309,3 +310,59 @@ def get_device_loopbacks(device):
     )
 
     return {"loopbacks": loopbacks}
+
+
+def get_device_interface_ips(device):
+    """Get IPv4 addresses assigned to device interfaces.
+
+    Args:
+        device: NetBox device object
+
+    Returns:
+        dict: Dictionary mapping interface names to their IPv4 addresses
+        {
+            'interface_name': 'ip_address/prefix_length',
+            ...
+        }
+    """
+    interface_ips = {}
+
+    try:
+        # Get all interfaces for the device
+        interfaces = list(utils.nb.dcim.interfaces.filter(device_id=device.id))
+
+        for interface in interfaces:
+            # Skip management interfaces and virtual interfaces for now
+            if interface.mgmt_only or (
+                hasattr(interface, "type")
+                and interface.type
+                and interface.type.value == "virtual"
+            ):
+                continue
+
+            # Get IP addresses assigned to this interface
+            ip_addresses = utils.nb.ipam.ip_addresses.filter(
+                assigned_object_id=interface.id,
+            )
+
+            for ip_addr in ip_addresses:
+                if ip_addr.address:
+                    # Check if it's an IPv4 address
+                    try:
+                        ip_obj = ipaddress.ip_interface(ip_addr.address)
+                        if ip_obj.version == 4:
+                            interface_ips[interface.name] = ip_addr.address
+                            logger.debug(
+                                f"Found IPv4 address {ip_addr.address} on interface {interface.name} of device {device.name}"
+                            )
+                            break  # Only use the first IPv4 address found
+                    except (ValueError, ipaddress.AddressValueError):
+                        # Skip invalid IP addresses
+                        continue
+
+    except Exception as e:
+        logger.warning(
+            f"Could not get interface IP addresses for device {device.name}: {e}"
+        )
+
+    return interface_ips
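get_device_interface_ips leans on the standard-library ipaddress module to classify addresses. A quick illustration of why ip_interface is the right parser here: it accepts the "address/prefix" strings NetBox stores and exposes the version the filter needs:

    import ipaddress

    for candidate in ["192.0.2.10/31", "2001:db8::1/64", "not-an-ip"]:
        try:
            iface = ipaddress.ip_interface(candidate)
            print(candidate, "->", f"IPv{iface.version}", iface.ip, iface.network)
        except ValueError:
            # Mirrors the except branch above: invalid entries are skipped.
            print(candidate, "-> skipped (invalid)")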
osism/tasks/conductor/sonic/config_generator.py
CHANGED
@@ -11,6 +11,7 @@ from loguru import logger
 
 from osism import utils
 from osism.tasks.conductor.netbox import (
+    get_device_interface_ips,
     get_device_loopbacks,
     get_device_oob_ip,
     get_device_vlans,
@@ -29,6 +30,7 @@ from .interface import (
 from .connections import (
     get_connected_interfaces,
     get_connected_device_for_sonic_interface,
+    get_connected_interface_ipv4_address,
 )
 from .cache import get_cached_device_interfaces
 
@@ -73,6 +75,12 @@ def generate_sonic_config(device, hwsku, device_as_mapping=None):
     # Get Loopback configuration from NetBox
     loopback_info = get_device_loopbacks(device)
 
+    # Get interface IP addresses from NetBox
+    interface_ips = get_device_interface_ips(device)
+
+    # Get IPv4 addresses from transfer role prefixes
+    transfer_ips = _get_transfer_role_ipv4_addresses(device)
+
     # Get breakout port configuration from NetBox
     breakout_info = detect_breakout_ports(device)
 
@@ -186,7 +194,14 @@ def generate_sonic_config(device, hwsku, device_as_mapping=None):
     )
 
     # Add interface configurations
-    _add_interface_configurations(
+    _add_interface_configurations(
+        config,
+        connected_interfaces,
+        portchannel_info,
+        interface_ips,
+        netbox_interfaces,
+        device,
+    )
 
     # Add BGP configurations
     _add_bgp_configurations(
@@ -196,6 +211,10 @@ def generate_sonic_config(device, hwsku, device_as_mapping=None):
         portchannel_info,
         device,
         device_as_mapping,
+        interface_ips,
+        netbox_interfaces,
+        transfer_ips,
+        utils.nb,
     )
 
     # Add NTP server configuration (device-specific)
@@ -531,7 +550,14 @@ def _add_tagged_vlans_to_ports(config, vlan_info, netbox_interfaces, device):
             config["PORT"][port_name]["tagged_vlans"] = tagged_vlans
 
 
-def _add_interface_configurations(
+def _add_interface_configurations(
+    config,
+    connected_interfaces,
+    portchannel_info,
+    interface_ips,
+    netbox_interfaces,
+    device,
+):
     """Add INTERFACE configuration for connected interfaces."""
     for port_name in config["PORT"]:
         # Check if this port is in the connected interfaces set and not a port channel member
@@ -539,8 +565,148 @@ def _add_interface_configurations(config, connected_interfaces, portchannel_info
             port_name in connected_interfaces
             and port_name not in portchannel_info["member_mapping"]
         ):
-            #
-
+            # Find the NetBox interface name for this SONiC port
+            netbox_interface_name = None
+            if port_name in netbox_interfaces:
+                netbox_interface_name = netbox_interfaces[port_name]["netbox_name"]
+
+            # Check if this interface has an IPv4 address assigned
+            ipv4_address = None
+            if netbox_interface_name and netbox_interface_name in interface_ips:
+                ipv4_address = interface_ips[netbox_interface_name]
+                logger.info(
+                    f"Interface {port_name} ({netbox_interface_name}) has IPv4 address: {ipv4_address}"
+                )
+
+            if ipv4_address:
+                # If IPv4 address is available, configure the interface with it
+                # Add base interface entry (similar to VLAN_INTERFACE and LOOPBACK_INTERFACE patterns)
+                config["INTERFACE"][port_name] = {}
+                # Add IP address suffixed entry with scope and family parameters
+                config["INTERFACE"][f"{port_name}|{ipv4_address}"] = {
+                    "scope": "global",
+                    "family": "IPv4",
+                }
+                logger.info(
+                    f"Configured interface {port_name} with IPv4 address {ipv4_address}"
+                )
+            else:
+                # Add interface to INTERFACE section with ipv6_use_link_local_only enabled
+                config["INTERFACE"][port_name] = {"ipv6_use_link_local_only": "enable"}
+                logger.debug(
+                    f"Configured interface {port_name} with IPv6 link-local only"
+                )
+
+
+def _get_transfer_role_ipv4_addresses(device):
+    """Get IPv4 addresses from IP prefixes with 'transfer' role.
+
+    Args:
+        device: NetBox device object
+
+    Returns:
+        dict: Dictionary mapping interface names to their transfer role IPv4 addresses
+        {
+            'interface_name': 'ip_address/prefix_length',
+            ...
+        }
+    """
+    transfer_ips = {}
+
+    try:
+        # Get all interfaces for the device
+        interfaces = list(utils.nb.dcim.interfaces.filter(device_id=device.id))
+
+        for interface in interfaces:
+            # Skip management interfaces and virtual interfaces
+            if interface.mgmt_only or (
+                hasattr(interface, "type")
+                and interface.type
+                and interface.type.value == "virtual"
+            ):
+                continue
+
+            # Get IP addresses assigned to this interface
+            ip_addresses = utils.nb.ipam.ip_addresses.filter(
+                assigned_object_id=interface.id,
+            )
+
+            for ip_addr in ip_addresses:
+                if ip_addr.address:
+                    try:
+                        ip_obj = ipaddress.ip_interface(ip_addr.address)
+                        if ip_obj.version == 4:
+                            # Check if this IP belongs to a prefix with 'transfer' role
+                            # Query for the prefix this IP belongs to
+                            prefixes = utils.nb.ipam.prefixes.filter(
+                                contains=str(ip_obj.ip)
+                            )
+
+                            for prefix in prefixes:
+                                # Check if prefix has role and it's 'transfer'
+                                if hasattr(prefix, "role") and prefix.role:
+                                    if prefix.role.slug == "transfer":
+                                        transfer_ips[interface.name] = ip_addr.address
+                                        logger.debug(
+                                            f"Found transfer role IPv4 {ip_addr.address} on interface {interface.name} of device {device.name}"
+                                        )
+                                        break
+
+                            # Break after first IPv4 found (transfer or not)
+                            if interface.name in transfer_ips:
+                                break
+                    except (ValueError, ipaddress.AddressValueError):
+                        # Skip invalid IP addresses
+                        continue
+
+    except Exception as e:
+        logger.warning(
+            f"Failed to get transfer role IPv4 addresses for device {device.name}: {e}"
+        )
+
+    return transfer_ips
+
+
+def _has_direct_ipv4_address(port_name, interface_ips, netbox_interfaces):
+    """Check if an interface has a direct IPv4 address assigned.
+
+    Args:
+        port_name: SONiC interface name (e.g., "Ethernet0")
+        interface_ips: Dict mapping NetBox interface names to IPv4 addresses
+        netbox_interfaces: Dict mapping SONiC names to NetBox interface info
+
+    Returns:
+        bool: True if interface has a direct IPv4 address, False otherwise
+    """
+    if not interface_ips or not netbox_interfaces:
+        return False
+
+    if port_name in netbox_interfaces:
+        netbox_interface_name = netbox_interfaces[port_name]["netbox_name"]
+        return netbox_interface_name in interface_ips
+
+    return False
+
+
+def _has_transfer_role_ipv4(port_name, transfer_ips, netbox_interfaces):
+    """Check if an interface has an IPv4 from a transfer role prefix.
+
+    Args:
+        port_name: SONiC interface name (e.g., "Ethernet0")
+        transfer_ips: Dict mapping NetBox interface names to transfer role IPv4 addresses
+        netbox_interfaces: Dict mapping SONiC names to NetBox interface info
+
+    Returns:
+        bool: True if interface has a transfer role IPv4 address, False otherwise
+    """
+    if not transfer_ips or not netbox_interfaces:
+        return False
+
+    if port_name in netbox_interfaces:
+        netbox_interface_name = netbox_interfaces[port_name]["netbox_name"]
+        return netbox_interface_name in transfer_ips
+
+    return False
 
 
 def _add_bgp_configurations(
@@ -550,52 +716,189 @@ def _add_bgp_configurations(
     portchannel_info,
     device,
     device_as_mapping=None,
+    interface_ips=None,
+    netbox_interfaces=None,
+    transfer_ips=None,
+    netbox=None,
 ):
-    """Add BGP configurations.
+    """Add BGP configurations.
+
+    Args:
+        config: Configuration dictionary to update
+        connected_interfaces: Set of connected interface names
+        connected_portchannels: Set of connected port channel names
+        portchannel_info: Port channel membership information
+        device: NetBox device object
+        device_as_mapping: Mapping of device names to AS numbers
+        interface_ips: Dict of direct IPv4 addresses on interfaces
+        netbox_interfaces: Dict mapping SONiC names to NetBox interface info
+        transfer_ips: Dict of IPv4 addresses from transfer role prefixes
+        netbox: NetBox API client for querying connected interface IPs
+    """
     # Add BGP_NEIGHBOR_AF configuration for connected interfaces
     for port_name in config["PORT"]:
+        has_direct_ipv4 = _has_direct_ipv4_address(
+            port_name, interface_ips, netbox_interfaces
+        )
+        has_transfer_ipv4 = _has_transfer_role_ipv4(
+            port_name, transfer_ips, netbox_interfaces
+        )
+
         if (
             port_name in connected_interfaces
            and port_name not in portchannel_info["member_mapping"]
        ):
-
-
-
-
+            # Include interfaces with transfer role IPv4 or no direct IPv4
+            if has_transfer_ipv4 or not has_direct_ipv4:
+                # Try to get the IPv4 address of the connected endpoint interface
+                connected_ipv4 = None
+                if netbox:
+                    connected_ipv4 = get_connected_interface_ipv4_address(
+                        device, port_name, netbox
+                    )
+
+                # For BGP_NEIGHBOR_AF, always use interface name like IPv6 does
+                neighbor_id = port_name
+
+                ipv4_key = f"default|{neighbor_id}|ipv4_unicast"
+                config["BGP_NEIGHBOR_AF"][ipv4_key] = {"admin_status": "true"}
+
+                # Only add ipv6_unicast if v6only would be true (no transfer role IPv4)
+                if not has_transfer_ipv4:
+                    ipv6_key = f"default|{neighbor_id}|ipv6_unicast"
+                    config["BGP_NEIGHBOR_AF"][ipv6_key] = {"admin_status": "true"}
+                    logger.debug(
+                        f"Added BGP_NEIGHBOR_AF with ipv4_unicast and ipv6_unicast for interface {port_name} (no direct IPv4)"
+                    )
+                else:
+                    logger.debug(
+                        f"Added BGP_NEIGHBOR_AF with ipv4_unicast only for interface {port_name} (transfer role IPv4, v6only=false)"
+                    )
+            elif has_direct_ipv4 and not has_transfer_ipv4:
+                logger.info(
+                    f"Excluding interface {port_name} from BGP detection (has direct IPv4 address, not transfer role)"
+                )
 
     # Add BGP_NEIGHBOR_AF configuration for connected port channels
     for pc_name in connected_portchannels:
-
-
+        # Try to get the IPv4 address of the connected endpoint interface for port channel
+        connected_ipv4 = None
+        if netbox:
+            connected_ipv4 = get_connected_interface_ipv4_address(
+                device, pc_name, netbox
+            )
+
+        # For BGP_NEIGHBOR_AF, always use port channel name like interfaces
+        neighbor_id = pc_name
+
+        ipv4_key = f"default|{neighbor_id}|ipv4_unicast"
+        ipv6_key = f"default|{neighbor_id}|ipv6_unicast"
        config["BGP_NEIGHBOR_AF"][ipv4_key] = {"admin_status": "true"}
        config["BGP_NEIGHBOR_AF"][ipv6_key] = {"admin_status": "true"}
 
     # Add BGP_NEIGHBOR configuration for connected interfaces
     for port_name in config["PORT"]:
+        has_direct_ipv4 = _has_direct_ipv4_address(
+            port_name, interface_ips, netbox_interfaces
+        )
+        has_transfer_ipv4 = _has_transfer_role_ipv4(
+            port_name, transfer_ips, netbox_interfaces
+        )
+
         if (
             port_name in connected_interfaces
             and port_name not in portchannel_info["member_mapping"]
         ):
-
+            # Include interfaces with transfer role IPv4 or no direct IPv4
+            if has_transfer_ipv4 or not has_direct_ipv4:
+                # Try to get the IPv4 address of the connected endpoint interface
+                connected_ipv4 = None
+                if netbox:
+                    connected_ipv4 = get_connected_interface_ipv4_address(
+                        device, port_name, netbox
+                    )
 
-
-
-
-
-
-
-
+                # Use the connected interface's IPv4 address if available, otherwise use interface name
+                if connected_ipv4:
+                    neighbor_key = f"default|{connected_ipv4}"
+                    logger.debug(
+                        f"Using connected interface IPv4 address {connected_ipv4} for BGP neighbor on {port_name}"
+                    )
+                else:
+                    neighbor_key = f"default|{port_name}"
+                    logger.debug(
+                        f"No connected interface IPv4 found, using interface name {port_name} for BGP neighbor"
+                    )
+
+                # Determine peer_type based on connected device AS
+                peer_type = "external"  # Default
+                connected_device = get_connected_device_for_sonic_interface(
+                    device, port_name
                 )
+                if connected_device:
+                    peer_type = _determine_peer_type(
+                        device, connected_device, device_as_mapping
+                    )
 
-
-
-
+                # Set v6only based on whether interface has transfer role IPv4
+                # - Transfer role IPv4: v6only=false (dual-stack BGP)
+                # - No direct IPv4: v6only=true (IPv6-only BGP)
+                bgp_neighbor_config = {
+                    "peer_type": peer_type,
+                    "v6only": "false" if has_transfer_ipv4 else "true",
+                }
+
+                # If using IP address as key, also store the local address
+                if connected_ipv4:
+                    # Get the local interface IPv4 address
+                    local_ipv4 = None
+                    if port_name in netbox_interfaces:
+                        netbox_interface_name = netbox_interfaces[port_name][
+                            "netbox_name"
+                        ]
+                        if netbox_interface_name in interface_ips:
+                            local_ipv4 = interface_ips[netbox_interface_name].split(
+                                "/"
+                            )[0]
+                        elif netbox_interface_name in transfer_ips:
+                            local_ipv4 = transfer_ips[netbox_interface_name].split("/")[
+                                0
+                            ]
+
+                    if local_ipv4:
+                        bgp_neighbor_config["local_addr"] = local_ipv4
+
+                config["BGP_NEIGHBOR"][neighbor_key] = bgp_neighbor_config
+
+                if has_transfer_ipv4:
+                    logger.debug(
+                        f"Added BGP_NEIGHBOR for interface {port_name} (transfer role IPv4, v6only=false)"
+                    )
+                else:
+                    logger.debug(
+                        f"Added BGP_NEIGHBOR for interface {port_name} (no direct IPv4, v6only=true)"
+                    )
 
     # Add BGP_NEIGHBOR configuration for connected port channels
     for pc_name in connected_portchannels:
-
+        # Try to get the IPv4 address of the connected endpoint interface for port channel
+        connected_ipv4 = None
+        if netbox:
+            connected_ipv4 = get_connected_interface_ipv4_address(
+                device, pc_name, netbox
+            )
+
+        # Use the connected interface's IPv4 address if available, otherwise use port channel name
+        if connected_ipv4:
+            neighbor_key = f"default|{connected_ipv4}"
+            logger.debug(
+                f"Using connected interface IPv4 address {connected_ipv4} for BGP neighbor on {pc_name}"
+            )
+        else:
+            neighbor_key = f"default|{pc_name}"
+            logger.debug(
+                f"No connected interface IPv4 found, using port channel name {pc_name} for BGP neighbor"
+            )
 
         # Determine peer_type based on connected device AS
         peer_type = "external"  # Default
@@ -605,11 +908,23 @@ def _add_bgp_configurations(
             device, connected_device, device_as_mapping
         )
 
-
+        bgp_neighbor_config = {
            "peer_type": peer_type,
            "v6only": "true",
        }
 
+        # If using IP address as key, also store the local address
+        if connected_ipv4:
+            # For port channels, get the local IPv4 address from interface IPs
+            # Note: Port channels don't have direct IP assignments in NetBox,
+            # so we use the connected interface IP logic
+            local_ipv4 = None
+            # Port channels don't have NetBox interface entries,
+            # so we skip local_addr for port channels for now
+            # TODO: Implement port channel local address lookup if needed
+
+        config["BGP_NEIGHBOR"][neighbor_key] = bgp_neighbor_config
+
 
 def _get_connected_device_for_interface(device, interface_name):
     """Get the connected device for a given interface name.
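Taken together, the interface and BGP changes emit config entries of the following shape. A sketch with illustrative interface names and RFC 5737 addresses; the key formats and field values are taken from the code above:

    config = {"INTERFACE": {}, "BGP_NEIGHBOR": {}, "BGP_NEIGHBOR_AF": {}}

    # Interface with a directly assigned IPv4 address:
    config["INTERFACE"]["Ethernet0"] = {}
    config["INTERFACE"]["Ethernet0|192.0.2.1/31"] = {"scope": "global", "family": "IPv4"}

    # Interface without an IPv4 address falls back to link-local IPv6:
    config["INTERFACE"]["Ethernet4"] = {"ipv6_use_link_local_only": "enable"}

    # Transfer-role case: neighbor keyed by the peer's IPv4, dual-stack (v6only=false):
    config["BGP_NEIGHBOR"]["default|192.0.2.0"] = {
        "peer_type": "external",
        "v6only": "false",
        "local_addr": "192.0.2.1",
    }
    config["BGP_NEIGHBOR_AF"]["default|Ethernet0|ipv4_unicast"] = {"admin_status": "true"}

    # No-IPv4 case: neighbor keyed by interface name, IPv6-only (v6only=true):
    config["BGP_NEIGHBOR"]["default|Ethernet4"] = {"peer_type": "external", "v6only": "true"}
    config["BGP_NEIGHBOR_AF"]["default|Ethernet4|ipv4_unicast"] = {"admin_status": "true"}
    config["BGP_NEIGHBOR_AF"]["default|Ethernet4|ipv6_unicast"] = {"admin_status": "true"}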
osism/tasks/conductor/sonic/connections.py
CHANGED
@@ -387,3 +387,126 @@ def get_device_bgp_neighbors_via_loopback(
     logger.warning(f"Could not process BGP neighbors for device {device.name}: {e}")
 
     return bgp_neighbors
+
+
+def get_connected_interface_ipv4_address(device, sonic_port_name, netbox):
+    """
+    Get the IPv4 address(es) of the connected endpoint interface for a given SONiC port.
+    Checks for direct IP addresses first, then for FHRP VIP addresses.
+
+    Args:
+        device: The SONiC device
+        sonic_port_name: The SONiC port name
+        netbox: The NetBox API client
+
+    Returns:
+        - For direct IP: The IPv4 address string of the connected interface
+        - For FHRP VIP: The first VIP address found (if multiple VIPs exist, logs all but returns first)
+        - None if no addresses found
+    """
+    try:
+        interface = netbox.dcim.interfaces.get(
+            device_id=device.id, name=sonic_port_name
+        )
+        if not interface:
+            return None
+
+        # Check if interface has connected_endpoints using the modern API
+        if not (
+            hasattr(interface, "connected_endpoints") and interface.connected_endpoints
+        ):
+            return None
+
+        # Ensure connected_endpoints_reachable is True
+        if not getattr(interface, "connected_endpoints_reachable", False):
+            return None
+
+        # Process each connected endpoint to find the first valid interface
+        connected_interface = None
+        for endpoint in interface.connected_endpoints:
+            if hasattr(endpoint, "id"):
+                connected_interface = endpoint
+                break
+
+        if not connected_interface:
+            return None
+
+        # First, try to get direct IPv4 addresses assigned to the connected interface
+        ip_addresses = netbox.ipam.ip_addresses.filter(
+            assigned_object_id=connected_interface.id,
+        )
+
+        for ip_address in ip_addresses:
+            # Check if it's an IPv4 address
+            if "/" in str(ip_address.address):
+                address = str(ip_address.address).split("/")[0]
+                if "." in address:  # IPv4 address
+                    logger.debug(
+                        f"Found direct IPv4 address {address} on connected interface "
+                        f"{connected_interface.name} for port {sonic_port_name}"
+                    )
+                    return address
+
+        # If no direct IP found, check for FHRP group membership and VIP addresses
+        logger.debug(
+            f"No direct IPv4 found on {connected_interface.name}, checking for FHRP VIP addresses"
+        )
+
+        # Get FHRP group assignments for the connected interface
+        fhrp_assignments = netbox.ipam.fhrp_group_assignments.filter(
+            interface_type="dcim.interface", interface_id=connected_interface.id
+        )
+
+        # Get all VIP addresses once to avoid repeated API calls
+        try:
+            all_vip_addresses = netbox.ipam.ip_addresses.filter(role="vip")
+        except Exception as vip_e:
+            logger.debug(f"Could not query VIP addresses: {vip_e}")
+            all_vip_addresses = []
+
+        # Collect all VIP IPv4 addresses from all FHRP groups this interface belongs to
+        vip_addresses_found = []
+
+        for assignment in fhrp_assignments:
+            if not assignment.group:
+                continue
+
+            # Find VIP addresses assigned to this specific FHRP group
+            for vip in all_vip_addresses:
+                # Check if this VIP is assigned to the current FHRP group
+                if (
+                    hasattr(vip, "assigned_object_type")
+                    and vip.assigned_object_type == "ipam.fhrpgroup"
+                    and hasattr(vip, "assigned_object_id")
+                    and vip.assigned_object_id == assignment.group.id
+                ):
+                    # Check if it's an IPv4 address
+                    if "/" in str(vip.address):
+                        address = str(vip.address).split("/")[0]
+                        if "." in address:  # IPv4 address
+                            vip_addresses_found.append(address)
+                            logger.debug(
+                                f"Found FHRP VIP address {address} for connected interface "
+                                f"{connected_interface.name} (FHRP group: {assignment.group.name or assignment.group.id}) "
+                                f"for port {sonic_port_name}"
+                            )
+
+        # Return the first VIP address found (for BGP neighbor compatibility)
+        if vip_addresses_found:
+            logger.debug(
+                f"Found {len(vip_addresses_found)} VIP addresses for port {sonic_port_name}: {vip_addresses_found}"
+            )
+            logger.debug(f"Returning first VIP address: {vip_addresses_found[0]}")
+            return vip_addresses_found[0]
+
+        logger.debug(
+            f"No IPv4 address (direct or FHRP VIP) found on connected interface "
+            f"{connected_interface.name} for port {sonic_port_name}"
+        )
+        return None
+
+    except Exception as e:
+        logger.warning(
+            f"Could not get connected interface IPv4 for port {sonic_port_name}: {e}"
+        )
+        return None
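A hedged usage sketch for the new helper: the device name is hypothetical, and utils.nb is osism's pynetbox client instance:

    from osism import utils
    from osism.tasks.conductor.sonic.connections import (
        get_connected_interface_ipv4_address,
    )

    device = utils.nb.dcim.devices.get(name="leaf-01")  # hypothetical device name
    addr = get_connected_interface_ipv4_address(device, "Ethernet0", utils.nb)
    if addr:
        print(f"BGP neighbor will be keyed as default|{addr}")
    else:
        print("no direct IP or FHRP VIP found; falling back to the interface name")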
osism/tasks/conductor/sonic/interface.py
CHANGED
@@ -697,7 +697,9 @@ def detect_breakout_ports(device):
         )
 
         # Calculate breakout mode
-        if interface_speed ==
+        if interface_speed == 10000 and num_subports == 4:
+            brkout_mode = "4x10G"
+        elif interface_speed == 25000 and num_subports == 4:
             brkout_mode = "4x25G"
         elif interface_speed == 50000 and num_subports == 4:
             brkout_mode = "4x50G"
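The breakout chain now covers 10G in addition to 25G and 50G. A table-driven equivalent of the if/elif ladder, using the same numeric speed values the code compares against:

    BREAKOUT_MODES = {
        (10000, 4): "4x10G",
        (25000, 4): "4x25G",
        (50000, 4): "4x50G",
    }

    def breakout_mode(interface_speed, num_subports):
        # Returns None for combinations the ladder does not handle.
        return BREAKOUT_MODES.get((interface_speed, num_subports))

    print(breakout_mode(10000, 4))  # 4x10G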
osism/tasks/openstack.py
CHANGED
@@ -65,23 +65,43 @@ def baremetal_node_show(self, node_id_or_name, ignore_missing=False):
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_list")
 def baremetal_node_list(self):
     conn = utils.get_openstack_connection()
-
-    result
+    result = conn.baremetal.nodes()
+    return list(result)
 
-    # Simulate the output of the OpenStack CLI with -f json and without --long
-    for node in nodes:
-        result.append(
-            {
-                "UUID": node.id,
-                "Name": node.name,
-                "Instance UUID": node.instance_id,
-                "Power State": node.power_state,
-                "Provisioning State": node.provision_state,
-                "Maintenance": node.is_maintenance,
-            }
-        )
 
-
+def get_baremetal_nodes():
+    """Get all baremetal nodes with their details.
+
+    This is a generalized function that can be used by both
+    CLI commands and API endpoints to retrieve baremetal node information.
+
+    Returns:
+        list: List of dictionaries containing node information
+    """
+    conn = utils.get_openstack_connection()
+    nodes = conn.baremetal.nodes(details=True)
+
+    # Convert generator to list and extract relevant fields
+    node_list = []
+    for node in nodes:
+        node_info = {
+            "uuid": node.get("uuid"),
+            "name": node.get("name"),
+            "power_state": node.get("power_state"),
+            "provision_state": node.get("provision_state"),
+            "maintenance": node.get("maintenance"),
+            "instance_uuid": node.get("instance_uuid"),
+            "driver": node.get("driver"),
+            "resource_class": node.get("resource_class"),
+            "properties": node.get("properties", {}),
+            "extra": node.get("extra", {}),
+            "last_error": node.get("last_error"),
+            "created_at": node.get("created_at"),
+            "updated_at": node.get("updated_at"),
+        }
+        node_list.append(node_info)
+
+    return node_list
 
 
 @app.task(bind=True, name="osism.tasks.openstack.baremetal_node_validate")
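Because get_baremetal_nodes is a plain function rather than a Celery task, the API endpoint and CLI code can call it in-process instead of going through the task queue. A sketch, assuming OpenStack credentials are already configured for utils.get_openstack_connection:

    from osism.tasks import openstack

    for node in openstack.get_baremetal_nodes():
        flag = "MAINT" if node["maintenance"] else "ok"
        print(f'{node["uuid"]} {node["name"]} {node["provision_state"]} [{flag}]')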
osism/utils/__init__.py
CHANGED
@@ -73,7 +73,7 @@ try:
     )
     if (
         "NETBOX_TOKEN" not in secondary_nb_settings
-        or not secondary_nb_settings["NETBOX_TOKEN"]
+        or not str(secondary_nb_settings["NETBOX_TOKEN"]).strip()
     ):
         raise ValueError(
             "All NETBOX_TOKEN values in the elements of setting NETBOX_SECONDARIES need to be valid NetBox tokens"
@@ -82,7 +82,7 @@ try:
         secondary_nb_list.append(
             get_netbox_connection(
                 secondary_nb_settings["NETBOX_URL"],
-                secondary_nb_settings["NETBOX_TOKEN"],
+                str(secondary_nb_settings["NETBOX_TOKEN"]),
                 secondary_nb_settings.get("IGNORE_SSL_ERRORS", True),
             )
         )
{osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: osism
-Version: 0.20250804.0
+Version: 0.20250823.0
 Summary: OSISM manager interface
 Home-page: https://github.com/osism/python-osism
 Author: OSISM GmbH
@@ -28,20 +28,20 @@ Requires-Dist: Jinja2==3.1.6
 Requires-Dist: PyYAML==6.0.2
 Requires-Dist: ara==1.7.2
 Requires-Dist: celery[redis]==5.5.3
-Requires-Dist: cliff==4.
-Requires-Dist: deepdiff==8.
+Requires-Dist: cliff==4.11.0
+Requires-Dist: deepdiff==8.6.0
 Requires-Dist: docker==7.1.0
 Requires-Dist: dtrack-auditor==1.5.0
 Requires-Dist: fastapi==0.116.1
 Requires-Dist: flower==2.0.1
 Requires-Dist: hiredis==3.2.1
 Requires-Dist: jc==1.25.5
-Requires-Dist: keystoneauth1==5.
+Requires-Dist: keystoneauth1==5.12.0
 Requires-Dist: kombu==5.5.4
 Requires-Dist: kubernetes==33.1.0
 Requires-Dist: loguru==0.7.3
 Requires-Dist: nbcli==0.10.0.dev2
-Requires-Dist: openstacksdk==4.
+Requires-Dist: openstacksdk==4.7.0
 Requires-Dist: paramiko==3.5.1
 Requires-Dist: pottery==3.0.1
 Requires-Dist: prompt-toolkit==3.0.51
@@ -50,7 +50,7 @@ Requires-Dist: pytest-testinfra==10.2.2
 Requires-Dist: python-dateutil==2.9.0.post0
 Requires-Dist: setuptools==80.9.0
 Requires-Dist: sqlmodel==0.0.24
-Requires-Dist: sushy==5.7.
+Requires-Dist: sushy==5.7.1
 Requires-Dist: tabulate==0.9.0
 Requires-Dist: transitions==0.9.3
 Requires-Dist: uvicorn[standard]==0.35.0
{osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
 osism/__init__.py,sha256=1UiNTBus0V0f2AbZQzAtVtu6zkfCCrw0OTq--NwFAqY,341
 osism/__main__.py,sha256=ILe4gu61xEISiBsxanqTQIdSkV-YhpZXTRlguCYyssk,141
-osism/api.py,sha256=
+osism/api.py,sha256=hhRgBswe3Tem6eTMor54PttNhBhAts2AEDBsxQfqRyU,13363
 osism/main.py,sha256=Dt2-9sLXcS-Ny4DAz7hrha-KRc7zd7BFUTRdfs_X8z4,893
-osism/settings.py,sha256=
+osism/settings.py,sha256=VZT1muZVYWM5Ov1eFRC7a4ZGYIdI2AFmudCm0wZ1C2Q,1898
 osism/commands/__init__.py,sha256=Ag4wX_DCgXRdoLn6t069jqb3DdRylsX2nyYkiyCx4uk,456
 osism/commands/apply.py,sha256=GWUccZAXlgkPYqylrCmdWcj8FCkDsPEipIIG937MeII,16833
-osism/commands/baremetal.py,sha256=
+osism/commands/baremetal.py,sha256=TeXwg4lYfxel0YkWC3z8bv9qTsJQmsI2QD6u1K4vhIM,24821
 osism/commands/compose.py,sha256=76HL9wzTJ7bFPhZk-uyfWq0n6Z74lOHn4RE0zzkHgYE,1241
 osism/commands/compute.py,sha256=cgqXWJa5wAvn-7e3FWCgX6hie_aK0yrKRkcNzjLXwDY,25799
 osism/commands/configuration.py,sha256=sPe8b0dVKFRbr30xoeVdAnHbGwCwgUh0xa_Vzv5pSQQ,954
@@ -14,7 +14,7 @@ osism/commands/container.py,sha256=jHk5A0PXBzHGIm-1d5HQZI_POANAq7An1lZGRbqBvr0,1
 osism/commands/get.py,sha256=ryytjtXWmlMV0NucP5tGkMZu0nIlC4xVtjRk4iMZ06c,8967
 osism/commands/log.py,sha256=QnnTTNiAoa8oj4kDOcggh0QrRAD6onxcEpLXBy7CvDg,4113
 osism/commands/manage.py,sha256=FaO9dbYjNHYanS98-zC498bx26oU8E3loxCczH9mfKI,12751
-osism/commands/netbox.py,sha256=
+osism/commands/netbox.py,sha256=gqXet5jHIRKFtHkbd6pQ0071jHe4r5c7DFnZWkT_lNQ,8275
 osism/commands/noset.py,sha256=7zDFuFMyNpo7DUOKcNiYV8nodtdMOYFp5LDPcuJhlZ8,1481
 osism/commands/reconciler.py,sha256=ubQfX8j13s3NuMKnT0Lt6O-szf7Z1V02AfsMQFHmO74,2209
 osism/commands/redfish.py,sha256=oBfxd5UBX4ED8XulEuIYziIYQqTvUKpKfcdGyg_AoiI,8431
@@ -42,31 +42,31 @@ osism/tasks/conductor.py,sha256=WBLsoPtr0iGUzRGERs0Xt7CMYrnHQVEwNV9qXBssI3s,274
 osism/tasks/kolla.py,sha256=wJQpWn_01iWLkr7l7T7RNrQGfRgsgmYi4WQlTmNGvew,618
 osism/tasks/kubernetes.py,sha256=VzXq_VrYU_CLm4cOruqnE3Kq2ydfO9glZ3p0bp3OYoc,625
 osism/tasks/netbox.py,sha256=QGQGz3s0V8WvPvhEJWwo0H24aLFaZrSl-voN-axzRwY,5846
-osism/tasks/openstack.py,sha256=
+osism/tasks/openstack.py,sha256=VeNfEcv4JHx_oJxpAb2QO2adlk--hlSqo_2iVQbOtpE,7142
 osism/tasks/reconciler.py,sha256=PnGWfvfmomzbgddvyCdxul-z5ZLXxWAmrQyRCN874-s,1958
 osism/tasks/conductor/__init__.py,sha256=eAiaM69sVbTTDam7gCLyjF7wBCt7rd__pRFu7VdY-f8,1930
 osism/tasks/conductor/config.py,sha256=n1H9_8DY90p5E4mygzKyJUl8G3WdDuGHFTp-SrmZmgU,4543
-osism/tasks/conductor/ironic.py,sha256=
-osism/tasks/conductor/netbox.py,sha256=
+osism/tasks/conductor/ironic.py,sha256=sxUHAzs8_Z-IaB5ZZ0ufObWiytBKiptPUWoIGWo2wcY,16440
+osism/tasks/conductor/netbox.py,sha256=xPJn-tXLqTAgW3v6L9rQ__XGHhM7ErchnyfsLY6iH14,13381
 osism/tasks/conductor/redfish.py,sha256=hOOS-_l3Qmo_6vLsgjZmJwTxLTf029hhFRVkU0TMLL0,12723
 osism/tasks/conductor/utils.py,sha256=-vHsyi0Adlk8qdoZkhM2kUwG7DqwwZZVE0JGK5Z92mI,8127
 osism/tasks/conductor/sonic/__init__.py,sha256=oxTTl_MGK4iWK9uNDRNlULtGrDGCQHrlJZ04weh_Lh8,777
 osism/tasks/conductor/sonic/bgp.py,sha256=PC6gGI5bCj2PCXcNGyMV9-EdlJWDsYaodzxigmYSZvw,3088
 osism/tasks/conductor/sonic/cache.py,sha256=Asv2k3nLJejuq7iB0a_LyK8dEmJzypP9v3OHkNY3GwI,3438
-osism/tasks/conductor/sonic/config_generator.py,sha256=
-osism/tasks/conductor/sonic/connections.py,sha256=
+osism/tasks/conductor/sonic/config_generator.py,sha256=U9tFNdJsZIdk-lzbkodXiWmK4FJq0FplBKKKuP7R27o,53269
+osism/tasks/conductor/sonic/connections.py,sha256=MU3u7HRG42dDlKL5GMruDVSvVl6AUXEf_WivfYdFURE,19297
 osism/tasks/conductor/sonic/constants.py,sha256=HjVFwmH-AN3np1qN97ahEAcwz2-4cHa-pA9pXWqWsqs,2219
 osism/tasks/conductor/sonic/device.py,sha256=ZYJA0bQ8waKWStzWUPxbcwNWa2Z_hMB3pqs8aA_nxXA,2458
 osism/tasks/conductor/sonic/exporter.py,sha256=25L1vbi84ZQD0xNHNTWk-anTz5QRkGJskCECBkeGQw4,8882
-osism/tasks/conductor/sonic/interface.py,sha256=
+osism/tasks/conductor/sonic/interface.py,sha256=M876LHdFqGxUfTizzDusdzvCkDI0vCgqte5uLmOXFaY,39472
 osism/tasks/conductor/sonic/sync.py,sha256=fpgsQVwq6Hb7eeDHhLkAqx5BkaK3Ce_m_WvmWEsJyOo,9182
-osism/utils/__init__.py,sha256=
+osism/utils/__init__.py,sha256=370UHVU5BFy-1wDAxBFaRjSA-zR0KNadJPWQ6zcYRf0,7806
 osism/utils/ssh.py,sha256=nxeEgwjJWvQCybKDp-NelMeWyODCYpaXFCBchAv4-bg,8691
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
+osism-0.20250823.0.dist-info/licenses/AUTHORS,sha256=EKFIR9F27AvoEXp1cA6FkGbjEOFt4Rcbipr5RJc7jSs,64
+osism-0.20250823.0.dist-info/licenses/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+osism-0.20250823.0.dist-info/METADATA,sha256=ZwWM7UZEXo9p5LSTB5v65qa2aEt3o7UOi7LNSrjkBXE,2937
+osism-0.20250823.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+osism-0.20250823.0.dist-info/entry_points.txt,sha256=h9YS3gfPc5ueU9ZXtCc60e8p4NQEuvtIH_zE0cfVqy0,4439
+osism-0.20250823.0.dist-info/pbr.json,sha256=t633u2SKCZHSfZL7F8jKzAb9107KiG9OydBqGXdZPNQ,47
+osism-0.20250823.0.dist-info/top_level.txt,sha256=8L8dsI9hcaGHsdnR4k_LN9EM78EhwrXRFHyAryPXZtY,6
+osism-0.20250823.0.dist-info/RECORD,,
{osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/entry_points.txt
CHANGED
@@ -3,6 +3,7 @@ osism = osism.main:main
 
 [osism.commands]
 apply = osism.commands.apply:Run
+baremetal burnin = osism.commands.baremetal:BaremetalBurnIn
 baremetal deploy = osism.commands.baremetal:BaremetalDeploy
 baremetal list = osism.commands.baremetal:BaremetalList
 baremetal ping = osism.commands.baremetal:BaremetalPing
@@ -30,8 +31,11 @@ log ansible = osism.commands.log:Ansible
 log container = osism.commands.log:Container
 log file = osism.commands.log:File
 log opensearch = osism.commands.log:Opensearch
+manage baremetal burnin = osism.commands.baremetal:BaremetalBurnIn
 manage baremetal deploy = osism.commands.baremetal:BaremetalDeploy
 manage baremetal list = osism.commands.baremetal:BaremetalList
+manage baremetal maintenance set = osism.commands.baremetal:BaremetalMaintenanceSet
+manage baremetal maintenance unset = osism.commands.baremetal:BaremetalMaintenanceUnset
 manage baremetal ping = osism.commands.baremetal:BaremetalPing
 manage baremetal undeploy = osism.commands.baremetal:BaremetalUndeploy
 manage compute disable = osism.commands.compute:ComputeDisable
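cliff resolves multi-word commands such as "baremetal burnin" through these entry points. A small standard-library check that the new registrations resolve to importable classes (the selection API of importlib.metadata shown here requires Python 3.10+):

    from importlib.metadata import entry_points

    for ep in entry_points(group="osism.commands"):
        if "burnin" in ep.name or "maintenance" in ep.name:
            print(ep.name, "->", ep.value)
            ep.load()  # raises if the referenced class cannot be imported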
osism-0.20250823.0.dist-info/licenses/AUTHORS
ADDED
@@ -0,0 +1 @@
+renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

osism-0.20250823.0.dist-info/pbr.json
ADDED
@@ -0,0 +1 @@
+{"git_version": "eaafb18", "is_release": false}

osism-0.20250804.0.dist-info/licenses/AUTHORS
DELETED
@@ -1 +0,0 @@
-Christian Berendt <berendt@osism.tech>

osism-0.20250804.0.dist-info/pbr.json
DELETED
@@ -1 +0,0 @@
-{"git_version": "8c96995", "is_release": false}

{osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/WHEEL
File without changes

{osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/licenses/LICENSE
File without changes

{osism-0.20250804.0.dist-info → osism-0.20250823.0.dist-info}/top_level.txt
File without changes