osism 0.20250709.0-py3-none-any.whl → 0.20250823.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
osism/commands/netbox.py CHANGED
@@ -14,6 +14,11 @@ from osism import utils
 class Ironic(Command):
     def get_parser(self, prog_name):
         parser = super(Ironic, self).get_parser(prog_name)
+        parser.add_argument(
+            "node",
+            nargs="?",
+            help="Optional node name to sync only a specific node",
+        )
         parser.add_argument(
             "--no-wait",
             help="Do not wait until the sync has been completed",
@@ -35,22 +40,40 @@ class Ironic(Command):
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait
         task_timeout = parsed_args.task_timeout
+        node_name = parsed_args.node

-        task = conductor.sync_ironic.delay(force_update=parsed_args.force_update)
+        task = conductor.sync_ironic.delay(
+            node_name=node_name, force_update=parsed_args.force_update
+        )
         if wait:
-            logger.info(
-                f"Task {task.task_id} (sync ironic) is running in background. Output comming soon."
-            )
+            if node_name:
+                logger.info(
+                    f"Task {task.task_id} (sync ironic for node {node_name}) is running in background. Output comming soon."
+                )
+            else:
+                logger.info(
+                    f"Task {task.task_id} (sync ironic) is running in background. Output comming soon."
+                )
             try:
                 return utils.fetch_task_output(task.id, timeout=task_timeout)
             except TimeoutError:
-                logger.error(
-                    f"Timeout while waiting for further output of task {task.task_id} (sync ironic)"
-                )
+                if node_name:
+                    logger.error(
+                        f"Timeout while waiting for further output of task {task.task_id} (sync ironic for node {node_name})"
+                    )
+                else:
+                    logger.error(
+                        f"Timeout while waiting for further output of task {task.task_id} (sync ironic)"
+                    )
         else:
-            logger.info(
-                f"Task {task.task_id} (sync ironic) is running in background. No more output."
-            )
+            if node_name:
+                logger.info(
+                    f"Task {task.task_id} (sync ironic for node {node_name}) is running in background. No more output."
+                )
+            else:
+                logger.info(
+                    f"Task {task.task_id} (sync ironic) is running in background. No more output."
+                )


 class Sync(Command):
@@ -205,9 +228,9 @@ class Console(Command):
         if not os.path.exists(nbcli_file):
             try:
                 with open("/run/secrets/NETBOX_TOKEN", "r") as fp:
-                    token = fp.read().strip()
+                    token = str(fp.read().strip())
             except FileNotFoundError:
-                token = None
+                token = ""

         url = os.environ.get("NETBOX_API", None)

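A note on the Ironic command change above: the new positional node argument is declared with nargs="?", so it stays optional and parses to None when no node name is given, which keeps the previous invocation without a node working unchanged. A minimal, self-contained argparse sketch of the same pattern (the program name and the --no-wait action here are illustrative, not taken from the package):

import argparse

# Hypothetical parser mirroring the optional positional added above.
parser = argparse.ArgumentParser(prog="ironic-sync")
parser.add_argument("node", nargs="?", help="Optional node name to sync only a specific node")
parser.add_argument("--no-wait", action="store_true", help="Do not wait until the sync has been completed")

print(parser.parse_args([]).node)             # None -> sync all nodes
print(parser.parse_args(["node-001"]).node)   # 'node-001' -> sync a single node
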
osism/commands/sonic.py CHANGED
@@ -9,9 +9,16 @@ from cliff.command import Command
 from loguru import logger
 import paramiko
 from prompt_toolkit import prompt
+from tabulate import tabulate

 from osism import utils
 from osism.tasks import netbox
+from osism.tasks.conductor.netbox import (
+    get_nb_device_query_list_sonic,
+    get_device_oob_ip,
+)
+from osism.tasks.conductor.sonic.constants import DEFAULT_SONIC_ROLES, SUPPORTED_HWSKUS
+from osism.utils.ssh import cleanup_ssh_known_hosts_for_node

 # Suppress paramiko logging messages globally
 logging.getLogger("paramiko").setLevel(logging.ERROR)
@@ -41,7 +48,15 @@ class SonicCommandBase(Command):
         if not hasattr(device, "local_context_data") or not device.local_context_data:
             logger.error(f"Device {hostname} has no local_context_data in NetBox")
             return None
-        return device.local_context_data
+
+        # Filter out keys that start with underscore
+        filtered_context = {
+            key: value
+            for key, value in device.local_context_data.items()
+            if not key.startswith("_")
+        }
+
+        return filtered_context

     def _save_config_context(self, config_context, hostname, today):
         """Save config context to local file"""
@@ -94,6 +109,13 @@ class SonicCommandBase(Command):
            return None

         ssh = paramiko.SSHClient()
+        # Load system host keys from centralized known_hosts file
+        try:
+            ssh.load_host_keys("/share/known_hosts")
+        except FileNotFoundError:
+            logger.debug(
+                "Centralized known_hosts file not found, creating empty host key store"
+            )
         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

         try:
@@ -796,6 +818,14 @@ class Reset(SonicCommandBase):
         logger.info("- Switch will need to be reinstalled after reset")
         logger.info("- Connection will be terminated by the reboot")

+        # Clean up SSH known_hosts entries for the reset node
+        logger.info(f"Cleaning up SSH known_hosts entries for {hostname}")
+        result = cleanup_ssh_known_hosts_for_node(hostname)
+        if result:
+            logger.info("- SSH known_hosts cleanup completed successfully")
+        else:
+            logger.warning("- SSH known_hosts cleanup completed with warnings")
+
         # Set provision_state to 'ztp' in NetBox
         logger.info("Setting provision_state to 'ztp' in NetBox")
         netbox.set_provision_state.delay(hostname, "ztp")
@@ -953,7 +983,7 @@ class Console(SonicCommandBase):
         logger.info(f"Connecting to {hostname} ({ssh_host}) via SSH console")

         # Execute SSH command using os.system to provide interactive terminal
-        ssh_command = f"ssh -i {ssh_key_path} -o StrictHostKeyChecking=no {ssh_username}@{ssh_host}"
+        ssh_command = f"ssh -i {ssh_key_path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/share/known_hosts {ssh_username}@{ssh_host}"

         logger.info("Starting SSH session...")
         logger.info("To exit the console, type 'exit' or press Ctrl+D")
@@ -971,3 +1001,168 @@ class Console(SonicCommandBase):
         except Exception as e:
             logger.error(f"Error connecting to SONiC device {hostname}: {e}")
             return 1
+
+
+class List(Command):
+    """List SONiC switches with their details"""
+
+    def get_parser(self, prog_name):
+        parser = super(List, self).get_parser(prog_name)
+        parser.add_argument(
+            "device",
+            nargs="?",
+            type=str,
+            help="Optional device name to filter by (same as sonic sync parameter)",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        device_name = parsed_args.device
+
+        try:
+            devices = []
+
+            if device_name:
+                # When specific device is requested, fetch it directly
+                try:
+                    device = utils.nb.dcim.devices.get(name=device_name)
+                    if device:
+                        # Check if device role matches allowed roles
+                        if device.role and device.role.slug in DEFAULT_SONIC_ROLES:
+                            devices.append(device)
+                            logger.debug(
+                                f"Found device: {device.name} with role: {device.role.slug}"
+                            )
+                        else:
+                            logger.warning(
+                                f"Device {device_name} has role '{device.role.slug if device.role else 'None'}' "
+                                f"which is not in allowed SONiC roles: {', '.join(DEFAULT_SONIC_ROLES)}"
+                            )
+                            return 1
+                    else:
+                        logger.error(f"Device {device_name} not found in NetBox")
+                        return 1
+                except Exception as e:
+                    logger.error(f"Error fetching device {device_name}: {e}")
+                    return 1
+            else:
+                # Get device query list from NETBOX_FILTER_CONDUCTOR_SONIC
+                nb_device_query_list = get_nb_device_query_list_sonic()
+
+                for nb_device_query in nb_device_query_list:
+                    # Query devices with the NETBOX_FILTER_CONDUCTOR_SONIC criteria
+                    for device in utils.nb.dcim.devices.filter(**nb_device_query):
+                        # Check if device role matches allowed roles
+                        if device.role and device.role.slug in DEFAULT_SONIC_ROLES:
+                            devices.append(device)
+                            logger.debug(
+                                f"Found device: {device.name} with role: {device.role.slug}"
+                            )

+            logger.info(f"Found {len(devices)} devices matching criteria")
+
+            # Prepare table data
+            table_data = []
+            headers = [
+                "Name",
+                "OOB IP",
+                "Primary IP",
+                "HWSKU",
+                "Version",
+                "Provision State",
+            ]
+
+            for device in devices:
+                # Get device name
+                device_name = device.name
+
+                # Get OOB IP address
+                oob_ip = "N/A"
+                try:
+                    oob_result = get_device_oob_ip(device)
+                    if oob_result:
+                        oob_ip = oob_result[0]  # Get just the IP address
+                except Exception as e:
+                    logger.debug(f"Could not get OOB IP for {device_name}: {e}")
+
+                # Get primary IP address
+                primary_ip = "N/A"
+                try:
+                    if device.primary_ip4:
+                        # Extract IP address from CIDR notation
+                        primary_ip = str(device.primary_ip4).split("/")[0]
+                    elif device.primary_ip6:
+                        # Extract IP address from CIDR notation
+                        primary_ip = str(device.primary_ip6).split("/")[0]
+                except Exception as e:
+                    logger.debug(f"Could not get primary IP for {device_name}: {e}")
+
+                # Get HWSKU and Version from sonic_parameters custom field
+                hwsku = "N/A"
+                version = "N/A"
+                try:
+                    if (
+                        hasattr(device, "custom_fields")
+                        and "sonic_parameters" in device.custom_fields
+                        and device.custom_fields["sonic_parameters"]
+                        and isinstance(device.custom_fields["sonic_parameters"], dict)
+                    ):
+                        sonic_params = device.custom_fields["sonic_parameters"]
+                        if "hwsku" in sonic_params and sonic_params["hwsku"]:
+                            hwsku = sonic_params["hwsku"]
+                        if "version" in sonic_params and sonic_params["version"]:
+                            version = sonic_params["version"]
+                except Exception as e:
+                    logger.debug(
+                        f"Could not extract sonic_parameters for {device_name}: {e}"
+                    )
+
+                # Determine provision state
+                provision_state = "Unknown"
+                try:
+                    if hwsku == "N/A":
+                        provision_state = "No HWSKU"
+                    elif hwsku not in SUPPORTED_HWSKUS:
+                        provision_state = "Unsupported HWSKU"
+                    else:
+                        # Use device status to determine provision state
+                        if device.status:
+                            status_value = (
+                                device.status.value
+                                if hasattr(device.status, "value")
+                                else str(device.status)
+                            )
+                            if status_value == "active":
+                                provision_state = "Provisioned"
+                            elif status_value == "staged":
+                                provision_state = "Staged"
+                            elif status_value == "planned":
+                                provision_state = "Planned"
+                            else:
+                                provision_state = status_value.title()
+                        else:
+                            provision_state = "No Status"
+                except Exception as e:
+                    logger.debug(
+                        f"Could not determine provision state for {device_name}: {e}"
+                    )
+
+                table_data.append(
+                    [device_name, oob_ip, primary_ip, hwsku, version, provision_state]
+                )
+
+            # Sort by device name
+            table_data.sort(key=lambda x: x[0])
+
+            # Print the table
+            if table_data:
+                print(tabulate(table_data, headers=headers, tablefmt="psql"))
+                print(f"\nTotal: {len(table_data)} devices")
+            else:
+                print("No SONiC devices found matching the criteria")
+
+            return 0
+
+        except Exception as e:
+            logger.error(f"Error listing SONiC devices: {e}")
+            return 1
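
A note on the new List command above: rows are collected from NetBox, sorted by device name, and rendered with tabulate's "psql" table format. A small standalone sketch of that rendering step, with made-up rows in place of the NetBox lookups:

from tabulate import tabulate

headers = ["Name", "OOB IP", "Primary IP", "HWSKU", "Version", "Provision State"]
# Example rows only; real values come from NetBox and the sonic_parameters custom field.
rows = [
    ["leaf-2", "10.10.0.12", "N/A", "N/A", "N/A", "No HWSKU"],
    ["leaf-1", "10.10.0.11", "192.0.2.11", "Accton-AS7326-56X", "202311", "Provisioned"],
]
rows.sort(key=lambda row: row[0])
print(tabulate(rows, headers=headers, tablefmt="psql"))
print(f"\nTotal: {len(rows)} devices")
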
osism/settings.py CHANGED
@@ -24,7 +24,7 @@ REDIS_DB: int = int(os.getenv("REDIS_DB", "0"))


 NETBOX_URL = os.getenv("NETBOX_API", os.getenv("NETBOX_URL"))
-NETBOX_TOKEN = os.getenv("NETBOX_TOKEN", read_secret("NETBOX_TOKEN"))
+NETBOX_TOKEN = str(os.getenv("NETBOX_TOKEN") or read_secret("NETBOX_TOKEN") or "")
 IGNORE_SSL_ERRORS = os.getenv("IGNORE_SSL_ERRORS", "True") == "True"

 # 43200 seconds = 12 hours
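
A note on the NETBOX_TOKEN change above: os.getenv(key, default) evaluates its default eagerly and returns an empty environment value as-is, while the new or-chain treats unset and empty values the same, falls back to the secret, and always produces a str. A short sketch of the difference; read_secret_stub stands in for osism's read_secret helper:

import os

def read_secret_stub(name):
    # Stand-in: pretend the secret file holds a token.
    return "token-from-docker-secret"

os.environ["NETBOX_TOKEN"] = ""  # variable present but empty

old_style = os.getenv("NETBOX_TOKEN", read_secret_stub("NETBOX_TOKEN"))
new_style = str(os.getenv("NETBOX_TOKEN") or read_secret_stub("NETBOX_TOKEN") or "")

print(repr(old_style))  # '' (the empty environment value wins)
print(repr(new_style))  # 'token-from-docker-secret' (empty values fall through to the secret)
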
osism/tasks/__init__.py CHANGED
@@ -238,6 +238,49 @@ def handle_task(t, wait=True, format="log", timeout=3600):
                 f"osism wait --output --live --delay 2 {t.task_id}"
             )
             return 1
+        except KeyboardInterrupt:
+            logger.info(f"\nTask {t.task_id} interrupted by user (CTRL+C)")
+
+            # Prompt user for task revocation in interactive mode using prompt-toolkit
+            try:
+                from prompt_toolkit import prompt
+
+                # Use prompt-toolkit for better UX with yes/no options and default
+                response = (
+                    prompt(
+                        "Do you want to revoke the running task? [y/N]: ", default="n"
+                    )
+                    .strip()
+                    .lower()
+                )
+
+                if response in ["y", "yes"]:
+                    logger.info(f"Revoking task {t.task_id}...")
+                    if utils.revoke_task(t.task_id):
+                        logger.info(f"Task {t.task_id} has been revoked")
+                    else:
+                        logger.error(f"Failed to revoke task {t.task_id}")
+                else:
+                    logger.info(f"Task {t.task_id} continues running in background")
+                    logger.info(
+                        "Use this command to continue waiting for this task: "
+                        f"osism wait --output --live --delay 2 {t.task_id}"
+                    )
+            except KeyboardInterrupt:
+                # Handle second CTRL+C during prompt
+                logger.info(f"\nTask {t.task_id} continues running in background")
+                logger.info(
+                    "Use this command to continue waiting for this task: "
+                    f"osism wait --output --live --delay 2 {t.task_id}"
+                )
+            except EOFError:
+                # Handle EOF (e.g., when input is not available)
+                logger.info(f"Task {t.task_id} continues running in background")
+                logger.info(
+                    "Use this command to continue waiting for this task: "
+                    f"osism wait --output --live --delay 2 {t.task_id}"
+                )
+            return 1

     else:
         if format == "log":
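
A note on the handle_task change above: the first CTRL+C interrupts the wait and opens a confirmation prompt (prompt_toolkit's prompt with default="n"); answering yes revokes the task, while a second CTRL+C or an EOF leaves the task running in the background. A minimal sketch of the same nested-interrupt pattern, using only the standard library and stand-in helpers:

def wait_with_revoke_prompt(wait_fn, revoke_fn, task_id):
    # wait_fn and revoke_fn are stand-ins for the real task helpers.
    try:
        return wait_fn(task_id)
    except KeyboardInterrupt:
        try:
            answer = input("Do you want to revoke the running task? [y/N]: ").strip().lower()
            if answer in ("y", "yes"):
                revoke_fn(task_id)
        except (KeyboardInterrupt, EOFError):
            # Second CTRL+C, or no interactive stdin: keep the task running.
            pass
        return 1
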
@@ -44,8 +44,8 @@ def sync_netbox(self, force_update=False):


 @app.task(bind=True, name="osism.tasks.conductor.sync_ironic")
-def sync_ironic(self, force_update=False):
-    _sync_ironic(self.request.id, get_ironic_parameters, force_update)
+def sync_ironic(self, node_name=None, force_update=False):
+    _sync_ironic(self.request.id, get_ironic_parameters, node_name, force_update)


 @app.task(bind=True, name="osism.tasks.conductor.sync_sonic")
@@ -30,7 +30,7 @@ def get_configuration():
            "image_source"
        ]
        if not validators.uuid(image_source) and not validators.url(
-           image_source
+           image_source, simple_host=True
        ):
            result = openstack.image_get(image_source)
            if result:
@@ -46,7 +46,7 @@ def get_configuration():
            "deploy_kernel"
        ]
        if not validators.uuid(deploy_kernel) and not validators.url(
-           deploy_kernel
+           deploy_kernel, simple_host=True
        ):
            result = openstack.image_get(deploy_kernel)
            if result:
@@ -63,7 +63,7 @@ def get_configuration():
            "deploy_ramdisk"
        ]
        if not validators.uuid(deploy_ramdisk) and not validators.url(
-           deploy_ramdisk
+           deploy_ramdisk, simple_host=True
        ):
            result = openstack.image_get(deploy_ramdisk)
            if result:
@@ -127,38 +127,69 @@ def _prepare_node_attributes(device, get_ironic_parameters):
     return node_attributes


-def sync_ironic(request_id, get_ironic_parameters, force_update=False):
-    osism_utils.push_task_output(
-        request_id,
-        "Starting NetBox device synchronisation with ironic\n",
-    )
+def sync_ironic(request_id, get_ironic_parameters, node_name=None, force_update=False):
+    if node_name:
+        osism_utils.push_task_output(
+            request_id,
+            f"Starting NetBox device synchronisation with ironic for node {node_name}\n",
+        )
+    else:
+        osism_utils.push_task_output(
+            request_id,
+            "Starting NetBox device synchronisation with ironic\n",
+        )
     devices = set()
     nb_device_query_list = get_nb_device_query_list_ironic()
     for nb_device_query in nb_device_query_list:
         devices |= set(netbox.get_devices(**nb_device_query))

+    # Filter devices by node_name if specified
+    if node_name:
+        devices = {dev for dev in devices if dev.name == node_name}
+        if not devices:
+            osism_utils.push_task_output(
+                request_id,
+                f"Node {node_name} not found in NetBox\n",
+            )
+            osism_utils.finish_task_output(request_id, rc=1)
+            return
+
     # NOTE: Find nodes in Ironic which are no longer present in NetBox and remove them
     device_names = {dev.name for dev in devices}
     nodes = openstack.baremetal_node_list()
+
+    # Filter nodes by node_name if specified
+    if node_name:
+        nodes = [node for node in nodes if node["name"] == node_name]
+
     for node in nodes:
         osism_utils.push_task_output(
-            request_id, f"Looking for {node['Name']} in NetBox\n"
+            request_id, f"Looking for {node['name']} in NetBox\n"
         )
-        if node["Name"] not in device_names:
+        if node["name"] not in device_names:
             if (
-                not node["Instance UUID"]
-                and node["Provisioning State"] in ["enroll", "manageable", "available"]
-                and node["Power State"] in ["power off", None]
+                not node["instance_uuid"]
+                and node["provision_state"]
+                in ["enroll", "manageable", "available", "clean failed"]
+                and node["power_state"] in ["power off", None]
             ):
                 osism_utils.push_task_output(
                     request_id,
-                    f"Cleaning up baremetal node not found in NetBox: {node['Name']}\n",
+                    f"Cleaning up baremetal node not found in NetBox: {node['name']}\n",
                 )
+                if node["provision_state"] == "clean failed":
+                    # NOTE: Move node to manageable to allow deletion
+                    node = openstack.baremetal_node_set_provision_state(
+                        node["uuid"], "manage"
+                    )
+                    node = openstack.baremetal_node_wait_for_nodes_provision_state(
+                        node["uuid"], "manageable"
+                    )
                 for port in openstack.baremetal_port_list(
-                    details=False, attributes=dict(node_uuid=node["UUID"])
+                    details=False, attributes=dict(node_uuid=node["uuid"])
                 ):
                     openstack.baremetal_port_delete(port.id)
-                openstack.baremetal_node_delete(node["UUID"])
+                openstack.baremetal_node_delete(node["uuid"])
             else:
                 osism_utils.push_task_output(
                     f"Cannot remove baremetal node because it is still provisioned or running: {node}"
@@ -196,13 +227,9 @@ def sync_ironic(request_id, get_ironic_parameters, force_update=False):
             node = openstack.baremetal_node_create(device.name, node_attributes)
         else:
             # NOTE: The listener service only reacts to changes in the baremetal node. Explicitly sync provision and power state in case updates were missed by the listener.
-            if (
-                device.custom_fields["provision_state"]
-                != node["provision_state"]
-            ):
-                netbox.set_provision_state(device.name, node["provision_state"])
-            if device.custom_fields["power_state"] != node["power_state"]:
-                netbox.set_power_state(device.name, node["power_state"])
+            # This sync is done unconditionally, because we do not know the state of secondary netboxes at this point
+            netbox.set_provision_state(device.name, node["provision_state"])
+            netbox.set_power_state(device.name, node["power_state"])
         # NOTE: Check whether the baremetal node needs to be updated
         node_updates = {}
         deep_compare(node_attributes, node, node_updates)
@@ -258,7 +285,7 @@ def sync_ironic(request_id, get_ironic_parameters, force_update=False):
                request_id,
                f"Validation of management interface successful for baremetal node for {device.name}\n",
            )
-           if node["provision_state"] == "enroll":
+           if node["provision_state"] in ["enroll", "clean failed"]:
                osism_utils.push_task_output(
                    request_id,
                    f"Transitioning baremetal node to manageable state for {device.name}\n",
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: Apache-2.0

-from loguru import logger
+import ipaddress
 import yaml
+from loguru import logger

 from osism import settings, utils
 from osism.tasks import netbox
@@ -309,3 +310,59 @@ def get_device_loopbacks(device):
    )

    return {"loopbacks": loopbacks}
+
+
+def get_device_interface_ips(device):
+    """Get IPv4 addresses assigned to device interfaces.
+
+    Args:
+        device: NetBox device object
+
+    Returns:
+        dict: Dictionary mapping interface names to their IPv4 addresses
+              {
+                  'interface_name': 'ip_address/prefix_length',
+                  ...
+              }
+    """
+    interface_ips = {}
+
+    try:
+        # Get all interfaces for the device
+        interfaces = list(utils.nb.dcim.interfaces.filter(device_id=device.id))
+
+        for interface in interfaces:
+            # Skip management interfaces and virtual interfaces for now
+            if interface.mgmt_only or (
+                hasattr(interface, "type")
+                and interface.type
+                and interface.type.value == "virtual"
+            ):
+                continue
+
+            # Get IP addresses assigned to this interface
+            ip_addresses = utils.nb.ipam.ip_addresses.filter(
+                assigned_object_id=interface.id,
+            )
+
+            for ip_addr in ip_addresses:
+                if ip_addr.address:
+                    # Check if it's an IPv4 address
+                    try:
+                        ip_obj = ipaddress.ip_interface(ip_addr.address)
+                        if ip_obj.version == 4:
+                            interface_ips[interface.name] = ip_addr.address
+                            logger.debug(
+                                f"Found IPv4 address {ip_addr.address} on interface {interface.name} of device {device.name}"
+                            )
+                            break  # Only use the first IPv4 address found
+                    except (ValueError, ipaddress.AddressValueError):
+                        # Skip invalid IP addresses
+                        continue
+
+    except Exception as e:
+        logger.warning(
+            f"Could not get interface IP addresses for device {device.name}: {e}"
+        )
+
+    return interface_ips
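
A note on get_device_interface_ips() above: the IPv4 filter relies on ipaddress.ip_interface(), which accepts "address/prefix" strings and raises ValueError (AddressValueError is a subclass of it) for anything it cannot parse. A tiny standalone sketch of that check applied to plain strings:

import ipaddress

for candidate in ["192.0.2.10/24", "2001:db8::1/64", "not-an-ip"]:
    try:
        if ipaddress.ip_interface(candidate).version == 4:
            print(f"{candidate}: IPv4, keep")
        else:
            print(f"{candidate}: not IPv4, skip")
    except ValueError:
        print(f"{candidate}: invalid, skip")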