osism 0.20250616.0-py3-none-any.whl → 0.20250627.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (30)
  1. osism/api.py +49 -5
  2. osism/commands/baremetal.py +23 -3
  3. osism/commands/manage.py +276 -1
  4. osism/commands/reconciler.py +8 -1
  5. osism/commands/sync.py +27 -7
  6. osism/settings.py +1 -0
  7. osism/tasks/conductor/__init__.py +2 -2
  8. osism/tasks/conductor/ironic.py +21 -19
  9. osism/tasks/conductor/sonic/__init__.py +26 -0
  10. osism/tasks/conductor/sonic/bgp.py +87 -0
  11. osism/tasks/conductor/sonic/cache.py +114 -0
  12. osism/tasks/conductor/sonic/config_generator.py +1000 -0
  13. osism/tasks/conductor/sonic/connections.py +389 -0
  14. osism/tasks/conductor/sonic/constants.py +80 -0
  15. osism/tasks/conductor/sonic/device.py +82 -0
  16. osism/tasks/conductor/sonic/exporter.py +226 -0
  17. osism/tasks/conductor/sonic/interface.py +940 -0
  18. osism/tasks/conductor/sonic/sync.py +215 -0
  19. osism/tasks/reconciler.py +12 -2
  20. {osism-0.20250616.0.dist-info → osism-0.20250627.0.dist-info}/METADATA +3 -3
  21. {osism-0.20250616.0.dist-info → osism-0.20250627.0.dist-info}/RECORD +27 -18
  22. {osism-0.20250616.0.dist-info → osism-0.20250627.0.dist-info}/entry_points.txt +3 -0
  23. osism-0.20250627.0.dist-info/licenses/AUTHORS +1 -0
  24. osism-0.20250627.0.dist-info/pbr.json +1 -0
  25. osism/tasks/conductor/sonic.py +0 -1401
  26. osism-0.20250616.0.dist-info/licenses/AUTHORS +0 -1
  27. osism-0.20250616.0.dist-info/pbr.json +0 -1
  28. {osism-0.20250616.0.dist-info → osism-0.20250627.0.dist-info}/WHEEL +0 -0
  29. {osism-0.20250616.0.dist-info → osism-0.20250627.0.dist-info}/licenses/LICENSE +0 -0
  30. {osism-0.20250616.0.dist-info → osism-0.20250627.0.dist-info}/top_level.txt +0 -0
osism/api.py CHANGED
@@ -79,27 +79,71 @@ baremetal_events = BaremetalEvents()
 
 
 @app.get("/")
 async def root():
-    return {"message": "Hello World"}
+    return {"result": "ok"}
 
 
-@app.post("/meters/sink")
+@app.get("/v1")
+async def v1():
+    return {"result": "ok"}
+
+
+@app.post("/v1/meters/sink")
 async def write_sink_meters(request: Request):
     data = await request.json()
 
 
-@app.post("/events/sink")
+@app.post("/v1/events/sink")
 async def write_sink_events(request: Request):
     data = await request.json()
 
 
-@app.post("/notifications/baremetal", status_code=204)
+@app.post("/v1/notifications/baremetal", status_code=204)
 async def notifications_baremetal(notification: NotificationBaremetal) -> None:
     handler = baremetal_events.get_handler(notification.event_type)
     handler(notification.payload)
 
 
-@app.post("/webhook/netbox", response_model=WebhookNetboxResponse, status_code=200)
+@app.post("/v1/switches/{identifier}/ztp/complete")
+async def switches_ztp_complete(identifier: str):
+    if not utils.nb:
+        return {"result": "netbox not enabled"}
+
+    device = None
+
+    # Search by device name
+    devices = utils.nb.dcim.devices.filter(name=identifier)
+    if devices:
+        device = devices[0]
+
+    # Search by inventory_hostname custom field
+    if not device:
+        devices = utils.nb.dcim.devices.filter(cf_inventory_hostname=identifier)
+        if devices:
+            device = devices[0]
+
+    # Search by serial number
+    if not device:
+        devices = utils.nb.dcim.devices.filter(serial=identifier)
+        if devices:
+            device = devices[0]
+
+    if device:
+        logger.info(
+            f"Found device {device.name} for ZTP complete with identifier {identifier}"
+        )
+
+        # Set provision_state custom field to active
+        device.custom_fields["provision_state"] = "active"
+        device.save()
+
+        return {"result": "ok", "device": device.name}
+    else:
+        logger.warning(f"No device found for ZTP complete with identifier {identifier}")
+        return {"result": "device not found"}
+
+
+@app.post("/v1/webhook/netbox", response_model=WebhookNetboxResponse, status_code=200)
 async def webhook(
     webhook_input: WebhookNetboxData,
     request: Request,
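As a usage note: the new ZTP-complete endpoint takes the identifier straight from the URL path and needs no request body. A minimal sketch with requests, assuming the API is reachable at http://localhost:8000 and a switch known to NetBox as sw1 (both illustrative):

import requests

# POST to the new endpoint; the identifier may be a device name, an
# inventory_hostname custom field value, or a serial number - the handler
# tries all three lookups in that order.
response = requests.post(
    "http://localhost:8000/v1/switches/sw1/ztp/complete", timeout=10
)

# Expected payloads per the handler above:
#   {"result": "ok", "device": "sw1"}  on success
#   {"result": "device not found"}     when no lookup matches
print(response.json())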
osism/commands/baremetal.py CHANGED
@@ -131,20 +131,35 @@ class BaremetalDeploy(Command):
             if not node:
                 continue
 
-            if node.provision_state in ["available", "deploy failed"]:
+            if (
+                node.provision_state in ["available", "deploy failed"]
+                and not node["maintenance"]
+            ):
                 provision_state = "active"
             elif (
                 node.provision_state == "error"
                 or node.provision_state == "active"
+                and not node["maintenance"]
                 and rebuild
             ):
                 provision_state = "rebuild"
             else:
                 logger.warning(
-                    f"Node {node.name} ({node.id}) not in supported provision state"
+                    f"Node {node.name} ({node.id}) not in supported state! Provision state: {node.provision_state}, maintenance mode: {node['maintenance']}"
                 )
                 continue
 
+            # NOTE: Ironic removes "instance_info" on undeploy. It was saved to "extra" during sync and needs to be refreshed here.
+            if (
+                "instance_info" in node
+                and not node["instance_info"]
+                and "instance_info" in node["extra"]
+                and node["extra"]["instance_info"]
+            ):
+                node = conn.baremetal.update_node(
+                    node, instance_info=json.loads(node.extra["instance_info"])
+                )
+
             try:
                 conn.baremetal.validate_node(
                     node.id, required=("boot", "deploy", "power")
@@ -272,12 +287,17 @@ class BaremetalUndeploy(Command):
 
         if node.provision_state in ["active", "deploy failed", "error"]:
             try:
-                conn.baremetal.set_node_provision_state(node.id, "undeploy")
+                node = conn.baremetal.set_node_provision_state(node.id, "undeploy")
             except Exception as exc:
                 logger.warning(
                     f"Node {node.name} ({node.id}) could not be moved to available state: {exc}"
                 )
                 continue
+            # NOTE: Ironic removes "instance_info" on undeploy. It was saved to "extra" during sync and needs to be refreshed here.
+            if "instance_info" in node["extra"]:
+                node = conn.baremetal.update_node(
+                    node, instance_info=json.loads(node.extra["instance_info"])
+                )
         else:
             logger.warning(
                 f"Node {node.name} ({node.id}) not in supported provision state"
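The instance_info restore above pairs with the sync-side change in osism/tasks/conductor/ironic.py further down, where a JSON-encoded copy is stashed in the node's extra field. A self-contained sketch of the round-trip, using plain dicts in place of Ironic node objects:

import json

node = {"instance_info": {"image_source": "http://example/image.qcow2"}, "extra": {}}

# sync: stash a JSON-encoded copy in "extra" (mirrors the json.dumps in sync_ironic)
node["extra"]["instance_info"] = json.dumps(node["instance_info"])

# undeploy: Ironic clears instance_info
node["instance_info"] = {}

# deploy/undeploy paths above: restore the copy with json.loads
if not node["instance_info"] and node["extra"].get("instance_info"):
    node["instance_info"] = json.loads(node["extra"]["instance_info"])

assert node["instance_info"]["image_source"] == "http://example/image.qcow2"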
osism/commands/manage.py CHANGED
@@ -1,5 +1,8 @@
 # SPDX-License-Identifier: Apache-2.0
 
+import json
+import os
+from datetime import datetime
 from re import findall
 from urllib.parse import urljoin
 
@@ -7,10 +10,12 @@ from cliff.command import Command
 import docker
 from jinja2 import Template
 from loguru import logger
+import paramiko
 import requests
 
 from osism.data import TEMPLATE_IMAGE_CLUSTERAPI, TEMPLATE_IMAGE_OCTAVIA
-from osism.tasks import openstack, handle_task
+from osism.tasks import openstack, ansible, handle_task
+from osism import utils
 
 SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.31", "1.32", "1.33"]
 
@@ -360,3 +365,273 @@ class Flavors(Command):
         )
 
         return handle_task(task, wait, format="script", timeout=3600)
+
+
+class Dnsmasq(Command):
+    def get_parser(self, prog_name):
+        parser = super(Dnsmasq, self).get_parser(prog_name)
+        parser.add_argument(
+            "--no-wait",
+            default=False,
+            help="Do not wait until dnsmasq has been applied",
+            action="store_true",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        wait = not parsed_args.no_wait
+
+        task_signature = ansible.run.si("infrastructure", "dnsmasq", [])
+        task = task_signature.apply_async()
+        if wait:
+            logger.info(
+                f"It takes a moment until task {task.task_id} (dnsmasq) has been started and output is visible here."
+            )
+
+        return handle_task(task, wait, format="log", timeout=300)
+
+
+class Sonic(Command):
+    def get_parser(self, prog_name):
+        parser = super(Sonic, self).get_parser(prog_name)
+        parser.add_argument(
+            "hostname", type=str, help="Hostname of the SONiC switch to manage"
+        )
+        parser.add_argument(
+            "--reload",
+            action="store_true",
+            help="Execute config reload after config load to restart services",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        hostname = parsed_args.hostname
+        reload_config = parsed_args.reload
+        today = datetime.now().strftime("%Y%m%d")
+
+        try:
+            # Get device from NetBox - try by name first, then by inventory_hostname
+            device = utils.nb.dcim.devices.get(name=hostname)
+            if not device:
+                # Try to find by inventory_hostname custom field
+                devices = utils.nb.dcim.devices.filter(cf_inventory_hostname=hostname)
+                if devices:
+                    device = devices[0]  # Take the first match
+                    logger.info(f"Device found by inventory_hostname: {device.name}")
+                else:
+                    logger.error(
+                        f"Device {hostname} not found in NetBox (searched by name and inventory_hostname)"
+                    )
+                    return 1
+
+            # Get device configuration from local_context_data
+            if (
+                not hasattr(device, "local_context_data")
+                or not device.local_context_data
+            ):
+                logger.error(f"Device {hostname} has no local_context_data in NetBox")
+                return 1
+
+            config_context = device.local_context_data
+
+            # Save config context to local /tmp directory
+            config_context_file = f"/tmp/config_db_{hostname}_{today}.json"
+            try:
+                with open(config_context_file, "w") as f:
+                    json.dump(config_context, f, indent=2)
+                logger.info(f"Config context saved to {config_context_file}")
+            except Exception as e:
+                logger.error(f"Failed to save config context: {e}")
+                return 1
+
+            # Extract SSH connection details
+            ssh_host = None
+            ssh_username = None
+
+            # Try to get SSH details from config context
+            if "management" in config_context:
+                mgmt = config_context["management"]
+                if "ip" in mgmt:
+                    ssh_host = mgmt["ip"]
+                if "username" in mgmt:
+                    ssh_username = mgmt["username"]
+
+            # Fallback: try to get OOB IP from NetBox
+            if not ssh_host:
+                from osism.tasks.conductor.netbox import get_device_oob_ip
+
+                oob_result = get_device_oob_ip(device)
+                if oob_result:
+                    ssh_host = oob_result[0]
+
+            if not ssh_host:
+                logger.error(f"No SSH host found for device {hostname}")
+                return 1
+
+            if not ssh_username:
+                ssh_username = "admin"  # Default SONiC username
+
+            # SSH private key path
+            ssh_key_path = "/ansible/secrets/id_rsa.operator"
+
+            if not os.path.exists(ssh_key_path):
+                logger.error(f"SSH private key not found at {ssh_key_path}")
+                return 1
+
+            logger.info(
+                f"Connecting to {hostname} ({ssh_host}) to backup SONiC configuration"
+            )
+
+            # Create SSH connection
+            ssh = paramiko.SSHClient()
+            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+            try:
+                # Connect with private key
+                ssh.connect(
+                    hostname=ssh_host,
+                    username=ssh_username,
+                    key_filename=ssh_key_path,
+                    timeout=30,
+                )
+
+                # Generate backup filename with date and increment on switch
+                base_backup_path = f"/home/admin/config_db_{hostname}_{today}"
+                backup_filename = f"{base_backup_path}_1.json"
+
+                # Find next available filename on switch
+                x = 1
+                while True:
+                    check_cmd = f"ls {base_backup_path}_{x}.json 2>/dev/null"
+                    stdin, stdout, stderr = ssh.exec_command(check_cmd)
+                    if stdout.read().decode().strip() == "":
+                        backup_filename = f"{base_backup_path}_{x}.json"
+                        break
+                    x += 1
+
+                logger.info(
+                    f"Backing up current configuration on switch to {backup_filename}"
+                )
+
+                # Backup current configuration on switch
+                backup_cmd = f"sudo cp /etc/sonic/config_db.json {backup_filename}"
+                stdin, stdout, stderr = ssh.exec_command(backup_cmd)
+                exit_status = stdout.channel.recv_exit_status()
+
+                if exit_status != 0:
+                    error_msg = stderr.read().decode()
+                    logger.error(
+                        f"Failed to backup configuration on switch: {error_msg}"
+                    )
+                    return 1
+
+                logger.info("Configuration backed up successfully on switch")
+
+                # Upload local config context to switch /tmp directory
+                switch_config_file = f"/tmp/config_db_{hostname}_current.json"
+                logger.info(
+                    f"Uploading config context to {switch_config_file} on switch"
+                )
+
+                # Use SFTP to upload the config context file
+                sftp = ssh.open_sftp()
+                try:
+                    sftp.put(config_context_file, switch_config_file)
+                    logger.info(
+                        f"Config context successfully uploaded to {switch_config_file} on switch"
+                    )
+                except Exception as e:
+                    logger.error(f"Failed to upload config context to switch: {e}")
+                    return 1
+                finally:
+                    sftp.close()
+
+                # Load and apply the new configuration
+                logger.info("Loading and applying new configuration on switch")
+
+                load_cmd = f"sudo config load -y {switch_config_file}"
+                stdin, stdout, stderr = ssh.exec_command(load_cmd)
+                exit_status = stdout.channel.recv_exit_status()
+
+                if exit_status != 0:
+                    error_msg = stderr.read().decode()
+                    logger.error(f"Failed to load configuration: {error_msg}")
+                    return 1
+
+                logger.info("Configuration loaded and applied successfully")
+
+                # Optionally reload configuration to restart services
+                config_operations_successful = True
+                if reload_config:
+                    logger.info("Reloading configuration to restart services")
+
+                    reload_cmd = "sudo config reload -y"
+                    stdin, stdout, stderr = ssh.exec_command(reload_cmd)
+                    exit_status = stdout.channel.recv_exit_status()
+
+                    if exit_status != 0:
+                        error_msg = stderr.read().decode()
+                        logger.error(f"Failed to reload configuration: {error_msg}")
+                        config_operations_successful = False
+                    else:
+                        logger.info("Configuration reloaded successfully")
+
+                # Save configuration only if load (and optionally reload) were successful
+                if config_operations_successful:
+                    logger.info("Saving configuration to persist changes")
+
+                    save_cmd = "sudo config save -y"
+                    stdin, stdout, stderr = ssh.exec_command(save_cmd)
+                    exit_status = stdout.channel.recv_exit_status()
+
+                    if exit_status != 0:
+                        error_msg = stderr.read().decode()
+                        logger.error(f"Failed to save configuration: {error_msg}")
+                        return 1
+
+                    logger.info("Configuration saved successfully")
+                else:
+                    logger.warning("Skipping config save due to reload failure")
+
+                # Delete the temporary configuration file
+                logger.info(f"Cleaning up temporary file {switch_config_file}")
+
+                delete_cmd = f"rm {switch_config_file}"
+                stdin, stdout, stderr = ssh.exec_command(delete_cmd)
+                exit_status = stdout.channel.recv_exit_status()
+
+                if exit_status != 0:
+                    error_msg = stderr.read().decode()
+                    logger.warning(f"Failed to delete temporary file: {error_msg}")
+                else:
+                    logger.info("Temporary file deleted successfully")
+
+                logger.info("SONiC configuration management completed successfully")
+                logger.info(f"- Config context saved locally to: {config_context_file}")
+                if reload_config and config_operations_successful:
+                    logger.info("- Configuration loaded, reloaded, and saved on switch")
+                elif config_operations_successful:
+                    logger.info("- Configuration loaded and saved on switch")
+                else:
+                    logger.info(
+                        "- Configuration loaded on switch (save skipped due to reload failure)"
+                    )
+                logger.info(f"- Backup created on switch: {backup_filename}")
+
+                return 0
+
+            except paramiko.AuthenticationException:
+                logger.error(f"Authentication failed for {ssh_host}")
+                return 1
+            except paramiko.SSHException as e:
+                logger.error(f"SSH connection failed: {e}")
+                return 1
+            except Exception as e:
+                logger.error(f"Unexpected error during SSH operations: {e}")
+                return 1
+            finally:
+                ssh.close()
+
+        except Exception as e:
+            logger.error(f"Error managing SONiC device {hostname}: {e}")
+            return 1
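A side note on the backup naming: the command probes /home/admin/config_db_<hostname>_<YYYYMMDD>_<x>.json on the switch until a free slot is found. The same scheme expressed against a local filesystem, as a sketch (the directory is illustrative; the command itself probes over SSH with ls):

import os
from datetime import datetime

def next_backup_filename(hostname: str, directory: str = "/tmp") -> str:
    # Probe _1.json, _2.json, ... until a name is unused, like the while loop above
    today = datetime.now().strftime("%Y%m%d")
    base = os.path.join(directory, f"config_db_{hostname}_{today}")
    x = 1
    while os.path.exists(f"{base}_{x}.json"):
        x += 1
    return f"{base}_{x}.json"

print(next_backup_filename("sw1"))  # e.g. /tmp/config_db_sw1_20250627_1.json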
osism/commands/reconciler.py CHANGED
@@ -38,13 +38,20 @@ class Sync(Command):
             type=int,
             help="Timeout for a scheduled task that has not been executed yet",
         )
+        parser.add_argument(
+            "--flush-cache",
+            default=False,
+            help="Flush cache before running sync",
+            action="store_true",
+        )
         return parser
 
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait
         task_timeout = parsed_args.task_timeout
+        flush_cache = parsed_args.flush_cache
 
-        t = reconciler.run.delay(publish=wait)
+        t = reconciler.run.delay(publish=wait, flush_cache=flush_cache)
         if wait:
             logger.info(
                 f"Task {t.task_id} (sync inventory) is running in background. Output coming soon."
osism/commands/sync.py CHANGED
@@ -23,24 +23,44 @@ class Facts(Command):
 class Sonic(Command):
     def get_parser(self, prog_name):
         parser = super(Sonic, self).get_parser(prog_name)
+        parser.add_argument(
+            "device",
+            nargs="?",
+            help="Optional device name to sync configuration for a specific device",
+        )
         parser.add_argument(
             "--no-wait",
             default=False,
             help="Do not wait until the sync has been completed",
             action="store_true",
         )
+        parser.add_argument(
+            "--diff",
+            default=True,
+            help="Show configuration diff when changes are detected (default: True)",
+            action="store_true",
+        )
+        parser.add_argument(
+            "--no-diff",
+            dest="diff",
+            help="Do not show configuration diff",
+            action="store_false",
+        )
         return parser
 
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait
+        device_name = parsed_args.device
+        show_diff = parsed_args.diff
+
+        task = conductor.sync_sonic.delay(device_name, show_diff)
 
-        task = conductor.sync_sonic.delay()
-        if wait:
+        if device_name:
             logger.info(
-                f"Task {task.task_id} (sync sonic) is running. Wait. No more output."
+                f"Task {task.task_id} (sync sonic for device {device_name}) started"
             )
-            task.wait(timeout=None, interval=0.5)
         else:
-            logger.info(
-                f"Task {task.task_id} (sync sonic) is running in background. No more output."
-            )
+            logger.info(f"Task {task.task_id} (sync sonic) started")
+
+        rc = handle_task(task, wait=wait)
+        return rc
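The --diff/--no-diff pair works because both arguments write to the same "diff" destination: the default is True, --diff keeps it True, and --no-diff flips it to False. A standalone sketch of the pattern:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--diff", default=True, action="store_true", help="Show diff (default)")
parser.add_argument("--no-diff", dest="diff", action="store_false", help="Hide diff")

print(parser.parse_args([]).diff)             # True
print(parser.parse_args(["--no-diff"]).diff)  # False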
osism/settings.py CHANGED
@@ -49,6 +49,7 @@ NETBOX_FILTER_CONDUCTOR_SONIC = os.getenv(
 SONIC_EXPORT_DIR = os.getenv("SONIC_EXPORT_DIR", "/etc/sonic/export")
 SONIC_EXPORT_PREFIX = os.getenv("SONIC_EXPORT_PREFIX", "osism_")
 SONIC_EXPORT_SUFFIX = os.getenv("SONIC_EXPORT_SUFFIX", "_config_db.json")
+SONIC_EXPORT_IDENTIFIER = os.getenv("SONIC_EXPORT_IDENTIFIER", "serial-number")
 
 NETBOX_SECONDARIES = (
     os.getenv("NETBOX_SECONDARIES", read_secret("NETBOX_SECONDARIES")) or "[]"
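For orientation, the existing prefix/suffix settings already shape the export filename; the new SONIC_EXPORT_IDENTIFIER presumably selects which device attribute (serial number vs. hostname) fills the middle. A hedged sketch of that composition; the real logic lives in the new osism/tasks/conductor/sonic/exporter.py, and export_path here is a hypothetical helper, not that module's API:

import os

SONIC_EXPORT_DIR = os.getenv("SONIC_EXPORT_DIR", "/etc/sonic/export")
SONIC_EXPORT_PREFIX = os.getenv("SONIC_EXPORT_PREFIX", "osism_")
SONIC_EXPORT_SUFFIX = os.getenv("SONIC_EXPORT_SUFFIX", "_config_db.json")
SONIC_EXPORT_IDENTIFIER = os.getenv("SONIC_EXPORT_IDENTIFIER", "serial-number")

def export_path(device_name: str, serial: str) -> str:
    # Assumption: "serial-number" selects the serial, anything else the device name
    identifier = serial if SONIC_EXPORT_IDENTIFIER == "serial-number" else device_name
    return os.path.join(
        SONIC_EXPORT_DIR, f"{SONIC_EXPORT_PREFIX}{identifier}{SONIC_EXPORT_SUFFIX}"
    )

print(export_path("sw1", "ABC123"))  # /etc/sonic/export/osism_ABC123_config_db.json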
osism/tasks/conductor/__init__.py CHANGED
@@ -48,8 +48,8 @@ def sync_ironic(self, force_update=False):
 
 
 @app.task(bind=True, name="osism.tasks.conductor.sync_sonic")
-def sync_sonic(self):
-    return _sync_sonic()
+def sync_sonic(self, device_name=None, show_diff=True):
+    return _sync_sonic(device_name, self.request.id, show_diff)
 
 
 __all__ = [
osism/tasks/conductor/ironic.py CHANGED
@@ -152,10 +152,14 @@ def sync_ironic(request_id, get_ironic_parameters, force_update=False):
                 .render(remote_board_address=oob_ip)
             )
             node_attributes.update({"resource_class": device.name})
-            # NOTE: Write metadata used for provisioning into 'extra' field, so that
-            # it is available during node deploy without querying the NetBox again
             if "extra" not in node_attributes:
                 node_attributes["extra"] = {}
+            # NOTE: Copy instance_info into extra field. because ironic removes it on undeployment. This way it may be readded on undeploy without querying the netbox again
+            if "instance_info" in node_attributes and node_attributes["instance_info"]:
+                node_attributes["extra"].update(
+                    {"instance_info": json.dumps(node_attributes["instance_info"])}
+                )
+            # NOTE: Write metadata used for provisioning into 'extra' field, so that it is available during node deploy without querying the netbox again
             if (
                 "netplan_parameters" in device.custom_fields
                 and device.custom_fields["netplan_parameters"]
@@ -231,23 +235,21 @@ def sync_ironic(request_id, get_ironic_parameters, force_update=False):
                     details=False, attributes=dict(node_uuid=node["uuid"])
                 )
                 # NOTE: Baremetal ports are only required for (i)pxe boot
-                if node["boot_interface"] in ["pxe", "ipxe"]:
-                    for port_attributes in ports_attributes:
-                        port_attributes.update({"node_id": node["uuid"]})
-                        port = [
-                            port
-                            for port in node_ports
-                            if port_attributes["address"].upper()
-                            == port["address"].upper()
-                        ]
-                        if not port:
-                            osism_utils.push_task_output(
-                                request_id,
-                                f"Creating baremetal port with MAC address {port_attributes['address']} for {device.name}\n",
-                            )
-                            openstack.baremetal_port_create(port_attributes)
-                        else:
-                            node_ports.remove(port[0])
+                for port_attributes in ports_attributes:
+                    port_attributes.update({"node_id": node["uuid"]})
+                    port = [
+                        port
+                        for port in node_ports
+                        if port_attributes["address"].upper() == port["address"].upper()
+                    ]
+                    if not port:
+                        osism_utils.push_task_output(
+                            request_id,
+                            f"Creating baremetal port with MAC address {port_attributes['address']} for {device.name}\n",
+                        )
+                        openstack.baremetal_port_create(port_attributes)
+                    else:
+                        node_ports.remove(port[0])
                 for node_port in node_ports:
                     # NOTE: Delete remaining ports not found in NetBox
                     osism_utils.push_task_output(
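The rewritten loop above is a set reconciliation keyed on MAC address: NetBox-derived ports with no Ironic counterpart are created, matches are consumed, and whatever remains in node_ports is deleted afterwards. A dict-based sketch of the same three-way split:

def reconcile_ports(netbox_ports, ironic_ports):
    # Returns (to_create, to_delete); matched ports are simply consumed.
    to_create, remaining = [], list(ironic_ports)
    for attrs in netbox_ports:
        match = [p for p in remaining if attrs["address"].upper() == p["address"].upper()]
        if not match:
            to_create.append(attrs)
        else:
            remaining.remove(match[0])
    return to_create, remaining

create, delete = reconcile_ports(
    [{"address": "aa:bb:cc:dd:ee:01"}, {"address": "aa:bb:cc:dd:ee:02"}],
    [{"address": "AA:BB:CC:DD:EE:02"}, {"address": "AA:BB:CC:DD:EE:03"}],
)
print(create)  # [{'address': 'aa:bb:cc:dd:ee:01'}] -> baremetal_port_create
print(delete)  # [{'address': 'AA:BB:CC:DD:EE:03'}] -> removed as stale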
osism/tasks/conductor/sonic/__init__.py ADDED
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: Apache-2.0
+
+"""SONiC configuration management package."""
+
+from .config_generator import generate_sonic_config
+from .exporter import save_config_to_netbox, export_config_to_file
+from .sync import sync_sonic
+from .connections import (
+    get_connected_interfaces,
+    get_connected_device_for_sonic_interface,
+    get_connected_device_via_interface,
+    find_interconnected_devices,
+    get_device_bgp_neighbors_via_loopback,
+)
+
+__all__ = [
+    "generate_sonic_config",
+    "save_config_to_netbox",
+    "export_config_to_file",
+    "sync_sonic",
+    "get_connected_interfaces",
+    "get_connected_device_for_sonic_interface",
+    "get_connected_device_via_interface",
+    "find_interconnected_devices",
+    "get_device_bgp_neighbors_via_loopback",
+]
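Since the package re-exports its public names, callers can import from the package root instead of the individual submodules, e.g.:

from osism.tasks.conductor.sonic import generate_sonic_config, sync_sonic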