osism 0.20250621.0__py3-none-any.whl → 0.20250627.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
osism/api.py CHANGED
@@ -79,27 +79,71 @@ baremetal_events = BaremetalEvents()
 
 @app.get("/")
 async def root():
-    return {"message": "Hello World"}
+    return {"result": "ok"}
 
 
-@app.post("/meters/sink")
+@app.get("/v1")
+async def v1():
+    return {"result": "ok"}
+
+
+@app.post("/v1/meters/sink")
 async def write_sink_meters(request: Request):
     data = await request.json()
 
 
-@app.post("/events/sink")
+@app.post("/v1/events/sink")
 async def write_sink_events(request: Request):
     data = await request.json()
 
 
-@app.post("/notifications/baremetal", status_code=204)
+@app.post("/v1/notifications/baremetal", status_code=204)
 async def notifications_baremetal(notification: NotificationBaremetal) -> None:
 
     handler = baremetal_events.get_handler(notification.event_type)
     handler(notification.payload)
 
 
-@app.post("/webhook/netbox", response_model=WebhookNetboxResponse, status_code=200)
+@app.post("/v1/switches/{identifier}/ztp/complete")
+async def switches_ztp_complete(identifier: str):
+    if not utils.nb:
+        return {"result": "netbox not enabled"}
+
+    device = None
+
+    # Search by device name
+    devices = utils.nb.dcim.devices.filter(name=identifier)
+    if devices:
+        device = devices[0]
+
+    # Search by inventory_hostname custom field
+    if not device:
+        devices = utils.nb.dcim.devices.filter(cf_inventory_hostname=identifier)
+        if devices:
+            device = devices[0]
+
+    # Search by serial number
+    if not device:
+        devices = utils.nb.dcim.devices.filter(serial=identifier)
+        if devices:
+            device = devices[0]
+
+    if device:
+        logger.info(
+            f"Found device {device.name} for ZTP complete with identifier {identifier}"
+        )
+
+        # Set provision_state custom field to active
+        device.custom_fields["provision_state"] = "active"
+        device.save()
+
+        return {"result": "ok", "device": device.name}
+    else:
+        logger.warning(f"No device found for ZTP complete with identifier {identifier}")
+        return {"result": "device not found"}
+
+
+@app.post("/v1/webhook/netbox", response_model=WebhookNetboxResponse, status_code=200)
 async def webhook(
     webhook_input: WebhookNetboxData,
     request: Request,
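All API routes now live under a versioned `/v1` prefix, and a new endpoint lets a switch report ZTP completion, which sets the device's `provision_state` custom field to `active` in NetBox. A minimal client sketch; the base URL and the identifier `switch-1` are assumptions, only the routes and response shapes come from the diff above:

```python
# Sketch only: the listen address is deployment-specific.
import requests

BASE = "http://localhost:8000"  # assumed osism API address

# Liveness of the versioned API
print(requests.get(f"{BASE}/v1").json())  # expected: {"result": "ok"}

# Report ZTP completion; the identifier may be the NetBox device name,
# the inventory_hostname custom field, or the serial number.
r = requests.post(f"{BASE}/v1/switches/switch-1/ztp/complete")
print(r.json())  # {"result": "ok", "device": "..."} or {"result": "device not found"}
```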
osism/commands/manage.py CHANGED
@@ -1,5 +1,8 @@
 # SPDX-License-Identifier: Apache-2.0
 
+import json
+import os
+from datetime import datetime
 from re import findall
 from urllib.parse import urljoin
 
@@ -7,10 +10,12 @@ from cliff.command import Command
 import docker
 from jinja2 import Template
 from loguru import logger
+import paramiko
 import requests
 
 from osism.data import TEMPLATE_IMAGE_CLUSTERAPI, TEMPLATE_IMAGE_OCTAVIA
 from osism.tasks import openstack, ansible, handle_task
+from osism import utils
 
 SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.31", "1.32", "1.33"]
 
@@ -384,3 +389,249 @@ class Dnsmasq(Command):
         )
 
         return handle_task(task, wait, format="log", timeout=300)
+
+
+class Sonic(Command):
+    def get_parser(self, prog_name):
+        parser = super(Sonic, self).get_parser(prog_name)
+        parser.add_argument(
+            "hostname", type=str, help="Hostname of the SONiC switch to manage"
+        )
+        parser.add_argument(
+            "--reload",
+            action="store_true",
+            help="Execute config reload after config load to restart services",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        hostname = parsed_args.hostname
+        reload_config = parsed_args.reload
+        today = datetime.now().strftime("%Y%m%d")
+
+        try:
+            # Get device from NetBox - try by name first, then by inventory_hostname
+            device = utils.nb.dcim.devices.get(name=hostname)
+            if not device:
+                # Try to find by inventory_hostname custom field
+                devices = utils.nb.dcim.devices.filter(cf_inventory_hostname=hostname)
+                if devices:
+                    device = devices[0]  # Take the first match
+                    logger.info(f"Device found by inventory_hostname: {device.name}")
+                else:
+                    logger.error(
+                        f"Device {hostname} not found in NetBox (searched by name and inventory_hostname)"
+                    )
+                    return 1
+
+            # Get device configuration from local_context_data
+            if (
+                not hasattr(device, "local_context_data")
+                or not device.local_context_data
+            ):
+                logger.error(f"Device {hostname} has no local_context_data in NetBox")
+                return 1
+
+            config_context = device.local_context_data
+
+            # Save config context to local /tmp directory
+            config_context_file = f"/tmp/config_db_{hostname}_{today}.json"
+            try:
+                with open(config_context_file, "w") as f:
+                    json.dump(config_context, f, indent=2)
+                logger.info(f"Config context saved to {config_context_file}")
+            except Exception as e:
+                logger.error(f"Failed to save config context: {e}")
+                return 1
+
+            # Extract SSH connection details
+            ssh_host = None
+            ssh_username = None
+
+            # Try to get SSH details from config context
+            if "management" in config_context:
+                mgmt = config_context["management"]
+                if "ip" in mgmt:
+                    ssh_host = mgmt["ip"]
+                if "username" in mgmt:
+                    ssh_username = mgmt["username"]
+
+            # Fallback: try to get OOB IP from NetBox
+            if not ssh_host:
+                from osism.tasks.conductor.netbox import get_device_oob_ip
+
+                oob_result = get_device_oob_ip(device)
+                if oob_result:
+                    ssh_host = oob_result[0]
+
+            if not ssh_host:
+                logger.error(f"No SSH host found for device {hostname}")
+                return 1
+
+            if not ssh_username:
+                ssh_username = "admin"  # Default SONiC username
+
+            # SSH private key path
+            ssh_key_path = "/ansible/secrets/id_rsa.operator"
+
+            if not os.path.exists(ssh_key_path):
+                logger.error(f"SSH private key not found at {ssh_key_path}")
+                return 1
+
+            logger.info(
+                f"Connecting to {hostname} ({ssh_host}) to backup SONiC configuration"
+            )
+
+            # Create SSH connection
+            ssh = paramiko.SSHClient()
+            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+            try:
+                # Connect with private key
+                ssh.connect(
+                    hostname=ssh_host,
+                    username=ssh_username,
+                    key_filename=ssh_key_path,
+                    timeout=30,
+                )
+
+                # Generate backup filename with date and increment on switch
+                base_backup_path = f"/home/admin/config_db_{hostname}_{today}"
+                backup_filename = f"{base_backup_path}_1.json"
+
+                # Find next available filename on switch
+                x = 1
+                while True:
+                    check_cmd = f"ls {base_backup_path}_{x}.json 2>/dev/null"
+                    stdin, stdout, stderr = ssh.exec_command(check_cmd)
+                    if stdout.read().decode().strip() == "":
+                        backup_filename = f"{base_backup_path}_{x}.json"
+                        break
+                    x += 1
+
+                logger.info(
+                    f"Backing up current configuration on switch to {backup_filename}"
+                )
+
+                # Backup current configuration on switch
+                backup_cmd = f"sudo cp /etc/sonic/config_db.json {backup_filename}"
+                stdin, stdout, stderr = ssh.exec_command(backup_cmd)
+                exit_status = stdout.channel.recv_exit_status()
+
+                if exit_status != 0:
+                    error_msg = stderr.read().decode()
+                    logger.error(
+                        f"Failed to backup configuration on switch: {error_msg}"
+                    )
+                    return 1
+
+                logger.info("Configuration backed up successfully on switch")
+
+                # Upload local config context to switch /tmp directory
+                switch_config_file = f"/tmp/config_db_{hostname}_current.json"
+                logger.info(
+                    f"Uploading config context to {switch_config_file} on switch"
+                )
+
+                # Use SFTP to upload the config context file
+                sftp = ssh.open_sftp()
+                try:
+                    sftp.put(config_context_file, switch_config_file)
+                    logger.info(
+                        f"Config context successfully uploaded to {switch_config_file} on switch"
+                    )
+                except Exception as e:
+                    logger.error(f"Failed to upload config context to switch: {e}")
+                    return 1
+                finally:
+                    sftp.close()
+
+                # Load and apply the new configuration
+                logger.info("Loading and applying new configuration on switch")
+
+                load_cmd = f"sudo config load -y {switch_config_file}"
+                stdin, stdout, stderr = ssh.exec_command(load_cmd)
+                exit_status = stdout.channel.recv_exit_status()
+
+                if exit_status != 0:
+                    error_msg = stderr.read().decode()
+                    logger.error(f"Failed to load configuration: {error_msg}")
+                    return 1
+
+                logger.info("Configuration loaded and applied successfully")
+
+                # Optionally reload configuration to restart services
+                config_operations_successful = True
+                if reload_config:
+                    logger.info("Reloading configuration to restart services")
+
+                    reload_cmd = "sudo config reload -y"
+                    stdin, stdout, stderr = ssh.exec_command(reload_cmd)
+                    exit_status = stdout.channel.recv_exit_status()
+
+                    if exit_status != 0:
+                        error_msg = stderr.read().decode()
+                        logger.error(f"Failed to reload configuration: {error_msg}")
+                        config_operations_successful = False
+                    else:
+                        logger.info("Configuration reloaded successfully")
+
+                # Save configuration only if load (and optionally reload) were successful
+                if config_operations_successful:
+                    logger.info("Saving configuration to persist changes")
+
+                    save_cmd = "sudo config save -y"
+                    stdin, stdout, stderr = ssh.exec_command(save_cmd)
+                    exit_status = stdout.channel.recv_exit_status()
+
+                    if exit_status != 0:
+                        error_msg = stderr.read().decode()
+                        logger.error(f"Failed to save configuration: {error_msg}")
+                        return 1
+
+                    logger.info("Configuration saved successfully")
+                else:
+                    logger.warning("Skipping config save due to reload failure")
+
+                # Delete the temporary configuration file
+                logger.info(f"Cleaning up temporary file {switch_config_file}")
+
+                delete_cmd = f"rm {switch_config_file}"
+                stdin, stdout, stderr = ssh.exec_command(delete_cmd)
+                exit_status = stdout.channel.recv_exit_status()
+
+                if exit_status != 0:
+                    error_msg = stderr.read().decode()
+                    logger.warning(f"Failed to delete temporary file: {error_msg}")
+                else:
+                    logger.info("Temporary file deleted successfully")
+
+                logger.info("SONiC configuration management completed successfully")
+                logger.info(f"- Config context saved locally to: {config_context_file}")
+                if reload_config and config_operations_successful:
+                    logger.info("- Configuration loaded, reloaded, and saved on switch")
+                elif config_operations_successful:
+                    logger.info("- Configuration loaded and saved on switch")
+                else:
+                    logger.info(
+                        "- Configuration loaded on switch (save skipped due to reload failure)"
+                    )
+                logger.info(f"- Backup created on switch: {backup_filename}")
+
+                return 0
+
+            except paramiko.AuthenticationException:
+                logger.error(f"Authentication failed for {ssh_host}")
+                return 1
+            except paramiko.SSHException as e:
+                logger.error(f"SSH connection failed: {e}")
+                return 1
+            except Exception as e:
+                logger.error(f"Unexpected error during SSH operations: {e}")
+                return 1
+            finally:
+                ssh.close()
+
+        except Exception as e:
+            logger.error(f"Error managing SONiC device {hostname}: {e}")
+            return 1
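The entry-point change further below wires this class up as `osism manage sonic <hostname> [--reload]` (replacing the old `osism.commands.sync:Sonic` target). On the switch, the running `/etc/sonic/config_db.json` is backed up to `/home/admin/config_db_<hostname>_<YYYYMMDD>_<n>.json`, where `<n>` is the first free index for that day. A small standalone sketch of that naming scheme; the hostname and the `existing` list are hypothetical:

```python
# Illustration of the backup-name increment used above; not part of the package.
from datetime import datetime


def next_backup_name(hostname, existing):
    """Return the first /home/admin/config_db_<hostname>_<YYYYMMDD>_<n>.json not in 'existing'."""
    today = datetime.now().strftime("%Y%m%d")
    base = f"/home/admin/config_db_{hostname}_{today}"
    x = 1
    while f"{base}_{x}.json" in existing:
        x += 1
    return f"{base}_{x}.json"


first = next_backup_name("leaf-1", [])
print(first)                                # .../config_db_leaf-1_<YYYYMMDD>_1.json
print(next_backup_name("leaf-1", [first]))  # .../config_db_leaf-1_<YYYYMMDD>_2.json
```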
@@ -38,13 +38,20 @@ class Sync(Command):
             type=int,
             help="Timeout for a scheduled task that has not been executed yet",
         )
+        parser.add_argument(
+            "--flush-cache",
+            default=False,
+            help="Flush cache before running sync",
+            action="store_true",
+        )
         return parser
 
     def take_action(self, parsed_args):
         wait = not parsed_args.no_wait
         task_timeout = parsed_args.task_timeout
+        flush_cache = parsed_args.flush_cache
 
-        t = reconciler.run.delay(publish=wait)
+        t = reconciler.run.delay(publish=wait, flush_cache=flush_cache)
         if wait:
             logger.info(
                 f"Task {t.task_id} (sync inventory) is running in background. Output coming soon."
@@ -235,23 +235,21 @@ def sync_ironic(request_id, get_ironic_parameters, force_update=False):
                 details=False, attributes=dict(node_uuid=node["uuid"])
             )
             # NOTE: Baremetal ports are only required for (i)pxe boot
-            if node["boot_interface"] in ["pxe", "ipxe"]:
-                for port_attributes in ports_attributes:
-                    port_attributes.update({"node_id": node["uuid"]})
-                    port = [
-                        port
-                        for port in node_ports
-                        if port_attributes["address"].upper()
-                        == port["address"].upper()
-                    ]
-                    if not port:
-                        osism_utils.push_task_output(
-                            request_id,
-                            f"Creating baremetal port with MAC address {port_attributes['address']} for {device.name}\n",
-                        )
-                        openstack.baremetal_port_create(port_attributes)
-                    else:
-                        node_ports.remove(port[0])
+            for port_attributes in ports_attributes:
+                port_attributes.update({"node_id": node["uuid"]})
+                port = [
+                    port
+                    for port in node_ports
+                    if port_attributes["address"].upper() == port["address"].upper()
+                ]
+                if not port:
+                    osism_utils.push_task_output(
+                        request_id,
+                        f"Creating baremetal port with MAC address {port_attributes['address']} for {device.name}\n",
+                    )
+                    openstack.baremetal_port_create(port_attributes)
+                else:
+                    node_ports.remove(port[0])
             for node_port in node_ports:
                 # NOTE: Delete remaining ports not found in NetBox
                 osism_utils.push_task_output(
@@ -29,7 +29,6 @@ from .interface import (
 from .connections import (
     get_connected_interfaces,
     get_connected_device_for_sonic_interface,
-    get_device_bgp_neighbors_via_loopback,
 )
 from .cache import get_cached_device_interfaces
 
@@ -305,7 +304,7 @@ def _add_port_configurations(
         interface_speed = int(port_speed) if port_speed else None
         is_breakout_port = port_name in breakout_info["breakout_ports"]
         correct_alias = convert_sonic_interface_to_alias(
-            port_name, interface_speed, is_breakout_port
+            port_name, interface_speed, is_breakout_port, port_config
         )
 
         # Use master port index for breakout ports
@@ -457,7 +456,7 @@ def _add_missing_breakout_ports(
         # Generate correct alias (breakout port always gets subport notation)
         interface_speed = int(port_speed)
         correct_alias = convert_sonic_interface_to_alias(
-            port_name, interface_speed, is_breakout=True
+            port_name, interface_speed, is_breakout=True, port_config=port_config
         )
 
         # Use master port index for breakout ports
@@ -608,11 +607,6 @@ def _add_bgp_configurations(
             "v6only": "true",
         }
 
-    # Add additional BGP_NEIGHBOR configuration using Loopback0 IP addresses
-    _add_loopback_bgp_neighbors(
-        config, device, portchannel_info, connected_interfaces, device_as_mapping
-    )
-
 
 def _get_connected_device_for_interface(device, interface_name):
     """Get the connected device for a given interface name.
@@ -652,10 +646,40 @@ def _determine_peer_type(local_device, connected_device, device_as_mapping=None)
         connected_as = None
         if device_as_mapping and connected_device.id in device_as_mapping:
             connected_as = device_as_mapping[connected_device.id]
-        elif connected_device.primary_ip4:
-            connected_as = calculate_local_asn_from_ipv4(
-                str(connected_device.primary_ip4.address)
-            )
+        else:
+            # If connected device is not in device_as_mapping, check if it's a spine/superspine
+            # and calculate AS for its group
+            if connected_device.role and connected_device.role.slug in [
+                "spine",
+                "superspine",
+            ]:
+                # Import here to avoid circular imports
+                from .bgp import calculate_minimum_as_for_group
+                from .connections import find_interconnected_devices
+
+                # Get all devices to find the group
+                all_devices = list(
+                    utils.nb.dcim.devices.filter(role=["spine", "superspine"])
+                )
+                spine_groups = find_interconnected_devices(
+                    all_devices, ["spine", "superspine"]
+                )
+
+                # Find which group the connected device belongs to
+                for group in spine_groups:
+                    if any(dev.id == connected_device.id for dev in group):
+                        connected_as = calculate_minimum_as_for_group(group)
+                        if connected_as:
+                            logger.debug(
+                                f"Calculated AS {connected_as} for connected spine/superspine device {connected_device.name}"
+                            )
+                        break
+
+            # Fallback to calculating from IPv4 if still no AS
+            if not connected_as and connected_device.primary_ip4:
+                connected_as = calculate_local_asn_from_ipv4(
+                    str(connected_device.primary_ip4.address)
+                )
 
         # Compare AS numbers
         if local_as and connected_as and local_as == connected_as:
@@ -670,30 +694,91 @@ def _determine_peer_type(local_device, connected_device, device_as_mapping=None)
         return "external"  # Default to external on error
 
 
-def _add_loopback_bgp_neighbors(
-    config, device, portchannel_info, connected_interfaces, device_as_mapping=None
-):
-    """Add BGP_NEIGHBOR configuration using Loopback0 IP addresses from connected devices."""
+def _get_ntp_server_for_device(device):
+    """Get single NTP server IP for a SONiC device based on OOB connection to metalbox.
+
+    Returns the IP address of the metalbox device interface that is connected to the
+    OOB switch. If VLANs are used, returns the IP of the VLAN interface where the
+    SONiC switch management interface (eth0) has access.
+
+    Args:
+        device: SONiC device object
+
+    Returns:
+        str: IP address of the NTP server or None if not found
+    """
     try:
-        # Get BGP neighbors via loopback using the new connections module
-        bgp_neighbors = get_device_bgp_neighbors_via_loopback(
-            device, portchannel_info, connected_interfaces, config["PORT"]
-        )
+        # Get the OOB IP configuration for this SONiC device
+        oob_ip_result = get_device_oob_ip(device)
+        if not oob_ip_result:
+            logger.debug(f"No OOB IP found for device {device.name}")
+            return None
 
-        for neighbor_info in bgp_neighbors:
-            neighbor_key = f"default|{neighbor_info['ip']}"
+        oob_ip, prefix_len = oob_ip_result
+        logger.debug(f"Device {device.name} has OOB IP {oob_ip}/{prefix_len}")
 
-            # Determine peer_type based on AS comparison
-            peer_type = _determine_peer_type(
-                device,
-                neighbor_info["device"],
-                device_as_mapping,
-            )
+        # Find the network/subnet that contains this OOB IP
+        from ipaddress import IPv4Network, IPv4Address
+
+        device_network = IPv4Network(f"{oob_ip}/{prefix_len}", strict=False)
 
-            config["BGP_NEIGHBOR"][neighbor_key] = {"peer_type": peer_type}
+        # Get all metalbox devices
+        metalbox_devices = utils.nb.dcim.devices.filter(role="metalbox")
+
+        for metalbox in metalbox_devices:
+            logger.debug(f"Checking metalbox device {metalbox.name} for NTP server")
+
+            # Get all interfaces on this metalbox
+            interfaces = utils.nb.dcim.interfaces.filter(device_id=metalbox.id)
+
+            for interface in interfaces:
+                # Skip management-only interfaces
+                if hasattr(interface, "mgmt_only") and interface.mgmt_only:
+                    continue
+
+                # Check both physical interfaces and VLAN interfaces (SVIs)
+                # VLAN interfaces are typically named "Vlan123" for VLAN ID 123
+                is_vlan_interface = (
+                    hasattr(interface, "type")
+                    and interface.type
+                    and interface.type.value == "virtual"
+                    and interface.name.startswith("Vlan")
+                )
+
+                # Get IP addresses for this interface
+                ip_addresses = utils.nb.ipam.ip_addresses.filter(
+                    assigned_object_id=interface.id,
+                )
+
+                for ip_addr in ip_addresses:
+                    if ip_addr.address:
+                        # Extract IP address without prefix
+                        ip_only = ip_addr.address.split("/")[0]
+
+                        # Check if it's IPv4 and in the same network as the SONiC device
+                        try:
+                            metalbox_ip = IPv4Address(ip_only)
+                            if metalbox_ip in device_network:
+                                interface_type = (
+                                    "VLAN interface"
+                                    if is_vlan_interface
+                                    else "interface"
+                                )
+                                logger.info(
+                                    f"Found NTP server {ip_only} on metalbox {metalbox.name} "
+                                    f"{interface_type} {interface.name} for SONiC device {device.name}"
+                                )
+                                return ip_only
+                        except ValueError:
+                            # Skip non-IPv4 addresses
+                            continue
+
+        logger.warning(f"No suitable NTP server found for SONiC device {device.name}")
+        return None
 
     except Exception as e:
-        logger.warning(f"Could not process BGP neighbors for device {device.name}: {e}")
+        logger.warning(f"Could not determine NTP server for device {device.name}: {e}")
+        return None
 
 
 def _get_ntp_servers():
@@ -755,20 +840,27 @@ def _get_ntp_servers():
 
 
 def _add_ntp_configuration(config, device):
-    """Add NTP_SERVER configuration to device config."""
-    try:
-        ntp_servers = _get_ntp_servers()
+    """Add NTP_SERVER configuration to device config.
 
-        # Add NTP servers to this device's configuration
-        for ip, ntp_config in ntp_servers.items():
-            config["NTP_SERVER"][ip] = copy.deepcopy(ntp_config)
-
-        if ntp_servers:
-            logger.debug(
-                f"Added {len(ntp_servers)} NTP servers to device {device.name}"
+    Each SONiC switch gets exactly one NTP server - the IP address of the
+    metalbox device interface connected to the OOB switch.
+    """
+    try:
+        # Get the specific NTP server for this device
+        ntp_server_ip = _get_ntp_server_for_device(device)
+
+        if ntp_server_ip:
+            # Add single NTP server configuration
+            config["NTP_SERVER"][ntp_server_ip] = {
+                "maxpoll": "10",
+                "minpoll": "6",
+                "prefer": "false",
+            }
+            logger.info(
+                f"Added NTP server {ntp_server_ip} to SONiC device {device.name}"
             )
         else:
-            logger.debug(f"No NTP servers found for device {device.name}")
+            logger.warning(f"No NTP server found for SONiC device {device.name}")
 
     except Exception as e:
         logger.warning(f"Could not add NTP configuration to device {device.name}: {e}")
@@ -71,6 +71,7 @@ PORT_CONFIG_PATH = "/etc/sonic/port_config"
 
 # List of supported HWSKUs
 SUPPORTED_HWSKUS = [
+    "Accton-AS4625-54T",
     "Accton-AS5835-54T",
     "Accton-AS5835-54X",
     "Accton-AS7326-56X",
@@ -237,23 +237,43 @@ def _find_sonic_name_by_alias_mapping(interface_name, port_config):
     - tenGigE1 alias maps to Eth1/1/1 or Eth1/1
     - tenGigE48 alias maps to Eth1/48/1 or Eth1/48
     - hundredGigE49 alias maps to Eth1/49/1 or Eth1/49
+    - Eth1(Port1) -> Ethernet0, Eth2(Port2) -> Ethernet1, Eth3(Port) -> Ethernet2
 
     Args:
-        interface_name: NetBox interface name (e.g., "Eth1/1" or "Eth1/1/1")
+        interface_name: NetBox interface name (e.g., "Eth1/1", "Eth1/1/1", or "Eth1(Port1)")
         port_config: Port configuration dictionary
 
     Returns:
         str: SONiC interface name or original name if not found
     """
+    logger.debug(f"Finding SONiC name for interface: '{interface_name}'")
+    logger.debug(f"Port config contains {len(port_config)} entries")
+
+    # Handle new Eth1(Port1) format first
+    paren_match = re.match(r"Eth(\d+)\(Port(\d*)\)", interface_name)
+    if paren_match:
+        eth_num = int(paren_match.group(1))
+        # Map EthX(PortY) to EthernetX-1 (1-based to 0-based conversion)
+        ethernet_num = eth_num - 1
+        sonic_name = f"Ethernet{ethernet_num}"
+        logger.debug(
+            f"Alias mapping: {interface_name} -> {sonic_name} via Eth(Port) format (eth_num={eth_num}, ethernet_num={ethernet_num})"
+        )
+        return sonic_name
+
     # Create reverse mapping: expected NetBox name -> alias -> SONiC name
     for sonic_port, config in port_config.items():
         alias = config.get("alias", "")
         if not alias:
+            logger.debug(f"Skipping {sonic_port}: no alias")
             continue
 
         # Extract number from alias (e.g., tenGigE1 -> 1, hundredGigE49 -> 49)
         alias_match = re.search(r"(\d+)$", alias)
         if not alias_match:
+            logger.debug(
+                f"Skipping {sonic_port}: alias '{alias}' has no trailing number"
+            )
             continue
 
         alias_num = int(alias_match.group(1))
@@ -264,18 +284,25 @@ def _find_sonic_name_by_alias_mapping(interface_name, port_config):
             f"Eth1/{alias_num}/1",  # Breakout format (first subport)
         ]
 
+        logger.debug(
+            f"Checking {sonic_port} (alias='{alias}', alias_num={alias_num}) against expected_names: {expected_names}"
+        )
+
         if interface_name in expected_names:
             logger.debug(
                 f"Alias mapping: {interface_name} -> {sonic_port} via alias {alias}"
             )
             return sonic_port
 
-    logger.warning(f"No alias mapping found for {interface_name}")
+    logger.warning(f"No alias mapping found for '{interface_name}'")
+    logger.debug(
+        f"Available aliases in port_config: {[(sonic_port, config.get('alias', '')) for sonic_port, config in port_config.items()]}"
+    )
     return interface_name
 
 
 def convert_sonic_interface_to_alias(
-    sonic_interface_name, interface_speed=None, is_breakout=False
+    sonic_interface_name, interface_speed=None, is_breakout=False, port_config=None
 ):
     """Convert SONiC interface name to NetBox-style alias.
 
@@ -283,28 +310,134 @@ def convert_sonic_interface_to_alias(
         sonic_interface_name: SONiC interface name (e.g., "Ethernet0", "Ethernet4")
         interface_speed: Interface speed in Mbps (optional, for speed-based calculation)
         is_breakout: Whether this is a breakout port (adds subport notation)
+        port_config: Port configuration dictionary (optional, for alias-based calculation)
 
     Returns:
         str: NetBox-style alias (e.g., "Eth1/1", "Eth1/2" or "Eth1/1/1", "Eth1/1/2" for breakout)
 
     Examples:
-        - Regular 100G ports: Ethernet0 -> Eth1/1, Ethernet4 -> Eth1/2, Ethernet8 -> Eth1/3
-        - Regular other speeds: Ethernet0 -> Eth1/1, Ethernet1 -> Eth1/2, Ethernet2 -> Eth1/3
-        - Breakout ports: Ethernet0 -> Eth1/1/1, Ethernet1 -> Eth1/1/2, Ethernet2 -> Eth1/1/3, Ethernet3 -> Eth1/1/4
+        - Regular ports: Ethernet0 with alias "twentyFiveGigE1" -> Eth1/1
+        - Breakout ports: Ethernet2 with base port alias "twentyFiveGigE1" -> Eth1/1/3
     """
+    logger.debug(
+        f"Converting SONiC interface to alias: {sonic_interface_name}, speed={interface_speed}, is_breakout={is_breakout}"
+    )
+
     # Extract port number from SONiC format (Ethernet0, Ethernet4, etc.)
     match = re.match(r"Ethernet(\d+)", sonic_interface_name)
     if not match:
         # If it doesn't match expected pattern, return as-is
+        logger.debug(
+            f"Interface {sonic_interface_name} doesn't match Ethernet pattern, returning as-is"
+        )
         return sonic_interface_name
 
-    sonic_port_number = int(match.group(1))
+    ethernet_num = int(match.group(1))
+    logger.debug(f"Extracted ethernet_num: {ethernet_num}")
+
+    # If port_config is provided, use alias-based calculation
+    if port_config:
+        return _convert_using_port_config(
+            sonic_interface_name, ethernet_num, is_breakout, port_config
+        )
+
+    # Fallback to legacy speed-based calculation
+    return _convert_using_speed_calculation(ethernet_num, interface_speed, is_breakout)
+
+
+def _convert_using_port_config(
+    sonic_interface_name, ethernet_num, is_breakout, port_config
+):
+    """Convert using port config alias information."""
+    if is_breakout:
+        # For breakout ports, find the base port in port_config
+        base_port_name = _find_base_port_for_breakout(ethernet_num, port_config)
+        if base_port_name and base_port_name in port_config:
+            base_alias = port_config[base_port_name].get("alias", "")
+            # Extract port number from base alias
+            sonic_port_number = _extract_port_number_from_alias(base_alias)
+            if sonic_port_number is not None:
+                # Calculate subport number: how many ports after the base port
+                base_ethernet_num = int(base_port_name.replace("Ethernet", ""))
+                subport = (ethernet_num - base_ethernet_num) + 1
+
+                module = 1
+                result = f"Eth{module}/{sonic_port_number}/{subport}"
+                logger.debug(
+                    f"Breakout conversion using port config: {sonic_interface_name} -> {result} "
+                    f"(base_port={base_port_name}, base_alias={base_alias}, sonic_port_number={sonic_port_number}, subport={subport})"
+                )
+                return result
+
+        # Fallback if base port not found
+        logger.warning(
+            f"Could not find base port for breakout interface {sonic_interface_name}"
+        )
+        return _convert_using_speed_calculation(ethernet_num, None, is_breakout)
+    else:
+        # For regular ports, get alias directly
+        if sonic_interface_name in port_config:
+            alias = port_config[sonic_interface_name].get("alias", "")
+            sonic_port_number = _extract_port_number_from_alias(alias)
+            if sonic_port_number is not None:
+                module = 1
+                result = f"Eth{module}/{sonic_port_number}"
+                logger.debug(
+                    f"Regular conversion using port config: {sonic_interface_name} -> {result} "
+                    f"(alias={alias}, sonic_port_number={sonic_port_number})"
+                )
+                return result
+
+        # Fallback if not in port config
+        logger.warning(f"Interface {sonic_interface_name} not found in port config")
+        return _convert_using_speed_calculation(ethernet_num, None, is_breakout)
+
+
+def _find_base_port_for_breakout(ethernet_num, port_config):
+    """Find the base port for a breakout interface.
+
+    The base port is the next smaller or equal port that exists in port_config.
+    E.g., for Ethernet2 -> check Ethernet2, Ethernet1, Ethernet0 until found.
+    """
+    for base_num in range(ethernet_num, -1, -1):
+        base_port_name = f"Ethernet{base_num}"
+        if base_port_name in port_config:
+            logger.debug(
+                f"Found base port {base_port_name} for breakout interface Ethernet{ethernet_num}"
+            )
+            return base_port_name
+
+    logger.warning(f"No base port found for breakout interface Ethernet{ethernet_num}")
+    return None
+
+
+def _extract_port_number_from_alias(alias):
+    """Extract the port number from the end of an alias.
+
+    E.g., "twentyFiveGigE1" -> 1, "hundredGigE49" -> 49
+    """
+    if not alias:
+        return None
+
+    match = re.search(r"(\d+)$", alias)
+    if match:
+        port_number = int(match.group(1))
+        logger.debug(f"Extracted port number {port_number} from alias '{alias}'")
+        return port_number
+
+    logger.warning(f"Could not extract port number from alias '{alias}'")
+    return None
+
+
+def _convert_using_speed_calculation(ethernet_num, interface_speed, is_breakout):
+    """Legacy speed-based conversion (fallback)."""
+    logger.debug(f"Using legacy speed-based calculation for Ethernet{ethernet_num}")
 
     if is_breakout:
         # For breakout ports: Ethernet0 -> Eth1/1/1, Ethernet1 -> Eth1/1/2, etc.
         # Calculate base port (master port) and subport number
-        base_port = (sonic_port_number // 4) * 4  # Get base port (0, 4, 8, 12, ...)
-        subport = (sonic_port_number % 4) + 1  # Get subport number (1, 2, 3, 4)
+        base_port = (ethernet_num // 4) * 4  # Get base port (0, 4, 8, 12, ...)
+        subport = (ethernet_num % 4) + 1  # Get subport number (1, 2, 3, 4)
 
         # Calculate physical port number for the base port
         physical_port = (base_port // 4) + 1  # Convert to 1-based indexing
@@ -312,7 +445,11 @@ def convert_sonic_interface_to_alias(
         # Assume module 1 for now - could be extended for multi-module systems
         module = 1
 
-        return f"Eth{module}/{physical_port}/{subport}"
+        result = f"Eth{module}/{physical_port}/{subport}"
+        logger.debug(
+            f"Breakout conversion: base_port={base_port}, subport={subport}, physical_port={physical_port}, result={result}"
+        )
+        return result
     else:
         # For regular ports: use speed-based calculation
         # Determine speed category and multiplier
@@ -323,15 +460,21 @@ def convert_sonic_interface_to_alias(
             # Default for 1G, 10G, 25G ports - sequential numbering
             multiplier = 1
 
+        logger.debug(
+            f"Regular port calculation: interface_speed={interface_speed}, in_high_speed={interface_speed in HIGH_SPEED_PORTS if interface_speed else False}, multiplier={multiplier}"
+        )
+
         # Calculate physical port number
-        physical_port = (
-            sonic_port_number // multiplier
-        ) + 1  # Convert to 1-based indexing
+        physical_port = (ethernet_num // multiplier) + 1  # Convert to 1-based indexing
 
         # Assume module 1 for now - could be extended for multi-module systems
         module = 1
 
-        return f"Eth{module}/{physical_port}"
+        result = f"Eth{module}/{physical_port}"
+        logger.debug(
+            f"Regular conversion: ethernet_num={ethernet_num}, physical_port={physical_port}, result={result}"
+        )
+        return result
 
 
 def get_port_config(hwsku):
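To make the two naming directions concrete: the parenthesized NetBox style now maps straight to zero-based SONiC names, and the reverse conversion prefers the trailing number of the port-config alias over the old speed heuristic. A small standalone illustration using the same regular expressions; the sample `port_config` entry is invented:

```python
import re

# NetBox "EthX(PortY)" -> SONiC "Ethernet(X-1)", mirroring _find_sonic_name_by_alias_mapping
def paren_to_sonic(name):
    m = re.match(r"Eth(\d+)\(Port(\d*)\)", name)
    return f"Ethernet{int(m.group(1)) - 1}" if m else name

print(paren_to_sonic("Eth3(Port3)"))  # Ethernet2

# SONiC -> NetBox alias via the trailing number of the port_config alias,
# mirroring _convert_using_port_config / _extract_port_number_from_alias
port_config = {"Ethernet0": {"alias": "twentyFiveGigE1"}}  # invented example entry
alias_num = int(re.search(r"(\d+)$", port_config["Ethernet0"]["alias"]).group(1))
print(f"Eth1/{alias_num}")  # Eth1/1
```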
@@ -558,9 +701,14 @@ def detect_breakout_ports(device):
                 )
                 continue
 
+            # Calculate physical port number (1/1 -> port 1, 1/2 -> port 2, etc.)
+            physical_port_num = f"{module}/{port}"
+
             # Add breakout config for master port
             breakout_cfgs[master_port] = {
+                "breakout_owner": "MANUAL",
                 "brkout_mode": brkout_mode,
+                "port": physical_port_num,
             }
 
             # Add all subports to breakout_ports
@@ -634,12 +782,15 @@ def detect_breakout_ports(device):
             else:
                 continue  # Skip unsupported speeds
 
-            # Calculate physical port number
-            physical_port_num = (base_port // 4) + 1
+            # Calculate physical port number (Ethernet0-3 -> port 1/1, Ethernet4-7 -> port 1/2, etc.)
+            physical_port_index = (base_port // 4) + 1
+            physical_port_num = f"1/{physical_port_index}"
 
             # Add breakout config for master port
             breakout_cfgs[master_port] = {
+                "breakout_owner": "MANUAL",
                 "brkout_mode": brkout_mode,
+                "port": physical_port_num,
             }
 
             # Add all ports to breakout_ports
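Both code paths now emit the extra `breakout_owner` and `port` keys, so the master port of a four-way breakout on the first physical port would produce an entry shaped roughly like this; the mode string is an illustrative value, only the key names come from the diff:

```python
# Illustrative BREAKOUT_CFG entry for a breakout master port.
breakout_cfgs = {
    "Ethernet0": {
        "breakout_owner": "MANUAL",
        "brkout_mode": "4x25G",  # example mode string
        "port": "1/1",           # module/physical port of the master port
    }
}
```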
@@ -82,7 +82,32 @@ def sync_sonic(device_name=None, task_id=None, show_diff=True):
     logger.info(f"Found {len(devices)} devices matching criteria")
 
     # Find interconnected spine/superspine groups for special AS calculation
-    spine_groups = find_interconnected_devices(devices, ["spine", "superspine"])
+    # When processing a single device, we need to consider all spine/superspine devices
+    # to properly detect interconnected groups, not just the requested device
+    if device_name and devices:
+        # Check if the single device is a spine/superspine
+        target_device = devices[0]
+        if target_device.role and target_device.role.slug in ["spine", "superspine"]:
+            # Fetch ALL spine/superspine devices to properly detect groups
+            logger.debug(
+                "Single spine/superspine device detected, fetching all spine/superspine devices for group detection"
+            )
+            all_spine_devices = []
+            nb_device_query_list = get_nb_device_query_list_sonic()
+            for nb_device_query in nb_device_query_list:
+                for device in utils.nb.dcim.devices.filter(**nb_device_query):
+                    if device.role and device.role.slug in ["spine", "superspine"]:
+                        all_spine_devices.append(device)
+            spine_groups = find_interconnected_devices(
+                all_spine_devices, ["spine", "superspine"]
+            )
+        else:
+            # For non-spine devices, use the original logic
+            spine_groups = find_interconnected_devices(devices, ["spine", "superspine"])
+    else:
+        # For multi-device processing, use the original logic
+        spine_groups = find_interconnected_devices(devices, ["spine", "superspine"])
+
     logger.info(f"Found {len(spine_groups)} interconnected spine/superspine groups")
 
     # Create mapping from device ID to its assigned AS number
osism/tasks/reconciler.py CHANGED
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import io
+import os
 import subprocess
 
 from celery import Celery
@@ -27,7 +28,7 @@ def setup_periodic_tasks(sender, **kwargs):
 
 
 @app.task(bind=True, name="osism.tasks.reconciler.run")
-def run(self, publish=True):
+def run(self, publish=True, flush_cache=False):
     lock = Redlock(
         key="lock_osism_tasks_reconciler_run",
         masters={utils.redis},
@@ -36,8 +37,17 @@ def run(self, publish=True):
 
     if lock.acquire(timeout=20):
         logger.info("RUN /run.sh")
+
+        env = os.environ.copy()
+        if flush_cache:
+            env["FLUSH_CACHE"] = "true"
+
         p = subprocess.Popen(
-            "/run.sh", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+            "/run.sh",
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            env=env,
         )
 
         if publish:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: osism
-Version: 0.20250621.0
+Version: 0.20250627.0
 Summary: OSISM manager interface
 Home-page: https://github.com/osism/python-osism
 Author: OSISM GmbH
@@ -32,7 +32,7 @@ Requires-Dist: cliff==4.10.0
 Requires-Dist: deepdiff==8.5.0
 Requires-Dist: docker==7.1.0
 Requires-Dist: dtrack-auditor==1.5.0
-Requires-Dist: fastapi==0.115.13
+Requires-Dist: fastapi==0.115.14
 Requires-Dist: flower==2.0.1
 Requires-Dist: hiredis==3.2.1
 Requires-Dist: jc==1.25.5
@@ -41,8 +41,8 @@ Requires-Dist: kombu==5.5.4
 Requires-Dist: kubernetes==33.1.0
 Requires-Dist: loguru==0.7.3
 Requires-Dist: nbcli==0.10.0.dev2
-Requires-Dist: netmiko==4.5.0
 Requires-Dist: openstacksdk==4.6.0
+Requires-Dist: paramiko==3.5.1
 Requires-Dist: pottery==3.0.1
 Requires-Dist: prompt-toolkit==3.0.51
 Requires-Dist: pynetbox==7.5.0
@@ -1,6 +1,6 @@
 osism/__init__.py,sha256=1UiNTBus0V0f2AbZQzAtVtu6zkfCCrw0OTq--NwFAqY,341
 osism/__main__.py,sha256=ILe4gu61xEISiBsxanqTQIdSkV-YhpZXTRlguCYyssk,141
-osism/api.py,sha256=t3HebSzk6fyY7bLJD9P95oEL1qWYXzpX6Yk1o_nVkMo,4356
+osism/api.py,sha256=cvFLczibM6Hrc3KWDNN4daUauyLF7zpuU1jll-5ywPI,5585
 osism/main.py,sha256=Dt2-9sLXcS-Ny4DAz7hrha-KRc7zd7BFUTRdfs_X8z4,893
 osism/settings.py,sha256=bebPBT6Hd1-KhJfwZdFR-s8eMwV4B1IFr-WrQBkOrWw,1786
 osism/actions/__init__.py,sha256=bG7Ffen4LvQtgnYPFEpFccsWs81t4zqqeqn9ZeirH6E,38
@@ -14,10 +14,10 @@ osism/commands/console.py,sha256=8BPz1hio5Wi6kONVAWFuSqkDRrMcLEYeFIY8dbtN6e4,321
 osism/commands/container.py,sha256=Fku2GaCM3Idq_FxExUtNqjrEM0XYjpVvXmueSVO8S_c,1601
 osism/commands/get.py,sha256=ryytjtXWmlMV0NucP5tGkMZu0nIlC4xVtjRk4iMZ06c,8967
 osism/commands/log.py,sha256=2IpYuosC7FZwwLvM8HmKSU1NRNIelVVYzqjjVMCrOJk,4072
-osism/commands/manage.py,sha256=FaO9dbYjNHYanS98-zC498bx26oU8E3loxCczH9mfKI,12751
+osism/commands/manage.py,sha256=uzfmt3R0PJ4HxUw_V945pN0FbKb3zhyiBuD9br1ORYE,23149
 osism/commands/netbox.py,sha256=e65P0kWrjTLw2T9HZthxjDTIRa-KAHgSSJAlvVef7n4,7345
 osism/commands/noset.py,sha256=7zDFuFMyNpo7DUOKcNiYV8nodtdMOYFp5LDPcuJhlZ8,1481
-osism/commands/reconciler.py,sha256=jy07Qbl219e-WCtWbtV9zh49qHHCjDMm6oVTJM61k1A,1958
+osism/commands/reconciler.py,sha256=ubQfX8j13s3NuMKnT0Lt6O-szf7Z1V02AfsMQFHmO74,2209
 osism/commands/server.py,sha256=avmoOv5rjOi-fN2A-27cPwOtiy2Q2j6UFtCh3QrfWAI,7512
 osism/commands/service.py,sha256=A1lgAlGeCJpbFFqF55DRWPcCirIgpU0dzjzVLZ0mz3k,2649
 osism/commands/set.py,sha256=xLBi2DzbVQo2jb3-cOIE9In5UB3vFxquQJkDN-EsfhM,1425
@@ -44,28 +44,28 @@ osism/tasks/kolla.py,sha256=wJQpWn_01iWLkr7l7T7RNrQGfRgsgmYi4WQlTmNGvew,618
 osism/tasks/kubernetes.py,sha256=VzXq_VrYU_CLm4cOruqnE3Kq2ydfO9glZ3p0bp3OYoc,625
 osism/tasks/netbox.py,sha256=g0gL5QImiRTHqixRxze7LfNqPth7cXqLzVWQDUJLDjE,5928
 osism/tasks/openstack.py,sha256=g15tCll5vP1pC6ysxRCTZxplsdGmXbxaCH3k1Qdv5Xg,6367
-osism/tasks/reconciler.py,sha256=6iC0EYxeGvitzU2NsRqQzUEDZWW6Il3jgq_IRTN0sZg,1855
+osism/tasks/reconciler.py,sha256=phbSV6urILqq9mHGMYDFwSfx8bLZmldwgEi8sMWs8RA,2040
 osism/tasks/conductor/__init__.py,sha256=eBkisjRj0YRT0AArvuvpIHGNEqEijsNvR_55BuVX62I,1600
 osism/tasks/conductor/config.py,sha256=tvfuYNgvw0F_ZbvrjqnyHfrj3vF6z0zhsRtGNu-Lgvo,3410
-osism/tasks/conductor/ironic.py,sha256=tIhKH5EWvqzjVtZr32du8_jiVDP_9mFgTkxBjmtEm6g,16487
+osism/tasks/conductor/ironic.py,sha256=VT8JyYNh4IDWJ8QTIo46IokILKpGhINfLHjQvwz4FSU,16337
 osism/tasks/conductor/netbox.py,sha256=5Nc7wrriDOtSuru1KDLt9QpA54vC7tXDPB2J0JP9GKo,11393
 osism/tasks/conductor/utils.py,sha256=-a0-pRuhV4Fjj0SgdgBqtRJtAdGdqck5pzfi6NYBApU,2338
 osism/tasks/conductor/sonic/__init__.py,sha256=oxTTl_MGK4iWK9uNDRNlULtGrDGCQHrlJZ04weh_Lh8,777
 osism/tasks/conductor/sonic/bgp.py,sha256=PC6gGI5bCj2PCXcNGyMV9-EdlJWDsYaodzxigmYSZvw,3088
 osism/tasks/conductor/sonic/cache.py,sha256=Asv2k3nLJejuq7iB0a_LyK8dEmJzypP9v3OHkNY3GwI,3438
-osism/tasks/conductor/sonic/config_generator.py,sha256=Z2i6SvKp27EI-dK9fkSDo51ghbIIqTgM7qK0-xJtVwU,35098
+osism/tasks/conductor/sonic/config_generator.py,sha256=Fww6uOC7DplhsqR_jW9PHPB0pAAY1AiTGeNAQ2BUN4k,39259
 osism/tasks/conductor/sonic/connections.py,sha256=NvRjwJv3NF3ry5Xe9qHzk7pQbfDQHYx_j3ATRMUs7gA,14244
-osism/tasks/conductor/sonic/constants.py,sha256=nfsiKV1I5iiXPAUzkZgnFPWuSB_oy8xg3gNEZjn1Hb4,2194
+osism/tasks/conductor/sonic/constants.py,sha256=HjVFwmH-AN3np1qN97ahEAcwz2-4cHa-pA9pXWqWsqs,2219
 osism/tasks/conductor/sonic/device.py,sha256=ZYJA0bQ8waKWStzWUPxbcwNWa2Z_hMB3pqs8aA_nxXA,2458
 osism/tasks/conductor/sonic/exporter.py,sha256=25L1vbi84ZQD0xNHNTWk-anTz5QRkGJskCECBkeGQw4,8882
-osism/tasks/conductor/sonic/interface.py,sha256=MM-HrYlVdh_5fYSKto_38DC4RcfzoqBGEYFxk-Tz760,32233
-osism/tasks/conductor/sonic/sync.py,sha256=Jh2xbBv_yyrEJZZZkZRIk7vH0WV-IS4CA-4qQp56I2U,7823
+osism/tasks/conductor/sonic/interface.py,sha256=318wOwXYSSMKTPP2WSZIps-JvIkCQ2gYdQs9ZYHXwwg,38957
+osism/tasks/conductor/sonic/sync.py,sha256=fpgsQVwq6Hb7eeDHhLkAqx5BkaK3Ce_m_WvmWEsJyOo,9182
 osism/utils/__init__.py,sha256=gN5VtLJfrvyn6_snuTte7YR-vDygkpbORopIV8qSEsA,6064
-osism-0.20250621.0.dist-info/licenses/AUTHORS,sha256=oWotd63qsnNR945QLJP9mEXaXNtCMaesfo8ZNuLjwpU,39
-osism-0.20250621.0.dist-info/licenses/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
-osism-0.20250621.0.dist-info/METADATA,sha256=0nTzEqeDkHWwUyPJftfn6_Az0MP26rQjhMQNPffXkdM,2903
-osism-0.20250621.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-osism-0.20250621.0.dist-info/entry_points.txt,sha256=X1KbMeQim23k_lGGDFz8ldQIrdCrq8mKnFWgYrrEZI0,3469
-osism-0.20250621.0.dist-info/pbr.json,sha256=nvF1nN4eUUNsct1PT2Fq9FvN3QNGbPvpeeWGR0BjJjU,47
-osism-0.20250621.0.dist-info/top_level.txt,sha256=8L8dsI9hcaGHsdnR4k_LN9EM78EhwrXRFHyAryPXZtY,6
-osism-0.20250621.0.dist-info/RECORD,,
+osism-0.20250627.0.dist-info/licenses/AUTHORS,sha256=oWotd63qsnNR945QLJP9mEXaXNtCMaesfo8ZNuLjwpU,39
+osism-0.20250627.0.dist-info/licenses/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+osism-0.20250627.0.dist-info/METADATA,sha256=z9qQh_i8ZIPgWTACdHuDa5mZKbmi9vWrTPQ5xUivpUI,2904
+osism-0.20250627.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+osism-0.20250627.0.dist-info/entry_points.txt,sha256=eRq0DXdl4z2DdmPta6dqpzMe1M0DaUCNw9i4Jgdfhf0,3426
+osism-0.20250627.0.dist-info/pbr.json,sha256=rZRiHfI7rgnmVfh2COv00pQVKhMCCu6cw0bifLp9cVw,47
+osism-0.20250627.0.dist-info/top_level.txt,sha256=8L8dsI9hcaGHsdnR4k_LN9EM78EhwrXRFHyAryPXZtY,6
+osism-0.20250627.0.dist-info/RECORD,,
@@ -42,11 +42,10 @@ manage flavors = osism.commands.manage:Flavors
 manage image clusterapi = osism.commands.manage:ImageClusterapi
 manage image octavia = osism.commands.manage:ImageOctavia
 manage images = osism.commands.manage:Images
-manage ironic = osism.commands.netbox:Ironic
 manage netbox = osism.commands.netbox:Manage
 manage server list = osism.commands.server:ServerList
 manage server migrate = osism.commands.server:ServerMigrate
-manage sonic = osism.commands.sync:Sonic
+manage sonic = osism.commands.manage:Sonic
 manage volume list = osism.commands.volume:VolumeList
 netbox = osism.commands.netbox:Console
 noset bootstrap = osism.commands.noset:NoBootstrap
@@ -0,0 +1 @@
+{"git_version": "e1bd41d", "is_release": false}
@@ -1 +0,0 @@
-{"git_version": "ed0a34c", "is_release": false}