osism 0.20250616.0__py3-none-any.whl → 0.20250621.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -131,20 +131,35 @@ class BaremetalDeploy(Command):
131
131
  if not node:
132
132
  continue
133
133
 
134
- if node.provision_state in ["available", "deploy failed"]:
134
+ if (
135
+ node.provision_state in ["available", "deploy failed"]
136
+ and not node["maintenance"]
137
+ ):
135
138
  provision_state = "active"
136
139
  elif (
137
140
  node.provision_state == "error"
138
141
  or node.provision_state == "active"
142
+ and not node["maintenance"]
139
143
  and rebuild
140
144
  ):
141
145
  provision_state = "rebuild"
142
146
  else:
143
147
  logger.warning(
144
- f"Node {node.name} ({node.id}) not in supported provision state"
148
+ f"Node {node.name} ({node.id}) not in supported state! Provision state: {node.provision_state}, maintenance mode: {node['maintenance']}"
145
149
  )
146
150
  continue
147
151
 
152
+ # NOTE: Ironic removes "instance_info" on undeploy. It was saved to "extra" during sync and needs to be refreshed here.
153
+ if (
154
+ "instance_info" in node
155
+ and not node["instance_info"]
156
+ and "instance_info" in node["extra"]
157
+ and node["extra"]["instance_info"]
158
+ ):
159
+ node = conn.baremetal.update_node(
160
+ node, instance_info=json.loads(node.extra["instance_info"])
161
+ )
162
+
148
163
  try:
149
164
  conn.baremetal.validate_node(
150
165
  node.id, required=("boot", "deploy", "power")
@@ -272,12 +287,17 @@ class BaremetalUndeploy(Command):
272
287
 
273
288
  if node.provision_state in ["active", "deploy failed", "error"]:
274
289
  try:
275
- conn.baremetal.set_node_provision_state(node.id, "undeploy")
290
+ node = conn.baremetal.set_node_provision_state(node.id, "undeploy")
276
291
  except Exception as exc:
277
292
  logger.warning(
278
293
  f"Node {node.name} ({node.id}) could not be moved to available state: {exc}"
279
294
  )
280
295
  continue
296
+ # NOTE: Ironic removes "instance_info" on undeploy. It was saved to "extra" during sync and needs to be refreshed here.
297
+ if "instance_info" in node["extra"]:
298
+ node = conn.baremetal.update_node(
299
+ node, instance_info=json.loads(node.extra["instance_info"])
300
+ )
281
301
  else:
282
302
  logger.warning(
283
303
  f"Node {node.name} ({node.id}) not in supported provision state"
osism/commands/manage.py CHANGED
@@ -10,7 +10,7 @@ from loguru import logger
10
10
  import requests
11
11
 
12
12
  from osism.data import TEMPLATE_IMAGE_CLUSTERAPI, TEMPLATE_IMAGE_OCTAVIA
13
- from osism.tasks import openstack, handle_task
13
+ from osism.tasks import openstack, ansible, handle_task
14
14
 
15
15
  SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.31", "1.32", "1.33"]
16
16
 
@@ -360,3 +360,27 @@ class Flavors(Command):
360
360
  )
361
361
 
362
362
  return handle_task(task, wait, format="script", timeout=3600)
363
+
364
+
365
class Dnsmasq(Command):
    """Apply the dnsmasq service via the "infrastructure" Ansible environment."""

    def get_parser(self, prog_name):
        parser = super(Dnsmasq, self).get_parser(prog_name)
        parser.add_argument(
            "--no-wait",
            default=False,
            help="Do not wait until dnsmasq has been applied",
            action="store_true",
        )
        return parser

    def take_action(self, parsed_args):
        # Waiting is the default behaviour; --no-wait switches it off.
        wait = not parsed_args.no_wait

        # Build the task signature and dispatch it asynchronously.
        signature = ansible.run.si("infrastructure", "dnsmasq", [])
        task = signature.apply_async()
        if wait:
            logger.info(
                f"It takes a moment until task {task.task_id} (dnsmasq) has been started and output is visible here."
            )

        return handle_task(task, wait, format="log", timeout=300)
osism/commands/sync.py CHANGED
@@ -23,24 +23,44 @@ class Facts(Command):
23
23
  class Sonic(Command):
24
24
  def get_parser(self, prog_name):
25
25
  parser = super(Sonic, self).get_parser(prog_name)
26
+ parser.add_argument(
27
+ "device",
28
+ nargs="?",
29
+ help="Optional device name to sync configuration for a specific device",
30
+ )
26
31
  parser.add_argument(
27
32
  "--no-wait",
28
33
  default=False,
29
34
  help="Do not wait until the sync has been completed",
30
35
  action="store_true",
31
36
  )
37
+ parser.add_argument(
38
+ "--diff",
39
+ default=True,
40
+ help="Show configuration diff when changes are detected (default: True)",
41
+ action="store_true",
42
+ )
43
+ parser.add_argument(
44
+ "--no-diff",
45
+ dest="diff",
46
+ help="Do not show configuration diff",
47
+ action="store_false",
48
+ )
32
49
  return parser
33
50
 
34
51
  def take_action(self, parsed_args):
35
52
  wait = not parsed_args.no_wait
53
+ device_name = parsed_args.device
54
+ show_diff = parsed_args.diff
55
+
56
+ task = conductor.sync_sonic.delay(device_name, show_diff)
36
57
 
37
- task = conductor.sync_sonic.delay()
38
- if wait:
58
+ if device_name:
39
59
  logger.info(
40
- f"Task {task.task_id} (sync sonic) is running. Wait. No more output."
60
+ f"Task {task.task_id} (sync sonic for device {device_name}) started"
41
61
  )
42
- task.wait(timeout=None, interval=0.5)
43
62
  else:
44
- logger.info(
45
- f"Task {task.task_id} (sync sonic) is running in background. No more output."
46
- )
63
+ logger.info(f"Task {task.task_id} (sync sonic) started")
64
+
65
+ rc = handle_task(task, wait=wait)
66
+ return rc
osism/settings.py CHANGED
@@ -49,6 +49,7 @@ NETBOX_FILTER_CONDUCTOR_SONIC = os.getenv(
49
49
  SONIC_EXPORT_DIR = os.getenv("SONIC_EXPORT_DIR", "/etc/sonic/export")
50
50
  SONIC_EXPORT_PREFIX = os.getenv("SONIC_EXPORT_PREFIX", "osism_")
51
51
  SONIC_EXPORT_SUFFIX = os.getenv("SONIC_EXPORT_SUFFIX", "_config_db.json")
52
+ SONIC_EXPORT_IDENTIFIER = os.getenv("SONIC_EXPORT_IDENTIFIER", "serial-number")
52
53
 
53
54
  NETBOX_SECONDARIES = (
54
55
  os.getenv("NETBOX_SECONDARIES", read_secret("NETBOX_SECONDARIES")) or "[]"
@@ -48,8 +48,8 @@ def sync_ironic(self, force_update=False):
48
48
 
49
49
 
50
50
@app.task(bind=True, name="osism.tasks.conductor.sync_sonic")
def sync_sonic(self, device_name=None, show_diff=True):
    """Celery entry point for the SONiC configuration sync.

    Args:
        device_name: Optional device to restrict the sync to (None = all devices).
        show_diff: Whether a configuration diff is shown when changes are detected.
    """
    # The Celery request id is passed through so the sync can report progress
    # under this task's identity.
    return _sync_sonic(device_name, self.request.id, show_diff)
53
53
 
54
54
 
55
55
  __all__ = [
@@ -152,10 +152,14 @@ def sync_ironic(request_id, get_ironic_parameters, force_update=False):
152
152
  .render(remote_board_address=oob_ip)
153
153
  )
154
154
  node_attributes.update({"resource_class": device.name})
155
- # NOTE: Write metadata used for provisioning into 'extra' field, so that
156
- # it is available during node deploy without querying the NetBox again
157
155
  if "extra" not in node_attributes:
158
156
  node_attributes["extra"] = {}
157
+ # NOTE: Copy instance_info into the extra field, because Ironic removes it on undeployment. This way it may be re-added on undeploy without querying the NetBox again
158
+ if "instance_info" in node_attributes and node_attributes["instance_info"]:
159
+ node_attributes["extra"].update(
160
+ {"instance_info": json.dumps(node_attributes["instance_info"])}
161
+ )
162
+ # NOTE: Write metadata used for provisioning into 'extra' field, so that it is available during node deploy without querying the netbox again
159
163
  if (
160
164
  "netplan_parameters" in device.custom_fields
161
165
  and device.custom_fields["netplan_parameters"]
@@ -0,0 +1,26 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ """SONiC configuration management package."""
4
+
5
+ from .config_generator import generate_sonic_config
6
+ from .exporter import save_config_to_netbox, export_config_to_file
7
+ from .sync import sync_sonic
8
+ from .connections import (
9
+ get_connected_interfaces,
10
+ get_connected_device_for_sonic_interface,
11
+ get_connected_device_via_interface,
12
+ find_interconnected_devices,
13
+ get_device_bgp_neighbors_via_loopback,
14
+ )
15
+
16
+ __all__ = [
17
+ "generate_sonic_config",
18
+ "save_config_to_netbox",
19
+ "export_config_to_file",
20
+ "sync_sonic",
21
+ "get_connected_interfaces",
22
+ "get_connected_device_for_sonic_interface",
23
+ "get_connected_device_via_interface",
24
+ "find_interconnected_devices",
25
+ "get_device_bgp_neighbors_via_loopback",
26
+ ]
@@ -0,0 +1,87 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ """BGP and AS calculation functions for SONiC configuration."""
4
+
5
+ from loguru import logger
6
+
7
+ from .constants import DEFAULT_LOCAL_AS_PREFIX
8
+
9
+
10
def calculate_local_asn_from_ipv4(
    ipv4_address: str, prefix: int = DEFAULT_LOCAL_AS_PREFIX
) -> int:
    """Calculate AS number from IPv4 address.

    Args:
        ipv4_address: IPv4 address in format "192.168.45.123/32" or "192.168.45.123"
        prefix: Four-digit prefix for AS number (default: 4200)

    Returns:
        AS number calculated as prefix + 3rd octet (padded) + 4th octet (padded)
        Example: 192.168.45.123 with prefix 4200 -> 4200045123

    Raises:
        ValueError: If IP address format is invalid
    """
    try:
        # Remove CIDR notation if present
        ip_only = ipv4_address.split("/")[0]
        octets = ip_only.split(".")

        if len(octets) != 4:
            raise ValueError(f"Invalid IPv4 address format: {ipv4_address}")

        # Validate every octet, not only the two used below, so malformed
        # addresses such as "300.1.2.3" are rejected as documented.
        values = [int(octet) for octet in octets]
        if not all(0 <= value <= 255 for value in values):
            raise ValueError(f"Invalid octet values in: {ipv4_address}")

        # AS = prefix + third octet (3 digits) + fourth octet (3 digits)
        # Example: 192.168.45.123 -> 4200 + 045 + 123 = 4200045123
        third_octet = values[2]
        fourth_octet = values[3]
        return int(f"{prefix}{third_octet:03d}{fourth_octet:03d}")
    except (IndexError, ValueError) as e:
        # Chain the original exception so the root cause stays visible.
        raise ValueError(
            f"Failed to calculate AS from {ipv4_address}: {str(e)}"
        ) from e
45
+
46
+
47
# Deprecated: Use connections.find_interconnected_devices instead
# This function is kept for backward compatibility but delegates to the new module
def find_interconnected_spine_groups(devices, target_roles=None):
    """Find groups of interconnected spine/superspine switches.

    Args:
        devices: List of NetBox device objects
        target_roles: List of device roles to consider (default: ["spine", "superspine"])

    Returns:
        List of groups, where each group is a list of interconnected devices of the same role
    """
    # Import here to avoid circular imports
    from .connections import find_interconnected_devices

    # NOTE: Use None as a sentinel instead of a mutable default argument so a
    # shared list can never be mutated across calls.
    if target_roles is None:
        target_roles = ["spine", "superspine"]

    return find_interconnected_devices(devices, target_roles)
63
+
64
+
65
def calculate_minimum_as_for_group(device_group, prefix=DEFAULT_LOCAL_AS_PREFIX):
    """Calculate the minimum AS number for a group of interconnected devices.

    Args:
        device_group: List of interconnected devices
        prefix: AS prefix (default: DEFAULT_LOCAL_AS_PREFIX)

    Returns:
        int: Minimum AS number for the group, or None if no valid AS can be calculated
    """
    candidates = []
    for member in device_group:
        # Devices without a primary IPv4 address cannot contribute an AS number.
        if not member.primary_ip4:
            continue
        try:
            candidates.append(
                calculate_local_asn_from_ipv4(str(member.primary_ip4), prefix)
            )
        except ValueError as e:
            logger.debug(f"Could not calculate AS for device {member.name}: {e}")

    if not candidates:
        return None
    return min(candidates)
@@ -0,0 +1,114 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ """Interface caching for SONiC configuration generation."""
4
+
5
+ import threading
6
+ from typing import Dict, List, Optional
7
+ from loguru import logger
8
+
9
+ from osism import utils
10
+
11
+
12
class InterfaceCache:
    """Thread-local cache for device interfaces during sync_sonic task."""

    def __init__(self):
        # Maps NetBox device id -> list of interface objects.
        self._cache: Dict[int, List] = {}
        self._lock = threading.Lock()

    def get_device_interfaces(self, device_id: int) -> List:
        """Get interfaces for a device, using cache if available.

        Args:
            device_id: NetBox device ID

        Returns:
            List of interface objects
        """
        with self._lock:
            if device_id in self._cache:
                logger.debug(f"Using cached interfaces for device {device_id}")
                return self._cache[device_id]

            logger.debug(f"Fetching interfaces for device {device_id}")
            try:
                fetched = list(
                    utils.nb.dcim.interfaces.filter(device_id=device_id)
                )
                logger.debug(
                    f"Cached {len(fetched)} interfaces for device {device_id}"
                )
            except Exception as e:
                # Cache the failure as an empty list so NetBox is not retried
                # for the same device within this run.
                logger.warning(
                    f"Failed to fetch interfaces for device {device_id}: {e}"
                )
                fetched = []
            self._cache[device_id] = fetched
            return fetched

    def clear(self):
        """Clear the cache."""
        with self._lock:
            cache_size = len(self._cache)
            self._cache = {}
            logger.debug(f"Cleared interface cache ({cache_size} devices)")

    def get_cache_stats(self) -> Dict[str, int]:
        """Get cache statistics.

        Returns:
            Dictionary with cache statistics
        """
        with self._lock:
            return {
                "cached_devices": len(self._cache),
                "total_interfaces": sum(map(len, self._cache.values())),
            }
70
+
71
+
72
+ # Thread-local storage for the interface cache
73
+ _thread_local = threading.local()
74
+
75
+
76
def get_interface_cache() -> InterfaceCache:
    """Get the current thread's interface cache.

    Returns:
        InterfaceCache instance for current thread
    """
    # EAFP: create the cache lazily on first access in this thread.
    try:
        return _thread_local.interface_cache
    except AttributeError:
        cache = InterfaceCache()
        _thread_local.interface_cache = cache
        return cache
85
+
86
+
87
def get_cached_device_interfaces(device_id: int) -> List:
    """Get interfaces for a device using the thread-local cache.

    Args:
        device_id: NetBox device ID

    Returns:
        List of interface objects
    """
    return get_interface_cache().get_device_interfaces(device_id)
98
+
99
+
100
def clear_interface_cache():
    """Clear the current thread's interface cache."""
    # No-op when this thread never created a cache.
    try:
        cache = _thread_local.interface_cache
    except AttributeError:
        return
    cache.clear()
104
+
105
+
106
def get_interface_cache_stats() -> Optional[Dict[str, int]]:
    """Get cache statistics for the current thread.

    Returns:
        Dictionary with cache statistics or None if no cache exists
    """
    try:
        cache = _thread_local.interface_cache
    except AttributeError:
        # This thread never created an interface cache.
        return None
    return cache.get_cache_stats()