osism-0.20250628.0-py3-none-any.whl → osism-0.20250709.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
osism/api.py CHANGED
@@ -208,11 +208,11 @@ async def notifications_baremetal(notification: NotificationBaremetal) -> None:
 
 
 @app.post(
-    "/v1/switches/{identifier}/ztp/complete",
+    "/v1/sonic/{identifier}/ztp/complete",
     response_model=DeviceSearchResult,
-    tags=["switches"],
+    tags=["sonic"],
 )
-async def switches_ztp_complete(identifier: str) -> DeviceSearchResult:
+async def sonic_ztp_complete(identifier: str) -> DeviceSearchResult:
     """Mark a switch as ZTP complete by setting provision_state to active."""
     if not utils.nb:
         raise HTTPException(
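For API consumers, this rename moves the ZTP-complete endpoint from the switches namespace to the sonic namespace. A minimal client sketch using requests; the base URL, port, and identifier are illustrative assumptions, not values from this diff:

    import requests

    BASE_URL = "http://localhost:8000"  # assumed; use your osism API endpoint
    identifier = "leaf-01"  # assumed device name or identifier

    # Before 0.20250709.0: POST {BASE_URL}/v1/switches/{identifier}/ztp/complete
    # From 0.20250709.0 on:
    response = requests.post(f"{BASE_URL}/v1/sonic/{identifier}/ztp/complete")
    response.raise_for_status()
    print(response.json())  # DeviceSearchResult as JSON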
@@ -12,6 +12,7 @@ import yaml
 from openstack.baremetal import configdrive as configdrive_builder
 
 from osism.commands import get_cloud_connection
+from osism import utils
 
 
 class BaremetalList(Command):
@@ -169,16 +170,51 @@ class BaremetalDeploy(Command):
                 continue
             # NOTE: Prepare osism config drive
             try:
+                # Get default vars from NetBox local_context_data if available
+                default_vars = {}
+                if utils.nb:
+                    try:
+                        # Try to find device by name first
+                        device = utils.nb.dcim.devices.get(name=node.name)
+
+                        # If not found by name, try by inventory_hostname custom field
+                        if not device:
+                            devices = utils.nb.dcim.devices.filter(
+                                cf_inventory_hostname=node.name
+                            )
+                            if devices:
+                                device = devices[0]
+
+                        # Extract local_context_data if device found and has the field
+                        if (
+                            device
+                            and hasattr(device, "local_context_data")
+                            and device.local_context_data
+                        ):
+                            default_vars = device.local_context_data
+                            logger.info(
+                                f"Using NetBox local_context_data for node {node.name}"
+                            )
+                        else:
+                            logger.debug(
+                                f"No local_context_data found for node {node.name} in NetBox"
+                            )
+                    except Exception as e:
+                        logger.warning(
+                            f"Failed to fetch NetBox data for node {node.name}: {e}"
+                        )
+
                 playbook = []
                 play = {
                     "name": "Run bootstrap - part 2",
                     "hosts": "localhost",
                     "connection": "local",
                     "gather_facts": True,
-                    "vars": {},
+                    "vars": default_vars.copy(),
                     "roles": [
                         "osism.commons.hostname",
                         "osism.commons.hosts",
+                        "osism.commons.operator",
                     ],
                 }
                 play["vars"].update(
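The effect of the added block is that NetBox local_context_data now seeds the play variables, and deploy-time values layered on via play["vars"].update(...) take precedence over those defaults. A standalone sketch of the merge semantics, using made-up keys:

    # Hypothetical NetBox local_context_data for a node
    default_vars = {"hostname_name": "node-01", "extra_setting": "from-netbox"}

    play_vars = default_vars.copy()  # NetBox data as the baseline
    play_vars.update({"hostname_name": "node-01.example.com"})  # deploy-time values win

    print(play_vars)
    # {'hostname_name': 'node-01.example.com', 'extra_setting': 'from-netbox'}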
@@ -293,11 +329,6 @@ class BaremetalUndeploy(Command):
                        f"Node {node.name} ({node.id}) could not be moved to available state: {exc}"
                    )
                    continue
-                # NOTE: Ironic removes "instance_info" on undeploy. It was saved to "extra" during sync and needs to be refreshed here.
-                if "instance_info" in node["extra"]:
-                    node = conn.baremetal.update_node(
-                        node, instance_info=json.loads(node.extra["instance_info"])
-                    )
            else:
                logger.warning(
                    f"Node {node.name} ({node.id}) not in supported provision state"
osism/commands/manage.py CHANGED
@@ -1,8 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
 
-import json
-import os
-from datetime import datetime
 from re import findall
 from urllib.parse import urljoin
 
@@ -10,12 +7,10 @@ from cliff.command import Command
 import docker
 from jinja2 import Template
 from loguru import logger
-import paramiko
 import requests
 
 from osism.data import TEMPLATE_IMAGE_CLUSTERAPI, TEMPLATE_IMAGE_OCTAVIA
 from osism.tasks import openstack, ansible, handle_task
-from osism import utils
 
 SUPPORTED_CLUSTERAPI_K8S_IMAGES = ["1.31", "1.32", "1.33"]
 
@@ -389,249 +384,3 @@ class Dnsmasq(Command):
         )
 
         return handle_task(task, wait, format="log", timeout=300)
-
-
-class Sonic(Command):
-    def get_parser(self, prog_name):
-        parser = super(Sonic, self).get_parser(prog_name)
-        parser.add_argument(
-            "hostname", type=str, help="Hostname of the SONiC switch to manage"
-        )
-        parser.add_argument(
-            "--reload",
-            action="store_true",
-            help="Execute config reload after config load to restart services",
-        )
-        return parser
-
-    def take_action(self, parsed_args):
-        hostname = parsed_args.hostname
-        reload_config = parsed_args.reload
-        today = datetime.now().strftime("%Y%m%d")
-
-        try:
-            # Get device from NetBox - try by name first, then by inventory_hostname
-            device = utils.nb.dcim.devices.get(name=hostname)
-            if not device:
-                # Try to find by inventory_hostname custom field
-                devices = utils.nb.dcim.devices.filter(cf_inventory_hostname=hostname)
-                if devices:
-                    device = devices[0]  # Take the first match
-                    logger.info(f"Device found by inventory_hostname: {device.name}")
-                else:
-                    logger.error(
-                        f"Device {hostname} not found in NetBox (searched by name and inventory_hostname)"
-                    )
-                    return 1
-
-            # Get device configuration from local_context_data
-            if (
-                not hasattr(device, "local_context_data")
-                or not device.local_context_data
-            ):
-                logger.error(f"Device {hostname} has no local_context_data in NetBox")
-                return 1
-
-            config_context = device.local_context_data
-
-            # Save config context to local /tmp directory
-            config_context_file = f"/tmp/config_db_{hostname}_{today}.json"
-            try:
-                with open(config_context_file, "w") as f:
-                    json.dump(config_context, f, indent=2)
-                logger.info(f"Config context saved to {config_context_file}")
-            except Exception as e:
-                logger.error(f"Failed to save config context: {e}")
-                return 1
-
-            # Extract SSH connection details
-            ssh_host = None
-            ssh_username = None
-
-            # Try to get SSH details from config context
-            if "management" in config_context:
-                mgmt = config_context["management"]
-                if "ip" in mgmt:
-                    ssh_host = mgmt["ip"]
-                if "username" in mgmt:
-                    ssh_username = mgmt["username"]
-
-            # Fallback: try to get OOB IP from NetBox
-            if not ssh_host:
-                from osism.tasks.conductor.netbox import get_device_oob_ip
-
-                oob_result = get_device_oob_ip(device)
-                if oob_result:
-                    ssh_host = oob_result[0]
-
-            if not ssh_host:
-                logger.error(f"No SSH host found for device {hostname}")
-                return 1
-
-            if not ssh_username:
-                ssh_username = "admin"  # Default SONiC username
-
-            # SSH private key path
-            ssh_key_path = "/ansible/secrets/id_rsa.operator"
-
-            if not os.path.exists(ssh_key_path):
-                logger.error(f"SSH private key not found at {ssh_key_path}")
-                return 1
-
-            logger.info(
-                f"Connecting to {hostname} ({ssh_host}) to backup SONiC configuration"
-            )
-
-            # Create SSH connection
-            ssh = paramiko.SSHClient()
-            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-            try:
-                # Connect with private key
-                ssh.connect(
-                    hostname=ssh_host,
-                    username=ssh_username,
-                    key_filename=ssh_key_path,
-                    timeout=30,
-                )
-
-                # Generate backup filename with date and increment on switch
-                base_backup_path = f"/home/admin/config_db_{hostname}_{today}"
-                backup_filename = f"{base_backup_path}_1.json"
-
-                # Find next available filename on switch
-                x = 1
-                while True:
-                    check_cmd = f"ls {base_backup_path}_{x}.json 2>/dev/null"
-                    stdin, stdout, stderr = ssh.exec_command(check_cmd)
-                    if stdout.read().decode().strip() == "":
-                        backup_filename = f"{base_backup_path}_{x}.json"
-                        break
-                    x += 1
-
-                logger.info(
-                    f"Backing up current configuration on switch to {backup_filename}"
-                )
-
-                # Backup current configuration on switch
-                backup_cmd = f"sudo cp /etc/sonic/config_db.json {backup_filename}"
-                stdin, stdout, stderr = ssh.exec_command(backup_cmd)
-                exit_status = stdout.channel.recv_exit_status()
-
-                if exit_status != 0:
-                    error_msg = stderr.read().decode()
-                    logger.error(
-                        f"Failed to backup configuration on switch: {error_msg}"
-                    )
-                    return 1
-
-                logger.info("Configuration backed up successfully on switch")
-
-                # Upload local config context to switch /tmp directory
-                switch_config_file = f"/tmp/config_db_{hostname}_current.json"
-                logger.info(
-                    f"Uploading config context to {switch_config_file} on switch"
-                )
-
-                # Use SFTP to upload the config context file
-                sftp = ssh.open_sftp()
-                try:
-                    sftp.put(config_context_file, switch_config_file)
-                    logger.info(
-                        f"Config context successfully uploaded to {switch_config_file} on switch"
-                    )
-                except Exception as e:
-                    logger.error(f"Failed to upload config context to switch: {e}")
-                    return 1
-                finally:
-                    sftp.close()
-
-                # Load and apply the new configuration
-                logger.info("Loading and applying new configuration on switch")
-
-                load_cmd = f"sudo config load -y {switch_config_file}"
-                stdin, stdout, stderr = ssh.exec_command(load_cmd)
-                exit_status = stdout.channel.recv_exit_status()
-
-                if exit_status != 0:
-                    error_msg = stderr.read().decode()
-                    logger.error(f"Failed to load configuration: {error_msg}")
-                    return 1
-
-                logger.info("Configuration loaded and applied successfully")
-
-                # Optionally reload configuration to restart services
-                config_operations_successful = True
-                if reload_config:
-                    logger.info("Reloading configuration to restart services")
-
-                    reload_cmd = "sudo config reload -y"
-                    stdin, stdout, stderr = ssh.exec_command(reload_cmd)
-                    exit_status = stdout.channel.recv_exit_status()
-
-                    if exit_status != 0:
-                        error_msg = stderr.read().decode()
-                        logger.error(f"Failed to reload configuration: {error_msg}")
-                        config_operations_successful = False
-                    else:
-                        logger.info("Configuration reloaded successfully")
-
-                # Save configuration only if load (and optionally reload) were successful
-                if config_operations_successful:
-                    logger.info("Saving configuration to persist changes")
-
-                    save_cmd = "sudo config save -y"
-                    stdin, stdout, stderr = ssh.exec_command(save_cmd)
-                    exit_status = stdout.channel.recv_exit_status()
-
-                    if exit_status != 0:
-                        error_msg = stderr.read().decode()
-                        logger.error(f"Failed to save configuration: {error_msg}")
-                        return 1
-
-                    logger.info("Configuration saved successfully")
-                else:
-                    logger.warning("Skipping config save due to reload failure")
-
-                # Delete the temporary configuration file
-                logger.info(f"Cleaning up temporary file {switch_config_file}")
-
-                delete_cmd = f"rm {switch_config_file}"
-                stdin, stdout, stderr = ssh.exec_command(delete_cmd)
-                exit_status = stdout.channel.recv_exit_status()
-
-                if exit_status != 0:
-                    error_msg = stderr.read().decode()
-                    logger.warning(f"Failed to delete temporary file: {error_msg}")
-                else:
-                    logger.info("Temporary file deleted successfully")
-
-                logger.info("SONiC configuration management completed successfully")
-                logger.info(f"- Config context saved locally to: {config_context_file}")
-                if reload_config and config_operations_successful:
-                    logger.info("- Configuration loaded, reloaded, and saved on switch")
-                elif config_operations_successful:
-                    logger.info("- Configuration loaded and saved on switch")
-                else:
-                    logger.info(
-                        "- Configuration loaded on switch (save skipped due to reload failure)"
-                    )
-                logger.info(f"- Backup created on switch: {backup_filename}")
-
-                return 0
-
-            except paramiko.AuthenticationException:
-                logger.error(f"Authentication failed for {ssh_host}")
-                return 1
-            except paramiko.SSHException as e:
-                logger.error(f"SSH connection failed: {e}")
-                return 1
-            except Exception as e:
-                logger.error(f"Unexpected error during SSH operations: {e}")
-                return 1
-            finally:
-                ssh.close()
-
-        except Exception as e:
-            logger.error(f"Error managing SONiC device {hostname}: {e}")
-            return 1
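One detail worth noting from the removed take_action: before overwriting /etc/sonic/config_db.json it probed the switch over SSH for the first free config_db_<host>_<date>_<n>.json slot. The same increment-until-free pattern, as a local self-contained sketch rather than the remote ls probe (path is illustrative):

    import os

    def next_backup_filename(base_path: str) -> str:
        """Return the first '<base_path>_<n>.json' that does not exist yet."""
        n = 1
        while os.path.exists(f"{base_path}_{n}.json"):
            n += 1
        return f"{base_path}_{n}.json"

    print(next_backup_filename("/tmp/config_db_leaf-01_20250709"))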
@@ -0,0 +1,219 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import json
+from cliff.command import Command
+from loguru import logger
+from tabulate import tabulate
+
+from osism.tasks.conductor import get_redfish_resources
+
+
+class List(Command):
+    def _normalize_column_name(self, column_name):
+        """Normalize column name to lowercase with underscores instead of spaces."""
+        if not column_name:
+            return column_name
+        return column_name.lower().replace(" ", "_")
+
+    def _get_column_mappings(self, resourcetype):
+        """Get column mappings for a specific resource type."""
+        if resourcetype == "EthernetInterfaces":
+            return {
+                "ID": "id",
+                "Name": "name",
+                "Description": "description",
+                "MAC Address": "mac_address",
+                "Permanent MAC Address": "permanent_mac_address",
+                "Speed (Mbps)": "speed_mbps",
+                "MTU Size": "mtu_size",
+                "Link Status": "link_status",
+                "Interface Enabled": "interface_enabled",
+            }
+        elif resourcetype == "NetworkAdapters":
+            return {
+                "ID": "id",
+                "Name": "name",
+                "Description": "description",
+                "Manufacturer": "manufacturer",
+                "Model": "model",
+                "Part Number": "part_number",
+                "Serial Number": "serial_number",
+                "Firmware Version": "firmware_version",
+            }
+        elif resourcetype == "NetworkDeviceFunctions":
+            return {
+                "ID": "id",
+                "Name": "name",
+                "Description": "description",
+                "Device Enabled": "device_enabled",
+                "Ethernet Enabled": "ethernet_enabled",
+                "MAC Address": "mac_address",
+                "Permanent MAC Address": "permanent_mac_address",
+                "Adapter ID": "adapter_id",
+                "Adapter Name": "adapter_name",
+            }
+        return None
+
+    def _get_filtered_columns(self, column_mappings, selected_columns=None):
+        """Get filtered column mappings based on selected columns."""
+        # If no columns specified, use all available columns
+        if not selected_columns:
+            return list(column_mappings.keys()), list(column_mappings.values())
+
+        # Normalize selected columns and filter
+        normalized_selected = [
+            self._normalize_column_name(col) for col in selected_columns
+        ]
+        headers = []
+        data_keys = []
+
+        for display_name, data_key in column_mappings.items():
+            normalized_display = self._normalize_column_name(display_name)
+            if normalized_display in normalized_selected:
+                headers.append(display_name)
+                data_keys.append(data_key)
+
+        # Check if any requested columns were not found
+        found_columns = [self._normalize_column_name(h) for h in headers]
+        for requested_col in normalized_selected:
+            if requested_col not in found_columns:
+                logger.warning(
+                    f"Column '{requested_col}' not found. Available columns: {list(column_mappings.keys())}"
+                )
+
+        return headers, data_keys
+
+    def _filter_json_data(self, data, data_keys):
+        """Filter JSON data to include only selected columns."""
+        if not data or not data_keys:
+            return data
+
+        filtered_data = []
+        for item in data:
+            filtered_item = {key: item.get(key) for key in data_keys}
+            filtered_data.append(filtered_item)
+
+        return filtered_data
+
+    def _filter_and_display_table(self, data, column_mappings, selected_columns=None):
+        """Generic method to filter columns and display table data."""
+        if not data:
+            return
+
+        headers, data_keys = self._get_filtered_columns(
+            column_mappings, selected_columns
+        )
+
+        if not headers:
+            print("No valid columns specified")
+            return
+
+        # Prepare table data
+        table_data = []
+        for item in data:
+            row = [item.get(key, "N/A") for key in data_keys]
+            table_data.append(row)
+
+        # Display the table
+        print(tabulate(table_data, headers=headers, tablefmt="grid"))
+        print(f"\nTotal items: {len(data)}")
+
+    def get_parser(self, prog_name):
+        parser = super(List, self).get_parser(prog_name)
+        parser.add_argument(
+            "hostname",
+            type=str,
+            help="Hostname of the target system",
+        )
+        parser.add_argument(
+            "resourcetype",
+            type=str,
+            help="Resource type to process (e.g., EthernetInterfaces, NetworkAdapters, NetworkDeviceFunctions)",
+        )
+        parser.add_argument(
+            "--format",
+            type=str,
+            choices=["table", "json"],
+            default="table",
+            help="Output format (default: table)",
+        )
+        parser.add_argument(
+            "--column",
+            action="append",
+            help="Column to include in output (can be used multiple times)",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        hostname = parsed_args.hostname
+        resourcetype = parsed_args.resourcetype
+        output_format = parsed_args.format
+        columns = parsed_args.column
+        logger.info(
+            f"Redfish list command called with hostname: {hostname}, resourcetype: {resourcetype}, format: {output_format}"
+        )
+
+        # Use Celery task to get Redfish resources
+        task_result = get_redfish_resources.delay(hostname, resourcetype)
+        result = task_result.get()
+
+        if output_format == "json":
+            if result:
+                # Apply column filtering for JSON output if columns are specified
+                if columns:
+                    # Get column mappings for the resource type
+                    column_mappings = self._get_column_mappings(resourcetype)
+                    if column_mappings:
+                        _, data_keys = self._get_filtered_columns(
+                            column_mappings, columns
+                        )
+                        filtered_result = self._filter_json_data(result, data_keys)
+                        print(json.dumps(filtered_result, indent=2))
+                    else:
+                        print(json.dumps(result, indent=2))
+                else:
+                    print(json.dumps(result, indent=2))
+            else:
+                print("[]")
+        else:
+            if resourcetype == "EthernetInterfaces" and result:
+                self._display_ethernet_interfaces(result, columns)
+            elif resourcetype == "NetworkAdapters" and result:
+                self._display_network_adapters(result, columns)
+            elif resourcetype == "NetworkDeviceFunctions" and result:
+                self._display_network_device_functions(result, columns)
+            elif result:
+                logger.info(f"Retrieved resources: {result}")
+            else:
+                print(f"No {resourcetype} resources found for {hostname}")
+
+    def _display_ethernet_interfaces(self, interfaces, selected_columns=None):
+        """Display EthernetInterfaces in a formatted table."""
+        if not interfaces:
+            print("No EthernetInterfaces found")
+            return
+
+        column_mappings = self._get_column_mappings("EthernetInterfaces")
+        self._filter_and_display_table(interfaces, column_mappings, selected_columns)
+
+    def _display_network_adapters(self, adapters, selected_columns=None):
+        """Display NetworkAdapters in a formatted table."""
+        if not adapters:
+            print("No NetworkAdapters found")
+            return
+
+        column_mappings = self._get_column_mappings("NetworkAdapters")
+        self._filter_and_display_table(adapters, column_mappings, selected_columns)
+
+    def _display_network_device_functions(
+        self, device_functions, selected_columns=None
+    ):
+        """Display NetworkDeviceFunctions in a formatted table."""
+        if not device_functions:
+            print("No NetworkDeviceFunctions found")
+            return
+
+        column_mappings = self._get_column_mappings("NetworkDeviceFunctions")
+        self._filter_and_display_table(
+            device_functions, column_mappings, selected_columns
+        )