osism-0.20250628.0-py3-none-any.whl → osism-0.20250701.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/commands/baremetal.py +37 -6
- osism/commands/redfish.py +219 -0
- osism/settings.py +3 -0
- osism/tasks/conductor/__init__.py +7 -0
- osism/tasks/conductor/config.py +52 -35
- osism/tasks/conductor/ironic.py +95 -99
- osism/tasks/conductor/redfish.py +300 -0
- osism/tasks/conductor/utils.py +148 -0
- {osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/METADATA +2 -1
- {osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/RECORD +16 -14
- {osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/entry_points.txt +1 -0
- osism-0.20250701.0.dist-info/pbr.json +1 -0
- osism-0.20250628.0.dist-info/pbr.json +0 -1
- {osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/WHEEL +0 -0
- {osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/licenses/AUTHORS +0 -0
- {osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/top_level.txt +0 -0
osism/commands/baremetal.py
CHANGED
@@ -12,6 +12,7 @@ import yaml
 from openstack.baremetal import configdrive as configdrive_builder
 
 from osism.commands import get_cloud_connection
+from osism import utils
 
 
 class BaremetalList(Command):
@@ -169,16 +170,51 @@ class BaremetalDeploy(Command):
                 continue
             # NOTE: Prepare osism config drive
             try:
+                # Get default vars from NetBox local_context_data if available
+                default_vars = {}
+                if utils.nb:
+                    try:
+                        # Try to find device by name first
+                        device = utils.nb.dcim.devices.get(name=node.name)
+
+                        # If not found by name, try by inventory_hostname custom field
+                        if not device:
+                            devices = utils.nb.dcim.devices.filter(
+                                cf_inventory_hostname=node.name
+                            )
+                            if devices:
+                                device = devices[0]
+
+                        # Extract local_context_data if device found and has the field
+                        if (
+                            device
+                            and hasattr(device, "local_context_data")
+                            and device.local_context_data
+                        ):
+                            default_vars = device.local_context_data
+                            logger.info(
+                                f"Using NetBox local_context_data for node {node.name}"
+                            )
+                        else:
+                            logger.debug(
+                                f"No local_context_data found for node {node.name} in NetBox"
+                            )
+                    except Exception as e:
+                        logger.warning(
+                            f"Failed to fetch NetBox data for node {node.name}: {e}"
+                        )
+
                 playbook = []
                 play = {
                     "name": "Run bootstrap - part 2",
                     "hosts": "localhost",
                     "connection": "local",
                     "gather_facts": True,
-                    "vars":
+                    "vars": default_vars.copy(),
                     "roles": [
                         "osism.commons.hostname",
                         "osism.commons.hosts",
+                        "osism.commons.operator",
                     ],
                 }
                 play["vars"].update(
@@ -293,11 +329,6 @@ class BaremetalUndeploy(Command):
                     f"Node {node.name} ({node.id}) could not be moved to available state: {exc}"
                 )
                 continue
-            # NOTE: Ironic removes "instance_info" on undeploy. It was saved to "extra" during sync and needs to be refreshed here.
-            if "instance_info" in node["extra"]:
-                node = conn.baremetal.update_node(
-                    node, instance_info=json.loads(node.extra["instance_info"])
-                )
         else:
             logger.warning(
                 f"Node {node.name} ({node.id}) not in supported provision state"
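For illustration, a minimal sketch of how the NetBox context feeds the bootstrap play above; the context keys and values are hypothetical, only the merge order mirrors the diff:

# Hypothetical local_context_data pulled from the NetBox device record
default_vars = {"hostname_name": "node-01", "operator_user": "dragon"}

play = {
    "name": "Run bootstrap - part 2",
    "hosts": "localhost",
    "connection": "local",
    "gather_facts": True,
    "vars": default_vars.copy(),  # NetBox context becomes the play defaults
    "roles": [
        "osism.commons.hostname",
        "osism.commons.hosts",
        "osism.commons.operator",
    ],
}

# Deploy-specific values are merged on top afterwards and win on key conflicts
play["vars"].update({"hostname_name": "node-01.example.com"})
print(play["vars"])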
osism/commands/redfish.py
ADDED
# SPDX-License-Identifier: Apache-2.0

import json
from cliff.command import Command
from loguru import logger
from tabulate import tabulate

from osism.tasks.conductor import get_redfish_resources


class List(Command):
    def _normalize_column_name(self, column_name):
        """Normalize column name to lowercase with underscores instead of spaces."""
        if not column_name:
            return column_name
        return column_name.lower().replace(" ", "_")

    def _get_column_mappings(self, resourcetype):
        """Get column mappings for a specific resource type."""
        if resourcetype == "EthernetInterfaces":
            return {
                "ID": "id",
                "Name": "name",
                "Description": "description",
                "MAC Address": "mac_address",
                "Permanent MAC Address": "permanent_mac_address",
                "Speed (Mbps)": "speed_mbps",
                "MTU Size": "mtu_size",
                "Link Status": "link_status",
                "Interface Enabled": "interface_enabled",
            }
        elif resourcetype == "NetworkAdapters":
            return {
                "ID": "id",
                "Name": "name",
                "Description": "description",
                "Manufacturer": "manufacturer",
                "Model": "model",
                "Part Number": "part_number",
                "Serial Number": "serial_number",
                "Firmware Version": "firmware_version",
            }
        elif resourcetype == "NetworkDeviceFunctions":
            return {
                "ID": "id",
                "Name": "name",
                "Description": "description",
                "Device Enabled": "device_enabled",
                "Ethernet Enabled": "ethernet_enabled",
                "MAC Address": "mac_address",
                "Permanent MAC Address": "permanent_mac_address",
                "Adapter ID": "adapter_id",
                "Adapter Name": "adapter_name",
            }
        return None

    def _get_filtered_columns(self, column_mappings, selected_columns=None):
        """Get filtered column mappings based on selected columns."""
        # If no columns specified, use all available columns
        if not selected_columns:
            return list(column_mappings.keys()), list(column_mappings.values())

        # Normalize selected columns and filter
        normalized_selected = [
            self._normalize_column_name(col) for col in selected_columns
        ]
        headers = []
        data_keys = []

        for display_name, data_key in column_mappings.items():
            normalized_display = self._normalize_column_name(display_name)
            if normalized_display in normalized_selected:
                headers.append(display_name)
                data_keys.append(data_key)

        # Check if any requested columns were not found
        found_columns = [self._normalize_column_name(h) for h in headers]
        for requested_col in normalized_selected:
            if requested_col not in found_columns:
                logger.warning(
                    f"Column '{requested_col}' not found. Available columns: {list(column_mappings.keys())}"
                )

        return headers, data_keys

    def _filter_json_data(self, data, data_keys):
        """Filter JSON data to include only selected columns."""
        if not data or not data_keys:
            return data

        filtered_data = []
        for item in data:
            filtered_item = {key: item.get(key) for key in data_keys}
            filtered_data.append(filtered_item)

        return filtered_data

    def _filter_and_display_table(self, data, column_mappings, selected_columns=None):
        """Generic method to filter columns and display table data."""
        if not data:
            return

        headers, data_keys = self._get_filtered_columns(
            column_mappings, selected_columns
        )

        if not headers:
            print("No valid columns specified")
            return

        # Prepare table data
        table_data = []
        for item in data:
            row = [item.get(key, "N/A") for key in data_keys]
            table_data.append(row)

        # Display the table
        print(tabulate(table_data, headers=headers, tablefmt="grid"))
        print(f"\nTotal items: {len(data)}")

    def get_parser(self, prog_name):
        parser = super(List, self).get_parser(prog_name)
        parser.add_argument(
            "hostname",
            type=str,
            help="Hostname of the target system",
        )
        parser.add_argument(
            "resourcetype",
            type=str,
            help="Resource type to process (e.g., EthernetInterfaces, NetworkAdapters, NetworkDeviceFunctions)",
        )
        parser.add_argument(
            "--format",
            type=str,
            choices=["table", "json"],
            default="table",
            help="Output format (default: table)",
        )
        parser.add_argument(
            "--column",
            action="append",
            help="Column to include in output (can be used multiple times)",
        )
        return parser

    def take_action(self, parsed_args):
        hostname = parsed_args.hostname
        resourcetype = parsed_args.resourcetype
        output_format = parsed_args.format
        columns = parsed_args.column
        logger.info(
            f"Redfish list command called with hostname: {hostname}, resourcetype: {resourcetype}, format: {output_format}"
        )

        # Use Celery task to get Redfish resources
        task_result = get_redfish_resources.delay(hostname, resourcetype)
        result = task_result.get()

        if output_format == "json":
            if result:
                # Apply column filtering for JSON output if columns are specified
                if columns:
                    # Get column mappings for the resource type
                    column_mappings = self._get_column_mappings(resourcetype)
                    if column_mappings:
                        _, data_keys = self._get_filtered_columns(
                            column_mappings, columns
                        )
                        filtered_result = self._filter_json_data(result, data_keys)
                        print(json.dumps(filtered_result, indent=2))
                    else:
                        print(json.dumps(result, indent=2))
                else:
                    print(json.dumps(result, indent=2))
            else:
                print("[]")
        else:
            if resourcetype == "EthernetInterfaces" and result:
                self._display_ethernet_interfaces(result, columns)
            elif resourcetype == "NetworkAdapters" and result:
                self._display_network_adapters(result, columns)
            elif resourcetype == "NetworkDeviceFunctions" and result:
                self._display_network_device_functions(result, columns)
            elif result:
                logger.info(f"Retrieved resources: {result}")
            else:
                print(f"No {resourcetype} resources found for {hostname}")

    def _display_ethernet_interfaces(self, interfaces, selected_columns=None):
        """Display EthernetInterfaces in a formatted table."""
        if not interfaces:
            print("No EthernetInterfaces found")
            return

        column_mappings = self._get_column_mappings("EthernetInterfaces")
        self._filter_and_display_table(interfaces, column_mappings, selected_columns)

    def _display_network_adapters(self, adapters, selected_columns=None):
        """Display NetworkAdapters in a formatted table."""
        if not adapters:
            print("No NetworkAdapters found")
            return

        column_mappings = self._get_column_mappings("NetworkAdapters")
        self._filter_and_display_table(adapters, column_mappings, selected_columns)

    def _display_network_device_functions(
        self, device_functions, selected_columns=None
    ):
        """Display NetworkDeviceFunctions in a formatted table."""
        if not device_functions:
            print("No NetworkDeviceFunctions found")
            return

        column_mappings = self._get_column_mappings("NetworkDeviceFunctions")
        self._filter_and_display_table(
            device_functions, column_mappings, selected_columns
        )
osism/settings.py
CHANGED
@@ -54,3 +54,6 @@ SONIC_EXPORT_IDENTIFIER = os.getenv("SONIC_EXPORT_IDENTIFIER", "serial-number")
 NETBOX_SECONDARIES = (
     os.getenv("NETBOX_SECONDARIES", read_secret("NETBOX_SECONDARIES")) or "[]"
 )
+
+# Redfish connection timeout in seconds
+REDFISH_TIMEOUT = int(os.getenv("REDFISH_TIMEOUT", "20"))
osism/tasks/conductor/__init__.py
CHANGED
@@ -8,6 +8,7 @@ from loguru import logger
 from osism.tasks import Config
 from osism.tasks.conductor.config import get_configuration
 from osism.tasks.conductor.ironic import sync_ironic as _sync_ironic
+from osism.tasks.conductor.redfish import get_resources as _get_redfish_resources
 from osism.tasks.conductor.sonic import sync_sonic as _sync_sonic
 
 
@@ -52,9 +53,15 @@ def sync_sonic(self, device_name=None, show_diff=True):
     return _sync_sonic(device_name, self.request.id, show_diff)
 
 
+@app.task(bind=True, name="osism.tasks.conductor.get_redfish_resources")
+def get_redfish_resources(self, hostname, resource_type):
+    return _get_redfish_resources(hostname, resource_type)
+
+
 __all__ = [
     "app",
     "get_ironic_parameters",
+    "get_redfish_resources",
     "sync_netbox",
     "sync_ironic",
     "sync_sonic",
osism/tasks/conductor/config.py
CHANGED
@@ -1,22 +1,12 @@
 # SPDX-License-Identifier: Apache-2.0
 
-import uuid
-
 from loguru import logger
+import validators
 import yaml
 
 from osism.tasks import Config, openstack
 
 
-def is_uuid(value):
-    """Check if a string is a valid UUID."""
-    try:
-        uuid.UUID(value)
-        return True
-    except (ValueError, AttributeError):
-        return False
-
-
 def get_configuration():
     with open("/etc/conductor.yml") as fp:
         configuration = yaml.load(fp, Loader=yaml.SafeLoader)
@@ -39,54 +29,81 @@ def get_configuration():
             image_source = configuration["ironic_parameters"]["instance_info"][
                 "image_source"
             ]
-            if not
+            if not validators.uuid(image_source) and not validators.url(
+                image_source
+            ):
                 result = openstack.image_get(image_source)
-
-                "
-
+                if result:
+                    configuration["ironic_parameters"]["instance_info"][
+                        "image_source"
+                    ] = result.id
+                else:
+                    logger.warning(f"Could not resolve image ID for {image_source}")
 
     if "driver_info" in configuration["ironic_parameters"]:
         if "deploy_kernel" in configuration["ironic_parameters"]["driver_info"]:
             deploy_kernel = configuration["ironic_parameters"]["driver_info"][
                 "deploy_kernel"
             ]
-            if not
+            if not validators.uuid(deploy_kernel) and not validators.url(
+                deploy_kernel
+            ):
                 result = openstack.image_get(deploy_kernel)
-
-                "
-
+                if result:
+                    configuration["ironic_parameters"]["driver_info"][
+                        "deploy_kernel"
+                    ] = result.id
+                else:
+                    logger.warning(
+                        f"Could not resolve image ID for {deploy_kernel}"
+                    )
 
         if "deploy_ramdisk" in configuration["ironic_parameters"]["driver_info"]:
             deploy_ramdisk = configuration["ironic_parameters"]["driver_info"][
                 "deploy_ramdisk"
            ]
-            if not
+            if not validators.uuid(deploy_ramdisk) and not validators.url(
+                deploy_ramdisk
+            ):
                 result = openstack.image_get(deploy_ramdisk)
-
-                "
-
+                if result:
+                    configuration["ironic_parameters"]["driver_info"][
+                        "deploy_ramdisk"
+                    ] = result.id
+                else:
+                    logger.warning(
+                        f"Could not resolve image ID for {deploy_ramdisk}"
+                    )
 
         if "cleaning_network" in configuration["ironic_parameters"]["driver_info"]:
-
+            cleaning_network = configuration["ironic_parameters"]["driver_info"][
+                "cleaning_network"
+            ]
+            result = openstack.network_get(cleaning_network)
+            if result:
                 configuration["ironic_parameters"]["driver_info"][
                     "cleaning_network"
-                ]
+                ] = result.id
+            else:
+                logger.warning(
+                    f"Could not resolve network ID for {cleaning_network}"
+                )
-
-
-
-
 
         if (
             "provisioning_network"
             in configuration["ironic_parameters"]["driver_info"]
         ):
-
+            provisioning_network = configuration["ironic_parameters"][
+                "driver_info"
+            ]["provisioning_network"]
+            result = openstack.network_get(provisioning_network)
+            if result:
                 configuration["ironic_parameters"]["driver_info"][
                     "provisioning_network"
-                ]
+                ] = result.id
+            else:
+                logger.warning(
+                    f"Could not resolve network ID for {provisioning_network}"
+                )
-
-
-
-
 
     return configuration
osism/tasks/conductor/ironic.py
CHANGED
@@ -34,6 +34,100 @@ driver_params = {
 }
 
 
+def _prepare_node_attributes(device, get_ironic_parameters):
+    node_attributes = get_ironic_parameters()
+    if (
+        "ironic_parameters" in device.custom_fields
+        and device.custom_fields["ironic_parameters"]
+    ):
+        deep_merge(node_attributes, device.custom_fields["ironic_parameters"])
+
+    vault = get_vault()
+    deep_decrypt(node_attributes, vault)
+
+    node_secrets = device.custom_fields.get("secrets", {})
+    if node_secrets is None:
+        node_secrets = {}
+    deep_decrypt(node_secrets, vault)
+
+    if (
+        "driver" in node_attributes
+        and node_attributes["driver"] in driver_params.keys()
+    ):
+        if "driver_info" in node_attributes:
+            unused_drivers = [
+                driver
+                for driver in driver_params.keys()
+                if driver != node_attributes["driver"]
+            ]
+            for key in list(node_attributes["driver_info"].keys()):
+                for driver in unused_drivers:
+                    if key.startswith(driver + "_"):
+                        node_attributes["driver_info"].pop(key, None)
+
+            username_key = driver_params[node_attributes["driver"]]["username"]
+            if username_key in node_attributes["driver_info"]:
+                node_attributes["driver_info"][username_key] = (
+                    jinja2.Environment(loader=jinja2.BaseLoader())
+                    .from_string(node_attributes["driver_info"][username_key])
+                    .render(
+                        remote_board_username=str(
+                            node_secrets.get("remote_board_username", "admin")
+                        )
+                    )
+                )
+
+            password_key = driver_params[node_attributes["driver"]]["password"]
+            if password_key in node_attributes["driver_info"]:
+                node_attributes["driver_info"][password_key] = (
+                    jinja2.Environment(loader=jinja2.BaseLoader())
+                    .from_string(node_attributes["driver_info"][password_key])
+                    .render(
+                        remote_board_password=str(
+                            node_secrets.get("remote_board_password", "password")
+                        )
+                    )
+                )
+
+            address_key = driver_params[node_attributes["driver"]]["address"]
+            if address_key in node_attributes["driver_info"]:
+                oob_ip_result = get_device_oob_ip(device)
+                if oob_ip_result:
+                    oob_ip, _ = oob_ip_result
+                    node_attributes["driver_info"][address_key] = (
+                        jinja2.Environment(loader=jinja2.BaseLoader())
+                        .from_string(node_attributes["driver_info"][address_key])
+                        .render(remote_board_address=oob_ip)
+                    )
+    node_attributes.update({"resource_class": device.name})
+    if "extra" not in node_attributes:
+        node_attributes["extra"] = {}
+    if "instance_info" in node_attributes and node_attributes["instance_info"]:
+        node_attributes["extra"].update(
+            {"instance_info": json.dumps(node_attributes["instance_info"])}
+        )
+    if (
+        "netplan_parameters" in device.custom_fields
+        and device.custom_fields["netplan_parameters"]
+    ):
+        node_attributes["extra"].update(
+            {
+                "netplan_parameters": json.dumps(
+                    device.custom_fields["netplan_parameters"]
+                )
+            }
+        )
+    if (
+        "frr_parameters" in device.custom_fields
+        and device.custom_fields["frr_parameters"]
+    ):
+        node_attributes["extra"].update(
+            {"frr_parameters": json.dumps(device.custom_fields["frr_parameters"])}
+        )
+
+    return node_attributes
+
+
 def sync_ironic(request_id, get_ironic_parameters, force_update=False):
     osism_utils.push_task_output(
         request_id,
@@ -79,105 +173,7 @@ def sync_ironic(request_id, get_ironic_parameters, force_update=False):
 
         node_interfaces = list(netbox.get_interfaces_by_device(device.name))
 
-        node_attributes = get_ironic_parameters
-        if (
-            "ironic_parameters" in device.custom_fields
-            and device.custom_fields["ironic_parameters"]
-        ):
-            # NOTE: Update node attributes with overrides from NetBox device
-            deep_merge(node_attributes, device.custom_fields["ironic_parameters"])
-
-        # NOTE: Decrypt ansible vaulted secrets
-        vault = get_vault()
-        deep_decrypt(node_attributes, vault)
-
-        node_secrets = device.custom_fields.get("secrets", {})
-        if node_secrets is None:
-            node_secrets = {}
-        deep_decrypt(node_secrets, vault)
-
-        if (
-            "driver" in node_attributes
-            and node_attributes["driver"] in driver_params.keys()
-        ):
-            if "driver_info" in node_attributes:
-                # NOTE: Remove all fields belonging to a different driver
-                unused_drivers = [
-                    driver
-                    for driver in driver_params.keys()
-                    if driver != node_attributes["driver"]
-                ]
-                for key in list(node_attributes["driver_info"].keys()):
-                    for driver in unused_drivers:
-                        if key.startswith(driver + "_"):
-                            node_attributes["driver_info"].pop(key, None)
-
-                # NOTE: Render driver username field
-                username_key = driver_params[node_attributes["driver"]]["username"]
-                if username_key in node_attributes["driver_info"]:
-                    node_attributes["driver_info"][username_key] = (
-                        jinja2.Environment(loader=jinja2.BaseLoader())
-                        .from_string(node_attributes["driver_info"][username_key])
-                        .render(
-                            remote_board_username=str(
-                                node_secrets.get("remote_board_username", "admin")
-                            )
-                        )
-                    )
-
-                # NOTE: Render driver password field
-                password_key = driver_params[node_attributes["driver"]]["password"]
-                if password_key in node_attributes["driver_info"]:
-                    node_attributes["driver_info"][password_key] = (
-                        jinja2.Environment(loader=jinja2.BaseLoader())
-                        .from_string(node_attributes["driver_info"][password_key])
-                        .render(
-                            remote_board_password=str(
-                                node_secrets.get("remote_board_password", "password")
-                            )
-                        )
-                    )
-
-                # NOTE: Render driver address field
-                address_key = driver_params[node_attributes["driver"]]["address"]
-                if address_key in node_attributes["driver_info"]:
-                    oob_ip_result = get_device_oob_ip(device)
-                    if oob_ip_result:
-                        oob_ip, _ = (
-                            oob_ip_result  # Extract IP address, ignore prefix length
-                        )
-                        node_attributes["driver_info"][address_key] = (
-                            jinja2.Environment(loader=jinja2.BaseLoader())
-                            .from_string(node_attributes["driver_info"][address_key])
-                            .render(remote_board_address=oob_ip)
-                        )
-        node_attributes.update({"resource_class": device.name})
-        if "extra" not in node_attributes:
-            node_attributes["extra"] = {}
-        # NOTE: Copy instance_info into extra field. because ironic removes it on undeployment. This way it may be readded on undeploy without querying the netbox again
-        if "instance_info" in node_attributes and node_attributes["instance_info"]:
-            node_attributes["extra"].update(
-                {"instance_info": json.dumps(node_attributes["instance_info"])}
-            )
-        # NOTE: Write metadata used for provisioning into 'extra' field, so that it is available during node deploy without querying the netbox again
-        if (
-            "netplan_parameters" in device.custom_fields
-            and device.custom_fields["netplan_parameters"]
-        ):
-            node_attributes["extra"].update(
-                {
-                    "netplan_parameters": json.dumps(
-                        device.custom_fields["netplan_parameters"]
-                    )
-                }
-            )
-        if (
-            "frr_parameters" in device.custom_fields
-            and device.custom_fields["frr_parameters"]
-        ):
-            node_attributes["extra"].update(
-                {"frr_parameters": json.dumps(device.custom_fields["frr_parameters"])}
-            )
+        node_attributes = _prepare_node_attributes(device, get_ironic_parameters)
         ports_attributes = [
             dict(address=interface.mac_address)
             for interface in node_interfaces
osism/tasks/conductor/redfish.py
ADDED
# SPDX-License-Identifier: Apache-2.0

import json
from loguru import logger
from osism.tasks.conductor.utils import get_redfish_connection


def _normalize_redfish_data(data):
    """
    Convert Redfish data values to strings and clean up None values.

    Args:
        data (dict): Dictionary with Redfish resource data

    Returns:
        dict: Dictionary with normalized string values, None values removed
    """
    normalized_data = {}

    for key, value in data.items():
        if value is not None:
            if isinstance(value, (dict, list)):
                # Convert complex objects to JSON strings
                normalized_data[key] = json.dumps(value)
            elif isinstance(value, bool):
                # Convert booleans to lowercase strings
                normalized_data[key] = str(value).lower()
            elif not isinstance(value, str):
                # Convert numbers and other types to strings
                normalized_data[key] = str(value)
            else:
                # Keep strings as-is
                normalized_data[key] = value

    return normalized_data


def get_resources(hostname, resource_type):
    """
    Get Redfish resources for a specific hostname and resource type.

    Args:
        hostname (str): The hostname of the target system
        resource_type (str): The type of resource to retrieve (e.g., EthernetInterfaces)

    Returns:
        list: Retrieved Redfish resources or empty list if failed
    """
    logger.info(
        f"Getting Redfish resources for hostname: {hostname}, resource_type: {resource_type}"
    )

    if resource_type == "EthernetInterfaces":
        return _get_ethernet_interfaces(hostname)
    elif resource_type == "NetworkAdapters":
        return _get_network_adapters(hostname)
    elif resource_type == "NetworkDeviceFunctions":
        return _get_network_device_functions(hostname)

    logger.warning(f"Resource type {resource_type} not supported yet")
    return []


def _get_ethernet_interfaces(hostname):
    """
    Get all EthernetInterfaces from a Redfish-enabled system.

    Args:
        hostname (str): The hostname of the target system

    Returns:
        list: List of EthernetInterface dictionaries
    """
    try:
        # Get Redfish connection using the utility function
        redfish_conn = get_redfish_connection(hostname, ignore_ssl_errors=True)

        if not redfish_conn:
            logger.error(f"Could not establish Redfish connection to {hostname}")
            return []

        ethernet_interfaces = []

        # Navigate through the Redfish service to find EthernetInterfaces
        # Structure: /redfish/v1/Systems/{system_id}/EthernetInterfaces
        for system in redfish_conn.get_system_collection().get_members():
            logger.debug(f"Processing system: {system.identity}")

            # Check if the system has EthernetInterfaces
            if hasattr(system, "ethernet_interfaces") and system.ethernet_interfaces:
                for interface in system.ethernet_interfaces.get_members():
                    try:
                        # Extract relevant information from each EthernetInterface
                        interface_data = {
                            "id": interface.identity,
                            "name": getattr(interface, "name", None),
                            "description": getattr(interface, "description", None),
                            "mac_address": getattr(interface, "mac_address", None),
                            "permanent_mac_address": getattr(
                                interface, "permanent_mac_address", None
                            ),
                            "speed_mbps": getattr(interface, "speed_mbps", None),
                            "mtu_size": getattr(interface, "mtu_size", None),
                            "link_status": getattr(interface, "link_status", None),
                            "interface_enabled": getattr(
                                interface, "interface_enabled", None
                            ),
                        }

                        # Normalize data values to strings and clean up None values
                        interface_data = _normalize_redfish_data(interface_data)

                        ethernet_interfaces.append(interface_data)
                        logger.debug(
                            f"Found EthernetInterface: {interface_data.get('name', interface_data.get('id'))}"
                        )

                    except Exception as exc:
                        logger.warning(
                            f"Error processing EthernetInterface {interface.identity}: {exc}"
                        )
                        continue

        logger.info(
            f"Retrieved {len(ethernet_interfaces)} EthernetInterfaces from {hostname}"
        )
        return ethernet_interfaces

    except Exception as exc:
        logger.error(f"Error retrieving EthernetInterfaces from {hostname}: {exc}")
        return []


def _get_network_adapters(hostname):
    """
    Get all NetworkAdapters from a Redfish-enabled system.

    Args:
        hostname (str): The hostname of the target system

    Returns:
        list: List of NetworkAdapter dictionaries
    """
    try:
        # Get Redfish connection using the utility function
        redfish_conn = get_redfish_connection(hostname, ignore_ssl_errors=True)

        if not redfish_conn:
            logger.error(f"Could not establish Redfish connection to {hostname}")
            return []

        network_adapters = []

        # Navigate through the Redfish service to find NetworkAdapters
        # Structure: /redfish/v1/Chassis/{chassis_id}/NetworkAdapters
        for chassis in redfish_conn.get_chassis_collection().get_members():
            logger.debug(f"Processing chassis: {chassis.identity}")

            # Check if the chassis has NetworkAdapters
            if hasattr(chassis, "network_adapters") and chassis.network_adapters:
                for adapter in chassis.network_adapters.get_members():
                    try:
                        # Extract relevant information from each NetworkAdapter
                        adapter_data = {
                            "id": adapter.identity,
                            "name": getattr(adapter, "name", None),
                            "description": getattr(adapter, "description", None),
                            "manufacturer": getattr(adapter, "manufacturer", None),
                            "model": getattr(adapter, "model", None),
                            "part_number": getattr(adapter, "part_number", None),
                            "serial_number": getattr(adapter, "serial_number", None),
                            "firmware_version": getattr(
                                adapter, "firmware_version", None
                            ),
                        }

                        # Normalize data values to strings and clean up None values
                        adapter_data = _normalize_redfish_data(adapter_data)

                        network_adapters.append(adapter_data)
                        logger.debug(
                            f"Found NetworkAdapter: {adapter_data.get('name', adapter_data.get('id'))}"
                        )

                    except Exception as exc:
                        logger.warning(
                            f"Error processing NetworkAdapter {adapter.identity}: {exc}"
                        )
                        continue

        logger.info(
            f"Retrieved {len(network_adapters)} NetworkAdapters from {hostname}"
        )
        return network_adapters

    except Exception as exc:
        logger.error(f"Error retrieving NetworkAdapters from {hostname}: {exc}")
        return []


def _get_network_device_functions(hostname):
    """
    Get all NetworkDeviceFunctions from a Redfish-enabled system.

    Args:
        hostname (str): The hostname of the target system

    Returns:
        list: List of NetworkDeviceFunction dictionaries with MAC addresses
    """
    try:
        # Get Redfish connection using the utility function
        redfish_conn = get_redfish_connection(hostname, ignore_ssl_errors=True)

        if not redfish_conn:
            logger.error(f"Could not establish Redfish connection to {hostname}")
            return []

        network_device_functions = []

        # Navigate through the Redfish service to find NetworkDeviceFunctions
        # Structure: /redfish/v1/Chassis/{chassis_id}/NetworkAdapters/{adapter_id}/NetworkDeviceFunctions
        for chassis in redfish_conn.get_chassis_collection().get_members():
            logger.debug(f"Processing chassis: {chassis.identity}")

            # Check if the chassis has NetworkAdapters
            if hasattr(chassis, "network_adapters") and chassis.network_adapters:
                for adapter in chassis.network_adapters.get_members():
                    logger.debug(f"Processing NetworkAdapter: {adapter.identity}")

                    try:
                        for (
                            device_func
                        ) in adapter.network_device_functions.get_members():
                            try:
                                # Extract MAC address from Ethernet configuration
                                mac_address = None
                                permanent_mac_address = None

                                # Try to get MAC from ethernet configuration
                                if (
                                    hasattr(device_func, "ethernet")
                                    and device_func.ethernet
                                ):
                                    ethernet_config = device_func.ethernet
                                    mac_address = getattr(
                                        ethernet_config, "mac_address", None
                                    )
                                    permanent_mac_address = getattr(
                                        ethernet_config, "permanent_mac_address", None
                                    )

                                # Extract relevant information from each NetworkDeviceFunction
                                device_func_data = {
                                    "id": device_func.identity,
                                    "name": getattr(device_func, "name", None),
                                    "description": getattr(
                                        device_func, "description", None
                                    ),
                                    "device_enabled": getattr(
                                        device_func, "device_enabled", None
                                    ),
                                    "ethernet_enabled": getattr(
                                        device_func, "ethernet_enabled", None
                                    ),
                                    "mac_address": mac_address,
                                    "permanent_mac_address": permanent_mac_address,
                                    "adapter_id": adapter.identity,
                                    "adapter_name": getattr(adapter, "name", None),
                                }

                                # Normalize data values to strings and clean up None values
                                device_func_data = _normalize_redfish_data(
                                    device_func_data
                                )

                                network_device_functions.append(device_func_data)
                                logger.debug(
                                    f"Found NetworkDeviceFunction: {device_func_data.get('name', device_func_data.get('id'))}"
                                )

                            except Exception as exc:
                                logger.warning(
                                    f"Error processing NetworkDeviceFunction {device_func.identity}: {exc}"
                                )
                                continue
                    except Exception as exc:
                        logger.warning(
                            f"Error processing NetworkAdapter {adapter.identity}: {exc}"
                        )
                        continue

        logger.info(
            f"Retrieved {len(network_device_functions)} NetworkDeviceFunctions from {hostname}"
        )
        return network_device_functions

    except Exception as exc:
        logger.error(f"Error retrieving NetworkDeviceFunctions from {hostname}: {exc}")
        return []
osism/tasks/conductor/utils.py
CHANGED
@@ -5,6 +5,8 @@ from ansible.parsing.vault import VaultLib, VaultSecret
 from loguru import logger
 
 from osism import utils
+import sushy
+import urllib3
 
 
 def deep_compare(a, b, updates):
@@ -77,3 +79,149 @@ def get_vault():
         logger.error("Unable to get vault secret. Dropping encrypted entries")
         vault = VaultLib()
     return vault
+
+
+def get_redfish_connection(
+    hostname, username=None, password=None, ignore_ssl_errors=True, timeout=None
+):
+    """Create a Redfish connection to the specified hostname."""
+    from osism import settings
+    from osism.tasks import openstack
+
+    if not hostname:
+        return None
+
+    # Use configurable timeout if not provided
+    if timeout is None:
+        timeout = settings.REDFISH_TIMEOUT
+
+    # Get Redfish address from Ironic driver_info
+    base_url = f"https://{hostname}"
+    device = None
+
+    # Try to find NetBox device first for conductor configuration fallback
+    if utils.nb:
+        try:
+            # First try to find device by name
+            device = utils.nb.dcim.devices.get(name=hostname)
+
+            # If not found by name, try by inventory_hostname custom field
+            if not device:
+                devices = utils.nb.dcim.devices.filter(cf_inventory_hostname=hostname)
+                if devices:
+                    device = devices[0]
+        except Exception as exc:
+            logger.warning(f"Could not resolve hostname {hostname} via NetBox: {exc}")
+
+    try:
+        ironic_node = openstack.baremetal_node_show(hostname, ignore_missing=True)
+        if ironic_node and "driver_info" in ironic_node:
+            driver_info = ironic_node["driver_info"]
+            # Use redfish_address from driver_info if available (contains full URL)
+            if "redfish_address" in driver_info:
+                base_url = driver_info["redfish_address"]
+                logger.info(f"Using Ironic redfish_address {base_url} for {hostname}")
+        else:
+            # Fallback to conductor configuration if Ironic driver_info not available
+            conductor_address = _get_conductor_redfish_address(device)
+            if conductor_address:
+                base_url = conductor_address
+                logger.info(
+                    f"Using conductor redfish_address {base_url} for {hostname}"
+                )
+    except Exception as exc:
+        logger.warning(f"Could not get Ironic node for {hostname}: {exc}")
+        # Fallback to conductor configuration on Ironic error
+        conductor_address = _get_conductor_redfish_address(device)
+        if conductor_address:
+            base_url = conductor_address
+            logger.info(f"Using conductor redfish_address {base_url} for {hostname}")
+
+    # Get credentials from conductor configuration if not provided
+    if not username or not password:
+        conductor_username, conductor_password = _get_conductor_redfish_credentials(
+            device
+        )
+        if not username:
+            username = conductor_username
+        if not password:
+            password = conductor_password
+
+    auth = sushy.auth.SessionOrBasicAuth(username=username, password=password)
+
+    try:
+        if ignore_ssl_errors:
+            urllib3.disable_warnings()
+            conn = sushy.Sushy(base_url, auth=auth, verify=False)
+        else:
+            conn = sushy.Sushy(base_url, auth=auth)
+
+        return conn
+    except Exception as exc:
+        logger.error(
+            f"Unable to connect to Redfish API at {base_url} with timeout {timeout}s: {exc}"
+        )
+        return None
+
+
+def _get_conductor_redfish_credentials(device):
+    """Get Redfish credentials from conductor configuration and device secrets."""
+    from osism.tasks.conductor.config import get_configuration
+    from osism.tasks.conductor.ironic import _prepare_node_attributes
+
+    try:
+        if not device:
+            return None, None
+
+        # Use _prepare_node_attributes to get processed node attributes
+        def get_ironic_parameters():
+            configuration = get_configuration()
+            return configuration.get("ironic_parameters", {})
+
+        node_attributes = _prepare_node_attributes(device, get_ironic_parameters)
+
+        # Extract Redfish credentials if available
+        if (
+            "driver_info" in node_attributes
+            and node_attributes.get("driver") == "redfish"
+        ):
+            driver_info = node_attributes["driver_info"]
+            username = driver_info.get("redfish_username")
+            password = driver_info.get("redfish_password")
+            return username, password
+
+    except Exception as exc:
+        logger.warning(f"Could not get conductor Redfish credentials: {exc}")
+
+    return None, None
+
+
+def _get_conductor_redfish_address(device):
+    """Get Redfish address from conductor configuration and device OOB IP."""
+    from osism.tasks.conductor.config import get_configuration
+    from osism.tasks.conductor.ironic import _prepare_node_attributes
+
+    try:
+        if not device:
+            return None
+
+        # Use _prepare_node_attributes to get processed node attributes
+        def get_ironic_parameters():
+            configuration = get_configuration()
+            return configuration.get("ironic_parameters", {})
+
+        node_attributes = _prepare_node_attributes(device, get_ironic_parameters)
+
+        # Extract Redfish address if available
+        if (
+            "driver_info" in node_attributes
+            and node_attributes.get("driver") == "redfish"
+        ):
+            driver_info = node_attributes["driver_info"]
+            address = driver_info.get("redfish_address")
+            return address
+
+    except Exception as exc:
+        logger.warning(f"Could not get conductor Redfish address: {exc}")
+
+    return None
{osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: osism
-Version: 0.20250628.0
+Version: 0.20250701.0
 Summary: OSISM manager interface
 Home-page: https://github.com/osism/python-osism
 Author: OSISM GmbH
@@ -54,6 +54,7 @@ Requires-Dist: sushy==5.6.0
 Requires-Dist: tabulate==0.9.0
 Requires-Dist: transitions==0.9.2
 Requires-Dist: uvicorn[standard]==0.35.0
+Requires-Dist: validators==0.35.0
 Requires-Dist: watchdog==6.0.0
 Provides-Extra: ansible
 Requires-Dist: ansible-runner==2.4.1; extra == "ansible"
{osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/RECORD
CHANGED
@@ -2,10 +2,10 @@ osism/__init__.py,sha256=1UiNTBus0V0f2AbZQzAtVtu6zkfCCrw0OTq--NwFAqY,341
 osism/__main__.py,sha256=ILe4gu61xEISiBsxanqTQIdSkV-YhpZXTRlguCYyssk,141
 osism/api.py,sha256=3kEfPJtPwuWD8luDNnEFRx3nEsUY5AeWnmjd4A5ii-A,11079
 osism/main.py,sha256=Dt2-9sLXcS-Ny4DAz7hrha-KRc7zd7BFUTRdfs_X8z4,893
-osism/settings.py,sha256=
+osism/settings.py,sha256=IUbUo8kru8TeiMAnnil5oHwd8SqbkPCY401dVONMygo,1885
 osism/commands/__init__.py,sha256=Ag4wX_DCgXRdoLn6t069jqb3DdRylsX2nyYkiyCx4uk,456
 osism/commands/apply.py,sha256=GWUccZAXlgkPYqylrCmdWcj8FCkDsPEipIIG937MeII,16833
-osism/commands/baremetal.py,sha256=
+osism/commands/baremetal.py,sha256=yxCb0UwBfERinoGvlD9bvsGPEdDcHhq48Mn6u5b2aM8,12037
 osism/commands/compose.py,sha256=iqzG7mS9E1VWaLNN6yQowjOqiHn3BMdj-yfXb3Dc4Ok,1200
 osism/commands/compute.py,sha256=cgqXWJa5wAvn-7e3FWCgX6hie_aK0yrKRkcNzjLXwDY,25799
 osism/commands/configuration.py,sha256=sPe8b0dVKFRbr30xoeVdAnHbGwCwgUh0xa_Vzv5pSQQ,954
@@ -17,6 +17,7 @@ osism/commands/manage.py,sha256=uzfmt3R0PJ4HxUw_V945pN0FbKb3zhyiBuD9br1ORYE,2314
 osism/commands/netbox.py,sha256=e65P0kWrjTLw2T9HZthxjDTIRa-KAHgSSJAlvVef7n4,7345
 osism/commands/noset.py,sha256=7zDFuFMyNpo7DUOKcNiYV8nodtdMOYFp5LDPcuJhlZ8,1481
 osism/commands/reconciler.py,sha256=ubQfX8j13s3NuMKnT0Lt6O-szf7Z1V02AfsMQFHmO74,2209
+osism/commands/redfish.py,sha256=oBfxd5UBX4ED8XulEuIYziIYQqTvUKpKfcdGyg_AoiI,8431
 osism/commands/server.py,sha256=avmoOv5rjOi-fN2A-27cPwOtiy2Q2j6UFtCh3QrfWAI,7512
 osism/commands/service.py,sha256=A1lgAlGeCJpbFFqF55DRWPcCirIgpU0dzjzVLZ0mz3k,2649
 osism/commands/set.py,sha256=xLBi2DzbVQo2jb3-cOIE9In5UB3vFxquQJkDN-EsfhM,1425
@@ -42,11 +43,12 @@ osism/tasks/kubernetes.py,sha256=VzXq_VrYU_CLm4cOruqnE3Kq2ydfO9glZ3p0bp3OYoc,625
 osism/tasks/netbox.py,sha256=g0gL5QImiRTHqixRxze7LfNqPth7cXqLzVWQDUJLDjE,5928
 osism/tasks/openstack.py,sha256=g15tCll5vP1pC6ysxRCTZxplsdGmXbxaCH3k1Qdv5Xg,6367
 osism/tasks/reconciler.py,sha256=phbSV6urILqq9mHGMYDFwSfx8bLZmldwgEi8sMWs8RA,2040
-osism/tasks/conductor/__init__.py,sha256=
-osism/tasks/conductor/config.py,sha256=
-osism/tasks/conductor/ironic.py,sha256=
+osism/tasks/conductor/__init__.py,sha256=HW8CXpZOQ6aNgLZ3Ck08YcfQfueoA0ce35Kolh0rWww,1903
+osism/tasks/conductor/config.py,sha256=tMI0dtEFSWxfueRZdvocpbEq0sIN_PnXG08CuATkPz4,4489
+osism/tasks/conductor/ironic.py,sha256=Gx9LuWNMIMae4a3cVFKsTR4Hm_sr4jLf-JO9QBPSYcg,15402
 osism/tasks/conductor/netbox.py,sha256=5Nc7wrriDOtSuru1KDLt9QpA54vC7tXDPB2J0JP9GKo,11393
-osism/tasks/conductor/
+osism/tasks/conductor/redfish.py,sha256=hOOS-_l3Qmo_6vLsgjZmJwTxLTf029hhFRVkU0TMLL0,12723
+osism/tasks/conductor/utils.py,sha256=ZSbQumQr-uL-B9XOwbsscLIX7czJHu6Mq0t_poRpMsw,7769
 osism/tasks/conductor/sonic/__init__.py,sha256=oxTTl_MGK4iWK9uNDRNlULtGrDGCQHrlJZ04weh_Lh8,777
 osism/tasks/conductor/sonic/bgp.py,sha256=PC6gGI5bCj2PCXcNGyMV9-EdlJWDsYaodzxigmYSZvw,3088
 osism/tasks/conductor/sonic/cache.py,sha256=Asv2k3nLJejuq7iB0a_LyK8dEmJzypP9v3OHkNY3GwI,3438
@@ -58,11 +60,11 @@ osism/tasks/conductor/sonic/exporter.py,sha256=25L1vbi84ZQD0xNHNTWk-anTz5QRkGJsk
 osism/tasks/conductor/sonic/interface.py,sha256=318wOwXYSSMKTPP2WSZIps-JvIkCQ2gYdQs9ZYHXwwg,38957
 osism/tasks/conductor/sonic/sync.py,sha256=fpgsQVwq6Hb7eeDHhLkAqx5BkaK3Ce_m_WvmWEsJyOo,9182
 osism/utils/__init__.py,sha256=gN5VtLJfrvyn6_snuTte7YR-vDygkpbORopIV8qSEsA,6064
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
-osism-0.
+osism-0.20250701.0.dist-info/licenses/AUTHORS,sha256=oWotd63qsnNR945QLJP9mEXaXNtCMaesfo8ZNuLjwpU,39
+osism-0.20250701.0.dist-info/licenses/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+osism-0.20250701.0.dist-info/METADATA,sha256=3Wd0oo4ezSU0A8zyKit87P9kOS1UANr07qNf_H5Y_QI,2938
+osism-0.20250701.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+osism-0.20250701.0.dist-info/entry_points.txt,sha256=xpOV8qyAEuJ6-jrVDnVUUcTCyfBQQ_EOoj5ImcynClM,3476
+osism-0.20250701.0.dist-info/pbr.json,sha256=oc9bmpZsjLQNk1EI_gOw4MlbegkQGf47icU8U5FB0iY,47
+osism-0.20250701.0.dist-info/top_level.txt,sha256=8L8dsI9hcaGHsdnR4k_LN9EM78EhwrXRFHyAryPXZtY,6
+osism-0.20250701.0.dist-info/RECORD,,
{osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/entry_points.txt
CHANGED
@@ -43,6 +43,7 @@ manage image clusterapi = osism.commands.manage:ImageClusterapi
 manage image octavia = osism.commands.manage:ImageOctavia
 manage images = osism.commands.manage:Images
 manage netbox = osism.commands.netbox:Manage
+manage redfish list = osism.commands.redfish:List
 manage server list = osism.commands.server:ServerList
 manage server migrate = osism.commands.server:ServerMigrate
 manage sonic = osism.commands.manage:Sonic
osism-0.20250701.0.dist-info/pbr.json
ADDED
@@ -0,0 +1 @@
+{"git_version": "87129c8", "is_release": false}
osism-0.20250628.0.dist-info/pbr.json
REMOVED
@@ -1 +0,0 @@
-{"git_version": "f3fe7d7", "is_release": false}
{osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/WHEEL
File without changes
{osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/licenses/AUTHORS
File without changes
{osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/licenses/LICENSE
File without changes
{osism-0.20250628.0.dist-info → osism-0.20250701.0.dist-info}/top_level.txt
File without changes