osism 0.20250627.0__py3-none-any.whl → 0.20250701.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osism/api.py +246 -120
- osism/commands/apply.py +2 -2
- osism/commands/baremetal.py +37 -6
- osism/commands/redfish.py +219 -0
- osism/commands/validate.py +1 -1
- osism/settings.py +3 -0
- osism/tasks/conductor/__init__.py +7 -0
- osism/tasks/conductor/config.py +52 -35
- osism/tasks/conductor/ironic.py +95 -99
- osism/tasks/conductor/redfish.py +300 -0
- osism/tasks/conductor/utils.py +148 -0
- {osism-0.20250627.0.dist-info → osism-0.20250701.0.dist-info}/METADATA +3 -2
- {osism-0.20250627.0.dist-info → osism-0.20250701.0.dist-info}/RECORD +21 -22
- {osism-0.20250627.0.dist-info → osism-0.20250701.0.dist-info}/entry_points.txt +1 -0
- osism-0.20250701.0.dist-info/pbr.json +1 -0
- osism/actions/__init__.py +0 -1
- osism/core/__init__.py +0 -1
- osism/plugins/__init__.py +0 -1
- osism-0.20250627.0.dist-info/pbr.json +0 -1
- /osism/{core → data}/enums.py +0 -0
- /osism/{core → data}/playbooks.py +0 -0
- {osism-0.20250627.0.dist-info → osism-0.20250701.0.dist-info}/WHEEL +0 -0
- {osism-0.20250627.0.dist-info → osism-0.20250701.0.dist-info}/licenses/AUTHORS +0 -0
- {osism-0.20250627.0.dist-info → osism-0.20250701.0.dist-info}/licenses/LICENSE +0 -0
- {osism-0.20250627.0.dist-info → osism-0.20250701.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,219 @@
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
2
|
+
|
3
|
+
import json
|
4
|
+
from cliff.command import Command
|
5
|
+
from loguru import logger
|
6
|
+
from tabulate import tabulate
|
7
|
+
|
8
|
+
from osism.tasks.conductor import get_redfish_resources
|
9
|
+
|
10
|
+
|
11
|
+
class List(Command):
    """List Redfish resources of a baremetal host.

    Fetches the requested resource type through the
    ``get_redfish_resources`` Celery task and renders the result either
    as a grid table (default) or as JSON, optionally restricted to a
    subset of columns via ``--column``.
    """

    # Display-header -> result-dict-key mappings for each supported
    # resource type. Used for table headers and for --column filtering.
    _COLUMN_MAPPINGS = {
        "EthernetInterfaces": {
            "ID": "id",
            "Name": "name",
            "Description": "description",
            "MAC Address": "mac_address",
            "Permanent MAC Address": "permanent_mac_address",
            "Speed (Mbps)": "speed_mbps",
            "MTU Size": "mtu_size",
            "Link Status": "link_status",
            "Interface Enabled": "interface_enabled",
        },
        "NetworkAdapters": {
            "ID": "id",
            "Name": "name",
            "Description": "description",
            "Manufacturer": "manufacturer",
            "Model": "model",
            "Part Number": "part_number",
            "Serial Number": "serial_number",
            "Firmware Version": "firmware_version",
        },
        "NetworkDeviceFunctions": {
            "ID": "id",
            "Name": "name",
            "Description": "description",
            "Device Enabled": "device_enabled",
            "Ethernet Enabled": "ethernet_enabled",
            "MAC Address": "mac_address",
            "Permanent MAC Address": "permanent_mac_address",
            "Adapter ID": "adapter_id",
            "Adapter Name": "adapter_name",
        },
    }

    def _normalize_column_name(self, column_name):
        """Normalize column name to lowercase with underscores instead of spaces."""
        if not column_name:
            return column_name
        return column_name.lower().replace(" ", "_")

    def _get_column_mappings(self, resourcetype):
        """Get column mappings for a specific resource type (None if unsupported)."""
        mappings = self._COLUMN_MAPPINGS.get(resourcetype)
        # Return a copy so callers cannot mutate the shared class-level table.
        return dict(mappings) if mappings is not None else None

    def _get_filtered_columns(self, column_mappings, selected_columns=None):
        """Return (headers, data_keys) filtered by the requested columns.

        Requested columns that do not exist for this resource type are
        reported via a warning and skipped.
        """
        # If no columns specified, use all available columns
        if not selected_columns:
            return list(column_mappings.keys()), list(column_mappings.values())

        normalized_selected = [
            self._normalize_column_name(col) for col in selected_columns
        ]
        headers = []
        data_keys = []
        for display_name, data_key in column_mappings.items():
            if self._normalize_column_name(display_name) in normalized_selected:
                headers.append(display_name)
                data_keys.append(data_key)

        # Check if any requested columns were not found
        found_columns = [self._normalize_column_name(h) for h in headers]
        for requested_col in normalized_selected:
            if requested_col not in found_columns:
                logger.warning(
                    f"Column '{requested_col}' not found. Available columns: {list(column_mappings.keys())}"
                )

        return headers, data_keys

    def _filter_json_data(self, data, data_keys):
        """Filter JSON data to include only selected columns."""
        if not data or not data_keys:
            return data
        return [{key: item.get(key) for key in data_keys} for item in data]

    def _filter_and_display_table(self, data, column_mappings, selected_columns=None):
        """Filter columns and print *data* as a grid table with a total count."""
        if not data:
            return

        headers, data_keys = self._get_filtered_columns(
            column_mappings, selected_columns
        )
        if not headers:
            print("No valid columns specified")
            return

        table_data = [[item.get(key, "N/A") for key in data_keys] for item in data]
        print(tabulate(table_data, headers=headers, tablefmt="grid"))
        print(f"\nTotal items: {len(data)}")

    def get_parser(self, prog_name):
        """Build the argument parser for the ``redfish list`` command."""
        parser = super(List, self).get_parser(prog_name)
        parser.add_argument(
            "hostname",
            type=str,
            help="Hostname of the target system",
        )
        parser.add_argument(
            "resourcetype",
            type=str,
            help="Resource type to process (e.g., EthernetInterfaces, NetworkAdapters, NetworkDeviceFunctions)",
        )
        parser.add_argument(
            "--format",
            type=str,
            choices=["table", "json"],
            default="table",
            help="Output format (default: table)",
        )
        parser.add_argument(
            "--column",
            action="append",
            help="Column to include in output (can be used multiple times)",
        )
        return parser

    def take_action(self, parsed_args):
        """Fetch the resources via the conductor task and render them."""
        hostname = parsed_args.hostname
        resourcetype = parsed_args.resourcetype
        output_format = parsed_args.format
        columns = parsed_args.column
        logger.info(
            f"Redfish list command called with hostname: {hostname}, resourcetype: {resourcetype}, format: {output_format}"
        )

        # Use Celery task to get Redfish resources
        task_result = get_redfish_resources.delay(hostname, resourcetype)
        # NOTE(review): get() blocks with no timeout — confirm that an
        # unbounded wait on the conductor is intended here.
        result = task_result.get()

        if output_format == "json":
            # An empty/absent result serializes to "[]", matching the
            # table path's "no resources" semantics.
            data = result if result else []
            if data and columns:
                column_mappings = self._get_column_mappings(resourcetype)
                if column_mappings:
                    _, data_keys = self._get_filtered_columns(
                        column_mappings, columns
                    )
                    data = self._filter_json_data(data, data_keys)
            print(json.dumps(data, indent=2))
        else:
            display = {
                "EthernetInterfaces": self._display_ethernet_interfaces,
                "NetworkAdapters": self._display_network_adapters,
                "NetworkDeviceFunctions": self._display_network_device_functions,
            }.get(resourcetype)
            if result and display:
                display(result, columns)
            elif result:
                # Unknown resource type with data: dump it to the log only.
                logger.info(f"Retrieved resources: {result}")
            else:
                print(f"No {resourcetype} resources found for {hostname}")

    def _display_resources(self, resources, resourcetype, selected_columns=None):
        """Shared table rendering for any supported resource type."""
        if not resources:
            print(f"No {resourcetype} found")
            return

        column_mappings = self._get_column_mappings(resourcetype)
        self._filter_and_display_table(resources, column_mappings, selected_columns)

    def _display_ethernet_interfaces(self, interfaces, selected_columns=None):
        """Display EthernetInterfaces in a formatted table."""
        self._display_resources(interfaces, "EthernetInterfaces", selected_columns)

    def _display_network_adapters(self, adapters, selected_columns=None):
        """Display NetworkAdapters in a formatted table."""
        self._display_resources(adapters, "NetworkAdapters", selected_columns)

    def _display_network_device_functions(
        self, device_functions, selected_columns=None
    ):
        """Display NetworkDeviceFunctions in a formatted table."""
        self._display_resources(
            device_functions, "NetworkDeviceFunctions", selected_columns
        )
osism/commands/validate.py
CHANGED
osism/settings.py
CHANGED
@@ -54,3 +54,6 @@ SONIC_EXPORT_IDENTIFIER = os.getenv("SONIC_EXPORT_IDENTIFIER", "serial-number")
|
|
54
54
|
NETBOX_SECONDARIES = (
|
55
55
|
os.getenv("NETBOX_SECONDARIES", read_secret("NETBOX_SECONDARIES")) or "[]"
|
56
56
|
)
|
57
|
+
|
58
|
+
# Redfish connection timeout in seconds
|
59
|
+
REDFISH_TIMEOUT = int(os.getenv("REDFISH_TIMEOUT", "20"))
|
@@ -8,6 +8,7 @@ from loguru import logger
|
|
8
8
|
from osism.tasks import Config
|
9
9
|
from osism.tasks.conductor.config import get_configuration
|
10
10
|
from osism.tasks.conductor.ironic import sync_ironic as _sync_ironic
|
11
|
+
from osism.tasks.conductor.redfish import get_resources as _get_redfish_resources
|
11
12
|
from osism.tasks.conductor.sonic import sync_sonic as _sync_sonic
|
12
13
|
|
13
14
|
|
@@ -52,9 +53,15 @@ def sync_sonic(self, device_name=None, show_diff=True):
|
|
52
53
|
return _sync_sonic(device_name, self.request.id, show_diff)
|
53
54
|
|
54
55
|
|
56
|
+
@app.task(bind=True, name="osism.tasks.conductor.get_redfish_resources")
def get_redfish_resources(self, hostname, resource_type):
    """Celery task: fetch Redfish resources of *resource_type* for *hostname*.

    Thin wrapper that delegates to
    ``osism.tasks.conductor.redfish.get_resources`` (imported as
    ``_get_redfish_resources``); the bound task instance itself is unused.
    """
    return _get_redfish_resources(hostname, resource_type)
|
59
|
+
|
60
|
+
|
55
61
|
__all__ = [
|
56
62
|
"app",
|
57
63
|
"get_ironic_parameters",
|
64
|
+
"get_redfish_resources",
|
58
65
|
"sync_netbox",
|
59
66
|
"sync_ironic",
|
60
67
|
"sync_sonic",
|
osism/tasks/conductor/config.py
CHANGED
@@ -1,22 +1,12 @@
|
|
1
1
|
# SPDX-License-Identifier: Apache-2.0
|
2
2
|
|
3
|
-
import uuid
|
4
|
-
|
5
3
|
from loguru import logger
|
4
|
+
import validators
|
6
5
|
import yaml
|
7
6
|
|
8
7
|
from osism.tasks import Config, openstack
|
9
8
|
|
10
9
|
|
11
|
-
def is_uuid(value):
|
12
|
-
"""Check if a string is a valid UUID."""
|
13
|
-
try:
|
14
|
-
uuid.UUID(value)
|
15
|
-
return True
|
16
|
-
except (ValueError, AttributeError):
|
17
|
-
return False
|
18
|
-
|
19
|
-
|
20
10
|
def get_configuration():
|
21
11
|
with open("/etc/conductor.yml") as fp:
|
22
12
|
configuration = yaml.load(fp, Loader=yaml.SafeLoader)
|
@@ -39,54 +29,81 @@ def get_configuration():
|
|
39
29
|
image_source = configuration["ironic_parameters"]["instance_info"][
|
40
30
|
"image_source"
|
41
31
|
]
|
42
|
-
if not
|
32
|
+
if not validators.uuid(image_source) and not validators.url(
|
33
|
+
image_source
|
34
|
+
):
|
43
35
|
result = openstack.image_get(image_source)
|
44
|
-
|
45
|
-
"
|
46
|
-
|
36
|
+
if result:
|
37
|
+
configuration["ironic_parameters"]["instance_info"][
|
38
|
+
"image_source"
|
39
|
+
] = result.id
|
40
|
+
else:
|
41
|
+
logger.warning(f"Could not resolve image ID for {image_source}")
|
47
42
|
|
48
43
|
if "driver_info" in configuration["ironic_parameters"]:
|
49
44
|
if "deploy_kernel" in configuration["ironic_parameters"]["driver_info"]:
|
50
45
|
deploy_kernel = configuration["ironic_parameters"]["driver_info"][
|
51
46
|
"deploy_kernel"
|
52
47
|
]
|
53
|
-
if not
|
48
|
+
if not validators.uuid(deploy_kernel) and not validators.url(
|
49
|
+
deploy_kernel
|
50
|
+
):
|
54
51
|
result = openstack.image_get(deploy_kernel)
|
55
|
-
|
56
|
-
"
|
57
|
-
|
52
|
+
if result:
|
53
|
+
configuration["ironic_parameters"]["driver_info"][
|
54
|
+
"deploy_kernel"
|
55
|
+
] = result.id
|
56
|
+
else:
|
57
|
+
logger.warning(
|
58
|
+
f"Could not resolve image ID for {deploy_kernel}"
|
59
|
+
)
|
58
60
|
|
59
61
|
if "deploy_ramdisk" in configuration["ironic_parameters"]["driver_info"]:
|
60
62
|
deploy_ramdisk = configuration["ironic_parameters"]["driver_info"][
|
61
63
|
"deploy_ramdisk"
|
62
64
|
]
|
63
|
-
if not
|
65
|
+
if not validators.uuid(deploy_ramdisk) and not validators.url(
|
66
|
+
deploy_ramdisk
|
67
|
+
):
|
64
68
|
result = openstack.image_get(deploy_ramdisk)
|
65
|
-
|
66
|
-
"
|
67
|
-
|
69
|
+
if result:
|
70
|
+
configuration["ironic_parameters"]["driver_info"][
|
71
|
+
"deploy_ramdisk"
|
72
|
+
] = result.id
|
73
|
+
else:
|
74
|
+
logger.warning(
|
75
|
+
f"Could not resolve image ID for {deploy_ramdisk}"
|
76
|
+
)
|
68
77
|
|
69
78
|
if "cleaning_network" in configuration["ironic_parameters"]["driver_info"]:
|
70
|
-
|
79
|
+
cleaning_network = configuration["ironic_parameters"]["driver_info"][
|
80
|
+
"cleaning_network"
|
81
|
+
]
|
82
|
+
result = openstack.network_get(cleaning_network)
|
83
|
+
if result:
|
71
84
|
configuration["ironic_parameters"]["driver_info"][
|
72
85
|
"cleaning_network"
|
73
|
-
]
|
74
|
-
|
75
|
-
|
76
|
-
|
77
|
-
|
86
|
+
] = result.id
|
87
|
+
else:
|
88
|
+
logger.warning(
|
89
|
+
f"Could not resolve network ID for {cleaning_network}"
|
90
|
+
)
|
78
91
|
|
79
92
|
if (
|
80
93
|
"provisioning_network"
|
81
94
|
in configuration["ironic_parameters"]["driver_info"]
|
82
95
|
):
|
83
|
-
|
96
|
+
provisioning_network = configuration["ironic_parameters"][
|
97
|
+
"driver_info"
|
98
|
+
]["provisioning_network"]
|
99
|
+
result = openstack.network_get(provisioning_network)
|
100
|
+
if result:
|
84
101
|
configuration["ironic_parameters"]["driver_info"][
|
85
102
|
"provisioning_network"
|
86
|
-
]
|
87
|
-
|
88
|
-
|
89
|
-
|
90
|
-
|
103
|
+
] = result.id
|
104
|
+
else:
|
105
|
+
logger.warning(
|
106
|
+
f"Could not resolve network ID for {provisioning_network}"
|
107
|
+
)
|
91
108
|
|
92
109
|
return configuration
|
osism/tasks/conductor/ironic.py
CHANGED
@@ -34,6 +34,100 @@ driver_params = {
|
|
34
34
|
}
|
35
35
|
|
36
36
|
|
37
|
+
def _prepare_node_attributes(device, get_ironic_parameters):
    """Build the Ironic node attribute dict for a NetBox *device*.

    Starts from the defaults returned by *get_ironic_parameters*, merges
    per-device overrides from the NetBox custom field ``ironic_parameters``,
    decrypts vaulted values, renders the driver credential/address Jinja2
    templates from the device ``secrets``, and copies provisioning metadata
    into the node's ``extra`` field.
    """
    node_attributes = get_ironic_parameters()
    if (
        "ironic_parameters" in device.custom_fields
        and device.custom_fields["ironic_parameters"]
    ):
        # NOTE: Update node attributes with overrides from NetBox device
        deep_merge(node_attributes, device.custom_fields["ironic_parameters"])

    # NOTE: Decrypt ansible vaulted secrets
    vault = get_vault()
    deep_decrypt(node_attributes, vault)

    node_secrets = device.custom_fields.get("secrets", {})
    if node_secrets is None:
        node_secrets = {}
    deep_decrypt(node_secrets, vault)

    if (
        "driver" in node_attributes
        and node_attributes["driver"] in driver_params.keys()
    ):
        if "driver_info" in node_attributes:
            # NOTE: Remove all fields belonging to a different driver
            unused_drivers = [
                driver
                for driver in driver_params.keys()
                if driver != node_attributes["driver"]
            ]
            for key in list(node_attributes["driver_info"].keys()):
                for driver in unused_drivers:
                    if key.startswith(driver + "_"):
                        node_attributes["driver_info"].pop(key, None)

            # NOTE: Render driver username field
            username_key = driver_params[node_attributes["driver"]]["username"]
            if username_key in node_attributes["driver_info"]:
                node_attributes["driver_info"][username_key] = (
                    jinja2.Environment(loader=jinja2.BaseLoader())
                    .from_string(node_attributes["driver_info"][username_key])
                    .render(
                        remote_board_username=str(
                            node_secrets.get("remote_board_username", "admin")
                        )
                    )
                )

            # NOTE: Render driver password field
            password_key = driver_params[node_attributes["driver"]]["password"]
            if password_key in node_attributes["driver_info"]:
                node_attributes["driver_info"][password_key] = (
                    jinja2.Environment(loader=jinja2.BaseLoader())
                    .from_string(node_attributes["driver_info"][password_key])
                    .render(
                        remote_board_password=str(
                            node_secrets.get("remote_board_password", "password")
                        )
                    )
                )

            # NOTE: Render driver address field
            address_key = driver_params[node_attributes["driver"]]["address"]
            if address_key in node_attributes["driver_info"]:
                oob_ip_result = get_device_oob_ip(device)
                if oob_ip_result:
                    # Extract IP address, ignore prefix length
                    oob_ip, _ = oob_ip_result
                    node_attributes["driver_info"][address_key] = (
                        jinja2.Environment(loader=jinja2.BaseLoader())
                        .from_string(node_attributes["driver_info"][address_key])
                        .render(remote_board_address=oob_ip)
                    )
    node_attributes.update({"resource_class": device.name})
    if "extra" not in node_attributes:
        node_attributes["extra"] = {}
    # NOTE: Copy instance_info into extra field, because ironic removes it on
    # undeployment. This way it may be re-added on undeploy without querying
    # the netbox again
    if "instance_info" in node_attributes and node_attributes["instance_info"]:
        node_attributes["extra"].update(
            {"instance_info": json.dumps(node_attributes["instance_info"])}
        )
    # NOTE: Write metadata used for provisioning into 'extra' field, so that
    # it is available during node deploy without querying the netbox again
    if (
        "netplan_parameters" in device.custom_fields
        and device.custom_fields["netplan_parameters"]
    ):
        node_attributes["extra"].update(
            {
                "netplan_parameters": json.dumps(
                    device.custom_fields["netplan_parameters"]
                )
            }
        )
    if (
        "frr_parameters" in device.custom_fields
        and device.custom_fields["frr_parameters"]
    ):
        node_attributes["extra"].update(
            {"frr_parameters": json.dumps(device.custom_fields["frr_parameters"])}
        )

    return node_attributes
|
129
|
+
|
130
|
+
|
37
131
|
def sync_ironic(request_id, get_ironic_parameters, force_update=False):
|
38
132
|
osism_utils.push_task_output(
|
39
133
|
request_id,
|
@@ -79,105 +173,7 @@ def sync_ironic(request_id, get_ironic_parameters, force_update=False):
|
|
79
173
|
|
80
174
|
node_interfaces = list(netbox.get_interfaces_by_device(device.name))
|
81
175
|
|
82
|
-
node_attributes = get_ironic_parameters
|
83
|
-
if (
|
84
|
-
"ironic_parameters" in device.custom_fields
|
85
|
-
and device.custom_fields["ironic_parameters"]
|
86
|
-
):
|
87
|
-
# NOTE: Update node attributes with overrides from NetBox device
|
88
|
-
deep_merge(node_attributes, device.custom_fields["ironic_parameters"])
|
89
|
-
|
90
|
-
# NOTE: Decrypt ansible vaulted secrets
|
91
|
-
vault = get_vault()
|
92
|
-
deep_decrypt(node_attributes, vault)
|
93
|
-
|
94
|
-
node_secrets = device.custom_fields.get("secrets", {})
|
95
|
-
if node_secrets is None:
|
96
|
-
node_secrets = {}
|
97
|
-
deep_decrypt(node_secrets, vault)
|
98
|
-
|
99
|
-
if (
|
100
|
-
"driver" in node_attributes
|
101
|
-
and node_attributes["driver"] in driver_params.keys()
|
102
|
-
):
|
103
|
-
if "driver_info" in node_attributes:
|
104
|
-
# NOTE: Remove all fields belonging to a different driver
|
105
|
-
unused_drivers = [
|
106
|
-
driver
|
107
|
-
for driver in driver_params.keys()
|
108
|
-
if driver != node_attributes["driver"]
|
109
|
-
]
|
110
|
-
for key in list(node_attributes["driver_info"].keys()):
|
111
|
-
for driver in unused_drivers:
|
112
|
-
if key.startswith(driver + "_"):
|
113
|
-
node_attributes["driver_info"].pop(key, None)
|
114
|
-
|
115
|
-
# NOTE: Render driver username field
|
116
|
-
username_key = driver_params[node_attributes["driver"]]["username"]
|
117
|
-
if username_key in node_attributes["driver_info"]:
|
118
|
-
node_attributes["driver_info"][username_key] = (
|
119
|
-
jinja2.Environment(loader=jinja2.BaseLoader())
|
120
|
-
.from_string(node_attributes["driver_info"][username_key])
|
121
|
-
.render(
|
122
|
-
remote_board_username=str(
|
123
|
-
node_secrets.get("remote_board_username", "admin")
|
124
|
-
)
|
125
|
-
)
|
126
|
-
)
|
127
|
-
|
128
|
-
# NOTE: Render driver password field
|
129
|
-
password_key = driver_params[node_attributes["driver"]]["password"]
|
130
|
-
if password_key in node_attributes["driver_info"]:
|
131
|
-
node_attributes["driver_info"][password_key] = (
|
132
|
-
jinja2.Environment(loader=jinja2.BaseLoader())
|
133
|
-
.from_string(node_attributes["driver_info"][password_key])
|
134
|
-
.render(
|
135
|
-
remote_board_password=str(
|
136
|
-
node_secrets.get("remote_board_password", "password")
|
137
|
-
)
|
138
|
-
)
|
139
|
-
)
|
140
|
-
|
141
|
-
# NOTE: Render driver address field
|
142
|
-
address_key = driver_params[node_attributes["driver"]]["address"]
|
143
|
-
if address_key in node_attributes["driver_info"]:
|
144
|
-
oob_ip_result = get_device_oob_ip(device)
|
145
|
-
if oob_ip_result:
|
146
|
-
oob_ip, _ = (
|
147
|
-
oob_ip_result # Extract IP address, ignore prefix length
|
148
|
-
)
|
149
|
-
node_attributes["driver_info"][address_key] = (
|
150
|
-
jinja2.Environment(loader=jinja2.BaseLoader())
|
151
|
-
.from_string(node_attributes["driver_info"][address_key])
|
152
|
-
.render(remote_board_address=oob_ip)
|
153
|
-
)
|
154
|
-
node_attributes.update({"resource_class": device.name})
|
155
|
-
if "extra" not in node_attributes:
|
156
|
-
node_attributes["extra"] = {}
|
157
|
-
# NOTE: Copy instance_info into extra field. because ironic removes it on undeployment. This way it may be readded on undeploy without querying the netbox again
|
158
|
-
if "instance_info" in node_attributes and node_attributes["instance_info"]:
|
159
|
-
node_attributes["extra"].update(
|
160
|
-
{"instance_info": json.dumps(node_attributes["instance_info"])}
|
161
|
-
)
|
162
|
-
# NOTE: Write metadata used for provisioning into 'extra' field, so that it is available during node deploy without querying the netbox again
|
163
|
-
if (
|
164
|
-
"netplan_parameters" in device.custom_fields
|
165
|
-
and device.custom_fields["netplan_parameters"]
|
166
|
-
):
|
167
|
-
node_attributes["extra"].update(
|
168
|
-
{
|
169
|
-
"netplan_parameters": json.dumps(
|
170
|
-
device.custom_fields["netplan_parameters"]
|
171
|
-
)
|
172
|
-
}
|
173
|
-
)
|
174
|
-
if (
|
175
|
-
"frr_parameters" in device.custom_fields
|
176
|
-
and device.custom_fields["frr_parameters"]
|
177
|
-
):
|
178
|
-
node_attributes["extra"].update(
|
179
|
-
{"frr_parameters": json.dumps(device.custom_fields["frr_parameters"])}
|
180
|
-
)
|
176
|
+
node_attributes = _prepare_node_attributes(device, get_ironic_parameters)
|
181
177
|
ports_attributes = [
|
182
178
|
dict(address=interface.mac_address)
|
183
179
|
for interface in node_interfaces
|